diff --git a/.github/license-check/config.json b/.github/license-check/config.json index 0625a49e0d..ffaa32c450 100644 --- a/.github/license-check/config.json +++ b/.github/license-check/config.json @@ -3,6 +3,19 @@ "include": [ "**/*.rs" ], + "exclude": [ + "pallets/collator-selection/**", + "pallets/custom-signatures/**", + "precompiles/utils/**", + "vendor/**" + ], "license": "./.github/license-check/headers/HEADER-GNUv3" + }, + { + "include": [ + "pallets/collator-selection/*.rs", + "pallets/custom-signatures/*.rs" + ], + "license": "./.github/license-check/headers/HEADER-APACHE2" } ] diff --git a/.github/license-check/headers/HEADER-APACHE2 b/.github/license-check/headers/HEADER-APACHE2 new file mode 100644 index 0000000000..2fae0a167e --- /dev/null +++ b/.github/license-check/headers/HEADER-APACHE2 @@ -0,0 +1,16 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
diff --git a/.github/scripts/generate-release-body.ts b/.github/scripts/generate-release-body.ts index a77b9b5c32..67d9765261 100644 --- a/.github/scripts/generate-release-body.ts +++ b/.github/scripts/generate-release-body.ts @@ -255,7 +255,7 @@ async function main() { getRuntimeInfo(argv["srtool-report-folder"], runtimeName) ); - const moduleLinks = ["substrate", "polkadot", "cumulus", "frontier", "astar-frame"].map((repoName) => ({ + const moduleLinks = ["substrate", "polkadot", "cumulus", "frontier"].map((repoName) => ({ name: repoName, link: getCompareLink(repoName, previousTag, newTag), })); @@ -339,7 +339,7 @@ ${moduleLinks.map((modules) => `${capitalize(modules.name)}: ${modules.link}`).j | \`Ubuntu x86_64\` | [Download](https://github.com/AstarNetwork/Astar/releases/download/${newTag}/astar-collator-${newTag}-ubuntu-x86_64.tar.gz) | | \`Ubuntu aarch64\` | [Download](https://github.com/AstarNetwork/Astar/releases/download/${newTag}/astar-collator-${newTag}-ubuntu-aarch64.tar.gz) | -[](https://hub.docker.com/r/staketechnologies/astar-collator/tags) +[](https://hub.docker.com/r/staketechnologies/astar-collator/tags) ` console.log(template); diff --git a/.github/workflows/base_checks.yaml b/.github/workflows/base_checks.yaml index d61c3b151d..9fab7737b9 100644 --- a/.github/workflows/base_checks.yaml +++ b/.github/workflows/base_checks.yaml @@ -7,6 +7,16 @@ on: - v[0-9]+.[0-9]+.[0-9]+* workflow_dispatch: jobs: + clean-up-actions: + runs-on: ubuntu-latest + steps: + - name: Cancel Previous Runs + # Only cancel non-master branch runs + if: ${{ github.ref != 'refs/heads/master' }} + uses: styfle/cancel-workflow-action@0.11.0 + with: + access_token: ${{ github.token }} + compile-and-check: runs-on: ubuntu-latest steps: diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml new file mode 100644 index 0000000000..3134714845 --- /dev/null +++ b/.github/workflows/coverage.yaml @@ -0,0 +1,74 @@ +name: Code coverage +on: + pull_request: + types: 
[opened, reopened, synchronize, ready_for_review] + workflow_dispatch: +jobs: + clean-up-actions: + runs-on: ubuntu-latest + steps: + - name: Cancel Previous Runs + # Only cancel non-master branch runs + if: ${{ github.ref != 'refs/heads/master' }} + uses: styfle/cancel-workflow-action@0.11.0 + with: + access_token: ${{ github.token }} + + coverage: + if: github.event.pull_request.draft == false + runs-on: ubuntu-latest + steps: + - name: Checkout the source code + uses: actions/checkout@v3 + + - name: Install & display rust toolchain + run: rustup show + + - name: Check targets are installed correctly + run: rustup target list --installed + + - uses: actions/cache@v3 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + target/ + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.toml') }} + restore-keys: | + ${{ runner.os }}-cargo- + + - name: Install deps + run: sudo apt -y install protobuf-compiler + + - name: Run all tests & Generate report + uses: actions-rs/tarpaulin@v0.1 + with: + version: 0.22.0 + args: '--workspace -e astar-collator xcm-tools local-runtime shibuya-runtime shiden-runtime astar-runtime integration-tests --exclude-files vendor/* bin/* runtime/* tests/* **/mock.rs **/weights.rs' + out-type: Xml + + - name: Code Coverage Summary Report + uses: irongut/CodeCoverageSummary@v1.2.0 + with: + filename: cobertura.xml + badge: true + fail_below_min: true + format: markdown + hide_branch_rate: false + hide_complexity: true + indicators: true + output: both + thresholds: '50 80' + + - name: Add Coverage PR Comment + uses: marocchino/sticky-pull-request-comment@v2 + if: github.event_name == 'pull_request' + with: + recreate: true + path: code-coverage-results.md + + - name: Archive code coverage results + uses: actions/upload-artifact@v1 + with: + name: code-coverage-report + path: cobertura.xml diff --git a/.github/workflows/static-analysis.yml b/.github/workflows/static-analysis.yml index d986ecf584..e02991191b 100644 --- 
a/.github/workflows/static-analysis.yml +++ b/.github/workflows/static-analysis.yml @@ -1,6 +1,16 @@ name: Static Analysis on: [push, workflow_dispatch] jobs: + clean-up-actions: + runs-on: ubuntu-latest + steps: + - name: Cancel Previous Runs + # Only cancel non-master branch runs + if: ${{ github.ref != 'refs/heads/master' }} + uses: styfle/cancel-workflow-action@0.11.0 + with: + access_token: ${{ github.token }} + fmt: if: github.event.pull_request.draft == false runs-on: ubuntu-latest @@ -64,7 +74,7 @@ jobs: - name: Checkout the source code uses: actions/checkout@v3 - - name: Check license + - name: Check license uses: viperproject/check-license-header@v2 with: path: ./ diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index acb41063fa..87b6177a83 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -7,7 +7,17 @@ on: - v[0-9]+.[0-9]+.[0-9]+* workflow_dispatch: jobs: - run-tests: + clean-up-actions: + runs-on: ubuntu-latest + steps: + - name: Cancel Previous Runs + # Only cancel non-master branch runs + if: ${{ github.ref != 'refs/heads/master' }} + uses: styfle/cancel-workflow-action@0.11.0 + with: + access_token: ${{ github.token }} + + test-runtimes: runs-on: ubuntu-latest steps: - name: Free disk space diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 90df720ae6..c6bf2cd0ab 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -12,10 +12,6 @@ We welcome any types of contributions that can improve the project/network in an The Astar Network project uses GitHub as the main source control hosting service. Most forms of communication regarding changes to the code will be done within the issue board of the repository. -The core of Astar Network codebase is split into two repositories: -* [Astar](https://github.com/AstarNetwork/Astar) -* [astar-frame](https://github.com/AstarNetwork/astar-frame) - ### Opening an Issue Contributions within GitHub can take on the following forms: @@ -32,7 +28,7 @@ In short: 1. 
Open an issue regarding a bug fix or feature request (fill in our issue templates) 2. Briefly describe how you plan to make changes to the code -3. Fork the current default branch on _Astar_ or _astar-frame_ or both (depending on where you need to make changes) +3. Fork the current default branch on _Astar_. 4. Open a pull request to the default branch (fill in our pull request template) and add the appropriate label. 5. Ensure all workflow checks have passed 6. Wait for the maintainers approval or change requests @@ -75,7 +71,7 @@ The expected flow is: ### Contributor Licenses -By contributing, you agree that your contributions will be licensed under the [GNU General Public License v3.0](https://github.com/AstarNetwork/astar-frame/blob/polkadot-v0.9.19/LICENSE) as is with the Astar source code. If you have any concerns regarding this matter, please contact the maintainer. +By contributing, you agree that your contributions will be licensed under the [GNU General Public License v3.0](https://github.com/AstarNetwork/Astar/blob/master/LICENSE) as is with the Astar source code. If you have any concerns regarding this matter, please contact the maintainer. 
## Community Contribution diff --git a/Cargo.lock b/Cargo.lock index 255f9d8f30..bf5dbf95d1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -115,9 +115,9 @@ dependencies = [ [[package]] name = "aes-gcm" -version = "0.10.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82e1366e0c69c9f927b1fa5ce2c7bf9eafc8f9268c0b9800729e8b267612447c" +checksum = "209b47e8954a928e1d72e86eca7000ebb6655fe1436d33eefc2201cad027e237" dependencies = [ "aead 0.5.2", "aes 0.8.2", @@ -153,7 +153,7 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.9", + "getrandom 0.2.10", "once_cell", "version_check", ] @@ -165,7 +165,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" dependencies = [ "cfg-if", - "getrandom 0.2.9", + "getrandom 0.2.10", "once_cell", "version_check", ] @@ -181,9 +181,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67fc08ce920c31afb70f013dcce1bfc3a3195de6a228474e45e1f145b36f8d04" +checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41" dependencies = [ "memchr", ] @@ -194,6 +194,12 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4436e0292ab1bb631b42973c61205e704475fe8126af845c8d923c0996328127" +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + [[package]] name = "android_system_properties" version = "0.1.5" @@ -308,9 +314,9 @@ checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" [[package]] name = "arrayvec" -version = 
"0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" +checksum = "8868f09ff8cea88b079da74ae569d9b8c62a23c68c746240b704ee6f7525c89c" [[package]] name = "asn1-rs" @@ -325,7 +331,7 @@ dependencies = [ "num-traits", "rusticata-macros", "thiserror", - "time 0.3.21", + "time 0.3.22", ] [[package]] @@ -341,7 +347,7 @@ dependencies = [ "num-traits", "rusticata-macros", "thiserror", - "time 0.3.21", + "time 0.3.22", ] [[package]] @@ -394,7 +400,6 @@ checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" [[package]] name = "assets-chain-extension-types" version = "0.1.0" -source = "git+https://github.com/AstarNetwork/astar-frame?branch=polkadot-v0.9.39#a3f214919bbbdefeb6962f528b35b5384626961d" dependencies = [ "frame-system", "pallet-contracts", @@ -548,6 +553,7 @@ dependencies = [ "pallet-evm-precompile-assets-erc20", "pallet-evm-precompile-blake2", "pallet-evm-precompile-bn128", + "pallet-evm-precompile-dapps-staking", "pallet-evm-precompile-dispatch", "pallet-evm-precompile-ed25519", "pallet-evm-precompile-modexp", @@ -558,7 +564,6 @@ dependencies = [ "pallet-evm-precompile-xcm", "pallet-identity", "pallet-multisig", - "pallet-precompile-dapps-staking", "pallet-proxy", "pallet-session", "pallet-state-trie-migration", @@ -612,9 +617,9 @@ dependencies = [ "log", "parking", "polling", - "rustix 0.37.19", + "rustix 0.37.20", "slab", - "socket2", + "socket2 0.4.9", "waker-fn", ] @@ -635,7 +640,7 @@ checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -697,7 +702,7 @@ dependencies = [ "cfg-if", "libc", "miniz_oxide 0.6.2", - "object 0.30.3", + "object 0.30.4", "rustc-demangle", ] @@ -727,9 +732,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.0" 
+version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" +checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" [[package]] name = "base64ct" @@ -861,7 +866,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -871,7 +876,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c2f0dc9a68c6317d884f97cc36cf5a3d20ba14ce404227df55e1af708ab04bc" dependencies = [ "arrayref", - "arrayvec 0.7.2", + "arrayvec 0.7.3", "constant_time_eq", ] @@ -882,22 +887,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6637f448b9e61dfadbdcbae9a885fadee1f3eaffb1f8d3c1965d3ade8bdfd44f" dependencies = [ "arrayref", - "arrayvec 0.7.2", + "arrayvec 0.7.3", "constant_time_eq", ] [[package]] name = "blake3" -version = "1.3.3" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42ae2468a89544a466886840aa467a25b766499f4f04bf7d9fcd10ecee9fccef" +checksum = "729b71f35bd3fa1a4c86b85d32c8b9069ea7fe14f7a53cfabb65f62d4265b888" dependencies = [ "arrayref", - "arrayvec 0.7.2", + "arrayvec 0.7.3", "cc", "cfg-if", "constant_time_eq", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -957,9 +962,9 @@ checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" [[package]] name = "bounded-collections" -version = "0.1.6" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3888522b497857eb606bf51695988dba7096941822c1bcf676e3a929a9ae7a0" +checksum = "eb5b05133427c07c4776906f673ccf36c21b102c9829c641a5b56bd151d44fd6" dependencies = [ "log", "parity-scale-codec", @@ -984,9 +989,20 @@ checksum = 
"771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" [[package]] name = "bstr" -version = "1.4.0" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d4260bcc2e8fc9df1eac4919a720effeb63a3f0952f5bf4944adfa18897f09" +checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" +dependencies = [ + "lazy_static", + "memchr", + "regex-automata", +] + +[[package]] +name = "bstr" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a246e68bb43f6cd9db24bea052a53e40405417c5fb372e3d1a8a7f770a564ef5" dependencies = [ "memchr", "serde", @@ -1003,9 +1019,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.12.2" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c6ed94e98ecff0c12dd1b04c15ec0d7d9458ca8fe806cea6f12954efe74c63b" +checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" [[package]] name = "byte-slice-cast" @@ -1157,13 +1173,13 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.24" +version = "0.4.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" +checksum = "ec837a71355b28f6556dbd569b37b3f363091c0bd4b2e735674521b4c5fd9bc5" dependencies = [ + "android-tzdata", "iana-time-zone", "js-sys", - "num-integer", "num-traits", "time 0.1.45", "wasm-bindgen", @@ -1233,9 +1249,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.2.7" +version = "4.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34d21f9bf1b425d2968943631ec91202fe5e837264063503708b83013f8fc938" +checksum = "ca8f255e4b8027970e78db75e78831229c9815fdbfa67eb1a1b777a62e24b4a0" dependencies = [ "clap_builder", "clap_derive", @@ -1244,9 +1260,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.2.7" +version = "4.3.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "914c8c79fb560f238ef6429439a30023c862f7a28e688c58f7203f12b29970bd" +checksum = "acd4f3c17c83b0ba34ffbc4f8bbd74f079413f747f84a6f89292f138057e36ab" dependencies = [ "anstream", "anstyle", @@ -1257,21 +1273,21 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.2.0" +version = "4.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9644cd56d6b87dbe899ef8b053e331c0637664e9e21a33dfcdc36093f5c5c4" +checksum = "b8cd2b2a819ad6eec39e8f1d6b53001af1e5469f8c177579cdaeb313115b825f" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] name = "clap_lex" -version = "0.4.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a2dd5a6fe8c6e3502f568a6353e5273bbb15193ad9a89e457b9970798efbea1" +checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b" [[package]] name = "coarsetime" @@ -1303,9 +1319,9 @@ checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" [[package]] name = "comfy-table" -version = "6.1.4" +version = "6.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e7b787b0dc42e8111badfdbe4c3059158ccb2db8780352fa1b01e8ccf45cc4d" +checksum = "7e959d788268e3bf9d35ace83e81b124190378e4c91c9067524675e33394b8ba" dependencies = [ "strum", "strum_macros", @@ -1321,6 +1337,18 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "console" +version = "0.15.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c926e00cc70edefdc64d3a5ff31cc65bb97a3460097762bd23afb4d8145fccf8" +dependencies = [ + "encode_unicode", + "lazy_static", + "libc", + "windows-sys 0.45.0", +] + [[package]] name = "const-oid" version = "0.9.2" @@ -1329,9 +1357,9 @@ checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913" [[package]] name = "constant_time_eq" -version = "0.2.5" +version = 
"0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13418e745008f7349ec7e449155f419a61b92b58a99cc3616942b926825ec76b" +checksum = "21a53c0a4d288377e7415b53dcfc3c04da5cdc2cc95c8d5ac178b58f0b861ad6" [[package]] name = "convert_case" @@ -1407,7 +1435,7 @@ version = "0.93.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "253531aca9b6f56103c9420369db3263e784df39aa1c90685a1f69cfbba0623e" dependencies = [ - "arrayvec 0.7.2", + "arrayvec 0.7.3", "bumpalo", "cranelift-bforest", "cranelift-codegen-meta", @@ -1538,14 +1566,14 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.14" +version = "0.9.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" +checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" dependencies = [ "autocfg", "cfg-if", "crossbeam-utils", - "memoffset 0.8.0", + "memoffset 0.9.0", "scopeguard", ] @@ -1561,9 +1589,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.15" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" +checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" dependencies = [ "cfg-if", ] @@ -2179,9 +2207,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f61f1b6389c3fe1c316bf8a4dccc90a38208354b330925bce1f74a6c4756eb93" +checksum = "109308c20e8445959c2792e81871054c6a17e6976489a93d2769641a2ba5839c" dependencies = [ "cc", "cxxbridge-flags", @@ -2191,9 +2219,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"12cee708e8962df2aeb38f594aae5d827c022b6460ac71a7a3e2c3c2aae5a07b" +checksum = "daf4c6755cdf10798b97510e0e2b3edb9573032bd9379de8fffa59d68165494f" dependencies = [ "cc", "codespan-reporting", @@ -2201,30 +2229,29 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] name = "cxxbridge-flags" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7944172ae7e4068c533afbb984114a56c46e9ccddda550499caa222902c7f7bb" +checksum = "882074421238e84fe3b4c65d0081de34e5b323bf64555d3e61991f76eb64a7bb" [[package]] name = "cxxbridge-macro" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2345488264226bf682893e25de0769f3360aac9957980ec49361b083ddaa5bc5" +checksum = "4a076022ece33e7686fb76513518e219cca4fce5750a8ae6d1ce6c0f48fd1af9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] name = "dapps-staking-chain-extension-types" version = "1.1.0" -source = "git+https://github.com/AstarNetwork/astar-frame?branch=polkadot-v0.9.39#a3f214919bbbdefeb6962f528b35b5384626961d" dependencies = [ "frame-support", "parity-scale-codec", @@ -2270,15 +2297,15 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.3.3" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23d8666cb01533c39dde32bcbab8e227b4ed6679b2c925eba05feabea39508fb" +checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" [[package]] name = "data-encoding-macro" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86927b7cd2fe88fa698b87404b287ab98d1a0063a34071d92e575b72d3029aca" +checksum = "c904b33cc60130e1aeea4956ab803d08a3f4a0ca82d64ed757afac3891f2bb99" dependencies = [ "data-encoding", "data-encoding-macro-internal", @@ -2286,9 +2313,9 @@ dependencies = [ [[package]] name = 
"data-encoding-macro-internal" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5bbed42daaa95e780b60a50546aa345b8413a1e46f9a40a12907d3598f038db" +checksum = "8fdf3fce3ce863539ec1d7fd1b6dcc3c645663376b43ed376bbf887733e4f772" dependencies = [ "data-encoding", "syn 1.0.109", @@ -2425,9 +2452,9 @@ dependencies = [ [[package]] name = "digest" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer 0.10.4", "crypto-common", @@ -2483,7 +2510,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -2595,7 +2622,7 @@ dependencies = [ "base16ct", "crypto-bigint", "der", - "digest 0.10.6", + "digest 0.10.7", "ff", "generic-array 0.14.7", "group", @@ -2608,6 +2635,12 @@ dependencies = [ "zeroize", ] +[[package]] +name = "encode_unicode" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" + [[package]] name = "enum-as-inner" version = "0.5.1" @@ -2637,7 +2670,7 @@ checksum = "5e9a1f9f7d83e59740248a6e14ecf93929ade55027844dfcea78beafccc15745" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -2648,7 +2681,7 @@ checksum = "48016319042fb7c87b78d2993084a831793a897a5cd1a2a67cab9d1eeb4b7d76" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -2820,7 +2853,6 @@ dependencies = [ [[package]] name = "evm-tracing-events" version = "0.1.0" -source = "git+https://github.com/AstarNetwork/astar-frame?branch=polkadot-v0.9.39#a3f214919bbbdefeb6962f528b35b5384626961d" dependencies = [ 
"environmental", "ethereum", @@ -3163,9 +3195,9 @@ dependencies = [ [[package]] name = "form_urlencoded" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" +checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" dependencies = [ "percent-encoding", ] @@ -3639,7 +3671,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -3741,9 +3773,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4" +checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if", "libc", @@ -3800,7 +3832,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "029d74589adefde59de1a0c4f4732695c32805624aec7b68d91503d4dba79afc" dependencies = [ "aho-corasick 0.7.20", - "bstr", + "bstr 1.5.0", "fnv", "log", "regex", @@ -3819,9 +3851,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.18" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17f8a914c2987b688368b5138aa05321db91f4090cf26118185672ad588bce21" +checksum = "d357c7ae988e7d2182f7d7871d0b963962420b0678b0997ce7de72001aeab782" dependencies = [ "bytes", "fnv", @@ -3838,9 +3870,9 @@ dependencies = [ [[package]] name = "handlebars" -version = "4.3.6" +version = "4.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "035ef95d03713f2c347a72547b7cd38cbc9af7cd51e6099fb62d586d4a6dee3a" +checksum = "83c3372087601b532857d332f5957cbae686da52bb7810bf038c3e3c3cc2fa0d" dependencies = [ "log", "pest", @@ -3969,7 +4001,7 @@ version = "0.12.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -4057,7 +4089,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite 0.2.9", - "socket2", + "socket2 0.4.9", "tokio", "tower-service", "tracing", @@ -4081,9 +4113,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.56" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0722cd7114b7de04316e7ea5456a0bbb20e4adb46fd27a3697adb812cff0f37c" +checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -4095,12 +4127,11 @@ dependencies = [ [[package]] name = "iana-time-zone-haiku" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" dependencies = [ - "cxx", - "cxx-build", + "cc", ] [[package]] @@ -4122,9 +4153,9 @@ dependencies = [ [[package]] name = "idna" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -4286,9 +4317,9 @@ dependencies = [ [[package]] name = "io-lifetimes" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c66c74d2ae7e79a5a8f7ac924adbe38ee42a859c6539ad869eb51f0b52dc220" +checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ "hermit-abi 0.3.1", "libc", @@ -4303,13 +4334,13 @@ checksum = 
"aa2f047c0a98b2f299aa5d6d7088443570faae494e9ae1305e48be000c9e0eb1" [[package]] name = "ipconfig" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd302af1b90f2463a98fa5ad469fc212c8e3175a41c3068601bfa2727591c5be" +checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2", + "socket2 0.5.3", "widestring", - "winapi", + "windows-sys 0.48.0", "winreg", ] @@ -4327,7 +4358,7 @@ checksum = "adcf93614601c8129ddf72e2d5633df827ba6551541c6d8c59520a371475be1f" dependencies = [ "hermit-abi 0.3.1", "io-lifetimes", - "rustix 0.37.19", + "rustix 0.37.20", "windows-sys 0.48.0", ] @@ -4357,9 +4388,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.62" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68c16e1bfd491478ab155fd8b4896b86f9ede344949b641e61501e07c2b8b4d5" +checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" dependencies = [ "wasm-bindgen", ] @@ -4406,7 +4437,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4e70b4439a751a5de7dd5ed55eacff78ebf4ffe0fc009cb1ebb11417f5b536b" dependencies = [ "anyhow", - "arrayvec 0.7.2", + "arrayvec 0.7.3", "async-lock", "async-trait", "beef", @@ -4671,9 +4702,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.144" +version = "0.2.146" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b00cc1c228a6782d0f076e7b232802e0c5689d41bb5df366f2a6b6621cfdfe1" +checksum = "f92be4933c13fd498862a9e02a3055f8a8d9c039ce33db97306fd5a6caa7f29b" [[package]] name = "libloading" @@ -4693,9 +4724,9 @@ checksum = "7fc7aa29613bd6a620df431842069224d8bc9011086b1db4c0e0cd47fa03ec9a" [[package]] name = "libm" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb" +checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" [[package]] name = "libp2p" @@ -4706,7 +4737,7 @@ dependencies = [ "bytes", "futures 0.3.28", "futures-timer", - "getrandom 0.2.9", + "getrandom 0.2.10", "instant", "libp2p-core 0.38.0", "libp2p-dns", @@ -4852,7 +4883,7 @@ version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2766dcd2be8c87d5e1f35487deb22d765f49c6ae1251b3633efe3b25698bd3d2" dependencies = [ - "arrayvec 0.7.2", + "arrayvec 0.7.3", "asynchronous-codec", "bytes", "either", @@ -4888,7 +4919,7 @@ dependencies = [ "log", "rand 0.8.5", "smallvec", - "socket2", + "socket2 0.4.9", "tokio", "trust-dns-proto", "void", @@ -5049,7 +5080,7 @@ dependencies = [ "libc", "libp2p-core 0.38.0", "log", - "socket2", + "socket2 0.4.9", "tokio", ] @@ -5265,9 +5296,9 @@ checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" [[package]] name = "linux-raw-sys" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ece97ea872ece730aed82664c424eb4c8291e1ff2480247ccf7409044bc6479f" +checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "local-runtime" @@ -5306,6 +5337,7 @@ dependencies = [ "pallet-evm-precompile-assets-erc20", "pallet-evm-precompile-blake2", "pallet-evm-precompile-bn128", + "pallet-evm-precompile-dapps-staking", "pallet-evm-precompile-dispatch", "pallet-evm-precompile-ed25519", "pallet-evm-precompile-modexp", @@ -5316,7 +5348,6 @@ dependencies = [ "pallet-evm-precompile-xvm", "pallet-grandpa", "pallet-insecure-randomness-collective-flip", - "pallet-precompile-dapps-staking", "pallet-preimage", "pallet-proxy", "pallet-scheduler", @@ -5346,9 +5377,9 @@ dependencies = [ [[package]] name = "lock_api" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" +checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" dependencies = [ "autocfg", "scopeguard", @@ -5356,12 +5387,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.17" +version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" -dependencies = [ - "cfg-if", -] +checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" [[package]] name = "lru" @@ -5456,7 +5484,7 @@ version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6365506850d44bff6e2fbcb5176cf63650e48bd45ef2fe2665ae1570e0f4b9ca" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -5471,7 +5499,7 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffc89ccdc6e10d6907450f753537ebc5c5d3460d2e4e62ea74bd571db62c0f9e" dependencies = [ - "rustix 0.37.19", + "rustix 0.37.20", ] [[package]] @@ -5503,9 +5531,9 @@ dependencies = [ [[package]] name = "memoffset" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" +checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" dependencies = [ "autocfg", ] @@ -5575,14 +5603,13 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.6" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" +checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" dependencies = [ "libc", - "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -5650,7 +5677,6 @@ dependencies = [ [[package]] name = 
"moonbeam-client-evm-tracing" version = "0.1.0" -source = "git+https://github.com/AstarNetwork/astar-frame?branch=polkadot-v0.9.39#a3f214919bbbdefeb6962f528b35b5384626961d" dependencies = [ "ethereum-types", "evm-tracing-events", @@ -5665,7 +5691,6 @@ dependencies = [ [[package]] name = "moonbeam-evm-tracer" version = "0.1.0" -source = "git+https://github.com/AstarNetwork/astar-frame?branch=polkadot-v0.9.39#a3f214919bbbdefeb6962f528b35b5384626961d" dependencies = [ "ethereum-types", "evm", @@ -5685,7 +5710,6 @@ dependencies = [ [[package]] name = "moonbeam-primitives-ext" version = "0.1.0" -source = "git+https://github.com/AstarNetwork/astar-frame?branch=polkadot-v0.9.39#a3f214919bbbdefeb6962f528b35b5384626961d" dependencies = [ "ethereum-types", "evm-tracing-events", @@ -5698,7 +5722,6 @@ dependencies = [ [[package]] name = "moonbeam-rpc-core-debug" version = "0.1.0" -source = "git+https://github.com/AstarNetwork/astar-frame?branch=polkadot-v0.9.39#a3f214919bbbdefeb6962f528b35b5384626961d" dependencies = [ "ethereum-types", "futures 0.3.28", @@ -5713,7 +5736,6 @@ dependencies = [ [[package]] name = "moonbeam-rpc-core-trace" version = "0.6.0" -source = "git+https://github.com/AstarNetwork/astar-frame?branch=polkadot-v0.9.39#a3f214919bbbdefeb6962f528b35b5384626961d" dependencies = [ "ethereum-types", "futures 0.3.28", @@ -5727,7 +5749,6 @@ dependencies = [ [[package]] name = "moonbeam-rpc-core-txpool" version = "0.6.0" -source = "git+https://github.com/AstarNetwork/astar-frame?branch=polkadot-v0.9.39#a3f214919bbbdefeb6962f528b35b5384626961d" dependencies = [ "ethereum", "ethereum-types", @@ -5740,7 +5761,6 @@ dependencies = [ [[package]] name = "moonbeam-rpc-core-types" version = "0.1.0" -source = "git+https://github.com/AstarNetwork/astar-frame?branch=polkadot-v0.9.39#a3f214919bbbdefeb6962f528b35b5384626961d" dependencies = [ "ethereum-types", "serde", @@ -5750,7 +5770,6 @@ dependencies = [ [[package]] name = "moonbeam-rpc-debug" version = "0.1.0" -source = 
"git+https://github.com/AstarNetwork/astar-frame?branch=polkadot-v0.9.39#a3f214919bbbdefeb6962f528b35b5384626961d" dependencies = [ "ethereum", "ethereum-types", @@ -5760,7 +5779,7 @@ dependencies = [ "fc-storage", "fp-rpc", "futures 0.3.28", - "hex-literal 0.3.4", + "hex-literal 0.4.1", "jsonrpsee", "moonbeam-client-evm-tracing", "moonbeam-rpc-core-debug", @@ -5780,7 +5799,6 @@ dependencies = [ [[package]] name = "moonbeam-rpc-primitives-debug" version = "0.1.0" -source = "git+https://github.com/AstarNetwork/astar-frame?branch=polkadot-v0.9.39#a3f214919bbbdefeb6962f528b35b5384626961d" dependencies = [ "environmental", "ethereum", @@ -5799,7 +5817,6 @@ dependencies = [ [[package]] name = "moonbeam-rpc-primitives-txpool" version = "0.6.0" -source = "git+https://github.com/AstarNetwork/astar-frame?branch=polkadot-v0.9.39#a3f214919bbbdefeb6962f528b35b5384626961d" dependencies = [ "ethereum", "parity-scale-codec", @@ -5812,7 +5829,6 @@ dependencies = [ [[package]] name = "moonbeam-rpc-trace" version = "0.6.0" -source = "git+https://github.com/AstarNetwork/astar-frame?branch=polkadot-v0.9.39#a3f214919bbbdefeb6962f528b35b5384626961d" dependencies = [ "ethereum", "ethereum-types", @@ -5846,7 +5862,6 @@ dependencies = [ [[package]] name = "moonbeam-rpc-txpool" version = "0.6.0" -source = "git+https://github.com/AstarNetwork/astar-frame?branch=polkadot-v0.9.39#a3f214919bbbdefeb6962f528b35b5384626961d" dependencies = [ "ethereum-types", "fc-rpc", @@ -5924,7 +5939,7 @@ dependencies = [ "blake2s_simd", "blake3", "core2", - "digest 0.10.6", + "digest 0.10.7", "multihash-derive", "sha2 0.10.6", "sha3", @@ -6172,7 +6187,7 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a652d9771a63711fd3c3deb670acfbe5c30a4072e664d7a3bf5a9e1056ac72c3" dependencies = [ - "arrayvec 0.7.2", + "arrayvec 0.7.3", "itoa", ] @@ -6263,9 +6278,9 @@ dependencies = [ [[package]] name = "object" -version = "0.30.3" +version = "0.30.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439" +checksum = "03b4680b86d9cfafba8fc491dc9b6df26b68cf40e9e6cd73909194759a63c385" dependencies = [ "memchr", ] @@ -6290,9 +6305,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.17.1" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "opaque-debug" @@ -6633,15 +6648,17 @@ dependencies = [ [[package]] name = "pallet-block-reward" version = "2.2.2" -source = "git+https://github.com/AstarNetwork/astar-frame?branch=polkadot-v0.9.39#a3f214919bbbdefeb6962f528b35b5384626961d" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "pallet-balances", + "pallet-timestamp", "parity-scale-codec", "scale-info", "serde", "sp-arithmetic", + "sp-core", "sp-runtime", "sp-std", ] @@ -6667,7 +6684,6 @@ dependencies = [ [[package]] name = "pallet-chain-extension-assets" version = "0.1.0" -source = "git+https://github.com/AstarNetwork/astar-frame?branch=polkadot-v0.9.39#a3f214919bbbdefeb6962f528b35b5384626961d" dependencies = [ "assets-chain-extension-types", "frame-support", @@ -6687,7 +6703,6 @@ dependencies = [ [[package]] name = "pallet-chain-extension-dapps-staking" version = "1.1.0" -source = "git+https://github.com/AstarNetwork/astar-frame?branch=polkadot-v0.9.39#a3f214919bbbdefeb6962f528b35b5384626961d" dependencies = [ "dapps-staking-chain-extension-types", "frame-support", @@ -6707,7 +6722,6 @@ dependencies = [ [[package]] name = "pallet-chain-extension-xvm" version = "0.1.0" -source = "git+https://github.com/AstarNetwork/astar-frame?branch=polkadot-v0.9.39#a3f214919bbbdefeb6962f528b35b5384626961d" dependencies = [ "frame-support", "frame-system", @@ -6746,21 +6760,27 @@ dependencies = [ [[package]] 
name = "pallet-collator-selection" version = "3.3.2" -source = "git+https://github.com/AstarNetwork/astar-frame?branch=polkadot-v0.9.39#a3f214919bbbdefeb6962f528b35b5384626961d" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", "log", + "pallet-aura", "pallet-authorship", + "pallet-balances", "pallet-session", + "pallet-timestamp", "parity-scale-codec", "rand 0.8.5", "scale-info", "serde", + "sp-consensus-aura", + "sp-core", + "sp-io", "sp-runtime", "sp-staking", "sp-std", + "sp-tracing", ] [[package]] @@ -6809,6 +6829,20 @@ dependencies = [ "wasmparser-nostd", ] +[[package]] +name = "pallet-contracts-migration" +version = "1.0.0" +dependencies = [ + "frame-support", + "frame-system", + "pallet-contracts", + "parity-scale-codec", + "scale-info", + "sp-arithmetic", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-contracts-primitives" version = "7.0.0" @@ -6852,15 +6886,19 @@ dependencies = [ [[package]] name = "pallet-custom-signatures" version = "4.6.0" -source = "git+https://github.com/AstarNetwork/astar-frame?branch=polkadot-v0.9.39#a3f214919bbbdefeb6962f528b35b5384626961d" dependencies = [ + "assert_matches", "frame-support", "frame-system", + "hex-literal 0.4.1", + "libsecp256k1", + "pallet-balances", "parity-scale-codec", "scale-info", "serde", "sp-core", "sp-io", + "sp-keyring", "sp-runtime", "sp-std", ] @@ -6868,12 +6906,14 @@ dependencies = [ [[package]] name = "pallet-dapps-staking" version = "3.9.0" -source = "git+https://github.com/AstarNetwork/astar-frame?branch=polkadot-v0.9.39#a3f214919bbbdefeb6962f528b35b5384626961d" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", "num-traits", + "pallet-balances", + "pallet-session", + "pallet-timestamp", "parity-scale-codec", "scale-info", "serde", @@ -7021,8 +7061,8 @@ dependencies = [ [[package]] name = "pallet-evm-precompile-assets-erc20" version = "0.5.2" -source = 
"git+https://github.com/AstarNetwork/astar-frame?branch=polkadot-v0.9.39#a3f214919bbbdefeb6962f528b35b5384626961d" dependencies = [ + "derive_more", "fp-evm", "frame-support", "frame-system", @@ -7031,8 +7071,12 @@ dependencies = [ "pallet-assets", "pallet-balances", "pallet-evm", + "pallet-timestamp", "parity-scale-codec", "precompile-utils", + "scale-info", + "serde", + "sha3", "slices", "sp-core", "sp-io", @@ -7058,6 +7102,31 @@ dependencies = [ "substrate-bn", ] +[[package]] +name = "pallet-evm-precompile-dapps-staking" +version = "3.6.3" +dependencies = [ + "derive_more", + "fp-evm", + "frame-support", + "frame-system", + "log", + "num_enum", + "pallet-balances", + "pallet-dapps-staking", + "pallet-evm", + "pallet-timestamp", + "parity-scale-codec", + "precompile-utils", + "scale-info", + "serde", + "sha3", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-evm-precompile-dispatch" version = "2.0.0-dev" @@ -7108,72 +7177,100 @@ dependencies = [ [[package]] name = "pallet-evm-precompile-sr25519" version = "1.2.1" -source = "git+https://github.com/AstarNetwork/astar-frame?branch=polkadot-v0.9.39#a3f214919bbbdefeb6962f528b35b5384626961d" dependencies = [ + "derive_more", "fp-evm", + "frame-support", + "frame-system", + "hex-literal 0.4.1", "log", "num_enum", + "pallet-balances", "pallet-evm", + "pallet-timestamp", "parity-scale-codec", "precompile-utils", + "scale-info", + "serde", "sp-core", "sp-io", + "sp-runtime", "sp-std", ] [[package]] name = "pallet-evm-precompile-substrate-ecdsa" version = "1.2.2" -source = "git+https://github.com/AstarNetwork/astar-frame?branch=polkadot-v0.9.39#a3f214919bbbdefeb6962f528b35b5384626961d" dependencies = [ + "derive_more", "fp-evm", + "frame-support", + "frame-system", + "hex-literal 0.4.1", "log", "num_enum", + "pallet-balances", "pallet-evm", + "pallet-timestamp", "parity-scale-codec", "precompile-utils", + "scale-info", + "serde", "sp-core", "sp-io", + "sp-runtime", "sp-std", ] [[package]] 
name = "pallet-evm-precompile-xcm" version = "0.9.0" -source = "git+https://github.com/AstarNetwork/astar-frame?branch=polkadot-v0.9.39#a3f214919bbbdefeb6962f528b35b5384626961d" dependencies = [ + "derive_more", "fp-evm", "frame-support", "frame-system", + "hex-literal 0.4.1", "log", "num_enum", "pallet-assets", + "pallet-balances", "pallet-evm", "pallet-evm-precompile-assets-erc20", + "pallet-timestamp", "pallet-xcm 0.9.39", "parity-scale-codec", "precompile-utils", + "scale-info", + "serde", "sp-core", "sp-io", + "sp-runtime", "sp-std", "xcm", + "xcm-builder", "xcm-executor", ] [[package]] name = "pallet-evm-precompile-xvm" version = "0.1.0" -source = "git+https://github.com/AstarNetwork/astar-frame?branch=polkadot-v0.9.39#a3f214919bbbdefeb6962f528b35b5384626961d" dependencies = [ + "derive_more", "fp-evm", "frame-support", "frame-system", + "hex-literal 0.4.1", "log", "num_enum", + "pallet-balances", "pallet-evm", + "pallet-timestamp", "pallet-xvm", "parity-scale-codec", "precompile-utils", + "scale-info", + "serde", "sp-core", "sp-io", "sp-runtime", @@ -7443,26 +7540,6 @@ dependencies = [ "sp-std", ] -[[package]] -name = "pallet-precompile-dapps-staking" -version = "3.6.3" -source = "git+https://github.com/AstarNetwork/astar-frame?branch=polkadot-v0.9.39#a3f214919bbbdefeb6962f528b35b5384626961d" -dependencies = [ - "fp-evm", - "frame-support", - "frame-system", - "log", - "num_enum", - "pallet-dapps-staking", - "pallet-evm", - "parity-scale-codec", - "precompile-utils", - "scale-info", - "sp-core", - "sp-runtime", - "sp-std", -] - [[package]] name = "pallet-preimage" version = "4.0.0-dev" @@ -7845,15 +7922,17 @@ dependencies = [ [[package]] name = "pallet-xc-asset-config" version = "1.3.0" -source = "git+https://github.com/AstarNetwork/astar-frame?branch=polkadot-v0.9.39#a3f214919bbbdefeb6962f528b35b5384626961d" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", "log", + "pallet-balances", + "pallet-timestamp", "parity-scale-codec", 
"scale-info", "serde", + "sp-core", "sp-io", "sp-runtime", "sp-std", @@ -7884,14 +7963,16 @@ dependencies = [ [[package]] name = "pallet-xcm" version = "0.9.39" -source = "git+https://github.com/AstarNetwork/astar-frame?branch=polkadot-v0.9.39#a3f214919bbbdefeb6962f528b35b5384626961d" dependencies = [ "bounded-collections", "frame-benchmarking", "frame-support", "frame-system", "log", + "pallet-balances", "parity-scale-codec", + "polkadot-parachain", + "polkadot-runtime-parachains", "scale-info", "serde", "sp-core", @@ -7899,6 +7980,7 @@ dependencies = [ "sp-runtime", "sp-std", "xcm", + "xcm-builder", "xcm-executor", ] @@ -7924,8 +8006,8 @@ dependencies = [ [[package]] name = "pallet-xvm" version = "0.2.1" -source = "git+https://github.com/AstarNetwork/astar-frame?branch=polkadot-v0.9.39#a3f214919bbbdefeb6962f528b35b5384626961d" dependencies = [ + "frame-benchmarking", "frame-support", "frame-system", "impl-trait-for-tuples", @@ -7954,9 +8036,9 @@ dependencies = [ [[package]] name = "parity-db" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd4572a52711e2ccff02b4973ec7e4a5b5c23387ebbfbd6cd42b34755714cefc" +checksum = "4890dcb9556136a4ec2b0c51fa4a08c8b733b829506af8fff2e853f3a065985b" dependencies = [ "blake2", "crc32fast", @@ -7978,7 +8060,7 @@ version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ddb756ca205bd108aee3c62c6d3c994e1df84a59b9d6d4a5ea42ee1fd5a9a28" dependencies = [ - "arrayvec 0.7.2", + "arrayvec 0.7.3", "bitvec", "byte-slice-cast", "bytes", @@ -8035,7 +8117,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.7", + "parking_lot_core 0.9.8", ] [[package]] @@ -8054,15 +8136,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.7" +version = "0.9.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" +checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.2.16", + "redox_syscall 0.3.5", "smallvec", - "windows-sys 0.45.0", + "windows-targets 0.48.0", ] [[package]] @@ -8086,7 +8168,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -8115,9 +8197,9 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" +checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "pest" @@ -8149,7 +8231,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -8175,22 +8257,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.12" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" +checksum = "c95a7476719eab1e366eaf73d0260af3021184f18177925b07f54b30089ceead" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.12" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" +checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.18", ] [[package]] @@ -9432,7 +9514,7 @@ dependencies = [ "cfg-if", "cpufeatures", "opaque-debug 0.3.0", - "universal-hash 0.5.0", 
+ "universal-hash 0.5.1", ] [[package]] @@ -9444,13 +9526,13 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "precompile-utils" version = "0.4.3" -source = "git+https://github.com/AstarNetwork/astar-frame?branch=polkadot-v0.9.39#a3f214919bbbdefeb6962f528b35b5384626961d" dependencies = [ "assert_matches", "evm", "fp-evm", "frame-support", "frame-system", + "hex-literal 0.4.1", "impl-trait-for-tuples", "log", "num_enum", @@ -9458,6 +9540,7 @@ dependencies = [ "parity-scale-codec", "precompile-utils-macro", "sha3", + "similar-asserts", "sp-core", "sp-io", "sp-runtime", @@ -9468,7 +9551,6 @@ dependencies = [ [[package]] name = "precompile-utils-macro" version = "0.1.0" -source = "git+https://github.com/AstarNetwork/astar-frame?branch=polkadot-v0.9.39#a3f214919bbbdefeb6962f528b35b5384626961d" dependencies = [ "num_enum", "proc-macro2", @@ -9583,9 +9665,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.56" +version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435" +checksum = "dec2b086b7a862cf4de201096214fa870344cf922b2b30c167badb3af3195406" dependencies = [ "unicode-ident", ] @@ -9749,9 +9831,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.27" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f4f29d145265ec1c483c7c654450edde0bfe043d3938d6972630663356d9500" +checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" dependencies = [ "proc-macro2", ] @@ -9821,7 +9903,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.9", + "getrandom 0.2.10", ] [[package]] @@ -9878,7 +9960,7 @@ checksum = "6413f3de1edee53342e6138e75b56d32e7bc6e332b3bd62d497b1929d4cfbcdd" dependencies 
= [ "pem", "ring", - "time 0.3.21", + "time 0.3.22", "x509-parser 0.13.2", "yasna", ] @@ -9891,7 +9973,7 @@ checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" dependencies = [ "pem", "ring", - "time 0.3.21", + "time 0.3.22", "yasna", ] @@ -9919,7 +10001,7 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.9", + "getrandom 0.2.10", "redox_syscall 0.2.16", "thiserror", ] @@ -9954,7 +10036,7 @@ checksum = "8d2275aab483050ab2a7364c1a46604865ee7d6906684e08db0f090acf74f9e7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -9971,13 +10053,13 @@ dependencies = [ [[package]] name = "regex" -version = "1.8.1" +version = "1.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af83e617f331cc6ae2da5443c602dfa5af81e517212d9d611a5b3ba1777b5370" +checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f" dependencies = [ - "aho-corasick 1.0.1", + "aho-corasick 1.0.2", "memchr", - "regex-syntax 0.7.1", + "regex-syntax 0.7.2", ] [[package]] @@ -9997,9 +10079,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5996294f19bd3aae0453a862ad728f60e6600695733dd5df01da90c54363a3c" +checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78" [[package]] name = "region" @@ -10055,7 +10137,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -10298,9 +10380,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.36.13" +version = "0.36.14" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a38f9520be93aba504e8ca974197f46158de5dcaa9fa04b57c57cd6a679d658" +checksum = "14e4d67015953998ad0eb82887a0eb0129e18a7e2f3b7b0f6c422fddcd503d62" dependencies = [ "bitflags", "errno", @@ -10312,15 +10394,15 @@ dependencies = [ [[package]] name = "rustix" -version = "0.37.19" +version = "0.37.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acf8729d8542766f1b2cf77eb034d52f40d375bb8b615d0b147089946e16613d" +checksum = "b96e891d04aa506a6d1f318d2771bcb1c7dfda84e126660ace067c9b474bb2c0" dependencies = [ "bitflags", "errno", "io-lifetimes", "libc", - "linux-raw-sys 0.3.7", + "linux-raw-sys 0.3.8", "windows-sys 0.48.0", ] @@ -10367,7 +10449,7 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" dependencies = [ - "base64 0.21.0", + "base64 0.21.2", ] [[package]] @@ -10404,9 +10486,9 @@ dependencies = [ [[package]] name = "safe_arch" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "794821e4ccb0d9f979512f9c1973480123f9bd62a90d74ab0f9426fcf8f4a529" +checksum = "62a7484307bd40f8f7ccbacccac730108f2cae119a3b11c74485b48aa9ea650f" dependencies = [ "bytemuck", ] @@ -10826,7 +10908,7 @@ dependencies = [ "libc", "log", "once_cell", - "rustix 0.36.13", + "rustix 0.36.14", "sc-allocator", "sc-executor-common", "sp-runtime-interface", @@ -11493,9 +11575,9 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfdef77228a4c05dc94211441595746732131ad7f6530c6c18f045da7b7ab937" +checksum = "b569c32c806ec3abdf3b5869fb8bf1e0d275a7c1c9b0b05603d9464632649edf" dependencies = [ "bitvec", "cfg-if", @@ -11642,9 +11724,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.8.2" +version = 
"2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" +checksum = "1fc758eb7bffce5b308734e9b0c1468893cae9ff70ebf13e7090be8dcbcc83a8" dependencies = [ "bitflags", "core-foundation", @@ -11655,9 +11737,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4" +checksum = "f51d0c0d83bec45f16480d0ce0058397a69e48fcdc52d1dc8855fb68acbd31a7" dependencies = [ "core-foundation-sys", "libc", @@ -11698,22 +11780,22 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.162" +version = "1.0.164" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71b2f6e1ab5c2b98c05f0f35b236b22e8df7ead6ffbf51d7808da7f8817e7ab6" +checksum = "9e8c8cf938e98f769bc164923b06dce91cea1751522f46f8466461af04c9027d" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.162" +version = "1.0.164" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2a0814352fd64b58489904a44ea8d90cb1a91dcb6b4f5ebabc32c8318e93cb6" +checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -11748,7 +11830,7 @@ checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -11784,7 +11866,7 @@ checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -11793,7 +11875,7 @@ version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum 
= "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", "keccak", ] @@ -11858,6 +11940,7 @@ dependencies = [ "pallet-evm-precompile-assets-erc20", "pallet-evm-precompile-blake2", "pallet-evm-precompile-bn128", + "pallet-evm-precompile-dapps-staking", "pallet-evm-precompile-dispatch", "pallet-evm-precompile-ed25519", "pallet-evm-precompile-modexp", @@ -11870,7 +11953,6 @@ dependencies = [ "pallet-identity", "pallet-insecure-randomness-collective-flip", "pallet-multisig", - "pallet-precompile-dapps-staking", "pallet-preimage", "pallet-proxy", "pallet-scheduler", @@ -11961,6 +12043,7 @@ dependencies = [ "pallet-evm-precompile-assets-erc20", "pallet-evm-precompile-blake2", "pallet-evm-precompile-bn128", + "pallet-evm-precompile-dapps-staking", "pallet-evm-precompile-dispatch", "pallet-evm-precompile-ed25519", "pallet-evm-precompile-modexp", @@ -11972,7 +12055,6 @@ dependencies = [ "pallet-identity", "pallet-insecure-randomness-collective-flip", "pallet-multisig", - "pallet-precompile-dapps-staking", "pallet-proxy", "pallet-session", "pallet-state-trie-migration", @@ -12033,7 +12115,7 @@ version = "1.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", "rand_core 0.6.4", ] @@ -12050,6 +12132,26 @@ dependencies = [ "wide", ] +[[package]] +name = "similar" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "420acb44afdae038210c99e69aae24109f32f15500aa708e81d46c9f29d55fcf" +dependencies = [ + "bstr 0.2.17", + "unicode-segmentation", +] + +[[package]] +name = "similar-asserts" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbf644ad016b75129f01a34a355dcb8d66a5bc803e417c7a77cc5d5ee9fa0f18" +dependencies = [ + "console", + "similar", +] + [[package]] name = 
"siphasher" version = "0.3.10" @@ -12143,6 +12245,16 @@ dependencies = [ "winapi", ] +[[package]] +name = "socket2" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2538b18701741680e0322a2302176d3253a35388e2e62f172f64f4f16605f877" +dependencies = [ + "libc", + "windows-sys 0.48.0", +] + [[package]] name = "soketto" version = "0.7.1" @@ -12413,7 +12525,7 @@ source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.39#8c dependencies = [ "blake2", "byteorder", - "digest 0.10.6", + "digest 0.10.7", "sha2 0.10.6", "sha3", "sp-std", @@ -13157,9 +13269,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.15" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a34fcf3e8b60f57e6a14301a2e916d323af98b0ea63c599441eec8558660c822" +checksum = "32d41677bcbe24c20c52e7c70b0d8db04134c5d1066bf98662e2871ad200ea3e" dependencies = [ "proc-macro2", "quote", @@ -13180,9 +13292,9 @@ dependencies = [ [[package]] name = "system-configuration" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d75182f12f490e953596550b65ee31bda7c8e043d9386174b353bda50838c3fd" +checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ "bitflags", "core-foundation", @@ -13213,15 +13325,16 @@ checksum = "fd1ba337640d60c3e96bc6f0638a939b9c9a7f2c316a1598c279828b3d1dc8c5" [[package]] name = "tempfile" -version = "3.5.0" +version = "3.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9fbec84f381d5795b08656e4912bec604d162bff9291d6189a78f4c8ab87998" +checksum = "31c0432476357e58790aaa47a8efb0c5138f137343f3b5f23bd36a27e3b0a6d6" dependencies = [ + "autocfg", "cfg-if", "fastrand", "redox_syscall 0.3.5", - "rustix 0.37.19", - "windows-sys 0.45.0", + "rustix 0.37.20", + "windows-sys 0.48.0", ] [[package]] @@ -13256,7 +13369,7 @@ checksum = 
"f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -13331,9 +13444,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.21" +version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f3403384eaacbca9923fa06940178ac13e4edb725486d70e8e15881d0c836cc" +checksum = "ea9e1b3cf1243ae005d9e74085d4d542f3125458f3a81af210d901dcd7411efd" dependencies = [ "itoa", "serde", @@ -13411,9 +13524,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.28.0" +version = "1.28.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c786bf8134e5a3a166db9b29ab8f48134739014a3eca7bc6bfa95d673b136f" +checksum = "94d7b1cfd2aa4011f2de74c2c4c63665e27a71006b0a192dcd2710272e73dfa2" dependencies = [ "autocfg", "bytes", @@ -13423,7 +13536,7 @@ dependencies = [ "parking_lot 0.12.1", "pin-project-lite 0.2.9", "signal-hook-registry", - "socket2", + "socket2 0.4.9", "tokio-macros", "windows-sys 0.48.0", ] @@ -13436,7 +13549,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -13488,15 +13601,15 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ab8ed2edee10b50132aed5f331333428b011c99402b5a534154ed15746f9622" +checksum = "5a76a9312f5ba4c2dec6b9161fdf25d87ad8a09256ccea5a556fef03c706a10f" [[package]] name = "toml_edit" -version = "0.19.8" +version = "0.19.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "239410c8609e8125456927e6707163a3b1fdb40561e4b803bc041f466ccfdc13" +checksum = "2380d56e8670370eee6566b0bfd4265f65b3f432e8c6d85623f728d4fa31f739" dependencies = [ "indexmap", 
"toml_datetime", @@ -13565,14 +13678,14 @@ checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] name = "tracing-core" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" +checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" dependencies = [ "once_cell", "valuable", @@ -13705,7 +13818,7 @@ dependencies = [ "lazy_static", "rand 0.8.5", "smallvec", - "socket2", + "socket2 0.4.9", "thiserror", "tinyvec", "tokio", @@ -13808,7 +13921,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ "cfg-if", - "digest 0.10.6", + "digest 0.10.7", "rand 0.8.5", "static_assertions", ] @@ -13845,9 +13958,9 @@ checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" +checksum = "b15811caf2415fb889178633e7724bad2509101cde276048e013b9def5e51fa0" [[package]] name = "unicode-normalization" @@ -13858,6 +13971,12 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "unicode-segmentation" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" + [[package]] name = "unicode-width" version = "0.1.10" @@ -13882,9 +14001,9 @@ dependencies = [ [[package]] name = "universal-hash" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d3160b73c9a19f7e2939a2fdad446c57c1bbbbf4d919d3213ff1267a580d8b5" +checksum = 
"fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" dependencies = [ "crypto-common", "subtle", @@ -13910,12 +14029,12 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" -version = "2.3.1" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" +checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" dependencies = [ "form_urlencoded", - "idna 0.3.0", + "idna 0.4.0", "percent-encoding", ] @@ -13927,11 +14046,11 @@ checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "uuid" -version = "1.3.2" +version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dad5567ad0cf5b760e5665964bec1b47dfd077ba8a2544b513f3556d3d239a2" +checksum = "0fa2982af2eec27de306107c027578ff7f423d65f7250e40ce0fea8f45248b81" dependencies = [ - "getrandom 0.2.9", + "getrandom 0.2.10", ] [[package]] @@ -14013,9 +14132,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.85" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b6cb788c4e39112fbe1822277ef6fb3c55cd86b95cb3d3c4c1c9597e4ac74b4" +checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -14023,24 +14142,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.85" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35e522ed4105a9d626d885b35d62501b30d9666283a5c8be12c14a8bdafe7822" +checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", "wasm-bindgen-shared", ] [[package]] name = 
"wasm-bindgen-futures" -version = "0.4.35" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "083abe15c5d88556b77bdf7aef403625be9e327ad37c62c4e4129af740168163" +checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" dependencies = [ "cfg-if", "js-sys", @@ -14050,9 +14169,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.85" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "358a79a0cb89d21db8120cbfb91392335913e4890665b1a7981d9e956903b434" +checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -14060,22 +14179,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.85" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4783ce29f09b9d93134d41297aded3a712b7b979e9c6f28c32cb88c973a94869" +checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.85" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a901d592cafaa4d711bc324edfaff879ac700b19c3dfd60058d2b445be2691eb" +checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" [[package]] name = "wasm-instrument" @@ -14196,7 +14315,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57d20cb3c59b788653d99541c646c561c9dd26506f25c0cebfe810659c54c6d7" dependencies = [ "downcast-rs", - "libm 0.2.6", + "libm 0.2.7", "memory_units", "num-rational", "num-traits", @@ -14210,7 +14329,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c5bf998ab792be85e20e771fe14182b4295571ad1d4f89d3da521c1bef5f597a" dependencies = 
[ "downcast-rs", - "libm 0.2.6", + "libm 0.2.7", "num-traits", ] @@ -14282,7 +14401,7 @@ dependencies = [ "directories-next", "file-per-thread-logger", "log", - "rustix 0.36.13", + "rustix 0.36.14", "serde", "sha2 0.10.6", "toml", @@ -14362,7 +14481,7 @@ checksum = "eed41cbcbf74ce3ff6f1d07d1b707888166dc408d1a880f651268f4f7c9194b2" dependencies = [ "object 0.29.0", "once_cell", - "rustix 0.36.13", + "rustix 0.36.14", ] [[package]] @@ -14393,7 +14512,7 @@ dependencies = [ "memoffset 0.6.5", "paste", "rand 0.8.5", - "rustix 0.36.13", + "rustix 0.36.14", "wasmtime-asm-macros", "wasmtime-environ", "wasmtime-jit-debug", @@ -14414,9 +14533,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.62" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b5f940c7edfdc6d12126d98c9ef4d1b3d470011c47c76a6581df47ad9ba721" +checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" dependencies = [ "js-sys", "wasm-bindgen", @@ -14477,7 +14596,7 @@ dependencies = [ "sha2 0.10.6", "stun", "thiserror", - "time 0.3.21", + "time 0.3.22", "tokio", "turn", "url", @@ -14514,7 +14633,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "942be5bd85f072c3128396f6e5a9bfb93ca8c1939ded735d177b7bcba9a13d05" dependencies = [ "aes 0.6.0", - "aes-gcm 0.10.1", + "aes-gcm 0.10.2", "async-trait", "bincode", "block-modes", @@ -14579,7 +14698,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f08dfd7a6e3987e255c4dbe710dde5d94d0f0574f8a21afa95d171376c143106" dependencies = [ "log", - "socket2", + "socket2 0.4.9", "thiserror", "tokio", "webrtc-util", @@ -14779,9 +14898,9 @@ dependencies = [ [[package]] name = "wide" -version = "0.7.8" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b689b6c49d6549434bf944e6b0f39238cf63693cb7a147e9d887507fffa3b223" +checksum = "40018623e2dba2602a9790faba8d33f2ebdebf4b86561b83928db735f8784728" 
dependencies = [ "bytemuck", "safe_arch", @@ -14789,9 +14908,9 @@ dependencies = [ [[package]] name = "widestring" -version = "0.5.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17882f045410753661207383517a6f62ec3dbeb6a4ed2acce01f0728238d1983" +checksum = "653f141f39ec16bba3c5abe400a0c60da7468261cc2cbf36805022876bc721a8" [[package]] name = "winapi" @@ -15034,11 +15153,12 @@ dependencies = [ [[package]] name = "winreg" -version = "0.10.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" +checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" dependencies = [ - "winapi", + "cfg-if", + "windows-sys 0.48.0", ] [[package]] @@ -15088,7 +15208,7 @@ dependencies = [ "ring", "rusticata-macros", "thiserror", - "time 0.3.21", + "time 0.3.22", ] [[package]] @@ -15106,7 +15226,7 @@ dependencies = [ "oid-registry 0.6.1", "rusticata-macros", "thiserror", - "time 0.3.21", + "time 0.3.22", ] [[package]] @@ -15169,7 +15289,6 @@ dependencies = [ [[package]] name = "xcm-primitives" version = "0.4.1" -source = "git+https://github.com/AstarNetwork/astar-frame?branch=polkadot-v0.9.39#a3f214919bbbdefeb6962f528b35b5384626961d" dependencies = [ "frame-support", "log", @@ -15271,7 +15390,6 @@ dependencies = [ [[package]] name = "xvm-chain-extension-types" version = "0.1.0" -source = "git+https://github.com/AstarNetwork/astar-frame?branch=polkadot-v0.9.39#a3f214919bbbdefeb6962f528b35b5384626961d" dependencies = [ "parity-scale-codec", "scale-info", @@ -15299,7 +15417,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" dependencies = [ - "time 0.3.21", + "time 0.3.22", ] [[package]] @@ -15319,7 +15437,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index f53d14221c..b3cf139cae 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,6 +8,28 @@ members = [ "runtime/shibuya", "tests/xcm-simulator", "tests/integration", + + "pallets/*", + "precompiles/*", + + "chain-extensions/dapps-staking", + "chain-extensions/pallet-assets", + "chain-extensions/xvm", + "chain-extensions/types/*", + + "vendor/evm-tracing", + "vendor/primitives/debug", + "vendor/primitives/evm-tracing-events", + "vendor/primitives/txpool", + "vendor/rpc/debug", + "vendor/rpc/trace", + "vendor/rpc/txpool", + "vendor/rpc-core/debug", + "vendor/rpc-core/trace", + "vendor/rpc-core/txpool", + "vendor/rpc-core/types", + "vendor/runtime/evm-tracer", + "vendor/runtime/ext", ] exclude = ["vendor"] @@ -32,6 +54,12 @@ repository = "https://github.com/AstarNetwork/Astar" parity-scale-codec = { version = "3.4.0", default-features = false, features = ["derive"] } scale-info = { version = "2.3.0", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } +environmental = { version = "1.1.2", default-features = false } +sha3 = { version = "0.10.1", default-features = false } +num_enum = { version = "0.5.3", default-features = false } +num-traits = { version = "0.2", default-features = false } +rand = { version = "0.8.5", default-features = false } +bounded-collections = { version = "0.1.5", default-features = false } # (native) array-bytes = "6.0.0" @@ -44,9 +72,20 @@ serde_json = "1.0.92" tokio = { version = "1.24.2", features = ["macros", "sync"] } url = "2.2.2" jsonrpsee = { version = "0.16.2", features = ["server"] } -hex = "0.4.3" +hex = { version = "0.4.3", features = ["serde"] } hex-literal = "0.4.1" regex = "1.6.0" +rlp = "0.5" +tracing = "0.1.34" +similar-asserts = { version = "1.1.0" } +assert_matches = "1.3.0" +libsecp256k1 = "0.7.0" +impl-trait-for-tuples = "0.2.2" +slices = "0.2.0" 
+derive_more = { version = "0.99" } +proc-macro2 = "1.0" +quote = "1.0" +syn = { version = "1.0" } # Substrate # (wasm) @@ -64,6 +103,9 @@ sp-runtime-interface = { git = "https://github.com/paritytech/substrate", branch sp-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.39", default-features = false } sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.39", default-features = false } sp-version = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.39", default-features = false } +sp-arithmetic = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.39", default-features = false } +sp-staking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.39", default-features = false } +sp-externalities = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.39", default-features = false } # (native) sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.39" } @@ -89,6 +131,7 @@ sc-telemetry = { git = "https://github.com/paritytech/substrate", branch = "polk sc-tracing = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.39" } sc-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.39" } sc-transaction-pool-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.39" } +sc-utils = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.39" } substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.39" } pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.39" } substrate-frame-rpc-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.39" } @@ -130,6 +173,14 @@ pallet-treasury = { git = "https://github.com/paritytech/substrate", branch 
= "p pallet-grandpa = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.39", default-features = false } pallet-state-trie-migration = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.39", default-features = false } +# EVM & Ethereum +# (wasm) +evm = { version = "0.37.0", default-features = false } +ethereum-types = { version = "0.14", default-features = false } +ethereum = { version = "0.14.0", default-features = false } +evm-gasometer = { version = "0.37.0", default-features = false } +evm-runtime = { version = "0.37.0", default-features = false } + # Frontier # (wasm) fp-rpc = { git = "https://github.com/AstarNetwork/frontier", branch = "polkadot-v0.9.39", default-features = false } @@ -213,36 +264,55 @@ orml-xcm-support = { git = "https://github.com/open-web3-stack/open-runtime-modu # Astar pallets & modules # (wasm) -pallet-block-reward = { git = "https://github.com/AstarNetwork/astar-frame", branch = "polkadot-v0.9.39", default-features = false } -pallet-collator-selection = { git = "https://github.com/AstarNetwork/astar-frame", branch = "polkadot-v0.9.39", default-features = false } -pallet-custom-signatures = { git = "https://github.com/AstarNetwork/astar-frame", branch = "polkadot-v0.9.39", default-features = false } -pallet-evm-precompile-assets-erc20 = { git = "https://github.com/AstarNetwork/astar-frame", branch = "polkadot-v0.9.39", default-features = false } -pallet-evm-precompile-sr25519 = { git = "https://github.com/AstarNetwork/astar-frame", branch = "polkadot-v0.9.39", default-features = false } -pallet-evm-precompile-substrate-ecdsa = { git = "https://github.com/AstarNetwork/astar-frame", branch = "polkadot-v0.9.39", default-features = false } -pallet-evm-precompile-xcm = { git = "https://github.com/AstarNetwork/astar-frame", branch = "polkadot-v0.9.39", default-features = false } -pallet-evm-precompile-xvm = { git = "https://github.com/AstarNetwork/astar-frame", branch = "polkadot-v0.9.39", 
default-features = false } -pallet-precompile-dapps-staking = { git = "https://github.com/AstarNetwork/astar-frame", branch = "polkadot-v0.9.39", default-features = false } -pallet-dapps-staking = { git = "https://github.com/AstarNetwork/astar-frame", branch = "polkadot-v0.9.39", default-features = false } -pallet-xc-asset-config = { git = "https://github.com/AstarNetwork/astar-frame", branch = "polkadot-v0.9.39", default-features = false } -xcm-primitives = { git = "https://github.com/AstarNetwork/astar-frame", branch = "polkadot-v0.9.39", default-features = false } -pallet-xvm = { git = "https://github.com/AstarNetwork/astar-frame", branch = "polkadot-v0.9.39", default-features = false } -pallet-xcm = { git = "https://github.com/AstarNetwork/astar-frame", branch = "polkadot-v0.9.39", default-features = false } - -pallet-chain-extension-dapps-staking = { git = "https://github.com/AstarNetwork/astar-frame", branch = "polkadot-v0.9.39", default-features = false } -pallet-chain-extension-xvm = { git = "https://github.com/AstarNetwork/astar-frame", branch = "polkadot-v0.9.39", default-features = false } -pallet-chain-extension-assets = { git = "https://github.com/AstarNetwork/astar-frame", branch = "polkadot-v0.9.39", default-features = false } +pallet-block-reward = { path = "./pallets/block-reward", default-features = false } +pallet-collator-selection = { path = "./pallets/collator-selection", default-features = false } +pallet-custom-signatures = { path = "./pallets/custom-signatures", default-features = false } +pallet-dapps-staking = { path = "./pallets/dapps-staking", default-features = false } +pallet-xc-asset-config = { path = "./pallets/xc-asset-config", default-features = false } +pallet-xvm = { path = "./pallets/pallet-xvm", default-features = false } +pallet-xcm = { path = "./pallets/pallet-xcm", default-features = false } + +xcm-primitives = { path = "./primitives/xcm", default-features = false } + +pallet-evm-precompile-assets-erc20 = { path = 
"./precompiles/assets-erc20", default-features = false } +pallet-evm-precompile-sr25519 = { path = "./precompiles/sr25519", default-features = false } +pallet-evm-precompile-substrate-ecdsa = { path = "./precompiles/substrate-ecdsa", default-features = false } +pallet-evm-precompile-xcm = { path = "./precompiles/xcm", default-features = false } +pallet-evm-precompile-xvm = { path = "./precompiles/xvm", default-features = false } +pallet-evm-precompile-dapps-staking = { path = "./precompiles/dapps-staking", default-features = false } + +pallet-chain-extension-dapps-staking = { path = "./chain-extensions/dapps-staking", default-features = false } +pallet-chain-extension-xvm = { path = "./chain-extensions/xvm", default-features = false } +pallet-chain-extension-assets = { path = "./chain-extensions/pallet-assets", default-features = false } + +dapps-staking-chain-extension-types = { path = "./chain-extensions/types/dapps-staking", default-features = false } +xvm-chain-extension-types = { path = "./chain-extensions/types/xvm", default-features = false } +assets-chain-extension-types = { path = "./chain-extensions/types/assets", default-features = false } + +precompile-utils = { path = "./precompiles/utils", default-features = false } + +local-runtime = { path = "./runtime/local", default-features = false } +shibuya-runtime = { path = "./runtime/shibuya", default-features = false } +shiden-runtime = { path = "./runtime/shiden", default-features = false } +astar-runtime = { path = "./runtime/astar", default-features = false } ## Moonbeam tracing ## (wasm) -moonbeam-evm-tracer = { git = "https://github.com/AstarNetwork/astar-frame", branch = "polkadot-v0.9.39", default-features = false } -moonbeam-rpc-primitives-debug = { git = "https://github.com/AstarNetwork/astar-frame", branch = "polkadot-v0.9.39", default-features = false } -moonbeam-rpc-primitives-txpool = { git = "https://github.com/AstarNetwork/astar-frame", branch = "polkadot-v0.9.39", default-features = false } 
+moonbeam-evm-tracer = { path = "./vendor/runtime/evm-tracer", default-features = false } +moonbeam-rpc-primitives-debug = { path = "./vendor/primitives/debug", default-features = false } +moonbeam-rpc-primitives-txpool = { path = "./vendor/primitives/txpool", default-features = false } +evm-tracing-events = { path = "./vendor/primitives/evm-tracing-events", default-features = false } +moonbeam-primitives-ext = { path = "./vendor/runtime/ext", default-features = false } ## (native) -moonbeam-primitives-ext = { git = "https://github.com/AstarNetwork/astar-frame", branch = "polkadot-v0.9.39" } -moonbeam-rpc-debug = { git = "https://github.com/AstarNetwork/astar-frame", branch = "polkadot-v0.9.39" } -moonbeam-rpc-trace = { git = "https://github.com/AstarNetwork/astar-frame", branch = "polkadot-v0.9.39" } -moonbeam-rpc-txpool = { git = "https://github.com/AstarNetwork/astar-frame", branch = "polkadot-v0.9.39" } +moonbeam-rpc-debug = { path = "./vendor/rpc/debug" } +moonbeam-rpc-trace = { path = "./vendor/rpc/trace" } +moonbeam-rpc-txpool = { path = "./vendor/rpc/txpool" } +moonbeam-client-evm-tracing = { path = "./vendor/evm-tracing" } +moonbeam-rpc-core-types = { path = "./vendor/rpc-core/types" } +moonbeam-rpc-core-txpool = { path = "./vendor/rpc-core/txpool" } +moonbeam-rpc-core-trace = { path = "./vendor/rpc-core/trace" } +moonbeam-rpc-core-debug = { path = "./vendor/rpc-core/debug" } # Build deps substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.39" } diff --git a/bin/collator/Cargo.toml b/bin/collator/Cargo.toml index a432ee01f5..d6a935ff5d 100644 --- a/bin/collator/Cargo.toml +++ b/bin/collator/Cargo.toml @@ -86,12 +86,12 @@ fp-evm = { workspace = true, features = ["std"] } fp-storage = { workspace = true } # astar-specific dependencies -astar-runtime = { path = "../../runtime/astar" } -local-runtime = { path = "../../runtime/local" } -shibuya-runtime = { path = "../../runtime/shibuya" } -shiden-runtime = { path 
= "../../runtime/shiden" } +astar-runtime = { workspace = true, features = ["std"] } +local-runtime = { workspace = true, features = ["std"] } +shibuya-runtime = { workspace = true, features = ["std"] } +shiden-runtime = { workspace = true, features = ["std"] } -# astar-frame dependencies +# astar pallets dependencies pallet-block-reward = { workspace = true } # frame dependencies diff --git a/chain-extensions/dapps-staking/Cargo.toml b/chain-extensions/dapps-staking/Cargo.toml new file mode 100644 index 0000000000..4169a7bd00 --- /dev/null +++ b/chain-extensions/dapps-staking/Cargo.toml @@ -0,0 +1,43 @@ +[package] +name = "pallet-chain-extension-dapps-staking" +version = "1.1.0" +license = "Apache-2.0" +description = "dApps Staking chain extension for WASM contracts" +authors.workspace = true +edition.workspace = true +homepage.workspace = true +repository.workspace = true + +[dependencies] +frame-support = { workspace = true } +frame-system = { workspace = true } +log = { workspace = true } +num-traits = { workspace = true } +pallet-contracts = { workspace = true } +pallet-contracts-primitives = { workspace = true } +parity-scale-codec = { workspace = true } +scale-info = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } + +# Astar +dapps-staking-chain-extension-types = { workspace = true } +pallet-dapps-staking = { workspace = true } + +[features] +default = ["std"] +std = [ + "parity-scale-codec/std", + "dapps-staking-chain-extension-types/std", + "frame-support/std", + "frame-system/std", + "num-traits/std", + "pallet-contracts/std", + "pallet-contracts-primitives/std", + "pallet-dapps-staking/std", + "scale-info/std", + "sp-std/std", + "sp-core/std", + "sp-runtime/std", +] diff --git a/chain-extensions/dapps-staking/src/lib.rs b/chain-extensions/dapps-staking/src/lib.rs new file mode 100644 index 0000000000..4d5149eb4f --- /dev/null +++ b/chain-extensions/dapps-staking/src/lib.rs @@ -0,0 
+1,369 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +#![cfg_attr(not(feature = "std"), no_std)] +use sp_runtime::{ + traits::{Saturating, Zero}, + DispatchError, +}; + +use dapps_staking_chain_extension_types::{ + DSError, DappsStakingAccountInput, DappsStakingEraInput, DappsStakingNominationInput, + DappsStakingValueInput, +}; +use frame_support::traits::{Currency, Get}; +use frame_system::RawOrigin; +use pallet_contracts::chain_extension::{ + ChainExtension, Environment, Ext, InitState, RetVal, SysConfig, +}; +use pallet_dapps_staking::{RewardDestination, WeightInfo}; +use parity_scale_codec::Encode; +use sp_std::marker::PhantomData; + +type BalanceOf = <::Currency as Currency< + ::AccountId, +>>::Balance; + +enum DappsStakingFunc { + CurrentEra, + UnbondingPeriod, + EraRewards, + EraStaked, + StakedAmount, + StakedAmountOnContract, + ReadContractStake, + BondAndStake, + UnbondAndUnstake, + WithdrawUnbonded, + ClaimStaker, + ClaimDapp, + SetRewardDestination, + NominationTransfer, +} + +impl TryFrom for DappsStakingFunc { + type Error = DispatchError; + + fn try_from(value: u16) -> Result { + match value { + 1 => Ok(DappsStakingFunc::CurrentEra), + 2 => Ok(DappsStakingFunc::UnbondingPeriod), + 3 => Ok(DappsStakingFunc::EraRewards), + 4 => Ok(DappsStakingFunc::EraStaked), + 5 => 
Ok(DappsStakingFunc::StakedAmount), + 6 => Ok(DappsStakingFunc::StakedAmountOnContract), + 7 => Ok(DappsStakingFunc::ReadContractStake), + 8 => Ok(DappsStakingFunc::BondAndStake), + 9 => Ok(DappsStakingFunc::UnbondAndUnstake), + 10 => Ok(DappsStakingFunc::WithdrawUnbonded), + 11 => Ok(DappsStakingFunc::ClaimStaker), + 12 => Ok(DappsStakingFunc::ClaimDapp), + 13 => Ok(DappsStakingFunc::SetRewardDestination), + 14 => Ok(DappsStakingFunc::NominationTransfer), + _ => Err(DispatchError::Other( + "DappsStakingExtension: Unimplemented func_id", + )), + } + } +} + +/// Dapps Staking chain extension. +pub struct DappsStakingExtension(PhantomData); + +impl Default for DappsStakingExtension { + fn default() -> Self { + DappsStakingExtension(PhantomData) + } +} + +impl ChainExtension for DappsStakingExtension +where + T: pallet_dapps_staking::Config + pallet_contracts::Config, + ::SmartContract: From<[u8; 32]>, + ::AccountId: From<[u8; 32]>, +{ + fn call(&mut self, env: Environment) -> Result + where + E: Ext, + { + let func_id = env.func_id().try_into()?; + let mut env = env.buf_in_buf_out(); + + match func_id { + DappsStakingFunc::CurrentEra => { + let base_weight = ::DbWeight::get().reads(1); + env.charge_weight(base_weight)?; + + let era_index = pallet_dapps_staking::CurrentEra::::get(); + env.write(&era_index.encode(), false, None)?; + } + + DappsStakingFunc::UnbondingPeriod => { + let base_weight = ::DbWeight::get().reads(1); + env.charge_weight(base_weight)?; + + let unbonding_period = T::UnbondingPeriod::get(); + env.write(&unbonding_period.encode(), false, None)?; + } + + DappsStakingFunc::EraRewards => { + let arg: u32 = env.read_as()?; + + let base_weight = ::DbWeight::get().reads(1); + env.charge_weight(base_weight)?; + + let era_info = pallet_dapps_staking::GeneralEraInfo::::get(arg); + let reward = era_info.map_or(Zero::zero(), |r| { + r.rewards.stakers.saturating_add(r.rewards.dapps) + }); + env.write(&reward.encode(), false, None)?; + } + + 
DappsStakingFunc::EraStaked => { + let arg: u32 = env.read_as()?; + + let base_weight = ::DbWeight::get().reads(1); + env.charge_weight(base_weight)?; + + let era_info = pallet_dapps_staking::GeneralEraInfo::::get(arg); + let staked_amount = era_info.map_or(Zero::zero(), |r| r.staked); + env.write(&staked_amount.encode(), false, None)?; + } + + DappsStakingFunc::StakedAmount => { + let staker: T::AccountId = env.read_as()?; + + let base_weight = ::DbWeight::get().reads(1); + env.charge_weight(base_weight)?; + + let ledger = pallet_dapps_staking::Ledger::::get(&staker); + env.write(&ledger.locked.encode(), false, None)?; + } + + DappsStakingFunc::StakedAmountOnContract => { + let args: DappsStakingAccountInput = env.read_as()?; + let staker: T::AccountId = args.staker.into(); + let contract: ::SmartContract = + args.contract.into(); + + let base_weight = ::DbWeight::get().reads(1); + env.charge_weight(base_weight)?; + + let staking_info = + pallet_dapps_staking::GeneralStakerInfo::::get(&staker, &contract); + let staked_amount = staking_info.latest_staked_value(); + env.write(&staked_amount.encode(), false, None)?; + } + + DappsStakingFunc::ReadContractStake => { + let contract_bytes: [u8; 32] = env.read_as()?; + let contract: ::SmartContract = + contract_bytes.into(); + + let base_weight = ::DbWeight::get().reads(1); + env.charge_weight(base_weight.saturating_add(base_weight))?; + + let current_era = pallet_dapps_staking::CurrentEra::::get(); + let staking_info = + pallet_dapps_staking::Pallet::::contract_stake_info(&contract, current_era) + .unwrap_or_default(); + let total = TryInto::::try_into(staking_info.total).unwrap_or(0); + env.write(&total.encode(), false, None)?; + } + + DappsStakingFunc::BondAndStake => { + let args: DappsStakingValueInput> = env.read_as()?; + let contract = args.contract.into(); + let value: BalanceOf = args.value; + + let base_weight = ::WeightInfo::bond_and_stake(); + env.charge_weight(base_weight)?; + + let caller = 
env.ext().address().clone(); + let call_result = pallet_dapps_staking::Pallet::::bond_and_stake( + RawOrigin::Signed(caller).into(), + contract, + value, + ); + return match call_result { + Err(e) => { + let mapped_error = DSError::try_from(e.error)?; + Ok(RetVal::Converging(mapped_error as u32)) + } + Ok(_) => Ok(RetVal::Converging(DSError::Success as u32)), + }; + } + + DappsStakingFunc::UnbondAndUnstake => { + let args: DappsStakingValueInput> = env.read_as()?; + let contract = args.contract.into(); + let value: BalanceOf = args.value; + + let base_weight = + ::WeightInfo::unbond_and_unstake(); + env.charge_weight(base_weight)?; + + let caller = env.ext().address().clone(); + let call_result = pallet_dapps_staking::Pallet::::unbond_and_unstake( + RawOrigin::Signed(caller).into(), + contract, + value, + ); + return match call_result { + Err(e) => { + let mapped_error = DSError::try_from(e.error)?; + Ok(RetVal::Converging(mapped_error as u32)) + } + Ok(_) => Ok(RetVal::Converging(DSError::Success as u32)), + }; + } + + DappsStakingFunc::WithdrawUnbonded => { + let caller = env.ext().address().clone(); + + let base_weight = + ::WeightInfo::withdraw_unbonded(); + env.charge_weight(base_weight)?; + + let call_result = pallet_dapps_staking::Pallet::::withdraw_unbonded( + RawOrigin::Signed(caller).into(), + ); + return match call_result { + Err(e) => { + let mapped_error = DSError::try_from(e.error)?; + Ok(RetVal::Converging(mapped_error as u32)) + } + Ok(_) => Ok(RetVal::Converging(DSError::Success as u32)), + }; + } + + DappsStakingFunc::ClaimStaker => { + let contract_bytes: [u8; 32] = env.read_as()?; + let contract = contract_bytes.into(); + + let base_weight = ::WeightInfo::claim_staker_with_restake() + .max(::WeightInfo::claim_staker_without_restake()); + let charged_weight = env.charge_weight(base_weight)?; + + let caller = env.ext().address().clone(); + let call_result = pallet_dapps_staking::Pallet::::claim_staker( + RawOrigin::Signed(caller).into(), + 
contract, + ); + + let actual_weight = match call_result { + Ok(e) => e.actual_weight, + Err(e) => e.post_info.actual_weight, + }; + if let Some(actual_weight) = actual_weight { + env.adjust_weight(charged_weight, actual_weight); + } + + return match call_result { + Err(e) => { + let mapped_error = DSError::try_from(e.error)?; + Ok(RetVal::Converging(mapped_error as u32)) + } + Ok(_) => Ok(RetVal::Converging(DSError::Success as u32)), + }; + } + + DappsStakingFunc::ClaimDapp => { + let args: DappsStakingEraInput = env.read_as()?; + let contract = args.contract.into(); + let era: u32 = args.era; + + let base_weight = ::WeightInfo::claim_dapp(); + env.charge_weight(base_weight)?; + + let caller = env.ext().address().clone(); + let call_result = pallet_dapps_staking::Pallet::::claim_dapp( + RawOrigin::Signed(caller).into(), + contract, + era, + ); + return match call_result { + Err(e) => { + let mapped_error = DSError::try_from(e.error)?; + Ok(RetVal::Converging(mapped_error as u32)) + } + Ok(_) => Ok(RetVal::Converging(DSError::Success as u32)), + }; + } + + DappsStakingFunc::SetRewardDestination => { + let reward_destination_raw: u8 = env.read_as()?; + + let base_weight = + ::WeightInfo::set_reward_destination(); + env.charge_weight(base_weight)?; + + // Transform raw value into dapps staking enum + let reward_destination = if reward_destination_raw == 0 { + RewardDestination::FreeBalance + } else if reward_destination_raw == 1 { + RewardDestination::StakeBalance + } else { + let error = DSError::RewardDestinationValueOutOfBounds; + return Ok(RetVal::Converging(error as u32)); + }; + + let caller = env.ext().address().clone(); + let call_result = pallet_dapps_staking::Pallet::::set_reward_destination( + RawOrigin::Signed(caller).into(), + reward_destination, + ); + return match call_result { + Err(e) => { + let mapped_error = DSError::try_from(e.error)?; + Ok(RetVal::Converging(mapped_error as u32)) + } + Ok(_) => Ok(RetVal::Converging(DSError::Success as u32)), + 
}; + } + + DappsStakingFunc::NominationTransfer => { + let args: DappsStakingNominationInput> = env.read_as()?; + let origin_smart_contract = args.origin_contract.into(); + let target_smart_contract = args.target_contract.into(); + let value: BalanceOf = args.value; + + let base_weight = + ::WeightInfo::nomination_transfer(); + env.charge_weight(base_weight)?; + + let caller = env.ext().address().clone(); + let call_result = pallet_dapps_staking::Pallet::::nomination_transfer( + RawOrigin::Signed(caller).into(), + origin_smart_contract, + value, + target_smart_contract, + ); + return match call_result { + Err(e) => { + let mapped_error = DSError::try_from(e.error)?; + Ok(RetVal::Converging(mapped_error as u32)) + } + Ok(_) => Ok(RetVal::Converging(DSError::Success as u32)), + }; + } + } + + Ok(RetVal::Converging(DSError::Success as u32)) + } +} diff --git a/chain-extensions/pallet-assets/Cargo.toml b/chain-extensions/pallet-assets/Cargo.toml new file mode 100644 index 0000000000..94319eef7f --- /dev/null +++ b/chain-extensions/pallet-assets/Cargo.toml @@ -0,0 +1,41 @@ +[package] +name = "pallet-chain-extension-assets" +version = "0.1.0" +license = "Apache-2.0" +description = "Assets chain extension for WASM contracts" +authors.workspace = true +edition.workspace = true +homepage.workspace = true +repository.workspace = true + +[dependencies] +assets-chain-extension-types = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +log = { workspace = true } +num-traits = { workspace = true } +pallet-assets = { workspace = true } +pallet-contracts = { workspace = true } +pallet-contracts-primitives = { workspace = true } +parity-scale-codec = { workspace = true } +scale-info = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } + +[features] +default = ["std"] +std = [ + "parity-scale-codec/std", + "frame-support/std", + "frame-system/std", + "num-traits/std", + 
"pallet-contracts/std", + "pallet-contracts-primitives/std", + "scale-info/std", + "sp-std/std", + "sp-core/std", + "sp-runtime/std", + "pallet-assets/std", + "assets-chain-extension-types/std", +] diff --git a/chain-extensions/pallet-assets/src/lib.rs b/chain-extensions/pallet-assets/src/lib.rs new file mode 100644 index 0000000000..f4ff4695e9 --- /dev/null +++ b/chain-extensions/pallet-assets/src/lib.rs @@ -0,0 +1,415 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . 
+ +#![cfg_attr(not(feature = "std"), no_std)] + +pub mod weights; + +use assets_chain_extension_types::{select_origin, Origin, Outcome}; +use frame_support::traits::fungibles::InspectMetadata; +use frame_support::traits::tokens::fungibles::approvals::Inspect; +use frame_system::RawOrigin; +use pallet_assets::WeightInfo; +use pallet_contracts::chain_extension::{ + ChainExtension, Environment, Ext, InitState, RetVal, SysConfig, +}; +use parity_scale_codec::Encode; +use sp_runtime::traits::StaticLookup; +use sp_runtime::DispatchError; +use sp_std::marker::PhantomData; +use sp_std::vec::Vec; + +enum AssetsFunc { + Create, + Transfer, + Mint, + Burn, + BalanceOf, + TotalSupply, + Allowance, + ApproveTransfer, + CancelApproval, + TransferApproved, + SetMetadata, + MetadataName, + MetadataSymbol, + MetadataDecimals, + TransferOwnership, +} + +impl TryFrom for AssetsFunc { + type Error = DispatchError; + + fn try_from(value: u16) -> Result { + match value { + 1 => Ok(AssetsFunc::Create), + 2 => Ok(AssetsFunc::Transfer), + 3 => Ok(AssetsFunc::Mint), + 4 => Ok(AssetsFunc::Burn), + 5 => Ok(AssetsFunc::BalanceOf), + 6 => Ok(AssetsFunc::TotalSupply), + 7 => Ok(AssetsFunc::Allowance), + 8 => Ok(AssetsFunc::ApproveTransfer), + 9 => Ok(AssetsFunc::CancelApproval), + 10 => Ok(AssetsFunc::TransferApproved), + 11 => Ok(AssetsFunc::SetMetadata), + 12 => Ok(AssetsFunc::MetadataName), + 13 => Ok(AssetsFunc::MetadataSymbol), + 14 => Ok(AssetsFunc::MetadataDecimals), + 15 => Ok(AssetsFunc::TransferOwnership), + _ => Err(DispatchError::Other( + "PalletAssetsExtension: Unimplemented func_id", + )), + } + } +} + +/// Pallet Assets chain extension. 
+pub struct AssetsExtension(PhantomData<(T, W)>); + +impl Default for AssetsExtension { + fn default() -> Self { + AssetsExtension(PhantomData) + } +} + +impl ChainExtension for AssetsExtension +where + T: pallet_assets::Config + pallet_contracts::Config, + <::Lookup as StaticLookup>::Source: From<::AccountId>, + ::AccountId: From<[u8; 32]>, + W: weights::WeightInfo, +{ + fn call(&mut self, env: Environment) -> Result + where + E: Ext, + { + let func_id = env.func_id().try_into()?; + let mut env = env.buf_in_buf_out(); + + match func_id { + AssetsFunc::Create => { + let (origin, id, admin, min_balance): ( + Origin, + ::AssetId, + T::AccountId, + T::Balance, + ) = env.read_as()?; + + let base_weight = ::WeightInfo::create(); + env.charge_weight(base_weight)?; + + let raw_origin = select_origin!(&origin, env.ext().address().clone()); + + let call_result = pallet_assets::Pallet::::create( + raw_origin.into(), + id.into(), + admin.into(), + min_balance, + ); + return match call_result { + Err(e) => { + let mapped_error = Outcome::from(e); + Ok(RetVal::Converging(mapped_error as u32)) + } + Ok(_) => Ok(RetVal::Converging(Outcome::Success as u32)), + }; + } + AssetsFunc::Transfer => { + let (origin, id, target, amount): ( + Origin, + ::AssetId, + T::AccountId, + T::Balance, + ) = env.read_as()?; + + let base_weight = ::WeightInfo::transfer(); + env.charge_weight(base_weight)?; + + let raw_origin = select_origin!(&origin, env.ext().address().clone()); + + let call_result = pallet_assets::Pallet::::transfer( + raw_origin.into(), + id.into(), + target.into(), + amount, + ); + return match call_result { + Err(e) => { + let mapped_error = Outcome::from(e); + Ok(RetVal::Converging(mapped_error as u32)) + } + Ok(_) => Ok(RetVal::Converging(Outcome::Success as u32)), + }; + } + AssetsFunc::Mint => { + let (origin, id, beneficiary, amount): ( + Origin, + ::AssetId, + T::AccountId, + T::Balance, + ) = env.read_as()?; + + let base_weight = ::WeightInfo::mint(); + 
env.charge_weight(base_weight)?; + + let raw_origin = select_origin!(&origin, env.ext().address().clone()); + + let call_result = pallet_assets::Pallet::::mint( + raw_origin.into(), + id.into(), + beneficiary.into(), + amount, + ); + return match call_result { + Err(e) => { + let mapped_error = Outcome::from(e); + Ok(RetVal::Converging(mapped_error as u32)) + } + Ok(_) => Ok(RetVal::Converging(Outcome::Success as u32)), + }; + } + AssetsFunc::Burn => { + let (origin, id, who, amount): ( + Origin, + ::AssetId, + T::AccountId, + T::Balance, + ) = env.read_as()?; + + let base_weight = ::WeightInfo::burn(); + env.charge_weight(base_weight)?; + + let raw_origin = select_origin!(&origin, env.ext().address().clone()); + + let call_result = pallet_assets::Pallet::::burn( + raw_origin.into(), + id.into(), + who.into(), + amount, + ); + return match call_result { + Err(e) => { + let mapped_error = Outcome::from(e); + Ok(RetVal::Converging(mapped_error as u32)) + } + Ok(_) => Ok(RetVal::Converging(Outcome::Success as u32)), + }; + } + AssetsFunc::BalanceOf => { + let (id, who): (::AssetId, T::AccountId) = + env.read_as()?; + + let base_weight = ::balance_of(); + env.charge_weight(base_weight)?; + + let balance = pallet_assets::Pallet::::balance(id, who); + env.write(&balance.encode(), false, None)?; + } + AssetsFunc::TotalSupply => { + let id: ::AssetId = env.read_as()?; + + let base_weight = ::total_supply(); + env.charge_weight(base_weight)?; + + let total_supply = pallet_assets::Pallet::::total_supply(id); + env.write(&total_supply.encode(), false, None)?; + } + AssetsFunc::Allowance => { + let (id, owner, delegate): ( + ::AssetId, + T::AccountId, + T::AccountId, + ) = env.read_as()?; + + let base_weight = ::allowance(); + env.charge_weight(base_weight)?; + + let allowance = pallet_assets::Pallet::::allowance(id, &owner, &delegate); + env.write(&allowance.encode(), false, None)?; + } + AssetsFunc::ApproveTransfer => { + let (origin, id, delegate, amount): ( + Origin, + 
::AssetId, + T::AccountId, + T::Balance, + ) = env.read_as()?; + + let base_weight = ::WeightInfo::approve_transfer(); + env.charge_weight(base_weight)?; + + let raw_origin = select_origin!(&origin, env.ext().address().clone()); + + let call_result = pallet_assets::Pallet::::approve_transfer( + raw_origin.into(), + id.into(), + delegate.into(), + amount, + ); + return match call_result { + Err(e) => { + let mapped_error = Outcome::from(e); + Ok(RetVal::Converging(mapped_error as u32)) + } + Ok(_) => Ok(RetVal::Converging(Outcome::Success as u32)), + }; + } + AssetsFunc::CancelApproval => { + let (origin, id, delegate): ( + Origin, + ::AssetId, + T::AccountId, + ) = env.read_as()?; + + let base_weight = ::WeightInfo::cancel_approval(); + env.charge_weight(base_weight)?; + + let raw_origin = select_origin!(&origin, env.ext().address().clone()); + + let call_result = pallet_assets::Pallet::::cancel_approval( + raw_origin.into(), + id.into(), + delegate.into(), + ); + return match call_result { + Err(e) => { + let mapped_error = Outcome::from(e); + Ok(RetVal::Converging(mapped_error as u32)) + } + Ok(_) => Ok(RetVal::Converging(Outcome::Success as u32)), + }; + } + AssetsFunc::TransferApproved => { + let (origin, id, owner, destination, amount): ( + Origin, + ::AssetId, + T::AccountId, + T::AccountId, + T::Balance, + ) = env.read_as()?; + + let base_weight = ::WeightInfo::transfer_approved(); + env.charge_weight(base_weight)?; + + let raw_origin = select_origin!(&origin, env.ext().address().clone()); + + let call_result = pallet_assets::Pallet::::transfer_approved( + raw_origin.into(), + id.into(), + owner.into(), + destination.into(), + amount, + ); + return match call_result { + Err(e) => { + let mapped_error = Outcome::from(e); + Ok(RetVal::Converging(mapped_error as u32)) + } + Ok(_) => Ok(RetVal::Converging(Outcome::Success as u32)), + }; + } + AssetsFunc::SetMetadata => { + let (origin, id, name, symbol, decimals): ( + Origin, + ::AssetId, + Vec, + Vec, + u8, + ) 
= env.read_as_unbounded(env.in_len())?; + + let base_weight = ::WeightInfo::set_metadata( + name.len() as u32, + symbol.len() as u32, + ); + env.charge_weight(base_weight)?; + + let raw_origin = select_origin!(&origin, env.ext().address().clone()); + + let call_result = pallet_assets::Pallet::::set_metadata( + raw_origin.into(), + id.into(), + name, + symbol, + decimals, + ); + return match call_result { + Err(e) => { + let mapped_error = Outcome::from(e); + Ok(RetVal::Converging(mapped_error as u32)) + } + Ok(_) => Ok(RetVal::Converging(Outcome::Success as u32)), + }; + } + AssetsFunc::MetadataName => { + let id: ::AssetId = env.read_as()?; + + let base_weight = ::metadata_name(); + env.charge_weight(base_weight)?; + + let name = pallet_assets::Pallet::::name(&id); + env.write(&name.encode(), false, None)?; + } + AssetsFunc::MetadataSymbol => { + let id: ::AssetId = env.read_as()?; + + let base_weight = ::metadata_symbol(); + env.charge_weight(base_weight)?; + + let symbol = pallet_assets::Pallet::::symbol(&id); + env.write(&symbol.encode(), false, None)?; + } + AssetsFunc::MetadataDecimals => { + let id: ::AssetId = env.read_as()?; + + let base_weight = ::metadata_decimals(); + env.charge_weight(base_weight)?; + + let decimals = pallet_assets::Pallet::::decimals(&id); + env.write(&decimals.encode(), false, None)?; + } + AssetsFunc::TransferOwnership => { + let (origin, id, owner): ( + Origin, + ::AssetId, + T::AccountId, + ) = env.read_as()?; + + let base_weight = ::WeightInfo::transfer_ownership(); + env.charge_weight(base_weight)?; + + let raw_origin = select_origin!(&origin, env.ext().address().clone()); + + let call_result = pallet_assets::Pallet::::transfer_ownership( + raw_origin.into(), + id.into(), + owner.into(), + ); + return match call_result { + Err(e) => { + let mapped_error = Outcome::from(e); + Ok(RetVal::Converging(mapped_error as u32)) + } + Ok(_) => Ok(RetVal::Converging(Outcome::Success as u32)), + }; + } + } + + 
Ok(RetVal::Converging(Outcome::Success as u32)) + } +} diff --git a/chain-extensions/pallet-assets/src/weights.rs b/chain-extensions/pallet-assets/src/weights.rs new file mode 100644 index 0000000000..3b2913684a --- /dev/null +++ b/chain-extensions/pallet-assets/src/weights.rs @@ -0,0 +1,62 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet-assets chain-extension. 
+pub trait WeightInfo { + fn balance_of() -> Weight; + fn total_supply() -> Weight; + fn allowance() -> Weight; + fn metadata_name() -> Weight; + fn metadata_symbol() -> Weight; + fn metadata_decimals() -> Weight; +} + +/// Weights for pallet-assets chain-extension +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + fn balance_of() -> Weight { + T::DbWeight::get().reads(1 as u64) + } + + fn total_supply() -> Weight { + T::DbWeight::get().reads(1 as u64) + } + + fn allowance() -> Weight { + T::DbWeight::get().reads(1 as u64) + } + + fn metadata_name() -> Weight { + T::DbWeight::get().reads(1 as u64) + } + + fn metadata_symbol() -> Weight { + T::DbWeight::get().reads(1 as u64) + } + + fn metadata_decimals() -> Weight { + T::DbWeight::get().reads(1 as u64) + } +} diff --git a/chain-extensions/types/assets/Cargo.toml b/chain-extensions/types/assets/Cargo.toml new file mode 100644 index 0000000000..af2c61ada9 --- /dev/null +++ b/chain-extensions/types/assets/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "assets-chain-extension-types" +version = "0.1.0" +license = "Apache-2.0" +description = "Types definitions for assets chain-extension" +authors.workspace = true +edition.workspace = true +homepage.workspace = true +repository.workspace = true + +[dependencies] +parity-scale-codec = { workspace = true } +scale-info = { workspace = true } + +frame-system = { workspace = true } +pallet-contracts = { workspace = true } +sp-runtime = { workspace = true } + +[features] +default = ["std"] +std = [ + "scale-info/std", + "parity-scale-codec/std", + "pallet-contracts/std", + "sp-runtime/std", + "frame-system/std", +] diff --git a/chain-extensions/types/assets/src/lib.rs b/chain-extensions/types/assets/src/lib.rs new file mode 100644 index 0000000000..5ca4b8bfbb --- /dev/null +++ b/chain-extensions/types/assets/src/lib.rs @@ -0,0 +1,128 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +#![cfg_attr(not(feature = "std"), no_std)] +use parity_scale_codec::MaxEncodedLen; +use parity_scale_codec::{Decode, Encode}; +use sp_runtime::{DispatchError, ModuleError}; + +#[derive(PartialEq, Eq, Copy, Clone, Encode, Decode, Debug)] +#[cfg_attr(feature = "std", derive(scale_info::TypeInfo))] +pub enum Outcome { + /// Success + Success = 0, + /// Account balance must be greater than or equal to the transfer amount. + BalanceLow = 1, + /// The account to alter does not exist. + NoAccount = 2, + /// The signing account has no permission to do the operation. + NoPermission = 3, + /// The given asset ID is unknown. + Unknown = 4, + /// The origin account is frozen. + Frozen = 5, + /// The asset ID is already taken. + InUse = 6, + /// Invalid witness data given. + BadWitness = 7, + /// Minimum balance should be non-zero. + MinBalanceZero = 8, + /// Unable to increment the consumer reference counters on the account. Either no provider + /// reference exists to allow a non-zero balance of a non-self-sufficient asset, or the + /// maximum number of consumers has been reached. + NoProvider = 9, + /// Invalid metadata given. + BadMetadata = 10, + /// No approval exists that would allow the transfer. + Unapproved = 11, + /// The source account would not survive the transfer and it needs to stay alive. 
+ WouldDie = 12, + /// The asset-account already exists. + AlreadyExists = 13, + /// The asset-account doesn't have an associated deposit. + NoDeposit = 14, + /// The operation would result in funds being burned. + WouldBurn = 15, + /// The asset is a live asset and is actively being used. Usually emit for operations such + /// as `start_destroy` which require the asset to be in a destroying state. + LiveAsset = 16, + /// The asset is not live, and likely being destroyed. + AssetNotLive = 17, + /// The asset status is not the expected status. + IncorrectStatus = 18, + /// The asset should be frozen before the given operation. + NotFrozen = 19, + /// Origin Caller is not supported + OriginCannotBeCaller = 98, + /// Unknown error + RuntimeError = 99, +} + +impl From for Outcome { + fn from(input: DispatchError) -> Self { + let error_text = match input { + DispatchError::Module(ModuleError { message, .. }) => message, + _ => Some("No module error Info"), + }; + return match error_text { + Some("BalanceLow") => Outcome::BalanceLow, + Some("NoAccount") => Outcome::NoAccount, + Some("NoPermission") => Outcome::NoPermission, + Some("Unknown") => Outcome::Unknown, + Some("Frozen") => Outcome::Frozen, + Some("InUse") => Outcome::InUse, + Some("BadWitness") => Outcome::BadWitness, + Some("MinBalanceZero") => Outcome::MinBalanceZero, + Some("NoProvider") => Outcome::NoProvider, + Some("BadMetadata") => Outcome::BadMetadata, + Some("Unapproved") => Outcome::Unapproved, + Some("WouldDie") => Outcome::WouldDie, + Some("AlreadyExists") => Outcome::AlreadyExists, + Some("NoDeposit") => Outcome::NoDeposit, + Some("WouldBurn") => Outcome::WouldBurn, + Some("LiveAsset") => Outcome::LiveAsset, + Some("AssetNotLive") => Outcome::AssetNotLive, + Some("IncorrectStatus") => Outcome::IncorrectStatus, + Some("NotFrozen") => Outcome::NotFrozen, + _ => Outcome::RuntimeError, + }; + } +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen)] +#[cfg_attr(feature = "std", 
derive(scale_info::TypeInfo))] +pub enum Origin { + Caller, + Address, +} + +impl Default for Origin { + fn default() -> Self { + Self::Address + } +} + +#[macro_export] +macro_rules! select_origin { + ($origin:expr, $account:expr) => { + match $origin { + Origin::Caller => return Ok(RetVal::Converging(Outcome::OriginCannotBeCaller as u32)), + Origin::Address => RawOrigin::Signed($account), + } + }; +} diff --git a/chain-extensions/types/dapps-staking/Cargo.toml b/chain-extensions/types/dapps-staking/Cargo.toml new file mode 100644 index 0000000000..fcd45a0034 --- /dev/null +++ b/chain-extensions/types/dapps-staking/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "dapps-staking-chain-extension-types" +version = "1.1.0" +license = "Apache-2.0" +description = "Types definitions for dapps-staking chain-extension" +authors.workspace = true +edition.workspace = true +homepage.workspace = true +repository.workspace = true + +[dependencies] +frame-support = { workspace = true } +parity-scale-codec = { workspace = true } +scale-info = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } + +[features] +default = ["std"] +std = [ + "parity-scale-codec/std", + "frame-support/std", + "scale-info/std", + "sp-core/std", + "sp-runtime/std", +] diff --git a/chain-extensions/types/dapps-staking/src/lib.rs b/chain-extensions/types/dapps-staking/src/lib.rs new file mode 100644 index 0000000000..585a15213c --- /dev/null +++ b/chain-extensions/types/dapps-staking/src/lib.rs @@ -0,0 +1,154 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +#![cfg_attr(not(feature = "std"), no_std)] +use frame_support::pallet_prelude::MaxEncodedLen; +use parity_scale_codec::{Decode, Encode}; +use sp_runtime::{DispatchError, ModuleError}; + +#[cfg_attr(feature = "std", derive(scale_info::TypeInfo))] +#[derive(PartialEq, Eq, Copy, Clone, Encode, Decode, Debug)] +pub enum DSError { + /// Success + Success = 0, + /// Disabled + Disabled = 1, + /// No change in maintenance mode + NoMaintenanceModeChange = 2, + /// Upgrade is too heavy, reduce the weight parameter. + UpgradeTooHeavy = 3, + /// Can not stake with zero value. + StakingWithNoValue = 4, + /// Can not stake with value less than minimum staking value + InsufficientValue = 5, + /// Number of stakers per contract exceeded. + MaxNumberOfStakersExceeded = 6, + /// Targets must be operated contracts + NotOperatedContract = 7, + /// Contract isn't staked. + NotStakedContract = 8, + /// Contract isn't unregistered. + NotUnregisteredContract = 9, + /// Unclaimed rewards should be claimed before withdrawing stake. + UnclaimedRewardsRemaining = 10, + /// Unstaking a contract with zero value + UnstakingWithNoValue = 11, + /// There are no previously unbonded funds that can be unstaked and withdrawn. + NothingToWithdraw = 12, + /// The contract is already registered by other account + AlreadyRegisteredContract = 13, + /// User attempts to register with address which is not contract + ContractIsNotValid = 14, + /// This account was already used to register contract + AlreadyUsedDeveloperAccount = 15, + /// Smart contract not owned by the account id. 
+ NotOwnedContract = 16, + /// Report issue on github if this is ever emitted + UnknownEraReward = 17, + /// Report issue on github if this is ever emitted + UnexpectedStakeInfoEra = 18, + /// Contract has too many unlocking chunks. Withdraw the existing chunks if possible + /// or wait for current chunks to complete unlocking process to withdraw them. + TooManyUnlockingChunks = 19, + /// Contract already claimed in this era and reward is distributed + AlreadyClaimedInThisEra = 20, + /// Era parameter is out of bounds + EraOutOfBounds = 21, + /// Too many active `EraStake` values for (staker, contract) pairing. + /// Claim existing rewards to fix this problem. + TooManyEraStakeValues = 22, + /// To register a contract, pre-approval is needed for this address + RequiredContractPreApproval = 23, + /// Developer's account is already part of pre-approved list + AlreadyPreApprovedDeveloper = 24, + /// Account is not actively staking + NotActiveStaker = 25, + /// Transfering nomination to the same contract + NominationTransferToSameContract = 26, + /// Unexpected reward destination value + RewardDestinationValueOutOfBounds = 27, + /// Unknown error + UnknownError = 99, +} + +impl TryFrom for DSError { + type Error = DispatchError; + + fn try_from(input: DispatchError) -> Result { + let error_text = match input { + DispatchError::Module(ModuleError { message, .. 
}) => message, + _ => Some("No module error Info"), + }; + return match error_text { + Some("Disabled") => Ok(DSError::Disabled), + Some("NoMaintenanceModeChange") => Ok(DSError::NoMaintenanceModeChange), + Some("UpgradeTooHeavy") => Ok(DSError::UpgradeTooHeavy), + Some("StakingWithNoValue") => Ok(DSError::StakingWithNoValue), + Some("InsufficientValue") => Ok(DSError::InsufficientValue), + Some("MaxNumberOfStakersExceeded") => Ok(DSError::MaxNumberOfStakersExceeded), + Some("NotOperatedContract") => Ok(DSError::NotOperatedContract), + Some("NotStakedContract") => Ok(DSError::NotStakedContract), + Some("NotUnregisteredContract") => Ok(DSError::NotUnregisteredContract), + Some("UnclaimedRewardsRemaining") => Ok(DSError::UnclaimedRewardsRemaining), + Some("UnstakingWithNoValue") => Ok(DSError::UnstakingWithNoValue), + Some("NothingToWithdraw") => Ok(DSError::NothingToWithdraw), + Some("AlreadyRegisteredContract") => Ok(DSError::AlreadyRegisteredContract), + Some("ContractIsNotValid") => Ok(DSError::ContractIsNotValid), + Some("AlreadyUsedDeveloperAccount") => Ok(DSError::AlreadyUsedDeveloperAccount), + Some("NotOwnedContract") => Ok(DSError::NotOwnedContract), + Some("UnknownEraReward") => Ok(DSError::UnknownEraReward), + Some("UnexpectedStakeInfoEra") => Ok(DSError::UnexpectedStakeInfoEra), + Some("TooManyUnlockingChunks") => Ok(DSError::TooManyUnlockingChunks), + Some("AlreadyClaimedInThisEra") => Ok(DSError::AlreadyClaimedInThisEra), + Some("EraOutOfBounds") => Ok(DSError::EraOutOfBounds), + Some("TooManyEraStakeValues") => Ok(DSError::TooManyEraStakeValues), + Some("RequiredContractPreApproval") => Ok(DSError::RequiredContractPreApproval), + Some("AlreadyPreApprovedDeveloper") => Ok(DSError::AlreadyPreApprovedDeveloper), + Some("NotActiveStaker") => Ok(DSError::NotActiveStaker), + Some("NominationTransferToSameContract") => { + Ok(DSError::NominationTransferToSameContract) + } + _ => Ok(DSError::UnknownError), + }; + } +} + +#[derive(Debug, Copy, Clone, 
PartialEq, Eq, Encode, Decode, MaxEncodedLen)] +pub struct DappsStakingValueInput { + pub contract: [u8; 32], + pub value: Balance, +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen)] +pub struct DappsStakingAccountInput { + pub contract: [u8; 32], + pub staker: [u8; 32], +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen)] +pub struct DappsStakingEraInput { + pub contract: [u8; 32], + pub era: u32, +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen)] +pub struct DappsStakingNominationInput { + pub origin_contract: [u8; 32], + pub target_contract: [u8; 32], + pub value: Balance, +} diff --git a/chain-extensions/types/xvm/Cargo.toml b/chain-extensions/types/xvm/Cargo.toml new file mode 100644 index 0000000000..dd9129cc2b --- /dev/null +++ b/chain-extensions/types/xvm/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "xvm-chain-extension-types" +version = "0.1.0" +license = "Apache-2.0" +description = "Types definitions for contracts using xvm chain-extension." +authors.workspace = true +edition.workspace = true +homepage.workspace = true +repository.workspace = true + +[dependencies] +parity-scale-codec = { workspace = true } +scale-info = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } + +[features] +default = ["std"] +std = [ + "parity-scale-codec/std", + "scale-info/std", + "sp-runtime/std", + "sp-std/std", +] diff --git a/chain-extensions/types/xvm/src/lib.rs b/chain-extensions/types/xvm/src/lib.rs new file mode 100644 index 0000000000..76443104fd --- /dev/null +++ b/chain-extensions/types/xvm/src/lib.rs @@ -0,0 +1,60 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +#![cfg_attr(not(feature = "std"), no_std)] + +use parity_scale_codec::{Decode, Encode}; +use sp_runtime::{DispatchError, ModuleError}; +use sp_std::vec::Vec; + +#[cfg_attr(feature = "std", derive(scale_info::TypeInfo))] +#[derive(PartialEq, Eq, Copy, Clone, Encode, Decode, Debug)] +pub enum XvmExecutionResult { + /// Success + Success = 0, + // TODO: expand this with concrete XVM errors + /// Error not (yet) covered by a dedidacted code + UnknownError = 255, +} + +impl TryFrom for XvmExecutionResult { + type Error = DispatchError; + + fn try_from(input: DispatchError) -> Result { + let _error_text = match input { + DispatchError::Module(ModuleError { message, .. }) => message, + _ => Some("No module error Info"), + }; + + // TODO: expand this with concrete XVM errors (see dapps-staking types for example) + Ok(XvmExecutionResult::UnknownError) + } +} + +#[derive(Clone, PartialEq, Eq, Encode, Decode, Debug)] +pub struct XvmCallArgs { + /// virtual machine identifier + pub vm_id: u8, + /// Call destination (e.g. 
address) + pub to: Vec, + /// Encoded call params + pub input: Vec, +} + +pub const FRONTIER_VM_ID: u8 = 0x0F; +pub const PARITY_WASM_VM_ID: u8 = 0x1F; diff --git a/chain-extensions/xvm/Cargo.toml b/chain-extensions/xvm/Cargo.toml new file mode 100644 index 0000000000..ae661fa79d --- /dev/null +++ b/chain-extensions/xvm/Cargo.toml @@ -0,0 +1,43 @@ +[package] +name = "pallet-chain-extension-xvm" +version = "0.1.0" +license = "Apache-2.0" +description = "Chain extension for XVM" +authors.workspace = true +edition.workspace = true +homepage.workspace = true +repository.workspace = true + +[dependencies] +frame-support = { workspace = true } +frame-system = { workspace = true } +log = { workspace = true } +num-traits = { workspace = true } +pallet-contracts = { workspace = true } +pallet-contracts-primitives = { workspace = true } +parity-scale-codec = { workspace = true } +scale-info = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } + +# Astar +pallet-xvm = { workspace = true } +xvm-chain-extension-types = { workspace = true } + +[features] +default = ["std"] +std = [ + "parity-scale-codec/std", + "frame-support/std", + "frame-system/std", + "num-traits/std", + "pallet-contracts/std", + "pallet-contracts-primitives/std", + "scale-info/std", + "sp-std/std", + "sp-core/std", + "sp-runtime/std", + # Astar + "pallet-xvm/std", +] diff --git a/chain-extensions/xvm/src/lib.rs b/chain-extensions/xvm/src/lib.rs new file mode 100644 index 0000000000..851271c187 --- /dev/null +++ b/chain-extensions/xvm/src/lib.rs @@ -0,0 +1,120 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +#![cfg_attr(not(feature = "std"), no_std)] + +use frame_support::dispatch::Encode; +use frame_support::weights::Weight; +use pallet_contracts::chain_extension::{ChainExtension, Environment, Ext, InitState, RetVal}; +use pallet_xvm::XvmContext; +use sp_runtime::DispatchError; +use sp_std::marker::PhantomData; +use xvm_chain_extension_types::{XvmCallArgs, XvmExecutionResult}; + +enum XvmFuncId { + XvmCall, + // TODO: expand with other calls too +} + +impl TryFrom for XvmFuncId { + type Error = DispatchError; + + fn try_from(value: u16) -> Result { + match value { + 1 => Ok(XvmFuncId::XvmCall), + _ => Err(DispatchError::Other( + "Unsupported func id in Xvm chain extension", + )), + } + } +} + +/// XVM chain extension. +pub struct XvmExtension(PhantomData); + +impl Default for XvmExtension { + fn default() -> Self { + XvmExtension(PhantomData) + } +} + +impl ChainExtension for XvmExtension +where + T: pallet_contracts::Config + pallet_xvm::Config, +{ + fn call(&mut self, env: Environment) -> Result + where + E: Ext, + { + let func_id = env.func_id().try_into()?; + let mut env = env.buf_in_buf_out(); + + match func_id { + XvmFuncId::XvmCall => { + // We need to immediately charge for the worst case scenario. Gas equals Weight in pallet-contracts context. 
+ let remaining_weight = env.ext().gas_meter().gas_left(); + // We don't track used proof size, so we can't refund after. + // So we will charge a 32KB dummy value as a temporary replacement. + let charged_weight = + env.charge_weight(remaining_weight.set_proof_size(32 * 1024))?; + + let caller = env.ext().caller().clone(); + + let XvmCallArgs { vm_id, to, input } = env.read_as_unbounded(env.in_len())?; + + let _origin_address = env.ext().address().clone(); + let _value = env.ext().value_transferred(); + let xvm_context = XvmContext { + id: vm_id, + max_weight: remaining_weight, + env: None, + }; + + let call_result = + pallet_xvm::Pallet::::xvm_bare_call(xvm_context, caller, to, input); + + let actual_weight = pallet_xvm::consumed_weight(&call_result); + // TODO: implement proof of size refund. + env.adjust_weight(charged_weight, Weight::from_ref_time(actual_weight)); + + match call_result { + Ok(success) => { + log::trace!( + target: "xvm-extension::xvm_call", + "success: {:?}", success + ); + + let buffer: sp_std::vec::Vec<_> = success.output().encode(); + env.write(&buffer, false, None)?; + Ok(RetVal::Converging(XvmExecutionResult::Success as u32)) + } + + Err(failure) => { + log::trace!( + target: "xvm-extension::xvm_call", + "failure: {:?}", failure + ); + + // TODO Propagate error + Ok(RetVal::Converging(XvmExecutionResult::UnknownError as u32)) + } + } + } + } + } +} diff --git a/pallets/block-reward/Cargo.toml b/pallets/block-reward/Cargo.toml new file mode 100644 index 0000000000..4520a1afff --- /dev/null +++ b/pallets/block-reward/Cargo.toml @@ -0,0 +1,48 @@ +[package] +name = "pallet-block-reward" +version = "2.2.2" +license = "Apache-2.0" +description = "FRAME pallet for managing block reward issuance & distribution" +authors.workspace = true +edition.workspace = true +homepage.workspace = true +repository.workspace = true + +[dependencies] +parity-scale-codec = { workspace = true } +serde = { workspace = true } + +frame-support = { workspace = true } 
+frame-system = { workspace = true } +scale-info = { workspace = true } +sp-arithmetic = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } + +frame-benchmarking = { workspace = true, optional = true } + +[dev-dependencies] +pallet-balances = { workspace = true } +pallet-timestamp = { workspace = true } +sp-core = { workspace = true } + +[features] +default = ["std"] +std = [ + "parity-scale-codec/std", + "sp-core/std", + "scale-info/std", + "sp-std/std", + "serde/std", + "frame-support/std", + "frame-system/std", + "pallet-timestamp/std", + "pallet-balances/std", +] +runtime-benchmarks = [ + "frame-benchmarking", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] +try-runtime = ["frame-support/try-runtime"] diff --git a/pallets/block-reward/src/benchmarking.rs b/pallets/block-reward/src/benchmarking.rs new file mode 100644 index 0000000000..5f1a9c1a47 --- /dev/null +++ b/pallets/block-reward/src/benchmarking.rs @@ -0,0 +1,56 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . 
+ +#![cfg(feature = "runtime-benchmarks")] + +use super::*; + +use frame_benchmarking::{benchmarks, impl_benchmark_test_suite}; +use frame_system::{Pallet as System, RawOrigin}; + +/// Assert that the last event equals the provided one. +fn assert_last_event(generic_event: ::RuntimeEvent) { + System::::assert_last_event(generic_event.into()); +} + +benchmarks! { + + set_configuration { + let reward_config = RewardDistributionConfig::default(); + assert!(reward_config.is_consistent()); + }: _(RawOrigin::Root, reward_config.clone()) + verify { + assert_last_event::(Event::::DistributionConfigurationChanged(reward_config).into()); + } +} + +#[cfg(test)] +mod tests { + use crate::mock; + use frame_support::sp_io::TestExternalities; + + pub fn new_test_ext() -> TestExternalities { + mock::ExternalityBuilder::build() + } +} + +impl_benchmark_test_suite!( + Pallet, + crate::benchmarking::tests::new_test_ext(), + crate::mock::TestRuntime, +); diff --git a/pallets/block-reward/src/lib.rs b/pallets/block-reward/src/lib.rs new file mode 100644 index 0000000000..dee213e114 --- /dev/null +++ b/pallets/block-reward/src/lib.rs @@ -0,0 +1,350 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +//! # Block Reward Distribution Pallet +//! +//! - [`Config`] +//! +//! ## Overview +//! +//! 
Pallet that implements block reward issuance and distribution mechanics. +//! +//! After issuing a block reward, pallet will calculate how to distribute the reward +//! based on configurable parameters and chain state. +//! +//! Major on-chain factors which can influence reward distribution are total issuance and total value locked by dapps staking. +//! +//! ## Interface +//! +//! ### Dispatchable Function +//! +//! - `set_configuration` - used to change reward distribution configuration parameters +//! +//! ### Other +//! +//! - `on_timestamp_set` - This pallet implements the `OnTimestampSet` trait to handle block production. +//! Note: We assume that it's impossible to set timestamp two times in a block. +//! +//! ## Usage +//! +//! 1. Pallet should be set as a handler of `OnTimestampSet`. +//! 2. `DappsStakingTvlProvider` handler should be defined as an impl of `TvlProvider` trait. For example: +//! ```nocompile +//! pub struct TvlProvider(); +//! impl Get for TvlProvider { +//! fn tvl() -> Balance { +//! DappsStaking::total_locked_value() +//! } +//! } +//! ``` +//! 3. `BeneficiaryPayout` handler should be defined as an impl of `BeneficiaryPayout` trait. For example: +//! ```nocompile +//! pub struct BeneficiaryPayout(); +//! impl BeneficiaryPayout> for BeneficiaryPayout { +//! +//! fn treasury(reward: NegativeImbalanceOf) { +//! Balances::resolve_creating(&TREASURY_POT.into_account(), reward); +//! } +//! +//! fn collators(reward: NegativeImbalanceOf) { +//! Balances::resolve_creating(&COLLATOR_POT.into_account(), reward); +//! } +//! +//! fn dapps_staking(stakers: NegativeImbalanceOf, dapps: NegativeImbalanceOf) { +//! DappsStaking::rewards(stakers, dapps); +//! } +//! } +//! ``` +//! 4. Set `RewardAmount` to desired block reward value in native currency. +//! 
+ +#![cfg_attr(not(feature = "std"), no_std)] + +pub use pallet::*; + +use frame_support::pallet_prelude::*; +use frame_support::{ + log, + traits::{Currency, Get, Imbalance, OnTimestampSet}, +}; +use frame_system::{ensure_root, pallet_prelude::*}; +use sp_runtime::{ + traits::{CheckedAdd, Zero}, + Perbill, +}; +use sp_std::vec; + +#[cfg(any(feature = "runtime-benchmarks"))] +pub mod benchmarking; +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; + +pub mod weights; +pub use weights::WeightInfo; + +#[frame_support::pallet] +pub mod pallet { + + use super::*; + + #[pallet::pallet] + pub struct Pallet(PhantomData); + + /// The balance type of this pallet. + pub(crate) type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; + + // Negative imbalance type of this pallet. + pub(crate) type NegativeImbalanceOf = <::Currency as Currency< + ::AccountId, + >>::NegativeImbalance; + + #[pallet::config] + pub trait Config: frame_system::Config { + /// The currency trait. + type Currency: Currency; + + /// Provides information about how much value is locked by dapps staking + type DappsStakingTvlProvider: Get>; + + /// Used to payout rewards + type BeneficiaryPayout: BeneficiaryPayout>; + + /// The amount of issuance for each block. + #[pallet::constant] + type RewardAmount: Get>; + + /// The overarching event type. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + } + + #[pallet::storage] + #[pallet::getter(fn reward_config)] + pub type RewardDistributionConfigStorage = + StorageValue<_, RewardDistributionConfig, ValueQuery>; + + #[pallet::event] + #[pallet::generate_deposit(pub(crate) fn deposit_event)] + pub enum Event { + /// Distribution configuration has been updated. 
+ DistributionConfigurationChanged(RewardDistributionConfig), + } + + #[pallet::error] + pub enum Error { + /// Sum of all rations must be one whole (100%) + InvalidDistributionConfiguration, + } + + #[pallet::genesis_config] + #[cfg_attr(feature = "std", derive(Default))] + pub struct GenesisConfig { + pub reward_config: RewardDistributionConfig, + } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + assert!(self.reward_config.is_consistent()); + RewardDistributionConfigStorage::::put(self.reward_config.clone()) + } + } + + #[pallet::call] + impl Pallet { + /// Sets the reward distribution configuration parameters which will be used from next block reward distribution. + /// + /// It is mandatory that all components of configuration sum up to one whole (**100%**), + /// otherwise an error `InvalidDistributionConfiguration` will be raised. + /// + /// - `reward_distro_params` - reward distribution params + /// + /// Emits `DistributionConfigurationChanged` with config embeded into event itself. + /// + #[pallet::call_index(0)] + #[pallet::weight(T::WeightInfo::set_configuration())] + pub fn set_configuration( + origin: OriginFor, + reward_distro_params: RewardDistributionConfig, + ) -> DispatchResultWithPostInfo { + ensure_root(origin)?; + + ensure!( + reward_distro_params.is_consistent(), + Error::::InvalidDistributionConfiguration + ); + RewardDistributionConfigStorage::::put(reward_distro_params.clone()); + + Self::deposit_event(Event::::DistributionConfigurationChanged( + reward_distro_params, + )); + + Ok(().into()) + } + } + + impl OnTimestampSet for Pallet { + fn on_timestamp_set(_moment: Moment) { + let inflation = T::Currency::issue(T::RewardAmount::get()); + Self::distribute_rewards(inflation); + } + } + + impl Pallet { + /// Distribute reward between beneficiaries. 
+ /// + /// # Arguments + /// * `reward` - reward that will be split and distributed + /// + fn distribute_rewards(block_reward: NegativeImbalanceOf) { + let distro_params = Self::reward_config(); + + // Pre-calculate balance which will be deposited for each beneficiary + let base_staker_balance = distro_params.base_staker_percent * block_reward.peek(); + let dapps_balance = distro_params.dapps_percent * block_reward.peek(); + let collator_balance = distro_params.collators_percent * block_reward.peek(); + + // This is part that's distributed between stakers and treasury + let adjustable_balance = distro_params.adjustable_percent * block_reward.peek(); + + // Calculate total staker and treasury reward balance + let adjustable_staker_part = if distro_params.ideal_dapps_staking_tvl.is_zero() { + adjustable_balance + } else { + Self::tvl_percentage() / distro_params.ideal_dapps_staking_tvl * adjustable_balance + }; + + let total_staker_balance = base_staker_balance + adjustable_staker_part; + + // Prepare imbalances + let (dapps_imbalance, remainder) = block_reward.split(dapps_balance); + let (stakers_imbalance, remainder) = remainder.split(total_staker_balance); + let (collator_imbalance, treasury_imbalance) = remainder.split(collator_balance); + + // Payout beneficiaries + T::BeneficiaryPayout::treasury(treasury_imbalance); + T::BeneficiaryPayout::collators(collator_imbalance); + T::BeneficiaryPayout::dapps_staking(stakers_imbalance, dapps_imbalance); + } + + /// Provides TVL as percentage of total issuance + fn tvl_percentage() -> Perbill { + let total_issuance = T::Currency::total_issuance(); + if total_issuance.is_zero() { + log::warn!("Total issuance is zero - this should be impossible."); + Zero::zero() + } else { + Perbill::from_rational(T::DappsStakingTvlProvider::get(), total_issuance) + } + } + } +} + +/// List of configuration parameters used to calculate reward distribution portions for all the beneficiaries. 
+///
+/// Note that if `ideal_dapps_staking_tvl` is set to `Zero`, entire `adjustable_percent` goes to the stakers.
+///
+#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)]
+#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))]
+pub struct RewardDistributionConfig {
+    /// Base percentage of reward that goes to treasury
+    #[codec(compact)]
+    pub base_treasury_percent: Perbill,
+    /// Base percentage of reward that goes to stakers
+    #[codec(compact)]
+    pub base_staker_percent: Perbill,
+    /// Percentage of rewards that goes to dApps
+    #[codec(compact)]
+    pub dapps_percent: Perbill,
+    /// Percentage of reward that goes to collators
+    #[codec(compact)]
+    pub collators_percent: Perbill,
+    /// Adjustable reward percentage that either goes to treasury or to stakers
+    #[codec(compact)]
+    pub adjustable_percent: Perbill,
+    /// Target dapps-staking TVL percentage at which adjustable inflation towards stakers becomes saturated
+    #[codec(compact)]
+    pub ideal_dapps_staking_tvl: Perbill,
+}
+
+impl Default for RewardDistributionConfig {
+    /// `default` values based on configuration at the time of writing this code.
+    /// Should be overridden by desired params.
+    fn default() -> Self {
+        RewardDistributionConfig {
+            base_treasury_percent: Perbill::from_percent(40),
+            base_staker_percent: Perbill::from_percent(25),
+            dapps_percent: Perbill::from_percent(25),
+            collators_percent: Perbill::from_percent(10),
+            adjustable_percent: Zero::zero(),
+            ideal_dapps_staking_tvl: Zero::zero(),
+        }
+    }
+}
+
+impl RewardDistributionConfig {
+    /// `true` if sum of all percentages is `one whole`, `false` otherwise.
+    pub fn is_consistent(&self) -> bool {
+        // TODO: perhaps this can be written in a cleaner way?
+ // experimental-only `try_reduce` could be used but it's not available + // https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.try_reduce + + let variables = vec![ + &self.base_treasury_percent, + &self.base_staker_percent, + &self.dapps_percent, + &self.collators_percent, + &self.adjustable_percent, + ]; + + let mut accumulator = Perbill::zero(); + for config_param in variables { + let result = accumulator.checked_add(config_param); + if let Some(mid_result) = result { + accumulator = mid_result; + } else { + return false; + } + } + + Perbill::one() == accumulator + } +} + +/// Defines functions used to payout the beneficiaries of block rewards +pub trait BeneficiaryPayout { + /// Payout reward to the treasury + fn treasury(reward: Imbalance); + + /// Payout reward to the collators + fn collators(reward: Imbalance); + + /// Payout reward to dapps staking + /// + /// # Arguments + /// + /// * `stakers` - reward that goes towards staker reward pot + /// * `dapps` - reward that goes towards dapps reward pot + /// + fn dapps_staking(stakers: Imbalance, dapps: Imbalance); +} diff --git a/pallets/block-reward/src/mock.rs b/pallets/block-reward/src/mock.rs new file mode 100644 index 0000000000..5201fdd8af --- /dev/null +++ b/pallets/block-reward/src/mock.rs @@ -0,0 +1,192 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +use crate::{self as pallet_block_reward, NegativeImbalanceOf}; + +use frame_support::{ + construct_runtime, parameter_types, sp_io::TestExternalities, traits::Currency, traits::Get, + weights::Weight, PalletId, +}; + +use sp_core::H256; +use sp_runtime::{ + testing::Header, + traits::{AccountIdConversion, BlakeTwo256, IdentityLookup}, +}; + +pub(crate) type AccountId = u64; +pub(crate) type BlockNumber = u64; +pub(crate) type Balance = u128; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +/// Value shouldn't be less than 2 for testing purposes, otherwise we cannot test certain corner cases. +pub(crate) const EXISTENTIAL_DEPOSIT: Balance = 2; + +construct_runtime!( + pub struct TestRuntime + where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system, + Balances: pallet_balances, + Timestamp: pallet_timestamp, + BlockReward: pallet_block_reward, + } +); + +parameter_types! 
{ + pub const BlockHashCount: u64 = 250; + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(Weight::from_ref_time(1024)); +} + +impl frame_system::Config for TestRuntime { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type RuntimeOrigin = RuntimeOrigin; + type Index = u64; + type RuntimeCall = RuntimeCall; + type BlockNumber = BlockNumber; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Header = Header; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = BlockHashCount; + type DbWeight = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +parameter_types! { + pub const MaxLocks: u32 = 4; + pub const ExistentialDeposit: Balance = EXISTENTIAL_DEPOSIT; +} + +impl pallet_balances::Config for TestRuntime { + type MaxLocks = MaxLocks; + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type Balance = Balance; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); +} + +parameter_types! { + pub const MinimumPeriod: u64 = 3; +} + +impl pallet_timestamp::Config for TestRuntime { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = MinimumPeriod; + type WeightInfo = (); +} + +// A fairly high block reward so we can detect slight changes in reward distribution +// due to TVL changes. +pub(crate) const BLOCK_REWARD: Balance = 1_000_000; + +// This gives us enough flexibility to get valid percentages by controlling issuance. 
+pub(crate) const TVL: Balance = 1_000_000_000; + +// Fake accounts used to simulate reward beneficiaries balances +pub(crate) const TREASURY_POT: PalletId = PalletId(*b"moktrsry"); +pub(crate) const COLLATOR_POT: PalletId = PalletId(*b"mokcolat"); +pub(crate) const STAKERS_POT: PalletId = PalletId(*b"mokstakr"); +pub(crate) const DAPPS_POT: PalletId = PalletId(*b"mokdapps"); + +// Type used as TVL provider +pub struct TvlProvider(); +impl Get for TvlProvider { + fn get() -> Balance { + TVL + } +} + +// Type used as beneficiary payout handle +pub struct BeneficiaryPayout(); +impl pallet_block_reward::BeneficiaryPayout> + for BeneficiaryPayout +{ + fn treasury(reward: NegativeImbalanceOf) { + Balances::resolve_creating(&TREASURY_POT.into_account_truncating(), reward); + } + + fn collators(reward: NegativeImbalanceOf) { + Balances::resolve_creating(&COLLATOR_POT.into_account_truncating(), reward); + } + + fn dapps_staking( + stakers: NegativeImbalanceOf, + dapps: NegativeImbalanceOf, + ) { + Balances::resolve_creating(&STAKERS_POT.into_account_truncating(), stakers); + Balances::resolve_creating(&DAPPS_POT.into_account_truncating(), dapps); + } +} + +parameter_types! 
{ + pub const RewardAmount: Balance = BLOCK_REWARD; +} + +impl pallet_block_reward::Config for TestRuntime { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type RewardAmount = RewardAmount; + type DappsStakingTvlProvider = TvlProvider; + type BeneficiaryPayout = BeneficiaryPayout; + type WeightInfo = (); +} + +pub struct ExternalityBuilder; + +impl ExternalityBuilder { + pub fn build() -> TestExternalities { + let mut storage = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + + // This will cause some initial issuance + pallet_balances::GenesisConfig:: { + balances: vec![(1, 9000), (2, 800), (3, 10000)], + } + .assimilate_storage(&mut storage) + .ok(); + + let mut ext = TestExternalities::from(storage); + ext.execute_with(|| System::set_block_number(1)); + ext + } +} diff --git a/pallets/block-reward/src/tests.rs b/pallets/block-reward/src/tests.rs new file mode 100644 index 0000000000..2669a74a28 --- /dev/null +++ b/pallets/block-reward/src/tests.rs @@ -0,0 +1,429 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . 
+ +use super::{pallet::Error, Event, *}; +use frame_support::{assert_noop, assert_ok, traits::OnTimestampSet}; +use mock::*; +use sp_runtime::{ + traits::{AccountIdConversion, BadOrigin, Zero}, + Perbill, +}; + +#[test] +fn default_reward_distribution_config_is_consitent() { + let reward_config = RewardDistributionConfig::default(); + assert!(reward_config.is_consistent()); +} + +#[test] +fn reward_distribution_config_is_consistent() { + // 1 + let reward_config = RewardDistributionConfig { + base_treasury_percent: Perbill::from_percent(100), + base_staker_percent: Zero::zero(), + dapps_percent: Zero::zero(), + collators_percent: Zero::zero(), + adjustable_percent: Zero::zero(), + ideal_dapps_staking_tvl: Zero::zero(), + }; + assert!(reward_config.is_consistent()); + + // 2 + let reward_config = RewardDistributionConfig { + base_treasury_percent: Zero::zero(), + base_staker_percent: Perbill::from_percent(100), + dapps_percent: Zero::zero(), + collators_percent: Zero::zero(), + adjustable_percent: Zero::zero(), + ideal_dapps_staking_tvl: Zero::zero(), + }; + assert!(reward_config.is_consistent()); + + // 3 + let reward_config = RewardDistributionConfig { + base_treasury_percent: Zero::zero(), + base_staker_percent: Zero::zero(), + dapps_percent: Zero::zero(), + collators_percent: Zero::zero(), + adjustable_percent: Perbill::from_percent(100), + ideal_dapps_staking_tvl: Perbill::from_percent(13), + }; + assert!(reward_config.is_consistent()); + + // 4 + // 100% + let reward_config = RewardDistributionConfig { + base_treasury_percent: Perbill::from_percent(3), + base_staker_percent: Perbill::from_percent(14), + dapps_percent: Perbill::from_percent(18), + collators_percent: Perbill::from_percent(31), + adjustable_percent: Perbill::from_percent(34), + ideal_dapps_staking_tvl: Zero::zero(), + }; + assert!(reward_config.is_consistent()); +} + +#[test] +fn reward_distribution_config_not_consistent() { + // 1 + let reward_config = RewardDistributionConfig { + 
base_treasury_percent: Perbill::from_percent(100), + ..Default::default() + }; + assert!(!reward_config.is_consistent()); + + // 2 + let reward_config = RewardDistributionConfig { + adjustable_percent: Perbill::from_percent(100), + ..Default::default() + }; + assert!(!reward_config.is_consistent()); + + // 3 + // 99% + let reward_config = RewardDistributionConfig { + base_treasury_percent: Perbill::from_percent(10), + base_staker_percent: Perbill::from_percent(20), + dapps_percent: Perbill::from_percent(20), + collators_percent: Perbill::from_percent(30), + adjustable_percent: Perbill::from_percent(19), + ideal_dapps_staking_tvl: Zero::zero(), + }; + assert!(!reward_config.is_consistent()); + + // 4 + // 101% + let reward_config = RewardDistributionConfig { + base_treasury_percent: Perbill::from_percent(10), + base_staker_percent: Perbill::from_percent(20), + dapps_percent: Perbill::from_percent(20), + collators_percent: Perbill::from_percent(31), + adjustable_percent: Perbill::from_percent(20), + ideal_dapps_staking_tvl: Zero::zero(), + }; + assert!(!reward_config.is_consistent()); +} + +#[test] +pub fn set_configuration_fails() { + ExternalityBuilder::build().execute_with(|| { + // 1 + assert_noop!( + BlockReward::set_configuration(RuntimeOrigin::signed(1), Default::default()), + BadOrigin + ); + + // 2 + let reward_config = RewardDistributionConfig { + base_treasury_percent: Perbill::from_percent(100), + ..Default::default() + }; + assert!(!reward_config.is_consistent()); + assert_noop!( + BlockReward::set_configuration(RuntimeOrigin::root(), reward_config), + Error::::InvalidDistributionConfiguration, + ); + }) +} + +#[test] +pub fn set_configuration_is_ok() { + ExternalityBuilder::build().execute_with(|| { + // custom config so it differs from the default one + let reward_config = RewardDistributionConfig { + base_treasury_percent: Perbill::from_percent(3), + base_staker_percent: Perbill::from_percent(14), + dapps_percent: Perbill::from_percent(18), + 
collators_percent: Perbill::from_percent(31), + adjustable_percent: Perbill::from_percent(34), + ideal_dapps_staking_tvl: Perbill::from_percent(87), + }; + assert!(reward_config.is_consistent()); + + assert_ok!(BlockReward::set_configuration( + RuntimeOrigin::root(), + reward_config.clone() + )); + System::assert_last_event(mock::RuntimeEvent::BlockReward( + Event::DistributionConfigurationChanged(reward_config.clone()), + )); + + assert_eq!( + RewardDistributionConfigStorage::::get(), + reward_config + ); + }) +} + +#[test] +pub fn inflation_and_total_issuance_as_expected() { + ExternalityBuilder::build().execute_with(|| { + let init_issuance = ::Currency::total_issuance(); + + for block in 0..10 { + assert_eq!( + ::Currency::total_issuance(), + block * BLOCK_REWARD + init_issuance + ); + BlockReward::on_timestamp_set(0); + assert_eq!( + ::Currency::total_issuance(), + (block + 1) * BLOCK_REWARD + init_issuance + ); + } + }) +} + +#[test] +pub fn reward_distribution_as_expected() { + ExternalityBuilder::build().execute_with(|| { + // Ensure that initially, all beneficiaries have no free balance + let init_balance_snapshot = FreeBalanceSnapshot::new(); + assert!(init_balance_snapshot.is_zero()); + + // Prepare a custom config (easily discernable percentages for visual verification) + let reward_config = RewardDistributionConfig { + base_treasury_percent: Perbill::from_percent(10), + base_staker_percent: Perbill::from_percent(20), + dapps_percent: Perbill::from_percent(25), + collators_percent: Perbill::from_percent(5), + adjustable_percent: Perbill::from_percent(40), + ideal_dapps_staking_tvl: Perbill::from_percent(50), + }; + assert!(reward_config.is_consistent()); + assert_ok!(BlockReward::set_configuration( + RuntimeOrigin::root(), + reward_config.clone() + )); + + // Initial adjustment of TVL + adjust_tvl_percentage(Perbill::from_percent(30)); + + // Issue rewards a couple of times and verify distribution is as expected + for _block in 1..=100 { + let 
init_balance_state = FreeBalanceSnapshot::new(); + let rewards = Rewards::calculate(&reward_config); + + BlockReward::on_timestamp_set(0); + + let final_balance_state = FreeBalanceSnapshot::new(); + init_balance_state.assert_distribution(&final_balance_state, &rewards); + } + }) +} + +#[test] +pub fn reward_distribution_no_adjustable_part() { + ExternalityBuilder::build().execute_with(|| { + let reward_config = RewardDistributionConfig { + base_treasury_percent: Perbill::from_percent(10), + base_staker_percent: Perbill::from_percent(45), + dapps_percent: Perbill::from_percent(40), + collators_percent: Perbill::from_percent(5), + adjustable_percent: Perbill::zero(), + ideal_dapps_staking_tvl: Perbill::from_percent(50), // this is irrelevant + }; + assert!(reward_config.is_consistent()); + assert_ok!(BlockReward::set_configuration( + RuntimeOrigin::root(), + reward_config.clone() + )); + + // no adjustable part so we don't expect rewards to change with TVL percentage + let const_rewards = Rewards::calculate(&reward_config); + + for _block in 1..=100 { + let init_balance_state = FreeBalanceSnapshot::new(); + let rewards = Rewards::calculate(&reward_config); + + assert_eq!(rewards, const_rewards); + + BlockReward::on_timestamp_set(0); + + let final_balance_state = FreeBalanceSnapshot::new(); + init_balance_state.assert_distribution(&final_balance_state, &rewards); + } + }) +} + +#[test] +pub fn reward_distribution_all_zero_except_one() { + ExternalityBuilder::build().execute_with(|| { + let reward_config = RewardDistributionConfig { + base_treasury_percent: Perbill::zero(), + base_staker_percent: Perbill::zero(), + dapps_percent: Perbill::zero(), + collators_percent: Perbill::zero(), + adjustable_percent: Perbill::one(), + ideal_dapps_staking_tvl: Perbill::from_percent(50), // this is irrelevant + }; + assert!(reward_config.is_consistent()); + assert_ok!(BlockReward::set_configuration( + RuntimeOrigin::root(), + reward_config.clone() + )); + + for _block in 1..=10 { + 
let init_balance_state = FreeBalanceSnapshot::new(); + let rewards = Rewards::calculate(&reward_config); + + BlockReward::on_timestamp_set(0); + + let final_balance_state = FreeBalanceSnapshot::new(); + init_balance_state.assert_distribution(&final_balance_state, &rewards); + } + }) +} + +/// Represents free balance snapshot at a specific point in time +#[derive(PartialEq, Eq, Clone, RuntimeDebug)] +struct FreeBalanceSnapshot { + treasury: Balance, + collators: Balance, + stakers: Balance, + dapps: Balance, +} + +impl FreeBalanceSnapshot { + /// Creates a new free balance snapshot using current balance state. + /// + /// Future balance changes won't be reflected in this instance. + fn new() -> Self { + Self { + treasury: ::Currency::free_balance( + &TREASURY_POT.into_account_truncating(), + ), + collators: ::Currency::free_balance( + &COLLATOR_POT.into_account_truncating(), + ), + stakers: ::Currency::free_balance( + &STAKERS_POT.into_account_truncating(), + ), + dapps: ::Currency::free_balance( + &DAPPS_POT.into_account_truncating(), + ), + } + } + + /// `true` if all free balances equal `Zero`, `false` otherwise + fn is_zero(&self) -> bool { + self.treasury.is_zero() + && self.collators.is_zero() + && self.stakers.is_zero() + && self.dapps.is_zero() + } + + /// Asserts that `post_reward_state` is as expected. + /// + /// Increase in balances, based on `rewards` values, is verified. 
+ /// + fn assert_distribution(&self, post_reward_state: &Self, rewards: &Rewards) { + assert_eq!( + self.treasury + rewards.base_treasury_reward + rewards.adjustable_treasury_reward, + post_reward_state.treasury + ); + assert_eq!( + self.stakers + rewards.base_staker_reward + rewards.adjustable_staker_reward, + post_reward_state.stakers + ); + assert_eq!( + self.collators + rewards.collators_reward, + post_reward_state.collators + ); + assert_eq!(self.dapps + rewards.dapps_reward, post_reward_state.dapps); + } +} + +/// Represents reward distribution balances for a single distribution. +#[derive(PartialEq, Eq, Clone, RuntimeDebug)] +struct Rewards { + base_treasury_reward: Balance, + base_staker_reward: Balance, + dapps_reward: Balance, + collators_reward: Balance, + adjustable_treasury_reward: Balance, + adjustable_staker_reward: Balance, +} + +impl Rewards { + /// Pre-calculates the reward distribution, using the provided `RewardDistributionConfig`. + /// Method assumes that total issuance will be increased by `BLOCK_REWARD`. + /// + /// Both current `total_issuance` and `TVL` are used. If these are changed after calling this function, + /// they won't be reflected in the struct. 
+ /// + fn calculate(reward_config: &RewardDistributionConfig) -> Self { + // Calculate `tvl-independent` portions + let base_treasury_reward = reward_config.base_treasury_percent * BLOCK_REWARD; + let base_staker_reward = reward_config.base_staker_percent * BLOCK_REWARD; + let dapps_reward = reward_config.dapps_percent * BLOCK_REWARD; + let collators_reward = reward_config.collators_percent * BLOCK_REWARD; + let adjustable_reward = reward_config.adjustable_percent * BLOCK_REWARD; + + // Calculate `tvl-dependent` portions + let future_total_issuance = + ::Currency::total_issuance() + BLOCK_REWARD; + let tvl = ::DappsStakingTvlProvider::get(); + let tvl_percentage = Perbill::from_rational(tvl, future_total_issuance); + + // Calculate factor for adjusting staker reward portion + let factor = if reward_config.ideal_dapps_staking_tvl <= tvl_percentage + || reward_config.ideal_dapps_staking_tvl.is_zero() + { + Perbill::one() + } else { + tvl_percentage / reward_config.ideal_dapps_staking_tvl + }; + + // Adjustable reward portions + let adjustable_staker_reward = factor * adjustable_reward; + let adjustable_treasury_reward = adjustable_reward - adjustable_staker_reward; + + Self { + base_treasury_reward, + base_staker_reward, + dapps_reward, + collators_reward, + adjustable_treasury_reward, + adjustable_staker_reward, + } + } +} + +/// Adjusts total_issuance in order to try-and-match the requested TVL percentage +fn adjust_tvl_percentage(desired_tvl_percentage: Perbill) { + // Calculate the required total issuance + let tvl = ::DappsStakingTvlProvider::get(); + let required_total_issuance = desired_tvl_percentage.saturating_reciprocal_mul(tvl); + + // Calculate how much more we need to issue in order to get the desired TVL percentage + let init_total_issuance = ::Currency::total_issuance(); + let to_issue = required_total_issuance.saturating_sub(init_total_issuance); + + let dummy_acc = 1; + ::Currency::resolve_creating( + &dummy_acc, + ::Currency::issue(to_issue), + ); 
+ + // Sanity check + assert_eq!( + ::Currency::total_issuance(), + required_total_issuance + ); +} diff --git a/pallets/block-reward/src/weights.rs b/pallets/block-reward/src/weights.rs new file mode 100644 index 0000000000..a040a73857 --- /dev/null +++ b/pallets/block-reward/src/weights.rs @@ -0,0 +1,77 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + + +//! Autogenerated weights for pallet_block_reward +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-04-04, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `devserver-01`, CPU: `Intel(R) Xeon(R) E-2236 CPU @ 3.40GHz` +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("shibuya-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/release/astar-collator +// benchmark +// pallet +// --chain=shibuya-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_block_reward +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./benchmark-results/block_reward_weights.rs +// --template=./scripts/templates/weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_block_reward. +pub trait WeightInfo { + fn set_configuration() -> Weight; +} + +/// Weights for pallet_block_reward using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + // Storage: BlockReward RewardDistributionConfigStorage (r:0 w:1) + // Proof: BlockReward RewardDistributionConfigStorage (max_values: Some(1), max_size: Some(24), added: 519, mode: MaxEncodedLen) + fn set_configuration() -> Weight { + // Minimum execution time: 9_085 nanoseconds. + Weight::from_ref_time(9_328_000) + .saturating_add(Weight::from_proof_size(0)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + // Storage: BlockReward RewardDistributionConfigStorage (r:0 w:1) + // Proof: BlockReward RewardDistributionConfigStorage (max_values: Some(1), max_size: Some(24), added: 519, mode: MaxEncodedLen) + fn set_configuration() -> Weight { + // Minimum execution time: 9_085 nanoseconds. 
+ Weight::from_ref_time(9_328_000) + .saturating_add(Weight::from_proof_size(0)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } +} \ No newline at end of file diff --git a/pallets/collator-selection/Cargo.toml b/pallets/collator-selection/Cargo.toml new file mode 100644 index 0000000000..4374e602f4 --- /dev/null +++ b/pallets/collator-selection/Cargo.toml @@ -0,0 +1,64 @@ +[package] +description = "Simple staking pallet with a fixed stake." +license = "Apache-2.0" +name = "pallet-collator-selection" +readme = "README.md" +version = "3.3.2" +authors.workspace = true +edition.workspace = true +homepage.workspace = true +repository.workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +log = { workspace = true } +parity-scale-codec = { workspace = true } +rand = { workspace = true, features = ["std_rng"] } +scale-info = { workspace = true } +serde = { workspace = true } + +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-authorship = { workspace = true } +pallet-session = { workspace = true } +sp-runtime = { workspace = true } +sp-staking = { workspace = true } +sp-std = { workspace = true } + +frame-benchmarking = { workspace = true, optional = true } + +[dev-dependencies] +pallet-aura = { workspace = true } +pallet-balances = { workspace = true } +pallet-timestamp = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-tracing = { workspace = true } + +[features] +default = ["std"] +runtime-benchmarks = [ + "frame-benchmarking", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", +] +std = [ + "parity-scale-codec/std", + "log/std", + "scale-info/std", + "rand/std", + "sp-runtime/std", + "sp-staking/std", + "sp-std/std", + "frame-support/std", + "frame-system/std", + "frame-benchmarking/std", + "pallet-authorship/std", + "pallet-session/std", + "pallet-aura/std", +] + 
+try-runtime = ["frame-support/try-runtime"] diff --git a/pallets/collator-selection/README.md b/pallets/collator-selection/README.md new file mode 100644 index 0000000000..9718db58b3 --- /dev/null +++ b/pallets/collator-selection/README.md @@ -0,0 +1 @@ +License: Apache-2.0 \ No newline at end of file diff --git a/pallets/collator-selection/src/benchmarking.rs b/pallets/collator-selection/src/benchmarking.rs new file mode 100644 index 0000000000..2844b69565 --- /dev/null +++ b/pallets/collator-selection/src/benchmarking.rs @@ -0,0 +1,271 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Benchmarking setup for pallet-collator-selection + +use super::*; + +#[allow(unused)] +use crate::Pallet as CollatorSelection; +use frame_benchmarking::{ + account, benchmarks, impl_benchmark_test_suite, whitelisted_caller, BenchmarkError, +}; +use frame_support::{ + assert_ok, + codec::Decode, + traits::{Currency, EnsureOrigin, Get}, +}; +use frame_system::{EventRecord, RawOrigin}; +use pallet_authorship::EventHandler; +use pallet_session::{self as session, SessionManager}; +use sp_std::prelude::*; + +pub type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; + +const SEED: u32 = 0; + +// TODO: remove if this is given in substrate commit. +macro_rules! 
whitelist { + ($acc:ident) => { + frame_benchmarking::benchmarking::add_to_whitelist( + frame_system::Account::::hashed_key_for(&$acc).into(), + ); + }; +} + +fn assert_last_event(generic_event: ::RuntimeEvent) { + let events = frame_system::Pallet::::events(); + let system_event: ::RuntimeEvent = generic_event.into(); + // compare to the last event record + let EventRecord { event, .. } = &events[events.len() - 1]; + assert_eq!(event, &system_event); +} + +fn create_funded_user( + string: &'static str, + n: u32, + balance_factor: u32, +) -> T::AccountId { + let user = account(string, n, SEED); + let balance = T::Currency::minimum_balance() * balance_factor.into(); + let _ = T::Currency::make_free_balance_be(&user, balance); + user +} + +fn keys(c: u32) -> ::Keys { + use rand::{RngCore, SeedableRng}; + + let keys = { + let mut keys = [0u8; 128]; + + if c > 0 { + let mut rng = rand::rngs::StdRng::seed_from_u64(c as u64); + rng.fill_bytes(&mut keys); + } + + keys + }; + + Decode::decode(&mut &keys[..]).unwrap() +} + +fn validator(c: u32) -> (T::AccountId, ::Keys) { + (create_funded_user::("candidate", c, 1000), keys::(c)) +} + +fn register_validators(count: u32) -> Vec { + let validators = (0..count).map(|c| validator::(c)).collect::>(); + + for (who, keys) in validators.clone() { + >::set_keys(RawOrigin::Signed(who).into(), keys, Vec::new()).unwrap(); + } + + validators.into_iter().map(|(who, _)| who).collect() +} + +fn register_candidates(count: u32) { + let candidates = (0..count) + .map(|c| account("candidate", c, SEED)) + .collect::>(); + assert!( + >::get() > 0u32.into(), + "Bond cannot be zero!" + ); + + for who in candidates { + T::Currency::make_free_balance_be(&who, >::get() * 2u32.into()); + >::register_as_candidate(RawOrigin::Signed(who).into()).unwrap(); + } +} + +benchmarks! { + where_clause { where T: pallet_authorship::Config + session::Config } + + set_invulnerables { + let b in 1 .. 
T::MaxInvulnerables::get(); + let new_invulnerables = register_validators::(b); + let origin = T::UpdateOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + }: { + assert_ok!( + >::set_invulnerables(origin, new_invulnerables.clone()) + ); + } + verify { + assert_last_event::(Event::NewInvulnerables(new_invulnerables).into()); + } + + set_desired_candidates { + let max: u32 = 148; + let origin = T::UpdateOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + }: { + assert_ok!( + >::set_desired_candidates(origin, max) + ); + } + verify { + assert_last_event::(Event::NewDesiredCandidates(max).into()); + } + + set_candidacy_bond { + let bond: BalanceOf = T::Currency::minimum_balance() * 10u32.into(); + let origin = T::UpdateOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + }: { + assert_ok!( + >::set_candidacy_bond(origin, bond) + ); + } + verify { + assert_last_event::(Event::NewCandidacyBond(bond).into()); + } + + // worse case is when we have all the max-candidate slots filled except one, and we fill that + // one. + register_as_candidate { + let c in 1 .. T::MaxCandidates::get(); + + >::put(T::Currency::minimum_balance()); + >::put(c + 1); + + register_validators::(c); + register_candidates::(c); + + let caller: T::AccountId = whitelisted_caller(); + let bond: BalanceOf = T::Currency::minimum_balance() * 2u32.into(); + T::Currency::make_free_balance_be(&caller, bond); + + >::set_keys( + RawOrigin::Signed(caller.clone()).into(), + keys::(c + 1), + Vec::new() + ).unwrap(); + + }: _(RawOrigin::Signed(caller.clone())) + verify { + assert_last_event::(Event::CandidateAdded(caller, bond / 2u32.into()).into()); + } + + // worse case is the last candidate leaving. + leave_intent { + let c in (T::MinCandidates::get() + 1) .. 
T::MaxCandidates::get(); + >::put(T::Currency::minimum_balance()); + >::put(c); + + register_validators::(c); + register_candidates::(c); + + let leaving = >::get().last().unwrap().who.clone(); + whitelist!(leaving); + }: _(RawOrigin::Signed(leaving.clone())) + verify { + assert_last_event::(Event::CandidateRemoved(leaving).into()); + } + + // worse case is paying a non-existing candidate account. + note_author { + >::put(T::Currency::minimum_balance()); + T::Currency::make_free_balance_be( + &>::account_id(), + T::Currency::minimum_balance() * 4u32.into(), + ); + let author = account("author", 0, SEED); + let new_block: T::BlockNumber = 10u32.into(); + + frame_system::Pallet::::set_block_number(new_block); + assert!(T::Currency::free_balance(&author) == 0u32.into()); + }: { + as EventHandler<_, _>>::note_author(author.clone()) + } verify { + assert!(T::Currency::free_balance(&author) > 0u32.into()); + assert_eq!(frame_system::Pallet::::block_number(), new_block); + } + + // worst case for new session. + new_session { + let r in 1 .. T::MaxCandidates::get(); + let c in 1 .. 
T::MaxCandidates::get(); + + >::put(T::Currency::minimum_balance()); + >::put(c); + frame_system::Pallet::::set_block_number(0u32.into()); + + register_validators::(c); + register_candidates::(c); + + let new_block: T::BlockNumber = 1800u32.into(); + let zero_block: T::BlockNumber = 0u32.into(); + let candidates = >::get(); + + let non_removals = c.saturating_sub(r); + + for i in 0..c { + >::insert(candidates[i as usize].who.clone(), zero_block); + } + + if non_removals > 0 { + for i in 0..non_removals { + >::insert(candidates[i as usize].who.clone(), new_block); + } + } else { + for i in 0..c { + >::insert(candidates[i as usize].who.clone(), new_block); + } + } + + let pre_length = >::get().len(); + + frame_system::Pallet::::set_block_number(new_block); + + assert!(>::get().len() == c as usize); + }: { + as SessionManager<_>>::new_session(0) + } verify { + if c > r && non_removals >= T::MinCandidates::get() { + assert!(>::get().len() < pre_length); + } else if c > r && non_removals < T::MinCandidates::get() { + assert!(>::get().len() == T::MinCandidates::get() as usize); + } else { + assert!(>::get().len() == pre_length); + } + } +} + +impl_benchmark_test_suite!( + CollatorSelection, + crate::mock::new_test_ext(), + crate::mock::Test, +); diff --git a/pallets/collator-selection/src/lib.rs b/pallets/collator-selection/src/lib.rs new file mode 100644 index 0000000000..1f348e65bc --- /dev/null +++ b/pallets/collator-selection/src/lib.rs @@ -0,0 +1,561 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Collator Selection pallet. +//! +//! A pallet to manage collators in a parachain. +//! +//! ## Overview +//! +//! The Collator Selection pallet manages the collators of a parachain. **Collation is _not_ a +//! secure activity** and this pallet does not implement any game-theoretic mechanisms to meet BFT +//! safety assumptions of the chosen set. +//! +//! ## Terminology +//! +//! - Collator: A parachain block producer. +//! - Bond: An amount of `Balance` _reserved_ for candidate registration. +//! - Invulnerable: An account guaranteed to be in the collator set. +//! +//! ## Implementation +//! +//! The final `Collators` are aggregated from two individual lists: +//! +//! 1. [`Invulnerables`]: a set of collators appointed by governance. These accounts will always be +//! collators. +//! 2. [`Candidates`]: these are *candidates to the collation task* and may or may not be elected as +//! a final collator. +//! +//! The current implementation resolves congestion of [`Candidates`] in a first-come-first-serve +//! manner. +//! +//! Candidates will not be allowed to get kicked or leave_intent if the total number of candidates +//! fall below MinCandidates. This is for potential disaster recovery scenarios. +//! +//! ### Rewards +//! +//! The Collator Selection pallet maintains an on-chain account (the "Pot"). In each block, the +//! collator who authored it receives: +//! +//! - Half the value of the Pot. +//! - Half the value of the transaction fees within the block. The other half of the transaction +//! fees are deposited into the Pot. +//! 
+//! To initiate rewards an ED needs to be transferred to the pot address. +//! +//! Note: Eventually the Pot distribution may be modified as discussed in +//! [this issue](https://github.com/paritytech/statemint/issues/21#issuecomment-810481073). + +#![cfg_attr(not(feature = "std"), no_std)] + +pub use pallet::*; + +#[cfg(test)] +mod mock; + +#[cfg(test)] +mod tests; + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; +pub mod weights; + +#[frame_support::pallet] +pub mod pallet { + pub use crate::weights::WeightInfo; + use core::ops::Div; + use frame_support::{ + dispatch::{DispatchClass, DispatchResultWithPostInfo}, + inherent::Vec, + pallet_prelude::*, + sp_runtime::{ + traits::{AccountIdConversion, CheckedSub, Saturating, Zero}, + RuntimeDebug, + }, + traits::{ + Currency, EnsureOrigin, ExistenceRequirement::KeepAlive, ReservableCurrency, + ValidatorRegistration, + }, + PalletId, + }; + use frame_system::{pallet_prelude::*, Config as SystemConfig}; + use pallet_session::SessionManager; + use sp_runtime::{traits::Convert, Perbill}; + use sp_staking::SessionIndex; + + type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; + + /// A convertor from collators id. Since this pallet does not have stash/controller, this is + /// just identity. + pub struct IdentityCollator; + impl sp_runtime::traits::Convert> for IdentityCollator { + fn convert(t: T) -> Option { + Some(t) + } + } + + /// Configure the pallet by specifying the parameters and types on which it depends. + #[pallet::config] + pub trait Config: frame_system::Config { + /// Overarching event type. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + /// The currency mechanism. + type Currency: ReservableCurrency; + + /// Origin that can dictate updating parameters of this pallet. + type UpdateOrigin: EnsureOrigin; + + /// Account Identifier from which the internal Pot is generated. + type PotId: Get; + + /// Maximum number of candidates that we should have. 
This is used for benchmarking and is not + /// enforced. + /// + /// This does not take into account the invulnerables. + type MaxCandidates: Get; + + /// Minimum number of candidates that we should have. This is used for disaster recovery. + /// + /// This does not take into account the invulnerables. + type MinCandidates: Get; + + /// Maximum number of invulnerables. + /// + /// Used only for benchmarking. + type MaxInvulnerables: Get; + + // Will be kicked if block is not produced in threshold. + type KickThreshold: Get; + + /// A stable ID for a validator. + type ValidatorId: Member + Parameter; + + /// A conversion from account ID to validator ID. + /// + /// Its cost must be at most one storage read. + type ValidatorIdOf: Convert>; + + /// Validate a user is registered + type ValidatorRegistration: ValidatorRegistration; + + /// How many in perc kicked collators should be slashed (set 0 to disable) + type SlashRatio: Get; + + /// The weight information of this pallet. + type WeightInfo: WeightInfo; + } + + /// Basic information about a collation candidate. + #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, scale_info::TypeInfo)] + pub struct CandidateInfo { + /// Account identifier. + pub who: AccountId, + /// Reserved deposit. + pub deposit: Balance, + } + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + #[pallet::without_storage_info] + pub struct Pallet(_); + + /// The invulnerable, fixed collators. + #[pallet::storage] + #[pallet::getter(fn invulnerables)] + pub type Invulnerables = StorageValue<_, Vec, ValueQuery>; + + /// The (community, limited) collation candidates. + #[pallet::storage] + #[pallet::getter(fn candidates)] + pub type Candidates = + StorageValue<_, Vec>>, ValueQuery>; + + /// Last block authored by collator. 
+ #[pallet::storage] + #[pallet::getter(fn last_authored_block)] + pub type LastAuthoredBlock = + StorageMap<_, Twox64Concat, T::AccountId, T::BlockNumber, ValueQuery>; + + /// Desired number of candidates. + /// + /// This should ideally always be less than [`Config::MaxCandidates`] for weights to be correct. + #[pallet::storage] + #[pallet::getter(fn desired_candidates)] + pub type DesiredCandidates = StorageValue<_, u32, ValueQuery>; + + /// Fixed amount to deposit to become a collator. + /// + /// When a collator calls `leave_intent` they immediately receive the deposit back. + #[pallet::storage] + #[pallet::getter(fn candidacy_bond)] + pub type CandidacyBond = StorageValue<_, BalanceOf, ValueQuery>; + + /// Destination account for slashed amount. + #[pallet::storage] + #[pallet::getter(fn slash_destination)] + pub type SlashDestination = StorageValue<_, ::AccountId>; + + #[pallet::genesis_config] + pub struct GenesisConfig { + pub invulnerables: Vec, + pub candidacy_bond: BalanceOf, + pub desired_candidates: u32, + } + + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + Self { + invulnerables: Default::default(), + candidacy_bond: Default::default(), + desired_candidates: Default::default(), + } + } + } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + let duplicate_invulnerables = self + .invulnerables + .iter() + .collect::>(); + assert!( + duplicate_invulnerables.len() == self.invulnerables.len(), + "duplicate invulnerables in genesis." 
+ ); + + assert!( + T::MaxInvulnerables::get() >= (self.invulnerables.len() as u32), + "genesis invulnerables are more than T::MaxInvulnerables", + ); + assert!( + T::MaxCandidates::get() >= self.desired_candidates, + "genesis desired_candidates are more than T::MaxCandidates", + ); + + >::put(&self.desired_candidates); + >::put(&self.candidacy_bond); + >::put(&self.invulnerables); + } + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + NewInvulnerables(Vec), + NewDesiredCandidates(u32), + NewCandidacyBond(BalanceOf), + CandidateAdded(T::AccountId, BalanceOf), + CandidateRemoved(T::AccountId), + CandidateSlashed(T::AccountId), + } + + // Errors inform users that something went wrong. + #[pallet::error] + pub enum Error { + /// Too many candidates + TooManyCandidates, + /// Too few candidates + TooFewCandidates, + /// Unknown error + Unknown, + /// Permission issue + Permission, + /// User is already a candidate + AlreadyCandidate, + /// User is not a candidate + NotCandidate, + /// User is already an Invulnerable + AlreadyInvulnerable, + /// Account has no associated validator ID + NoAssociatedValidatorId, + /// Validator ID is not yet registered + ValidatorNotRegistered, + } + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet { + /// Set the list of invulnerable (fixed) collators. 
+ #[pallet::call_index(0)] + #[pallet::weight(T::WeightInfo::set_invulnerables(new.len() as u32))] + pub fn set_invulnerables( + origin: OriginFor, + new: Vec, + ) -> DispatchResultWithPostInfo { + T::UpdateOrigin::ensure_origin(origin)?; + // we trust origin calls, this is just a for more accurate benchmarking + if (new.len() as u32) > T::MaxInvulnerables::get() { + log::warn!( + "invulnerables > T::MaxInvulnerables; you might need to run benchmarks again" + ); + } + + // check if the invulnerables have associated validator keys before they are set + for account_id in &new { + let validator_key = T::ValidatorIdOf::convert(account_id.clone()) + .ok_or(Error::::NoAssociatedValidatorId)?; + ensure!( + T::ValidatorRegistration::is_registered(&validator_key), + Error::::ValidatorNotRegistered + ); + } + + >::put(&new); + Self::deposit_event(Event::NewInvulnerables(new)); + Ok(().into()) + } + + /// Set the ideal number of collators (not including the invulnerables). + /// If lowering this number, then the number of running collators could be higher than this figure. + /// Aside from that edge case, there should be no other way to have more collators than the desired number. + #[pallet::call_index(1)] + #[pallet::weight(T::WeightInfo::set_desired_candidates())] + pub fn set_desired_candidates( + origin: OriginFor, + max: u32, + ) -> DispatchResultWithPostInfo { + T::UpdateOrigin::ensure_origin(origin)?; + // we trust origin calls, this is just a for more accurate benchmarking + if max > T::MaxCandidates::get() { + log::warn!("max > T::MaxCandidates; you might need to run benchmarks again"); + } + >::put(&max); + Self::deposit_event(Event::NewDesiredCandidates(max)); + Ok(().into()) + } + + /// Set the candidacy bond amount. 
+ #[pallet::call_index(2)] + #[pallet::weight(T::WeightInfo::set_candidacy_bond())] + pub fn set_candidacy_bond( + origin: OriginFor, + bond: BalanceOf, + ) -> DispatchResultWithPostInfo { + T::UpdateOrigin::ensure_origin(origin)?; + >::put(&bond); + Self::deposit_event(Event::NewCandidacyBond(bond)); + Ok(().into()) + } + + /// Register this account as a collator candidate. The account must (a) already have + /// registered session keys and (b) be able to reserve the `CandidacyBond`. + /// + /// This call is not available to `Invulnerable` collators. + #[pallet::call_index(3)] + #[pallet::weight(T::WeightInfo::register_as_candidate(T::MaxCandidates::get()))] + pub fn register_as_candidate(origin: OriginFor) -> DispatchResultWithPostInfo { + let who = ensure_signed(origin)?; + + // ensure we are below limit. + let length = >::decode_len().unwrap_or_default(); + ensure!( + (length as u32) < Self::desired_candidates(), + Error::::TooManyCandidates + ); + ensure!( + !Self::invulnerables().contains(&who), + Error::::AlreadyInvulnerable + ); + + let validator_key = T::ValidatorIdOf::convert(who.clone()) + .ok_or(Error::::NoAssociatedValidatorId)?; + ensure!( + T::ValidatorRegistration::is_registered(&validator_key), + Error::::ValidatorNotRegistered + ); + + let deposit = Self::candidacy_bond(); + // First authored block is current block plus kick threshold to handle session delay + let incoming = CandidateInfo { + who: who.clone(), + deposit, + }; + + let current_count = + >::try_mutate(|candidates| -> Result { + if candidates.iter_mut().any(|candidate| candidate.who == who) { + Err(Error::::AlreadyCandidate)? 
+ } else { + T::Currency::reserve(&who, deposit)?; + candidates.push(incoming); + >::insert( + who.clone(), + frame_system::Pallet::::block_number() + T::KickThreshold::get(), + ); + Ok(candidates.len()) + } + })?; + + Self::deposit_event(Event::CandidateAdded(who, deposit)); + Ok(Some(T::WeightInfo::register_as_candidate(current_count as u32)).into()) + } + + /// Deregister `origin` as a collator candidate. Note that the collator can only leave on + /// session change. The `CandidacyBond` will be unreserved immediately. + /// + /// This call will fail if the total number of candidates would drop below `MinCandidates`. + /// + /// This call is not available to `Invulnerable` collators. + #[pallet::call_index(4)] + #[pallet::weight(T::WeightInfo::leave_intent(T::MaxCandidates::get()))] + pub fn leave_intent(origin: OriginFor) -> DispatchResultWithPostInfo { + let who = ensure_signed(origin)?; + ensure!( + Self::candidates().len() as u32 > T::MinCandidates::get(), + Error::::TooFewCandidates + ); + let current_count = Self::try_remove_candidate(&who, false)?; + + Ok(Some(T::WeightInfo::leave_intent(current_count as u32)).into()) + } + } + + impl Pallet { + /// Get a unique, inaccessible account id from the `PotId`. 
+ pub fn account_id() -> T::AccountId { + T::PotId::get().into_account_truncating() + } + /// Removes a candidate if they exist and sends them back their deposit + /// If second argument is `true` then a candidate will be slashed + fn try_remove_candidate(who: &T::AccountId, slash: bool) -> Result { + let current_count = + >::try_mutate(|candidates| -> Result { + let index = candidates + .iter() + .position(|candidate| candidate.who == *who) + .ok_or(Error::::NotCandidate)?; + let deposit = candidates[index].deposit; + + if slash { + let slash = T::SlashRatio::get() * deposit; + let remain = deposit - slash; + + let (imbalance, _) = T::Currency::slash_reserved(who, slash); + T::Currency::unreserve(who, remain); + + if let Some(dest) = Self::slash_destination() { + T::Currency::resolve_creating(&dest, imbalance); + } + + Self::deposit_event(Event::CandidateSlashed(who.clone())); + } else { + T::Currency::unreserve(who, deposit); + } + candidates.remove(index); + >::remove(who.clone()); + Ok(candidates.len()) + })?; + Self::deposit_event(Event::CandidateRemoved(who.clone())); + Ok(current_count) + } + + /// Assemble the current set of candidates and invulnerables into the next collator set. + /// + /// This is done on the fly, as frequent as we are told to do so, as the session manager. + pub fn assemble_collators(candidates: Vec) -> Vec { + let mut collators = Self::invulnerables(); + collators.extend(candidates.into_iter().collect::>()); + collators + } + /// Kicks out and candidates that did not produce a block in the kick threshold. 
+ pub fn kick_stale_candidates( + candidates: Vec>>, + ) -> Vec { + let now = frame_system::Pallet::::block_number(); + let kick_threshold = T::KickThreshold::get(); + candidates + .into_iter() + .filter_map(|c| { + let last_block = >::get(c.who.clone()); + let since_last = now.saturating_sub(last_block); + if since_last < kick_threshold + || Self::candidates().len() as u32 <= T::MinCandidates::get() + { + Some(c.who) + } else { + let outcome = Self::try_remove_candidate(&c.who, true); + if let Err(why) = outcome { + log::warn!("Failed to remove candidate {:?}", why); + debug_assert!(false, "failed to remove candidate {:?}", why); + } + None + } + }) + .collect::>() + } + } + + /// Keep track of number of authored blocks per authority, uncles are counted as well since + /// they're a valid proof of being online. + impl + pallet_authorship::EventHandler for Pallet + { + fn note_author(author: T::AccountId) { + let pot = Self::account_id(); + // assumes an ED will be sent to pot. + let reward = T::Currency::free_balance(&pot) + .checked_sub(&T::Currency::minimum_balance()) + .unwrap_or_else(Zero::zero) + .div(2u32.into()); + // `reward` is half of pot account minus ED, this should never fail. + let _success = T::Currency::transfer(&pot, &author, reward, KeepAlive); + debug_assert!(_success.is_ok()); + >::insert(author, frame_system::Pallet::::block_number()); + + frame_system::Pallet::::register_extra_weight_unchecked( + T::WeightInfo::note_author(), + DispatchClass::Mandatory, + ); + } + } + + /// Play the role of the session manager. 
+ impl SessionManager for Pallet { + fn new_session(index: SessionIndex) -> Option> { + log::info!( + "assembling new collators for new session {} at #{:?}", + index, + >::block_number(), + ); + + let candidates = Self::candidates(); + let candidates_len_before = candidates.len(); + let active_candidates = Self::kick_stale_candidates(candidates); + let active_candidates_len = active_candidates.len(); + let result = Self::assemble_collators(active_candidates); + let removed = candidates_len_before - active_candidates_len; + + frame_system::Pallet::::register_extra_weight_unchecked( + T::WeightInfo::new_session(candidates_len_before as u32, removed as u32), + DispatchClass::Mandatory, + ); + Some(result) + } + fn start_session(_: SessionIndex) { + // we don't care. + } + fn end_session(_: SessionIndex) { + // we don't care. + } + } +} diff --git a/pallets/collator-selection/src/mock.rs b/pallets/collator-selection/src/mock.rs new file mode 100644 index 0000000000..b21a924a7a --- /dev/null +++ b/pallets/collator-selection/src/mock.rs @@ -0,0 +1,268 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use super::*; +use crate as collator_selection; +use frame_support::{ + ord_parameter_types, parameter_types, + traits::{FindAuthor, GenesisBuild, ValidatorRegistration}, + PalletId, +}; +use frame_system as system; +use frame_system::EnsureSignedBy; +use sp_core::H256; +use sp_runtime::{ + testing::{Header, UintAuthorityId}, + traits::{BlakeTwo256, IdentityLookup, OpaqueKeys}, + Perbill, RuntimeAppPublic, +}; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +// Configure a mock runtime to test the pallet. +frame_support::construct_runtime!( + pub struct Test + where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system, + Timestamp: pallet_timestamp, + Session: pallet_session, + Aura: pallet_aura, + Balances: pallet_balances, + CollatorSelection: collator_selection, + Authorship: pallet_authorship, + } +); + +parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const SS58Prefix: u8 = 42; +} + +impl system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = SS58Prefix; + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +parameter_types! 
{ + pub const ExistentialDeposit: u64 = 5; + pub const MaxReserves: u32 = 50; +} + +impl pallet_balances::Config for Test { + type Balance = u64; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); + type MaxLocks = (); + type MaxReserves = MaxReserves; + type ReserveIdentifier = [u8; 8]; +} + +pub struct Author4; +impl FindAuthor for Author4 { + fn find_author<'a, I>(_digests: I) -> Option + where + I: 'a + IntoIterator, + { + Some(4) + } +} + +impl pallet_authorship::Config for Test { + type FindAuthor = Author4; + type EventHandler = CollatorSelection; +} + +parameter_types! { + pub const MinimumPeriod: u64 = 1; +} + +impl pallet_timestamp::Config for Test { + type Moment = u64; + type OnTimestampSet = Aura; + type MinimumPeriod = MinimumPeriod; + type WeightInfo = (); +} + +impl pallet_aura::Config for Test { + type AuthorityId = sp_consensus_aura::sr25519::AuthorityId; + type MaxAuthorities = MaxAuthorities; + type DisabledValidators = (); +} + +sp_runtime::impl_opaque_keys! { + pub struct MockSessionKeys { + // a key for aura authoring + pub aura: UintAuthorityId, + } +} + +impl From for MockSessionKeys { + fn from(aura: sp_runtime::testing::UintAuthorityId) -> Self { + Self { aura } + } +} + +parameter_types! 
{ + pub static SessionHandlerCollators: Vec = Vec::new(); + pub static SessionChangeBlock: u64 = 0; +} + +pub struct TestSessionHandler; +impl pallet_session::SessionHandler for TestSessionHandler { + const KEY_TYPE_IDS: &'static [sp_runtime::KeyTypeId] = &[UintAuthorityId::ID]; + fn on_genesis_session(keys: &[(u64, Ks)]) { + SessionHandlerCollators::set(keys.into_iter().map(|(a, _)| *a).collect::>()) + } + fn on_new_session(_: bool, keys: &[(u64, Ks)], _: &[(u64, Ks)]) { + SessionChangeBlock::set(System::block_number()); + dbg!(keys.len()); + SessionHandlerCollators::set(keys.into_iter().map(|(a, _)| *a).collect::>()) + } + fn on_before_session_ending() {} + fn on_disabled(_: u32) {} +} + +parameter_types! { + pub const Offset: u64 = 0; + pub const Period: u64 = 10; +} + +impl pallet_session::Config for Test { + type RuntimeEvent = RuntimeEvent; + type ValidatorId = ::AccountId; + // we don't have stash and controller, thus we don't need the convert as well. + type ValidatorIdOf = IdentityCollator; + type ShouldEndSession = pallet_session::PeriodicSessions; + type NextSessionRotation = pallet_session::PeriodicSessions; + type SessionManager = CollatorSelection; + type SessionHandler = TestSessionHandler; + type Keys = MockSessionKeys; + type WeightInfo = (); +} + +ord_parameter_types! { + pub const RootAccount: u64 = 777; +} + +parameter_types! 
{ + pub const PotId: PalletId = PalletId(*b"PotStake"); + pub const MaxCandidates: u32 = 20; + pub const MaxInvulnerables: u32 = 20; + pub const MinCandidates: u32 = 1; + pub const MaxAuthorities: u32 = 100_000; + pub const SlashRatio: Perbill = Perbill::from_percent(10); +} + +pub struct IsRegistered; +impl ValidatorRegistration for IsRegistered { + fn is_registered(id: &u64) -> bool { + if *id == 7u64 { + false + } else { + true + } + } +} + +impl Config for Test { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type UpdateOrigin = EnsureSignedBy; + type PotId = PotId; + type MaxCandidates = MaxCandidates; + type MinCandidates = MinCandidates; + type MaxInvulnerables = MaxInvulnerables; + type KickThreshold = Period; + type ValidatorId = ::AccountId; + type ValidatorIdOf = IdentityCollator; + type ValidatorRegistration = IsRegistered; + type SlashRatio = SlashRatio; + type WeightInfo = (); +} + +pub fn new_test_ext() -> sp_io::TestExternalities { + sp_tracing::try_init_simple(); + let mut t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + let invulnerables = vec![1, 2]; + + let balances = vec![(1, 100), (2, 100), (3, 100), (4, 100), (5, 100)]; + let keys = balances + .iter() + .map(|&(i, _)| { + ( + i, + i, + MockSessionKeys { + aura: UintAuthorityId(i), + }, + ) + }) + .collect::>(); + let collator_selection = collator_selection::GenesisConfig:: { + desired_candidates: 2, + candidacy_bond: 10, + invulnerables, + }; + let session = pallet_session::GenesisConfig:: { keys }; + pallet_balances::GenesisConfig:: { balances } + .assimilate_storage(&mut t) + .unwrap(); + // collator selection must be initialized before session. 
+ collator_selection.assimilate_storage(&mut t).unwrap(); + session.assimilate_storage(&mut t).unwrap(); + + t.into() +} + +pub fn initialize_to_block(n: u64) { + for i in System::block_number() + 1..=n { + System::set_block_number(i); + >::on_initialize(i); + } +} diff --git a/pallets/collator-selection/src/tests.rs b/pallets/collator-selection/src/tests.rs new file mode 100644 index 0000000000..dd60803912 --- /dev/null +++ b/pallets/collator-selection/src/tests.rs @@ -0,0 +1,452 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate as collator_selection; +use crate::{mock::*, CandidateInfo, Error}; +use frame_support::{ + assert_noop, assert_ok, + traits::{Currency, GenesisBuild, OnInitialize}, +}; +use pallet_balances::Error as BalancesError; +use sp_runtime::traits::BadOrigin; + +#[test] +fn basic_setup_works() { + new_test_ext().execute_with(|| { + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert!(CollatorSelection::candidates().is_empty()); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + }); +} + +#[test] +fn it_should_set_invulnerables() { + new_test_ext().execute_with(|| { + let new_set = vec![1, 2, 3, 4]; + assert_ok!(CollatorSelection::set_invulnerables( + RuntimeOrigin::signed(RootAccount::get()), + new_set.clone() + )); + assert_eq!(CollatorSelection::invulnerables(), new_set); + + // cannot set with non-root. + assert_noop!( + CollatorSelection::set_invulnerables(RuntimeOrigin::signed(1), new_set.clone()), + BadOrigin + ); + + // cannot set invulnerables without associated validator keys + let invulnerables = vec![7]; + assert_noop!( + CollatorSelection::set_invulnerables( + RuntimeOrigin::signed(RootAccount::get()), + invulnerables.clone() + ), + Error::::ValidatorNotRegistered + ); + }); +} + +#[test] +fn set_desired_candidates_works() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + + // can set + assert_ok!(CollatorSelection::set_desired_candidates( + RuntimeOrigin::signed(RootAccount::get()), + 7 + )); + assert_eq!(CollatorSelection::desired_candidates(), 7); + + // rejects bad origin + assert_noop!( + CollatorSelection::set_desired_candidates(RuntimeOrigin::signed(1), 8), + BadOrigin + ); + }); +} + +#[test] +fn set_candidacy_bond() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + // can set + assert_ok!(CollatorSelection::set_candidacy_bond( + 
RuntimeOrigin::signed(RootAccount::get()), + 7 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 7); + + // rejects bad origin. + assert_noop!( + CollatorSelection::set_candidacy_bond(RuntimeOrigin::signed(1), 8), + BadOrigin + ); + }); +} + +#[test] +fn cannot_register_candidate_if_too_many() { + new_test_ext().execute_with(|| { + // reset desired candidates: + >::put(0); + + // can't accept anyone anymore. + assert_noop!( + CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3)), + Error::::TooManyCandidates, + ); + + // reset desired candidates: + >::put(1); + assert_ok!(CollatorSelection::register_as_candidate( + RuntimeOrigin::signed(4) + )); + + // but no more + assert_noop!( + CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5)), + Error::::TooManyCandidates, + ); + }) +} + +#[test] +fn cannot_unregister_candidate_if_too_few() { + new_test_ext().execute_with(|| { + // reset desired candidates: + >::put(1); + assert_ok!(CollatorSelection::register_as_candidate( + RuntimeOrigin::signed(4) + )); + + // can not remove too few + assert_noop!( + CollatorSelection::leave_intent(RuntimeOrigin::signed(4)), + Error::::TooFewCandidates, + ); + }) +} + +#[test] +fn cannot_register_as_candidate_if_invulnerable() { + new_test_ext().execute_with(|| { + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + // can't 1 because it is invulnerable. + assert_noop!( + CollatorSelection::register_as_candidate(RuntimeOrigin::signed(1)), + Error::::AlreadyInvulnerable, + ); + }) +} + +#[test] +fn cannot_register_as_candidate_if_keys_not_registered() { + new_test_ext().execute_with(|| { + // can't 7 because keys not registered. 
+ assert_noop!( + CollatorSelection::register_as_candidate(RuntimeOrigin::signed(7)), + Error::::ValidatorNotRegistered + ); + }) +} + +#[test] +fn cannot_register_dupe_candidate() { + new_test_ext().execute_with(|| { + // can add 3 as candidate + assert_ok!(CollatorSelection::register_as_candidate( + RuntimeOrigin::signed(3) + )); + let addition = CandidateInfo { + who: 3, + deposit: 10, + }; + assert_eq!(CollatorSelection::candidates(), vec![addition]); + assert_eq!(CollatorSelection::last_authored_block(3), 10); + assert_eq!(Balances::free_balance(3), 90); + + // but no more + assert_noop!( + CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3)), + Error::::AlreadyCandidate, + ); + }) +} + +#[test] +fn cannot_register_as_candidate_if_poor() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(&3), 100); + assert_eq!(Balances::free_balance(&33), 0); + + // works + assert_ok!(CollatorSelection::register_as_candidate( + RuntimeOrigin::signed(3) + )); + + // poor + assert_noop!( + CollatorSelection::register_as_candidate(RuntimeOrigin::signed(33)), + BalancesError::::InsufficientBalance, + ); + }); +} + +#[test] +fn register_as_candidate_works() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + assert_eq!(CollatorSelection::candidates(), Vec::new()); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + // take two endowed, non-invulnerables accounts. 
+ assert_eq!(Balances::free_balance(&3), 100); + assert_eq!(Balances::free_balance(&4), 100); + + assert_ok!(CollatorSelection::register_as_candidate( + RuntimeOrigin::signed(3) + )); + assert_ok!(CollatorSelection::register_as_candidate( + RuntimeOrigin::signed(4) + )); + + assert_eq!(Balances::free_balance(&3), 90); + assert_eq!(Balances::free_balance(&4), 90); + + assert_eq!(CollatorSelection::candidates().len(), 2); + }); +} + +#[test] +fn leave_intent() { + new_test_ext().execute_with(|| { + // register a candidate. + assert_ok!(CollatorSelection::register_as_candidate( + RuntimeOrigin::signed(3) + )); + assert_eq!(Balances::free_balance(3), 90); + + // register too so can leave above min candidates + assert_ok!(CollatorSelection::register_as_candidate( + RuntimeOrigin::signed(5) + )); + assert_eq!(Balances::free_balance(5), 90); + + // cannot leave if not candidate. + assert_noop!( + CollatorSelection::leave_intent(RuntimeOrigin::signed(4)), + Error::::NotCandidate + ); + + // bond is returned + assert_ok!(CollatorSelection::leave_intent(RuntimeOrigin::signed(3))); + assert_eq!(Balances::free_balance(3), 100); + assert_eq!(CollatorSelection::last_authored_block(3), 0); + }); +} + +#[test] +fn authorship_event_handler() { + new_test_ext().execute_with(|| { + // put 100 in the pot + 5 for ED + Balances::make_free_balance_be(&CollatorSelection::account_id(), 105); + + // 4 is the default author. + assert_eq!(Balances::free_balance(4), 100); + assert_ok!(CollatorSelection::register_as_candidate( + RuntimeOrigin::signed(4) + )); + // triggers `note_author` + Authorship::on_initialize(1); + + let collator = CandidateInfo { + who: 4, + deposit: 10, + }; + + assert_eq!(CollatorSelection::candidates(), vec![collator]); + assert_eq!(CollatorSelection::last_authored_block(4), 0); + + // half of the pot goes to the collator who's the author (4 in tests). + assert_eq!(Balances::free_balance(4), 140); + // half + ED stays. 
+ assert_eq!(Balances::free_balance(CollatorSelection::account_id()), 55); + }); +} + +#[test] +fn fees_edgecases() { + new_test_ext().execute_with(|| { + // Nothing panics, no reward when no ED in balance + Authorship::on_initialize(1); + // put some money into the pot at ED + Balances::make_free_balance_be(&CollatorSelection::account_id(), 5); + // 4 is the default author. + assert_eq!(Balances::free_balance(4), 100); + assert_ok!(CollatorSelection::register_as_candidate( + RuntimeOrigin::signed(4) + )); + // triggers `note_author` + Authorship::on_initialize(1); + + let collator = CandidateInfo { + who: 4, + deposit: 10, + }; + + assert_eq!(CollatorSelection::candidates(), vec![collator]); + assert_eq!(CollatorSelection::last_authored_block(4), 0); + // Nothing received + assert_eq!(Balances::free_balance(4), 90); + // all fee stays + assert_eq!(Balances::free_balance(CollatorSelection::account_id()), 5); + }); +} + +#[test] +fn session_management_works() { + new_test_ext().execute_with(|| { + initialize_to_block(1); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(4); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + // add a new collator + assert_ok!(CollatorSelection::register_as_candidate( + RuntimeOrigin::signed(3) + )); + + // session won't see this. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + // but we have a new candidate. + assert_eq!(CollatorSelection::candidates().len(), 1); + + initialize_to_block(10); + assert_eq!(SessionChangeBlock::get(), 10); + // pallet-session has 1 session delay; current validators are the same. + assert_eq!(Session::validators(), vec![1, 2]); + // queued ones are changed, and now we have 3. + assert_eq!(Session::queued_keys().len(), 3); + // session handlers (aura, et. al.) cannot see this yet. 
+ assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(20); + assert_eq!(SessionChangeBlock::get(), 20); + // changed are now reflected to session handlers. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 3]); + }); +} + +#[test] +fn kick_and_slash_mechanism() { + new_test_ext().execute_with(|| { + // Define slash destination account + >::put(5); + // add a new collator + assert_ok!(CollatorSelection::register_as_candidate( + RuntimeOrigin::signed(3) + )); + assert_ok!(CollatorSelection::register_as_candidate( + RuntimeOrigin::signed(4) + )); + initialize_to_block(10); + assert_eq!(CollatorSelection::candidates().len(), 2); + initialize_to_block(20); + assert_eq!(SessionChangeBlock::get(), 20); + // 4 authored this block, gets to stay 3 was kicked + assert_eq!(CollatorSelection::candidates().len(), 1); + // 3 will be kicked after 1 session delay + assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 3, 4]); + let collator = CandidateInfo { + who: 4, + deposit: 10, + }; + assert_eq!(CollatorSelection::candidates(), vec![collator]); + assert_eq!(CollatorSelection::last_authored_block(4), 20); + initialize_to_block(30); + // 3 gets kicked after 1 session delay + assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 4]); + // kicked collator gets funds back except slashed 10% (of 10 bond) + assert_eq!(Balances::free_balance(3), 99); + assert_eq!(Balances::free_balance(5), 101); + }); +} + +#[test] +fn should_not_kick_mechanism_too_few() { + new_test_ext().execute_with(|| { + // add a new collator + assert_ok!(CollatorSelection::register_as_candidate( + RuntimeOrigin::signed(3) + )); + assert_ok!(CollatorSelection::register_as_candidate( + RuntimeOrigin::signed(5) + )); + initialize_to_block(10); + assert_eq!(CollatorSelection::candidates().len(), 2); + initialize_to_block(20); + assert_eq!(SessionChangeBlock::get(), 20); + // 4 authored this block, 5 gets to stay too few 3 was kicked + 
assert_eq!(CollatorSelection::candidates().len(), 1); + // 3 will be kicked after 1 session delay + assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 3, 5]); + let collator = CandidateInfo { + who: 5, + deposit: 10, + }; + assert_eq!(CollatorSelection::candidates(), vec![collator]); + assert_eq!(CollatorSelection::last_authored_block(4), 20); + initialize_to_block(30); + // 3 gets kicked after 1 session delay + assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 5]); + // kicked collator gets funds back (but slashed) + assert_eq!(Balances::free_balance(3), 99); + }); +} + +#[test] +#[should_panic = "duplicate invulnerables in genesis."] +fn cannot_set_genesis_value_twice() { + sp_tracing::try_init_simple(); + let mut t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + let invulnerables = vec![1, 1]; + + let collator_selection = collator_selection::GenesisConfig:: { + desired_candidates: 2, + candidacy_bond: 10, + invulnerables, + }; + // collator selection must be initialized before session. + collator_selection.assimilate_storage(&mut t).unwrap(); +} diff --git a/pallets/collator-selection/src/weights.rs b/pallets/collator-selection/src/weights.rs new file mode 100644 index 0000000000..ad80e6b2f3 --- /dev/null +++ b/pallets/collator-selection/src/weights.rs @@ -0,0 +1,274 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + + +//! Autogenerated weights for pallet_collator_selection +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-04-04, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `devserver-01`, CPU: `Intel(R) Xeon(R) E-2236 CPU @ 3.40GHz` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("shibuya-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/release/astar-collator +// benchmark +// pallet +// --chain=shibuya-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_collator_selection +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./benchmark-results/collator_selection_weights.rs +// --template=./scripts/templates/weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_collator_selection. +pub trait WeightInfo { + fn set_invulnerables(b: u32, ) -> Weight; + fn set_desired_candidates() -> Weight; + fn set_candidacy_bond() -> Weight; + fn register_as_candidate(c: u32, ) -> Weight; + fn leave_intent(c: u32, ) -> Weight; + fn note_author() -> Weight; + fn new_session(r: u32, c: u32, ) -> Weight; +} + +/// Weights for pallet_collator_selection using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + // Storage: Session NextKeys (r:48 w:0) + // Proof Skipped: Session NextKeys (max_values: None, max_size: None, mode: Measured) + // Storage: CollatorSelection Invulnerables (r:0 w:1) + // Proof Skipped: CollatorSelection Invulnerables (max_values: Some(1), max_size: None, mode: Measured) + /// The range of component `b` is `[1, 48]`. 
+ fn set_invulnerables(b: u32, ) -> Weight { + // Minimum execution time: 14_956 nanoseconds. + Weight::from_ref_time(15_730_242) + .saturating_add(Weight::from_proof_size(408)) + // Standard Error: 4_959 + .saturating_add(Weight::from_ref_time(2_577_606).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) + .saturating_add(T::DbWeight::get().writes(1_u64)) + .saturating_add(Weight::from_proof_size(2633).saturating_mul(b.into())) + } + // Storage: CollatorSelection DesiredCandidates (r:0 w:1) + // Proof Skipped: CollatorSelection DesiredCandidates (max_values: Some(1), max_size: None, mode: Measured) + fn set_desired_candidates() -> Weight { + // Minimum execution time: 7_042 nanoseconds. + Weight::from_ref_time(7_159_000) + .saturating_add(Weight::from_proof_size(0)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + // Storage: CollatorSelection CandidacyBond (r:0 w:1) + // Proof Skipped: CollatorSelection CandidacyBond (max_values: Some(1), max_size: None, mode: Measured) + fn set_candidacy_bond() -> Weight { + // Minimum execution time: 7_245 nanoseconds. 
+ Weight::from_ref_time(7_395_000) + .saturating_add(Weight::from_proof_size(0)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + // Storage: CollatorSelection Candidates (r:1 w:1) + // Proof Skipped: CollatorSelection Candidates (max_values: Some(1), max_size: None, mode: Measured) + // Storage: CollatorSelection DesiredCandidates (r:1 w:0) + // Proof Skipped: CollatorSelection DesiredCandidates (max_values: Some(1), max_size: None, mode: Measured) + // Storage: CollatorSelection Invulnerables (r:1 w:0) + // Proof Skipped: CollatorSelection Invulnerables (max_values: Some(1), max_size: None, mode: Measured) + // Storage: Session NextKeys (r:1 w:0) + // Proof Skipped: Session NextKeys (max_values: None, max_size: None, mode: Measured) + // Storage: CollatorSelection CandidacyBond (r:1 w:0) + // Proof Skipped: CollatorSelection CandidacyBond (max_values: Some(1), max_size: None, mode: Measured) + // Storage: CollatorSelection LastAuthoredBlock (r:0 w:1) + // Proof Skipped: CollatorSelection LastAuthoredBlock (max_values: None, max_size: None, mode: Measured) + /// The range of component `c` is `[1, 148]`. + fn register_as_candidate(c: u32, ) -> Weight { + // Minimum execution time: 39_356 nanoseconds. + Weight::from_ref_time(42_616_377) + .saturating_add(Weight::from_proof_size(9891)) + // Standard Error: 670 + .saturating_add(Weight::from_ref_time(39_880).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + .saturating_add(Weight::from_proof_size(306).saturating_mul(c.into())) + } + // Storage: CollatorSelection Candidates (r:1 w:1) + // Proof Skipped: CollatorSelection Candidates (max_values: Some(1), max_size: None, mode: Measured) + // Storage: CollatorSelection LastAuthoredBlock (r:0 w:1) + // Proof Skipped: CollatorSelection LastAuthoredBlock (max_values: None, max_size: None, mode: Measured) + /// The range of component `c` is `[6, 148]`. 
+ fn leave_intent(c: u32, ) -> Weight { + // Minimum execution time: 28_341 nanoseconds. + Weight::from_ref_time(30_119_984) + .saturating_add(Weight::from_proof_size(1369)) + // Standard Error: 1_581 + .saturating_add(Weight::from_ref_time(42_495).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + .saturating_add(Weight::from_proof_size(98).saturating_mul(c.into())) + } + // Storage: System Account (r:2 w:2) + // Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + // Storage: CollatorSelection LastAuthoredBlock (r:0 w:1) + // Proof Skipped: CollatorSelection LastAuthoredBlock (max_values: None, max_size: None, mode: Measured) + fn note_author() -> Weight { + // Minimum execution time: 31_536 nanoseconds. + Weight::from_ref_time(31_986_000) + .saturating_add(Weight::from_proof_size(5497)) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } + // Storage: CollatorSelection Candidates (r:1 w:0) + // Proof Skipped: CollatorSelection Candidates (max_values: Some(1), max_size: None, mode: Measured) + // Storage: CollatorSelection LastAuthoredBlock (r:148 w:0) + // Proof Skipped: CollatorSelection LastAuthoredBlock (max_values: None, max_size: None, mode: Measured) + // Storage: CollatorSelection Invulnerables (r:1 w:0) + // Proof Skipped: CollatorSelection Invulnerables (max_values: Some(1), max_size: None, mode: Measured) + // Storage: System Account (r:143 w:143) + // Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + // Storage: CollatorSelection SlashDestination (r:1 w:0) + // Proof Skipped: CollatorSelection SlashDestination (max_values: Some(1), max_size: None, mode: Measured) + /// The range of component `r` is `[1, 148]`. + /// The range of component `c` is `[1, 148]`. 
+ fn new_session(r: u32, c: u32, ) -> Weight { + // Minimum execution time: 16_816 nanoseconds. + Weight::from_ref_time(17_132_000) + .saturating_add(Weight::from_proof_size(4714)) + // Standard Error: 475_405 + .saturating_add(Weight::from_ref_time(15_758_567).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(c.into()))) + .saturating_add(Weight::from_proof_size(3131).saturating_mul(c.into())) + .saturating_add(Weight::from_proof_size(2751).saturating_mul(r.into())) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + // Storage: Session NextKeys (r:48 w:0) + // Proof Skipped: Session NextKeys (max_values: None, max_size: None, mode: Measured) + // Storage: CollatorSelection Invulnerables (r:0 w:1) + // Proof Skipped: CollatorSelection Invulnerables (max_values: Some(1), max_size: None, mode: Measured) + /// The range of component `b` is `[1, 48]`. + fn set_invulnerables(b: u32, ) -> Weight { + // Minimum execution time: 14_956 nanoseconds. + Weight::from_ref_time(15_730_242) + .saturating_add(Weight::from_proof_size(408)) + // Standard Error: 4_959 + .saturating_add(Weight::from_ref_time(2_577_606).saturating_mul(b.into())) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(b.into()))) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + .saturating_add(Weight::from_proof_size(2633).saturating_mul(b.into())) + } + // Storage: CollatorSelection DesiredCandidates (r:0 w:1) + // Proof Skipped: CollatorSelection DesiredCandidates (max_values: Some(1), max_size: None, mode: Measured) + fn set_desired_candidates() -> Weight { + // Minimum execution time: 7_042 nanoseconds. 
+ Weight::from_ref_time(7_159_000) + .saturating_add(Weight::from_proof_size(0)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + // Storage: CollatorSelection CandidacyBond (r:0 w:1) + // Proof Skipped: CollatorSelection CandidacyBond (max_values: Some(1), max_size: None, mode: Measured) + fn set_candidacy_bond() -> Weight { + // Minimum execution time: 7_245 nanoseconds. + Weight::from_ref_time(7_395_000) + .saturating_add(Weight::from_proof_size(0)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + // Storage: CollatorSelection Candidates (r:1 w:1) + // Proof Skipped: CollatorSelection Candidates (max_values: Some(1), max_size: None, mode: Measured) + // Storage: CollatorSelection DesiredCandidates (r:1 w:0) + // Proof Skipped: CollatorSelection DesiredCandidates (max_values: Some(1), max_size: None, mode: Measured) + // Storage: CollatorSelection Invulnerables (r:1 w:0) + // Proof Skipped: CollatorSelection Invulnerables (max_values: Some(1), max_size: None, mode: Measured) + // Storage: Session NextKeys (r:1 w:0) + // Proof Skipped: Session NextKeys (max_values: None, max_size: None, mode: Measured) + // Storage: CollatorSelection CandidacyBond (r:1 w:0) + // Proof Skipped: CollatorSelection CandidacyBond (max_values: Some(1), max_size: None, mode: Measured) + // Storage: CollatorSelection LastAuthoredBlock (r:0 w:1) + // Proof Skipped: CollatorSelection LastAuthoredBlock (max_values: None, max_size: None, mode: Measured) + /// The range of component `c` is `[1, 148]`. + fn register_as_candidate(c: u32, ) -> Weight { + // Minimum execution time: 39_356 nanoseconds. 
+ Weight::from_ref_time(42_616_377) + .saturating_add(Weight::from_proof_size(9891)) + // Standard Error: 670 + .saturating_add(Weight::from_ref_time(39_880).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + .saturating_add(Weight::from_proof_size(306).saturating_mul(c.into())) + } + // Storage: CollatorSelection Candidates (r:1 w:1) + // Proof Skipped: CollatorSelection Candidates (max_values: Some(1), max_size: None, mode: Measured) + // Storage: CollatorSelection LastAuthoredBlock (r:0 w:1) + // Proof Skipped: CollatorSelection LastAuthoredBlock (max_values: None, max_size: None, mode: Measured) + /// The range of component `c` is `[6, 148]`. + fn leave_intent(c: u32, ) -> Weight { + // Minimum execution time: 28_341 nanoseconds. + Weight::from_ref_time(30_119_984) + .saturating_add(Weight::from_proof_size(1369)) + // Standard Error: 1_581 + .saturating_add(Weight::from_ref_time(42_495).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + .saturating_add(Weight::from_proof_size(98).saturating_mul(c.into())) + } + // Storage: System Account (r:2 w:2) + // Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + // Storage: CollatorSelection LastAuthoredBlock (r:0 w:1) + // Proof Skipped: CollatorSelection LastAuthoredBlock (max_values: None, max_size: None, mode: Measured) + fn note_author() -> Weight { + // Minimum execution time: 31_536 nanoseconds. 
+ Weight::from_ref_time(31_986_000) + .saturating_add(Weight::from_proof_size(5497)) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } + // Storage: CollatorSelection Candidates (r:1 w:0) + // Proof Skipped: CollatorSelection Candidates (max_values: Some(1), max_size: None, mode: Measured) + // Storage: CollatorSelection LastAuthoredBlock (r:148 w:0) + // Proof Skipped: CollatorSelection LastAuthoredBlock (max_values: None, max_size: None, mode: Measured) + // Storage: CollatorSelection Invulnerables (r:1 w:0) + // Proof Skipped: CollatorSelection Invulnerables (max_values: Some(1), max_size: None, mode: Measured) + // Storage: System Account (r:143 w:143) + // Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + // Storage: CollatorSelection SlashDestination (r:1 w:0) + // Proof Skipped: CollatorSelection SlashDestination (max_values: Some(1), max_size: None, mode: Measured) + /// The range of component `r` is `[1, 148]`. + /// The range of component `c` is `[1, 148]`. + fn new_session(r: u32, c: u32, ) -> Weight { + // Minimum execution time: 16_816 nanoseconds. 
+ Weight::from_ref_time(17_132_000) + .saturating_add(Weight::from_proof_size(4714)) + // Standard Error: 475_405 + .saturating_add(Weight::from_ref_time(15_758_567).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(c.into()))) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(c.into()))) + .saturating_add(Weight::from_proof_size(3131).saturating_mul(c.into())) + .saturating_add(Weight::from_proof_size(2751).saturating_mul(r.into())) + } +} \ No newline at end of file diff --git a/pallets/contracts-migration/Cargo.toml b/pallets/contracts-migration/Cargo.toml new file mode 100644 index 0000000000..e0959269df --- /dev/null +++ b/pallets/contracts-migration/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "pallet-contracts-migration" +version = "1.0.0" +license = "Apache-2.0" +description = "FRAME pallet for managing multi-block pallet contracts storage migration" +authors.workspace = true +edition.workspace = true +homepage.workspace = true +repository.workspace = true + +[dependencies] +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-contracts = { workspace = true } +parity-scale-codec = { workspace = true } +scale-info = { workspace = true } +sp-arithmetic = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } + +[features] +default = ["std"] +std = [ + "parity-scale-codec/std", + "scale-info/std", + "sp-std/std", + "frame-support/std", + "frame-system/std", + "pallet-contracts/std", +] +try-runtime = ["frame-support/try-runtime"] diff --git a/pallets/contracts-migration/src/lib.rs b/pallets/contracts-migration/src/lib.rs new file mode 100644 index 0000000000..5596b9a819 --- /dev/null +++ b/pallets/contracts-migration/src/lib.rs @@ -0,0 +1,318 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +#![cfg_attr(not(feature = "std"), no_std)] + +/// Purpose of this pallet is to provide multi-stage migration features for pallet-contracts v9 migration. +/// Once it's finished for both `Shibuya` and `Shiden`, it should be deleted. +pub use pallet::*; + +use frame_support::{ + log, + pallet_prelude::*, + storage::{generator::StorageMap, unhashed}, + storage_alias, + traits::Get, + WeakBoundedVec, +}; + +use frame_system::pallet_prelude::*; +use pallet_contracts::Determinism; +use parity_scale_codec::{Decode, Encode, FullCodec}; +use sp_runtime::Saturating; +#[cfg(feature = "try-runtime")] +use sp_std::vec::Vec; + +pub use crate::pallet::CustomMigration; + +const LOG_TARGET: &str = "pallet-contracts-migration"; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + + #[pallet::pallet] + pub struct Pallet(PhantomData); + + #[pallet::config] + pub trait Config: frame_system::Config + pallet_contracts::Config { + /// The overarching event type. 
+ type RuntimeEvent: From> + IsType<::RuntimeEvent>; + } + + #[pallet::storage] + #[pallet::getter(fn migration_state)] + pub type MigrationStateStorage = StorageValue<_, MigrationState, ValueQuery>; + + #[pallet::event] + #[pallet::generate_deposit(pub(crate) fn deposit_event)] + pub enum Event { + /// Number of contracts that were migrated in the migration call + ContractsMigrated(u32), + } + + // The following structs & types were taken from `pallet-contracts` since they aren't exposed outside of the `pallet-contracts` crate. + + #[storage_alias] + type CodeStorage = + StorageMap, Identity, CodeHash, PrefabWasmModule>; + + type CodeHash = ::Hash; + type RelaxedCodeVec = WeakBoundedVec::MaxCodeLen>; + + #[derive(Encode, Decode, RuntimeDebug, MaxEncodedLen)] + pub struct OldPrefabWasmModule { + #[codec(compact)] + pub instruction_weights_version: u32, + #[codec(compact)] + pub initial: u32, + #[codec(compact)] + pub maximum: u32, + pub code: RelaxedCodeVec, + } + + #[derive(Encode, Decode, RuntimeDebug, MaxEncodedLen)] + pub struct PrefabWasmModule { + #[codec(compact)] + pub instruction_weights_version: u32, + #[codec(compact)] + pub initial: u32, + #[codec(compact)] + pub maximum: u32, + pub code: RelaxedCodeVec, + pub determinism: Determinism, + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_initialize(_now: BlockNumberFor) -> Weight { + // This is done in order to account for the read in call filter + >::on_chain_storage_version(); + T::DbWeight::get().reads(1) + } + } + + #[pallet::call] + impl Pallet { + #[pallet::call_index(0)] + #[pallet::weight({ + let max_allowed_call_weight = Pallet::::max_call_weight(); + weight_limit + .unwrap_or(max_allowed_call_weight) + .min(max_allowed_call_weight) + })] + pub fn migrate( + origin: OriginFor, + weight_limit: Option, + ) -> DispatchResultWithPostInfo { + ensure_signed(origin)?; + + let consumed_weight = Self::do_migrate(weight_limit); + + Ok(Some(consumed_weight).into()) + } + } + + impl Pallet { + fn 
do_migrate(requested_weight_limit: Option) -> Weight { + let version = >::on_chain_storage_version(); + let mut consumed_weight = T::DbWeight::get().reads(1); + + if version != 8 { + log::trace!( + target: LOG_TARGET, + "Version is {:?} so skipping migration procedures.", + version, + ); + Self::deposit_event(Event::::ContractsMigrated(0)); + return consumed_weight; + } + + let max_allowed_call_weight = Self::max_call_weight(); + let weight_limit = requested_weight_limit + .unwrap_or(max_allowed_call_weight) + .min(max_allowed_call_weight); + log::trace!( + target: LOG_TARGET, + "CodeStorage migration weight limit will be {:?}.", + weight_limit, + ); + + let migration_state = MigrationStateStorage::::get().for_iteration(); + + if let MigrationState::CodeStorage(last_processed_key) = migration_state { + // First, get correct iterator. + let key_iter = if let Some(previous_key) = last_processed_key { + CodeStorage::::iter_keys_from(previous_key.into_inner()) + } else { + CodeStorage::::iter_keys() + }; + + let mut counter = 0_u32; + + for key in key_iter { + let key_as_vec = CodeStorage::::storage_map_final_key(key); + let used_weight = + Self::translate(&key_as_vec, |old: OldPrefabWasmModule| { + Some(PrefabWasmModule:: { + instruction_weights_version: old.instruction_weights_version, + initial: old.initial, + maximum: old.maximum, + code: old.code, + determinism: Determinism::Deterministic, + }) + }); + + // Increment total consumed weight. + consumed_weight.saturating_accrue(used_weight); + counter += 1; + + // Check if we've consumed enough weight already. 
+ if consumed_weight.any_gt(weight_limit) { + log::trace!( + target: LOG_TARGET, + "CodeStorage migration stopped after consuming {:?} weight and after processing {:?} DB entries.", + consumed_weight, counter, + ); + MigrationStateStorage::::put(MigrationState::CodeStorage(Some( + WeakBoundedVec::force_from(key_as_vec, None), + ))); + consumed_weight.saturating_accrue(T::DbWeight::get().writes(1)); + + Self::deposit_event(Event::::ContractsMigrated(counter)); + + // we want try-runtime to execute the entire migration + if cfg!(feature = "try-runtime") { + return Self::do_migrate(Some(weight_limit)) + .saturating_add(consumed_weight); + } else { + return consumed_weight; + } + } + } + + log::trace!(target: LOG_TARGET, "CodeStorage migration finished.",); + Self::deposit_event(Event::::ContractsMigrated(counter)); + + // Clean up storage value so we can safely remove the pallet later + MigrationStateStorage::::kill(); + StorageVersion::new(9).put::>(); + consumed_weight.saturating_accrue(T::DbWeight::get().writes(2)); + } + + consumed_weight + } + + /// Max allowed weight that migration should be allowed to consume + fn max_call_weight() -> Weight { + // 50% of block should be fine + T::BlockWeights::get().max_block / 2 + } + + /// Used to translate a single value in the DB + /// Returns conservative weight estimate of the operation + fn translate Option>( + key: &[u8], + mut f: F, + ) -> Weight { + let value = match unhashed::get::(key) { + Some(value) => value, + None => { + return Weight::from_parts( + T::DbWeight::get().reads(1).ref_time(), + OldPrefabWasmModule::::max_encoded_len() as u64, + ); + } + }; + + let mut proof_size = value.using_encoded(|o| o.len() as u64); + + match f(value) { + Some(new) => { + proof_size.saturating_accrue(new.using_encoded(|n| n.len() as u64)); + unhashed::put::(key, &new); + } + // Cannot happen in this file + None => unhashed::kill(key), + } + + Weight::from_parts(T::DbWeight::get().reads_writes(1, 1).ref_time(), proof_size) + } 
+ } + + #[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, RuntimeDebug, MaxEncodedLen)] + pub enum MigrationState { + /// No migration in progress + NotInProgress, + /// In the middle of `CodeStorage` migration. The const for max size is an overestimate but that's fine. + CodeStorage(Option>>), + } + + impl MigrationState { + /// Convert `self` into value applicable for iteration + fn for_iteration(self) -> Self { + if self == Self::NotInProgress { + Self::CodeStorage(None) + } else { + self + } + } + } + + impl Default for MigrationState { + fn default() -> Self { + MigrationState::NotInProgress + } + } + + pub struct CustomMigration(PhantomData); + impl frame_support::traits::OnRuntimeUpgrade for CustomMigration { + fn on_runtime_upgrade() -> Weight { + // Ensures that first step only starts the migration with minimal changes in case of production build. + // In case of `try-runtime`, we want predefined limit. + let limit = if cfg!(feature = "try-runtime") { + None + } else { + Some(Weight::zero()) + }; + Pallet::::do_migrate(limit) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_state: Vec) -> Result<(), &'static str> { + for value in CodeStorage::::iter_values() { + ensure!( + value.determinism == Determinism::Deterministic, + "All pre-existing codes need to be deterministic." + ); + } + + ensure!( + !MigrationStateStorage::::exists(), + "MigrationStateStorage has to be killed at the end of migration." 
+ ); + + ensure!( + >::on_chain_storage_version() == 9, + "pallet-contracts storage version must be 9 at the end of migration" + ); + + Ok(()) + } + } +} diff --git a/pallets/custom-signatures/Cargo.toml b/pallets/custom-signatures/Cargo.toml new file mode 100644 index 0000000000..ff5bef7151 --- /dev/null +++ b/pallets/custom-signatures/Cargo.toml @@ -0,0 +1,42 @@ +[package] +name = "pallet-custom-signatures" +version = "4.6.0" +license = "Apache-2.0" +description = "FRAME pallet for user defined extrinsic signatures" +authors.workspace = true +edition.workspace = true +homepage.workspace = true +repository.workspace = true + +[dependencies] +frame-support = { workspace = true } +frame-system = { workspace = true } +parity-scale-codec = { workspace = true } +scale-info = { workspace = true } +serde = { workspace = true, optional = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } + +[dev-dependencies] +assert_matches = { workspace = true } +hex-literal = { workspace = true } +libsecp256k1 = { workspace = true } +pallet-balances = { workspace = true } +sp-keyring = { workspace = true } + +[features] +default = ["std"] +std = [ + "serde", + "parity-scale-codec/std", + "scale-info/std", + "sp-io/std", + "sp-std/std", + "sp-core/std", + "sp-runtime/std", + "frame-support/std", + "frame-system/std", +] +try-runtime = ["frame-support/try-runtime"] diff --git a/pallets/custom-signatures/src/ethereum.rs b/pallets/custom-signatures/src/ethereum.rs new file mode 100644 index 0000000000..0a93d3803b --- /dev/null +++ b/pallets/custom-signatures/src/ethereum.rs @@ -0,0 +1,100 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Ethereum prefixed signatures compatibility instances. + +use parity_scale_codec::{Decode, Encode}; +use sp_core::ecdsa; +use sp_io::{crypto::secp256k1_ecdsa_recover_compressed, hashing::keccak_256}; +use sp_runtime::traits::{IdentifyAccount, Lazy, Verify}; +use sp_runtime::MultiSignature; +use sp_std::prelude::*; + +/// Ethereum-compatible signature type. +#[derive(Encode, Decode, PartialEq, Eq, Clone, scale_info::TypeInfo)] +pub struct EthereumSignature(pub [u8; 65]); + +impl sp_std::fmt::Debug for EthereumSignature { + fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { + write!(f, "EthereumSignature({:?})", &self.0[..]) + } +} + +impl From for EthereumSignature { + fn from(signature: ecdsa::Signature) -> Self { + Self(signature.into()) + } +} + +impl sp_std::convert::TryFrom> for EthereumSignature { + type Error = (); + + fn try_from(data: Vec) -> Result { + if data.len() == 65 { + let mut inner = [0u8; 65]; + inner.copy_from_slice(&data[..]); + Ok(EthereumSignature(inner)) + } else { + Err(()) + } + } +} + +/// Constructs the message that Ethereum RPC's `personal_sign` and `eth_sign` would sign. +/// +/// Note: sign message hash to escape of message length estimation. +pub fn signable_message(what: &[u8]) -> Vec { + let hash = keccak_256(what); + let mut v = b"\x19Ethereum Signed Message:\n32".to_vec(); + v.extend_from_slice(&hash[..]); + v +} + +/// Attempts to recover the Ethereum public key from a message signature signed by using +/// the Ethereum RPC's `personal_sign` and `eth_sign`. 
+impl Verify for EthereumSignature { + type Signer = ::Signer; + + fn verify>( + &self, + mut msg: L, + account: &::AccountId, + ) -> bool { + let msg = keccak_256(&signable_message(msg.get())); + match secp256k1_ecdsa_recover_compressed(&self.0, &msg).ok() { + Some(public) => { + let signer = Self::Signer::from(ecdsa::Public::from_raw(public)); + *account == signer.into_account() + } + None => false, + } + } +} + +#[test] +fn verify_should_works() { + use hex_literal::hex; + use sp_core::{ecdsa, Pair}; + + let msg = "test eth signed message"; + let pair = ecdsa::Pair::from_seed(&hex![ + "ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" + ]); + let account = ::Signer::from(pair.public()).into_account(); + let signature = EthereumSignature(hex!["f5d5cc953828e3fb0d81f3176d88fa5c73d3ad3dc4bc7a8061b03a6db2cd73337778df75a1443e8c642f6ceae0db39b90c321ac270ad7836695cae76f703f3031c"]); + assert_eq!(signature.verify(msg.as_ref(), &account), true); +} diff --git a/pallets/custom-signatures/src/lib.rs b/pallets/custom-signatures/src/lib.rs new file mode 100644 index 0000000000..fa9dd1b37d --- /dev/null +++ b/pallets/custom-signatures/src/lib.rs @@ -0,0 +1,220 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#![cfg_attr(not(feature = "std"), no_std)] + +pub use pallet::*; + +/// Ethereum-compatible signatures (eth_sign API call). +pub mod ethereum; + +#[cfg(test)] +mod tests; + +#[frame_support::pallet] +pub mod pallet { + use frame_support::{ + dispatch::{Dispatchable, GetDispatchInfo}, + pallet_prelude::*, + traits::{Currency, ExistenceRequirement, Get, OnUnbalanced, WithdrawReasons}, + }; + use frame_system::{ensure_none, pallet_prelude::*}; + use sp_runtime::traits::{IdentifyAccount, Verify}; + use sp_std::{convert::TryFrom, prelude::*}; + + #[pallet::pallet] + pub struct Pallet(_); + + /// The balance type of this pallet. + pub type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; + + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + /// A signable call. + type RuntimeCall: Parameter + + Dispatchable + + GetDispatchInfo; + + /// User defined signature type. + type Signature: Parameter + Verify + TryFrom>; + + /// User defined signer type. + type Signer: IdentifyAccount; + + /// The currency trait. + type Currency: Currency; + + /// The call fee destination. + type OnChargeTransaction: OnUnbalanced< + >::NegativeImbalance, + >; + + /// The call processing fee amount. + #[pallet::constant] + type CallFee: Get>; + + /// The call magic number. + #[pallet::constant] + type CallMagicNumber: Get; + + /// A configuration for base priority of unsigned transactions. + /// + /// This is exposed so that it can be tuned for particular runtime, when + /// multiple pallets send unsigned transactions. + type UnsignedPriority: Get; + } + + #[pallet::error] + pub enum Error { + /// Signature decode fails. + DecodeFailure, + /// Signature and account mismatched. + InvalidSignature, + /// Bad nonce parameter. + BadNonce, + } + + #[pallet::event] + #[pallet::generate_deposit(pub(crate) fn deposit_event)] + pub enum Event { + /// A call just executed. 
\[result\] + Executed(T::AccountId, DispatchResult), + } + + #[pallet::call] + impl Pallet { + /// # + /// - O(1). + /// - Limited storage reads. + /// - One DB write (event). + /// - Weight of derivative `call` execution + read/write + 10_000. + /// # + #[pallet::call_index(0)] + #[pallet::weight({ + let dispatch_info = call.get_dispatch_info(); + (dispatch_info.weight.saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(Weight::from_ref_time(10_000)), + dispatch_info.class) + })] + pub fn call( + origin: OriginFor, + call: Box<::RuntimeCall>, + signer: T::AccountId, + signature: Vec, + #[pallet::compact] nonce: T::Index, + ) -> DispatchResultWithPostInfo { + ensure_none(origin)?; + + // Ensure that transaction isn't stale + ensure!( + nonce == frame_system::Pallet::::account_nonce(signer.clone()), + Error::::BadNonce, + ); + + let signature = ::Signature::try_from(signature) + .map_err(|_| Error::::DecodeFailure)?; + + // Ensure that transaction signature is valid + ensure!( + Self::valid_signature(&call, &signer, &signature, &nonce), + Error::::InvalidSignature + ); + + // Increment account nonce + frame_system::Pallet::::inc_account_nonce(signer.clone()); + + // Processing fee + let tx_fee = T::Currency::withdraw( + &signer, + T::CallFee::get(), + WithdrawReasons::FEE, + ExistenceRequirement::AllowDeath, + )?; + T::OnChargeTransaction::on_unbalanced(tx_fee); + + // Dispatch call + let new_origin = frame_system::RawOrigin::Signed(signer.clone()).into(); + let res = call.dispatch(new_origin).map(|_| ()); + Self::deposit_event(Event::Executed(signer, res.map_err(|e| e.error))); + + // Fee already charged + Ok(Pays::No.into()) + } + } + + impl Pallet { + /// Verify custom signature and returns `true` if correct. 
+ pub fn valid_signature( + call: &Box<::RuntimeCall>, + signer: &T::AccountId, + signature: &T::Signature, + nonce: &T::Index, + ) -> bool { + let payload = (T::CallMagicNumber::get(), *nonce, call.clone()); + signature.verify(&payload.encode()[..], signer) + } + } + + pub(crate) const SIGNATURE_DECODE_FAILURE: u8 = 1; + + #[pallet::validate_unsigned] + impl frame_support::unsigned::ValidateUnsigned for Pallet { + type Call = Call; + + fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { + // Call decomposition (we have only one possible value here) + let (call, signer, signature, nonce) = match call { + Call::call { + call, + signer, + signature, + nonce, + } => (call, signer, signature, nonce), + _ => return InvalidTransaction::Call.into(), + }; + + // Check that tx isn't stale + if *nonce != frame_system::Pallet::::account_nonce(signer.clone()) { + return InvalidTransaction::Stale.into(); + } + + // Check signature encoding + if let Ok(signature) = ::Signature::try_from(signature.clone()) { + // Verify signature + if Self::valid_signature(call, signer, &signature, nonce) { + ValidTransaction::with_tag_prefix("CustomSignatures") + .priority(T::UnsignedPriority::get()) + .and_provides((call, signer, nonce)) + .longevity(64_u64) + .propagate(true) + .build() + } else { + // Signature mismatched to given signer + InvalidTransaction::BadProof.into() + } + } else { + // Signature encoding broken + InvalidTransaction::Custom(SIGNATURE_DECODE_FAILURE).into() + } + } + } +} diff --git a/pallets/custom-signatures/src/tests.rs b/pallets/custom-signatures/src/tests.rs new file mode 100644 index 0000000000..56eb68737b --- /dev/null +++ b/pallets/custom-signatures/src/tests.rs @@ -0,0 +1,329 @@ +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate as custom_signatures; +use assert_matches::assert_matches; +use custom_signatures::*; +use frame_support::{ + traits::Contains, + {assert_err, assert_ok, parameter_types}, +}; +use hex_literal::hex; +use parity_scale_codec::Encode; +use sp_core::{ecdsa, Pair}; +use sp_io::{hashing::keccak_256, TestExternalities}; +use sp_keyring::AccountKeyring as Keyring; +use sp_runtime::{ + testing::{Header, H256}, + traits::{BlakeTwo256, IdentifyAccount, IdentityLookup, Verify}, + transaction_validity::TransactionPriority, + MultiSignature, MultiSigner, +}; + +pub const ECDSA_SEED: [u8; 32] = + hex_literal::hex!["7e9c7ad85df5cdc88659f53e06fb2eb9bab3ebc59083a3190eaf2c730332529c"]; + +type Balance = u128; +type BlockNumber = u64; +type Signature = MultiSignature; +type AccountId = <::Signer as IdentifyAccount>::AccountId; +type Block = frame_system::mocking::MockBlock; +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; + +frame_support::construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + Balances: pallet_balances, + System: frame_system, + CustomSignatures: custom_signatures, + } +); + +parameter_types! { + pub const BlockHashCount: u64 = 250; +} + +pub struct NoRemarkFilter; +impl Contains for NoRemarkFilter { + fn contains(call: &RuntimeCall) -> bool { + match call { + RuntimeCall::System(method) => match method { + frame_system::Call::remark { .. 
} => false, + _ => true, + }, + _ => true, + } + } +} + +impl frame_system::Config for Runtime { + type RuntimeOrigin = RuntimeOrigin; + type BaseCallFilter = NoRemarkFilter; + type Index = u32; + type BlockNumber = BlockNumber; + type RuntimeCall = RuntimeCall; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Header = Header; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type DbWeight = (); + type SystemWeightInfo = (); + type BlockWeights = (); + type BlockLength = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +parameter_types! { + pub const ExistentialDeposit: Balance = 1; +} + +impl pallet_balances::Config for Runtime { + type Balance = Balance; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = frame_system::Pallet; + type WeightInfo = (); + type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = (); +} + +const MAGIC_NUMBER: u16 = 0xff50; +parameter_types! 
{ + pub const Priority: TransactionPriority = TransactionPriority::MAX; + pub const CallFee: Balance = 42; + pub const CallMagicNumber: u16 = MAGIC_NUMBER; +} + +impl Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type Signature = ethereum::EthereumSignature; + type Signer = ::Signer; + type CallMagicNumber = CallMagicNumber; + type Currency = Balances; + type CallFee = CallFee; + type OnChargeTransaction = (); + type UnsignedPriority = Priority; +} + +fn new_test_ext() -> TestExternalities { + let mut storage = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + + let pair = ecdsa::Pair::from_seed(&ECDSA_SEED); + let account = MultiSigner::from(pair.public()).into_account(); + let _ = pallet_balances::GenesisConfig:: { + balances: vec![(account, 1_000_000_000)], + } + .assimilate_storage(&mut storage); + + let mut ext = TestExternalities::from(storage); + ext.execute_with(|| System::set_block_number(1)); + ext +} + +/// Simple `eth_sign` implementation, should be equal to exported by RPC +fn eth_sign(seed: &[u8; 32], data: &[u8]) -> Vec { + let call_msg = ethereum::signable_message(data); + let ecdsa_msg = libsecp256k1::Message::parse(&keccak_256(&call_msg)); + let secret = libsecp256k1::SecretKey::parse(&seed).expect("valid seed"); + let (signature, recovery_id) = libsecp256k1::sign(&ecdsa_msg, &secret); + let mut out = Vec::new(); + out.extend_from_slice(&signature.serialize()[..]); + // Fix recovery ID: Ethereum uses 27/28 notation + out.push(recovery_id.serialize() + 27); + out +} + +#[test] +fn eth_sign_works() { + let seed = hex!["ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"]; + let text = b"Hello Astar"; + let signature = hex!["0cc6d5de6db06727fe43a260e7c9a417be3daab9b0e4e65e276f543e5c2f3de67e9e26d903d5301181e13033f61692db2dca67c1f8992b62476eaf8cb3a597101c"]; + assert_eq!(eth_sign(&seed, &text[..]), signature); +} + +#[test] +fn invalid_signature() { + let bob: 
::AccountId = Keyring::Bob.into(); + let alice: ::AccountId = Keyring::Alice.into(); + let call = pallet_balances::Call::::transfer { + dest: alice.clone(), + value: 1_000, + } + .into(); + let signature = Vec::from(&hex!["dd0992d40e5cdf99db76bed162808508ac65acd7ae2fdc8573594f03ed9c939773e813181788fc02c3c68f3fdc592759b35f6354484343e18cb5317d34dab6c61b"][..]); + new_test_ext().execute_with(|| { + assert_err!( + CustomSignatures::call(RuntimeOrigin::none(), Box::new(call), bob, signature, 0), + Error::::InvalidSignature, + ); + }); +} + +#[test] +fn balance_transfer() { + new_test_ext().execute_with(|| { + let pair = ecdsa::Pair::from_seed(&ECDSA_SEED); + let account = MultiSigner::from(pair.public()).into_account(); + + let alice: ::AccountId = Keyring::Alice.into(); + assert_eq!(System::account(alice.clone()).data.free, 0); + + let call: RuntimeCall = pallet_balances::Call::::transfer { + dest: alice.clone(), + value: 1_000, + } + .into(); + let payload = (MAGIC_NUMBER, 0u32, call.clone()); + let signature = eth_sign(&ECDSA_SEED, payload.encode().as_ref()).into(); + + assert_eq!(System::account(account.clone()).nonce, 0); + assert_ok!(CustomSignatures::call( + RuntimeOrigin::none(), + Box::new(call.clone()), + account.clone(), + signature, + 0, + )); + assert_eq!(System::account(alice.clone()).data.free, 1_000); + assert_eq!(System::account(account.clone()).nonce, 1); + assert_eq!(System::account(account.clone()).data.free, 999_998_958); + assert_matches!( + System::events() + .last() + .expect("events expected") + .event + .clone(), + RuntimeEvent::CustomSignatures(Event::Executed(used_account, Ok(..),)) + if used_account == account + ); + + let signature = eth_sign(&ECDSA_SEED, payload.encode().as_ref()).into(); + assert_err!( + CustomSignatures::call( + RuntimeOrigin::none(), + Box::new(call.clone()), + account.clone(), + signature, + 0, + ), + Error::::BadNonce, + ); + + let payload = (MAGIC_NUMBER, 1u32, call.clone()); + let signature = eth_sign(&ECDSA_SEED, 
payload.encode().as_ref()).into(); + assert_eq!(System::account(account.clone()).nonce, 1); + assert_ok!(CustomSignatures::call( + RuntimeOrigin::none(), + Box::new(call.clone()), + account.clone(), + signature, + 1, + )); + assert_eq!(System::account(alice).data.free, 2_000); + assert_eq!(System::account(account.clone()).nonce, 2); + assert_eq!(System::account(account.clone()).data.free, 999_997_916); + }) +} + +#[test] +fn call_fixtures() { + use sp_core::crypto::Ss58Codec; + + let seed = hex!["ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"]; + let pair = ecdsa::Pair::from_seed(&seed); + assert_eq!( + MultiSigner::from(pair.public()) + .into_account() + .to_ss58check(), + "5EGynCAEvv8NLeHx8vDMvb8hTcEcMYUMWCDQEEncNEfNWB2W", + ); + + let dest = + AccountId::from_ss58check("5GVwcV6EzxxYbXBm7H6dtxc9TCgL4oepMXtgqWYEc3VXJoaf").unwrap(); + let call: RuntimeCall = pallet_balances::Call::::transfer { dest, value: 1000 }.into(); + assert_eq!( + call.encode(), + hex!["0000c4305fb88b6ccb43d6552dc11d18e7b0ee3185247adcc6e885eb284adf6c563da10f"], + ); + + let payload = (MAGIC_NUMBER, 0u32, call.clone()); + assert_eq!( + payload.encode(), + hex![ + "50ff000000000000c4305fb88b6ccb43d6552dc11d18e7b0ee3185247adcc6e885eb284adf6c563da10f" + ], + ); + + let signature = hex!["6ecb474240df46ee5cde8f51cf5ccf4c75d15ac3c1772aea6c8189604263c98b16350883438c4eaa447ebcb6889d516f70351fd704bb3521072cd2fccc7c99dc1c"]; + assert_eq!(eth_sign(&seed, payload.encode().as_ref()), signature) +} + +#[test] +fn not_allowed_call_filtered() { + new_test_ext().execute_with(|| { + let pair = ecdsa::Pair::from_seed(&ECDSA_SEED); + let account = MultiSigner::from(pair.public()).into_account(); + + let alice: ::AccountId = Keyring::Alice.into(); + assert_eq!(System::account(alice.clone()).data.free, 0); + + let call: RuntimeCall = frame_system::Call::::remark { + remark: Vec::<_>::new(), + } + .into(); + // sanity check, call should be filtered out + 
assert!(!::BaseCallFilter::contains(&call)); + + let payload = (MAGIC_NUMBER, 0u32, call.clone()); + let signature = eth_sign(&ECDSA_SEED, payload.encode().as_ref()).into(); + + assert_eq!(System::account(account.clone()).nonce, 0); + assert_ok!(CustomSignatures::call( + RuntimeOrigin::none(), + Box::new(call.clone()), + account.clone(), + signature, + 0, + )); + assert_eq!(System::account(account.clone()).nonce, 1); + + assert_matches!( + System::events() + .last() + .expect("events expected") + .event + .clone(), + RuntimeEvent::CustomSignatures(Event::Executed(used_account, Err(..),)) + if used_account == account + ); + }) +} diff --git a/pallets/dapps-staking/Cargo.toml b/pallets/dapps-staking/Cargo.toml new file mode 100644 index 0000000000..3290455e15 --- /dev/null +++ b/pallets/dapps-staking/Cargo.toml @@ -0,0 +1,58 @@ +[package] +name = "pallet-dapps-staking" +version = "3.9.0" +description = "FRAME pallet to staking for dapps" +authors.workspace = true +edition.workspace = true +homepage.workspace = true +repository.workspace = true + +[dependencies] +frame-support = { workspace = true } +frame-system = { workspace = true } +num-traits = { workspace = true } +parity-scale-codec = { workspace = true } + +scale-info = { workspace = true } +serde = { workspace = true, optional = true } +sp-arithmetic = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-staking = { workspace = true } +sp-std = { workspace = true } + +frame-benchmarking = { workspace = true, optional = true } + +[dev-dependencies] +pallet-balances = { workspace = true } +pallet-session = { workspace = true } +pallet-timestamp = { workspace = true } + +[features] +default = ["std"] +std = [ + "serde", + "parity-scale-codec/std", + "scale-info/std", + "num-traits/std", + "sp-core/std", + "sp-runtime/std", + "sp-arithmetic/std", + "sp-io/std", + "sp-std/std", + "frame-support/std", + "frame-system/std", + "pallet-balances/std", 
+ "pallet-session/std", + "pallet-timestamp/std", + "sp-staking/std", + "frame-benchmarking?/std", +] +runtime-benchmarks = [ + "frame-benchmarking", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] +try-runtime = ["frame-support/try-runtime"] diff --git a/pallets/dapps-staking/README.md b/pallets/dapps-staking/README.md new file mode 100644 index 0000000000..d8ec211a11 --- /dev/null +++ b/pallets/dapps-staking/README.md @@ -0,0 +1,85 @@ +# Pallet dapps-staking RPC API +This document describes the interface for the pallet-dapps-staking. + +Table of Contents: +1. [Terminology](#Terminology) +2. [Referent implementation](#Referent) +3. [FAQ](#FAQ) + +## Terminology +### Actors in dApps Staking + +- `developer`: a developer or organization who deploys the smart contract +- `staker`: any Astar user who stakes tokens on the developer's smart contract + + +### Abbreviations and Terminology +- `dApp`: decentralized application, is an application that runs on a distributed network. +- `smart contract`: on-chain part of the dApp +- `contract`: short for smart contract +- `EVM`: Ethereum Virtual Machine. Solidity Smart contract runs on it. +- `ink!`: Smart Contract written in Rust, compiled to WASM. +- `era`: Period of time. After it ends, rewards can be claimed. It is defined by the number of produced blocks. Duration of an era for this pallet is configurable. The exact duration depends on block production duration. +- `claim`: Claim ownership of the rewards from the contract's reward pool. +- `bond`: Freeze funds to gain rewards. +- `stake`: In this pallet a staker stakes bonded funds on a smart contract. +- `unstake`: Unfreeze bonded funds and stop gaining rewards. +- `wasm`: Web Assembly. +- `contract's reward pool`: Sum of unclaimed rewards on the contract. Including developer and staker parts. 
+ +--- + +--- +## Referent API implementation +https://github.com/AstarNetwork/astar-apps + +--- +## FAQ + +### Does it matter which project I stake on? +It matters because this means you're supporting that project. +Project reward is calculated based on how much stakers have staked on that project. +You want to support good projects which bring value to the ecosystem since that will make +the ecosystem more valuable, increasing the value of your tokens as a result. + +Use the power you have and make sure to stake on projects you support and find beneficial. + +### Does my reward depend on the project I stake on? +No, the reward you get only depends on the total amount you have staked, invariant of dapp(s) on which you staked. +This allows you to select the dapp you like and want to support, without having to worry if you'll be earning less rewards than you +would if you staked on another dapp. + +### When do the projects/developers get their rewards? +Rewards will be deposited to beneficiaries once either `claim_staker` or `claim_dapp` is called. +We advise users to use our official portal for claiming rewards since the complexity of the protocol is hidden there. + +### What happens if nobody calls the claim function for a long time? +At the moment, there is no history depth limit and your reward will be waiting for you. +However, this will be changed in the future. + +### When developers register their dApp, which has no contract yet, what kind of address do they need to input? +There has to be a contract. Registration can’t be done without the contract. + +### Can projects/developers change contract address once it is registered for dApps staking? +The contract address can't be changed for the dApps staking. However, if the project needs to deploy new version of the contract, they can still use old (registered) contract address for dApp staking purposes. + +### How do projects/developers (who joins dApps staking) get their stakers' address and the amount staked? 
+`GeneralStakerInfo` storage item can be checked. +This would require developer to fetch all values from the map and find the ones where second key equals that of the contract they are interested in. +If the last staked value is greater than `Zero`, it means staker (first key) is staking on that contract. + +### What is the maximum numbers of stakers per dapps? +Please check in the source code constant `MaxNumberOfStakersPerContract`. + +### What is the minimum numbers of stakers per dapps? +Please check in the source code constant `MinimumStakingAmount`. + +### When developers register their dApp, can they registar WASM contract? (If not, can they update it in the future?) +The developers can register several dApps. But they need to use separate accounts and separate contract addresses. +The rule is + +```1 developer <=> 1 contract``` + +### Does dApps staking supports Wasm contracts? +Yes. +Once the Wasm contracts are enabled on a parachain, Wasm contract could be used for dApps staking. diff --git a/pallets/dapps-staking/src/benchmarking.rs b/pallets/dapps-staking/src/benchmarking.rs new file mode 100644 index 0000000000..3ad2543ea4 --- /dev/null +++ b/pallets/dapps-staking/src/benchmarking.rs @@ -0,0 +1,320 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . 
+ +#![cfg(feature = "runtime-benchmarks")] + +use super::*; +use crate::Pallet as DappsStaking; + +use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_support::traits::{Get, OnFinalize, OnInitialize}; +use frame_system::{Pallet as System, RawOrigin}; +use sp_runtime::traits::{Bounded, One, TrailingZeroInput}; + +const SEED: u32 = 9000; +const STAKER_BLOCK_REWARD: u32 = 1234u32; +const DAPP_BLOCK_REWARD: u32 = 9876u32; + +/// Used to prepare Dapps staking for testing. +/// Resets all existing storage ensuring a clean run for the code that follows. +/// +/// Also initializes the first block which should start a new era. +fn initialize() { + // Remove everything from storage. + let _ = Ledger::::clear(u32::MAX, None); + let _ = RegisteredDevelopers::::clear(u32::MAX, None); + let _ = RegisteredDapps::::clear(u32::MAX, None); + let _ = GeneralEraInfo::::clear(u32::MAX, None); + let _ = ContractEraStake::::clear(u32::MAX, None); + let _ = GeneralStakerInfo::::clear(u32::MAX, None); + CurrentEra::::kill(); + BlockRewardAccumulator::::kill(); + + // Initialize the first block. + payout_block_rewards::(); + DappsStaking::::on_initialize(1u32.into()); +} + +/// Generate a unique smart contract using the provided index as a sort-of identifier +fn smart_contract(index: u8) -> T::SmartContract { + // This is a hacky approach to provide different smart contracts without touching the smart contract trait. + // In case this proves troublesome in the future, recommendation is to just replace it with + // runtime-benchmarks only trait that allows us to construct an arbitrary valid smart contract instance. 
+ let mut encoded_smart_contract = T::SmartContract::default().encode(); + *encoded_smart_contract.last_mut().unwrap() = index; + + Decode::decode(&mut TrailingZeroInput::new(encoded_smart_contract.as_ref())) + .expect("Shouldn't occur as long as EVM is the default type.") +} + +/// Payout block rewards to stakers & dapps +fn payout_block_rewards() { + DappsStaking::::rewards( + T::Currency::issue(STAKER_BLOCK_REWARD.into()), + T::Currency::issue(DAPP_BLOCK_REWARD.into()), + ); +} + +/// Assert that the last event equals the provided one. +fn assert_last_event(generic_event: ::RuntimeEvent) { + frame_system::Pallet::::assert_last_event(generic_event.into()); +} + +/// Advance to the specified era, block by block. +fn advance_to_era(n: EraIndex) { + while DappsStaking::::current_era() < n { + DappsStaking::::on_finalize(System::::block_number()); + System::::set_block_number(System::::block_number() + One::one()); + // This is performed outside of dapps staking but we expect it before on_initialize + payout_block_rewards::(); + DappsStaking::::on_initialize(System::::block_number()); + } +} + +/// Used to register a contract by a developer account. +/// +/// Registered contract is returned. +fn register_contract( + index: u8, +) -> Result<(T::AccountId, T::SmartContract), &'static str> { + let developer: T::AccountId = account("developer", index.into(), SEED); + let smart_contract = smart_contract::(index); + T::Currency::make_free_balance_be(&developer, BalanceOf::::max_value()); + DappsStaking::::register( + RawOrigin::Root.into(), + developer.clone(), + smart_contract.clone(), + )?; + + Ok((developer, smart_contract)) +} + +/// Used to bond_and_stake the given contract with the specified amount of stakers. +/// Method will create new staker accounts using the provided seed. +/// +/// Returns all created staker accounts in a vector. 
+fn prepare_bond_and_stake( + number_of_stakers: u32, + contract_id: &T::SmartContract, + seed: u32, +) -> Result, &'static str> { + let stake_balance = T::MinimumStakingAmount::get(); // maybe make this an argument? + let mut stakers = Vec::new(); + + for id in 0..number_of_stakers { + let staker_acc: T::AccountId = account("pre_staker", id, seed); + stakers.push(staker_acc.clone()); + T::Currency::make_free_balance_be(&staker_acc, BalanceOf::::max_value()); + + DappsStaking::::bond_and_stake( + RawOrigin::Signed(staker_acc).into(), + contract_id.clone(), + stake_balance, + )?; + } + + Ok(stakers) +} + +benchmarks! { + + register { + initialize::(); + let developer_id = whitelisted_caller(); + let contract_id = T::SmartContract::default(); + T::Currency::make_free_balance_be(&developer_id, BalanceOf::::max_value()); + }: _(RawOrigin::Root, developer_id.clone(), contract_id.clone()) + verify { + assert_last_event::(Event::::NewContract(developer_id, contract_id).into()); + } + + unregister { + initialize::(); + let (developer_id, contract_id) = register_contract::(1)?; + prepare_bond_and_stake::(1, &contract_id, SEED)?; + + }: _(RawOrigin::Root, contract_id.clone()) + verify { + assert_last_event::(Event::::ContractRemoved(developer_id, contract_id).into()); + } + + withdraw_from_unregistered { + initialize::(); + let (developer, contract_id) = register_contract::(1)?; + let stakers = prepare_bond_and_stake::(1, &contract_id, SEED)?; + let staker = stakers[0].clone(); + let stake_amount = BalanceOf::::max_value() / 2u32.into(); + + DappsStaking::::bond_and_stake(RawOrigin::Signed(staker.clone()).into(), contract_id.clone(), stake_amount)?; + DappsStaking::::unregister(RawOrigin::Root.into(), contract_id.clone())?; + }: _(RawOrigin::Signed(staker.clone()), contract_id.clone()) + verify { + let staker_info = DappsStaking::::staker_info(&staker, &contract_id); + assert!(staker_info.latest_staked_value().is_zero()); + } + + bond_and_stake { + initialize::(); + + let 
(_, contract_id) = register_contract::(1)?; + + let staker = whitelisted_caller(); + let _ = T::Currency::make_free_balance_be(&staker, BalanceOf::::max_value()); + let amount = BalanceOf::::max_value() / 2u32.into(); + + }: _(RawOrigin::Signed(staker.clone()), contract_id.clone(), amount) + verify { + assert_last_event::(Event::::BondAndStake(staker, contract_id, amount).into()); + } + + unbond_and_unstake { + initialize::(); + + let (_, contract_id) = register_contract::(1)?; + + let staker = whitelisted_caller(); + let _ = T::Currency::make_free_balance_be(&staker, BalanceOf::::max_value()); + let amount = BalanceOf::::max_value() / 2u32.into(); + + DappsStaking::::bond_and_stake(RawOrigin::Signed(staker.clone()).into(), contract_id.clone(), amount)?; + + }: _(RawOrigin::Signed(staker.clone()), contract_id.clone(), amount) + verify { + assert_last_event::(Event::::UnbondAndUnstake(staker, contract_id, amount).into()); + } + + withdraw_unbonded { + initialize::(); + + let (_, contract_id) = register_contract::(1)?; + + let staker = whitelisted_caller(); + let _ = T::Currency::make_free_balance_be(&staker, BalanceOf::::max_value()); + let stake_amount = BalanceOf::::max_value() / 2u32.into(); + let unstake_amount = stake_amount / 2u32.into(); + + DappsStaking::::bond_and_stake(RawOrigin::Signed(staker.clone()).into(), contract_id.clone(), stake_amount)?; + DappsStaking::::unbond_and_unstake(RawOrigin::Signed(staker.clone()).into(), contract_id, unstake_amount)?; + + let current_era = DappsStaking::::current_era(); + advance_to_era::(current_era + 1 + T::UnbondingPeriod::get()); + + }: _(RawOrigin::Signed(staker.clone())) + verify { + assert_last_event::(Event::::Withdrawn(staker, unstake_amount).into()); + } + + nomination_transfer { + initialize::(); + + let (_, origin_contract_id) = register_contract::(1)?; + let (_, target_contract_id) = register_contract::(2)?; + + let staker = prepare_bond_and_stake::(1, &origin_contract_id, SEED)?[0].clone(); + + }: 
_(RawOrigin::Signed(staker.clone()), origin_contract_id.clone(), T::MinimumStakingAmount::get(), target_contract_id.clone()) + verify { + assert_last_event::(Event::::NominationTransfer(staker, origin_contract_id, T::MinimumStakingAmount::get(), target_contract_id).into()); + } + + claim_staker_with_restake { + initialize::(); + let (_, contract_id) = register_contract::(1)?; + + let claim_era = DappsStaking::::current_era(); + let stakers = prepare_bond_and_stake::(1, &contract_id, SEED)?; + let staker = stakers[0].clone(); + + DappsStaking::::set_reward_destination(RawOrigin::Signed(staker.clone()).into(), RewardDestination::StakeBalance)?; + advance_to_era::(claim_era + 1u32); + + }: claim_staker(RawOrigin::Signed(staker.clone()), contract_id.clone()) + verify { + let mut staker_info = DappsStaking::::staker_info(&staker, &contract_id); + let (era, _) = staker_info.claim(); + assert!(era > claim_era); + } + + claim_staker_without_restake { + initialize::(); + let (_, contract_id) = register_contract::(1)?; + + let claim_era = DappsStaking::::current_era(); + let stakers = prepare_bond_and_stake::(1, &contract_id, SEED)?; + let staker = stakers[0].clone(); + + DappsStaking::::set_reward_destination(RawOrigin::Signed(staker.clone()).into(), RewardDestination::FreeBalance)?; + advance_to_era::(claim_era + 1u32); + + }: claim_staker(RawOrigin::Signed(staker.clone()), contract_id.clone()) + verify { + let mut staker_info = DappsStaking::::staker_info(&staker, &contract_id); + let (era, _) = staker_info.claim(); + assert!(era > claim_era); + } + + claim_dapp { + initialize::(); + let (developer, contract_id) = register_contract::(1)?; + + let claim_era = DappsStaking::::current_era(); + prepare_bond_and_stake::(1, &contract_id, SEED)?; + advance_to_era::(claim_era + 1u32); + + }: _(RawOrigin::Signed(developer.clone()), contract_id.clone(), claim_era) + verify { + let staking_info = DappsStaking::::contract_stake_info(&contract_id, claim_era).unwrap(); + 
assert!(staking_info.contract_reward_claimed); + } + + force_new_era { + }: _(RawOrigin::Root) + + maintenance_mode { + }: _(RawOrigin::Root, true) + + set_reward_destination { + initialize::(); + + let option = RewardDestination::FreeBalance; + let (_, contract_id) = register_contract::(1)?; + + let stakers = prepare_bond_and_stake::(1, &contract_id, SEED)?; + let staker = stakers[0].clone(); + }: _(RawOrigin::Signed(staker.clone()), option) + verify { + assert_last_event::(Event::::RewardDestination(staker, option).into()); + } + +} + +#[cfg(test)] +mod tests { + use crate::mock; + use sp_io::TestExternalities; + + pub fn new_test_ext() -> TestExternalities { + mock::ExternalityBuilder::build() + } +} + +impl_benchmark_test_suite!( + DappsStaking, + crate::benchmarking::tests::new_test_ext(), + crate::mock::TestRuntime, +); diff --git a/pallets/dapps-staking/src/lib.rs b/pallets/dapps-staking/src/lib.rs new file mode 100644 index 0000000000..ec400687f4 --- /dev/null +++ b/pallets/dapps-staking/src/lib.rs @@ -0,0 +1,590 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +//! # Dapps Staking Pallet +//! +//! - [`Config`] +//! +//! ## Overview +//! +//! Pallet that implements dapps staking protocol. +//! +//! 
Dapps staking protocol is a completely decentralized & innovative approach to reward developers for their contribution to the Astar/Shiden ecosystem. +//! Stakers can pick a dapp and nominate it for rewards by locking their tokens. Dapps will be rewarded, based on the proportion of locked tokens. +//! Stakers are also rewarded, based on the total amount they've locked (invariant of the dapp they staked on). +//! +//! Rewards are accumulated throughout an **era** and when **era** finishes, both stakers and developers can claim their rewards for that era. +//! This is a continuous process. Rewards can be claimed even for eras which are older than the last one (no limit at the moment). +//! +//! Reward claiming isn't automated since the whole process is done **on-chain** and is fully decentralized. +//! Both stakers and developers are responsible for claiming their own rewards. +//! +//! +//! ## Interface +//! +//! ### Dispatchable Function +//! +//! - `register` - used to register a new contract for dapps staking +//! - `unregister` - used to unregister contract from dapps staking, making it ineligible for receiving future rewards +//! - `withdraw_from_unregistered` - used by stakers to withdraw their stake from an unregistered contract (no unbonding period) +//! - `bond_and_stake` - basic call for nominating a dapp and locking stakers tokens into dapps staking +//! - `unbond_and_unstake` - removes nomination from the contract, starting the unbonding process for the unstaked funds +//! - `withdraw_unbonded` - withdraws all funds that have completed the unbonding period +//! - `nomination_transfer` - transfer nomination from one contract to another contract (avoids unbonding period) +//! - `claim_staker` - claims staker reward for a single era +//! - `claim_dapp` - claims dapp rewards for the specified era +//! - `force_new_era` - forces new era on the start of the next block +//! - `maintenance_mode` - enables or disables pallet maintenance mode +//! 
- `set_reward_destination` - sets reward destination for the staker rewards +//! - `set_contract_stake_info` - root-only call to set storage value (used for fixing corrupted data) +//! - `burn_stale_reward` - root-only call to burn unclaimed, stale rewards from unregistered contracts +//! +//! User is encouraged to refer to specific function implementations for more comprehensive documentation. +//! +//! ### Other +//! +//! - `on_initialize` - part of `Hooks` trait, it's important to call this per block since it handles reward snapshots and era advancement. +//! - `account_id` - returns pallet's account Id +//! - `ensure_pallet_enabled` - checks whether pallet is in maintenance mode or not and returns appropriate `Result` +//! - `rewards` - used to deposit staker and dapps rewards into dApps staking reward pool +//! - `tvl` - total value locked in dApps staking (might differ from total staked value) +//! +#![cfg_attr(not(feature = "std"), no_std)] + +use frame_support::traits::Currency; +use frame_system::{self as system}; +use parity_scale_codec::{Decode, Encode, HasCompact, MaxEncodedLen}; +use scale_info::TypeInfo; +use sp_runtime::{ + traits::{AtLeast32BitUnsigned, Zero}, + RuntimeDebug, +}; +use sp_std::{ops::Add, prelude::*}; + +pub mod pallet; +pub mod weights; + +#[cfg(any(feature = "runtime-benchmarks"))] +pub mod benchmarking; +#[cfg(test)] +mod mock; +#[cfg(test)] +mod testing_utils; +#[cfg(test)] +mod tests; +#[cfg(test)] +mod tests_lib; + +pub use pallet::pallet::*; +pub use weights::WeightInfo; + +pub type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; + +/// Counter for the number of eras that have passed. +pub type EraIndex = u32; + +// This represents the max assumed vector length that any storage item should have. +// In particular, this relates to `UnbondingInfo` and `StakerInfo`. +// In structs which are bound in size, `MaxEncodedLen` can just be derived but that's not the case for standard `vec`. 
+// To fix this 100% correctly, we'd need to do one of the following: +// +// - Use `BoundedVec` instead of `Vec` and do storage migration +// - Introduce a new type `S: Get` into the aforementioned structs and use it to inject max allowed size, +// thus allowing us to correctly calculate max encoded len +// +// The issue with first approach is that it requires storage migration which we want to avoid +// unless it's really necessary. The issue with second approach is that it makes code much more +// difficult to work with since all of it will be ridden with injections of the `S` type. +// +// Since dApps staking has been stable for long time and there are plans to redesign & refactor it, +// doing neither of the above makes sense, timewise. So we use an assumption that vec length +// won't go over the following constant. +const MAX_ASSUMED_VEC_LEN: u32 = 10; + +/// DApp State descriptor +#[derive(Copy, Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)] +enum DAppState { + /// Contract is registered and active. + Registered, + /// Contract has been unregistered and is inactive. + /// Claim for past eras and unbonding is still possible but no additional staking can be done. + Unregistered(EraIndex), +} + +#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)] +pub struct DAppInfo { + /// Developer (owner) account + developer: AccountId, + /// Current DApp State + state: DAppState, +} + +impl DAppInfo { + /// Create new `DAppInfo` struct instance with the given developer and state `Registered` + fn new(developer: AccountId) -> Self { + Self { + developer, + state: DAppState::Registered, + } + } + + /// `true` if dApp has been unregistered, `false` otherwise + fn is_unregistered(&self) -> bool { + matches!(self.state, DAppState::Unregistered(_)) + } +} + +/// Mode of era-forcing. 
+#[derive(Copy, Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +pub enum Forcing { + /// Not forcing anything - just let whatever happen. + NotForcing, + /// Force a new era, then reset to `NotForcing` as soon as it is done. + /// Note that this will force to trigger an election until a new era is triggered, if the + /// election failed, the next session end will trigger a new election again, until success. + ForceNew, +} + +impl Default for Forcing { + fn default() -> Self { + Forcing::NotForcing + } +} + +/// A record of rewards allocated for stakers and dapps +#[derive(PartialEq, Eq, Clone, Default, Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)] +pub struct RewardInfo { + /// Total amount of rewards for stakers in an era + #[codec(compact)] + pub stakers: Balance, + /// Total amount of rewards for dapps in an era + #[codec(compact)] + pub dapps: Balance, +} + +/// A record for total rewards and total amount staked for an era +#[derive(PartialEq, Eq, Clone, Default, Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)] +pub struct EraInfo { + /// Total amount of earned rewards for an era + pub rewards: RewardInfo, + /// Total staked amount in an era + #[codec(compact)] + pub staked: Balance, + /// Total locked amount in an era + #[codec(compact)] + pub locked: Balance, +} + +/// Used to split total EraPayout among contracts. +/// Each tuple (contract, era) has this structure. +/// This will be used to reward contracts developer and his stakers. +#[derive(Clone, PartialEq, Encode, Decode, Default, RuntimeDebug, TypeInfo, MaxEncodedLen)] +pub struct ContractStakeInfo { + /// Total staked amount. 
+ #[codec(compact)] + pub total: Balance, + /// Total number of active stakers + #[codec(compact)] + number_of_stakers: u32, + /// Indicates whether rewards were claimed for this era or not + contract_reward_claimed: bool, +} + +/// Storage value representing the current Dapps staking pallet storage version. +/// Used by `on_runtime_upgrade` to determine whether a storage migration is needed or not. +#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +pub enum Version { + V1_0_0, + V2_0_0, + V3_0_0, + V4_0_0, +} + +impl Default for Version { + fn default() -> Self { + Version::V4_0_0 + } +} + +/// Used to represent how much was staked in a particular era. +/// E.g. `{staked: 1000, era: 5}` means that in era `5`, staked amount was 1000. +#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +pub struct EraStake { + /// Staked amount in era + #[codec(compact)] + staked: Balance, + /// Staked era + #[codec(compact)] + era: EraIndex, +} + +impl EraStake { + /// Create a new instance of `EraStake` with given values + fn new(staked: Balance, era: EraIndex) -> Self { + Self { staked, era } + } +} + +/// Used to provide a compact and bounded storage for information about stakes in unclaimed eras. +/// +/// In order to avoid creating a separate storage entry for each `(staker, contract, era)` triplet, +/// this struct is used to provide a more memory efficient solution. +/// +/// Basic idea is to store `EraStake` structs into a vector from which a complete +/// picture of **unclaimed eras** and stakes can be constructed. +/// +/// # Example +/// For simplicity, the following example will represent `EraStake` using `` notation. +/// Let us assume we have the following vector in `StakerInfo` struct. +/// +/// `[<5, 1000>, <6, 1500>, <8, 2100>, <9, 0>, <11, 500>]` +/// +/// This tells us which eras are unclaimed and how much it was staked in each era. 
+/// The interpretation is the following: +/// 1. In era **5**, staked amount was **1000** (interpreted from `<5, 1000>`) +/// 2. In era **6**, staker staked additional **500**, increasing total staked amount to **1500** +/// 3. No entry for era **7** exists which means there were no changes from the former entry. +/// This means that in era **7**, staked amount was also **1500** +/// 4. In era **8**, staker staked an additional **600**, increasing total stake to **2100** +/// 5. In era **9**, staker unstaked everything from the contract (interpreted from `<9, 0>`) +/// 6. No changes were made in era **10** so we can interpret this same as the previous entry which means **0** staked amount. +/// 7. In era **11**, staker staked **500** on the contract, making his stake active again after 2 eras of inactivity. +/// +/// **NOTE:** It is important to understand that staker **DID NOT** claim any rewards during this period. +/// +#[derive(Encode, Decode, Clone, Default, PartialEq, Eq, RuntimeDebug, TypeInfo)] +pub struct StakerInfo { + // Size of this list would be limited by a configurable constant + stakes: Vec>, +} + +impl MaxEncodedLen for StakerInfo { + // This is just an assumption, will be calculated properly in the future. See the comment for `MAX_ASSUMED_VEC_LEN`. + fn max_encoded_len() -> usize { + parity_scale_codec::Compact(MAX_ASSUMED_VEC_LEN) + .encoded_size() + .saturating_add( + (MAX_ASSUMED_VEC_LEN as usize) + .saturating_mul(EraStake::::max_encoded_len()), + ) + } +} + +impl StakerInfo { + /// `true` if no active stakes and unclaimed eras exist, `false` otherwise + fn is_empty(&self) -> bool { + self.stakes.is_empty() + } + + /// number of `EraStake` chunks + fn len(&self) -> u32 { + self.stakes.len() as u32 + } + + /// Stakes some value in the specified era. + /// + /// User should ensure that given era is either equal or greater than the + /// latest available era in the staking info. 
+ /// + /// # Example + /// + /// The following example demonstrates how internal vector changes when `stake` is called: + /// + /// `stakes: [<5, 1000>, <7, 1300>]` + /// * `stake(7, 100)` will result in `[<5, 1000>, <7, 1400>]` + /// * `stake(9, 200)` will result in `[<5, 1000>, <7, 1400>, <9, 1600>]` + /// + fn stake(&mut self, current_era: EraIndex, value: Balance) -> Result<(), &str> { + if let Some(era_stake) = self.stakes.last_mut() { + if era_stake.era > current_era { + return Err("Unexpected era"); + } + + let new_stake_value = era_stake.staked.saturating_add(value); + + if current_era == era_stake.era { + *era_stake = EraStake::new(new_stake_value, current_era) + } else { + self.stakes + .push(EraStake::new(new_stake_value, current_era)) + } + } else { + self.stakes.push(EraStake::new(value, current_era)); + } + + Ok(()) + } + + /// Unstakes some value in the specified era. + /// + /// User should ensure that given era is either equal or greater than the + /// latest available era in the staking info. + /// + /// # Example 1 + /// + /// `stakes: [<5, 1000>, <7, 1300>]` + /// * `unstake(7, 100)` will result in `[<5, 1000>, <7, 1200>]` + /// * `unstake(9, 400)` will result in `[<5, 1000>, <7, 1200>, <9, 800>]` + /// * `unstake(10, 800)` will result in `[<5, 1000>, <7, 1200>, <9, 800>, <10, 0>]` + /// + /// # Example 2 + /// + /// `stakes: [<5, 1000>]` + /// * `unstake(5, 1000)` will result in `[]` + /// + /// Note that if no unclaimed eras remain, vector will be cleared. 
+ /// + fn unstake(&mut self, current_era: EraIndex, value: Balance) -> Result<(), &str> { + if let Some(era_stake) = self.stakes.last_mut() { + if era_stake.era > current_era { + return Err("Unexpected era"); + } + + let new_stake_value = era_stake.staked.saturating_sub(value); + if current_era == era_stake.era { + *era_stake = EraStake::new(new_stake_value, current_era) + } else { + self.stakes + .push(EraStake::new(new_stake_value, current_era)) + } + + // Removes unstaked values if they're no longer valid for comprehension + if !self.stakes.is_empty() && self.stakes[0].staked.is_zero() { + self.stakes.remove(0); + } + } + + Ok(()) + } + + /// `Claims` the oldest era available for claiming. + /// In case valid era exists, returns `(claim era, staked amount)` tuple. + /// If no valid era exists, returns `(0, 0)` tuple. + /// + /// # Example + /// + /// The following example will demonstrate how the internal vec changes when `claim` is called consecutively. + /// + /// `stakes: [<5, 1000>, <7, 1300>, <8, 0>, <15, 3000>]` + /// + /// 1. `claim()` will return `(5, 1000)` + /// Internal vector is modified to `[<6, 1000>, <7, 1300>, <8, 0>, <15, 3000>]` + /// + /// 2. `claim()` will return `(6, 1000)`. + /// Internal vector is modified to `[<7, 1300>, <8, 0>, <15, 3000>]` + /// + /// 3. `claim()` will return `(7, 1300)`. + /// Internal vector is modified to `[<15, 3000>]` + /// Note that `0` staked period is discarded since nothing can be claimed there. + /// + /// 4. `claim()` will return `(15, 3000)`. 
+    ///    Internal vector is modified to `[<16, 3000>]`
+    ///
+    /// Repeated calls would continue to modify vector following the same rule as in *4.*
+    ///
+    fn claim(&mut self) -> (EraIndex, Balance) {
+        if let Some(era_stake) = self.stakes.first() {
+            let era_stake = *era_stake;
+
+            if self.stakes.len() == 1 || self.stakes[1].era > era_stake.era + 1 {
+                self.stakes[0] = EraStake {
+                    staked: era_stake.staked,
+                    era: era_stake.era.saturating_add(1),
+                }
+            } else {
+                // in case: self.stakes[1].era == era_stake.era + 1
+                self.stakes.remove(0);
+            }
+
+            // Removes unstaked values if they're no longer valid for comprehension
+            if !self.stakes.is_empty() && self.stakes[0].staked.is_zero() {
+                self.stakes.remove(0);
+            }
+
+            (era_stake.era, era_stake.staked)
+        } else {
+            (0, Zero::zero())
+        }
+    }
+
+    /// Latest staked value.
+    /// E.g. if staker is fully unstaked, this will return `Zero`.
+    /// Otherwise returns a non-zero balance.
+    pub fn latest_staked_value(&self) -> Balance {
+        self.stakes.last().map_or(Zero::zero(), |x| x.staked)
+    }
+}
+
+/// Represents a balance amount undergoing the unbonding process.
+/// Since unbonding takes time, it's important to keep track of when and how much was unbonded.
+#[derive(
+    Clone, Copy, PartialEq, Encode, Decode, Default, RuntimeDebug, TypeInfo, MaxEncodedLen,
+)]
+pub struct UnlockingChunk<Balance: MaxEncodedLen> {
+    /// Amount being unlocked
+    #[codec(compact)]
+    amount: Balance,
+    /// Era in which the amount will become unlocked and can be withdrawn.
+    #[codec(compact)]
+    unlock_era: EraIndex,
+}
+
+impl<Balance> UnlockingChunk<Balance>
+where
+    Balance: Add<Output = Balance> + Copy + MaxEncodedLen,
+{
+    // Adds the specified amount to this chunk
+    fn add_amount(&mut self, amount: Balance) {
+        self.amount = self.amount + amount
+    }
+}
+
+/// Contains unlocking chunks.
+/// This is a convenience struct that provides various utility methods to help with unbonding handling.
+#[derive(Clone, PartialEq, Encode, Decode, Default, RuntimeDebug, TypeInfo)]
+pub struct UnbondingInfo<Balance: AtLeast32BitUnsigned + Default + Copy + MaxEncodedLen> {
+    // Vector of unlocking chunks. Sorted in ascending order in respect to unlock_era.
+    unlocking_chunks: Vec<UnlockingChunk<Balance>>,
+}
+
+impl<Balance: AtLeast32BitUnsigned + Default + Copy + MaxEncodedLen> MaxEncodedLen
+    for UnbondingInfo<Balance>
+{
+    // This is just an assumption, will be calculated properly in the future. See the comment for `MAX_ASSUMED_VEC_LEN`.
+    fn max_encoded_len() -> usize {
+        parity_scale_codec::Compact(MAX_ASSUMED_VEC_LEN)
+            .encoded_size()
+            .saturating_add(
+                (MAX_ASSUMED_VEC_LEN as usize)
+                    .saturating_mul(UnlockingChunk::<Balance>::max_encoded_len()),
+            )
+    }
+}
+
+impl<Balance> UnbondingInfo<Balance>
+where
+    Balance: AtLeast32BitUnsigned + Default + Copy + MaxEncodedLen,
+{
+    /// Returns total number of unlocking chunks.
+    fn len(&self) -> u32 {
+        self.unlocking_chunks.len() as u32
+    }
+
+    /// True if no unlocking chunks exist, false otherwise.
+    fn is_empty(&self) -> bool {
+        self.unlocking_chunks.is_empty()
+    }
+
+    /// Returns sum of all unlocking chunks.
+    fn sum(&self) -> Balance {
+        self.unlocking_chunks
+            .iter()
+            .map(|chunk| chunk.amount)
+            .reduce(|c1, c2| c1 + c2)
+            .unwrap_or_default()
+    }
+
+    /// Adds a new unlocking chunk to the vector, preserving the unlock_era based ordering.
+    fn add(&mut self, chunk: UnlockingChunk<Balance>) {
+        // It is possible that the unbonding period changes so we need to account for that
+        match self
+            .unlocking_chunks
+            .binary_search_by(|x| x.unlock_era.cmp(&chunk.unlock_era))
+        {
+            // Merge with existing chunk if unlock_eras match
+            Ok(pos) => self.unlocking_chunks[pos].add_amount(chunk.amount),
+            // Otherwise insert where it should go. Note that this will in almost all cases return the last index.
+            Err(pos) => self.unlocking_chunks.insert(pos, chunk),
+        }
+    }
+
+    /// Partitions the unlocking chunks into two groups:
+    ///
+    /// First group includes all chunks which have unlock era lesser or equal to the specified era.
+    /// Second group includes all the rest.
+    ///
+    /// Order of chunks is preserved in the two new structs.
+    fn partition(self, era: EraIndex) -> (Self, Self) {
+        let (matching_chunks, other_chunks): (
+            Vec<UnlockingChunk<Balance>>,
+            Vec<UnlockingChunk<Balance>>,
+        ) = self
+            .unlocking_chunks
+            .iter()
+            .partition(|chunk| chunk.unlock_era <= era);
+
+        (
+            Self {
+                unlocking_chunks: matching_chunks,
+            },
+            Self {
+                unlocking_chunks: other_chunks,
+            },
+        )
+    }
+
+    #[cfg(test)]
+    /// Return clone of the internal vector. Should only be used for testing.
+    fn vec(&self) -> Vec<UnlockingChunk<Balance>> {
+        self.unlocking_chunks.clone()
+    }
+}
+
+/// Instruction on how to handle reward payout for stakers.
+/// In order to make staking more competitive, majority of stakers will want to
+/// automatically restake anything they earn.
+#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
+pub enum RewardDestination {
+    /// Rewards are transferred to stakers free balance without any further action.
+    FreeBalance,
+    /// Rewards are transferred to stakers balance and are immediately re-staked
+    /// on the contract from which the reward was received.
+    StakeBalance,
+}
+
+impl Default for RewardDestination {
+    fn default() -> Self {
+        RewardDestination::StakeBalance
+    }
+}
+
+/// Contains information about account's locked & unbonding balances.
+#[derive(Clone, PartialEq, Encode, Decode, Default, RuntimeDebug, TypeInfo, MaxEncodedLen)]
+pub struct AccountLedger<Balance: AtLeast32BitUnsigned + Default + Copy + MaxEncodedLen> {
+    /// Total balance locked.
+    #[codec(compact)]
+    pub locked: Balance,
+    /// Information about unbonding chunks.
+    unbonding_info: UnbondingInfo<Balance>,
+    /// Instruction on how to handle reward payout
+    reward_destination: RewardDestination,
+}
+
+impl<Balance: AtLeast32BitUnsigned + Default + Copy + MaxEncodedLen> AccountLedger<Balance> {
+    /// `true` if ledger is empty (no locked funds, no unbonding chunks), `false` otherwise.
+ pub fn is_empty(&self) -> bool { + self.locked.is_zero() && self.unbonding_info.is_empty() + } + + /// Configured reward destination + pub fn reward_destination(&self) -> RewardDestination { + self.reward_destination + } +} diff --git a/pallets/dapps-staking/src/mock.rs b/pallets/dapps-staking/src/mock.rs new file mode 100644 index 0000000000..d4df2cbc76 --- /dev/null +++ b/pallets/dapps-staking/src/mock.rs @@ -0,0 +1,280 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +use crate::{self as pallet_dapps_staking, weights}; + +use frame_support::{ + construct_runtime, parameter_types, + traits::{Currency, OnFinalize, OnInitialize}, + weights::Weight, + PalletId, +}; +use sp_core::{H160, H256}; + +use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; +use sp_io::TestExternalities; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, ConstU32, IdentityLookup}, +}; + +pub(crate) type AccountId = u64; +pub(crate) type BlockNumber = u64; +pub(crate) type Balance = u128; +pub(crate) type EraIndex = u32; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +/// Value shouldn't be less than 2 for testing purposes, otherwise we cannot test certain corner cases. 
+pub(crate) const EXISTENTIAL_DEPOSIT: Balance = 2; +pub(crate) const MAX_NUMBER_OF_STAKERS: u32 = 4; +/// Value shouldn't be less than 2 for testing purposes, otherwise we cannot test certain corner cases. +pub(crate) const MINIMUM_STAKING_AMOUNT: Balance = 10; +pub(crate) const MINIMUM_REMAINING_AMOUNT: Balance = 1; +pub(crate) const MAX_UNLOCKING_CHUNKS: u32 = 4; +pub(crate) const UNBONDING_PERIOD: EraIndex = 3; +pub(crate) const MAX_ERA_STAKE_VALUES: u32 = 8; +pub(crate) const REWARD_RETENTION_PERIOD: u32 = 2; + +// Do note that this needs to at least be 3 for tests to be valid. It can be greater but not smaller. +pub(crate) const BLOCKS_PER_ERA: BlockNumber = 3; + +pub(crate) const REGISTER_DEPOSIT: Balance = 10; + +pub(crate) const STAKER_BLOCK_REWARD: Balance = 531911; +pub(crate) const DAPP_BLOCK_REWARD: Balance = 773333; + +construct_runtime!( + pub struct TestRuntime + where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system, + Balances: pallet_balances, + Timestamp: pallet_timestamp, + DappsStaking: pallet_dapps_staking, + } +); + +parameter_types! 
{ + pub const BlockHashCount: u64 = 250; + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(Weight::from_ref_time(1024)); +} + +impl frame_system::Config for TestRuntime { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type RuntimeOrigin = RuntimeOrigin; + type Index = u64; + type RuntimeCall = RuntimeCall; + type BlockNumber = BlockNumber; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Header = Header; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = BlockHashCount; + type DbWeight = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +parameter_types! { + pub const MaxLocks: u32 = 4; + pub const ExistentialDeposit: Balance = EXISTENTIAL_DEPOSIT; +} + +impl pallet_balances::Config for TestRuntime { + type MaxLocks = MaxLocks; + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type Balance = Balance; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); +} + +parameter_types! { + pub const MinimumPeriod: u64 = 3; +} + +impl pallet_timestamp::Config for TestRuntime { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = MinimumPeriod; + type WeightInfo = (); +} + +parameter_types! 
{ + pub const RegisterDeposit: Balance = REGISTER_DEPOSIT; + pub const BlockPerEra: BlockNumber = BLOCKS_PER_ERA; + pub const MaxNumberOfStakersPerContract: u32 = MAX_NUMBER_OF_STAKERS; + pub const MinimumStakingAmount: Balance = MINIMUM_STAKING_AMOUNT; + pub const DappsStakingPalletId: PalletId = PalletId(*b"mokdpstk"); + pub const MinimumRemainingAmount: Balance = MINIMUM_REMAINING_AMOUNT; + pub const MaxUnlockingChunks: u32 = MAX_UNLOCKING_CHUNKS; + pub const UnbondingPeriod: EraIndex = UNBONDING_PERIOD; + pub const MaxEraStakeValues: u32 = MAX_ERA_STAKE_VALUES; +} + +impl pallet_dapps_staking::Config for TestRuntime { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type BlockPerEra = BlockPerEra; + type RegisterDeposit = RegisterDeposit; + type SmartContract = MockSmartContract; + type WeightInfo = weights::SubstrateWeight; + type MaxNumberOfStakersPerContract = MaxNumberOfStakersPerContract; + type MinimumStakingAmount = MinimumStakingAmount; + type PalletId = DappsStakingPalletId; + type MinimumRemainingAmount = MinimumRemainingAmount; + type MaxUnlockingChunks = MaxUnlockingChunks; + type UnbondingPeriod = UnbondingPeriod; + type MaxEraStakeValues = MaxEraStakeValues; + type UnregisteredDappRewardRetention = ConstU32; +} + +#[derive( + PartialEq, Eq, Copy, Clone, Encode, Decode, Debug, scale_info::TypeInfo, MaxEncodedLen, +)] +pub enum MockSmartContract { + Evm(sp_core::H160), + Wasm(AccountId), +} + +impl Default for MockSmartContract { + fn default() -> Self { + MockSmartContract::Evm(H160::repeat_byte(0x01)) + } +} + +pub struct ExternalityBuilder; + +impl ExternalityBuilder { + pub fn build() -> TestExternalities { + let mut storage = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + + pallet_balances::GenesisConfig:: { + balances: vec![ + (1, 9000), + (2, 800), + (3, 10000), + (4, 4900), + (5, 3800), + (6, 10), + (7, 1000), + (8, 2000), + (9, 10000), + (10, 300), + (11, 400), + (20, 10), + (540, 
EXISTENTIAL_DEPOSIT), + (1337, 1_000_000_000_000), + ], + } + .assimilate_storage(&mut storage) + .ok(); + + let mut ext = TestExternalities::from(storage); + ext.execute_with(|| System::set_block_number(1)); + ext + } +} + +/// Used to run to the specified block number +pub fn run_to_block(n: u64) { + while System::block_number() < n { + DappsStaking::on_finalize(System::block_number()); + System::set_block_number(System::block_number() + 1); + // This is performed outside of dapps staking but we expect it before on_initialize + payout_block_rewards(); + DappsStaking::on_initialize(System::block_number()); + } +} + +/// Used to run the specified number of blocks +pub fn run_for_blocks(n: u64) { + run_to_block(System::block_number() + n); +} + +/// Advance blocks to the beginning of an era. +/// +/// Function has no effect if era is already passed. +pub fn advance_to_era(n: EraIndex) { + while DappsStaking::current_era() < n { + run_for_blocks(1); + } +} + +/// Initialize first block. +/// This method should only be called once in a UT otherwise the first block will get initialized multiple times. +pub fn initialize_first_block() { + // This assert prevents method misuse + assert_eq!(System::block_number(), 1 as BlockNumber); + + // This is performed outside of dapps staking but we expect it before on_initialize + payout_block_rewards(); + DappsStaking::on_initialize(System::block_number()); + run_to_block(2); +} + +/// Returns total block rewards that goes to dapps-staking. +/// Contains both `dapps` reward and `stakers` reward. 
+pub fn joint_block_reward() -> Balance { + STAKER_BLOCK_REWARD + DAPP_BLOCK_REWARD +} + +/// Payout block rewards to stakers & dapps +fn payout_block_rewards() { + DappsStaking::rewards( + Balances::issue(STAKER_BLOCK_REWARD.into()), + Balances::issue(DAPP_BLOCK_REWARD.into()), + ); +} + +// Used to get a vec of all dapps staking events +pub fn dapps_staking_events() -> Vec> { + System::events() + .into_iter() + .map(|r| r.event) + .filter_map(|e| { + if let RuntimeEvent::DappsStaking(inner) = e { + Some(inner) + } else { + None + } + }) + .collect() +} diff --git a/pallets/dapps-staking/src/pallet/mod.rs b/pallets/dapps-staking/src/pallet/mod.rs new file mode 100644 index 0000000000..cfd098760e --- /dev/null +++ b/pallets/dapps-staking/src/pallet/mod.rs @@ -0,0 +1,1311 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +//! Dapps staking FRAME Pallet. 
+ +use super::*; +use frame_support::{ + dispatch::DispatchResult, + ensure, + pallet_prelude::*, + traits::{ + Currency, ExistenceRequirement, Get, Imbalance, LockIdentifier, LockableCurrency, + ReservableCurrency, WithdrawReasons, + }, + weights::Weight, + PalletId, +}; +use frame_system::{ensure_root, ensure_signed, pallet_prelude::*}; +use sp_runtime::{ + traits::{AccountIdConversion, Saturating, Zero}, + Perbill, +}; +use sp_std::{convert::From, mem}; + +const STAKING_ID: LockIdentifier = *b"dapstake"; + +#[frame_support::pallet] +#[allow(clippy::module_inception)] +pub mod pallet { + use super::*; + + /// The balance type of this pallet. + pub type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; + + #[pallet::pallet] + #[pallet::generate_store(pub(crate) trait Store)] + pub struct Pallet(PhantomData); + + // Negative imbalance type of this pallet. + type NegativeImbalanceOf = <::Currency as Currency< + ::AccountId, + >>::NegativeImbalance; + + #[pallet::config] + pub trait Config: frame_system::Config { + /// The staking balance. + type Currency: LockableCurrency + + ReservableCurrency; + + /// Describes smart contract in the context required by dapps staking. + type SmartContract: Default + Parameter + Member + MaxEncodedLen; + + /// Number of blocks per era. + #[pallet::constant] + type BlockPerEra: Get>; + + /// Deposit that will be reserved as part of new contract registration. + #[pallet::constant] + type RegisterDeposit: Get>; + + /// Maximum number of unique stakers per contract. + #[pallet::constant] + type MaxNumberOfStakersPerContract: Get; + + /// Minimum amount user must have staked on contract. + /// User can stake less if they already have the minimum staking amount staked on that particular contract. + #[pallet::constant] + type MinimumStakingAmount: Get>; + + /// Dapps staking pallet Id + #[pallet::constant] + type PalletId: Get; + + /// Minimum amount that should be left on staker account after staking. 
+ /// Serves as a safeguard to prevent users from locking their entire free balance. + #[pallet::constant] + type MinimumRemainingAmount: Get>; + + /// Max number of unlocking chunks per account Id <-> contract Id pairing. + /// If value is zero, unlocking becomes impossible. + #[pallet::constant] + type MaxUnlockingChunks: Get; + + /// Number of eras that need to pass until unstaked value can be withdrawn. + /// Current era is always counted as full era (regardless how much blocks are remaining). + /// When set to `0`, it's equal to having no unbonding period. + #[pallet::constant] + type UnbondingPeriod: Get; + + /// Max number of unique `EraStake` values that can exist for a `(staker, contract)` pairing. + /// When stakers claims rewards, they will either keep the number of `EraStake` values the same or they will reduce them by one. + /// Stakers cannot add an additional `EraStake` value by calling `bond&stake` or `unbond&unstake` if they've reached the max number of values. + /// + /// This ensures that history doesn't grow indefinitely - if there are too many chunks, stakers should first claim their former rewards + /// before adding additional `EraStake` values. + #[pallet::constant] + type MaxEraStakeValues: Get; + + /// Number of eras that need to pass until dApp rewards for the unregistered contracts can be burned. + /// Developer can still claim rewards after this period has passed, iff it hasn't been burned yet. + /// + /// For example, if retention is set to `2` and current era is `10`, it means that all unclaimed rewards bellow era `8` can be burned. + #[pallet::constant] + type UnregisteredDappRewardRetention: Get; + + /// The overarching event type. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + /// Weight information for extrinsics in this pallet. 
+ type WeightInfo: WeightInfo; + } + + /// Denotes whether pallet is disabled (in maintenance mode) or not + #[pallet::storage] + #[pallet::whitelist_storage] + #[pallet::getter(fn pallet_disabled)] + pub type PalletDisabled = StorageValue<_, bool, ValueQuery>; + + /// General information about the staker (non-smart-contract specific). + #[pallet::storage] + #[pallet::getter(fn ledger)] + pub type Ledger = + StorageMap<_, Blake2_128Concat, T::AccountId, AccountLedger>, ValueQuery>; + + /// The current era index. + #[pallet::storage] + #[pallet::whitelist_storage] + #[pallet::getter(fn current_era)] + pub type CurrentEra = StorageValue<_, EraIndex, ValueQuery>; + + /// Accumulator for block rewards during an era. It is reset at every new era + #[pallet::storage] + #[pallet::getter(fn block_reward_accumulator)] + pub type BlockRewardAccumulator = StorageValue<_, RewardInfo>, ValueQuery>; + + #[pallet::type_value] + pub fn ForceEraOnEmpty() -> Forcing { + Forcing::NotForcing + } + + /// Mode of era forcing. + #[pallet::storage] + #[pallet::whitelist_storage] + #[pallet::getter(fn force_era)] + pub type ForceEra = StorageValue<_, Forcing, ValueQuery, ForceEraOnEmpty>; + + /// Stores the block number of when the next era starts + #[pallet::storage] + #[pallet::whitelist_storage] + #[pallet::getter(fn next_era_starting_block)] + pub type NextEraStartingBlock = StorageValue<_, T::BlockNumber, ValueQuery>; + + /// Simple map where developer account points to their smart contract + #[pallet::storage] + #[pallet::getter(fn registered_contract)] + pub(crate) type RegisteredDevelopers = + StorageMap<_, Blake2_128Concat, T::AccountId, T::SmartContract>; + + /// Simple map where smart contract points to basic info about it (e.g. 
developer address, state) + #[pallet::storage] + #[pallet::getter(fn dapp_info)] + pub(crate) type RegisteredDapps = + StorageMap<_, Blake2_128Concat, T::SmartContract, DAppInfo>; + + /// General information about an era like TVL, total staked value, rewards. + #[pallet::storage] + #[pallet::getter(fn general_era_info)] + pub type GeneralEraInfo = + StorageMap<_, Twox64Concat, EraIndex, EraInfo>>; + + /// Staking information about contract in a particular era. + #[pallet::storage] + #[pallet::getter(fn contract_stake_info)] + pub type ContractEraStake = StorageDoubleMap< + _, + Blake2_128Concat, + T::SmartContract, + Twox64Concat, + EraIndex, + ContractStakeInfo>, + >; + + /// Info about stakers stakes on particular contracts. + #[pallet::storage] + #[pallet::getter(fn staker_info)] + pub type GeneralStakerInfo = StorageDoubleMap< + _, + Blake2_128Concat, + T::AccountId, + Blake2_128Concat, + T::SmartContract, + StakerInfo>, + ValueQuery, + >; + + /// Stores the current pallet storage version. + #[pallet::storage] + #[pallet::getter(fn storage_version)] + pub(crate) type StorageVersion = StorageValue<_, Version, ValueQuery>; + + #[pallet::event] + #[pallet::generate_deposit(pub(crate) fn deposit_event)] + pub enum Event { + /// Account has bonded and staked funds on a smart contract. + BondAndStake(T::AccountId, T::SmartContract, BalanceOf), + /// Account has unbonded & unstaked some funds. Unbonding process begins. + UnbondAndUnstake(T::AccountId, T::SmartContract, BalanceOf), + /// Account has fully withdrawn all staked amount from an unregistered contract. + WithdrawFromUnregistered(T::AccountId, T::SmartContract, BalanceOf), + /// Account has withdrawn unbonded funds. + Withdrawn(T::AccountId, BalanceOf), + /// New contract added for staking. + NewContract(T::AccountId, T::SmartContract), + /// Contract removed from dapps staking. + ContractRemoved(T::AccountId, T::SmartContract), + /// New dapps staking era. Distribute era rewards to contracts. 
+ NewDappStakingEra(EraIndex), + /// Reward paid to staker or developer. + Reward(T::AccountId, T::SmartContract, EraIndex, BalanceOf), + /// Maintenance mode has been enabled or disabled + MaintenanceMode(bool), + /// Reward handling modified + RewardDestination(T::AccountId, RewardDestination), + /// Nomination part has been transfered from one contract to another. + /// + /// \(staker account, origin smart contract, amount, target smart contract\) + NominationTransfer( + T::AccountId, + T::SmartContract, + BalanceOf, + T::SmartContract, + ), + /// Stale, unclaimed reward from an unregistered contract has been burned. + /// + /// \(developer account, smart contract, era, amount burned\) + StaleRewardBurned(T::AccountId, T::SmartContract, EraIndex, BalanceOf), + } + + #[pallet::error] + pub enum Error { + /// Disabled + Disabled, + /// No change in maintenance mode + NoMaintenanceModeChange, + /// Upgrade is too heavy, reduce the weight parameter. + UpgradeTooHeavy, + /// Can not stake with zero value. + StakingWithNoValue, + /// Can not stake with value less than minimum staking value + InsufficientValue, + /// Number of stakers per contract exceeded. + MaxNumberOfStakersExceeded, + /// Targets must be operated contracts + NotOperatedContract, + /// Contract isn't staked. + NotStakedContract, + /// Contract isn't unregistered. + NotUnregisteredContract, + /// Unclaimed rewards should be claimed before withdrawing stake. + UnclaimedRewardsRemaining, + /// Unstaking a contract with zero value + UnstakingWithNoValue, + /// There are no previously unbonded funds that can be unstaked and withdrawn. + NothingToWithdraw, + /// The contract is already registered by other account + AlreadyRegisteredContract, + /// This account was already used to register contract + AlreadyUsedDeveloperAccount, + /// Smart contract not owned by the account id. 
+ NotOwnedContract, + /// Report issue on github if this is ever emitted + UnknownEraReward, + /// Report issue on github if this is ever emitted + UnexpectedStakeInfoEra, + /// Contract has too many unlocking chunks. Withdraw the existing chunks if possible + /// or wait for current chunks to complete unlocking process to withdraw them. + TooManyUnlockingChunks, + /// Contract already claimed in this era and reward is distributed + AlreadyClaimedInThisEra, + /// Era parameter is out of bounds + EraOutOfBounds, + /// Too many active `EraStake` values for (staker, contract) pairing. + /// Claim existing rewards to fix this problem. + TooManyEraStakeValues, + /// Account is not actively staking + NotActiveStaker, + /// Transfering nomination to the same contract + NominationTransferToSameContract, + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_initialize(now: BlockNumberFor) -> Weight { + // As long as pallet is disabled, we shouldn't allow any storage modifications. + // This means we might prolong an era but it's acceptable. + // Runtime upgrade should be timed so we ensure that we complete it before + // a new era is triggered. This code is just a safety net to ensure nothing is broken + // if we fail to do that. 
+ if PalletDisabled::::get() { + return T::DbWeight::get().reads(1); + } + + let force_new_era = Self::force_era().eq(&Forcing::ForceNew); + let previous_era = Self::current_era(); + let next_era_starting_block = Self::next_era_starting_block(); + + // Value is compared to 1 since genesis block is ignored + if now >= next_era_starting_block || force_new_era || previous_era.is_zero() { + let blocks_per_era = T::BlockPerEra::get(); + let next_era = previous_era + 1; + CurrentEra::::put(next_era); + + NextEraStartingBlock::::put(now + blocks_per_era); + + let reward = BlockRewardAccumulator::::take(); + Self::reward_balance_snapshot(previous_era, reward); + let consumed_weight = Self::rotate_staking_info(previous_era); + + if force_new_era { + ForceEra::::put(Forcing::NotForcing); + } + + Self::deposit_event(Event::::NewDappStakingEra(next_era)); + + consumed_weight + T::DbWeight::get().reads_writes(5, 3) + } else { + T::DbWeight::get().reads(4) + } + } + } + + #[pallet::call] + impl Pallet { + /// Used to register contract for dapps staking. + /// The origin account used is treated as the `developer` account. + /// + /// Depending on the pallet configuration/state it is possible that developer needs to be whitelisted prior to registration. + /// + /// As part of this call, `RegisterDeposit` will be reserved from devs account. 
+ #[pallet::call_index(0)] + #[pallet::weight(T::WeightInfo::register())] + pub fn register( + origin: OriginFor, + developer: T::AccountId, + contract_id: T::SmartContract, + ) -> DispatchResultWithPostInfo { + Self::ensure_pallet_enabled()?; + ensure_root(origin)?; + + ensure!( + !RegisteredDevelopers::::contains_key(&developer), + Error::::AlreadyUsedDeveloperAccount, + ); + ensure!( + !RegisteredDapps::::contains_key(&contract_id), + Error::::AlreadyRegisteredContract, + ); + + T::Currency::reserve(&developer, T::RegisterDeposit::get())?; + + RegisteredDapps::::insert(contract_id.clone(), DAppInfo::new(developer.clone())); + RegisteredDevelopers::::insert(&developer, contract_id.clone()); + + Self::deposit_event(Event::::NewContract(developer, contract_id)); + + Ok(().into()) + } + + /// Unregister existing contract from dapps staking, making it ineligible for rewards from current era onwards. + /// This must be called by the root (at the moment). + /// + /// Deposit is returned to the developer but existing stakers should manually call `withdraw_from_unregistered` if they wish to to unstake. + /// + /// **Warning**: After this action ,contract can not be registered for dapps staking again. 
+ #[pallet::call_index(1)] + #[pallet::weight(T::WeightInfo::unregister())] + pub fn unregister( + origin: OriginFor, + contract_id: T::SmartContract, + ) -> DispatchResultWithPostInfo { + Self::ensure_pallet_enabled()?; + ensure_root(origin)?; + + let mut dapp_info = + RegisteredDapps::::get(&contract_id).ok_or(Error::::NotOperatedContract)?; + ensure!( + dapp_info.state == DAppState::Registered, + Error::::NotOperatedContract + ); + let developer = dapp_info.developer.clone(); + + let current_era = Self::current_era(); + dapp_info.state = DAppState::Unregistered(current_era); + RegisteredDapps::::insert(&contract_id, dapp_info); + + T::Currency::unreserve(&developer, T::RegisterDeposit::get()); + + Self::deposit_event(Event::::ContractRemoved(developer, contract_id)); + + Ok(().into()) + } + + /// Withdraw locked funds from a contract that was unregistered. + /// + /// Funds don't need to undergo the unbonding period - they are returned immediately to the staker's free balance. + #[pallet::call_index(2)] + #[pallet::weight(T::WeightInfo::withdraw_from_unregistered())] + pub fn withdraw_from_unregistered( + origin: OriginFor, + contract_id: T::SmartContract, + ) -> DispatchResultWithPostInfo { + Self::ensure_pallet_enabled()?; + let staker = ensure_signed(origin)?; + + // dApp must exist and it has to be unregistered + let dapp_info = + RegisteredDapps::::get(&contract_id).ok_or(Error::::NotOperatedContract)?; + + let unregistered_era = if let DAppState::Unregistered(x) = dapp_info.state { + x + } else { + return Err(Error::::NotUnregisteredContract.into()); + }; + + // There should be some leftover staked amount + let mut staker_info = Self::staker_info(&staker, &contract_id); + let staked_value = staker_info.latest_staked_value(); + ensure!(staked_value > Zero::zero(), Error::::NotStakedContract); + + // Don't allow withdrawal until all rewards have been claimed. 
+ let (claimable_era, _) = staker_info.claim(); + ensure!( + claimable_era >= unregistered_era || claimable_era.is_zero(), + Error::::UnclaimedRewardsRemaining + ); + + // Unlock the staked amount immediately. No unbonding period for this scenario. + let mut ledger = Self::ledger(&staker); + ledger.locked = ledger.locked.saturating_sub(staked_value); + Self::update_ledger(&staker, ledger); + + Self::update_staker_info(&staker, &contract_id, Default::default()); + + let current_era = Self::current_era(); + GeneralEraInfo::::mutate(¤t_era, |value| { + if let Some(x) = value { + x.staked = x.staked.saturating_sub(staked_value); + x.locked = x.locked.saturating_sub(staked_value); + } + }); + + Self::deposit_event(Event::::WithdrawFromUnregistered( + staker, + contract_id, + staked_value, + )); + + Ok(().into()) + } + + /// Lock up and stake balance of the origin account. + /// + /// `value` must be more than the `minimum_balance` specified by `MinimumStakingAmount` + /// unless account already has bonded value equal or more than 'minimum_balance'. + /// + /// The dispatch origin for this call must be _Signed_ by the staker's account. + #[pallet::call_index(3)] + #[pallet::weight(T::WeightInfo::bond_and_stake())] + pub fn bond_and_stake( + origin: OriginFor, + contract_id: T::SmartContract, + #[pallet::compact] value: BalanceOf, + ) -> DispatchResultWithPostInfo { + Self::ensure_pallet_enabled()?; + let staker = ensure_signed(origin)?; + + // Check that contract is ready for staking. + ensure!( + Self::is_active(&contract_id), + Error::::NotOperatedContract + ); + + // Get the staking ledger or create an entry if it doesn't exist. 
+ let mut ledger = Self::ledger(&staker); + let available_balance = Self::available_staking_balance(&staker, &ledger); + let value_to_stake = value.min(available_balance); + ensure!( + value_to_stake > Zero::zero(), + Error::::StakingWithNoValue + ); + + let current_era = Self::current_era(); + let mut staking_info = + Self::contract_stake_info(&contract_id, current_era).unwrap_or_default(); + let mut staker_info = Self::staker_info(&staker, &contract_id); + + Self::stake_on_contract( + &mut staker_info, + &mut staking_info, + value_to_stake, + current_era, + )?; + + ledger.locked = ledger.locked.saturating_add(value_to_stake); + + // Update storage + GeneralEraInfo::::mutate(¤t_era, |value| { + if let Some(x) = value { + x.staked = x.staked.saturating_add(value_to_stake); + x.locked = x.locked.saturating_add(value_to_stake); + } + }); + + Self::update_ledger(&staker, ledger); + Self::update_staker_info(&staker, &contract_id, staker_info); + ContractEraStake::::insert(&contract_id, current_era, staking_info); + + Self::deposit_event(Event::::BondAndStake( + staker, + contract_id, + value_to_stake, + )); + Ok(().into()) + } + + /// Start unbonding process and unstake balance from the contract. + /// + /// The unstaked amount will no longer be eligible for rewards but still won't be unlocked. + /// User needs to wait for the unbonding period to finish before being able to withdraw + /// the funds via `withdraw_unbonded` call. + /// + /// In case remaining staked balance on contract is below minimum staking amount, + /// entire stake for that contract will be unstaked. 
+ #[pallet::call_index(4)] + #[pallet::weight(T::WeightInfo::unbond_and_unstake())] + pub fn unbond_and_unstake( + origin: OriginFor, + contract_id: T::SmartContract, + #[pallet::compact] value: BalanceOf, + ) -> DispatchResultWithPostInfo { + Self::ensure_pallet_enabled()?; + let staker = ensure_signed(origin)?; + + ensure!(value > Zero::zero(), Error::::UnstakingWithNoValue); + ensure!( + Self::is_active(&contract_id), + Error::::NotOperatedContract, + ); + + let current_era = Self::current_era(); + let mut staker_info = Self::staker_info(&staker, &contract_id); + let mut contract_stake_info = + Self::contract_stake_info(&contract_id, current_era).unwrap_or_default(); + + let value_to_unstake = Self::unstake_from_contract( + &mut staker_info, + &mut contract_stake_info, + value, + current_era, + )?; + + // Update the chunks and write them to storage + let mut ledger = Self::ledger(&staker); + ledger.unbonding_info.add(UnlockingChunk { + amount: value_to_unstake, + unlock_era: current_era + T::UnbondingPeriod::get(), + }); + // This should be done AFTER insertion since it's possible for chunks to merge + ensure!( + ledger.unbonding_info.len() <= T::MaxUnlockingChunks::get(), + Error::::TooManyUnlockingChunks + ); + + Self::update_ledger(&staker, ledger); + + // Update total staked value in era. + GeneralEraInfo::::mutate(¤t_era, |value| { + if let Some(x) = value { + x.staked = x.staked.saturating_sub(value_to_unstake) + } + }); + Self::update_staker_info(&staker, &contract_id, staker_info); + ContractEraStake::::insert(&contract_id, current_era, contract_stake_info); + + Self::deposit_event(Event::::UnbondAndUnstake( + staker, + contract_id, + value_to_unstake, + )); + + Ok(().into()) + } + + /// Withdraw all funds that have completed the unbonding process. + /// + /// If there are unbonding chunks which will be fully unbonded in future eras, + /// they will remain and can be withdrawn later. 
+ #[pallet::call_index(5)] + #[pallet::weight(T::WeightInfo::withdraw_unbonded())] + pub fn withdraw_unbonded(origin: OriginFor) -> DispatchResultWithPostInfo { + Self::ensure_pallet_enabled()?; + let staker = ensure_signed(origin)?; + + let mut ledger = Self::ledger(&staker); + let current_era = Self::current_era(); + + let (valid_chunks, future_chunks) = ledger.unbonding_info.partition(current_era); + let withdraw_amount = valid_chunks.sum(); + + ensure!(!withdraw_amount.is_zero(), Error::::NothingToWithdraw); + + // Get the staking ledger and update it + ledger.locked = ledger.locked.saturating_sub(withdraw_amount); + ledger.unbonding_info = future_chunks; + + Self::update_ledger(&staker, ledger); + GeneralEraInfo::::mutate(¤t_era, |value| { + if let Some(x) = value { + x.locked = x.locked.saturating_sub(withdraw_amount) + } + }); + + Self::deposit_event(Event::::Withdrawn(staker, withdraw_amount)); + + Ok(().into()) + } + + /// Transfer nomination from one contract to another. + /// + /// Same rules as for `bond_and_stake` and `unbond_and_unstake` apply. + /// Minor difference is that there is no unbonding period so this call won't + /// check whether max number of unbonding chunks is exceeded. 
+ /// + #[pallet::call_index(6)] + #[pallet::weight(T::WeightInfo::nomination_transfer())] + pub fn nomination_transfer( + origin: OriginFor, + origin_contract_id: T::SmartContract, + #[pallet::compact] value: BalanceOf, + target_contract_id: T::SmartContract, + ) -> DispatchResultWithPostInfo { + Self::ensure_pallet_enabled()?; + let staker = ensure_signed(origin)?; + + // Contracts must differ and both must be active + ensure!( + origin_contract_id != target_contract_id, + Error::::NominationTransferToSameContract + ); + ensure!( + Self::is_active(&origin_contract_id), + Error::::NotOperatedContract + ); + ensure!( + Self::is_active(&target_contract_id), + Error::::NotOperatedContract + ); + + // Validate origin contract related data & update it + let current_era = Self::current_era(); + let mut origin_staker_info = Self::staker_info(&staker, &origin_contract_id); + let mut origin_staking_info = + Self::contract_stake_info(&origin_contract_id, current_era).unwrap_or_default(); + + let origin_to_target_transfer_value = Self::unstake_from_contract( + &mut origin_staker_info, + &mut origin_staking_info, + value, + current_era, + )?; + + // Validate target contract related data & update it + let mut target_staker_info = Self::staker_info(&staker, &target_contract_id); + let mut target_staking_info = + Self::contract_stake_info(&target_contract_id, current_era).unwrap_or_default(); + + Self::stake_on_contract( + &mut target_staker_info, + &mut target_staking_info, + origin_to_target_transfer_value, + current_era, + )?; + + // Update origin data + ContractEraStake::::insert(&origin_contract_id, current_era, origin_staking_info); + Self::update_staker_info(&staker, &origin_contract_id, origin_staker_info); + + // Update target data + ContractEraStake::::insert(&target_contract_id, current_era, target_staking_info); + Self::update_staker_info(&staker, &target_contract_id, target_staker_info); + + Self::deposit_event(Event::::NominationTransfer( + staker, + 
origin_contract_id, + origin_to_target_transfer_value, + target_contract_id, + )); + + Ok(().into()) + } + + // TODO: do we need to add force methods or at least methods that allow others to claim for someone else? + + /// Claim earned staker rewards for the oldest unclaimed era. + /// In order to claim multiple eras, this call has to be called multiple times. + /// + /// The rewards are always added to the staker's free balance (account) but depending on the reward destination configuration, + /// they might be immediately re-staked. + #[pallet::call_index(7)] + #[pallet::weight(T::WeightInfo::claim_staker_with_restake().max(T::WeightInfo::claim_staker_without_restake()))] + pub fn claim_staker( + origin: OriginFor, + contract_id: T::SmartContract, + ) -> DispatchResultWithPostInfo { + Self::ensure_pallet_enabled()?; + let staker = ensure_signed(origin)?; + + // Ensure we have something to claim + let mut staker_info = Self::staker_info(&staker, &contract_id); + let (era, staked) = staker_info.claim(); + ensure!(staked > Zero::zero(), Error::::NotStakedContract); + + let dapp_info = + RegisteredDapps::::get(&contract_id).ok_or(Error::::NotOperatedContract)?; + + if let DAppState::Unregistered(unregister_era) = dapp_info.state { + ensure!(era < unregister_era, Error::::NotOperatedContract); + } + + let current_era = Self::current_era(); + ensure!(era < current_era, Error::::EraOutOfBounds); + + let staking_info = Self::contract_stake_info(&contract_id, era).unwrap_or_default(); + let reward_and_stake = + Self::general_era_info(era).ok_or(Error::::UnknownEraReward)?; + + let (_, stakers_joint_reward) = + Self::dev_stakers_split(&staking_info, &reward_and_stake); + let staker_reward = + Perbill::from_rational(staked, staking_info.total) * stakers_joint_reward; + + let mut ledger = Self::ledger(&staker); + + let should_restake_reward = Self::should_restake_reward( + ledger.reward_destination, + dapp_info.state, + staker_info.latest_staked_value(), + ); + + if 
should_restake_reward { + staker_info + .stake(current_era, staker_reward) + .map_err(|_| Error::::UnexpectedStakeInfoEra)?; + + // Restaking will, in the worst case, remove one, and add one record, + // so it's fine if the vector is full + ensure!( + staker_info.len() <= T::MaxEraStakeValues::get(), + Error::::TooManyEraStakeValues + ); + } + + // Withdraw reward funds from the dapps staking pot + let reward_imbalance = T::Currency::withdraw( + &Self::account_id(), + staker_reward, + WithdrawReasons::TRANSFER, + ExistenceRequirement::AllowDeath, + )?; + + if should_restake_reward { + ledger.locked = ledger.locked.saturating_add(staker_reward); + Self::update_ledger(&staker, ledger); + + // Update storage + GeneralEraInfo::::mutate(¤t_era, |value| { + if let Some(x) = value { + x.staked = x.staked.saturating_add(staker_reward); + x.locked = x.locked.saturating_add(staker_reward); + } + }); + + ContractEraStake::::mutate(contract_id.clone(), current_era, |staking_info| { + if let Some(x) = staking_info { + x.total = x.total.saturating_add(staker_reward); + } + }); + + Self::deposit_event(Event::::BondAndStake( + staker.clone(), + contract_id.clone(), + staker_reward, + )); + } + + T::Currency::resolve_creating(&staker, reward_imbalance); + Self::update_staker_info(&staker, &contract_id, staker_info); + Self::deposit_event(Event::::Reward(staker, contract_id, era, staker_reward)); + + Ok(Some(if should_restake_reward { + T::WeightInfo::claim_staker_with_restake() + } else { + T::WeightInfo::claim_staker_without_restake() + }) + .into()) + } + + /// Claim earned dapp rewards for the specified era. + /// + /// Call must ensure that the specified era is eligible for reward payout and that it hasn't already been paid out for the dapp. 
+ #[pallet::call_index(8)] + #[pallet::weight(T::WeightInfo::claim_dapp())] + pub fn claim_dapp( + origin: OriginFor, + contract_id: T::SmartContract, + #[pallet::compact] era: EraIndex, + ) -> DispatchResultWithPostInfo { + Self::ensure_pallet_enabled()?; + let _ = ensure_signed(origin)?; + + let dapp_info = + RegisteredDapps::::get(&contract_id).ok_or(Error::::NotOperatedContract)?; + + let mut contract_stake_info = + Self::contract_stake_info(&contract_id, era).unwrap_or_default(); + + let dapp_reward = Self::calculate_dapp_reward(&contract_stake_info, &dapp_info, era)?; + + // Withdraw reward funds from the dapps staking + let reward_imbalance = T::Currency::withdraw( + &Self::account_id(), + dapp_reward, + WithdrawReasons::TRANSFER, + ExistenceRequirement::AllowDeath, + )?; + + T::Currency::resolve_creating(&dapp_info.developer, reward_imbalance); + Self::deposit_event(Event::::Reward( + dapp_info.developer, + contract_id.clone(), + era, + dapp_reward, + )); + + // updated counter for total rewards paid to the contract + contract_stake_info.contract_reward_claimed = true; + ContractEraStake::::insert(&contract_id, era, contract_stake_info); + + Ok(().into()) + } + + /// Force a new era at the start of the next block. + /// + /// The dispatch origin must be Root. + #[pallet::call_index(9)] + #[pallet::weight(T::WeightInfo::force_new_era())] + pub fn force_new_era(origin: OriginFor) -> DispatchResult { + Self::ensure_pallet_enabled()?; + ensure_root(origin)?; + ForceEra::::put(Forcing::ForceNew); + Ok(()) + } + + /// `true` will disable pallet, enabling maintenance mode. `false` will do the opposite. + /// + /// The dispatch origin must be Root. 
+ #[pallet::call_index(10)] + #[pallet::weight(T::WeightInfo::maintenance_mode())] + pub fn maintenance_mode( + origin: OriginFor, + enable_maintenance: bool, + ) -> DispatchResultWithPostInfo { + ensure_root(origin)?; + let is_disabled = PalletDisabled::::get(); + + ensure!( + is_disabled ^ enable_maintenance, + Error::::NoMaintenanceModeChange + ); + PalletDisabled::::put(enable_maintenance); + + Self::deposit_event(Event::::MaintenanceMode(enable_maintenance)); + Ok(().into()) + } + + /// Used to set reward destination for staker rewards. + /// + /// User must be an active staker in order to use this call. + /// This will apply to all existing unclaimed rewards. + #[pallet::call_index(11)] + #[pallet::weight(T::WeightInfo::set_reward_destination())] + pub fn set_reward_destination( + origin: OriginFor, + reward_destination: RewardDestination, + ) -> DispatchResultWithPostInfo { + Self::ensure_pallet_enabled()?; + let staker = ensure_signed(origin)?; + let mut ledger = Self::ledger(&staker); + + ensure!(!ledger.is_empty(), Error::::NotActiveStaker); + + // this is done directly instead of using update_ledger helper + // because there's no need to interact with the Currency locks + ledger.reward_destination = reward_destination; + Ledger::::insert(&staker, ledger); + + Self::deposit_event(Event::::RewardDestination(staker, reward_destination)); + Ok(().into()) + } + + /// Used to force set `ContractEraStake` storage values. + /// The purpose of this call is only for fixing one of the issues detected with dapps-staking. + /// + /// The dispatch origin must be Root. 
+ #[pallet::call_index(12)] + #[pallet::weight(T::DbWeight::get().writes(1))] + pub fn set_contract_stake_info( + origin: OriginFor, + contract: T::SmartContract, + era: EraIndex, + contract_stake_info: ContractStakeInfo>, + ) -> DispatchResultWithPostInfo { + ensure_root(origin)?; + + ContractEraStake::::insert(contract, era, contract_stake_info); + + Ok(().into()) + } + + /// Used to burn unclaimed & stale rewards from an unregistered contract. + #[pallet::call_index(13)] + #[pallet::weight(T::WeightInfo::claim_dapp())] + pub fn burn_stale_reward( + origin: OriginFor, + contract_id: T::SmartContract, + #[pallet::compact] era: EraIndex, + ) -> DispatchResultWithPostInfo { + Self::ensure_pallet_enabled()?; + ensure_root(origin)?; + + let dapp_info = + RegisteredDapps::::get(&contract_id).ok_or(Error::::NotOperatedContract)?; + ensure!( + dapp_info.is_unregistered(), + Error::::NotUnregisteredContract + ); + + let current_era = Self::current_era(); + + let burn_era_limit = + current_era.saturating_sub(T::UnregisteredDappRewardRetention::get()); + ensure!(era < burn_era_limit, Error::::EraOutOfBounds); + + let mut contract_stake_info = + Self::contract_stake_info(&contract_id, era).unwrap_or_default(); + + let dapp_reward = Self::calculate_dapp_reward(&contract_stake_info, &dapp_info, era)?; + + // Withdraw reward funds from the dapps staking pot and burn them + let imbalance_to_burn = T::Currency::withdraw( + &Self::account_id(), + dapp_reward, + WithdrawReasons::TRANSFER, + ExistenceRequirement::AllowDeath, + )?; + mem::drop(imbalance_to_burn); + + // mark entry as `claimed` but it means it's just handled (want to avoid rename since pallet will soon be redesigned). 
+ contract_stake_info.contract_reward_claimed = true; + ContractEraStake::::insert(&contract_id, era, contract_stake_info); + + Self::deposit_event(Event::::StaleRewardBurned( + dapp_info.developer, + contract_id.clone(), + era, + dapp_reward, + )); + + Ok(().into()) + } + } + + impl Pallet { + /// Calculate the dApp reward for the specified era. + /// If successfull, returns reward amount. + /// In case reward cannot be claimed or was already claimed, an error is raised. + fn calculate_dapp_reward( + contract_stake_info: &ContractStakeInfo>, + dapp_info: &DAppInfo, + era: EraIndex, + ) -> Result, Error> { + let current_era = Self::current_era(); + if let DAppState::Unregistered(unregister_era) = dapp_info.state { + ensure!(era < unregister_era, Error::::NotOperatedContract); + } + ensure!(era < current_era, Error::::EraOutOfBounds); + + ensure!( + !contract_stake_info.contract_reward_claimed, + Error::::AlreadyClaimedInThisEra, + ); + ensure!( + contract_stake_info.total > Zero::zero(), + Error::::NotStakedContract, + ); + + let reward_and_stake = + Self::general_era_info(era).ok_or(Error::::UnknownEraReward)?; + + // Calculate the contract reward for this era. + let (dapp_reward, _) = Self::dev_stakers_split(&contract_stake_info, &reward_and_stake); + + Ok(dapp_reward) + } + + /// An utility method used to stake specified amount on an arbitrary contract. + /// + /// `StakerInfo` and `ContractStakeInfo` are provided and all checks are made to ensure that it's possible to + /// complete staking operation. + /// + /// # Arguments + /// + /// * `staker_info` - info about staker's stakes on the contract up to current moment + /// * `staking_info` - general info about contract stakes up to current moment + /// * `value` - value which is being bonded & staked + /// * `current_era` - current dapps-staking era + /// + /// # Returns + /// + /// If stake operation was successful, given structs are properly modified. 
+ /// If not, an error is returned and structs are left in an undefined state. + /// + fn stake_on_contract( + staker_info: &mut StakerInfo>, + staking_info: &mut ContractStakeInfo>, + value: BalanceOf, + current_era: EraIndex, + ) -> Result<(), Error> { + ensure!( + !staker_info.latest_staked_value().is_zero() + || staking_info.number_of_stakers < T::MaxNumberOfStakersPerContract::get(), + Error::::MaxNumberOfStakersExceeded + ); + if staker_info.latest_staked_value().is_zero() { + staking_info.number_of_stakers = staking_info.number_of_stakers.saturating_add(1); + } + + staker_info + .stake(current_era, value) + .map_err(|_| Error::::UnexpectedStakeInfoEra)?; + ensure!( + // One spot should remain for compounding reward claim call + staker_info.len() < T::MaxEraStakeValues::get(), + Error::::TooManyEraStakeValues + ); + ensure!( + staker_info.latest_staked_value() >= T::MinimumStakingAmount::get(), + Error::::InsufficientValue, + ); + + // Increment ledger and total staker value for contract. + staking_info.total = staking_info.total.saturating_add(value); + + Ok(()) + } + + /// An utility method used to unstake specified amount from an arbitrary contract. + /// + /// The amount unstaked can be different in case staked amount would fall bellow `MinimumStakingAmount`. + /// In that case, entire staked amount will be unstaked. + /// + /// `StakerInfo` and `ContractStakeInfo` are provided and all checks are made to ensure that it's possible to + /// complete unstake operation. + /// + /// # Arguments + /// + /// * `staker_info` - info about staker's stakes on the contract up to current moment + /// * `staking_info` - general info about contract stakes up to current moment + /// * `value` - value which should be unstaked + /// * `current_era` - current dapps-staking era + /// + /// # Returns + /// + /// If unstake operation was successful, given structs are properly modified and total unstaked value is returned. 
+ /// If not, an error is returned and structs are left in an undefined state. + /// + fn unstake_from_contract( + staker_info: &mut StakerInfo>, + contract_stake_info: &mut ContractStakeInfo>, + value: BalanceOf, + current_era: EraIndex, + ) -> Result, Error> { + let staked_value = staker_info.latest_staked_value(); + ensure!(staked_value > Zero::zero(), Error::::NotStakedContract); + + // Calculate the value which will be unstaked. + let remaining = staked_value.saturating_sub(value); + let value_to_unstake = if remaining < T::MinimumStakingAmount::get() { + contract_stake_info.number_of_stakers = + contract_stake_info.number_of_stakers.saturating_sub(1); + staked_value + } else { + value + }; + contract_stake_info.total = contract_stake_info.total.saturating_sub(value_to_unstake); + + // Sanity check + ensure!( + value_to_unstake > Zero::zero(), + Error::::UnstakingWithNoValue + ); + + staker_info + .unstake(current_era, value_to_unstake) + .map_err(|_| Error::::UnexpectedStakeInfoEra)?; + ensure!( + // One spot should remain for compounding reward claim call + staker_info.len() < T::MaxEraStakeValues::get(), + Error::::TooManyEraStakeValues + ); + + Ok(value_to_unstake) + } + + /// Get AccountId assigned to the pallet. + pub(crate) fn account_id() -> T::AccountId { + T::PalletId::get().into_account_truncating() + } + + /// `Err` if pallet disabled for maintenance, `Ok` otherwise + pub fn ensure_pallet_enabled() -> Result<(), Error> { + if PalletDisabled::::get() { + Err(Error::::Disabled) + } else { + Ok(()) + } + } + + /// Update the ledger for a staker. This will also update the stash lock. + /// This lock will lock the entire funds except paying for further transactions. 
+ fn update_ledger(staker: &T::AccountId, ledger: AccountLedger>) { + if ledger.is_empty() { + Ledger::::remove(&staker); + T::Currency::remove_lock(STAKING_ID, staker); + } else { + T::Currency::set_lock(STAKING_ID, staker, ledger.locked, WithdrawReasons::all()); + Ledger::::insert(staker, ledger); + } + } + + /// Update the staker info for the `(staker, contract_id)` pairing. + /// If staker_info is empty, remove it from the DB. Otherwise, store it. + fn update_staker_info( + staker: &T::AccountId, + contract_id: &T::SmartContract, + staker_info: StakerInfo>, + ) { + if staker_info.is_empty() { + GeneralStakerInfo::::remove(staker, contract_id) + } else { + GeneralStakerInfo::::insert(staker, contract_id, staker_info) + } + } + + /// The block rewards are accumulated on the pallets's account during an era. + /// This function takes a snapshot of the pallet's balance accrued during current era + /// and stores it for future distribution + /// + /// This is called just at the beginning of an era. + fn reward_balance_snapshot(era: EraIndex, rewards: RewardInfo>) { + // Get the reward and stake information for previous era + let mut era_info = Self::general_era_info(era).unwrap_or_default(); + + // Prepare info for the next era + GeneralEraInfo::::insert( + era + 1, + EraInfo { + rewards: Default::default(), + staked: era_info.staked, + locked: era_info.locked, + }, + ); + + // Set the reward for the previous era. + era_info.rewards = rewards; + + GeneralEraInfo::::insert(era, era_info); + } + + /// Used to copy all `ContractStakeInfo` from the ending era over to the next era. + /// This is the most primitive solution since it scales with number of dApps. + /// It is possible to provide a hybrid solution which allows laziness but also prevents + /// a situation where we don't have access to the required data. 
+ fn rotate_staking_info(current_era: EraIndex) -> Weight { + let next_era = current_era + 1; + + let mut consumed_weight = Weight::zero(); + + for (contract_id, dapp_info) in RegisteredDapps::::iter() { + // Ignore dapp if it was unregistered + consumed_weight = consumed_weight.saturating_add(T::DbWeight::get().reads(1)); + if let DAppState::Unregistered(_) = dapp_info.state { + continue; + } + + // Copy data from era `X` to era `X + 1` + if let Some(mut staking_info) = Self::contract_stake_info(&contract_id, current_era) + { + staking_info.contract_reward_claimed = false; + ContractEraStake::::insert(&contract_id, next_era, staking_info); + + consumed_weight = + consumed_weight.saturating_add(T::DbWeight::get().reads_writes(1, 1)); + } else { + consumed_weight = consumed_weight.saturating_add(T::DbWeight::get().reads(1)); + } + } + + consumed_weight + } + + /// Returns available staking balance for the potential staker + fn available_staking_balance( + staker: &T::AccountId, + ledger: &AccountLedger>, + ) -> BalanceOf { + // Ensure that staker has enough balance to bond & stake. 
+ let free_balance = + T::Currency::free_balance(staker).saturating_sub(T::MinimumRemainingAmount::get()); + + // Remove already locked funds from the free balance + free_balance.saturating_sub(ledger.locked) + } + + /// `true` if contract is active, `false` if it has been unregistered + fn is_active(contract_id: &T::SmartContract) -> bool { + RegisteredDapps::::get(contract_id) + .map_or(false, |dapp_info| dapp_info.state == DAppState::Registered) + } + + /// `true` if all the conditions for restaking the reward have been met, `false` otherwise + pub(crate) fn should_restake_reward( + reward_destination: RewardDestination, + dapp_state: DAppState, + latest_staked_value: BalanceOf, + ) -> bool { + reward_destination == RewardDestination::StakeBalance + && dapp_state == DAppState::Registered + && latest_staked_value > Zero::zero() + } + + /// Calculate reward split between developer and stakers. + /// + /// Returns (developer reward, joint stakers reward) + pub(crate) fn dev_stakers_split( + contract_info: &ContractStakeInfo>, + era_info: &EraInfo>, + ) -> (BalanceOf, BalanceOf) { + let contract_stake_portion = + Perbill::from_rational(contract_info.total, era_info.staked); + + let developer_reward_part = contract_stake_portion * era_info.rewards.dapps; + let stakers_joint_reward = contract_stake_portion * era_info.rewards.stakers; + + (developer_reward_part, stakers_joint_reward) + } + + /// Adds `stakers` and `dapps` rewards to the reward pool. 
+ /// + /// - `stakers` - portion of the reward that will be distributed to stakers + /// - `dapps` - portion of the reward that will be distributed to dapps + pub fn rewards(stakers: NegativeImbalanceOf, dapps: NegativeImbalanceOf) { + BlockRewardAccumulator::::mutate(|accumulated_reward| { + accumulated_reward.dapps = accumulated_reward.dapps.saturating_add(dapps.peek()); + accumulated_reward.stakers = + accumulated_reward.stakers.saturating_add(stakers.peek()); + }); + + T::Currency::resolve_creating(&Self::account_id(), stakers.merge(dapps)); + } + + /// Returns total value locked by dapps-staking. + /// + /// Note that this can differ from _total staked value_ since some funds might be undergoing the unbonding period. + pub fn tvl() -> BalanceOf { + let current_era = Self::current_era(); + if let Some(era_info) = Self::general_era_info(current_era) { + era_info.locked + } else { + // Should never happen since era info for current era must always exist + Zero::zero() + } + } + } +} diff --git a/pallets/dapps-staking/src/testing_utils.rs b/pallets/dapps-staking/src/testing_utils.rs new file mode 100644 index 0000000000..1bf028ddc4 --- /dev/null +++ b/pallets/dapps-staking/src/testing_utils.rs @@ -0,0 +1,738 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . 
+ +use super::{pallet::pallet::Event, *}; +use frame_support::assert_ok; +use mock::{EraIndex, *}; +use sp_runtime::{traits::AccountIdConversion, Perbill}; + +/// Helper struct used to store information relevant to era/contract/staker combination. +pub(crate) struct MemorySnapshot { + era_info: EraInfo, + dapp_info: DAppInfo, + staker_info: StakerInfo, + contract_info: ContractStakeInfo, + free_balance: Balance, + ledger: AccountLedger, +} + +impl MemorySnapshot { + /// Prepares a new `MemorySnapshot` struct based on the given arguments. + pub(crate) fn all( + era: EraIndex, + contract_id: &MockSmartContract, + account: AccountId, + ) -> Self { + Self { + era_info: DappsStaking::general_era_info(era).unwrap(), + dapp_info: RegisteredDapps::::get(contract_id).unwrap(), + staker_info: GeneralStakerInfo::::get(&account, contract_id), + contract_info: DappsStaking::contract_stake_info(contract_id, era).unwrap_or_default(), + ledger: DappsStaking::ledger(&account), + free_balance: ::Currency::free_balance(&account), + } + } + + /// Prepares a new `MemorySnapshot` struct but only with contract-related info + /// (no info specific for individual staker). + pub(crate) fn contract(era: EraIndex, contract_id: &MockSmartContract) -> Self { + Self { + era_info: DappsStaking::general_era_info(era).unwrap(), + dapp_info: RegisteredDapps::::get(contract_id).unwrap(), + staker_info: Default::default(), + contract_info: DappsStaking::contract_stake_info(contract_id, era).unwrap_or_default(), + ledger: Default::default(), + free_balance: Default::default(), + } + } +} + +/// Used to fetch the free balance of dapps staking account +pub(crate) fn free_balance_of_dapps_staking_account() -> Balance { + ::Currency::free_balance(&account_id()) +} + +/// Used to fetch pallet account Id +pub(crate) fn account_id() -> AccountId { + ::PalletId::get().into_account_truncating() +} + +/// Used to get total dapps reward for an era. 
+pub(crate) fn get_total_reward_per_era() -> Balance { + mock::joint_block_reward() * BLOCKS_PER_ERA as Balance +} + +/// Used to register contract for staking and assert success. +pub(crate) fn assert_register(developer: AccountId, contract_id: &MockSmartContract) { + let init_reserved_balance = ::Currency::reserved_balance(&developer); + + // Contract shouldn't exist. + assert!(!RegisteredDapps::::contains_key(contract_id)); + assert!(!RegisteredDevelopers::::contains_key( + developer + )); + + // Verify op is successful + assert_ok!(DappsStaking::register( + RuntimeOrigin::root(), + developer, + contract_id.clone() + )); + + let dapp_info = RegisteredDapps::::get(contract_id).unwrap(); + assert_eq!(dapp_info.state, DAppState::Registered); + assert_eq!(dapp_info.developer, developer); + assert_eq!( + *contract_id, + RegisteredDevelopers::::get(developer).unwrap() + ); + + let final_reserved_balance = ::Currency::reserved_balance(&developer); + assert_eq!( + final_reserved_balance, + init_reserved_balance + ::RegisterDeposit::get() + ); +} + +/// Perform `unregister` with all the accompanied checks including before/after storage comparison. 
+pub(crate) fn assert_unregister(developer: AccountId, contract_id: &MockSmartContract) { + let current_era = DappsStaking::current_era(); + let init_state = MemorySnapshot::contract(current_era, contract_id); + let init_reserved_balance = ::Currency::reserved_balance(&developer); + + // dApp should be registered prior to unregistering it + assert_eq!(init_state.dapp_info.state, DAppState::Registered); + + // Ensure that contract can be unregistered + assert_ok!(DappsStaking::unregister( + RuntimeOrigin::root(), + contract_id.clone() + )); + System::assert_last_event(mock::RuntimeEvent::DappsStaking(Event::ContractRemoved( + developer, + contract_id.clone(), + ))); + + let final_state = MemorySnapshot::contract(current_era, contract_id); + let final_reserved_balance = ::Currency::reserved_balance(&developer); + assert_eq!( + final_reserved_balance, + init_reserved_balance - ::RegisterDeposit::get() + ); + + assert_eq!(final_state.era_info.staked, init_state.era_info.staked); + + assert_eq!( + final_state.contract_info.total, + init_state.contract_info.total + ); + assert_eq!( + final_state.contract_info.number_of_stakers, + init_state.contract_info.number_of_stakers + ); + + assert_eq!( + final_state.dapp_info.state, + DAppState::Unregistered(current_era) + ); + assert_eq!(final_state.dapp_info.developer, developer); +} + +/// Perform `withdraw_from_unregistered` with all the accompanied checks including before/after storage comparison. 
+pub(crate) fn assert_withdraw_from_unregistered( + staker: AccountId, + contract_id: &MockSmartContract, +) { + let current_era = DappsStaking::current_era(); + let init_state = MemorySnapshot::all(current_era, contract_id, staker); + + // Initial checks + if let DAppState::Unregistered(era) = init_state.dapp_info.state { + assert!(era <= DappsStaking::current_era()); + } else { + panic!("Contract should be unregistered.") + }; + + let staked_value = init_state.staker_info.latest_staked_value(); + assert!(staked_value > 0); + + // Op with verification + assert_ok!(DappsStaking::withdraw_from_unregistered( + RuntimeOrigin::signed(staker.clone()), + contract_id.clone() + )); + System::assert_last_event(mock::RuntimeEvent::DappsStaking( + Event::WithdrawFromUnregistered(staker, contract_id.clone(), staked_value), + )); + + let final_state = MemorySnapshot::all(current_era, contract_id, staker); + + // Verify that all final states are as expected + assert_eq!( + init_state.era_info.staked, + final_state.era_info.staked + staked_value + ); + assert_eq!( + init_state.era_info.locked, + final_state.era_info.locked + staked_value + ); + assert_eq!(init_state.dapp_info, final_state.dapp_info); + assert_eq!( + init_state.ledger.locked, + final_state.ledger.locked + staked_value + ); + assert_eq!( + init_state.ledger.unbonding_info, + final_state.ledger.unbonding_info + ); + + assert!(final_state.staker_info.latest_staked_value().is_zero()); + assert!(!GeneralStakerInfo::::contains_key( + &staker, + contract_id + )); +} + +/// Perform `bond_and_stake` with all the accompanied checks including before/after storage comparison. +pub(crate) fn assert_bond_and_stake( + staker: AccountId, + contract_id: &MockSmartContract, + value: Balance, +) { + let current_era = DappsStaking::current_era(); + let init_state = MemorySnapshot::all(current_era, &contract_id, staker); + + // Calculate the expected value that will be staked. 
+ let available_for_staking = init_state.free_balance + - init_state.ledger.locked + - ::MinimumRemainingAmount::get(); + let staking_value = available_for_staking.min(value); + + // Perform op and verify everything is as expected + assert_ok!(DappsStaking::bond_and_stake( + RuntimeOrigin::signed(staker), + contract_id.clone(), + value, + )); + System::assert_last_event(mock::RuntimeEvent::DappsStaking(Event::BondAndStake( + staker, + contract_id.clone(), + staking_value, + ))); + + let final_state = MemorySnapshot::all(current_era, &contract_id, staker); + + // In case staker hasn't been staking this contract until now + if init_state.staker_info.latest_staked_value() == 0 { + assert!(GeneralStakerInfo::::contains_key( + &staker, + contract_id + )); + assert_eq!( + final_state.contract_info.number_of_stakers, + init_state.contract_info.number_of_stakers + 1 + ); + } + + // Verify the remaining states + assert_eq!( + final_state.era_info.staked, + init_state.era_info.staked + staking_value + ); + assert_eq!( + final_state.era_info.locked, + init_state.era_info.locked + staking_value + ); + assert_eq!( + final_state.contract_info.total, + init_state.contract_info.total + staking_value + ); + assert_eq!( + final_state.staker_info.latest_staked_value(), + init_state.staker_info.latest_staked_value() + staking_value + ); + assert_eq!( + final_state.ledger.locked, + init_state.ledger.locked + staking_value + ); +} + +/// Used to perform start_unbonding with success and storage assertions. 
+pub(crate) fn assert_unbond_and_unstake( + staker: AccountId, + contract_id: &MockSmartContract, + value: Balance, +) { + // Get latest staking info + let current_era = DappsStaking::current_era(); + let init_state = MemorySnapshot::all(current_era, &contract_id, staker); + + // Calculate the expected resulting unbonding amount + let remaining_staked = init_state + .staker_info + .latest_staked_value() + .saturating_sub(value); + let expected_unbond_amount = if remaining_staked < MINIMUM_STAKING_AMOUNT { + init_state.staker_info.latest_staked_value() + } else { + value + }; + let remaining_staked = init_state.staker_info.latest_staked_value() - expected_unbond_amount; + + // Ensure op is successful and event is emitted + assert_ok!(DappsStaking::unbond_and_unstake( + RuntimeOrigin::signed(staker), + contract_id.clone(), + value + )); + System::assert_last_event(mock::RuntimeEvent::DappsStaking(Event::UnbondAndUnstake( + staker, + contract_id.clone(), + expected_unbond_amount, + ))); + + // Fetch the latest unbonding info so we can compare it to initial unbonding info + let final_state = MemorySnapshot::all(current_era, &contract_id, staker); + let expected_unlock_era = current_era + UNBONDING_PERIOD; + match init_state + .ledger + .unbonding_info + .vec() + .binary_search_by(|x| x.unlock_era.cmp(&expected_unlock_era)) + { + Ok(_) => assert_eq!( + init_state.ledger.unbonding_info.len(), + final_state.ledger.unbonding_info.len() + ), + Err(_) => assert_eq!( + init_state.ledger.unbonding_info.len() + 1, + final_state.ledger.unbonding_info.len() + ), + } + assert_eq!( + init_state.ledger.unbonding_info.sum() + expected_unbond_amount, + final_state.ledger.unbonding_info.sum() + ); + + // Push the unlocking chunk we expect to have at the end and compare two structs + let mut unbonding_info = init_state.ledger.unbonding_info.clone(); + unbonding_info.add(UnlockingChunk { + amount: expected_unbond_amount, + unlock_era: current_era + UNBONDING_PERIOD, + }); + 
assert_eq!(unbonding_info, final_state.ledger.unbonding_info); + + // Ensure that total locked value for staker hasn't been changed. + assert_eq!(init_state.ledger.locked, final_state.ledger.locked); + if final_state.ledger.is_empty() { + assert!(!Ledger::::contains_key(&staker)); + } + + // Ensure that total staked amount has been decreased for contract and staking points are updated + assert_eq!( + init_state.contract_info.total - expected_unbond_amount, + final_state.contract_info.total + ); + assert_eq!( + init_state.staker_info.latest_staked_value() - expected_unbond_amount, + final_state.staker_info.latest_staked_value() + ); + + // Ensure that the number of stakers is as expected + let delta = if remaining_staked > 0 { 0 } else { 1 }; + assert_eq!( + init_state.contract_info.number_of_stakers - delta, + final_state.contract_info.number_of_stakers + ); + + // Ensure that total staked value has been decreased + assert_eq!( + init_state.era_info.staked - expected_unbond_amount, + final_state.era_info.staked + ); + // Ensure that locked amount is the same since this will only start the unbonding period + assert_eq!(init_state.era_info.locked, final_state.era_info.locked); +} + +/// Used to perform start_unbonding with success and storage assertions. 
+pub(crate) fn assert_withdraw_unbonded(staker: AccountId) { + let current_era = DappsStaking::current_era(); + + let init_era_info = GeneralEraInfo::::get(current_era).unwrap(); + let init_ledger = Ledger::::get(&staker); + + // Get the current unlocking chunks + let (valid_info, remaining_info) = init_ledger.unbonding_info.partition(current_era); + let expected_unbond_amount = valid_info.sum(); + + // Ensure op is successful and event is emitted + assert_ok!(DappsStaking::withdraw_unbonded(RuntimeOrigin::signed( + staker + ),)); + System::assert_last_event(mock::RuntimeEvent::DappsStaking(Event::Withdrawn( + staker, + expected_unbond_amount, + ))); + + // Fetch the latest unbonding info so we can compare it to expected remainder + let final_ledger = Ledger::::get(&staker); + assert_eq!(remaining_info, final_ledger.unbonding_info); + if final_ledger.unbonding_info.is_empty() && final_ledger.locked == 0 { + assert!(!Ledger::::contains_key(&staker)); + } + + // Compare the ledger and total staked value + let final_rewards_and_stakes = GeneralEraInfo::::get(current_era).unwrap(); + assert_eq!(final_rewards_and_stakes.staked, init_era_info.staked); + assert_eq!( + final_rewards_and_stakes.locked, + init_era_info.locked - expected_unbond_amount + ); + assert_eq!( + final_ledger.locked, + init_ledger.locked - expected_unbond_amount + ); +} + +/// Used to perform nomination transfer with success and storage assertions. 
// NOTE(review): throughout this span the reviewed copy is missing turbofish generic
// arguments (e.g. `GeneralStakerInfo::::contains_key`, `Event::::BondAndStake`,
// `::Currency::total_issuance()`, `>::put(Forcing::ForceNew)`), apparently stripped
// by an HTML-sanitizing tool. Code is kept byte-identical here; the missing parameters
// are presumably `<TestRuntime>` / `<TestRuntime as Config>` — TODO confirm upstream.
pub(crate) fn assert_nomination_transfer(
    staker: AccountId,
    origin_contract_id: &MockSmartContract,
    value: Balance,
    target_contract_id: &MockSmartContract,
) {
    // Get latest staking info
    let current_era = DappsStaking::current_era();
    let origin_init_state = MemorySnapshot::all(current_era, &origin_contract_id, staker);
    let target_init_state = MemorySnapshot::all(current_era, &target_contract_id, staker);

    // Calculate value which will actually be transfered
    // (full stake moves if the remainder would drop below the minimum staking amount)
    let init_staked_value = origin_init_state.staker_info.latest_staked_value();
    let expected_transfer_amount = if init_staked_value - value >= MINIMUM_STAKING_AMOUNT {
        value
    } else {
        init_staked_value
    };

    // Ensure op is successful and event is emitted
    assert_ok!(DappsStaking::nomination_transfer(
        RuntimeOrigin::signed(staker),
        origin_contract_id.clone(),
        value,
        target_contract_id.clone()
    ));
    System::assert_last_event(mock::RuntimeEvent::DappsStaking(Event::NominationTransfer(
        staker,
        origin_contract_id.clone(),
        expected_transfer_amount,
        target_contract_id.clone(),
    )));

    let origin_final_state = MemorySnapshot::all(current_era, &origin_contract_id, staker);
    let target_final_state = MemorySnapshot::all(current_era, &target_contract_id, staker);

    // Ensure staker info has increased/decreased staked amount
    assert_eq!(
        origin_final_state.staker_info.latest_staked_value(),
        init_staked_value - expected_transfer_amount
    );
    assert_eq!(
        target_final_state.staker_info.latest_staked_value(),
        target_init_state.staker_info.latest_staked_value() + expected_transfer_amount
    );

    // Ensure total value staked on contracts has appropriately increased/decreased
    assert_eq!(
        origin_final_state.contract_info.total,
        origin_init_state.contract_info.total - expected_transfer_amount
    );
    assert_eq!(
        target_final_state.contract_info.total,
        target_init_state.contract_info.total + expected_transfer_amount
    );

    // Ensure number of contracts has been reduced on origin contract if it is fully unstaked
    let origin_contract_fully_unstaked = init_staked_value == expected_transfer_amount;
    if origin_contract_fully_unstaked {
        assert_eq!(
            origin_final_state.contract_info.number_of_stakers + 1,
            origin_init_state.contract_info.number_of_stakers
        );
    }

    // Ensure number of contracts has been increased on target contract it is first stake by the staker
    let no_init_stake_on_target_contract = target_init_state
        .staker_info
        .latest_staked_value()
        .is_zero();
    if no_init_stake_on_target_contract {
        assert_eq!(
            target_final_state.contract_info.number_of_stakers,
            target_init_state.contract_info.number_of_stakers + 1
        );
    }

    // Ensure DB entry has been removed if era stake vector is empty
    let fully_unstaked_and_nothing_to_claim =
        origin_contract_fully_unstaked && origin_final_state.staker_info.clone().claim() == (0, 0);
    if fully_unstaked_and_nothing_to_claim {
        assert!(!GeneralStakerInfo::::contains_key(
            &staker,
            &origin_contract_id
        ));
    }
}

/// Used to perform claim for stakers with success assertion
pub(crate) fn assert_claim_staker(claimer: AccountId, contract_id: &MockSmartContract) {
    let (claim_era, _) = DappsStaking::staker_info(&claimer, contract_id).claim();
    let current_era = DappsStaking::current_era();

    //clean up possible leftover events
    System::reset_events();

    let init_state_claim_era = MemorySnapshot::all(claim_era, contract_id, claimer);
    let init_state_current_era = MemorySnapshot::all(current_era, contract_id, claimer);

    // Calculate contract portion of the reward
    let (_, stakers_joint_reward) = DappsStaking::dev_stakers_split(
        &init_state_claim_era.contract_info,
        &init_state_claim_era.era_info,
    );

    let (claim_era, staked) = init_state_claim_era.staker_info.clone().claim();
    assert!(claim_era > 0); // Sanity check - if this fails, method is being used incorrectly

    // Cannot claim rewards post unregister era, this indicates a bug!
    if let DAppState::Unregistered(unregistered_era) = init_state_claim_era.dapp_info.state {
        assert!(unregistered_era > claim_era);
    }

    // Claimer's share of the stakers' joint reward, proportional to their stake.
    let calculated_reward =
        Perbill::from_rational(staked, init_state_claim_era.contract_info.total)
            * stakers_joint_reward;
    let issuance_before_claim = ::Currency::total_issuance();

    assert_ok!(DappsStaking::claim_staker(
        RuntimeOrigin::signed(claimer),
        contract_id.clone(),
    ));

    let final_state_current_era = MemorySnapshot::all(current_era, contract_id, claimer);

    // assert staked and free balances depending on restake check,
    assert_restake_reward(
        &init_state_current_era,
        &final_state_current_era,
        calculated_reward,
    );

    // check for stake event if restaking is performed
    if DappsStaking::should_restake_reward(
        init_state_current_era.ledger.reward_destination,
        init_state_current_era.dapp_info.state,
        init_state_current_era.staker_info.latest_staked_value(),
    ) {
        // There should be at least 2 events, Reward and BondAndStake.
        // if there's less, panic is acceptable
        let events = dapps_staking_events();
        let second_last_event = &events[events.len() - 2];
        assert_eq!(
            second_last_event.clone(),
            Event::::BondAndStake(claimer, contract_id.clone(), calculated_reward)
        );
    }

    // last event should be Reward, regardless of restaking
    System::assert_last_event(mock::RuntimeEvent::DappsStaking(Event::Reward(
        claimer,
        contract_id.clone(),
        claim_era,
        calculated_reward,
    )));

    let (new_era, _) = final_state_current_era.staker_info.clone().claim();
    if final_state_current_era.staker_info.is_empty() {
        assert!(new_era.is_zero());
        assert!(!GeneralStakerInfo::::contains_key(
            &claimer,
            contract_id
        ));
    } else {
        assert!(new_era > claim_era);
    }
    assert!(new_era.is_zero() || new_era > claim_era);

    // Claim shouldn't mint new tokens, instead it should just transfer from the dapps staking pallet account
    let issuance_after_claim = ::Currency::total_issuance();
    assert_eq!(issuance_before_claim, issuance_after_claim);

    // Old `claim_era` contract info should never be changed
    let final_state_claim_era = MemorySnapshot::all(claim_era, contract_id, claimer);
    assert_eq!(
        init_state_claim_era.contract_info,
        final_state_claim_era.contract_info
    );
}

// assert staked and locked states depending on should_restake_reward
// returns should_restake_reward result so further checks can be made
fn assert_restake_reward(
    init_state_current_era: &MemorySnapshot,
    final_state_current_era: &MemorySnapshot,
    reward: Balance,
) {
    if DappsStaking::should_restake_reward(
        init_state_current_era.ledger.reward_destination,
        init_state_current_era.dapp_info.state,
        init_state_current_era.staker_info.latest_staked_value(),
    ) {
        // staked values should increase
        assert_eq!(
            init_state_current_era.staker_info.latest_staked_value() + reward,
            final_state_current_era.staker_info.latest_staked_value()
        );
        assert_eq!(
            init_state_current_era.era_info.staked + reward,
            final_state_current_era.era_info.staked
        );
        assert_eq!(
            init_state_current_era.era_info.locked + reward,
            final_state_current_era.era_info.locked
        );
        assert_eq!(
            init_state_current_era.contract_info.total + reward,
            final_state_current_era.contract_info.total
        );
    } else {
        // staked values should remain the same, and free balance increase
        assert_eq!(
            init_state_current_era.free_balance + reward,
            final_state_current_era.free_balance
        );
        assert_eq!(
            init_state_current_era.era_info.staked,
            final_state_current_era.era_info.staked
        );
        assert_eq!(
            init_state_current_era.era_info.locked,
            final_state_current_era.era_info.locked
        );
        assert_eq!(
            init_state_current_era.contract_info,
            final_state_current_era.contract_info
        );
    }
}

/// Used to perform claim for dApp reward with success assertion
pub(crate) fn assert_claim_dapp(contract_id: &MockSmartContract, claim_era: EraIndex) {
    let developer = DappsStaking::dapp_info(contract_id).unwrap().developer;
    let init_state = MemorySnapshot::all(claim_era, contract_id, developer);
    assert!(!init_state.contract_info.contract_reward_claimed);

    // Cannot claim rewards post unregister era
    if let DAppState::Unregistered(unregistered_era) = init_state.dapp_info.state {
        assert!(unregistered_era > claim_era);
    }

    // Calculate contract portion of the reward
    let (calculated_reward, _) =
        DappsStaking::dev_stakers_split(&init_state.contract_info, &init_state.era_info);

    assert_ok!(DappsStaking::claim_dapp(
        RuntimeOrigin::signed(developer),
        contract_id.clone(),
        claim_era,
    ));
    System::assert_last_event(mock::RuntimeEvent::DappsStaking(Event::Reward(
        developer,
        contract_id.clone(),
        claim_era,
        calculated_reward,
    )));

    let final_state = MemorySnapshot::all(claim_era, &contract_id, developer);
    assert_eq!(
        init_state.free_balance + calculated_reward,
        final_state.free_balance
    );

    assert!(final_state.contract_info.contract_reward_claimed);

    // Just in case dev is also a staker - this shouldn't cause any change in StakerInfo or Ledger
    assert_eq!(init_state.staker_info, final_state.staker_info);
    assert_eq!(init_state.ledger, final_state.ledger);
}

// change reward destination and verify the update
pub(crate) fn assert_set_reward_destination(
    account_id: AccountId,
    reward_destination: RewardDestination,
) {
    assert_ok!(DappsStaking::set_reward_destination(
        RuntimeOrigin::signed(account_id),
        reward_destination
    ));

    System::assert_last_event(mock::RuntimeEvent::DappsStaking(Event::RewardDestination(
        account_id,
        reward_destination,
    )));

    let ledger = Ledger::::get(&account_id);

    assert_eq!(ledger.reward_destination, reward_destination);
}

/// Used to burn stale rewards with success assertions
pub(crate) fn assert_burn_stale_reward(
    contract_id: &MockSmartContract,
    claim_era: EraIndex,
) {
    let developer = DappsStaking::dapp_info(contract_id).unwrap().developer;
    let init_state = MemorySnapshot::all(claim_era, contract_id, developer);
    let issuance_before_claim = ::Currency::total_issuance();

    assert!(!init_state.contract_info.contract_reward_claimed);

    // Calculate contract portion of the reward
    let (calculated_reward, _) =
        DappsStaking::dev_stakers_split(&init_state.contract_info, &init_state.era_info);

    assert_ok!(DappsStaking::burn_stale_reward(
        RuntimeOrigin::root(),
        contract_id.clone(),
        claim_era,
    ));
    System::assert_last_event(mock::RuntimeEvent::DappsStaking(Event::StaleRewardBurned(
        developer,
        contract_id.clone(),
        claim_era,
        calculated_reward,
    )));

    // Burning reduces total issuance; the developer's balance is untouched.
    let final_state = MemorySnapshot::all(claim_era, &contract_id, developer);
    let issuance_after_claim = ::Currency::total_issuance();
    assert_eq!(init_state.free_balance, final_state.free_balance);
    assert!(final_state.contract_info.contract_reward_claimed);
    assert_eq!(
        issuance_before_claim - calculated_reward,
        issuance_after_claim
    );
}
diff --git a/pallets/dapps-staking/src/tests.rs b/pallets/dapps-staking/src/tests.rs
new file mode 100644
index 0000000000..13a3e69d5c
--- /dev/null
+++ b/pallets/dapps-staking/src/tests.rs
@@ -0,0 +1,2330 @@
// This file is part of Astar.

// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later

// Astar is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Astar is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Astar. If not, see .
// NOTE(review): the GPL URL after "see" appears to have been stripped along with
// other `<...>` spans — presumably `<http://www.gnu.org/licenses/>`; confirm upstream.

use super::{pallet::pallet::Error, pallet::pallet::Event, *};
use frame_support::{assert_noop, assert_ok, traits::OnInitialize, weights::Weight};
use mock::{Balances, MockSmartContract, *};
use sp_core::H160;
use sp_runtime::{
    traits::{BadOrigin, Zero},
    Perbill,
};

use testing_utils::*;

#[test]
fn on_initialize_when_dapp_staking_enabled_in_mid_of_an_era_is_ok() {
    ExternalityBuilder::build().execute_with(|| {
        // Set a block number in mid of an era
        System::set_block_number(2);

        // Verify that current era is 0 since dapps staking hasn't been initialized yet
        assert_eq!(0u32, DappsStaking::current_era());

        // Call on initialize in the mid of an era (according to block number calculation)
        // but since no era was initialized before, it will trigger a new era init.
        DappsStaking::on_initialize(System::block_number());
        assert_eq!(1u32, DappsStaking::current_era());
    })
}

#[test]
fn rewards_is_ok() {
    ExternalityBuilder::build().execute_with(|| {
        // At the beginning, both should be 0
        assert_eq!(
            BlockRewardAccumulator::::get(),
            Default::default()
        );
        assert!(free_balance_of_dapps_staking_account().is_zero());

        // After handling imbalance, accumulator and account should be updated
        let dapps_reward = 12345;
        let stakers_reward = 9999;
        let total_reward = dapps_reward + stakers_reward;
        DappsStaking::rewards(
            Balances::issue(stakers_reward),
            Balances::issue(dapps_reward),
        );

        assert_eq!(total_reward, free_balance_of_dapps_staking_account());
        let reward_accumulator = BlockRewardAccumulator::::get();
        assert_eq!(reward_accumulator.stakers, stakers_reward);
        assert_eq!(reward_accumulator.dapps, dapps_reward);

        // After triggering a new era, accumulator should be set to 0 but account shouldn't consume any new imbalance
        DappsStaking::on_initialize(System::block_number());
        assert_eq!(
            BlockRewardAccumulator::::get(),
            Default::default()
        );
        assert_eq!(total_reward, free_balance_of_dapps_staking_account());
    })
}

#[test]
fn on_initialize_is_ok() {
    ExternalityBuilder::build().execute_with(|| {
        // Before we start, era is zero
        assert!(DappsStaking::current_era().is_zero());

        // We initialize the first block and advance to second one. New era must be triggered.
        initialize_first_block();
        let current_era = DappsStaking::current_era();
        assert_eq!(1, current_era);

        let previous_era = current_era;
        advance_to_era(previous_era + 10);

        // Check that all reward&stakes are as expected
        let current_era = DappsStaking::current_era();
        for era in 1..current_era {
            let reward_info = GeneralEraInfo::::get(era).unwrap().rewards;
            assert_eq!(
                get_total_reward_per_era(),
                reward_info.stakers + reward_info.dapps
            );
        }
        // Current era rewards should be 0
        let era_rewards = GeneralEraInfo::::get(current_era).unwrap();
        assert_eq!(0, era_rewards.staked);
        assert_eq!(era_rewards.rewards, Default::default());
    })
}

#[test]
fn new_era_length_is_always_blocks_per_era() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();
        let blocks_per_era = mock::BLOCKS_PER_ERA;

        // go to beginning of an era
        advance_to_era(mock::DappsStaking::current_era() + 1);

        // record era number and block number
        let start_era = mock::DappsStaking::current_era();
        let starting_block_number = System::block_number();

        // go to next era
        advance_to_era(mock::DappsStaking::current_era() + 1);
        let ending_block_number = System::block_number();

        // make sure block number difference is is blocks_per_era
        assert_eq!(mock::DappsStaking::current_era(), start_era + 1);
        assert_eq!(ending_block_number - starting_block_number, blocks_per_era);
    })
}

#[test]
fn new_era_is_handled_with_maintenance_mode() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        // enable maintenance mode
        assert_ok!(DappsStaking::maintenance_mode(RuntimeOrigin::root(), true));
        assert!(PalletDisabled::::exists());
        System::assert_last_event(mock::RuntimeEvent::DappsStaking(Event::MaintenanceMode(
            true,
        )));

        // advance 9 blocks or 3 era lengths (advance_to_era() doesn't work in maintenance mode)
        run_for_blocks(mock::BLOCKS_PER_ERA * 3);

        // verify that `current block > NextEraStartingBlock` but era hasn't changed
        assert!(System::block_number() > DappsStaking::next_era_starting_block());
        assert_eq!(DappsStaking::current_era(), 1);

        // disable maintenance mode
        assert_ok!(DappsStaking::maintenance_mode(RuntimeOrigin::root(), false));
        System::assert_last_event(mock::RuntimeEvent::DappsStaking(Event::MaintenanceMode(
            false,
        )));

        // advance one era
        run_for_blocks(mock::BLOCKS_PER_ERA);

        // verify we're at block 14
        assert_eq!(System::block_number(), (4 * mock::BLOCKS_PER_ERA) + 2); // 2 from initialization, advanced 4 eras worth of blocks

        // verify era was updated and NextEraStartingBlock is 15
        assert_eq!(DappsStaking::current_era(), 2);
        assert_eq!(
            DappsStaking::next_era_starting_block(),
            (5 * mock::BLOCKS_PER_ERA)
        );
    })
}

#[test]
fn new_forced_era_length_is_always_blocks_per_era() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();
        let blocks_per_era = mock::BLOCKS_PER_ERA;

        // go to beginning of an era
        advance_to_era(mock::DappsStaking::current_era() + 1);

        // go to middle of era
        run_for_blocks(1); // can be any number between 0 and blocks_per_era

        // force new era
        >::put(Forcing::ForceNew);
        run_for_blocks(1); // calls on_initialize()

        // note the start block number of new (forced) era
        let start_block_number = System::block_number();

        // go to start of next era
        advance_to_era(mock::DappsStaking::current_era() + 1);

        // show the length of the forced era is equal to blocks_per_era
        let end_block_number = System::block_number();
        assert_eq!(end_block_number - start_block_number, blocks_per_era);
    })
}

#[test]
fn new_era_is_ok() {
    ExternalityBuilder::build().execute_with(|| {
        // set initial era index
        advance_to_era(DappsStaking::current_era() + 10);
        let starting_era = DappsStaking::current_era();

        // verify that block reward is zero at the beginning of an era
        assert_eq!(DappsStaking::block_reward_accumulator(), Default::default());

        // Increment block by setting it to the first block in era value
        run_for_blocks(1);
        let current_era = DappsStaking::current_era();
        assert_eq!(starting_era, current_era);

        // verify that block reward is added to the block_reward_accumulator
        let block_reward = DappsStaking::block_reward_accumulator();
        assert_eq!(
            joint_block_reward(),
            block_reward.stakers + block_reward.dapps
        );

        // register and bond to verify storage item
        let staker = 2;
        let developer = 3;
        let staked_amount = 100;
        let contract = MockSmartContract::Evm(H160::repeat_byte(0x01));
        assert_register(developer, &contract);
        assert_bond_and_stake(staker, &contract, staked_amount);

        // CurrentEra should be incremented
        // block_reward_accumulator should be reset to 0
        advance_to_era(DappsStaking::current_era() + 1);

        let current_era = DappsStaking::current_era();
        assert_eq!(starting_era + 1, current_era);
        System::assert_last_event(mock::RuntimeEvent::DappsStaking(Event::NewDappStakingEra(
            starting_era + 1,
        )));

        // verify that block reward accumulator is reset to 0
        let block_reward = DappsStaking::block_reward_accumulator();
        assert_eq!(block_reward, Default::default());

        let expected_era_reward = get_total_reward_per_era();
        let expected_dapps_reward = DAPP_BLOCK_REWARD * BLOCKS_PER_ERA as Balance;
        let expected_stakers_reward = STAKER_BLOCK_REWARD * BLOCKS_PER_ERA as Balance;

        // verify that .staked is copied and .reward is added
        let era_rewards = GeneralEraInfo::::get(starting_era).unwrap();
        assert_eq!(staked_amount, era_rewards.staked);
        assert_eq!(
            expected_era_reward,
            era_rewards.rewards.dapps + era_rewards.rewards.stakers
        );
        assert_eq!(expected_dapps_reward, era_rewards.rewards.dapps);
        assert_eq!(expected_stakers_reward, era_rewards.rewards.stakers);
    })
}

#[test]
fn new_era_forcing() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();
        advance_to_era(3);
        let starting_era = mock::DappsStaking::current_era();

        // call on_initialize. It is not last block in the era, but it should increment the era
        >::put(Forcing::ForceNew);
        run_for_blocks(1);

        // check that era is incremented
        let current = mock::DappsStaking::current_era();
        assert_eq!(starting_era + 1, current);

        // check that forcing is cleared
        assert_eq!(mock::DappsStaking::force_era(), Forcing::NotForcing);

        // check the event for the new era
        System::assert_last_event(mock::RuntimeEvent::DappsStaking(Event::NewDappStakingEra(
            starting_era + 1,
        )));
    })
}

#[test]
fn general_staker_info_is_ok() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let first_contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));
        assert_register(10, &first_contract_id);

        let second_contract_id = MockSmartContract::Evm(H160::repeat_byte(0x02));
        assert_register(11, &second_contract_id);

        let (staker_1, staker_2, staker_3) = (1, 2, 3);
        let amount = 100;

        let starting_era = 3;
        advance_to_era(starting_era);
        assert_bond_and_stake(staker_1, &first_contract_id, amount);
        assert_bond_and_stake(staker_2, &first_contract_id, amount);

        let mid_era = 7;
        advance_to_era(mid_era);
        assert_unbond_and_unstake(staker_2, &first_contract_id, amount);
        assert_bond_and_stake(staker_3, &first_contract_id, amount);
        assert_bond_and_stake(staker_3, &second_contract_id, amount);

        let final_era = 12;
        advance_to_era(final_era);

        // Check first interval
        let mut first_staker_info = DappsStaking::staker_info(&staker_1, &first_contract_id);
        let mut second_staker_info = DappsStaking::staker_info(&staker_2, &first_contract_id);
        let mut third_staker_info = DappsStaking::staker_info(&staker_3, &first_contract_id);

        for era in starting_era..mid_era {
            let contract_info = DappsStaking::contract_stake_info(&first_contract_id, era).unwrap();
            assert_eq!(2, contract_info.number_of_stakers);

            assert_eq!((era, amount), first_staker_info.claim());
            assert_eq!((era, amount), second_staker_info.claim());

            assert!(!ContractEraStake::::contains_key(
                &second_contract_id,
                era
            ));
        }

        // Check second interval
        for era in mid_era..=final_era {
            let first_contract_info =
                DappsStaking::contract_stake_info(&first_contract_id, era).unwrap();
            assert_eq!(2, first_contract_info.number_of_stakers);

            assert_eq!((era, amount), first_staker_info.claim());
            assert_eq!((era, amount), third_staker_info.claim());

            assert_eq!(
                DappsStaking::contract_stake_info(&second_contract_id, era)
                    .unwrap()
                    .number_of_stakers,
                1
            );
        }

        // Check that before starting era nothing exists
        assert!(!ContractEraStake::::contains_key(
            &first_contract_id,
            starting_era - 1
        ));
        assert!(!ContractEraStake::::contains_key(
            &second_contract_id,
            starting_era - 1
        ));
    })
}

#[test]
fn register_is_ok() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let developer = 1;
        let ok_contract = MockSmartContract::Evm(H160::repeat_byte(0x01));

        assert!(::Currency::reserved_balance(&developer).is_zero());
        assert_register(developer, &ok_contract);
        System::assert_last_event(mock::RuntimeEvent::DappsStaking(Event::NewContract(
            developer,
            ok_contract,
        )));

        assert_eq!(
            RegisterDeposit::get(),
            ::Currency::reserved_balance(&developer)
        );
    })
}

#[test]
fn register_with_non_root_fails() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let developer = 1;
        let ok_contract = MockSmartContract::Evm(H160::repeat_byte(0x01));

        assert_noop!(
            DappsStaking::register(RuntimeOrigin::signed(developer), developer, ok_contract),
            BadOrigin
        );
    })
}

#[test]
fn register_twice_with_same_account_fails() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let developer = 1;
        let contract1 = MockSmartContract::Evm(H160::repeat_byte(0x01));
        let contract2 = MockSmartContract::Evm(H160::repeat_byte(0x02));

        assert_register(developer, &contract1);

        System::assert_last_event(mock::RuntimeEvent::DappsStaking(Event::NewContract(
            developer, contract1,
        )));

        // now register different contract with same account
        assert_noop!(
            DappsStaking::register(RuntimeOrigin::root(), developer, contract2),
            Error::::AlreadyUsedDeveloperAccount
        );
    })
}

#[test]
fn register_same_contract_twice_fails() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let developer1 = 1;
        let developer2 = 2;
        let contract = MockSmartContract::Evm(H160::repeat_byte(0x01));

        assert_register(developer1, &contract);

        System::assert_last_event(mock::RuntimeEvent::DappsStaking(Event::NewContract(
            developer1, contract,
        )));

        // now register same contract by different developer
        assert_noop!(
            DappsStaking::register(RuntimeOrigin::root(), developer2, contract),
            Error::::AlreadyRegisteredContract
        );
    })
}

#[test]
fn unregister_after_register_is_ok() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let developer = 1;
        let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));

        assert_register(developer, &contract_id);
        assert_unregister(developer, &contract_id);
        assert!(::Currency::reserved_balance(&developer).is_zero());

        // Not possible to unregister a contract twice
        assert_noop!(
            DappsStaking::unregister(RuntimeOrigin::root(), contract_id.clone()),
            Error::::NotOperatedContract
        );
    })
}

#[test]
fn unregister_with_non_root() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let developer = 1;
        let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));

        assert_register(developer, &contract_id);

        // Not possible to unregister if caller isn't root
        assert_noop!(
            DappsStaking::unregister(RuntimeOrigin::signed(developer), contract_id.clone()),
            BadOrigin
        );
    })
}

// Staking operations must be rejected on a contract once it has been unregistered.
#[test]
fn unregister_stake_and_unstake_is_not_ok() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let developer = 1;
        let staker = 2;
        let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));

        // Register contract, stake it, unstake a bit
        assert_register(developer, &contract_id);
        assert_bond_and_stake(staker, &contract_id, 100);
        assert_unbond_and_unstake(staker, &contract_id, 10);

        // Unregister contract and verify that stake & unstake no longer work
        assert_unregister(developer, &contract_id);

        assert_noop!(
            DappsStaking::bond_and_stake(RuntimeOrigin::signed(staker), contract_id.clone(), 100),
            Error::::NotOperatedContract
        );
        assert_noop!(
            DappsStaking::unbond_and_unstake(
                RuntimeOrigin::signed(staker),
                contract_id.clone(),
                100
            ),
            Error::::NotOperatedContract
        );
    })
}

// Happy path: after a contract is unregistered and all past rewards are claimed,
// each staker can withdraw their full stake in one call, and no further claims work.
#[test]
fn withdraw_from_unregistered_is_ok() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let developer = 1;
        let dummy_developer = 2;
        let staker_1 = 3;
        let staker_2 = 4;
        let staked_value_1 = 150;
        let staked_value_2 = 330;
        let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));
        let dummy_contract_id = MockSmartContract::Evm(H160::repeat_byte(0x05));

        // Register both contracts and stake them
        assert_register(developer, &contract_id);
        assert_register(dummy_developer, &dummy_contract_id);
        assert_bond_and_stake(staker_1, &contract_id, staked_value_1);
        assert_bond_and_stake(staker_2, &contract_id, staked_value_2);

        // This contract will just exist so it helps us with testing ledger content
        assert_bond_and_stake(staker_1, &dummy_contract_id, staked_value_1);

        // Advance eras. This will accumulate some rewards.
        advance_to_era(5);

        assert_unregister(developer, &contract_id);

        // Claim all past rewards
        for era in 1..DappsStaking::current_era() {
            assert_claim_staker(staker_1, &contract_id);
            assert_claim_staker(staker_2, &contract_id);
            assert_claim_dapp(&contract_id, era);
        }

        // Unbond everything from the contract.
        assert_withdraw_from_unregistered(staker_1, &contract_id);
        assert_withdraw_from_unregistered(staker_2, &contract_id);

        // No additional claim ops should be possible
        assert_noop!(
            DappsStaking::claim_staker(RuntimeOrigin::signed(staker_1), contract_id.clone()),
            Error::::NotStakedContract
        );
        assert_noop!(
            DappsStaking::claim_staker(RuntimeOrigin::signed(staker_2), contract_id.clone()),
            Error::::NotStakedContract
        );
        assert_noop!(
            DappsStaking::claim_dapp(
                RuntimeOrigin::signed(developer),
                contract_id.clone(),
                DappsStaking::current_era()
            ),
            Error::::NotOperatedContract
        );
    })
}

// Withdrawing from a contract that was never registered must fail.
#[test]
fn withdraw_from_unregistered_when_contract_doesnt_exist() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));
        assert_noop!(
            DappsStaking::withdraw_from_unregistered(RuntimeOrigin::signed(1), contract_id),
            Error::::NotOperatedContract
        );
    })
}

// `withdraw_from_unregistered` is only valid for unregistered contracts.
#[test]
fn withdraw_from_unregistered_when_contract_is_still_registered() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let developer = 1;
        let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));
        assert_register(developer, &contract_id);

        assert_noop!(
            DappsStaking::withdraw_from_unregistered(RuntimeOrigin::signed(1), contract_id),
            Error::::NotUnregisteredContract
        );
    })
}

// Withdrawal must fail for an account with no stake, and for an account that has
// already withdrawn everything.
#[test]
fn withdraw_from_unregistered_when_nothing_is_staked() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let developer = 1;
        let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));

        assert_register(developer, &contract_id);

        let staker = 2;
        let no_staker = 3;
        assert_bond_and_stake(staker, &contract_id, 100);

        assert_unregister(developer, &contract_id);

        // No staked amount so call should fail.
        assert_noop!(
            DappsStaking::withdraw_from_unregistered(RuntimeOrigin::signed(no_staker), contract_id),
            Error::::NotStakedContract
        );

        // Call should fail if called twice since no staked funds remain.
        assert_withdraw_from_unregistered(staker, &contract_id);
        assert_noop!(
            DappsStaking::withdraw_from_unregistered(RuntimeOrigin::signed(staker), contract_id),
            Error::::NotStakedContract
        );
    })
}

// Withdrawal is blocked while the staker still has unclaimed rewards for the contract.
#[test]
fn withdraw_from_unregistered_when_unclaimed_rewards_remaining() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let developer = 1;
        let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));
        assert_register(developer, &contract_id);

        let staker = 2;
        assert_bond_and_stake(staker, &contract_id, 100);

        // Advance eras. This will accumulate some rewards.
// NOTE(review): continuation of `withdraw_from_unregistered_when_unclaimed_rewards_remaining`,
// whose beginning lies in the previous chunk of this file.
        advance_to_era(5);

        assert_unregister(developer, &contract_id);

        // Each era with an unclaimed reward blocks the withdrawal; claim era by era.
        for _ in 1..DappsStaking::current_era() {
            assert_noop!(
                DappsStaking::withdraw_from_unregistered(
                    RuntimeOrigin::signed(staker),
                    contract_id
                ),
                Error::::UnclaimedRewardsRemaining
            );
            assert_claim_staker(staker, &contract_id);
        }

        // Withdraw should work after all rewards have been claimed
        assert_withdraw_from_unregistered(staker, &contract_id);
    })
}

// A staker can add to an existing stake on the same contract in a later era.
#[test]
fn bond_and_stake_different_eras_is_ok() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let staker_id = 1;
        let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));
        assert_register(20, &contract_id);

        // initially, storage values should be None
        let current_era = DappsStaking::current_era();
        assert!(DappsStaking::contract_stake_info(&contract_id, current_era).is_none());

        assert_bond_and_stake(staker_id, &contract_id, 100);

        advance_to_era(current_era + 2);

        // Stake and bond again on the same contract but using a different amount.
        assert_bond_and_stake(staker_id, &contract_id, 300);
    })
}

// One staker can stake on two different registered contracts.
#[test]
fn bond_and_stake_two_different_contracts_is_ok() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let staker_id = 1;
        let first_contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));
        let second_contract_id = MockSmartContract::Evm(H160::repeat_byte(0x02));

        // Insert contracts under registered contracts. Don't use the staker Id.
        assert_register(5, &first_contract_id);
        assert_register(6, &second_contract_id);

        // Stake on both contracts.
        assert_bond_and_stake(staker_id, &first_contract_id, 100);
        assert_bond_and_stake(staker_id, &second_contract_id, 300);
    })
}

// Two different stakers can stake on the same contract.
#[test]
fn bond_and_stake_two_stakers_one_contract_is_ok() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let first_staker_id = 1;
        let second_staker_id = 2;
        let first_stake_value = 50;
        let second_stake_value = 235;
        let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));

        // Insert a contract under registered contracts.
        assert_register(10, &contract_id);

        // Both stakers stake on the same contract, expect a pass.
        assert_bond_and_stake(first_staker_id, &contract_id, first_stake_value);
        assert_bond_and_stake(second_staker_id, &contract_id, second_stake_value);
    })
}

// Exercises edge amounts: staking nearly the whole free balance, topping up below the
// existential deposit, and over-staking (which is clamped to the available balance,
// leaving `MINIMUM_REMAINING_AMOUNT` transferable).
#[test]
fn bond_and_stake_different_value_is_ok() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let staker_id = 1;
        let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));

        // Insert a contract under registered contracts.
        assert_register(20, &contract_id);

        // Bond&stake almost the entire available balance of the staker.
        let staker_free_balance =
            Balances::free_balance(&staker_id).saturating_sub(MINIMUM_REMAINING_AMOUNT);
        assert_bond_and_stake(staker_id, &contract_id, staker_free_balance - 1);

        // Bond&stake again with less than existential deposit but this time expect a pass
        // since we're only increasing the already staked amount.
        assert_bond_and_stake(staker_id, &contract_id, 1);

        // Bond&stake more than what's available in funds. Verify that only what's available is bonded&staked.
        let staker_id = 2;
        let staker_free_balance = Balances::free_balance(&staker_id);
        assert_bond_and_stake(staker_id, &contract_id, staker_free_balance + 1);

        // Verify the minimum transferable amount of stakers account
        let transferable_balance =
            Balances::free_balance(&staker_id) - Ledger::::get(staker_id).locked;
        assert_eq!(MINIMUM_REMAINING_AMOUNT, transferable_balance);

        // Bond&stake some amount, a bit less than free balance
        let staker_id = 3;
        let staker_free_balance =
            Balances::free_balance(&staker_id).saturating_sub(MINIMUM_REMAINING_AMOUNT);
        assert_bond_and_stake(staker_id, &contract_id, staker_free_balance - 200);

        // Try to bond&stake more than we have available (since we already locked most of the free balance).
        assert_bond_and_stake(staker_id, &contract_id, 500);
    })
}

// Staking on a contract that was never registered must fail.
#[test]
fn bond_and_stake_on_unregistered_contract_fails() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let staker_id = 1;
        let stake_value = 100;

        // Check not registered contract. Expect an error.
        let evm_contract = MockSmartContract::Evm(H160::repeat_byte(0x01));
        assert_noop!(
            DappsStaking::bond_and_stake(
                RuntimeOrigin::signed(staker_id),
                evm_contract,
                stake_value
            ),
            Error::::NotOperatedContract
        );
    })
}

// Initial stake below the minimum staking amount, or an effective stake of zero,
// must be rejected.
#[test]
fn bond_and_stake_insufficient_value() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();
        let staker_id = 1;
        let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));

        // Insert a contract under registered contracts.
        assert_register(20, &contract_id);

        // If user tries to make an initial bond&stake with less than minimum amount, raise an error.
        assert_noop!(
            DappsStaking::bond_and_stake(
                RuntimeOrigin::signed(staker_id),
                contract_id.clone(),
                MINIMUM_STAKING_AMOUNT - 1
            ),
            Error::::InsufficientValue
        );

        // Now bond&stake the entire stash so we lock all the available funds.
// NOTE(review): continuation of `bond_and_stake_insufficient_value`,
// whose beginning lies in the previous chunk of this file.
        let staker_free_balance = Balances::free_balance(&staker_id);
        assert_bond_and_stake(staker_id, &contract_id, staker_free_balance);

        // Now try to bond&stake some additional funds and expect an error since we cannot bond&stake 0.
        assert_noop!(
            DappsStaking::bond_and_stake(RuntimeOrigin::signed(staker_id), contract_id.clone(), 1),
            Error::::StakingWithNoValue
        );
    })
}

// A contract can hold at most MAX_NUMBER_OF_STAKERS distinct stakers.
#[test]
fn bond_and_stake_too_many_stakers_per_contract() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));
        // Insert a contract under registered contracts.
        assert_register(10, &contract_id);

        // Stake with MAX_NUMBER_OF_STAKERS on the same contract. It must work.
        for staker_id in 1..=MAX_NUMBER_OF_STAKERS {
            assert_bond_and_stake(staker_id.into(), &contract_id, 100);
        }

        // Now try to stake with an additional staker and expect an error.
        assert_noop!(
            DappsStaking::bond_and_stake(
                RuntimeOrigin::signed((1 + MAX_NUMBER_OF_STAKERS).into()),
                contract_id.clone(),
                100
            ),
            Error::::MaxNumberOfStakersExceeded
        );
    })
}

// A staker's per-contract era stake history is bounded by MAX_ERA_STAKE_VALUES.
#[test]
fn bond_and_stake_too_many_era_stakes() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let staker_id = 1;
        let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));
        // Insert a contract under registered contracts.
        assert_register(10, &contract_id);

        // Stake in MAX_ERA_STAKE_VALUES - 1 consecutive eras to fill up the era stake values. It must work.
        let start_era = DappsStaking::current_era();
        for offset in 1..MAX_ERA_STAKE_VALUES {
            assert_bond_and_stake(staker_id, &contract_id, 100);
            advance_to_era(start_era + offset);
        }

        // One more stake would create an additional era stake value, so expect an error.
        assert_noop!(
            DappsStaking::bond_and_stake(RuntimeOrigin::signed(staker_id), contract_id, 100),
            Error::::TooManyEraStakeValues
        );
    })
}

// Multiple partial unstakes are allowed, both across eras and within the same era.
#[test]
fn unbond_and_unstake_multiple_time_is_ok() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let staker_id = 1;
        let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));
        let original_staked_value = 300 + MINIMUM_STAKING_AMOUNT;
        let old_era = DappsStaking::current_era();

        // Insert a contract under registered contracts, bond&stake it.
        assert_register(10, &contract_id);
        assert_bond_and_stake(staker_id, &contract_id, original_staked_value);
        advance_to_era(old_era + 1);

        // Unstake such an amount so there will remain staked funds on the contract
        let unstaked_value = 100;
        assert_unbond_and_unstake(staker_id, &contract_id, unstaked_value);

        // Unbond yet again, but don't advance era
        // Unstake such an amount so there will remain staked funds on the contract
        let unstaked_value = 50;
        assert_unbond_and_unstake(staker_id, &contract_id, unstaked_value);
    })
}

// Unstaking an amount that would leave less than the minimum staking amount
// unstakes everything instead.
#[test]
fn unbond_and_unstake_value_below_staking_threshold() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let staker_id = 1;
        let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));
        let first_value_to_unstake = 300;
        let staked_value = first_value_to_unstake + MINIMUM_STAKING_AMOUNT;

        // Insert a contract under registered contracts, bond&stake it.
        assert_register(10, &contract_id);
        assert_bond_and_stake(staker_id, &contract_id, staked_value);

        // Unstake such an amount that exactly minimum staking amount will remain staked.
        assert_unbond_and_unstake(staker_id, &contract_id, first_value_to_unstake);

        // Unstake 1 token and expect that the entire staked amount will be unstaked.
        assert_unbond_and_unstake(staker_id, &contract_id, 1);
    })
}

// Unstaking works in arbitrary later eras, for multiple stakers of the same contract.
#[test]
fn unbond_and_unstake_in_different_eras() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let (first_staker_id, second_staker_id) = (1, 2);
        let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));
        let staked_value = 500;

        // Insert a contract under registered contracts, bond&stake it with two different stakers.
        assert_register(10, &contract_id);
        assert_bond_and_stake(first_staker_id, &contract_id, staked_value);
        assert_bond_and_stake(second_staker_id, &contract_id, staked_value);

        // Advance era, unbond&withdraw with first staker, verify that it was successful
        advance_to_era(DappsStaking::current_era() + 10);
        let current_era = DappsStaking::current_era();
        assert_unbond_and_unstake(first_staker_id, &contract_id, 100);

        // Advance era, unbond with second staker and verify storage values are as expected
        advance_to_era(current_era + 10);
        assert_unbond_and_unstake(second_staker_id, &contract_id, 333);
    })
}

// Unbonding calls made within the same era merge into a single unlocking chunk,
// so the chunk limit cannot be exceeded by same-era calls.
#[test]
fn unbond_and_unstake_calls_in_same_era_can_exceed_max_chunks() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));
        assert_register(10, &contract_id);

        let staker = 1;
        assert_bond_and_stake(staker, &contract_id, 200 * MAX_UNLOCKING_CHUNKS as Balance);

        // Unbond more times than the chunk limit within one era; chunks must keep merging into one.
// NOTE(review): continuation of `unbond_and_unstake_calls_in_same_era_can_exceed_max_chunks`,
// whose beginning lies in the previous chunk of this file.
        for _ in 0..MAX_UNLOCKING_CHUNKS * 2 {
            assert_unbond_and_unstake(1, &contract_id, 10);
            // Same-era unbonds merge, so exactly one chunk must exist throughout.
            assert_eq!(1, Ledger::::get(&staker).unbonding_info.len());
        }
    })
}

// Unstaking zero must be rejected.
#[test]
fn unbond_and_unstake_with_zero_value_is_not_ok() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));
        assert_register(10, &contract_id);

        assert_noop!(
            DappsStaking::unbond_and_unstake(RuntimeOrigin::signed(1), contract_id, 0),
            Error::::UnstakingWithNoValue
        );
    })
}

// Unstaking from a contract that isn't registered must be rejected.
#[test]
fn unbond_and_unstake_on_not_operated_contract_is_not_ok() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));
        assert_noop!(
            DappsStaking::unbond_and_unstake(RuntimeOrigin::signed(1), contract_id, 100),
            Error::::NotOperatedContract
        );
    })
}

// Once MAX_UNLOCKING_CHUNKS chunks exist in distinct eras, unbonding in yet another
// era must fail; unbonding in the same era as an existing chunk still works (merge).
#[test]
fn unbond_and_unstake_too_many_unlocking_chunks_is_not_ok() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));
        assert_register(10, &contract_id);

        let staker = 1;
        let unstake_amount = 10;
        let stake_amount =
            MINIMUM_STAKING_AMOUNT * 10 + unstake_amount * MAX_UNLOCKING_CHUNKS as Balance;

        assert_bond_and_stake(staker, &contract_id, stake_amount);

        // Create the maximum allowed number of unlocking chunks, one per era.
        for _ in 0..MAX_UNLOCKING_CHUNKS {
            advance_to_era(DappsStaking::current_era() + 1);
            assert_unbond_and_unstake(staker, &contract_id, unstake_amount);
        }

        // Ensure that we're at the max but can still add new chunks since it should be merged with the existing one
        assert_eq!(
            MAX_UNLOCKING_CHUNKS,
            DappsStaking::ledger(&staker).unbonding_info.len()
        );
        assert_unbond_and_unstake(staker, &contract_id, unstake_amount);

        // Ensure that further unbonding attempts result in an error.
        advance_to_era(DappsStaking::current_era() + 1);
        assert_noop!(
            DappsStaking::unbond_and_unstake(
                RuntimeOrigin::signed(staker),
                contract_id.clone(),
                unstake_amount
            ),
            Error::::TooManyUnlockingChunks,
        );
    })
}

// Unstaking from a registered contract the caller never staked on must fail.
#[test]
fn unbond_and_unstake_on_not_staked_contract_is_not_ok() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));
        assert_register(10, &contract_id);

        assert_noop!(
            DappsStaking::unbond_and_unstake(RuntimeOrigin::signed(1), contract_id, 10),
            Error::::NotStakedContract,
        );
    })
}

// Unstaking is also bounded by MAX_ERA_STAKE_VALUES, same as staking.
#[test]
fn unbond_and_unstake_too_many_era_stakes() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let staker_id = 1;
        let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));
        assert_register(10, &contract_id);

        // Fill up the `EraStakes` vec
        let start_era = DappsStaking::current_era();
        for offset in 1..MAX_ERA_STAKE_VALUES {
            assert_bond_and_stake(staker_id, &contract_id, 100);
            advance_to_era(start_era + offset);
        }

        // At this point, we have max allowed amount of `EraStake` values so we cannot create
        // an additional one.
        assert_noop!(
            DappsStaking::unbond_and_unstake(RuntimeOrigin::signed(staker_id), contract_id, 10),
            Error::::TooManyEraStakeValues
        );
    })
}

#[ignore]
#[test]
fn unbond_and_unstake_with_no_chunks_allowed() {
    // UT can be used to verify situation when MaxUnlockingChunks = 0. Requires mock modification.
// NOTE(review): continuation of the ignored test `unbond_and_unstake_with_no_chunks_allowed`,
// whose header lies in the previous chunk of this file.
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        // Sanity check
        assert_eq!(::MaxUnlockingChunks::get(), 0);

        let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));
        assert_register(10, &contract_id);

        let staker_id = 1;
        assert_bond_and_stake(staker_id, &contract_id, 100);

        assert_noop!(
            DappsStaking::unbond_and_unstake(
                RuntimeOrigin::signed(staker_id),
                contract_id.clone(),
                20
            ),
            Error::::TooManyUnlockingChunks,
        );
    })
}

// Chunks become withdrawable exactly UNBONDING_PERIOD eras after they were created;
// each withdrawal emits a `Withdrawn` event with that chunk's amount.
#[test]
fn withdraw_unbonded_is_ok() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));
        assert_register(10, &contract_id);

        let staker_id = 1;
        assert_bond_and_stake(staker_id, &contract_id, 1000);

        let first_unbond_value = 75;
        let second_unbond_value = 39;
        let initial_era = DappsStaking::current_era();

        // Unbond some amount in the initial era
        assert_unbond_and_unstake(staker_id, &contract_id, first_unbond_value);

        // Advance one era and then unbond some more
        advance_to_era(initial_era + 1);
        assert_unbond_and_unstake(staker_id, &contract_id, second_unbond_value);

        // Now advance one era before first chunks finishes the unbonding process
        advance_to_era(initial_era + UNBONDING_PERIOD - 1);
        assert_noop!(
            DappsStaking::withdraw_unbonded(RuntimeOrigin::signed(staker_id)),
            Error::::NothingToWithdraw
        );

        // Advance one additional era and expect that the first chunk can be withdrawn
        advance_to_era(DappsStaking::current_era() + 1);
        assert_ok!(DappsStaking::withdraw_unbonded(RuntimeOrigin::signed(
            staker_id
        ),));
        System::assert_last_event(mock::RuntimeEvent::DappsStaking(Event::Withdrawn(
            staker_id,
            first_unbond_value,
        )));

        // Advance one additional era and expect that the second chunk can be withdrawn
        advance_to_era(DappsStaking::current_era() + 1);
        assert_ok!(DappsStaking::withdraw_unbonded(RuntimeOrigin::signed(
            staker_id
        ),));
        System::assert_last_event(mock::RuntimeEvent::DappsStaking(Event::Withdrawn(
            staker_id,
            second_unbond_value,
        )));

        // Advance one additional era but since we have nothing else to withdraw, expect an error
        advance_to_era(initial_era + UNBONDING_PERIOD - 1);
        assert_noop!(
            DappsStaking::withdraw_unbonded(RuntimeOrigin::signed(staker_id)),
            Error::::NothingToWithdraw
        );
    })
}

// With a full vector of unlocking chunks, repeated withdrawals era by era must
// eventually drain the unbonding info completely.
#[test]
fn withdraw_unbonded_full_vector_is_ok() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));
        assert_register(10, &contract_id);

        let staker_id = 1;
        assert_bond_and_stake(staker_id, &contract_id, 1000);

        // Repeatedly start unbonding and advance era to create unlocking chunks
        let init_unbonding_amount = 15;
        for x in 1..=MAX_UNLOCKING_CHUNKS {
            assert_unbond_and_unstake(staker_id, &contract_id, init_unbonding_amount * x as u128);
            advance_to_era(DappsStaking::current_era() + 1);
        }

        // Now clean up all that are eligible for clean-up
        assert_withdraw_unbonded(staker_id);

        // This is a sanity check for the test. Some chunks should remain, otherwise test isn't testing realistic unbonding period.
        assert!(!Ledger::::get(&staker_id)
            .unbonding_info
            .is_empty());

        while !Ledger::::get(&staker_id)
            .unbonding_info
            .is_empty()
        {
            advance_to_era(DappsStaking::current_era() + 1);
            assert_withdraw_unbonded(staker_id);
        }
    })
}

// Withdrawing with no pending unlocking chunks must fail.
#[test]
fn withdraw_unbonded_no_value_is_not_ok() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        assert_noop!(
            DappsStaking::withdraw_unbonded(RuntimeOrigin::signed(1)),
            Error::::NothingToWithdraw,
        );
    })
}

#[ignore]
#[test]
fn withdraw_unbonded_no_unbonding_period() {
    // UT can be used to verify situation when UnbondingPeriod = 0. Requires mock modification.
// NOTE(review): continuation of the ignored test `withdraw_unbonded_no_unbonding_period`,
// whose header lies in the previous chunk of this file.
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        // Sanity check
        assert_eq!(::UnbondingPeriod::get(), 0);

        let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));
        assert_register(10, &contract_id);

        let staker_id = 1;
        assert_bond_and_stake(staker_id, &contract_id, 100);
        assert_unbond_and_unstake(staker_id, &contract_id, 20);

        // Try to withdraw but expect an error since current era hasn't passed yet
        assert_noop!(
            DappsStaking::withdraw_unbonded(RuntimeOrigin::signed(staker_id)),
            Error::::NothingToWithdraw,
        );

        // Advance an era and expect successful withdrawal
        advance_to_era(DappsStaking::current_era() + 1);
        assert_withdraw_unbonded(staker_id);
    })
}

// Happy path for nomination transfer: a partial transfer keeps both contracts staked;
// a transfer that would leave less than the minimum stake fully unstakes the origin.
#[test]
fn nomination_transfer_is_ok() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let origin_developer = 1;
        let target_developer = 2;
        let staker = 3;
        let origin_contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));
        let target_contract_id = MockSmartContract::Evm(H160::repeat_byte(0x02));

        assert_register(origin_developer, &origin_contract_id);
        assert_register(target_developer, &target_contract_id);
        assert_bond_and_stake(staker, &origin_contract_id, MINIMUM_STAKING_AMOUNT * 2);

        // The first transfer will ensure that both contracts are staked after operation is complete
        assert_nomination_transfer(
            staker,
            &origin_contract_id,
            MINIMUM_STAKING_AMOUNT,
            &target_contract_id,
        );
        assert!(
            !GeneralStakerInfo::::get(&staker, &origin_contract_id)
                .latest_staked_value()
                .is_zero()
        );

        // The second operation should fully unstake origin contract since it takes it below minimum staking amount
        assert_nomination_transfer(
            staker,
            &origin_contract_id,
            MINIMUM_STAKING_AMOUNT,
            &target_contract_id,
        );
        assert!(
            GeneralStakerInfo::::get(&staker, &origin_contract_id)
                .latest_staked_value()
                .is_zero()
        );
    })
}

// Transferring a nomination onto the same contract must be rejected.
#[test]
fn nomination_transfer_to_same_contract_is_not_ok() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();
        let developer = 1;
        let staker = 2;
        let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));

        assert_register(developer, &contract_id);

        assert_noop!(
            DappsStaking::nomination_transfer(
                RuntimeOrigin::signed(staker),
                contract_id,
                100,
                contract_id,
            ),
            Error::::NominationTransferToSameContract
        );
    })
}

// Both origin and target contract must be registered for a transfer to succeed;
// all four registered/unregistered combinations that break this are checked.
#[test]
fn nomination_transfer_to_inactive_contracts_is_not_ok() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let origin_developer = 1;
        let target_developer = 2;
        let staker = 3;
        let origin_contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));
        let target_contract_id = MockSmartContract::Evm(H160::repeat_byte(0x02));

        // 1. Neither contract is registered
        assert_noop!(
            DappsStaking::nomination_transfer(
                RuntimeOrigin::signed(staker),
                origin_contract_id,
                100,
                target_contract_id,
            ),
            Error::::NotOperatedContract
        );

        // 2. Only first contract is registered
        assert_register(origin_developer, &origin_contract_id);
        assert_noop!(
            DappsStaking::nomination_transfer(
                RuntimeOrigin::signed(staker),
                origin_contract_id,
                100,
                target_contract_id,
            ),
            Error::::NotOperatedContract
        );

        // 3. Both are registered but then target contract gets unregistered
        assert_register(target_developer, &target_contract_id);
        assert_bond_and_stake(staker, &origin_contract_id, 100);
        assert_nomination_transfer(staker, &origin_contract_id, 100, &target_contract_id);

        assert_unregister(target_developer, &target_contract_id);
        assert_noop!(
            DappsStaking::nomination_transfer(
                RuntimeOrigin::signed(staker),
                origin_contract_id,
                100,
                target_contract_id,
            ),
            Error::::NotOperatedContract
        );

        // 4. Origin contract is unregistered
        assert_unregister(origin_developer, &origin_contract_id);
        assert_noop!(
            DappsStaking::nomination_transfer(
                RuntimeOrigin::signed(staker),
                origin_contract_id,
                100,
                target_contract_id,
            ),
            Error::::NotOperatedContract
        );
    })
}

// Transferring from a contract the caller never staked on must fail.
#[test]
fn nomination_transfer_from_not_staked_contract() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let origin_developer = 1;
        let target_developer = 2;
        let staker = 3;
        let origin_contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));
        let target_contract_id = MockSmartContract::Evm(H160::repeat_byte(0x02));

        assert_register(origin_developer, &origin_contract_id);
        assert_register(target_developer, &target_contract_id);

        assert_noop!(
            DappsStaking::nomination_transfer(
                RuntimeOrigin::signed(staker),
                origin_contract_id.clone(),
                20,
                target_contract_id.clone()
            ),
            Error::::NotStakedContract
        );
    })
}

// Transferring a zero amount must fail.
#[test]
fn nomination_transfer_with_no_value() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let origin_developer = 1;
        let target_developer = 2;
        let staker = 3;
        let origin_contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));
        let target_contract_id = MockSmartContract::Evm(H160::repeat_byte(0x02));

        assert_register(origin_developer, &origin_contract_id);
        assert_register(target_developer, &target_contract_id);
        assert_bond_and_stake(staker, &origin_contract_id, 100);

        assert_noop!(
            DappsStaking::nomination_transfer(
                RuntimeOrigin::signed(staker),
                origin_contract_id.clone(),
                Zero::zero(),
                target_contract_id.clone()
            ),
            Error::::UnstakingWithNoValue
        );
    })
}

// An initial stake on the target below the minimum staking amount must fail.
#[test]
fn nomination_transfer_with_insufficient_value() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let origin_developer = 1;
        let target_developer = 2;
        let staker = 3;
        let origin_contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));
        let target_contract_id = MockSmartContract::Evm(H160::repeat_byte(0x02));

        assert_register(origin_developer, &origin_contract_id);
        assert_register(target_developer, &target_contract_id);
        assert_bond_and_stake(staker, &origin_contract_id, 100);

        assert_noop!(
            DappsStaking::nomination_transfer(
                RuntimeOrigin::signed(staker),
                origin_contract_id.clone(),
                MINIMUM_STAKING_AMOUNT - 1,
                target_contract_id.clone()
            ),
            Error::::InsufficientValue
        );
    })
}

// The MAX_ERA_STAKE_VALUES bound applies to both ends of a transfer — whether the
// origin's or the target's era stake history is the one that is full.
#[test]
fn nomination_transfer_contracts_have_too_many_era_stake_values() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let origin_developer = 1;
        let target_developer = 2;
        let staker = 3;
        let origin_contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));
        let target_contract_id = MockSmartContract::Evm(H160::repeat_byte(0x02));

        assert_register(origin_developer, &origin_contract_id);
        assert_register(target_developer, &target_contract_id);

        // Ensure we fill up era stakes vector
        for _ in 1..MAX_ERA_STAKE_VALUES {
            // We use bond&stake since its only limiting factor is max era stake values
            assert_bond_and_stake(staker, &origin_contract_id, 15);
            advance_to_era(DappsStaking::current_era() + 1);
        }
        assert_noop!(
            DappsStaking::bond_and_stake(
                RuntimeOrigin::signed(staker),
                origin_contract_id.clone(),
                15
            ),
            Error::::TooManyEraStakeValues
        );

        // Ensure it's not possible to transfer from origin contract since it's era stake values are maxed
        assert_noop!(
            DappsStaking::nomination_transfer(
                RuntimeOrigin::signed(staker),
                origin_contract_id.clone(),
                15,
                target_contract_id.clone()
            ),
            Error::::TooManyEraStakeValues
        );

        // Swap origin and target to verify that same is true if target contract era stake values is maxed out
        let (origin_contract_id, target_contract_id) = (target_contract_id, origin_contract_id);
        assert_bond_and_stake(staker, &origin_contract_id, 15);
        assert_noop!(
            DappsStaking::nomination_transfer(
                RuntimeOrigin::signed(staker),
                origin_contract_id.clone(),
                15,
                target_contract_id.clone()
            ),
            Error::::TooManyEraStakeValues
        );
    })
}

// A transfer cannot add a new staker to a target contract that already has the
// maximum number of stakers.
#[test]
fn nomination_transfer_max_number_of_stakers_exceeded() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let origin_developer = 1;
        let target_developer = 2;
        // This one will only stake on origin contract
        let first_staker = 3;
        // This one will stake on both origin and target contracts
        let second_staker = 4;
        let origin_contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));
        let target_contract_id = MockSmartContract::Evm(H160::repeat_byte(0x02));

        // Register contracts and bond&stake them with both stakers
        assert_register(origin_developer, &origin_contract_id);
        assert_register(target_developer, &target_contract_id);

        assert_bond_and_stake(first_staker, &origin_contract_id, 23);
        assert_bond_and_stake(second_staker, &target_contract_id, 37);
        assert_bond_and_stake(second_staker, &target_contract_id, 41);

        // Fill up the second contract with stakers until max number of stakers limit has been reached
        for temp_staker in (second_staker + 1)..(MAX_NUMBER_OF_STAKERS as u64 + second_staker) {
            Balances::resolve_creating(&temp_staker, Balances::issue(100));
            assert_bond_and_stake(temp_staker, &target_contract_id, 13);
        }
        // Sanity check + assurance that first_staker isn't staking on target contract
        assert_noop!(
            DappsStaking::bond_and_stake(
                RuntimeOrigin::signed(first_staker),
                target_contract_id.clone(),
                19
            ),
            Error::::MaxNumberOfStakersExceeded
        );

        // Now attempt transfer nomination and expect an error
        assert_noop!(
            DappsStaking::nomination_transfer(
                RuntimeOrigin::signed(first_staker),
                origin_contract_id.clone(),
                19,
                target_contract_id.clone(),
            ),
            Error::::MaxNumberOfStakersExceeded
        );
    })
}

// Claims (both staker and dapp) fail on a contract with no recorded stake.
#[test]
fn claim_not_staked_contract() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let developer = 1;
        let staker = 2;
        let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));

        assert_register(developer, &contract_id);

        assert_noop!(
            DappsStaking::claim_staker(RuntimeOrigin::signed(staker), contract_id),
            Error::::NotStakedContract
        );

        advance_to_era(DappsStaking::current_era() + 1);
        assert_noop!(
            DappsStaking::claim_dapp(RuntimeOrigin::signed(developer), contract_id, 1),
            Error::::NotStakedContract
        );
    })
}

// Claims for eras up to the unregistration era still pass; later eras must fail.
#[test]
fn claim_not_operated_contract() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let developer = 1;
        let staker = 2;
        let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));

        assert_register(developer, &contract_id);
        assert_bond_and_stake(staker, &contract_id, 100);

        // Advance one era and unregister the contract
        advance_to_era(DappsStaking::current_era() + 1);
        assert_unregister(developer, &contract_id);

        // First claim should pass but second should fail because contract was unregistered
        assert_claim_staker(staker, &contract_id);
        assert_noop!(
            DappsStaking::claim_staker(RuntimeOrigin::signed(staker), contract_id),
            Error::::NotOperatedContract
        );

        assert_claim_dapp(&contract_id, 1);
        assert_noop!(
            DappsStaking::claim_dapp(RuntimeOrigin::signed(developer), contract_id, 2),
            Error::::NotOperatedContract
        );
    })
}

// NOTE(review): this test continues past the end of this chunk.
#[test]
fn claim_invalid_era() {
    ExternalityBuilder::build().execute_with(|| {
        initialize_first_block();

        let developer = 1;
        let staker = 2;
        let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01));

        let start_era = DappsStaking::current_era();
        assert_register(developer, &contract_id);
        assert_bond_and_stake(staker, &contract_id, 100);
        advance_to_era(start_era + 5);

        for era in start_era..DappsStaking::current_era() {
            assert_claim_staker(staker, &contract_id);
assert_claim_dapp(&contract_id, era); + } + + assert_noop!( + DappsStaking::claim_staker(RuntimeOrigin::signed(staker), contract_id), + Error::::EraOutOfBounds + ); + assert_noop!( + DappsStaking::claim_dapp( + RuntimeOrigin::signed(developer), + contract_id, + DappsStaking::current_era() + ), + Error::::EraOutOfBounds + ); + }) +} + +#[test] +fn claim_dapp_same_era_twice() { + ExternalityBuilder::build().execute_with(|| { + initialize_first_block(); + + let developer = 1; + let staker = 2; + let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01)); + + let start_era = DappsStaking::current_era(); + assert_register(developer, &contract_id); + assert_bond_and_stake(staker, &contract_id, 100); + advance_to_era(start_era + 1); + + assert_claim_dapp(&contract_id, start_era); + assert_noop!( + DappsStaking::claim_dapp(RuntimeOrigin::signed(developer), contract_id, start_era), + Error::::AlreadyClaimedInThisEra + ); + }) +} + +#[test] +fn claim_is_ok() { + ExternalityBuilder::build().execute_with(|| { + initialize_first_block(); + + let first_developer = 1; + let second_developer = 2; + let first_staker = 3; + let second_staker = 4; + let first_contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01)); + let second_contract_id = MockSmartContract::Evm(H160::repeat_byte(0x02)); + + let start_era = DappsStaking::current_era(); + + // Prepare a scenario with different stakes + + assert_register(first_developer, &first_contract_id); + assert_register(second_developer, &second_contract_id); + assert_bond_and_stake(first_staker, &first_contract_id, 100); + assert_bond_and_stake(second_staker, &first_contract_id, 45); + + // Just so ratio isn't 100% in favor of the first contract + assert_bond_and_stake(first_staker, &second_contract_id, 33); + assert_bond_and_stake(second_staker, &second_contract_id, 22); + + let eras_advanced = 3; + advance_to_era(start_era + eras_advanced); + + for x in 0..eras_advanced.into() { + assert_bond_and_stake(first_staker, 
&first_contract_id, 20 + x * 3); + assert_bond_and_stake(second_staker, &first_contract_id, 5 + x * 5); + advance_to_era(DappsStaking::current_era() + 1); + } + + // Ensure that all past eras can be claimed + let current_era = DappsStaking::current_era(); + for era in start_era..current_era { + assert_claim_staker(first_staker, &first_contract_id); + assert_claim_dapp(&first_contract_id, era); + assert_claim_staker(second_staker, &first_contract_id); + } + + // Shouldn't be possible to claim current era. + // Also, previous claim calls should have claimed everything prior to current era. + assert_noop!( + DappsStaking::claim_staker( + RuntimeOrigin::signed(first_staker), + first_contract_id.clone() + ), + Error::::EraOutOfBounds + ); + assert_noop!( + DappsStaking::claim_dapp( + RuntimeOrigin::signed(first_developer), + first_contract_id, + current_era + ), + Error::::EraOutOfBounds + ); + }) +} + +#[test] +fn claim_after_unregister_is_ok() { + ExternalityBuilder::build().execute_with(|| { + initialize_first_block(); + + let developer = 1; + let staker = 2; + let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01)); + + let start_era = DappsStaking::current_era(); + assert_register(developer, &contract_id); + let stake_value = 100; + assert_bond_and_stake(staker, &contract_id, stake_value); + + // Advance few eras, then unstake everything + advance_to_era(start_era + 5); + assert_unbond_and_unstake(staker, &contract_id, stake_value); + let full_unstake_era = DappsStaking::current_era(); + let number_of_staking_eras = full_unstake_era - start_era; + + // Few eras pass, then staker stakes again + advance_to_era(DappsStaking::current_era() + 3); + let stake_value = 75; + let restake_era = DappsStaking::current_era(); + assert_bond_and_stake(staker, &contract_id, stake_value); + + // Again, few eras pass then contract is unregistered + advance_to_era(DappsStaking::current_era() + 3); + assert_unregister(developer, &contract_id); + let unregister_era = 
DappsStaking::current_era(); + let number_of_staking_eras = number_of_staking_eras + unregister_era - restake_era; + advance_to_era(DappsStaking::current_era() + 2); + + // Ensure that staker can claim all the eras that he had an active stake + for _ in 0..number_of_staking_eras { + assert_claim_staker(staker, &contract_id); + } + assert_noop!( + DappsStaking::claim_staker(RuntimeOrigin::signed(staker), contract_id.clone()), + Error::::NotOperatedContract + ); + + // Ensure the same for dapp reward + for era in start_era..unregister_era { + if era >= full_unstake_era && era < restake_era { + assert_noop!( + DappsStaking::claim_dapp( + RuntimeOrigin::signed(developer), + contract_id.clone(), + era + ), + Error::::NotStakedContract + ); + } else { + assert_claim_dapp(&contract_id, era); + } + } + }) +} + +#[test] +fn claim_only_payout_is_ok() { + ExternalityBuilder::build().execute_with(|| { + initialize_first_block(); + + let developer = 1; + let staker = 2; + let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01)); + + // stake some tokens + let start_era = DappsStaking::current_era(); + assert_register(developer, &contract_id); + let stake_value = 100; + assert_bond_and_stake(staker, &contract_id, stake_value); + + // disable reward restaking + advance_to_era(start_era + 1); + assert_set_reward_destination(staker, RewardDestination::FreeBalance); + + // ensure it's claimed correctly + assert_claim_staker(staker, &contract_id); + }) +} + +#[test] +fn claim_with_zero_staked_is_ok() { + ExternalityBuilder::build().execute_with(|| { + initialize_first_block(); + + let developer = 1; + let staker = 2; + let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01)); + let start_era = DappsStaking::current_era(); + assert_register(developer, &contract_id); + + // stake some tokens and wait for an era + let stake_value = 100; + assert_bond_and_stake(staker, &contract_id, stake_value); + advance_to_era(start_era + 1); + + // ensure reward_destination is set 
to StakeBalance + assert_set_reward_destination(staker, RewardDestination::StakeBalance); + + // unstake all the tokens + assert_unbond_and_unstake(staker, &contract_id, stake_value); + + // ensure claimed value goes to claimer's free balance + assert_claim_staker(staker, &contract_id); + }) +} + +#[test] +fn claims_with_different_reward_destination_is_ok() { + ExternalityBuilder::build().execute_with(|| { + initialize_first_block(); + + let developer = 1; + let staker = 2; + let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01)); + + // stake some tokens + let start_era = DappsStaking::current_era(); + assert_register(developer, &contract_id); + let stake_value = 100; + assert_bond_and_stake(staker, &contract_id, stake_value); + + // disable compounding mode, wait 3 eras + assert_set_reward_destination(staker, RewardDestination::FreeBalance); + advance_to_era(start_era + 1); + // ensure staker can claim rewards to wallet + assert_claim_staker(staker, &contract_id); + + // enable compounding mode, wait 3 eras + assert_set_reward_destination(staker, RewardDestination::StakeBalance); + advance_to_era(start_era + 2); + // ensure staker can claim with compounding + assert_claim_staker(staker, &contract_id); + }) +} + +#[test] +fn claiming_when_stakes_full_without_compounding_is_ok() { + ExternalityBuilder::build().execute_with(|| { + initialize_first_block(); + + let staker_id = 1; + let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01)); + // Insert a contract under registered contracts. + assert_register(10, &contract_id); + + // Stake with MAX_ERA_STAKE_VALUES - 1 on the same contract. It must work. 
+ let start_era = DappsStaking::current_era(); + for offset in 1..MAX_ERA_STAKE_VALUES { + assert_bond_and_stake(staker_id, &contract_id, 100); + advance_to_era(start_era + offset * 5); + } + + // Make sure reward_destination is set to StakeBalance + assert_set_reward_destination(staker_id, RewardDestination::StakeBalance); + + // claim and restake once, so there's a claim record for the for the current era in the stakes vec + assert_claim_staker(staker_id, &contract_id); + + // making another gap in eras and trying to claim and restake would exceed MAX_ERA_STAKE_VALUES + advance_to_era(DappsStaking::current_era() + 1); + assert_noop!( + DappsStaking::claim_staker(RuntimeOrigin::signed(staker_id), contract_id), + Error::::TooManyEraStakeValues + ); + + // set reward_destination to FreeBalance (disable restaking) + assert_set_reward_destination(staker_id, RewardDestination::FreeBalance); + + // claiming should work again + assert_claim_staker(staker_id, &contract_id); + }) +} + +#[test] +fn changing_reward_destination_for_empty_ledger_is_not_ok() { + ExternalityBuilder::build().execute_with(|| { + initialize_first_block(); + let staker = 1; + assert_noop!( + DappsStaking::set_reward_destination( + RuntimeOrigin::signed(staker), + RewardDestination::FreeBalance + ), + Error::::NotActiveStaker + ); + }); +} + +#[test] +fn claim_dapp_with_zero_stake_periods_is_ok() { + ExternalityBuilder::build().execute_with(|| { + initialize_first_block(); + + let developer = 1; + let staker = 2; + let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01)); + + // Prepare scenario: + + let start_era = DappsStaking::current_era(); + assert_register(developer, &contract_id); + let stake_value = 100; + assert_bond_and_stake(staker, &contract_id, stake_value); + + advance_to_era(start_era + 5); + let first_full_unstake_era = DappsStaking::current_era(); + assert_unbond_and_unstake(staker, &contract_id, stake_value); + + advance_to_era(DappsStaking::current_era() + 7); + let 
restake_era = DappsStaking::current_era(); + assert_bond_and_stake(staker, &contract_id, stake_value); + + advance_to_era(DappsStaking::current_era() + 4); + let second_full_unstake_era = DappsStaking::current_era(); + assert_unbond_and_unstake(staker, &contract_id, stake_value); + advance_to_era(DappsStaking::current_era() + 10); + + // Ensure that first interval can be claimed + for era in start_era..first_full_unstake_era { + assert_claim_dapp(&contract_id, era); + } + + // Ensure that the empty interval cannot be claimed + for era in first_full_unstake_era..restake_era { + assert_noop!( + DappsStaking::claim_dapp( + RuntimeOrigin::signed(developer), + contract_id.clone(), + era + ), + Error::::NotStakedContract + ); + } + + // Ensure that second interval can be claimed + for era in restake_era..second_full_unstake_era { + assert_claim_dapp(&contract_id, era); + } + + // Ensure no more claims are possible since contract was fully unstaked + assert_noop!( + DappsStaking::claim_dapp( + RuntimeOrigin::signed(developer), + contract_id.clone(), + second_full_unstake_era + ), + Error::::NotStakedContract + ); + + // Now stake again and ensure contract can once again be claimed + let last_claim_era = DappsStaking::current_era(); + assert_bond_and_stake(staker, &contract_id, stake_value); + advance_to_era(last_claim_era + 1); + assert_claim_dapp(&contract_id, last_claim_era); + }) +} + +#[test] +fn maintenance_mode_is_ok() { + ExternalityBuilder::build().execute_with(|| { + initialize_first_block(); + + assert_ok!(DappsStaking::ensure_pallet_enabled()); + assert!(!PalletDisabled::::exists()); + + assert_ok!(DappsStaking::maintenance_mode(RuntimeOrigin::root(), true)); + assert!(PalletDisabled::::exists()); + System::assert_last_event(mock::RuntimeEvent::DappsStaking(Event::MaintenanceMode( + true, + ))); + + let account = 1; + let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01)); + + // + // 1 + assert_noop!( + DappsStaking::register(RuntimeOrigin::root(), 
account, contract_id), + Error::::Disabled + ); + assert_noop!( + DappsStaking::unregister(RuntimeOrigin::root(), contract_id), + Error::::Disabled + ); + assert_noop!( + DappsStaking::withdraw_from_unregistered(RuntimeOrigin::signed(account), contract_id), + Error::::Disabled + ); + + // + // 2 + assert_noop!( + DappsStaking::bond_and_stake(RuntimeOrigin::signed(account), contract_id, 100), + Error::::Disabled + ); + assert_noop!( + DappsStaking::unbond_and_unstake(RuntimeOrigin::signed(account), contract_id, 100), + Error::::Disabled + ); + assert_noop!( + DappsStaking::claim_dapp(RuntimeOrigin::signed(account), contract_id, 5), + Error::::Disabled + ); + assert_noop!( + DappsStaking::claim_staker(RuntimeOrigin::signed(account), contract_id), + Error::::Disabled + ); + assert_noop!( + DappsStaking::withdraw_unbonded(RuntimeOrigin::signed(account)), + Error::::Disabled + ); + assert_noop!( + DappsStaking::nomination_transfer( + RuntimeOrigin::signed(account), + contract_id, + 100, + contract_id, + ), + Error::::Disabled + ); + + // + // 3 + assert_noop!( + DappsStaking::force_new_era(RuntimeOrigin::root()), + Error::::Disabled + ); + // shouldn't do anything since we're in maintenance mode + assert_eq!(DappsStaking::on_initialize(3), Weight::zero()); + + // + // 4 + assert_ok!(DappsStaking::maintenance_mode(RuntimeOrigin::root(), false)); + System::assert_last_event(mock::RuntimeEvent::DappsStaking(Event::MaintenanceMode( + false, + ))); + assert_register(account, &contract_id); + }) +} + +#[test] +fn maintenance_mode_no_change() { + ExternalityBuilder::build().execute_with(|| { + initialize_first_block(); + + // Expect an error since maintenance mode is already disabled + assert_ok!(DappsStaking::ensure_pallet_enabled()); + assert_noop!( + DappsStaking::maintenance_mode(RuntimeOrigin::root(), false), + Error::::NoMaintenanceModeChange + ); + + // Same for the case when maintenance mode is already enabled + 
assert_ok!(DappsStaking::maintenance_mode(RuntimeOrigin::root(), true)); + assert_noop!( + DappsStaking::maintenance_mode(RuntimeOrigin::root(), true), + Error::::NoMaintenanceModeChange + ); + }) +} + +#[test] +fn dev_stakers_split_util() { + let base_stakers_reward = 7 * 11 * 13 * 17; + let base_dapps_reward = 19 * 23 * 31; + let staked_on_contract = 123456; + let total_staked = staked_on_contract * 3; + + // Prepare structs + let staking_points = ContractStakeInfo:: { + total: staked_on_contract, + number_of_stakers: 10, + contract_reward_claimed: false, + }; + let era_info = EraInfo:: { + rewards: RewardInfo { + dapps: base_dapps_reward, + stakers: base_stakers_reward, + }, + staked: total_staked, + locked: total_staked, + }; + + let (dev_reward, stakers_reward) = DappsStaking::dev_stakers_split(&staking_points, &era_info); + + let contract_stake_ratio = Perbill::from_rational(staked_on_contract, total_staked); + let calculated_stakers_reward = contract_stake_ratio * base_stakers_reward; + let calculated_dev_reward = contract_stake_ratio * base_dapps_reward; + assert_eq!(calculated_dev_reward, dev_reward); + assert_eq!(calculated_stakers_reward, stakers_reward); + + assert_eq!( + calculated_stakers_reward + calculated_dev_reward, + dev_reward + stakers_reward + ); +} + +#[test] +pub fn tvl_util_test() { + ExternalityBuilder::build().execute_with(|| { + // Ensure TVL is zero before first block and also after first block + assert!(DappsStaking::tvl().is_zero()); + initialize_first_block(); + assert!(DappsStaking::tvl().is_zero()); + + let developer = 1; + let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01)); + assert_register(developer, &contract_id); + + // Expect TVL to change as we bond&stake more + let iterations = 10; + let stake_value = 100; + for x in 1..=iterations { + assert_bond_and_stake(developer, &contract_id, stake_value); + assert_eq!(DappsStaking::tvl(), stake_value * x); + } + + // Era advancement should have no effect on TVL + 
advance_to_era(5); + assert_eq!(DappsStaking::tvl(), stake_value * iterations); + }) +} + +#[test] +pub fn set_contract_stake_info() { + ExternalityBuilder::build().execute_with(|| { + initialize_first_block(); + + let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01)); + assert_register(10, &contract_id); + + let staker_id = 1; + assert_bond_and_stake(staker_id, &contract_id, 1000); + + // Read current contract stake info, then overwrite it with different value + let original_contract_stake_info = + DappsStaking::contract_stake_info(&contract_id, 1).unwrap(); + let mut modified_info = original_contract_stake_info.clone(); + modified_info.total = modified_info.total + 17; + ContractEraStake::::insert(&contract_id, 1, modified_info); + + // Ensure only root can call it + assert_noop!( + DappsStaking::set_contract_stake_info( + RuntimeOrigin::signed(1), + contract_id.clone(), + 1, + original_contract_stake_info.clone() + ), + BadOrigin + ); + + // Verify we can fix the corrupted stroage + assert_ne!( + ContractEraStake::::get(&contract_id, 1).unwrap(), + original_contract_stake_info + ); + assert_ok!(DappsStaking::set_contract_stake_info( + RuntimeOrigin::root(), + contract_id.clone(), + 1, + original_contract_stake_info.clone() + )); + assert_eq!( + ContractEraStake::::get(&contract_id, 1).unwrap(), + original_contract_stake_info + ); + }) +} + +#[test] +fn custom_max_encoded_len() { + let max_unbonding_info_len = 10 * (4 + 16) + 1; + assert_eq!( + UnbondingInfo::::max_encoded_len(), + max_unbonding_info_len as usize + ); + + let max_staker_info_len = 10 * (4 + 16) + 1; + assert_eq!( + StakerInfo::::max_encoded_len(), + max_staker_info_len as usize + ); +} + +#[test] +fn burn_stale_reward_is_ok() { + ExternalityBuilder::build().execute_with(|| { + initialize_first_block(); + + let developer = 1; + let staker = 3; + let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01)); + + let start_era = DappsStaking::current_era(); + + // Register & stake on 
contract + assert_register(developer, &contract_id); + assert_bond_and_stake(staker, &contract_id, 100); + + // Advance enough eras so stale rewards become burnable + let eras_advanced = REWARD_RETENTION_PERIOD + 1; + advance_to_era(start_era + eras_advanced); + assert_unregister(developer, &contract_id); + + assert_burn_stale_reward(&contract_id, start_era); + }) +} + +#[test] +fn burn_stale_reward_from_registered_dapp_fails() { + ExternalityBuilder::build().execute_with(|| { + initialize_first_block(); + + let developer = 1; + let staker = 3; + let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01)); + + let start_era = DappsStaking::current_era(); + + // Register & stake on contract + assert_register(developer, &contract_id); + assert_bond_and_stake(staker, &contract_id, 100); + + // Advance enough eras so stale rewards would become burnable, in case dapp was unregistered + let eras_advanced = REWARD_RETENTION_PERIOD; + advance_to_era(start_era + eras_advanced); + + // Rewards shouldn't be burnable since retention period hasn't expired yet + assert_noop!( + DappsStaking::burn_stale_reward(RuntimeOrigin::root(), contract_id, start_era,), + Error::::NotUnregisteredContract + ); + }) +} + +#[test] +fn burn_stale_reward_before_retention_period_finished_fails() { + ExternalityBuilder::build().execute_with(|| { + initialize_first_block(); + + let developer = 1; + let staker = 3; + let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01)); + + let start_era = DappsStaking::current_era(); + + // Register & stake on contract + assert_register(developer, &contract_id); + assert_bond_and_stake(staker, &contract_id, 100); + + // Advance enough eras so stale rewards become burnable + let eras_advanced = REWARD_RETENTION_PERIOD; + advance_to_era(start_era + eras_advanced); + assert_unregister(developer, &contract_id); + + // Rewards shouldn't be burnable since retention period hasn't expired yet + assert_noop!( + 
DappsStaking::burn_stale_reward(RuntimeOrigin::root(), contract_id, start_era,), + Error::::EraOutOfBounds + ); + }) +} + +#[test] +fn burn_stale_reward_negative_checks() { + ExternalityBuilder::build().execute_with(|| { + initialize_first_block(); + + let developer = 1; + let staker = 3; + let contract_id = MockSmartContract::Evm(H160::repeat_byte(0x01)); + + // Cannot burn from non-existing contract + assert_noop!( + DappsStaking::burn_stale_reward(RuntimeOrigin::root(), contract_id, 1,), + Error::::NotOperatedContract + ); + + // Cannot burn unless called with root privileges + assert_noop!( + DappsStaking::burn_stale_reward(RuntimeOrigin::signed(developer), contract_id, 1,), + BadOrigin + ); + + // Register & stake on contract + assert_register(developer, &contract_id); + assert_bond_and_stake(staker, &contract_id, 100); + + // Advance enough eras so stale rewards become burnable + let start_era = DappsStaking::current_era(); + let eras_advanced = REWARD_RETENTION_PERIOD + 2; + advance_to_era(start_era + eras_advanced); + assert_unregister(developer, &contract_id); + + // Claim them (before they are burned) + assert_claim_dapp(&contract_id, start_era); + + // No longer possible to burn if reward was claimed + assert_noop!( + DappsStaking::burn_stale_reward(RuntimeOrigin::root(), contract_id, start_era,), + Error::::AlreadyClaimedInThisEra + ); + }) +} diff --git a/pallets/dapps-staking/src/tests_lib.rs b/pallets/dapps-staking/src/tests_lib.rs new file mode 100644 index 0000000000..dfe0e8809b --- /dev/null +++ b/pallets/dapps-staking/src/tests_lib.rs @@ -0,0 +1,310 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +use super::*; +use frame_support::assert_ok; +use mock::Balance; + +#[test] +fn unbonding_info_test() { + let mut unbonding_info = UnbondingInfo::::default(); + + // assert basic ops on empty info + assert!(unbonding_info.is_empty()); + assert!(unbonding_info.len().is_zero()); + let (first_info, second_info) = unbonding_info.clone().partition(2); + assert!(first_info.is_empty()); + assert!(second_info.is_empty()); + + // Prepare unlocking chunks. + let count = 5; + let base_amount: Balance = 100; + let base_unlock_era = 4 * count; + let mut chunks = vec![]; + for x in 1_u32..=count as u32 { + chunks.push(UnlockingChunk { + amount: base_amount * x as Balance, + unlock_era: base_unlock_era - 3 * x, + }); + } + + // Add one unlocking chunk and verify basic ops. 
+ unbonding_info.add(chunks[0 as usize]); + + assert!(!unbonding_info.is_empty()); + assert_eq!(1, unbonding_info.len()); + assert_eq!(chunks[0 as usize].amount, unbonding_info.sum()); + + let (first_info, second_info) = unbonding_info.clone().partition(base_unlock_era); + assert_eq!(1, first_info.len()); + assert_eq!(chunks[0 as usize].amount, first_info.sum()); + assert!(second_info.is_empty()); + + // Add remainder and verify basic ops + for x in unbonding_info.len() as usize..chunks.len() { + unbonding_info.add(chunks[x]); + // Ensure internal vec is sorted + assert!(unbonding_info + .vec() + .windows(2) + .all(|w| w[0].unlock_era <= w[1].unlock_era)); + } + assert_eq!(chunks.len(), unbonding_info.len() as usize); + let total: Balance = chunks.iter().map(|c| c.amount).sum(); + assert_eq!(total, unbonding_info.sum()); + + let partition_era = chunks[2].unlock_era + 1; + let (first_info, second_info) = unbonding_info.clone().partition(partition_era); + assert_eq!(3, first_info.len()); + assert_eq!(2, second_info.len()); + assert_eq!(unbonding_info.sum(), first_info.sum() + second_info.sum()); +} + +#[test] +fn staker_info_basic() { + let staker_info = StakerInfo::::default(); + + assert!(staker_info.is_empty()); + assert_eq!(staker_info.len(), 0); + assert_eq!(staker_info.latest_staked_value(), 0); +} + +#[test] +fn staker_info_stake_ops() { + let mut staker_info = StakerInfo::::default(); + + // Do first stake and verify it + let first_era = 1; + let first_stake = 100; + assert_ok!(staker_info.stake(first_era, first_stake)); + assert!(!staker_info.is_empty()); + assert_eq!(staker_info.len(), 1); + assert_eq!(staker_info.latest_staked_value(), first_stake); + + // Do second stake and verify it + let second_era = first_era + 1; + let second_stake = 200; + assert_ok!(staker_info.stake(second_era, second_stake)); + assert_eq!(staker_info.len(), 2); + assert_eq!( + staker_info.latest_staked_value(), + first_stake + second_stake + ); + + // Do third stake and verify it 
+ let third_era = second_era + 2; // must be greater than 1 so a `hole` is present + let third_stake = 333; + assert_ok!(staker_info.stake(third_era, third_stake)); + assert_eq!( + staker_info.latest_staked_value(), + first_stake + second_stake + third_stake + ); + assert_eq!(staker_info.len(), 3); + + // Do fourth stake and verify it + let fourth_era = third_era; // ensure that multi-stake in same era works + let fourth_stake = 444; + assert_ok!(staker_info.stake(fourth_era, fourth_stake)); + assert_eq!(staker_info.len(), 3); + assert_eq!( + staker_info.latest_staked_value(), + first_stake + second_stake + third_stake + fourth_stake + ); +} + +#[test] +fn staker_info_stake_error() { + let mut staker_info = StakerInfo::::default(); + assert_ok!(staker_info.stake(5, 100)); + if let Err(_) = staker_info.stake(4, 100) { + } else { + panic!("Mustn't be able to stake with past era."); + } +} + +#[test] +fn staker_info_unstake_ops() { + let mut staker_info = StakerInfo::::default(); + + // Unstake on empty staker_info + assert!(staker_info.is_empty()); + assert_ok!(staker_info.unstake(1, 100)); + assert!(staker_info.is_empty()); + + // Prepare some stakes + let (first_era, second_era) = (1, 3); + let (first_stake, second_stake) = (110, 222); + let total_staked = first_stake + second_stake; + assert_ok!(staker_info.stake(first_era, first_stake)); + assert_ok!(staker_info.stake(second_era, second_stake)); + + // Unstake an existing EraStake + let first_unstake_era = second_era; + let first_unstake = 55; + assert_ok!(staker_info.unstake(first_unstake_era, first_unstake)); + assert_eq!(staker_info.len(), 2); + assert_eq!( + staker_info.latest_staked_value(), + total_staked - first_unstake + ); + let total_staked = total_staked - first_unstake; + + // Unstake an non-existing EraStake + let second_unstake_era = first_unstake_era + 2; + let second_unstake = 37; + assert_ok!(staker_info.unstake(second_unstake_era, second_unstake)); + assert_eq!(staker_info.len(), 3); + 
assert_eq!( + staker_info.latest_staked_value(), + total_staked - second_unstake + ); + let total_staked = total_staked - second_unstake; + + // Save this for later + let temp_staker_info = staker_info.clone(); + + // Fully unstake existing EraStake + assert_ok!(staker_info.unstake(second_unstake_era, total_staked)); + assert_eq!(staker_info.len(), 3); + assert_eq!(staker_info.latest_staked_value(), 0); + + // Fully unstake non-existing EraStake + let mut staker_info = temp_staker_info; // restore + assert_ok!(staker_info.unstake(second_unstake_era + 1, total_staked)); + assert_eq!(staker_info.len(), 4); + assert_eq!(staker_info.latest_staked_value(), 0); +} + +#[test] +fn stake_after_full_unstake() { + let mut staker_info = StakerInfo::::default(); + + // Stake some amount + let first_era = 1; + let first_stake = 100; + assert_ok!(staker_info.stake(first_era, first_stake)); + assert_eq!(staker_info.latest_staked_value(), first_stake); + + // Unstake all in next era + let unstake_era = first_era + 1; + assert_ok!(staker_info.unstake(unstake_era, first_stake)); + assert!(staker_info.latest_staked_value().is_zero()); + assert_eq!(staker_info.len(), 2); + + // Stake again in the next era + let restake_era = unstake_era + 2; + let restake_value = 57; + assert_ok!(staker_info.stake(restake_era, restake_value)); + assert_eq!(staker_info.latest_staked_value(), restake_value); + assert_eq!(staker_info.len(), 3); +} + +#[test] +fn staker_info_unstake_error() { + let mut staker_info = StakerInfo::::default(); + assert_ok!(staker_info.stake(5, 100)); + if let Err(_) = staker_info.unstake(4, 100) { + } else { + panic!("Mustn't be able to unstake with past era."); + } +} + +#[test] +fn staker_info_claim_ops_basic() { + let mut staker_info = StakerInfo::::default(); + + // Empty staker info + assert!(staker_info.is_empty()); + assert_eq!(staker_info.claim(), (0, 0)); + assert!(staker_info.is_empty()); + + // Only one unstaked exists + assert_ok!(staker_info.stake(1, 100)); + 
assert_ok!(staker_info.unstake(1, 100)); + assert!(staker_info.is_empty()); + assert_eq!(staker_info.claim(), (0, 0)); + assert!(staker_info.is_empty()); + + // Only one staked exists + staker_info = StakerInfo::::default(); + let stake_era = 1; + let stake_value = 123; + assert_ok!(staker_info.stake(stake_era, stake_value)); + assert_eq!(staker_info.len(), 1); + assert_eq!(staker_info.claim(), (stake_era, stake_value)); + assert_eq!(staker_info.len(), 1); +} + +#[test] +fn staker_info_claim_ops_advanced() { + let mut staker_info = StakerInfo::::default(); + + // Two consecutive eras staked, third era contains a gap with the second one + let (first_stake_era, second_stake_era, third_stake_era) = (1, 2, 4); + let (first_stake_value, second_stake_value, third_stake_value) = (123, 456, 789); + + assert_ok!(staker_info.stake(first_stake_era, first_stake_value)); + assert_ok!(staker_info.stake(second_stake_era, second_stake_value)); + assert_ok!(staker_info.stake(third_stake_era, third_stake_value)); + + // First claim + assert_eq!(staker_info.len(), 3); + assert_eq!(staker_info.claim(), (first_stake_era, first_stake_value)); + assert_eq!(staker_info.len(), 2); + + // Second claim + assert_eq!( + staker_info.claim(), + (second_stake_era, first_stake_value + second_stake_value) + ); + assert_eq!(staker_info.len(), 2); + + // Third claim, expect that 3rd era stake is the same as second + assert_eq!( + staker_info.claim(), + (3, first_stake_value + second_stake_value) + ); + assert_eq!(staker_info.len(), 1); + + // Fully unstake 5th era + let total_staked = first_stake_value + second_stake_value + third_stake_value; + assert_ok!(staker_info.unstake(5, total_staked)); + assert_eq!(staker_info.len(), 2); + + // Stake 7th era (so after it was unstaked) + let fourth_era = 7; + let fourth_stake_value = 147; + assert_ok!(staker_info.stake(fourth_era, fourth_stake_value)); + assert_eq!(staker_info.len(), 3); + + // Claim 4th era + assert_eq!(staker_info.claim(), (third_stake_era, 
total_staked)); + assert_eq!(staker_info.len(), 1); + + // Claim 7th era + assert_eq!(staker_info.claim(), (fourth_era, fourth_stake_value)); + assert_eq!(staker_info.len(), 1); + assert_eq!(staker_info.latest_staked_value(), fourth_stake_value); + + // Claim future eras + for x in 1..10 { + assert_eq!(staker_info.claim(), (fourth_era + x, fourth_stake_value)); + assert_eq!(staker_info.len(), 1); + assert_eq!(staker_info.latest_staked_value(), fourth_stake_value); + } +} diff --git a/pallets/dapps-staking/src/weights.rs b/pallets/dapps-staking/src/weights.rs new file mode 100644 index 0000000000..4e02ed393c --- /dev/null +++ b/pallets/dapps-staking/src/weights.rs @@ -0,0 +1,422 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +//! Autogenerated weights for pallet_dapps_staking +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-04-04, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `devserver-01`, CPU: `Intel(R) Xeon(R) E-2236 CPU @ 3.40GHz` +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("shibuya-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/release/astar-collator +// benchmark +// pallet +// --chain=shibuya-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_dapps_staking +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./benchmark-results/dapps_staking_weights.rs +// --template=./scripts/templates/weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_dapps_staking. +pub trait WeightInfo { + fn register() -> Weight; + fn unregister() -> Weight; + fn withdraw_from_unregistered() -> Weight; + fn bond_and_stake() -> Weight; + fn unbond_and_unstake() -> Weight; + fn withdraw_unbonded() -> Weight; + fn nomination_transfer() -> Weight; + fn claim_staker_with_restake() -> Weight; + fn claim_staker_without_restake() -> Weight; + fn claim_dapp() -> Weight; + fn force_new_era() -> Weight; + fn maintenance_mode() -> Weight; + fn set_reward_destination() -> Weight; +} + +/// Weights for pallet_dapps_staking using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + // Storage: DappsStaking RegisteredDevelopers (r:1 w:1) + // Proof: DappsStaking RegisteredDevelopers (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) + // Storage: DappsStaking RegisteredDapps (r:1 w:1) + // Proof: DappsStaking RegisteredDapps (max_values: None, max_size: Some(86), added: 2561, mode: MaxEncodedLen) + fn register() -> Weight { + // Minimum execution time: 25_265 nanoseconds. 
+ Weight::from_ref_time(26_091_000) + .saturating_add(Weight::from_proof_size(5117)) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + // Storage: DappsStaking RegisteredDapps (r:1 w:1) + // Proof: DappsStaking RegisteredDapps (max_values: None, max_size: Some(86), added: 2561, mode: MaxEncodedLen) + // Storage: System Account (r:1 w:1) + // Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + fn unregister() -> Weight { + // Minimum execution time: 27_241 nanoseconds. + Weight::from_ref_time(27_411_000) + .saturating_add(Weight::from_proof_size(5164)) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + // Storage: DappsStaking RegisteredDapps (r:1 w:0) + // Proof: DappsStaking RegisteredDapps (max_values: None, max_size: Some(86), added: 2561, mode: MaxEncodedLen) + // Storage: DappsStaking GeneralStakerInfo (r:1 w:1) + // Proof: DappsStaking GeneralStakerInfo (max_values: None, max_size: Some(298), added: 2773, mode: MaxEncodedLen) + // Storage: DappsStaking Ledger (r:1 w:1) + // Proof: DappsStaking Ledger (max_values: None, max_size: Some(266), added: 2741, mode: MaxEncodedLen) + // Storage: Balances Locks (r:1 w:1) + // Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + // Storage: System Account (r:1 w:1) + // Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + // Storage: DappsStaking GeneralEraInfo (r:1 w:1) + // Proof: DappsStaking GeneralEraInfo (max_values: None, max_size: Some(76), added: 2551, mode: MaxEncodedLen) + fn withdraw_from_unregistered() -> Weight { + // Minimum execution time: 44_491 nanoseconds. 
+ Weight::from_ref_time(45_042_000) + .saturating_add(Weight::from_proof_size(17003)) + .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(T::DbWeight::get().writes(5_u64)) + } + // Storage: DappsStaking RegisteredDapps (r:1 w:0) + // Proof: DappsStaking RegisteredDapps (max_values: None, max_size: Some(86), added: 2561, mode: MaxEncodedLen) + // Storage: DappsStaking Ledger (r:1 w:1) + // Proof: DappsStaking Ledger (max_values: None, max_size: Some(266), added: 2741, mode: MaxEncodedLen) + // Storage: DappsStaking ContractEraStake (r:1 w:1) + // Proof: DappsStaking ContractEraStake (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + // Storage: DappsStaking GeneralStakerInfo (r:1 w:1) + // Proof: DappsStaking GeneralStakerInfo (max_values: None, max_size: Some(298), added: 2773, mode: MaxEncodedLen) + // Storage: DappsStaking GeneralEraInfo (r:1 w:1) + // Proof: DappsStaking GeneralEraInfo (max_values: None, max_size: Some(76), added: 2551, mode: MaxEncodedLen) + // Storage: Balances Locks (r:1 w:1) + // Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + fn bond_and_stake() -> Weight { + // Minimum execution time: 43_099 nanoseconds. 
+ Weight::from_ref_time(43_939_000) + .saturating_add(Weight::from_proof_size(16957)) + .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(T::DbWeight::get().writes(5_u64)) + } + // Storage: DappsStaking RegisteredDapps (r:1 w:0) + // Proof: DappsStaking RegisteredDapps (max_values: None, max_size: Some(86), added: 2561, mode: MaxEncodedLen) + // Storage: DappsStaking GeneralStakerInfo (r:1 w:1) + // Proof: DappsStaking GeneralStakerInfo (max_values: None, max_size: Some(298), added: 2773, mode: MaxEncodedLen) + // Storage: DappsStaking ContractEraStake (r:1 w:1) + // Proof: DappsStaking ContractEraStake (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + // Storage: DappsStaking Ledger (r:1 w:1) + // Proof: DappsStaking Ledger (max_values: None, max_size: Some(266), added: 2741, mode: MaxEncodedLen) + // Storage: Balances Locks (r:1 w:1) + // Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + // Storage: DappsStaking GeneralEraInfo (r:1 w:1) + // Proof: DappsStaking GeneralEraInfo (max_values: None, max_size: Some(76), added: 2551, mode: MaxEncodedLen) + fn unbond_and_unstake() -> Weight { + // Minimum execution time: 46_783 nanoseconds. + Weight::from_ref_time(47_487_000) + .saturating_add(Weight::from_proof_size(16957)) + .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(T::DbWeight::get().writes(5_u64)) + } + // Storage: DappsStaking Ledger (r:1 w:1) + // Proof: DappsStaking Ledger (max_values: None, max_size: Some(266), added: 2741, mode: MaxEncodedLen) + // Storage: Balances Locks (r:1 w:1) + // Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + // Storage: DappsStaking GeneralEraInfo (r:1 w:1) + // Proof: DappsStaking GeneralEraInfo (max_values: None, max_size: Some(76), added: 2551, mode: MaxEncodedLen) + fn withdraw_unbonded() -> Weight { + // Minimum execution time: 30_952 nanoseconds. 
+ Weight::from_ref_time(31_615_000) + .saturating_add(Weight::from_proof_size(9066)) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } + // Storage: DappsStaking RegisteredDapps (r:2 w:0) + // Proof: DappsStaking RegisteredDapps (max_values: None, max_size: Some(86), added: 2561, mode: MaxEncodedLen) + // Storage: DappsStaking GeneralStakerInfo (r:2 w:2) + // Proof: DappsStaking GeneralStakerInfo (max_values: None, max_size: Some(298), added: 2773, mode: MaxEncodedLen) + // Storage: DappsStaking ContractEraStake (r:2 w:2) + // Proof: DappsStaking ContractEraStake (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + fn nomination_transfer() -> Weight { + // Minimum execution time: 38_247 nanoseconds. + Weight::from_ref_time(38_832_000) + .saturating_add(Weight::from_proof_size(15782)) + .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + } + // Storage: DappsStaking GeneralStakerInfo (r:1 w:1) + // Proof: DappsStaking GeneralStakerInfo (max_values: None, max_size: Some(298), added: 2773, mode: MaxEncodedLen) + // Storage: DappsStaking RegisteredDapps (r:1 w:0) + // Proof: DappsStaking RegisteredDapps (max_values: None, max_size: Some(86), added: 2561, mode: MaxEncodedLen) + // Storage: DappsStaking ContractEraStake (r:2 w:1) + // Proof: DappsStaking ContractEraStake (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + // Storage: DappsStaking GeneralEraInfo (r:2 w:1) + // Proof: DappsStaking GeneralEraInfo (max_values: None, max_size: Some(76), added: 2551, mode: MaxEncodedLen) + // Storage: DappsStaking Ledger (r:1 w:1) + // Proof: DappsStaking Ledger (max_values: None, max_size: Some(266), added: 2741, mode: MaxEncodedLen) + // Storage: Balances Locks (r:1 w:1) + // Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + // Storage: System Account (r:1 w:1) + // Proof: 
System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + fn claim_staker_with_restake() -> Weight { + // Minimum execution time: 60_558 nanoseconds. + Weight::from_ref_time(61_264_000) + .saturating_add(Weight::from_proof_size(24668)) + .saturating_add(T::DbWeight::get().reads(9_u64)) + .saturating_add(T::DbWeight::get().writes(6_u64)) + } + // Storage: DappsStaking GeneralStakerInfo (r:1 w:1) + // Proof: DappsStaking GeneralStakerInfo (max_values: None, max_size: Some(298), added: 2773, mode: MaxEncodedLen) + // Storage: DappsStaking RegisteredDapps (r:1 w:0) + // Proof: DappsStaking RegisteredDapps (max_values: None, max_size: Some(86), added: 2561, mode: MaxEncodedLen) + // Storage: DappsStaking ContractEraStake (r:1 w:0) + // Proof: DappsStaking ContractEraStake (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + // Storage: DappsStaking GeneralEraInfo (r:1 w:0) + // Proof: DappsStaking GeneralEraInfo (max_values: None, max_size: Some(76), added: 2551, mode: MaxEncodedLen) + // Storage: DappsStaking Ledger (r:1 w:0) + // Proof: DappsStaking Ledger (max_values: None, max_size: Some(266), added: 2741, mode: MaxEncodedLen) + fn claim_staker_without_restake() -> Weight { + // Minimum execution time: 33_178 nanoseconds. 
+ Weight::from_ref_time(33_576_000) + .saturating_add(Weight::from_proof_size(13183)) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + // Storage: DappsStaking RegisteredDapps (r:1 w:0) + // Proof: DappsStaking RegisteredDapps (max_values: None, max_size: Some(86), added: 2561, mode: MaxEncodedLen) + // Storage: DappsStaking ContractEraStake (r:1 w:1) + // Proof: DappsStaking ContractEraStake (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + // Storage: DappsStaking GeneralEraInfo (r:1 w:0) + // Proof: DappsStaking GeneralEraInfo (max_values: None, max_size: Some(76), added: 2551, mode: MaxEncodedLen) + fn claim_dapp() -> Weight { + // Minimum execution time: 25_126 nanoseconds. + Weight::from_ref_time(25_489_000) + .saturating_add(Weight::from_proof_size(7669)) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + fn force_new_era() -> Weight { + // Minimum execution time: 3_446 nanoseconds. + Weight::from_ref_time(3_676_000) + .saturating_add(Weight::from_proof_size(0)) + } + fn maintenance_mode() -> Weight { + // Minimum execution time: 7_871 nanoseconds. + Weight::from_ref_time(8_137_000) + .saturating_add(Weight::from_proof_size(0)) + } + // Storage: DappsStaking Ledger (r:1 w:1) + // Proof: DappsStaking Ledger (max_values: None, max_size: Some(266), added: 2741, mode: MaxEncodedLen) + fn set_reward_destination() -> Weight { + // Minimum execution time: 15_697 nanoseconds. 
+ Weight::from_ref_time(16_009_000) + .saturating_add(Weight::from_proof_size(2741)) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + // Storage: DappsStaking RegisteredDevelopers (r:1 w:1) + // Proof: DappsStaking RegisteredDevelopers (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) + // Storage: DappsStaking RegisteredDapps (r:1 w:1) + // Proof: DappsStaking RegisteredDapps (max_values: None, max_size: Some(86), added: 2561, mode: MaxEncodedLen) + fn register() -> Weight { + // Minimum execution time: 25_265 nanoseconds. + Weight::from_ref_time(26_091_000) + .saturating_add(Weight::from_proof_size(5117)) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + // Storage: DappsStaking RegisteredDapps (r:1 w:1) + // Proof: DappsStaking RegisteredDapps (max_values: None, max_size: Some(86), added: 2561, mode: MaxEncodedLen) + // Storage: System Account (r:1 w:1) + // Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + fn unregister() -> Weight { + // Minimum execution time: 27_241 nanoseconds. 
+ Weight::from_ref_time(27_411_000) + .saturating_add(Weight::from_proof_size(5164)) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + // Storage: DappsStaking RegisteredDapps (r:1 w:0) + // Proof: DappsStaking RegisteredDapps (max_values: None, max_size: Some(86), added: 2561, mode: MaxEncodedLen) + // Storage: DappsStaking GeneralStakerInfo (r:1 w:1) + // Proof: DappsStaking GeneralStakerInfo (max_values: None, max_size: Some(298), added: 2773, mode: MaxEncodedLen) + // Storage: DappsStaking Ledger (r:1 w:1) + // Proof: DappsStaking Ledger (max_values: None, max_size: Some(266), added: 2741, mode: MaxEncodedLen) + // Storage: Balances Locks (r:1 w:1) + // Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + // Storage: System Account (r:1 w:1) + // Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + // Storage: DappsStaking GeneralEraInfo (r:1 w:1) + // Proof: DappsStaking GeneralEraInfo (max_values: None, max_size: Some(76), added: 2551, mode: MaxEncodedLen) + fn withdraw_from_unregistered() -> Weight { + // Minimum execution time: 44_491 nanoseconds. 
+ Weight::from_ref_time(45_042_000) + .saturating_add(Weight::from_proof_size(17003)) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(RocksDbWeight::get().writes(5_u64)) + } + // Storage: DappsStaking RegisteredDapps (r:1 w:0) + // Proof: DappsStaking RegisteredDapps (max_values: None, max_size: Some(86), added: 2561, mode: MaxEncodedLen) + // Storage: DappsStaking Ledger (r:1 w:1) + // Proof: DappsStaking Ledger (max_values: None, max_size: Some(266), added: 2741, mode: MaxEncodedLen) + // Storage: DappsStaking ContractEraStake (r:1 w:1) + // Proof: DappsStaking ContractEraStake (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + // Storage: DappsStaking GeneralStakerInfo (r:1 w:1) + // Proof: DappsStaking GeneralStakerInfo (max_values: None, max_size: Some(298), added: 2773, mode: MaxEncodedLen) + // Storage: DappsStaking GeneralEraInfo (r:1 w:1) + // Proof: DappsStaking GeneralEraInfo (max_values: None, max_size: Some(76), added: 2551, mode: MaxEncodedLen) + // Storage: Balances Locks (r:1 w:1) + // Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + fn bond_and_stake() -> Weight { + // Minimum execution time: 43_099 nanoseconds. 
+ Weight::from_ref_time(43_939_000) + .saturating_add(Weight::from_proof_size(16957)) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(RocksDbWeight::get().writes(5_u64)) + } + // Storage: DappsStaking RegisteredDapps (r:1 w:0) + // Proof: DappsStaking RegisteredDapps (max_values: None, max_size: Some(86), added: 2561, mode: MaxEncodedLen) + // Storage: DappsStaking GeneralStakerInfo (r:1 w:1) + // Proof: DappsStaking GeneralStakerInfo (max_values: None, max_size: Some(298), added: 2773, mode: MaxEncodedLen) + // Storage: DappsStaking ContractEraStake (r:1 w:1) + // Proof: DappsStaking ContractEraStake (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + // Storage: DappsStaking Ledger (r:1 w:1) + // Proof: DappsStaking Ledger (max_values: None, max_size: Some(266), added: 2741, mode: MaxEncodedLen) + // Storage: Balances Locks (r:1 w:1) + // Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + // Storage: DappsStaking GeneralEraInfo (r:1 w:1) + // Proof: DappsStaking GeneralEraInfo (max_values: None, max_size: Some(76), added: 2551, mode: MaxEncodedLen) + fn unbond_and_unstake() -> Weight { + // Minimum execution time: 46_783 nanoseconds. + Weight::from_ref_time(47_487_000) + .saturating_add(Weight::from_proof_size(16957)) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(RocksDbWeight::get().writes(5_u64)) + } + // Storage: DappsStaking Ledger (r:1 w:1) + // Proof: DappsStaking Ledger (max_values: None, max_size: Some(266), added: 2741, mode: MaxEncodedLen) + // Storage: Balances Locks (r:1 w:1) + // Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + // Storage: DappsStaking GeneralEraInfo (r:1 w:1) + // Proof: DappsStaking GeneralEraInfo (max_values: None, max_size: Some(76), added: 2551, mode: MaxEncodedLen) + fn withdraw_unbonded() -> Weight { + // Minimum execution time: 30_952 nanoseconds. 
+ Weight::from_ref_time(31_615_000) + .saturating_add(Weight::from_proof_size(9066)) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } + // Storage: DappsStaking RegisteredDapps (r:2 w:0) + // Proof: DappsStaking RegisteredDapps (max_values: None, max_size: Some(86), added: 2561, mode: MaxEncodedLen) + // Storage: DappsStaking GeneralStakerInfo (r:2 w:2) + // Proof: DappsStaking GeneralStakerInfo (max_values: None, max_size: Some(298), added: 2773, mode: MaxEncodedLen) + // Storage: DappsStaking ContractEraStake (r:2 w:2) + // Proof: DappsStaking ContractEraStake (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + fn nomination_transfer() -> Weight { + // Minimum execution time: 38_247 nanoseconds. + Weight::from_ref_time(38_832_000) + .saturating_add(Weight::from_proof_size(15782)) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } + // Storage: DappsStaking GeneralStakerInfo (r:1 w:1) + // Proof: DappsStaking GeneralStakerInfo (max_values: None, max_size: Some(298), added: 2773, mode: MaxEncodedLen) + // Storage: DappsStaking RegisteredDapps (r:1 w:0) + // Proof: DappsStaking RegisteredDapps (max_values: None, max_size: Some(86), added: 2561, mode: MaxEncodedLen) + // Storage: DappsStaking ContractEraStake (r:2 w:1) + // Proof: DappsStaking ContractEraStake (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + // Storage: DappsStaking GeneralEraInfo (r:2 w:1) + // Proof: DappsStaking GeneralEraInfo (max_values: None, max_size: Some(76), added: 2551, mode: MaxEncodedLen) + // Storage: DappsStaking Ledger (r:1 w:1) + // Proof: DappsStaking Ledger (max_values: None, max_size: Some(266), added: 2741, mode: MaxEncodedLen) + // Storage: Balances Locks (r:1 w:1) + // Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + // Storage: System Account (r:1 w:1) + // 
Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + fn claim_staker_with_restake() -> Weight { + // Minimum execution time: 60_558 nanoseconds. + Weight::from_ref_time(61_264_000) + .saturating_add(Weight::from_proof_size(24668)) + .saturating_add(RocksDbWeight::get().reads(9_u64)) + .saturating_add(RocksDbWeight::get().writes(6_u64)) + } + // Storage: DappsStaking GeneralStakerInfo (r:1 w:1) + // Proof: DappsStaking GeneralStakerInfo (max_values: None, max_size: Some(298), added: 2773, mode: MaxEncodedLen) + // Storage: DappsStaking RegisteredDapps (r:1 w:0) + // Proof: DappsStaking RegisteredDapps (max_values: None, max_size: Some(86), added: 2561, mode: MaxEncodedLen) + // Storage: DappsStaking ContractEraStake (r:1 w:0) + // Proof: DappsStaking ContractEraStake (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + // Storage: DappsStaking GeneralEraInfo (r:1 w:0) + // Proof: DappsStaking GeneralEraInfo (max_values: None, max_size: Some(76), added: 2551, mode: MaxEncodedLen) + // Storage: DappsStaking Ledger (r:1 w:0) + // Proof: DappsStaking Ledger (max_values: None, max_size: Some(266), added: 2741, mode: MaxEncodedLen) + fn claim_staker_without_restake() -> Weight { + // Minimum execution time: 33_178 nanoseconds. 
+ Weight::from_ref_time(33_576_000) + .saturating_add(Weight::from_proof_size(13183)) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + // Storage: DappsStaking RegisteredDapps (r:1 w:0) + // Proof: DappsStaking RegisteredDapps (max_values: None, max_size: Some(86), added: 2561, mode: MaxEncodedLen) + // Storage: DappsStaking ContractEraStake (r:1 w:1) + // Proof: DappsStaking ContractEraStake (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + // Storage: DappsStaking GeneralEraInfo (r:1 w:0) + // Proof: DappsStaking GeneralEraInfo (max_values: None, max_size: Some(76), added: 2551, mode: MaxEncodedLen) + fn claim_dapp() -> Weight { + // Minimum execution time: 25_126 nanoseconds. + Weight::from_ref_time(25_489_000) + .saturating_add(Weight::from_proof_size(7669)) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + fn force_new_era() -> Weight { + // Minimum execution time: 3_446 nanoseconds. + Weight::from_ref_time(3_676_000) + .saturating_add(Weight::from_proof_size(0)) + } + fn maintenance_mode() -> Weight { + // Minimum execution time: 7_871 nanoseconds. + Weight::from_ref_time(8_137_000) + .saturating_add(Weight::from_proof_size(0)) + } + // Storage: DappsStaking Ledger (r:1 w:1) + // Proof: DappsStaking Ledger (max_values: None, max_size: Some(266), added: 2741, mode: MaxEncodedLen) + fn set_reward_destination() -> Weight { + // Minimum execution time: 15_697 nanoseconds. 
+ Weight::from_ref_time(16_009_000) + .saturating_add(Weight::from_proof_size(2741)) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } +} \ No newline at end of file diff --git a/pallets/pallet-xcm/Cargo.toml b/pallets/pallet-xcm/Cargo.toml new file mode 100644 index 0000000000..ec72ad31e5 --- /dev/null +++ b/pallets/pallet-xcm/Cargo.toml @@ -0,0 +1,58 @@ +[package] +authors = ["Parity Technologies ", "Stake Technologies "] +name = "pallet-xcm" +version = "0.9.39" +edition.workspace = true +homepage.workspace = true +repository.workspace = true + +[dependencies] +bounded-collections = { workspace = true } +log = { workspace = true } +parity-scale-codec = { workspace = true } +scale-info = { workspace = true } +serde = { workspace = true, optional = true } + +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } + +xcm = { workspace = true } +xcm-executor = { workspace = true } + +frame-benchmarking = { workspace = true, optional = true } + +[dev-dependencies] +pallet-balances = { workspace = true } +polkadot-parachain = { workspace = true } +polkadot-runtime-parachains = { workspace = true } + +xcm-builder = { workspace = true, features = ["std"] } + +[features] +default = ["std"] +std = [ + "parity-scale-codec/std", + "bounded-collections/std", + "scale-info/std", + "serde", + "sp-std/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "frame-benchmarking?/std", + "frame-support/std", + "frame-system/std", + "xcm/std", + "xcm-executor/std", +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "xcm-builder/runtime-benchmarks", +] +try-runtime = ["frame-support/try-runtime"] diff --git a/pallets/pallet-xcm/src/benchmarking.rs 
b/pallets/pallet-xcm/src/benchmarking.rs new file mode 100644 index 0000000000..2679ebb511 --- /dev/null +++ b/pallets/pallet-xcm/src/benchmarking.rs @@ -0,0 +1,218 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +use super::*; +use bounded_collections::{ConstU32, WeakBoundedVec}; +use frame_benchmarking::{benchmarks, BenchmarkError, BenchmarkResult}; +use frame_support::weights::Weight; +use frame_system::RawOrigin; +use sp_std::prelude::*; +use xcm::{latest::prelude::*, v2}; + +type RuntimeOrigin = ::RuntimeOrigin; + +benchmarks! { + send { + let send_origin = + T::SendXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + if T::SendXcmOrigin::try_origin(send_origin.clone()).is_err() { + return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) + } + let msg = Xcm(vec![ClearOrigin]); + let versioned_dest: VersionedMultiLocation = T::ReachableDest::get().ok_or( + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), + )? 
+ .into(); + let versioned_msg = VersionedXcm::from(msg); + }: _>(send_origin, Box::new(versioned_dest), Box::new(versioned_msg)) + + teleport_assets { + let asset: MultiAsset = (Here, 10).into(); + let send_origin = + T::ExecuteXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + let origin_location = T::ExecuteXcmOrigin::try_origin(send_origin.clone()) + .map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; + if !T::XcmTeleportFilter::contains(&(origin_location, vec![asset.clone()])) { + return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) + } + + let recipient = [0u8; 32]; + let versioned_dest: VersionedMultiLocation = T::ReachableDest::get().ok_or( + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), + )? + .into(); + let versioned_beneficiary: VersionedMultiLocation = + AccountId32 { network: None, id: recipient.into() }.into(); + let versioned_assets: VersionedMultiAssets = asset.into(); + }: _>(send_origin, Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) + + reserve_transfer_assets { + let asset: MultiAsset = (Here, 10).into(); + let send_origin = + T::ExecuteXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + let origin_location = T::ExecuteXcmOrigin::try_origin(send_origin.clone()) + .map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; + if !T::XcmReserveTransferFilter::contains(&(origin_location, vec![asset.clone()])) { + return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) + } + + let recipient = [0u8; 32]; + let versioned_dest: VersionedMultiLocation = T::ReachableDest::get().ok_or( + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), + )? 
+ .into(); + let versioned_beneficiary: VersionedMultiLocation = + AccountId32 { network: None, id: recipient.into() }.into(); + let versioned_assets: VersionedMultiAssets = asset.into(); + }: _>(send_origin, Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) + + execute { + let execute_origin = + T::ExecuteXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + let origin_location = T::ExecuteXcmOrigin::try_origin(execute_origin.clone()) + .map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; + let msg = Xcm(vec![ClearOrigin]); + if !T::XcmExecuteFilter::contains(&(origin_location, msg.clone())) { + return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) + } + let versioned_msg = VersionedXcm::from(msg); + }: _>(execute_origin, Box::new(versioned_msg), Weight::zero()) + + force_xcm_version { + let loc = T::ReachableDest::get().ok_or( + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), + )?; + let xcm_version = 2; + }: _(RawOrigin::Root, Box::new(loc), xcm_version) + + force_default_xcm_version {}: _(RawOrigin::Root, Some(2)) + + force_subscribe_version_notify { + let versioned_loc: VersionedMultiLocation = T::ReachableDest::get().ok_or( + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), + )? 
+ .into(); + }: _(RawOrigin::Root, Box::new(versioned_loc)) + + force_unsubscribe_version_notify { + let loc = T::ReachableDest::get().ok_or( + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), + )?; + let versioned_loc: VersionedMultiLocation = loc.into(); + let _ = Pallet::::request_version_notify(loc); + }: _(RawOrigin::Root, Box::new(versioned_loc)) + + migrate_supported_version { + let old_version = XCM_VERSION - 1; + let loc = VersionedMultiLocation::from(MultiLocation::from(Parent)); + SupportedVersion::::insert(old_version, loc, old_version); + }: { + Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateSupportedVersion, Weight::zero()); + } + + migrate_version_notifiers { + let old_version = XCM_VERSION - 1; + let loc = VersionedMultiLocation::from(MultiLocation::from(Parent)); + VersionNotifiers::::insert(old_version, loc, 0); + }: { + Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateVersionNotifiers, Weight::zero()); + } + + already_notified_target { + let loc = T::ReachableDest::get().ok_or( + BenchmarkError::Override(BenchmarkResult::from_weight(T::DbWeight::get().reads(1))), + )?; + let loc = VersionedMultiLocation::from(loc); + let current_version = T::AdvertisedXcmVersion::get(); + VersionNotifyTargets::::insert(current_version, loc, (0, Weight::zero(), current_version)); + }: { + Pallet::::check_xcm_version_change(VersionMigrationStage::NotifyCurrentTargets(None), Weight::zero()); + } + + notify_current_targets { + let loc = T::ReachableDest::get().ok_or( + BenchmarkError::Override(BenchmarkResult::from_weight(T::DbWeight::get().reads_writes(1, 3))), + )?; + let loc = VersionedMultiLocation::from(loc); + let current_version = T::AdvertisedXcmVersion::get(); + let old_version = current_version - 1; + VersionNotifyTargets::::insert(current_version, loc, (0, Weight::zero(), old_version)); + }: { + Pallet::::check_xcm_version_change(VersionMigrationStage::NotifyCurrentTargets(None), Weight::zero()); + } + 
+ notify_target_migration_fail { + let bad_loc: v2::MultiLocation = v2::Junction::Plurality { + id: v2::BodyId::Named(WeakBoundedVec::>::try_from(vec![0; 32]) + .expect("vec has a length of 32 bits; qed")), + part: v2::BodyPart::Voice, + } + .into(); + let bad_loc = VersionedMultiLocation::from(bad_loc); + let current_version = T::AdvertisedXcmVersion::get(); + VersionNotifyTargets::::insert(current_version, bad_loc, (0, Weight::zero(), current_version)); + }: { + Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); + } + + migrate_version_notify_targets { + let current_version = T::AdvertisedXcmVersion::get(); + let old_version = current_version - 1; + let loc = VersionedMultiLocation::from(MultiLocation::from(Parent)); + VersionNotifyTargets::::insert(old_version, loc, (0, Weight::zero(), current_version)); + }: { + Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); + } + + migrate_and_notify_old_targets { + let loc = T::ReachableDest::get().ok_or( + BenchmarkError::Override(BenchmarkResult::from_weight(T::DbWeight::get().reads_writes(1, 3))), + )?; + let loc = VersionedMultiLocation::from(loc); + let old_version = T::AdvertisedXcmVersion::get() - 1; + VersionNotifyTargets::::insert(old_version, loc, (0, Weight::zero(), old_version)); + }: { + Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); + } + + reserve_withdraw_assets { + let asset: MultiAsset = (Here, 10).into(); + let send_origin = + T::ExecuteXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + let origin_location = T::ExecuteXcmOrigin::try_origin(send_origin.clone()) + .map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; + if !T::XcmReserveTransferFilter::contains(&(origin_location, vec![asset.clone()])) { + return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) + } + + 
let recipient = [0u8; 32]; + let versioned_dest: VersionedMultiLocation = T::ReachableDest::get().ok_or( + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), + )? + .into(); + let versioned_beneficiary: VersionedMultiLocation = + AccountId32 { network: None, id: recipient.into() }.into(); + let versioned_assets: VersionedMultiAssets = asset.into(); + }: _>(send_origin, Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) + + impl_benchmark_test_suite!( + Pallet, + crate::mock::new_test_ext_with_balances(Vec::new()), + crate::mock::Test + ); +} diff --git a/pallets/pallet-xcm/src/lib.rs b/pallets/pallet-xcm/src/lib.rs new file mode 100644 index 0000000000..24970eded8 --- /dev/null +++ b/pallets/pallet-xcm/src/lib.rs @@ -0,0 +1,2487 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +//! Pallet to handle XCM messages. 
+ +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; + +pub mod migration; +pub mod weights; + +use frame_support::traits::{ + Contains, ContainsPair, Currency, Defensive, EnsureOrigin, Get, LockableCurrency, OriginTrait, +}; +use parity_scale_codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; +use scale_info::TypeInfo; +use sp_runtime::{ + traits::{ + AccountIdConversion, BadOrigin, BlakeTwo256, BlockNumberProvider, Hash, Saturating, Zero, + }, + RuntimeDebug, +}; +use sp_std::{boxed::Box, marker::PhantomData, prelude::*, result::Result, vec}; +use xcm::{latest::QueryResponseInfo, prelude::*}; +use xcm_executor::traits::{Convert, ConvertOrigin}; + +use frame_support::{ + dispatch::{Dispatchable, GetDispatchInfo}, + pallet_prelude::*, + traits::WithdrawReasons, + PalletId, +}; +use frame_system::pallet_prelude::*; +pub use pallet::*; +use xcm_executor::{ + traits::{ + ClaimAssets, DropAssets, MatchesFungible, OnResponse, VersionChangeNotifier, WeightBounds, + }, + Assets, +}; + +pub trait WeightInfo { + fn send() -> Weight; + fn teleport_assets() -> Weight; + fn reserve_transfer_assets() -> Weight; + fn execute() -> Weight; + fn force_xcm_version() -> Weight; + fn force_default_xcm_version() -> Weight; + fn force_subscribe_version_notify() -> Weight; + fn force_unsubscribe_version_notify() -> Weight; + fn migrate_supported_version() -> Weight; + fn migrate_version_notifiers() -> Weight; + fn already_notified_target() -> Weight; + fn notify_current_targets() -> Weight; + fn notify_target_migration_fail() -> Weight; + fn migrate_version_notify_targets() -> Weight; + fn migrate_and_notify_old_targets() -> Weight; + fn reserve_withdraw_assets() -> Weight; +} + +/// fallback implementation +pub struct TestWeightInfo; +impl WeightInfo for TestWeightInfo { + fn send() -> Weight { + Weight::from_ref_time(100_000_000) + } + + fn teleport_assets() -> Weight { + 
Weight::from_ref_time(100_000_000) + } + + fn reserve_transfer_assets() -> Weight { + Weight::from_ref_time(100_000_000) + } + + fn execute() -> Weight { + Weight::from_ref_time(100_000_000) + } + + fn force_xcm_version() -> Weight { + Weight::from_ref_time(100_000_000) + } + + fn force_default_xcm_version() -> Weight { + Weight::from_ref_time(100_000_000) + } + + fn force_subscribe_version_notify() -> Weight { + Weight::from_ref_time(100_000_000) + } + + fn force_unsubscribe_version_notify() -> Weight { + Weight::from_ref_time(100_000_000) + } + + fn migrate_supported_version() -> Weight { + Weight::from_ref_time(100_000_000) + } + + fn migrate_version_notifiers() -> Weight { + Weight::from_ref_time(100_000_000) + } + + fn already_notified_target() -> Weight { + Weight::from_ref_time(100_000_000) + } + + fn notify_current_targets() -> Weight { + Weight::from_ref_time(100_000_000) + } + + fn notify_target_migration_fail() -> Weight { + Weight::from_ref_time(100_000_000) + } + + fn migrate_version_notify_targets() -> Weight { + Weight::from_ref_time(100_000_000) + } + + fn migrate_and_notify_old_targets() -> Weight { + Weight::from_ref_time(100_000_000) + } + + fn reserve_withdraw_assets() -> Weight { + Weight::from_ref_time(100_000_000) + } +} + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::{ + dispatch::{Dispatchable, GetDispatchInfo, PostDispatchInfo}, + parameter_types, + }; + use frame_system::Config as SysConfig; + use sp_core::H256; + use xcm_executor::traits::{MatchesFungible, WeightBounds}; + + parameter_types! { + /// An implementation of `Get` which just returns the latest XCM version which we can + /// support. 
+ pub const CurrentXcmVersion: u32 = XCM_VERSION; + } + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + #[pallet::storage_version(migration::STORAGE_VERSION)] + #[pallet::without_storage_info] + pub struct Pallet(_); + + pub type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; + + #[pallet::config] + /// The module configuration trait. + pub trait Config: frame_system::Config { + /// The overarching event type. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + /// A lockable currency. + // TODO: We should really use a trait which can handle multiple currencies. + type Currency: LockableCurrency; + + /// The `MultiAsset` matcher for `Currency`. + type CurrencyMatcher: MatchesFungible>; + + /// Required origin for sending XCM messages. If successful, it resolves to `MultiLocation` + /// which exists as an interior location within this chain's XCM context. + type SendXcmOrigin: EnsureOrigin< + ::RuntimeOrigin, + Success = MultiLocation, + >; + + /// The type used to actually dispatch an XCM to its destination. + type XcmRouter: SendXcm; + + /// Required origin for executing XCM messages, including the teleport functionality. If successful, + /// then it resolves to `MultiLocation` which exists as an interior location within this chain's XCM + /// context. + type ExecuteXcmOrigin: EnsureOrigin< + ::RuntimeOrigin, + Success = MultiLocation, + >; + + /// Our XCM filter which messages to be executed using `XcmExecutor` must pass. + type XcmExecuteFilter: Contains<(MultiLocation, Xcm<::RuntimeCall>)>; + + /// Something to execute an XCM message. + type XcmExecutor: ExecuteXcm<::RuntimeCall>; + + /// Our XCM filter which messages to be teleported using the dedicated extrinsic must pass. + type XcmTeleportFilter: Contains<(MultiLocation, Vec)>; + + /// Our XCM filter which messages to be reserve-transferred using the dedicated extrinsic must pass. 
+ type XcmReserveTransferFilter: Contains<(MultiLocation, Vec)>; + + /// Means of measuring the weight consumed by an XCM message locally. + type Weigher: WeightBounds<::RuntimeCall>; + + /// This chain's Universal Location. + type UniversalLocation: Get; + + /// The runtime `Origin` type. + type RuntimeOrigin: From + From<::RuntimeOrigin>; + + /// The runtime `Call` type. + type RuntimeCall: Parameter + + GetDispatchInfo + + IsType<::RuntimeCall> + + Dispatchable< + RuntimeOrigin = ::RuntimeOrigin, + PostInfo = PostDispatchInfo, + >; + + const VERSION_DISCOVERY_QUEUE_SIZE: u32; + + /// The latest supported version that we advertise. Generally just set it to + /// `pallet_xcm::CurrentXcmVersion`. + type AdvertisedXcmVersion: Get; + + /// The assets which we consider a given origin is trusted if they claim to have placed a + /// lock. + type TrustedLockers: ContainsPair; + + /// How to get an `AccountId` value from a `MultiLocation`, useful for handling asset locks. + type SovereignAccountOf: Convert; + + /// The maximum number of local XCM locks that a single account may have. + type MaxLockers: Get; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + + /// A `MultiLocation` that can be reached via `XcmRouter`. Used only in benchmarks. + /// + /// If `None`, the benchmarks that depend on a reachable destination will be skipped. + #[cfg(feature = "runtime-benchmarks")] + type ReachableDest: Get>; + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// Execution of an XCM message was attempted. + /// + /// \[ outcome \] + Attempted(xcm::latest::Outcome), + /// A XCM message was sent. + /// + /// \[ origin, destination, message \] + Sent(MultiLocation, MultiLocation, Xcm<()>), + /// Query response received which does not match a registered query. 
This may be because a + /// matching query was never registered, it may be because it is a duplicate response, or + /// because the query timed out. + /// + /// \[ origin location, id \] + UnexpectedResponse(MultiLocation, QueryId), + /// Query response has been received and is ready for taking with `take_response`. There is + /// no registered notification call. + /// + /// \[ id, response \] + ResponseReady(QueryId, Response), + /// Query response has been received and query is removed. The registered notification has + /// been dispatched and executed successfully. + /// + /// \[ id, pallet index, call index \] + Notified(QueryId, u8, u8), + /// Query response has been received and query is removed. The registered notification could + /// not be dispatched because the dispatch weight is greater than the maximum weight + /// originally budgeted by this runtime for the query result. + /// + /// \[ id, pallet index, call index, actual weight, max budgeted weight \] + NotifyOverweight(QueryId, u8, u8, Weight, Weight), + /// Query response has been received and query is removed. There was a general error with + /// dispatching the notification call. + /// + /// \[ id, pallet index, call index \] + NotifyDispatchError(QueryId, u8, u8), + /// Query response has been received and query is removed. The dispatch was unable to be + /// decoded into a `Call`; this might be due to dispatch function having a signature which + /// is not `(origin, QueryId, Response)`. + /// + /// \[ id, pallet index, call index \] + NotifyDecodeFailed(QueryId, u8, u8), + /// Expected query response has been received but the origin location of the response does + /// not match that expected. The query remains registered for a later, valid, response to + /// be received and acted upon. 
+ /// + /// \[ origin location, id, expected location \] + InvalidResponder(MultiLocation, QueryId, Option), + /// Expected query response has been received but the expected origin location placed in + /// storage by this runtime previously cannot be decoded. The query remains registered. + /// + /// This is unexpected (since a location placed in storage in a previously executing + /// runtime should be readable prior to query timeout) and dangerous since the possibly + /// valid response will be dropped. Manual governance intervention is probably going to be + /// needed. + /// + /// \[ origin location, id \] + InvalidResponderVersion(MultiLocation, QueryId), + /// Received query response has been read and removed. + /// + /// \[ id \] + ResponseTaken(QueryId), + /// Some assets have been placed in an asset trap. + /// + /// \[ hash, origin, assets \] + AssetsTrapped(H256, MultiLocation, VersionedMultiAssets), + /// An XCM version change notification message has been attempted to be sent. + /// + /// The cost of sending it (borne by the chain) is included. + /// + /// \[ destination, result, cost \] + VersionChangeNotified(MultiLocation, XcmVersion, MultiAssets), + /// The supported version of a location has been changed. This might be through an + /// automatic notification or a manual intervention. + /// + /// \[ location, XCM version \] + SupportedVersionChanged(MultiLocation, XcmVersion), + /// A given location which had a version change subscription was dropped owing to an error + /// sending the notification to it. + /// + /// \[ location, query ID, error \] + NotifyTargetSendFail(MultiLocation, QueryId, XcmError), + /// A given location which had a version change subscription was dropped owing to an error + /// migrating the location to our new XCM format. 
+ /// + /// \[ location, query ID \] + NotifyTargetMigrationFail(VersionedMultiLocation, QueryId), + /// Expected query response has been received but the expected querier location placed in + /// storage by this runtime previously cannot be decoded. The query remains registered. + /// + /// This is unexpected (since a location placed in storage in a previously executing + /// runtime should be readable prior to query timeout) and dangerous since the possibly + /// valid response will be dropped. Manual governance intervention is probably going to be + /// needed. + /// + /// \[ origin location, id \] + InvalidQuerierVersion(MultiLocation, QueryId), + /// Expected query response has been received but the querier location of the response does + /// not match the expected. The query remains registered for a later, valid, response to + /// be received and acted upon. + /// + /// \[ origin location, id, expected querier, maybe actual querier \] + InvalidQuerier(MultiLocation, QueryId, MultiLocation, Option), + /// A remote has requested XCM version change notification from us and we have honored it. + /// A version information message is sent to them and its cost is included. + /// + /// \[ destination location, cost \] + VersionNotifyStarted(MultiLocation, MultiAssets), + /// We have requested that a remote chain sends us XCM version change notifications. + /// + /// \[ destination location, cost \] + VersionNotifyRequested(MultiLocation, MultiAssets), + /// We have requested that a remote chain stops sending us XCM version change notifications. + /// + /// \[ destination location, cost \] + VersionNotifyUnrequested(MultiLocation, MultiAssets), + /// Fees were paid from a location for an operation (often for using `SendXcm`). 
+ /// + /// \[ paying location, fees \] + FeesPaid(MultiLocation, MultiAssets), + /// Some assets have been claimed from an asset trap + /// + /// \[ hash, origin, assets \] + AssetsClaimed(H256, MultiLocation, VersionedMultiAssets), + } + + #[pallet::origin] + #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)] + pub enum Origin { + /// It comes from somewhere in the XCM space wanting to transact. + Xcm(MultiLocation), + /// It comes as an expected response from an XCM location. + Response(MultiLocation), + } + impl From for Origin { + fn from(location: MultiLocation) -> Origin { + Origin::Xcm(location) + } + } + + #[pallet::error] + pub enum Error { + /// The desired destination was unreachable, generally because there is a no way of routing + /// to it. + Unreachable, + /// There was some other issue (i.e. not to do with routing) in sending the message. Perhaps + /// a lack of space for buffering the message. + SendFailure, + /// The message execution fails the filter. + Filtered, + /// The message's weight could not be determined. + UnweighableMessage, + /// The destination `MultiLocation` provided cannot be inverted. + DestinationNotInvertible, + /// The assets to be sent are empty. + Empty, + /// Could not re-anchor the assets to declare the fees for the destination chain. + CannotReanchor, + /// Too many assets have been attempted for transfer. + TooManyAssets, + /// Origin is invalid for sending. + InvalidOrigin, + /// The version of the `Versioned` value used is not able to be interpreted. + BadVersion, + /// The given location could not be used (e.g. because it cannot be expressed in the + /// desired version of XCM). + BadLocation, + /// The referenced subscription could not be found. + NoSubscription, + /// The location is invalid since it already has a subscription from us. + AlreadySubscribed, + /// Invalid asset for the operation. 
+ InvalidAsset, + /// The owner does not own (all) of the asset that they wish to do the operation on. + LowBalance, + /// The asset owner has too many locks on the asset. + TooManyLocks, + /// The given account is not an identifiable sovereign account for any location. + AccountNotSovereign, + /// The operation required fees to be paid which the initiator could not meet. + FeesNotMet, + /// A remote lock with the corresponding data could not be found. + LockNotFound, + /// The unlock operation cannot succeed because there are still users of the lock. + InUse, + } + + impl From for Error { + fn from(e: SendError) -> Self { + match e { + SendError::Fees => Error::::FeesNotMet, + SendError::NotApplicable => Error::::Unreachable, + _ => Error::::SendFailure, + } + } + } + + /// The status of a query. + #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] + pub enum QueryStatus { + /// The query was sent but no response has yet been received. + Pending { + /// The `QueryResponse` XCM must have this origin to be considered a reply for this + /// query. + responder: VersionedMultiLocation, + /// The `QueryResponse` XCM must have this value as the `querier` field to be + /// considered a reply for this query. If `None` then the querier is ignored. + maybe_match_querier: Option, + maybe_notify: Option<(u8, u8)>, + timeout: BlockNumber, + }, + /// The query is for an ongoing version notification subscription. + VersionNotifier { + origin: VersionedMultiLocation, + is_active: bool, + }, + /// A response has been received. 
+ Ready { + response: VersionedResponse, + at: BlockNumber, + }, + } + + #[derive(Copy, Clone)] + pub(crate) struct LatestVersionedMultiLocation<'a>(pub(crate) &'a MultiLocation); + impl<'a> EncodeLike for LatestVersionedMultiLocation<'a> {} + impl<'a> Encode for LatestVersionedMultiLocation<'a> { + fn encode(&self) -> Vec { + let mut r = VersionedMultiLocation::from(MultiLocation::default()).encode(); + r.truncate(1); + self.0.using_encoded(|d| r.extend_from_slice(d)); + r + } + } + + #[derive(Clone, Encode, Decode, Eq, PartialEq, Ord, PartialOrd, TypeInfo)] + pub enum VersionMigrationStage { + MigrateSupportedVersion, + MigrateVersionNotifiers, + NotifyCurrentTargets(Option>), + MigrateAndNotifyOldTargets, + } + + impl Default for VersionMigrationStage { + fn default() -> Self { + Self::MigrateSupportedVersion + } + } + + /// The latest available query index. + #[pallet::storage] + pub(super) type QueryCounter = StorageValue<_, QueryId, ValueQuery>; + + /// The ongoing queries. + #[pallet::storage] + #[pallet::getter(fn query)] + pub(super) type Queries = + StorageMap<_, Blake2_128Concat, QueryId, QueryStatus, OptionQuery>; + + /// The existing asset traps. + /// + /// Key is the blake2 256 hash of (origin, versioned `MultiAssets`) pair. Value is the number of + /// times this pair has been trapped (usually just 1 if it exists at all). + #[pallet::storage] + #[pallet::getter(fn asset_trap)] + pub(super) type AssetTraps = StorageMap<_, Identity, H256, u32, ValueQuery>; + + /// Default version to encode XCM when latest version of destination is unknown. If `None`, + /// then the destinations whose XCM version is unknown are considered unreachable. + #[pallet::storage] + pub(super) type SafeXcmVersion = StorageValue<_, XcmVersion, OptionQuery>; + + /// The Latest versions that we know various locations support. 
+ #[pallet::storage] + pub(super) type SupportedVersion = StorageDoubleMap< + _, + Twox64Concat, + XcmVersion, + Blake2_128Concat, + VersionedMultiLocation, + XcmVersion, + OptionQuery, + >; + + /// All locations that we have requested version notifications from. + #[pallet::storage] + pub(super) type VersionNotifiers = StorageDoubleMap< + _, + Twox64Concat, + XcmVersion, + Blake2_128Concat, + VersionedMultiLocation, + QueryId, + OptionQuery, + >; + + /// The target locations that are subscribed to our version changes, as well as the most recent + /// of our versions we informed them of. + #[pallet::storage] + pub(super) type VersionNotifyTargets = StorageDoubleMap< + _, + Twox64Concat, + XcmVersion, + Blake2_128Concat, + VersionedMultiLocation, + (QueryId, Weight, XcmVersion), + OptionQuery, + >; + + pub struct VersionDiscoveryQueueSize(PhantomData); + impl Get for VersionDiscoveryQueueSize { + fn get() -> u32 { + T::VERSION_DISCOVERY_QUEUE_SIZE + } + } + + /// Destinations whose latest XCM version we would like to know. Duplicates not allowed, and + /// the `u32` counter is the number of times that a send to the destination has been attempted, + /// which is used as a prioritization. + #[pallet::storage] + pub(super) type VersionDiscoveryQueue = StorageValue< + _, + BoundedVec<(VersionedMultiLocation, u32), VersionDiscoveryQueueSize>, + ValueQuery, + >; + + /// The current migration's stage, if any. + #[pallet::storage] + pub(super) type CurrentMigration = + StorageValue<_, VersionMigrationStage, OptionQuery>; + + #[derive(Clone, Encode, Decode, Eq, PartialEq, Ord, PartialOrd, TypeInfo, MaxEncodedLen)] + pub struct RemoteLockedFungibleRecord { + pub amount: u128, + pub owner: VersionedMultiLocation, + pub locker: VersionedMultiLocation, + pub users: u32, + } + + /// Fungible assets which we know are locked on a remote chain. 
+ #[pallet::storage] + pub(super) type RemoteLockedFungibles = StorageNMap< + _, + ( + NMapKey, + NMapKey, + NMapKey, + ), + RemoteLockedFungibleRecord, + OptionQuery, + >; + + /// Fungible assets which we know are locked on this chain. + #[pallet::storage] + pub(super) type LockedFungibles = StorageMap< + _, + Blake2_128Concat, + T::AccountId, + BoundedVec<(BalanceOf, VersionedMultiLocation), T::MaxLockers>, + OptionQuery, + >; + + #[pallet::genesis_config] + pub struct GenesisConfig { + /// The default version to encode outgoing XCM messages with. + pub safe_xcm_version: Option, + } + + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + Self { + safe_xcm_version: Some(XCM_VERSION), + } + } + } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + SafeXcmVersion::::set(self.safe_xcm_version); + } + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_initialize(_n: BlockNumberFor) -> Weight { + let mut weight_used = Weight::zero(); + if let Some(migration) = CurrentMigration::::get() { + // Consume 10% of block at most + let max_weight = T::BlockWeights::get().max_block / 10; + let (w, maybe_migration) = Self::check_xcm_version_change(migration, max_weight); + CurrentMigration::::set(maybe_migration); + weight_used.saturating_accrue(w); + } + + // Here we aim to get one successful version negotiation request sent per block, ordered + // by the destinations being most sent to. + let mut q = VersionDiscoveryQueue::::take().into_inner(); + // TODO: correct weights. + weight_used.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + q.sort_by_key(|i| i.1); + while let Some((versioned_dest, _)) = q.pop() { + if let Ok(dest) = MultiLocation::try_from(versioned_dest) { + if Self::request_version_notify(dest).is_ok() { + // TODO: correct weights. + weight_used.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + break; + } + } + } + // Should never fail since we only removed items. 
But better safe than panicking as it's + // way better to drop the queue than panic on initialize. + if let Ok(q) = BoundedVec::try_from(q) { + VersionDiscoveryQueue::::put(q); + } + weight_used + } + fn on_runtime_upgrade() -> Weight { + // Start a migration (this happens before on_initialize so it'll happen later in this + // block, which should be good enough)... + CurrentMigration::::put(VersionMigrationStage::default()); + T::DbWeight::get().writes(1) + } + } + + pub mod migrations { + use super::*; + use frame_support::traits::{PalletInfoAccess, StorageVersion}; + + #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] + enum QueryStatusV0 { + Pending { + responder: VersionedMultiLocation, + maybe_notify: Option<(u8, u8)>, + timeout: BlockNumber, + }, + VersionNotifier { + origin: VersionedMultiLocation, + is_active: bool, + }, + Ready { + response: VersionedResponse, + at: BlockNumber, + }, + } + impl From> for QueryStatus { + fn from(old: QueryStatusV0) -> Self { + use QueryStatusV0::*; + match old { + Pending { + responder, + maybe_notify, + timeout, + } => QueryStatus::Pending { + responder, + maybe_notify, + timeout, + maybe_match_querier: Some(MultiLocation::here().into()), + }, + VersionNotifier { origin, is_active } => { + QueryStatus::VersionNotifier { origin, is_active } + } + Ready { response, at } => QueryStatus::Ready { response, at }, + } + } + } + + pub fn migrate_to_v1( + ) -> frame_support::weights::Weight { + let on_chain_storage_version =

::on_chain_storage_version(); + log::info!( + target: "runtime::xcm", + "Running migration storage v1 for xcm with storage version {:?}", + on_chain_storage_version, + ); + + if on_chain_storage_version < 1 { + let mut count = 0; + Queries::::translate::, _>(|_key, value| { + count += 1; + Some(value.into()) + }); + StorageVersion::new(1).put::

(); + log::info!( + target: "runtime::xcm", + "Running migration storage v1 for xcm with storage version {:?} was complete", + on_chain_storage_version, + ); + // calculate and return migration weights + T::DbWeight::get().reads_writes(count as u64 + 1, count as u64 + 1) + } else { + log::warn!( + target: "runtime::xcm", + "Attempted to apply migration to v1 but failed because storage version is {:?}", + on_chain_storage_version, + ); + T::DbWeight::get().reads(1) + } + } + } + + #[pallet::call] + impl Pallet { + #[pallet::call_index(0)] + #[pallet::weight({ + let maybe_msg: Result, ()> = (*message.clone()).try_into(); + match maybe_msg { + Ok(msg) => { + T::Weigher::weight(&mut msg.into()) + .map_or(Weight::MAX, |w| T::WeightInfo::send().saturating_add(w)) + } + _ => Weight::MAX, + } + })] + pub fn send( + origin: OriginFor, + dest: Box, + message: Box>, + ) -> DispatchResult { + let origin_location = T::SendXcmOrigin::ensure_origin(origin)?; + let interior: Junctions = origin_location + .try_into() + .map_err(|_| Error::::InvalidOrigin)?; + let dest = MultiLocation::try_from(*dest).map_err(|()| Error::::BadVersion)?; + let message: Xcm<()> = (*message).try_into().map_err(|()| Error::::BadVersion)?; + + Self::send_xcm(interior, dest, message.clone()).map_err(Error::::from)?; + Self::deposit_event(Event::Sent(origin_location, dest, message)); + Ok(()) + } + + /// Teleport some assets from the local chain to some destination chain. + /// + /// Fee payment on the destination side is made from the asset in the `assets` vector of + /// index `fee_asset_item`. The weight limit for fees is not provided and thus is unlimited, + /// with all fees taken as needed from the asset. + /// + /// - `origin`: Must be capable of withdrawing the `assets` and executing XCM. + /// - `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send + /// from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain. 
+ /// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be + /// an `AccountId32` value. + /// - `assets`: The assets to be withdrawn. The first item should be the currency used to to pay the fee on the + /// `dest` side. May not be empty. + /// - `fee_asset_item`: The index into `assets` of the item which should be used to pay + /// fees. + #[pallet::call_index(1)] + #[pallet::weight({ + let maybe_assets: Result = (*assets.clone()).try_into(); + let maybe_dest: Result = (*dest.clone()).try_into(); + match (maybe_assets, maybe_dest) { + (Ok(assets), Ok(dest)) => { + use sp_std::vec; + let count = assets.len() as u32; + let mut message = Xcm(vec![ + WithdrawAsset(assets), + InitiateTeleport { + assets: Wild(AllCounted(count)), + dest, + xcm: Xcm(vec![]), + }, + ]); + T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::teleport_assets().saturating_add(w)) + } + _ => Weight::MAX, + } + })] + pub fn teleport_assets( + origin: OriginFor, + dest: Box, + beneficiary: Box, + assets: Box, + fee_asset_item: u32, + ) -> DispatchResult { + Self::do_teleport_assets(origin, dest, beneficiary, assets, fee_asset_item, None) + } + + /// Transfer some assets from the local chain to the sovereign account of a destination + /// chain and forward a notification XCM. + /// + /// Fee payment on the destination side is made from the asset in the `assets` vector of + /// index `fee_asset_item`. The weight limit for fees is not provided and thus is unlimited, + /// with all fees taken as needed from the asset. + /// + /// - `origin`: Must be capable of withdrawing the `assets` and executing XCM. + /// - `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send + /// from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain. + /// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be + /// an `AccountId32` value. 
+ /// - `assets`: The assets to be withdrawn. This should include the assets used to pay the fee on the + /// `dest` side. + /// - `fee_asset_item`: The index into `assets` of the item which should be used to pay + /// fees. + #[pallet::call_index(2)] + #[pallet::weight({ + let maybe_assets: Result = (*assets.clone()).try_into(); + let maybe_dest: Result = (*dest.clone()).try_into(); + match (maybe_assets, maybe_dest) { + (Ok(assets), Ok(dest)) => { + use sp_std::vec; + let mut message = Xcm(vec![ + TransferReserveAsset { assets, dest, xcm: Xcm(vec![]) } + ]); + T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::reserve_transfer_assets().saturating_add(w)) + } + _ => Weight::MAX, + } + })] + pub fn reserve_transfer_assets( + origin: OriginFor, + dest: Box, + beneficiary: Box, + assets: Box, + fee_asset_item: u32, + ) -> DispatchResult { + Self::do_reserve_transfer_assets( + origin, + dest, + beneficiary, + assets, + fee_asset_item, + None, + ) + } + + /// Execute an XCM message from a local, signed, origin. + /// + /// An event is deposited indicating whether `msg` could be executed completely or only + /// partially. + /// + /// No more than `max_weight` will be used in its attempted execution. If this is less than the + /// maximum amount of weight that the message could take to be executed, then no execution + /// attempt will be made. + /// + /// NOTE: A successful return to this does *not* imply that the `msg` was executed successfully + /// to completion; only that *some* of it was executed. 
+ #[pallet::call_index(3)] + #[pallet::weight(max_weight.saturating_add(T::WeightInfo::execute()))] + pub fn execute( + origin: OriginFor, + message: Box::RuntimeCall>>, + max_weight: Weight, + ) -> DispatchResultWithPostInfo { + let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; + let hash = message.using_encoded(sp_io::hashing::blake2_256); + let message = (*message).try_into().map_err(|()| Error::::BadVersion)?; + let value = (origin_location, message); + ensure!(T::XcmExecuteFilter::contains(&value), Error::::Filtered); + let (origin_location, message) = value; + let outcome = T::XcmExecutor::execute_xcm_in_credit( + origin_location, + message, + hash, + max_weight, + max_weight, + ); + let result = Ok(Some( + outcome + .weight_used() + .saturating_add(T::WeightInfo::execute()), + ) + .into()); + Self::deposit_event(Event::Attempted(outcome)); + result + } + + /// Extoll that a particular destination can be communicated with through a particular + /// version of XCM. + /// + /// - `origin`: Must be Root. + /// - `location`: The destination that is being described. + /// - `xcm_version`: The latest version of XCM that `location` supports. + #[pallet::call_index(4)] + #[pallet::weight(T::WeightInfo::force_xcm_version())] + pub fn force_xcm_version( + origin: OriginFor, + location: Box, + xcm_version: XcmVersion, + ) -> DispatchResult { + ensure_root(origin)?; + let location = *location; + SupportedVersion::::insert( + XCM_VERSION, + LatestVersionedMultiLocation(&location), + xcm_version, + ); + Self::deposit_event(Event::SupportedVersionChanged(location, xcm_version)); + Ok(()) + } + + /// Set a safe XCM version (the version that XCM should be encoded with if the most recent + /// version a destination can accept is unknown). + /// + /// - `origin`: Must be Root. + /// - `maybe_xcm_version`: The default XCM encoding version, or `None` to disable. 
+ #[pallet::call_index(5)] + #[pallet::weight(T::WeightInfo::force_default_xcm_version())] + pub fn force_default_xcm_version( + origin: OriginFor, + maybe_xcm_version: Option, + ) -> DispatchResult { + ensure_root(origin)?; + SafeXcmVersion::::set(maybe_xcm_version); + Ok(()) + } + + /// Ask a location to notify us regarding their XCM version and any changes to it. + /// + /// - `origin`: Must be Root. + /// - `location`: The location to which we should subscribe for XCM version notifications. + #[pallet::call_index(6)] + #[pallet::weight(T::WeightInfo::force_subscribe_version_notify())] + pub fn force_subscribe_version_notify( + origin: OriginFor, + location: Box, + ) -> DispatchResult { + ensure_root(origin)?; + let location: MultiLocation = (*location) + .try_into() + .map_err(|()| Error::::BadLocation)?; + Self::request_version_notify(location).map_err(|e| { + match e { + XcmError::InvalidLocation => Error::::AlreadySubscribed, + _ => Error::::InvalidOrigin, + } + .into() + }) + } + + /// Require that a particular destination should no longer notify us regarding any XCM + /// version changes. + /// + /// - `origin`: Must be Root. + /// - `location`: The location to which we are currently subscribed for XCM version + /// notifications which we no longer desire. + #[pallet::call_index(7)] + #[pallet::weight(T::WeightInfo::force_unsubscribe_version_notify())] + pub fn force_unsubscribe_version_notify( + origin: OriginFor, + location: Box, + ) -> DispatchResult { + ensure_root(origin)?; + let location: MultiLocation = (*location) + .try_into() + .map_err(|()| Error::::BadLocation)?; + Self::unrequest_version_notify(location).map_err(|e| { + match e { + XcmError::InvalidLocation => Error::::NoSubscription, + _ => Error::::InvalidOrigin, + } + .into() + }) + } + + /// Transfer some assets from the local chain to the sovereign account of a destination + /// chain and forward a notification XCM. 
+ /// + /// Fee payment on the destination side is made from the asset in the `assets` vector of + /// index `fee_asset_item`, up to enough to pay for `weight_limit` of weight. If more weight + /// is needed than `weight_limit`, then the operation will fail and the assets send may be + /// at risk. + /// + /// - `origin`: Must be capable of withdrawing the `assets` and executing XCM. + /// - `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send + /// from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain. + /// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be + /// an `AccountId32` value. + /// - `assets`: The assets to be withdrawn. This should include the assets used to pay the fee on the + /// `dest` side. + /// - `fee_asset_item`: The index into `assets` of the item which should be used to pay + /// fees. + /// - `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase. + #[pallet::call_index(8)] + #[pallet::weight({ + let maybe_assets: Result = (*assets.clone()).try_into(); + let maybe_dest: Result = (*dest.clone()).try_into(); + match (maybe_assets, maybe_dest) { + (Ok(assets), Ok(dest)) => { + use sp_std::vec; + let mut message = Xcm(vec![ + TransferReserveAsset { assets, dest, xcm: Xcm(vec![]) } + ]); + T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::reserve_transfer_assets().saturating_add(w)) + } + _ => Weight::MAX, + } + })] + pub fn limited_reserve_transfer_assets( + origin: OriginFor, + dest: Box, + beneficiary: Box, + assets: Box, + fee_asset_item: u32, + weight_limit: WeightLimit, + ) -> DispatchResult { + Self::do_reserve_transfer_assets( + origin, + dest, + beneficiary, + assets, + fee_asset_item, + Some(weight_limit), + ) + } + + /// Teleport some assets from the local chain to some destination chain. 
+ /// + /// Fee payment on the destination side is made from the asset in the `assets` vector of + /// index `fee_asset_item`, up to enough to pay for `weight_limit` of weight. If more weight + /// is needed than `weight_limit`, then the operation will fail and the assets send may be + /// at risk. + /// + /// - `origin`: Must be capable of withdrawing the `assets` and executing XCM. + /// - `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send + /// from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain. + /// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be + /// an `AccountId32` value. + /// - `assets`: The assets to be withdrawn. The first item should be the currency used to to pay the fee on the + /// `dest` side. May not be empty. + /// - `fee_asset_item`: The index into `assets` of the item which should be used to pay + /// fees. + /// - `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase. + #[pallet::call_index(9)] + #[pallet::weight({ + let maybe_assets: Result = (*assets.clone()).try_into(); + let maybe_dest: Result = (*dest.clone()).try_into(); + match (maybe_assets, maybe_dest) { + (Ok(assets), Ok(dest)) => { + use sp_std::vec; + let mut message = Xcm(vec![ + WithdrawAsset(assets), + InitiateTeleport { assets: Wild(All), dest, xcm: Xcm(vec![]) }, + ]); + T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::teleport_assets().saturating_add(w)) + } + _ => Weight::MAX, + } + })] + pub fn limited_teleport_assets( + origin: OriginFor, + dest: Box, + beneficiary: Box, + assets: Box, + fee_asset_item: u32, + weight_limit: WeightLimit, + ) -> DispatchResult { + Self::do_teleport_assets( + origin, + dest, + beneficiary, + assets, + fee_asset_item, + Some(weight_limit), + ) + } + + /// Transfer some assets from sovereign account to reserve holder chain and + /// forward a notification XCM. 
+ /// + /// Fee payment on the destination side is made from the asset in the `assets` vector of + /// index `fee_asset_item`. The weight limit for fees is not provided and thus is unlimited, + /// with all fees taken as needed from the asset. + /// + /// - `origin`: Must be capable of withdrawing the `assets` and executing XCM. + /// - `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send + /// from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain. + /// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be + /// an `AccountId32` value. + /// - `assets`: The assets to be withdrawn. This should include the assets used to pay the fee on the + /// `dest` side. + /// - `fee_asset_item`: The index into `assets` of the item which should be used to pay + /// fees. + #[pallet::call_index(200)] + #[pallet::weight({ + match ((*assets.clone()).try_into(), (*dest.clone()).try_into()) { + (Ok(assets), Ok(dest)) => { + use sp_std::vec; + let mut message = Xcm(vec![ + WithdrawAsset(assets), + InitiateReserveWithdraw { assets: Wild(All), reserve: dest, xcm: Xcm(vec![]) } + ]); + T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::reserve_withdraw_assets().saturating_add(w)) + }, + _ => Weight::MAX, + } + })] + pub fn reserve_withdraw_assets( + origin: OriginFor, + dest: Box, + beneficiary: Box, + assets: Box, + fee_asset_item: u32, + ) -> DispatchResult { + Self::do_reserve_withdraw_assets( + origin, + dest, + beneficiary, + assets, + fee_asset_item, + None, + ) + } + + /// Transfer some assets from sovereign account to reserve holder chain and + /// forward a notification XCM. + /// + /// Fee payment on the destination side is made from the asset in the `assets` vector of + /// index `fee_asset_item`. The weight limit for fees is not provided and thus is unlimited, + /// with all fees taken as needed from the asset. 
+ /// + /// - `origin`: Must be capable of withdrawing the `assets` and executing XCM. + /// - `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send + /// from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain. + /// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be + /// an `AccountId32` value. + /// - `assets`: The assets to be withdrawn. This should include the assets used to pay the fee on the + /// `dest` side. + /// - `fee_asset_item`: The index into `assets` of the item which should be used to pay + /// fees. + /// - `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase. + #[pallet::call_index(201)] + #[pallet::weight({ + match ((*assets.clone()).try_into(), (*dest.clone()).try_into()) { + (Ok(assets), Ok(dest)) => { + use sp_std::vec; + let mut message = Xcm(vec![ + TransferReserveAsset { assets, dest, xcm: Xcm(vec![]) } + ]); + T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::reserve_withdraw_assets().saturating_add(w)) + }, + _ => Weight::MAX, + } + })] + pub fn limited_reserve_withdraw_assets( + origin: OriginFor, + dest: Box, + beneficiary: Box, + assets: Box, + fee_asset_item: u32, + weight_limit: WeightLimit, + ) -> DispatchResult { + Self::do_reserve_withdraw_assets( + origin, + dest, + beneficiary, + assets, + fee_asset_item, + Some(weight_limit), + ) + } + } +} + +/// The maximum number of distinct assets allowed to be transferred in a single helper extrinsic. 
+const MAX_ASSETS_FOR_TRANSFER: usize = 2; + +impl Pallet { + fn do_reserve_withdraw_assets( + origin: OriginFor, + dest: Box, + beneficiary: Box, + assets: Box, + fee_asset_item: u32, + maybe_weight_limit: Option, + ) -> DispatchResult { + let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; + let dest = (*dest).try_into().map_err(|()| Error::::BadVersion)?; + let beneficiary: MultiLocation = (*beneficiary) + .try_into() + .map_err(|()| Error::::BadVersion)?; + let assets: MultiAssets = (*assets).try_into().map_err(|()| Error::::BadVersion)?; + + ensure!( + assets.len() <= MAX_ASSETS_FOR_TRANSFER, + Error::::TooManyAssets + ); + let value = (origin_location, assets.into_inner()); + ensure!( + T::XcmReserveTransferFilter::contains(&value), + Error::::Filtered + ); + let (origin_location, assets) = value; + let context = T::UniversalLocation::get(); + let fees = assets + .get(fee_asset_item as usize) + .ok_or(Error::::Empty)? + .clone() + .reanchored(&dest, context) + .map_err(|_| Error::::CannotReanchor)?; + let max_assets = assets.len() as u32; + let assets: MultiAssets = assets.into(); + let weight_limit = match maybe_weight_limit { + Some(weight_limit) => weight_limit, + None => { + let beneficiary = beneficiary.clone(); + let fees = fees.clone(); + let mut remote_message = Xcm(vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { + fees, + weight_limit: Limited(Weight::zero()), + }, + DepositAsset { + assets: Wild(AllCounted(max_assets)), + beneficiary, + }, + ]); + // use local weight for remote message and hope for the best. 
+ let remote_weight = T::Weigher::weight(&mut remote_message) + .map_err(|()| Error::::UnweighableMessage)?; + Limited(remote_weight) + } + }; + let xcm = Xcm(vec![ + BuyExecution { fees, weight_limit }, + DepositAsset { + assets: Wild(AllCounted(max_assets)), + beneficiary, + }, + ]); + let mut message = Xcm(vec![ + WithdrawAsset(assets), + InitiateReserveWithdraw { + assets: Wild(All), + reserve: dest, + xcm, + }, + ]); + let weight = + T::Weigher::weight(&mut message).map_err(|()| Error::::UnweighableMessage)?; + let hash = message.using_encoded(sp_io::hashing::blake2_256); + let outcome = + T::XcmExecutor::execute_xcm_in_credit(origin_location, message, hash, weight, weight); + Self::deposit_event(Event::Attempted(outcome)); + Ok(()) + } + + fn do_reserve_transfer_assets( + origin: OriginFor, + dest: Box, + beneficiary: Box, + assets: Box, + fee_asset_item: u32, + maybe_weight_limit: Option, + ) -> DispatchResult { + let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; + let dest = (*dest).try_into().map_err(|()| Error::::BadVersion)?; + let beneficiary: MultiLocation = (*beneficiary) + .try_into() + .map_err(|()| Error::::BadVersion)?; + let assets: MultiAssets = (*assets).try_into().map_err(|()| Error::::BadVersion)?; + + ensure!( + assets.len() <= MAX_ASSETS_FOR_TRANSFER, + Error::::TooManyAssets + ); + let value = (origin_location, assets.into_inner()); + ensure!( + T::XcmReserveTransferFilter::contains(&value), + Error::::Filtered + ); + let (origin_location, assets) = value; + let context = T::UniversalLocation::get(); + let fees = assets + .get(fee_asset_item as usize) + .ok_or(Error::::Empty)? 
+ .clone() + .reanchored(&dest, context) + .map_err(|_| Error::::CannotReanchor)?; + let max_assets = assets.len() as u32; + let assets: MultiAssets = assets.into(); + let weight_limit = match maybe_weight_limit { + Some(weight_limit) => weight_limit, + None => { + let fees = fees.clone(); + let mut remote_message = Xcm(vec![ + ReserveAssetDeposited(assets.clone()), + ClearOrigin, + BuyExecution { + fees, + weight_limit: Limited(Weight::zero()), + }, + DepositAsset { + assets: Wild(AllCounted(max_assets)), + beneficiary, + }, + ]); + // use local weight for remote message and hope for the best. + let remote_weight = T::Weigher::weight(&mut remote_message) + .map_err(|()| Error::::UnweighableMessage)?; + Limited(remote_weight) + } + }; + let xcm = Xcm(vec![ + BuyExecution { fees, weight_limit }, + DepositAsset { + assets: Wild(AllCounted(max_assets)), + beneficiary, + }, + ]); + let mut message = Xcm(vec![TransferReserveAsset { assets, dest, xcm }]); + let weight = + T::Weigher::weight(&mut message).map_err(|()| Error::::UnweighableMessage)?; + let hash = message.using_encoded(sp_io::hashing::blake2_256); + let outcome = + T::XcmExecutor::execute_xcm_in_credit(origin_location, message, hash, weight, weight); + Self::deposit_event(Event::Attempted(outcome)); + Ok(()) + } + + fn do_teleport_assets( + origin: OriginFor, + dest: Box, + beneficiary: Box, + assets: Box, + fee_asset_item: u32, + maybe_weight_limit: Option, + ) -> DispatchResult { + let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; + let dest = (*dest).try_into().map_err(|()| Error::::BadVersion)?; + let beneficiary: MultiLocation = (*beneficiary) + .try_into() + .map_err(|()| Error::::BadVersion)?; + let assets: MultiAssets = (*assets).try_into().map_err(|()| Error::::BadVersion)?; + + ensure!( + assets.len() <= MAX_ASSETS_FOR_TRANSFER, + Error::::TooManyAssets + ); + let value = (origin_location, assets.into_inner()); + ensure!(T::XcmTeleportFilter::contains(&value), Error::::Filtered); + 
let (origin_location, assets) = value; + let context = T::UniversalLocation::get(); + let fees = assets + .get(fee_asset_item as usize) + .ok_or(Error::::Empty)? + .clone() + .reanchored(&dest, context) + .map_err(|_| Error::::CannotReanchor)?; + let max_assets = assets.len() as u32; + let assets: MultiAssets = assets.into(); + let weight_limit = match maybe_weight_limit { + Some(weight_limit) => weight_limit, + None => { + let fees = fees.clone(); + let mut remote_message = Xcm(vec![ + ReceiveTeleportedAsset(assets.clone()), + ClearOrigin, + BuyExecution { + fees, + weight_limit: Limited(Weight::zero()), + }, + DepositAsset { + assets: Wild(AllCounted(max_assets)), + beneficiary, + }, + ]); + // use local weight for remote message and hope for the best. + let remote_weight = T::Weigher::weight(&mut remote_message) + .map_err(|()| Error::::UnweighableMessage)?; + Limited(remote_weight) + } + }; + let xcm = Xcm(vec![ + BuyExecution { fees, weight_limit }, + DepositAsset { + assets: Wild(AllCounted(max_assets)), + beneficiary, + }, + ]); + let mut message = Xcm(vec![ + WithdrawAsset(assets), + InitiateTeleport { + assets: Wild(All), + dest, + xcm, + }, + ]); + let weight = + T::Weigher::weight(&mut message).map_err(|()| Error::::UnweighableMessage)?; + let hash = message.using_encoded(sp_io::hashing::blake2_256); + let outcome = + T::XcmExecutor::execute_xcm_in_credit(origin_location, message, hash, weight, weight); + Self::deposit_event(Event::Attempted(outcome)); + Ok(()) + } + + /// Will always make progress, and will do its best not to use much more than `weight_cutoff` + /// in doing so. 
+ pub(crate) fn check_xcm_version_change( + mut stage: VersionMigrationStage, + weight_cutoff: Weight, + ) -> (Weight, Option) { + let mut weight_used = Weight::zero(); + + let sv_migrate_weight = T::WeightInfo::migrate_supported_version(); + let vn_migrate_weight = T::WeightInfo::migrate_version_notifiers(); + let vnt_already_notified_weight = T::WeightInfo::already_notified_target(); + let vnt_notify_weight = T::WeightInfo::notify_current_targets(); + let vnt_migrate_weight = T::WeightInfo::migrate_version_notify_targets(); + let vnt_migrate_fail_weight = T::WeightInfo::notify_target_migration_fail(); + let vnt_notify_migrate_weight = T::WeightInfo::migrate_and_notify_old_targets(); + + use VersionMigrationStage::*; + + if stage == MigrateSupportedVersion { + // We assume that supported XCM version only ever increases, so just cycle through lower + // XCM versioned from the current. + for v in 0..XCM_VERSION { + for (old_key, value) in SupportedVersion::::drain_prefix(v) { + if let Ok(new_key) = old_key.into_latest() { + SupportedVersion::::insert(XCM_VERSION, new_key, value); + } + weight_used.saturating_accrue(sv_migrate_weight); + if weight_used.any_gte(weight_cutoff) { + return (weight_used, Some(stage)); + } + } + } + stage = MigrateVersionNotifiers; + } + if stage == MigrateVersionNotifiers { + for v in 0..XCM_VERSION { + for (old_key, value) in VersionNotifiers::::drain_prefix(v) { + if let Ok(new_key) = old_key.into_latest() { + VersionNotifiers::::insert(XCM_VERSION, new_key, value); + } + weight_used.saturating_accrue(vn_migrate_weight); + if weight_used.any_gte(weight_cutoff) { + return (weight_used, Some(stage)); + } + } + } + stage = NotifyCurrentTargets(None); + } + + let xcm_version = T::AdvertisedXcmVersion::get(); + + if let NotifyCurrentTargets(maybe_last_raw_key) = stage { + let mut iter = match maybe_last_raw_key { + Some(k) => VersionNotifyTargets::::iter_prefix_from(XCM_VERSION, k), + None => VersionNotifyTargets::::iter_prefix(XCM_VERSION), 
+ }; + while let Some((key, value)) = iter.next() { + let (query_id, max_weight, target_xcm_version) = value; + let new_key: MultiLocation = match key.clone().try_into() { + Ok(k) if target_xcm_version != xcm_version => k, + _ => { + // We don't early return here since we need to be certain that we + // make some progress. + weight_used.saturating_accrue(vnt_already_notified_weight); + continue; + } + }; + let response = Response::Version(xcm_version); + let message = Xcm(vec![QueryResponse { + query_id, + response, + max_weight, + querier: None, + }]); + let event = match send_xcm::(new_key, message) { + Ok((_hash, cost)) => { + let value = (query_id, max_weight, xcm_version); + VersionNotifyTargets::::insert(XCM_VERSION, key, value); + Event::VersionChangeNotified(new_key, xcm_version, cost) + } + Err(e) => { + VersionNotifyTargets::::remove(XCM_VERSION, key); + Event::NotifyTargetSendFail(new_key, query_id, e.into()) + } + }; + Self::deposit_event(event); + weight_used.saturating_accrue(vnt_notify_weight); + if weight_used.any_gte(weight_cutoff) { + let last = Some(iter.last_raw_key().into()); + return (weight_used, Some(NotifyCurrentTargets(last))); + } + } + stage = MigrateAndNotifyOldTargets; + } + if stage == MigrateAndNotifyOldTargets { + for v in 0..XCM_VERSION { + for (old_key, value) in VersionNotifyTargets::::drain_prefix(v) { + let (query_id, max_weight, target_xcm_version) = value; + let new_key = match MultiLocation::try_from(old_key.clone()) { + Ok(k) => k, + Err(()) => { + Self::deposit_event(Event::NotifyTargetMigrationFail(old_key, value.0)); + weight_used.saturating_accrue(vnt_migrate_fail_weight); + if weight_used.any_gte(weight_cutoff) { + return (weight_used, Some(stage)); + } + continue; + } + }; + + let versioned_key = LatestVersionedMultiLocation(&new_key); + if target_xcm_version == xcm_version { + VersionNotifyTargets::::insert(XCM_VERSION, versioned_key, value); + weight_used.saturating_accrue(vnt_migrate_weight); + } else { + // Need 
to notify target. + let response = Response::Version(xcm_version); + let message = Xcm(vec![QueryResponse { + query_id, + response, + max_weight, + querier: None, + }]); + let event = match send_xcm::(new_key, message) { + Ok((_hash, cost)) => { + VersionNotifyTargets::::insert( + XCM_VERSION, + versioned_key, + (query_id, max_weight, xcm_version), + ); + Event::VersionChangeNotified(new_key, xcm_version, cost) + } + Err(e) => Event::NotifyTargetSendFail(new_key, query_id, e.into()), + }; + Self::deposit_event(event); + weight_used.saturating_accrue(vnt_notify_migrate_weight); + } + if weight_used.any_gte(weight_cutoff) { + return (weight_used, Some(stage)); + } + } + } + } + (weight_used, None) + } + + /// Request that `dest` informs us of its version. + pub fn request_version_notify(dest: impl Into) -> XcmResult { + let dest = dest.into(); + let versioned_dest = VersionedMultiLocation::from(dest); + let already = VersionNotifiers::::contains_key(XCM_VERSION, &versioned_dest); + ensure!(!already, XcmError::InvalidLocation); + let query_id = QueryCounter::::mutate(|q| { + let r = *q; + q.saturating_inc(); + r + }); + // TODO #3735: Correct weight. + let instruction = SubscribeVersion { + query_id, + max_response_weight: Weight::zero(), + }; + let (_hash, cost) = send_xcm::(dest, Xcm(vec![instruction]))?; + Self::deposit_event(Event::VersionNotifyRequested(dest, cost)); + VersionNotifiers::::insert(XCM_VERSION, &versioned_dest, query_id); + let query_status = QueryStatus::VersionNotifier { + origin: versioned_dest, + is_active: false, + }; + Queries::::insert(query_id, query_status); + Ok(()) + } + + /// Request that `dest` ceases informing us of its version. 
+ pub fn unrequest_version_notify(dest: impl Into) -> XcmResult { + let dest = dest.into(); + let versioned_dest = LatestVersionedMultiLocation(&dest); + let query_id = VersionNotifiers::::take(XCM_VERSION, versioned_dest) + .ok_or(XcmError::InvalidLocation)?; + let (_hash, cost) = send_xcm::(dest, Xcm(vec![UnsubscribeVersion]))?; + Self::deposit_event(Event::VersionNotifyUnrequested(dest, cost)); + Queries::::remove(query_id); + Ok(()) + } + + /// Relay an XCM `message` from a given `interior` location in this context to a given `dest` + /// location. The `fee_payer` is charged for the delivery unless `None` in which case fees + /// are not charged (and instead borne by the chain). + pub fn send_xcm( + interior: impl Into, + dest: impl Into, + mut message: Xcm<()>, + ) -> Result { + let interior = interior.into(); + let dest = dest.into(); + let maybe_fee_payer = if interior != Junctions::Here { + message.0.insert(0, DescendOrigin(interior)); + Some(interior.into()) + } else { + None + }; + log::debug!(target: "xcm::send_xcm", "dest: {:?}, message: {:?}", &dest, &message); + let (ticket, price) = validate_send::(dest, message)?; + if let Some(fee_payer) = maybe_fee_payer { + Self::charge_fees(fee_payer, price).map_err(|_| SendError::Fees)?; + } + T::XcmRouter::deliver(ticket) + } + + pub fn check_account() -> T::AccountId { + const ID: PalletId = PalletId(*b"py/xcmch"); + AccountIdConversion::::into_account_truncating(&ID) + } + + /// Create a new expectation of a query response with the querier being here. 
+ fn do_new_query( + responder: impl Into, + maybe_notify: Option<(u8, u8)>, + timeout: T::BlockNumber, + match_querier: impl Into, + ) -> u64 { + QueryCounter::::mutate(|q| { + let r = *q; + q.saturating_inc(); + Queries::::insert( + r, + QueryStatus::Pending { + responder: responder.into().into(), + maybe_match_querier: Some(match_querier.into().into()), + maybe_notify, + timeout, + }, + ); + r + }) + } + + /// Consume `message` and return another which is equivalent to it except that it reports + /// back the outcome. + /// + /// - `message`: The message whose outcome should be reported. + /// - `responder`: The origin from which a response should be expected. + /// - `timeout`: The block number after which it is permissible for `notify` not to be + /// called even if a response is received. + /// + /// `report_outcome` may return an error if the `responder` is not invertible. + /// + /// It is assumed that the querier of the response will be `Here`. + /// + /// To check the status of the query, use `fn query()` passing the resultant `QueryId` + /// value. + pub fn report_outcome( + message: &mut Xcm<()>, + responder: impl Into, + timeout: T::BlockNumber, + ) -> Result { + let responder = responder.into(); + let destination = T::UniversalLocation::get() + .invert_target(&responder) + .map_err(|()| XcmError::LocationNotInvertible)?; + let query_id = Self::new_query(responder, timeout, Here); + let response_info = QueryResponseInfo { + destination, + query_id, + max_weight: Weight::zero(), + }; + let report_error = Xcm(vec![ReportError(response_info)]); + message.0.insert(0, SetAppendix(report_error)); + Ok(query_id) + } + + /// Consume `message` and return another which is equivalent to it except that it reports + /// back the outcome and dispatches `notify` on this chain. + /// + /// - `message`: The message whose outcome should be reported. + /// - `responder`: The origin from which a response should be expected. 
+ /// - `notify`: A dispatchable function which will be called once the outcome of `message` + /// is known. It may be a dispatchable in any pallet of the local chain, but other than + /// the usual origin, it must accept exactly two arguments: `query_id: QueryId` and + /// `outcome: Response`, and in that order. It should expect that the origin is + /// `Origin::Response` and will contain the responder's location. + /// - `timeout`: The block number after which it is permissible for `notify` not to be + /// called even if a response is received. + /// + /// `report_outcome_notify` may return an error if the `responder` is not invertible. + /// + /// It is assumed that the querier of the response will be `Here`. + /// + /// NOTE: `notify` gets called as part of handling an incoming message, so it should be + /// lightweight. Its weight is estimated during this function and stored ready for + /// weighing `ReportOutcome` on the way back. If it turns out to be heavier once it returns + /// then reporting the outcome will fail. Futhermore if the estimate is too high, then it + /// may be put in the overweight queue and need to be manually executed. + pub fn report_outcome_notify( + message: &mut Xcm<()>, + responder: impl Into, + notify: impl Into<::RuntimeCall>, + timeout: T::BlockNumber, + ) -> Result<(), XcmError> { + let responder = responder.into(); + let destination = T::UniversalLocation::get() + .invert_target(&responder) + .map_err(|()| XcmError::LocationNotInvertible)?; + let notify: ::RuntimeCall = notify.into(); + let max_weight = notify.get_dispatch_info().weight; + let query_id = Self::new_notify_query(responder, notify, timeout, Here); + let response_info = QueryResponseInfo { + destination, + query_id, + max_weight, + }; + let report_error = Xcm(vec![ReportError(response_info)]); + message.0.insert(0, SetAppendix(report_error)); + Ok(()) + } + + /// Attempt to create a new query ID and register it as a query that is yet to respond. 
+ pub fn new_query( + responder: impl Into, + timeout: T::BlockNumber, + match_querier: impl Into, + ) -> u64 { + Self::do_new_query(responder, None, timeout, match_querier) + } + + /// Attempt to create a new query ID and register it as a query that is yet to respond, and + /// which will call a dispatchable when a response happens. + pub fn new_notify_query( + responder: impl Into, + notify: impl Into<::RuntimeCall>, + timeout: T::BlockNumber, + match_querier: impl Into, + ) -> u64 { + let notify = notify + .into() + .using_encoded(|mut bytes| Decode::decode(&mut bytes)) + .expect( + "decode input is output of Call encode; Call guaranteed to have two enums; qed", + ); + Self::do_new_query(responder, Some(notify), timeout, match_querier) + } + + /// Attempt to remove and return the response of query with ID `query_id`. + /// + /// Returns `None` if the response is not (yet) available. + pub fn take_response(query_id: QueryId) -> Option<(Response, T::BlockNumber)> { + if let Some(QueryStatus::Ready { response, at }) = Queries::::get(query_id) { + let response = response.try_into().ok()?; + Queries::::remove(query_id); + Self::deposit_event(Event::ResponseTaken(query_id)); + Some((response, at)) + } else { + None + } + } + + /// Note that a particular destination to whom we would like to send a message is unknown + /// and queue it for version discovery. + fn note_unknown_version(dest: &MultiLocation) { + log::trace!( + target: "xcm::pallet_xcm::note_unknown_version", + "XCM version is unknown for destination: {:?}", + dest, + ); + let versioned_dest = VersionedMultiLocation::from(*dest); + VersionDiscoveryQueue::::mutate(|q| { + if let Some(index) = q.iter().position(|i| &i.0 == &versioned_dest) { + // exists - just bump the count. + q[index].1.saturating_inc(); + } else { + let _ = q.try_push((versioned_dest, 1)); + } + }); + } + + /// Withdraw given `assets` from the given `location` and pay as XCM fees. 
+ /// + /// Fails if: + /// - the `assets` are not known on this chain; + /// - the `assets` cannot be withdrawn with that location as the Origin. + fn charge_fees(location: MultiLocation, assets: MultiAssets) -> DispatchResult { + T::XcmExecutor::charge_fees(location, assets.clone()) + .map_err(|_| Error::::FeesNotMet)?; + Self::deposit_event(Event::FeesPaid(location, assets)); + Ok(()) + } +} + +pub struct LockTicket { + sovereign_account: T::AccountId, + amount: BalanceOf, + unlocker: MultiLocation, + item_index: Option, +} + +impl xcm_executor::traits::Enact for LockTicket { + fn enact(self) -> Result<(), xcm_executor::traits::LockError> { + use xcm_executor::traits::LockError::UnexpectedState; + let mut locks = LockedFungibles::::get(&self.sovereign_account).unwrap_or_default(); + match self.item_index { + Some(index) => { + ensure!(locks.len() > index, UnexpectedState); + ensure!( + locks[index].1.try_as::<_>() == Ok(&self.unlocker), + UnexpectedState + ); + locks[index].0 = locks[index].0.max(self.amount); + } + None => { + locks + .try_push((self.amount, self.unlocker.into())) + .map_err(|(_balance, _location)| UnexpectedState)?; + } + } + LockedFungibles::::insert(&self.sovereign_account, locks); + T::Currency::extend_lock( + *b"py/xcmlk", + &self.sovereign_account, + self.amount, + WithdrawReasons::all(), + ); + Ok(()) + } +} + +pub struct UnlockTicket { + sovereign_account: T::AccountId, + amount: BalanceOf, + unlocker: MultiLocation, +} + +impl xcm_executor::traits::Enact for UnlockTicket { + fn enact(self) -> Result<(), xcm_executor::traits::LockError> { + use xcm_executor::traits::LockError::UnexpectedState; + let mut locks = + LockedFungibles::::get(&self.sovereign_account).ok_or(UnexpectedState)?; + let mut maybe_remove_index = None; + let mut locked = BalanceOf::::zero(); + let mut found = false; + // We could just as well do with with an into_iter, filter_map and collect, however this way + // avoids making an allocation. 
+ for (i, x) in locks.iter_mut().enumerate() { + if x.1.try_as::<_>().defensive() == Ok(&self.unlocker) { + x.0 = x.0.saturating_sub(self.amount); + if x.0.is_zero() { + maybe_remove_index = Some(i); + } + found = true; + } + locked = locked.max(x.0); + } + ensure!(found, UnexpectedState); + if let Some(remove_index) = maybe_remove_index { + locks.swap_remove(remove_index); + } + LockedFungibles::::insert(&self.sovereign_account, locks); + let reasons = WithdrawReasons::all(); + T::Currency::set_lock(*b"py/xcmlk", &self.sovereign_account, locked, reasons); + Ok(()) + } +} + +pub struct ReduceTicket { + key: (u32, T::AccountId, VersionedAssetId), + amount: u128, + locker: VersionedMultiLocation, + owner: VersionedMultiLocation, +} + +impl xcm_executor::traits::Enact for ReduceTicket { + fn enact(self) -> Result<(), xcm_executor::traits::LockError> { + use xcm_executor::traits::LockError::UnexpectedState; + let mut record = RemoteLockedFungibles::::get(&self.key).ok_or(UnexpectedState)?; + ensure!( + self.locker == record.locker && self.owner == record.owner, + UnexpectedState + ); + ensure!(record.users == 0, UnexpectedState); + record.amount = record + .amount + .checked_sub(self.amount) + .ok_or(UnexpectedState)?; + if record.amount == 0 { + RemoteLockedFungibles::::remove(&self.key); + } else { + RemoteLockedFungibles::::insert(&self.key, &record); + } + Ok(()) + } +} + +impl xcm_executor::traits::AssetLock for Pallet { + type LockTicket = LockTicket; + type UnlockTicket = UnlockTicket; + type ReduceTicket = ReduceTicket; + + fn prepare_lock( + unlocker: MultiLocation, + asset: MultiAsset, + owner: MultiLocation, + ) -> Result, xcm_executor::traits::LockError> { + use xcm_executor::traits::LockError::*; + let sovereign_account = T::SovereignAccountOf::convert_ref(&owner).map_err(|_| BadOwner)?; + let amount = T::CurrencyMatcher::matches_fungible(&asset).ok_or(UnknownAsset)?; + ensure!( + T::Currency::free_balance(&sovereign_account) >= amount, + AssetNotOwned + 
); + let locks = LockedFungibles::::get(&sovereign_account).unwrap_or_default(); + let item_index = locks + .iter() + .position(|x| x.1.try_as::<_>() == Ok(&unlocker)); + ensure!( + item_index.is_some() || locks.len() < T::MaxLockers::get() as usize, + NoResources + ); + Ok(LockTicket { + sovereign_account, + amount, + unlocker, + item_index, + }) + } + + fn prepare_unlock( + unlocker: MultiLocation, + asset: MultiAsset, + owner: MultiLocation, + ) -> Result, xcm_executor::traits::LockError> { + use xcm_executor::traits::LockError::*; + let sovereign_account = T::SovereignAccountOf::convert_ref(&owner).map_err(|_| BadOwner)?; + let amount = T::CurrencyMatcher::matches_fungible(&asset).ok_or(UnknownAsset)?; + ensure!( + T::Currency::free_balance(&sovereign_account) >= amount, + AssetNotOwned + ); + let locks = LockedFungibles::::get(&sovereign_account).unwrap_or_default(); + let item_index = locks + .iter() + .position(|x| x.1.try_as::<_>() == Ok(&unlocker)) + .ok_or(NotLocked)?; + ensure!(locks[item_index].0 >= amount, NotLocked); + Ok(UnlockTicket { + sovereign_account, + amount, + unlocker, + }) + } + + fn note_unlockable( + locker: MultiLocation, + asset: MultiAsset, + mut owner: MultiLocation, + ) -> Result<(), xcm_executor::traits::LockError> { + use xcm_executor::traits::LockError::*; + ensure!(T::TrustedLockers::contains(&locker, &asset), NotTrusted); + let amount = match asset.fun { + Fungible(a) => a, + NonFungible(_) => return Err(Unimplemented), + }; + owner.remove_network_id(); + let account = T::SovereignAccountOf::convert_ref(&owner).map_err(|_| BadOwner)?; + let locker = locker.into(); + let owner = owner.into(); + let id: VersionedAssetId = asset.id.into(); + let key = (XCM_VERSION, account, id); + let mut record = RemoteLockedFungibleRecord { + amount, + owner, + locker, + users: 0, + }; + if let Some(old) = RemoteLockedFungibles::::get(&key) { + // Make sure that the new record wouldn't clobber any old data. 
+ ensure!( + old.locker == record.locker && old.owner == record.owner, + WouldClobber + ); + record.users = old.users; + record.amount = record.amount.max(old.amount); + } + RemoteLockedFungibles::::insert(&key, record); + Ok(()) + } + + fn prepare_reduce_unlockable( + locker: MultiLocation, + asset: MultiAsset, + mut owner: MultiLocation, + ) -> Result { + use xcm_executor::traits::LockError::*; + let amount = match asset.fun { + Fungible(a) => a, + NonFungible(_) => return Err(Unimplemented), + }; + owner.remove_network_id(); + let sovereign_account = T::SovereignAccountOf::convert_ref(&owner).map_err(|_| BadOwner)?; + let locker = locker.into(); + let owner = owner.into(); + let id: VersionedAssetId = asset.id.into(); + let key = (XCM_VERSION, sovereign_account, id); + + let record = RemoteLockedFungibles::::get(&key).ok_or(NotLocked)?; + // Make sure that the record contains what we expect and there's enough to unlock. + ensure!( + locker == record.locker && owner == record.owner, + WouldClobber + ); + ensure!(record.users == 0, InUse); + ensure!(record.amount >= amount, NotEnoughLocked); + Ok(ReduceTicket { + key, + amount, + locker, + owner, + }) + } +} + +impl WrapVersion for Pallet { + fn wrap_version( + dest: &MultiLocation, + xcm: impl Into>, + ) -> Result, ()> { + SupportedVersion::::get(XCM_VERSION, LatestVersionedMultiLocation(dest)) + .or_else(|| { + Self::note_unknown_version(dest); + SafeXcmVersion::::get() + }) + .ok_or_else(|| { + log::trace!( + target: "xcm::pallet_xcm::wrap_version", + "Could not determine a version to wrap XCM for destination: {:?}", + dest, + ); + () + }) + .and_then(|v| xcm.into().into_version(v.min(XCM_VERSION))) + } +} + +impl VersionChangeNotifier for Pallet { + /// Start notifying `location` should the XCM version of this chain change. + /// + /// When it does, this type should ensure a `QueryResponse` message is sent with the given + /// `query_id` & `max_weight` and with a `response` of `Response::Version`. 
This should happen + /// until/unless `stop` is called with the correct `query_id`. + /// + /// If the `location` has an ongoing notification and when this function is called, then an + /// error should be returned. + fn start( + dest: &MultiLocation, + query_id: QueryId, + max_weight: Weight, + _context: &XcmContext, + ) -> XcmResult { + let versioned_dest = LatestVersionedMultiLocation(dest); + let already = VersionNotifyTargets::::contains_key(XCM_VERSION, versioned_dest); + ensure!(!already, XcmError::InvalidLocation); + + let xcm_version = T::AdvertisedXcmVersion::get(); + let response = Response::Version(xcm_version); + let instruction = QueryResponse { + query_id, + response, + max_weight, + querier: None, + }; + let (_hash, cost) = send_xcm::(*dest, Xcm(vec![instruction]))?; + Self::deposit_event(Event::::VersionNotifyStarted(*dest, cost)); + + let value = (query_id, max_weight, xcm_version); + VersionNotifyTargets::::insert(XCM_VERSION, versioned_dest, value); + Ok(()) + } + + /// Stop notifying `location` should the XCM change. This is a no-op if there was never a + /// subscription. + fn stop(dest: &MultiLocation, _context: &XcmContext) -> XcmResult { + VersionNotifyTargets::::remove(XCM_VERSION, LatestVersionedMultiLocation(dest)); + Ok(()) + } + + /// Return true if a location is subscribed to XCM version changes. 
+ fn is_subscribed(dest: &MultiLocation) -> bool { + let versioned_dest = LatestVersionedMultiLocation(dest); + VersionNotifyTargets::::contains_key(XCM_VERSION, versioned_dest) + } +} + +impl DropAssets for Pallet { + fn drop_assets(origin: &MultiLocation, assets: Assets, _context: &XcmContext) -> Weight { + if assets.is_empty() { + return Weight::zero(); + } + let versioned = VersionedMultiAssets::from(MultiAssets::from(assets)); + let hash = BlakeTwo256::hash_of(&(&origin, &versioned)); + AssetTraps::::mutate(hash, |n| *n += 1); + Self::deposit_event(Event::AssetsTrapped(hash, *origin, versioned)); + // TODO #3735: Put the real weight in there. + Weight::zero() + } +} + +impl ClaimAssets for Pallet { + fn claim_assets( + origin: &MultiLocation, + ticket: &MultiLocation, + assets: &MultiAssets, + _context: &XcmContext, + ) -> bool { + let mut versioned = VersionedMultiAssets::from(assets.clone()); + match (ticket.parents, &ticket.interior) { + (0, X1(GeneralIndex(i))) => { + versioned = match versioned.into_version(*i as u32) { + Ok(v) => v, + Err(()) => return false, + } + } + (0, Here) => (), + _ => return false, + }; + let hash = BlakeTwo256::hash_of(&(origin, versioned.clone())); + match AssetTraps::::get(hash) { + 0 => return false, + 1 => AssetTraps::::remove(hash), + n => AssetTraps::::insert(hash, n - 1), + } + Self::deposit_event(Event::AssetsClaimed(hash, *origin, versioned)); + return true; + } +} + +impl OnResponse for Pallet { + fn expecting_response( + origin: &MultiLocation, + query_id: QueryId, + querier: Option<&MultiLocation>, + ) -> bool { + match Queries::::get(query_id) { + Some(QueryStatus::Pending { + responder, + maybe_match_querier, + .. 
+ }) => { + MultiLocation::try_from(responder).map_or(false, |r| origin == &r) + && maybe_match_querier.map_or(true, |match_querier| { + MultiLocation::try_from(match_querier).map_or(false, |match_querier| { + querier.map_or(false, |q| q == &match_querier) + }) + }) + } + Some(QueryStatus::VersionNotifier { origin: r, .. }) => { + MultiLocation::try_from(r).map_or(false, |r| origin == &r) + } + _ => false, + } + } + + fn on_response( + origin: &MultiLocation, + query_id: QueryId, + querier: Option<&MultiLocation>, + response: Response, + max_weight: Weight, + _context: &XcmContext, + ) -> Weight { + match (response, Queries::::get(query_id)) { + ( + Response::Version(v), + Some(QueryStatus::VersionNotifier { + origin: expected_origin, + is_active, + }), + ) => { + let origin: MultiLocation = match expected_origin.try_into() { + Ok(o) if &o == origin => o, + Ok(o) => { + Self::deposit_event(Event::InvalidResponder(*origin, query_id, Some(o))); + return Weight::zero(); + } + _ => { + Self::deposit_event(Event::InvalidResponder(*origin, query_id, None)); + // TODO #3735: Correct weight for this. + return Weight::zero(); + } + }; + // TODO #3735: Check max_weight is correct. + if !is_active { + Queries::::insert( + query_id, + QueryStatus::VersionNotifier { + origin: origin.into(), + is_active: true, + }, + ); + } + // We're being notified of a version change. + SupportedVersion::::insert( + XCM_VERSION, + LatestVersionedMultiLocation(&origin), + v, + ); + Self::deposit_event(Event::SupportedVersionChanged(origin, v)); + Weight::zero() + } + ( + response, + Some(QueryStatus::Pending { + responder, + maybe_notify, + maybe_match_querier, + .. 
+ }), + ) => { + if let Some(match_querier) = maybe_match_querier { + let match_querier = match MultiLocation::try_from(match_querier) { + Ok(mq) => mq, + Err(_) => { + Self::deposit_event(Event::InvalidQuerierVersion(*origin, query_id)); + return Weight::zero(); + } + }; + if querier.map_or(true, |q| q != &match_querier) { + Self::deposit_event(Event::InvalidQuerier( + *origin, + query_id, + match_querier, + querier.cloned(), + )); + return Weight::zero(); + } + } + let responder = match MultiLocation::try_from(responder) { + Ok(r) => r, + Err(_) => { + Self::deposit_event(Event::InvalidResponderVersion(*origin, query_id)); + return Weight::zero(); + } + }; + if origin != &responder { + Self::deposit_event(Event::InvalidResponder( + *origin, + query_id, + Some(responder), + )); + return Weight::zero(); + } + return match maybe_notify { + Some((pallet_index, call_index)) => { + // This is a bit horrible, but we happen to know that the `Call` will + // be built by `(pallet_index: u8, call_index: u8, QueryId, Response)`. + // So we just encode that and then re-encode to a real Call. + let bare = (pallet_index, call_index, query_id, response); + if let Ok(call) = bare.using_encoded(|mut bytes| { + ::RuntimeCall::decode(&mut bytes) + }) { + Queries::::remove(query_id); + let weight = call.get_dispatch_info().weight; + if weight.any_gt(max_weight) { + let e = Event::NotifyOverweight( + query_id, + pallet_index, + call_index, + weight, + max_weight, + ); + Self::deposit_event(e); + return Weight::zero(); + } + let dispatch_origin = Origin::Response(*origin).into(); + match call.dispatch(dispatch_origin) { + Ok(post_info) => { + let e = Event::Notified(query_id, pallet_index, call_index); + Self::deposit_event(e); + post_info.actual_weight + } + Err(error_and_info) => { + let e = Event::NotifyDispatchError( + query_id, + pallet_index, + call_index, + ); + Self::deposit_event(e); + // Not much to do with the result as it is. 
It's up to the parachain to ensure that the + // message makes sense. + error_and_info.post_info.actual_weight + } + } + .unwrap_or(weight) + } else { + let e = Event::NotifyDecodeFailed(query_id, pallet_index, call_index); + Self::deposit_event(e); + Weight::zero() + } + } + None => { + let e = Event::ResponseReady(query_id, response.clone()); + Self::deposit_event(e); + let at = frame_system::Pallet::::current_block_number(); + let response = response.into(); + Queries::::insert(query_id, QueryStatus::Ready { response, at }); + Weight::zero() + } + }; + } + _ => { + Self::deposit_event(Event::UnexpectedResponse(*origin, query_id)); + Weight::zero() + } + } + } +} + +/// Ensure that the origin `o` represents an XCM (`Transact`) origin. +/// +/// Returns `Ok` with the location of the XCM sender or an `Err` otherwise. +pub fn ensure_xcm(o: OuterOrigin) -> Result +where + OuterOrigin: Into>, +{ + match o.into() { + Ok(Origin::Xcm(location)) => Ok(location), + _ => Err(BadOrigin), + } +} + +/// Ensure that the origin `o` represents an XCM response origin. +/// +/// Returns `Ok` with the location of the responder or an `Err` otherwise. +pub fn ensure_response(o: OuterOrigin) -> Result +where + OuterOrigin: Into>, +{ + match o.into() { + Ok(Origin::Response(location)) => Ok(location), + _ => Err(BadOrigin), + } +} + +/// Filter for `MultiLocation` to find those which represent a strict majority approval of an identified +/// plurality. +/// +/// May reasonably be used with `EnsureXcm`. +pub struct IsMajorityOfBody(PhantomData<(Prefix, Body)>); +impl, Body: Get> Contains + for IsMajorityOfBody +{ + fn contains(l: &MultiLocation) -> bool { + let maybe_suffix = l.match_and_split(&Prefix::get()); + matches!(maybe_suffix, Some(Plurality { id, part }) if id == &Body::get() && part.is_majority()) + } +} + +/// Filter for `MultiLocation` to find those which represent a voice of an identified plurality. +/// +/// May reasonably be used with `EnsureXcm`. 
+pub struct IsVoiceOfBody(PhantomData<(Prefix, Body)>); +impl, Body: Get> Contains + for IsVoiceOfBody +{ + fn contains(l: &MultiLocation) -> bool { + let maybe_suffix = l.match_and_split(&Prefix::get()); + matches!(maybe_suffix, Some(Plurality { id, part }) if id == &Body::get() && part == &BodyPart::Voice) + } +} + +/// `EnsureOrigin` implementation succeeding with a `MultiLocation` value to recognize and filter the +/// `Origin::Xcm` item. +pub struct EnsureXcm(PhantomData); +impl, F: Contains> EnsureOrigin for EnsureXcm +where + O::PalletsOrigin: From + TryInto, +{ + type Success = MultiLocation; + + fn try_origin(outer: O) -> Result { + outer.try_with_caller(|caller| { + caller.try_into().and_then(|o| match o { + Origin::Xcm(location) if F::contains(&location) => Ok(location), + Origin::Xcm(location) => Err(Origin::Xcm(location).into()), + o => Err(o.into()), + }) + }) + } + + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result { + Ok(O::from(Origin::Xcm(Here.into()))) + } +} + +/// `EnsureOrigin` implementation succeeding with a `MultiLocation` value to recognize and filter +/// the `Origin::Response` item. +pub struct EnsureResponse(PhantomData); +impl, F: Contains> EnsureOrigin + for EnsureResponse +where + O::PalletsOrigin: From + TryInto, +{ + type Success = MultiLocation; + + fn try_origin(outer: O) -> Result { + outer.try_with_caller(|caller| { + caller.try_into().and_then(|o| match o { + Origin::Response(responder) => Ok(responder), + o => Err(o.into()), + }) + }) + } + + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result { + Ok(O::from(Origin::Response(Here.into()))) + } +} + +/// A simple passthrough where we reuse the `MultiLocation`-typed XCM origin as the inner value of +/// this crate's `Origin::Xcm` value. 
+pub struct XcmPassthrough(PhantomData); +impl> ConvertOrigin + for XcmPassthrough +{ + fn convert_origin( + origin: impl Into, + kind: OriginKind, + ) -> Result { + let origin = origin.into(); + match kind { + OriginKind::Xcm => Ok(crate::Origin::Xcm(origin).into()), + _ => Err(origin), + } + } +} diff --git a/pallets/pallet-xcm/src/migration.rs b/pallets/pallet-xcm/src/migration.rs new file mode 100644 index 0000000000..20d7739a8e --- /dev/null +++ b/pallets/pallet-xcm/src/migration.rs @@ -0,0 +1,68 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . 
+ +use crate::{Config, Pallet, Store}; +use frame_support::{ + pallet_prelude::*, + traits::{OnRuntimeUpgrade, StorageVersion}, + weights::Weight, +}; + +pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); + +const DEFAULT_PROOF_SIZE: u64 = 64 * 1024; + +pub mod v1 { + use super::*; + + pub struct MigrateToV1(sp_std::marker::PhantomData); + impl OnRuntimeUpgrade for MigrateToV1 { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, &'static str> { + ensure!( + StorageVersion::get::>() == 0, + "must upgrade linearly" + ); + + Ok(sp_std::vec::Vec::new()) + } + + fn on_runtime_upgrade() -> Weight { + if StorageVersion::get::>() == 0 { + let mut weight = T::DbWeight::get().reads(1); + + let translate = |pre: (u64, u64, u32)| -> Option<(u64, Weight, u32)> { + weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 1)); + let translated = (pre.0, Weight::from_parts(pre.1, DEFAULT_PROOF_SIZE), pre.2); + log::info!("Migrated VersionNotifyTarget {:?} to {:?}", pre, translated); + Some(translated) + }; + + as Store>::VersionNotifyTargets::translate_values(translate); + + log::info!("v1 applied successfully"); + STORAGE_VERSION.put::>(); + + weight.saturating_add(T::DbWeight::get().writes(1)) + } else { + log::warn!("skipping v1, should be removed"); + T::DbWeight::get().reads(1) + } + } + } +} diff --git a/pallets/pallet-xcm/src/mock.rs b/pallets/pallet-xcm/src/mock.rs new file mode 100644 index 0000000000..be7091eb3f --- /dev/null +++ b/pallets/pallet-xcm/src/mock.rs @@ -0,0 +1,419 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +use frame_support::{ + construct_runtime, parameter_types, + traits::{Everything, Nothing}, + weights::Weight, +}; +use parity_scale_codec::Encode; +use polkadot_parachain::primitives::Id as ParaId; +use polkadot_runtime_parachains::origin; +use sp_core::H256; +use sp_runtime::{testing::Header, traits::IdentityLookup, AccountId32}; +pub use sp_std::{cell::RefCell, fmt::Debug, marker::PhantomData}; +use xcm::prelude::*; +use xcm_builder::{ + AccountId32Aliases, AllowKnownQueryResponses, AllowSubscriptionsFrom, + AllowTopLevelPaidExecutionFrom, Case, ChildParachainAsNative, ChildParachainConvertsVia, + ChildSystemParachainAsSuperuser, CurrencyAdapter as XcmCurrencyAdapter, FixedRateOfFungible, + FixedWeightBounds, IsConcrete, SignedAccountId32AsNative, SignedToAccountId32, + SovereignSignedViaLocation, TakeWeightCredit, +}; +use xcm_executor::XcmExecutor; + +use crate::{self as pallet_xcm, TestWeightInfo}; + +pub type AccountId = AccountId32; +pub type Balance = u128; +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +#[frame_support::pallet] +pub mod pallet_test_notifier { + use crate::{ensure_response, QueryId}; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + use sp_runtime::DispatchResult; + use xcm::latest::prelude::*; + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config + crate::Config { + type RuntimeEvent: IsType<::RuntimeEvent> + From>; + type RuntimeOrigin: IsType<::RuntimeOrigin> + + 
Into::RuntimeOrigin>>; + type RuntimeCall: IsType<::RuntimeCall> + From>; + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + QueryPrepared(QueryId), + NotifyQueryPrepared(QueryId), + ResponseReceived(MultiLocation, QueryId, Response), + } + + #[pallet::error] + pub enum Error { + UnexpectedId, + BadAccountFormat, + } + + #[pallet::call] + impl Pallet { + #[pallet::call_index(0)] + #[pallet::weight(Weight::from_parts(1_000_000, 1_000_000))] + pub fn prepare_new_query(origin: OriginFor, querier: MultiLocation) -> DispatchResult { + let who = ensure_signed(origin)?; + let id = who + .using_encoded(|mut d| <[u8; 32]>::decode(&mut d)) + .map_err(|_| Error::::BadAccountFormat)?; + let qid = crate::Pallet::::new_query( + Junction::AccountId32 { network: None, id }, + 100u32.into(), + querier, + ); + Self::deposit_event(Event::::QueryPrepared(qid)); + Ok(()) + } + + #[pallet::call_index(1)] + #[pallet::weight(Weight::from_parts(1_000_000, 1_000_000))] + pub fn prepare_new_notify_query( + origin: OriginFor, + querier: MultiLocation, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + let id = who + .using_encoded(|mut d| <[u8; 32]>::decode(&mut d)) + .map_err(|_| Error::::BadAccountFormat)?; + let call = Call::::notification_received { + query_id: 0, + response: Default::default(), + }; + let qid = crate::Pallet::::new_notify_query( + Junction::AccountId32 { network: None, id }, + ::RuntimeCall::from(call), + 100u32.into(), + querier, + ); + Self::deposit_event(Event::::NotifyQueryPrepared(qid)); + Ok(()) + } + + #[pallet::call_index(2)] + #[pallet::weight(Weight::from_parts(1_000_000, 1_000_000))] + pub fn notification_received( + origin: OriginFor, + query_id: QueryId, + response: Response, + ) -> DispatchResult { + let responder = ensure_response(::RuntimeOrigin::from(origin))?; + Self::deposit_event(Event::::ResponseReceived(responder, query_id, response)); + Ok(()) + } + } +} + +construct_runtime!( + pub 
enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Storage, Config, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + ParasOrigin: origin::{Pallet, Origin}, + XcmPallet: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config}, + TestNotifier: pallet_test_notifier::{Pallet, Call, Event}, + } +); + +thread_local! { + pub static SENT_XCM: RefCell)>> = RefCell::new(Vec::new()); +} +pub(crate) fn sent_xcm() -> Vec<(MultiLocation, Xcm<()>)> { + SENT_XCM.with(|q| (*q.borrow()).clone()) +} +pub(crate) fn take_sent_xcm() -> Vec<(MultiLocation, Xcm<()>)> { + SENT_XCM.with(|q| { + let mut r = Vec::new(); + std::mem::swap(&mut r, &mut *q.borrow_mut()); + r + }) +} +/// Sender that never returns error, always sends +pub struct TestSendXcm; +impl SendXcm for TestSendXcm { + type Ticket = (MultiLocation, Xcm<()>); + fn validate( + dest: &mut Option, + msg: &mut Option>, + ) -> SendResult<(MultiLocation, Xcm<()>)> { + let pair = (dest.take().unwrap(), msg.take().unwrap()); + Ok((pair, MultiAssets::new())) + } + fn deliver(pair: (MultiLocation, Xcm<()>)) -> Result { + let hash = fake_message_hash(&pair.1); + SENT_XCM.with(|q| q.borrow_mut().push(pair)); + Ok(hash) + } +} +/// Sender that returns error if `X8` junction and stops routing +pub struct TestSendXcmErrX8; +impl SendXcm for TestSendXcmErrX8 { + type Ticket = (MultiLocation, Xcm<()>); + fn validate( + dest: &mut Option, + msg: &mut Option>, + ) -> SendResult<(MultiLocation, Xcm<()>)> { + let (dest, msg) = (dest.take().unwrap(), msg.take().unwrap()); + if dest.len() == 8 { + Err(SendError::Transport("Destination location full")) + } else { + Ok(((dest, msg), MultiAssets::new())) + } + } + fn deliver(pair: (MultiLocation, Xcm<()>)) -> Result { + let hash = fake_message_hash(&pair.1); + SENT_XCM.with(|q| q.borrow_mut().push(pair)); + Ok(hash) + } +} + +parameter_types! 
{ + pub const BlockHashCount: u64 = 250; +} + +impl frame_system::Config for Test { + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = ::sp_runtime::traits::BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Header = Header; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = BlockHashCount; + type BlockWeights = (); + type BlockLength = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type DbWeight = (); + type BaseCallFilter = Everything; + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +parameter_types! { + pub ExistentialDeposit: Balance = 1; + pub const MaxLocks: u32 = 50; + pub const MaxReserves: u32 = 50; +} + +impl pallet_balances::Config for Test { + type MaxLocks = MaxLocks; + type Balance = Balance; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); + type MaxReserves = MaxReserves; + type ReserveIdentifier = [u8; 8]; +} + +parameter_types! { + pub const RelayLocation: MultiLocation = Here.into_location(); + pub const AnyNetwork: Option = None; + pub UniversalLocation: InteriorMultiLocation = Here; + pub UnitWeightCost: u64 = 1_000; +} + +pub type SovereignAccountOf = ( + ChildParachainConvertsVia, + AccountId32Aliases, +); + +pub type LocalAssetTransactor = + XcmCurrencyAdapter, SovereignAccountOf, AccountId, ()>; + +type LocalOriginConverter = ( + SovereignSignedViaLocation, + ChildParachainAsNative, + SignedAccountId32AsNative, + ChildSystemParachainAsSuperuser, +); + +parameter_types! 
{ + pub const BaseXcmWeight: Weight = Weight::from_parts(1_000, 1_000); + pub CurrencyPerSecondPerByte: (AssetId, u128, u128) = (Concrete(RelayLocation::get()), 1, 1); + pub TrustedAssets: (MultiAssetFilter, MultiLocation) = (All.into(), Here.into()); + pub const MaxInstructions: u32 = 100; + pub const MaxAssetsIntoHolding: u32 = 64; +} + +pub type Barrier = ( + TakeWeightCredit, + AllowTopLevelPaidExecutionFrom, + AllowKnownQueryResponses, + AllowSubscriptionsFrom, +); + +pub struct XcmConfig; +impl xcm_executor::Config for XcmConfig { + type RuntimeCall = RuntimeCall; + type XcmSender = TestSendXcm; + type AssetTransactor = LocalAssetTransactor; + type OriginConverter = LocalOriginConverter; + type IsReserve = (); + type IsTeleporter = Case; + type UniversalLocation = UniversalLocation; + type Barrier = Barrier; + type Weigher = FixedWeightBounds; + type Trader = FixedRateOfFungible; + type ResponseHandler = XcmPallet; + type AssetTrap = XcmPallet; + type AssetLocker = (); + type AssetExchanger = (); + type AssetClaims = XcmPallet; + type SubscriptionService = XcmPallet; + type PalletInstancesInfo = AllPalletsWithSystem; + type MaxAssetsIntoHolding = MaxAssetsIntoHolding; + type FeeManager = (); + type MessageExporter = (); + type UniversalAliases = Nothing; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; +} + +pub type LocalOriginToLocation = SignedToAccountId32; + +parameter_types! { + pub static AdvertisedXcmVersion: pallet_xcm::XcmVersion = 3; +} + +#[cfg(feature = "runtime-benchmarks")] +parameter_types! 
{ + pub ReachableDest: Option = Some(Parachain(1000).into()); +} + +impl pallet_xcm::Config for Test { + type RuntimeEvent = RuntimeEvent; + type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; + type XcmRouter = (TestSendXcmErrX8, TestSendXcm); + type ExecuteXcmOrigin = xcm_builder::EnsureXcmOrigin; + type XcmExecuteFilter = Everything; + type XcmExecutor = XcmExecutor; + type XcmTeleportFilter = Everything; + type XcmReserveTransferFilter = Everything; + type Weigher = FixedWeightBounds; + type UniversalLocation = UniversalLocation; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; + type AdvertisedXcmVersion = AdvertisedXcmVersion; + type TrustedLockers = (); + type SovereignAccountOf = AccountId32Aliases<(), AccountId32>; + type Currency = Balances; + type CurrencyMatcher = IsConcrete; + type MaxLockers = frame_support::traits::ConstU32<8>; + type WeightInfo = TestWeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type ReachableDest = ReachableDest; +} + +impl origin::Config for Test {} + +impl pallet_test_notifier::Config for Test { + type RuntimeEvent = RuntimeEvent; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; +} + +pub(crate) fn last_event() -> RuntimeEvent { + System::events().pop().expect("RuntimeEvent expected").event +} + +pub(crate) fn last_events(n: usize) -> Vec { + System::events() + .into_iter() + .map(|e| e.event) + .rev() + .take(n) + .rev() + .collect() +} + +pub(crate) fn buy_execution(fees: impl Into) -> Instruction { + use xcm::latest::prelude::*; + BuyExecution { + fees: fees.into(), + weight_limit: Unlimited, + } +} + +pub(crate) fn buy_limited_execution( + fees: impl Into, + weight: Weight, +) -> Instruction { + use xcm::latest::prelude::*; + BuyExecution { + fees: fees.into(), + weight_limit: Limited(weight), + } +} + +pub(crate) fn new_test_ext_with_balances( + balances: Vec<(AccountId, Balance)>, +) -> sp_io::TestExternalities { + let mut 
t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + + pallet_balances::GenesisConfig:: { balances } + .assimilate_storage(&mut t) + .unwrap(); + + >::assimilate_storage( + &pallet_xcm::GenesisConfig { + safe_xcm_version: Some(2), + }, + &mut t, + ) + .unwrap(); + + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext +} + +pub(crate) fn fake_message_hash(message: &Xcm) -> XcmHash { + message.using_encoded(sp_io::hashing::blake2_256) +} diff --git a/pallets/pallet-xcm/src/tests.rs b/pallets/pallet-xcm/src/tests.rs new file mode 100644 index 0000000000..5fd66e6bdd --- /dev/null +++ b/pallets/pallet-xcm/src/tests.rs @@ -0,0 +1,1511 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . 
+ +use crate::{ + mock::*, AssetTraps, CurrentMigration, Error, LatestVersionedMultiLocation, Queries, + QueryStatus, VersionDiscoveryQueue, VersionNotifiers, VersionNotifyTargets, +}; +use frame_support::{ + assert_noop, assert_ok, + traits::{Currency, Hooks}, + weights::Weight, +}; +use polkadot_parachain::primitives::Id as ParaId; +use sp_runtime::traits::{AccountIdConversion, BlakeTwo256, Hash}; +use xcm::{latest::QueryResponseInfo, prelude::*}; +use xcm_builder::AllowKnownQueryResponses; +use xcm_executor::{traits::ShouldExecute, XcmExecutor}; + +const ALICE: AccountId = AccountId::new([0u8; 32]); +const BOB: AccountId = AccountId::new([1u8; 32]); +const PARA_ID: u32 = 2000; +const INITIAL_BALANCE: u128 = 100; +const SEND_AMOUNT: u128 = 10; + +#[test] +fn report_outcome_notify_works() { + let balances = vec![ + (ALICE, INITIAL_BALANCE), + ( + ParaId::from(PARA_ID).into_account_truncating(), + INITIAL_BALANCE, + ), + ]; + let sender: MultiLocation = AccountId32 { + network: None, + id: ALICE.into(), + } + .into(); + let mut message = Xcm(vec![TransferAsset { + assets: (Here, SEND_AMOUNT).into(), + beneficiary: sender.clone(), + }]); + let call = pallet_test_notifier::Call::notification_received { + query_id: 0, + response: Default::default(), + }; + let notify = RuntimeCall::TestNotifier(call); + new_test_ext_with_balances(balances).execute_with(|| { + XcmPallet::report_outcome_notify( + &mut message, + Parachain(PARA_ID).into_location(), + notify, + 100, + ) + .unwrap(); + assert_eq!( + message, + Xcm(vec![ + SetAppendix(Xcm(vec![ReportError(QueryResponseInfo { + destination: Parent.into(), + query_id: 0, + max_weight: Weight::from_parts(1_000_000, 1_000_000), + })])), + TransferAsset { + assets: (Here, SEND_AMOUNT).into(), + beneficiary: sender.clone() + }, + ]) + ); + let querier: MultiLocation = Here.into(); + let status = QueryStatus::Pending { + responder: MultiLocation::from(Parachain(PARA_ID)).into(), + maybe_notify: Some((4, 2)), + timeout: 100, + 
maybe_match_querier: Some(querier.clone().into()), + }; + assert_eq!( + crate::Queries::::iter().collect::>(), + vec![(0, status)] + ); + + let message = Xcm(vec![QueryResponse { + query_id: 0, + response: Response::ExecutionResult(None), + max_weight: Weight::from_parts(1_000_000, 1_000_000), + querier: Some(querier), + }]); + let hash = fake_message_hash(&message); + let r = XcmExecutor::::execute_xcm( + Parachain(PARA_ID), + message, + hash, + Weight::from_parts(1_000_000_000, 1_000_000_000), + ); + assert_eq!(r, Outcome::Complete(Weight::from_parts(1_000, 1_000))); + assert_eq!( + last_events(2), + vec![ + RuntimeEvent::TestNotifier(pallet_test_notifier::Event::ResponseReceived( + Parachain(PARA_ID).into(), + 0, + Response::ExecutionResult(None), + )), + RuntimeEvent::XcmPallet(crate::Event::Notified(0, 4, 2)), + ] + ); + assert_eq!(crate::Queries::::iter().collect::>(), vec![]); + }); +} + +#[test] +fn report_outcome_works() { + let balances = vec![ + (ALICE, INITIAL_BALANCE), + ( + ParaId::from(PARA_ID).into_account_truncating(), + INITIAL_BALANCE, + ), + ]; + let sender: MultiLocation = AccountId32 { + network: None, + id: ALICE.into(), + } + .into(); + let mut message = Xcm(vec![TransferAsset { + assets: (Here, SEND_AMOUNT).into(), + beneficiary: sender.clone(), + }]); + new_test_ext_with_balances(balances).execute_with(|| { + XcmPallet::report_outcome(&mut message, Parachain(PARA_ID).into_location(), 100).unwrap(); + assert_eq!( + message, + Xcm(vec![ + SetAppendix(Xcm(vec![ReportError(QueryResponseInfo { + destination: Parent.into(), + query_id: 0, + max_weight: Weight::zero(), + })])), + TransferAsset { + assets: (Here, SEND_AMOUNT).into(), + beneficiary: sender.clone() + }, + ]) + ); + let querier: MultiLocation = Here.into(); + let status = QueryStatus::Pending { + responder: MultiLocation::from(Parachain(PARA_ID)).into(), + maybe_notify: None, + timeout: 100, + maybe_match_querier: Some(querier.clone().into()), + }; + assert_eq!( + 
crate::Queries::::iter().collect::>(), + vec![(0, status)] + ); + + let message = Xcm(vec![QueryResponse { + query_id: 0, + response: Response::ExecutionResult(None), + max_weight: Weight::zero(), + querier: Some(querier), + }]); + let hash = fake_message_hash(&message); + let r = XcmExecutor::::execute_xcm( + Parachain(PARA_ID), + message, + hash, + Weight::from_parts(1_000_000_000, 1_000_000_000), + ); + assert_eq!(r, Outcome::Complete(Weight::from_parts(1_000, 1_000))); + assert_eq!( + last_event(), + RuntimeEvent::XcmPallet(crate::Event::ResponseReady( + 0, + Response::ExecutionResult(None), + )) + ); + + let response = Some((Response::ExecutionResult(None), 1)); + assert_eq!(XcmPallet::take_response(0), response); + }); +} + +#[test] +fn custom_querier_works() { + let balances = vec![ + (ALICE, INITIAL_BALANCE), + ( + ParaId::from(PARA_ID).into_account_truncating(), + INITIAL_BALANCE, + ), + ]; + new_test_ext_with_balances(balances).execute_with(|| { + let querier: MultiLocation = ( + Parent, + AccountId32 { + network: None, + id: ALICE.into(), + }, + ) + .into(); + + let r = TestNotifier::prepare_new_query(RuntimeOrigin::signed(ALICE), querier.clone()); + assert_eq!(r, Ok(())); + let status = QueryStatus::Pending { + responder: MultiLocation::from(AccountId32 { + network: None, + id: ALICE.into(), + }) + .into(), + maybe_notify: None, + timeout: 100, + maybe_match_querier: Some(querier.clone().into()), + }; + assert_eq!( + crate::Queries::::iter().collect::>(), + vec![(0, status)] + ); + + // Supplying no querier when one is expected will fail + let message = Xcm(vec![QueryResponse { + query_id: 0, + response: Response::ExecutionResult(None), + max_weight: Weight::zero(), + querier: None, + }]); + let hash = fake_message_hash(&message); + let r = XcmExecutor::::execute_xcm_in_credit( + AccountId32 { + network: None, + id: ALICE.into(), + }, + message, + hash, + Weight::from_parts(1_000_000_000, 1_000_000_000), + Weight::from_parts(1_000, 1_000), + ); + 
assert_eq!(r, Outcome::Complete(Weight::from_parts(1_000, 1_000))); + assert_eq!( + last_event(), + RuntimeEvent::XcmPallet(crate::Event::InvalidQuerier( + AccountId32 { + network: None, + id: ALICE.into() + } + .into(), + 0, + querier.clone(), + None, + )), + ); + + // Supplying the wrong querier will also fail + let message = Xcm(vec![QueryResponse { + query_id: 0, + response: Response::ExecutionResult(None), + max_weight: Weight::zero(), + querier: Some(MultiLocation::here()), + }]); + let hash = fake_message_hash(&message); + let r = XcmExecutor::::execute_xcm_in_credit( + AccountId32 { + network: None, + id: ALICE.into(), + }, + message, + hash, + Weight::from_parts(1_000_000_000, 1_000_000_000), + Weight::from_parts(1_000, 1_000), + ); + assert_eq!(r, Outcome::Complete(Weight::from_parts(1_000, 1_000))); + assert_eq!( + last_event(), + RuntimeEvent::XcmPallet(crate::Event::InvalidQuerier( + AccountId32 { + network: None, + id: ALICE.into() + } + .into(), + 0, + querier.clone(), + Some(MultiLocation::here()), + )), + ); + + // Multiple failures should not have changed the query state + let message = Xcm(vec![QueryResponse { + query_id: 0, + response: Response::ExecutionResult(None), + max_weight: Weight::zero(), + querier: Some(querier), + }]); + let hash = fake_message_hash(&message); + let r = XcmExecutor::::execute_xcm( + AccountId32 { + network: None, + id: ALICE.into(), + }, + message, + hash, + Weight::from_parts(1_000_000_000, 1_000_000_000), + ); + assert_eq!(r, Outcome::Complete(Weight::from_parts(1_000, 1_000))); + assert_eq!( + last_event(), + RuntimeEvent::XcmPallet(crate::Event::ResponseReady( + 0, + Response::ExecutionResult(None), + )) + ); + + let response = Some((Response::ExecutionResult(None), 1)); + assert_eq!(XcmPallet::take_response(0), response); + }); +} + +/// Test sending an `XCM` message (`XCM::ReserveAssetDeposit`) +/// +/// Asserts that the expected message is sent and the event is emitted +#[test] +fn send_works() { + let balances 
= vec![ + (ALICE, INITIAL_BALANCE), + ( + ParaId::from(PARA_ID).into_account_truncating(), + INITIAL_BALANCE, + ), + ]; + new_test_ext_with_balances(balances).execute_with(|| { + let sender: MultiLocation = AccountId32 { + network: None, + id: ALICE.into(), + } + .into(); + let message = Xcm(vec![ + ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), + ClearOrigin, + buy_execution((Parent, SEND_AMOUNT)), + DepositAsset { + assets: AllCounted(1).into(), + beneficiary: sender.clone(), + }, + ]); + let versioned_dest = Box::new(RelayLocation::get().into()); + let versioned_message = Box::new(VersionedXcm::from(message.clone())); + assert_ok!(XcmPallet::send( + RuntimeOrigin::signed(ALICE), + versioned_dest, + versioned_message + )); + assert_eq!( + sent_xcm(), + vec![( + Here.into(), + Xcm(Some(DescendOrigin(sender.clone().try_into().unwrap())) + .into_iter() + .chain(message.0.clone().into_iter()) + .collect()) + )], + ); + assert_eq!( + last_event(), + RuntimeEvent::XcmPallet(crate::Event::Sent(sender, RelayLocation::get(), message)) + ); + }); +} + +/// Test that sending an `XCM` message fails when the `XcmRouter` blocks the +/// matching message format +/// +/// Asserts that `send` fails with `Error::SendFailure` +#[test] +fn send_fails_when_xcm_router_blocks() { + let balances = vec![ + (ALICE, INITIAL_BALANCE), + ( + ParaId::from(PARA_ID).into_account_truncating(), + INITIAL_BALANCE, + ), + ]; + new_test_ext_with_balances(balances).execute_with(|| { + let sender: MultiLocation = Junction::AccountId32 { + network: None, + id: ALICE.into(), + } + .into(); + let message = Xcm(vec![ + ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), + buy_execution((Parent, SEND_AMOUNT)), + DepositAsset { + assets: AllCounted(1).into(), + beneficiary: sender.clone(), + }, + ]); + assert_noop!( + XcmPallet::send( + RuntimeOrigin::signed(ALICE), + Box::new(MultiLocation::ancestor(8).into()), + Box::new(VersionedXcm::from(message.clone())), + ), + crate::Error::::SendFailure + ); 
+ }); +} + +/// Test `teleport_assets` +/// +/// Asserts that the sender's balance is decreased as a result of execution of +/// local effects. +#[test] +fn teleport_assets_works() { + let balances = vec![ + (ALICE, INITIAL_BALANCE), + ( + ParaId::from(PARA_ID).into_account_truncating(), + INITIAL_BALANCE, + ), + ]; + new_test_ext_with_balances(balances).execute_with(|| { + let weight = BaseXcmWeight::get() * 2; + assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); + let dest: MultiLocation = AccountId32 { + network: None, + id: BOB.into(), + } + .into(); + assert_ok!(XcmPallet::teleport_assets( + RuntimeOrigin::signed(ALICE), + Box::new(RelayLocation::get().into()), + Box::new(dest.clone().into()), + Box::new((Here, SEND_AMOUNT).into()), + 0, + )); + assert_eq!( + Balances::total_balance(&ALICE), + INITIAL_BALANCE - SEND_AMOUNT + ); + assert_eq!( + sent_xcm(), + vec![( + RelayLocation::get().into(), + Xcm(vec![ + ReceiveTeleportedAsset((Here, SEND_AMOUNT).into()), + ClearOrigin, + buy_limited_execution((Here, SEND_AMOUNT), Weight::from_parts(4000, 4000)), + DepositAsset { + assets: AllCounted(1).into(), + beneficiary: dest + }, + ]), + )] + ); + let versioned_sent = VersionedXcm::from(sent_xcm().into_iter().next().unwrap().1); + let _check_v2_ok: xcm::v2::Xcm<()> = versioned_sent.try_into().unwrap(); + assert_eq!( + last_event(), + RuntimeEvent::XcmPallet(crate::Event::Attempted(Outcome::Complete(weight))) + ); + }); +} + +/// Test `limited_teleport_assets` +/// +/// Asserts that the sender's balance is decreased as a result of execution of +/// local effects. 
+#[test] +fn limited_teleport_assets_works() { + let balances = vec![ + (ALICE, INITIAL_BALANCE), + ( + ParaId::from(PARA_ID).into_account_truncating(), + INITIAL_BALANCE, + ), + ]; + new_test_ext_with_balances(balances).execute_with(|| { + let weight = BaseXcmWeight::get() * 2; + assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); + let dest: MultiLocation = AccountId32 { + network: None, + id: BOB.into(), + } + .into(); + assert_ok!(XcmPallet::limited_teleport_assets( + RuntimeOrigin::signed(ALICE), + Box::new(RelayLocation::get().into()), + Box::new(dest.clone().into()), + Box::new((Here, SEND_AMOUNT).into()), + 0, + WeightLimit::Limited(Weight::from_parts(5000, 5000)), + )); + assert_eq!( + Balances::total_balance(&ALICE), + INITIAL_BALANCE - SEND_AMOUNT + ); + assert_eq!( + sent_xcm(), + vec![( + RelayLocation::get().into(), + Xcm(vec![ + ReceiveTeleportedAsset((Here, SEND_AMOUNT).into()), + ClearOrigin, + buy_limited_execution((Here, SEND_AMOUNT), Weight::from_parts(5000, 5000)), + DepositAsset { + assets: AllCounted(1).into(), + beneficiary: dest + }, + ]), + )] + ); + let versioned_sent = VersionedXcm::from(sent_xcm().into_iter().next().unwrap().1); + let _check_v2_ok: xcm::v2::Xcm<()> = versioned_sent.try_into().unwrap(); + assert_eq!( + last_event(), + RuntimeEvent::XcmPallet(crate::Event::Attempted(Outcome::Complete(weight))) + ); + }); +} + +/// Test `limited_teleport_assets` with unlimited weight +/// +/// Asserts that the sender's balance is decreased as a result of execution of +/// local effects. 
+#[test] +fn unlimited_teleport_assets_works() { + let balances = vec![ + (ALICE, INITIAL_BALANCE), + ( + ParaId::from(PARA_ID).into_account_truncating(), + INITIAL_BALANCE, + ), + ]; + new_test_ext_with_balances(balances).execute_with(|| { + let weight = BaseXcmWeight::get() * 2; + assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); + let dest: MultiLocation = AccountId32 { + network: None, + id: BOB.into(), + } + .into(); + assert_ok!(XcmPallet::limited_teleport_assets( + RuntimeOrigin::signed(ALICE), + Box::new(RelayLocation::get().into()), + Box::new(dest.clone().into()), + Box::new((Here, SEND_AMOUNT).into()), + 0, + WeightLimit::Unlimited, + )); + assert_eq!( + Balances::total_balance(&ALICE), + INITIAL_BALANCE - SEND_AMOUNT + ); + assert_eq!( + sent_xcm(), + vec![( + RelayLocation::get().into(), + Xcm(vec![ + ReceiveTeleportedAsset((Here, SEND_AMOUNT).into()), + ClearOrigin, + buy_execution((Here, SEND_AMOUNT)), + DepositAsset { + assets: AllCounted(1).into(), + beneficiary: dest + }, + ]), + )] + ); + assert_eq!( + last_event(), + RuntimeEvent::XcmPallet(crate::Event::Attempted(Outcome::Complete(weight))) + ); + }); +} + +/// Test `reserve_transfer_assets` +/// +/// Asserts that the sender's balance is decreased and the beneficiary's balance +/// is increased. Verifies the correct message is sent and event is emitted. 
+#[test] +fn reserve_transfer_assets_works() { + let balances = vec![ + (ALICE, INITIAL_BALANCE), + ( + ParaId::from(PARA_ID).into_account_truncating(), + INITIAL_BALANCE, + ), + ]; + new_test_ext_with_balances(balances).execute_with(|| { + let weight = BaseXcmWeight::get(); + let dest: MultiLocation = Junction::AccountId32 { + network: None, + id: ALICE.into(), + } + .into(); + assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); + assert_ok!(XcmPallet::reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(Parachain(PARA_ID).into()), + Box::new(dest.clone().into()), + Box::new((Here, SEND_AMOUNT).into()), + 0, + )); + // Alice spent amount + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - SEND_AMOUNT); + // Destination account (parachain account) has amount + let para_acc: AccountId = ParaId::from(PARA_ID).into_account_truncating(); + assert_eq!( + Balances::free_balance(para_acc), + INITIAL_BALANCE + SEND_AMOUNT + ); + assert_eq!( + sent_xcm(), + vec![( + Parachain(PARA_ID).into(), + Xcm(vec![ + ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), + ClearOrigin, + buy_limited_execution((Parent, SEND_AMOUNT), Weight::from_parts(4000, 4000)), + DepositAsset { + assets: AllCounted(1).into(), + beneficiary: dest + }, + ]), + )] + ); + let versioned_sent = VersionedXcm::from(sent_xcm().into_iter().next().unwrap().1); + let _check_v2_ok: xcm::v2::Xcm<()> = versioned_sent.try_into().unwrap(); + assert_eq!( + last_event(), + RuntimeEvent::XcmPallet(crate::Event::Attempted(Outcome::Complete(weight))) + ); + }); +} + +/// Test `limited_reserve_transfer_assets` +/// +/// Asserts that the sender's balance is decreased and the beneficiary's balance +/// is increased. Verifies the correct message is sent and event is emitted. 
+#[test] +fn limited_reserve_transfer_assets_works() { + let balances = vec![ + (ALICE, INITIAL_BALANCE), + ( + ParaId::from(PARA_ID).into_account_truncating(), + INITIAL_BALANCE, + ), + ]; + new_test_ext_with_balances(balances).execute_with(|| { + let weight = BaseXcmWeight::get(); + let dest: MultiLocation = Junction::AccountId32 { + network: None, + id: ALICE.into(), + } + .into(); + assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); + assert_ok!(XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(Parachain(PARA_ID).into()), + Box::new(dest.clone().into()), + Box::new((Here, SEND_AMOUNT).into()), + 0, + WeightLimit::Limited(Weight::from_parts(5000, 5000)), + )); + // Alice spent amount + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - SEND_AMOUNT); + // Destination account (parachain account) has amount + let para_acc: AccountId = ParaId::from(PARA_ID).into_account_truncating(); + assert_eq!( + Balances::free_balance(para_acc), + INITIAL_BALANCE + SEND_AMOUNT + ); + assert_eq!( + sent_xcm(), + vec![( + Parachain(PARA_ID).into(), + Xcm(vec![ + ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), + ClearOrigin, + buy_limited_execution((Parent, SEND_AMOUNT), Weight::from_parts(5000, 5000)), + DepositAsset { + assets: AllCounted(1).into(), + beneficiary: dest + }, + ]), + )] + ); + let versioned_sent = VersionedXcm::from(sent_xcm().into_iter().next().unwrap().1); + let _check_v2_ok: xcm::v2::Xcm<()> = versioned_sent.try_into().unwrap(); + assert_eq!( + last_event(), + RuntimeEvent::XcmPallet(crate::Event::Attempted(Outcome::Complete(weight))) + ); + }); +} + +/// Test `limited_reserve_transfer_assets` with unlimited weight purchasing +/// +/// Asserts that the sender's balance is decreased and the beneficiary's balance +/// is increased. Verifies the correct message is sent and event is emitted. 
+#[test] +fn unlimited_reserve_transfer_assets_works() { + let balances = vec![ + (ALICE, INITIAL_BALANCE), + ( + ParaId::from(PARA_ID).into_account_truncating(), + INITIAL_BALANCE, + ), + ]; + new_test_ext_with_balances(balances).execute_with(|| { + let weight = BaseXcmWeight::get(); + let dest: MultiLocation = Junction::AccountId32 { + network: None, + id: ALICE.into(), + } + .into(); + assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); + assert_ok!(XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(Parachain(PARA_ID).into()), + Box::new(dest.clone().into()), + Box::new((Here, SEND_AMOUNT).into()), + 0, + WeightLimit::Unlimited, + )); + // Alice spent amount + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - SEND_AMOUNT); + // Destination account (parachain account) has amount + let para_acc: AccountId = ParaId::from(PARA_ID).into_account_truncating(); + assert_eq!( + Balances::free_balance(para_acc), + INITIAL_BALANCE + SEND_AMOUNT + ); + assert_eq!( + sent_xcm(), + vec![( + Parachain(PARA_ID).into(), + Xcm(vec![ + ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), + ClearOrigin, + buy_execution((Parent, SEND_AMOUNT)), + DepositAsset { + assets: AllCounted(1).into(), + beneficiary: dest + }, + ]), + )] + ); + assert_eq!( + last_event(), + RuntimeEvent::XcmPallet(crate::Event::Attempted(Outcome::Complete(weight))) + ); + }); +} + +/// Test `reserve_withdraw_assets` +/// +/// Asserts that the sender's balance is decreased and the beneficiary's balance +/// is increased. Verifies the correct message is sent and event is emitted. 
+#[test] +fn reserve_withdraw_assets_works() { + let balances = vec![ + (ALICE, INITIAL_BALANCE), + ( + ParaId::from(PARA_ID).into_account_truncating(), + INITIAL_BALANCE, + ), + ]; + new_test_ext_with_balances(balances).execute_with(|| { + let weight = BaseXcmWeight::get(); + let dest: MultiLocation = Junction::AccountId32 { + network: None, + id: ALICE.into(), + } + .into(); + assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); + assert_ok!(XcmPallet::reserve_withdraw_assets( + RuntimeOrigin::signed(ALICE), + Box::new(Parachain(PARA_ID).into_location().into()), + Box::new(dest.clone().into()), + Box::new((Here, SEND_AMOUNT).into()), + 0, + )); + // Alice spent amount + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - SEND_AMOUNT); + // Check destination XCM program + assert_eq!( + sent_xcm(), + vec![( + Parachain(PARA_ID).into(), + Xcm(vec![ + WithdrawAsset((Parent, SEND_AMOUNT).into()), + ClearOrigin, + buy_limited_execution((Parent, SEND_AMOUNT), Weight::from_parts(4000, 4000)), + DepositAsset { + assets: AllCounted(1).into(), + beneficiary: dest + }, + ]), + )] + ); + let versioned_sent = VersionedXcm::from(sent_xcm().into_iter().next().unwrap().1); + let _check_v2_ok: xcm::v2::Xcm<()> = versioned_sent.try_into().unwrap(); + assert_eq!( + last_event(), + RuntimeEvent::XcmPallet(crate::Event::Attempted(Outcome::Complete(weight * 2))) + ); + }); +} + +/// Test local execution of XCM +/// +/// Asserts that the sender's balance is decreased and the beneficiary's balance +/// is increased. Verifies the expected event is emitted. 
+#[test] +fn execute_withdraw_to_deposit_works() { + let balances = vec![ + (ALICE, INITIAL_BALANCE), + ( + ParaId::from(PARA_ID).into_account_truncating(), + INITIAL_BALANCE, + ), + ]; + new_test_ext_with_balances(balances).execute_with(|| { + let weight = BaseXcmWeight::get() * 3; + let dest: MultiLocation = Junction::AccountId32 { + network: None, + id: BOB.into(), + } + .into(); + assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); + assert_ok!(XcmPallet::execute( + RuntimeOrigin::signed(ALICE), + Box::new(VersionedXcm::from(Xcm(vec![ + WithdrawAsset((Here, SEND_AMOUNT).into()), + buy_execution((Here, SEND_AMOUNT)), + DepositAsset { + assets: AllCounted(1).into(), + beneficiary: dest + }, + ]))), + weight + )); + assert_eq!( + Balances::total_balance(&ALICE), + INITIAL_BALANCE - SEND_AMOUNT + ); + assert_eq!(Balances::total_balance(&BOB), SEND_AMOUNT); + assert_eq!( + last_event(), + RuntimeEvent::XcmPallet(crate::Event::Attempted(Outcome::Complete(weight))) + ); + }); +} + +/// Test drop/claim assets. +#[test] +fn trapped_assets_can_be_claimed() { + let balances = vec![(ALICE, INITIAL_BALANCE), (BOB, INITIAL_BALANCE)]; + new_test_ext_with_balances(balances).execute_with(|| { + let weight = BaseXcmWeight::get() * 6; + let dest: MultiLocation = Junction::AccountId32 { + network: None, + id: BOB.into(), + } + .into(); + + assert_ok!(XcmPallet::execute( + RuntimeOrigin::signed(ALICE), + Box::new(VersionedXcm::from(Xcm(vec![ + WithdrawAsset((Here, SEND_AMOUNT).into()), + buy_execution((Here, SEND_AMOUNT)), + // Don't propagated the error into the result. + SetErrorHandler(Xcm(vec![ClearError])), + // This will make an error. + Trap(0), + // This would succeed, but we never get to it. 
+ DepositAsset { + assets: AllCounted(1).into(), + beneficiary: dest.clone() + }, + ]))), + weight + )); + let source: MultiLocation = Junction::AccountId32 { + network: None, + id: ALICE.into(), + } + .into(); + let trapped = AssetTraps::::iter().collect::>(); + let vma = VersionedMultiAssets::from(MultiAssets::from((Here, SEND_AMOUNT))); + let hash = BlakeTwo256::hash_of(&(source.clone(), vma.clone())); + assert_eq!( + last_events(2), + vec![ + RuntimeEvent::XcmPallet(crate::Event::AssetsTrapped(hash.clone(), source, vma)), + RuntimeEvent::XcmPallet(crate::Event::Attempted(Outcome::Complete( + BaseXcmWeight::get() * 5 + ))) + ] + ); + assert_eq!( + Balances::total_balance(&ALICE), + INITIAL_BALANCE - SEND_AMOUNT + ); + assert_eq!(Balances::total_balance(&BOB), INITIAL_BALANCE); + + let expected = vec![(hash, 1u32)]; + assert_eq!(trapped, expected); + + let weight = BaseXcmWeight::get() * 3; + assert_ok!(XcmPallet::execute( + RuntimeOrigin::signed(ALICE), + Box::new(VersionedXcm::from(Xcm(vec![ + ClaimAsset { + assets: (Here, SEND_AMOUNT).into(), + ticket: Here.into() + }, + buy_execution((Here, SEND_AMOUNT)), + DepositAsset { + assets: AllCounted(1).into(), + beneficiary: dest.clone() + }, + ]))), + weight + )); + + assert_eq!( + Balances::total_balance(&ALICE), + INITIAL_BALANCE - SEND_AMOUNT + ); + assert_eq!(Balances::total_balance(&BOB), INITIAL_BALANCE + SEND_AMOUNT); + assert_eq!(AssetTraps::::iter().collect::>(), vec![]); + + let weight = BaseXcmWeight::get() * 3; + assert_ok!(XcmPallet::execute( + RuntimeOrigin::signed(ALICE), + Box::new(VersionedXcm::from(Xcm(vec![ + ClaimAsset { + assets: (Here, SEND_AMOUNT).into(), + ticket: Here.into() + }, + buy_execution((Here, SEND_AMOUNT)), + DepositAsset { + assets: AllCounted(1).into(), + beneficiary: dest + }, + ]))), + weight + )); + assert_eq!( + last_event(), + RuntimeEvent::XcmPallet(crate::Event::Attempted(Outcome::Incomplete( + BaseXcmWeight::get(), + XcmError::UnknownClaim + ))) + ); + }); +} + +#[test] 
+fn fake_latest_versioned_multilocation_works() { + use parity_scale_codec::Encode; + let remote: MultiLocation = Parachain(1000).into(); + let versioned_remote = LatestVersionedMultiLocation(&remote); + assert_eq!(versioned_remote.encode(), remote.into_versioned().encode()); +} + +#[test] +fn basic_subscription_works() { + new_test_ext_with_balances(vec![]).execute_with(|| { + let remote: MultiLocation = Parachain(1000).into(); + assert_ok!(XcmPallet::force_subscribe_version_notify( + RuntimeOrigin::root(), + Box::new(remote.clone().into()), + )); + + assert_eq!( + Queries::::iter().collect::>(), + vec![( + 0, + QueryStatus::VersionNotifier { + origin: remote.clone().into(), + is_active: false + } + )] + ); + assert_eq!( + VersionNotifiers::::iter().collect::>(), + vec![(XCM_VERSION, remote.clone().into(), 0)] + ); + + assert_eq!( + take_sent_xcm(), + vec![( + remote.clone(), + Xcm(vec![SubscribeVersion { + query_id: 0, + max_response_weight: Weight::zero() + }]), + ),] + ); + + let weight = BaseXcmWeight::get(); + let mut message = Xcm::<()>(vec![ + // Remote supports XCM v2 + QueryResponse { + query_id: 0, + max_weight: Weight::zero(), + response: Response::Version(1), + querier: None, + }, + ]); + assert_ok!(AllowKnownQueryResponses::::should_execute( + &remote, + message.inner_mut(), + weight, + &mut Weight::zero(), + )); + }); +} + +#[test] +fn subscriptions_increment_id() { + new_test_ext_with_balances(vec![]).execute_with(|| { + let remote: MultiLocation = Parachain(1000).into(); + assert_ok!(XcmPallet::force_subscribe_version_notify( + RuntimeOrigin::root(), + Box::new(remote.clone().into()), + )); + + let remote2: MultiLocation = Parachain(1001).into(); + assert_ok!(XcmPallet::force_subscribe_version_notify( + RuntimeOrigin::root(), + Box::new(remote2.clone().into()), + )); + + assert_eq!( + take_sent_xcm(), + vec![ + ( + remote.clone(), + Xcm(vec![SubscribeVersion { + query_id: 0, + max_response_weight: Weight::zero() + }]), + ), + ( + remote2.clone(), + 
Xcm(vec![SubscribeVersion { + query_id: 1, + max_response_weight: Weight::zero() + }]), + ), + ] + ); + }); +} + +#[test] +fn double_subscription_fails() { + new_test_ext_with_balances(vec![]).execute_with(|| { + let remote: MultiLocation = Parachain(1000).into(); + assert_ok!(XcmPallet::force_subscribe_version_notify( + RuntimeOrigin::root(), + Box::new(remote.clone().into()), + )); + assert_noop!( + XcmPallet::force_subscribe_version_notify( + RuntimeOrigin::root(), + Box::new(remote.clone().into()) + ), + Error::::AlreadySubscribed, + ); + }) +} + +#[test] +fn unsubscribe_works() { + new_test_ext_with_balances(vec![]).execute_with(|| { + let remote: MultiLocation = Parachain(1000).into(); + assert_ok!(XcmPallet::force_subscribe_version_notify( + RuntimeOrigin::root(), + Box::new(remote.clone().into()), + )); + assert_ok!(XcmPallet::force_unsubscribe_version_notify( + RuntimeOrigin::root(), + Box::new(remote.clone().into()) + )); + assert_noop!( + XcmPallet::force_unsubscribe_version_notify( + RuntimeOrigin::root(), + Box::new(remote.clone().into()) + ), + Error::::NoSubscription, + ); + + assert_eq!( + take_sent_xcm(), + vec![ + ( + remote.clone(), + Xcm(vec![SubscribeVersion { + query_id: 0, + max_response_weight: Weight::zero() + }]), + ), + (remote.clone(), Xcm(vec![UnsubscribeVersion]),), + ] + ); + }); +} + +/// Parachain 1000 is asking us for a version subscription. 
+#[test] +fn subscription_side_works() { + new_test_ext_with_balances(vec![]).execute_with(|| { + AdvertisedXcmVersion::set(1); + + let remote: MultiLocation = Parachain(1000).into(); + let weight = BaseXcmWeight::get(); + let message = Xcm(vec![SubscribeVersion { + query_id: 0, + max_response_weight: Weight::zero(), + }]); + let hash = fake_message_hash(&message); + let r = XcmExecutor::::execute_xcm(remote.clone(), message, hash, weight); + assert_eq!(r, Outcome::Complete(weight)); + + let instr = QueryResponse { + query_id: 0, + max_weight: Weight::zero(), + response: Response::Version(1), + querier: None, + }; + assert_eq!(take_sent_xcm(), vec![(remote.clone(), Xcm(vec![instr]))]); + + // A runtime upgrade which doesn't alter the version sends no notifications. + XcmPallet::on_runtime_upgrade(); + XcmPallet::on_initialize(1); + assert_eq!(take_sent_xcm(), vec![]); + + // New version. + AdvertisedXcmVersion::set(2); + + // A runtime upgrade which alters the version does send notifications. + XcmPallet::on_runtime_upgrade(); + XcmPallet::on_initialize(2); + let instr = QueryResponse { + query_id: 0, + max_weight: Weight::zero(), + response: Response::Version(2), + querier: None, + }; + assert_eq!(take_sent_xcm(), vec![(remote.clone(), Xcm(vec![instr]))]); + }); +} + +#[test] +fn subscription_side_upgrades_work_with_notify() { + new_test_ext_with_balances(vec![]).execute_with(|| { + AdvertisedXcmVersion::set(1); + + // An entry from a previous runtime with v2 XCM. + let v2_location = VersionedMultiLocation::V2(xcm::v2::Junction::Parachain(1001).into()); + VersionNotifyTargets::::insert(1, v2_location, (70, Weight::zero(), 2)); + let v3_location = Parachain(1003).into_versioned(); + VersionNotifyTargets::::insert(3, v3_location, (72, Weight::zero(), 2)); + + // New version. + AdvertisedXcmVersion::set(3); + + // A runtime upgrade which alters the version does send notifications. 
+ XcmPallet::on_runtime_upgrade(); + XcmPallet::on_initialize(1); + + let instr1 = QueryResponse { + query_id: 70, + max_weight: Weight::zero(), + response: Response::Version(3), + querier: None, + }; + let instr3 = QueryResponse { + query_id: 72, + max_weight: Weight::zero(), + response: Response::Version(3), + querier: None, + }; + let mut sent = take_sent_xcm(); + sent.sort_by_key(|k| match (k.1).0[0] { + QueryResponse { query_id: q, .. } => q, + _ => 0, + }); + assert_eq!( + sent, + vec![ + (Parachain(1001).into(), Xcm(vec![instr1])), + (Parachain(1003).into(), Xcm(vec![instr3])), + ] + ); + + let mut contents = VersionNotifyTargets::::iter().collect::>(); + contents.sort_by_key(|k| k.2 .0); + assert_eq!( + contents, + vec![ + ( + XCM_VERSION, + Parachain(1001).into_versioned(), + (70, Weight::zero(), 3) + ), + ( + XCM_VERSION, + Parachain(1003).into_versioned(), + (72, Weight::zero(), 3) + ), + ] + ); + }); +} + +#[test] +fn subscription_side_upgrades_work_without_notify() { + new_test_ext_with_balances(vec![]).execute_with(|| { + // An entry from a previous runtime with v2 XCM. + let v2_location = VersionedMultiLocation::V2(xcm::v2::Junction::Parachain(1001).into()); + VersionNotifyTargets::::insert(1, v2_location, (70, Weight::zero(), 2)); + let v3_location = Parachain(1003).into_versioned(); + VersionNotifyTargets::::insert(3, v3_location, (72, Weight::zero(), 2)); + + // A runtime upgrade which alters the version does send notifications. 
+ XcmPallet::on_runtime_upgrade(); + XcmPallet::on_initialize(1); + + let mut contents = VersionNotifyTargets::::iter().collect::>(); + contents.sort_by_key(|k| k.2 .0); + assert_eq!( + contents, + vec![ + ( + XCM_VERSION, + Parachain(1001).into_versioned(), + (70, Weight::zero(), 3) + ), + ( + XCM_VERSION, + Parachain(1003).into_versioned(), + (72, Weight::zero(), 3) + ), + ] + ); + }); +} + +#[test] +fn subscriber_side_subscription_works() { + new_test_ext_with_balances(vec![]).execute_with(|| { + let remote: MultiLocation = Parachain(1000).into(); + assert_ok!(XcmPallet::force_subscribe_version_notify( + RuntimeOrigin::root(), + Box::new(remote.clone().into()), + )); + take_sent_xcm(); + + // Assume subscription target is working ok. + + let weight = BaseXcmWeight::get(); + let message = Xcm(vec![ + // Remote supports XCM v2 + QueryResponse { + query_id: 0, + max_weight: Weight::zero(), + response: Response::Version(1), + querier: None, + }, + ]); + let hash = fake_message_hash(&message); + let r = XcmExecutor::::execute_xcm(remote.clone(), message, hash, weight); + assert_eq!(r, Outcome::Complete(weight)); + assert_eq!(take_sent_xcm(), vec![]); + + // This message cannot be sent to a v2 remote. + let v2_msg = xcm::v2::Xcm::<()>(vec![xcm::v2::Instruction::Trap(0)]); + assert_eq!(XcmPallet::wrap_version(&remote, v2_msg.clone()), Err(())); + + let message = Xcm(vec![ + // Remote upgraded to XCM v2 + QueryResponse { + query_id: 0, + max_weight: Weight::zero(), + response: Response::Version(2), + querier: None, + }, + ]); + let hash = fake_message_hash(&message); + let r = XcmExecutor::::execute_xcm(remote.clone(), message, hash, weight); + assert_eq!(r, Outcome::Complete(weight)); + + // This message can now be sent to remote as it's v2. + assert_eq!( + XcmPallet::wrap_version(&remote, v2_msg.clone()), + Ok(VersionedXcm::from(v2_msg)) + ); + }); +} + +/// We should auto-subscribe when we don't know the remote's version. 
+#[test] +fn auto_subscription_works() { + new_test_ext_with_balances(vec![]).execute_with(|| { + let remote_v2: MultiLocation = Parachain(1000).into(); + let remote_v3: MultiLocation = Parachain(1001).into(); + + assert_ok!(XcmPallet::force_default_xcm_version( + RuntimeOrigin::root(), + Some(2) + )); + + // Wrapping a version for a destination we don't know elicits a subscription. + let msg_v2 = xcm::v2::Xcm::<()>(vec![xcm::v2::Instruction::Trap(0)]); + let msg_v3 = xcm::v3::Xcm::<()>(vec![xcm::v3::Instruction::ClearTopic]); + assert_eq!( + XcmPallet::wrap_version(&remote_v2, msg_v2.clone()), + Ok(VersionedXcm::from(msg_v2.clone())), + ); + assert_eq!(XcmPallet::wrap_version(&remote_v2, msg_v3.clone()), Err(())); + + let expected = vec![(remote_v2.clone().into(), 2)]; + assert_eq!(VersionDiscoveryQueue::::get().into_inner(), expected); + + assert_eq!( + XcmPallet::wrap_version(&remote_v3, msg_v2.clone()), + Ok(VersionedXcm::from(msg_v2.clone())), + ); + assert_eq!(XcmPallet::wrap_version(&remote_v3, msg_v3.clone()), Err(())); + + let expected = vec![(remote_v2.clone().into(), 2), (remote_v3.clone().into(), 2)]; + assert_eq!(VersionDiscoveryQueue::::get().into_inner(), expected); + + XcmPallet::on_initialize(1); + assert_eq!( + take_sent_xcm(), + vec![( + remote_v3.clone(), + Xcm(vec![SubscribeVersion { + query_id: 0, + max_response_weight: Weight::zero() + }]), + )] + ); + + // Assume remote_v3 is working ok and XCM version 3. + + let weight = BaseXcmWeight::get(); + let message = Xcm(vec![ + // Remote supports XCM v3 + QueryResponse { + query_id: 0, + max_weight: Weight::zero(), + response: Response::Version(3), + querier: None, + }, + ]); + let hash = fake_message_hash(&message); + let r = XcmExecutor::::execute_xcm(remote_v3.clone(), message, hash, weight); + assert_eq!(r, Outcome::Complete(weight)); + + // V2 messages can be sent to remote_v3 under XCM v3. 
+ assert_eq!( + XcmPallet::wrap_version(&remote_v3, msg_v2.clone()), + Ok(VersionedXcm::from(msg_v2.clone()).into_version(3).unwrap()), + ); + // This message can now be sent to remote_v3 as it's v3. + assert_eq!( + XcmPallet::wrap_version(&remote_v3, msg_v3.clone()), + Ok(VersionedXcm::from(msg_v3.clone())) + ); + + XcmPallet::on_initialize(2); + assert_eq!( + take_sent_xcm(), + vec![( + remote_v2.clone(), + Xcm(vec![SubscribeVersion { + query_id: 1, + max_response_weight: Weight::zero() + }]), + )] + ); + + // Assume remote_v2 is working ok and XCM version 2. + + let weight = BaseXcmWeight::get(); + let message = Xcm(vec![ + // Remote supports XCM v2 + QueryResponse { + query_id: 1, + max_weight: Weight::zero(), + response: Response::Version(2), + querier: None, + }, + ]); + let hash = fake_message_hash(&message); + let r = XcmExecutor::::execute_xcm(remote_v2.clone(), message, hash, weight); + assert_eq!(r, Outcome::Complete(weight)); + + // v3 messages cannot be sent to remote_v2... + assert_eq!( + XcmPallet::wrap_version(&remote_v2, msg_v2.clone()), + Ok(VersionedXcm::V2(msg_v2)) + ); + assert_eq!(XcmPallet::wrap_version(&remote_v2, msg_v3.clone()), Err(())); + }) +} + +#[test] +fn subscription_side_upgrades_work_with_multistage_notify() { + new_test_ext_with_balances(vec![]).execute_with(|| { + AdvertisedXcmVersion::set(1); + + // An entry from a previous runtime with v0 XCM. + let v2_location = VersionedMultiLocation::V2(xcm::v2::Junction::Parachain(1001).into()); + VersionNotifyTargets::::insert(1, v2_location, (70, Weight::zero(), 1)); + let v2_location = VersionedMultiLocation::V2(xcm::v2::Junction::Parachain(1002).into()); + VersionNotifyTargets::::insert(2, v2_location, (71, Weight::zero(), 1)); + let v3_location = Parachain(1003).into_versioned(); + VersionNotifyTargets::::insert(3, v3_location, (72, Weight::zero(), 1)); + + // New version. + AdvertisedXcmVersion::set(3); + + // A runtime upgrade which alters the version does send notifications. 
+ XcmPallet::on_runtime_upgrade(); + let mut maybe_migration = CurrentMigration::::take(); + let mut counter = 0; + while let Some(migration) = maybe_migration.take() { + counter += 1; + let (_, m) = XcmPallet::check_xcm_version_change(migration, Weight::zero()); + maybe_migration = m; + } + assert_eq!(counter, 4); + + let instr1 = QueryResponse { + query_id: 70, + max_weight: Weight::zero(), + response: Response::Version(3), + querier: None, + }; + let instr2 = QueryResponse { + query_id: 71, + max_weight: Weight::zero(), + response: Response::Version(3), + querier: None, + }; + let instr3 = QueryResponse { + query_id: 72, + max_weight: Weight::zero(), + response: Response::Version(3), + querier: None, + }; + let mut sent = take_sent_xcm(); + sent.sort_by_key(|k| match (k.1).0[0] { + QueryResponse { query_id: q, .. } => q, + _ => 0, + }); + assert_eq!( + sent, + vec![ + (Parachain(1001).into(), Xcm(vec![instr1])), + (Parachain(1002).into(), Xcm(vec![instr2])), + (Parachain(1003).into(), Xcm(vec![instr3])), + ] + ); + + let mut contents = VersionNotifyTargets::::iter().collect::>(); + contents.sort_by_key(|k| k.2 .0); + assert_eq!( + contents, + vec![ + ( + XCM_VERSION, + Parachain(1001).into_versioned(), + (70, Weight::zero(), 3) + ), + ( + XCM_VERSION, + Parachain(1002).into_versioned(), + (71, Weight::zero(), 3) + ), + ( + XCM_VERSION, + Parachain(1003).into_versioned(), + (72, Weight::zero(), 3) + ), + ] + ); + }); +} diff --git a/pallets/pallet-xcm/src/weights.rs b/pallets/pallet-xcm/src/weights.rs new file mode 100644 index 0000000000..f56e439c89 --- /dev/null +++ b/pallets/pallet-xcm/src/weights.rs @@ -0,0 +1,441 @@ + +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +//! Autogenerated weights for pallet_xcm +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-04-04, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `devserver-01`, CPU: `Intel(R) Xeon(R) E-2236 CPU @ 3.40GHz` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("shibuya-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/release/astar-collator +// benchmark +// pallet +// --chain=shibuya-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_xcm +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./benchmark-results/xcm_weights.rs +// --template=./scripts/templates/weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; +use super::WeightInfo; + +/// Weights for pallet_xcm using the Substrate node and recommended hardware. 
+pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + // Storage: PolkadotXcm SupportedVersion (r:1 w:0) + // Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) + // Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) + // Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) + // Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) + // Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) + // Storage: ParachainSystem HostConfiguration (r:1 w:0) + // Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) + // Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) + // Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + fn send() -> Weight { + // Minimum execution time: 28_954 nanoseconds. + Weight::from_ref_time(29_757_000) + .saturating_add(Weight::from_proof_size(5010)) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + // Storage: Benchmark Override (r:0 w:0) + // Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured) + fn teleport_assets() -> Weight { + // Minimum execution time: 18_446_744_073_709_551 nanoseconds. + Weight::from_ref_time(18_446_744_073_709_551_000) + .saturating_add(Weight::from_proof_size(0)) + } + // Storage: ParachainInfo ParachainId (r:1 w:0) + // Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + // Storage: System Account (r:2 w:0) + // Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + fn reserve_transfer_assets() -> Weight { + // Minimum execution time: 31_227 nanoseconds. 
+ Weight::from_ref_time(31_863_000) + .saturating_add(Weight::from_proof_size(5705)) + .saturating_add(T::DbWeight::get().reads(3_u64)) + } + // Storage: Benchmark Override (r:0 w:0) + // Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured) + fn execute() -> Weight { + // Minimum execution time: 18_446_744_073_709_551 nanoseconds. + Weight::from_ref_time(18_446_744_073_709_551_000) + .saturating_add(Weight::from_proof_size(0)) + } + // Storage: PolkadotXcm SupportedVersion (r:0 w:1) + // Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) + fn force_xcm_version() -> Weight { + // Minimum execution time: 9_393 nanoseconds. + Weight::from_ref_time(9_916_000) + .saturating_add(Weight::from_proof_size(0)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + // Storage: PolkadotXcm SafeXcmVersion (r:0 w:1) + // Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) + fn force_default_xcm_version() -> Weight { + // Minimum execution time: 2_828 nanoseconds. 
+ Weight::from_ref_time(2_982_000) + .saturating_add(Weight::from_proof_size(0)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + // Storage: PolkadotXcm VersionNotifiers (r:1 w:1) + // Proof Skipped: PolkadotXcm VersionNotifiers (max_values: None, max_size: None, mode: Measured) + // Storage: PolkadotXcm QueryCounter (r:1 w:1) + // Proof Skipped: PolkadotXcm QueryCounter (max_values: Some(1), max_size: None, mode: Measured) + // Storage: PolkadotXcm SupportedVersion (r:1 w:0) + // Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) + // Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) + // Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) + // Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) + // Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) + // Storage: ParachainSystem HostConfiguration (r:1 w:0) + // Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) + // Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) + // Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + // Storage: PolkadotXcm Queries (r:0 w:1) + // Proof Skipped: PolkadotXcm Queries (max_values: None, max_size: None, mode: Measured) + fn force_subscribe_version_notify() -> Weight { + // Minimum execution time: 32_918 nanoseconds. 
+ Weight::from_ref_time(35_102_000) + .saturating_add(Weight::from_proof_size(8313)) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes(5_u64)) + } + // Storage: PolkadotXcm VersionNotifiers (r:1 w:1) + // Proof Skipped: PolkadotXcm VersionNotifiers (max_values: None, max_size: None, mode: Measured) + // Storage: PolkadotXcm SupportedVersion (r:1 w:0) + // Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) + // Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) + // Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) + // Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) + // Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) + // Storage: ParachainSystem HostConfiguration (r:1 w:0) + // Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) + // Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) + // Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + // Storage: PolkadotXcm Queries (r:0 w:1) + // Proof Skipped: PolkadotXcm Queries (max_values: None, max_size: None, mode: Measured) + fn force_unsubscribe_version_notify() -> Weight { + // Minimum execution time: 35_929 nanoseconds. + Weight::from_ref_time(36_954_000) + .saturating_add(Weight::from_proof_size(8988)) + .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + } + // Storage: PolkadotXcm SupportedVersion (r:4 w:2) + // Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) + fn migrate_supported_version() -> Weight { + // Minimum execution time: 17_936 nanoseconds. 
+ Weight::from_ref_time(18_392_000) + .saturating_add(Weight::from_proof_size(10029)) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + // Storage: PolkadotXcm VersionNotifiers (r:4 w:2) + // Proof Skipped: PolkadotXcm VersionNotifiers (max_values: None, max_size: None, mode: Measured) + fn migrate_version_notifiers() -> Weight { + // Minimum execution time: 17_874 nanoseconds. + Weight::from_ref_time(18_353_000) + .saturating_add(Weight::from_proof_size(10033)) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + // Storage: PolkadotXcm VersionNotifyTargets (r:5 w:0) + // Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + fn already_notified_target() -> Weight { + // Minimum execution time: 20_320 nanoseconds. + Weight::from_ref_time(20_858_000) + .saturating_add(Weight::from_proof_size(12515)) + .saturating_add(T::DbWeight::get().reads(5_u64)) + } + // Storage: PolkadotXcm VersionNotifyTargets (r:2 w:1) + // Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + // Storage: PolkadotXcm SupportedVersion (r:1 w:0) + // Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) + // Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) + // Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) + // Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) + // Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) + // Storage: ParachainSystem HostConfiguration (r:1 w:0) + // Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) + // Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) + // Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) 
+ fn notify_current_targets() -> Weight { + // Minimum execution time: 33_135 nanoseconds. + Weight::from_ref_time(33_575_000) + .saturating_add(Weight::from_proof_size(10473)) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } + // Storage: PolkadotXcm VersionNotifyTargets (r:3 w:0) + // Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + fn notify_target_migration_fail() -> Weight { + // Minimum execution time: 8_047 nanoseconds. + Weight::from_ref_time(8_255_000) + .saturating_add(Weight::from_proof_size(7597)) + .saturating_add(T::DbWeight::get().reads(3_u64)) + } + // Storage: PolkadotXcm VersionNotifyTargets (r:4 w:2) + // Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + fn migrate_version_notify_targets() -> Weight { + // Minimum execution time: 17_230 nanoseconds. + Weight::from_ref_time(17_749_000) + .saturating_add(Weight::from_proof_size(10040)) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + // Storage: PolkadotXcm VersionNotifyTargets (r:4 w:2) + // Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + // Storage: PolkadotXcm SupportedVersion (r:1 w:0) + // Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) + // Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) + // Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) + // Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) + // Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) + // Storage: ParachainSystem HostConfiguration (r:1 w:0) + // Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) + // Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) + // 
Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + fn migrate_and_notify_old_targets() -> Weight { + // Minimum execution time: 61_977 nanoseconds. + Weight::from_ref_time(62_285_000) + .saturating_add(Weight::from_proof_size(15447)) + .saturating_add(T::DbWeight::get().reads(9_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + } + // Storage: ParachainInfo ParachainId (r:1 w:0) + // Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + // Storage: System Account (r:1 w:0) + // Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + fn reserve_withdraw_assets() -> Weight { + // Minimum execution time: 32_020 nanoseconds. + Weight::from_ref_time(32_452_000) + .saturating_add(Weight::from_proof_size(3102)) + .saturating_add(T::DbWeight::get().reads(2_u64)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + // Storage: PolkadotXcm SupportedVersion (r:1 w:0) + // Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) + // Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) + // Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) + // Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) + // Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) + // Storage: ParachainSystem HostConfiguration (r:1 w:0) + // Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) + // Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) + // Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + fn send() -> Weight { + // Minimum execution time: 28_954 nanoseconds. 
+ Weight::from_ref_time(29_757_000) + .saturating_add(Weight::from_proof_size(5010)) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + // Storage: Benchmark Override (r:0 w:0) + // Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured) + fn teleport_assets() -> Weight { + // Minimum execution time: 18_446_744_073_709_551 nanoseconds. + Weight::from_ref_time(18_446_744_073_709_551_000) + .saturating_add(Weight::from_proof_size(0)) + } + // Storage: ParachainInfo ParachainId (r:1 w:0) + // Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + // Storage: System Account (r:2 w:0) + // Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + fn reserve_transfer_assets() -> Weight { + // Minimum execution time: 31_227 nanoseconds. + Weight::from_ref_time(31_863_000) + .saturating_add(Weight::from_proof_size(5705)) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + } + // Storage: Benchmark Override (r:0 w:0) + // Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured) + fn execute() -> Weight { + // Minimum execution time: 18_446_744_073_709_551 nanoseconds. + Weight::from_ref_time(18_446_744_073_709_551_000) + .saturating_add(Weight::from_proof_size(0)) + } + // Storage: PolkadotXcm SupportedVersion (r:0 w:1) + // Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) + fn force_xcm_version() -> Weight { + // Minimum execution time: 9_393 nanoseconds. 
+ Weight::from_ref_time(9_916_000) + .saturating_add(Weight::from_proof_size(0)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + // Storage: PolkadotXcm SafeXcmVersion (r:0 w:1) + // Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) + fn force_default_xcm_version() -> Weight { + // Minimum execution time: 2_828 nanoseconds. + Weight::from_ref_time(2_982_000) + .saturating_add(Weight::from_proof_size(0)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + // Storage: PolkadotXcm VersionNotifiers (r:1 w:1) + // Proof Skipped: PolkadotXcm VersionNotifiers (max_values: None, max_size: None, mode: Measured) + // Storage: PolkadotXcm QueryCounter (r:1 w:1) + // Proof Skipped: PolkadotXcm QueryCounter (max_values: Some(1), max_size: None, mode: Measured) + // Storage: PolkadotXcm SupportedVersion (r:1 w:0) + // Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) + // Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) + // Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) + // Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) + // Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) + // Storage: ParachainSystem HostConfiguration (r:1 w:0) + // Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) + // Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) + // Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + // Storage: PolkadotXcm Queries (r:0 w:1) + // Proof Skipped: PolkadotXcm Queries (max_values: None, max_size: None, mode: Measured) + fn force_subscribe_version_notify() -> Weight { + // Minimum execution time: 32_918 nanoseconds. 
+ Weight::from_ref_time(35_102_000) + .saturating_add(Weight::from_proof_size(8313)) + .saturating_add(RocksDbWeight::get().reads(7_u64)) + .saturating_add(RocksDbWeight::get().writes(5_u64)) + } + // Storage: PolkadotXcm VersionNotifiers (r:1 w:1) + // Proof Skipped: PolkadotXcm VersionNotifiers (max_values: None, max_size: None, mode: Measured) + // Storage: PolkadotXcm SupportedVersion (r:1 w:0) + // Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) + // Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) + // Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) + // Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) + // Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) + // Storage: ParachainSystem HostConfiguration (r:1 w:0) + // Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) + // Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) + // Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + // Storage: PolkadotXcm Queries (r:0 w:1) + // Proof Skipped: PolkadotXcm Queries (max_values: None, max_size: None, mode: Measured) + fn force_unsubscribe_version_notify() -> Weight { + // Minimum execution time: 35_929 nanoseconds. + Weight::from_ref_time(36_954_000) + .saturating_add(Weight::from_proof_size(8988)) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } + // Storage: PolkadotXcm SupportedVersion (r:4 w:2) + // Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) + fn migrate_supported_version() -> Weight { + // Minimum execution time: 17_936 nanoseconds. 
+ Weight::from_ref_time(18_392_000) + .saturating_add(Weight::from_proof_size(10029)) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + // Storage: PolkadotXcm VersionNotifiers (r:4 w:2) + // Proof Skipped: PolkadotXcm VersionNotifiers (max_values: None, max_size: None, mode: Measured) + fn migrate_version_notifiers() -> Weight { + // Minimum execution time: 17_874 nanoseconds. + Weight::from_ref_time(18_353_000) + .saturating_add(Weight::from_proof_size(10033)) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + // Storage: PolkadotXcm VersionNotifyTargets (r:5 w:0) + // Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + fn already_notified_target() -> Weight { + // Minimum execution time: 20_320 nanoseconds. + Weight::from_ref_time(20_858_000) + .saturating_add(Weight::from_proof_size(12515)) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + } + // Storage: PolkadotXcm VersionNotifyTargets (r:2 w:1) + // Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + // Storage: PolkadotXcm SupportedVersion (r:1 w:0) + // Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) + // Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) + // Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) + // Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) + // Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) + // Storage: ParachainSystem HostConfiguration (r:1 w:0) + // Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) + // Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) + // Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: 
Measured) + fn notify_current_targets() -> Weight { + // Minimum execution time: 33_135 nanoseconds. + Weight::from_ref_time(33_575_000) + .saturating_add(Weight::from_proof_size(10473)) + .saturating_add(RocksDbWeight::get().reads(7_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } + // Storage: PolkadotXcm VersionNotifyTargets (r:3 w:0) + // Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + fn notify_target_migration_fail() -> Weight { + // Minimum execution time: 8_047 nanoseconds. + Weight::from_ref_time(8_255_000) + .saturating_add(Weight::from_proof_size(7597)) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + } + // Storage: PolkadotXcm VersionNotifyTargets (r:4 w:2) + // Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + fn migrate_version_notify_targets() -> Weight { + // Minimum execution time: 17_230 nanoseconds. + Weight::from_ref_time(17_749_000) + .saturating_add(Weight::from_proof_size(10040)) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + // Storage: PolkadotXcm VersionNotifyTargets (r:4 w:2) + // Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + // Storage: PolkadotXcm SupportedVersion (r:1 w:0) + // Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) + // Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) + // Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) + // Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) + // Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) + // Storage: ParachainSystem HostConfiguration (r:1 w:0) + // Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) + // Storage: ParachainSystem 
PendingUpwardMessages (r:1 w:1) + // Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + fn migrate_and_notify_old_targets() -> Weight { + // Minimum execution time: 61_977 nanoseconds. + Weight::from_ref_time(62_285_000) + .saturating_add(Weight::from_proof_size(15447)) + .saturating_add(RocksDbWeight::get().reads(9_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } + // Storage: ParachainInfo ParachainId (r:1 w:0) + // Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + // Storage: System Account (r:1 w:0) + // Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + fn reserve_withdraw_assets() -> Weight { + // Minimum execution time: 32_020 nanoseconds. + Weight::from_ref_time(32_452_000) + .saturating_add(Weight::from_proof_size(3102)) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + } +} diff --git a/pallets/pallet-xvm/Cargo.toml b/pallets/pallet-xvm/Cargo.toml new file mode 100644 index 0000000000..8955ff752c --- /dev/null +++ b/pallets/pallet-xvm/Cargo.toml @@ -0,0 +1,58 @@ +[package] +name = "pallet-xvm" +version = "0.2.1" +authors.workspace = true +edition.workspace = true +homepage.workspace = true +repository.workspace = true + +[dependencies] +impl-trait-for-tuples = { workspace = true } +log = { workspace = true } +serde = { workspace = true, optional = true } + +# Substrate +frame-support = { workspace = true } +frame-system = { workspace = true } +parity-scale-codec = { workspace = true } +scale-info = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } + +# Benchmarks +frame-benchmarking = { workspace = true, optional = true } + +# EVM support +pallet-evm = { workspace = true, optional = true } + +# Substrate WASM VM support +pallet-contracts = { workspace = true, optional = true } + +[dev-dependencies] + 
+[features] +default = ["std"] +evm = [ + "pallet-evm", +] +wasm = [ + "pallet-contracts", +] +std = [ + "parity-scale-codec/std", + "frame-support/std", + "frame-system/std", + "pallet-contracts/std", + "pallet-evm/std", + "scale-info/std", + "serde", + "sp-core/std", + "sp-runtime/std", + "sp-std/std", +] + +runtime-benchmarks = [ + "frame-benchmarking", +] +try-runtime = ["frame-support/try-runtime"] diff --git a/pallets/pallet-xvm/src/evm.rs b/pallets/pallet-xvm/src/evm.rs new file mode 100644 index 0000000000..fd4cb7faef --- /dev/null +++ b/pallets/pallet-xvm/src/evm.rs @@ -0,0 +1,99 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +//! EVM support for XVM pallet. + +use crate::*; +use pallet_evm::{GasWeightMapping, Runner}; +use sp_core::{H160, U256}; +use sp_runtime::traits::{Get, UniqueSaturatedInto}; + +/// EVM adapter for XVM calls. +/// +/// This adapter supports generic XVM calls and encode it into EVM native calls +/// using Solidity ABI codec (https://docs.soliditylang.org/en/v0.8.16/abi-spec.html). 
+pub struct EVM(sp_std::marker::PhantomData<(I, T)>); + +impl SyncVM for EVM +where + I: Get, + T: pallet_evm::Config + frame_system::Config, +{ + fn id() -> VmId { + I::get() + } + + fn xvm_call(context: XvmContext, from: T::AccountId, to: Vec, input: Vec) -> XvmResult { + log::trace!( + target: "xvm::EVM::xvm_call", + "Start EVM XVM: {:?}, {:?}, {:?}", + from, to, input, + ); + let value = U256::zero(); + + // Tells the EVM executor that no fees should be charged for this execution. + let max_fee_per_gas = U256::zero(); + let gas_limit = T::GasWeightMapping::weight_to_gas(context.max_weight); + log::trace!( + target: "xvm::EVM::xvm_call", + "EVM xvm call gas limit: {:?} or as weight: {:?}", gas_limit, context.max_weight); + let evm_to = Decode::decode(&mut to.as_ref()).map_err(|_| XvmCallError { + error: XvmError::EncodingFailure, + consumed_weight: PLACEHOLDER_WEIGHT, + })?; + + let is_transactional = true; + // Since this is in the context of XVM, no standard validation is required. + let validate = false; + let info = T::Runner::call( + H160::from_slice(&from.encode()[0..20]), + evm_to, + input, + value, + gas_limit, + Some(max_fee_per_gas), + None, + None, + Vec::new(), + is_transactional, + validate, + T::config(), + ) + .map_err(|e| { + let consumed_weight = e.weight.ref_time(); + XvmCallError { + error: XvmError::ExecutionError(Into::<&str>::into(e.error.into()).into()), + consumed_weight, + } + })?; + + log::trace!( + target: "xvm::EVM::xvm_call", + "EVM XVM call result: exit_reason: {:?}, used_gas: {:?}", info.exit_reason, info.used_gas, + ); + + Ok(XvmCallOk { + output: info.value, + consumed_weight: T::GasWeightMapping::gas_to_weight( + info.used_gas.unique_saturated_into(), + false, + ) + .ref_time(), + }) + } +} diff --git a/pallets/pallet-xvm/src/lib.rs b/pallets/pallet-xvm/src/lib.rs new file mode 100644 index 0000000000..67675d9231 --- /dev/null +++ b/pallets/pallet-xvm/src/lib.rs @@ -0,0 +1,228 @@ +// This file is part of Astar. 
+ +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +//! # XVM pallet +//! +//! ## Overview +//! +//! ## Interface +//! +//! ### Dispatchable Function +//! +//! +//! ### Other +//! +//! + +#![cfg_attr(not(feature = "std"), no_std)] + +use frame_support::weights::Weight; +use parity_scale_codec::{Decode, Encode}; +use sp_runtime::{traits::Member, RuntimeDebug}; +use sp_std::prelude::*; + +pub mod pallet; +pub use pallet::pallet::*; + +/// EVM call adapter. +#[cfg(feature = "evm")] +pub mod evm; + +/// Wasm call adapter. +#[cfg(feature = "wasm")] +pub mod wasm; + +/// Unique VM identifier. +pub type VmId = u8; + +// TODO: remove later after solution is properly benchmarked +// Just a arbitrary weight constant to avoid having ZERO weight in some parts of execution +pub const PLACEHOLDER_WEIGHT: u64 = 1_000_000; + +#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, scale_info::TypeInfo)] +pub enum XvmError { + VmNotRecognized, + EncodingFailure, + ContextConversionFailed, + OutOfGas, + ExecutionError(Vec), + // extend this list as part of improved error handling +} + +// TODO: Currently our precompile/chain-extension calls rely on direct `Call` usage of XVM pallet. +// This is perfectly fine when we're just calling a function in other VM and are interested whether the call was +// successful or not. 
+// +// Problem arises IF we want to get back arbitrary read value from the other VM - `DispatchResultWithPostInfo` isn't enough for this. +// We need to receive back a concrete value back from the other VM. + +/// Denotes a successful XVM call execution +#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, scale_info::TypeInfo)] +pub struct XvmCallOk { + /// Output of XVM call. E.g. if call was a query, this will contain query response. + output: Vec, + /// Total consumed weight. This is in context of Substrate (1 unit of weight ~ 1 ps of execution time) + consumed_weight: u64, +} + +impl XvmCallOk { + pub fn output(&self) -> &[u8] { + &self.output + } +} + +/// Denotes a failed XVM call execution +#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, scale_info::TypeInfo)] +pub struct XvmCallError { + /// Result of XVM call + // TODO: use XvmError enum from pallet? Perhaps that's a better approach. Or at least provide mapping? + error: XvmError, + /// Total consumed weight. This is in context of Substrate (1 unit of weight ~ 1 ps of execution time) + consumed_weight: u64, +} + +impl XvmCallError { + pub fn error(&self) -> &XvmError { + &self.error + } +} + +/// Result for executing X-VM calls +pub type XvmResult = Result; + +pub fn consumed_weight(result: &XvmResult) -> u64 { + match result { + Ok(res) => res.consumed_weight, + Err(err) => err.consumed_weight, + } +} + +/// XVM context consists of unique ID and optional execution arguments. +#[derive(Default, PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, scale_info::TypeInfo)] +pub struct XvmContext { + /// Identifier (should be unique for each VM in tuple). + pub id: VmId, + /// Max allowed weight for the call + pub max_weight: Weight, + /// Encoded VM execution environment. + pub env: Option>, +} + +/// The engine that supports synchronous smart contract execution. +/// For example, EVM. +pub trait SyncVM { + /// Unique VM identifier. 
+ fn id() -> VmId; + + /// Make a call to VM contract and return result or error. + /// + /// + fn xvm_call(context: XvmContext, from: AccountId, to: Vec, input: Vec) -> XvmResult; +} + +#[impl_trait_for_tuples::impl_for_tuples(30)] +impl SyncVM for Tuple { + fn id() -> VmId { + Default::default() + } + + fn xvm_call(context: XvmContext, from: AccountId, to: Vec, input: Vec) -> XvmResult { + for_tuples!( #( + if Tuple::id() == context.id { + log::trace!( + target: "xvm::SyncVm::xvm_call", + "VM found, run XVM call: {:?}, {:?}, {:?}, {:?}", + context, from, to, input, + ); + return Tuple::xvm_call(context, from, to, input) + } + )* ); + log::trace!( + target: "xvm::SyncVm::xvm_call", + "VM with ID {:?} not found", context.id + ); + Err(XvmCallError { + error: XvmError::VmNotRecognized, + consumed_weight: PLACEHOLDER_WEIGHT, + }) + } +} + +/// The engine that support asynchronous smart contract execution. +/// For example, XCVM. +pub trait AsyncVM { + /// Unique VM identifier. + fn id() -> VmId; + + /// Send a message. + fn xvm_send(context: XvmContext, from: AccountId, to: Vec, message: Vec) -> XvmResult; + + /// Query for incoming messages. 
+ fn xvm_query(context: XvmContext, inbox: AccountId) -> XvmResult; +} + +#[impl_trait_for_tuples::impl_for_tuples(30)] +impl AsyncVM for Tuple { + fn id() -> VmId { + Default::default() + } + + fn xvm_send(context: XvmContext, from: AccountId, to: Vec, message: Vec) -> XvmResult { + for_tuples!( #( + if Tuple::id() == context.id { + log::trace!( + target: "xvm::AsyncVM::xvm_send", + "VM found, send message: {:?}, {:?}, {:?}, {:?}", + context, from, to, message, + ); + return Tuple::xvm_send(context, from, to, message) + } + )* ); + log::trace!( + target: "xvm::AsyncVM::xvm_send", + "VM with ID {:?} not found", context.id + ); + + Err(XvmCallError { + error: XvmError::VmNotRecognized, + consumed_weight: PLACEHOLDER_WEIGHT, + }) + } + + fn xvm_query(context: XvmContext, inbox: AccountId) -> XvmResult { + for_tuples!( #( + if Tuple::id() == context.id { + log::trace!( + target: "xvm::AsyncVM::xvm_query", + "VM found, query messages: {:?} {:?}", + context, inbox, + ); + return Tuple::xvm_query(context, inbox) + } + )* ); + log::trace!( + target: "xvm::AsyncVM::xvm_query", + "VM with ID {:?} not found", context.id + ); + + Err(XvmCallError { + error: XvmError::VmNotRecognized, + consumed_weight: PLACEHOLDER_WEIGHT, + }) + } +} diff --git a/pallets/pallet-xvm/src/pallet/mod.rs b/pallets/pallet-xvm/src/pallet/mod.rs new file mode 100644 index 0000000000..b34d4090a8 --- /dev/null +++ b/pallets/pallet-xvm/src/pallet/mod.rs @@ -0,0 +1,160 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +//! # XVM pallet +//! +//! ## Overview +//! +//! +//! ## Interface +//! +//! ### Dispatchable Function +//! +//! +//! ### Other +//! +//! + +#[frame_support::pallet] +#[allow(clippy::module_inception)] +pub mod pallet { + use crate::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + #[pallet::without_storage_info] + pub struct Pallet(PhantomData); + + #[pallet::config] + pub trait Config: frame_system::Config { + /// Supported synchronous VM list, for example (EVM, WASM) + type SyncVM: SyncVM; + /// Supported asynchronous VM list. + type AsyncVM: AsyncVM; + /// General event type. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + } + + #[pallet::error] + pub enum Error {} + + #[pallet::event] + #[pallet::generate_deposit(pub(crate) fn deposit_event)] + pub enum Event { + XvmCall { result: Result, XvmError> }, + XvmSend { result: Result, XvmError> }, + XvmQuery { result: Result, XvmError> }, + } + + impl Pallet { + /// Internal interface for cross-pallet invocation. 
+ /// Essentially does the same thing as `xvm_call`, but a bit differently: + /// - It does not verify origin + /// - It does not use `Dispatchable` API (cannot be called from tx) + /// - It does not deposit event upon completion + /// - It returns `XvmResult` letting the caller get return data directly + pub fn xvm_bare_call( + context: XvmContext, + from: T::AccountId, + to: Vec, + input: Vec, + ) -> XvmResult { + let result = T::SyncVM::xvm_call(context, from, to, input); + + log::trace!( + target: "xvm::pallet::xvm_bare_call", + "Execution result: {:?}", result + ); + + result + } + } + + #[pallet::call] + impl Pallet { + #[pallet::call_index(0)] + #[pallet::weight(context.max_weight)] + pub fn xvm_call( + origin: OriginFor, + context: XvmContext, + to: Vec, + input: Vec, + ) -> DispatchResultWithPostInfo { + let from = ensure_signed(origin)?; + + // Executing XVM call logic itself will consume some weight so that should be subtracted from the max allowed weight of XCM call + // TODO: fix + //context.max_weight = context.max_weight - PLACEHOLDER_WEIGHT; + + let result = T::SyncVM::xvm_call(context, from, to, input); + let consumed_weight = consumed_weight(&result); + + log::trace!( + target: "xvm::pallet::xvm_call", + "Execution result: {:?}, consumed_weight: {:?}", result, consumed_weight, + ); + + Self::deposit_event(Event::::XvmCall { + result: match result { + Ok(result) => Ok(result.output), + Err(result) => Err(result.error), + }, + }); + + Ok(Some(consumed_weight).into()) + } + + #[pallet::call_index(1)] + #[pallet::weight(context.max_weight)] + pub fn xvm_send( + origin: OriginFor, + context: XvmContext, + to: Vec, + message: Vec, + ) -> DispatchResultWithPostInfo { + let from = ensure_signed(origin)?; + let result = T::AsyncVM::xvm_send(context, from, to, message); + + Self::deposit_event(Event::::XvmSend { + result: match result { + Ok(result) => Ok(result.output), + Err(result) => Err(result.error), + }, + }); + + Ok(().into()) + } + + 
#[pallet::call_index(2)] + #[pallet::weight(context.max_weight)] + pub fn xvm_query(origin: OriginFor, context: XvmContext) -> DispatchResultWithPostInfo { + let inbox = ensure_signed(origin)?; + let result = T::AsyncVM::xvm_query(context, inbox); + + Self::deposit_event(Event::::XvmQuery { + result: match result { + Ok(result) => Ok(result.output), + Err(result) => Err(result.error), + }, + }); + + Ok(().into()) + } + } +} diff --git a/pallets/pallet-xvm/src/wasm.rs b/pallets/pallet-xvm/src/wasm.rs new file mode 100644 index 0000000000..0ebfecb127 --- /dev/null +++ b/pallets/pallet-xvm/src/wasm.rs @@ -0,0 +1,93 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +//! WASM (substrate contracts) support for XVM pallet. 
+ +use crate::*; +use frame_support::traits::Currency; +use parity_scale_codec::HasCompact; +use scale_info::TypeInfo; +use sp_runtime::traits::Get; +use sp_runtime::traits::StaticLookup; +use sp_std::fmt::Debug; +pub struct WASM(sp_std::marker::PhantomData<(I, T)>); + +type BalanceOf = <::Currency as Currency< + ::AccountId, +>>::Balance; + +impl SyncVM for WASM +where + I: Get, + T: pallet_contracts::Config + frame_system::Config, + as HasCompact>::Type: Clone + Eq + PartialEq + Debug + TypeInfo + Encode, +{ + fn id() -> VmId { + I::get() + } + + fn xvm_call(context: XvmContext, from: T::AccountId, to: Vec, input: Vec) -> XvmResult { + log::trace!( + target: "xvm::WASM::xvm_call", + "Start WASM XVM: {:?}, {:?}, {:?}", + from, to, input, + ); + let gas_limit = context.max_weight; + log::trace!( + target: "xvm::WASM::xvm_call", + "WASM xvm call gas (weight) limit: {:?}", gas_limit); + let dest = Decode::decode(&mut to.as_ref()).map_err(|_| XvmCallError { + error: XvmError::EncodingFailure, + consumed_weight: PLACEHOLDER_WEIGHT, + })?; + + let dest = T::Lookup::lookup(dest).map_err(|error| XvmCallError { + error: XvmError::ExecutionError(Into::<&str>::into(error).into()), + consumed_weight: PLACEHOLDER_WEIGHT, + })?; + let call_result = pallet_contracts::Pallet::::bare_call( + from, // no need to check origin, we consider it signed here + dest, + Default::default(), + gas_limit.into(), + None, + input, + false, + pallet_contracts::Determinism::Deterministic, + ); + + log::trace!( + target: "xvm::WASM::xvm_call", + "WASM XVM call result: {:?}", call_result + ); + + let consumed_weight = call_result.gas_consumed.ref_time(); + + match call_result.result { + Ok(success) => Ok(XvmCallOk { + output: success.data, + consumed_weight, + }), + + Err(error) => Err(XvmCallError { + error: XvmError::ExecutionError(Into::<&str>::into(error).into()), + consumed_weight, + }), + } + } +} diff --git a/pallets/xc-asset-config/Cargo.toml b/pallets/xc-asset-config/Cargo.toml new file 
mode 100644 index 0000000000..3295f1221d --- /dev/null +++ b/pallets/xc-asset-config/Cargo.toml @@ -0,0 +1,52 @@ +[package] +name = "pallet-xc-asset-config" +version = "1.3.0" +authors.workspace = true +edition.workspace = true +homepage.workspace = true +repository.workspace = true + +[dependencies] +log = { workspace = true } +serde = { workspace = true, optional = true } + +# Substrate +frame-support = { workspace = true } +frame-system = { workspace = true } +parity-scale-codec = { workspace = true } +scale-info = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } + +# Polkadot +xcm = { workspace = true } + +# Benchmarks +frame-benchmarking = { workspace = true, optional = true } + +[dev-dependencies] +pallet-balances = { workspace = true } +pallet-timestamp = { workspace = true } +sp-core = { workspace = true } + +[features] +default = ["std"] +std = [ + "frame-support/std", + "frame-system/std", + "parity-scale-codec/std", + "scale-info/std", + "serde", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", + "xcm/std", + "pallet-balances/std", + "frame-benchmarking?/std", +] + +runtime-benchmarks = [ + "frame-benchmarking", +] +try-runtime = ["frame-support/try-runtime"] diff --git a/pallets/xc-asset-config/src/benchmarking.rs b/pallets/xc-asset-config/src/benchmarking.rs new file mode 100644 index 0000000000..70bb3330f3 --- /dev/null +++ b/pallets/xc-asset-config/src/benchmarking.rs @@ -0,0 +1,113 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +#![cfg(feature = "runtime-benchmarks")] + +use super::*; +use crate::Pallet as XcAssetConfig; + +use frame_benchmarking::{benchmarks, impl_benchmark_test_suite}; +use frame_system::RawOrigin; +use sp_std::boxed::Box; +use xcm::v3::MultiLocation; + +benchmarks! { + + register_asset_location { + let asset_location = MultiLocation::parent(); + let asset_id = T::AssetId::default(); + + }: _(RawOrigin::Root, Box::new(asset_location.clone().into_versioned()), asset_id) + verify { + assert_eq!(AssetIdToLocation::::get(&asset_id), Some(asset_location.into_versioned())); + } + + set_asset_units_per_second { + let asset_location = MultiLocation::parent(); + let asset_id = T::AssetId::default(); + let units = 123; + + XcAssetConfig::::register_asset_location(RawOrigin::Root.into(), Box::new(asset_location.clone().into_versioned()), asset_id)?; + + }: _(RawOrigin::Root, Box::new(asset_location.clone().into_versioned()), units) + verify { + assert_eq!(AssetLocationUnitsPerSecond::::get(&asset_location.into_versioned()), Some(units)); + } + + change_existing_asset_location { + let asset_location = MultiLocation::parent(); + let asset_id = T::AssetId::default(); + let units = 123; + + XcAssetConfig::::register_asset_location(RawOrigin::Root.into(), Box::new(asset_location.clone().into_versioned()), asset_id)?; + XcAssetConfig::::set_asset_units_per_second(RawOrigin::Root.into(), Box::new(asset_location.clone().into_versioned()), units)?; + + let new_asset_location = MultiLocation::here(); + + }: _(RawOrigin::Root, Box::new(new_asset_location.clone().into_versioned()), asset_id) + verify { + 
assert!(!AssetLocationToId::::contains_key(&asset_location.clone().into_versioned())); + assert_eq!(AssetLocationToId::::get(&new_asset_location.clone().into_versioned()), Some(asset_id)); + assert_eq!(AssetLocationUnitsPerSecond::::get(&new_asset_location.into_versioned()), Some(units)); + } + + remove_payment_asset { + let asset_location = MultiLocation::parent(); + let asset_id = T::AssetId::default(); + let units = 123; + + XcAssetConfig::::register_asset_location(RawOrigin::Root.into(), Box::new(asset_location.clone().into_versioned()), asset_id)?; + XcAssetConfig::::set_asset_units_per_second(RawOrigin::Root.into(), Box::new(asset_location.clone().into_versioned()), units)?; + + }: _(RawOrigin::Root, Box::new(asset_location.clone().into_versioned())) + verify { + assert!(!AssetLocationUnitsPerSecond::::contains_key(&asset_location.into_versioned())); + } + + remove_asset { + let asset_location = MultiLocation::parent(); + let asset_id = T::AssetId::default(); + let units = 123; + + XcAssetConfig::::register_asset_location(RawOrigin::Root.into(), Box::new(asset_location.clone().into_versioned()), asset_id)?; + XcAssetConfig::::set_asset_units_per_second(RawOrigin::Root.into(), Box::new(asset_location.clone().into_versioned()), units)?; + + }: _(RawOrigin::Root, asset_id) + verify { + assert!(!AssetLocationToId::::contains_key(&asset_location.clone().into_versioned())); + assert!(!AssetIdToLocation::::contains_key(asset_id)); + assert!(!AssetLocationUnitsPerSecond::::contains_key(&asset_location.into_versioned())); + } + +} + +#[cfg(test)] +mod tests { + use crate::mock; + use sp_io::TestExternalities; + + pub fn new_test_ext() -> TestExternalities { + mock::ExternalityBuilder::build() + } +} + +impl_benchmark_test_suite!( + XcAssetConfig, + crate::benchmarking::tests::new_test_ext(), + crate::mock::Test +); diff --git a/pallets/xc-asset-config/src/lib.rs b/pallets/xc-asset-config/src/lib.rs new file mode 100644 index 0000000000..4310198def --- /dev/null +++ 
b/pallets/xc-asset-config/src/lib.rs @@ -0,0 +1,366 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +//! # Cross-chain Asset Config Pallet +//! +//! ## Overview +//! +//! This pallet provides mappings between local asset Id and remote asset location. +//! E.g. a multilocation like `{parents: 0, interior: X1::(Junction::Parachain(1000))}` could be mapped to local asset Id `789`. +//! +//! The pallet ensures that the latest MultiLocation version is always used. Developers must ensure to properly migrate legacy versions +//! to newest when they become available. +//! +//! Additionally, it stores information whether a foreign asset is supported as a payment currency for execution on local network. +//! +//! ## Interface +//! +//! ### Dispatchable Function +//! +//! - `register_asset_location` - used to register mapping between local asset Id and remote asset location +//! - `set_asset_units_per_second` - registers asset as payment currency and sets the desired payment per second of execution time +//! - `change_existing_asset_location` - changes the remote location of an existing local asset Id +//! - `remove_payment_asset` - removes asset from the set of supported payment assets +//! - `remove_asset` - removes all information related to this asset +//! +//! 
User is encouraged to refer to specific function implementations for more comprehensive documentation. +//! +//! ### Other +//! +//! `AssetLocationGetter` interface for mapping asset Id to asset location and vice versa +//! - `get_xc_asset_location` +//! - `get_asset_id` +//! +//! `ExecutionPaymentRate` interface for fetching `units per second` if asset is supported payment asset +//! - `get_units_per_second` +//! + +#![cfg_attr(not(feature = "std"), no_std)] + +use frame_support::pallet; +pub use pallet::*; + +#[cfg(any(test, feature = "runtime-benchmarks"))] +mod benchmarking; + +#[cfg(test)] +pub mod mock; +#[cfg(test)] +pub mod tests; + +pub mod migrations; + +pub mod weights; +pub use weights::WeightInfo; + +#[pallet] +pub mod pallet { + + use crate::weights::WeightInfo; + use frame_support::{pallet_prelude::*, traits::EnsureOrigin}; + use frame_system::pallet_prelude::*; + use parity_scale_codec::HasCompact; + use sp_std::boxed::Box; + use xcm::{v3::MultiLocation, VersionedMultiLocation}; + + const STORAGE_VERSION: StorageVersion = StorageVersion::new(2); + + #[pallet::pallet] + #[pallet::storage_version(STORAGE_VERSION)] + #[pallet::without_storage_info] + pub struct Pallet(PhantomData); + + /// Callback definition trait for cross-chain asset registration/deregistration notifications. 
+ pub trait XcAssetChanged { + /// Will be called by pallet when new asset Id has been registered + fn xc_asset_registered(asset_id: T::AssetId); + + /// Will be called by pallet when asset Id has been unregistered + fn xc_asset_unregistered(asset_id: T::AssetId); + } + + /// Implementation that does nothing + impl XcAssetChanged for () { + fn xc_asset_registered(_: T::AssetId) {} + fn xc_asset_unregistered(_: T::AssetId) {} + } + + /// Defines conversion between asset Id and cross-chain asset location + pub trait XcAssetLocation { + /// Get asset type from assetId + fn get_xc_asset_location(asset_id: AssetId) -> Option; + + /// Get local asset Id from asset location + fn get_asset_id(xc_asset_location: MultiLocation) -> Option; + } + + /// Used to fetch `units per second` if cross-chain asset is applicable for local execution payment. + pub trait ExecutionPaymentRate { + /// returns units per second from asset type or `None` if asset type isn't a supported payment asset. + fn get_units_per_second(asset_location: MultiLocation) -> Option; + } + + impl XcAssetLocation for Pallet { + fn get_xc_asset_location(asset_id: T::AssetId) -> Option { + AssetIdToLocation::::get(asset_id).and_then(|x| x.try_into().ok()) + } + + fn get_asset_id(asset_location: MultiLocation) -> Option { + AssetLocationToId::::get(asset_location.into_versioned()) + } + } + + impl ExecutionPaymentRate for Pallet { + fn get_units_per_second(asset_location: MultiLocation) -> Option { + AssetLocationUnitsPerSecond::::get(asset_location.into_versioned()) + } + } + + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + /// The Asset Id. This will be used to create the asset and to associate it with + /// a AssetLocation + type AssetId: Member + Parameter + Default + Copy + HasCompact + MaxEncodedLen; + + /// Callback handling for cross-chain asset registration or unregistration. 
+ type XcAssetChanged: XcAssetChanged; + + /// The required origin for managing cross-chain asset configuration + /// + /// Should most likely be root. + type ManagerOrigin: EnsureOrigin<::RuntimeOrigin>; + + type WeightInfo: WeightInfo; + } + + #[pallet::error] + pub enum Error { + /// Asset is already registered. + AssetAlreadyRegistered, + /// Asset does not exist (hasn't been registered). + AssetDoesNotExist, + /// Failed to convert to latest versioned MultiLocation + MultiLocationNotSupported, + } + + #[pallet::event] + #[pallet::generate_deposit(pub(crate) fn deposit_event)] + pub enum Event { + /// Registered mapping between asset type and asset Id. + AssetRegistered { + asset_location: VersionedMultiLocation, + asset_id: T::AssetId, + }, + /// Changed the amount of units we are charging per execution second for an asset + UnitsPerSecondChanged { + asset_location: VersionedMultiLocation, + units_per_second: u128, + }, + /// Changed the asset type mapping for a given asset id + AssetLocationChanged { + previous_asset_location: VersionedMultiLocation, + asset_id: T::AssetId, + new_asset_location: VersionedMultiLocation, + }, + /// Supported asset type for fee payment removed. + SupportedAssetRemoved { + asset_location: VersionedMultiLocation, + }, + /// Removed all information related to an asset Id + AssetRemoved { + asset_location: VersionedMultiLocation, + asset_id: T::AssetId, + }, + } + + /// Mapping from an asset id to asset type. + /// Can be used when receiving transaction specifying an asset directly, + /// like transferring an asset from this chain to another. + #[pallet::storage] + #[pallet::getter(fn asset_id_to_location)] + pub type AssetIdToLocation = + StorageMap<_, Twox64Concat, T::AssetId, VersionedMultiLocation>; + + /// Mapping from an asset type to an asset id. + /// Can be used when receiving a multilocation XCM message to retrieve + /// the corresponding asset in which tokens should be minted. 
+ #[pallet::storage] + #[pallet::getter(fn asset_location_to_id)] + pub type AssetLocationToId = + StorageMap<_, Twox64Concat, VersionedMultiLocation, T::AssetId>; + + /// Stores the units per second for local execution for a AssetLocation. + /// This is used to know how to charge for XCM execution in a particular asset. + /// + /// Not all asset types are supported for payment. If value exists here, it means it is supported. + #[pallet::storage] + #[pallet::getter(fn asset_location_units_per_second)] + pub type AssetLocationUnitsPerSecond = + StorageMap<_, Twox64Concat, VersionedMultiLocation, u128>; + + #[pallet::call] + impl Pallet { + /// Register new asset location to asset Id mapping. + /// + /// This makes the asset eligible for XCM interaction. + #[pallet::call_index(0)] + #[pallet::weight(T::WeightInfo::register_asset_location())] + pub fn register_asset_location( + origin: OriginFor, + asset_location: Box, + #[pallet::compact] asset_id: T::AssetId, + ) -> DispatchResult { + T::ManagerOrigin::ensure_origin(origin)?; + + // Ensure such an assetId does not exist + ensure!( + !AssetIdToLocation::::contains_key(&asset_id), + Error::::AssetAlreadyRegistered + ); + + let v3_asset_loc = MultiLocation::try_from(*asset_location) + .map_err(|_| Error::::MultiLocationNotSupported)?; + let asset_location = VersionedMultiLocation::V3(v3_asset_loc); + + AssetIdToLocation::::insert(&asset_id, asset_location.clone()); + AssetLocationToId::::insert(&asset_location, asset_id); + + T::XcAssetChanged::xc_asset_registered(asset_id); + + Self::deposit_event(Event::AssetRegistered { + asset_location, + asset_id, + }); + Ok(()) + } + + /// Change the amount of units we are charging per execution second + /// for a given AssetLocation. 
+ #[pallet::call_index(1)] + #[pallet::weight(T::WeightInfo::set_asset_units_per_second())] + pub fn set_asset_units_per_second( + origin: OriginFor, + asset_location: Box, + #[pallet::compact] units_per_second: u128, + ) -> DispatchResult { + T::ManagerOrigin::ensure_origin(origin)?; + + let v3_asset_loc = MultiLocation::try_from(*asset_location) + .map_err(|_| Error::::MultiLocationNotSupported)?; + let asset_location = VersionedMultiLocation::V3(v3_asset_loc); + + ensure!( + AssetLocationToId::::contains_key(&asset_location), + Error::::AssetDoesNotExist + ); + + AssetLocationUnitsPerSecond::::insert(&asset_location, units_per_second); + + Self::deposit_event(Event::UnitsPerSecondChanged { + asset_location, + units_per_second, + }); + Ok(()) + } + + /// Change the xcm type mapping for a given asset Id. + /// The new asset type will inherit old `units per second` value. + #[pallet::call_index(2)] + #[pallet::weight(T::WeightInfo::change_existing_asset_location())] + pub fn change_existing_asset_location( + origin: OriginFor, + new_asset_location: Box, + #[pallet::compact] asset_id: T::AssetId, + ) -> DispatchResult { + T::ManagerOrigin::ensure_origin(origin)?; + + let v3_asset_loc = MultiLocation::try_from(*new_asset_location) + .map_err(|_| Error::::MultiLocationNotSupported)?; + let new_asset_location = VersionedMultiLocation::V3(v3_asset_loc); + + let previous_asset_location = + AssetIdToLocation::::get(&asset_id).ok_or(Error::::AssetDoesNotExist)?; + + // Insert new asset type info + AssetIdToLocation::::insert(&asset_id, new_asset_location.clone()); + AssetLocationToId::::insert(&new_asset_location, asset_id); + + // Remove previous asset type info + AssetLocationToId::::remove(&previous_asset_location); + + // Change AssetLocationUnitsPerSecond + if let Some(units) = AssetLocationUnitsPerSecond::::take(&previous_asset_location) { + AssetLocationUnitsPerSecond::::insert(&new_asset_location, units); + } + + Self::deposit_event(Event::AssetLocationChanged { + 
previous_asset_location, + asset_id, + new_asset_location, + }); + Ok(()) + } + + /// Removes asset from the set of supported payment assets. + /// + /// The asset can still be interacted with via XCM but it cannot be used to pay for execution time. + #[pallet::call_index(3)] + #[pallet::weight(T::WeightInfo::remove_payment_asset())] + pub fn remove_payment_asset( + origin: OriginFor, + asset_location: Box, + ) -> DispatchResult { + T::ManagerOrigin::ensure_origin(origin)?; + + let v3_asset_loc = MultiLocation::try_from(*asset_location) + .map_err(|_| Error::::MultiLocationNotSupported)?; + let asset_location = VersionedMultiLocation::V3(v3_asset_loc); + + AssetLocationUnitsPerSecond::::remove(&asset_location); + + Self::deposit_event(Event::SupportedAssetRemoved { asset_location }); + Ok(()) + } + + /// Removes all information related to asset, removing it from XCM support. + #[pallet::call_index(4)] + #[pallet::weight(T::WeightInfo::remove_asset())] + pub fn remove_asset( + origin: OriginFor, + #[pallet::compact] asset_id: T::AssetId, + ) -> DispatchResult { + T::ManagerOrigin::ensure_origin(origin)?; + + let asset_location = + AssetIdToLocation::::get(&asset_id).ok_or(Error::::AssetDoesNotExist)?; + + AssetIdToLocation::::remove(&asset_id); + AssetLocationToId::::remove(&asset_location); + AssetLocationUnitsPerSecond::::remove(&asset_location); + T::XcAssetChanged::xc_asset_unregistered(asset_id); + + Self::deposit_event(Event::AssetRemoved { + asset_id, + asset_location, + }); + Ok(()) + } + } +} diff --git a/pallets/xc-asset-config/src/migrations.rs b/pallets/xc-asset-config/src/migrations.rs new file mode 100644 index 0000000000..944a7d1ca5 --- /dev/null +++ b/pallets/xc-asset-config/src/migrations.rs @@ -0,0 +1,124 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +use super::*; +use frame_support::traits::OnRuntimeUpgrade; +use frame_support::{dispatch::GetStorageVersion, log, pallet_prelude::*, traits::Get}; +use sp_std::{marker::PhantomData, vec::Vec}; +use xcm::IntoVersion; + +pub struct MigrationXcmV3(PhantomData); +impl OnRuntimeUpgrade for MigrationXcmV3 { + fn on_runtime_upgrade() -> Weight { + let version = Pallet::::on_chain_storage_version(); + let mut consumed_weight = Weight::zero(); + if version >= 2 { + return consumed_weight; + } + + // 1st map // + let id_to_location_entries: Vec<_> = AssetIdToLocation::::iter().collect(); + + for (asset_id, legacy_location) in id_to_location_entries { + consumed_weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + + if let Ok(new_location) = legacy_location.into_version(3) { + AssetIdToLocation::::insert(asset_id, new_location); + } else { + // Won't happen, can be verified with try-runtime before upgrade + log::warn!( + "Failed to convert AssetIdToLocation value for asset Id: {:?}", + asset_id + ); + } + } + + // 2nd map // + let location_to_id_entries: Vec<_> = AssetLocationToId::::drain().collect(); + + for (legacy_location, asset_id) in location_to_id_entries { + consumed_weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); + + if let Ok(new_location) = legacy_location.into_version(3) { + 
AssetLocationToId::::insert(new_location, asset_id); + } else { + // Shouldn't happen, can be verified with try-runtime before upgrade + log::warn!( + "Failed to convert AssetLocationToId value for asset Id: {:?}", + asset_id + ); + } + } + + // 3rd map // + let location_to_price_entries: Vec<_> = AssetLocationUnitsPerSecond::::drain().collect(); + + for (legacy_location, price) in location_to_price_entries { + consumed_weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); + + if let Ok(new_location) = legacy_location.into_version(3) { + AssetLocationUnitsPerSecond::::insert(new_location, price); + } else { + // Shouldn't happen, can be verified with try-runtime before upgrade + log::warn!("Failed to convert AssetLocationUnitsPerSecond value!"); + } + } + + StorageVersion::new(2).put::>(); + consumed_weight.saturating_accrue(T::DbWeight::get().reads(1)); + + consumed_weight + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, &'static str> { + assert!(Pallet::::on_chain_storage_version() < 2); + let id_to_location_entries: Vec<_> = AssetIdToLocation::::iter().collect(); + + Ok(id_to_location_entries.encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), &'static str> { + assert_eq!(Pallet::::on_chain_storage_version(), 2); + + use xcm::VersionedMultiLocation; + let legacy_id_to_location_entries: Vec<(T::AssetId, VersionedMultiLocation)> = + Decode::decode(&mut state.as_ref()) + .map_err(|_| "Cannot decode data from pre_upgrade")?; + + let new_id_to_location_entries: Vec<_> = AssetIdToLocation::::iter().collect(); + assert_eq!( + legacy_id_to_location_entries.len(), + new_id_to_location_entries.len() + ); + + for (ref id, ref _legacy_location) in legacy_id_to_location_entries { + let new_location = AssetIdToLocation::::get(id); + assert!(new_location.is_some()); + let new_location = new_location.expect("Assert above ensures it's `Some`."); + + assert_eq!(AssetLocationToId::::get(&new_location), 
Some(*id)); + assert!(AssetLocationUnitsPerSecond::::contains_key( + &new_location + )); + } + + Ok(()) + } +} diff --git a/pallets/xc-asset-config/src/mock.rs b/pallets/xc-asset-config/src/mock.rs new file mode 100644 index 0000000000..77cf85e70c --- /dev/null +++ b/pallets/xc-asset-config/src/mock.rs @@ -0,0 +1,124 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +use crate::{self as pallet_xc_asset_config}; + +use frame_support::{construct_runtime, parameter_types, weights::Weight}; +use sp_core::H256; + +use sp_io::TestExternalities; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, +}; + +type BlockNumber = u64; +type Balance = u128; +type AccountId = u64; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +const EXISTENTIAL_DEPOSIT: Balance = 2; + +construct_runtime!( + pub struct Test + where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system, + Balances: pallet_balances, + XcAssetConfig: pallet_xc_asset_config, + } +); + +parameter_types! 
{ + pub const BlockHashCount: u64 = 250; + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(Weight::from_ref_time(1024)); +} + +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type RuntimeOrigin = RuntimeOrigin; + type Index = u64; + type RuntimeCall = RuntimeCall; + type BlockNumber = BlockNumber; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Header = Header; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = BlockHashCount; + type DbWeight = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +parameter_types! 
{ + pub const MaxLocks: u32 = 4; + pub const ExistentialDeposit: Balance = EXISTENTIAL_DEPOSIT; +} + +impl pallet_balances::Config for Test { + type MaxLocks = MaxLocks; + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type Balance = Balance; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); +} + +type AssetId = u128; + +impl pallet_xc_asset_config::Config for Test { + type RuntimeEvent = RuntimeEvent; + type AssetId = AssetId; + type XcAssetChanged = (); + type ManagerOrigin = frame_system::EnsureRoot; + type WeightInfo = (); +} + +pub struct ExternalityBuilder; + +impl ExternalityBuilder { + pub fn build() -> TestExternalities { + let storage = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + + let mut ext = TestExternalities::from(storage); + ext.execute_with(|| System::set_block_number(1)); + ext + } +} diff --git a/pallets/xc-asset-config/src/tests.rs b/pallets/xc-asset-config/src/tests.rs new file mode 100644 index 0000000000..0638d7e3f5 --- /dev/null +++ b/pallets/xc-asset-config/src/tests.rs @@ -0,0 +1,451 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . 
+ +use super::{pallet::Error, pallet::Event, *}; +use frame_support::{assert_noop, assert_ok, WeakBoundedVec}; +use mock::*; +use sp_runtime::traits::BadOrigin; +use xcm::latest::prelude::*; + +use xcm::{v3::MultiLocation, VersionedMultiLocation}; + +#[test] +fn only_root_as_origin() { + ExternalityBuilder::build().execute_with(|| { + let asset_location = MultiLocation::here().into_versioned(); + let asset_id = 7; + + assert_noop!( + XcAssetConfig::register_asset_location( + RuntimeOrigin::signed(1), + Box::new(asset_location.clone()), + asset_id + ), + BadOrigin + ); + + assert_noop!( + XcAssetConfig::set_asset_units_per_second( + RuntimeOrigin::signed(1), + Box::new(asset_location.clone()), + 9 + ), + BadOrigin + ); + + assert_noop!( + XcAssetConfig::change_existing_asset_location( + RuntimeOrigin::signed(1), + Box::new(asset_location.clone()), + asset_id + ), + BadOrigin + ); + + assert_noop!( + XcAssetConfig::remove_payment_asset( + RuntimeOrigin::signed(1), + Box::new(asset_location.clone()), + ), + BadOrigin + ); + + assert_noop!( + XcAssetConfig::remove_asset(RuntimeOrigin::signed(1), asset_id,), + BadOrigin + ); + }) +} + +#[test] +fn register_asset_location_and_units_per_sec_is_ok() { + ExternalityBuilder::build().execute_with(|| { + // Prepare location and Id + let asset_location = MultiLocation::new( + 1, + Junctions::X2(Junction::PalletInstance(17), GeneralIndex(7)), + ); + let asset_id = 13; + + // Register asset and ensure it's ok + assert_ok!(XcAssetConfig::register_asset_location( + RuntimeOrigin::root(), + Box::new(asset_location.clone().into_versioned()), + asset_id + )); + System::assert_last_event(mock::RuntimeEvent::XcAssetConfig(Event::AssetRegistered { + asset_location: asset_location.clone().into_versioned(), + asset_id: asset_id, + })); + + // Assert storage state after registering asset + assert_eq!( + AssetIdToLocation::::get(&asset_id).unwrap(), + asset_location.clone().into_versioned() + ); + assert_eq!( + 
AssetLocationToId::::get(asset_location.clone().into_versioned()).unwrap(), + asset_id + ); + assert!(!AssetLocationUnitsPerSecond::::contains_key( + asset_location.clone().into_versioned() + )); + + // Register unit per second rate and verify storage + let units: u128 = 7 * 11 * 13 * 17 * 29; + assert_ok!(XcAssetConfig::set_asset_units_per_second( + RuntimeOrigin::root(), + Box::new(asset_location.clone().into_versioned()), + units + )); + System::assert_last_event(mock::RuntimeEvent::XcAssetConfig( + Event::UnitsPerSecondChanged { + asset_location: asset_location.clone().into_versioned(), + units_per_second: units, + }, + )); + assert_eq!( + AssetLocationUnitsPerSecond::::get(&asset_location.clone().into_versioned()) + .unwrap(), + units + ); + }) +} + +#[test] +fn asset_is_already_registered() { + ExternalityBuilder::build().execute_with(|| { + // Prepare location and Id + let asset_location = MultiLocation::new( + 1, + Junctions::X2(Junction::PalletInstance(17), GeneralIndex(7)), + ); + let asset_id = 13; + + // Register asset and ensure it's ok + assert_ok!(XcAssetConfig::register_asset_location( + RuntimeOrigin::root(), + Box::new(asset_location.clone().into_versioned()), + asset_id + )); + + // Now repeat the process and expect an error + assert_noop!( + XcAssetConfig::register_asset_location( + RuntimeOrigin::root(), + Box::new(asset_location.clone().into_versioned()), + asset_id + ), + Error::::AssetAlreadyRegistered + ); + }) +} + +#[test] +fn change_asset_location_is_ok() { + ExternalityBuilder::build().execute_with(|| { + // Prepare location, Id and units + let asset_location = MultiLocation::new(1, Junctions::X1(Junction::Parachain(2007))); + let asset_id = 17; + let units: u128 = 3 * 11 * 13 * 17; + + // Register asset and ups + assert_ok!(XcAssetConfig::register_asset_location( + RuntimeOrigin::root(), + Box::new(asset_location.clone().into_versioned()), + asset_id + )); + assert_ok!(XcAssetConfig::set_asset_units_per_second( + RuntimeOrigin::root(), 
+ Box::new(asset_location.clone().into_versioned()), + units + )); + + // Change the asset location and assert change was successful + let new_asset_location = MultiLocation::new(2, Junctions::X1(Junction::PalletInstance(3))); + assert_ne!(new_asset_location, asset_location); // sanity check + + assert_ok!(XcAssetConfig::change_existing_asset_location( + RuntimeOrigin::root(), + Box::new(new_asset_location.clone().into_versioned()), + asset_id + )); + System::assert_last_event(mock::RuntimeEvent::XcAssetConfig( + Event::AssetLocationChanged { + previous_asset_location: asset_location.clone().into_versioned(), + asset_id: asset_id, + new_asset_location: new_asset_location.clone().into_versioned(), + }, + )); + + // Assert storage state + assert_eq!( + AssetIdToLocation::::get(&asset_id).unwrap(), + new_asset_location.clone().into_versioned() + ); + assert_eq!( + AssetLocationToId::::get(new_asset_location.clone().into_versioned()).unwrap(), + asset_id + ); + + // This should have been deleted + assert!(!AssetLocationUnitsPerSecond::::contains_key( + asset_location.clone().into_versioned() + )); + assert_eq!( + AssetLocationUnitsPerSecond::::get(new_asset_location.clone().into_versioned()) + .unwrap(), + units + ); + }) +} + +#[test] +fn remove_payment_asset_is_ok() { + ExternalityBuilder::build().execute_with(|| { + // Prepare location, Id and units + let asset_location = MultiLocation::new(1, Junctions::X1(Junction::Parachain(2007))); + let asset_id = 17; + let units: u128 = 3 * 11 * 13 * 17; + + // Register asset and ups + assert_ok!(XcAssetConfig::register_asset_location( + RuntimeOrigin::root(), + Box::new(asset_location.clone().into_versioned()), + asset_id + )); + assert_ok!(XcAssetConfig::set_asset_units_per_second( + RuntimeOrigin::root(), + Box::new(asset_location.clone().into_versioned()), + units + )); + + // Now we remove supported asset + assert_ok!(XcAssetConfig::remove_payment_asset( + RuntimeOrigin::root(), + 
Box::new(asset_location.clone().into_versioned()), + )); + System::assert_last_event(mock::RuntimeEvent::XcAssetConfig( + Event::SupportedAssetRemoved { + asset_location: asset_location.clone().into_versioned(), + }, + )); + assert!(!AssetLocationUnitsPerSecond::::contains_key( + asset_location.clone().into_versioned() + )); + + // Repeated calls don't do anything + assert_ok!(XcAssetConfig::remove_payment_asset( + RuntimeOrigin::root(), + Box::new(asset_location.clone().into_versioned()), + )); + }) +} + +#[test] +fn remove_asset_is_ok() { + ExternalityBuilder::build().execute_with(|| { + // Prepare location, Id and units + let asset_location = MultiLocation::new(1, Junctions::X1(Junction::Parachain(2007))); + let asset_id = 17; + let units: u128 = 3 * 11 * 13 * 17; + + // Register asset and ups + assert_ok!(XcAssetConfig::register_asset_location( + RuntimeOrigin::root(), + Box::new(asset_location.clone().into_versioned()), + asset_id + )); + assert_ok!(XcAssetConfig::set_asset_units_per_second( + RuntimeOrigin::root(), + Box::new(asset_location.clone().into_versioned()), + units + )); + + // Remove asset entirely and assert op is ok + assert_ok!(XcAssetConfig::remove_asset(RuntimeOrigin::root(), asset_id,)); + System::assert_last_event(mock::RuntimeEvent::XcAssetConfig(Event::AssetRemoved { + asset_location: asset_location.clone().into_versioned(), + asset_id: asset_id, + })); + + // Assert that storage is empty after successful removal + assert!(!AssetIdToLocation::::contains_key(asset_id)); + assert!(!AssetLocationToId::::contains_key( + asset_location.clone().into_versioned() + )); + assert!(!AssetLocationUnitsPerSecond::::contains_key( + asset_location.clone().into_versioned() + )); + }) +} + +#[test] +fn not_registered_asset_is_not_ok() { + ExternalityBuilder::build().execute_with(|| { + // Prepare location, Id and units + let asset_location = MultiLocation::parent(); + let asset_id = 17; + let units: u128 = 3 * 11 * 13 * 17; + + assert_noop!( + 
XcAssetConfig::set_asset_units_per_second(
+ RuntimeOrigin::root(),
+ Box::new(asset_location.clone().into_versioned()),
+ units
+ ),
+ Error::::AssetDoesNotExist
+ );
+
+ assert_noop!(
+ XcAssetConfig::change_existing_asset_location(
+ RuntimeOrigin::root(),
+ Box::new(asset_location.clone().into_versioned()),
+ asset_id
+ ),
+ Error::::AssetDoesNotExist
+ );
+
+ assert_noop!(
+ XcAssetConfig::remove_asset(RuntimeOrigin::root(), asset_id,),
+ Error::::AssetDoesNotExist
+ );
+ })
+}
+
+#[test]
+fn public_interfaces_are_ok() {
+ ExternalityBuilder::build().execute_with(|| {
+ // Prepare location, Id and units
+ let asset_location = MultiLocation::parent();
+ let asset_id = 17;
+ let units: u128 = 3 * 11 * 13 * 17;
+
+ // Initially, expect `None` to be returned for all
+ assert!(XcAssetConfig::get_xc_asset_location(asset_id).is_none());
+ assert!(XcAssetConfig::get_asset_id(asset_location.clone()).is_none());
+ assert!(XcAssetConfig::get_units_per_second(asset_location.clone()).is_none());
+
+ // Register asset and expect values to be returned but UPS should still be `None`
+ assert_ok!(XcAssetConfig::register_asset_location(
+ RuntimeOrigin::root(),
+ Box::new(asset_location.clone().into_versioned()),
+ asset_id
+ ));
+ assert_eq!(
+ XcAssetConfig::get_xc_asset_location(asset_id),
+ Some(asset_location.clone())
+ );
+ assert_eq!(
+ XcAssetConfig::get_asset_id(asset_location.clone()),
+ Some(asset_id)
+ );
+ assert!(XcAssetConfig::get_units_per_second(asset_location.clone()).is_none());
+
+ // Register ups and expect the value to be returned
+ assert_ok!(XcAssetConfig::set_asset_units_per_second(
+ RuntimeOrigin::root(),
+ Box::new(asset_location.clone().into_versioned()),
+ units
+ ));
+ assert_eq!(
+ XcAssetConfig::get_units_per_second(asset_location.clone()),
+ Some(units)
+ );
+ })
+}
+
+#[test]
+fn different_xcm_versions_are_ok() {
+ ExternalityBuilder::build().execute_with(|| {
+ // Prepare location and Id
+ let legacy_asset_location =
xcm::v2::MultiLocation::parent(); + let new_asset_location = xcm::v3::MultiLocation::parent(); + let asset_id = 17; + + // Register asset using legacy multilocation + assert_ok!(XcAssetConfig::register_asset_location( + RuntimeOrigin::root(), + Box::new(VersionedMultiLocation::V2(legacy_asset_location.clone())), + asset_id + )); + + // Ensure that the new format is properly returned + assert_eq!( + XcAssetConfig::get_xc_asset_location(asset_id), + Some(new_asset_location.clone()) + ); + }) +} + +#[test] +fn incompatible_versioned_multilocations_are_not_ok() { + ExternalityBuilder::build().execute_with(|| { + // MultiLocation that cannot be converted from v2 to v3 + let incompatible_asset_location = xcm::v2::MultiLocation { + parents: 1, + interior: xcm::v2::Junctions::X1(xcm::v2::Junction::GeneralKey( + WeakBoundedVec::<_, _>::force_from([123_u8; 33].to_vec(), None), + )), + }; + let asset_id = 123; + + assert_noop!( + XcAssetConfig::register_asset_location( + RuntimeOrigin::root(), + Box::new(VersionedMultiLocation::V2( + incompatible_asset_location.clone() + )), + asset_id + ), + Error::::MultiLocationNotSupported + ); + + assert_noop!( + XcAssetConfig::set_asset_units_per_second( + RuntimeOrigin::root(), + Box::new(VersionedMultiLocation::V2( + incompatible_asset_location.clone() + )), + 12345, + ), + Error::::MultiLocationNotSupported + ); + + assert_noop!( + XcAssetConfig::change_existing_asset_location( + RuntimeOrigin::root(), + Box::new(VersionedMultiLocation::V2( + incompatible_asset_location.clone() + )), + 12345, + ), + Error::::MultiLocationNotSupported + ); + + assert_noop!( + XcAssetConfig::remove_payment_asset( + RuntimeOrigin::root(), + Box::new(VersionedMultiLocation::V2( + incompatible_asset_location.clone() + )), + ), + Error::::MultiLocationNotSupported + ); + }) +} diff --git a/pallets/xc-asset-config/src/weights.rs b/pallets/xc-asset-config/src/weights.rs new file mode 100644 index 0000000000..67ddd81b7d --- /dev/null +++ 
b/pallets/xc-asset-config/src/weights.rs @@ -0,0 +1,184 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +//! Autogenerated weights for pallet_xc_asset_config +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-04-04, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `devserver-01`, CPU: `Intel(R) Xeon(R) E-2236 CPU @ 3.40GHz` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("shibuya-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/release/astar-collator +// benchmark +// pallet +// --chain=shibuya-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_xc_asset_config +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./benchmark-results/xc_asset_config_weights.rs +// --template=./scripts/templates/weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_xc_asset_config. 
+pub trait WeightInfo { + fn register_asset_location() -> Weight; + fn set_asset_units_per_second() -> Weight; + fn change_existing_asset_location() -> Weight; + fn remove_payment_asset() -> Weight; + fn remove_asset() -> Weight; +} + +/// Weights for pallet_xc_asset_config using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + // Storage: XcAssetConfig AssetIdToLocation (r:1 w:1) + // Proof Skipped: XcAssetConfig AssetIdToLocation (max_values: None, max_size: None, mode: Measured) + // Storage: EVM AccountCodes (r:0 w:1) + // Proof Skipped: EVM AccountCodes (max_values: None, max_size: None, mode: Measured) + // Storage: XcAssetConfig AssetLocationToId (r:0 w:1) + // Proof Skipped: XcAssetConfig AssetLocationToId (max_values: None, max_size: None, mode: Measured) + fn register_asset_location() -> Weight { + // Minimum execution time: 15_540 nanoseconds. + Weight::from_ref_time(16_114_000) + .saturating_add(Weight::from_proof_size(2493)) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } + // Storage: XcAssetConfig AssetLocationToId (r:1 w:0) + // Proof Skipped: XcAssetConfig AssetLocationToId (max_values: None, max_size: None, mode: Measured) + // Storage: XcAssetConfig AssetLocationUnitsPerSecond (r:0 w:1) + // Proof Skipped: XcAssetConfig AssetLocationUnitsPerSecond (max_values: None, max_size: None, mode: Measured) + fn set_asset_units_per_second() -> Weight { + // Minimum execution time: 15_297 nanoseconds. 
+ Weight::from_ref_time(15_551_000) + .saturating_add(Weight::from_proof_size(2661)) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + // Storage: XcAssetConfig AssetIdToLocation (r:1 w:1) + // Proof Skipped: XcAssetConfig AssetIdToLocation (max_values: None, max_size: None, mode: Measured) + // Storage: XcAssetConfig AssetLocationUnitsPerSecond (r:1 w:2) + // Proof Skipped: XcAssetConfig AssetLocationUnitsPerSecond (max_values: None, max_size: None, mode: Measured) + // Storage: XcAssetConfig AssetLocationToId (r:0 w:2) + // Proof Skipped: XcAssetConfig AssetLocationToId (max_values: None, max_size: None, mode: Measured) + fn change_existing_asset_location() -> Weight { + // Minimum execution time: 22_357 nanoseconds. + Weight::from_ref_time(22_572_000) + .saturating_add(Weight::from_proof_size(5373)) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(5_u64)) + } + // Storage: XcAssetConfig AssetLocationUnitsPerSecond (r:0 w:1) + // Proof Skipped: XcAssetConfig AssetLocationUnitsPerSecond (max_values: None, max_size: None, mode: Measured) + fn remove_payment_asset() -> Weight { + // Minimum execution time: 9_707 nanoseconds. 
+ Weight::from_ref_time(10_005_000) + .saturating_add(Weight::from_proof_size(0)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + // Storage: XcAssetConfig AssetIdToLocation (r:1 w:1) + // Proof Skipped: XcAssetConfig AssetIdToLocation (max_values: None, max_size: None, mode: Measured) + // Storage: EVM AccountCodes (r:0 w:1) + // Proof Skipped: EVM AccountCodes (max_values: None, max_size: None, mode: Measured) + // Storage: XcAssetConfig AssetLocationUnitsPerSecond (r:0 w:1) + // Proof Skipped: XcAssetConfig AssetLocationUnitsPerSecond (max_values: None, max_size: None, mode: Measured) + // Storage: XcAssetConfig AssetLocationToId (r:0 w:1) + // Proof Skipped: XcAssetConfig AssetLocationToId (max_values: None, max_size: None, mode: Measured) + fn remove_asset() -> Weight { + // Minimum execution time: 18_645 nanoseconds. + Weight::from_ref_time(18_878_000) + .saturating_add(Weight::from_proof_size(2987)) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + // Storage: XcAssetConfig AssetIdToLocation (r:1 w:1) + // Proof Skipped: XcAssetConfig AssetIdToLocation (max_values: None, max_size: None, mode: Measured) + // Storage: EVM AccountCodes (r:0 w:1) + // Proof Skipped: EVM AccountCodes (max_values: None, max_size: None, mode: Measured) + // Storage: XcAssetConfig AssetLocationToId (r:0 w:1) + // Proof Skipped: XcAssetConfig AssetLocationToId (max_values: None, max_size: None, mode: Measured) + fn register_asset_location() -> Weight { + // Minimum execution time: 15_540 nanoseconds. 
+ Weight::from_ref_time(16_114_000) + .saturating_add(Weight::from_proof_size(2493)) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } + // Storage: XcAssetConfig AssetLocationToId (r:1 w:0) + // Proof Skipped: XcAssetConfig AssetLocationToId (max_values: None, max_size: None, mode: Measured) + // Storage: XcAssetConfig AssetLocationUnitsPerSecond (r:0 w:1) + // Proof Skipped: XcAssetConfig AssetLocationUnitsPerSecond (max_values: None, max_size: None, mode: Measured) + fn set_asset_units_per_second() -> Weight { + // Minimum execution time: 15_297 nanoseconds. + Weight::from_ref_time(15_551_000) + .saturating_add(Weight::from_proof_size(2661)) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + // Storage: XcAssetConfig AssetIdToLocation (r:1 w:1) + // Proof Skipped: XcAssetConfig AssetIdToLocation (max_values: None, max_size: None, mode: Measured) + // Storage: XcAssetConfig AssetLocationUnitsPerSecond (r:1 w:2) + // Proof Skipped: XcAssetConfig AssetLocationUnitsPerSecond (max_values: None, max_size: None, mode: Measured) + // Storage: XcAssetConfig AssetLocationToId (r:0 w:2) + // Proof Skipped: XcAssetConfig AssetLocationToId (max_values: None, max_size: None, mode: Measured) + fn change_existing_asset_location() -> Weight { + // Minimum execution time: 22_357 nanoseconds. + Weight::from_ref_time(22_572_000) + .saturating_add(Weight::from_proof_size(5373)) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(5_u64)) + } + // Storage: XcAssetConfig AssetLocationUnitsPerSecond (r:0 w:1) + // Proof Skipped: XcAssetConfig AssetLocationUnitsPerSecond (max_values: None, max_size: None, mode: Measured) + fn remove_payment_asset() -> Weight { + // Minimum execution time: 9_707 nanoseconds. 
+ Weight::from_ref_time(10_005_000) + .saturating_add(Weight::from_proof_size(0)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + // Storage: XcAssetConfig AssetIdToLocation (r:1 w:1) + // Proof Skipped: XcAssetConfig AssetIdToLocation (max_values: None, max_size: None, mode: Measured) + // Storage: EVM AccountCodes (r:0 w:1) + // Proof Skipped: EVM AccountCodes (max_values: None, max_size: None, mode: Measured) + // Storage: XcAssetConfig AssetLocationUnitsPerSecond (r:0 w:1) + // Proof Skipped: XcAssetConfig AssetLocationUnitsPerSecond (max_values: None, max_size: None, mode: Measured) + // Storage: XcAssetConfig AssetLocationToId (r:0 w:1) + // Proof Skipped: XcAssetConfig AssetLocationToId (max_values: None, max_size: None, mode: Measured) + fn remove_asset() -> Weight { + // Minimum execution time: 18_645 nanoseconds. + Weight::from_ref_time(18_878_000) + .saturating_add(Weight::from_proof_size(2987)) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } +} \ No newline at end of file diff --git a/precompiles/assets-erc20/Cargo.toml b/precompiles/assets-erc20/Cargo.toml new file mode 100644 index 0000000000..cafa20e6b9 --- /dev/null +++ b/precompiles/assets-erc20/Cargo.toml @@ -0,0 +1,57 @@ +[package] +name = "pallet-evm-precompile-assets-erc20" +description = "A Precompile to expose a Assets pallet through an ERC20-compliant interface." 
+version = "0.5.2" +authors.workspace = true +edition.workspace = true +homepage.workspace = true +repository.workspace = true + +[dependencies] +log = { workspace = true } +num_enum = { workspace = true } +slices = { workspace = true } + +precompile-utils = { workspace = true } + +# Substrate +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-assets = { workspace = true } +pallet-balances = { workspace = true } +parity-scale-codec = { workspace = true, features = ["max-encoded-len"] } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } + +# Frontier +fp-evm = { workspace = true } +pallet-evm = { workspace = true } + +[dev-dependencies] +derive_more = { workspace = true } +serde = { workspace = true } +sha3 = { workspace = true } + +precompile-utils = { workspace = true, features = ["testing"] } + +pallet-timestamp = { workspace = true } +scale-info = { workspace = true } + +[features] +default = ["std"] +std = [ + "parity-scale-codec/std", + "fp-evm/std", + "frame-support/std", + "frame-system/std", + "pallet-assets/std", + "pallet-evm/std", + "pallet-balances/std", + "precompile-utils/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", +] diff --git a/precompiles/assets-erc20/ERC20.sol b/precompiles/assets-erc20/ERC20.sol new file mode 100644 index 0000000000..32ca243f21 --- /dev/null +++ b/precompiles/assets-erc20/ERC20.sol @@ -0,0 +1,141 @@ + pragma solidity ^0.8.0; + + /** + * @title ERC20 interface + * @dev see https://github.com/ethereum/EIPs/issues/20 + * @dev copied from https://github.com/OpenZeppelin/openzeppelin-contracts + */ + interface IERC20 { + + /** + * @dev Returns the name of the token. + * Selector: 06fdde03 + */ + function name() external view returns (string memory); + + /** + * @dev Returns the symbol of the token. 
+ * Selector: 95d89b41 + */ + function symbol() external view returns (string memory); + + /** + * @dev Returns the decimals places of the token. + * Selector: 313ce567 + */ + function decimals() external view returns (uint8); + + /** + * @dev Total number of tokens in existence + * Selector: 18160ddd + */ + function totalSupply() external view returns (uint256); + + /** + * @dev Gets the balance of the specified address. + * Selector: 70a08231 + * @param who The address to query the balance of. + * @return An uint256 representing the amount owned by the passed address. + */ + function balanceOf(address who) external view returns (uint256); + + /** + * @dev Function to check the amount of tokens that an owner allowed to a spender. + * Selector: dd62ed3e + * @param owner address The address which owns the funds. + * @param spender address The address which will spend the funds. + * @return A uint256 specifying the amount of tokens still available for the spender. + */ + function allowance(address owner, address spender) + external view returns (uint256); + + /** + * @dev Transfer token for a specified address + * Selector: a9059cbb + * @param to The address to transfer to. + * @param value The amount to be transferred. + */ + function transfer(address to, uint256 value) external returns (bool); + + /** + * @dev Approve the passed address to spend the specified amount of tokens on behalf + * of msg.sender. + * Beware that changing an allowance with this method brings the risk that someone may + * use both the old + * and the new allowance by unfortunate transaction ordering. One possible solution to + * mitigate this race condition is to first reduce the spender's allowance to 0 and set + * the desired value afterwards: + * https://github.com/ethereum/EIPs/issues/20#issuecomment-263524729 + * Selector: 095ea7b3 + * @param spender The address which will spend the funds. + * @param value The amount of tokens to be spent. 
+ */ + function approve(address spender, uint256 value) + external returns (bool); + + /** + * @dev Transfer tokens from one address to another + * Selector: 23b872dd + * @param from address The address which you want to send tokens from + * @param to address The address which you want to transfer to + * @param value uint256 the amount of tokens to be transferred + */ + function transferFrom(address from, address to, uint256 value) + external returns (bool); + + /** + * @dev Event emited when a transfer has been performed. + * Selector: ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef + * @param from address The address sending the tokens + * @param to address The address receiving the tokens. + * @param value uint256 The amount of tokens transfered. + */ + event Transfer( + address indexed from, + address indexed to, + uint256 value + ); + + /** + * @dev Event emited when an approval has been registered. + * Selector: 8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925 + * @param owner address Owner of the tokens. + * @param spender address Allowed spender. + * @param value uint256 Amount of tokens approved. + */ + event Approval( + address indexed owner, + address indexed spender, + uint256 value + ); +} + + /** + * @title Extension for ERC20 interface + * @dev Extended functions with minimum balance check as well as mint & burn. + */ + interface IERC20Plus is IERC20 { + + /** + * @dev Returns minimum balance an account must have to exist + * Selector: b9d1d49b + */ + function minimumBalance() external view returns (uint256); + + /** + * @dev Mints the specified amount of asset for the beneficiary. + * This operation will increase the total supply. + * Only usable by asset admin. + * Selector: 40c10f19 + */ + function mint(address beneficiary, uint256 amount) external returns (bool); + + /** + * @dev Burns by up to the specified amount of asset from the target. + * This operation will increase decrease the total supply. 
+ * Only usable by asset admin. + * Selector: 9dc29fac + */ + function burn(address who, uint256 amount) external returns (bool); +} + diff --git a/precompiles/assets-erc20/src/lib.rs b/precompiles/assets-erc20/src/lib.rs new file mode 100644 index 0000000000..8195f4183e --- /dev/null +++ b/precompiles/assets-erc20/src/lib.rs @@ -0,0 +1,546 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +// Copyright 2019-2022 PureStake Inc. +// Copyright 2022 Stake Technologies +// This file is part of AssetsERC20 package, originally developed by Purestake Inc. +// AssetsERC20 package used in Astar Network in terms of GPLv3. +// +// AssetsERC20 is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// AssetsERC20 is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with AssetsERC20. If not, see . 
+ +#![cfg_attr(not(feature = "std"), no_std)] +#![cfg_attr(test, feature(assert_matches))] + +use fp_evm::{PrecompileHandle, PrecompileOutput}; +use frame_support::traits::fungibles::approvals::Inspect as ApprovalInspect; +use frame_support::traits::fungibles::metadata::Inspect as MetadataInspect; +use frame_support::traits::fungibles::Inspect; +use frame_support::traits::OriginTrait; +use frame_support::{ + dispatch::{Dispatchable, GetDispatchInfo, PostDispatchInfo}, + sp_runtime::traits::StaticLookup, +}; +use pallet_evm::{AddressMapping, PrecompileSet}; +use precompile_utils::{ + keccak256, succeed, Address, Bytes, EvmData, EvmDataWriter, EvmResult, FunctionModifier, + LogExt, LogsBuilder, PrecompileHandleExt, RuntimeHelper, +}; +use sp_runtime::traits::{Bounded, Zero}; + +use sp_core::{H160, U256}; +use sp_std::{ + convert::{TryFrom, TryInto}, + marker::PhantomData, +}; + +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; + +/// Solidity selector of the Transfer log, which is the Keccak of the Log signature. +pub const SELECTOR_LOG_TRANSFER: [u8; 32] = keccak256!("Transfer(address,address,uint256)"); + +/// Solidity selector of the Approval log, which is the Keccak of the Log signature. +pub const SELECTOR_LOG_APPROVAL: [u8; 32] = keccak256!("Approval(address,address,uint256)"); + +/// Alias for the Balance type for the provided Runtime and Instance. +pub type BalanceOf = >::Balance; + +/// Alias for the Asset Id type for the provided Runtime and Instance. 
+pub type AssetIdOf = >::AssetId; + +#[precompile_utils::generate_function_selector] +#[derive(Debug, PartialEq)] +pub enum Action { + TotalSupply = "totalSupply()", + BalanceOf = "balanceOf(address)", + Allowance = "allowance(address,address)", + Transfer = "transfer(address,uint256)", + Approve = "approve(address,uint256)", + TransferFrom = "transferFrom(address,address,uint256)", + Name = "name()", + Symbol = "symbol()", + Decimals = "decimals()", + MinimumBalance = "minimumBalance()", + Mint = "mint(address,uint256)", + Burn = "burn(address,uint256)", +} + +/// This trait ensure we can convert EVM address to AssetIds +/// We will require Runtime to have this trait implemented +pub trait AddressToAssetId { + // Get assetId from address + fn address_to_asset_id(address: H160) -> Option; + + // Get address from AssetId + fn asset_id_to_address(asset_id: AssetId) -> H160; +} + +/// The following distribution has been decided for the precompiles +/// 0-1023: Ethereum Mainnet Precompiles +/// 1024-2047 Precompiles that are not in Ethereum Mainnet but are neither Astar specific +/// 2048-4095 Astar specific precompiles +/// Asset precompiles can only fall between +/// 0xFFFFFFFF00000000000000000000000000000000 - 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF +/// The precompile for AssetId X, where X is a u128 (i.e.16 bytes), if 0XFFFFFFFF + Bytes(AssetId) +/// In order to route the address to Erc20AssetsPrecompile, we first check whether the AssetId +/// exists in pallet-assets +/// We cannot do this right now, so instead we check whether the total supply is zero. 
If so, we +/// do not route to the precompiles + +/// This means that every address that starts with 0xFFFFFFFF will go through an additional db read, +/// but the probability for this to happen is 2^-32 for random addresses +pub struct Erc20AssetsPrecompileSet( + PhantomData<(Runtime, Instance)>, +); + +impl Erc20AssetsPrecompileSet { + pub fn new() -> Self { + Self(PhantomData) + } +} + +impl PrecompileSet for Erc20AssetsPrecompileSet +where + Instance: 'static, + Runtime: pallet_assets::Config + pallet_evm::Config + frame_system::Config, + Runtime::RuntimeCall: Dispatchable + GetDispatchInfo, + Runtime::RuntimeCall: From>, + ::RuntimeOrigin: From>, + BalanceOf: TryFrom + Into + EvmData, + Runtime: AddressToAssetId>, + <::RuntimeCall as Dispatchable>::RuntimeOrigin: OriginTrait, +{ + fn execute(&self, handle: &mut impl PrecompileHandle) -> Option> { + let address = handle.code_address(); + + if let Some(asset_id) = Runtime::address_to_asset_id(address) { + // We check maybe_total_supply. 
This function returns Some if the asset exists, + // which is all we care about at this point + if pallet_assets::Pallet::::maybe_total_supply(asset_id).is_some() { + let result = { + let selector = match handle.read_selector() { + Ok(selector) => selector, + Err(e) => return Some(Err(e)), + }; + + if let Err(err) = handle.check_function_modifier(match selector { + Action::Approve + | Action::Transfer + | Action::TransferFrom + | Action::Mint + | Action::Burn => FunctionModifier::NonPayable, + _ => FunctionModifier::View, + }) { + return Some(Err(err)); + } + + match selector { + // XC20 + Action::TotalSupply => Self::total_supply(asset_id, handle), + Action::BalanceOf => Self::balance_of(asset_id, handle), + Action::Allowance => Self::allowance(asset_id, handle), + Action::Approve => Self::approve(asset_id, handle), + Action::Transfer => Self::transfer(asset_id, handle), + Action::TransferFrom => Self::transfer_from(asset_id, handle), + Action::Name => Self::name(asset_id, handle), + Action::Symbol => Self::symbol(asset_id, handle), + Action::Decimals => Self::decimals(asset_id, handle), + // XC20+ + Action::MinimumBalance => Self::minimum_balance(asset_id, handle), + Action::Mint => Self::mint(asset_id, handle), + Action::Burn => Self::burn(asset_id, handle), + } + }; + return Some(result); + } + } + None + } + + fn is_precompile(&self, address: H160) -> bool { + if let Some(asset_id) = Runtime::address_to_asset_id(address) { + // If the assetId has non-zero supply + // "total_supply" returns both 0 if the assetId does not exist or if the supply is 0 + // The assumption I am making here is that a 0 supply asset is not interesting from + // the perspective of the precompiles. Once pallet-assets has more publicly accesible + // storage we can use another function for this, like check_asset_existence. 
+ // The other options is to check the asset existence in pallet-asset-manager, but + // this makes the precompiles dependent on such a pallet, which is not ideal + !pallet_assets::Pallet::::total_supply(asset_id).is_zero() + } else { + false + } + } +} + +impl Erc20AssetsPrecompileSet +where + Instance: 'static, + Runtime: pallet_assets::Config + pallet_evm::Config + frame_system::Config, + Runtime::RuntimeCall: Dispatchable + GetDispatchInfo, + Runtime::RuntimeCall: From>, + ::RuntimeOrigin: From>, + BalanceOf: TryFrom + Into + EvmData, + Runtime: AddressToAssetId>, + <::RuntimeCall as Dispatchable>::RuntimeOrigin: OriginTrait, +{ + fn total_supply( + asset_id: AssetIdOf, + handle: &mut impl PrecompileHandle, + ) -> EvmResult { + handle.record_cost(RuntimeHelper::::db_read_gas_cost())?; + + // Fetch info. + let amount: U256 = + pallet_assets::Pallet::::total_issuance(asset_id).into(); + + Ok(succeed(EvmDataWriter::new().write(amount).build())) + } + + fn balance_of( + asset_id: AssetIdOf, + handle: &mut impl PrecompileHandle, + ) -> EvmResult { + handle.record_cost(RuntimeHelper::::db_read_gas_cost())?; + + let mut input = handle.read_input()?; + input.expect_arguments(1)?; + + let owner: H160 = input.read::

()?.into(); + + // Fetch info. + let amount: U256 = { + let owner: Runtime::AccountId = Runtime::AddressMapping::into_account_id(owner); + pallet_assets::Pallet::::balance(asset_id, &owner).into() + }; + + Ok(succeed(EvmDataWriter::new().write(amount).build())) + } + + fn allowance( + asset_id: AssetIdOf, + handle: &mut impl PrecompileHandle, + ) -> EvmResult { + handle.record_cost(RuntimeHelper::::db_read_gas_cost())?; + + let mut input = handle.read_input()?; + input.expect_arguments(2)?; + + let owner: H160 = input.read::
()?.into(); + let spender: H160 = input.read::
()?.into(); + + // Fetch info. + let amount: U256 = { + let owner: Runtime::AccountId = Runtime::AddressMapping::into_account_id(owner); + let spender: Runtime::AccountId = Runtime::AddressMapping::into_account_id(spender); + + // Fetch info. + pallet_assets::Pallet::::allowance(asset_id, &owner, &spender).into() + }; + + Ok(succeed(EvmDataWriter::new().write(amount).build())) + } + + fn approve( + asset_id: AssetIdOf, + handle: &mut impl PrecompileHandle, + ) -> EvmResult { + handle.record_log_costs_manual(3, 32)?; + + let mut input = handle.read_input()?; + input.expect_arguments(2)?; + + let spender: H160 = input.read::
()?.into(); + let amount: U256 = input.read()?; + + { + let origin = Runtime::AddressMapping::into_account_id(handle.context().caller); + let spender: Runtime::AccountId = Runtime::AddressMapping::into_account_id(spender); + // Amount saturate if too high. + let amount: BalanceOf = + amount.try_into().unwrap_or_else(|_| Bounded::max_value()); + + // Allowance read + handle.record_cost(RuntimeHelper::::db_read_gas_cost())?; + + // If previous approval exists, we need to clean it + if pallet_assets::Pallet::::allowance(asset_id, &origin, &spender) + != 0u32.into() + { + RuntimeHelper::::try_dispatch( + handle, + Some(origin.clone()).into(), + pallet_assets::Call::::cancel_approval { + id: asset_id.into(), + delegate: Runtime::Lookup::unlookup(spender.clone()), + }, + )?; + } + // Dispatch call (if enough gas). + RuntimeHelper::::try_dispatch( + handle, + Some(origin).into(), + pallet_assets::Call::::approve_transfer { + id: asset_id.into(), + delegate: Runtime::Lookup::unlookup(spender), + amount, + }, + )?; + } + + LogsBuilder::new(handle.context().address) + .log3( + SELECTOR_LOG_APPROVAL, + handle.context().caller, + spender, + EvmDataWriter::new().write(amount).build(), + ) + .record(handle)?; + + Ok(succeed(EvmDataWriter::new().write(true).build())) + } + + fn transfer( + asset_id: AssetIdOf, + handle: &mut impl PrecompileHandle, + ) -> EvmResult { + handle.record_log_costs_manual(3, 32)?; + + let mut input = handle.read_input()?; + input.expect_arguments(2)?; + + let to: H160 = input.read::
()?.into(); + let amount = input.read::>()?; + + // Build call with origin. + { + let origin = Runtime::AddressMapping::into_account_id(handle.context().caller); + let to = Runtime::AddressMapping::into_account_id(to); + + // Dispatch call (if enough gas). + RuntimeHelper::::try_dispatch( + handle, + Some(origin).into(), + pallet_assets::Call::::transfer { + id: asset_id.into(), + target: Runtime::Lookup::unlookup(to), + amount, + }, + )?; + } + + LogsBuilder::new(handle.context().address) + .log3( + SELECTOR_LOG_TRANSFER, + handle.context().caller, + to, + EvmDataWriter::new().write(amount).build(), + ) + .record(handle)?; + + Ok(succeed(EvmDataWriter::new().write(true).build())) + } + + fn transfer_from( + asset_id: AssetIdOf, + handle: &mut impl PrecompileHandle, + ) -> EvmResult { + handle.record_log_costs_manual(3, 32)?; + + let mut input = handle.read_input()?; + input.expect_arguments(3)?; + + let from: H160 = input.read::
()?.into(); + let to: H160 = input.read::
()?.into(); + let amount = input.read::>()?; + + { + let caller: Runtime::AccountId = + Runtime::AddressMapping::into_account_id(handle.context().caller); + let from: Runtime::AccountId = Runtime::AddressMapping::into_account_id(from); + let to: Runtime::AccountId = Runtime::AddressMapping::into_account_id(to); + + // If caller is "from", it can spend as much as it wants from its own balance. + if caller != from { + // Dispatch call (if enough gas). + RuntimeHelper::::try_dispatch( + handle, + Some(caller).into(), + pallet_assets::Call::::transfer_approved { + id: asset_id.into(), + owner: Runtime::Lookup::unlookup(from), + destination: Runtime::Lookup::unlookup(to), + amount, + }, + )?; + } else { + // Dispatch call (if enough gas). + RuntimeHelper::::try_dispatch( + handle, + Some(from).into(), + pallet_assets::Call::::transfer { + id: asset_id.into(), + target: Runtime::Lookup::unlookup(to), + amount, + }, + )?; + } + } + + LogsBuilder::new(handle.context().address) + .log3( + SELECTOR_LOG_TRANSFER, + from, + to, + EvmDataWriter::new().write(amount).build(), + ) + .record(handle)?; + + Ok(succeed(EvmDataWriter::new().write(true).build())) + } + + fn name( + asset_id: AssetIdOf, + handle: &mut impl PrecompileHandle, + ) -> EvmResult { + handle.record_cost(RuntimeHelper::::db_read_gas_cost())?; + + Ok(succeed( + EvmDataWriter::new() + .write::( + pallet_assets::Pallet::::name(asset_id) + .as_slice() + .into(), + ) + .build(), + )) + } + + fn symbol( + asset_id: AssetIdOf, + handle: &mut impl PrecompileHandle, + ) -> EvmResult { + handle.record_cost(RuntimeHelper::::db_read_gas_cost())?; + + // Build output. + Ok(succeed( + EvmDataWriter::new() + .write::( + pallet_assets::Pallet::::symbol(asset_id) + .as_slice() + .into(), + ) + .build(), + )) + } + + fn decimals( + asset_id: AssetIdOf, + handle: &mut impl PrecompileHandle, + ) -> EvmResult { + handle.record_cost(RuntimeHelper::::db_read_gas_cost())?; + + // Build output. 
+ Ok(succeed( + EvmDataWriter::new() + .write::(pallet_assets::Pallet::::decimals( + asset_id, + )) + .build(), + )) + } + + fn minimum_balance( + asset_id: AssetIdOf, + handle: &mut impl PrecompileHandle, + ) -> EvmResult { + handle.record_cost(RuntimeHelper::::db_read_gas_cost())?; + + let min_balance: U256 = + pallet_assets::Pallet::::minimum_balance(asset_id).into(); + + Ok(succeed(EvmDataWriter::new().write(min_balance).build())) + } + + fn mint( + asset_id: AssetIdOf, + handle: &mut impl PrecompileHandle, + ) -> EvmResult { + let mut input = handle.read_input()?; + input.expect_arguments(2)?; + + let beneficiary: H160 = input.read::
()?.into(); + let amount = input.read::>()?; + + let origin = Runtime::AddressMapping::into_account_id(handle.context().caller); + let beneficiary = Runtime::AddressMapping::into_account_id(beneficiary); + + // Dispatch call (if enough gas). + RuntimeHelper::::try_dispatch( + handle, + Some(origin).into(), + pallet_assets::Call::::mint { + id: asset_id.into(), + beneficiary: Runtime::Lookup::unlookup(beneficiary), + amount, + }, + )?; + + Ok(succeed(EvmDataWriter::new().write(true).build())) + } + + fn burn( + asset_id: AssetIdOf, + handle: &mut impl PrecompileHandle, + ) -> EvmResult { + let mut input = handle.read_input()?; + input.expect_arguments(2)?; + + let who: H160 = input.read::
()?.into(); + let amount = input.read::>()?; + + let origin = Runtime::AddressMapping::into_account_id(handle.context().caller); + let who = Runtime::AddressMapping::into_account_id(who); + + // Dispatch call (if enough gas). + RuntimeHelper::::try_dispatch( + handle, + Some(origin).into(), + pallet_assets::Call::::burn { + id: asset_id.into(), + who: Runtime::Lookup::unlookup(who), + amount, + }, + )?; + + Ok(succeed(EvmDataWriter::new().write(true).build())) + } +} diff --git a/precompiles/assets-erc20/src/mock.rs b/precompiles/assets-erc20/src/mock.rs new file mode 100644 index 0000000000..9e182181b9 --- /dev/null +++ b/precompiles/assets-erc20/src/mock.rs @@ -0,0 +1,329 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +// Copyright 2019-2022 PureStake Inc. +// Copyright 2022 Stake Technologies +// This file is part of AssetsERC20 package, originally developed by Purestake Inc. +// AssetsERC20 package used in Astar Network in terms of GPLv3. +// +// AssetsERC20 is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// AssetsERC20 is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with AssetsERC20. If not, see . +//! Testing utilities. + +use super::*; + +use frame_support::{ + construct_runtime, parameter_types, + traits::{AsEnsureOriginWithArg, Everything}, + weights::Weight, +}; +use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; + +use frame_system::EnsureRoot; +use pallet_evm::{AddressMapping, EnsureAddressNever, EnsureAddressRoot}; +use scale_info::TypeInfo; +use serde::{Deserialize, Serialize}; +use sp_core::{ConstU32, H160, H256}; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, +}; + +pub type AccountId = Account; +pub type AssetId = u128; +pub type Balance = u128; +pub type BlockNumber = u64; +pub type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +pub type Block = frame_system::mocking::MockBlock; + +/// A simple account type. 
+#[derive( + Eq, + PartialEq, + Ord, + PartialOrd, + Clone, + Encode, + Decode, + Debug, + MaxEncodedLen, + Serialize, + Deserialize, + derive_more::Display, + TypeInfo, +)] +pub enum Account { + Alice, + Bob, + Charlie, + Bogus, + AssetId(AssetId), +} + +impl Default for Account { + fn default() -> Self { + Self::Bogus + } +} + +impl AddressMapping for Account { + fn into_account_id(h160_account: H160) -> Account { + match h160_account { + a if a == H160::repeat_byte(0xAA) => Self::Alice, + a if a == H160::repeat_byte(0xBB) => Self::Bob, + a if a == H160::repeat_byte(0xCC) => Self::Charlie, + _ => { + let mut data = [0u8; 16]; + let (prefix_part, id_part) = h160_account.as_fixed_bytes().split_at(4); + if prefix_part == &[255u8; 4] { + data.copy_from_slice(id_part); + + return Self::AssetId(u128::from_be_bytes(data)); + } + Self::Bogus + } + } + } +} + +pub const ASSET_PRECOMPILE_ADDRESS_PREFIX: &[u8] = &[255u8; 4]; + +// Implement the trait, where we convert AccountId to AssetID +impl AddressToAssetId for Runtime { + /// The way to convert an account to assetId is by ensuring that the prefix is 0XFFFFFFFF + /// and by taking the lowest 128 bits as the assetId + fn address_to_asset_id(address: H160) -> Option { + let mut data = [0u8; 16]; + let address_bytes: [u8; 20] = address.into(); + if ASSET_PRECOMPILE_ADDRESS_PREFIX.eq(&address_bytes[0..4]) { + data.copy_from_slice(&address_bytes[4..20]); + Some(u128::from_be_bytes(data)) + } else { + None + } + } + + fn asset_id_to_address(asset_id: AssetId) -> H160 { + let mut data = [0u8; 20]; + data[0..4].copy_from_slice(ASSET_PRECOMPILE_ADDRESS_PREFIX); + data[4..20].copy_from_slice(&asset_id.to_be_bytes()); + H160::from(data) + } +} + +impl From for H160 { + fn from(x: Account) -> H160 { + match x { + Account::Alice => H160::repeat_byte(0xAA), + Account::Bob => H160::repeat_byte(0xBB), + Account::Charlie => H160::repeat_byte(0xCC), + Account::AssetId(asset_id) => { + let mut data = [0u8; 20]; + let id_as_bytes = 
asset_id.to_be_bytes(); + data[0..4].copy_from_slice(&[255u8; 4]); + data[4..20].copy_from_slice(&id_as_bytes); + H160::from_slice(&data) + } + Account::Bogus => Default::default(), + } + } +} + +impl From for H256 { + fn from(x: Account) -> H256 { + let x: H160 = x.into(); + x.into() + } +} + +parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const SS58Prefix: u8 = 42; +} + +impl frame_system::Config for Runtime { + type BaseCallFilter = Everything; + type DbWeight = (); + type RuntimeOrigin = RuntimeOrigin; + type Index = u64; + type BlockNumber = BlockNumber; + type RuntimeCall = RuntimeCall; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Header = Header; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type BlockWeights = (); + type BlockLength = (); + type SS58Prefix = SS58Prefix; + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +parameter_types! { + pub const MinimumPeriod: u64 = 5; +} + +impl pallet_timestamp::Config for Runtime { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = MinimumPeriod; + type WeightInfo = (); +} + +parameter_types! { + pub const ExistentialDeposit: u128 = 0; +} + +impl pallet_balances::Config for Runtime { + type MaxReserves = (); + type ReserveIdentifier = (); + type MaxLocks = (); + type Balance = Balance; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); +} + +parameter_types! 
{ + pub const PrecompilesValue: Erc20AssetsPrecompileSet = + Erc20AssetsPrecompileSet(PhantomData); + pub WeightPerGas: Weight = Weight::from_ref_time(1); +} + +impl pallet_evm::Config for Runtime { + type FeeCalculator = (); + type GasWeightMapping = pallet_evm::FixedGasWeightMapping; + type WeightPerGas = WeightPerGas; + type CallOrigin = EnsureAddressRoot; + type WithdrawOrigin = EnsureAddressNever; + type AddressMapping = AccountId; + type Currency = Balances; + type RuntimeEvent = RuntimeEvent; + type Runner = pallet_evm::runner::stack::Runner; + type PrecompilesType = Erc20AssetsPrecompileSet; + type PrecompilesValue = PrecompilesValue; + type ChainId = (); + type OnChargeTransaction = (); + type BlockGasLimit = (); + type BlockHashMapping = pallet_evm::SubstrateBlockHashMapping; + type FindAuthor = (); + type OnCreate = (); + type WeightInfo = (); +} + +// These parameters dont matter much as this will only be called by root with the forced arguments +// No deposit is substracted with those methods +parameter_types! 
{ + pub const AssetDeposit: Balance = 0; + pub const AssetAccountDeposit: Balance = 0; + pub const ApprovalDeposit: Balance = 0; + pub const AssetsStringLimit: u32 = 50; + pub const MetadataDepositBase: Balance = 0; + pub const MetadataDepositPerByte: Balance = 0; +} + +impl pallet_assets::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Balance = Balance; + type AssetId = AssetId; + type Currency = Balances; + type ForceOrigin = EnsureRoot; + type AssetDeposit = AssetDeposit; + type AssetAccountDeposit = AssetAccountDeposit; + type MetadataDepositBase = MetadataDepositBase; + type MetadataDepositPerByte = MetadataDepositPerByte; + type ApprovalDeposit = ApprovalDeposit; + type StringLimit = AssetsStringLimit; + type Freezer = (); + type Extra = (); + type CreateOrigin = AsEnsureOriginWithArg>; + type WeightInfo = pallet_assets::weights::SubstrateWeight; + type RemoveItemsLimit = ConstU32<0>; + type AssetIdParameter = AssetId; + type CallbackHandle = (); +} + +// Configure a mock runtime to test the pallet. 
+construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system, + Balances: pallet_balances, + Assets: pallet_assets, + Evm: pallet_evm, + Timestamp: pallet_timestamp, + } +); + +pub(crate) struct ExtBuilder { + // endowed accounts with balances + balances: Vec<(AccountId, Balance)>, +} + +impl Default for ExtBuilder { + fn default() -> ExtBuilder { + ExtBuilder { balances: vec![] } + } +} + +impl ExtBuilder { + pub(crate) fn with_balances(mut self, balances: Vec<(AccountId, Balance)>) -> Self { + self.balances = balances; + self + } + + pub(crate) fn build(self) -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::default() + .build_storage::() + .expect("Frame system builds valid default genesis config"); + + pallet_balances::GenesisConfig:: { + balances: self.balances, + } + .assimilate_storage(&mut t) + .expect("Pallet balances storage can be assimilated"); + + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext + } +} diff --git a/precompiles/assets-erc20/src/tests.rs b/precompiles/assets-erc20/src/tests.rs new file mode 100644 index 0000000000..6751a0288c --- /dev/null +++ b/precompiles/assets-erc20/src/tests.rs @@ -0,0 +1,983 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +// Copyright 2019-2022 PureStake Inc. +// Copyright 2022 Stake Technologies +// This file is part of AssetsERC20 package, originally developed by Purestake Inc. +// AssetsERC20 package used in Astar Network in terms of GPLv3. +// +// AssetsERC20 is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// AssetsERC20 is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with AssetsERC20. If not, see . +use frame_support::assert_ok; +use std::str::from_utf8; + +use crate::mock::*; +use crate::*; + +use precompile_utils::{testing::*, EvmDataWriter, LogsBuilder}; +use sha3::{Digest, Keccak256}; + +fn precompiles() -> Erc20AssetsPrecompileSet { + PrecompilesValue::get() +} + +#[test] +fn selector_less_than_four_bytes() { + ExtBuilder::default().build().execute_with(|| { + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + 0u128, + Account::Alice.into(), + true, + 1 + )); + // This selector is only three bytes long when four are required. 
+ precompiles() + .prepare_test(Account::Alice, Account::AssetId(0u128), vec![1u8, 2u8, 3u8]) + .execute_reverts(|output| output == b"tried to parse selector out of bounds"); + }); +} + +#[test] +fn no_selector_exists_but_length_is_right() { + ExtBuilder::default().build().execute_with(|| { + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + 0u128, + Account::Alice.into(), + true, + 1 + )); + + precompiles() + .prepare_test( + Account::Alice, + Account::AssetId(0u128), + vec![1u8, 2u8, 3u8, 4u8], + ) + .execute_reverts(|output| output == b"unknown selector"); + }); +} + +#[test] +fn selectors() { + assert_eq!(Action::BalanceOf as u32, 0x70a08231); + assert_eq!(Action::TotalSupply as u32, 0x18160ddd); + assert_eq!(Action::Approve as u32, 0x095ea7b3); + assert_eq!(Action::Allowance as u32, 0xdd62ed3e); + assert_eq!(Action::Transfer as u32, 0xa9059cbb); + assert_eq!(Action::TransferFrom as u32, 0x23b872dd); + assert_eq!(Action::Name as u32, 0x06fdde03); + assert_eq!(Action::Symbol as u32, 0x95d89b41); + assert_eq!(Action::Decimals as u32, 0x313ce567); + assert_eq!(Action::MinimumBalance as u32, 0xb9d1d49b); + assert_eq!(Action::Mint as u32, 0x40c10f19); + assert_eq!(Action::Burn as u32, 0x9dc29fac); + + assert_eq!( + crate::SELECTOR_LOG_TRANSFER, + &Keccak256::digest(b"Transfer(address,address,uint256)")[..] + ); + + assert_eq!( + crate::SELECTOR_LOG_APPROVAL, + &Keccak256::digest(b"Approval(address,address,uint256)")[..] 
+ ); +} + +#[test] +fn get_total_supply() { + ExtBuilder::default() + .with_balances(vec![(Account::Alice, 1000), (Account::Bob, 2500)]) + .build() + .execute_with(|| { + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + 0u128, + Account::Alice.into(), + true, + 1 + )); + assert_ok!(Assets::mint( + RuntimeOrigin::signed(Account::Alice), + 0u128, + Account::Alice.into(), + 1000 + )); + + precompiles() + .prepare_test( + Account::Alice, + Account::AssetId(0u128), + EvmDataWriter::new_with_selector(Action::TotalSupply).build(), + ) + .expect_cost(0) // TODO: Test db read/write costs + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(U256::from(1000u64)).build()); + }); +} + +#[test] +fn get_balances_known_user() { + ExtBuilder::default() + .with_balances(vec![(Account::Alice, 1000)]) + .build() + .execute_with(|| { + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + 0u128, + Account::Alice.into(), + true, + 1 + )); + assert_ok!(Assets::mint( + RuntimeOrigin::signed(Account::Alice), + 0u128, + Account::Alice.into(), + 1000 + )); + + precompiles() + .prepare_test( + Account::Alice, + Account::AssetId(0u128), + EvmDataWriter::new_with_selector(Action::BalanceOf) + .write(Address(Account::Alice.into())) + .build(), + ) + .expect_cost(0) // TODO: Test db read/write costs + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(U256::from(1000u64)).build()); + }); +} + +#[test] +fn get_balances_unknown_user() { + ExtBuilder::default() + .with_balances(vec![(Account::Alice, 1000)]) + .build() + .execute_with(|| { + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + 0u128, + Account::Alice.into(), + true, + 1 + )); + + precompiles() + .prepare_test( + Account::Alice, + Account::AssetId(0u128), + EvmDataWriter::new_with_selector(Action::BalanceOf) + .write(Address(Account::Bob.into())) + .build(), + ) + .expect_cost(0) // TODO: Test db read/write costs + .expect_no_logs() + 
.execute_returns(EvmDataWriter::new().write(U256::from(0u64)).build()); + }); +} + +#[test] +fn approve() { + ExtBuilder::default() + .with_balances(vec![(Account::Alice, 1000)]) + .build() + .execute_with(|| { + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + 0u128, + Account::Alice.into(), + true, + 1 + )); + assert_ok!(Assets::mint( + RuntimeOrigin::signed(Account::Alice), + 0u128, + Account::Alice.into(), + 1000 + )); + + precompiles() + .prepare_test( + Account::Alice, + Account::AssetId(0u128), + EvmDataWriter::new_with_selector(Action::Approve) + .write(Address(Account::Bob.into())) + .write(U256::from(500)) + .build(), + ) + .expect_log(LogsBuilder::new(Account::AssetId(0u128).into()).log3( + SELECTOR_LOG_APPROVAL, + Account::Alice, + Account::Bob, + EvmDataWriter::new().write(U256::from(500)).build(), + )) + .execute_returns(EvmDataWriter::new().write(true).build()); + }); +} + +#[test] +fn approve_saturating() { + ExtBuilder::default() + .with_balances(vec![(Account::Alice, 1000)]) + .build() + .execute_with(|| { + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + 0u128, + Account::Alice.into(), + true, + 1 + )); + assert_ok!(Assets::mint( + RuntimeOrigin::signed(Account::Alice), + 0u128, + Account::Alice.into(), + 1000 + )); + + precompiles() + .prepare_test( + Account::Alice, + Account::AssetId(0u128), + EvmDataWriter::new_with_selector(Action::Approve) + .write(Address(Account::Bob.into())) + .write(U256::MAX) + .build(), + ) + .expect_log(LogsBuilder::new(Account::AssetId(0u128).into()).log3( + SELECTOR_LOG_APPROVAL, + Account::Alice, + Account::Bob, + EvmDataWriter::new().write(U256::MAX).build(), + )) + .execute_returns(EvmDataWriter::new().write(true).build()); + + precompiles() + .prepare_test( + Account::Alice, + Account::AssetId(0u128), + EvmDataWriter::new_with_selector(Action::Allowance) + .write(Address(Account::Alice.into())) + .write(Address(Account::Bob.into())) + .build(), + ) + .expect_cost(0u64) + .expect_no_logs() 
+ .execute_returns(EvmDataWriter::new().write(U256::from(u128::MAX)).build()); + }); +} + +#[test] +fn check_allowance_existing() { + ExtBuilder::default() + .with_balances(vec![(Account::Alice, 1000)]) + .build() + .execute_with(|| { + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + 0u128, + Account::Alice.into(), + true, + 1 + )); + assert_ok!(Assets::mint( + RuntimeOrigin::signed(Account::Alice), + 0u128, + Account::Alice.into(), + 1000 + )); + + precompiles() + .prepare_test( + Account::Alice, + Account::AssetId(0u128), + EvmDataWriter::new_with_selector(Action::Approve) + .write(Address(Account::Bob.into())) + .write(U256::from(500)) + .build(), + ) + .execute_some(); + + precompiles() + .prepare_test( + Account::Alice, + Account::AssetId(0u128), + EvmDataWriter::new_with_selector(Action::Allowance) + .write(Address(Account::Alice.into())) + .write(Address(Account::Bob.into())) + .build(), + ) + .expect_cost(0) // TODO: Test db read/write costs + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(U256::from(500u64)).build()); + }); +} + +#[test] +fn check_allowance_not_existing() { + ExtBuilder::default() + .with_balances(vec![(Account::Alice, 1000)]) + .build() + .execute_with(|| { + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + 0u128, + Account::Alice.into(), + true, + 1 + )); + + precompiles() + .prepare_test( + Account::Alice, + Account::AssetId(0u128), + EvmDataWriter::new_with_selector(Action::Allowance) + .write(Address(Account::Alice.into())) + .write(Address(Account::Bob.into())) + .build(), + ) + .expect_cost(0) // TODO: Test db read/write costs + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(U256::from(0u64)).build()); + }); +} + +#[test] +fn transfer() { + ExtBuilder::default() + .with_balances(vec![(Account::Alice, 1000)]) + .build() + .execute_with(|| { + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + 0u128, + Account::Alice.into(), + true, + 1 + )); + assert_ok!(Assets::mint( 
+ RuntimeOrigin::signed(Account::Alice), + 0u128, + Account::Alice.into(), + 1000 + )); + + precompiles() + .prepare_test( + Account::Alice, + Account::AssetId(0u128), + EvmDataWriter::new_with_selector(Action::Transfer) + .write(Address(Account::Bob.into())) + .write(U256::from(400)) + .build(), + ) + .expect_log(LogsBuilder::new(Account::AssetId(0u128).into()).log3( + SELECTOR_LOG_TRANSFER, + Account::Alice, + Account::Bob, + EvmDataWriter::new().write(U256::from(400)).build(), + )) + .execute_returns(EvmDataWriter::new().write(true).build()); + + precompiles() + .prepare_test( + Account::Bob, + Account::AssetId(0u128), + EvmDataWriter::new_with_selector(Action::BalanceOf) + .write(Address(Account::Bob.into())) + .build(), + ) + .expect_cost(0) // TODO: Test db read/write costs + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(U256::from(400)).build()); + + precompiles() + .prepare_test( + Account::Alice, + Account::AssetId(0u128), + EvmDataWriter::new_with_selector(Action::BalanceOf) + .write(Address(Account::Alice.into())) + .build(), + ) + .expect_cost(0) // TODO: Test db read/write costs + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(U256::from(600)).build()); + }); +} + +#[test] +fn transfer_not_enough_founds() { + ExtBuilder::default() + .with_balances(vec![(Account::Alice, 1000)]) + .build() + .execute_with(|| { + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + 0u128, + Account::Alice.into(), + true, + 1 + )); + assert_ok!(Assets::mint( + RuntimeOrigin::signed(Account::Alice), + 0u128, + Account::Alice.into(), + 1 + )); + + precompiles() + .prepare_test( + Account::Alice, + Account::AssetId(0u128), + EvmDataWriter::new_with_selector(Action::Transfer) + .write(Address(Account::Charlie.into())) + .write(U256::from(50)) + .build(), + ) + .execute_reverts(|output| { + from_utf8(&output) + .unwrap() + .contains("Dispatched call failed with error: DispatchErrorWithPostInfo") + && 
from_utf8(&output).unwrap().contains("BalanceLow") + }); + }); +} + +#[test] +fn transfer_from() { + ExtBuilder::default() + .with_balances(vec![(Account::Alice, 1000)]) + .build() + .execute_with(|| { + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + 0u128, + Account::Alice.into(), + true, + 1 + )); + assert_ok!(Assets::mint( + RuntimeOrigin::signed(Account::Alice), + 0u128, + Account::Alice.into(), + 1000 + )); + + precompiles() + .prepare_test( + Account::Alice, + Account::AssetId(0u128), + EvmDataWriter::new_with_selector(Action::Approve) + .write(Address(Account::Bob.into())) + .write(U256::from(500)) + .build(), + ) + .execute_some(); + + precompiles() + .prepare_test( + Account::Alice, + Account::AssetId(0u128), + EvmDataWriter::new_with_selector(Action::Approve) + .write(Address(Account::Bob.into())) + .write(U256::from(500)) + .build(), + ) + .execute_some(); + + precompiles() + .prepare_test( + Account::Bob, // Bob is the one sending transferFrom! + Account::AssetId(0u128), + EvmDataWriter::new_with_selector(Action::TransferFrom) + .write(Address(Account::Alice.into())) + .write(Address(Account::Charlie.into())) + .write(U256::from(400)) + .build(), + ) + .expect_log(LogsBuilder::new(Account::AssetId(0u128).into()).log3( + SELECTOR_LOG_TRANSFER, + Account::Alice, + Account::Charlie, + EvmDataWriter::new().write(U256::from(400)).build(), + )) + .execute_returns(EvmDataWriter::new().write(true).build()); + + precompiles() + .prepare_test( + Account::Alice, + Account::AssetId(0u128), + EvmDataWriter::new_with_selector(Action::BalanceOf) + .write(Address(Account::Alice.into())) + .build(), + ) + .expect_cost(0) // TODO: Test db read/write costs + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(U256::from(600)).build()); + + precompiles() + .prepare_test( + Account::Bob, + Account::AssetId(0u128), + EvmDataWriter::new_with_selector(Action::BalanceOf) + .write(Address(Account::Bob.into())) + .build(), + ) + .expect_cost(0) // TODO: 
Test db read/write costs + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(U256::from(0)).build()); + + precompiles() + .prepare_test( + Account::Charlie, + Account::AssetId(0u128), + EvmDataWriter::new_with_selector(Action::BalanceOf) + .write(Address(Account::Charlie.into())) + .build(), + ) + .expect_cost(0) // TODO: Test db read/write costs + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(U256::from(400)).build()); + }); +} + +#[test] +fn transfer_from_non_incremental_approval() { + ExtBuilder::default() + .with_balances(vec![(Account::Alice, 1000)]) + .build() + .execute_with(|| { + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + 0u128, + Account::Alice.into(), + true, + 1 + )); + assert_ok!(Assets::mint( + RuntimeOrigin::signed(Account::Alice), + 0u128, + Account::Alice.into(), + 1000 + )); + + // We first approve 500 + precompiles() + .prepare_test( + Account::Alice, + Account::AssetId(0u128), + EvmDataWriter::new_with_selector(Action::Approve) + .write(Address(Account::Bob.into())) + .write(U256::from(500)) + .build(), + ) + .expect_log(LogsBuilder::new(Account::AssetId(0u128).into()).log3( + SELECTOR_LOG_APPROVAL, + Account::Alice, + Account::Bob, + EvmDataWriter::new().write(U256::from(500)).build(), + )) + .execute_returns(EvmDataWriter::new().write(true).build()); + + // We then approve 300. 
Non-incremental, so this is + // the approved new value + // Additionally, the gas used in this approval is higher because we + // need to clear the previous one + precompiles() + .prepare_test( + Account::Alice, + Account::AssetId(0u128), + EvmDataWriter::new_with_selector(Action::Approve) + .write(Address(Account::Bob.into())) + .write(U256::from(300)) + .build(), + ) + .expect_log(LogsBuilder::new(Account::AssetId(0u128).into()).log3( + SELECTOR_LOG_APPROVAL, + Account::Alice, + Account::Bob, + EvmDataWriter::new().write(U256::from(300)).build(), + )) + .execute_returns(EvmDataWriter::new().write(true).build()); + + // This should fail, as now the new approved quantity is 300 + precompiles() + .prepare_test( + Account::Bob, // Bob is the one sending transferFrom! + Account::AssetId(0u128), + EvmDataWriter::new_with_selector(Action::TransferFrom) + .write(Address(Account::Alice.into())) + .write(Address(Account::Bob.into())) + .write(U256::from(500)) + .build(), + ) + .execute_reverts(|output| { + output + == b"Dispatched call failed with error: DispatchErrorWithPostInfo { \ + post_info: PostDispatchInfo { actual_weight: None, pays_fee: Pays::Yes }, \ + error: Module(ModuleError { index: 2, error: [10, 0, 0, 0], \ + message: Some(\"Unapproved\") }) }" + }); + }); +} + +#[test] +fn transfer_from_above_allowance() { + ExtBuilder::default() + .with_balances(vec![(Account::Alice, 1000)]) + .build() + .execute_with(|| { + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + 0u128, + Account::Alice.into(), + true, + 1 + )); + assert_ok!(Assets::mint( + RuntimeOrigin::signed(Account::Alice), + 0u128, + Account::Alice.into(), + 1000 + )); + + precompiles() + .prepare_test( + Account::Alice, + Account::AssetId(0u128), + EvmDataWriter::new_with_selector(Action::Approve) + .write(Address(Account::Bob.into())) + .write(U256::from(300)) + .build(), + ) + .execute_some(); + + precompiles() + .prepare_test( + Account::Bob, // Bob is the one sending transferFrom! 
+ Account::AssetId(0u128), + EvmDataWriter::new_with_selector(Action::TransferFrom) + .write(Address(Account::Alice.into())) + .write(Address(Account::Bob.into())) + .write(U256::from(400)) + .build(), + ) + .execute_reverts(|output| { + output + == b"Dispatched call failed with error: DispatchErrorWithPostInfo { \ + post_info: PostDispatchInfo { actual_weight: None, pays_fee: Pays::Yes }, \ + error: Module(ModuleError { index: 2, error: [10, 0, 0, 0], \ + message: Some(\"Unapproved\") }) }" + }); + }); +} + +#[test] +fn transfer_from_self() { + ExtBuilder::default() + .with_balances(vec![(Account::Alice, 1000)]) + .build() + .execute_with(|| { + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + 0u128, + Account::Alice.into(), + true, + 1 + )); + assert_ok!(Assets::mint( + RuntimeOrigin::signed(Account::Alice), + 0u128, + Account::Alice.into(), + 1000 + )); + + precompiles() + .prepare_test( + Account::Alice, // Alice sending transferFrom herself, no need for allowance. + Account::AssetId(0u128), + EvmDataWriter::new_with_selector(Action::TransferFrom) + .write(Address(Account::Alice.into())) + .write(Address(Account::Bob.into())) + .write(U256::from(400)) + .build(), + ) + .expect_log(LogsBuilder::new(Account::AssetId(0u128).into()).log3( + SELECTOR_LOG_TRANSFER, + Account::Alice, + Account::Bob, + EvmDataWriter::new().write(U256::from(400)).build(), + )) + .execute_returns(EvmDataWriter::new().write(true).build()); + + precompiles() + .prepare_test( + Account::Alice, + Account::AssetId(0u128), + EvmDataWriter::new_with_selector(Action::BalanceOf) + .write(Address(Account::Alice.into())) + .build(), + ) + .expect_cost(0) // TODO: Test db read/write costs + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(U256::from(600)).build()); + + precompiles() + .prepare_test( + Account::Alice, + Account::AssetId(0u128), + EvmDataWriter::new_with_selector(Action::BalanceOf) + .write(Address(Account::Bob.into())) + .build(), + ) + .expect_cost(0) // 
TODO: Test db read/write costs + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(U256::from(400)).build()); + }); +} + +#[test] +fn get_metadata() { + ExtBuilder::default() + .with_balances(vec![(Account::Alice, 1000), (Account::Bob, 2500)]) + .build() + .execute_with(|| { + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + 0u128, + Account::Alice.into(), + true, + 1 + )); + assert_ok!(Assets::force_set_metadata( + RuntimeOrigin::root(), + 0u128, + b"TestToken".to_vec(), + b"Test".to_vec(), + 12, + false + )); + + precompiles() + .prepare_test( + Account::Alice, + Account::AssetId(0u128), + EvmDataWriter::new_with_selector(Action::Name).build(), + ) + .expect_cost(0) // TODO: Test db read/write costs + .expect_no_logs() + .execute_returns( + EvmDataWriter::new() + .write::("TestToken".into()) + .build(), + ); + + precompiles() + .prepare_test( + Account::Alice, + Account::AssetId(0u128), + EvmDataWriter::new_with_selector(Action::Symbol).build(), + ) + .expect_cost(0) // TODO: Test db read/write costs + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write::("Test".into()).build()); + + precompiles() + .prepare_test( + Account::Alice, + Account::AssetId(0u128), + EvmDataWriter::new_with_selector(Action::Decimals).build(), + ) + .expect_cost(0) // TODO: Test db read/write costs + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(12u8).build()); + }); +} + +#[test] +fn minimum_balance_is_right() { + ExtBuilder::default().build().execute_with(|| { + let expected_min_balance = 19; + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + 0u128, + Account::Alice.into(), + true, + expected_min_balance, + )); + + precompiles() + .prepare_test( + Account::Alice, + Account::AssetId(0u128), + EvmDataWriter::new_with_selector(Action::MinimumBalance).build(), + ) + .expect_cost(0) // TODO: Test db read/write costs + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(expected_min_balance).build()); + }); +} + 
+#[test] +fn mint_is_ok() { + ExtBuilder::default().build().execute_with(|| { + let asset_id = 0; + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + asset_id, + Account::Alice.into(), + true, + 1, + )); + + // Sanity check, Bob should be without assets + assert!(Assets::balance(asset_id, &Account::Bob.into()).is_zero()); + + // Mint some assets for Bob + let mint_amount = 7 * 11 * 19; + precompiles() + .prepare_test( + Account::Alice, + Account::AssetId(asset_id), + EvmDataWriter::new_with_selector(Action::Mint) + .write(Address(Account::Bob.into())) + .write(U256::from(mint_amount)) + .build(), + ) + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(true).build()); + + // Ensure Bob's asset balance was increased + assert_eq!(Assets::balance(asset_id, &Account::Bob.into()), mint_amount); + }); +} + +#[test] +fn mint_non_admin_is_not_ok() { + ExtBuilder::default().build().execute_with(|| { + let asset_id = 0; + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + asset_id, + Account::Alice.into(), + true, + 1, + )); + + precompiles() + .prepare_test( + Account::Bob, + Account::AssetId(asset_id), + EvmDataWriter::new_with_selector(Action::Mint) + .write(Address(Account::Bob.into())) + .write(U256::from(42)) + .build(), + ) + .expect_no_logs() + .execute_reverts(|output| from_utf8(&output).unwrap().contains("NoPermission")); + }); +} + +#[test] +fn burn_is_ok() { + ExtBuilder::default().build().execute_with(|| { + let asset_id = 0; + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + asset_id, + Account::Alice.into(), + true, + 1, + )); + + // Issue some initial assets for Bob + let init_amount = 123; + assert_ok!(Assets::mint( + RuntimeOrigin::signed(Account::Alice), + asset_id, + Account::Bob.into(), + init_amount, + )); + assert_eq!(Assets::balance(asset_id, &Account::Bob.into()), init_amount); + + // Burn some assets from Bob + let burn_amount = 19; + precompiles() + .prepare_test( + Account::Alice, + 
Account::AssetId(asset_id), + EvmDataWriter::new_with_selector(Action::Burn) + .write(Address(Account::Bob.into())) + .write(U256::from(burn_amount)) + .build(), + ) + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(true).build()); + + // Ensure Bob's asset balance was decreased + assert_eq!( + Assets::balance(asset_id, &Account::Bob.into()), + init_amount - burn_amount + ); + }); +} + +#[test] +fn burn_non_admin_is_not_ok() { + ExtBuilder::default().build().execute_with(|| { + let asset_id = 0; + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + asset_id, + Account::Alice.into(), + true, + 1, + )); + assert_ok!(Assets::mint( + RuntimeOrigin::signed(Account::Alice), + asset_id, + Account::Bob.into(), + 1000000, + )); + + precompiles() + .prepare_test( + Account::Bob, + Account::AssetId(asset_id), + EvmDataWriter::new_with_selector(Action::Burn) + .write(Address(Account::Bob.into())) + .write(U256::from(42)) + .build(), + ) + .expect_no_logs() + .execute_reverts(|output| from_utf8(&output).unwrap().contains("NoPermission")); + }); +} diff --git a/precompiles/dapps-staking/Cargo.toml b/precompiles/dapps-staking/Cargo.toml new file mode 100644 index 0000000000..dfd46ac32a --- /dev/null +++ b/precompiles/dapps-staking/Cargo.toml @@ -0,0 +1,55 @@ +[package] +name = "pallet-evm-precompile-dapps-staking" +version = "3.6.3" +license = "Apache-2.0" +description = "dApps Staking EVM precompiles" +authors.workspace = true +edition.workspace = true +homepage.workspace = true +repository.workspace = true + +[dependencies] +log = { workspace = true } +num_enum = { workspace = true } +parity-scale-codec = { workspace = true } +scale-info = { workspace = true } + +frame-support = { workspace = true } +frame-system = { workspace = true } + +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } + +# Astar +pallet-dapps-staking = { workspace = true } +precompile-utils = { workspace = true, default-features = false } 
+ +# Frontier +fp-evm = { workspace = true } +pallet-evm = { workspace = true } + +[dev-dependencies] +derive_more = { workspace = true } +pallet-balances = { workspace = true } +pallet-timestamp = { workspace = true } +precompile-utils = { workspace = true, features = ["testing"] } +serde = { workspace = true } +sha3 = { workspace = true } +sp-io = { workspace = true } + +[features] +default = ["std"] +std = [ + "parity-scale-codec/std", + "scale-info/std", + "sp-std/std", + "sp-core/std", + "sp-runtime/std", + "fp-evm/std", + "frame-support/std", + "frame-system/std", + "pallet-dapps-staking/std", + "pallet-evm/std", + "precompile-utils/std", +] diff --git a/precompiles/dapps-staking/DappsStaking.sol b/precompiles/dapps-staking/DappsStaking.sol new file mode 100644 index 0000000000..b6967d9247 --- /dev/null +++ b/precompiles/dapps-staking/DappsStaking.sol @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: BSD-3-Clause + +pragma solidity >=0.7.0; + +/// Interface to the precompiled contract on Shibuya/Shiden/Astar +/// Predeployed at the address 0x0000000000000000000000000000000000005001 +/// For better understanding check the source code: +/// repo: https://github.com/AstarNetwork/astar +/// code: frame/dapps-staking/src/pallet +interface DappsStaking { + + // Storage getters + + /// @notice Read current era. + /// @return era: The current era + function read_current_era() external view returns (uint256); + + /// @notice Read unbonding period constant. 
+ /// @return period: The unbonding period in eras + function read_unbonding_period() external view returns (uint256); + + /// @notice Read Total network reward for the given era + /// @return reward: Total network reward for the given era + function read_era_reward(uint32 era) external view returns (uint128); + + /// @notice Read Total staked amount for the given era + /// @return staked: Total staked amount for the given era + function read_era_staked(uint32 era) external view returns (uint128); + + /// @notice Read Staked amount for the staker + /// @param staker: The staker address in form of 20 or 32 hex bytes + /// @return amount: Staked amount by the staker + function read_staked_amount(bytes calldata staker) external view returns (uint128); + + /// @notice Read Staked amount on a given contract for the staker + /// @param contract_id: The smart contract address used for staking + /// @param staker: The staker address in form of 20 or 32 hex bytes + /// @return amount: Staked amount by the staker + function read_staked_amount_on_contract(address contract_id, bytes calldata staker) external view returns (uint128); + + /// @notice Read the staked amount from the era when the amount was last staked/unstaked + /// @return total: The most recent total staked amount on contract + function read_contract_stake(address contract_id) external view returns (uint128); + + + // Extrinsic calls + + /// @notice Register is root origin only and not allowed via evm precompile. + /// This should always fail. + function register(address) external; + + /// @notice Stake provided amount on the contract. + function bond_and_stake(address, uint128) external; + + /// @notice Start unbonding process and unstake balance from the contract. + function unbond_and_unstake(address, uint128) external; + + /// @notice Withdraw all funds that have completed the unbonding process. + function withdraw_unbonded() external; + + /// @notice Claim earned staker rewards for the oldest unclaimed era. 
+ /// In order to claim multiple eras, this call has to be called multiple times. + /// Staker account is derived from the caller address. + /// @param smart_contract: The smart contract address used for staking + function claim_staker(address smart_contract) external; + + /// @notice Claim one era of unclaimed dapp rewards for the specified contract and era. + /// @param smart_contract: The smart contract address used for staking + /// @param era: The era to be claimed + function claim_dapp(address smart_contract, uint128 era) external; + + /// Instruction how to handle reward payout for staker. + /// `FreeBalance` - Reward will be paid out to the staker (free balance). + /// `StakeBalance` - Reward will be paid out to the staker and is immediately restaked (locked balance) + enum RewardDestination {FreeBalance, StakeBalance} + + /// @notice Set reward destination for staker rewards + /// @param reward_destination: The instruction on how the reward payout should be handled + function set_reward_destination(RewardDestination reward_destination) external; + + /// @notice Withdraw staked funds from an unregistered contract. + /// @param smart_contract: The smart contract address used for staking + function withdraw_from_unregistered(address smart_contract) external; + + /// @notice Transfer part or entire nomination from origin smart contract to target smart contract + /// @param origin_smart_contract: The origin smart contract address + /// @param amount: The amount to transfer from origin to target + /// @param target_smart_contract: The target smart contract address + function nomination_transfer(address origin_smart_contract, uint128 amount, address target_smart_contract) external; +} diff --git a/precompiles/dapps-staking/src/lib.rs b/precompiles/dapps-staking/src/lib.rs new file mode 100644 index 0000000000..e67fc77762 --- /dev/null +++ b/precompiles/dapps-staking/src/lib.rs @@ -0,0 +1,492 @@ +// This file is part of Astar. 
+ +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +//! Astar dApps staking interface. + +#![cfg_attr(not(feature = "std"), no_std)] +#![cfg_attr(test, feature(assert_matches))] + +use fp_evm::{PrecompileHandle, PrecompileOutput}; +use parity_scale_codec::{Decode, Encode}; + +use frame_support::{ + dispatch::{Dispatchable, GetDispatchInfo, PostDispatchInfo}, + traits::{Currency, Get}, +}; +use pallet_dapps_staking::RewardDestination; +use pallet_evm::{AddressMapping, Precompile}; +use precompile_utils::{ + error, revert, succeed, Address, Bytes, EvmData, EvmDataWriter, EvmResult, FunctionModifier, + PrecompileHandleExt, RuntimeHelper, +}; +use sp_core::H160; +use sp_runtime::traits::{Saturating, Zero}; +use sp_std::marker::PhantomData; +use sp_std::prelude::*; +extern crate alloc; + +type BalanceOf = <::Currency as Currency< + ::AccountId, +>>::Balance; + +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; + +/// This is only used to encode SmartContract enum +#[derive(PartialEq, Eq, Copy, Clone, Encode, Decode, Debug)] +pub enum Contract { + /// EVM smart contract instance. + Evm(H160), + /// Wasm smart contract instance. 
Not used in this precompile + Wasm(A), +} + +pub struct DappsStakingWrapper(PhantomData); + +impl DappsStakingWrapper +where + R: pallet_evm::Config + pallet_dapps_staking::Config, + BalanceOf: EvmData, + ::RuntimeOrigin: From>, + R::RuntimeCall: Dispatchable + GetDispatchInfo, + R::RuntimeCall: From>, + R::AccountId: From<[u8; 32]>, +{ + /// Fetch current era from CurrentEra storage map + fn read_current_era(handle: &mut impl PrecompileHandle) -> EvmResult { + handle.record_cost(RuntimeHelper::::db_read_gas_cost())?; + + let current_era = pallet_dapps_staking::CurrentEra::::get(); + + Ok(succeed(EvmDataWriter::new().write(current_era).build())) + } + + /// Fetch unbonding period + fn read_unbonding_period(handle: &mut impl PrecompileHandle) -> EvmResult { + handle.record_cost(RuntimeHelper::::db_read_gas_cost())?; + + let unbonding_period = R::UnbondingPeriod::get(); + + Ok(succeed( + EvmDataWriter::new().write(unbonding_period).build(), + )) + } + + /// Fetch reward from EraRewardsAndStakes storage map + fn read_era_reward(handle: &mut impl PrecompileHandle) -> EvmResult { + handle.record_cost(RuntimeHelper::::db_read_gas_cost())?; + + let mut input = handle.read_input()?; + input.expect_arguments(1)?; + + // parse input parameters for pallet-dapps-staking call + let era: u32 = input.read::()?; + + // call pallet-dapps-staking + let read_reward = pallet_dapps_staking::GeneralEraInfo::::get(era); + let reward = read_reward.map_or(Zero::zero(), |r| { + r.rewards.stakers.saturating_add(r.rewards.dapps) + }); + + Ok(succeed(EvmDataWriter::new().write(reward).build())) + } + + /// Fetch total staked amount from EraRewardsAndStakes storage map + fn read_era_staked(handle: &mut impl PrecompileHandle) -> EvmResult { + handle.record_cost(RuntimeHelper::::db_read_gas_cost())?; + + // parse input parameters for pallet-dapps-staking call + let mut input = handle.read_input()?; + input.expect_arguments(1)?; + let era: u32 = input.read::()?; + + // call pallet-dapps-staking + 
let reward_and_stake = pallet_dapps_staking::GeneralEraInfo::::get(era); + // compose output + let staked = reward_and_stake.map_or(Zero::zero(), |r| r.staked); + let staked = TryInto::::try_into(staked).unwrap_or(0); + + Ok(succeed(EvmDataWriter::new().write(staked).build())) + } + + /// Fetch Ledger storage map for an account + fn read_staked_amount(handle: &mut impl PrecompileHandle) -> EvmResult { + handle.record_cost(RuntimeHelper::::db_read_gas_cost())?; + + let mut input = handle.read_input()?; + input.expect_arguments(1)?; + + // parse input parameters for pallet-dapps-staking call + let staker_vec: Vec = input.read::()?.into(); + let staker = Self::parse_input_address(staker_vec)?; + + // call pallet-dapps-staking + let ledger = pallet_dapps_staking::Ledger::::get(&staker); + log::trace!(target: "ds-precompile", "read_staked_amount for account:{:?}, ledger.locked:{:?}", staker, ledger.locked); + + Ok(succeed(EvmDataWriter::new().write(ledger.locked).build())) + } + + /// Read GeneralStakerInfo for account/contract + fn read_staked_amount_on_contract( + handle: &mut impl PrecompileHandle, + ) -> EvmResult { + handle.record_cost(RuntimeHelper::::db_read_gas_cost())?; + + let mut input = handle.read_input()?; + input.expect_arguments(2)?; + + // parse contract address + let contract_h160 = input.read::
()?.0; + let contract_id = Self::decode_smart_contract(contract_h160)?; + + // parse input parameters for pallet-dapps-staking call + let staker_vec: Vec = input.read::()?.into(); + let staker = Self::parse_input_address(staker_vec)?; + + // call pallet-dapps-staking + let staking_info = pallet_dapps_staking::GeneralStakerInfo::::get(&staker, &contract_id); + let staked_amount = staking_info.latest_staked_value(); + log::trace!(target: "ds-precompile", "read_staked_amount_on_contract for account:{:?}, contract: {:?} => staked_amount:{:?}", staker, contract_id, staked_amount); + + Ok(succeed(EvmDataWriter::new().write(staked_amount).build())) + } + + /// Read the amount staked on contract in the given era + fn read_contract_stake(handle: &mut impl PrecompileHandle) -> EvmResult { + handle.record_cost(2 * RuntimeHelper::::db_read_gas_cost())?; + + let mut input = handle.read_input()?; + input.expect_arguments(1)?; + + // parse input parameters for pallet-dapps-staking call + let contract_h160 = input.read::
()?.0; + let contract_id = Self::decode_smart_contract(contract_h160)?; + let current_era = pallet_dapps_staking::CurrentEra::::get(); + + // call pallet-dapps-staking + let staking_info = + pallet_dapps_staking::Pallet::::contract_stake_info(&contract_id, current_era) + .unwrap_or_default(); + + // encode output with total + let total = TryInto::::try_into(staking_info.total).unwrap_or(0); + + Ok(succeed(EvmDataWriter::new().write(total).build())) + } + + /// Register contract with the dapp-staking pallet + /// Register is root origin only. This should always fail when called via evm precompile. + fn register(_: &mut impl PrecompileHandle) -> EvmResult { + // register is root-origin call. it should always fail when called via evm precompiles. + Err(error("register via evm precompile is not allowed")) + } + + /// Lock up and stake balance of the origin account. + fn bond_and_stake(handle: &mut impl PrecompileHandle) -> EvmResult { + let mut input = handle.read_input()?; + input.expect_arguments(2)?; + + // parse contract's address + let contract_h160 = input.read::
()?.0; + let contract_id = Self::decode_smart_contract(contract_h160)?; + + // parse balance to be staked + let value: BalanceOf = input.read()?; + + log::trace!(target: "ds-precompile", "bond_and_stake {:?}, {:?}", contract_id, value); + + // Build call with origin. + let origin = R::AddressMapping::into_account_id(handle.context().caller); + let call = pallet_dapps_staking::Call::::bond_and_stake { contract_id, value }; + + RuntimeHelper::::try_dispatch(handle, Some(origin).into(), call)?; + + Ok(succeed(EvmDataWriter::new().write(true).build())) + } + + /// Start unbonding process and unstake balance from the contract. + fn unbond_and_unstake(handle: &mut impl PrecompileHandle) -> EvmResult { + let mut input = handle.read_input()?; + input.expect_arguments(2)?; + + // parse contract's address + let contract_h160 = input.read::
()?.0; + let contract_id = Self::decode_smart_contract(contract_h160)?; + + // parse balance to be unstaked + let value: BalanceOf = input.read()?; + log::trace!(target: "ds-precompile", "unbond_and_unstake {:?}, {:?}", contract_id, value); + + // Build call with origin. + let origin = R::AddressMapping::into_account_id(handle.context().caller); + let call = pallet_dapps_staking::Call::::unbond_and_unstake { contract_id, value }; + + RuntimeHelper::::try_dispatch(handle, Some(origin).into(), call)?; + + Ok(succeed(EvmDataWriter::new().write(true).build())) + } + + /// Start unbonding process and unstake balance from the contract. + fn withdraw_unbonded(handle: &mut impl PrecompileHandle) -> EvmResult { + // Build call with origin. + let origin = R::AddressMapping::into_account_id(handle.context().caller); + let call = pallet_dapps_staking::Call::::withdraw_unbonded {}; + + RuntimeHelper::::try_dispatch(handle, Some(origin).into(), call)?; + + Ok(succeed(EvmDataWriter::new().write(true).build())) + } + + /// Claim rewards for the contract in the dapps-staking pallet + fn claim_dapp(handle: &mut impl PrecompileHandle) -> EvmResult { + let mut input = handle.read_input()?; + input.expect_arguments(2)?; + + // parse contract's address + let contract_h160 = input.read::
()?.0; + let contract_id = Self::decode_smart_contract(contract_h160)?; + + // parse era + let era: u32 = input.read::()?; + log::trace!(target: "ds-precompile", "claim_dapp {:?}, era {:?}", contract_id, era); + + // Build call with origin. + let origin = R::AddressMapping::into_account_id(handle.context().caller); + let call = pallet_dapps_staking::Call::::claim_dapp { contract_id, era }; + + RuntimeHelper::::try_dispatch(handle, Some(origin).into(), call)?; + + Ok(succeed(EvmDataWriter::new().write(true).build())) + } + + /// Claim rewards for the contract in the dapps-staking pallet + fn claim_staker(handle: &mut impl PrecompileHandle) -> EvmResult { + let mut input = handle.read_input()?; + input.expect_arguments(1)?; + + // parse contract's address + let contract_h160 = input.read::
()?.0; + let contract_id = Self::decode_smart_contract(contract_h160)?; + log::trace!(target: "ds-precompile", "claim_staker {:?}", contract_id); + + // Build call with origin. + let origin = R::AddressMapping::into_account_id(handle.context().caller); + let call = pallet_dapps_staking::Call::::claim_staker { contract_id }; + + RuntimeHelper::::try_dispatch(handle, Some(origin).into(), call)?; + + Ok(succeed(EvmDataWriter::new().write(true).build())) + } + + /// Set claim reward destination for the caller + fn set_reward_destination(handle: &mut impl PrecompileHandle) -> EvmResult { + let mut input = handle.read_input()?; + input.expect_arguments(1)?; + + // raw solidity representation of enum + let reward_destination_raw = input.read::()?; + + // Transform raw value into dapps staking enum + let reward_destination = if reward_destination_raw == 0 { + RewardDestination::FreeBalance + } else if reward_destination_raw == 1 { + RewardDestination::StakeBalance + } else { + return Err(error("Unexpected reward destination value.")); + }; + + // Build call with origin. + let origin = R::AddressMapping::into_account_id(handle.context().caller); + log::trace!(target: "ds-precompile", "set_reward_destination {:?} {:?}", origin, reward_destination); + + let call = pallet_dapps_staking::Call::::set_reward_destination { reward_destination }; + + RuntimeHelper::::try_dispatch(handle, Some(origin).into(), call)?; + + Ok(succeed(EvmDataWriter::new().write(true).build())) + } + /// Withdraw staked funds from the unregistered contract + fn withdraw_from_unregistered( + handle: &mut impl PrecompileHandle, + ) -> EvmResult { + let mut input = handle.read_input()?; + input.expect_arguments(1)?; + + // parse contract's address + let contract_h160 = input.read::
()?.0; + let contract_id = Self::decode_smart_contract(contract_h160)?; + log::trace!(target: "ds-precompile", "withdraw_from_unregistered {:?}", contract_id); + + // Build call with origin. + let origin = R::AddressMapping::into_account_id(handle.context().caller); + let call = pallet_dapps_staking::Call::::withdraw_from_unregistered { contract_id }; + + RuntimeHelper::::try_dispatch(handle, Some(origin).into(), call)?; + + Ok(succeed(EvmDataWriter::new().write(true).build())) + } + + /// Claim rewards for the contract in the dapps-staking pallet + fn nomination_transfer(handle: &mut impl PrecompileHandle) -> EvmResult { + let mut input = handle.read_input()?; + input.expect_arguments(3)?; + + // parse origin contract's address + let origin_contract_h160 = input.read::
()?.0; + let origin_contract_id = Self::decode_smart_contract(origin_contract_h160)?; + + // parse balance to be transferred + let value = input.read::>()?; + + // parse target contract's address + let target_contract_h160 = input.read::
()?.0; + let target_contract_id = Self::decode_smart_contract(target_contract_h160)?; + + log::trace!(target: "ds-precompile", "nomination_transfer {:?} {:?} {:?}", origin_contract_id, value, target_contract_id); + + // Build call with origin. + let origin = R::AddressMapping::into_account_id(handle.context().caller); + let call = pallet_dapps_staking::Call::::nomination_transfer { + origin_contract_id, + value, + target_contract_id, + }; + + RuntimeHelper::::try_dispatch(handle, Some(origin).into(), call)?; + + Ok(succeed(EvmDataWriter::new().write(true).build())) + } + + /// Helper method to decode type SmartContract enum + pub fn decode_smart_contract( + contract_h160: H160, + ) -> EvmResult<::SmartContract> { + // Encode contract address to fit SmartContract enum. + // Since the SmartContract enum type can't be accessed from this pecompile, + // use locally defined enum clone (see Contract enum) + let contract_enum_encoded = Contract::::Evm(contract_h160).encode(); + + // encoded enum will add one byte before the contract's address + // therefore we need to decode len(H160) + 1 byte = 21 + let smart_contract = ::SmartContract::decode( + &mut &contract_enum_encoded[..21], + ) + .map_err(|_| revert("Error while decoding SmartContract"))?; + + Ok(smart_contract) + } + + /// Helper method to parse H160 or SS58 address + fn parse_input_address(staker_vec: Vec) -> EvmResult { + let staker: R::AccountId = match staker_vec.len() { + // public address of the ss58 account has 32 bytes + 32 => { + let mut staker_bytes = [0_u8; 32]; + staker_bytes[..].clone_from_slice(&staker_vec[0..32]); + + staker_bytes.into() + } + // public address of the H160 account has 20 bytes + 20 => { + let mut staker_bytes = [0_u8; 20]; + staker_bytes[..].clone_from_slice(&staker_vec[0..20]); + + R::AddressMapping::into_account_id(staker_bytes.into()) + } + _ => { + // Return err if account length is wrong + return Err(revert("Error while parsing staker's address")); + } + }; + + Ok(staker) + } 
+} + +#[precompile_utils::generate_function_selector] +#[derive(Debug, PartialEq)] +pub enum Action { + ReadCurrentEra = "read_current_era()", + ReadUnbondingPeriod = "read_unbonding_period()", + ReadEraReward = "read_era_reward(uint32)", + ReadEraStaked = "read_era_staked(uint32)", + ReadStakedAmount = "read_staked_amount(bytes)", + ReadStakedAmountOnContract = "read_staked_amount_on_contract(address,bytes)", + ReadContractStake = "read_contract_stake(address)", + Register = "register(address)", + BondAndStake = "bond_and_stake(address,uint128)", + UnbondAndUnstake = "unbond_and_unstake(address,uint128)", + WithdrawUnbounded = "withdraw_unbonded()", + ClaimDapp = "claim_dapp(address,uint128)", + ClaimStaker = "claim_staker(address)", + SetRewardDestination = "set_reward_destination(uint8)", + WithdrawFromUnregistered = "withdraw_from_unregistered(address)", + NominationTransfer = "nomination_transfer(address,uint128,address)", +} + +impl Precompile for DappsStakingWrapper +where + R: pallet_evm::Config + pallet_dapps_staking::Config, + R::RuntimeCall: From> + + Dispatchable + + GetDispatchInfo, + ::RuntimeOrigin: From>, + BalanceOf: EvmData, + R::AccountId: From<[u8; 32]>, +{ + fn execute(handle: &mut impl PrecompileHandle) -> EvmResult { + log::trace!(target: "ds-precompile", "Execute input = {:?}", handle.input()); + + let selector = handle.read_selector()?; + + handle.check_function_modifier(match selector { + Action::ReadCurrentEra + | Action::ReadUnbondingPeriod + | Action::ReadEraReward + | Action::ReadEraStaked + | Action::ReadStakedAmount + | Action::ReadStakedAmountOnContract + | Action::ReadContractStake => FunctionModifier::View, + _ => FunctionModifier::NonPayable, + })?; + + match selector { + // read storage + Action::ReadCurrentEra => Self::read_current_era(handle), + Action::ReadUnbondingPeriod => Self::read_unbonding_period(handle), + Action::ReadEraReward => Self::read_era_reward(handle), + Action::ReadEraStaked => Self::read_era_staked(handle), 
+ Action::ReadStakedAmount => Self::read_staked_amount(handle), + Action::ReadStakedAmountOnContract => Self::read_staked_amount_on_contract(handle), + Action::ReadContractStake => Self::read_contract_stake(handle), + // Dispatchables + Action::Register => Self::register(handle), + Action::BondAndStake => Self::bond_and_stake(handle), + Action::UnbondAndUnstake => Self::unbond_and_unstake(handle), + Action::WithdrawUnbounded => Self::withdraw_unbonded(handle), + Action::ClaimDapp => Self::claim_dapp(handle), + Action::ClaimStaker => Self::claim_staker(handle), + Action::SetRewardDestination => Self::set_reward_destination(handle), + Action::WithdrawFromUnregistered => Self::withdraw_from_unregistered(handle), + Action::NominationTransfer => Self::nomination_transfer(handle), + } + } +} diff --git a/precompiles/dapps-staking/src/mock.rs b/precompiles/dapps-staking/src/mock.rs new file mode 100644 index 0000000000..cc6ffc10a1 --- /dev/null +++ b/precompiles/dapps-staking/src/mock.rs @@ -0,0 +1,399 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . 
+ +use super::*; + +use frame_support::{ + construct_runtime, parameter_types, + traits::{Currency, OnFinalize, OnInitialize}, + weights::{RuntimeDbWeight, Weight}, + PalletId, +}; +use pallet_dapps_staking::weights; +use pallet_evm::{ + AddressMapping, EnsureAddressNever, EnsureAddressRoot, PrecompileResult, PrecompileSet, +}; +use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; +use serde::{Deserialize, Serialize}; +use sp_core::{H160, H256}; +use sp_io::TestExternalities; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, ConstU32, IdentityLookup}, + AccountId32, +}; +extern crate alloc; + +pub(crate) type BlockNumber = u64; +pub(crate) type Balance = u128; +pub(crate) type EraIndex = u32; +pub(crate) const MILLIAST: Balance = 1_000_000_000_000_000; +pub(crate) const AST: Balance = 1_000 * MILLIAST; + +pub(crate) const TEST_CONTRACT: H160 = H160::repeat_byte(0x09); + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +/// Value shouldn't be less than 2 for testing purposes, otherwise we cannot test certain corner cases. +pub(crate) const MAX_NUMBER_OF_STAKERS: u32 = 4; +/// Value shouldn't be less than 2 for testing purposes, otherwise we cannot test certain corner cases. +pub(crate) const MINIMUM_STAKING_AMOUNT: Balance = 10 * AST; +pub(crate) const MINIMUM_REMAINING_AMOUNT: Balance = 1; +pub(crate) const MAX_UNLOCKING_CHUNKS: u32 = 4; +pub(crate) const UNBONDING_PERIOD: EraIndex = 3; +pub(crate) const MAX_ERA_STAKE_VALUES: u32 = 10; + +// Do note that this needs to at least be 3 for tests to be valid. It can be greater but not smaller. 
+pub(crate) const BLOCKS_PER_ERA: BlockNumber = 3; + +pub(crate) const REGISTER_DEPOSIT: Balance = 10 * AST; + +pub(crate) const STAKER_BLOCK_REWARD: Balance = 531911; +pub(crate) const DAPP_BLOCK_REWARD: Balance = 773333; + +#[derive( + Eq, + PartialEq, + Ord, + PartialOrd, + Clone, + Encode, + Decode, + Debug, + MaxEncodedLen, + Serialize, + Deserialize, + derive_more::Display, + scale_info::TypeInfo, +)] + +pub enum TestAccount { + Empty, + Alex, + Bobo, + Dino, +} + +impl Default for TestAccount { + fn default() -> Self { + Self::Empty + } +} + +// needed for associated type in pallet_evm +impl AddressMapping for TestAccount { + fn into_account_id(h160_account: H160) -> AccountId32 { + match h160_account { + a if a == H160::repeat_byte(0x01) => TestAccount::Alex.into(), + a if a == H160::repeat_byte(0x02) => TestAccount::Bobo.into(), + a if a == H160::repeat_byte(0x03) => TestAccount::Dino.into(), + _ => TestAccount::Empty.into(), + } + } +} + +impl From for H160 { + fn from(x: TestAccount) -> H160 { + match x { + TestAccount::Alex => H160::repeat_byte(0x01), + TestAccount::Bobo => H160::repeat_byte(0x02), + TestAccount::Dino => H160::repeat_byte(0x03), + _ => Default::default(), + } + } +} + +trait H160Conversion { + fn to_h160(&self) -> H160; +} + +impl H160Conversion for AccountId32 { + fn to_h160(&self) -> H160 { + let x = self.encode()[31]; + H160::repeat_byte(x) + } +} + +impl From for AccountId32 { + fn from(x: TestAccount) -> Self { + match x { + TestAccount::Alex => AccountId32::from([1u8; 32]), + TestAccount::Bobo => AccountId32::from([2u8; 32]), + TestAccount::Dino => AccountId32::from([3u8; 32]), + _ => AccountId32::from([0u8; 32]), + } + } +} + +pub const READ_WEIGHT: u64 = 3; +pub const WRITE_WEIGHT: u64 = 7; + +parameter_types! 
{ + pub const BlockHashCount: u64 = 250; + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(Weight::from_ref_time(1024)); + pub const TestWeights: RuntimeDbWeight = RuntimeDbWeight { + read: READ_WEIGHT, + write: WRITE_WEIGHT, + }; +} + +impl frame_system::Config for TestRuntime { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type RuntimeOrigin = RuntimeOrigin; + type Index = u64; + type RuntimeCall = RuntimeCall; + type BlockNumber = BlockNumber; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId32; + type Lookup = IdentityLookup; + type Header = Header; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = BlockHashCount; + type DbWeight = TestWeights; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +parameter_types! 
{ + pub const ExistentialDeposit: u128 = 1; +} +impl pallet_balances::Config for TestRuntime { + type MaxReserves = (); + type ReserveIdentifier = [u8; 4]; + type MaxLocks = (); + type Balance = Balance; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); +} + +pub fn precompile_address() -> H160 { + H160::from_low_u64_be(0x5001) +} + +#[derive(Debug, Clone, Copy)] +pub struct DappPrecompile(PhantomData); + +impl PrecompileSet for DappPrecompile +where + R: pallet_evm::Config, + DappsStakingWrapper: Precompile, +{ + fn execute(&self, handle: &mut impl PrecompileHandle) -> Option { + match handle.code_address() { + a if a == precompile_address() => Some(DappsStakingWrapper::::execute(handle)), + _ => None, + } + } + + fn is_precompile(&self, address: sp_core::H160) -> bool { + address == precompile_address() + } +} + +parameter_types! { + pub PrecompilesValue: DappPrecompile = DappPrecompile(Default::default()); + pub WeightPerGas: Weight = Weight::from_ref_time(1); +} + +impl pallet_evm::Config for TestRuntime { + type FeeCalculator = (); + type GasWeightMapping = pallet_evm::FixedGasWeightMapping; + type WeightPerGas = WeightPerGas; + type CallOrigin = EnsureAddressRoot; + type WithdrawOrigin = EnsureAddressNever; + type AddressMapping = TestAccount; + type Currency = Balances; + type RuntimeEvent = RuntimeEvent; + type Runner = pallet_evm::runner::stack::Runner; + type PrecompilesType = DappPrecompile; + type PrecompilesValue = PrecompilesValue; + type ChainId = (); + type OnChargeTransaction = (); + type BlockGasLimit = (); + type BlockHashMapping = pallet_evm::SubstrateBlockHashMapping; + type FindAuthor = (); + type OnCreate = (); + type WeightInfo = (); +} + +parameter_types! 
{ + pub const MinimumPeriod: u64 = 5; +} +impl pallet_timestamp::Config for TestRuntime { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = MinimumPeriod; + type WeightInfo = (); +} + +#[derive( + PartialEq, Eq, Copy, Clone, Encode, Decode, Debug, scale_info::TypeInfo, MaxEncodedLen, +)] +pub enum MockSmartContract { + Evm(sp_core::H160), + Wasm(AccountId32), +} + +impl Default for MockSmartContract { + fn default() -> Self { + MockSmartContract::Evm(H160::repeat_byte(0x00)) + } +} + +parameter_types! { + pub const RegisterDeposit: Balance = REGISTER_DEPOSIT; + pub const BlockPerEra: BlockNumber = BLOCKS_PER_ERA; + pub const MaxNumberOfStakersPerContract: u32 = MAX_NUMBER_OF_STAKERS; + pub const MinimumStakingAmount: Balance = MINIMUM_STAKING_AMOUNT; + pub const DappsStakingPalletId: PalletId = PalletId(*b"mokdpstk"); + pub const MinimumRemainingAmount: Balance = MINIMUM_REMAINING_AMOUNT; + pub const MaxUnlockingChunks: u32 = MAX_UNLOCKING_CHUNKS; + pub const UnbondingPeriod: EraIndex = UNBONDING_PERIOD; + pub const MaxEraStakeValues: u32 = MAX_ERA_STAKE_VALUES; +} + +impl pallet_dapps_staking::Config for TestRuntime { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type BlockPerEra = BlockPerEra; + type RegisterDeposit = RegisterDeposit; + type SmartContract = MockSmartContract; + type WeightInfo = weights::SubstrateWeight; + type MaxNumberOfStakersPerContract = MaxNumberOfStakersPerContract; + type MinimumStakingAmount = MinimumStakingAmount; + type PalletId = DappsStakingPalletId; + type MinimumRemainingAmount = MinimumRemainingAmount; + type MaxUnlockingChunks = MaxUnlockingChunks; + type UnbondingPeriod = UnbondingPeriod; + type MaxEraStakeValues = MaxEraStakeValues; + type UnregisteredDappRewardRetention = ConstU32<2>; +} + +pub struct ExternalityBuilder { + balances: Vec<(AccountId32, Balance)>, +} + +impl Default for ExternalityBuilder { + fn default() -> ExternalityBuilder { + ExternalityBuilder { balances: vec![] } 
+ } +} + +impl ExternalityBuilder { + pub fn build(self) -> TestExternalities { + let mut storage = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + + pallet_balances::GenesisConfig:: { + balances: self.balances, + } + .assimilate_storage(&mut storage) + .ok(); + + let mut ext = TestExternalities::from(storage); + ext.execute_with(|| System::set_block_number(1)); + ext + } + + pub(crate) fn with_balances(mut self, balances: Vec<(AccountId32, Balance)>) -> Self { + self.balances = balances; + self + } +} + +construct_runtime!( + pub struct TestRuntime + where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system, + Balances: pallet_balances, + Evm: pallet_evm, + Timestamp: pallet_timestamp, + DappsStaking: pallet_dapps_staking, + } +); + +/// Used to run to the specified block number +pub fn run_to_block(n: u64) { + while System::block_number() < n { + DappsStaking::on_finalize(System::block_number()); + System::set_block_number(System::block_number() + 1); + // This is performed outside of dapps staking but we expect it before on_initialize + payout_block_rewards(); + DappsStaking::on_initialize(System::block_number()); + } +} + +/// Used to run the specified number of blocks +pub fn run_for_blocks(n: u64) { + run_to_block(System::block_number() + n); +} + +/// Advance blocks to the beginning of an era. +/// +/// Function has no effect if era is already passed. +pub fn advance_to_era(n: EraIndex) { + while DappsStaking::current_era() < n { + run_for_blocks(1); + } +} + +/// Initialize first block. +/// This method should only be called once in a UT otherwise the first block will get initialized multiple times. 
+pub fn initialize_first_block() { + // This assert prevents method misuse + assert_eq!(System::block_number(), 1 as BlockNumber); + + // This is performed outside of dapps staking but we expect it before on_initialize + payout_block_rewards(); + DappsStaking::on_initialize(System::block_number()); + run_to_block(2); +} + +/// Returns total block rewards that goes to dapps-staking. +/// Contains both `dapps` reward and `stakers` reward. +pub fn joint_block_reward() -> Balance { + STAKER_BLOCK_REWARD + DAPP_BLOCK_REWARD +} + +/// Payout block rewards to stakers & dapps +fn payout_block_rewards() { + DappsStaking::rewards( + Balances::issue(STAKER_BLOCK_REWARD.into()), + Balances::issue(DAPP_BLOCK_REWARD.into()), + ); +} diff --git a/precompiles/dapps-staking/src/tests.rs b/precompiles/dapps-staking/src/tests.rs new file mode 100644 index 0000000000..f4f409d2d1 --- /dev/null +++ b/precompiles/dapps-staking/src/tests.rs @@ -0,0 +1,695 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . 
+ +extern crate alloc; +use crate::{ + mock::{ + advance_to_era, initialize_first_block, precompile_address, DappsStaking, EraIndex, + ExternalityBuilder, RuntimeOrigin, TestAccount, AST, UNBONDING_PERIOD, *, + }, + *, +}; +use fp_evm::ExitError; +use frame_support::assert_ok; +use pallet_dapps_staking::RewardDestination; +use precompile_utils::testing::*; +use sp_core::H160; +use sp_runtime::{traits::Zero, AccountId32, Perbill}; + +fn precompiles() -> DappPrecompile { + PrecompilesValue::get() +} + +#[test] +fn current_era_is_ok() { + ExternalityBuilder::default().build().execute_with(|| { + initialize_first_block(); + + let current_era = DappsStaking::current_era(); + + precompiles() + .prepare_test( + TestAccount::Alex, + precompile_address(), + EvmDataWriter::new_with_selector(Action::ReadCurrentEra).build(), + ) + .expect_cost(READ_WEIGHT) + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(current_era).build()); + + // advance to era 5 and check output + advance_to_era(5); + let current_era = DappsStaking::current_era(); + + precompiles() + .prepare_test( + TestAccount::Alex, + precompile_address(), + EvmDataWriter::new_with_selector(Action::ReadCurrentEra).build(), + ) + .expect_cost(READ_WEIGHT) + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(current_era).build()); + }); +} + +#[test] +fn read_unbonding_period_is_ok() { + ExternalityBuilder::default().build().execute_with(|| { + initialize_first_block(); + + precompiles() + .prepare_test( + TestAccount::Alex, + precompile_address(), + EvmDataWriter::new_with_selector(Action::ReadUnbondingPeriod).build(), + ) + .expect_cost(READ_WEIGHT) + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(UNBONDING_PERIOD).build()); + }); +} + +#[test] +fn read_era_reward_is_ok() { + ExternalityBuilder::default().build().execute_with(|| { + initialize_first_block(); + + advance_to_era(3); + let era_reward = joint_block_reward() * BLOCKS_PER_ERA as u128; + let second_era: EraIndex 
= 2; + + precompiles() + .prepare_test( + TestAccount::Alex, + precompile_address(), + EvmDataWriter::new_with_selector(Action::ReadEraReward) + .write(second_era) + .build(), + ) + .expect_cost(READ_WEIGHT) + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(era_reward).build()); + }); +} + +#[test] +fn read_era_staked_is_ok() { + ExternalityBuilder::default().build().execute_with(|| { + initialize_first_block(); + + let zero_era = EraIndex::zero(); + let staked = Balance::zero(); + + precompiles() + .prepare_test( + TestAccount::Alex, + precompile_address(), + EvmDataWriter::new_with_selector(Action::ReadEraStaked) + .write(zero_era) + .build(), + ) + .expect_cost(READ_WEIGHT) + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(staked).build()); + }); +} + +#[test] +fn register_via_precompile_fails() { + ExternalityBuilder::default() + .with_balances(vec![(TestAccount::Alex.into(), 200 * AST)]) + .build() + .execute_with(|| { + initialize_first_block(); + + precompiles() + .prepare_test( + TestAccount::Alex, + precompile_address(), + EvmDataWriter::new_with_selector(Action::Register) + .write(Address(TEST_CONTRACT.clone())) + .build(), + ) + .expect_no_logs() + .execute_error(ExitError::Other(alloc::borrow::Cow::Borrowed( + "register via evm precompile is not allowed", + ))); + }); +} + +#[test] +fn bond_and_stake_is_ok() { + ExternalityBuilder::default() + .with_balances(vec![ + (TestAccount::Alex.into(), 200 * AST), + (TestAccount::Bobo.into(), 200 * AST), + (TestAccount::Dino.into(), 100 * AST), + ]) + .build() + .execute_with(|| { + initialize_first_block(); + + register_and_verify(TestAccount::Alex, TEST_CONTRACT); + + let amount_staked_bobo = 100 * AST; + bond_stake_and_verify(TestAccount::Bobo, TEST_CONTRACT, amount_staked_bobo); + + let amount_staked_dino = 50 * AST; + bond_stake_and_verify(TestAccount::Dino, TEST_CONTRACT, amount_staked_dino); + + contract_era_stake_verify(TEST_CONTRACT, amount_staked_bobo + 
amount_staked_dino); + verify_staked_amount(TEST_CONTRACT, TestAccount::Bobo.into(), amount_staked_bobo); + verify_staked_amount(TEST_CONTRACT, TestAccount::Dino.into(), amount_staked_dino); + }); +} + +#[test] +fn unbond_and_unstake_is_ok() { + ExternalityBuilder::default() + .with_balances(vec![ + (TestAccount::Alex.into(), 200 * AST), + (TestAccount::Bobo.into(), 200 * AST), + (TestAccount::Dino.into(), 100 * AST), + ]) + .build() + .execute_with(|| { + initialize_first_block(); + + // register new contract by Alex + let developer = TestAccount::Alex.into(); + register_and_verify(developer, TEST_CONTRACT); + + let amount_staked_bobo = 100 * AST; + bond_stake_and_verify(TestAccount::Bobo, TEST_CONTRACT, amount_staked_bobo); + let amount_staked_dino = 50 * AST; + bond_stake_and_verify(TestAccount::Dino, TEST_CONTRACT, amount_staked_dino); + + // Bobo unstakes all + let era = 2; + advance_to_era(era); + unbond_unstake_and_verify(TestAccount::Bobo, TEST_CONTRACT, amount_staked_bobo); + + contract_era_stake_verify(TEST_CONTRACT, amount_staked_dino); + verify_staked_amount(TEST_CONTRACT, TestAccount::Dino, amount_staked_dino); + + // withdraw unbonded funds + advance_to_era(era + UNBONDING_PERIOD + 1); + withdraw_unbonded_verify(TestAccount::Bobo); + }); +} + +#[test] +fn claim_dapp_is_ok() { + ExternalityBuilder::default() + .with_balances(vec![ + (TestAccount::Alex.into(), 200 * AST), + (TestAccount::Bobo.into(), 200 * AST), + (TestAccount::Dino.into(), 200 * AST), + ]) + .build() + .execute_with(|| { + initialize_first_block(); + + // register new contract by Alex + let developer = TestAccount::Alex; + register_and_verify(developer, TEST_CONTRACT); + + let stake_amount_total = 300 * AST; + let ratio_bobo = Perbill::from_rational(3u32, 5u32); + let ratio_dino = Perbill::from_rational(2u32, 5u32); + let amount_staked_bobo = ratio_bobo * stake_amount_total; + bond_stake_and_verify(TestAccount::Bobo, TEST_CONTRACT, amount_staked_bobo); + + let amount_staked_dino = 
ratio_dino * stake_amount_total; + bond_stake_and_verify(TestAccount::Dino, TEST_CONTRACT, amount_staked_dino); + + // advance era and claim reward + let era = 5; + advance_to_era(era); + claim_dapp_and_verify(TEST_CONTRACT, era - 1); + + // check that the reward is paid out to the developer + let developer_reward = DAPP_BLOCK_REWARD * BLOCKS_PER_ERA as Balance; + assert_eq!( + ::Currency::free_balance( + &TestAccount::Alex.into() + ), + (200 * AST) + developer_reward - REGISTER_DEPOSIT + ); + }); +} + +#[test] +fn claim_staker_is_ok() { + ExternalityBuilder::default() + .with_balances(vec![ + (TestAccount::Alex.into(), 200 * AST), + (TestAccount::Bobo.into(), 200 * AST), + (TestAccount::Dino.into(), 200 * AST), + ]) + .build() + .execute_with(|| { + initialize_first_block(); + + // register new contract by Alex + let developer = TestAccount::Alex; + register_and_verify(developer, TEST_CONTRACT); + + let stake_amount_total = 300 * AST; + let ratio_bobo = Perbill::from_rational(3u32, 5u32); + let ratio_dino = Perbill::from_rational(2u32, 5u32); + let amount_staked_bobo = ratio_bobo * stake_amount_total; + bond_stake_and_verify(TestAccount::Bobo, TEST_CONTRACT, amount_staked_bobo); + + let amount_staked_dino = ratio_dino * stake_amount_total; + bond_stake_and_verify(TestAccount::Dino, TEST_CONTRACT, amount_staked_dino); + + // advance era and claim reward + advance_to_era(5); + + let stakers_reward = STAKER_BLOCK_REWARD * BLOCKS_PER_ERA as Balance; + + // Ensure that all rewards can be claimed for the first staker + for era in 1..DappsStaking::current_era() as Balance { + claim_staker_and_verify(TestAccount::Bobo, TEST_CONTRACT); + assert_eq!( + ::Currency::free_balance( + &TestAccount::Bobo.into() + ), + (200 * AST) + ratio_bobo * stakers_reward * era + ); + } + + // Repeat the same thing for the second staker + for era in 1..DappsStaking::current_era() as Balance { + claim_staker_and_verify(TestAccount::Dino, TEST_CONTRACT); + assert_eq!( + 
::Currency::free_balance( + &TestAccount::Dino.into() + ), + (200 * AST) + ratio_dino * stakers_reward * era + ); + } + }); +} + +#[test] +fn set_reward_destination() { + ExternalityBuilder::default() + .with_balances(vec![ + (TestAccount::Alex.into(), 200 * AST), + (TestAccount::Bobo.into(), 200 * AST), + ]) + .build() + .execute_with(|| { + initialize_first_block(); + // register contract and stake it + register_and_verify(TestAccount::Alex.into(), TEST_CONTRACT); + + // bond & stake the origin contract + bond_stake_and_verify(TestAccount::Bobo, TEST_CONTRACT, 100 * AST); + + // change destinations and verify it was successful + set_reward_destination_verify(TestAccount::Bobo.into(), RewardDestination::FreeBalance); + set_reward_destination_verify( + TestAccount::Bobo.into(), + RewardDestination::StakeBalance, + ); + set_reward_destination_verify(TestAccount::Bobo.into(), RewardDestination::FreeBalance); + }); +} + +#[test] +fn withdraw_from_unregistered() { + ExternalityBuilder::default() + .with_balances(vec![ + (TestAccount::Alex.into(), 200 * AST), + (TestAccount::Bobo.into(), 200 * AST), + ]) + .build() + .execute_with(|| { + initialize_first_block(); + + // register new contract by Alex + let developer = TestAccount::Alex.into(); + register_and_verify(developer, TEST_CONTRACT); + + let amount_staked_bobo = 100 * AST; + bond_stake_and_verify(TestAccount::Bobo, TEST_CONTRACT, amount_staked_bobo); + + let contract_id = + decode_smart_contract_from_array(TEST_CONTRACT.clone().to_fixed_bytes()).unwrap(); + assert_ok!(DappsStaking::unregister(RuntimeOrigin::root(), contract_id)); + + withdraw_from_unregistered_verify(TestAccount::Bobo.into(), TEST_CONTRACT); + }); +} + +#[test] +fn nomination_transfer() { + ExternalityBuilder::default() + .with_balances(vec![ + (TestAccount::Alex.into(), 200 * AST), + (TestAccount::Dino.into(), 200 * AST), + (TestAccount::Bobo.into(), 200 * AST), + ]) + .build() + .execute_with(|| { + initialize_first_block(); + + // register two 
contracts for nomination transfer test + let origin_contract = H160::repeat_byte(0x09); + let target_contract = H160::repeat_byte(0x0A); + register_and_verify(TestAccount::Alex.into(), origin_contract); + register_and_verify(TestAccount::Dino.into(), target_contract); + + // bond & stake the origin contract + let amount_staked_bobo = 100 * AST; + bond_stake_and_verify(TestAccount::Bobo, origin_contract, amount_staked_bobo); + + // transfer nomination and ensure it was successful + nomination_transfer_verify( + TestAccount::Bobo, + origin_contract, + 10 * AST, + target_contract, + ); + }); +} + +// **************************************************************************************************** +// Helper functions +// **************************************************************************************************** + +/// helper function to register and verify if registration is valid +fn register_and_verify(developer: TestAccount, contract: H160) { + let smart_contract = + decode_smart_contract_from_array(contract.clone().to_fixed_bytes()).unwrap(); + DappsStaking::register( + RuntimeOrigin::root(), + developer.clone().into(), + smart_contract, + ) + .unwrap(); + + // check the storage after the register + let dev_account_id: AccountId32 = developer.into(); + let smart_contract_bytes = + (DappsStaking::registered_contract(dev_account_id).unwrap_or_default()).encode(); + + assert_eq!( + // 0-th byte is enum value discriminator + smart_contract_bytes[1..21], + contract.to_fixed_bytes() + ); +} + +/// helper function to read ledger storage item +fn read_staked_amount_h160_verify(staker: TestAccount, amount: u128) { + precompiles() + .prepare_test( + staker.clone(), + precompile_address(), + EvmDataWriter::new_with_selector(Action::ReadStakedAmount) + .write(Bytes( + Into::::into(staker.clone()).to_fixed_bytes().to_vec(), + )) + .build(), + ) + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(amount).build()); +} + +/// helper function to read 
ledger storage item for ss58 account +fn read_staked_amount_ss58_verify(staker: TestAccount, amount: u128) { + let staker_acc_id: AccountId32 = staker.clone().into(); + + precompiles() + .prepare_test( + staker.clone(), + precompile_address(), + EvmDataWriter::new_with_selector(Action::ReadStakedAmount) + .write(Bytes(staker_acc_id.encode())) + .build(), + ) + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(amount).build()); +} + +/// helper function to bond, stake and verify if result is OK +fn bond_stake_and_verify(staker: TestAccount, contract: H160, amount: u128) { + precompiles() + .prepare_test( + staker.clone(), + precompile_address(), + EvmDataWriter::new_with_selector(Action::BondAndStake) + .write(Address(contract.clone())) + .write(amount) + .build(), + ) + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(true).build()); + + read_staked_amount_h160_verify(staker.clone(), amount); + read_staked_amount_ss58_verify(staker, amount); +} + +/// helper function to unbond, unstake and verify if result is OK +fn unbond_unstake_and_verify(staker: TestAccount, contract: H160, amount: u128) { + precompiles() + .prepare_test( + staker.clone(), + precompile_address(), + EvmDataWriter::new_with_selector(Action::UnbondAndUnstake) + .write(Address(contract.clone())) + .write(amount) + .build(), + ) + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(true).build()); +} + +/// helper function to withdraw unstaked funds and verify if result is OK +fn withdraw_unbonded_verify(staker: TestAccount) { + let staker_acc_id = AccountId32::from(staker.clone()); + + // call unbond_and_unstake(). 
Check usable_balance before and after the call + assert_ne!( + ::Currency::free_balance(&staker_acc_id), + ::Currency::usable_balance(&staker_acc_id) + ); + + precompiles() + .prepare_test( + staker.clone(), + precompile_address(), + EvmDataWriter::new_with_selector(Action::WithdrawUnbounded).build(), + ) + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(true).build()); + + assert_eq!( + ::Currency::free_balance(&staker_acc_id), + ::Currency::usable_balance(&staker_acc_id) + ); +} + +/// helper function to verify change of reward destination for a staker +fn set_reward_destination_verify(staker: TestAccount, reward_destination: RewardDestination) { + // Read staker's ledger + let staker_acc_id = AccountId32::from(staker.clone()); + let init_ledger = DappsStaking::ledger(&staker_acc_id); + // Ensure that something is staked or being unbonded + assert!(!init_ledger.is_empty()); + + let reward_destination_raw: u8 = match reward_destination { + RewardDestination::FreeBalance => 0, + RewardDestination::StakeBalance => 1, + }; + precompiles() + .prepare_test( + staker.clone(), + precompile_address(), + EvmDataWriter::new_with_selector(Action::SetRewardDestination) + .write(reward_destination_raw) + .build(), + ) + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(true).build()); + + let final_ledger = DappsStaking::ledger(&staker_acc_id); + assert_eq!(final_ledger.reward_destination(), reward_destination); +} + +/// helper function to withdraw funds from unregistered contract +fn withdraw_from_unregistered_verify(staker: TestAccount, contract: H160) { + let smart_contract = + decode_smart_contract_from_array(contract.clone().to_fixed_bytes()).unwrap(); + let staker_acc_id = AccountId32::from(staker.clone()); + let init_staker_info = DappsStaking::staker_info(&staker_acc_id, &smart_contract); + assert!(!init_staker_info.latest_staked_value().is_zero()); + + precompiles() + .prepare_test( + staker.clone(), + precompile_address(), + 
EvmDataWriter::new_with_selector(Action::WithdrawFromUnregistered) + .write(Address(contract.clone())) + .build(), + ) + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(true).build()); + + let final_staker_info = DappsStaking::staker_info(&staker_acc_id, &smart_contract); + assert!(final_staker_info.latest_staked_value().is_zero()); +} + +/// helper function to verify nomination transfer from origin to target contract +fn nomination_transfer_verify( + staker: TestAccount, + origin_contract: H160, + amount: Balance, + target_contract: H160, +) { + let origin_smart_contract = + decode_smart_contract_from_array(origin_contract.clone().to_fixed_bytes()).unwrap(); + let target_smart_contract = + decode_smart_contract_from_array(target_contract.clone().to_fixed_bytes()).unwrap(); + let staker_acc_id = AccountId32::from(staker.clone()); + + // Read init data staker info states + let init_origin_staker_info = DappsStaking::staker_info(&staker_acc_id, &origin_smart_contract); + let init_target_staker_info = DappsStaking::staker_info(&staker_acc_id, &target_smart_contract); + + precompiles() + .prepare_test( + staker.clone(), + precompile_address(), + EvmDataWriter::new_with_selector(Action::NominationTransfer) + .write(Address(origin_contract.clone())) + .write(amount) + .write(Address(target_contract.clone())) + .build(), + ) + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(true).build()); + + let final_origin_staker_info = + DappsStaking::staker_info(&staker_acc_id, &origin_smart_contract); + let final_target_staker_info = + DappsStaking::staker_info(&staker_acc_id, &target_smart_contract); + + // Verify final state + let will_be_unstaked = init_origin_staker_info + .latest_staked_value() + .saturating_sub(amount) + < MINIMUM_STAKING_AMOUNT; + let transfer_amount = if will_be_unstaked { + init_origin_staker_info.latest_staked_value() + } else { + amount + }; + + assert_eq!( + final_origin_staker_info.latest_staked_value() + 
transfer_amount, + init_origin_staker_info.latest_staked_value() + ); + assert_eq!( + final_target_staker_info.latest_staked_value() - transfer_amount, + init_target_staker_info.latest_staked_value() + ); +} + +/// helper function to bond, stake and verify if result is OK +fn claim_dapp_and_verify(contract: H160, era: EraIndex) { + precompiles() + .prepare_test( + TestAccount::Bobo, + precompile_address(), + EvmDataWriter::new_with_selector(Action::ClaimDapp) + .write(Address(contract.clone())) + .write(era) + .build(), + ) + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(true).build()); +} + +/// helper function to bond, stake and verify if the result is OK +fn claim_staker_and_verify(staker: TestAccount, contract: H160) { + precompiles() + .prepare_test( + staker, + precompile_address(), + EvmDataWriter::new_with_selector(Action::ClaimStaker) + .write(Address(contract.clone())) + .build(), + ) + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(true).build()); +} + +fn contract_era_stake_verify(contract: H160, amount: Balance) { + precompiles() + .prepare_test( + TestAccount::Alex, + precompile_address(), + EvmDataWriter::new_with_selector(Action::ReadContractStake) + .write(Address(contract.clone())) + .build(), + ) + .expect_cost(2 * READ_WEIGHT) + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(amount).build()); +} + +/// helper function to verify latest staked amount +fn verify_staked_amount(contract: H160, staker: TestAccount, amount: Balance) { + precompiles() + .prepare_test( + staker.clone(), + precompile_address(), + EvmDataWriter::new_with_selector(Action::ReadStakedAmountOnContract) + .write(Address(contract.clone())) + .write(Bytes( + Into::::into(staker.clone()).to_fixed_bytes().to_vec(), + )) + .build(), + ) + .expect_cost(READ_WEIGHT) + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(amount).build()); +} + +/// Helper method to decode type SmartContract enum from [u8; 20] +fn 
decode_smart_contract_from_array( + contract_array: [u8; 20], +) -> Result<::SmartContract, String> { + // Encode contract address to fit SmartContract enum. + let mut contract_enum_encoded: [u8; 21] = [0; 21]; + contract_enum_encoded[0] = 0; // enum for EVM H160 address is 0 + contract_enum_encoded[1..21].copy_from_slice(&contract_array); + + let smart_contract = ::SmartContract::decode( + &mut &contract_enum_encoded[..21], + ) + .map_err(|_| "Error while decoding SmartContract")?; + + Ok(smart_contract) +} diff --git a/precompiles/sr25519/Cargo.toml b/precompiles/sr25519/Cargo.toml new file mode 100644 index 0000000000..9a0741b480 --- /dev/null +++ b/precompiles/sr25519/Cargo.toml @@ -0,0 +1,49 @@ +[package] +name = "pallet-evm-precompile-sr25519" +description = "SR25519 crypto support for EVM." +version = "1.2.1" +authors.workspace = true +edition.workspace = true +homepage.workspace = true +repository.workspace = true + +[dependencies] +log = { workspace = true } +num_enum = { workspace = true } +precompile-utils = { workspace = true, default-features = false } + +# Substrate +parity-scale-codec = { workspace = true, features = ["max-encoded-len"] } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-std = { workspace = true } + +# Frontier +fp-evm = { workspace = true } +pallet-evm = { workspace = true } + +[dev-dependencies] +derive_more = { workspace = true } +hex-literal = { workspace = true } +scale-info = { workspace = true } +serde = { workspace = true } + +precompile-utils = { workspace = true, features = ["testing"] } + +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-balances = { workspace = true } +pallet-timestamp = { workspace = true } +sp-runtime = { workspace = true } + +[features] +default = ["std"] +std = [ + "parity-scale-codec/std", + "fp-evm/std", + "pallet-evm/std", + "precompile-utils/std", + "sp-core/std", + "sp-std/std", + "sp-io/std", +] diff --git a/precompiles/sr25519/SR25519.sol 
b/precompiles/sr25519/SR25519.sol new file mode 100644 index 0000000000..7efffbd461 --- /dev/null +++ b/precompiles/sr25519/SR25519.sol @@ -0,0 +1,16 @@ +pragma solidity ^0.8.0; + +/** + * @title SR25519 signature interface. + */ +interface SR25519 { + /** + * @dev Verify signed message using SR25519 crypto. + * @return A boolean confirming whether the public key is signer for the message. + */ + function verify( + bytes32 public_key, + bytes calldata signature, + bytes calldata message + ) external view returns (bool); +} \ No newline at end of file diff --git a/precompiles/sr25519/src/lib.rs b/precompiles/sr25519/src/lib.rs new file mode 100644 index 0000000000..690e6db87c --- /dev/null +++ b/precompiles/sr25519/src/lib.rs @@ -0,0 +1,97 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . 
+ +#![cfg_attr(not(feature = "std"), no_std)] +#![cfg_attr(test, feature(assert_matches))] + +use fp_evm::{PrecompileHandle, PrecompileOutput}; +use pallet_evm::Precompile; +use sp_core::{crypto::UncheckedFrom, sr25519, H256}; +use sp_std::marker::PhantomData; +use sp_std::prelude::*; + +use precompile_utils::{ + succeed, Bytes, EvmDataWriter, EvmResult, FunctionModifier, PrecompileHandleExt, +}; + +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; + +#[precompile_utils::generate_function_selector] +#[derive(Debug, PartialEq)] +pub enum Action { + Verify = "verify(bytes32,bytes,bytes)", +} + +/// A precompile to wrap substrate sr25519 functions. +pub struct Sr25519Precompile(PhantomData); + +impl Precompile for Sr25519Precompile { + fn execute(handle: &mut impl PrecompileHandle) -> EvmResult { + log::trace!(target: "sr25519-precompile", "In sr25519 precompile"); + + let selector = handle.read_selector()?; + + handle.check_function_modifier(FunctionModifier::View)?; + + match selector { + // Dispatchables + Action::Verify => Self::verify(handle), + } + } +} + +impl Sr25519Precompile { + fn verify(handle: &mut impl PrecompileHandle) -> EvmResult { + let mut input = handle.read_input()?; + input.expect_arguments(3)?; + + // Parse arguments + let public: sr25519::Public = sr25519::Public::unchecked_from(input.read::()?); + let signature_bytes: Vec = input.read::()?.into(); + let message: Vec = input.read::()?.into(); + + // Parse signature + let signature_opt = sr25519::Signature::from_slice(&signature_bytes[..]); + + let signature = if let Some(sig) = signature_opt { + sig + } else { + // Return `false` if signature length is wrong + return Ok(succeed(EvmDataWriter::new().write(false).build())); + }; + + log::trace!( + target: "sr25519-precompile", + "Verify signature {:?} for public {:?} and message {:?}", + signature, public, message, + ); + + let is_confirmed = sp_io::crypto::sr25519_verify(&signature, &message[..], &public); + + log::trace!( + target: 
"sr25519-precompile", + "Verified signature {:?} is {:?}", + signature, is_confirmed, + ); + + Ok(succeed(EvmDataWriter::new().write(is_confirmed).build())) + } +} diff --git a/precompiles/sr25519/src/mock.rs b/precompiles/sr25519/src/mock.rs new file mode 100644 index 0000000000..04b2d75d90 --- /dev/null +++ b/precompiles/sr25519/src/mock.rs @@ -0,0 +1,237 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +//! Testing utilities. 
+ +use super::*; + +use frame_support::{construct_runtime, parameter_types, traits::Everything, weights::Weight}; +use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; +use scale_info::TypeInfo; +use serde::{Deserialize, Serialize}; + +use pallet_evm::{ + AddressMapping, EnsureAddressNever, EnsureAddressRoot, PrecompileResult, PrecompileSet, +}; +use sp_core::{H160, H256}; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, +}; + +pub type AccountId = TestAccount; +pub type Balance = u128; +pub type BlockNumber = u64; +pub type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +pub type Block = frame_system::mocking::MockBlock; + +pub const PRECOMPILE_ADDRESS: H160 = H160::repeat_byte(0xBB); + +#[derive( + Eq, + PartialEq, + Ord, + PartialOrd, + Clone, + Encode, + Decode, + Debug, + MaxEncodedLen, + Serialize, + Deserialize, + derive_more::Display, + TypeInfo, +)] +pub enum TestAccount { + Alice, + Bob, + Charlie, + Bogus, + Precompile, +} + +impl Default for TestAccount { + fn default() -> Self { + Self::Alice + } +} + +impl AddressMapping for TestAccount { + fn into_account_id(h160_account: H160) -> TestAccount { + match h160_account { + a if a == H160::repeat_byte(0xAA) => Self::Alice, + a if a == H160::repeat_byte(0xBB) => Self::Bob, + a if a == H160::repeat_byte(0xCC) => Self::Charlie, + a if a == PRECOMPILE_ADDRESS => Self::Precompile, + _ => Self::Bogus, + } + } +} + +impl From for TestAccount { + fn from(x: H160) -> TestAccount { + TestAccount::into_account_id(x) + } +} + +impl From for H160 { + fn from(value: TestAccount) -> H160 { + match value { + TestAccount::Alice => H160::repeat_byte(0xAA), + TestAccount::Bob => H160::repeat_byte(0xBB), + TestAccount::Charlie => H160::repeat_byte(0xCC), + TestAccount::Precompile => PRECOMPILE_ADDRESS, + TestAccount::Bogus => Default::default(), + } + } +} + +parameter_types! 
{ + pub const BlockHashCount: u64 = 250; + pub const SS58Prefix: u8 = 42; +} + +impl frame_system::Config for Runtime { + type BaseCallFilter = Everything; + type DbWeight = (); + type RuntimeOrigin = RuntimeOrigin; + type Index = u64; + type BlockNumber = BlockNumber; + type RuntimeCall = RuntimeCall; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Header = Header; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type BlockWeights = (); + type BlockLength = (); + type SS58Prefix = SS58Prefix; + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +#[derive(Debug, Clone, Copy)] +pub struct TestPrecompileSet(PhantomData); + +impl PrecompileSet for TestPrecompileSet +where + R: pallet_evm::Config, + Sr25519Precompile: Precompile, +{ + fn execute(&self, handle: &mut impl PrecompileHandle) -> Option { + match handle.code_address() { + a if a == PRECOMPILE_ADDRESS => Some(Sr25519Precompile::::execute(handle)), + _ => None, + } + } + + fn is_precompile(&self, address: H160) -> bool { + address == PRECOMPILE_ADDRESS + } +} + +parameter_types! { + pub const MinimumPeriod: u64 = 5; +} + +impl pallet_timestamp::Config for Runtime { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = MinimumPeriod; + type WeightInfo = (); +} + +parameter_types! { + pub const ExistentialDeposit: u128 = 0; +} + +impl pallet_balances::Config for Runtime { + type MaxReserves = (); + type ReserveIdentifier = (); + type MaxLocks = (); + type Balance = Balance; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); +} + +parameter_types! 
{ + pub const PrecompilesValue: TestPrecompileSet = + TestPrecompileSet(PhantomData); + pub const WeightPerGas: Weight = Weight::from_ref_time(1); +} + +impl pallet_evm::Config for Runtime { + type FeeCalculator = (); + type GasWeightMapping = pallet_evm::FixedGasWeightMapping; + type WeightPerGas = WeightPerGas; + type CallOrigin = EnsureAddressRoot; + type WithdrawOrigin = EnsureAddressNever; + type AddressMapping = AccountId; + type Currency = Balances; + type RuntimeEvent = RuntimeEvent; + type Runner = pallet_evm::runner::stack::Runner; + type PrecompilesType = TestPrecompileSet; + type PrecompilesValue = PrecompilesValue; + type ChainId = (); + type OnChargeTransaction = (); + type BlockGasLimit = (); + type BlockHashMapping = pallet_evm::SubstrateBlockHashMapping; + type OnCreate = (); + type FindAuthor = (); + type WeightInfo = (); +} + +// Configure a mock runtime to test the pallet. +construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system, + Balances: pallet_balances, + Evm: pallet_evm, + Timestamp: pallet_timestamp, + } +); + +#[derive(Default)] +pub(crate) struct ExtBuilder; + +impl ExtBuilder { + pub(crate) fn build(self) -> sp_io::TestExternalities { + let t = frame_system::GenesisConfig::default() + .build_storage::() + .expect("Frame system builds valid default genesis config"); + + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext + } +} diff --git a/precompiles/sr25519/src/tests.rs b/precompiles/sr25519/src/tests.rs new file mode 100644 index 0000000000..0774ae7078 --- /dev/null +++ b/precompiles/sr25519/src/tests.rs @@ -0,0 +1,108 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +use hex_literal::hex; + +use crate::mock::*; +use crate::*; + +use precompile_utils::testing::*; +use sp_core::{sr25519, Pair, H256}; + +fn precompiles() -> TestPrecompileSet { + PrecompilesValue::get() +} + +#[test] +fn wrong_signature_length_returns_false() { + ExtBuilder::default().build().execute_with(|| { + let pair = sr25519::Pair::from_seed(b"12345678901234567890123456789012"); + let public = pair.public(); + let signature = hex!["0042"]; + let message = hex!["00"]; + + precompiles() + .prepare_test( + TestAccount::Alice, + PRECOMPILE_ADDRESS, + EvmDataWriter::new_with_selector(Action::Verify) + .write(H256::from(public)) + .write(Bytes::from(&signature[..])) + .write(Bytes::from(&message[..])) + .build(), + ) + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(false).build()); + }); +} + +#[test] +fn bad_signature_returns_false() { + ExtBuilder::default().build().execute_with(|| { + let pair = sr25519::Pair::from_seed(b"12345678901234567890123456789012"); + let public = pair.public(); + let message = hex!("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee00000000000000000200d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a4500000000000000"); + let signature = pair.sign(&message[..]); + assert!(sr25519::Pair::verify(&signature, &message[..], &public)); + + let bad_message = 
hex!["00"]; + + precompiles() + .prepare_test( + TestAccount::Alice, + PRECOMPILE_ADDRESS, + EvmDataWriter::new_with_selector(Action::Verify) + .write(H256::from(public)) + .write(Bytes::from(>::as_ref(&signature))) + .write(Bytes::from(&bad_message[..])) + .build(), + ) + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(false).build()); + }); +} + +#[test] +fn substrate_test_vector_works() { + ExtBuilder::default().build().execute_with(|| { + let pair = sr25519::Pair::from_seed(b"12345678901234567890123456789012"); + let public = pair.public(); + assert_eq!( + public, + sr25519::Public::from_raw(hex!( + "741c08a06f41c596608f6774259bd9043304adfa5d3eea62760bd9be97634d63" + )) + ); + let message = hex!("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee00000000000000000200d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a4500000000000000"); + let signature = pair.sign(&message[..]); + assert!(sr25519::Pair::verify(&signature, &message[..], &public)); + + precompiles() + .prepare_test( + TestAccount::Alice, + PRECOMPILE_ADDRESS, + EvmDataWriter::new_with_selector(Action::Verify) + .write(H256::from(public)) + .write(Bytes::from(>::as_ref(&signature))) + .write(Bytes::from(&message[..])) + .build(), + ) + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(true).build()); + }); +} diff --git a/precompiles/substrate-ecdsa/Cargo.toml b/precompiles/substrate-ecdsa/Cargo.toml new file mode 100644 index 0000000000..6822d59884 --- /dev/null +++ b/precompiles/substrate-ecdsa/Cargo.toml @@ -0,0 +1,50 @@ +[package] +name = "pallet-evm-precompile-substrate-ecdsa" +description = "Substrate ECDSA crypto support for EVM." 
+version = "1.2.2" +authors.workspace = true +edition.workspace = true +homepage.workspace = true +repository.workspace = true + +[dependencies] +log = { workspace = true } +num_enum = { workspace = true } +precompile-utils = { workspace = true, default-features = false } + +# Substrate +parity-scale-codec = { workspace = true, features = ["max-encoded-len"] } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-std = { workspace = true } + +# Frontier +fp-evm = { workspace = true } +pallet-evm = { workspace = true } + +[dev-dependencies] +derive_more = { workspace = true } +hex-literal = { workspace = true } +scale-info = { workspace = true } +serde = { workspace = true } + +precompile-utils = { workspace = true, features = ["testing"] } + +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-balances = { workspace = true } +pallet-timestamp = { workspace = true } +sp-runtime = { workspace = true } + +[features] +default = ["std"] +std = [ + "num_enum/std", + "parity-scale-codec/std", + "fp-evm/std", + "pallet-evm/std", + "precompile-utils/std", + "sp-core/std", + "sp-std/std", + "sp-io/std", +] diff --git a/precompiles/substrate-ecdsa/SubstrateEcdsa.sol b/precompiles/substrate-ecdsa/SubstrateEcdsa.sol new file mode 100644 index 0000000000..39a94dd707 --- /dev/null +++ b/precompiles/substrate-ecdsa/SubstrateEcdsa.sol @@ -0,0 +1,16 @@ +pragma solidity ^0.8.0; + +/** + * @title SubstrateEcdsa signature interface. + */ +interface ISubstrateEcdsa { + /** + * @dev Verify signed message using Substrate version of ECDSA crypto. + * @return A boolean confirming whether the public key is signer for the message. 
+ */ + function verify( + bytes32 public_key, + bytes calldata signature, + bytes calldata message + ) external view returns (bool); +} diff --git a/precompiles/substrate-ecdsa/src/lib.rs b/precompiles/substrate-ecdsa/src/lib.rs new file mode 100644 index 0000000000..a91309eb6d --- /dev/null +++ b/precompiles/substrate-ecdsa/src/lib.rs @@ -0,0 +1,105 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +#![cfg_attr(not(feature = "std"), no_std)] +#![cfg_attr(test, feature(assert_matches))] + +use fp_evm::{PrecompileHandle, PrecompileOutput}; +use pallet_evm::Precompile; +use sp_core::ecdsa; +use sp_std::marker::PhantomData; +use sp_std::prelude::*; + +use precompile_utils::{ + succeed, Bytes, EvmDataWriter, EvmResult, FunctionModifier, PrecompileHandleExt, +}; + +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; + +#[precompile_utils::generate_function_selector] +#[derive(Debug, PartialEq)] +pub enum Action { + Verify = "verify(bytes,bytes,bytes)", +} + +/// A precompile to wrap substrate ecdsa functions. 
+pub struct SubstrateEcdsaPrecompile(PhantomData); + +impl Precompile for SubstrateEcdsaPrecompile { + fn execute(handle: &mut impl PrecompileHandle) -> EvmResult { + log::trace!(target: "substrate-ecdsa-precompile", "In SubstrateEcdsa precompile"); + + let selector = handle.read_selector()?; + + handle.check_function_modifier(FunctionModifier::View)?; + + match selector { + // Dispatchables + Action::Verify => Self::verify(handle), + } + } +} + +impl SubstrateEcdsaPrecompile { + fn verify(handle: &mut impl PrecompileHandle) -> EvmResult { + let mut input = handle.read_input()?; + input.expect_arguments(3)?; + + // Parse arguments + let public_bytes: Vec = input.read::()?.into(); + let signature_bytes: Vec = input.read::()?.into(); + let message: Vec = input.read::()?.into(); + + // Parse public key + let public = if let Ok(public) = ecdsa::Public::try_from(&public_bytes[..]) { + public + } else { + // Return `false` if public key length is wrong + return Ok(succeed(EvmDataWriter::new().write(false).build())); + }; + + // Parse signature + let signature_opt = ecdsa::Signature::from_slice(&signature_bytes[..]); + + let signature = if let Some(sig) = signature_opt { + sig + } else { + // Return `false` if signature length is wrong + return Ok(succeed(EvmDataWriter::new().write(false).build())); + }; + + log::trace!( + target: "substrate-ecdsa-precompile", + "Verify signature {:?} for public {:?} and message {:?}", + signature, public, message, + ); + + let is_confirmed = sp_io::crypto::ecdsa_verify(&signature, &message[..], &public); + + log::trace!( + target: "substrate-ecdsa-precompile", + "Verified signature {:?} is {:?}", + signature, is_confirmed, + ); + + Ok(succeed(EvmDataWriter::new().write(is_confirmed).build())) + } +} diff --git a/precompiles/substrate-ecdsa/src/mock.rs b/precompiles/substrate-ecdsa/src/mock.rs new file mode 100644 index 0000000000..d093e4e6bf --- /dev/null +++ b/precompiles/substrate-ecdsa/src/mock.rs @@ -0,0 +1,237 @@ +// This file is 
part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +//! Testing utilities. + +use super::*; + +use frame_support::{construct_runtime, parameter_types, traits::Everything, weights::Weight}; +use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; +use scale_info::TypeInfo; +use serde::{Deserialize, Serialize}; + +use pallet_evm::{ + AddressMapping, EnsureAddressNever, EnsureAddressRoot, PrecompileResult, PrecompileSet, +}; +use sp_core::{H160, H256}; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, +}; + +pub type AccountId = TestAccount; +pub type Balance = u128; +pub type BlockNumber = u64; +pub type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +pub type Block = frame_system::mocking::MockBlock; + +pub const PRECOMPILE_ADDRESS: H160 = H160::repeat_byte(0x3F); + +#[derive( + Eq, + PartialEq, + Ord, + PartialOrd, + Clone, + Encode, + Decode, + Debug, + MaxEncodedLen, + Serialize, + Deserialize, + derive_more::Display, + TypeInfo, +)] +pub enum TestAccount { + Alice, + Bob, + Charlie, + Bogus, + Precompile, +} + +impl Default for TestAccount { + fn default() -> Self { + Self::Alice + } +} + +impl AddressMapping for TestAccount { + fn into_account_id(h160_account: H160) -> TestAccount { + match h160_account { + a if a == H160::repeat_byte(0xAA) => 
Self::Alice, + a if a == H160::repeat_byte(0xBB) => Self::Bob, + a if a == H160::repeat_byte(0xCC) => Self::Charlie, + a if a == PRECOMPILE_ADDRESS => Self::Precompile, + _ => Self::Bogus, + } + } +} + +impl From for TestAccount { + fn from(x: H160) -> TestAccount { + TestAccount::into_account_id(x) + } +} + +impl From for H160 { + fn from(value: TestAccount) -> H160 { + match value { + TestAccount::Alice => H160::repeat_byte(0xAA), + TestAccount::Bob => H160::repeat_byte(0xBB), + TestAccount::Charlie => H160::repeat_byte(0xCC), + TestAccount::Precompile => PRECOMPILE_ADDRESS, + TestAccount::Bogus => Default::default(), + } + } +} + +parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const SS58Prefix: u8 = 42; +} + +impl frame_system::Config for Runtime { + type BaseCallFilter = Everything; + type DbWeight = (); + type RuntimeOrigin = RuntimeOrigin; + type Index = u64; + type BlockNumber = BlockNumber; + type RuntimeCall = RuntimeCall; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Header = Header; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type BlockWeights = (); + type BlockLength = (); + type SS58Prefix = SS58Prefix; + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +#[derive(Debug, Clone, Copy)] +pub struct TestPrecompileSet(PhantomData); + +impl PrecompileSet for TestPrecompileSet +where + R: pallet_evm::Config, + SubstrateEcdsaPrecompile: Precompile, +{ + fn execute(&self, handle: &mut impl PrecompileHandle) -> Option { + match handle.code_address() { + a if a == PRECOMPILE_ADDRESS => Some(SubstrateEcdsaPrecompile::::execute(handle)), + _ => None, + } + } + + fn is_precompile(&self, address: H160) -> bool { + address == 
PRECOMPILE_ADDRESS + } +} + +parameter_types! { + pub const MinimumPeriod: u64 = 5; +} + +impl pallet_timestamp::Config for Runtime { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = MinimumPeriod; + type WeightInfo = (); +} + +parameter_types! { + pub const ExistentialDeposit: u128 = 0; +} + +impl pallet_balances::Config for Runtime { + type MaxReserves = (); + type ReserveIdentifier = (); + type MaxLocks = (); + type Balance = Balance; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); +} + +parameter_types! { + pub const PrecompilesValue: TestPrecompileSet = + TestPrecompileSet(PhantomData); + pub WeightPerGas: Weight = Weight::from_ref_time(1); +} + +impl pallet_evm::Config for Runtime { + type FeeCalculator = (); + type GasWeightMapping = pallet_evm::FixedGasWeightMapping; + type WeightPerGas = WeightPerGas; + type CallOrigin = EnsureAddressRoot; + type WithdrawOrigin = EnsureAddressNever; + type AddressMapping = AccountId; + type Currency = Balances; + type RuntimeEvent = RuntimeEvent; + type Runner = pallet_evm::runner::stack::Runner; + type PrecompilesType = TestPrecompileSet; + type PrecompilesValue = PrecompilesValue; + type ChainId = (); + type OnChargeTransaction = (); + type BlockGasLimit = (); + type BlockHashMapping = pallet_evm::SubstrateBlockHashMapping; + type FindAuthor = (); + type OnCreate = (); + type WeightInfo = (); +} + +// Configure a mock runtime to test the pallet. 
+construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system, + Balances: pallet_balances, + Evm: pallet_evm, + Timestamp: pallet_timestamp, + } +); + +#[derive(Default)] +pub(crate) struct ExtBuilder; + +impl ExtBuilder { + pub(crate) fn build(self) -> sp_io::TestExternalities { + let t = frame_system::GenesisConfig::default() + .build_storage::() + .expect("Frame system builds valid default genesis config"); + + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext + } +} diff --git a/precompiles/substrate-ecdsa/src/tests.rs b/precompiles/substrate-ecdsa/src/tests.rs new file mode 100644 index 0000000000..f5d940f0e3 --- /dev/null +++ b/precompiles/substrate-ecdsa/src/tests.rs @@ -0,0 +1,110 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . 
+ +use hex_literal::hex; + +use crate::mock::*; +use crate::*; + +use precompile_utils::testing::*; +use sp_core::{ecdsa, Pair}; + +fn precompiles() -> TestPrecompileSet { + PrecompilesValue::get() +} + +#[test] +fn wrong_signature_length_returns_false() { + ExtBuilder::default().build().execute_with(|| { + let pair = ecdsa::Pair::from_seed(b"12345678901234567890123456789012"); + let public = pair.public(); + let signature = hex!["0042"]; + let message = hex!["00"]; + + precompiles() + .prepare_test( + TestAccount::Alice, + PRECOMPILE_ADDRESS, + EvmDataWriter::new_with_selector(Action::Verify) + .write(Bytes::from(>::as_ref(&public))) + .write(Bytes::from(&signature[..])) + .write(Bytes::from(&message[..])) + .build(), + ) + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(false).build()); + }); +} + +#[test] +fn bad_signature_returns_false() { + ExtBuilder::default().build().execute_with(|| { + let pair = ecdsa::Pair::from_seed(b"12345678901234567890123456789012"); + let public = pair.public(); + let message = hex!("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee00000000000000000200d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a4500000000000000"); + let signature = pair.sign(&message[..]); + assert!(ecdsa::Pair::verify(&signature, &message[..], &public)); + + let bad_message = hex!["00"]; + + precompiles() + .prepare_test( + TestAccount::Alice, + PRECOMPILE_ADDRESS, + EvmDataWriter::new_with_selector(Action::Verify) + .write(Bytes::from(>::as_ref(&public))) + .write(Bytes::from(>::as_ref(&signature))) + .write(Bytes::from(&bad_message[..])) + .build(), + ) + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(false).build()); + }); +} + +#[test] +fn substrate_test_vector_works() { + ExtBuilder::default().build().execute_with(|| { + let pair = ecdsa::Pair::from_seed(&hex!( + "1d2187216832d1ee14be2e677f9e3ebceca715510ba1460a20d6fce07ba36b1e" + )); + let public = pair.public(); + assert_eq!( + public, + 
ecdsa::Public::from_raw(hex!( + "02071bca0b0da3cfa98d3089db224999a827fc1df1a3d6221194382872f0d1a82a" + )) + ); + let message = hex!("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee00000000000000000200d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a4500000000000000"); + let signature = pair.sign(&message[..]); + assert!(ecdsa::Pair::verify(&signature, &message[..], &public)); + + precompiles() + .prepare_test( + TestAccount::Alice, + PRECOMPILE_ADDRESS, + EvmDataWriter::new_with_selector(Action::Verify) + .write(Bytes::from(>::as_ref(&public))) + .write(Bytes::from(>::as_ref(&signature))) + .write(Bytes::from(&message[..])) + .build(), + ) + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(true).build()); + }); +} diff --git a/precompiles/utils/Cargo.toml b/precompiles/utils/Cargo.toml new file mode 100644 index 0000000000..6be1724ce6 --- /dev/null +++ b/precompiles/utils/Cargo.toml @@ -0,0 +1,57 @@ +[package] +name = "precompile-utils" +authors = ["StakeTechnologies", "PureStake"] +description = "Utils to write EVM precompiles." 
+version = "0.4.3" +edition.workspace = true +homepage.workspace = true +repository.workspace = true + +[dependencies] +# There's a problem with --all-features when this is moved under dev-deps +evm = { workspace = true, features = ["std"], optional = true } +impl-trait-for-tuples = { workspace = true } +log = { workspace = true } +num_enum = { workspace = true } +sha3 = { workspace = true } +similar-asserts = { workspace = true, optional = true } + +precompile-utils-macro = { path = "macro" } + +# Substrate +frame-support = { workspace = true } +frame-system = { workspace = true } +parity-scale-codec = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } + +# Frontier +fp-evm = { workspace = true } +pallet-evm = { workspace = true } + +# Polkadot / XCM +xcm = { workspace = true } + +assert_matches = { workspace = true } + +[dev-dependencies] +hex-literal = { workspace = true } + +[features] +default = ["std"] +std = [ + "evm/std", + "parity-scale-codec/std", + "fp-evm/std", + "frame-support/std", + "frame-system/std", + "pallet-evm/std", + "sp-core/std", + "sp-io/std", + "sp-std/std", + "sp-runtime/std", + "xcm/std", +] +testing = ["similar-asserts", "std"] diff --git a/precompiles/utils/macro/Cargo.toml b/precompiles/utils/macro/Cargo.toml new file mode 100644 index 0000000000..86c80db831 --- /dev/null +++ b/precompiles/utils/macro/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "precompile-utils-macro" +authors = ["StakeTechnologies", "PureStake"] +description = "" +version = "0.1.0" +edition.workspace = true +homepage.workspace = true +repository.workspace = true + +[lib] +proc-macro = true + +[[test]] +name = "tests" +path = "tests/tests.rs" + +[dependencies] +num_enum = { workspace = true } +proc-macro2 = { workspace = true } +quote = { workspace = true } +sha3 = { workspace = true } +syn = { workspace = true, features = ["extra-traits", "fold", "full", "visit"] } 
diff --git a/precompiles/utils/macro/src/lib.rs b/precompiles/utils/macro/src/lib.rs new file mode 100644 index 0000000000..fb66972c73 --- /dev/null +++ b/precompiles/utils/macro/src/lib.rs @@ -0,0 +1,150 @@ +// This file is part of Astar. + +// Copyright 2019-2022 PureStake Inc. +// Copyright (C) 2022-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later +// +// This file is part of Utils package, originally developed by Purestake Inc. +// Utils package used in Astar Network in terms of GPLv3. +// +// Utils is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Utils is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Utils. If not, see . 
+ +#![crate_type = "proc-macro"] +extern crate proc_macro; + +use proc_macro::TokenStream; +use proc_macro2::Literal; +use quote::{quote, quote_spanned}; +use sha3::{Digest, Keccak256}; +use syn::{parse_macro_input, spanned::Spanned, Expr, ExprLit, Ident, ItemEnum, Lit, LitStr}; + +struct Bytes(Vec); + +impl ::std::fmt::Debug for Bytes { + #[inline] + fn fmt(&self, f: &mut std::fmt::Formatter) -> ::std::fmt::Result { + let data = &self.0; + write!(f, "[")?; + if !data.is_empty() { + write!(f, "{:#04x}u8", data[0])?; + for unit in data.iter().skip(1) { + write!(f, ", {:#04x}", unit)?; + } + } + write!(f, "]") + } +} + +#[proc_macro] +pub fn keccak256(input: TokenStream) -> TokenStream { + let lit_str = parse_macro_input!(input as LitStr); + + let hash = Keccak256::digest(lit_str.value().as_bytes()); + + let bytes = Bytes(hash.to_vec()); + let eval_str = format!("{:?}", bytes); + let eval_ts: proc_macro2::TokenStream = eval_str.parse().unwrap_or_else(|_| { + panic!( + "Failed to parse the string \"{}\" to TokenStream.", + eval_str + ); + }); + quote!(#eval_ts).into() +} + +/// This macro allows to associate to each variant of an enumeration a discriminant (of type u32 +/// whose value corresponds to the first 4 bytes of the Hash Keccak256 of the character string +///indicated by the user of this macro. +/// +/// Usage: +/// +/// ```ignore +/// #[generate_function_selector] +/// enum Action { +/// Toto = "toto()", +/// Tata = "tata()", +/// } +/// ``` +/// +/// Extanded to: +/// +/// ```rust +/// #[repr(u32)] +/// enum Action { +/// Toto = 119097542u32, +/// Tata = 1414311903u32, +/// } +/// ``` +/// +#[proc_macro_attribute] +pub fn generate_function_selector(_: TokenStream, input: TokenStream) -> TokenStream { + let item = parse_macro_input!(input as ItemEnum); + + let ItemEnum { + attrs, + vis, + enum_token, + ident, + variants, + .. 
+ } = item; + + let mut ident_expressions: Vec = vec![]; + let mut variant_expressions: Vec = vec![]; + for variant in variants { + match variant.discriminant { + Some((_, Expr::Lit(ExprLit { lit, .. }))) => { + if let Lit::Str(lit_str) = lit { + let digest = Keccak256::digest(lit_str.value().as_bytes()); + let selector = u32::from_be_bytes([digest[0], digest[1], digest[2], digest[3]]); + + ident_expressions.push(variant.ident); + variant_expressions.push(Expr::Lit(ExprLit { + lit: Lit::Verbatim(Literal::u32_suffixed(selector)), + attrs: Default::default(), + })); + } else { + return quote_spanned! { + lit.span() => compile_error("Expected literal string"); + } + .into(); + } + } + Some((_eg, expr)) => { + return quote_spanned! { + expr.span() => compile_error("Expected literal"); + } + .into() + } + None => { + return quote_spanned! { + variant.span() => compile_error("Each variant must have a discriminant"); + } + .into() + } + } + } + + (quote! { + #(#attrs)* + #[derive(num_enum::TryFromPrimitive, num_enum::IntoPrimitive)] + #[repr(u32)] + #vis #enum_token #ident { + #( + #ident_expressions = #variant_expressions, + )* + } + }) + .into() +} diff --git a/precompiles/utils/macro/tests/tests.rs b/precompiles/utils/macro/tests/tests.rs new file mode 100644 index 0000000000..95ce768513 --- /dev/null +++ b/precompiles/utils/macro/tests/tests.rs @@ -0,0 +1,58 @@ +// This file is part of Astar. + +// Copyright 2019-2022 PureStake Inc. +// Copyright (C) 2022-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later +// +// This file is part of Utils package, originally developed by Purestake Inc. +// Utils package used in Astar Network in terms of GPLv3. +// +// Utils is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Utils is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Utils. If not, see . + +use sha3::{Digest, Keccak256}; + +#[precompile_utils_macro::generate_function_selector] +pub enum Action { + Toto = "toto()", + Tata = "tata()", +} + +#[test] +fn test_keccak256() { + assert_eq!( + &precompile_utils_macro::keccak256!(""), + Keccak256::digest(b"").as_slice(), + ); + assert_eq!( + &precompile_utils_macro::keccak256!("toto()"), + Keccak256::digest(b"toto()").as_slice(), + ); + assert_ne!( + &precompile_utils_macro::keccak256!("toto()"), + Keccak256::digest(b"tata()").as_slice(), + ); +} + +#[test] +fn test_generate_function_selector() { + assert_eq!( + &(Action::Toto as u32).to_be_bytes()[..], + &Keccak256::digest(b"toto()")[0..4], + ); + assert_eq!( + &(Action::Tata as u32).to_be_bytes()[..], + &Keccak256::digest(b"tata()")[0..4], + ); + assert_ne!(Action::Toto as u32, Action::Tata as u32); +} diff --git a/precompiles/utils/src/data.rs b/precompiles/utils/src/data.rs new file mode 100644 index 0000000000..2359bcefd4 --- /dev/null +++ b/precompiles/utils/src/data.rs @@ -0,0 +1,606 @@ +// This file is part of Astar. + +// Copyright 2019-2022 PureStake Inc. +// Copyright (C) 2022-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later +// +// This file is part of Utils package, originally developed by Purestake Inc. +// Utils package used in Astar Network in terms of GPLv3. +// +// Utils is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Utils is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Utils. If not, see . + +use crate::{revert, EvmResult}; + +use alloc::borrow::ToOwned; +use core::{any::type_name, ops::Range}; +use impl_trait_for_tuples::impl_for_tuples; +use sp_core::{H160, H256, U256}; +use sp_std::{convert::TryInto, vec, vec::Vec}; + +/// The `address` type of Solidity. +/// H160 could represent 2 types of data (bytes20 and address) that are not encoded the same way. +/// To avoid issues writing H160 is thus not supported. +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub struct Address(pub H160); + +impl From for Address { + fn from(a: H160) -> Address { + Address(a) + } +} + +impl From
for H160 { + fn from(a: Address) -> H160 { + a.0 + } +} + +/// The `bytes`/`string` type of Solidity. +/// It is different from `Vec` which will be serialized with padding for each `u8` element +/// of the array, while `Bytes` is tightly packed. +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct Bytes(pub Vec); + +impl Bytes { + /// Interpret as `bytes`. + pub fn as_bytes(&self) -> &[u8] { + &self.0 + } + + /// Interpret as `string`. + /// Can fail if the string is not valid UTF8. + pub fn as_str(&self) -> Result<&str, sp_std::str::Utf8Error> { + sp_std::str::from_utf8(&self.0) + } +} + +impl From<&[u8]> for Bytes { + fn from(a: &[u8]) -> Self { + Self(a.to_owned()) + } +} + +impl From<&str> for Bytes { + fn from(a: &str) -> Self { + a.as_bytes().into() + } +} + +impl From for Vec { + fn from(b: Bytes) -> Vec { + b.0 + } +} + +/// Wrapper around an EVM input slice, helping to parse it. +/// Provide functions to parse common types. +#[derive(Clone, Copy, Debug)] +pub struct EvmDataReader<'a> { + input: &'a [u8], + cursor: usize, +} + +impl<'a> EvmDataReader<'a> { + /// Create a new input parser. + pub fn new(input: &'a [u8]) -> Self { + Self { input, cursor: 0 } + } + + /// Create a new input parser from a selector-initial input. + pub fn read_selector(input: &'a [u8]) -> EvmResult + where + T: num_enum::TryFromPrimitive, + { + if input.len() < 4 { + return Err(revert("tried to parse selector out of bounds")); + } + + let mut buffer = [0u8; 4]; + buffer.copy_from_slice(&input[0..4]); + let selector = T::try_from_primitive(u32::from_be_bytes(buffer)).map_err(|_| { + log::trace!( + target: "precompile-utils", + "Failed to match function selector for {}", + type_name::() + ); + revert("unknown selector") + })?; + + Ok(selector) + } + + /// Create a new input parser from a selector-initial input. 
+ pub fn new_skip_selector(input: &'a [u8]) -> EvmResult { + if input.len() < 4 { + return Err(revert("input is too short")); + } + + Ok(Self::new(&input[4..])) + } + + /// Check the input has at least the correct amount of arguments before the end (32 bytes values). + pub fn expect_arguments(&self, args: usize) -> EvmResult { + if self.input.len() >= self.cursor + args * 32 { + Ok(()) + } else { + Err(revert("input doesn't match expected length")) + } + } + + /// Read data from the input. + pub fn read(&mut self) -> EvmResult { + T::read(self) + } + + /// Read raw bytes from the input. + /// Doesn't handle any alignment checks, prefer using `read` instead of possible. + /// Returns an error if trying to parse out of bounds. + pub fn read_raw_bytes(&mut self, len: usize) -> EvmResult<&[u8]> { + let range = self.move_cursor(len)?; + + let data = self + .input + .get(range) + .ok_or_else(|| revert("tried to parse raw bytes out of bounds"))?; + + Ok(data) + } + + /// Reads a pointer, returning a reader targetting the pointed location. + pub fn read_pointer(&mut self) -> EvmResult { + let offset: usize = self + .read::() + .map_err(|_| revert("tried to parse array offset out of bounds"))? + .try_into() + .map_err(|_| revert("array offset is too large"))?; + + if offset >= self.input.len() { + return Err(revert("pointer points out of bounds")); + } + + Ok(Self { + input: &self.input[offset..], + cursor: 0, + }) + } + + /// Read remaining bytes + pub fn read_till_end(&mut self) -> EvmResult<&[u8]> { + let range = self.move_cursor(self.input.len() - self.cursor)?; + + let data = self + .input + .get(range) + .ok_or_else(|| revert("tried to parse raw bytes out of bounds"))?; + + Ok(data) + } + + /// Move the reading cursor with provided length, and return a range from the previous cursor + /// location to the new one. + /// Checks cursor overflows. 
+ fn move_cursor(&mut self, len: usize) -> EvmResult> { + let start = self.cursor; + let end = self + .cursor + .checked_add(len) + .ok_or_else(|| revert("data reading cursor overflow"))?; + + self.cursor = end; + + Ok(start..end) + } +} + +/// Help build an EVM input/output data. +/// +/// Functions takes `self` to allow chaining all calls like +/// `EvmDataWriter::new().write(...).write(...).build()`. +/// While it could be more ergonomic to take &mut self, this would +/// prevent to have a `build` function that don't clone the output. +#[derive(Clone, Debug)] +pub struct EvmDataWriter { + pub(crate) data: Vec, + offset_data: Vec, + selector: Option, +} + +#[derive(Clone, Debug)] +struct OffsetDatum { + // Offset location in the container data. + offset_position: usize, + // Data pointed by the offset that must be inserted at the end of container data. + data: Vec, + // Inside of arrays, the offset is not from the start of array data (length), but from the start + // of the item. This shift allow to correct this. + offset_shift: usize, +} + +impl EvmDataWriter { + /// Creates a new empty output builder (without selector). + pub fn new() -> Self { + Self { + data: vec![], + offset_data: vec![], + selector: None, + } + } + + /// Creates a new empty output builder with provided selector. + /// Selector will only be appended before the data when calling + /// `build` to not mess with the offsets. + pub fn new_with_selector(selector: impl Into) -> Self { + Self { + data: vec![], + offset_data: vec![], + selector: Some(selector.into()), + } + } + + /// Return the built data. + pub fn build(mut self) -> Vec { + Self::bake_offsets(&mut self.data, self.offset_data); + + if let Some(selector) = self.selector { + let mut output = selector.to_be_bytes().to_vec(); + output.append(&mut self.data); + output + } else { + self.data + } + } + + /// Add offseted data at the end of this writer's data, updating the offsets. 
+ fn bake_offsets(output: &mut Vec, offsets: Vec) { + for mut offset_datum in offsets { + let offset_position = offset_datum.offset_position; + let offset_position_end = offset_position + 32; + + // The offset is the distance between the start of the data and the + // start of the pointed data (start of a struct, length of an array). + // Offsets in inner data are relative to the start of their respective "container". + // However in arrays the "container" is actually the item itself instead of the whole + // array, which is corrected by `offset_shift`. + let free_space_offset = output.len() - offset_datum.offset_shift; + + // Override dummy offset to the offset it will be in the final output. + U256::from(free_space_offset) + .to_big_endian(&mut output[offset_position..offset_position_end]); + + // Append this data at the end of the current output. + output.append(&mut offset_datum.data); + } + } + + /// Write arbitrary bytes. + /// Doesn't handle any alignement checks, prefer using `write` instead if possible. + fn write_raw_bytes(mut self, value: &[u8]) -> Self { + self.data.extend_from_slice(value); + self + } + + /// Write data of requested type. + pub fn write(mut self, value: T) -> Self { + T::write(&mut self, value); + self + } + + /// Writes a pointer to given data. + /// The data will be appended when calling `build`. + /// Initially write a dummy value as offset in this writer's data, which will be replaced by + /// the correct offset once the pointed data is appended. + /// + /// Takes `&mut self` since its goal is to be used inside `EvmData` impl and not in chains. + pub fn write_pointer(&mut self, data: Vec) { + let offset_position = self.data.len(); + H256::write(self, H256::repeat_byte(0xff)); + + self.offset_data.push(OffsetDatum { + offset_position, + data, + offset_shift: 0, + }); + } +} + +impl Default for EvmDataWriter { + fn default() -> Self { + Self::new() + } +} + +/// Data that can be converted from and to EVM data types. 
+pub trait EvmData: Sized { + fn read(reader: &mut EvmDataReader) -> EvmResult; + fn write(writer: &mut EvmDataWriter, value: Self); + fn has_static_size() -> bool; +} + +#[impl_for_tuples(1, 18)] +impl EvmData for Tuple { + fn has_static_size() -> bool { + for_tuples!(#( Tuple::has_static_size() )&*) + } + + fn read(reader: &mut EvmDataReader) -> EvmResult { + if !Self::has_static_size() { + let reader = &mut reader.read_pointer()?; + Ok(for_tuples!( ( #( reader.read::()? ),* ) )) + } else { + Ok(for_tuples!( ( #( reader.read::()? ),* ) )) + } + } + + fn write(writer: &mut EvmDataWriter, value: Self) { + if !Self::has_static_size() { + let mut inner_writer = EvmDataWriter::new(); + for_tuples!( #( Tuple::write(&mut inner_writer, value.Tuple); )* ); + writer.write_pointer(inner_writer.build()); + } else { + for_tuples!( #( Tuple::write(writer, value.Tuple); )* ); + } + } +} + +impl EvmData for H256 { + fn read(reader: &mut EvmDataReader) -> EvmResult { + let range = reader.move_cursor(32)?; + + let data = reader + .input + .get(range) + .ok_or_else(|| revert("tried to parse H256 out of bounds"))?; + + Ok(H256::from_slice(data)) + } + + fn write(writer: &mut EvmDataWriter, value: Self) { + writer.data.extend_from_slice(value.as_bytes()); + } + + fn has_static_size() -> bool { + true + } +} + +impl EvmData for Address { + fn read(reader: &mut EvmDataReader) -> EvmResult { + let range = reader.move_cursor(32)?; + + let data = reader + .input + .get(range) + .ok_or_else(|| revert("tried to parse H160 out of bounds"))?; + + Ok(H160::from_slice(&data[12..32]).into()) + } + + fn write(writer: &mut EvmDataWriter, value: Self) { + H256::write(writer, value.0.into()); + } + + fn has_static_size() -> bool { + true + } +} + +impl EvmData for U256 { + fn read(reader: &mut EvmDataReader) -> EvmResult { + let range = reader.move_cursor(32)?; + + let data = reader + .input + .get(range) + .ok_or_else(|| revert("tried to parse U256 out of bounds"))?; + + 
Ok(U256::from_big_endian(data)) + } + + fn write(writer: &mut EvmDataWriter, value: Self) { + let mut buffer = [0u8; 32]; + value.to_big_endian(&mut buffer); + writer.data.extend_from_slice(&buffer); + } + + fn has_static_size() -> bool { + true + } +} + +macro_rules! impl_evmdata_for_uints { + ($($uint:ty, )*) => { + $( + impl EvmData for $uint { + fn read(reader: &mut EvmDataReader) -> EvmResult { + let range = reader.move_cursor(32)?; + + let data = reader + .input + .get(range) + .ok_or_else(|| revert(alloc::format!( + "tried to parse {} out of bounds", core::any::type_name::() + )))?; + + let mut buffer = [0u8; core::mem::size_of::()]; + buffer.copy_from_slice(&data[32 - core::mem::size_of::()..]); + Ok(Self::from_be_bytes(buffer)) + } + + fn write(writer: &mut EvmDataWriter, value: Self) { + let mut buffer = [0u8; 32]; + buffer[32 - core::mem::size_of::()..].copy_from_slice(&value.to_be_bytes()); + writer.data.extend_from_slice(&buffer); + } + + fn has_static_size() -> bool { + true + } + } + )* + }; +} + +impl_evmdata_for_uints!(u16, u32, u64, u128,); + +// The implementation for u8 is specific, for performance reasons. 
+impl EvmData for u8 { + fn read(reader: &mut EvmDataReader) -> EvmResult { + let range = reader.move_cursor(32)?; + + let data = reader + .input + .get(range) + .ok_or_else(|| revert("tried to parse u64 out of bounds"))?; + + Ok(data[31]) + } + + fn write(writer: &mut EvmDataWriter, value: Self) { + let mut buffer = [0u8; 32]; + buffer[31] = value; + + writer.data.extend_from_slice(&buffer); + } + + fn has_static_size() -> bool { + true + } +} + +impl EvmData for bool { + fn read(reader: &mut EvmDataReader) -> EvmResult { + let h256 = H256::read(reader).map_err(|_| revert("tried to parse bool out of bounds"))?; + + Ok(!h256.is_zero()) + } + + fn write(writer: &mut EvmDataWriter, value: Self) { + let mut buffer = [0u8; 32]; + if value { + buffer[31] = 1; + } + + writer.data.extend_from_slice(&buffer); + } + + fn has_static_size() -> bool { + true + } +} + +impl EvmData for Vec { + fn read(reader: &mut EvmDataReader) -> EvmResult { + let mut inner_reader = reader.read_pointer()?; + + let array_size: usize = inner_reader + .read::() + .map_err(|_| revert("tried to parse array length out of bounds"))? + .try_into() + .map_err(|_| revert("array length is too large"))?; + + let mut array = vec![]; + + let mut item_reader = EvmDataReader { + input: inner_reader + .input + .get(32..) + .ok_or_else(|| revert("try to read array items out of bound"))?, + cursor: 0, + }; + + for _ in 0..array_size { + array.push(item_reader.read()?); + } + + Ok(array) + } + + fn write(writer: &mut EvmDataWriter, value: Self) { + let mut inner_writer = EvmDataWriter::new().write(U256::from(value.len())); + + for inner in value { + // Any offset in items are relative to the start of the item instead of the + // start of the array. However if there is offseted data it must but appended after + // all items (offsets) are written. We thus need to rely on `compute_offsets` to do + // that, and must store a "shift" to correct the offsets. 
+ let shift = inner_writer.data.len(); + let item_writer = EvmDataWriter::new().write(inner); + + inner_writer = inner_writer.write_raw_bytes(&item_writer.data); + for mut offset_datum in item_writer.offset_data { + offset_datum.offset_shift += 32; + offset_datum.offset_position += shift; + inner_writer.offset_data.push(offset_datum); + } + } + + writer.write_pointer(inner_writer.build()); + } + + fn has_static_size() -> bool { + false + } +} + +impl EvmData for Bytes { + fn read(reader: &mut EvmDataReader) -> EvmResult { + let mut inner_reader = reader.read_pointer()?; + + // Read bytes/string size. + let array_size: usize = inner_reader + .read::() + .map_err(|_| revert("tried to parse bytes/string length out of bounds"))? + .try_into() + .map_err(|_| revert("bytes/string length is too large"))?; + + // Get valid range over the bytes data. + let range = inner_reader.move_cursor(array_size)?; + + let data = inner_reader + .input + .get(range) + .ok_or_else(|| revert("tried to parse bytes/string out of bounds"))?; + + let bytes = Self(data.to_owned()); + + Ok(bytes) + } + + fn write(writer: &mut EvmDataWriter, value: Self) { + let length = value.0.len(); + + // Pad the data. + // Leave it as is if a multiple of 32, otherwise pad to next + // multiple or 32. + let chunks = length / 32; + let padded_size = match length % 32 { + 0 => chunks * 32, + _ => (chunks + 1) * 32, + }; + + let mut value = value.0.to_vec(); + value.resize(padded_size, 0); + + writer.write_pointer( + EvmDataWriter::new() + .write(U256::from(length)) + .write_raw_bytes(&value) + .build(), + ); + } + + fn has_static_size() -> bool { + false + } +} diff --git a/precompiles/utils/src/lib.rs b/precompiles/utils/src/lib.rs new file mode 100644 index 0000000000..f36412a21b --- /dev/null +++ b/precompiles/utils/src/lib.rs @@ -0,0 +1,380 @@ +// This file is part of Astar. + +// Copyright 2019-2022 PureStake Inc. +// Copyright (C) 2022-2023 Stake Technologies Pte.Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later
+//
+// This file is part of Utils package, originally developed by Purestake Inc.
+// Utils package used in Astar Network in terms of GPLv3.
+//
+// Utils is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Utils is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Utils. If not, see <https://www.gnu.org/licenses/>.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+extern crate alloc;
+
+use crate::alloc::borrow::ToOwned;
+use fp_evm::{
+    Context, ExitError, ExitRevert, ExitSucceed, PrecompileFailure, PrecompileHandle,
+    PrecompileOutput,
+};
+use frame_support::{
+    dispatch::{Dispatchable, GetDispatchInfo, PostDispatchInfo},
+    traits::Get,
+};
+use pallet_evm::{GasWeightMapping, Log};
+use sp_core::{H160, H256, U256};
+use sp_std::{marker::PhantomData, vec, vec::Vec};
+
+mod data;
+
+pub use data::{Address, Bytes, EvmData, EvmDataReader, EvmDataWriter};
+pub use precompile_utils_macro::{generate_function_selector, keccak256};
+
+#[cfg(feature = "testing")]
+pub mod testing;
+#[cfg(test)]
+mod tests;
+
+/// Alias for Result returning an EVM precompile error.
+pub type EvmResult<T = ()> = Result<T, PrecompileFailure>;
+
+/// Return an error with provided (static) text.
+/// Using the `revert` function of `Gasometer` is preferred as erroring
+/// consumed all the gas limit and the error message is not easily
+/// retrievable.
+pub fn error<T: Into<alloc::borrow::Cow<'static, str>>>(text: T) -> PrecompileFailure {
+    PrecompileFailure::Error {
+        exit_status: ExitError::Other(text.into()),
+    }
+}
+
+/// Builder for PrecompileOutput.
+#[derive(Clone, Debug)] +pub struct LogsBuilder { + address: H160, +} + +impl LogsBuilder { + /// Create a new builder with no logs. + /// Takes the address of the precompile (usually `context.address`). + pub fn new(address: H160) -> Self { + Self { address } + } + + /// Create a 0-topic log. + #[must_use] + pub fn log0(&self, data: impl Into>) -> Log { + Log { + address: self.address, + topics: vec![], + data: data.into(), + } + } + + /// Create a 1-topic log. + #[must_use] + pub fn log1(&self, topic0: impl Into, data: impl Into>) -> Log { + Log { + address: self.address, + topics: vec![topic0.into()], + data: data.into(), + } + } + + /// Create a 2-topics log. + #[must_use] + pub fn log2( + &self, + topic0: impl Into, + topic1: impl Into, + data: impl Into>, + ) -> Log { + Log { + address: self.address, + topics: vec![topic0.into(), topic1.into()], + data: data.into(), + } + } + + /// Create a 3-topics log. + #[must_use] + pub fn log3( + &self, + topic0: impl Into, + topic1: impl Into, + topic2: impl Into, + data: impl Into>, + ) -> Log { + Log { + address: self.address, + topics: vec![topic0.into(), topic1.into(), topic2.into()], + data: data.into(), + } + } + + /// Create a 4-topics log. + #[must_use] + pub fn log4( + &self, + topic0: impl Into, + topic1: impl Into, + topic2: impl Into, + topic3: impl Into, + data: impl Into>, + ) -> Log { + Log { + address: self.address, + topics: vec![topic0.into(), topic1.into(), topic2.into(), topic3.into()], + data: data.into(), + } + } +} + +/// Extension trait allowing to record logs into a PrecompileHandle. 
+pub trait LogExt { + fn record(self, handle: &mut impl PrecompileHandle) -> EvmResult; + + fn compute_cost(&self) -> EvmResult; +} + +impl LogExt for Log { + fn record(self, handle: &mut impl PrecompileHandle) -> EvmResult { + handle.log(self.address, self.topics, self.data)?; + Ok(()) + } + + fn compute_cost(&self) -> EvmResult { + log_costs(self.topics.len(), self.data.len()) + } +} + +/// Helper functions requiring a Runtime. +/// This runtime must of course implement `pallet_evm::Config`. +#[derive(Clone, Copy, Debug)] +pub struct RuntimeHelper(PhantomData); + +impl RuntimeHelper +where + Runtime: pallet_evm::Config, + Runtime::RuntimeCall: Dispatchable + GetDispatchInfo, +{ + /// Try to dispatch a Substrate call. + /// Return an error if there are not enough gas, or if the call fails. + /// If successful returns the used gas using the Runtime GasWeightMapping. + pub fn try_dispatch( + handle: &mut impl PrecompileHandleExt, + origin: ::RuntimeOrigin, + call: Call, + ) -> EvmResult<()> + where + Runtime::RuntimeCall: From, + { + let call = Runtime::RuntimeCall::from(call); + let dispatch_info = call.get_dispatch_info(); + + // Make sure there is enough gas. + let remaining_gas = handle.remaining_gas(); + let required_gas = Runtime::GasWeightMapping::weight_to_gas(dispatch_info.weight); + if required_gas > remaining_gas { + return Err(PrecompileFailure::Error { + exit_status: ExitError::OutOfGas, + }); + } + + // Dispatch call. + // It may be possible to not record gas cost if the call returns Pays::No. + // However while Substrate handle checking weight while not making the sender pay for it, + // the EVM doesn't. It seems this safer to always record the costs to avoid unmetered + // computations. 
+ let result = call + .dispatch(origin) + .map_err(|e| revert(alloc::format!("Dispatched call failed with error: {:?}", e)))?; + + let used_weight = result.actual_weight; + + let used_gas = + Runtime::GasWeightMapping::weight_to_gas(used_weight.unwrap_or(dispatch_info.weight)); + + handle.record_cost(used_gas)?; + + Ok(()) + } +} + +impl RuntimeHelper +where + Runtime: pallet_evm::Config, +{ + /// Cost of a Substrate DB write in gas. + pub fn db_write_gas_cost() -> u64 { + ::GasWeightMapping::weight_to_gas( + ::DbWeight::get().writes(1), + ) + } + + /// Cost of a Substrate DB read in gas. + pub fn db_read_gas_cost() -> u64 { + ::GasWeightMapping::weight_to_gas( + ::DbWeight::get().reads(1), + ) + } +} + +/// Represents modifiers a Solidity function can be annotated with. +#[derive(Copy, Clone, PartialEq, Eq)] +pub enum FunctionModifier { + /// Function that doesn't modify the state. + View, + /// Function that modifies the state but refuse receiving funds. + /// Correspond to a Solidity function with no modifiers. + NonPayable, + /// Function that modifies the state and accept funds. + Payable, +} + +pub trait PrecompileHandleExt: PrecompileHandle { + #[must_use] + /// Record cost of a log manually. + /// This can be useful to record log costs early when their content have static size. + fn record_log_costs_manual(&mut self, topics: usize, data_len: usize) -> EvmResult; + + #[must_use] + /// Record cost of logs. + fn record_log_costs(&mut self, logs: &[&Log]) -> EvmResult; + + #[must_use] + /// Check that a function call is compatible with the context it is + /// called into. + fn check_function_modifier(&self, modifier: FunctionModifier) -> EvmResult; + + #[must_use] + /// Read the selector from the input data. + fn read_selector(&self) -> EvmResult + where + T: num_enum::TryFromPrimitive; + + #[must_use] + /// Returns a reader of the input, skipping the selector. 
+    fn read_input(&self) -> EvmResult<EvmDataReader>;
+}
+
+/// Compute the EVM gas cost of emitting a log with `topics` topics and
+/// `data_len` bytes of data, guarding every step against overflow.
+pub fn log_costs(topics: usize, data_len: usize) -> EvmResult<u64> {
+    // Cost calculation is copied from EVM code that is not publicly exposed by the crates.
+    // https://github.com/rust-blockchain/evm/blob/master/gasometer/src/costs.rs#L148
+
+    const G_LOG: u64 = 375;
+    const G_LOGDATA: u64 = 8;
+    const G_LOGTOPIC: u64 = 375;
+
+    let topic_cost = G_LOGTOPIC
+        .checked_mul(topics as u64)
+        .ok_or(PrecompileFailure::Error {
+            exit_status: ExitError::OutOfGas,
+        })?;
+
+    let data_cost = G_LOGDATA
+        .checked_mul(data_len as u64)
+        .ok_or(PrecompileFailure::Error {
+            exit_status: ExitError::OutOfGas,
+        })?;
+
+    G_LOG
+        .checked_add(topic_cost)
+        .ok_or(PrecompileFailure::Error {
+            exit_status: ExitError::OutOfGas,
+        })?
+        .checked_add(data_cost)
+        .ok_or(PrecompileFailure::Error {
+            exit_status: ExitError::OutOfGas,
+        })
+}
+
+impl<T: PrecompileHandle> PrecompileHandleExt for T {
+    #[must_use]
+    /// Record cost of a log manually.
+    /// This can be useful to record log costs early when their content have static size.
+    fn record_log_costs_manual(&mut self, topics: usize, data_len: usize) -> EvmResult {
+        self.record_cost(log_costs(topics, data_len)?)?;
+
+        Ok(())
+    }
+
+    #[must_use]
+    /// Record cost of logs.
+    fn record_log_costs(&mut self, logs: &[&Log]) -> EvmResult {
+        for log in logs {
+            self.record_log_costs_manual(log.topics.len(), log.data.len())?;
+        }
+
+        Ok(())
+    }
+
+    #[must_use]
+    /// Check that a function call is compatible with the context it is
+    /// called into.
+    fn check_function_modifier(&self, modifier: FunctionModifier) -> EvmResult {
+        check_function_modifier(self.context(), self.is_static(), modifier)
+    }
+
+    #[must_use]
+    /// Read the selector from the input data.
+    fn read_selector<S>(&self) -> EvmResult<S>
+    where
+        S: num_enum::TryFromPrimitive<Primitive = u32>,
+    {
+        EvmDataReader::read_selector(self.input())
+    }
+
+    #[must_use]
+    /// Returns a reader of the input, skipping the selector.
+    fn read_input(&self) -> EvmResult<EvmDataReader> {
+        EvmDataReader::new_skip_selector(self.input())
+    }
+}
+
+/// Build a `Revert` precompile failure carrying `output` as revert data.
+#[must_use]
+pub fn revert(output: impl AsRef<[u8]>) -> PrecompileFailure {
+    PrecompileFailure::Revert {
+        exit_status: ExitRevert::Reverted,
+        output: output.as_ref().to_owned(),
+    }
+}
+
+/// Build a successful precompile output returning `output`.
+#[must_use]
+pub fn succeed(output: impl AsRef<[u8]>) -> PrecompileOutput {
+    PrecompileOutput {
+        exit_status: ExitSucceed::Returned,
+        output: output.as_ref().to_owned(),
+    }
+}
+
+#[must_use]
+/// Check that a function call is compatible with the context it is
+/// called into.
+fn check_function_modifier(
+    context: &Context,
+    is_static: bool,
+    modifier: FunctionModifier,
+) -> EvmResult {
+    if is_static && modifier != FunctionModifier::View {
+        return Err(revert("can't call non-static function in static context"));
+    }
+
+    if modifier != FunctionModifier::Payable && context.apparent_value > U256::zero() {
+        return Err(revert("function is not payable"));
+    }
+
+    Ok(())
+}
diff --git a/precompiles/utils/src/testing.rs b/precompiles/utils/src/testing.rs
new file mode 100644
index 0000000000..0f5f55ec35
--- /dev/null
+++ b/precompiles/utils/src/testing.rs
@@ -0,0 +1,433 @@
+// This file is part of Astar.
+
+// Copyright 2019-2022 PureStake Inc.
+// Copyright (C) 2022-2023 Stake Technologies Pte.Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later
+//
+// This file is part of Utils package, originally developed by Purestake Inc.
+// Utils package used in Astar Network in terms of GPLv3.
+//
+// Utils is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Utils is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+ +// You should have received a copy of the GNU General Public License +// along with Utils. If not, see . +use super::*; +use assert_matches::assert_matches; +use fp_evm::{ + ExitReason, ExitSucceed, PrecompileOutput, PrecompileResult, PrecompileSet, Transfer, +}; +use sp_std::boxed::Box; + +pub struct Subcall { + pub address: H160, + pub transfer: Option, + pub input: Vec, + pub target_gas: Option, + pub is_static: bool, + pub context: Context, +} + +pub struct SubcallOutput { + pub reason: ExitReason, + pub output: Vec, + pub cost: u64, + pub logs: Vec, +} + +pub trait SubcallTrait: FnMut(Subcall) -> SubcallOutput + 'static {} + +impl SubcallOutput + 'static> SubcallTrait for T {} + +pub type SubcallHandle = Box; + +/// Mock handle to write tests for precompiles. +pub struct MockHandle { + pub gas_limit: u64, + pub gas_used: u64, + pub logs: Vec, + pub subcall_handle: Option, + pub code_address: H160, + pub input: Vec, + pub context: Context, + pub is_static: bool, +} + +impl MockHandle { + pub fn new(code_address: H160, context: Context) -> Self { + Self { + gas_limit: u64::MAX, + gas_used: 0, + logs: vec![], + subcall_handle: None, + code_address, + input: Vec::new(), + context, + is_static: false, + } + } +} + +// Compute the cost of doing a subcall. +// Some parameters cannot be known in advance, so we estimate the worst possible cost. +pub fn call_cost(value: U256, config: &evm::Config) -> u64 { + // Copied from EVM code since not public. 
+ pub const G_CALLVALUE: u64 = 9000; + pub const G_NEWACCOUNT: u64 = 25000; + + fn address_access_cost(is_cold: bool, regular_value: u64, config: &evm::Config) -> u64 { + if config.increase_state_access_gas { + if is_cold { + config.gas_account_access_cold + } else { + config.gas_storage_read_warm + } + } else { + regular_value + } + } + + fn xfer_cost(is_call_or_callcode: bool, transfers_value: bool) -> u64 { + if is_call_or_callcode && transfers_value { + G_CALLVALUE + } else { + 0 + } + } + + fn new_cost( + is_call_or_staticcall: bool, + new_account: bool, + transfers_value: bool, + config: &evm::Config, + ) -> u64 { + let eip161 = !config.empty_considered_exists; + if is_call_or_staticcall { + if eip161 { + if transfers_value && new_account { + G_NEWACCOUNT + } else { + 0 + } + } else if new_account { + G_NEWACCOUNT + } else { + 0 + } + } else { + 0 + } + } + + let transfers_value = value != U256::default(); + let is_cold = true; + let is_call_or_callcode = true; + let is_call_or_staticcall = true; + let new_account = true; + + address_access_cost(is_cold, config.gas_call, config) + + xfer_cost(is_call_or_callcode, transfers_value) + + new_cost(is_call_or_staticcall, new_account, transfers_value, config) +} + +impl PrecompileHandle for MockHandle { + /// Perform subcall in provided context. + /// Precompile specifies in which context the subcall is executed. 
+    fn call(
+        &mut self,
+        address: H160,
+        transfer: Option<Transfer>,
+        input: Vec<u8>,
+        target_gas: Option<u64>,
+        is_static: bool,
+        context: &Context,
+    ) -> (ExitReason, Vec<u8>) {
+        // Charge the worst-case call cost up-front, as the real gasometer would.
+        if self
+            .record_cost(call_cost(context.apparent_value, &evm::Config::london()))
+            .is_err()
+        {
+            return (ExitReason::Error(ExitError::OutOfGas), vec![]);
+        }
+
+        match &mut self.subcall_handle {
+            Some(handle) => {
+                let SubcallOutput {
+                    reason,
+                    output,
+                    cost,
+                    logs,
+                } = handle(Subcall {
+                    address,
+                    transfer,
+                    input,
+                    target_gas,
+                    is_static,
+                    context: context.clone(),
+                });
+
+                if self.record_cost(cost).is_err() {
+                    return (ExitReason::Error(ExitError::OutOfGas), vec![]);
+                }
+
+                for log in logs {
+                    self.log(log.address, log.topics, log.data)
+                        .expect("cannot fail");
+                }
+
+                (reason, output)
+            }
+            None => panic!("no subcall handle registered"),
+        }
+    }
+
+    fn record_cost(&mut self, cost: u64) -> Result<(), ExitError> {
+        self.gas_used += cost;
+
+        if self.gas_used > self.gas_limit {
+            Err(ExitError::OutOfGas)
+        } else {
+            Ok(())
+        }
+    }
+
+    fn remaining_gas(&self) -> u64 {
+        self.gas_limit - self.gas_used
+    }
+
+    fn log(&mut self, address: H160, topics: Vec<H256>, data: Vec<u8>) -> Result<(), ExitError> {
+        self.logs.push(PrettyLog(Log {
+            address,
+            topics,
+            data,
+        }));
+        Ok(())
+    }
+
+    /// Retrieve the code address (what is the address of the precompile being called).
+    fn code_address(&self) -> H160 {
+        self.code_address
+    }
+
+    /// Retrieve the input data the precompile is called with.
+    fn input(&self) -> &[u8] {
+        &self.input
+    }
+
+    /// Retrieve the context in which the precompile is executed.
+    fn context(&self) -> &Context {
+        &self.context
+    }
+
+    /// Whether the precompile call is done statically.
+    fn is_static(&self) -> bool {
+        self.is_static
+    }
+
+    /// Retrieve the gas limit of this call.
+    fn gas_limit(&self) -> Option<u64> {
+        Some(self.gas_limit)
+    }
+}
+
+/// Fluent tester driving a `PrecompileSet` against a `MockHandle`.
+pub struct PrecompilesTester<'p, P> {
+    precompiles: &'p P,
+    handle: MockHandle,
+
+    target_gas: Option<u64>,
+    subcall_handle: Option<SubcallHandle>,
+
+    expected_cost: Option<u64>,
+    expected_logs: Option<Vec<PrettyLog>>,
+}
+
+impl<'p, P: PrecompileSet> PrecompilesTester<'p, P> {
+    pub fn new(
+        precompiles: &'p P,
+        from: impl Into<H160>,
+        to: impl Into<H160>,
+        data: Vec<u8>,
+    ) -> Self {
+        let to = to.into();
+        let mut handle = MockHandle::new(
+            to,
+            Context {
+                address: to,
+                caller: from.into(),
+                apparent_value: U256::zero(),
+            },
+        );
+
+        handle.input = data;
+
+        Self {
+            precompiles,
+            handle,
+
+            target_gas: None,
+            subcall_handle: None,
+
+            expected_cost: None,
+            expected_logs: None,
+        }
+    }
+
+    pub fn with_value(mut self, value: impl Into<U256>) -> Self {
+        self.handle.context.apparent_value = value.into();
+        self
+    }
+
+    pub fn with_subcall_handle(mut self, subcall_handle: impl SubcallTrait) -> Self {
+        self.subcall_handle = Some(Box::new(subcall_handle));
+        self
+    }
+
+    pub fn with_target_gas(mut self, target_gas: Option<u64>) -> Self {
+        self.target_gas = target_gas;
+        self
+    }
+
+    pub fn expect_cost(mut self, cost: u64) -> Self {
+        self.expected_cost = Some(cost);
+        self
+    }
+
+    pub fn expect_no_logs(mut self) -> Self {
+        self.expected_logs = Some(vec![]);
+        self
+    }
+
+    pub fn expect_log(mut self, log: Log) -> Self {
+        self.expected_logs = Some({
+            let mut logs = self.expected_logs.unwrap_or_default();
+            logs.push(PrettyLog(log));
+            logs
+        });
+        self
+    }
+
+    fn assert_optionals(&self) {
+        if let Some(cost) = &self.expected_cost {
+            assert_eq!(&self.handle.gas_used, cost);
+        }
+
+        if let Some(logs) = &self.expected_logs {
+            similar_asserts::assert_eq!(&self.handle.logs, logs);
+        }
+    }
+
+    fn execute(&mut self) -> Option<PrecompileResult> {
+        let handle = &mut self.handle;
+        handle.subcall_handle = self.subcall_handle.take();
+
+        if let Some(gas_limit) = self.target_gas {
+            handle.gas_limit = gas_limit;
+        }
+
+        let res = self.precompiles.execute(handle);
+
+        self.subcall_handle = handle.subcall_handle.take();
+
+        res
+    }
+
+    /// Execute the precompile set and expect some precompile to have been executed, regardless of the
+    /// result.
+    pub fn execute_some(mut self) {
+        let res = self.execute();
+        assert!(res.is_some());
+        self.assert_optionals();
+    }
+
+    /// Execute the precompile set and expect no precompile to have been executed.
+    pub fn execute_none(mut self) {
+        let res = self.execute();
+        assert!(res.is_none());
+        self.assert_optionals();
+    }
+
+    /// Execute the precompile set and check it returns provided output.
+    pub fn execute_returns(mut self, output: Vec<u8>) {
+        let res = self.execute();
+        assert_eq!(
+            res,
+            Some(Ok(PrecompileOutput {
+                exit_status: ExitSucceed::Returned,
+                output
+            }))
+        );
+        self.assert_optionals();
+    }
+
+    /// Execute the precompile set and check if it reverts.
+    /// Take a closure allowing to perform custom matching on the output.
+    pub fn execute_reverts(mut self, check: impl Fn(&[u8]) -> bool) {
+        let res = self.execute();
+        assert_matches!(
+            res,
+            Some(Err(PrecompileFailure::Revert { output, ..}))
+            if check(&output)
+        );
+        self.assert_optionals();
+    }
+
+    /// Execute the precompile set and check it fails with the provided `ExitError`.
+ pub fn execute_error(mut self, error: ExitError) { + let res = self.execute(); + assert_eq!( + res, + Some(Err(PrecompileFailure::Error { exit_status: error })) + ); + self.assert_optionals(); + } +} + +pub trait PrecompileTesterExt: PrecompileSet + Sized { + fn prepare_test( + &self, + from: impl Into, + to: impl Into, + data: Vec, + ) -> PrecompilesTester; +} + +impl PrecompileTesterExt for T { + fn prepare_test( + &self, + from: impl Into, + to: impl Into, + data: Vec, + ) -> PrecompilesTester { + PrecompilesTester::new(self, from, to, data) + } +} + +#[derive(Clone, PartialEq, Eq)] +pub struct PrettyLog(Log); + +impl core::fmt::Debug for PrettyLog { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + let bytes = self + .0 + .data + .iter() + .map(|b| format!("{:02X}", b)) + .collect::>() + .join(""); + + let message = String::from_utf8(self.0.data.clone()).ok(); + + f.debug_struct("Log") + .field("address", &self.0.address) + .field("topics", &self.0.topics) + .field("data", &bytes) + .field("data_utf8", &message) + .finish() + } +} diff --git a/precompiles/utils/src/tests.rs b/precompiles/utils/src/tests.rs new file mode 100644 index 0000000000..6756687bbe --- /dev/null +++ b/precompiles/utils/src/tests.rs @@ -0,0 +1,746 @@ +// This file is part of Astar. + +// Copyright 2019-2022 PureStake Inc. +// Copyright (C) 2022-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later +// +// This file is part of Utils package, originally developed by Purestake Inc. +// Utils package used in Astar Network in terms of GPLv3. +// +// Utils is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Utils is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Utils. If not, see . + +use super::*; +use hex_literal::hex; +use sp_core::{H256, U256}; + +fn u256_repeat_byte(byte: u8) -> U256 { + let value = H256::repeat_byte(byte); + + U256::from_big_endian(value.as_bytes()) +} + +// When debugging it is useful to display data in chunks of 32 bytes. +#[allow(dead_code)] +fn display_bytes(bytes: &[u8]) { + bytes + .chunks_exact(32) + .map(|chunk| H256::from_slice(chunk)) + .for_each(|hash| println!("{:?}", hash)); +} + +#[test] +fn write_bool() { + let value = true; + + let writer_output = EvmDataWriter::new().write(value).build(); + + let mut expected_output = [0u8; 32]; + expected_output[31] = 1; + + assert_eq!(writer_output, expected_output); +} + +#[test] +fn read_bool() { + let value = true; + + let writer_output = EvmDataWriter::new().write(value).build(); + + let mut reader = EvmDataReader::new(&writer_output); + let parsed: bool = reader.read().expect("to correctly parse bool"); + + assert_eq!(value, parsed); +} + +#[test] +fn write_u64() { + let value = 42u64; + + let writer_output = EvmDataWriter::new().write(value).build(); + + let mut expected_output = [0u8; 32]; + expected_output[24..].copy_from_slice(&value.to_be_bytes()); + + assert_eq!(writer_output, expected_output); +} + +#[test] +fn read_u64() { + let value = 42u64; + let writer_output = EvmDataWriter::new().write(value).build(); + + let mut reader = EvmDataReader::new(&writer_output); + let parsed: u64 = reader.read().expect("to correctly parse u64"); + + assert_eq!(value, parsed); +} + +#[test] +fn write_u128() { + let value = 42u128; + + let writer_output = EvmDataWriter::new().write(value).build(); + + let mut expected_output = 
[0u8; 32]; + expected_output[16..].copy_from_slice(&value.to_be_bytes()); + + assert_eq!(writer_output, expected_output); +} + +#[test] +fn read_u128() { + let value = 42u128; + let writer_output = EvmDataWriter::new().write(value).build(); + + let mut reader = EvmDataReader::new(&writer_output); + let parsed: u128 = reader.read().expect("to correctly parse u128"); + + assert_eq!(value, parsed); +} + +#[test] +fn write_u256() { + let value = U256::from(42); + + let writer_output = EvmDataWriter::new().write(value).build(); + + let mut expected_output = [0u8; 32]; + value.to_big_endian(&mut expected_output); + + assert_eq!(writer_output, expected_output); +} + +#[test] +fn read_u256() { + let value = U256::from(42); + let writer_output = EvmDataWriter::new().write(value).build(); + + let mut reader = EvmDataReader::new(&writer_output); + let parsed: U256 = reader.read().expect("to correctly parse U256"); + + assert_eq!(value, parsed); +} + +#[test] +fn read_selector() { + use sha3::{Digest, Keccak256}; + + #[precompile_utils_macro::generate_function_selector] + #[derive(Debug, PartialEq)] + enum FakeAction { + Action1 = "action1()", + } + + let selector = &Keccak256::digest(b"action1()")[0..4]; + + let parsed_selector = + EvmDataReader::read_selector::(selector).expect("there is a selector"); + EvmDataReader::new_skip_selector(selector).expect("there is a selector"); + + assert_eq!(parsed_selector, FakeAction::Action1) +} + +#[test] +#[should_panic(expected = "to correctly parse U256")] +fn read_u256_too_short() { + let value = U256::from(42); + let writer_output = EvmDataWriter::new().write(value).build(); + + let mut reader = EvmDataReader::new(&writer_output[0..31]); + let _: U256 = reader.read().expect("to correctly parse U256"); +} + +#[test] +fn write_h256() { + let mut raw = [0u8; 32]; + raw[0] = 42; + raw[12] = 43; + raw[31] = 44; + + let value = H256::from(raw); + + let output = EvmDataWriter::new().write(value).build(); + + assert_eq!(&output, &raw); +} + 
+#[test] +fn tmp() { + let u = U256::from(1_000_000_000); + println!("U256={:?}", u.0); +} + +#[test] +fn read_h256() { + let mut raw = [0u8; 32]; + raw[0] = 42; + raw[12] = 43; + raw[31] = 44; + let value = H256::from(raw); + let writer_output = EvmDataWriter::new().write(value).build(); + + let mut reader = EvmDataReader::new(&writer_output); + let parsed: H256 = reader.read().expect("to correctly parse H256"); + + assert_eq!(value, parsed); +} + +#[test] +#[should_panic(expected = "to correctly parse H256")] +fn read_h256_too_short() { + let mut raw = [0u8; 32]; + raw[0] = 42; + raw[12] = 43; + raw[31] = 44; + let value = H256::from(raw); + let writer_output = EvmDataWriter::new().write(value).build(); + + let mut reader = EvmDataReader::new(&writer_output[0..31]); + let _: H256 = reader.read().expect("to correctly parse H256"); +} + +#[test] +fn write_address() { + let value = H160::repeat_byte(0xAA); + + let output = EvmDataWriter::new().write(Address(value)).build(); + + assert_eq!(output.len(), 32); + assert_eq!(&output[12..32], value.as_bytes()); +} + +#[test] +fn read_address() { + let value = H160::repeat_byte(0xAA); + let writer_output = EvmDataWriter::new().write(Address(value)).build(); + + let mut reader = EvmDataReader::new(&writer_output); + let parsed: Address = reader.read().expect("to correctly parse Address"); + + assert_eq!(value, parsed.0); +} + +#[test] +fn write_h256_array() { + let array = vec![ + H256::repeat_byte(0x11), + H256::repeat_byte(0x22), + H256::repeat_byte(0x33), + H256::repeat_byte(0x44), + H256::repeat_byte(0x55), + ]; + let writer_output = EvmDataWriter::new().write(array.clone()).build(); + assert_eq!(writer_output.len(), 0xE0); + + // We can read this "manualy" using simpler functions since arrays are 32-byte aligned. 
+ let mut reader = EvmDataReader::new(&writer_output); + + assert_eq!(reader.read::().expect("read offset"), 32.into()); + assert_eq!(reader.read::().expect("read size"), 5.into()); + assert_eq!(reader.read::().expect("read 1st"), array[0]); + assert_eq!(reader.read::().expect("read 2nd"), array[1]); + assert_eq!(reader.read::().expect("read 3rd"), array[2]); + assert_eq!(reader.read::().expect("read 4th"), array[3]); + assert_eq!(reader.read::().expect("read 5th"), array[4]); +} + +#[test] +fn read_h256_array() { + let array = vec![ + H256::repeat_byte(0x11), + H256::repeat_byte(0x22), + H256::repeat_byte(0x33), + H256::repeat_byte(0x44), + H256::repeat_byte(0x55), + ]; + let writer_output = EvmDataWriter::new().write(array.clone()).build(); + + let mut reader = EvmDataReader::new(&writer_output); + let parsed: Vec = reader.read().expect("to correctly parse Vec"); + + assert_eq!(array, parsed); +} + +#[test] +fn write_u256_array() { + let array = vec![ + u256_repeat_byte(0x11), + u256_repeat_byte(0x22), + u256_repeat_byte(0x33), + u256_repeat_byte(0x44), + u256_repeat_byte(0x55), + ]; + let writer_output = EvmDataWriter::new().write(array.clone()).build(); + assert_eq!(writer_output.len(), 0xE0); + + // We can read this "manualy" using simpler functions since arrays are 32-byte aligned. 
+ let mut reader = EvmDataReader::new(&writer_output); + + assert_eq!(reader.read::().expect("read offset"), 32.into()); + assert_eq!(reader.read::().expect("read size"), 5.into()); + assert_eq!(reader.read::().expect("read 1st"), array[0]); + assert_eq!(reader.read::().expect("read 2nd"), array[1]); + assert_eq!(reader.read::().expect("read 3rd"), array[2]); + assert_eq!(reader.read::().expect("read 4th"), array[3]); + assert_eq!(reader.read::().expect("read 5th"), array[4]); +} + +#[test] +fn read_u256_array() { + let array = vec![ + u256_repeat_byte(0x11), + u256_repeat_byte(0x22), + u256_repeat_byte(0x33), + u256_repeat_byte(0x44), + u256_repeat_byte(0x55), + ]; + let writer_output = EvmDataWriter::new().write(array.clone()).build(); + + let mut reader = EvmDataReader::new(&writer_output); + let parsed: Vec = reader.read().expect("to correctly parse Vec"); + + assert_eq!(array, parsed); +} + +#[test] +fn write_address_array() { + let array = vec![ + Address(H160::repeat_byte(0x11)), + Address(H160::repeat_byte(0x22)), + Address(H160::repeat_byte(0x33)), + Address(H160::repeat_byte(0x44)), + Address(H160::repeat_byte(0x55)), + ]; + let writer_output = EvmDataWriter::new().write(array.clone()).build(); + + // We can read this "manualy" using simpler functions since arrays are 32-byte aligned. + let mut reader = EvmDataReader::new(&writer_output); + + assert_eq!(reader.read::().expect("read offset"), 32.into()); + assert_eq!(reader.read::().expect("read size"), 5.into()); + assert_eq!(reader.read::
<Address>().expect("read 1st"), array[0]); + assert_eq!(reader.read::
<Address>().expect("read 2nd"), array[1]); + assert_eq!(reader.read::
<Address>().expect("read 3rd"), array[2]); + assert_eq!(reader.read::
<Address>().expect("read 4th"), array[3]); + assert_eq!(reader.read::
<Address>().expect("read 5th"), array[4]); +} + +#[test] +fn read_address_array() { + let array = vec![ + Address(H160::repeat_byte(0x11)), + Address(H160::repeat_byte(0x22)), + Address(H160::repeat_byte(0x33)), + Address(H160::repeat_byte(0x44)), + Address(H160::repeat_byte(0x55)), + ]; + let writer_output = EvmDataWriter::new().write(array.clone()).build(); + + let mut reader = EvmDataReader::new(&writer_output); + let parsed: Vec
<Address> = reader.read().expect("to correctly parse Vec<Address>"); + + assert_eq!(array, parsed); +} + +#[test] +fn read_address_array_size_too_big() { + let array = vec![ + Address(H160::repeat_byte(0x11)), + Address(H160::repeat_byte(0x22)), + Address(H160::repeat_byte(0x33)), + Address(H160::repeat_byte(0x44)), + Address(H160::repeat_byte(0x55)), + ]; + let mut writer_output = EvmDataWriter::new().write(array).build(); + + U256::from(6u32).to_big_endian(&mut writer_output[0x20..0x40]); + + let mut reader = EvmDataReader::new(&writer_output); + + match reader.read::<Vec<Address>>() { + Ok(_) => panic!("should not parse correctly"), + Err(PrecompileFailure::Revert { output: err, .. }) => { + assert_eq!(err, b"tried to parse H160 out of bounds") + } + Err(_) => panic!("unexpected error"), + } +} + +#[test] +fn write_address_nested_array() { + let array = vec![ + vec![ + Address(H160::repeat_byte(0x11)), + Address(H160::repeat_byte(0x22)), + Address(H160::repeat_byte(0x33)), + ], + vec![ + Address(H160::repeat_byte(0x44)), + Address(H160::repeat_byte(0x55)), + ], + ]; + let writer_output = EvmDataWriter::new().write(array.clone()).build(); + assert_eq!(writer_output.len(), 0x160); + + // We can read this "manually" using simpler functions since arrays are 32-byte aligned. + let mut reader = EvmDataReader::new(&writer_output); + + assert_eq!(reader.read::<U256>().expect("read offset"), 0x20.into()); // 0x00 + assert_eq!(reader.read::<U256>().expect("read size"), 2.into()); // 0x20 + assert_eq!(reader.read::<U256>().expect("read 1st offset"), 0x40.into()); // 0x40 + assert_eq!(reader.read::<U256>().expect("read 2nd offset"), 0xc0.into()); // 0x60 + assert_eq!(reader.read::<U256>().expect("read 1st size"), 3.into()); // 0x80 + assert_eq!(reader.read::
<Address>().expect("read 1-1"), array[0][0]); // 0xA0 + assert_eq!(reader.read::
<Address>().expect("read 1-2"), array[0][1]); // 0xC0 + assert_eq!(reader.read::
<Address>().expect("read 1-3"), array[0][2]); // 0xE0 + assert_eq!(reader.read::<U256>().expect("read 2nd size"), 2.into()); // 0x100 + assert_eq!(reader.read::
<Address>().expect("read 2-1"), array[1][0]); // 0x120 + assert_eq!(reader.read::
<Address>().expect("read 2-2"), array[1][1]); // 0x140 +} + +#[test] +fn read_address_nested_array() { + let array = vec![ + vec![ + Address(H160::repeat_byte(0x11)), + Address(H160::repeat_byte(0x22)), + Address(H160::repeat_byte(0x33)), + ], + vec![ + Address(H160::repeat_byte(0x44)), + Address(H160::repeat_byte(0x55)), + ], + ]; + let writer_output = EvmDataWriter::new().write(array.clone()).build(); + + let mut reader = EvmDataReader::new(&writer_output); + let parsed: Vec<Vec<Address>> = reader.read().expect("to correctly parse Vec<Vec<Address>>"); + + assert_eq!(array, parsed); +} + +#[test] + +fn write_multiple_arrays() { + let array1 = vec![ + Address(H160::repeat_byte(0x11)), + Address(H160::repeat_byte(0x22)), + Address(H160::repeat_byte(0x33)), + ]; + + let array2 = vec![H256::repeat_byte(0x44), H256::repeat_byte(0x55)]; + + let writer_output = EvmDataWriter::new() + .write(array1.clone()) + .write(array2.clone()) + .build(); + + assert_eq!(writer_output.len(), 0x120); + + // We can read this "manually" using simpler functions since arrays are 32-byte aligned. + let mut reader = EvmDataReader::new(&writer_output); + + assert_eq!(reader.read::<U256>().expect("read 1st offset"), 0x40.into()); // 0x00 + assert_eq!(reader.read::<U256>().expect("read 2nd offset"), 0xc0.into()); // 0x20 + assert_eq!(reader.read::<U256>().expect("read 1st size"), 3.into()); // 0x40 + assert_eq!(reader.read::
<Address>().expect("read 1-1"), array1[0]); // 0x60 + assert_eq!(reader.read::
<Address>().expect("read 1-2"), array1[1]); // 0x80 + assert_eq!(reader.read::
<Address>().expect("read 1-3"), array1[2]); // 0xA0 + assert_eq!(reader.read::<U256>().expect("read 2nd size"), 2.into()); // 0xC0 + assert_eq!(reader.read::<H256>().expect("read 2-1"), array2[0]); // 0xE0 + assert_eq!(reader.read::<H256>().expect("read 2-2"), array2[1]); // 0x100 +} + +#[test] +fn read_multiple_arrays() { + let array1 = vec![ + Address(H160::repeat_byte(0x11)), + Address(H160::repeat_byte(0x22)), + Address(H160::repeat_byte(0x33)), + ]; + + let array2 = vec![H256::repeat_byte(0x44), H256::repeat_byte(0x55)]; + + let writer_output = EvmDataWriter::new() + .write(array1.clone()) + .write(array2.clone()) + .build(); + + // offset 0x20 + // offset 0x40 + // size 0x60 + // 3 addresses 0xC0 + // size 0xE0 + // 2 H256 0x120 + assert_eq!(writer_output.len(), 0x120); + + let mut reader = EvmDataReader::new(&writer_output); + + let parsed: Vec
<Address> = reader.read().expect("to correctly parse Vec
"); + assert_eq!(array1, parsed); + + let parsed: Vec = reader.read().expect("to correctly parse Vec"); + assert_eq!(array2, parsed); +} + +#[test] +fn read_bytes() { + let data = b"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod\ + tempor incididunt ut labore et dolore magna aliqua."; + let writer_output = EvmDataWriter::new().write(Bytes::from(&data[..])).build(); + + let mut reader = EvmDataReader::new(&writer_output); + let parsed: Bytes = reader.read().expect("to correctly parse Bytes"); + + assert_eq!(data, parsed.as_bytes()); +} + +#[test] +fn write_bytes() { + let data = b"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod\ + tempor incididunt ut labore et dolore magna aliqua."; + + let writer_output = EvmDataWriter::new().write(Bytes::from(&data[..])).build(); + + // We can read this "manualy" using simpler functions. + let mut reader = EvmDataReader::new(&writer_output); + + // We pad data to a multiple of 32 bytes. + let mut padded = data.to_vec(); + assert!(data.len() < 0x80); + padded.resize(0x80, 0); + + assert_eq!(reader.read::().expect("read offset"), 32.into()); + assert_eq!(reader.read::().expect("read size"), data.len().into()); + let mut read = |e| reader.read::().expect(e); // shorthand + assert_eq!(read("read part 1"), H256::from_slice(&padded[0x00..0x20])); + assert_eq!(read("read part 2"), H256::from_slice(&padded[0x20..0x40])); + assert_eq!(read("read part 3"), H256::from_slice(&padded[0x40..0x60])); + assert_eq!(read("read part 4"), H256::from_slice(&padded[0x60..0x80])); +} + +#[test] +fn read_string() { + let data = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod\ + tempor incididunt ut labore et dolore magna aliqua."; + let writer_output = EvmDataWriter::new().write(Bytes::from(data)).build(); + + let mut reader = EvmDataReader::new(&writer_output); + let parsed: Bytes = reader.read().expect("to correctly parse Bytes"); + + assert_eq!(data, 
parsed.as_str().expect("valid utf8")); +} + +#[test] +fn write_string() { + let data = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod\ + tempor incididunt ut labore et dolore magna aliqua."; + + let writer_output = EvmDataWriter::new().write(Bytes::from(data)).build(); + + // We can read this "manualy" using simpler functions. + let mut reader = EvmDataReader::new(&writer_output); + + // We pad data to next multiple of 32 bytes. + let mut padded = data.as_bytes().to_vec(); + assert!(data.len() < 0x80); + padded.resize(0x80, 0); + + assert_eq!(reader.read::().expect("read offset"), 32.into()); + assert_eq!(reader.read::().expect("read size"), data.len().into()); + let mut read = |e| reader.read::().expect(e); // shorthand + assert_eq!(read("read part 1"), H256::from_slice(&padded[0x00..0x20])); + assert_eq!(read("read part 2"), H256::from_slice(&padded[0x20..0x40])); + assert_eq!(read("read part 3"), H256::from_slice(&padded[0x40..0x60])); + assert_eq!(read("read part 4"), H256::from_slice(&padded[0x60..0x80])); +} + +#[test] +fn write_vec_bytes() { + let data = b"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod\ + tempor incididunt ut labore et dolore magna aliqua."; + + let writer_output = EvmDataWriter::new() + .write(vec![Bytes::from(&data[..]), Bytes::from(&data[..])]) + .build(); + + writer_output + .chunks_exact(32) + .map(|chunk| H256::from_slice(chunk)) + .for_each(|hash| println!("{:?}", hash)); + + // We pad data to a multiple of 32 bytes. 
+ let mut padded = data.to_vec(); + assert!(data.len() < 0x80); + padded.resize(0x80, 0); + + let mut reader = EvmDataReader::new(&writer_output); + + // Offset of vec + assert_eq!(reader.read::().expect("read offset"), 32.into()); + + // Length of vec + assert_eq!(reader.read::().expect("read offset"), 2.into()); + + // Relative offset of first bytgmes object + assert_eq!(reader.read::().expect("read offset"), 0x40.into()); + // Relative offset of second bytes object + assert_eq!(reader.read::().expect("read offset"), 0xe0.into()); + + // Length of first bytes object + assert_eq!(reader.read::().expect("read size"), data.len().into()); + + // First byte objects data + let mut read = |e| reader.read::().expect(e); // shorthand + assert_eq!(read("read part 1"), H256::from_slice(&padded[0x00..0x20])); + assert_eq!(read("read part 2"), H256::from_slice(&padded[0x20..0x40])); + assert_eq!(read("read part 3"), H256::from_slice(&padded[0x40..0x60])); + assert_eq!(read("read part 4"), H256::from_slice(&padded[0x60..0x80])); + + // Length of second bytes object + assert_eq!(reader.read::().expect("read size"), data.len().into()); + + // Second byte objects data + let mut read = |e| reader.read::().expect(e); // shorthand + assert_eq!(read("read part 1"), H256::from_slice(&padded[0x00..0x20])); + assert_eq!(read("read part 2"), H256::from_slice(&padded[0x20..0x40])); + assert_eq!(read("read part 3"), H256::from_slice(&padded[0x40..0x60])); + assert_eq!(read("read part 4"), H256::from_slice(&padded[0x60..0x80])); +} + +#[test] +fn read_vec_of_bytes() { + let data = b"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod\ + tempor incididunt ut labore et dolore magna aliqua."; + + let writer_output = EvmDataWriter::new() + .write(vec![Bytes::from(&data[..]), Bytes::from(&data[..])]) + .build(); + + writer_output + .chunks_exact(32) + .map(|chunk| H256::from_slice(chunk)) + .for_each(|hash| println!("{:?}", hash)); + + let mut reader = 
EvmDataReader::new(&writer_output); + let parsed: Vec = reader.read().expect("to correctly parse Vec"); + + assert_eq!(vec![Bytes::from(&data[..]), Bytes::from(&data[..])], parsed); +} + +// The following test parses input data generated by web3 from a Solidity contract. +// This is important to test on external data since all the above tests can only test consistency +// between `EvmDataReader` and `EvmDataWriter`. +// +// It also provides an example on how to impl `EvmData` for Solidity structs. +// +// struct MultiLocation { +// uint8 parents; +// bytes [] interior; +// } +// +// function transfer( +// address currency_address, +// uint256 amount, +// MultiLocation memory destination, +// uint64 weight +// ) external; + +#[derive(Clone, Debug, Eq, PartialEq)] +struct MultiLocation { + parents: u8, + interior: Vec, +} + +impl EvmData for MultiLocation { + fn read(reader: &mut EvmDataReader) -> EvmResult { + let (parents, interior) = reader.read()?; + Ok(MultiLocation { parents, interior }) + } + + fn write(writer: &mut EvmDataWriter, value: Self) { + EvmData::write(writer, (value.parents, value.interior)); + } + + fn has_static_size() -> bool { + <(u8, Vec)>::has_static_size() + } +} + +#[crate::generate_function_selector] +#[derive(Debug, PartialEq)] +pub enum Action { + TransferMultiAsset = "transfer_multiasset((uint8,bytes[]),uint256,(uint8,bytes[]),uint64)", +} + +#[test] +fn read_complex_solidity_function() { + // Function call data generated by web3. 
+ let data = hex!( + "b38c60fa + 0000000000000000000000000000000000000000000000000000000000000080 + 0000000000000000000000000000000000000000000000000000000000000064 + 00000000000000000000000000000000000000000000000000000000000001a0 + 0000000000000000000000000000000000000000000000000000000000000064 + 0000000000000000000000000000000000000000000000000000000000000001 + 0000000000000000000000000000000000000000000000000000000000000040 + 0000000000000000000000000000000000000000000000000000000000000002 + 0000000000000000000000000000000000000000000000000000000000000040 + 0000000000000000000000000000000000000000000000000000000000000080 + 0000000000000000000000000000000000000000000000000000000000000005 + 00000003e8000000000000000000000000000000000000000000000000000000 + 0000000000000000000000000000000000000000000000000000000000000002 + 0403000000000000000000000000000000000000000000000000000000000000 + 0000000000000000000000000000000000000000000000000000000000000001 + 0000000000000000000000000000000000000000000000000000000000000040 + 0000000000000000000000000000000000000000000000000000000000000001 + 0000000000000000000000000000000000000000000000000000000000000020 + 0000000000000000000000000000000000000000000000000000000000000022 + 0101010101010101010101010101010101010101010101010101010101010101 + 0100000000000000000000000000000000000000000000000000000000000000" + ); + + let selector = EvmDataReader::read_selector::(&data).expect("to read selector"); + let mut reader = EvmDataReader::new_skip_selector(&data).expect("to read selector"); + + assert_eq!(selector, Action::TransferMultiAsset); + // asset + assert_eq!( + reader.read::().unwrap(), + MultiLocation { + parents: 1, + interior: vec![ + Bytes::from(&hex!("00000003e8")[..]), + Bytes::from(&hex!("0403")[..]), + ], + } + ); + + // amount + assert_eq!(reader.read::().unwrap(), 100u32.into()); + + // destination + assert_eq!( + reader.read::().unwrap(), + MultiLocation { + parents: 1, + interior: vec![Bytes::from( + 
&hex!("01010101010101010101010101010101010101010101010101010101010101010100")[..] + )], + } + ); + + // weight + assert_eq!(reader.read::().unwrap(), 100u32.into()); +} diff --git a/precompiles/xcm/Cargo.toml b/precompiles/xcm/Cargo.toml new file mode 100644 index 0000000000..b1ee27397e --- /dev/null +++ b/precompiles/xcm/Cargo.toml @@ -0,0 +1,65 @@ +[package] +name = "pallet-evm-precompile-xcm" +description = "Basic XCM support for EVM." +version = "0.9.0" +authors.workspace = true +edition.workspace = true +homepage.workspace = true +repository.workspace = true + +[dependencies] +log = { workspace = true } +num_enum = { workspace = true } +pallet-evm-precompile-assets-erc20 = { workspace = true } +pallet-xcm = { workspace = true } +precompile-utils = { workspace = true } + +# Substrate +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-assets = { workspace = true } +parity-scale-codec = { workspace = true, features = ["max-encoded-len"] } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-std = { workspace = true } + +# Frontier +fp-evm = { workspace = true } +pallet-evm = { workspace = true } + +# Polkadot +xcm = { workspace = true } +xcm-executor = { workspace = true } + +[dev-dependencies] +derive_more = { workspace = true } +hex-literal = { workspace = true } +scale-info = { workspace = true } +serde = { workspace = true } + +precompile-utils = { workspace = true, features = ["testing"] } + +pallet-balances = { workspace = true } +pallet-timestamp = { workspace = true } +sp-runtime = { workspace = true } +xcm-builder = { workspace = true, features = ["std"] } + +[features] +default = ["std"] +std = [ + "parity-scale-codec/std", + "fp-evm/std", + "frame-support/std", + "frame-system/std", + "pallet-assets/std", + "pallet-evm/std", + "pallet-xcm/std", + "pallet-evm-precompile-assets-erc20/std", + "precompile-utils/std", + "sp-core/std", + "sp-std/std", + "sp-io/std", + "xcm/std", + "xcm-executor/std", +] 
+runtime-benchmarks = [] diff --git a/precompiles/xcm/XCM.sol b/precompiles/xcm/XCM.sol new file mode 100644 index 0000000000..19af7ae9b7 --- /dev/null +++ b/precompiles/xcm/XCM.sol @@ -0,0 +1,117 @@ +pragma solidity ^0.8.0; + +/** + * @title XCM interface. + */ +interface XCM { + /** + * @dev Withdraw assets using PalletXCM call. + * @param asset_id - list of XC20 asset addresses + * @param asset_amount - list of transfer amounts (must match with asset addresses above) + * @param recipient_account_id - SS58 public key of the destination account + * @param is_relay - set `true` for using relay chain as reserve + * @param parachain_id - set parachain id of reserve parachain (when is_relay set to false) + * @param fee_index - index of asset_id item that should be used as a XCM fee + * @return bool confirmation whether the XCM message sent. + * + * How method check that assets list is valid: + * - all assets resolved to multi-location (on runtime level) + * - all assets has corresponded amount (lenght of assets list matched to amount list) + */ + function assets_withdraw( + address[] calldata asset_id, + uint256[] calldata asset_amount, + bytes32 recipient_account_id, + bool is_relay, + uint256 parachain_id, + uint256 fee_index + ) external returns (bool); + + /** + * @dev Withdraw assets using PalletXCM call. + * @param asset_id - list of XC20 asset addresses + * @param asset_amount - list of transfer amounts (must match with asset addresses above) + * @param recipient_account_id - ETH address of the destination account + * @param is_relay - set `true` for using relay chain as reserve + * @param parachain_id - set parachain id of reserve parachain (when is_relay set to false) + * @param fee_index - index of asset_id item that should be used as a XCM fee + * @return bool confirmation whether the XCM message sent. 
+ * + * How method check that assets list is valid: + * - all assets resolved to multi-location (on runtime level) + * - all assets has corresponded amount (lenght of assets list matched to amount list) + */ + function assets_withdraw( + address[] calldata asset_id, + uint256[] calldata asset_amount, + address recipient_account_id, + bool is_relay, + uint256 parachain_id, + uint256 fee_index + ) external returns (bool); + + /** + * @dev Execute a transaction on a remote chain. + * @param parachain_id - destination parachain Id (ignored if is_relay is true) + * @param is_relay - if true, destination is relay_chain, if false it is parachain (see previous argument) + * @param payment_asset_id - ETH address of the local asset derivate used to pay for execution in the destination chain + * @param payment_amount - amount of payment asset to use for execution payment - should cover cost of XCM instructions + Transact call weight. + * @param call - encoded call data (must be decodable by remote chain) + * @param transact_weight - max weight that the encoded call is allowed to consume in the destination chain + * @return bool confirmation whether the XCM message sent. + */ + function remote_transact( + uint256 parachain_id, + bool is_relay, + address payment_asset_id, + uint256 payment_amount, + bytes calldata call, + uint64 transact_weight + ) external returns (bool); + + /** + * @dev Reserve transfer assets using PalletXCM call. + * @param asset_id - list of XC20 asset addresses + * @param asset_amount - list of transfer amounts (must match with asset addresses above) + * @param recipient_account_id - SS58 public key of the destination account + * @param is_relay - set `true` for using relay chain as destination + * @param parachain_id - set parachain id of destination parachain (when is_relay set to false) + * @param fee_index - index of asset_id item that should be used as a XCM fee + * @return A boolean confirming whether the XCM message sent. 
+ * + * How method check that assets list is valid: + * - all assets resolved to multi-location (on runtime level) + * - all assets has corresponded amount (lenght of assets list matched to amount list) + */ + function assets_reserve_transfer( + address[] calldata asset_id, + uint256[] calldata asset_amount, + bytes32 recipient_account_id, + bool is_relay, + uint256 parachain_id, + uint256 fee_index + ) external returns (bool); + + /** + * @dev Reserve transfer using PalletXCM call. + * @param asset_id - list of XC20 asset addresses + * @param asset_amount - list of transfer amounts (must match with asset addresses above) + * @param recipient_account_id - ETH address of the destination account + * @param is_relay - set `true` for using relay chain as destination + * @param parachain_id - set parachain id of destination parachain (when is_relay set to false) + * @param fee_index - index of asset_id item that should be used as a XCM fee + * @return A boolean confirming whether the XCM message sent. + * + * How method check that assets list is valid: + * - all assets resolved to multi-location (on runtime level) + * - all assets has corresponded amount (lenght of assets list matched to amount list) + */ + function assets_reserve_transfer( + address[] calldata asset_id, + uint256[] calldata asset_amount, + address recipient_account_id, + bool is_relay, + uint256 parachain_id, + uint256 fee_index + ) external returns (bool); +} diff --git a/precompiles/xcm/src/lib.rs b/precompiles/xcm/src/lib.rs new file mode 100644 index 0000000000..985efcdf95 --- /dev/null +++ b/precompiles/xcm/src/lib.rs @@ -0,0 +1,382 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +#![cfg_attr(not(feature = "std"), no_std)] +#![cfg_attr(test, feature(assert_matches))] + +use fp_evm::{PrecompileHandle, PrecompileOutput}; +use frame_support::{ + dispatch::{Dispatchable, GetDispatchInfo, PostDispatchInfo}, + pallet_prelude::Weight, + traits::Get, +}; +use pallet_evm::{AddressMapping, Precompile}; +use sp_core::{H160, H256, U256}; +use sp_std::marker::PhantomData; +use sp_std::prelude::*; + +use xcm::latest::prelude::*; +use xcm_executor::traits::Convert; + +use pallet_evm_precompile_assets_erc20::AddressToAssetId; +use precompile_utils::{ + revert, succeed, Address, Bytes, EvmDataWriter, EvmResult, FunctionModifier, + PrecompileHandleExt, RuntimeHelper, +}; + +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; + +#[precompile_utils::generate_function_selector] +#[derive(Debug, PartialEq)] +pub enum Action { + AssetsWithdrawNative = "assets_withdraw(address[],uint256[],bytes32,bool,uint256,uint256)", + AssetsWithdrawEvm = "assets_withdraw(address[],uint256[],address,bool,uint256,uint256)", + RemoteTransact = "remote_transact(uint256,bool,address,uint256,bytes,uint64)", + AssetsReserveTransferNative = + "assets_reserve_transfer(address[],uint256[],bytes32,bool,uint256,uint256)", + AssetsReserveTransferEvm = + "assets_reserve_transfer(address[],uint256[],address,bool,uint256,uint256)", +} + +/// Dummy H160 
address representing native currency (e.g. ASTR or SDN) +const NATIVE_ADDRESS: H160 = H160::zero(); + +/// A precompile that expose XCM related functions. +pub struct XcmPrecompile(PhantomData<(T, C)>); + +impl Precompile for XcmPrecompile +where + R: pallet_evm::Config + + pallet_xcm::Config + + pallet_assets::Config + + AddressToAssetId<::AssetId>, + <::RuntimeCall as Dispatchable>::RuntimeOrigin: + From>, + ::RuntimeCall: + From> + Dispatchable + GetDispatchInfo, + C: Convert::AssetId>, +{ + fn execute(handle: &mut impl PrecompileHandle) -> EvmResult { + log::trace!(target: "xcm-precompile", "In XCM precompile"); + + let selector = handle.read_selector()?; + + handle.check_function_modifier(FunctionModifier::NonPayable)?; + + // Dispatch the call + match selector { + Action::AssetsWithdrawNative => { + Self::assets_withdraw(handle, BeneficiaryType::Account32) + } + Action::AssetsWithdrawEvm => Self::assets_withdraw(handle, BeneficiaryType::Account20), + Action::RemoteTransact => Self::remote_transact(handle), + Action::AssetsReserveTransferNative => { + Self::assets_reserve_transfer(handle, BeneficiaryType::Account32) + } + Action::AssetsReserveTransferEvm => { + Self::assets_reserve_transfer(handle, BeneficiaryType::Account20) + } + } + } +} + +/// The supported beneficiary account types +enum BeneficiaryType { + /// 256 bit (32 byte) public key + Account32, + /// 160 bit (20 byte) address is expected + Account20, +} + +impl XcmPrecompile +where + R: pallet_evm::Config + + pallet_xcm::Config + + pallet_assets::Config + + AddressToAssetId<::AssetId>, + <::RuntimeCall as Dispatchable>::RuntimeOrigin: + From>, + ::RuntimeCall: + From> + Dispatchable + GetDispatchInfo, + C: Convert::AssetId>, +{ + fn assets_withdraw( + handle: &mut impl PrecompileHandle, + beneficiary_type: BeneficiaryType, + ) -> EvmResult { + let mut input = handle.read_input()?; + input.expect_arguments(6)?; + + // Read arguments and check it + let assets: Vec = input + .read::>()? 
+ .iter() + .cloned() + .filter_map(|address| { + R::address_to_asset_id(address.into()).and_then(|x| C::reverse_ref(x).ok()) + }) + .collect(); + let amounts_raw = input.read::>()?; + if amounts_raw.iter().any(|x| *x > u128::MAX.into()) { + return Err(revert("Asset amount is too big")); + } + let amounts: Vec = amounts_raw.iter().map(|x| x.low_u128()).collect(); + + // Check that assets list is valid: + // * all assets resolved to multi-location + // * all assets has corresponded amount + if assets.len() != amounts.len() || assets.is_empty() { + return Err(revert("Assets resolution failure.")); + } + + let beneficiary: MultiLocation = match beneficiary_type { + BeneficiaryType::Account32 => { + let recipient: [u8; 32] = input.read::()?.into(); + X1(Junction::AccountId32 { + network: None, + id: recipient, + }) + } + BeneficiaryType::Account20 => { + let recipient: H160 = input.read::
()?.into(); + X1(Junction::AccountKey20 { + network: None, + key: recipient.to_fixed_bytes(), + }) + } + } + .into(); + + let is_relay = input.read::()?; + let parachain_id: u32 = input.read::()?.low_u32(); + let fee_asset_item: u32 = input.read::()?.low_u32(); + + if fee_asset_item as usize > assets.len() { + return Err(revert("Bad fee index.")); + } + + // Prepare pallet-xcm call arguments + let dest = if is_relay { + MultiLocation::parent() + } else { + X1(Junction::Parachain(parachain_id)).into_exterior(1) + }; + + let assets: MultiAssets = assets + .iter() + .cloned() + .zip(amounts.iter().cloned()) + .map(Into::into) + .collect::>() + .into(); + + // Build call with origin. + let origin = Some(R::AddressMapping::into_account_id(handle.context().caller)).into(); + let call = pallet_xcm::Call::::reserve_withdraw_assets { + dest: Box::new(dest.into()), + beneficiary: Box::new(beneficiary.into()), + assets: Box::new(assets.into()), + fee_asset_item, + }; + + // Dispatch a call. + RuntimeHelper::::try_dispatch(handle, origin, call)?; + + Ok(succeed(EvmDataWriter::new().write(true).build())) + } + + fn remote_transact(handle: &mut impl PrecompileHandle) -> EvmResult { + let mut input = handle.read_input()?; + input.expect_arguments(6)?; + + // Raw call arguments + let para_id: u32 = input.read::()?.low_u32(); + let is_relay = input.read::()?; + + let fee_asset_addr = input.read::
()?; + let fee_amount = input.read::()?; + + let remote_call: Vec = input.read::()?.into(); + let transact_weight = input.read::()?; + + log::trace!(target: "xcm-precompile:remote_transact", "Raw arguments: para_id: {}, is_relay: {}, fee_asset_addr: {:?}, \ + fee_amount: {:?}, remote_call: {:?}, transact_weight: {}", + para_id, is_relay, fee_asset_addr, fee_amount, remote_call, transact_weight); + + // Process arguments + let dest = if is_relay { + MultiLocation::parent() + } else { + X1(Junction::Parachain(para_id)).into_exterior(1) + }; + + let fee_asset = { + let address: H160 = fee_asset_addr.into(); + + // Special case where zero address maps to native token by convention. + if address == NATIVE_ADDRESS { + Here.into() + } else { + let fee_asset_id = R::address_to_asset_id(address) + .ok_or(revert("Failed to resolve fee asset id from address"))?; + C::reverse_ref(fee_asset_id).map_err(|_| { + revert("Failed to resolve fee asset multilocation from local id") + })? + } + }; + + if fee_amount > u128::MAX.into() { + return Err(revert("Fee amount is too big")); + } + let fee_amount = fee_amount.low_u128(); + + let context = R::UniversalLocation::get(); + let fee_multilocation = MultiAsset { + id: Concrete(fee_asset), + fun: Fungible(fee_amount), + }; + let fee_multilocation = fee_multilocation + .reanchored(&dest, context) + .map_err(|_| revert("Failed to reanchor fee asset"))?; + + // Prepare XCM + let xcm = Xcm(vec![ + WithdrawAsset(fee_multilocation.clone().into()), + BuyExecution { + fees: fee_multilocation.clone().into(), + weight_limit: WeightLimit::Unlimited, + }, + Transact { + origin_kind: OriginKind::SovereignAccount, + require_weight_at_most: Weight::from_ref_time(transact_weight), + call: remote_call.into(), + }, + ]); + + log::trace!(target: "xcm-precompile:remote_transact", "Processed arguments: dest: {:?}, fee asset: {:?}, XCM: {:?}", dest, fee_multilocation, xcm); + + // Build call with origin. 
+ let origin = Some(R::AddressMapping::into_account_id(handle.context().caller)).into(); + let call = pallet_xcm::Call::::send { + dest: Box::new(dest.into()), + message: Box::new(xcm::VersionedXcm::V3(xcm)), + }; + + // Dispatch a call. + RuntimeHelper::::try_dispatch(handle, origin, call)?; + + Ok(succeed(EvmDataWriter::new().write(true).build())) + } + + fn assets_reserve_transfer( + handle: &mut impl PrecompileHandle, + beneficiary_type: BeneficiaryType, + ) -> EvmResult { + let mut input = handle.read_input()?; + input.expect_arguments(6)?; + + // Read arguments and check it + let assets: Vec = input + .read::>()? + .iter() + .cloned() + .filter_map(|address| { + let address: H160 = address.into(); + + // Special case where zero address maps to native token by convention. + if address == NATIVE_ADDRESS { + Some(Here.into()) + } else { + R::address_to_asset_id(address).and_then(|x| C::reverse_ref(x).ok()) + } + }) + .collect(); + let amounts_raw = input.read::>()?; + if amounts_raw.iter().any(|x| *x > u128::MAX.into()) { + return Err(revert("Asset amount is too big")); + } + let amounts: Vec = amounts_raw.iter().map(|x| x.low_u128()).collect(); + + log::trace!(target: "xcm-precompile:assets_reserve_transfer", "Processed arguments: assets {:?}, amounts: {:?}", assets, amounts); + + // Check that assets list is valid: + // * all assets resolved to multi-location + // * all assets has corresponded amount + if assets.len() != amounts.len() || assets.is_empty() { + return Err(revert("Assets resolution failure.")); + } + + let beneficiary: MultiLocation = match beneficiary_type { + BeneficiaryType::Account32 => { + let recipient: [u8; 32] = input.read::()?.into(); + X1(Junction::AccountId32 { + network: None, + id: recipient, + }) + } + BeneficiaryType::Account20 => { + let recipient: H160 = input.read::
()?.into(); + X1(Junction::AccountKey20 { + network: None, + key: recipient.to_fixed_bytes(), + }) + } + } + .into(); + + let is_relay = input.read::()?; + let parachain_id: u32 = input.read::()?.low_u32(); + let fee_asset_item: u32 = input.read::()?.low_u32(); + + if fee_asset_item as usize > assets.len() { + return Err(revert("Bad fee index.")); + } + + // Prepare pallet-xcm call arguments + let dest = if is_relay { + MultiLocation::parent() + } else { + X1(Junction::Parachain(parachain_id)).into_exterior(1) + }; + + let assets: MultiAssets = assets + .iter() + .cloned() + .zip(amounts.iter().cloned()) + .map(Into::into) + .collect::>() + .into(); + + // Build call with origin. + let origin = Some(R::AddressMapping::into_account_id(handle.context().caller)).into(); + let call = pallet_xcm::Call::::reserve_transfer_assets { + dest: Box::new(dest.into()), + beneficiary: Box::new(beneficiary.into()), + assets: Box::new(assets.into()), + fee_asset_item, + }; + + // Dispatch a call. + RuntimeHelper::::try_dispatch(handle, origin, call)?; + + Ok(succeed(EvmDataWriter::new().write(true).build())) + } +} diff --git a/precompiles/xcm/src/mock.rs b/precompiles/xcm/src/mock.rs new file mode 100644 index 0000000000..839484abf3 --- /dev/null +++ b/precompiles/xcm/src/mock.rs @@ -0,0 +1,480 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +//! Testing utilities. + +use super::*; + +use frame_support::{ + construct_runtime, parameter_types, + traits::{AsEnsureOriginWithArg, Everything, Nothing}, + weights::Weight, +}; +use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; +use scale_info::TypeInfo; +use serde::{Deserialize, Serialize}; + +use pallet_evm::{ + AddressMapping, EnsureAddressNever, EnsureAddressRoot, PrecompileResult, PrecompileSet, +}; +use pallet_evm_precompile_assets_erc20::AddressToAssetId; +use sp_core::{ConstU32, H160, H256}; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, +}; +use sp_std::{borrow::Borrow, cell::RefCell}; + +use xcm::prelude::XcmVersion; +use xcm_builder::{ + test_utils::TransactAsset, AllowKnownQueryResponses, AllowSubscriptionsFrom, + AllowTopLevelPaidExecutionFrom, FixedWeightBounds, SignedToAccountId32, TakeWeightCredit, +}; +use xcm_executor::XcmExecutor; + +pub type AccountId = TestAccount; +pub type AssetId = u128; +pub type Balance = u128; +pub type BlockNumber = u64; +pub type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +pub type Block = frame_system::mocking::MockBlock; + +pub const PRECOMPILE_ADDRESS: H160 = H160::repeat_byte(0x7B); +pub const ASSET_PRECOMPILE_ADDRESS_PREFIX: &[u8] = &[255u8; 4]; + +#[derive( + Eq, + PartialEq, + Ord, + PartialOrd, + Clone, + Encode, + Decode, + Debug, + MaxEncodedLen, + Serialize, + Deserialize, + derive_more::Display, + TypeInfo, +)] +pub enum TestAccount { + Alice, + Bob, + Charlie, + Bogus, + Precompile, +} + +impl Default for TestAccount { + fn default() -> Self { + Self::Alice + } +} + +impl AddressMapping for TestAccount { + fn into_account_id(h160_account: H160) -> TestAccount { + match h160_account { + a if a == H160::repeat_byte(0xAA) => Self::Alice, + a if a == H160::repeat_byte(0xBB) => Self::Bob, + a if a == H160::repeat_byte(0xCC) => 
Self::Charlie, + a if a == PRECOMPILE_ADDRESS => Self::Precompile, + _ => Self::Bogus, + } + } +} + +impl From for TestAccount { + fn from(x: H160) -> TestAccount { + TestAccount::into_account_id(x) + } +} + +impl From for H160 { + fn from(value: TestAccount) -> H160 { + match value { + TestAccount::Alice => H160::repeat_byte(0xAA), + TestAccount::Bob => H160::repeat_byte(0xBB), + TestAccount::Charlie => H160::repeat_byte(0xCC), + TestAccount::Precompile => PRECOMPILE_ADDRESS, + TestAccount::Bogus => Default::default(), + } + } +} + +impl From for [u8; 32] { + fn from(value: TestAccount) -> [u8; 32] { + match value { + TestAccount::Alice => [0xAA; 32], + TestAccount::Bob => [0xBB; 32], + TestAccount::Charlie => [0xCC; 32], + _ => Default::default(), + } + } +} + +impl AddressToAssetId for Runtime { + fn address_to_asset_id(address: H160) -> Option { + let mut data = [0u8; 16]; + let address_bytes: [u8; 20] = address.into(); + if ASSET_PRECOMPILE_ADDRESS_PREFIX.eq(&address_bytes[0..4]) { + data.copy_from_slice(&address_bytes[4..20]); + Some(u128::from_be_bytes(data)) + } else { + None + } + } + + fn asset_id_to_address(asset_id: AssetId) -> H160 { + let mut data = [0u8; 20]; + data[0..4].copy_from_slice(ASSET_PRECOMPILE_ADDRESS_PREFIX); + data[4..20].copy_from_slice(&asset_id.to_be_bytes()); + H160::from(data) + } +} + +parameter_types! 
{ + pub const BlockHashCount: u64 = 250; + pub const SS58Prefix: u8 = 42; +} + +impl frame_system::Config for Runtime { + type BaseCallFilter = Everything; + type DbWeight = (); + type RuntimeOrigin = RuntimeOrigin; + type Index = u64; + type BlockNumber = BlockNumber; + type RuntimeCall = RuntimeCall; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Header = Header; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type BlockWeights = (); + type BlockLength = (); + type SS58Prefix = SS58Prefix; + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +#[derive(Debug, Clone, Copy)] +pub struct TestPrecompileSet(PhantomData); + +impl PrecompileSet for TestPrecompileSet +where + R: pallet_evm::Config + + pallet_xcm::Config + + pallet_assets::Config + + AddressToAssetId<::AssetId>, + XcmPrecompile>: Precompile, +{ + fn execute(&self, handle: &mut impl PrecompileHandle) -> Option { + match handle.code_address() { + a if a == PRECOMPILE_ADDRESS => Some( + XcmPrecompile::>::execute(handle), + ), + _ => None, + } + } + + fn is_precompile(&self, address: H160) -> bool { + address == PRECOMPILE_ADDRESS + } +} + +parameter_types! { + pub const MinimumPeriod: u64 = 5; +} + +impl pallet_timestamp::Config for Runtime { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = MinimumPeriod; + type WeightInfo = (); +} + +parameter_types! 
{ + pub const ExistentialDeposit: u128 = 0; +} + +impl pallet_balances::Config for Runtime { + type MaxReserves = (); + type ReserveIdentifier = (); + type MaxLocks = (); + type Balance = Balance; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); +} + +// These parameters dont matter much as this will only be called by root with the forced arguments +// No deposit is substracted with those methods +parameter_types! { + pub const AssetDeposit: Balance = 0; + pub const AssetAccountDeposit: Balance = 0; + pub const ApprovalDeposit: Balance = 0; + pub const AssetsStringLimit: u32 = 50; + pub const MetadataDepositBase: Balance = 0; + pub const MetadataDepositPerByte: Balance = 0; +} + +impl pallet_assets::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Balance = Balance; + type AssetId = AssetId; + type Currency = Balances; + type ForceOrigin = frame_system::EnsureRoot; + type AssetDeposit = AssetDeposit; + type AssetAccountDeposit = AssetAccountDeposit; + type MetadataDepositBase = MetadataDepositBase; + type MetadataDepositPerByte = MetadataDepositPerByte; + type ApprovalDeposit = ApprovalDeposit; + type StringLimit = AssetsStringLimit; + type Freezer = (); + type Extra = (); + type CreateOrigin = AsEnsureOriginWithArg>; + type WeightInfo = pallet_assets::weights::SubstrateWeight; + type RemoveItemsLimit = ConstU32<0>; + type AssetIdParameter = AssetId; + type CallbackHandle = (); +} + +pub struct AssetIdConverter(PhantomData); +impl xcm_executor::traits::Convert for AssetIdConverter +where + AssetId: Clone + Eq + From, +{ + fn convert_ref(id: impl Borrow) -> Result { + if id.borrow().eq(&MultiLocation::parent()) { + Ok(AssetId::from(1u8)) + } else { + Err(()) + } + } + fn reverse_ref(what: impl Borrow) -> Result { + if what.borrow().eq(&AssetId::from(1u8)) { + Ok(MultiLocation::parent()) + } else { + Err(()) + } + } +} + +parameter_types! 
{ + pub const PrecompilesValue: TestPrecompileSet = + TestPrecompileSet(PhantomData); + pub WeightPerGas: Weight = Weight::from_ref_time(1); +} + +impl pallet_evm::Config for Runtime { + type FeeCalculator = (); + type GasWeightMapping = pallet_evm::FixedGasWeightMapping; + type WeightPerGas = WeightPerGas; + type CallOrigin = EnsureAddressRoot; + type WithdrawOrigin = EnsureAddressNever; + type AddressMapping = AccountId; + type Currency = Balances; + type RuntimeEvent = RuntimeEvent; + type Runner = pallet_evm::runner::stack::Runner; + type PrecompilesType = TestPrecompileSet; + type PrecompilesValue = PrecompilesValue; + type ChainId = (); + type OnChargeTransaction = (); + type BlockGasLimit = (); + type BlockHashMapping = pallet_evm::SubstrateBlockHashMapping; + type FindAuthor = (); + type OnCreate = (); + type WeightInfo = (); +} + +parameter_types! { + pub const RelayLocation: MultiLocation = Here.into_location(); + pub const AnyNetwork: Option = None; + pub UniversalLocation: InteriorMultiLocation = Here; + pub Ancestry: MultiLocation = Here.into(); + pub UnitWeightCost: u64 = 1_000; + pub const MaxAssetsIntoHolding: u32 = 64; +} + +parameter_types! 
{ + pub const BaseXcmWeight: u64 = 1_000; + pub const MaxInstructions: u32 = 100; +} + +pub type Barrier = ( + TakeWeightCredit, + AllowTopLevelPaidExecutionFrom, + AllowKnownQueryResponses, + AllowSubscriptionsFrom, +); + +pub struct LocalAssetTransactor; +impl TransactAsset for LocalAssetTransactor { + fn deposit_asset(_what: &MultiAsset, _who: &MultiLocation, _context: &XcmContext) -> XcmResult { + Ok(()) + } + + fn withdraw_asset( + _what: &MultiAsset, + _who: &MultiLocation, + _maybe_context: Option<&XcmContext>, + ) -> Result { + Ok(MultiAssets::new().into()) + } +} + +pub struct XcmConfig; +impl xcm_executor::Config for XcmConfig { + type RuntimeCall = RuntimeCall; + type XcmSender = StoringRouter; + type AssetTransactor = LocalAssetTransactor; + type OriginConverter = (); + type IsReserve = (); + type IsTeleporter = (); + type UniversalLocation = UniversalLocation; + type Barrier = Barrier; + type Weigher = FixedWeightBounds; + type Trader = (); + type ResponseHandler = XcmPallet; + type AssetTrap = XcmPallet; + type AssetLocker = (); + type AssetExchanger = (); + type AssetClaims = XcmPallet; + type SubscriptionService = XcmPallet; + type PalletInstancesInfo = AllPalletsWithSystem; + type MaxAssetsIntoHolding = MaxAssetsIntoHolding; + type FeeManager = (); + type MessageExporter = (); + type UniversalAliases = Nothing; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; +} + +parameter_types! { + pub static AdvertisedXcmVersion: XcmVersion = 3; +} + +pub type LocalOriginToLocation = SignedToAccountId32; + +thread_local! 
{ + pub static SENT_XCM: RefCell)>> = RefCell::new(Vec::new()); +} + +pub(crate) fn _sent_xcm() -> Vec<(MultiLocation, Xcm<()>)> { + SENT_XCM.with(|q| (*q.borrow()).clone()) +} + +pub(crate) fn take_sent_xcm() -> Vec<(MultiLocation, Xcm<()>)> { + SENT_XCM.with(|q| { + let mut r = Vec::new(); + std::mem::swap(&mut r, &mut *q.borrow_mut()); + r + }) +} + +pub struct StoringRouter; +impl SendXcm for StoringRouter { + type Ticket = (MultiLocation, Xcm<()>); + + fn validate( + destination: &mut Option, + message: &mut Option>, + ) -> SendResult<(MultiLocation, Xcm<()>)> { + Ok(( + (destination.take().unwrap(), message.take().unwrap()), + MultiAssets::new().into(), + )) + } + + fn deliver(pair: Self::Ticket) -> Result { + let (dest, msg) = (pair.0, pair.1); + SENT_XCM.with(|q| q.borrow_mut().push((dest.into(), msg))); + Ok(XcmHash::default()) + } +} + +#[cfg(feature = "runtime-benchmarks")] +parameter_types! { + pub ReachableDest: Option = Some(Parachain(1000).into()); +} + +impl pallet_xcm::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; + type XcmRouter = StoringRouter; + type ExecuteXcmOrigin = xcm_builder::EnsureXcmOrigin; + type XcmExecuteFilter = Everything; + type XcmExecutor = XcmExecutor; + type XcmTeleportFilter = Everything; + type XcmReserveTransferFilter = Everything; + type Weigher = FixedWeightBounds; + type UniversalLocation = UniversalLocation; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; + + type AdvertisedXcmVersion = AdvertisedXcmVersion; + type TrustedLockers = (); + type SovereignAccountOf = (); + type Currency = Balances; + type CurrencyMatcher = (); + type MaxLockers = frame_support::traits::ConstU32<8>; + type WeightInfo = pallet_xcm::TestWeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type ReachableDest = ReachableDest; +} + +// Configure a mock runtime to test the pallet. 
+construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system, + Balances: pallet_balances, + Assets: pallet_assets, + Evm: pallet_evm, + Timestamp: pallet_timestamp, + XcmPallet: pallet_xcm, + } +); + +#[derive(Default)] +pub(crate) struct ExtBuilder; + +impl ExtBuilder { + pub(crate) fn build(self) -> sp_io::TestExternalities { + let t = frame_system::GenesisConfig::default() + .build_storage::() + .expect("Frame system builds valid default genesis config"); + + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext + } +} diff --git a/precompiles/xcm/src/tests.rs b/precompiles/xcm/src/tests.rs new file mode 100644 index 0000000000..8604dc4417 --- /dev/null +++ b/precompiles/xcm/src/tests.rs @@ -0,0 +1,282 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . 
+ +use core::assert_matches::assert_matches; + +use crate::mock::*; +use crate::*; + +use precompile_utils::testing::*; +use precompile_utils::EvmDataWriter; +use sp_core::H160; + +fn precompiles() -> TestPrecompileSet { + PrecompilesValue::get() +} + +#[test] +fn wrong_assets_len_or_fee_index_reverts() { + ExtBuilder::default().build().execute_with(|| { + precompiles() + .prepare_test( + TestAccount::Alice, + PRECOMPILE_ADDRESS, + EvmDataWriter::new_with_selector(Action::AssetsWithdrawNative) + .write(vec![Address::from(H160::repeat_byte(0xF1))]) + .write(Vec::::new()) + .write(H256::repeat_byte(0xF1)) + .write(true) + .write(U256::from(0_u64)) + .write(U256::from(0_u64)) + .build(), + ) + .expect_no_logs() + .execute_reverts(|output| output == b"Assets resolution failure."); + + precompiles() + .prepare_test( + TestAccount::Alice, + PRECOMPILE_ADDRESS, + EvmDataWriter::new_with_selector(Action::AssetsWithdrawNative) + .write(vec![Address::from(Runtime::asset_id_to_address(1u128))]) + .write(vec![U256::from(42000u64)]) + .write(H256::repeat_byte(0xF1)) + .write(true) + .write(U256::from(0_u64)) + .write(U256::from(2_u64)) + .build(), + ) + .expect_no_logs() + .execute_reverts(|output| output == b"Bad fee index."); + }); +} + +#[test] +fn assets_withdraw_works() { + ExtBuilder::default().build().execute_with(|| { + // SS58 + precompiles() + .prepare_test( + TestAccount::Alice, + PRECOMPILE_ADDRESS, + EvmDataWriter::new_with_selector(Action::AssetsWithdrawNative) + .write(vec![Address::from(Runtime::asset_id_to_address(1u128))]) + .write(vec![U256::from(42000u64)]) + .write(H256::repeat_byte(0xF1)) + .write(true) + .write(U256::from(0_u64)) + .write(U256::from(0_u64)) + .build(), + ) + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(true).build()); + + // H160 + precompiles() + .prepare_test( + TestAccount::Alice, + PRECOMPILE_ADDRESS, + EvmDataWriter::new_with_selector(Action::AssetsWithdrawEvm) + 
.write(vec![Address::from(Runtime::asset_id_to_address(1u128))]) + .write(vec![U256::from(42000u64)]) + .write(Address::from(H160::repeat_byte(0xDE))) + .write(true) + .write(U256::from(0_u64)) + .write(U256::from(0_u64)) + .build(), + ) + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(true).build()); + }); +} + +#[test] +fn remote_transact_works() { + ExtBuilder::default().build().execute_with(|| { + // SS58 + precompiles() + .prepare_test( + TestAccount::Alice, + PRECOMPILE_ADDRESS, + EvmDataWriter::new_with_selector(Action::RemoteTransact) + .write(U256::from(0_u64)) + .write(true) + .write(Address::from(Runtime::asset_id_to_address(1_u128))) + .write(U256::from(367)) + .write(vec![0xff_u8, 0xaa, 0x77, 0x00]) + .write(U256::from(3_000_000_000u64)) + .build(), + ) + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(true).build()); + }); +} + +#[test] +fn reserve_transfer_assets_works() { + ExtBuilder::default().build().execute_with(|| { + // SS58 + precompiles() + .prepare_test( + TestAccount::Alice, + PRECOMPILE_ADDRESS, + EvmDataWriter::new_with_selector(Action::AssetsReserveTransferNative) + .write(vec![Address::from(Runtime::asset_id_to_address(1u128))]) + .write(vec![U256::from(42000u64)]) + .write(H256::repeat_byte(0xF1)) + .write(true) + .write(U256::from(0_u64)) + .write(U256::from(0_u64)) + .build(), + ) + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(true).build()); + + // H160 + precompiles() + .prepare_test( + TestAccount::Alice, + PRECOMPILE_ADDRESS, + EvmDataWriter::new_with_selector(Action::AssetsReserveTransferEvm) + .write(vec![Address::from(Runtime::asset_id_to_address(1u128))]) + .write(vec![U256::from(42000u64)]) + .write(Address::from(H160::repeat_byte(0xDE))) + .write(true) + .write(U256::from(0_u64)) + .write(U256::from(0_u64)) + .build(), + ) + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(true).build()); + }); + + for (location, Xcm(instructions)) in take_sent_xcm() { + 
assert_eq!( + location, + MultiLocation { + parents: 1, + interior: Here + } + ); + + let non_native_asset = MultiAsset { + fun: Fungible(42000), + id: xcm::v3::AssetId::from(MultiLocation { + parents: 0, + interior: Here, + }), + }; + + assert_matches!( + instructions.as_slice(), + [ + ReserveAssetDeposited(assets), + ClearOrigin, + BuyExecution { + fees, + .. + }, + DepositAsset { + beneficiary: MultiLocation { + parents: 0, + interior: X1(_), + }, + .. + } + ] + + if fees.contains(&non_native_asset) && assets.contains(&non_native_asset) + ); + } +} + +#[test] +fn reserve_transfer_currency_works() { + ExtBuilder::default().build().execute_with(|| { + precompiles() + .prepare_test( + TestAccount::Alice, + PRECOMPILE_ADDRESS, + EvmDataWriter::new_with_selector(Action::AssetsReserveTransferNative) + .write(vec![Address::from(H160::zero())]) // zero address by convention + .write(vec![U256::from(42000u64)]) + .write(H256::repeat_byte(0xF1)) + .write(true) + .write(U256::from(0_u64)) + .write(U256::from(0_u64)) + .build(), + ) + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(true).build()); + + precompiles() + .prepare_test( + TestAccount::Alice, + PRECOMPILE_ADDRESS, + EvmDataWriter::new_with_selector(Action::AssetsReserveTransferEvm) + .write(vec![Address::from(H160::zero())]) // zero address by convention + .write(vec![U256::from(42000u64)]) + .write(Address::from(H160::repeat_byte(0xDE))) + .write(true) + .write(U256::from(0_u64)) + .write(U256::from(0_u64)) + .build(), + ) + .expect_no_logs() + .execute_returns(EvmDataWriter::new().write(true).build()); + }); + + for (location, Xcm(instructions)) in take_sent_xcm() { + assert_eq!( + location, + MultiLocation { + parents: 1, + interior: Here + } + ); + + let native_asset = MultiAsset { + fun: Fungible(42000), + id: xcm::v3::AssetId::from(MultiLocation { + parents: 0, + interior: X1(OnlyChild), + }), + }; + + assert_matches!( + instructions.as_slice(), + [ + ReserveAssetDeposited(assets), + 
ClearOrigin, + BuyExecution { + fees, + .. + }, + DepositAsset { + beneficiary: MultiLocation { + parents: 0, + interior: X1(_), + }, + .. + } + ] + + if fees.contains(&native_asset) && assets.contains(&native_asset) + ); + } +} diff --git a/precompiles/xvm/Cargo.toml b/precompiles/xvm/Cargo.toml new file mode 100644 index 0000000000..cb431293f1 --- /dev/null +++ b/precompiles/xvm/Cargo.toml @@ -0,0 +1,55 @@ +[package] +name = "pallet-evm-precompile-xvm" +description = "Cross-VM call support for EVM." +version = "0.1.0" +authors.workspace = true +edition.workspace = true +homepage.workspace = true +repository.workspace = true + +[dependencies] +log = { workspace = true } +num_enum = { workspace = true } +pallet-xvm = { workspace = true } +precompile-utils = { workspace = true } + +# Substrate +frame-support = { workspace = true } +frame-system = { workspace = true } +parity-scale-codec = { workspace = true, features = ["max-encoded-len"] } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } + +# Frontier +fp-evm = { workspace = true } +pallet-evm = { workspace = true } + +[dev-dependencies] +derive_more = { workspace = true } +hex-literal = { workspace = true } +scale-info = { workspace = true } +serde = { workspace = true } + +precompile-utils = { workspace = true, features = ["testing"] } + +pallet-balances = { workspace = true } +pallet-timestamp = { workspace = true } +sp-runtime = { workspace = true } + +[features] +default = ["std"] +std = [ + "parity-scale-codec/std", + "fp-evm/std", + "frame-support/std", + "frame-system/std", + "pallet-evm/std", + "pallet-xvm/std", + "precompile-utils/std", + "sp-core/std", + "sp-std/std", + "sp-io/std", + "sp-runtime/std", +] diff --git a/precompiles/xvm/evm_sdk/XVM.sol b/precompiles/xvm/evm_sdk/XVM.sol new file mode 100644 index 0000000000..652582ff08 --- /dev/null +++ b/precompiles/xvm/evm_sdk/XVM.sol @@ -0,0 +1,20 @@ +pragma solidity ^0.8.0; + 
+/** + * @title XVM interface. + */ +interface XVM { + /** + * @dev Execute external VM call + * @param context - execution context + * @param to - call recepient + * @param input - SCALE-encoded call arguments + * @return success - operation outcome + * @return data - output data if successful, error data on error + */ + function xvm_call( + bytes calldata context, + bytes calldata to, + bytes calldata input + ) external returns (bool success, bytes memory data); +} diff --git a/precompiles/xvm/evm_sdk/flipper.sol b/precompiles/xvm/evm_sdk/flipper.sol new file mode 100644 index 0000000000..a15eaefd9e --- /dev/null +++ b/precompiles/xvm/evm_sdk/flipper.sol @@ -0,0 +1,18 @@ +pragma solidity ^0.8.0; + +interface XVM { + function xvm_call( + bytes calldata context, + bytes calldata to, + bytes calldata input, + ) external; +} + +library Flipper { + const XVM XVM_PRECOMPILE = XVM(0x0000000000000000000000000000000000005005); + + function flip(bytes to) { + bytes input = "0xcafecafe"; + XVM_PRECOMPILE.xvm_call(0x1f00, to, input); + } +} diff --git a/precompiles/xvm/src/lib.rs b/precompiles/xvm/src/lib.rs new file mode 100644 index 0000000000..1b4ec02eeb --- /dev/null +++ b/precompiles/xvm/src/lib.rs @@ -0,0 +1,132 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . 
+ +#![cfg_attr(not(feature = "std"), no_std)] +#![cfg_attr(test, feature(assert_matches))] + +use fp_evm::{PrecompileHandle, PrecompileOutput}; +use frame_support::dispatch::{Dispatchable, GetDispatchInfo, PostDispatchInfo}; +use pallet_evm::{AddressMapping, Precompile}; +use pallet_xvm::XvmContext; +use parity_scale_codec::Decode; +use sp_runtime::codec::Encode; +use sp_std::marker::PhantomData; +use sp_std::prelude::*; + +use precompile_utils::{ + revert, succeed, Bytes, EvmDataWriter, EvmResult, FunctionModifier, PrecompileHandleExt, +}; + +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; + +#[precompile_utils::generate_function_selector] +#[derive(Debug, PartialEq)] +pub enum Action { + XvmCall = "xvm_call(bytes,bytes,bytes)", +} + +/// A precompile that expose XVM related functions. +pub struct XvmPrecompile(PhantomData); + +impl Precompile for XvmPrecompile +where + R: pallet_evm::Config + pallet_xvm::Config, + <::RuntimeCall as Dispatchable>::RuntimeOrigin: + From>, + ::RuntimeCall: + From> + Dispatchable + GetDispatchInfo, +{ + fn execute(handle: &mut impl PrecompileHandle) -> EvmResult { + log::trace!(target: "xvm-precompile", "In XVM precompile"); + + let selector = handle.read_selector()?; + + handle.check_function_modifier(FunctionModifier::NonPayable)?; + + match selector { + // Dispatchables + Action::XvmCall => Self::xvm_call(handle), + } + } +} + +impl XvmPrecompile +where + R: pallet_evm::Config + pallet_xvm::Config, + <::RuntimeCall as Dispatchable>::RuntimeOrigin: + From>, + ::RuntimeCall: + From> + Dispatchable + GetDispatchInfo, +{ + fn xvm_call(handle: &mut impl PrecompileHandle) -> EvmResult { + let mut input = handle.read_input()?; + input.expect_arguments(4)?; + + // Read arguments and check it + // TODO: This approach probably needs to be revised - does contract call need to specify gas/weight? Usually it is implicit. 
+ let context_raw = input.read::()?; + let context: XvmContext = Decode::decode(&mut context_raw.0.as_ref()) + .map_err(|_| revert("can not decode XVM context"))?; + + // Fetch the remaining gas (weight) available for execution + // TODO: rework + //let remaining_gas = handle.remaining_gas(); + //let remaining_weight = R::GasWeightMapping::gas_to_weight(remaining_gas); + //context.max_weight = remaining_weight; + + let call_to = input.read::()?.0; + let call_input = input.read::()?.0; + + let from = R::AddressMapping::into_account_id(handle.context().caller); + match &pallet_xvm::Pallet::::xvm_bare_call(context, from, call_to, call_input) { + Ok(success) => { + log::trace!( + target: "xvm-precompile::xvm_call", + "success: {:?}", success + ); + + Ok(succeed( + EvmDataWriter::new() + .write(true) + .write(Bytes(success.output().to_vec())) // TODO redundant clone + .build(), + )) + } + + Err(failure) => { + log::trace!( + target: "xvm-precompile::xvm_call", + "failure: {:?}", failure + ); + + let mut error_buffer = Vec::new(); + failure.error().encode_to(&mut error_buffer); + + Ok(succeed( + EvmDataWriter::new() + .write(false) + .write(Bytes(error_buffer)) + .build(), + )) + } + } + } +} diff --git a/precompiles/xvm/src/mock.rs b/precompiles/xvm/src/mock.rs new file mode 100644 index 0000000000..2ac1583030 --- /dev/null +++ b/precompiles/xvm/src/mock.rs @@ -0,0 +1,255 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +//! Testing utilities. + +use super::*; + +use frame_support::{construct_runtime, parameter_types, traits::Everything, weights::Weight}; +use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; +use scale_info::TypeInfo; +use serde::{Deserialize, Serialize}; + +use pallet_evm::{ + AddressMapping, EnsureAddressNever, EnsureAddressRoot, PrecompileResult, PrecompileSet, +}; +use sp_core::{H160, H256}; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, +}; + +pub type AccountId = TestAccount; +pub type Balance = u128; +pub type BlockNumber = u64; +pub type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +pub type Block = frame_system::mocking::MockBlock; + +pub const PRECOMPILE_ADDRESS: H160 = H160::repeat_byte(0x7B); + +#[derive( + Eq, + PartialEq, + Ord, + PartialOrd, + Clone, + Encode, + Decode, + Debug, + MaxEncodedLen, + Serialize, + Deserialize, + derive_more::Display, + TypeInfo, +)] +pub enum TestAccount { + Alice, + Bob, + Charlie, + Bogus, + Precompile, +} + +impl Default for TestAccount { + fn default() -> Self { + Self::Alice + } +} + +impl AddressMapping for TestAccount { + fn into_account_id(h160_account: H160) -> TestAccount { + match h160_account { + a if a == H160::repeat_byte(0xAA) => Self::Alice, + a if a == H160::repeat_byte(0xBB) => Self::Bob, + a if a == H160::repeat_byte(0xCC) => Self::Charlie, + a if a == PRECOMPILE_ADDRESS => Self::Precompile, + _ => Self::Bogus, + } + } +} + +impl From for TestAccount { + fn from(x: H160) -> TestAccount { + TestAccount::into_account_id(x) + } +} + +impl From for H160 { + fn from(value: TestAccount) -> H160 { + match value { + TestAccount::Alice => H160::repeat_byte(0xAA), + TestAccount::Bob => H160::repeat_byte(0xBB), + TestAccount::Charlie => H160::repeat_byte(0xCC), + TestAccount::Precompile => 
PRECOMPILE_ADDRESS, + TestAccount::Bogus => Default::default(), + } + } +} + +impl From for [u8; 32] { + fn from(value: TestAccount) -> [u8; 32] { + match value { + TestAccount::Alice => [0xAA; 32], + TestAccount::Bob => [0xBB; 32], + TestAccount::Charlie => [0xCC; 32], + _ => Default::default(), + } + } +} + +parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const SS58Prefix: u8 = 42; +} + +impl frame_system::Config for Runtime { + type BaseCallFilter = Everything; + type DbWeight = (); + type RuntimeOrigin = RuntimeOrigin; + type Index = u64; + type BlockNumber = BlockNumber; + type RuntimeCall = RuntimeCall; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Header = Header; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type BlockWeights = (); + type BlockLength = (); + type SS58Prefix = SS58Prefix; + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +#[derive(Debug, Clone, Copy)] +pub struct TestPrecompileSet(PhantomData); + +impl PrecompileSet for TestPrecompileSet +where + R: pallet_evm::Config + pallet_xvm::Config, + XvmPrecompile: Precompile, +{ + fn execute(&self, handle: &mut impl PrecompileHandle) -> Option { + match handle.code_address() { + a if a == PRECOMPILE_ADDRESS => Some(XvmPrecompile::::execute(handle)), + _ => None, + } + } + + fn is_precompile(&self, address: H160) -> bool { + address == PRECOMPILE_ADDRESS + } +} + +parameter_types! { + pub const MinimumPeriod: u64 = 5; +} + +impl pallet_timestamp::Config for Runtime { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = MinimumPeriod; + type WeightInfo = (); +} + +parameter_types! 
{ + pub const ExistentialDeposit: u128 = 0; +} + +impl pallet_balances::Config for Runtime { + type MaxReserves = (); + type ReserveIdentifier = (); + type MaxLocks = (); + type Balance = Balance; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); +} + +parameter_types! { + pub const PrecompilesValue: TestPrecompileSet = + TestPrecompileSet(PhantomData); + pub WeightPerGas: Weight = Weight::from_ref_time(1); +} + +impl pallet_evm::Config for Runtime { + type FeeCalculator = (); + type GasWeightMapping = pallet_evm::FixedGasWeightMapping; + type WeightPerGas = WeightPerGas; + type CallOrigin = EnsureAddressRoot; + type WithdrawOrigin = EnsureAddressNever; + type AddressMapping = AccountId; + type Currency = Balances; + type RuntimeEvent = RuntimeEvent; + type Runner = pallet_evm::runner::stack::Runner; + type PrecompilesType = TestPrecompileSet; + type PrecompilesValue = PrecompilesValue; + type ChainId = (); + type OnChargeTransaction = (); + type BlockGasLimit = (); + type BlockHashMapping = pallet_evm::SubstrateBlockHashMapping; + type FindAuthor = (); + type OnCreate = (); + type WeightInfo = (); +} + +impl pallet_xvm::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type SyncVM = (); + type AsyncVM = (); +} + +// Configure a mock runtime to test the pallet. 
+construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system, + Balances: pallet_balances, + Evm: pallet_evm, + Timestamp: pallet_timestamp, + Xvm: pallet_xvm, + } +); + +#[derive(Default)] +pub(crate) struct ExtBuilder; + +impl ExtBuilder { + pub(crate) fn build(self) -> sp_io::TestExternalities { + let t = frame_system::GenesisConfig::default() + .build_storage::() + .expect("Frame system builds valid default genesis config"); + + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext + } +} diff --git a/precompiles/xvm/src/tests.rs b/precompiles/xvm/src/tests.rs new file mode 100644 index 0000000000..2f3ba81831 --- /dev/null +++ b/precompiles/xvm/src/tests.rs @@ -0,0 +1,83 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . 
+ +use crate::mock::*; +use crate::*; + +use parity_scale_codec::Encode; +use precompile_utils::testing::*; +use precompile_utils::EvmDataWriter; + +fn precompiles() -> TestPrecompileSet { + PrecompilesValue::get() +} + +#[test] +fn wrong_argument_reverts() { + ExtBuilder::default().build().execute_with(|| { + precompiles() + .prepare_test( + TestAccount::Alice, + PRECOMPILE_ADDRESS, + EvmDataWriter::new_with_selector(Action::XvmCall) + .write(42u64) + .build(), + ) + .expect_no_logs() + .execute_reverts(|output| output == b"input doesn't match expected length"); + + precompiles() + .prepare_test( + TestAccount::Alice, + PRECOMPILE_ADDRESS, + EvmDataWriter::new_with_selector(Action::XvmCall) + .write(0u8) + .write(Bytes(b"".to_vec())) + .write(Bytes(b"".to_vec())) + .write(Bytes(b"".to_vec())) + .build(), + ) + .expect_no_logs() + .execute_reverts(|output| output == b"can not decode XVM context"); + }) +} + +#[test] +fn correct_arguments_works() { + let context: XvmContext = Default::default(); + ExtBuilder::default().build().execute_with(|| { + precompiles() + .prepare_test( + TestAccount::Alice, + PRECOMPILE_ADDRESS, + EvmDataWriter::new_with_selector(Action::XvmCall) + .write(Bytes(context.encode())) + .write(Bytes(b"".to_vec())) + .write(Bytes(b"".to_vec())) + .write(Bytes(b"".to_vec())) + .build(), + ) + .expect_no_logs() + .execute_returns( + EvmDataWriter::new() + .write(false) // the XVM call should succeed but the internal should fail + .write(vec![0u8]) + .build(), + ); + }) +} diff --git a/primitives/xcm/Cargo.toml b/primitives/xcm/Cargo.toml new file mode 100644 index 0000000000..1d2f20dadf --- /dev/null +++ b/primitives/xcm/Cargo.toml @@ -0,0 +1,40 @@ +[package] +name = "xcm-primitives" +version = "0.4.1" +description = "Common XCM primitives used by runtimes" +authors.workspace = true +edition.workspace = true +homepage.workspace = true +repository.workspace = true + +[dependencies] + +# third-party dependencies +log = { workspace = true } + +# 
Substrate dependencies +frame-support = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } + +# XCM dependencies +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } + +# Astar pallets +pallet-xc-asset-config = { workspace = true } + +[features] +default = ["std"] +std = [ + "log/std", + "frame-support/std", + "sp-std/std", + "sp-runtime/std", + "xcm/std", + "xcm-builder/std", + "xcm-executor/std", + "pallet-xc-asset-config/std", +] +runtime-benchmarks = ["xcm-builder/runtime-benchmarks"] diff --git a/primitives/xcm/src/lib.rs b/primitives/xcm/src/lib.rs new file mode 100644 index 0000000000..d3dbb04706 --- /dev/null +++ b/primitives/xcm/src/lib.rs @@ -0,0 +1,317 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . + +//! # XCM Primitives +//! +//! ## Overview +//! +//! Collection of common XCM primitives used by runtimes. +//! +//! - `AssetLocationIdConverter` - conversion between local asset Id and cross-chain asset multilocation +//! - `FixedRateOfForeignAsset` - weight trader for execution payment in foreign asset +//! - `ReserveAssetFilter` - used to check whether asset/origin are a valid reserve location +//! - `XcmFungibleFeeHandler` - used to handle XCM fee execution fees +//! +//! 
Please refer to implementation below for more info. +//! + +#![cfg_attr(not(feature = "std"), no_std)] + +use frame_support::{ + ensure, + traits::{tokens::fungibles, Contains, ContainsPair, Get}, + weights::constants::WEIGHT_REF_TIME_PER_SECOND, +}; +use sp_runtime::traits::{Bounded, Zero}; +use sp_std::{borrow::Borrow, marker::PhantomData, vec::Vec}; + +// Polkadot imports +use xcm::latest::{prelude::*, Weight}; +use xcm_builder::TakeRevenue; +use xcm_executor::traits::{MatchesFungibles, ShouldExecute, WeightTrader}; + +use pallet_xc_asset_config::{ExecutionPaymentRate, XcAssetLocation}; + +#[cfg(test)] +mod tests; + +/// Used to convert between cross-chain asset multilocation and local asset Id. +/// +/// This implementation relies on `XcAssetConfig` pallet to handle mapping. +/// In case asset location hasn't been mapped, it means the asset isn't supported (yet). +pub struct AssetLocationIdConverter( + sp_std::marker::PhantomData<(AssetId, AssetMapper)>, +); +impl xcm_executor::traits::Convert + for AssetLocationIdConverter +where + AssetId: Clone + Eq + Bounded, + AssetMapper: XcAssetLocation, +{ + fn convert_ref(location: impl Borrow) -> Result { + if let Some(asset_id) = AssetMapper::get_asset_id(location.borrow().clone()) { + Ok(asset_id) + } else { + Err(()) + } + } + + fn reverse_ref(id: impl Borrow) -> Result { + if let Some(multilocation) = AssetMapper::get_xc_asset_location(id.borrow().clone()) { + Ok(multilocation) + } else { + Err(()) + } + } +} + +/// Used as weight trader for foreign assets. +/// +/// In case foreigin asset is supported as payment asset, XCM execution time +/// on-chain can be paid by the foreign asset, using the configured rate. 
+pub struct FixedRateOfForeignAsset { + /// Total used weight + weight: Weight, + /// Total consumed assets + consumed: u128, + /// Asset Id (as MultiLocation) and units per second for payment + asset_location_and_units_per_second: Option<(MultiLocation, u128)>, + _pd: PhantomData<(T, R)>, +} + +impl WeightTrader for FixedRateOfForeignAsset { + fn new() -> Self { + Self { + weight: Weight::zero(), + consumed: 0, + asset_location_and_units_per_second: None, + _pd: PhantomData, + } + } + + fn buy_weight( + &mut self, + weight: Weight, + payment: xcm_executor::Assets, + ) -> Result { + log::trace!( + target: "xcm::weight", + "FixedRateOfForeignAsset::buy_weight weight: {:?}, payment: {:?}", + weight, payment, + ); + + // Atm in pallet, we only support one asset so this should work + let payment_asset = payment + .fungible_assets_iter() + .next() + .ok_or(XcmError::TooExpensive)?; + + match payment_asset { + MultiAsset { + id: xcm::latest::AssetId::Concrete(asset_location), + fun: Fungibility::Fungible(_), + } => { + if let Some(units_per_second) = T::get_units_per_second(asset_location.clone()) { + let amount = units_per_second.saturating_mul(weight.ref_time() as u128) // TODO: change this to u64? + / (WEIGHT_REF_TIME_PER_SECOND as u128); + if amount == 0 { + return Ok(payment); + } + + let unused = payment + .checked_sub((asset_location.clone(), amount).into()) + .map_err(|_| XcmError::TooExpensive)?; + + self.weight = self.weight.saturating_add(weight); + + // If there are multiple calls to `BuyExecution` but with different assets, we need to be able to handle that. + // Current primitive implementation will just keep total track of consumed asset for the FIRST consumed asset. + // Others will just be ignored when refund is concerned. 
+ if let Some((old_asset_location, _)) = + self.asset_location_and_units_per_second.clone() + { + if old_asset_location == asset_location { + self.consumed = self.consumed.saturating_add(amount); + } + } else { + self.consumed = self.consumed.saturating_add(amount); + self.asset_location_and_units_per_second = + Some((asset_location, units_per_second)); + } + + Ok(unused) + } else { + Err(XcmError::TooExpensive) + } + } + _ => Err(XcmError::TooExpensive), + } + } + + fn refund_weight(&mut self, weight: Weight) -> Option { + log::trace!(target: "xcm::weight", "FixedRateOfForeignAsset::refund_weight weight: {:?}", weight); + + if let Some((asset_location, units_per_second)) = + self.asset_location_and_units_per_second.clone() + { + let weight = weight.min(self.weight); + let amount = units_per_second.saturating_mul(weight.ref_time() as u128) + / (WEIGHT_REF_TIME_PER_SECOND as u128); + + self.weight = self.weight.saturating_sub(weight); + self.consumed = self.consumed.saturating_sub(amount); + + if amount > 0 { + Some((asset_location, amount).into()) + } else { + None + } + } else { + None + } + } +} + +impl Drop for FixedRateOfForeignAsset { + fn drop(&mut self) { + if let Some((asset_location, _)) = self.asset_location_and_units_per_second.clone() { + if self.consumed > 0 { + R::take_revenue((asset_location, self.consumed).into()); + } + } + } +} + +/// Used to determine whether the cross-chain asset is coming from a trusted reserve or not +/// +/// Basically, we trust any cross-chain asset from any location to act as a reserve since +/// in order to support the xc-asset, we need to first register it in the `XcAssetConfig` pallet. 
+/// +pub struct ReserveAssetFilter; +impl ContainsPair for ReserveAssetFilter { + fn contains(asset: &MultiAsset, origin: &MultiLocation) -> bool { + // We assume that relay chain and sibling parachain assets are trusted reserves for their assets + let reserve_location = if let Concrete(location) = &asset.id { + match (location.parents, location.first_interior()) { + // sibling parachain + (1, Some(Parachain(id))) => Some(MultiLocation::new(1, X1(Parachain(*id)))), + // relay chain + (1, _) => Some(MultiLocation::parent()), + _ => None, + } + } else { + None + }; + + if let Some(ref reserve) = reserve_location { + origin == reserve + } else { + false + } + } +} + +/// Used to deposit XCM fees into a destination account. +/// +/// Only handles fungible assets for now. +/// If for any reason taking of the fee fails, it will be burned and and error trace will be printed. +/// +pub struct XcmFungibleFeeHandler( + sp_std::marker::PhantomData<(AccountId, Matcher, Assets, FeeDestination)>, +); +impl< + AccountId, + Assets: fungibles::Mutate, + Matcher: MatchesFungibles, + FeeDestination: Get, + > TakeRevenue for XcmFungibleFeeHandler +{ + fn take_revenue(revenue: MultiAsset) { + match Matcher::matches_fungibles(&revenue) { + Ok((asset_id, amount)) => { + if amount > Zero::zero() { + if let Err(error) = Assets::mint_into(asset_id, &FeeDestination::get(), amount) + { + log::error!( + target: "xcm::weight", + "XcmFeeHandler::take_revenue failed when minting asset: {:?}", error, + ); + } else { + log::trace!( + target: "xcm::weight", + "XcmFeeHandler::take_revenue took {:?} of asset Id {:?}", + amount, asset_id, + ); + } + } + } + Err(_) => { + log::error!( + target: "xcm::weight", + "XcmFeeHandler:take_revenue failed to match fungible asset, it has been burned." + ); + } + } + } +} + +/// Allows execution from `origin` if it is contained in `T` (i.e. `T::Contains(origin)`) taking +/// payments into account. 
+/// +/// Only allows for sequence `DescendOrigin` -> `WithdrawAsset` -> `BuyExecution` +pub struct AllowPaidExecWithDescendOriginFrom(PhantomData); +impl> ShouldExecute for AllowPaidExecWithDescendOriginFrom { + fn should_execute( + origin: &MultiLocation, + message: &mut [Instruction], + max_weight: Weight, + _weight_credit: &mut Weight, + ) -> Result<(), ()> { + log::trace!( + target: "xcm::barriers", + "AllowPaidExecWithDescendOriginFrom origin: {:?}, message: {:?}, max_weight: {:?}, weight_credit: {:?}", + origin, message, max_weight, _weight_credit, + ); + ensure!(T::contains(origin), ()); + + match message + .iter_mut() + .take(3) + .collect::>() + .as_mut_slice() + { + [DescendOrigin(..), WithdrawAsset(..), BuyExecution { + weight_limit: Limited(ref mut limit), + .. + }] if limit.all_gte(max_weight) => { + *limit = max_weight; + Ok(()) + } + + [DescendOrigin(..), WithdrawAsset(..), BuyExecution { + weight_limit: ref mut limit @ Unlimited, + .. + }] => { + *limit = Limited(max_weight); + Ok(()) + } + + _ => return Err(()), + } + } +} diff --git a/primitives/xcm/src/tests.rs b/primitives/xcm/src/tests.rs new file mode 100644 index 0000000000..a1ba960088 --- /dev/null +++ b/primitives/xcm/src/tests.rs @@ -0,0 +1,562 @@ +// This file is part of Astar. + +// Copyright (C) 2019-2023 Stake Technologies Pte.Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// Astar is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Astar is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Astar. If not, see . 
+ +use super::*; +use frame_support::{ + assert_ok, + traits::{Everything, Nothing}, +}; +use sp_runtime::traits::Zero; +use xcm_executor::traits::Convert; + +type AssetId = u128; + +// Primitive, perhaps I improve it later +const PARENT: MultiLocation = MultiLocation::parent(); +const PARACHAIN: MultiLocation = MultiLocation { + parents: 1, + interior: Junctions::X1(Parachain(10)), +}; +const GENERAL_INDEX: MultiLocation = MultiLocation { + parents: 2, + interior: Junctions::X1(GeneralIndex(20)), +}; +const RELAY_ASSET: AssetId = AssetId::MAX; + +/// Helper struct used for testing `AssetLocationIdConverter` +struct AssetLocationMapper; +impl XcAssetLocation for AssetLocationMapper { + fn get_xc_asset_location(asset_id: AssetId) -> Option { + match asset_id { + RELAY_ASSET => Some(PARENT), + 20 => Some(PARACHAIN), + 30 => Some(GENERAL_INDEX), + _ => None, + } + } + + fn get_asset_id(asset_location: MultiLocation) -> Option { + match asset_location { + a if a == PARENT => Some(RELAY_ASSET), + a if a == PARACHAIN => Some(20), + a if a == GENERAL_INDEX => Some(30), + _ => None, + } + } +} + +/// Helper struct used for testing `FixedRateOfForeignAsset` +struct ExecutionPayment; +impl ExecutionPaymentRate for ExecutionPayment { + fn get_units_per_second(asset_location: MultiLocation) -> Option { + match asset_location { + a if a == PARENT => Some(1_000_000), + a if a == PARACHAIN => Some(2_000_000), + a if a == GENERAL_INDEX => Some(3_000_000), + _ => None, + } + } +} + +/// Execution fee for the specified weight, using provided `units_per_second` +fn execution_fee(weight: Weight, units_per_second: u128) -> u128 { + units_per_second * (weight.ref_time() as u128) / (WEIGHT_REF_TIME_PER_SECOND as u128) +} + +#[test] +fn asset_location_to_id() { + // Test cases where the MultiLocation is valid + assert_eq!( + AssetLocationIdConverter::::convert_ref(PARENT), + Ok(u128::MAX) + ); + assert_eq!( + AssetLocationIdConverter::::convert_ref(PARACHAIN), + Ok(20) + ); + assert_eq!( 
+ AssetLocationIdConverter::::convert_ref(GENERAL_INDEX), + Ok(30) + ); + + // Test case where MultiLocation isn't supported + assert_eq!( + AssetLocationIdConverter::::convert_ref(MultiLocation::here()), + Err(()) + ); +} + +#[test] +fn asset_id_to_location() { + // Test cases where the AssetId is valid + assert_eq!( + AssetLocationIdConverter::::reverse_ref(u128::MAX), + Ok(PARENT) + ); + assert_eq!( + AssetLocationIdConverter::::reverse_ref(20), + Ok(PARACHAIN) + ); + assert_eq!( + AssetLocationIdConverter::::reverse_ref(30), + Ok(GENERAL_INDEX) + ); + + // Test case where the AssetId isn't supported + assert_eq!( + AssetLocationIdConverter::::reverse_ref(0), + Err(()) + ); +} + +#[test] +fn fixed_rate_of_foreign_asset_buy_is_ok() { + let mut fixed_rate_trader = FixedRateOfForeignAsset::::new(); + + // The amount we have designated for payment (doesn't mean it will be used though) + let total_payment = 10_000; + let payment_multi_asset = MultiAsset { + id: xcm::latest::AssetId::Concrete(PARENT), + fun: Fungibility::Fungible(total_payment), + }; + let weight: Weight = Weight::from_ref_time(1_000_000_000); + + // Calculate the expected execution fee for the execution weight + let expected_execution_fee = execution_fee( + weight, + ExecutionPayment::get_units_per_second(PARENT).unwrap(), + ); + assert!(expected_execution_fee > 0); // sanity check + + // 1. 
Buy weight and expect it to be successful + let result = fixed_rate_trader.buy_weight(weight, payment_multi_asset.clone().into()); + if let Ok(assets) = result { + // We expect only one unused payment asset and specific amount + assert_eq!(assets.len(), 1); + assert_ok!(assets.ensure_contains( + &MultiAsset::from((PARENT, total_payment - expected_execution_fee)).into() + )); + + assert_eq!(fixed_rate_trader.consumed, expected_execution_fee); + assert_eq!(fixed_rate_trader.weight, weight); + assert_eq!( + fixed_rate_trader.asset_location_and_units_per_second, + Some(( + PARENT, + ExecutionPayment::get_units_per_second(PARENT).unwrap() + )) + ); + } else { + panic!("Should have been `Ok` wrapped Assets!"); + } + + // 2. Buy more weight, using the same trader and asset type. Verify it works as expected. + let (old_weight, old_consumed) = (fixed_rate_trader.weight, fixed_rate_trader.consumed); + + let weight: Weight = Weight::from_ref_time(3_500_000_000); + let expected_execution_fee = execution_fee( + weight, + ExecutionPayment::get_units_per_second(PARENT).unwrap(), + ); + assert!(expected_execution_fee > 0); // sanity check + + let result = fixed_rate_trader.buy_weight(weight, payment_multi_asset.clone().into()); + if let Ok(assets) = result { + // We expect only one unused payment asset and specific amount + assert_eq!(assets.len(), 1); + assert_ok!(assets.ensure_contains( + &MultiAsset::from((PARENT, total_payment - expected_execution_fee)).into() + )); + + assert_eq!( + fixed_rate_trader.consumed, + expected_execution_fee + old_consumed + ); + assert_eq!(fixed_rate_trader.weight, weight + old_weight); + assert_eq!( + fixed_rate_trader.asset_location_and_units_per_second, + Some(( + PARENT, + ExecutionPayment::get_units_per_second(PARENT).unwrap() + )) + ); + } else { + panic!("Should have been `Ok` wrapped Assets!"); + } + + // 3. Buy even more weight, but use a different type of asset now while reusing the old trader instance. 
+ let (old_weight, old_consumed) = (fixed_rate_trader.weight, fixed_rate_trader.consumed); + + // Note that the concrete asset type differs now from previous buys + let total_payment = 20_000; + let payment_multi_asset = MultiAsset { + id: xcm::latest::AssetId::Concrete(PARACHAIN), + fun: Fungibility::Fungible(total_payment), + }; + + let weight: Weight = Weight::from_ref_time(1_750_000_000); + let expected_execution_fee = execution_fee( + weight, + ExecutionPayment::get_units_per_second(PARACHAIN).unwrap(), + ); + assert!(expected_execution_fee > 0); // sanity check + + let result = fixed_rate_trader.buy_weight(weight, payment_multi_asset.clone().into()); + if let Ok(assets) = result { + // We expect only one unused payment asset and specific amount + assert_eq!(assets.len(), 1); + assert_ok!(assets.ensure_contains( + &MultiAsset::from((PARACHAIN, total_payment - expected_execution_fee)).into() + )); + + assert_eq!(fixed_rate_trader.weight, weight + old_weight); + // We don't expect this to change since trader already contains data about previous asset type. + // Current rule is not to update in this case. 
+ assert_eq!(fixed_rate_trader.consumed, old_consumed); + assert_eq!( + fixed_rate_trader.asset_location_and_units_per_second, + Some(( + PARENT, + ExecutionPayment::get_units_per_second(PARENT).unwrap() + )) + ); + } else { + panic!("Should have been `Ok` wrapped Assets!"); + } +} + +#[test] +fn fixed_rate_of_foreign_asset_buy_execution_fails() { + let mut fixed_rate_trader = FixedRateOfForeignAsset::::new(); + + // The amount we have designated for payment (doesn't mean it will be used though) + let total_payment = 1000; + let payment_multi_asset = MultiAsset { + id: xcm::latest::AssetId::Concrete(PARENT), + fun: Fungibility::Fungible(total_payment), + }; + let weight: Weight = Weight::from_ref_time(3_000_000_000); + + // Calculate the expected execution fee for the execution weight + let expected_execution_fee = execution_fee( + weight, + ExecutionPayment::get_units_per_second(PARENT).unwrap(), + ); + // sanity check, should be more for UT to make sense + assert!(expected_execution_fee > total_payment); + + // Expect failure because we lack the required funds + assert_eq!( + fixed_rate_trader.buy_weight(weight, payment_multi_asset.clone().into()), + Err(XcmError::TooExpensive) + ); + + // Try to pay with unsupported funds, expect failure + let payment_multi_asset = MultiAsset { + id: xcm::latest::AssetId::Concrete(MultiLocation::here()), + fun: Fungibility::Fungible(total_payment), + }; + assert_eq!( + fixed_rate_trader.buy_weight(Weight::zero(), payment_multi_asset.clone().into()), + Err(XcmError::TooExpensive) + ); +} + +#[test] +fn fixed_rate_of_foreign_asset_refund_is_ok() { + let mut fixed_rate_trader = FixedRateOfForeignAsset::::new(); + + // The amount we have designated for payment (doesn't mean it will be used though) + let total_payment = 10_000; + let payment_multi_asset = MultiAsset { + id: xcm::latest::AssetId::Concrete(PARENT), + fun: Fungibility::Fungible(total_payment), + }; + let weight: Weight = Weight::from_ref_time(1_000_000_000); + + // 
Calculate the expected execution fee for the execution weight and buy it + let expected_execution_fee = execution_fee( + weight, + ExecutionPayment::get_units_per_second(PARENT).unwrap(), + ); + assert!(expected_execution_fee > 0); // sanity check + assert_ok!(fixed_rate_trader.buy_weight(weight, payment_multi_asset.clone().into())); + + // Refund quarter and expect it to pass + let weight_to_refund = weight / 4; + let assets_to_refund = expected_execution_fee / 4; + let (old_weight, old_consumed) = (fixed_rate_trader.weight, fixed_rate_trader.consumed); + + let result = fixed_rate_trader.refund_weight(weight_to_refund); + if let Some(asset_location) = result { + assert_eq!(asset_location, (PARENT, assets_to_refund).into()); + + assert_eq!(fixed_rate_trader.weight, old_weight - weight_to_refund); + assert_eq!(fixed_rate_trader.consumed, old_consumed - assets_to_refund); + } + + // Refund more than remains and expect it to pass (saturated) + let assets_to_refund = fixed_rate_trader.consumed; + + let result = fixed_rate_trader.refund_weight(weight + Weight::from_ref_time(10000)); + if let Some(asset_location) = result { + assert_eq!(asset_location, (PARENT, assets_to_refund).into()); + + assert!(fixed_rate_trader.weight.is_zero()); + assert!(fixed_rate_trader.consumed.is_zero()); + } +} + +#[test] +fn reserve_asset_filter_for_sibling_parachain_is_ok() { + let asset_xc_location = MultiLocation { + parents: 1, + interior: X2(Parachain(20), GeneralIndex(30)), + }; + let multi_asset = MultiAsset { + id: xcm::latest::AssetId::Concrete(asset_xc_location), + fun: Fungibility::Fungible(123456), + }; + let origin = MultiLocation { + parents: 1, + interior: X1(Parachain(20)), + }; + + assert!(ReserveAssetFilter::contains(&multi_asset, &origin)); +} + +#[test] +fn reserve_asset_filter_for_relay_chain_is_ok() { + let asset_xc_location = MultiLocation { + parents: 1, + interior: Here, + }; + let multi_asset = MultiAsset { + id: xcm::latest::AssetId::Concrete(asset_xc_location), + 
fun: Fungibility::Fungible(123456), + }; + let origin = MultiLocation { + parents: 1, + interior: Here, + }; + + assert!(ReserveAssetFilter::contains(&multi_asset, &origin)); +} + +#[test] +fn reserve_asset_filter_with_origin_mismatch() { + let asset_xc_location = MultiLocation { + parents: 1, + interior: X2(Parachain(20), GeneralIndex(30)), + }; + let multi_asset = MultiAsset { + id: xcm::latest::AssetId::Concrete(asset_xc_location), + fun: Fungibility::Fungible(123456), + }; + let origin = MultiLocation { + parents: 1, + interior: Here, + }; + + assert!(!ReserveAssetFilter::contains(&multi_asset, &origin)); +} + +#[test] +fn reserve_asset_filter_for_unsupported_asset_multi_location() { + // 1st case + let asset_xc_location = MultiLocation { + parents: 0, + interior: X2(Parachain(20), GeneralIndex(30)), + }; + let multi_asset = MultiAsset { + id: xcm::latest::AssetId::Concrete(asset_xc_location), + fun: Fungibility::Fungible(123456), + }; + let origin = MultiLocation { + parents: 0, + interior: Here, + }; + + assert!(!ReserveAssetFilter::contains(&multi_asset, &origin)); + + // 2nd case + let asset_xc_location = MultiLocation { + parents: 1, + interior: X2(GeneralIndex(50), GeneralIndex(30)), + }; + let multi_asset = MultiAsset { + id: xcm::latest::AssetId::Concrete(asset_xc_location), + fun: Fungibility::Fungible(123456), + }; + let origin = MultiLocation { + parents: 1, + interior: X1(GeneralIndex(50)), + }; + + assert!(!ReserveAssetFilter::contains(&multi_asset, &origin)); +} + +/// Returns valid XCM sequence for bypassing `AllowPaidExecWithDescendOriginFrom` +fn desc_origin_barrier_valid_sequence() -> Vec> { + vec![ + DescendOrigin(X1(Junction::Parachain(1234))), + WithdrawAsset((Here, 100).into()), + BuyExecution { + fees: (Here, 100).into(), + weight_limit: WeightLimit::Unlimited, + }, + ] +} + +#[test] +fn allow_paid_exec_with_descend_origin_works() { + let mut valid_message = desc_origin_barrier_valid_sequence(); + + let res = 
AllowPaidExecWithDescendOriginFrom::::should_execute( + &Here.into(), + &mut valid_message, + Weight::from_ref_time(150), + &mut Weight::zero(), + ); + assert_eq!(res, Ok(())); + + // Still works even if there are follow-up instructions + valid_message = desc_origin_barrier_valid_sequence(); + valid_message.push(SetErrorHandler(Default::default())); + let res = AllowPaidExecWithDescendOriginFrom::::should_execute( + &Here.into(), + &mut valid_message, + Weight::from_ref_time(100), + &mut Weight::zero(), + ); + assert_eq!(res, Ok(())); +} + +#[test] +fn allow_paid_exec_with_descend_origin_with_weight_correction_works() { + let mut valid_message = desc_origin_barrier_valid_sequence(); + + // Ensure that `Limited` gets adjusted to the provided enforced_weight_limit + let enforced_weight_limit = Weight::from_ref_time(3); + let res = AllowPaidExecWithDescendOriginFrom::::should_execute( + &Here.into(), + &mut valid_message, + enforced_weight_limit, + &mut Weight::zero(), + ); + assert_eq!(res, Ok(())); + + if let BuyExecution { + weight_limit, + fees: _, + } = valid_message[2].clone() + { + assert_eq!(weight_limit, WeightLimit::Limited(enforced_weight_limit)) + } else { + panic!("3rd instruction should be BuyExecution!"); + } + + // Ensure that we use `BuyExecution` with `Unlimited` weight limit + let _ = std::mem::replace( + &mut valid_message[2], + BuyExecution { + fees: (Here, 100).into(), + weight_limit: WeightLimit::Limited(enforced_weight_limit.add_ref_time(7)), + }, + ); + + // Ensure that `Unlimited` gets adjusted to the provided max weight limit + let res = AllowPaidExecWithDescendOriginFrom::::should_execute( + &Here.into(), + &mut valid_message, + enforced_weight_limit, + &mut Weight::zero(), + ); + assert_eq!(res, Ok(())); + + if let BuyExecution { + weight_limit, + fees: _, + } = valid_message[2].clone() + { + assert_eq!(weight_limit, WeightLimit::Limited(enforced_weight_limit)) + } else { + panic!("3rd instruction should be BuyExecution!"); + } +} + 
+#[test] +fn allow_paid_exec_with_descend_origin_with_unsupported_origin_fails() { + let mut valid_message = desc_origin_barrier_valid_sequence(); + + let res = AllowPaidExecWithDescendOriginFrom::::should_execute( + &Here.into(), + &mut valid_message, + Weight::from_ref_time(100), + &mut Weight::zero(), + ); + assert_eq!(res, Err(())); +} + +#[test] +fn allow_paid_exec_with_descend_origin_with_invalid_message_fails() { + let mut invalid_message = vec![WithdrawAsset((Here, 100).into())]; + + let res = AllowPaidExecWithDescendOriginFrom::::should_execute( + &Here.into(), + &mut invalid_message, + Weight::from_ref_time(100), + &mut Weight::zero(), + ); + assert_eq!(res, Err(())); + + // Should still fail, even if correct sequence follows next + invalid_message.append(&mut desc_origin_barrier_valid_sequence()); + let res = AllowPaidExecWithDescendOriginFrom::::should_execute( + &Here.into(), + &mut invalid_message, + Weight::from_ref_time(100), + &mut Weight::zero(), + ); + assert_eq!(res, Err(())); +} + +#[test] +fn allow_paid_exec_with_descend_origin_too_small_weight_fails() { + let mut valid_message = desc_origin_barrier_valid_sequence(); + let enforced_weight_limit = Weight::from_ref_time(29); + + // Ensure that we use `BuyExecution` with `Limited` weight but with insufficient weight. + // This means that not enough execution time (weight) is being bought compared to the + // weight of whole sequence. 
+ let _ = std::mem::replace( + &mut valid_message[2], + BuyExecution { + fees: (Here, 100).into(), + weight_limit: WeightLimit::Limited(enforced_weight_limit.sub_ref_time(7)), + }, + ); + + let res = AllowPaidExecWithDescendOriginFrom::::should_execute( + &Here.into(), + &mut valid_message, + enforced_weight_limit, + &mut Weight::zero(), + ); + assert_eq!(res, Err(())); +} diff --git a/runtime/astar/Cargo.toml b/runtime/astar/Cargo.toml index 2037310a14..67f6959eb6 100644 --- a/runtime/astar/Cargo.toml +++ b/runtime/astar/Cargo.toml @@ -99,10 +99,10 @@ pallet-collator-selection = { workspace = true } pallet-custom-signatures = { workspace = true } pallet-dapps-staking = { workspace = true } pallet-evm-precompile-assets-erc20 = { workspace = true } +pallet-evm-precompile-dapps-staking = { workspace = true } pallet-evm-precompile-sr25519 = { workspace = true } pallet-evm-precompile-substrate-ecdsa = { workspace = true } pallet-evm-precompile-xcm = { workspace = true } -pallet-precompile-dapps-staking = { workspace = true } pallet-xc-asset-config = { workspace = true } pallet-xcm = { workspace = true } xcm-primitives = { workspace = true } @@ -147,7 +147,7 @@ std = [ "pallet-block-reward/std", "pallet-custom-signatures/std", "pallet-dapps-staking/std", - "pallet-precompile-dapps-staking/std", + "pallet-evm-precompile-dapps-staking/std", "pallet-evm-precompile-sr25519/std", "pallet-evm-precompile-assets-erc20/std", "pallet-evm-precompile-substrate-ecdsa/std", diff --git a/runtime/astar/src/precompiles.rs b/runtime/astar/src/precompiles.rs index 2788aa8b96..dbf9fdc170 100644 --- a/runtime/astar/src/precompiles.rs +++ b/runtime/astar/src/precompiles.rs @@ -24,6 +24,7 @@ use pallet_evm::{ use pallet_evm_precompile_assets_erc20::{AddressToAssetId, Erc20AssetsPrecompileSet}; use pallet_evm_precompile_blake2::Blake2F; use pallet_evm_precompile_bn128::{Bn128Add, Bn128Mul, Bn128Pairing}; +use pallet_evm_precompile_dapps_staking::DappsStakingWrapper; use 
pallet_evm_precompile_dispatch::Dispatch; use pallet_evm_precompile_ed25519::Ed25519Verify; use pallet_evm_precompile_modexp::Modexp; @@ -32,7 +33,6 @@ use pallet_evm_precompile_simple::{ECRecover, ECRecoverPublicKey, Identity, Ripe use pallet_evm_precompile_sr25519::Sr25519Precompile; use pallet_evm_precompile_substrate_ecdsa::SubstrateEcdsaPrecompile; use pallet_evm_precompile_xcm::XcmPrecompile; -use pallet_precompile_dapps_staking::DappsStakingWrapper; use sp_core::H160; use sp_std::fmt::Debug; use sp_std::marker::PhantomData; diff --git a/runtime/local/Cargo.toml b/runtime/local/Cargo.toml index fd226018c7..f3af11fe75 100644 --- a/runtime/local/Cargo.toml +++ b/runtime/local/Cargo.toml @@ -68,10 +68,10 @@ pallet-chain-extension-xvm = { workspace = true } pallet-custom-signatures = { workspace = true } pallet-dapps-staking = { workspace = true } pallet-evm-precompile-assets-erc20 = { workspace = true } +pallet-evm-precompile-dapps-staking = { workspace = true } pallet-evm-precompile-sr25519 = { workspace = true } pallet-evm-precompile-substrate-ecdsa = { workspace = true } pallet-evm-precompile-xvm = { workspace = true } -pallet-precompile-dapps-staking = { workspace = true } pallet-xvm = { workspace = true, features = ["evm", "wasm"] } # Moonbeam tracing @@ -123,7 +123,7 @@ std = [ "pallet-evm-precompile-ed25519/std", "pallet-evm-precompile-modexp/std", "pallet-evm-precompile-sha3fips/std", - "pallet-precompile-dapps-staking/std", + "pallet-evm-precompile-dapps-staking/std", "pallet-evm-precompile-sr25519/std", "pallet-evm-precompile-substrate-ecdsa/std", "pallet-evm-precompile-xvm/std", diff --git a/runtime/local/src/precompiles.rs b/runtime/local/src/precompiles.rs index 63b7308d66..3e11c9246b 100644 --- a/runtime/local/src/precompiles.rs +++ b/runtime/local/src/precompiles.rs @@ -24,6 +24,7 @@ use pallet_evm::{ use pallet_evm_precompile_assets_erc20::{AddressToAssetId, Erc20AssetsPrecompileSet}; use pallet_evm_precompile_blake2::Blake2F; use 
pallet_evm_precompile_bn128::{Bn128Add, Bn128Mul, Bn128Pairing}; +use pallet_evm_precompile_dapps_staking::DappsStakingWrapper; use pallet_evm_precompile_dispatch::Dispatch; use pallet_evm_precompile_ed25519::Ed25519Verify; use pallet_evm_precompile_modexp::Modexp; @@ -32,7 +33,6 @@ use pallet_evm_precompile_simple::{ECRecover, ECRecoverPublicKey, Identity, Ripe use pallet_evm_precompile_sr25519::Sr25519Precompile; use pallet_evm_precompile_substrate_ecdsa::SubstrateEcdsaPrecompile; use pallet_evm_precompile_xvm::XvmPrecompile; -use pallet_precompile_dapps_staking::DappsStakingWrapper; use sp_core::H160; use sp_std::fmt::Debug; use sp_std::marker::PhantomData; diff --git a/runtime/shibuya/Cargo.toml b/runtime/shibuya/Cargo.toml index b82d05ad8b..3db337e0c7 100644 --- a/runtime/shibuya/Cargo.toml +++ b/runtime/shibuya/Cargo.toml @@ -104,11 +104,11 @@ pallet-collator-selection = { workspace = true } pallet-custom-signatures = { workspace = true } pallet-dapps-staking = { workspace = true } pallet-evm-precompile-assets-erc20 = { workspace = true } +pallet-evm-precompile-dapps-staking = { workspace = true } pallet-evm-precompile-sr25519 = { workspace = true } pallet-evm-precompile-substrate-ecdsa = { workspace = true } pallet-evm-precompile-xcm = { workspace = true } pallet-evm-precompile-xvm = { workspace = true } -pallet-precompile-dapps-staking = { workspace = true } pallet-xc-asset-config = { workspace = true } pallet-xcm = { workspace = true } pallet-xvm = { workspace = true, features = ["evm", "wasm"] } @@ -179,7 +179,7 @@ std = [ "pallet-evm-precompile-ed25519/std", "pallet-evm-precompile-modexp/std", "pallet-evm-precompile-sha3fips/std", - "pallet-precompile-dapps-staking/std", + "pallet-evm-precompile-dapps-staking/std", "pallet-evm-precompile-sr25519/std", "pallet-evm-precompile-substrate-ecdsa/std", "pallet-evm-precompile-assets-erc20/std", diff --git a/runtime/shibuya/src/precompiles.rs b/runtime/shibuya/src/precompiles.rs index 37e06789f5..236946c1ef 
100644 --- a/runtime/shibuya/src/precompiles.rs +++ b/runtime/shibuya/src/precompiles.rs @@ -24,6 +24,7 @@ use pallet_evm::{ use pallet_evm_precompile_assets_erc20::{AddressToAssetId, Erc20AssetsPrecompileSet}; use pallet_evm_precompile_blake2::Blake2F; use pallet_evm_precompile_bn128::{Bn128Add, Bn128Mul, Bn128Pairing}; +use pallet_evm_precompile_dapps_staking::DappsStakingWrapper; use pallet_evm_precompile_dispatch::Dispatch; use pallet_evm_precompile_ed25519::Ed25519Verify; use pallet_evm_precompile_modexp::Modexp; @@ -33,7 +34,6 @@ use pallet_evm_precompile_sr25519::Sr25519Precompile; use pallet_evm_precompile_substrate_ecdsa::SubstrateEcdsaPrecompile; use pallet_evm_precompile_xcm::XcmPrecompile; use pallet_evm_precompile_xvm::XvmPrecompile; -use pallet_precompile_dapps_staking::DappsStakingWrapper; use sp_core::H160; use sp_std::fmt::Debug; use sp_std::marker::PhantomData; diff --git a/runtime/shiden/Cargo.toml b/runtime/shiden/Cargo.toml index 31c408be8d..0b986ae733 100644 --- a/runtime/shiden/Cargo.toml +++ b/runtime/shiden/Cargo.toml @@ -106,10 +106,10 @@ pallet-collator-selection = { workspace = true } pallet-custom-signatures = { workspace = true } pallet-dapps-staking = { workspace = true } pallet-evm-precompile-assets-erc20 = { workspace = true } +pallet-evm-precompile-dapps-staking = { workspace = true } pallet-evm-precompile-sr25519 = { workspace = true } pallet-evm-precompile-substrate-ecdsa = { workspace = true } pallet-evm-precompile-xcm = { workspace = true } -pallet-precompile-dapps-staking = { workspace = true } pallet-xc-asset-config = { workspace = true } pallet-xcm = { workspace = true } xcm-primitives = { workspace = true } @@ -165,7 +165,7 @@ std = [ "pallet-evm-precompile-modexp/std", "pallet-evm-precompile-sha3fips/std", "pallet-dapps-staking/std", - "pallet-precompile-dapps-staking/std", + "pallet-evm-precompile-dapps-staking/std", "pallet-evm-precompile-sr25519/std", "pallet-evm-precompile-assets-erc20/std", 
"pallet-evm-precompile-substrate-ecdsa/std", diff --git a/runtime/shiden/src/precompiles.rs b/runtime/shiden/src/precompiles.rs index d847270bac..7b07fcaca0 100644 --- a/runtime/shiden/src/precompiles.rs +++ b/runtime/shiden/src/precompiles.rs @@ -24,6 +24,7 @@ use pallet_evm::{ use pallet_evm_precompile_assets_erc20::{AddressToAssetId, Erc20AssetsPrecompileSet}; use pallet_evm_precompile_blake2::Blake2F; use pallet_evm_precompile_bn128::{Bn128Add, Bn128Mul, Bn128Pairing}; +use pallet_evm_precompile_dapps_staking::DappsStakingWrapper; use pallet_evm_precompile_dispatch::Dispatch; use pallet_evm_precompile_ed25519::Ed25519Verify; use pallet_evm_precompile_modexp::Modexp; @@ -32,7 +33,6 @@ use pallet_evm_precompile_simple::{ECRecover, ECRecoverPublicKey, Identity, Ripe use pallet_evm_precompile_sr25519::Sr25519Precompile; use pallet_evm_precompile_substrate_ecdsa::SubstrateEcdsaPrecompile; use pallet_evm_precompile_xcm::XcmPrecompile; -use pallet_precompile_dapps_staking::DappsStakingWrapper; use sp_core::H160; use sp_std::fmt::Debug; use sp_std::marker::PhantomData; diff --git a/scripts/make-tags.sh b/scripts/make-tags.sh new file mode 100755 index 0000000000..e13b2dee03 --- /dev/null +++ b/scripts/make-tags.sh @@ -0,0 +1,58 @@ +#!/bin/sh +################################################################################ +################################################################################ +## +## DESCRIPTION +## This script implements tag making rules for Astar Frame repository. Each +## pallet should be tagged according to its version and used polkadot release. 
+## +## USAGE +## ./script/make-tags.sh [POLKADOT_VERSION] [PALLET_PATH] +## * POLKADOT_VERSION (optional) is version to create tags, if missed +## then current branch name will be used (suits for default branch) +## * PALLET_PATH (optional) is path to precise pallet for making tags +## +################################################################################ +################################################################################ + +function create_tags { + CARGO_TOML_PATH="$2/Cargo.toml" + if [ ! -f "$CARGO_TOML_PATH" ]; then + echo "$CARGO_TOML_PATH does not exist" + exit 1 + fi + + local line key value entry_regex + entry_regex="^[[:blank:]]*([[:alnum:]_-]+)[[:blank:]]*=[[:blank:]]*('[^']+'|\"[^\"]+\"|[^#]+)" + while read -r line; do + [[ -n $line ]] || continue + [[ $line =~ $entry_regex ]] || continue + key=${BASH_REMATCH[1]} + value=${BASH_REMATCH[2]#[\'\"]} # strip quotes + value=${value%[\'\"]} + value=${value%${value##*[![:blank:]]}} # strip trailing spaces + + if [[ "$key" == "name" ]]; then PKG_NAME=$value; fi + if [[ "$key" == "version" ]]; then PKG_VERSION=$value; fi + done < "$CARGO_TOML_PATH" + + PKG_TAG="$PKG_NAME-$PKG_VERSION/$1" + echo -e $PKG_TAG + + git tag $PKG_TAG +} + +if [ -z "$1" ]; then + POLKADOT_VERSION=$(git branch --show-current) +else + POLKADOT_VERSION=$1 +fi + +if [ ! 
-z "$2" ]; then + create_tags $POLKADOT_VERSION $2 +else + # create tags for all pallets + for PALLET_PATH in $(find ./frame ./precompiles -mindepth 1 -maxdepth 1 -type d 2>/dev/zero); do + create_tags $POLKADOT_VERSION $PALLET_PATH + done +fi diff --git a/tests/integration/Cargo.toml b/tests/integration/Cargo.toml index cd4d39f289..9e291b6295 100644 --- a/tests/integration/Cargo.toml +++ b/tests/integration/Cargo.toml @@ -20,9 +20,9 @@ sp-io = { workspace = true } sp-runtime = { workspace = true } # runtime -astar-runtime = { path = "../../runtime/astar", optional = true } -shibuya-runtime = { path = "../../runtime/shibuya", optional = true } -shiden-runtime = { path = "../../runtime/shiden", optional = true } +astar-runtime = { workspace = true, features = ["std"], optional = true } +shibuya-runtime = { workspace = true, features = ["std"], optional = true } +shiden-runtime = { workspace = true, features = ["std"], optional = true } [features] default = ["std"] diff --git a/vendor/README.md b/vendor/README.md new file mode 100644 index 0000000000..cf93add6c5 --- /dev/null +++ b/vendor/README.md @@ -0,0 +1,31 @@ +# Third party packages + +This directory contains packages from third-party vendors reused in Astar Network. + +### Why not fork? + +As a way to protect code from **unexpected changes** and **release from third-party project +internal dependencies**, a directory is used instead of a repository fork. + +When porting changes from the external vendor projects into astar repo, +changes will be visible as part of the difference introduced by PR. This ensures we don't +introduce any unintentional changes without being aware of them.
+ +## Package list + +| Directory | Package name | Origin | +|------------------------------------|--------------------------------|-----------------------------------------------| +| evm-tracing | moonbeam-client-evm-tracing | ${moonbeam}/client/evm-tracing | +| rpc/debug | moonbeam-rpc-debug | ${moonbeam}/client/rpc/debug | +| rpc/trace | moonbeam-rpc-trace | ${moonbeam}/client/rpc/trace | +| rpc/txpool | moonbeam-rpc-txpool | ${moonbeam}/client/rpc/txpool | +| rpc-core/types | moonbeam-rpc-core-types | ${moonbeam}/client/rpc-core/types | +| rpc-core/debug | moonbeam-rpc-core-debug | ${moonbeam}/client/rpc-core/debug | +| rpc-core/trace | moonbeam-rpc-core-trace | ${moonbeam}/client/rpc-core/trace | +| rpc-core/txpool | moonbeam-rpc-core-txpool | ${moonbeam}/client/rpc-core/txpool | +| runtime/evm_tracer | moonbeam-evm-tracer | ${moonbeam}/runtime/evm_tracer | +| runtime/ext | moonbeam-primitives-ext | ${moonbeam}/primitives/ext | +| primitives/evm-tracing-events | evm-tracing-events | ${moonbeam}/primitives/rpc/evm-tracing-events | +| primitives/debug | moonbeam-rpc-primitives-debug | ${moonbeam}/primitives/rpc/debug | +| primitives/txpool | moonbeam-rpc-primitives-txpool | ${moonbeam}/primitives/rpc/txpool | + diff --git a/vendor/evm-tracing/Cargo.toml b/vendor/evm-tracing/Cargo.toml new file mode 100644 index 0000000000..7a2a330bf8 --- /dev/null +++ b/vendor/evm-tracing/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "moonbeam-client-evm-tracing" +authors = ["PureStake"] +edition = "2021" +homepage = "https://moonbeam.network" +license = "GPL-3.0-only" +repository = "https://github.com/PureStake/moonbeam/" +version = "0.1.0" + +[dependencies] +ethereum-types = { workspace = true, features = ["std"] } +hex = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } + +# Moonbeam +evm-tracing-events = { workspace = true, features = ["std"] } +moonbeam-rpc-primitives-debug = { workspace = true } + +# Substrate +parity-scale-codec = { 
workspace = true } +sp-std = { workspace = true, features = ["std"] } diff --git a/vendor/evm-tracing/src/formatters/blockscout.rs b/vendor/evm-tracing/src/formatters/blockscout.rs new file mode 100644 index 0000000000..7c10a9dd78 --- /dev/null +++ b/vendor/evm-tracing/src/formatters/blockscout.rs @@ -0,0 +1,92 @@ +// Copyright 2019-2022 PureStake Inc. +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . + +use crate::listeners::call_list::Listener; +use crate::types::serialization::*; +use crate::types::{ + single::{Call, TransactionTrace}, + CallResult, CallType, CreateResult, +}; +use ethereum_types::{H160, U256}; +use parity_scale_codec::{Decode, Encode}; +use serde::Serialize; + +pub struct Formatter; + +impl super::ResponseFormatter for Formatter { + type Listener = Listener; + type Response = TransactionTrace; + + fn format(listener: Listener) -> Option { + if let Some(entry) = listener.entries.last() { + return Some(TransactionTrace::CallList( + entry + .into_iter() + .map(|(_, value)| Call::Blockscout(value.clone())) + .collect(), + )); + } + None + } +} + +#[derive(Clone, Eq, PartialEq, Debug, Encode, Decode, Serialize)] +#[serde(rename_all = "lowercase", tag = "type")] +pub enum BlockscoutCallInner { + Call { + #[serde(rename(serialize = "callType"))] + /// Type of call. 
+ call_type: CallType, + to: H160, + #[serde(serialize_with = "bytes_0x_serialize")] + input: Vec, + /// "output" or "error" field + #[serde(flatten)] + res: CallResult, + }, + Create { + #[serde(serialize_with = "bytes_0x_serialize")] + init: Vec, + #[serde(flatten)] + res: CreateResult, + }, + SelfDestruct { + #[serde(skip)] + balance: U256, + to: H160, + }, +} + +#[derive(Clone, Eq, PartialEq, Debug, Encode, Decode, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct BlockscoutCall { + pub from: H160, + /// Indices of parent calls. + pub trace_address: Vec, + /// Number of children calls. + /// Not needed for Blockscout, but needed for `crate::block` + /// types that are build from this type. + #[serde(skip)] + pub subtraces: u32, + /// Sends funds to the (payable) function + pub value: U256, + /// Remaining gas in the runtime. + pub gas: U256, + /// Gas used by this context. + pub gas_used: U256, + #[serde(flatten)] + pub inner: BlockscoutCallInner, +} diff --git a/vendor/evm-tracing/src/formatters/call_tracer.rs b/vendor/evm-tracing/src/formatters/call_tracer.rs new file mode 100644 index 0000000000..af9ca34971 --- /dev/null +++ b/vendor/evm-tracing/src/formatters/call_tracer.rs @@ -0,0 +1,315 @@ +// Copyright 2019-2022 PureStake Inc. +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . 
+ +use super::blockscout::BlockscoutCallInner; +use crate::types::{ + single::{Call, TransactionTrace}, + CallResult, CallType, CreateResult, +}; + +use crate::listeners::call_list::Listener; + +use crate::types::serialization::*; +use serde::Serialize; + +use ethereum_types::{H160, U256}; +use parity_scale_codec::{Decode, Encode}; +use sp_std::{cmp::Ordering, vec::Vec}; + +pub struct Formatter; + +impl super::ResponseFormatter for Formatter { + type Listener = Listener; + type Response = Vec; + + fn format(mut listener: Listener) -> Option> { + // Remove empty BTreeMaps pushed to `entries`. + // I.e. InvalidNonce or other pallet_evm::runner exits + listener.entries.retain(|x| !x.is_empty()); + let mut traces = Vec::new(); + for entry in listener.entries.iter() { + let mut result: Vec = entry + .into_iter() + .map(|(_, it)| { + let from = it.from; + let trace_address = it.trace_address.clone(); + let value = it.value; + let gas = it.gas; + let gas_used = it.gas_used; + let inner = it.inner.clone(); + Call::CallTracer(CallTracerCall { + from: from, + gas: gas, + gas_used: gas_used, + trace_address: Some(trace_address.clone()), + inner: match inner.clone() { + BlockscoutCallInner::Call { + input, + to, + res, + call_type, + } => CallTracerInner::Call { + call_type: match call_type { + CallType::Call => "CALL".as_bytes().to_vec(), + CallType::CallCode => "CALLCODE".as_bytes().to_vec(), + CallType::DelegateCall => "DELEGATECALL".as_bytes().to_vec(), + CallType::StaticCall => "STATICCALL".as_bytes().to_vec(), + }, + to, + input, + res, + value: Some(value), + }, + BlockscoutCallInner::Create { init, res } => CallTracerInner::Create { + input: init, + error: match res { + CreateResult::Success { .. } => None, + CreateResult::Error { ref error } => Some(error.clone()), + }, + to: match res { + CreateResult::Success { + created_contract_address_hash, + .. + } => Some(created_contract_address_hash), + CreateResult::Error { .. 
} => None, + }, + output: match res { + CreateResult::Success { + created_contract_code, + .. + } => Some(created_contract_code), + CreateResult::Error { .. } => None, + }, + value: value, + call_type: "CREATE".as_bytes().to_vec(), + }, + BlockscoutCallInner::SelfDestruct { balance, to } => { + CallTracerInner::SelfDestruct { + value: balance, + to, + call_type: "SELFDESTRUCT".as_bytes().to_vec(), + } + } + }, + calls: Vec::new(), + }) + }) + .collect(); + // Geth's `callTracer` expects a tree of nested calls and we have a stack. + // + // We iterate over the sorted stack, and push each children to it's + // parent (the item which's `trace_address` matches &T[0..T.len()-1]) until there + // is a single item on the list. + // + // The last remaining item is the context call with all it's descendants. I.e. + // + // # Input + // [] + // [0] + // [0,0] + // [0,0,0] + // [0,1] + // [0,1,0] + // [0,1,1] + // [0,1,2] + // [1] + // [1,0] + // + // # Sorted + // [0,0,0] -> pop 0 and push to [0,0] + // [0,1,0] -> pop 0 and push to [0,1] + // [0,1,1] -> pop 1 and push to [0,1] + // [0,1,2] -> pop 2 and push to [0,1] + // [0,0] -> pop 0 and push to [0] + // [0,1] -> pop 1 and push to [0] + // [1,0] -> pop 0 and push to [1] + // [0] -> pop 0 and push to root + // [1] -> pop 1 and push to root + // [] + // + // # Result + // root { + // calls: { + // 0 { 0 { 0 }, 1 { 0, 1, 2 }}, + // 1 { 0 }, + // } + // } + if result.len() > 1 { + // Sort the stack. Assume there is no `Ordering::Equal`, as we are + // sorting by index. + // + // We consider an item to be `Ordering::Less` when: + // - Is closer to the root or + // - Is greater than its sibling. + result.sort_by(|a, b| match (a, b) { + ( + Call::CallTracer(CallTracerCall { + trace_address: Some(a), + .. + }), + Call::CallTracer(CallTracerCall { + trace_address: Some(b), + .. 
+ }), + ) => { + let a_len = a.len(); + let b_len = b.len(); + let sibling_greater_than = |a: &Vec, b: &Vec| -> bool { + for (i, a_value) in a.iter().enumerate() { + if a_value > &b[i] { + return true; + } else if a_value < &b[i] { + return false; + } else { + continue; + } + } + return false; + }; + if b_len > a_len || (a_len == b_len && sibling_greater_than(&a, &b)) { + Ordering::Less + } else { + Ordering::Greater + } + } + _ => unreachable!(), + }); + // Stack pop-and-push. + while result.len() > 1 { + let mut last = result + .pop() + .expect("result.len() > 1, so pop() necessarily returns an element"); + // Find the parent index. + if let Some(index) = + result + .iter() + .position(|current| match (last.clone(), current) { + ( + Call::CallTracer(CallTracerCall { + trace_address: Some(a), + .. + }), + Call::CallTracer(CallTracerCall { + trace_address: Some(b), + .. + }), + ) => { + &b[..] + == a.get(0..a.len() - 1).expect( + "non-root element while traversing trace result", + ) + } + _ => unreachable!(), + }) + { + // Remove `trace_address` from result. + if let Call::CallTracer(CallTracerCall { + ref mut trace_address, + .. + }) = last + { + *trace_address = None; + } + // Push the children to parent. + if let Some(Call::CallTracer(CallTracerCall { calls, .. })) = + result.get_mut(index) + { + calls.push(last); + } + } + } + } + // Remove `trace_address` from result. + if let Some(Call::CallTracer(CallTracerCall { trace_address, .. })) = result.get_mut(0) + { + *trace_address = None; + } + if result.len() == 1 { + traces.push(TransactionTrace::CallListNested(result.pop().expect( + "result.len() == 1, so pop() necessarily returns this element", + ))); + } + } + if traces.is_empty() { + return None; + } + return Some(traces); + } +} + +#[derive(Clone, Eq, PartialEq, Debug, Encode, Decode, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct CallTracerCall { + pub from: H160, + + /// Indices of parent calls. Used to build the Etherscan nested response. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub trace_address: Option>, + + /// Remaining gas in the runtime. + pub gas: U256, + /// Gas used by this context. + pub gas_used: U256, + + #[serde(flatten)] + pub inner: CallTracerInner, + + #[serde(skip_serializing_if = "Vec::is_empty")] + pub calls: Vec, +} + +#[derive(Clone, Eq, PartialEq, Debug, Encode, Decode, Serialize)] +#[serde(untagged)] +pub enum CallTracerInner { + Call { + #[serde(rename = "type", serialize_with = "opcode_serialize")] + call_type: Vec, + to: H160, + #[serde(serialize_with = "bytes_0x_serialize")] + input: Vec, + /// "output" or "error" field + #[serde(flatten)] + res: CallResult, + + #[serde(skip_serializing_if = "Option::is_none")] + value: Option, + }, + Create { + #[serde(rename = "type", serialize_with = "opcode_serialize")] + call_type: Vec, + #[serde(serialize_with = "bytes_0x_serialize")] + input: Vec, + #[serde(skip_serializing_if = "Option::is_none")] + to: Option, + #[serde( + skip_serializing_if = "Option::is_none", + serialize_with = "option_bytes_0x_serialize" + )] + output: Option>, + #[serde( + skip_serializing_if = "Option::is_none", + serialize_with = "option_string_serialize" + )] + error: Option>, + value: U256, + }, + SelfDestruct { + #[serde(rename = "type", serialize_with = "opcode_serialize")] + call_type: Vec, + to: H160, + value: U256, + }, +} diff --git a/vendor/evm-tracing/src/formatters/mod.rs b/vendor/evm-tracing/src/formatters/mod.rs new file mode 100644 index 0000000000..3d991ecea4 --- /dev/null +++ b/vendor/evm-tracing/src/formatters/mod.rs @@ -0,0 +1,35 @@ +// Copyright 2019-2022 PureStake Inc. +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . + +pub mod blockscout; +pub mod call_tracer; +pub mod raw; +pub mod trace_filter; + +pub use blockscout::Formatter as Blockscout; +pub use call_tracer::Formatter as CallTracer; +pub use raw::Formatter as Raw; +pub use trace_filter::Formatter as TraceFilter; + +use evm_tracing_events::Listener; +use serde::Serialize; + +pub trait ResponseFormatter { + type Listener: Listener; + type Response: Serialize; + + fn format(listener: Self::Listener) -> Option; +} diff --git a/vendor/evm-tracing/src/formatters/raw.rs b/vendor/evm-tracing/src/formatters/raw.rs new file mode 100644 index 0000000000..785beeb3ba --- /dev/null +++ b/vendor/evm-tracing/src/formatters/raw.rs @@ -0,0 +1,37 @@ +// Copyright 2019-2022 PureStake Inc. +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . 
+ +use crate::listeners::raw::Listener; +use crate::types::single::TransactionTrace; + +pub struct Formatter; + +impl super::ResponseFormatter for Formatter { + type Listener = Listener; + type Response = TransactionTrace; + + fn format(listener: Listener) -> Option { + if listener.remaining_memory_usage.is_none() { + None + } else { + Some(TransactionTrace::Raw { + step_logs: listener.step_logs, + gas: listener.final_gas.into(), + return_value: listener.return_value, + }) + } + } +} diff --git a/vendor/evm-tracing/src/formatters/trace_filter.rs b/vendor/evm-tracing/src/formatters/trace_filter.rs new file mode 100644 index 0000000000..5c4c432f11 --- /dev/null +++ b/vendor/evm-tracing/src/formatters/trace_filter.rs @@ -0,0 +1,134 @@ +// Copyright 2019-2022 PureStake Inc. +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . + +use super::blockscout::BlockscoutCallInner as CallInner; +use crate::listeners::call_list::Listener; +use crate::types::{ + block::{ + TransactionTrace, TransactionTraceAction, TransactionTraceOutput, TransactionTraceResult, + }, + CallResult, CreateResult, CreateType, +}; +use ethereum_types::H256; + +pub struct Formatter; + +impl super::ResponseFormatter for Formatter { + type Listener = Listener; + type Response = Vec; + + fn format(mut listener: Listener) -> Option> { + // Remove empty BTreeMaps pushed to `entries`. + // I.e. 
InvalidNonce or other pallet_evm::runner exits + listener.entries.retain(|x| !x.is_empty()); + let mut traces = Vec::new(); + for (eth_tx_index, entry) in listener.entries.iter().enumerate() { + let mut tx_traces: Vec<_> = entry + .into_iter() + .map(|(_, trace)| match trace.inner.clone() { + CallInner::Call { + input, + to, + res, + call_type, + } => TransactionTrace { + action: TransactionTraceAction::Call { + call_type, + from: trace.from, + gas: trace.gas, + input, + to, + value: trace.value, + }, + // Can't be known here, must be inserted upstream. + block_hash: H256::default(), + // Can't be known here, must be inserted upstream. + block_number: 0, + output: match res { + CallResult::Output(output) => { + TransactionTraceOutput::Result(TransactionTraceResult::Call { + gas_used: trace.gas_used, + output, + }) + } + CallResult::Error(error) => TransactionTraceOutput::Error(error), + }, + subtraces: trace.subtraces, + trace_address: trace.trace_address.clone(), + // Can't be known here, must be inserted upstream. + transaction_hash: H256::default(), + transaction_position: eth_tx_index as u32, + }, + CallInner::Create { init, res } => { + TransactionTrace { + action: TransactionTraceAction::Create { + creation_method: CreateType::Create, + from: trace.from, + gas: trace.gas, + init, + value: trace.value, + }, + // Can't be known here, must be inserted upstream. + block_hash: H256::default(), + // Can't be known here, must be inserted upstream. + block_number: 0, + output: match res { + CreateResult::Success { + created_contract_address_hash, + created_contract_code, + } => { + TransactionTraceOutput::Result(TransactionTraceResult::Create { + gas_used: trace.gas_used, + code: created_contract_code, + address: created_contract_address_hash, + }) + } + CreateResult::Error { error } => { + TransactionTraceOutput::Error(error) + } + }, + subtraces: trace.subtraces, + trace_address: trace.trace_address.clone(), + // Can't be known here, must be inserted upstream. 
+ transaction_hash: H256::default(), + transaction_position: eth_tx_index as u32, + } + } + CallInner::SelfDestruct { balance, to } => TransactionTrace { + action: TransactionTraceAction::Suicide { + address: trace.from, + balance, + refund_address: to, + }, + // Can't be known here, must be inserted upstream. + block_hash: H256::default(), + // Can't be known here, must be inserted upstream. + block_number: 0, + output: TransactionTraceOutput::Result(TransactionTraceResult::Suicide), + subtraces: trace.subtraces, + trace_address: trace.trace_address.clone(), + // Can't be known here, must be inserted upstream. + transaction_hash: H256::default(), + transaction_position: eth_tx_index as u32, + }, + }) + .collect(); + + traces.append(&mut tx_traces); + } + Some(traces) + } +} diff --git a/vendor/evm-tracing/src/lib.rs b/vendor/evm-tracing/src/lib.rs new file mode 100644 index 0000000000..1c964b43a0 --- /dev/null +++ b/vendor/evm-tracing/src/lib.rs @@ -0,0 +1,21 @@ +// Copyright 2019-2022 PureStake Inc. +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . + +//! This crate contains the client-side part that interacts with our "v2" tracing design. 
+ +pub mod formatters; +pub mod listeners; +pub mod types; diff --git a/vendor/evm-tracing/src/listeners/call_list.rs b/vendor/evm-tracing/src/listeners/call_list.rs new file mode 100644 index 0000000000..6d60948a4d --- /dev/null +++ b/vendor/evm-tracing/src/listeners/call_list.rs @@ -0,0 +1,1130 @@ +// Copyright 2019-2022 PureStake Inc. +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . + +use crate::formatters::blockscout::BlockscoutCall as Call; +use crate::formatters::blockscout::BlockscoutCallInner as CallInner; +use crate::types::{CallResult, CallType, ContextType, CreateResult}; +use ethereum_types::{H160, U256}; +use evm_tracing_events::{ + runtime::{Capture, ExitError, ExitReason, ExitSucceed}, + Event, EvmEvent, GasometerEvent, Listener as ListenerT, RuntimeEvent, StepEventFilter, +}; +use std::{collections::btree_map::BTreeMap, vec, vec::Vec}; + +/// Enum of the different "modes" of tracer for multiple runtime versions and +/// the kind of EVM events that are emitted. +enum TracingVersion { + /// The first event of the transaction is `EvmEvent::TransactX`. It goes along other events + /// such as `EvmEvent::Exit`. All contexts should have clear start/end boundaries. + EarlyTransact, + /// Older version in which the events above didn't existed. + /// It means that we cannot rely on those events to perform any task, and must rely only + /// on other events. 
+	Legacy,
+}
+
+pub struct Listener {
+	/// Version of the tracing.
+	/// Defaults to legacy, and switch to a more modern version if recently added events are
+	/// received.
+	version: TracingVersion,
+
+	// Transaction cost that must be added to the first context cost.
+	transaction_cost: u64,
+
+	// Final logs.
+	pub entries: Vec<BTreeMap<u32, Call>>,
+	// Next index to use.
+	entries_next_index: u32,
+	// Stack of contexts with data to keep between events.
+	context_stack: Vec<Context>,
+
+	// Type of the next call.
+	// By default is None and corresponds to the root call, which
+	// can be determined using the `is_static` field of the `Call` event.
+	// Then by looking at call traps events we can set this value to the correct
+	// call type, to be used when the following `Call` event is received.
+	call_type: Option<CallType>,
+
+	/// When `EvmEvent::TransactX` is received it creates its own context. However it will usually
+	/// be followed by an `EvmEvent::Call/Create` that will also create a context, which must be
+	/// prevented. It must however not be skipped if `EvmEvent::TransactX` was not received
+	/// (in legacy mode).
+	skip_next_context: bool,
+
+	// /// To handle EvmEvent::Exit not emitted by previous runtime versions,
+	// /// entries are not inserted directly in `self.entries`.
+	// pending_entries: Vec<(u32, Call)>,
+	/// See `RuntimeEvent::StepResult` event explanations.
+	step_result_entry: Option<(u32, Call)>,
+
+	/// When tracing a block `Event::CallListNew` is emitted before each Ethereum transaction is
+	/// processed. Since we use that event to **finish** the transaction, we must ignore the first
+	/// one.
+	call_list_first_transaction: bool,
+
+	/// True if only the `GasometerEvent::RecordTransaction` event has been received.
+	/// Allow to correctly handle transactions that cannot pay for the tx data in Legacy mode.
+	record_transaction_event_only: bool,
+}
+
+struct Context {
+	entries_index: u32,
+
+	context_type: ContextType,
+
+	from: H160,
+	trace_address: Vec<u32>,
+	subtraces: u32,
+	value: U256,
+
+	gas: u64,
+	start_gas: Option<u64>,
+
+	// input / data
+	data: Vec<u8>,
+	// to / create address
+	to: H160,
+}
+
+impl Default for Listener {
+	fn default() -> Self {
+		Self {
+			version: TracingVersion::Legacy,
+			transaction_cost: 0,
+
+			entries: vec![],
+			entries_next_index: 0,
+
+			context_stack: vec![],
+
+			call_type: None,
+			step_result_entry: None,
+			skip_next_context: false,
+			call_list_first_transaction: true,
+			record_transaction_event_only: false,
+		}
+	}
+}
+
+impl Listener {
+	pub fn using<R, F: FnOnce() -> R>(&mut self, f: F) -> R {
+		evm_tracing_events::using(self, f)
+	}
+
+	/// Called at the end of each transaction when tracing.
+	/// Allow to insert the pending entries regardless of which runtime version
+	/// is used (with or without EvmEvent::Exit).
+	pub fn finish_transaction(&mut self) {
+		// remove any leftover context
+		let mut context_stack = vec![];
+		core::mem::swap(&mut self.context_stack, &mut context_stack);
+
+		// if there is a leftover context, there has been an early exit.
+		// we generate an entry from it and discard any inner context.
+ if let Some(context) = context_stack.into_iter().next() { + let mut gas_used = context.start_gas.unwrap_or(0) - context.gas; + if context.entries_index == 0 { + gas_used += self.transaction_cost; + } + + let entry = match context.context_type { + ContextType::Call(call_type) => { + let res = CallResult::Error( + b"early exit (out of gas, stack overflow, direct call to precompile, ...)" + .to_vec(), + ); + Call { + from: context.from, + trace_address: context.trace_address, + subtraces: context.subtraces, + value: context.value, + gas: context.gas.into(), + gas_used: gas_used.into(), + inner: CallInner::Call { + call_type, + to: context.to, + input: context.data, + res, + }, + } + } + ContextType::Create => { + let res = CreateResult::Error { + error: b"early exit (out of gas, stack overflow, direct call to precompile, ...)".to_vec(), + }; + + Call { + value: context.value, + trace_address: context.trace_address, + subtraces: context.subtraces, + gas: context.gas.into(), + gas_used: gas_used.into(), + from: context.from, + inner: CallInner::Create { + init: context.data, + res, + }, + } + } + }; + + self.insert_entry(context.entries_index, entry); + // Since only this context/entry is kept, we need update entries_next_index too. + self.entries_next_index = context.entries_index + 1; + } + // However if the transaction had a too low gas limit to pay for the data cost itself, + // and `EvmEvent::Exit` is not emitted in **Legacy mode**, then it has never produced any + // context (and exited **early in the transaction**). 
+ else if self.record_transaction_event_only { + let res = CallResult::Error( + b"transaction could not pay its own data cost (impossible to gather more info)" + .to_vec(), + ); + + let entry = Call { + from: H160::repeat_byte(0), + trace_address: vec![], + subtraces: 0, + value: 0.into(), + gas: 0.into(), + gas_used: 0.into(), + inner: CallInner::Call { + call_type: CallType::Call, + to: H160::repeat_byte(0), + input: vec![], + res, + }, + }; + + self.insert_entry(self.entries_next_index, entry); + self.entries_next_index += 1; + } + } + + pub fn gasometer_event(&mut self, event: GasometerEvent) { + match event { + GasometerEvent::RecordCost { snapshot, .. } + | GasometerEvent::RecordDynamicCost { snapshot, .. } + | GasometerEvent::RecordStipend { snapshot, .. } => { + if let Some(context) = self.context_stack.last_mut() { + if context.start_gas.is_none() { + context.start_gas = Some(snapshot.gas()); + } + context.gas = snapshot.gas(); + } + } + GasometerEvent::RecordTransaction { cost, .. } => { + self.transaction_cost = cost; + self.record_transaction_event_only = true; + } + // We ignore other kinds of message if any (new ones may be added in the future). + #[allow(unreachable_patterns)] + _ => (), + } + } + + pub fn runtime_event(&mut self, event: RuntimeEvent) { + match event { + RuntimeEvent::StepResult { + result: Err(Capture::Trap(opcode)), + .. + } => { + if let Some(ContextType::Call(call_type)) = ContextType::from(opcode) { + self.call_type = Some(call_type) + } + } + RuntimeEvent::StepResult { + result: Err(Capture::Exit(reason)), + return_value, + } => { + if let Some((key, entry)) = self.pop_context_to_entry(reason, return_value) { + match self.version { + TracingVersion::Legacy => { + // In Legacy mode we directly insert the entry. + self.insert_entry(key, entry); + } + TracingVersion::EarlyTransact => { + // In EarlyTransact mode this context must be used if this event is + // emitted. 
However the context of `EvmEvent::Exit` must be used if + // `StepResult` is skipped. For that reason we store this generated + // entry in a temporary value, and deal with it in `EvmEvent::Exit` that + // will be called in all cases. + self.step_result_entry = Some((key, entry)); + } + } + } + } + // We ignore other kinds of message if any (new ones may be added in the future). + #[allow(unreachable_patterns)] + _ => (), + } + } + + pub fn evm_event(&mut self, event: EvmEvent) { + match event { + EvmEvent::TransactCall { + caller, + address, + value, + data, + .. + } => { + self.record_transaction_event_only = false; + self.version = TracingVersion::EarlyTransact; + self.context_stack.push(Context { + entries_index: self.entries_next_index, + + context_type: ContextType::Call(CallType::Call), + + from: caller, + trace_address: vec![], + subtraces: 0, + value, + + gas: 0, + start_gas: None, + + data, + to: address, + }); + + self.entries_next_index += 1; + self.skip_next_context = true; + } + + EvmEvent::TransactCreate { + caller, + value, + init_code, + address, + .. + } => { + self.record_transaction_event_only = false; + self.version = TracingVersion::EarlyTransact; + self.context_stack.push(Context { + entries_index: self.entries_next_index, + + context_type: ContextType::Create, + + from: caller, + trace_address: vec![], + subtraces: 0, + value, + + gas: 0, + start_gas: None, + + data: init_code, + to: address, + }); + + self.entries_next_index += 1; + self.skip_next_context = true; + } + + EvmEvent::TransactCreate2 { + caller, + value, + init_code, + address, + .. 
+ } => { + self.record_transaction_event_only = false; + self.version = TracingVersion::EarlyTransact; + self.context_stack.push(Context { + entries_index: self.entries_next_index, + + context_type: ContextType::Create, + + from: caller, + trace_address: vec![], + subtraces: 0, + value, + + gas: 0, + start_gas: None, + + data: init_code, + to: address, + }); + + self.entries_next_index += 1; + self.skip_next_context = true; + } + + EvmEvent::Call { + code_address, + input, + is_static, + context, + .. + } => { + self.record_transaction_event_only = false; + + let call_type = match (self.call_type, is_static) { + (None, true) => CallType::StaticCall, + (None, false) => CallType::Call, + (Some(call_type), _) => call_type, + }; + + if !self.skip_next_context { + let trace_address = if let Some(context) = self.context_stack.last_mut() { + let mut trace_address = context.trace_address.clone(); + trace_address.push(context.subtraces); + context.subtraces += 1; + trace_address + } else { + vec![] + }; + + // For subcalls we want to have "from" always be the parent context address + // instead of `context.caller`, since the latter will not have the correct + // value inside a DelegateCall. + let from = if let Some(parent_context) = self.context_stack.last() { + parent_context.to.clone() + } else { + context.caller + }; + + self.context_stack.push(Context { + entries_index: self.entries_next_index, + + context_type: ContextType::Call(call_type), + + from, + trace_address, + subtraces: 0, + value: context.apparent_value, + + gas: 0, + start_gas: None, + + data: input.to_vec(), + to: code_address, + }); + + self.entries_next_index += 1; + } else { + self.skip_next_context = false; + } + } + + EvmEvent::Create { + caller, + address, + // scheme, + value, + init_code, + .. 
+ } => { + self.record_transaction_event_only = false; + + if !self.skip_next_context { + let trace_address = if let Some(context) = self.context_stack.last_mut() { + let mut trace_address = context.trace_address.clone(); + trace_address.push(context.subtraces); + context.subtraces += 1; + trace_address + } else { + vec![] + }; + + self.context_stack.push(Context { + entries_index: self.entries_next_index, + + context_type: ContextType::Create, + + from: caller, + trace_address, + subtraces: 0, + value, + + gas: 0, + start_gas: None, + + data: init_code.to_vec(), + to: address, + }); + + self.entries_next_index += 1; + } else { + self.skip_next_context = false; + } + } + EvmEvent::Suicide { + address, + target, + balance, + } => { + let trace_address = if let Some(context) = self.context_stack.last_mut() { + let mut trace_address = context.trace_address.clone(); + trace_address.push(context.subtraces); + context.subtraces += 1; + trace_address + } else { + vec![] + }; + + self.insert_entry( + self.entries_next_index, + Call { + from: address, // this contract is self destructing + trace_address, + subtraces: 0, + value: 0.into(), + gas: 0.into(), + gas_used: 0.into(), + inner: CallInner::SelfDestruct { + to: target, + balance, + }, + }, + ); + self.entries_next_index += 1; + } + EvmEvent::Exit { + reason, + return_value, + } => { + // We know we're in `TracingVersion::EarlyTransact` mode. + + self.record_transaction_event_only = false; + + let entry = self + .step_result_entry + .take() + .or_else(|| self.pop_context_to_entry(reason, return_value)); + + if let Some((key, entry)) = entry { + self.insert_entry(key, entry); + } + } + EvmEvent::PrecompileSubcall { .. } => { + // In a precompile subcall there is no CALL opcode result to observe, thus + // we need this new event. Precompile subcall might use non-standard call + // behavior (like batch precompile does) thus we simply consider this a call. 
+ self.call_type = Some(CallType::Call); + } + + // We ignore other kinds of message if any (new ones may be added in the future). + #[allow(unreachable_patterns)] + _ => (), + } + } + + fn insert_entry(&mut self, key: u32, entry: Call) { + if let Some(ref mut last) = self.entries.last_mut() { + last.insert(key, entry); + } else { + let mut btree_map = BTreeMap::new(); + btree_map.insert(key, entry); + self.entries.push(btree_map); + } + } + + fn pop_context_to_entry( + &mut self, + reason: ExitReason, + return_value: Vec, + ) -> Option<(u32, Call)> { + if let Some(context) = self.context_stack.pop() { + let mut gas_used = context.start_gas.unwrap_or(0) - context.gas; + if context.entries_index == 0 { + gas_used += self.transaction_cost; + } + + Some(( + context.entries_index, + match context.context_type { + ContextType::Call(call_type) => { + let res = match &reason { + ExitReason::Succeed(ExitSucceed::Returned) => { + CallResult::Output(return_value.to_vec()) + } + ExitReason::Succeed(_) => CallResult::Output(vec![]), + ExitReason::Error(error) => CallResult::Error(error_message(error)), + + ExitReason::Revert(_) => { + CallResult::Error(b"execution reverted".to_vec()) + } + ExitReason::Fatal(_) => CallResult::Error(vec![]), + }; + + Call { + from: context.from, + trace_address: context.trace_address, + subtraces: context.subtraces, + value: context.value, + gas: context.gas.into(), + gas_used: gas_used.into(), + inner: CallInner::Call { + call_type, + to: context.to, + input: context.data, + res, + }, + } + } + ContextType::Create => { + let res = match &reason { + ExitReason::Succeed(_) => CreateResult::Success { + created_contract_address_hash: context.to, + created_contract_code: return_value.to_vec(), + }, + ExitReason::Error(error) => CreateResult::Error { + error: error_message(error), + }, + ExitReason::Revert(_) => CreateResult::Error { + error: b"execution reverted".to_vec(), + }, + ExitReason::Fatal(_) => CreateResult::Error { error: vec![] }, + }; + 
+ Call { + value: context.value, + trace_address: context.trace_address, + subtraces: context.subtraces, + gas: context.gas.into(), + gas_used: gas_used.into(), + from: context.from, + inner: CallInner::Create { + init: context.data, + res, + }, + } + } + }, + )) + } else { + None + } + } +} + +fn error_message(error: &ExitError) -> Vec { + match error { + ExitError::StackUnderflow => "stack underflow", + ExitError::StackOverflow => "stack overflow", + ExitError::InvalidJump => "invalid jump", + ExitError::InvalidRange => "invalid range", + ExitError::DesignatedInvalid => "designated invalid", + ExitError::CallTooDeep => "call too deep", + ExitError::CreateCollision => "create collision", + ExitError::CreateContractLimit => "create contract limit", + ExitError::OutOfOffset => "out of offset", + ExitError::OutOfGas => "out of gas", + ExitError::OutOfFund => "out of funds", + ExitError::Other(err) => err, + _ => "unexpected error", + } + .as_bytes() + .to_vec() +} + +impl ListenerT for Listener { + fn event(&mut self, event: Event) { + match event { + Event::Gasometer(gasometer_event) => self.gasometer_event(gasometer_event), + Event::Runtime(runtime_event) => self.runtime_event(runtime_event), + Event::Evm(evm_event) => self.evm_event(evm_event), + Event::CallListNew() => { + if !self.call_list_first_transaction { + self.finish_transaction(); + self.skip_next_context = false; + self.entries.push(BTreeMap::new()); + } else { + self.call_list_first_transaction = false; + } + } + }; + } + + fn step_event_filter(&self) -> StepEventFilter { + StepEventFilter { + enable_memory: false, + enable_stack: false, + } + } +} + +#[cfg(test)] +#[allow(unused)] +mod tests { + use super::*; + use ethereum_types::H256; + use evm_tracing_events::{ + evm::CreateScheme, + gasometer::Snapshot, + runtime::{Memory, Stack}, + Context as EvmContext, + }; + + enum TestEvmEvent { + Call, + Create, + Suicide, + Exit, + TransactCall, + TransactCreate, + TransactCreate2, + } + + enum 
TestRuntimeEvent { + Step, + StepResult, + SLoad, + SStore, + } + + enum TestGasometerEvent { + RecordCost, + RecordRefund, + RecordStipend, + RecordDynamicCost, + RecordTransaction, + } + + fn test_context() -> EvmContext { + EvmContext { + address: H160::default(), + caller: H160::default(), + apparent_value: U256::zero(), + } + } + + fn test_create_scheme() -> CreateScheme { + CreateScheme::Legacy { + caller: H160::default(), + } + } + + fn test_stack() -> Option { + None + } + + fn test_memory() -> Option { + None + } + + fn test_snapshot() -> Snapshot { + Snapshot { + gas_limit: 0u64, + memory_gas: 0u64, + used_gas: 0u64, + refunded_gas: 0i64, + } + } + + fn test_emit_evm_event( + event_type: TestEvmEvent, + is_static: bool, + exit_reason: Option, + ) -> EvmEvent { + match event_type { + TestEvmEvent::Call => EvmEvent::Call { + code_address: H160::default(), + transfer: None, + input: Vec::new(), + target_gas: None, + is_static, + context: test_context(), + }, + TestEvmEvent::Create => EvmEvent::Create { + caller: H160::default(), + address: H160::default(), + scheme: test_create_scheme(), + value: U256::zero(), + init_code: Vec::new(), + target_gas: None, + }, + TestEvmEvent::Suicide => EvmEvent::Suicide { + address: H160::default(), + target: H160::default(), + balance: U256::zero(), + }, + TestEvmEvent::Exit => EvmEvent::Exit { + reason: exit_reason.unwrap(), + return_value: Vec::new(), + }, + TestEvmEvent::TransactCall => EvmEvent::TransactCall { + caller: H160::default(), + address: H160::default(), + value: U256::zero(), + data: Vec::new(), + gas_limit: 0u64, + }, + TestEvmEvent::TransactCreate => EvmEvent::TransactCreate { + caller: H160::default(), + value: U256::zero(), + init_code: Vec::new(), + gas_limit: 0u64, + address: H160::default(), + }, + TestEvmEvent::TransactCreate2 => EvmEvent::TransactCreate2 { + caller: H160::default(), + value: U256::zero(), + init_code: Vec::new(), + salt: H256::default(), + gas_limit: 0u64, + address: H160::default(), 
+ }, + } + } + + fn test_emit_runtime_event(event_type: TestRuntimeEvent) -> RuntimeEvent { + match event_type { + TestRuntimeEvent::Step => RuntimeEvent::Step { + context: test_context(), + opcode: Vec::new(), + position: Ok(0u64), + stack: test_stack(), + memory: test_memory(), + }, + TestRuntimeEvent::StepResult => RuntimeEvent::StepResult { + result: Ok(()), + return_value: Vec::new(), + }, + TestRuntimeEvent::SLoad => RuntimeEvent::SLoad { + address: H160::default(), + index: H256::default(), + value: H256::default(), + }, + TestRuntimeEvent::SStore => RuntimeEvent::SStore { + address: H160::default(), + index: H256::default(), + value: H256::default(), + }, + } + } + + fn test_emit_gasometer_event(event_type: TestGasometerEvent) -> GasometerEvent { + match event_type { + TestGasometerEvent::RecordCost => GasometerEvent::RecordCost { + cost: 0u64, + snapshot: test_snapshot(), + }, + TestGasometerEvent::RecordRefund => GasometerEvent::RecordRefund { + refund: 0i64, + snapshot: test_snapshot(), + }, + TestGasometerEvent::RecordStipend => GasometerEvent::RecordStipend { + stipend: 0u64, + snapshot: test_snapshot(), + }, + TestGasometerEvent::RecordDynamicCost => GasometerEvent::RecordDynamicCost { + gas_cost: 0u64, + memory_gas: 0u64, + gas_refund: 0i64, + snapshot: test_snapshot(), + }, + TestGasometerEvent::RecordTransaction => GasometerEvent::RecordTransaction { + cost: 0u64, + snapshot: test_snapshot(), + }, + } + } + + fn do_transact_call_event(listener: &mut Listener) { + listener.evm_event(test_emit_evm_event(TestEvmEvent::TransactCall, false, None)); + } + + fn do_transact_create_event(listener: &mut Listener) { + listener.evm_event(test_emit_evm_event( + TestEvmEvent::TransactCreate, + false, + None, + )); + } + + fn do_gasometer_event(listener: &mut Listener) { + listener.gasometer_event(test_emit_gasometer_event( + TestGasometerEvent::RecordTransaction, + )); + } + + fn do_exit_event(listener: &mut Listener) { + listener.evm_event(test_emit_evm_event( 
+ TestEvmEvent::Exit, + false, + Some(ExitReason::Error(ExitError::OutOfGas)), + )); + } + + fn do_evm_call_event(listener: &mut Listener) { + listener.evm_event(test_emit_evm_event(TestEvmEvent::Call, false, None)); + } + + fn do_evm_create_event(listener: &mut Listener) { + listener.evm_event(test_emit_evm_event(TestEvmEvent::Create, false, None)); + } + + fn do_evm_suicide_event(listener: &mut Listener) { + listener.evm_event(test_emit_evm_event(TestEvmEvent::Suicide, false, None)); + } + + fn do_runtime_step_event(listener: &mut Listener) { + listener.runtime_event(test_emit_runtime_event(TestRuntimeEvent::Step)); + } + + fn do_runtime_step_result_event(listener: &mut Listener) { + listener.runtime_event(test_emit_runtime_event(TestRuntimeEvent::StepResult)); + } + + // Call context + + // Early exit on TransactionCost. + #[test] + fn call_early_exit_tx_cost() { + let mut listener = Listener::default(); + do_transact_call_event(&mut listener); + do_gasometer_event(&mut listener); + do_exit_event(&mut listener); + listener.finish_transaction(); + assert_eq!(listener.entries.len(), 1); + assert_eq!(listener.entries[0].len(), 1); + } + + // Early exit somewhere between the first callstack event and stepping the bytecode. + // I.e. precompile call. + #[test] + fn call_early_exit_before_runtime() { + let mut listener = Listener::default(); + do_transact_call_event(&mut listener); + do_gasometer_event(&mut listener); + do_evm_call_event(&mut listener); + do_exit_event(&mut listener); + listener.finish_transaction(); + assert_eq!(listener.entries.len(), 1); + assert_eq!(listener.entries[0].len(), 1); + } + + // Exit after Step without StepResult. 
+ #[test] + fn call_step_without_step_result() { + let mut listener = Listener::default(); + do_transact_call_event(&mut listener); + do_gasometer_event(&mut listener); + do_evm_call_event(&mut listener); + do_runtime_step_event(&mut listener); + do_exit_event(&mut listener); + listener.finish_transaction(); + assert_eq!(listener.entries.len(), 1); + assert_eq!(listener.entries[0].len(), 1); + } + + // Exit after StepResult. + #[test] + fn call_step_result() { + let mut listener = Listener::default(); + do_transact_call_event(&mut listener); + do_gasometer_event(&mut listener); + do_evm_call_event(&mut listener); + do_runtime_step_event(&mut listener); + do_runtime_step_result_event(&mut listener); + do_exit_event(&mut listener); + listener.finish_transaction(); + assert_eq!(listener.entries.len(), 1); + assert_eq!(listener.entries[0].len(), 1); + } + + // Suicide. + #[test] + fn call_suicide() { + let mut listener = Listener::default(); + do_transact_call_event(&mut listener); + do_gasometer_event(&mut listener); + do_evm_call_event(&mut listener); + do_runtime_step_event(&mut listener); + do_evm_suicide_event(&mut listener); + do_exit_event(&mut listener); + listener.finish_transaction(); + assert_eq!(listener.entries.len(), 1); + assert_eq!(listener.entries[0].len(), 2); + } + + // Create context + + // Early exit on TransactionCost. + #[test] + fn create_early_exit_tx_cost() { + let mut listener = Listener::default(); + do_transact_create_event(&mut listener); + do_gasometer_event(&mut listener); + do_exit_event(&mut listener); + listener.finish_transaction(); + assert_eq!(listener.entries.len(), 1); + assert_eq!(listener.entries[0].len(), 1); + } + + // Early exit somewhere between the first callstack event and stepping the bytecode + // I.e. precompile call.. 
+ #[test] + fn create_early_exit_before_runtime() { + let mut listener = Listener::default(); + do_transact_create_event(&mut listener); + do_gasometer_event(&mut listener); + do_evm_create_event(&mut listener); + do_exit_event(&mut listener); + listener.finish_transaction(); + assert_eq!(listener.entries.len(), 1); + assert_eq!(listener.entries[0].len(), 1); + } + + // Exit after Step without StepResult. + #[test] + fn create_step_without_step_result() { + let mut listener = Listener::default(); + do_transact_create_event(&mut listener); + do_gasometer_event(&mut listener); + do_evm_create_event(&mut listener); + do_runtime_step_event(&mut listener); + do_exit_event(&mut listener); + listener.finish_transaction(); + assert_eq!(listener.entries.len(), 1); + assert_eq!(listener.entries[0].len(), 1); + } + + // Exit after StepResult. + #[test] + fn create_step_result() { + let mut listener = Listener::default(); + do_transact_create_event(&mut listener); + do_gasometer_event(&mut listener); + do_evm_create_event(&mut listener); + do_runtime_step_event(&mut listener); + do_runtime_step_result_event(&mut listener); + do_exit_event(&mut listener); + listener.finish_transaction(); + assert_eq!(listener.entries.len(), 1); + assert_eq!(listener.entries[0].len(), 1); + } + + // Call Context Nested + + // Nested call early exit before stepping. + #[test] + fn nested_call_early_exit_before_runtime() { + let mut listener = Listener::default(); + // Main + do_transact_call_event(&mut listener); + do_gasometer_event(&mut listener); + do_evm_call_event(&mut listener); + do_runtime_step_event(&mut listener); + do_runtime_step_result_event(&mut listener); + // Nested + do_evm_call_event(&mut listener); + do_exit_event(&mut listener); + // Main exit + do_exit_event(&mut listener); + listener.finish_transaction(); + assert_eq!(listener.entries.len(), 1); + assert_eq!(listener.entries[0].len(), 2); + } + + // Nested exit before step result. 
+ #[test] + fn nested_call_without_step_result() { + let mut listener = Listener::default(); + // Main + do_transact_call_event(&mut listener); + do_gasometer_event(&mut listener); + do_evm_call_event(&mut listener); + do_runtime_step_event(&mut listener); + do_runtime_step_result_event(&mut listener); + // Nested + do_evm_call_event(&mut listener); + do_runtime_step_event(&mut listener); + do_exit_event(&mut listener); + // Main exit + do_exit_event(&mut listener); + listener.finish_transaction(); + assert_eq!(listener.entries.len(), 1); + assert_eq!(listener.entries[0].len(), 2); + } + + // Nested exit. + #[test] + fn nested_call_step_result() { + let depth = 5; + let mut listener = Listener::default(); + // Main + do_transact_call_event(&mut listener); + do_gasometer_event(&mut listener); + do_evm_call_event(&mut listener); + do_runtime_step_event(&mut listener); + do_runtime_step_result_event(&mut listener); + // 5 nested calls + for d in 0..depth { + do_evm_call_event(&mut listener); + do_runtime_step_event(&mut listener); + do_runtime_step_result_event(&mut listener); + do_exit_event(&mut listener); + } + // Main exit + do_exit_event(&mut listener); + listener.finish_transaction(); + assert_eq!(listener.entries.len(), 1); + assert_eq!(listener.entries[0].len(), depth + 1); + } + + // Call + Create mixed subnesting. 
+ + #[test] + fn subnested_call_and_create_mixbag() { + let depth = 5; + let subdepth = 10; + let mut listener = Listener::default(); + // Main + do_transact_call_event(&mut listener); + do_gasometer_event(&mut listener); + do_evm_call_event(&mut listener); + do_runtime_step_event(&mut listener); + do_runtime_step_result_event(&mut listener); + // 5 nested call/creates, each with 10 nested call/creates + for d in 0..depth { + if d % 2 == 0 { + do_evm_call_event(&mut listener); + } else { + do_evm_create_event(&mut listener); + } + do_runtime_step_event(&mut listener); + do_runtime_step_result_event(&mut listener); + for s in 0..subdepth { + // Some mixed call/create and early exits. + if s % 2 == 0 { + do_evm_call_event(&mut listener); + } else { + do_evm_create_event(&mut listener); + } + if s % 3 == 0 { + do_runtime_step_event(&mut listener); + do_runtime_step_result_event(&mut listener); + } + do_exit_event(&mut listener); + } + // Nested exit + do_exit_event(&mut listener); + } + // Main exit + do_exit_event(&mut listener); + listener.finish_transaction(); + assert_eq!(listener.entries.len(), 1); + // Each nested call contains 11 elements in the callstack (main + 10 subcalls). + // There are 5 main nested calls for a total of 56 elements in the callstack: 1 main + 55 nested. + assert_eq!(listener.entries[0].len(), (depth * (subdepth + 1)) + 1); + } +} diff --git a/vendor/evm-tracing/src/listeners/mod.rs b/vendor/evm-tracing/src/listeners/mod.rs new file mode 100644 index 0000000000..5049d5f583 --- /dev/null +++ b/vendor/evm-tracing/src/listeners/mod.rs @@ -0,0 +1,21 @@ +// Copyright 2019-2022 PureStake Inc. +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . + +pub mod call_list; +pub mod raw; + +pub use call_list::Listener as CallList; +pub use raw::Listener as Raw; diff --git a/vendor/evm-tracing/src/listeners/raw.rs b/vendor/evm-tracing/src/listeners/raw.rs new file mode 100644 index 0000000000..1e14200f2c --- /dev/null +++ b/vendor/evm-tracing/src/listeners/raw.rs @@ -0,0 +1,339 @@ +// Copyright 2019-2022 PureStake Inc. +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . 
+ +use ethereum_types::{H160, H256}; +use std::{collections::btree_map::BTreeMap, vec, vec::Vec}; + +use crate::types::{convert_memory, single::RawStepLog, ContextType}; +use evm_tracing_events::{ + runtime::{Capture, ExitReason}, + Event, GasometerEvent, Listener as ListenerT, RuntimeEvent, StepEventFilter, +}; + +#[derive(Debug)] +pub struct Listener { + disable_storage: bool, + disable_memory: bool, + disable_stack: bool, + + new_context: bool, + context_stack: Vec, + + pub step_logs: Vec, + pub return_value: Vec, + pub final_gas: u64, + pub remaining_memory_usage: Option, +} + +#[derive(Debug)] +struct Context { + storage_cache: BTreeMap, + address: H160, + current_step: Option, + global_storage_changes: BTreeMap>, +} + +#[derive(Debug)] +struct Step { + /// Current opcode. + opcode: Vec, + /// Depth of the context. + depth: usize, + /// Remaining gas. + gas: u64, + /// Gas cost of the following opcode. + gas_cost: u64, + /// Program counter position. + position: usize, + /// EVM memory copy (if not disabled). + memory: Option>, + /// EVM stack copy (if not disabled). + stack: Option>, +} + +impl Listener { + pub fn new( + disable_storage: bool, + disable_memory: bool, + disable_stack: bool, + raw_max_memory_usage: usize, + ) -> Self { + Self { + disable_storage, + disable_memory, + disable_stack, + remaining_memory_usage: Some(raw_max_memory_usage), + + step_logs: vec![], + return_value: vec![], + final_gas: 0, + + new_context: false, + context_stack: vec![], + } + } + + pub fn using R>(&mut self, f: F) -> R { + evm_tracing_events::using(self, f) + } + + pub fn gasometer_event(&mut self, event: GasometerEvent) { + match event { + GasometerEvent::RecordTransaction { cost, .. } => { + // First event of a transaction. + // Next step will be the first context. + self.new_context = true; + self.final_gas = cost; + } + GasometerEvent::RecordCost { cost, snapshot } => { + if let Some(context) = self.context_stack.last_mut() { + // Register opcode cost. 
(ignore costs not between Step and StepResult) + if let Some(step) = &mut context.current_step { + step.gas = snapshot.gas(); + step.gas_cost = cost; + } + + self.final_gas = snapshot.used_gas; + } + } + GasometerEvent::RecordDynamicCost { + gas_cost, snapshot, .. + } => { + if let Some(context) = self.context_stack.last_mut() { + // Register opcode cost. (ignore costs not between Step and StepResult) + if let Some(step) = &mut context.current_step { + step.gas = snapshot.gas(); + step.gas_cost = gas_cost; + } + + self.final_gas = snapshot.used_gas; + } + } + // We ignore other kinds of message if any (new ones may be added in the future). + #[allow(unreachable_patterns)] + _ => (), + } + } + + pub fn runtime_event(&mut self, event: RuntimeEvent) { + match event { + RuntimeEvent::Step { + context, + opcode, + position, + stack, + memory, + } => { + // Create a context if needed. + if self.new_context { + self.new_context = false; + + self.context_stack.push(Context { + storage_cache: BTreeMap::new(), + address: context.address, + current_step: None, + global_storage_changes: BTreeMap::new(), + }); + } + + let depth = self.context_stack.len(); + + // Ignore steps outside of any context (shouldn't even be possible). 
+ if let Some(context) = self.context_stack.last_mut() { + context.current_step = Some(Step { + opcode, + depth, + gas: 0, // 0 for now, will add with gas events + gas_cost: 0, // 0 for now, will add with gas events + position: *position.as_ref().unwrap_or(&0) as usize, + memory: if self.disable_memory { + None + } else { + let memory = memory.expect("memory data to not be filtered out"); + + self.remaining_memory_usage = self + .remaining_memory_usage + .and_then(|inner| inner.checked_sub(memory.data.len())); + + if self.remaining_memory_usage.is_none() { + return; + } + + Some(memory.data.clone()) + }, + stack: if self.disable_stack { + None + } else { + let stack = stack.expect("stack data to not be filtered out"); + + self.remaining_memory_usage = self + .remaining_memory_usage + .and_then(|inner| inner.checked_sub(stack.data.len())); + + if self.remaining_memory_usage.is_none() { + return; + } + + Some(stack.data.clone()) + }, + }); + } + } + RuntimeEvent::StepResult { + result, + return_value, + } => { + // StepResult is expected to be emited after a step (in a context). + // Only case StepResult will occur without a Step before is in a transfer + // transaction to a non-contract address. However it will not contain any + // steps and return an empty trace, so we can ignore this edge case. 
+ if let Some(context) = self.context_stack.last_mut() { + if let Some(current_step) = context.current_step.take() { + let Step { + opcode, + depth, + gas, + gas_cost, + position, + memory, + stack, + } = current_step; + + let memory = memory.map(convert_memory); + + let storage = if self.disable_storage { + None + } else { + self.remaining_memory_usage = + self.remaining_memory_usage.and_then(|inner| { + inner.checked_sub(context.storage_cache.len() * 64) + }); + + if self.remaining_memory_usage.is_none() { + return; + } + + Some(context.storage_cache.clone()) + }; + + self.step_logs.push(RawStepLog { + depth: depth.into(), + gas: gas.into(), + gas_cost: gas_cost.into(), + memory, + op: opcode, + pc: position.into(), + stack, + storage, + }); + } + } + + // We match on the capture to handle traps/exits. + match result { + Err(Capture::Exit(reason)) => { + // Exit = we exit the context (should always be some) + if let Some(mut context) = self.context_stack.pop() { + // If final context is exited, we store gas and return value. + if self.context_stack.is_empty() { + self.return_value = return_value.to_vec(); + } + + // If the context exited without revert we must keep track of the + // updated storage keys. + if !self.disable_storage && matches!(reason, ExitReason::Succeed(_)) { + if let Some(parent_context) = self.context_stack.last_mut() { + // Add cache to storage changes. + context + .global_storage_changes + .insert(context.address, context.storage_cache); + + // Apply storage changes to parent, either updating its cache or map of changes. + for (address, mut storage) in + context.global_storage_changes.into_iter() + { + // Same address => We update its cache (only tracked keys) + if parent_context.address == address { + for (cached_key, cached_value) in + parent_context.storage_cache.iter_mut() + { + if let Some(value) = storage.remove(cached_key) { + *cached_value = value; + } + } + } + // Otherwise, update the storage changes. 
+ else { + parent_context + .global_storage_changes + .entry(address) + .or_insert_with(BTreeMap::new) + .append(&mut storage); + } + } + } + } + } + } + Err(Capture::Trap(opcode)) if ContextType::from(opcode.clone()).is_some() => { + self.new_context = true; + } + _ => (), + } + } + RuntimeEvent::SLoad { + address: _, + index, + value, + } + | RuntimeEvent::SStore { + address: _, + index, + value, + } => { + if let Some(context) = self.context_stack.last_mut() { + if !self.disable_storage { + context.storage_cache.insert(index, value); + } + } + } + // We ignore other kinds of message if any (new ones may be added in the future). + #[allow(unreachable_patterns)] + _ => (), + } + } +} + +impl ListenerT for Listener { + fn event(&mut self, event: Event) { + if self.remaining_memory_usage.is_none() { + return; + } + + match event { + Event::Gasometer(e) => self.gasometer_event(e), + Event::Runtime(e) => self.runtime_event(e), + _ => {} + }; + } + + fn step_event_filter(&self) -> StepEventFilter { + StepEventFilter { + enable_memory: !self.disable_memory, + enable_stack: !self.disable_stack, + } + } +} diff --git a/vendor/evm-tracing/src/types/block.rs b/vendor/evm-tracing/src/types/block.rs new file mode 100644 index 0000000000..4925aaf867 --- /dev/null +++ b/vendor/evm-tracing/src/types/block.rs @@ -0,0 +1,97 @@ +// Copyright 2019-2022 PureStake Inc. +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . + +//! Types for tracing all Ethereum transactions of a block. + +use super::serialization::*; +use serde::Serialize; + +use ethereum_types::{H160, H256, U256}; +use parity_scale_codec::{Decode, Encode}; +use sp_std::vec::Vec; + +#[derive(Clone, Eq, PartialEq, Debug, Encode, Decode, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct TransactionTrace { + #[serde(flatten)] + pub action: TransactionTraceAction, + #[serde(serialize_with = "h256_0x_serialize")] + pub block_hash: H256, + pub block_number: u32, + #[serde(flatten)] + pub output: TransactionTraceOutput, + pub subtraces: u32, + pub trace_address: Vec, + #[serde(serialize_with = "h256_0x_serialize")] + pub transaction_hash: H256, + pub transaction_position: u32, +} + +#[derive(Clone, Eq, PartialEq, Debug, Encode, Decode, Serialize)] +#[serde(rename_all = "camelCase", tag = "type", content = "action")] +pub enum TransactionTraceAction { + #[serde(rename_all = "camelCase")] + Call { + call_type: super::CallType, + from: H160, + gas: U256, + #[serde(serialize_with = "bytes_0x_serialize")] + input: Vec, + to: H160, + value: U256, + }, + #[serde(rename_all = "camelCase")] + Create { + creation_method: super::CreateType, + from: H160, + gas: U256, + #[serde(serialize_with = "bytes_0x_serialize")] + init: Vec, + value: U256, + }, + #[serde(rename_all = "camelCase")] + Suicide { + address: H160, + balance: U256, + refund_address: H160, + }, +} + +#[derive(Clone, Eq, PartialEq, Debug, Encode, Decode, Serialize)] +#[serde(rename_all = "camelCase")] +pub enum TransactionTraceOutput { + Result(TransactionTraceResult), + Error(#[serde(serialize_with = "string_serialize")] Vec), +} + +#[derive(Clone, Eq, PartialEq, Debug, Encode, Decode, Serialize)] +#[serde(rename_all = "camelCase", untagged)] +pub enum TransactionTraceResult { + #[serde(rename_all = "camelCase")] + Call { + gas_used: U256, + 
#[serde(serialize_with = "bytes_0x_serialize")] + output: Vec, + }, + #[serde(rename_all = "camelCase")] + Create { + address: H160, + #[serde(serialize_with = "bytes_0x_serialize")] + code: Vec, + gas_used: U256, + }, + Suicide, +} diff --git a/vendor/evm-tracing/src/types/mod.rs b/vendor/evm-tracing/src/types/mod.rs new file mode 100644 index 0000000000..f2099cbb55 --- /dev/null +++ b/vendor/evm-tracing/src/types/mod.rs @@ -0,0 +1,113 @@ +// Copyright 2019-2022 PureStake Inc. +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . + +//! 
Runtime API allowing to debug/trace Ethereum + +extern crate alloc; + +use ethereum_types::{H160, H256}; +use parity_scale_codec::{Decode, Encode}; +use sp_std::vec::Vec; + +pub mod block; +pub mod serialization; +pub mod single; + +use serde::Serialize; +use serialization::*; + +pub const MANUAL_BLOCK_INITIALIZATION_RUNTIME_VERSION: u32 = 159; + +#[derive(Clone, Eq, PartialEq, Debug, Encode, Decode, Serialize)] +#[serde(rename_all = "camelCase")] +pub enum CallResult { + Output(#[serde(serialize_with = "bytes_0x_serialize")] Vec), + // field "error" + Error(#[serde(serialize_with = "string_serialize")] Vec), +} + +#[derive(Clone, Eq, PartialEq, Debug, Encode, Decode, Serialize)] +#[serde(rename_all = "camelCase", untagged)] +pub enum CreateResult { + Error { + #[serde(serialize_with = "string_serialize")] + error: Vec, + }, + Success { + #[serde(rename = "createdContractAddressHash")] + created_contract_address_hash: H160, + #[serde(serialize_with = "bytes_0x_serialize", rename = "createdContractCode")] + created_contract_code: Vec, + }, +} + +#[derive(Clone, Copy, Eq, PartialEq, Debug, Encode, Decode, Serialize)] +#[serde(rename_all = "lowercase")] +pub enum CallType { + Call, + CallCode, + DelegateCall, + StaticCall, +} + +#[derive(Clone, Copy, Eq, PartialEq, Debug, Encode, Decode, Serialize)] +#[serde(rename_all = "lowercase")] +pub enum CreateType { + Create, +} + +#[derive(Debug)] +pub enum ContextType { + Call(CallType), + Create, +} + +impl ContextType { + pub fn from(opcode: Vec) -> Option { + let opcode = match alloc::str::from_utf8(&opcode[..]) { + Ok(op) => op.to_uppercase(), + _ => return None, + }; + match &opcode[..] 
{ + "CREATE" | "CREATE2" => Some(ContextType::Create), + "CALL" => Some(ContextType::Call(CallType::Call)), + "CALLCODE" => Some(ContextType::Call(CallType::CallCode)), + "DELEGATECALL" => Some(ContextType::Call(CallType::DelegateCall)), + "STATICCALL" => Some(ContextType::Call(CallType::StaticCall)), + _ => None, + } + } +} + +pub fn convert_memory(memory: Vec) -> Vec { + let size = 32; + memory + .chunks(size) + .map(|c| { + let mut msg = [0u8; 32]; + let chunk = c.len(); + if chunk < size { + let left = size - chunk; + let remainder = vec![0; left]; + msg[0..left].copy_from_slice(&remainder[..]); + msg[left..size].copy_from_slice(c); + } else { + msg[0..size].copy_from_slice(c) + } + H256::from_slice(&msg[..]) + }) + .collect() +} diff --git a/vendor/evm-tracing/src/types/serialization.rs b/vendor/evm-tracing/src/types/serialization.rs new file mode 100644 index 0000000000..439029d524 --- /dev/null +++ b/vendor/evm-tracing/src/types/serialization.rs @@ -0,0 +1,113 @@ +// Copyright 2019-2022 PureStake Inc. +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . + +//! Provide serialization functions for various types and formats. 
+ +use ethereum_types::{H256, U256}; +use serde::{ + ser::{Error, SerializeSeq}, + Serializer, +}; + +pub fn seq_h256_serialize(data: &Option>, serializer: S) -> Result +where + S: Serializer, +{ + if let Some(vec) = data { + let mut seq = serializer.serialize_seq(Some(vec.len()))?; + for hash in vec { + seq.serialize_element(&format!("{:x}", hash))?; + } + seq.end() + } else { + let seq = serializer.serialize_seq(Some(0))?; + seq.end() + } +} + +pub fn bytes_0x_serialize(bytes: &[u8], serializer: S) -> Result +where + S: Serializer, +{ + serializer.serialize_str(&format!("0x{}", hex::encode(bytes))) +} + +pub fn option_bytes_0x_serialize( + bytes: &Option>, + serializer: S, +) -> Result +where + S: Serializer, +{ + if let Some(bytes) = bytes.as_ref() { + return serializer.serialize_str(&format!("0x{}", hex::encode(&bytes[..]))); + } + Err(S::Error::custom("String serialize error.")) +} + +pub fn opcode_serialize(opcode: &[u8], serializer: S) -> Result +where + S: Serializer, +{ + let d = std::str::from_utf8(opcode) + .map_err(|_| S::Error::custom("Opcode serialize error."))? + .to_uppercase(); + serializer.serialize_str(&d) +} + +pub fn string_serialize(value: &[u8], serializer: S) -> Result +where + S: Serializer, +{ + let d = std::str::from_utf8(value) + .map_err(|_| S::Error::custom("String serialize error."))? + .to_string(); + serializer.serialize_str(&d) +} + +pub fn option_string_serialize(value: &Option>, serializer: S) -> Result +where + S: Serializer, +{ + if let Some(value) = value.as_ref() { + let d = std::str::from_utf8(&value[..]) + .map_err(|_| S::Error::custom("String serialize error."))? 
+ .to_string(); + return serializer.serialize_str(&d); + } + Err(S::Error::custom("String serialize error.")) +} + +pub fn u256_serialize(data: &U256, serializer: S) -> Result +where + S: Serializer, +{ + serializer.serialize_u64(data.low_u64()) +} + +pub fn h256_serialize(data: &H256, serializer: S) -> Result +where + S: Serializer, +{ + serializer.serialize_str(&format!("{:x}", data)) +} + +pub fn h256_0x_serialize(data: &H256, serializer: S) -> Result +where + S: Serializer, +{ + serializer.serialize_str(&format!("0x{:x}", data)) +} diff --git a/vendor/evm-tracing/src/types/single.rs b/vendor/evm-tracing/src/types/single.rs new file mode 100644 index 0000000000..a5dbaa91c1 --- /dev/null +++ b/vendor/evm-tracing/src/types/single.rs @@ -0,0 +1,102 @@ +// Copyright 2019-2022 PureStake Inc. +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . + +//! Types for the tracing of a single Ethereum transaction. +//! Structure from "raw" debug_trace and a "call list" matching +//! Blockscout formatter. This "call list" is also used to build +//! the whole block tracing output. 
+ +use super::serialization::*; +use serde::Serialize; + +use ethereum_types::{H256, U256}; +use parity_scale_codec::{Decode, Encode}; +use sp_std::{collections::btree_map::BTreeMap, vec::Vec}; + +#[derive(Clone, Eq, PartialEq, Debug, Encode, Decode, Serialize)] +#[serde(rename_all = "camelCase", untagged)] +pub enum Call { + Blockscout(crate::formatters::blockscout::BlockscoutCall), + CallTracer(crate::formatters::call_tracer::CallTracerCall), +} + +#[derive(Clone, Copy, Eq, PartialEq, Debug, Encode, Decode)] +pub enum TraceType { + /// Classic geth with no javascript based tracing. + Raw { + disable_storage: bool, + disable_memory: bool, + disable_stack: bool, + }, + /// List of calls and subcalls formatted with an input tracer (i.e. callTracer or Blockscout). + CallList, + /// A single block trace. Use in `debug_traceTransactionByNumber` / `traceTransactionByHash`. + Block, +} + +/// Single transaction trace. +#[derive(Clone, Eq, PartialEq, Debug, Encode, Decode, Serialize)] +#[serde(rename_all = "camelCase", untagged)] +pub enum TransactionTrace { + /// Classical output of `debug_trace`. + #[serde(rename_all = "camelCase")] + Raw { + gas: U256, + #[serde(with = "hex")] + return_value: Vec, + step_logs: Vec, + }, + /// Matches the formatter used by Blockscout. + /// Is also used to built output of OpenEthereum's `trace_filter`. + CallList(Vec), + /// Used by Geth's callTracer. 
+ CallListNested(Call), +} + +#[derive(Clone, Eq, PartialEq, Debug, Encode, Decode, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct RawStepLog { + #[serde(serialize_with = "u256_serialize")] + pub depth: U256, + + //error: TODO + #[serde(serialize_with = "u256_serialize")] + pub gas: U256, + + #[serde(serialize_with = "u256_serialize")] + pub gas_cost: U256, + + #[serde( + serialize_with = "seq_h256_serialize", + skip_serializing_if = "Option::is_none" + )] + pub memory: Option>, + + #[serde(serialize_with = "opcode_serialize")] + pub op: Vec, + + #[serde(serialize_with = "u256_serialize")] + pub pc: U256, + + #[serde( + serialize_with = "seq_h256_serialize", + skip_serializing_if = "Option::is_none" + )] + pub stack: Option>, + + #[serde(skip_serializing_if = "Option::is_none")] + pub storage: Option>, +} diff --git a/vendor/primitives/debug/Cargo.toml b/vendor/primitives/debug/Cargo.toml new file mode 100644 index 0000000000..291c2fc16b --- /dev/null +++ b/vendor/primitives/debug/Cargo.toml @@ -0,0 +1,41 @@ +[package] +name = "moonbeam-rpc-primitives-debug" +authors = ["PureStake"] +edition = "2021" +homepage = "https://moonbeam.network" +license = "GPL-3.0-only" +repository = "https://github.com/PureStake/moonbeam/" +version = "0.1.0" + +[dependencies] +environmental = { workspace = true } +ethereum = { workspace = true, features = ["with-codec"] } +ethereum-types = { workspace = true } +hex = { workspace = true, optional = true } +serde = { workspace = true, optional = true } +serde_json = { workspace = true, optional = true } + +# Substrate +parity-scale-codec = { workspace = true } +sp-api = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } + +[features] +default = ["std"] +std = [ + "parity-scale-codec/std", + "environmental/std", + "ethereum-types/std", + "ethereum/std", + "hex", + "serde", + "serde_json", + "sp-api/std", + "sp-core/std", + 
"sp-io/std", + "sp-runtime/std", + "sp-std/std", +] diff --git a/vendor/primitives/debug/src/lib.rs b/vendor/primitives/debug/src/lib.rs new file mode 100644 index 0000000000..2c0774ade9 --- /dev/null +++ b/vendor/primitives/debug/src/lib.rs @@ -0,0 +1,66 @@ +// Copyright 2019-2022 PureStake Inc. +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . + +#![cfg_attr(not(feature = "std"), no_std)] + +use ethereum::{TransactionV0 as LegacyTransaction, TransactionV2 as Transaction}; +use ethereum_types::H256; +use parity_scale_codec::{Decode, Encode}; +use sp_std::vec::Vec; + +sp_api::decl_runtime_apis! { + // Api version is virtually 4. + // + // We realized that even using runtime overrides, using the ApiExt interface reads the api + // versions from the state runtime, meaning we cannot just reset the versioning as we see fit. + // + // In order to be able to use ApiExt as part of the RPC handler logic we need to be always + // above the version that exists on chain for this Api, even if this Api is only meant + // to be used overridden. 
+ #[api_version(4)] + pub trait DebugRuntimeApi { + #[changed_in(4)] + fn trace_transaction( + extrinsics: Vec, + transaction: &LegacyTransaction, + ) -> Result<(), sp_runtime::DispatchError>; + + fn trace_transaction( + extrinsics: Vec, + transaction: &Transaction, + ) -> Result<(), sp_runtime::DispatchError>; + + fn trace_block( + extrinsics: Vec, + known_transactions: Vec, + ) -> Result<(), sp_runtime::DispatchError>; + } +} + +#[derive(Clone, Copy, Eq, PartialEq, Debug, Encode, Decode)] +pub enum TracerInput { + None, + Blockscout, + CallTracer, +} + +/// DebugRuntimeApi V2 result. Trace response is stored in client and runtime api call response is +/// empty. +#[derive(Debug)] +pub enum Response { + Single, + Block, +} diff --git a/vendor/primitives/evm-tracing-events/Cargo.toml b/vendor/primitives/evm-tracing-events/Cargo.toml new file mode 100644 index 0000000000..b8b219c31a --- /dev/null +++ b/vendor/primitives/evm-tracing-events/Cargo.toml @@ -0,0 +1,36 @@ +[package] +name = "evm-tracing-events" +authors = ["PureStake"] +edition = "2021" +homepage = "https://moonbeam.network" +license = "GPL-3.0-only" +repository = "https://github.com/PureStake/moonbeam/" +version = "0.1.0" + +[dependencies] +environmental = { workspace = true } + +# Substrate +parity-scale-codec = { workspace = true } +sp-runtime-interface = { workspace = true } + +# Ethereum +ethereum = { workspace = true, features = ["with-codec"] } +ethereum-types = { workspace = true } +evm = { workspace = true, features = ["with-codec"] } +evm-gasometer = { workspace = true } +evm-runtime = { workspace = true } + +[features] +default = ["std"] +std = [ + "parity-scale-codec/std", + "environmental/std", + "ethereum-types/std", + "ethereum/std", + "evm-gasometer/std", + "evm-runtime/std", + "evm/std", + "sp-runtime-interface/std", +] +evm-tracing = ["evm-gasometer/tracing", "evm-runtime/tracing", "evm/tracing"] diff --git a/vendor/primitives/evm-tracing-events/src/evm.rs 
b/vendor/primitives/evm-tracing-events/src/evm.rs new file mode 100644 index 0000000000..35765892d1 --- /dev/null +++ b/vendor/primitives/evm-tracing-events/src/evm.rs @@ -0,0 +1,257 @@ +// Copyright 2019-2022 PureStake Inc. +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . + +extern crate alloc; + +use alloc::vec::Vec; +use ethereum_types::{H160, H256, U256}; +use evm::ExitReason; +use parity_scale_codec::{Decode, Encode}; + +#[derive(Clone, Debug, Encode, Decode, PartialEq, Eq)] +pub struct Transfer { + /// Source address. + pub source: H160, + /// Target address. + pub target: H160, + /// Transfer value. + pub value: U256, +} + +impl From for Transfer { + fn from(i: evm_runtime::Transfer) -> Self { + Self { + source: i.source, + target: i.target, + value: i.value, + } + } +} + +#[derive(Clone, Copy, Eq, PartialEq, Debug, Encode, Decode)] +pub enum CreateScheme { + /// Legacy create scheme of `CREATE`. + Legacy { + /// Caller of the create. + caller: H160, + }, + /// Create scheme of `CREATE2`. + Create2 { + /// Caller of the create. + caller: H160, + /// Code hash. + code_hash: H256, + /// Salt. + salt: H256, + }, + /// Create at a fixed location. 
+ Fixed(H160), +} + +impl From for CreateScheme { + fn from(i: evm_runtime::CreateScheme) -> Self { + match i { + evm_runtime::CreateScheme::Legacy { caller } => Self::Legacy { caller }, + evm_runtime::CreateScheme::Create2 { + caller, + code_hash, + salt, + } => Self::Create2 { + caller, + code_hash, + salt, + }, + evm_runtime::CreateScheme::Fixed(address) => Self::Fixed(address), + } + } +} + +#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] +pub enum EvmEvent { + Call { + code_address: H160, + transfer: Option, + input: Vec, + target_gas: Option, + is_static: bool, + context: super::Context, + }, + Create { + caller: H160, + address: H160, + scheme: CreateScheme, + value: U256, + init_code: Vec, + target_gas: Option, + }, + Suicide { + address: H160, + target: H160, + balance: U256, + }, + Exit { + reason: ExitReason, + return_value: Vec, + }, + TransactCall { + caller: H160, + address: H160, + value: U256, + data: Vec, + gas_limit: u64, + }, + TransactCreate { + caller: H160, + value: U256, + init_code: Vec, + gas_limit: u64, + address: H160, + }, + TransactCreate2 { + caller: H160, + value: U256, + init_code: Vec, + salt: H256, + gas_limit: u64, + address: H160, + }, + PrecompileSubcall { + code_address: H160, + transfer: Option, + input: Vec, + target_gas: Option, + is_static: bool, + context: super::Context, + }, +} + +#[cfg(feature = "evm-tracing")] +impl<'a> From> for EvmEvent { + fn from(i: evm::tracing::Event<'a>) -> Self { + match i { + evm::tracing::Event::Call { + code_address, + transfer, + input, + target_gas, + is_static, + context, + } => Self::Call { + code_address, + transfer: if let Some(transfer) = transfer { + Some(transfer.clone().into()) + } else { + None + }, + input: input.to_vec(), + target_gas, + is_static, + context: context.clone().into(), + }, + evm::tracing::Event::Create { + caller, + address, + scheme, + value, + init_code, + target_gas, + } => Self::Create { + caller, + address, + scheme: scheme.into(), + value, + 
init_code: init_code.to_vec(), + target_gas, + }, + evm::tracing::Event::Suicide { + address, + target, + balance, + } => Self::Suicide { + address, + target, + balance, + }, + evm::tracing::Event::Exit { + reason, + return_value, + } => Self::Exit { + reason: reason.clone(), + return_value: return_value.to_vec(), + }, + evm::tracing::Event::TransactCall { + caller, + address, + value, + data, + gas_limit, + } => Self::TransactCall { + caller, + address, + value, + data: data.to_vec(), + gas_limit, + }, + evm::tracing::Event::TransactCreate { + caller, + value, + init_code, + gas_limit, + address, + } => Self::TransactCreate { + caller, + value, + init_code: init_code.to_vec(), + gas_limit, + address, + }, + evm::tracing::Event::TransactCreate2 { + caller, + value, + init_code, + salt, + gas_limit, + address, + } => Self::TransactCreate2 { + caller, + value, + init_code: init_code.to_vec(), + salt, + gas_limit, + address, + }, + evm::tracing::Event::PrecompileSubcall { + code_address, + transfer, + input, + target_gas, + is_static, + context, + } => Self::PrecompileSubcall { + code_address, + transfer: if let Some(transfer) = transfer { + Some(transfer.clone().into()) + } else { + None + }, + input: input.to_vec(), + target_gas, + is_static, + context: context.clone().into(), + }, + } + } +} diff --git a/vendor/primitives/evm-tracing-events/src/gasometer.rs b/vendor/primitives/evm-tracing-events/src/gasometer.rs new file mode 100644 index 0000000000..66ccec01ed --- /dev/null +++ b/vendor/primitives/evm-tracing-events/src/gasometer.rs @@ -0,0 +1,114 @@ +// Copyright 2019-2022 PureStake Inc. +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . + +use parity_scale_codec::{Decode, Encode}; + +#[derive(Debug, Default, Copy, Clone, Encode, Decode, PartialEq, Eq)] +pub struct Snapshot { + pub gas_limit: u64, + pub memory_gas: u64, + pub used_gas: u64, + pub refunded_gas: i64, +} + +impl Snapshot { + pub fn gas(&self) -> u64 { + self.gas_limit - self.used_gas - self.memory_gas + } +} + +#[cfg(feature = "evm-tracing")] +impl From> for Snapshot { + fn from(i: Option) -> Self { + if let Some(i) = i { + Self { + gas_limit: i.gas_limit, + memory_gas: i.memory_gas, + used_gas: i.used_gas, + refunded_gas: i.refunded_gas, + } + } else { + Default::default() + } + } +} + +#[derive(Debug, Copy, Clone, Encode, Decode, PartialEq, Eq)] +pub enum GasometerEvent { + RecordCost { + cost: u64, + snapshot: Snapshot, + }, + RecordRefund { + refund: i64, + snapshot: Snapshot, + }, + RecordStipend { + stipend: u64, + snapshot: Snapshot, + }, + RecordDynamicCost { + gas_cost: u64, + memory_gas: u64, + gas_refund: i64, + snapshot: Snapshot, + }, + RecordTransaction { + cost: u64, + snapshot: Snapshot, + }, +} + +#[cfg(feature = "evm-tracing")] +impl From for GasometerEvent { + fn from(i: evm_gasometer::tracing::Event) -> Self { + match i { + evm_gasometer::tracing::Event::RecordCost { cost, snapshot } => Self::RecordCost { + cost, + snapshot: snapshot.into(), + }, + evm_gasometer::tracing::Event::RecordRefund { refund, snapshot } => { + Self::RecordRefund { + refund, + snapshot: snapshot.into(), + } + } + evm_gasometer::tracing::Event::RecordStipend { stipend, snapshot } => { + Self::RecordStipend { + stipend, + snapshot: snapshot.into(), + } + } + 
evm_gasometer::tracing::Event::RecordDynamicCost { + gas_cost, + memory_gas, + gas_refund, + snapshot, + } => Self::RecordDynamicCost { + gas_cost, + memory_gas, + gas_refund, + snapshot: snapshot.into(), + }, + evm_gasometer::tracing::Event::RecordTransaction { cost, snapshot } => { + Self::RecordTransaction { + cost, + snapshot: snapshot.into(), + } + } + } + } +} diff --git a/vendor/primitives/evm-tracing-events/src/lib.rs b/vendor/primitives/evm-tracing-events/src/lib.rs new file mode 100644 index 0000000000..9b6019dc92 --- /dev/null +++ b/vendor/primitives/evm-tracing-events/src/lib.rs @@ -0,0 +1,116 @@ +// Copyright 2019-2022 PureStake Inc. +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . + +//! A Proxy in this context is an environmental trait implementor meant to be used for capturing +//! EVM trace events sent to a Host function from the Runtime. Works like: +//! - Runtime Api call `using` environmental. +//! - Runtime calls a Host function with some scale-encoded Evm event. +//! - Host function emits an additional event to this Listener. +//! - Proxy listens for the event and format the actual trace response. +//! +//! There are two proxy types: `Raw` and `CallList`. +//! - `Raw` - used for opcode-level traces. +//! - `CallList` - used for block tracing (stack of call stacks) and custom tracing outputs. +//! +//! 
The EVM event types may contain references and not implement Encode/Decode. +//! This module provide mirror types and conversion into them from the original events. + +#![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + +pub mod evm; +pub mod gasometer; +pub mod runtime; + +pub use self::evm::EvmEvent; +pub use gasometer::GasometerEvent; +pub use runtime::RuntimeEvent; + +use ethereum_types::{H160, U256}; +use parity_scale_codec::{Decode, Encode}; +use sp_runtime_interface::pass_by::PassByCodec; + +environmental::environmental!(listener: dyn Listener + 'static); + +pub fn using R>(l: &mut (dyn Listener + 'static), f: F) -> R { + listener::using(l, f) +} + +/// Allow to configure which data of the Step event +/// we want to keep or discard. Not discarding the data requires cloning the data +/// in the runtime which have a significant cost for each step. +#[derive(Clone, Copy, Eq, PartialEq, Debug, Encode, Decode, Default, PassByCodec)] +pub struct StepEventFilter { + pub enable_stack: bool, + pub enable_memory: bool, +} + +#[derive(Clone, Eq, PartialEq, Debug, Encode, Decode)] +pub enum Event { + Evm(evm::EvmEvent), + Gasometer(gasometer::GasometerEvent), + Runtime(runtime::RuntimeEvent), + CallListNew(), +} + +impl Event { + /// Access the global reference and call it's `event` method, passing the `Event` itself as + /// argument. + /// + /// This only works if we are `using` a global reference to a `Listener` implementor. + pub fn emit(self) { + listener::with(|listener| listener.event(self)); + } +} + +/// Main trait to proxy emitted messages. +/// Used 2 times : +/// - Inside the runtime to proxy the events through the host functions +/// - Inside the client to forward those events to the client listener. +pub trait Listener { + fn event(&mut self, event: Event); + + /// Allow the runtime to know which data should be discarded and not cloned. 
+ /// WARNING: It is only called once when the runtime tracing is instantiated to avoid + /// performing many ext calls. + fn step_event_filter(&self) -> StepEventFilter; +} + +pub fn step_event_filter() -> Option { + let mut filter = None; + listener::with(|listener| filter = Some(listener.step_event_filter())); + filter +} + +#[derive(Clone, Debug, Encode, Decode, PartialEq, Eq)] +pub struct Context { + /// Execution address. + pub address: H160, + /// Caller of the EVM. + pub caller: H160, + /// Apparent value of the EVM. + pub apparent_value: U256, +} + +impl From for Context { + fn from(i: evm_runtime::Context) -> Self { + Self { + address: i.address, + caller: i.caller, + apparent_value: i.apparent_value, + } + } +} diff --git a/vendor/primitives/evm-tracing-events/src/runtime.rs b/vendor/primitives/evm-tracing-events/src/runtime.rs new file mode 100644 index 0000000000..b35f7d6b05 --- /dev/null +++ b/vendor/primitives/evm-tracing-events/src/runtime.rs @@ -0,0 +1,326 @@ +// Copyright 2019-2022 PureStake Inc. +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . 
+ +extern crate alloc; + +use super::Context; +use alloc::vec::Vec; +use ethereum_types::{H160, H256, U256}; +pub use evm::{ExitError, ExitReason, ExitSucceed, Opcode}; +use parity_scale_codec::{Decode, Encode}; + +#[derive(Clone, Debug, Encode, Decode, PartialEq, Eq)] +pub struct Stack { + pub data: Vec, + pub limit: u64, +} + +impl From<&evm::Stack> for Stack { + fn from(i: &evm::Stack) -> Self { + Self { + data: i.data().clone(), + limit: i.limit() as u64, + } + } +} + +#[derive(Clone, Debug, Encode, Decode, PartialEq, Eq)] +pub struct Memory { + pub data: Vec, + pub effective_len: U256, + pub limit: u64, +} + +impl From<&evm::Memory> for Memory { + fn from(i: &evm::Memory) -> Self { + Self { + data: i.data().clone(), + effective_len: i.effective_len(), + limit: i.limit() as u64, + } + } +} + +#[derive(Clone, Copy, Debug, Eq, PartialEq, Encode, Decode)] +pub enum Capture { + /// The machine has exited. It cannot be executed again. + Exit(E), + /// The machine has trapped. It is waiting for external information, and can + /// be executed again. + Trap(T), +} + +pub type Trap = Vec; // Should hold the marshalled Opcode. + +#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] +pub enum RuntimeEvent { + Step { + context: Context, + // This needs to be marshalled in the runtime no matter what. 
+ opcode: Vec, + // We can use ExitReason with `with-codec` feature, + position: Result, + stack: Option, + memory: Option, + }, + StepResult { + result: Result<(), Capture>, + return_value: Vec, + }, + SLoad { + address: H160, + index: H256, + value: H256, + }, + SStore { + address: H160, + index: H256, + value: H256, + }, +} + +#[cfg(feature = "evm-tracing")] +impl RuntimeEvent { + pub fn from_evm_event<'a>( + i: evm_runtime::tracing::Event<'a>, + filter: crate::StepEventFilter, + ) -> Self { + match i { + evm_runtime::tracing::Event::Step { + context, + opcode, + position, + stack, + memory, + } => Self::Step { + context: context.clone().into(), + opcode: opcodes_string(opcode), + position: match position { + Ok(position) => Ok(*position as u64), + Err(e) => Err(e.clone()), + }, + stack: if filter.enable_stack { + Some(stack.into()) + } else { + None + }, + memory: if filter.enable_memory { + Some(memory.into()) + } else { + None + }, + }, + evm_runtime::tracing::Event::StepResult { + result, + return_value, + } => Self::StepResult { + result: match result { + Ok(_) => Ok(()), + Err(capture) => match capture { + evm::Capture::Exit(e) => Err(Capture::Exit(e.clone())), + evm::Capture::Trap(t) => Err(Capture::Trap(opcodes_string(*t))), + }, + }, + return_value: return_value.to_vec(), + }, + evm_runtime::tracing::Event::SLoad { + address, + index, + value, + } => Self::SLoad { + address, + index, + value, + }, + evm_runtime::tracing::Event::SStore { + address, + index, + value, + } => Self::SStore { + address, + index, + value, + }, + } + } +} + +#[cfg(feature = "evm-tracing")] +/// Converts an Opcode into its name, stored in a `Vec`. 
+pub fn opcodes_string(opcode: Opcode) -> Vec { + let tmp; + let out = match opcode { + Opcode(0) => "Stop", + Opcode(1) => "Add", + Opcode(2) => "Mul", + Opcode(3) => "Sub", + Opcode(4) => "Div", + Opcode(5) => "SDiv", + Opcode(6) => "Mod", + Opcode(7) => "SMod", + Opcode(8) => "AddMod", + Opcode(9) => "MulMod", + Opcode(10) => "Exp", + Opcode(11) => "SignExtend", + Opcode(16) => "Lt", + Opcode(17) => "Gt", + Opcode(18) => "Slt", + Opcode(19) => "Sgt", + Opcode(20) => "Eq", + Opcode(21) => "IsZero", + Opcode(22) => "And", + Opcode(23) => "Or", + Opcode(24) => "Xor", + Opcode(25) => "Not", + Opcode(26) => "Byte", + Opcode(27) => "Shl", + Opcode(28) => "Shr", + Opcode(29) => "Sar", + Opcode(32) => "Keccak256", + Opcode(48) => "Address", + Opcode(49) => "Balance", + Opcode(50) => "Origin", + Opcode(51) => "Caller", + Opcode(52) => "CallValue", + Opcode(53) => "CallDataLoad", + Opcode(54) => "CallDataSize", + Opcode(55) => "CallDataCopy", + Opcode(56) => "CodeSize", + Opcode(57) => "CodeCopy", + Opcode(58) => "GasPrice", + Opcode(59) => "ExtCodeSize", + Opcode(60) => "ExtCodeCopy", + Opcode(61) => "ReturnDataSize", + Opcode(62) => "ReturnDataCopy", + Opcode(63) => "ExtCodeHash", + Opcode(64) => "BlockHash", + Opcode(65) => "Coinbase", + Opcode(66) => "Timestamp", + Opcode(67) => "Number", + Opcode(68) => "Difficulty", + Opcode(69) => "GasLimit", + Opcode(70) => "ChainId", + Opcode(80) => "Pop", + Opcode(81) => "MLoad", + Opcode(82) => "MStore", + Opcode(83) => "MStore8", + Opcode(84) => "SLoad", + Opcode(85) => "SStore", + Opcode(86) => "Jump", + Opcode(87) => "JumpI", + Opcode(88) => "GetPc", + Opcode(89) => "MSize", + Opcode(90) => "Gas", + Opcode(91) => "JumpDest", + Opcode(96) => "Push1", + Opcode(97) => "Push2", + Opcode(98) => "Push3", + Opcode(99) => "Push4", + Opcode(100) => "Push5", + Opcode(101) => "Push6", + Opcode(102) => "Push7", + Opcode(103) => "Push8", + Opcode(104) => "Push9", + Opcode(105) => "Push10", + Opcode(106) => "Push11", + Opcode(107) => 
"Push12", + Opcode(108) => "Push13", + Opcode(109) => "Push14", + Opcode(110) => "Push15", + Opcode(111) => "Push16", + Opcode(112) => "Push17", + Opcode(113) => "Push18", + Opcode(114) => "Push19", + Opcode(115) => "Push20", + Opcode(116) => "Push21", + Opcode(117) => "Push22", + Opcode(118) => "Push23", + Opcode(119) => "Push24", + Opcode(120) => "Push25", + Opcode(121) => "Push26", + Opcode(122) => "Push27", + Opcode(123) => "Push28", + Opcode(124) => "Push29", + Opcode(125) => "Push30", + Opcode(126) => "Push31", + Opcode(127) => "Push32", + Opcode(128) => "Dup1", + Opcode(129) => "Dup2", + Opcode(130) => "Dup3", + Opcode(131) => "Dup4", + Opcode(132) => "Dup5", + Opcode(133) => "Dup6", + Opcode(134) => "Dup7", + Opcode(135) => "Dup8", + Opcode(136) => "Dup9", + Opcode(137) => "Dup10", + Opcode(138) => "Dup11", + Opcode(139) => "Dup12", + Opcode(140) => "Dup13", + Opcode(141) => "Dup14", + Opcode(142) => "Dup15", + Opcode(143) => "Dup16", + Opcode(144) => "Swap1", + Opcode(145) => "Swap2", + Opcode(146) => "Swap3", + Opcode(147) => "Swap4", + Opcode(148) => "Swap5", + Opcode(149) => "Swap6", + Opcode(150) => "Swap7", + Opcode(151) => "Swap8", + Opcode(152) => "Swap9", + Opcode(153) => "Swap10", + Opcode(154) => "Swap11", + Opcode(155) => "Swap12", + Opcode(156) => "Swap13", + Opcode(157) => "Swap14", + Opcode(158) => "Swap15", + Opcode(159) => "Swap16", + Opcode(160) => "Log0", + Opcode(161) => "Log1", + Opcode(162) => "Log2", + Opcode(163) => "Log3", + Opcode(164) => "Log4", + Opcode(176) => "JumpTo", + Opcode(177) => "JumpIf", + Opcode(178) => "JumpSub", + Opcode(180) => "JumpSubv", + Opcode(181) => "BeginSub", + Opcode(182) => "BeginData", + Opcode(184) => "ReturnSub", + Opcode(185) => "PutLocal", + Opcode(186) => "GetLocal", + Opcode(225) => "SLoadBytes", + Opcode(226) => "SStoreBytes", + Opcode(227) => "SSize", + Opcode(240) => "Create", + Opcode(241) => "Call", + Opcode(242) => "CallCode", + Opcode(243) => "Return", + Opcode(244) => "DelegateCall", + 
Opcode(245) => "Create2", + Opcode(250) => "StaticCall", + Opcode(252) => "TxExecGas", + Opcode(253) => "Revert", + Opcode(254) => "Invalid", + Opcode(255) => "SelfDestruct", + Opcode(n) => { + tmp = alloc::format!("Unknown({})", n); + &tmp + } + }; + out.as_bytes().to_vec() +} diff --git a/vendor/primitives/txpool/Cargo.toml b/vendor/primitives/txpool/Cargo.toml new file mode 100644 index 0000000000..d9b594247b --- /dev/null +++ b/vendor/primitives/txpool/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "moonbeam-rpc-primitives-txpool" +authors = ["PureStake"] +edition = "2021" +homepage = "https://moonbeam.network" +license = "GPL-3.0-only" +repository = "https://github.com/PureStake/moonbeam/" +version = "0.6.0" + +[dependencies] +ethereum = { workspace = true, features = ["with-codec"] } + +# Substrate +parity-scale-codec = { workspace = true } +sp-api = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } + +[features] +default = ["std"] +std = [ + "ethereum/std", + "sp-api/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", +] diff --git a/vendor/primitives/txpool/src/lib.rs b/vendor/primitives/txpool/src/lib.rs new file mode 100644 index 0000000000..fc5f2c2bf7 --- /dev/null +++ b/vendor/primitives/txpool/src/lib.rs @@ -0,0 +1,52 @@ +// Copyright 2019-2022 PureStake Inc. +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. 
If not, see . + +#![cfg_attr(not(feature = "std"), no_std)] +// These clippy lints are disabled because the macro-generated code triggers them. +#![allow(clippy::unnecessary_mut_passed)] +#![allow(clippy::too_many_arguments)] + +pub use ethereum::{TransactionV0 as LegacyTransaction, TransactionV2 as Transaction}; +use parity_scale_codec::{Decode, Encode}; +use sp_runtime::traits::Block as BlockT; +use sp_std::vec::Vec; + +#[derive(Eq, PartialEq, Clone, Encode, Decode, sp_runtime::RuntimeDebug)] +pub struct TxPoolResponseLegacy { + pub ready: Vec, + pub future: Vec, +} + +#[derive(Eq, PartialEq, Clone, Encode, Decode, sp_runtime::RuntimeDebug)] +pub struct TxPoolResponse { + pub ready: Vec, + pub future: Vec, +} + +sp_api::decl_runtime_apis! { + #[api_version(2)] + pub trait TxPoolRuntimeApi { + #[changed_in(2)] + fn extrinsic_filter( + xt_ready: Vec<::Extrinsic>, + xt_future: Vec<::Extrinsic>, + ) -> TxPoolResponseLegacy; + fn extrinsic_filter( + xt_ready: Vec<::Extrinsic>, + xt_future: Vec<::Extrinsic>, + ) -> TxPoolResponse; + } +} diff --git a/vendor/rpc-core/debug/Cargo.toml b/vendor/rpc-core/debug/Cargo.toml new file mode 100644 index 0000000000..dca0ddc94b --- /dev/null +++ b/vendor/rpc-core/debug/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "moonbeam-rpc-core-debug" +authors = ["PureStake"] +edition = "2021" +homepage = "https://moonbeam.network" +license = "GPL-3.0-only" +repository = "https://github.com/PureStake/moonbeam/" +version = "0.1.0" + +[dependencies] +ethereum-types = { workspace = true, features = ["std"] } +futures = { workspace = true, features = ["compat"] } +jsonrpsee = { workspace = true, features = ["macros", "server"] } +moonbeam-client-evm-tracing = { workspace = true } +moonbeam-rpc-core-types = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } + +sp-core = { workspace = true, features = ["std"] } diff --git a/vendor/rpc-core/debug/src/lib.rs b/vendor/rpc-core/debug/src/lib.rs new file mode 100644 
index 0000000000..2f9bd2416b --- /dev/null +++ b/vendor/rpc-core/debug/src/lib.rs @@ -0,0 +1,48 @@ +// Copyright 2019-2022 PureStake Inc. +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . +use ethereum_types::H256; +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; +use moonbeam_client_evm_tracing::types::single; +use moonbeam_rpc_core_types::RequestBlockId; +use serde::Deserialize; + +#[derive(Clone, Eq, PartialEq, Debug, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct TraceParams { + pub disable_storage: Option, + pub disable_memory: Option, + pub disable_stack: Option, + /// Javascript tracer (we just check if it's Blockscout tracer string) + pub tracer: Option, + pub timeout: Option, +} + +#[rpc(server)] +#[jsonrpsee::core::async_trait] +pub trait Debug { + #[method(name = "debug_traceTransaction")] + async fn trace_transaction( + &self, + transaction_hash: H256, + params: Option, + ) -> RpcResult; + #[method(name = "debug_traceBlockByNumber", aliases = ["debug_traceBlockByHash"])] + async fn trace_block( + &self, + id: RequestBlockId, + params: Option, + ) -> RpcResult>; +} diff --git a/vendor/rpc-core/trace/Cargo.toml b/vendor/rpc-core/trace/Cargo.toml new file mode 100644 index 0000000000..61719ac4e2 --- /dev/null +++ b/vendor/rpc-core/trace/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "moonbeam-rpc-core-trace" +authors = ["PureStake"] +edition = "2021" 
+homepage = "https://moonbeam.network" +license = "GPL-3.0-only" +repository = "https://github.com/PureStake/moonbeam/" +version = "0.6.0" + +[dependencies] +ethereum-types = { workspace = true, features = ["std"] } +futures = { workspace = true, features = ["compat"] } +jsonrpsee = { workspace = true, features = ["macros", "server"] } +moonbeam-client-evm-tracing = { workspace = true } +moonbeam-rpc-core-types = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } diff --git a/vendor/rpc-core/trace/src/lib.rs b/vendor/rpc-core/trace/src/lib.rs new file mode 100644 index 0000000000..9b2e3608fc --- /dev/null +++ b/vendor/rpc-core/trace/src/lib.rs @@ -0,0 +1,50 @@ +// Copyright 2019-2022 PureStake Inc. +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . + +use ethereum_types::H160; +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; +use moonbeam_client_evm_tracing::types::block::TransactionTrace; +use moonbeam_rpc_core_types::RequestBlockId; +use serde::Deserialize; + +#[rpc(server)] +#[jsonrpsee::core::async_trait] +pub trait Trace { + #[method(name = "trace_filter")] + async fn filter(&self, filter: FilterRequest) -> RpcResult>; +} + +#[derive(Clone, Eq, PartialEq, Debug, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct FilterRequest { + /// (optional?) From this block. + pub from_block: Option, + + /// (optional?) To this block. 
+ pub to_block: Option, + + /// (optional) Sent from these addresses. + pub from_address: Option>, + + /// (optional) Sent to these addresses. + pub to_address: Option>, + + /// (optional) The offset trace number + pub after: Option, + + /// (optional) Integer number of traces to display in a batch. + pub count: Option, +} diff --git a/vendor/rpc-core/txpool/Cargo.toml b/vendor/rpc-core/txpool/Cargo.toml new file mode 100644 index 0000000000..7ccbf4f6c3 --- /dev/null +++ b/vendor/rpc-core/txpool/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "moonbeam-rpc-core-txpool" +authors = ["PureStake"] +edition = "2021" +homepage = "https://moonbeam.network" +license = "GPL-3.0-only" +repository = "https://github.com/PureStake/moonbeam/" +version = "0.6.0" + +[dependencies] +ethereum = { workspace = true, features = ["with-codec"] } +ethereum-types = { workspace = true, features = ["std"] } +jsonrpsee = { workspace = true, features = ["macros", "server"] } +serde = { workspace = true } +serde_json = { workspace = true } + +fc-rpc-core = { workspace = true } diff --git a/vendor/rpc-core/txpool/src/lib.rs b/vendor/rpc-core/txpool/src/lib.rs new file mode 100644 index 0000000000..aa9d357bb1 --- /dev/null +++ b/vendor/rpc-core/txpool/src/lib.rs @@ -0,0 +1,34 @@ +// Copyright 2019-2022 PureStake Inc. +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . 
+ +use ethereum_types::U256; +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; + +mod types; + +pub use crate::types::{Get as GetT, Summary, Transaction, TransactionMap, TxPoolResult}; + +#[rpc(server)] +pub trait TxPool { + #[method(name = "txpool_content")] + fn content(&self) -> RpcResult>>; + + #[method(name = "txpool_inspect")] + fn inspect(&self) -> RpcResult>>; + + #[method(name = "txpool_status")] + fn status(&self) -> RpcResult>; +} diff --git a/vendor/rpc-core/txpool/src/types/content.rs b/vendor/rpc-core/txpool/src/types/content.rs new file mode 100644 index 0000000000..92ea202edb --- /dev/null +++ b/vendor/rpc-core/txpool/src/types/content.rs @@ -0,0 +1,111 @@ +// Copyright 2019-2022 PureStake Inc. +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . 
+ +use crate::GetT; +use ethereum::{TransactionAction, TransactionV2 as EthereumTransaction}; +use ethereum_types::{H160, H256, U256}; +use fc_rpc_core::types::Bytes; +use serde::{Serialize, Serializer}; + +#[derive(Debug, Default, Clone, PartialEq, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct Transaction { + /// Hash + pub hash: H256, + /// Nonce + pub nonce: U256, + /// Block hash + #[serde(serialize_with = "block_hash_serialize")] + pub block_hash: Option, + /// Block number + pub block_number: Option, + /// Sender + pub from: H160, + /// Recipient + #[serde(serialize_with = "to_serialize")] + pub to: Option, + /// Transfered value + pub value: U256, + /// Gas Price + pub gas_price: U256, + /// Gas + pub gas: U256, + /// Data + pub input: Bytes, + /// Transaction Index + pub transaction_index: Option, +} + +fn block_hash_serialize(hash: &Option, serializer: S) -> Result +where + S: Serializer, +{ + serializer.serialize_str(&format!("0x{:x}", hash.unwrap_or_default())) +} + +fn to_serialize(hash: &Option, serializer: S) -> Result +where + S: Serializer, +{ + serializer.serialize_str(&format!("0x{:x}", hash.unwrap_or_default())) +} + +impl GetT for Transaction { + fn get(hash: H256, from_address: H160, txn: &EthereumTransaction) -> Self { + let (nonce, action, value, gas_price, gas_limit, input) = match txn { + EthereumTransaction::Legacy(t) => ( + t.nonce, + t.action, + t.value, + t.gas_price, + t.gas_limit, + t.input.clone(), + ), + EthereumTransaction::EIP2930(t) => ( + t.nonce, + t.action, + t.value, + t.gas_price, + t.gas_limit, + t.input.clone(), + ), + EthereumTransaction::EIP1559(t) => ( + t.nonce, + t.action, + t.value, + t.max_fee_per_gas, + t.gas_limit, + t.input.clone(), + ), + }; + Self { + hash, + nonce, + block_hash: None, + block_number: None, + from: from_address, + to: match action { + TransactionAction::Call(to) => Some(to), + _ => None, + }, + value, + gas_price, + gas: gas_limit, + input: Bytes(input), + transaction_index: None, 
+ } + } +} diff --git a/vendor/rpc-core/txpool/src/types/inspect.rs b/vendor/rpc-core/txpool/src/types/inspect.rs new file mode 100644 index 0000000000..2f8cb70d00 --- /dev/null +++ b/vendor/rpc-core/txpool/src/types/inspect.rs @@ -0,0 +1,63 @@ +// Copyright 2019-2022 PureStake Inc. +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . + +use crate::GetT; +use ethereum::{TransactionAction, TransactionV2 as EthereumTransaction}; +use ethereum_types::{H160, H256, U256}; +use serde::{Serialize, Serializer}; + +#[derive(Clone, Debug)] +pub struct Summary { + pub to: Option, + pub value: U256, + pub gas: U256, + pub gas_price: U256, +} + +impl Serialize for Summary { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let res = format!( + "0x{:x}: {} wei + {} gas x {} wei", + self.to.unwrap_or_default(), + self.value, + self.gas, + self.gas_price + ); + serializer.serialize_str(&res) + } +} + +impl GetT for Summary { + fn get(_hash: H256, _from_address: H160, txn: &EthereumTransaction) -> Self { + let (action, value, gas_price, gas_limit) = match txn { + EthereumTransaction::Legacy(t) => (t.action, t.value, t.gas_price, t.gas_limit), + EthereumTransaction::EIP2930(t) => (t.action, t.value, t.gas_price, t.gas_limit), + EthereumTransaction::EIP1559(t) => (t.action, t.value, t.max_fee_per_gas, t.gas_limit), + }; + Self { + to: match action { + 
TransactionAction::Call(to) => Some(to), + _ => None, + }, + value, + gas_price, + gas: gas_limit, + } + } +} diff --git a/vendor/rpc-core/txpool/src/types/mod.rs b/vendor/rpc-core/txpool/src/types/mod.rs new file mode 100644 index 0000000000..5702018d9c --- /dev/null +++ b/vendor/rpc-core/txpool/src/types/mod.rs @@ -0,0 +1,38 @@ +// Copyright 2019-2022 PureStake Inc. +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . 
+ +mod content; +mod inspect; + +use ethereum::TransactionV2 as EthereumTransaction; +use ethereum_types::{H160, H256, U256}; +use serde::Serialize; +use std::collections::HashMap; + +pub use self::content::Transaction; +pub use self::inspect::Summary; + +pub type TransactionMap = HashMap>; + +#[derive(Debug, Serialize)] +pub struct TxPoolResult { + pub pending: T, + pub queued: T, +} + +pub trait Get { + fn get(hash: H256, from_address: H160, txn: &EthereumTransaction) -> Self; +} diff --git a/vendor/rpc-core/types/Cargo.toml b/vendor/rpc-core/types/Cargo.toml new file mode 100644 index 0000000000..bc561cde28 --- /dev/null +++ b/vendor/rpc-core/types/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "moonbeam-rpc-core-types" +authors = ["PureStake"] +edition = "2021" +homepage = "https://moonbeam.network" +license = "GPL-3.0-only" +repository = "https://github.com/PureStake/moonbeam/" +version = "0.1.0" + +[dependencies] +ethereum-types = { workspace = true, features = ["std"] } +serde = { workspace = true } +serde_json = { workspace = true } diff --git a/vendor/rpc-core/types/src/lib.rs b/vendor/rpc-core/types/src/lib.rs new file mode 100644 index 0000000000..e8023d41bd --- /dev/null +++ b/vendor/rpc-core/types/src/lib.rs @@ -0,0 +1,48 @@ +// Copyright 2019-2022 PureStake Inc.. +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . 
+ +use ethereum_types::H256; +use serde::{de::Error, Deserialize, Deserializer}; + +#[derive(Copy, Clone, Eq, PartialEq, Debug, Deserialize)] +#[serde(rename_all = "camelCase", untagged)] +pub enum RequestBlockId { + Number(#[serde(deserialize_with = "deserialize_u32_0x")] u32), + Hash(H256), + Tag(RequestBlockTag), +} + +#[derive(Copy, Clone, Eq, PartialEq, Debug, Deserialize)] +#[serde(rename_all = "camelCase")] +pub enum RequestBlockTag { + Earliest, + Latest, + Pending, +} + +fn deserialize_u32_0x<'de, D>(deserializer: D) -> Result +where + D: Deserializer<'de>, +{ + let buf = String::deserialize(deserializer)?; + + let parsed = match buf.strip_prefix("0x") { + Some(buf) => u32::from_str_radix(&buf, 16), + None => u32::from_str_radix(&buf, 10), + }; + + parsed.map_err(|e| Error::custom(format!("parsing error: {:?} from '{}'", e, buf))) +} diff --git a/vendor/rpc/debug/Cargo.toml b/vendor/rpc/debug/Cargo.toml new file mode 100644 index 0000000000..a0e6e55b35 --- /dev/null +++ b/vendor/rpc/debug/Cargo.toml @@ -0,0 +1,39 @@ +[package] +name = "moonbeam-rpc-debug" +authors = ["PureStake"] +edition = "2021" +homepage = "https://moonbeam.network" +license = "GPL-3.0-only" +repository = "https://github.com/PureStake/moonbeam/" +version = "0.1.0" + +[dependencies] +futures = { workspace = true, features = ["compat"] } +hex-literal = { workspace = true } +jsonrpsee = { workspace = true, features = ["macros", "server"] } +tokio = { workspace = true, features = ["sync", "time"] } + +# Moonbeam +moonbeam-client-evm-tracing = { workspace = true } +moonbeam-rpc-core-debug = { workspace = true } +moonbeam-rpc-core-types = { workspace = true } +moonbeam-rpc-primitives-debug = { workspace = true } + +# Substrate +sc-client-api = { workspace = true } +sc-utils = { workspace = true } +sp-api = { workspace = true, features = ["std"] } +sp-block-builder = { workspace = true } +sp-blockchain = { workspace = true } +sp-core = { workspace = true, features = ["std"] } +sp-io = { 
workspace = true, features = ["std"] } +sp-runtime = { workspace = true, features = ["std"] } + +# Frontier +ethereum = { workspace = true, features = ["with-codec"] } +ethereum-types = { workspace = true, features = ["std"] } +fc-consensus = { workspace = true } +fc-db = { workspace = true } +fc-rpc = { workspace = true, features = ["rpc-binary-search-estimate"] } +fc-storage = { workspace = true } +fp-rpc = { workspace = true } diff --git a/vendor/rpc/debug/src/lib.rs b/vendor/rpc/debug/src/lib.rs new file mode 100644 index 0000000000..f6c68c3792 --- /dev/null +++ b/vendor/rpc/debug/src/lib.rs @@ -0,0 +1,591 @@ +// Copyright 2019-2022 PureStake Inc. +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . 
+use futures::{SinkExt, StreamExt}; +use jsonrpsee::core::{async_trait, RpcResult}; +pub use moonbeam_rpc_core_debug::{DebugServer, TraceParams}; + +use tokio::{ + self, + sync::{oneshot, Semaphore}, +}; + +use ethereum_types::H256; +use fc_rpc::{frontier_backend_client, internal_err, OverrideHandle}; +use fp_rpc::EthereumRuntimeRPCApi; +use moonbeam_client_evm_tracing::{formatters::ResponseFormatter, types::single}; +use moonbeam_rpc_core_types::{RequestBlockId, RequestBlockTag}; +use moonbeam_rpc_primitives_debug::{DebugRuntimeApi, TracerInput}; +use sc_client_api::backend::{Backend, StateBackend, StorageProvider}; +use sc_utils::mpsc::TracingUnboundedSender; +use sp_api::{ApiExt, BlockId, Core, HeaderT, ProvideRuntimeApi}; +use sp_block_builder::BlockBuilder; +use sp_blockchain::{ + Backend as BlockchainBackend, Error as BlockChainError, HeaderBackend, HeaderMetadata, +}; +use sp_runtime::traits::{BlakeTwo256, Block as BlockT, UniqueSaturatedInto}; +use std::{future::Future, marker::PhantomData, sync::Arc}; + +pub enum RequesterInput { + Transaction(H256), + Block(RequestBlockId), +} + +pub enum Response { + Single(single::TransactionTrace), + Block(Vec), +} + +pub type Responder = oneshot::Sender>; +pub type DebugRequester = + TracingUnboundedSender<((RequesterInput, Option), Responder)>; + +pub struct Debug { + pub requester: DebugRequester, +} + +impl Debug { + pub fn new(requester: DebugRequester) -> Self { + Self { requester } + } +} + +#[async_trait] +impl DebugServer for Debug { + /// Handler for `debug_traceTransaction` request. Communicates with the service-defined task + /// using channels. + async fn trace_transaction( + &self, + transaction_hash: H256, + params: Option, + ) -> RpcResult { + let mut requester = self.requester.clone(); + + let (tx, rx) = oneshot::channel(); + // Send a message from the rpc handler to the service level task. 
+ requester + .send(((RequesterInput::Transaction(transaction_hash), params), tx)) + .await + .map_err(|err| { + internal_err(format!( + "failed to send request to debug service : {:?}", + err + )) + })?; + + // Receive a message from the service level task and send the rpc response. + rx.await + .map_err(|err| internal_err(format!("debug service dropped the channel : {:?}", err)))? + .map(|res| match res { + Response::Single(res) => res, + _ => unreachable!(), + }) + } + + async fn trace_block( + &self, + id: RequestBlockId, + params: Option, + ) -> RpcResult> { + let mut requester = self.requester.clone(); + + let (tx, rx) = oneshot::channel(); + // Send a message from the rpc handler to the service level task. + requester + .send(((RequesterInput::Block(id), params), tx)) + .await + .map_err(|err| { + internal_err(format!( + "failed to send request to debug service : {:?}", + err + )) + })?; + + // Receive a message from the service level task and send the rpc response. + rx.await + .map_err(|err| internal_err(format!("debug service dropped the channel : {:?}", err)))? + .map(|res| match res { + Response::Block(res) => res, + _ => unreachable!(), + }) + } +} + +pub struct DebugHandler(PhantomData<(B, C, BE)>); + +impl DebugHandler +where + BE: Backend + 'static, + BE::State: StateBackend, + C: ProvideRuntimeApi, + C: StorageProvider, + C: HeaderMetadata + HeaderBackend, + C: Send + Sync + 'static, + B: BlockT + Send + Sync + 'static, + C::Api: BlockBuilder, + C::Api: DebugRuntimeApi, + C::Api: EthereumRuntimeRPCApi, + C::Api: ApiExt, +{ + /// Task spawned at service level that listens for messages on the rpc channel and spawns + /// blocking tasks using a permit pool. 
+ pub fn task( + client: Arc, + backend: Arc, + frontier_backend: Arc>, + permit_pool: Arc, + overrides: Arc>, + raw_max_memory_usage: usize, + ) -> (impl Future, DebugRequester) { + let (tx, mut rx): (DebugRequester, _) = + sc_utils::mpsc::tracing_unbounded("debug-requester", 100_000); + + let fut = async move { + loop { + match rx.next().await { + Some(( + (RequesterInput::Transaction(transaction_hash), params), + response_tx, + )) => { + let client = client.clone(); + let backend = backend.clone(); + let frontier_backend = frontier_backend.clone(); + let permit_pool = permit_pool.clone(); + let overrides = overrides.clone(); + + tokio::task::spawn(async move { + let _ = response_tx.send( + async { + let _permit = permit_pool.acquire().await; + tokio::task::spawn_blocking(move || { + Self::handle_transaction_request( + client.clone(), + backend.clone(), + frontier_backend.clone(), + transaction_hash, + params, + overrides.clone(), + raw_max_memory_usage, + ) + }) + .await + .map_err(|e| { + internal_err(format!( + "Internal error on spawned task : {:?}", + e + )) + })? + } + .await, + ); + }); + } + Some(((RequesterInput::Block(request_block_id), params), response_tx)) => { + let client = client.clone(); + let backend = backend.clone(); + let frontier_backend = frontier_backend.clone(); + let permit_pool = permit_pool.clone(); + let overrides = overrides.clone(); + + tokio::task::spawn(async move { + let _ = response_tx.send( + async { + let _permit = permit_pool.acquire().await; + + tokio::task::spawn_blocking(move || { + Self::handle_block_request( + client.clone(), + backend.clone(), + frontier_backend.clone(), + request_block_id, + params, + overrides.clone(), + ) + }) + .await + .map_err(|e| { + internal_err(format!( + "Internal error on spawned task : {:?}", + e + )) + })? 
+ } + .await, + ); + }); + } + _ => {} + } + } + }; + (fut, tx) + } + + fn handle_params(params: Option) -> RpcResult<(TracerInput, single::TraceType)> { + // Set trace input and type + match params { + Some(TraceParams { + tracer: Some(tracer), + .. + }) => { + const BLOCKSCOUT_JS_CODE_HASH: [u8; 16] = + hex_literal::hex!("94d9f08796f91eb13a2e82a6066882f7"); + const BLOCKSCOUT_JS_CODE_HASH_V2: [u8; 16] = + hex_literal::hex!("89db13694675692951673a1e6e18ff02"); + let hash = sp_io::hashing::twox_128(&tracer.as_bytes()); + let tracer = + if hash == BLOCKSCOUT_JS_CODE_HASH || hash == BLOCKSCOUT_JS_CODE_HASH_V2 { + Some(TracerInput::Blockscout) + } else if tracer == "callTracer" { + Some(TracerInput::CallTracer) + } else { + None + }; + if let Some(tracer) = tracer { + Ok((tracer, single::TraceType::CallList)) + } else { + return Err(internal_err(format!( + "javascript based tracing is not available (hash :{:?})", + hash + ))); + } + } + Some(params) => Ok(( + TracerInput::None, + single::TraceType::Raw { + disable_storage: params.disable_storage.unwrap_or(false), + disable_memory: params.disable_memory.unwrap_or(false), + disable_stack: params.disable_stack.unwrap_or(false), + }, + )), + _ => Ok(( + TracerInput::None, + single::TraceType::Raw { + disable_storage: false, + disable_memory: false, + disable_stack: false, + }, + )), + } + } + + fn handle_block_request( + client: Arc, + backend: Arc, + frontier_backend: Arc>, + request_block_id: RequestBlockId, + params: Option, + overrides: Arc>, + ) -> RpcResult { + let (tracer_input, trace_type) = Self::handle_params(params)?; + + let reference_id: BlockId = match request_block_id { + RequestBlockId::Number(n) => Ok(BlockId::Number(n.unique_saturated_into())), + RequestBlockId::Tag(RequestBlockTag::Latest) => { + Ok(BlockId::Number(client.info().best_number)) + } + RequestBlockId::Tag(RequestBlockTag::Earliest) => { + Ok(BlockId::Number(0u32.unique_saturated_into())) + } + RequestBlockId::Tag(RequestBlockTag::Pending) 
=> { + Err(internal_err("'pending' blocks are not supported")) + } + RequestBlockId::Hash(eth_hash) => { + match frontier_backend_client::load_hash::( + client.as_ref(), + frontier_backend.as_ref(), + eth_hash, + ) { + Ok(Some(hash)) => Ok(BlockId::Hash(hash)), + Ok(_) => Err(internal_err("Block hash not found".to_string())), + Err(e) => Err(e), + } + } + }?; + + // Get ApiRef. This handle allow to keep changes between txs in an internal buffer. + let api = client.runtime_api(); + // Get Blockchain backend + let blockchain = backend.blockchain(); + let Ok(hash) = client.expect_block_hash_from_id(&reference_id) else { + return Err(internal_err("Block header not found")) + }; + let header = match client.header(hash) { + Ok(Some(h)) => h, + _ => return Err(internal_err("Block header not found")), + }; + + let schema = fc_storage::onchain_storage_schema(client.as_ref(), hash); + + // Using storage overrides we align with `:ethereum_schema` which will result in proper + // SCALE decoding in case of migration. + let statuses = match overrides.schemas.get(&schema) { + Some(schema) => schema + .current_transaction_statuses(hash) + .unwrap_or_default(), + _ => return Err(internal_err(format!("No storage override at {:?}", hash))), + }; + + // Known ethereum transaction hashes. + let eth_tx_hashes: Vec<_> = statuses.iter().map(|t| t.transaction_hash).collect(); + + // If there are no ethereum transactions in the block return empty trace right away. + if eth_tx_hashes.is_empty() { + return Ok(Response::Block(vec![])); + } + + // Get block extrinsics. + let exts = blockchain + .body(hash) + .map_err(|e| internal_err(format!("Fail to read blockchain db: {:?}", e)))? + .unwrap_or_default(); + + // Get parent blockid. + let parent_block_hash = *header.parent_hash(); + + // Trace the block. 
+ let f = || -> RpcResult<_> { + api.initialize_block(parent_block_hash, &header) + .map_err(|e| internal_err(format!("Runtime api access error: {:?}", e)))?; + + let _result = api + .trace_block(parent_block_hash, exts, eth_tx_hashes) + .map_err(|e| { + internal_err(format!( + "Blockchain error when replaying block {} : {:?}", + reference_id, e + )) + })? + .map_err(|e| { + internal_err(format!( + "Internal runtime error when replaying block {} : {:?}", + reference_id, e + )) + })?; + Ok(moonbeam_rpc_primitives_debug::Response::Block) + }; + + return match trace_type { + single::TraceType::CallList => { + let mut proxy = moonbeam_client_evm_tracing::listeners::CallList::default(); + proxy.using(f)?; + proxy.finish_transaction(); + let response = match tracer_input { + TracerInput::CallTracer => { + moonbeam_client_evm_tracing::formatters::CallTracer::format(proxy) + .ok_or("Trace result is empty.") + .map_err(|e| internal_err(format!("{:?}", e))) + } + _ => Err(internal_err( + "Bug: failed to resolve the tracer format.".to_string(), + )), + }?; + + Ok(Response::Block(response)) + } + _ => Err(internal_err( + "debug_traceBlock functions currently only support callList mode (enabled + by providing `{{'tracer': 'callTracer'}}` in the request)." + .to_string(), + )), + }; + } + + /// Replays a transaction in the Runtime at a given block height. + /// + /// In order to succesfully reproduce the result of the original transaction we need a correct + /// state to replay over. + /// + /// Substrate allows to apply extrinsics in the Runtime and thus creating an overlayed state. + /// This overlayed changes will live in-memory for the lifetime of the ApiRef. 
+ fn handle_transaction_request( + client: Arc, + backend: Arc, + frontier_backend: Arc>, + transaction_hash: H256, + params: Option, + overrides: Arc>, + raw_max_memory_usage: usize, + ) -> RpcResult { + let (tracer_input, trace_type) = Self::handle_params(params)?; + + let (hash, index) = match frontier_backend_client::load_transactions::( + client.as_ref(), + frontier_backend.as_ref(), + transaction_hash, + false, + ) { + Ok(Some((hash, index))) => (hash, index as usize), + Ok(None) => return Err(internal_err("Transaction hash not found".to_string())), + Err(e) => return Err(e), + }; + + let reference_id = match frontier_backend_client::load_hash::( + client.as_ref(), + frontier_backend.as_ref(), + hash, + ) { + Ok(Some(hash)) => BlockId::Hash(hash), + Ok(_) => return Err(internal_err("Block hash not found".to_string())), + Err(e) => return Err(e), + }; + // Get ApiRef. This handle allow to keep changes between txs in an internal buffer. + let api = client.runtime_api(); + // Get Blockchain backend + let blockchain = backend.blockchain(); + // Get the header I want to work with. + let Ok(reference_hash) = client.expect_block_hash_from_id(&reference_id) else { + return Err(internal_err("Block header not found")) + }; + let header = match client.header(reference_hash) { + Ok(Some(h)) => h, + _ => return Err(internal_err("Block header not found")), + }; + // Get parent blockid. + let parent_block_hash = *header.parent_hash(); + + // Get block extrinsics. + let exts = blockchain + .body(reference_hash) + .map_err(|e| internal_err(format!("Fail to read blockchain db: {:?}", e)))? 
+ .unwrap_or_default(); + + // Get DebugRuntimeApi version + let trace_api_version = if let Ok(Some(api_version)) = + api.api_version::>(parent_block_hash) + { + api_version + } else { + return Err(internal_err( + "Runtime api version call failed (trace)".to_string(), + )); + }; + + let schema = fc_storage::onchain_storage_schema(client.as_ref(), reference_hash); + + // Get the block that contains the requested transaction. Using storage overrides we align + // with `:ethereum_schema` which will result in proper SCALE decoding in case of migration. + let reference_block = match overrides.schemas.get(&schema) { + Some(schema) => schema.current_block(reference_hash), + _ => { + return Err(internal_err(format!( + "No storage override at {:?}", + reference_hash + ))) + } + }; + + // Get the actual ethereum transaction. + if let Some(block) = reference_block { + let transactions = block.transactions; + if let Some(transaction) = transactions.get(index) { + let f = || -> RpcResult<_> { + api.initialize_block(parent_block_hash, &header) + .map_err(|e| internal_err(format!("Runtime api access error: {:?}", e)))?; + + if trace_api_version >= 4 { + let _result = api + .trace_transaction(parent_block_hash, exts, &transaction) + .map_err(|e| { + internal_err(format!( + "Runtime api access error (version {:?}): {:?}", + trace_api_version, e + )) + })? + .map_err(|e| internal_err(format!("DispatchError: {:?}", e)))?; + } else { + // Pre-london update, legacy transactions. + let _result = match transaction { + ethereum::TransactionV2::Legacy(tx) => + { + #[allow(deprecated)] + api.trace_transaction_before_version_4(parent_block_hash, exts, &tx) + .map_err(|e| { + internal_err(format!( + "Runtime api access error (legacy): {:?}", + e + )) + })? + .map_err(|e| internal_err(format!("DispatchError: {:?}", e)))? 
+ } + _ => { + return Err(internal_err( + "Bug: pre-london runtime expects legacy transactions" + .to_string(), + )) + } + }; + } + + Ok(moonbeam_rpc_primitives_debug::Response::Single) + }; + + return match trace_type { + single::TraceType::Raw { + disable_storage, + disable_memory, + disable_stack, + } => { + let mut proxy = moonbeam_client_evm_tracing::listeners::Raw::new( + disable_storage, + disable_memory, + disable_stack, + raw_max_memory_usage, + ); + proxy.using(f)?; + Ok(Response::Single( + moonbeam_client_evm_tracing::formatters::Raw::format(proxy).ok_or( + internal_err( + "replayed transaction generated too much data. \ + try disabling memory or storage?", + ), + )?, + )) + } + single::TraceType::CallList => { + let mut proxy = moonbeam_client_evm_tracing::listeners::CallList::default(); + proxy.using(f)?; + proxy.finish_transaction(); + let response = match tracer_input { + TracerInput::Blockscout => { + moonbeam_client_evm_tracing::formatters::Blockscout::format(proxy) + .ok_or("Trace result is empty.") + .map_err(|e| internal_err(format!("{:?}", e))) + } + TracerInput::CallTracer => { + let mut res = + moonbeam_client_evm_tracing::formatters::CallTracer::format( + proxy, + ) + .ok_or("Trace result is empty.") + .map_err(|e| internal_err(format!("{:?}", e)))?; + Ok(res.pop().expect("Trace result is empty.")) + } + _ => Err(internal_err( + "Bug: failed to resolve the tracer format.".to_string(), + )), + }?; + Ok(Response::Single(response)) + } + not_supported => Err(internal_err(format!( + "Bug: `handle_transaction_request` does not support {:?}.", + not_supported + ))), + }; + } + } + Err(internal_err("Runtime block call failed".to_string())) + } +} diff --git a/vendor/rpc/trace/Cargo.toml b/vendor/rpc/trace/Cargo.toml new file mode 100644 index 0000000000..1c77e20024 --- /dev/null +++ b/vendor/rpc/trace/Cargo.toml @@ -0,0 +1,43 @@ +[package] +name = "moonbeam-rpc-trace" +authors = ["PureStake"] +edition = "2021" +homepage = "https://moonbeam.network" 
+license = "GPL-3.0-only" +repository = "https://github.com/PureStake/moonbeam/" +version = "0.6.0" + +[dependencies] +ethereum = { workspace = true, features = ["with-codec", "std"] } +ethereum-types = { workspace = true, features = ["std"] } +futures = { workspace = true } +jsonrpsee = { workspace = true, features = ["macros", "server"] } +serde = { workspace = true } +sha3 = { workspace = true, features = ["std"] } +tokio = { workspace = true, features = ["sync", "time"] } +tracing = { workspace = true } + +# Moonbeam +moonbeam-client-evm-tracing = { workspace = true } +moonbeam-rpc-core-trace = { workspace = true } +moonbeam-rpc-core-types = { workspace = true } +moonbeam-rpc-primitives-debug = { workspace = true } + +# Substrate +sc-client-api = { workspace = true } +sc-network = { workspace = true } +sc-utils = { workspace = true } +sp-api = { workspace = true, features = ["std"] } +sp-block-builder = { workspace = true } +sp-blockchain = { workspace = true } +sp-io = { workspace = true, features = ["std"] } +sp-runtime = { workspace = true, features = ["std"] } +sp-std = { workspace = true, features = ["std"] } +sp-transaction-pool = { workspace = true } + +# Frontier +fc-consensus = { workspace = true } +fc-rpc = { workspace = true, features = ["rpc-binary-search-estimate"] } +fc-rpc-core = { workspace = true } +fc-storage = { workspace = true } +fp-rpc = { workspace = true } diff --git a/vendor/rpc/trace/src/lib.rs b/vendor/rpc/trace/src/lib.rs new file mode 100644 index 0000000000..6ccb0afaa6 --- /dev/null +++ b/vendor/rpc/trace/src/lib.rs @@ -0,0 +1,885 @@ +// Copyright 2019-2022 PureStake Inc. +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . + +//! `trace_filter` RPC handler and its associated service task. +//! The RPC handler rely on `CacheTask` which provides a future that must be run inside a tokio +//! executor. +//! +//! The implementation is composed of multiple tasks : +//! - Many calls the the RPC handler `Trace::filter`, communicating with the main task. +//! - A main `CacheTask` managing the cache and the communication between tasks. +//! - For each traced block an async task responsible to wait for a permit, spawn a blocking +//! task and waiting for the result, then send it to the main `CacheTask`. + +use futures::{select, stream::FuturesUnordered, FutureExt, SinkExt, StreamExt}; +use std::{collections::BTreeMap, future::Future, marker::PhantomData, sync::Arc, time::Duration}; +use tokio::{ + sync::{mpsc, oneshot, Semaphore}, + time::sleep, +}; +use tracing::{instrument, Instrument}; + +use sc_client_api::backend::{Backend, StateBackend, StorageProvider}; +use sc_utils::mpsc::TracingUnboundedSender; +use sp_api::{ApiExt, Core, HeaderT, ProvideRuntimeApi}; +use sp_block_builder::BlockBuilder; +use sp_blockchain::{ + Backend as BlockchainBackend, Error as BlockChainError, HeaderBackend, HeaderMetadata, +}; +use sp_runtime::traits::{BlakeTwo256, Block as BlockT}; + +use ethereum_types::H256; +use fc_rpc::OverrideHandle; +use fp_rpc::EthereumRuntimeRPCApi; + +use moonbeam_client_evm_tracing::{ + formatters::ResponseFormatter, + types::block::{self, TransactionTrace}, +}; +pub use moonbeam_rpc_core_trace::{FilterRequest, TraceServer}; +use moonbeam_rpc_core_types::{RequestBlockId, RequestBlockTag}; +use 
moonbeam_rpc_primitives_debug::DebugRuntimeApi; + +type TxsTraceRes = Result, String>; + +/// RPC handler. Will communicate with a `CacheTask` through a `CacheRequester`. +pub struct Trace { + _phantom: PhantomData, + client: Arc, + requester: CacheRequester, + max_count: u32, +} + +impl Clone for Trace { + fn clone(&self) -> Self { + Self { + _phantom: PhantomData::default(), + client: Arc::clone(&self.client), + requester: self.requester.clone(), + max_count: self.max_count, + } + } +} + +impl Trace +where + B: BlockT + Send + Sync + 'static, + B::Header: HeaderT, + C: HeaderMetadata + HeaderBackend, + C: Send + Sync + 'static, +{ + /// Create a new RPC handler. + pub fn new(client: Arc, requester: CacheRequester, max_count: u32) -> Self { + Self { + client, + requester, + max_count, + _phantom: PhantomData::default(), + } + } + + /// Convert an optional block ID (number or tag) to a block height. + fn block_id(&self, id: Option) -> Result { + match id { + Some(RequestBlockId::Number(n)) => Ok(n), + None | Some(RequestBlockId::Tag(RequestBlockTag::Latest)) => { + Ok(self.client.info().best_number) + } + Some(RequestBlockId::Tag(RequestBlockTag::Earliest)) => Ok(0), + Some(RequestBlockId::Tag(RequestBlockTag::Pending)) => { + Err("'pending' is not supported") + } + Some(RequestBlockId::Hash(_)) => Err("Block hash not supported"), + } + } + + /// `trace_filter` endpoint (wrapped in the trait implementation with futures compatibilty) + async fn filter(self, req: FilterRequest) -> TxsTraceRes { + let from_block = self.block_id(req.from_block)?; + let to_block = self.block_id(req.to_block)?; + let block_heights = from_block..=to_block; + + let count = req.count.unwrap_or(self.max_count); + if count > self.max_count { + return Err(format!( + "count ({}) can't be greater than maximum ({})", + count, self.max_count + )); + } + + // Build a list of all the Substrate block hashes that need to be traced. 
+ let mut block_hashes = vec![]; + for block_height in block_heights { + if block_height == 0 { + continue; // no traces for genesis block. + } + + let block_hash = self + .client + .hash(block_height) + .map_err(|e| { + format!( + "Error when fetching block {} header : {:?}", + block_height, e + ) + })? + .ok_or_else(|| format!("Block with height {} don't exist", block_height))?; + + block_hashes.push(block_hash); + } + + // Start a batch with these blocks. + let batch_id = self.requester.start_batch(block_hashes.clone()).await?; + // Fetch all the traces. It is done in another function to simplify error handling and allow + // to call the following `stop_batch` regardless of the result. This is important for the + // cache cleanup to work properly. + let res = self.fetch_traces(req, &block_hashes, count as usize).await; + // Stop the batch, allowing the cache task to remove useless non-started block traces and + // start the expiration delay. + self.requester.stop_batch(batch_id).await; + + res + } + + async fn fetch_traces( + &self, + req: FilterRequest, + block_hashes: &[H256], + count: usize, + ) -> TxsTraceRes { + let from_address = req.from_address.unwrap_or_default(); + let to_address = req.to_address.unwrap_or_default(); + + let mut traces_amount: i64 = -(req.after.unwrap_or(0) as i64); + let mut traces = vec![]; + + for &block_hash in block_hashes { + // Request the traces of this block to the cache service. + // This will resolve quickly if the block is already cached, or wait until the block + // has finished tracing. + let block_traces = self.requester.get_traces(block_hash).await?; + + // Filter addresses. + let mut block_traces: Vec<_> = block_traces + .iter() + .filter(|trace| match trace.action { + block::TransactionTraceAction::Call { from, to, .. } => { + (from_address.is_empty() || from_address.contains(&from)) + && (to_address.is_empty() || to_address.contains(&to)) + } + block::TransactionTraceAction::Create { from, .. 
} => { + (from_address.is_empty() || from_address.contains(&from)) + && to_address.is_empty() + } + block::TransactionTraceAction::Suicide { address, .. } => { + (from_address.is_empty() || from_address.contains(&address)) + && to_address.is_empty() + } + }) + .cloned() + .collect(); + + // Don't insert anything if we're still before "after" + traces_amount += block_traces.len() as i64; + if traces_amount > 0 { + let traces_amount = traces_amount as usize; + // If the current Vec of traces is across the "after" marker, + // we skip some elements of it. + if traces_amount < block_traces.len() { + let skip = block_traces.len() - traces_amount; + block_traces = block_traces.into_iter().skip(skip).collect(); + } + + traces.append(&mut block_traces); + + // If we go over "count" (the limit), we trim and exit the loop, + // unless we used the default maximum, in which case we return an error. + if traces_amount >= count { + if req.count.is_none() { + return Err(format!( + "the amount of traces goes over the maximum ({}), please use 'after' \ + and 'count' in your request", + self.max_count + )); + } + + traces = traces.into_iter().take(count).collect(); + break; + } + } + } + + Ok(traces) + } +} + +#[jsonrpsee::core::async_trait] +impl TraceServer for Trace +where + B: BlockT + Send + Sync + 'static, + B::Header: HeaderT, + C: HeaderMetadata + HeaderBackend, + C: Send + Sync + 'static, +{ + async fn filter( + &self, + filter: FilterRequest, + ) -> jsonrpsee::core::RpcResult> { + self.clone() + .filter(filter) + .await + .map_err(|e| fc_rpc::internal_err(e)) + } +} + +/// An opaque batch ID. +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] +pub struct CacheBatchId(u64); + +/// Requests the cache task can accept. +enum CacheRequest { + /// Request to start caching the provided range of blocks. + /// The task will add to blocks to its pool and immediately return a new batch ID. + StartBatch { + /// Returns the ID of the batch for cancellation. 
+ sender: oneshot::Sender, + /// List of block hash to trace. + blocks: Vec, + }, + /// Fetch the traces for given block hash. + /// The task will answer only when it has processed this block. + GetTraces { + /// Returns the array of traces or an error. + sender: oneshot::Sender, + /// Hash of the block. + block: H256, + }, + /// Notify the cache that it can stop the batch with that ID. Any block contained only in + /// this batch and still not started will be discarded. + StopBatch { batch_id: CacheBatchId }, +} + +/// Allows to interact with the cache task. +#[derive(Clone)] +pub struct CacheRequester(TracingUnboundedSender); + +impl CacheRequester { + /// Request to start caching the provided range of blocks. + /// The task will add to blocks to its pool and immediately return the batch ID. + #[instrument(skip(self))] + pub async fn start_batch(&self, blocks: Vec) -> Result { + let (response_tx, response_rx) = oneshot::channel(); + let mut sender = self.0.clone(); + + sender + .send(CacheRequest::StartBatch { + sender: response_tx, + blocks, + }) + .await + .map_err(|e| { + format!( + "Failed to send request to the trace cache task. Error : {:?}", + e + ) + })?; + + response_rx.await.map_err(|e| { + format!( + "Trace cache task closed the response channel. Error : {:?}", + e + ) + }) + } + + /// Fetch the traces for given block hash. + /// The task will answer only when it has processed this block. + /// The block should be part of a batch first. If no batch has requested the block it will + /// return an error. + #[instrument(skip(self))] + pub async fn get_traces(&self, block: H256) -> TxsTraceRes { + let (response_tx, response_rx) = oneshot::channel(); + let mut sender = self.0.clone(); + + sender + .send(CacheRequest::GetTraces { + sender: response_tx, + block, + }) + .await + .map_err(|e| { + format!( + "Failed to send request to the trace cache task. 
Error : {:?}", + e + ) + })?; + + response_rx + .await + .map_err(|e| { + format!( + "Trace cache task closed the response channel. Error : {:?}", + e + ) + })? + .map_err(|e| format!("Failed to replay block. Error : {:?}", e)) + } + + /// Notify the cache that it can stop the batch with that ID. Any block contained only in + /// this batch and still in the waiting pool will be discarded. + #[instrument(skip(self))] + pub async fn stop_batch(&self, batch_id: CacheBatchId) { + let mut sender = self.0.clone(); + + // Here we don't care if the request has been accepted or refused, the caller can't + // do anything with it. + let _ = sender + .send(CacheRequest::StopBatch { batch_id }) + .await + .map_err(|e| { + format!( + "Failed to send request to the trace cache task. Error : {:?}", + e + ) + }); + } +} + +/// Data stored for each block in the cache. +/// `active_batch_count` represents the number of batches using this +/// block. It will increase immediatly when a batch is created, but will be +/// decrease only after the batch ends and its expiration delay passes. +/// It allows to keep the data in the cache for following requests that would use +/// this block, which is important to handle pagination efficiently. +struct CacheBlock { + active_batch_count: usize, + state: CacheBlockState, +} + +/// State of a cached block. It can either be polled to be traced or cached. +enum CacheBlockState { + /// Block has been added to the pool blocks to be replayed. + /// It may be currently waiting to be replayed or being replayed. + Pooled { + started: bool, + /// Multiple requests might query the same block while it is pooled to be + /// traced. They response channel is stored here, and the result will be + /// sent in all of them when the tracing is finished. + waiting_requests: Vec>, + /// Channel used to unqueue a tracing that has not yet started. 
+ /// A tracing will be unqueued if it has not yet been started and the last batch + /// needing this block is ended (ignoring the expiration delay). + /// It is not used directly, but dropping will wake up the receiver. + #[allow(dead_code)] + unqueue_sender: oneshot::Sender<()>, + }, + /// Tracing has completed and the result is available. No Runtime API call + /// will be needed until this block cache is removed. + Cached { traces: TxsTraceRes }, +} + +/// Tracing a block is done in a separate tokio blocking task to avoid clogging the async threads. +/// For this reason a channel using this type is used by the blocking task to communicate with the +/// main cache task. +enum BlockingTaskMessage { + /// Notify the tracing for this block has started as the blocking task got a permit from + /// the semaphore. This is used to prevent the deletion of a cache entry for a block that has + /// started being traced. + Started { block_hash: H256 }, + /// The tracing is finished and the result is send to the main task. + Finished { + block_hash: H256, + result: TxsTraceRes, + }, +} + +/// Type wrapper for the cache task, generic over the Client, Block and Backend types. +pub struct CacheTask { + client: Arc, + backend: Arc, + blocking_permits: Arc, + cached_blocks: BTreeMap, + batches: BTreeMap>, + next_batch_id: u64, + _phantom: PhantomData, +} + +impl CacheTask +where + BE: Backend + 'static, + BE::State: StateBackend, + C: ProvideRuntimeApi, + C: StorageProvider, + C: HeaderMetadata + HeaderBackend, + C: Send + Sync + 'static, + B: BlockT + Send + Sync + 'static, + B::Header: HeaderT, + C::Api: BlockBuilder, + C::Api: DebugRuntimeApi, + C::Api: EthereumRuntimeRPCApi, + C::Api: ApiExt, +{ + /// Create a new cache task. + /// + /// Returns a Future that needs to be added to a tokio executor, and an handle allowing to + /// send requests to the task. 
+ pub fn create( + client: Arc, + backend: Arc, + cache_duration: Duration, + blocking_permits: Arc, + overrides: Arc>, + ) -> (impl Future, CacheRequester) { + // Communication with the outside world : + let (requester_tx, mut requester_rx) = + sc_utils::mpsc::tracing_unbounded("trace-filter-cache", 100_000); + + // Task running in the service. + let task = async move { + // The following variables are polled by the select! macro, and thus cannot be + // part of Self without introducing borrowing issues. + let mut batch_expirations = FuturesUnordered::new(); + let (blocking_tx, mut blocking_rx) = + mpsc::channel(blocking_permits.available_permits() * 2); + + // Contains the inner state of the cache task, excluding the pooled futures/channels. + // Having this object allow to refactor each event into its own function, simplifying + // the main loop. + let mut inner = Self { + client, + backend, + blocking_permits, + cached_blocks: BTreeMap::new(), + batches: BTreeMap::new(), + next_batch_id: 0, + _phantom: Default::default(), + }; + + // Main event loop. This loop must not contain any direct .await, as we want to + // react to events as fast as possible. + loop { + select! 
{ + request = requester_rx.next() => { + match request { + None => break, + Some(CacheRequest::StartBatch {sender, blocks}) + => inner.request_start_batch(&blocking_tx, sender, blocks, overrides.clone()), + Some(CacheRequest::GetTraces {sender, block}) + => inner.request_get_traces(sender, block), + Some(CacheRequest::StopBatch {batch_id}) => { + // Cannot be refactored inside `request_stop_batch` because + // it has an unnamable type :C + batch_expirations.push(async move { + sleep(cache_duration).await; + batch_id + }); + + inner.request_stop_batch(batch_id); + }, + } + }, + message = blocking_rx.recv().fuse() => { + match message { + None => (), + Some(BlockingTaskMessage::Started { block_hash }) + => inner.blocking_started(block_hash), + Some(BlockingTaskMessage::Finished { block_hash, result }) + => inner.blocking_finished(block_hash, result), + } + }, + batch_id = batch_expirations.next() => { + match batch_id { + None => (), + Some(batch_id) => inner.expired_batch(batch_id), + } + } + } + } + } + .instrument(tracing::debug_span!("trace_filter_cache")); + + (task, CacheRequester(requester_tx)) + } + + /// Handle the creation of a batch. + /// Will start the tracing process for blocks that are not already in the cache. + #[instrument(skip(self, blocking_tx, sender, blocks, overrides))] + fn request_start_batch( + &mut self, + blocking_tx: &mpsc::Sender, + sender: oneshot::Sender, + blocks: Vec, + overrides: Arc>, + ) { + tracing::trace!("Starting batch {}", self.next_batch_id); + self.batches.insert(self.next_batch_id, blocks.clone()); + + for block in blocks { + // The block is already in the cache, awesome ! + if let Some(block_cache) = self.cached_blocks.get_mut(&block) { + block_cache.active_batch_count += 1; + tracing::trace!( + "Cache hit for block {}, now used by {} batches.", + block, + block_cache.active_batch_count + ); + } + // Otherwise we need to queue this block for tracing. 
+ else {
+ tracing::trace!("Cache miss for block {}, pooling it for tracing.", block);
+
+ let blocking_permits = Arc::clone(&self.blocking_permits);
+ let (unqueue_sender, unqueue_receiver) = oneshot::channel();
+ let client = Arc::clone(&self.client);
+ let backend = Arc::clone(&self.backend);
+ let blocking_tx = blocking_tx.clone();
+ let overrides = overrides.clone();
+
+ // Spawn all block caching asynchronously.
+ // It will wait to obtain a permit, then spawn a blocking task.
+ // When the blocking task returns its result, it is sent
+ // through a channel to the main task loop.
+ tokio::spawn(
+ async move {
+ tracing::trace!("Waiting for blocking permit or task cancellation");
+ let _permit = select!(
+ _ = unqueue_receiver.fuse() => {
+ tracing::trace!("Tracing of the block has been cancelled.");
+ return;
+ },
+ permit = blocking_permits.acquire().fuse() => permit,
+ );
+
+ // Warn the main task that block tracing has started, and
+ // this block cache entry should not be removed.
+ let _ = blocking_tx
+ .send(BlockingTaskMessage::Started { block_hash: block })
+ .await;
+
+ tracing::trace!("Start block tracing in a blocking task.");
+
+ // Perform block tracing in a tokio blocking task.
+ let result = async {
+ tokio::task::spawn_blocking(move || {
+ Self::cache_block(client, backend, block, overrides.clone())
+ })
+ .await
+ .map_err(|e| {
+ format!("Tracing Substrate block {} panicked : {:?}", block, e)
+ })?
+ }
+ .await
+ .map_err(|e| e.to_string());
+
+ tracing::trace!("Block tracing finished, sending result to main task.");
+
+ // Send response to main task.
+ let _ = blocking_tx
+ .send(BlockingTaskMessage::Finished {
+ block_hash: block,
+ result,
+ })
+ .await;
+ }
+ .instrument(tracing::trace_span!("Block tracing", block = %block)),
+ );
+
+ // Insert the block in the cache.
+ self.cached_blocks.insert(
+ block,
+ CacheBlock {
+ active_batch_count: 1,
+ state: CacheBlockState::Pooled {
+ started: false,
+ waiting_requests: vec![],
+ unqueue_sender,
+ },
+ },
+ );
+ }
+ }
+
+ // Respond with the batch ID.
+ let _ = sender.send(CacheBatchId(self.next_batch_id));
+
+ // Increase batch ID for next request.
+ self.next_batch_id = self.next_batch_id.overflowing_add(1).0;
+ }
+
+ /// Handle a request to get the traces of the provided block.
+ /// - If the result is stored in the cache, it sends it immediately.
+ /// - If the block is currently being pooled, it is added to this block's cache waiting list,
+ /// and all requests concerning this block will be satisfied when the tracing for this block
+ /// is finished.
+ /// - If this block is missing from the cache, it means no batch asked for it. All requested
+ /// blocks should be contained in a batch beforehand, and thus an error is returned.
+ #[instrument(skip(self))]
+ fn request_get_traces(&mut self, sender: oneshot::Sender, block: H256) {
+ if let Some(block_cache) = self.cached_blocks.get_mut(&block) {
+ match &mut block_cache.state {
+ CacheBlockState::Pooled {
+ ref mut waiting_requests,
+ ..
+ } => {
+ tracing::warn!(
+ "A request asked a pooled block ({}), adding it to the list of \
+ waiting requests.",
+ block
+ );
+ waiting_requests.push(sender);
+ }
+ CacheBlockState::Cached { traces, .. } => {
+ tracing::warn!(
+ "A request asked a cached block ({}), sending the traces directly.",
+ block
+ );
+ let _ = sender.send(traces.clone());
+ }
+ }
+ } else {
+ tracing::warn!(
+ "An RPC request asked to get a block ({}) which was not batched.",
+ block
+ );
+ let _ = sender.send(Err(format!(
+ "RPC request asked a block ({}) that was not batched",
+ block
+ )));
+ }
+ }
+
+ /// Handle a request to stop a batch.
+ /// For all blocks that needed to be traced, are only in this batch and not yet started, their
+ /// tracing is cancelled to save CPU-time and avoid attacks requesting large amounts of blocks.
+ /// This batch data is not yet removed however. Instead an expiration delay timer is started
+ /// after which the data will indeed be cleared. (the code for that is in the main loop code
+ /// as it involved an unnamable type :C)
+ #[instrument(skip(self))]
+ fn request_stop_batch(&mut self, batch_id: CacheBatchId) {
+ tracing::trace!("Stopping batch {}", batch_id.0);
+ if let Some(blocks) = self.batches.get(&batch_id.0) {
+ for block in blocks {
+ let mut remove = false;
+
+ // We remove early the block cache if this batch is the last
+ // pooling this block.
+ if let Some(block_cache) = self.cached_blocks.get_mut(block) {
+ if block_cache.active_batch_count == 1
+ && matches!(
+ block_cache.state,
+ CacheBlockState::Pooled { started: false, .. }
+ )
+ {
+ remove = true;
+ }
+ }
+
+ if remove {
+ tracing::trace!("Pooled block {} is no longer requested.", block);
+ // Remove block from the cache. Drops the value,
+ // closing all the channels contained in it.
+ let _ = self.cached_blocks.remove(&block);
+ }
+ }
+ }
+ }
+
+ /// A tracing blocking task notifies it got a permit and is starting the tracing.
+ /// This started status is stored to avoid removing this block entry.
+ #[instrument(skip(self))]
+ fn blocking_started(&mut self, block_hash: H256) {
+ if let Some(block_cache) = self.cached_blocks.get_mut(&block_hash) {
+ if let CacheBlockState::Pooled {
+ ref mut started, ..
+ } = block_cache.state
+ {
+ *started = true;
+ }
+ }
+ }
+
+ /// A tracing blocking task notifies it has finished the tracing and provides the result.
+ #[instrument(skip(self, result))]
+ fn blocking_finished(&mut self, block_hash: H256, result: TxsTraceRes) {
+ // In some cases it might be possible to receive traces of a block
+ // that has no entry in the cache because it was removed from the pool
+ // and received a permit concurrently. We just ignore it.
+ //
+ // TODO : Should we add it back ? Should it have an active_batch_count
+ // of 1 then ?
+ if let Some(block_cache) = self.cached_blocks.get_mut(&block_hash) {
+ if let CacheBlockState::Pooled {
+ ref mut waiting_requests,
+ ..
+ } = block_cache.state
+ {
+ tracing::trace!(
+ "A new block ({}) has been traced, adding it to the cache and responding to \
+ {} waiting requests.",
+ block_hash,
+ waiting_requests.len()
+ );
+ // Send result in waiting channels
+ while let Some(channel) = waiting_requests.pop() {
+ let _ = channel.send(result.clone());
+ }
+
+ // Update cache entry
+ block_cache.state = CacheBlockState::Cached { traces: result };
+ }
+ }
+ }
+
+ /// A batch expiration delay timer has completed. It performs the cache cleaning for blocks
+ /// no longer used by other batches.
+ #[instrument(skip(self))]
+ fn expired_batch(&mut self, batch_id: CacheBatchId) {
+ if let Some(batch) = self.batches.remove(&batch_id.0) {
+ for block in batch {
+ // For each block of the batch, we remove it if it was the
+ // last batch containing it.
+ let mut remove = false;
+ if let Some(block_cache) = self.cached_blocks.get_mut(&block) {
+ block_cache.active_batch_count -= 1;
+
+ if block_cache.active_batch_count == 0 {
+ remove = true;
+ }
+ }
+
+ if remove {
+ let _ = self.cached_blocks.remove(&block);
+ }
+ }
+ }
+ }
+
+ /// (In blocking task) Use the Runtime API to trace the block.
+ #[instrument(skip(client, backend, overrides))]
+ fn cache_block(
+ client: Arc,
+ backend: Arc,
+ substrate_hash: H256,
+ overrides: Arc>,
+ ) -> TxsTraceRes {
+ // Get Substrate block data.
+ let api = client.runtime_api(); + let block_header = client + .header(substrate_hash) + .map_err(|e| { + format!( + "Error when fetching substrate block {} header : {:?}", + substrate_hash, e + ) + })? + .ok_or_else(|| format!("Subtrate block {} don't exist", substrate_hash))?; + + let height = *block_header.number(); + let substrate_parent_id = *block_header.parent_hash(); + + let schema = fc_storage::onchain_storage_schema(client.as_ref(), substrate_hash); + + // Get Ethereum block data. + let (eth_block, eth_transactions) = match overrides.schemas.get(&schema) { + Some(schema) => match ( + schema.current_block(substrate_hash), + schema.current_transaction_statuses(substrate_hash), + ) { + (Some(a), Some(b)) => (a, b), + _ => { + return Err(format!( + "Failed to get Ethereum block data for Substrate block {}", + substrate_hash + )) + } + }, + _ => return Err(format!("No storage override at {:?}", substrate_hash)), + }; + + let eth_block_hash = eth_block.header.hash(); + let eth_tx_hashes = eth_transactions + .iter() + .map(|t| t.transaction_hash) + .collect(); + + // Get extrinsics (containing Ethereum ones) + let extrinsics = backend + .blockchain() + .body(substrate_hash) + .map_err(|e| { + format!( + "Blockchain error when fetching extrinsics of block {} : {:?}", + height, e + ) + })? + .ok_or_else(|| format!("Could not find block {} when fetching extrinsics.", height))?; + + // Trace the block. + let f = || -> Result<_, String> { + api.initialize_block(substrate_parent_id, &block_header) + .map_err(|e| format!("Runtime api access error: {:?}", e))?; + + let _result = api + .trace_block(substrate_parent_id, extrinsics, eth_tx_hashes) + .map_err(|e| format!("Blockchain error when replaying block {} : {:?}", height, e))? 
+ .map_err(|e| { + tracing::warn!( + "Internal runtime error when replaying block {} : {:?}", + height, + e + ); + format!( + "Internal runtime error when replaying block {} : {:?}", + height, e + ) + })?; + Ok(moonbeam_rpc_primitives_debug::Response::Block) + }; + + let mut proxy = moonbeam_client_evm_tracing::listeners::CallList::default(); + proxy.using(f)?; + let mut traces: Vec<_> = + moonbeam_client_evm_tracing::formatters::TraceFilter::format(proxy) + .ok_or("Fail to format proxy")?; + // Fill missing data. + for trace in traces.iter_mut() { + trace.block_hash = eth_block_hash; + trace.block_number = height; + trace.transaction_hash = eth_transactions + .get(trace.transaction_position as usize) + .ok_or_else(|| { + tracing::warn!( + "Bug: A transaction has been replayed while it shouldn't (in block {}).", + height + ); + + format!( + "Bug: A transaction has been replayed while it shouldn't (in block {}).", + height + ) + })? + .transaction_hash; + + // Reformat error messages. + if let block::TransactionTraceOutput::Error(ref mut error) = trace.output { + if error.as_slice() == b"execution reverted" { + *error = b"Reverted".to_vec(); + } + } + } + Ok(traces) + } +} diff --git a/vendor/rpc/txpool/Cargo.toml b/vendor/rpc/txpool/Cargo.toml new file mode 100644 index 0000000000..7678395ce0 --- /dev/null +++ b/vendor/rpc/txpool/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "moonbeam-rpc-txpool" +authors = ["PureStake"] +edition = "2021" +homepage = "https://moonbeam.network" +license = "GPL-3.0-only" +repository = "https://github.com/PureStake/moonbeam/" +version = "0.6.0" + +[dependencies] +jsonrpsee = { workspace = true, features = ["macros", "server"] } +rlp = { workspace = true } +serde = { workspace = true } +sha3 = { workspace = true, features = ["std"] } + +# Moonbeam +moonbeam-rpc-core-txpool = { workspace = true } +moonbeam-rpc-primitives-txpool = { workspace = true } + +# Substrate +frame-system = { workspace = true, features = ["std"] } 
+sc-transaction-pool = { workspace = true } +sc-transaction-pool-api = { workspace = true } +sp-api = { workspace = true, features = ["std"] } +sp-blockchain = { workspace = true } +sp-io = { workspace = true, features = ["std"] } +sp-runtime = { workspace = true, features = ["std"] } +sp-std = { workspace = true, features = ["std"] } + +# Frontier +ethereum-types = { workspace = true, features = ["std"] } +fc-rpc = { workspace = true, features = ["rpc-binary-search-estimate"] } diff --git a/vendor/rpc/txpool/src/lib.rs b/vendor/rpc/txpool/src/lib.rs new file mode 100644 index 0000000000..bdac226817 --- /dev/null +++ b/vendor/rpc/txpool/src/lib.rs @@ -0,0 +1,185 @@ +// Copyright 2019-2022 PureStake Inc. +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . 
+ +use ethereum_types::{H160, H256, U256}; +use fc_rpc::{internal_err, public_key}; +use jsonrpsee::core::RpcResult; +pub use moonbeam_rpc_core_txpool::{ + GetT, Summary, Transaction, TransactionMap, TxPoolResult, TxPoolServer, +}; +use sc_transaction_pool::{ChainApi, Pool}; +use sc_transaction_pool_api::InPoolTransaction; +use serde::Serialize; +use sha3::{Digest, Keccak256}; +use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; +use sp_runtime::traits::Block as BlockT; +use std::collections::HashMap; +use std::{marker::PhantomData, sync::Arc}; + +use moonbeam_rpc_primitives_txpool::{ + Transaction as TransactionV2, TxPoolResponse, TxPoolRuntimeApi, +}; + +pub struct TxPool { + client: Arc, + graph: Arc>, + _marker: PhantomData, +} + +impl TxPool +where + C: ProvideRuntimeApi, + C: HeaderMetadata + HeaderBackend + 'static, + C: Send + Sync + 'static, + B: BlockT + Send + Sync + 'static, + A: ChainApi + 'static, + C::Api: TxPoolRuntimeApi, +{ + /// Use the transaction graph interface to get the extrinsics currently in the ready and future + /// queues. + fn map_build(&self) -> RpcResult>> + where + T: GetT + Serialize, + { + // Collect transactions in the ready validated pool. + let txs_ready = self + .graph + .validated_pool() + .ready() + .map(|in_pool_tx| in_pool_tx.data().clone()) + .collect(); + + // Collect transactions in the future validated pool. + let txs_future = self + .graph + .validated_pool() + .futures() + .iter() + .map(|(_hash, extrinsic)| extrinsic.clone()) + .collect(); + + // Use the runtime to match the (here) opaque extrinsics against ethereum transactions. 
+ let best_block_hash = self.client.info().best_hash; + let api = self.client.runtime_api(); + let api_version = if let Ok(Some(api_version)) = + api.api_version::>(best_block_hash) + { + api_version + } else { + return Err(internal_err( + "failed to retrieve Runtime Api version".to_string(), + )); + }; + let ethereum_txns: TxPoolResponse = if api_version == 1 { + #[allow(deprecated)] + let res = api + .extrinsic_filter_before_version_2(best_block_hash, txs_ready, txs_future) + .map_err(|err| { + internal_err(format!("fetch runtime extrinsic filter failed: {:?}", err)) + })?; + TxPoolResponse { + ready: res + .ready + .iter() + .map(|t| TransactionV2::Legacy(t.clone())) + .collect(), + future: res + .future + .iter() + .map(|t| TransactionV2::Legacy(t.clone())) + .collect(), + } + } else { + api.extrinsic_filter(best_block_hash, txs_ready, txs_future) + .map_err(|err| { + internal_err(format!("fetch runtime extrinsic filter failed: {:?}", err)) + })? + }; + // Build the T response. + let mut pending = TransactionMap::::new(); + for txn in ethereum_txns.ready.iter() { + let hash = txn.hash(); + let nonce = match txn { + TransactionV2::Legacy(t) => t.nonce, + TransactionV2::EIP2930(t) => t.nonce, + TransactionV2::EIP1559(t) => t.nonce, + }; + let from_address = match public_key(txn) { + Ok(pk) => H160::from(H256::from_slice(Keccak256::digest(&pk).as_slice())), + Err(_e) => H160::default(), + }; + pending + .entry(from_address) + .or_insert_with(HashMap::new) + .insert(nonce, T::get(hash, from_address, txn)); + } + let mut queued = TransactionMap::::new(); + for txn in ethereum_txns.future.iter() { + let hash = txn.hash(); + let nonce = match txn { + TransactionV2::Legacy(t) => t.nonce, + TransactionV2::EIP2930(t) => t.nonce, + TransactionV2::EIP1559(t) => t.nonce, + }; + let from_address = match public_key(txn) { + Ok(pk) => H160::from(H256::from_slice(Keccak256::digest(&pk).as_slice())), + Err(_e) => H160::default(), + }; + queued + .entry(from_address) + 
.or_insert_with(HashMap::new) + .insert(nonce, T::get(hash, from_address, txn)); + } + Ok(TxPoolResult { pending, queued }) + } +} + +impl TxPool { + pub fn new(client: Arc, graph: Arc>) -> Self { + Self { + client, + graph, + _marker: PhantomData, + } + } +} + +impl TxPoolServer for TxPool +where + C: ProvideRuntimeApi, + C: HeaderMetadata + HeaderBackend, + C: Send + Sync + 'static, + B: BlockT + Send + Sync + 'static, + A: ChainApi + 'static, + C::Api: TxPoolRuntimeApi, +{ + fn content(&self) -> RpcResult>> { + self.map_build::() + } + + fn inspect(&self) -> RpcResult>> { + self.map_build::() + } + + fn status(&self) -> RpcResult> { + let status = self.graph.validated_pool().status(); + Ok(TxPoolResult { + pending: U256::from(status.ready), + queued: U256::from(status.future), + }) + } +} diff --git a/vendor/runtime/evm-tracer/Cargo.toml b/vendor/runtime/evm-tracer/Cargo.toml new file mode 100644 index 0000000000..8c6a62159b --- /dev/null +++ b/vendor/runtime/evm-tracer/Cargo.toml @@ -0,0 +1,47 @@ +[package] +name = "moonbeam-evm-tracer" +authors = ["PureStake"] +edition = "2021" +homepage = "https://moonbeam.network" +license = "GPL-3.0-only" +repository = "https://github.com/PureStake/moonbeam/" +version = "0.1.0" + +[dependencies] + +# Moonbeam +evm-tracing-events = { workspace = true, features = ["evm-tracing"] } +moonbeam-primitives-ext = { workspace = true } + +# Substrate +parity-scale-codec = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } + +# Frontier +ethereum-types = { workspace = true } +evm = { workspace = true, features = ["with-codec"] } +evm-gasometer = { workspace = true } +evm-runtime = { workspace = true } +fp-evm = { workspace = true } +pallet-evm = { workspace = true } + +[features] +default = ["std"] +std = [ + "parity-scale-codec/std", + "ethereum-types/std", + "evm-gasometer/std", + "evm-runtime/std", + "evm-tracing-events/std", + 
"evm/std", + "evm/with-serde", + "fp-evm/std", + "moonbeam-primitives-ext/std", + "pallet-evm/std", + "sp-core/std", + "sp-runtime/std", + "sp-std/std", +] diff --git a/vendor/runtime/evm-tracer/src/lib.rs b/vendor/runtime/evm-tracer/src/lib.rs new file mode 100644 index 0000000000..fe4a314650 --- /dev/null +++ b/vendor/runtime/evm-tracer/src/lib.rs @@ -0,0 +1,117 @@ +// Copyright 2019-2022 PureStake Inc. +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . + +//! Substrate EVM tracing. +//! +//! The purpose of this crate is enable tracing the EVM opcode execution and will be used by +//! both Dapp developers - to get a granular view on their transactions - and indexers to access +//! the EVM callstack (internal transactions). +//! +//! Proxies EVM messages to the host functions. 
+ +#![cfg_attr(not(feature = "std"), no_std)] + +pub mod tracer { + use evm_tracing_events::{EvmEvent, GasometerEvent, RuntimeEvent, StepEventFilter}; + use parity_scale_codec::Encode; + + use evm::tracing::{using as evm_using, EventListener as EvmListener}; + use evm_gasometer::tracing::{using as gasometer_using, EventListener as GasometerListener}; + use evm_runtime::tracing::{using as runtime_using, EventListener as RuntimeListener}; + use sp_std::{cell::RefCell, rc::Rc}; + + struct ListenerProxy(pub Rc>); + impl GasometerListener for ListenerProxy { + fn event(&mut self, event: evm_gasometer::tracing::Event) { + self.0.borrow_mut().event(event); + } + } + + impl RuntimeListener for ListenerProxy { + fn event(&mut self, event: evm_runtime::tracing::Event) { + self.0.borrow_mut().event(event); + } + } + + impl EvmListener for ListenerProxy { + fn event(&mut self, event: evm::tracing::Event) { + self.0.borrow_mut().event(event); + } + } + + pub struct EvmTracer { + step_event_filter: StepEventFilter, + } + + impl EvmTracer { + pub fn new() -> Self { + Self { + step_event_filter: moonbeam_primitives_ext::moonbeam_ext::step_event_filter(), + } + } + + /// Setup event listeners and execute provided closure. + /// + /// Consume the tracer and return it alongside the return value of + /// the closure. + pub fn trace R>(self, f: F) { + let wrapped = Rc::new(RefCell::new(self)); + + let mut gasometer = ListenerProxy(Rc::clone(&wrapped)); + let mut runtime = ListenerProxy(Rc::clone(&wrapped)); + let mut evm = ListenerProxy(Rc::clone(&wrapped)); + + // Each line wraps the previous `f` into a `using` call. + // Listening to new events results in adding one new line. + // Order is irrelevant when registering listeners. 
+ let f = || runtime_using(&mut runtime, f); + let f = || gasometer_using(&mut gasometer, f); + let f = || evm_using(&mut evm, f); + f(); + } + + pub fn emit_new() { + moonbeam_primitives_ext::moonbeam_ext::call_list_new(); + } + } + + impl EvmListener for EvmTracer { + /// Proxies `evm::tracing::Event` to the host. + fn event(&mut self, event: evm::tracing::Event) { + let event: EvmEvent = event.into(); + let message = event.encode(); + moonbeam_primitives_ext::moonbeam_ext::evm_event(message); + } + } + + impl GasometerListener for EvmTracer { + /// Proxies `evm_gasometer::tracing::Event` to the host. + fn event(&mut self, event: evm_gasometer::tracing::Event) { + let event: GasometerEvent = event.into(); + let message = event.encode(); + moonbeam_primitives_ext::moonbeam_ext::gasometer_event(message); + } + } + + impl RuntimeListener for EvmTracer { + /// Proxies `evm_runtime::tracing::Event` to the host. + fn event(&mut self, event: evm_runtime::tracing::Event) { + let event = RuntimeEvent::from_evm_event(event, self.step_event_filter); + let message = event.encode(); + moonbeam_primitives_ext::moonbeam_ext::runtime_event(message); + } + } +} diff --git a/vendor/runtime/ext/Cargo.toml b/vendor/runtime/ext/Cargo.toml new file mode 100644 index 0000000000..0a30218125 --- /dev/null +++ b/vendor/runtime/ext/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "moonbeam-primitives-ext" +authors = ["PureStake"] +edition = "2021" +homepage = "https://moonbeam.network" +license = "GPL-3.0-only" +repository = "https://github.com/PureStake/moonbeam/" +version = "0.1.0" + +[dependencies] +ethereum-types = { workspace = true } + +# Moonbeam +evm-tracing-events = { workspace = true } + +# Substrate +parity-scale-codec = { workspace = true } +sp-externalities = { workspace = true } +sp-runtime-interface = { workspace = true } +sp-std = { workspace = true } + +[features] +default = ["std"] +std = [ + "parity-scale-codec/std", + "ethereum-types/std", + "evm-tracing-events/std", + 
"sp-externalities/std", + "sp-runtime-interface/std", + "sp-std/std", +] diff --git a/vendor/runtime/ext/src/lib.rs b/vendor/runtime/ext/src/lib.rs new file mode 100644 index 0000000000..e3f8c4dc6c --- /dev/null +++ b/vendor/runtime/ext/src/lib.rs @@ -0,0 +1,82 @@ +// Copyright 2019-2022 PureStake Inc. +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . + +//! Environmental-aware externalities for EVM tracing in Wasm runtime. This enables +//! capturing the - potentially large - trace output data in the host and keep +//! a low memory footprint in `--execution=wasm`. +//! +//! - The original trace Runtime Api call is wrapped `using` environmental (thread local). +//! - Arguments are scale-encoded known types in the host. +//! - Host functions will decode the input and emit an event `with` environmental. + +#![cfg_attr(not(feature = "std"), no_std)] +use sp_runtime_interface::runtime_interface; + +use parity_scale_codec::Decode; +use sp_std::vec::Vec; + +use evm_tracing_events::{Event, EvmEvent, GasometerEvent, RuntimeEvent, StepEventFilter}; + +#[runtime_interface] +pub trait MoonbeamExt { + fn raw_step(&mut self, _data: Vec) {} + + fn raw_gas(&mut self, _data: Vec) {} + + fn raw_return_value(&mut self, _data: Vec) {} + + fn call_list_entry(&mut self, _index: u32, _value: Vec) {} + + fn call_list_new(&mut self) {} + + // New design, proxy events. 
+ /// An `Evm` event proxied by the Moonbeam runtime to this host function. + /// evm -> moonbeam_runtime -> host. + fn evm_event(&mut self, event: Vec) { + if let Ok(event) = EvmEvent::decode(&mut &event[..]) { + Event::Evm(event).emit(); + } + } + + /// A `Gasometer` event proxied by the Moonbeam runtime to this host function. + /// evm_gasometer -> moonbeam_runtime -> host. + fn gasometer_event(&mut self, event: Vec) { + if let Ok(event) = GasometerEvent::decode(&mut &event[..]) { + Event::Gasometer(event).emit(); + } + } + + /// A `Runtime` event proxied by the Moonbeam runtime to this host function. + /// evm_runtime -> moonbeam_runtime -> host. + fn runtime_event(&mut self, event: Vec) { + if let Ok(event) = RuntimeEvent::decode(&mut &event[..]) { + Event::Runtime(event).emit(); + } + } + + /// Allow the tracing module in the runtime to know how to filter Step event + /// content, as cloning the entire data is expensive and most of the time + /// not necessary. + fn step_event_filter(&self) -> StepEventFilter { + evm_tracing_events::step_event_filter().unwrap_or_default() + } + + /// An event to create a new CallList (currently a new transaction when tracing a block). + #[version(2)] + fn call_list_new(&mut self) { + Event::CallListNew().emit(); + } +}