From d0540a79967cb06cd7598a4965c7c06afc788b0c Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Fri, 9 Dec 2022 01:18:51 +0900 Subject: [PATCH 01/29] Introduce sensible weight constants (#12868) * Introduce sensible weight constants * cargo fmt * Remove unused import * Add missing import * ".git/.scripts/bench-bot.sh" pallet dev pallet_lottery Co-authored-by: command-bot <> --- bin/node-template/runtime/src/lib.rs | 6 +- bin/node/runtime/src/lib.rs | 7 ++- docs/Upgrading-2.0-to-3.0.md | 4 +- frame/babe/src/default_weights.rs | 21 ++++--- frame/contracts/src/tests.rs | 4 +- frame/democracy/src/tests.rs | 4 +- .../election-provider-multi-phase/src/mock.rs | 2 +- frame/fast-unstake/src/mock.rs | 4 +- frame/grandpa/src/default_weights.rs | 20 ++++--- frame/lottery/src/weights.rs | 58 +++++++++---------- .../src/default_weights.rs | 4 +- frame/offences/benchmarking/src/mock.rs | 4 +- frame/offences/src/mock.rs | 6 +- frame/staking/src/mock.rs | 2 +- frame/staking/src/tests.rs | 13 +++-- frame/support/src/weights/block_weights.rs | 9 +-- .../support/src/weights/extrinsic_weights.rs | 9 +-- frame/support/src/weights/paritydb_weights.rs | 12 ++-- frame/support/src/weights/rocksdb_weights.rs | 12 ++-- frame/system/src/limits.rs | 2 +- primitives/weights/src/lib.rs | 11 ++-- .../frame/benchmarking-cli/src/block/bench.rs | 4 +- .../benchmarking-cli/src/overhead/README.md | 6 +- .../benchmarking-cli/src/overhead/weights.hbs | 13 +++-- .../benchmarking-cli/src/storage/README.md | 4 +- .../benchmarking-cli/src/storage/weights.hbs | 12 ++-- 26 files changed, 140 insertions(+), 113 deletions(-) diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index 1d0e18d31bf80..938282c662b5c 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -32,7 +32,9 @@ pub use frame_support::{ ConstU128, ConstU32, ConstU64, ConstU8, KeyOwnerProofSystem, Randomness, StorageInfo, }, weights::{ - constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND}, + constants::{ + BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_REF_TIME_PER_SECOND, + }, IdentityFee, Weight, }, StorageValue, @@ -141,7 +143,7 @@ parameter_types! { /// We allow for 2 seconds of compute with a 6 second average block time. pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::with_sensible_defaults( - (2u64 * WEIGHT_PER_SECOND).set_proof_size(u64::MAX), + Weight::from_parts(2u64 * WEIGHT_REF_TIME_PER_SECOND, u64::MAX), NORMAL_DISPATCH_RATIO, ); pub BlockLength: frame_system::limits::BlockLength = frame_system::limits::BlockLength diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 44d8e287064f9..a754fac1da7ab 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -38,7 +38,9 @@ use frame_support::{ WithdrawReasons, }, weights::{ - constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND}, + constants::{ + BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_REF_TIME_PER_SECOND, + }, ConstantMultiplier, IdentityFee, Weight, }, PalletId, RuntimeDebug, @@ -173,7 +175,8 @@ const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); /// by Operational extrinsics. const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); /// We allow for 2 seconds of compute with a 6 second average block time, with maximum proof size. 
-const MAXIMUM_BLOCK_WEIGHT: Weight = WEIGHT_PER_SECOND.saturating_mul(2).set_proof_size(u64::MAX); +const MAXIMUM_BLOCK_WEIGHT: Weight = + Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND.saturating_mul(2), u64::MAX); parameter_types! { pub const BlockHashCount: BlockNumber = 2400; diff --git a/docs/Upgrading-2.0-to-3.0.md b/docs/Upgrading-2.0-to-3.0.md index 7540e0d5b5b8c..906018db9a707 100644 --- a/docs/Upgrading-2.0-to-3.0.md +++ b/docs/Upgrading-2.0-to-3.0.md @@ -100,12 +100,12 @@ And update the overall definition for weights on frame and a few related types a +/// by Operational extrinsics. +const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); +/// We allow for 2 seconds of compute with a 6 second average block time. -+const MAXIMUM_BLOCK_WEIGHT: Weight = 2u64 * WEIGHT_PER_SECOND; ++const MAXIMUM_BLOCK_WEIGHT: Weight = Weight::from_parts(2u64 * WEIGHT_REF_TIME_PER_SECOND, u64::MAX); + parameter_types! { pub const BlockHashCount: BlockNumber = 2400; - /// We allow for 2 seconds of compute with a 6 second average block time. -- pub const MaximumBlockWeight: Weight = 2u64 * WEIGHT_PER_SECOND; +- pub const MaximumBlockWeight: Weight = Weight::from_parts(2u64 * WEIGHT_REF_TIME_PER_SECOND, u64::MAX); - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); - /// Assume 10% of weight for average on_initialize calls. - pub MaximumExtrinsicWeight: Weight = diff --git a/frame/babe/src/default_weights.rs b/frame/babe/src/default_weights.rs index d3e0c9d044883..f864fd18d86a6 100644 --- a/frame/babe/src/default_weights.rs +++ b/frame/babe/src/default_weights.rs @@ -19,7 +19,7 @@ //! This file was not auto-generated. use frame_support::weights::{ - constants::{RocksDbWeight as DbWeight, WEIGHT_PER_MICROS, WEIGHT_PER_NANOS}, + constants::{RocksDbWeight as DbWeight, WEIGHT_REF_TIME_PER_MICROS, WEIGHT_REF_TIME_PER_NANOS}, Weight, }; @@ -38,17 +38,20 @@ impl crate::WeightInfo for () { const MAX_NOMINATORS: u64 = 200; // checking membership proof - let ref_time_weight = (35u64 * WEIGHT_PER_MICROS) - .saturating_add((175u64 * WEIGHT_PER_NANOS).saturating_mul(validator_count)) + Weight::from_ref_time(35u64 * WEIGHT_REF_TIME_PER_MICROS) + .saturating_add( + Weight::from_ref_time(175u64 * WEIGHT_REF_TIME_PER_NANOS) + .saturating_mul(validator_count), + ) .saturating_add(DbWeight::get().reads(5)) // check equivocation proof - .saturating_add(110u64 * WEIGHT_PER_MICROS) + .saturating_add(Weight::from_ref_time(110u64 * WEIGHT_REF_TIME_PER_MICROS)) // report offence - .saturating_add(110u64 * WEIGHT_PER_MICROS) - .saturating_add(25u64 * WEIGHT_PER_MICROS * MAX_NOMINATORS) + .saturating_add(Weight::from_ref_time(110u64 * WEIGHT_REF_TIME_PER_MICROS)) + .saturating_add(Weight::from_ref_time( + 25u64 * WEIGHT_REF_TIME_PER_MICROS * MAX_NOMINATORS, + )) .saturating_add(DbWeight::get().reads(14 + 3 * MAX_NOMINATORS)) - .saturating_add(DbWeight::get().writes(10 + 3 * MAX_NOMINATORS)); - - ref_time_weight + .saturating_add(DbWeight::get().writes(10 + 3 * MAX_NOMINATORS)) } } diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index f4c8889ef05f4..f4cba0c85b083 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -40,7 +40,7 @@ use frame_support::{ fungibles::Lockable, BalanceStatus, ConstU32, ConstU64, Contains, Currency, Get, OnIdle, OnInitialize, ReservableCurrency, WithdrawReasons, }, - weights::{constants::WEIGHT_PER_SECOND, Weight}, + weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}, }; use frame_system::{self as system, EventRecord, 
Phase}; use pretty_assertions::{assert_eq, assert_ne}; @@ -285,7 +285,7 @@ impl RegisteredChainExtension for TempStorageExtension { parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::simple_max( - (2u64 * WEIGHT_PER_SECOND).set_proof_size(u64::MAX), + Weight::from_parts(2u64 * WEIGHT_REF_TIME_PER_SECOND, u64::MAX), ); pub static ExistentialDeposit: u64 = 1; } diff --git a/frame/democracy/src/tests.rs b/frame/democracy/src/tests.rs index eceb1a3400bba..41b279035028e 100644 --- a/frame/democracy/src/tests.rs +++ b/frame/democracy/src/tests.rs @@ -77,7 +77,9 @@ impl Contains for BaseFilter { parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::constants::WEIGHT_PER_SECOND.set_proof_size(u64::MAX)); + frame_system::limits::BlockWeights::simple_max( + Weight::from_parts(frame_support::weights::constants::WEIGHT_REF_TIME_PER_SECOND, u64::MAX), + ); } impl frame_system::Config for Test { type BaseCallFilter = BaseFilter; diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index 8ab7e5bbf733d..347a4f19185f9 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -239,7 +239,7 @@ parameter_types! { pub const ExistentialDeposit: u64 = 1; pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights ::with_sensible_defaults( - Weight::from_parts(2u64 * constants::WEIGHT_PER_SECOND.ref_time(), u64::MAX), + Weight::from_parts(2u64 * constants::WEIGHT_REF_TIME_PER_SECOND, u64::MAX), NORMAL_DISPATCH_RATIO, ); } diff --git a/frame/fast-unstake/src/mock.rs b/frame/fast-unstake/src/mock.rs index d66f4ba5663d9..b67dcf581ed97 100644 --- a/frame/fast-unstake/src/mock.rs +++ b/frame/fast-unstake/src/mock.rs @@ -21,7 +21,7 @@ use frame_support::{ pallet_prelude::*, parameter_types, traits::{ConstU64, Currency}, - weights::constants::WEIGHT_PER_SECOND, + weights::constants::WEIGHT_REF_TIME_PER_SECOND, }; use sp_runtime::traits::{Convert, IdentityLookup}; @@ -37,7 +37,7 @@ pub type T = Runtime; parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::simple_max( - (2u64 * WEIGHT_PER_SECOND).set_proof_size(u64::MAX), + Weight::from_parts(2u64 * WEIGHT_REF_TIME_PER_SECOND, u64::MAX), ); } diff --git a/frame/grandpa/src/default_weights.rs b/frame/grandpa/src/default_weights.rs index 4ca94dd576fb7..ba343cabac622 100644 --- a/frame/grandpa/src/default_weights.rs +++ b/frame/grandpa/src/default_weights.rs @@ -19,7 +19,7 @@ //! This file was not auto-generated. 
use frame_support::weights::{ - constants::{RocksDbWeight as DbWeight, WEIGHT_PER_MICROS, WEIGHT_PER_NANOS}, + constants::{RocksDbWeight as DbWeight, WEIGHT_REF_TIME_PER_MICROS, WEIGHT_REF_TIME_PER_NANOS}, Weight, }; @@ -34,14 +34,19 @@ impl crate::WeightInfo for () { const MAX_NOMINATORS: u64 = 200; // checking membership proof - (35u64 * WEIGHT_PER_MICROS) - .saturating_add((175u64 * WEIGHT_PER_NANOS).saturating_mul(validator_count)) + Weight::from_ref_time(35u64 * WEIGHT_REF_TIME_PER_MICROS) + .saturating_add( + Weight::from_ref_time(175u64 * WEIGHT_REF_TIME_PER_NANOS) + .saturating_mul(validator_count), + ) .saturating_add(DbWeight::get().reads(5)) // check equivocation proof - .saturating_add(95u64 * WEIGHT_PER_MICROS) + .saturating_add(Weight::from_ref_time(95u64 * WEIGHT_REF_TIME_PER_MICROS)) // report offence - .saturating_add(110u64 * WEIGHT_PER_MICROS) - .saturating_add(25u64 * WEIGHT_PER_MICROS * MAX_NOMINATORS) + .saturating_add(Weight::from_ref_time(110u64 * WEIGHT_REF_TIME_PER_MICROS)) + .saturating_add(Weight::from_ref_time( + 25u64 * WEIGHT_REF_TIME_PER_MICROS * MAX_NOMINATORS, + )) .saturating_add(DbWeight::get().reads(14 + 3 * MAX_NOMINATORS)) .saturating_add(DbWeight::get().writes(10 + 3 * MAX_NOMINATORS)) // fetching set id -> session index mappings @@ -49,6 +54,7 @@ impl crate::WeightInfo for () { } fn note_stalled() -> Weight { - (3u64 * WEIGHT_PER_MICROS).saturating_add(DbWeight::get().writes(1)) + Weight::from_ref_time(3u64 * WEIGHT_REF_TIME_PER_MICROS) + .saturating_add(DbWeight::get().writes(1)) } } diff --git a/frame/lottery/src/weights.rs b/frame/lottery/src/weights.rs index c0936ae6c8073..e9ee528cc43b8 100644 --- a/frame/lottery/src/weights.rs +++ b/frame/lottery/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_lottery //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-18, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-12-08, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -67,33 +67,33 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) // Storage: Lottery Tickets (r:0 w:1) fn buy_ticket() -> Weight { - // Minimum execution time: 53_735 nanoseconds. - Weight::from_ref_time(54_235_000) + // Minimum execution time: 52_479 nanoseconds. + Weight::from_ref_time(53_225_000) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } // Storage: Lottery CallIndices (r:0 w:1) /// The range of component `n` is `[0, 10]`. fn set_calls(n: u32, ) -> Weight { - // Minimum execution time: 15_065 nanoseconds. - Weight::from_ref_time(16_467_398) - // Standard Error: 5_392 - .saturating_add(Weight::from_ref_time(294_914).saturating_mul(n.into())) + // Minimum execution time: 14_433 nanoseconds. + Weight::from_ref_time(15_660_780) + // Standard Error: 5_894 + .saturating_add(Weight::from_ref_time(290_482).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().writes(1)) } // Storage: Lottery Lottery (r:1 w:1) // Storage: Lottery LotteryIndex (r:1 w:1) // Storage: System Account (r:1 w:1) fn start_lottery() -> Weight { - // Minimum execution time: 45_990 nanoseconds. - Weight::from_ref_time(46_789_000) + // Minimum execution time: 43_683 nanoseconds. 
+ Weight::from_ref_time(44_580_000) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } // Storage: Lottery Lottery (r:1 w:1) fn stop_repeat() -> Weight { - // Minimum execution time: 10_783 nanoseconds. - Weight::from_ref_time(11_180_000) + // Minimum execution time: 10_514 nanoseconds. + Weight::from_ref_time(10_821_000) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -103,8 +103,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Lottery TicketsCount (r:1 w:1) // Storage: Lottery Tickets (r:1 w:0) fn on_initialize_end() -> Weight { - // Minimum execution time: 62_088 nanoseconds. - Weight::from_ref_time(63_670_000) + // Minimum execution time: 60_254 nanoseconds. + Weight::from_ref_time(61_924_000) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -115,8 +115,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Lottery Tickets (r:1 w:0) // Storage: Lottery LotteryIndex (r:1 w:1) fn on_initialize_repeat() -> Weight { - // Minimum execution time: 64_953 nanoseconds. - Weight::from_ref_time(65_465_000) + // Minimum execution time: 61_552 nanoseconds. + Weight::from_ref_time(62_152_000) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) } @@ -132,33 +132,33 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:1) // Storage: Lottery Tickets (r:0 w:1) fn buy_ticket() -> Weight { - // Minimum execution time: 53_735 nanoseconds. - Weight::from_ref_time(54_235_000) + // Minimum execution time: 52_479 nanoseconds. + Weight::from_ref_time(53_225_000) .saturating_add(RocksDbWeight::get().reads(6)) .saturating_add(RocksDbWeight::get().writes(4)) } // Storage: Lottery CallIndices (r:0 w:1) /// The range of component `n` is `[0, 10]`. fn set_calls(n: u32, ) -> Weight { - // Minimum execution time: 15_065 nanoseconds. - Weight::from_ref_time(16_467_398) - // Standard Error: 5_392 - .saturating_add(Weight::from_ref_time(294_914).saturating_mul(n.into())) + // Minimum execution time: 14_433 nanoseconds. + Weight::from_ref_time(15_660_780) + // Standard Error: 5_894 + .saturating_add(Weight::from_ref_time(290_482).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().writes(1)) } // Storage: Lottery Lottery (r:1 w:1) // Storage: Lottery LotteryIndex (r:1 w:1) // Storage: System Account (r:1 w:1) fn start_lottery() -> Weight { - // Minimum execution time: 45_990 nanoseconds. - Weight::from_ref_time(46_789_000) + // Minimum execution time: 43_683 nanoseconds. + Weight::from_ref_time(44_580_000) .saturating_add(RocksDbWeight::get().reads(3)) .saturating_add(RocksDbWeight::get().writes(3)) } // Storage: Lottery Lottery (r:1 w:1) fn stop_repeat() -> Weight { - // Minimum execution time: 10_783 nanoseconds. - Weight::from_ref_time(11_180_000) + // Minimum execution time: 10_514 nanoseconds. + Weight::from_ref_time(10_821_000) .saturating_add(RocksDbWeight::get().reads(1)) .saturating_add(RocksDbWeight::get().writes(1)) } @@ -168,8 +168,8 @@ impl WeightInfo for () { // Storage: Lottery TicketsCount (r:1 w:1) // Storage: Lottery Tickets (r:1 w:0) fn on_initialize_end() -> Weight { - // Minimum execution time: 62_088 nanoseconds. - Weight::from_ref_time(63_670_000) + // Minimum execution time: 60_254 nanoseconds. 
+ Weight::from_ref_time(61_924_000) .saturating_add(RocksDbWeight::get().reads(6)) .saturating_add(RocksDbWeight::get().writes(4)) } @@ -180,8 +180,8 @@ impl WeightInfo for () { // Storage: Lottery Tickets (r:1 w:0) // Storage: Lottery LotteryIndex (r:1 w:1) fn on_initialize_repeat() -> Weight { - // Minimum execution time: 64_953 nanoseconds. - Weight::from_ref_time(65_465_000) + // Minimum execution time: 61_552 nanoseconds. + Weight::from_ref_time(62_152_000) .saturating_add(RocksDbWeight::get().reads(7)) .saturating_add(RocksDbWeight::get().writes(5)) } diff --git a/frame/merkle-mountain-range/src/default_weights.rs b/frame/merkle-mountain-range/src/default_weights.rs index e513e2197f1c6..e4f9750fbcba5 100644 --- a/frame/merkle-mountain-range/src/default_weights.rs +++ b/frame/merkle-mountain-range/src/default_weights.rs @@ -19,7 +19,7 @@ //! This file was not auto-generated. use frame_support::weights::{ - constants::{RocksDbWeight as DbWeight, WEIGHT_PER_NANOS}, + constants::{RocksDbWeight as DbWeight, WEIGHT_REF_TIME_PER_NANOS}, Weight, }; @@ -28,7 +28,7 @@ impl crate::WeightInfo for () { // Reading the parent hash. let leaf_weight = DbWeight::get().reads(1); // Blake2 hash cost. - let hash_weight = 2u64 * WEIGHT_PER_NANOS; + let hash_weight = Weight::from_ref_time(2u64 * WEIGHT_REF_TIME_PER_NANOS); // No-op hook. let hook_weight = Weight::zero(); diff --git a/frame/offences/benchmarking/src/mock.rs b/frame/offences/benchmarking/src/mock.rs index e022d81c5b5bd..de3a4eca6308d 100644 --- a/frame/offences/benchmarking/src/mock.rs +++ b/frame/offences/benchmarking/src/mock.rs @@ -24,7 +24,7 @@ use frame_election_provider_support::{onchain, SequentialPhragmen}; use frame_support::{ parameter_types, traits::{ConstU32, ConstU64}, - weights::constants::WEIGHT_PER_SECOND, + weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}, }; use frame_system as system; use pallet_session::historical as pallet_session_historical; @@ -41,7 +41,7 @@ type Balance = u64; parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::simple_max( - 2u64 * WEIGHT_PER_SECOND + Weight::from_parts(2u64 * WEIGHT_REF_TIME_PER_SECOND, u64::MAX) ); } diff --git a/frame/offences/src/mock.rs b/frame/offences/src/mock.rs index 31dac8d51d3b1..8e4256ec3d3e6 100644 --- a/frame/offences/src/mock.rs +++ b/frame/offences/src/mock.rs @@ -26,7 +26,7 @@ use frame_support::{ parameter_types, traits::{ConstU32, ConstU64}, weights::{ - constants::{RocksDbWeight, WEIGHT_PER_SECOND}, + constants::{RocksDbWeight, WEIGHT_REF_TIME_PER_SECOND}, Weight, }, }; @@ -85,7 +85,9 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(2u64 * WEIGHT_PER_SECOND); + frame_system::limits::BlockWeights::simple_max( + Weight::from_parts(2u64 * WEIGHT_REF_TIME_PER_SECOND, u64::MAX), + ); } impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index 16e4e5ddd7aa2..d3affda05277a 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -115,7 +115,7 @@ impl FindAuthor for Author11 { parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::simple_max( - frame_support::weights::constants::WEIGHT_PER_SECOND * 2 + Weight::from_parts(frame_support::weights::constants::WEIGHT_REF_TIME_PER_SECOND * 2, u64::MAX), ); pub static SessionsPerEra: SessionIndex = 3; pub static ExistentialDeposit: Balance = 1; diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 6609b9087637d..78429122d00f1 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -4365,9 +4365,10 @@ mod election_data_provider { #[test] fn targets_2sec_block() { let mut validators = 1000; - while ::WeightInfo::get_npos_targets(validators) - .all_lt(2u64 * frame_support::weights::constants::WEIGHT_PER_SECOND) - { + while ::WeightInfo::get_npos_targets(validators).all_lt(Weight::from_parts( + 2u64 * frame_support::weights::constants::WEIGHT_REF_TIME_PER_SECOND, + u64::MAX, + )) { validators += 1; } @@ -4384,8 +4385,10 @@ mod election_data_provider { let mut nominators = 1000; while ::WeightInfo::get_npos_voters(validators, nominators, slashing_spans) - .all_lt(2u64 * frame_support::weights::constants::WEIGHT_PER_SECOND) - { + .all_lt(Weight::from_parts( + 2u64 * frame_support::weights::constants::WEIGHT_REF_TIME_PER_SECOND, + u64::MAX, + )) { nominators += 1; } diff --git a/frame/support/src/weights/block_weights.rs b/frame/support/src/weights/block_weights.rs index 5c8e1f1c86e9d..b68c1fb508b01 100644 --- a/frame/support/src/weights/block_weights.rs +++ b/frame/support/src/weights/block_weights.rs @@ -37,7 +37,7 @@ // --repeat=100 use sp_core::parameter_types; -use sp_weights::{constants::WEIGHT_PER_NANOS, Weight}; +use sp_weights::{constants::WEIGHT_REF_TIME_PER_NANOS, Weight}; parameter_types! { /// Time to execute an empty block. @@ -53,7 +53,8 @@ parameter_types! { /// 99th: 390_723 /// 95th: 365_799 /// 75th: 361_582 - pub const BlockExecutionWeight: Weight = WEIGHT_PER_NANOS.saturating_mul(358_523); + pub const BlockExecutionWeight: Weight = + Weight::from_ref_time(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(358_523)); } #[cfg(test)] @@ -69,12 +70,12 @@ mod test_weights { // At least 100 µs. assert!( - w.ref_time() >= 100u64 * constants::WEIGHT_PER_MICROS.ref_time(), + w.ref_time() >= 100u64 * constants::WEIGHT_REF_TIME_PER_MICROS, "Weight should be at least 100 µs." ); // At most 50 ms. assert!( - w.ref_time() <= 50u64 * constants::WEIGHT_PER_MILLIS.ref_time(), + w.ref_time() <= 50u64 * constants::WEIGHT_REF_TIME_PER_MILLIS, "Weight should be at most 50 ms." ); } diff --git a/frame/support/src/weights/extrinsic_weights.rs b/frame/support/src/weights/extrinsic_weights.rs index 1db2281dfe488..ced1fb91621f6 100644 --- a/frame/support/src/weights/extrinsic_weights.rs +++ b/frame/support/src/weights/extrinsic_weights.rs @@ -37,7 +37,7 @@ // --repeat=100 use sp_core::parameter_types; -use sp_weights::{constants::WEIGHT_PER_NANOS, Weight}; +use sp_weights::{constants::WEIGHT_REF_TIME_PER_NANOS, Weight}; parameter_types! { /// Time to execute a NO-OP extrinsic, for example `System::remark`. @@ -53,7 +53,8 @@ parameter_types! { /// 99th: 99_202 /// 95th: 99_163 /// 75th: 99_030 - pub const ExtrinsicBaseWeight: Weight = WEIGHT_PER_NANOS.saturating_mul(98_974); + pub const ExtrinsicBaseWeight: Weight = + Weight::from_ref_time(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(98_974)); } #[cfg(test)] @@ -69,12 +70,12 @@ mod test_weights { // At least 10 µs. 
assert!( - w.ref_time() >= 10u64 * constants::WEIGHT_PER_MICROS.ref_time(), + w.ref_time() >= 10u64 * constants::WEIGHT_REF_TIME_PER_MICROS, "Weight should be at least 10 µs." ); // At most 1 ms. assert!( - w.ref_time() <= constants::WEIGHT_PER_MILLIS.ref_time(), + w.ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, "Weight should be at most 1 ms." ); } diff --git a/frame/support/src/weights/paritydb_weights.rs b/frame/support/src/weights/paritydb_weights.rs index 344e6cf0ddb6e..6fd1112ee2947 100644 --- a/frame/support/src/weights/paritydb_weights.rs +++ b/frame/support/src/weights/paritydb_weights.rs @@ -24,8 +24,8 @@ pub mod constants { /// ParityDB can be enabled with a feature flag, but is still experimental. These weights /// are available for brave runtime engineers who may want to try this out as default. pub const ParityDbWeight: RuntimeDbWeight = RuntimeDbWeight { - read: 8_000 * constants::WEIGHT_PER_NANOS.ref_time(), - write: 50_000 * constants::WEIGHT_PER_NANOS.ref_time(), + read: 8_000 * constants::WEIGHT_REF_TIME_PER_NANOS, + write: 50_000 * constants::WEIGHT_REF_TIME_PER_NANOS, }; } @@ -41,20 +41,20 @@ pub mod constants { fn sane() { // At least 1 µs. assert!( - W::get().reads(1).ref_time() >= constants::WEIGHT_PER_MICROS.ref_time(), + W::get().reads(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, "Read weight should be at least 1 µs." ); assert!( - W::get().writes(1).ref_time() >= constants::WEIGHT_PER_MICROS.ref_time(), + W::get().writes(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, "Write weight should be at least 1 µs." ); // At most 1 ms. assert!( - W::get().reads(1).ref_time() <= constants::WEIGHT_PER_MILLIS.ref_time(), + W::get().reads(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, "Read weight should be at most 1 ms." ); assert!( - W::get().writes(1).ref_time() <= constants::WEIGHT_PER_MILLIS.ref_time(), + W::get().writes(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, "Write weight should be at most 1 ms." ); } diff --git a/frame/support/src/weights/rocksdb_weights.rs b/frame/support/src/weights/rocksdb_weights.rs index 4dec2d8c877ea..b18b387de9957 100644 --- a/frame/support/src/weights/rocksdb_weights.rs +++ b/frame/support/src/weights/rocksdb_weights.rs @@ -24,8 +24,8 @@ pub mod constants { /// By default, Substrate uses RocksDB, so this will be the weight used throughout /// the runtime. pub const RocksDbWeight: RuntimeDbWeight = RuntimeDbWeight { - read: 25_000 * constants::WEIGHT_PER_NANOS.ref_time(), - write: 100_000 * constants::WEIGHT_PER_NANOS.ref_time(), + read: 25_000 * constants::WEIGHT_REF_TIME_PER_NANOS, + write: 100_000 * constants::WEIGHT_REF_TIME_PER_NANOS, }; } @@ -41,20 +41,20 @@ pub mod constants { fn sane() { // At least 1 µs. assert!( - W::get().reads(1).ref_time() >= constants::WEIGHT_PER_MICROS.ref_time(), + W::get().reads(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, "Read weight should be at least 1 µs." ); assert!( - W::get().writes(1).ref_time() >= constants::WEIGHT_PER_MICROS.ref_time(), + W::get().writes(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, "Write weight should be at least 1 µs." ); // At most 1 ms. assert!( - W::get().reads(1).ref_time() <= constants::WEIGHT_PER_MILLIS.ref_time(), + W::get().reads(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, "Read weight should be at most 1 ms." 
); assert!( - W::get().writes(1).ref_time() <= constants::WEIGHT_PER_MILLIS.ref_time(), + W::get().writes(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, "Write weight should be at most 1 ms." ); } diff --git a/frame/system/src/limits.rs b/frame/system/src/limits.rs index eb95b699eba32..54d27c5b9e86d 100644 --- a/frame/system/src/limits.rs +++ b/frame/system/src/limits.rs @@ -208,7 +208,7 @@ pub struct BlockWeights { impl Default for BlockWeights { fn default() -> Self { Self::with_sensible_defaults( - Weight::from_parts(constants::WEIGHT_PER_SECOND.ref_time(), u64::MAX), + Weight::from_parts(constants::WEIGHT_REF_TIME_PER_SECOND, u64::MAX), DEFAULT_NORMAL_RATIO, ) } diff --git a/primitives/weights/src/lib.rs b/primitives/weights/src/lib.rs index af9e730fbfefd..928080d139864 100644 --- a/primitives/weights/src/lib.rs +++ b/primitives/weights/src/lib.rs @@ -47,12 +47,13 @@ pub use weight_meter::*; pub use weight_v2::*; pub mod constants { - use super::Weight; + pub const WEIGHT_REF_TIME_PER_SECOND: u64 = 1_000_000_000_000; + pub const WEIGHT_REF_TIME_PER_MILLIS: u64 = 1_000_000_000; + pub const WEIGHT_REF_TIME_PER_MICROS: u64 = 1_000_000; + pub const WEIGHT_REF_TIME_PER_NANOS: u64 = 1_000; - pub const WEIGHT_PER_SECOND: Weight = Weight::from_ref_time(1_000_000_000_000); - pub const WEIGHT_PER_MILLIS: Weight = Weight::from_ref_time(1_000_000_000); - pub const WEIGHT_PER_MICROS: Weight = Weight::from_ref_time(1_000_000); - pub const WEIGHT_PER_NANOS: Weight = Weight::from_ref_time(1_000); + pub const WEIGHT_PROOF_SIZE_PER_MB: u64 = 1024 * 1024; + pub const WEIGHT_PROOF_SIZE_PER_KB: u64 = 1024; } /// The old weight type. diff --git a/utils/frame/benchmarking-cli/src/block/bench.rs b/utils/frame/benchmarking-cli/src/block/bench.rs index 5a67b11f494f5..578158d8a2356 100644 --- a/utils/frame/benchmarking-cli/src/block/bench.rs +++ b/utils/frame/benchmarking-cli/src/block/bench.rs @@ -18,7 +18,7 @@ //! Contains the core benchmarking logic. use codec::DecodeAll; -use frame_support::weights::constants::WEIGHT_PER_NANOS; +use frame_support::weights::constants::WEIGHT_REF_TIME_PER_NANOS; use frame_system::ConsumedWeight; use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider}; use sc_cli::{Error, Result}; @@ -148,7 +148,7 @@ where let weight = ConsumedWeight::decode_all(&mut raw_weight)?; // Should be divisible, but still use floats in case we ever change that. - Ok((weight.total().ref_time() as f64 / WEIGHT_PER_NANOS.ref_time() as f64).floor() + Ok((weight.total().ref_time() as f64 / WEIGHT_REF_TIME_PER_NANOS as f64).floor() as NanoSeconds) } diff --git a/utils/frame/benchmarking-cli/src/overhead/README.md b/utils/frame/benchmarking-cli/src/overhead/README.md index b21d051e9d44c..1584c2affe0a3 100644 --- a/utils/frame/benchmarking-cli/src/overhead/README.md +++ b/utils/frame/benchmarking-cli/src/overhead/README.md @@ -30,7 +30,8 @@ The file will contain the concrete weight value and various statistics about the /// 99th: 3_631_863 /// 95th: 3_595_674 /// 75th: 3_526_435 -pub const BlockExecutionWeight: Weight = WEIGHT_PER_NANOS.saturating_mul(3_532_484); +pub const BlockExecutionWeight: Weight = + Weight::from_ref_time(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(3_532_484)); ``` In this example it takes 3.5 ms to execute an empty block. That means that it always takes at least 3.5 ms to execute *any* block. 
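> Editor's note, not part of the patch: a minimal sketch of what the new `u64` ref-time constants mean in practice. The `3_532_484` figure is the example value from the README excerpt above; `WEIGHT_REF_TIME_PER_NANOS` and `WEIGHT_REF_TIME_PER_MILLIS` are the constants this patch introduces in `primitives/weights/src/lib.rs` (1 second = 10^12 ref-time units, so one unit is a picosecond of reference execution time).

```rust
use sp_weights::constants::{WEIGHT_REF_TIME_PER_MILLIS, WEIGHT_REF_TIME_PER_NANOS};

fn main() {
	// BlockExecutionWeight above: 3_532_484 ns of execution, expressed as ref-time.
	let ref_time = WEIGHT_REF_TIME_PER_NANOS.saturating_mul(3_532_484);
	// Converting back to milliseconds recovers the ~3.5 ms quoted in the text
	// (integer division truncates the fractional 0.5 ms).
	assert_eq!(ref_time / WEIGHT_REF_TIME_PER_MILLIS, 3);
}
```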
@@ -59,7 +60,8 @@ The relevant section in the output file looks like this: /// 99th: 68_758 /// 95th: 67_843 /// 75th: 67_749 -pub const ExtrinsicBaseWeight: Weight = WEIGHT_PER_NANOS.saturating_mul(67_745); +pub const ExtrinsicBaseWeight: Weight = + Weight::from_ref_time(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(67_745)); ``` In this example it takes 67.7 µs to execute a NO-OP extrinsic. That means that it always takes at least 67.7 µs to execute *any* extrinsic. diff --git a/utils/frame/benchmarking-cli/src/overhead/weights.hbs b/utils/frame/benchmarking-cli/src/overhead/weights.hbs index 8d1a369372721..c54393d200bd3 100644 --- a/utils/frame/benchmarking-cli/src/overhead/weights.hbs +++ b/utils/frame/benchmarking-cli/src/overhead/weights.hbs @@ -14,7 +14,7 @@ {{/each}} use sp_core::parameter_types; -use sp_weights::{constants::WEIGHT_PER_NANOS, Weight}; +use sp_weights::{constants::WEIGHT_REF_TIME_PER_NANOS, Weight}; parameter_types! { {{#if (eq short_name "block")}} @@ -34,7 +34,8 @@ parameter_types! { /// 99th: {{underscore stats.p99}} /// 95th: {{underscore stats.p95}} /// 75th: {{underscore stats.p75}} - pub const {{long_name}}Weight: Weight = WEIGHT_PER_NANOS.saturating_mul({{underscore weight}}); + pub const {{long_name}}Weight: Weight = + Weight::from_ref_time(WEIGHT_REF_TIME_PER_NANOS.saturating_mul({{underscore weight}})); } #[cfg(test)] @@ -51,23 +52,23 @@ mod test_weights { {{#if (eq short_name "block")}} // At least 100 µs. assert!( - w.ref_time() >= 100u64 * constants::WEIGHT_PER_MICROS.ref_time(), + w.ref_time() >= 100u64 * constants::WEIGHT_REF_TIME_PER_MICROS, "Weight should be at least 100 µs." ); // At most 50 ms. assert!( - w.ref_time() <= 50u64 * constants::WEIGHT_PER_MILLIS.ref_time(), + w.ref_time() <= 50u64 * constants::WEIGHT_REF_TIME_PER_MILLIS, "Weight should be at most 50 ms." ); {{else}} // At least 10 µs. assert!( - w.ref_time() >= 10u64 * constants::WEIGHT_PER_MICROS.ref_time(), + w.ref_time() >= 10u64 * constants::WEIGHT_REF_TIME_PER_MICROS, "Weight should be at least 10 µs." ); // At most 1 ms. assert!( - w.ref_time() <= constants::WEIGHT_PER_MILLIS.ref_time(), + w.ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, "Weight should be at most 1 ms." ); {{/if}} diff --git a/utils/frame/benchmarking-cli/src/storage/README.md b/utils/frame/benchmarking-cli/src/storage/README.md index ecaf4edadab38..f61b7ba1bddd0 100644 --- a/utils/frame/benchmarking-cli/src/storage/README.md +++ b/utils/frame/benchmarking-cli/src/storage/README.md @@ -69,7 +69,7 @@ The interesting part in the generated weight file tells us the weight constants /// 99th: 18_270 /// 95th: 16_190 /// 75th: 14_819 -read: 14_262 * constants::WEIGHT_PER_NANOS, +read: 14_262 * constants::WEIGHT_REF_TIME_PER_NANOS, /// Time to write one storage item. /// Calculated by multiplying the *Average* of all values with `1.1` and adding `0`. 
@@ -84,7 +84,7 @@ read: 14_262 * constants::WEIGHT_PER_NANOS, /// 99th: 135_839 /// 95th: 106_129 /// 75th: 79_239 -write: 71_347 * constants::WEIGHT_PER_NANOS, +write: 71_347 * constants::WEIGHT_REF_TIME_PER_NANOS, ``` ## Arguments diff --git a/utils/frame/benchmarking-cli/src/storage/weights.hbs b/utils/frame/benchmarking-cli/src/storage/weights.hbs index 82e581cf990c8..135b18b193746 100644 --- a/utils/frame/benchmarking-cli/src/storage/weights.hbs +++ b/utils/frame/benchmarking-cli/src/storage/weights.hbs @@ -43,7 +43,7 @@ pub mod constants { /// 99th: {{underscore read.0.p99}} /// 95th: {{underscore read.0.p95}} /// 75th: {{underscore read.0.p75}} - read: {{underscore read_weight}} * constants::WEIGHT_PER_NANOS, + read: {{underscore read_weight}} * constants::WEIGHT_REF_TIME_PER_NANOS, /// Time to write one storage item. /// Calculated by multiplying the *{{params.weight_params.weight_metric}}* of all values with `{{params.weight_params.weight_mul}}` and adding `{{params.weight_params.weight_add}}`. @@ -58,7 +58,7 @@ pub mod constants { /// 99th: {{underscore write.0.p99}} /// 95th: {{underscore write.0.p95}} /// 75th: {{underscore write.0.p75}} - write: {{underscore write_weight}} * constants::WEIGHT_PER_NANOS, + write: {{underscore write_weight}} * constants::WEIGHT_REF_TIME_PER_NANOS, }; } @@ -74,20 +74,20 @@ pub mod constants { fn bound() { // At least 1 µs. assert!( - W::get().reads(1).ref_time() >= constants::WEIGHT_PER_MICROS.ref_time(), + W::get().reads(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, "Read weight should be at least 1 µs." ); assert!( - W::get().writes(1).ref_time() >= constants::WEIGHT_PER_MICROS.ref_time(), + W::get().writes(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, "Write weight should be at least 1 µs." ); // At most 1 ms. assert!( - W::get().reads(1).ref_time() <= constants::WEIGHT_PER_MILLIS.ref_time(), + W::get().reads(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, "Read weight should be at most 1 ms." ); assert!( - W::get().writes(1).ref_time() <= constants::WEIGHT_PER_MILLIS.ref_time(), + W::get().writes(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, "Write weight should be at most 1 ms." 
); } From 02a9deaa007e358a2aa537ce624497c3ef3cf2ca Mon Sep 17 00:00:00 2001 From: Vlad Date: Thu, 8 Dec 2022 17:58:52 +0000 Subject: [PATCH 02/29] Checkout to the branch HEAD explicitly in `build-linux-substrate` (#12876) --- scripts/ci/gitlab/pipeline/build.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/scripts/ci/gitlab/pipeline/build.yml b/scripts/ci/gitlab/pipeline/build.yml index 5bbc3fb8f751c..2f8cff7b3ffa6 100644 --- a/scripts/ci/gitlab/pipeline/build.yml +++ b/scripts/ci/gitlab/pipeline/build.yml @@ -64,6 +64,9 @@ build-linux-substrate: before_script: - mkdir -p ./artifacts/substrate/ - !reference [.rusty-cachier, before_script] + # tldr: we need to checkout the branch HEAD explicitly because of our dynamic versioning approach while building the substrate binary + # see https://github.com/paritytech/ci_cd/issues/682#issuecomment-1340953589 + - git checkout -B "$CI_COMMIT_REF_NAME" "$CI_COMMIT_SHA" script: - rusty-cachier snapshot create - WASM_BUILD_NO_COLOR=1 time cargo build --locked --release --verbose From 9a0644ca46410613d332d7a2754c502d89146e2f Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Thu, 8 Dec 2022 22:15:14 +0200 Subject: [PATCH 03/29] cli: Improve pruning documentation (#12819) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * cli: Improve pruning documentation Signed-off-by: Alexandru Vasile * cli: Keep `finalized` notation and remove `canonical` one * cli: Fix cargo doc * cli: `PruningModeClap` IR enum Signed-off-by: Alexandru Vasile * cli: Convert PruningModeClap into pruning modes Signed-off-by: Alexandru Vasile * cli: Use `PruningModeClap` Signed-off-by: Alexandru Vasile * cli: Rename to `DatabasePruningMode` Signed-off-by: Alexandru Vasile * cli: Implement `FromStr` instead of `clap::ValueEnum` Signed-off-by: Alexandru Vasile * Update client/cli/src/params/pruning_params.rs Co-authored-by: Bastian Köcher * Fix clippy Signed-off-by: Alexandru Vasile * cli: Add option documentation back Signed-off-by: Alexandru Vasile * Apply suggestions from code review Signed-off-by: Alexandru Vasile Co-authored-by: Bastian Köcher --- client/cli/src/params/pruning_params.rs | 114 ++++++++++++++++-------- 1 file changed, 75 insertions(+), 39 deletions(-) diff --git a/client/cli/src/params/pruning_params.rs b/client/cli/src/params/pruning_params.rs index 2da1de919771c..7e50f53d7169a 100644 --- a/client/cli/src/params/pruning_params.rs +++ b/client/cli/src/params/pruning_params.rs @@ -23,57 +23,93 @@ use sc_service::{BlocksPruning, PruningMode}; /// Parameters to define the pruning mode #[derive(Debug, Clone, PartialEq, Args)] pub struct PruningParams { - /// Specify the state pruning mode, a number of blocks to keep or 'archive'. + /// Specify the state pruning mode. /// - /// Default is to keep only the last 256 blocks, - /// otherwise, the state can be kept for all of the blocks (i.e 'archive'), - /// or for all of the canonical blocks (i.e 'archive-canonical'). - #[arg(alias = "pruning", long, value_name = "PRUNING_MODE")] - pub state_pruning: Option, - /// Specify the blocks pruning mode, a number of blocks to keep or 'archive'. + /// This mode specifies when the block's state (ie, storage) + /// should be pruned (ie, removed) from the database. /// - /// Default is to keep all finalized blocks. - /// otherwise, all blocks can be kept (i.e 'archive'), - /// or for all canonical blocks (i.e 'archive-canonical'), - /// or for the last N blocks (i.e a number). 
+	/// Possible values:
+	///  'archive' Keep the state of all blocks.
+	///  'archive-canonical' Keep only the state of finalized blocks.
+	///  number Keep the state of the last number of finalized blocks.
+	#[arg(alias = "pruning", long, value_name = "PRUNING_MODE", default_value = "256")]
+	pub state_pruning: DatabasePruningMode,
+	/// Specify the blocks pruning mode.
 	///
-	/// NOTE: only finalized blocks are subject for removal!
-	#[arg(alias = "keep-blocks", long, value_name = "COUNT")]
-	pub blocks_pruning: Option<String>,
+	/// This mode specifies when the block's body (including justifications)
+	/// should be pruned (ie, removed) from the database.
+	///
+	/// Possible values:
+	///  'archive' Keep all blocks.
+	///  'archive-canonical' Keep only finalized blocks.
+	///  number Keep the last `number` of finalized blocks.
+	#[arg(
+		alias = "keep-blocks",
+		long,
+		value_name = "PRUNING_MODE",
+		default_value = "archive-canonical"
+	)]
+	pub blocks_pruning: DatabasePruningMode,
 }
 
 impl PruningParams {
 	/// Get the pruning value from the parameters
 	pub fn state_pruning(&self) -> error::Result<Option<PruningMode>> {
-		self.state_pruning
-			.as_ref()
-			.map(|s| match s.as_str() {
-				"archive" => Ok(PruningMode::ArchiveAll),
-				"archive-canonical" => Ok(PruningMode::ArchiveCanonical),
-				bc => bc
-					.parse()
-					.map_err(|_| {
-						error::Error::Input("Invalid state pruning mode specified".to_string())
-					})
-					.map(PruningMode::blocks_pruning),
-			})
-			.transpose()
+		Ok(Some(self.state_pruning.into()))
 	}
 
 	/// Get the block pruning value from the parameters
 	pub fn blocks_pruning(&self) -> error::Result<BlocksPruning> {
-		match self.blocks_pruning.as_ref() {
-			Some(bp) => match bp.as_str() {
-				"archive" => Ok(BlocksPruning::KeepAll),
-				"archive-canonical" => Ok(BlocksPruning::KeepFinalized),
-				bc => bc
-					.parse()
-					.map_err(|_| {
-						error::Error::Input("Invalid blocks pruning mode specified".to_string())
-					})
-					.map(BlocksPruning::Some),
-			},
-			None => Ok(BlocksPruning::KeepFinalized),
+		Ok(self.blocks_pruning.into())
+	}
+}
+
+/// Specifies the pruning mode of the database.
+///
+/// This specifies when the block's data (either state via `--state-pruning`
+/// or body via `--blocks-pruning`) should be pruned (ie, removed) from
+/// the database.
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub enum DatabasePruningMode {
+	/// Keep the data of all blocks.
+	Archive,
+	/// Keep only the data of finalized blocks.
+	ArchiveCanonical,
+	/// Keep the data of the last number of finalized blocks.
+	Custom(u32),
+}
+
+impl std::str::FromStr for DatabasePruningMode {
+	type Err = String;
+
+	fn from_str(input: &str) -> Result<Self, Self::Err> {
+		match input {
+			"archive" => Ok(Self::Archive),
+			"archive-canonical" => Ok(Self::ArchiveCanonical),
+			bc => bc
+				.parse()
+				.map_err(|_| "Invalid pruning mode specified".to_string())
+				.map(Self::Custom),
+		}
+	}
+}
+
+impl Into<PruningMode> for DatabasePruningMode {
+	fn into(self) -> PruningMode {
+		match self {
+			DatabasePruningMode::Archive => PruningMode::ArchiveAll,
+			DatabasePruningMode::ArchiveCanonical => PruningMode::ArchiveCanonical,
+			DatabasePruningMode::Custom(n) => PruningMode::blocks_pruning(n),
+		}
+	}
+}
+
+impl Into<BlocksPruning> for DatabasePruningMode {
+	fn into(self) -> BlocksPruning {
+		match self {
+			DatabasePruningMode::Archive => BlocksPruning::KeepAll,
+			DatabasePruningMode::ArchiveCanonical => BlocksPruning::KeepFinalized,
+			DatabasePruningMode::Custom(n) => BlocksPruning::Some(n),
+		}
+	}
+}

From e6bbc53af99458c87b6fafe02314b4959b8da2b0 Mon Sep 17 00:00:00 2001
From: Anthony Alaribe
Date: Fri, 9 Dec 2022 11:40:59 +0200
Subject: [PATCH 04/29] Revert "Move LockableCurrency trait to
 fungibles::Lockable and deprecate LockableCurrency (#12798)" (#12882)

This reverts commit ea3ca3f757ff9d9559665719a77da81f4cf0f0ce.
---
 bin/node/runtime/src/lib.rs                   |  8 +--
 frame/balances/README.md                      |  8 +--
 frame/balances/src/lib.rs                     | 26 ++++----
 frame/balances/src/tests.rs                   |  6 +-
 frame/contracts/src/tests.rs                  |  2 +-
 frame/conviction-voting/src/benchmarking.rs   |  2 +-
 frame/conviction-voting/src/lib.rs            |  6 +-
 frame/democracy/src/lib.rs                    | 10 ++-
 frame/elections-phragmen/src/lib.rs           | 10 +--
 frame/executive/src/lib.rs                    |  9 ++-
 frame/referenda/src/lib.rs                    |  7 +-
 frame/staking/src/pallet/impls.rs             |  5 +-
 frame/staking/src/pallet/mod.rs               |  8 +--
 frame/support/src/traits.rs                   |  1 -
 frame/support/src/traits/tokens/currency.rs   |  5 +-
 .../src/traits/tokens/currency/lockable.rs    | 48 +++++++++++++-
 frame/support/src/traits/tokens/fungible.rs   |  1 -
 frame/support/src/traits/tokens/fungibles.rs  |  2 -
 .../src/traits/tokens/fungibles/lockable.rs   | 65 -------------------
 frame/vesting/src/lib.rs                      | 11 ++--
 20 files changed, 105 insertions(+), 135 deletions(-)
 delete mode 100644 frame/support/src/traits/tokens/fungibles/lockable.rs

diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs
index a754fac1da7ab..1bb4dd6f913a6 100644
--- a/bin/node/runtime/src/lib.rs
+++ b/bin/node/runtime/src/lib.rs
@@ -32,9 +32,9 @@ use frame_support::{
 	pallet_prelude::Get,
 	parameter_types,
 	traits::{
-		fungible::ItemOf, fungibles, AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU16,
-		ConstU32, Currency, EitherOfDiverse, EqualPrivilegeOnly, Everything, Imbalance,
-		InstanceFilter, KeyOwnerProofSystem, Nothing, OnUnbalanced, U128CurrencyToVote,
+		fungible::ItemOf, AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU16, ConstU32,
+		Currency, EitherOfDiverse, EqualPrivilegeOnly, Everything, Imbalance, InstanceFilter,
+		KeyOwnerProofSystem, LockIdentifier, Nothing, OnUnbalanced, U128CurrencyToVote,
 		WithdrawReasons,
 	},
 	weights::{
@@ -1006,7 +1006,7 @@ parameter_types! {
 	pub const DesiredRunnersUp: u32 = 7;
 	pub const MaxVoters: u32 = 10 * 1000;
 	pub const MaxCandidates: u32 = 1000;
-	pub const ElectionsPhragmenPalletId: fungibles::LockIdentifier = *b"phrelect";
+	pub const ElectionsPhragmenPalletId: LockIdentifier = *b"phrelect";
 }
 
 // Make sure that there are no more than `MaxMembers` members elected via elections-phragmen.
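> Editor's note, not part of the patch: a short sketch of the parsing behaviour that the new `DatabasePruningMode` from the pruning change above introduces. The types and impls are exactly those shown in `pruning_params.rs`; assume the type is in scope (it lives in `sc-cli`'s pruning params module).

```rust
use std::str::FromStr;

fn main() {
	// "--blocks-pruning archive-canonical" keeps only finalized block bodies.
	assert_eq!(
		DatabasePruningMode::from_str("archive-canonical"),
		Ok(DatabasePruningMode::ArchiveCanonical)
	);
	// A bare number such as "--state-pruning 1000" keeps the last 1000 finalized blocks.
	assert_eq!(DatabasePruningMode::from_str("1000"), Ok(DatabasePruningMode::Custom(1000)));
	// Anything else is rejected with the error string produced in `from_str`.
	assert!(DatabasePruningMode::from_str("bogus").is_err());
}
```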
diff --git a/frame/balances/README.md b/frame/balances/README.md
index d32fffbf0e7ad..93e424a89c721 100644
--- a/frame/balances/README.md
+++ b/frame/balances/README.md
@@ -57,7 +57,7 @@ that you need, then you can avoid coupling with the Balances module.
 fungible assets system.
 - [`ReservableCurrency`](https://docs.rs/frame-support/latest/frame_support/traits/trait.ReservableCurrency.html):
 Functions for dealing with assets that can be reserved from an account.
-- [`Lockable`](https://docs.rs/frame-support/latest/frame_support/traits/fungibles/trait.Lockable.html): Functions for
+- [`LockableCurrency`](https://docs.rs/frame-support/latest/frame_support/traits/trait.LockableCurrency.html): Functions for
 dealing with accounts that allow liquidity restrictions.
 - [`Imbalance`](https://docs.rs/frame-support/latest/frame_support/traits/trait.Imbalance.html): Functions for handling
 imbalances between total issuance in the system and account balances. Must be used when a function
@@ -88,13 +88,13 @@ pub type NegativeImbalanceOf<T> = <<T as Config>::Currency as Currency<<T as fr
-	type Currency: fungibles::Lockable<Self::AccountId, Moment = Self::BlockNumber>;
+	type Currency: LockableCurrency<Self::AccountId, Moment = Self::BlockNumber>;
 }
 fn update_ledger(
diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs
index d74de37e993f7..381a0ffceeb85 100644
--- a/frame/balances/src/lib.rs
+++ b/frame/balances/src/lib.rs
@@ -79,7 +79,7 @@
 //! - [`ReservableCurrency`](frame_support::traits::ReservableCurrency):
 //! - [`NamedReservableCurrency`](frame_support::traits::NamedReservableCurrency):
 //! Functions for dealing with assets that can be reserved from an account.
-//! - [`Lockable`](frame_support::traits::fungibles::Lockable): Functions for
+//! - [`LockableCurrency`](frame_support::traits::LockableCurrency): Functions for
 //! dealing with accounts that allow liquidity restrictions.
 //! - [`Imbalance`](frame_support::traits::Imbalance): Functions for handling
 //! imbalances between total issuance in the system and account balances. Must be used when a
@@ -113,13 +113,13 @@
 //! # fn main() {}
 //! ```
 //!
-//! The Staking pallet uses the `fungibles::Lockable` trait to lock a stash account's funds:
-//!
-//! ```
-//! use frame_support::traits::{WithdrawReasons, fungibles, fungibles::Lockable};
+//! The Staking pallet uses the `LockableCurrency` trait to lock a stash account's funds:
+//!
+//! ```
+//! use frame_support::traits::{WithdrawReasons, LockableCurrency};
 //! use sp_runtime::traits::Bounded;
 //! pub trait Config: frame_system::Config {
-//! 	type Currency: fungibles::Lockable<Self::AccountId, Moment = Self::BlockNumber>;
+//! 	type Currency: LockableCurrency<Self::AccountId, Moment = Self::BlockNumber>;
 //! }
 //! # struct StakingLedger<T: Config> {
 //! # 	stash: <T as frame_system::Config>::AccountId,
@@ -171,13 +171,11 @@ use frame_support::{
 	ensure,
 	pallet_prelude::DispatchResult,
 	traits::{
-		tokens::{
-			fungible, fungibles, BalanceStatus as Status, DepositConsequence, WithdrawConsequence,
-		},
+		tokens::{fungible, BalanceStatus as Status, DepositConsequence, WithdrawConsequence},
 		Currency, DefensiveSaturating, ExistenceRequirement,
 		ExistenceRequirement::{AllowDeath, KeepAlive},
-		Get, Imbalance, NamedReservableCurrency, OnUnbalanced, ReservableCurrency, SignedImbalance,
-		StoredMap, TryDrop, WithdrawReasons,
+		Get, Imbalance, LockIdentifier, LockableCurrency, NamedReservableCurrency, OnUnbalanced,
+		ReservableCurrency, SignedImbalance, StoredMap, TryDrop, WithdrawReasons,
 	},
 	WeakBoundedVec,
 };
@@ -664,7 +662,7 @@ impl BitOr for Reasons {
 #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, MaxEncodedLen, TypeInfo)]
 pub struct BalanceLock<Balance> {
 	/// An identifier for this lock. Only one lock may be in existence for each identifier.
- pub id: fungibles::LockIdentifier, + pub id: LockIdentifier, /// The amount which the free balance may not drop below when this lock is in effect. pub amount: Balance, /// If true, then the lock remains in effect even for payment of transaction fees. @@ -2133,7 +2131,7 @@ where } } -impl, I: 'static> fungibles::Lockable for Pallet +impl, I: 'static> LockableCurrency for Pallet where T::Balance: MaybeSerializeDeserialize + Debug, { @@ -2144,7 +2142,7 @@ where // Set a lock on the balance of `who`. // Is a no-op if lock amount is zero or `reasons` `is_none()`. fn set_lock( - id: fungibles::LockIdentifier, + id: LockIdentifier, who: &T::AccountId, amount: T::Balance, reasons: WithdrawReasons, @@ -2166,7 +2164,7 @@ where // Extend a lock on the balance of `who`. // Is a no-op if lock amount is zero or `reasons` `is_none()`. fn extend_lock( - id: fungibles::LockIdentifier, + id: LockIdentifier, who: &T::AccountId, amount: T::Balance, reasons: WithdrawReasons, @@ -2195,7 +2193,7 @@ where Self::update_locks(who, &locks[..]); } - fn remove_lock(id: fungibles::LockIdentifier, who: &T::AccountId) { + fn remove_lock(id: LockIdentifier, who: &T::AccountId) { let mut locks = Self::locks(who); locks.retain(|l| l.id != id); Self::update_locks(who, &locks[..]); diff --git a/frame/balances/src/tests.rs b/frame/balances/src/tests.rs index 44a71b93257db..83944caf9f7ff 100644 --- a/frame/balances/src/tests.rs +++ b/frame/balances/src/tests.rs @@ -28,15 +28,15 @@ macro_rules! decl_tests { use frame_support::{ assert_noop, assert_storage_noop, assert_ok, assert_err, traits::{ - fungibles, fungibles::Lockable, WithdrawReasons, + LockableCurrency, LockIdentifier, WithdrawReasons, Currency, ReservableCurrency, ExistenceRequirement::AllowDeath } }; use pallet_transaction_payment::{ChargeTransactionPayment, Multiplier}; use frame_system::RawOrigin; - const ID_1: fungibles::LockIdentifier = *b"1 "; - const ID_2: fungibles::LockIdentifier = *b"2 "; + const ID_1: LockIdentifier = *b"1 "; + const ID_2: LockIdentifier = *b"2 "; pub const CALL: &<$test as frame_system::Config>::RuntimeCall = &RuntimeCall::Balances(pallet_balances::Call::transfer { dest: 0, value: 0 }); diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index f4cba0c85b083..a467800dfe15b 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -37,7 +37,7 @@ use frame_support::{ parameter_types, storage::child, traits::{ - fungibles::Lockable, BalanceStatus, ConstU32, ConstU64, Contains, Currency, Get, OnIdle, + BalanceStatus, ConstU32, ConstU64, Contains, Currency, Get, LockableCurrency, OnIdle, OnInitialize, ReservableCurrency, WithdrawReasons, }, weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}, diff --git a/frame/conviction-voting/src/benchmarking.rs b/frame/conviction-voting/src/benchmarking.rs index 4bebc6a97c49b..117bb7fe22989 100644 --- a/frame/conviction-voting/src/benchmarking.rs +++ b/frame/conviction-voting/src/benchmarking.rs @@ -23,7 +23,7 @@ use assert_matches::assert_matches; use frame_benchmarking::{account, benchmarks_instance_pallet, whitelist_account}; use frame_support::{ dispatch::RawOrigin, - traits::{Currency, Get}, + traits::{fungible, Currency, Get}, }; use sp_runtime::traits::Bounded; use sp_std::collections::btree_map::BTreeMap; diff --git a/frame/conviction-voting/src/lib.rs b/frame/conviction-voting/src/lib.rs index 992b532fb93ed..3ecc6e56be94e 100644 --- a/frame/conviction-voting/src/lib.rs +++ b/frame/conviction-voting/src/lib.rs @@ -31,7 +31,7 @@ use frame_support::{ 
dispatch::{DispatchError, DispatchResult}, ensure, traits::{ - fungible, fungibles, fungibles::Lockable, Currency, Get, PollStatus, Polling, + fungible, Currency, Get, LockIdentifier, LockableCurrency, PollStatus, Polling, ReservableCurrency, WithdrawReasons, }, }; @@ -60,7 +60,7 @@ mod tests; #[cfg(feature = "runtime-benchmarks")] pub mod benchmarking; -const CONVICTION_VOTING_ID: fungibles::LockIdentifier = *b"pyconvot"; +const CONVICTION_VOTING_ID: LockIdentifier = *b"pyconvot"; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; type BalanceOf = @@ -104,7 +104,7 @@ pub mod pallet { type WeightInfo: WeightInfo; /// Currency type with which voting happens. type Currency: ReservableCurrency - + fungibles::Lockable + + LockableCurrency + fungible::Inspect; /// The implementation of the logic which conducts polls. diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs index 096122cb1caa5..cf954d4800eee 100644 --- a/frame/democracy/src/lib.rs +++ b/frame/democracy/src/lib.rs @@ -157,11 +157,9 @@ use frame_support::{ ensure, traits::{ defensive_prelude::*, - fungibles, - fungibles::Lockable, schedule::{v3::Named as ScheduleNamed, DispatchTime}, - Bounded, Currency, Get, OnUnbalanced, QueryPreimage, ReservableCurrency, StorePreimage, - WithdrawReasons, + Bounded, Currency, Get, LockIdentifier, LockableCurrency, OnUnbalanced, QueryPreimage, + ReservableCurrency, StorePreimage, WithdrawReasons, }, weights::Weight, }; @@ -191,7 +189,7 @@ pub mod benchmarking; pub mod migrations; -const DEMOCRACY_ID: fungibles::LockIdentifier = *b"democrac"; +const DEMOCRACY_ID: LockIdentifier = *b"democrac"; /// A proposal index. pub type PropIndex = u32; @@ -236,7 +234,7 @@ pub mod pallet { /// Currency type for this pallet. type Currency: ReservableCurrency - + fungibles::Lockable; + + LockableCurrency; /// The period between a proposal being approved and enacted. /// diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index 13190237ea784..165a8fcab429b 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -101,8 +101,8 @@ use codec::{Decode, Encode}; use frame_support::{ traits::{ - defensive_prelude::*, fungibles, fungibles::Lockable, ChangeMembers, Contains, - ContainsLengthBound, Currency, CurrencyToVote, Get, InitializeMembers, OnUnbalanced, + defensive_prelude::*, ChangeMembers, Contains, ContainsLengthBound, Currency, + CurrencyToVote, Get, InitializeMembers, LockIdentifier, LockableCurrency, OnUnbalanced, ReservableCurrency, SortedMembers, WithdrawReasons, }, weights::Weight, @@ -199,10 +199,10 @@ pub mod pallet { /// Identifier for the elections-phragmen pallet's lock #[pallet::constant] - type PalletId: Get; + type PalletId: Get; /// The currency that people are electing with. - type Currency: fungibles::Lockable + type Currency: LockableCurrency + ReservableCurrency; /// What to do when the members change. @@ -1274,7 +1274,7 @@ mod tests { } parameter_types! 
{ - pub const ElectionsPhragmenPalletId: fungibles::LockIdentifier = *b"phrelect"; + pub const ElectionsPhragmenPalletId: LockIdentifier = *b"phrelect"; pub const PhragmenMaxVoters: u32 = 1000; pub const PhragmenMaxCandidates: u32 = 100; } diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index 2d307a1a024b5..5a4ef92b1c874 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -620,7 +620,10 @@ mod tests { use frame_support::{ assert_err, parameter_types, - traits::{fungibles, ConstU32, ConstU64, ConstU8, Currency, WithdrawReasons}, + traits::{ + ConstU32, ConstU64, ConstU8, Currency, LockIdentifier, LockableCurrency, + WithdrawReasons, + }, weights::{ConstantMultiplier, IdentityFee, RuntimeDbWeight, Weight, WeightToFee}, }; use frame_system::{Call as SystemCall, ChainContext, LastRuntimeUpgradeInfo}; @@ -1182,11 +1185,11 @@ mod tests { #[test] fn can_pay_for_tx_fee_on_full_lock() { - let id: fungibles::LockIdentifier = *b"0 "; + let id: LockIdentifier = *b"0 "; let execute_with_lock = |lock: WithdrawReasons| { let mut t = new_test_ext(1); t.execute_with(|| { - as fungibles::Lockable>::set_lock( + as LockableCurrency>::set_lock( id, &1, 110, lock, ); let xt = TestXt::new( diff --git a/frame/referenda/src/lib.rs b/frame/referenda/src/lib.rs index 551628fee9159..2bb01baa0cd3a 100644 --- a/frame/referenda/src/lib.rs +++ b/frame/referenda/src/lib.rs @@ -1,3 +1,5 @@ +// This file is part of Substrate. + // Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 @@ -66,12 +68,11 @@ use codec::{Codec, Encode}; use frame_support::{ ensure, traits::{ - fungibles, schedule::{ v3::{Anon as ScheduleAnon, Named as ScheduleNamed}, DispatchTime, }, - Currency, OnUnbalanced, OriginTrait, PollStatus, Polling, QueryPreimage, + Currency, LockIdentifier, OnUnbalanced, OriginTrait, PollStatus, Polling, QueryPreimage, ReservableCurrency, StorePreimage, VoteTally, }, BoundedVec, @@ -132,7 +133,7 @@ macro_rules! 
impl_tracksinfo_get { }; } -const ASSEMBLY_ID: fungibles::LockIdentifier = *b"assembly"; +const ASSEMBLY_ID: LockIdentifier = *b"assembly"; #[frame_support::pallet] pub mod pallet { diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs index 34e12fbcf6adf..c22a2bd2d1f77 100644 --- a/frame/staking/src/pallet/impls.rs +++ b/frame/staking/src/pallet/impls.rs @@ -25,9 +25,8 @@ use frame_support::{ dispatch::WithPostDispatchInfo, pallet_prelude::*, traits::{ - fungibles::Lockable, Currency, CurrencyToVote, Defensive, DefensiveResult, - EstimateNextNewSession, Get, Imbalance, OnUnbalanced, TryCollect, UnixTime, - WithdrawReasons, + Currency, CurrencyToVote, Defensive, DefensiveResult, EstimateNextNewSession, Get, + Imbalance, LockableCurrency, OnUnbalanced, TryCollect, UnixTime, WithdrawReasons, }, weights::Weight, }; diff --git a/frame/staking/src/pallet/mod.rs b/frame/staking/src/pallet/mod.rs index fd0c494fa6723..8fddba2150370 100644 --- a/frame/staking/src/pallet/mod.rs +++ b/frame/staking/src/pallet/mod.rs @@ -24,8 +24,8 @@ use frame_support::{ dispatch::Codec, pallet_prelude::*, traits::{ - fungibles, fungibles::Lockable, Currency, CurrencyToVote, Defensive, DefensiveResult, - DefensiveSaturating, EnsureOrigin, EstimateNextNewSession, Get, OnUnbalanced, TryCollect, + Currency, CurrencyToVote, Defensive, DefensiveResult, DefensiveSaturating, EnsureOrigin, + EstimateNextNewSession, Get, LockIdentifier, LockableCurrency, OnUnbalanced, TryCollect, UnixTime, }, weights::Weight, @@ -50,7 +50,7 @@ use crate::{ ValidatorPrefs, }; -const STAKING_ID: fungibles::LockIdentifier = *b"staking "; +const STAKING_ID: LockIdentifier = *b"staking "; #[frame_support::pallet] pub mod pallet { @@ -78,7 +78,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The staking balance. - type Currency: fungibles::Lockable< + type Currency: LockableCurrency< Self::AccountId, Moment = Self::BlockNumber, Balance = Self::CurrencyBalance, diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index 3a831d9c27cc6..e5ba98fe0c5bb 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -20,7 +20,6 @@ //! NOTE: If you're looking for `parameter_types`, it has moved in to the top-level module. pub mod tokens; -#[allow(deprecated)] pub use tokens::{ currency::{ ActiveIssuanceOf, Currency, LockIdentifier, LockableCurrency, NamedReservableCurrency, diff --git a/frame/support/src/traits/tokens/currency.rs b/frame/support/src/traits/tokens/currency.rs index 29603198e9a2b..48247b6021798 100644 --- a/frame/support/src/traits/tokens/currency.rs +++ b/frame/support/src/traits/tokens/currency.rs @@ -32,10 +32,7 @@ use sp_std::fmt::Debug; mod reservable; pub use reservable::{NamedReservableCurrency, ReservableCurrency}; mod lockable; - -#[deprecated(note = "Deprecated in favour of using fungibles::Lockable trait directly")] -pub use super::fungibles::{LockIdentifier, Lockable as LockableCurrency}; -pub use lockable::VestingSchedule; +pub use lockable::{LockIdentifier, LockableCurrency, VestingSchedule}; /// Abstraction over a fungible assets system. pub trait Currency { diff --git a/frame/support/src/traits/tokens/currency/lockable.rs b/frame/support/src/traits/tokens/currency/lockable.rs index 5b7cad3b5c1d3..a10edd6e3e874 100644 --- a/frame/support/src/traits/tokens/currency/lockable.rs +++ b/frame/support/src/traits/tokens/currency/lockable.rs @@ -17,8 +17,52 @@ //! The lockable currency trait and some associated types. 
-use super::Currency; -use crate::dispatch::DispatchResult; +use super::{super::misc::WithdrawReasons, Currency}; +use crate::{dispatch::DispatchResult, traits::misc::Get}; + +/// An identifier for a lock. Used for disambiguating different locks so that +/// they can be individually replaced or removed. +pub type LockIdentifier = [u8; 8]; + +/// A currency whose accounts can have liquidity restrictions. +pub trait LockableCurrency: Currency { + /// The quantity used to denote time; usually just a `BlockNumber`. + type Moment; + + /// The maximum number of locks a user should have on their account. + type MaxLocks: Get; + + /// Create a new balance lock on account `who`. + /// + /// If the new lock is valid (i.e. not already expired), it will push the struct to + /// the `Locks` vec in storage. Note that you can lock more funds than a user has. + /// + /// If the lock `id` already exists, this will update it. + fn set_lock( + id: LockIdentifier, + who: &AccountId, + amount: Self::Balance, + reasons: WithdrawReasons, + ); + + /// Changes a balance lock (selected by `id`) so that it becomes less liquid in all + /// parameters or creates a new one if it does not exist. + /// + /// Calling `extend_lock` on an existing lock `id` differs from `set_lock` in that it + /// applies the most severe constraints of the two, while `set_lock` replaces the lock + /// with the new parameters. As in, `extend_lock` will set: + /// - maximum `amount` + /// - bitwise mask of all `reasons` + fn extend_lock( + id: LockIdentifier, + who: &AccountId, + amount: Self::Balance, + reasons: WithdrawReasons, + ); + + /// Remove an existing lock. + fn remove_lock(id: LockIdentifier, who: &AccountId); +} /// A vesting schedule over a currency. This allows a particular currency to have vesting limits /// applied to it. diff --git a/frame/support/src/traits/tokens/fungible.rs b/frame/support/src/traits/tokens/fungible.rs index d11959fd7c5d2..05e109b870ec0 100644 --- a/frame/support/src/traits/tokens/fungible.rs +++ b/frame/support/src/traits/tokens/fungible.rs @@ -29,7 +29,6 @@ use sp_runtime::traits::Saturating; mod balanced; mod imbalance; - pub use balanced::{Balanced, Unbalanced}; pub use imbalance::{CreditOf, DebtOf, HandleImbalanceDrop, Imbalance}; diff --git a/frame/support/src/traits/tokens/fungibles.rs b/frame/support/src/traits/tokens/fungibles.rs index 045ecd05134c2..a29cb974fe450 100644 --- a/frame/support/src/traits/tokens/fungibles.rs +++ b/frame/support/src/traits/tokens/fungibles.rs @@ -33,9 +33,7 @@ pub mod metadata; pub use balanced::{Balanced, Unbalanced}; mod imbalance; pub use imbalance::{CreditOf, DebtOf, HandleImbalanceDrop, Imbalance}; -mod lockable; pub mod roles; -pub use lockable::{LockIdentifier, Lockable}; /// Trait for providing balance-inspection access to a set of named fungible assets. pub trait Inspect { diff --git a/frame/support/src/traits/tokens/fungibles/lockable.rs b/frame/support/src/traits/tokens/fungibles/lockable.rs deleted file mode 100644 index 185b40eae9b28..0000000000000 --- a/frame/support/src/traits/tokens/fungibles/lockable.rs +++ /dev/null @@ -1,65 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! The Lockable trait and some associated types. - -use super::{super::misc::WithdrawReasons, currency::Currency}; -use crate::traits::misc::Get; - -/// An identifier for a lock. Used for disambiguating different locks so that -/// they can be individually replaced or removed. -pub type LockIdentifier = [u8; 8]; - -/// A currency whose accounts can have liquidity restrictions. -pub trait Lockable: Currency { - /// The quantity used to denote time; usually just a `BlockNumber`. - type Moment; - - /// The maximum number of locks a user should have on their account. - type MaxLocks: Get; - - /// Create a new balance lock on account `who`. - /// - /// If the new lock is valid (i.e. not already expired), it will push the struct to - /// the `Locks` vec in storage. Note that you can lock more funds than a user has. - /// - /// If the lock `id` already exists, this will update it. - fn set_lock( - id: LockIdentifier, - who: &AccountId, - amount: Self::Balance, - reasons: WithdrawReasons, - ); - - /// Changes a balance lock (selected by `id`) so that it becomes less liquid in all - /// parameters or creates a new one if it does not exist. - /// - /// Calling `extend_lock` on an existing lock `id` differs from `set_lock` in that it - /// applies the most severe constraints of the two, while `set_lock` replaces the lock - /// with the new parameters. As in, `extend_lock` will set: - /// - maximum `amount` - /// - bitwise mask of all `reasons` - fn extend_lock( - id: LockIdentifier, - who: &AccountId, - amount: Self::Balance, - reasons: WithdrawReasons, - ); - - /// Remove an existing lock. - fn remove_lock(id: LockIdentifier, who: &AccountId); -}
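The difference between `set_lock` and `extend_lock` documented above is easy to get wrong, so here is a minimal standalone model of the described semantics (plain Rust, with a `u8` bitmask standing in for `WithdrawReasons`; an illustration only, not the `pallet-balances` implementation):

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
struct Lock {
    amount: u64,
    reasons: u8, // stand-in for the `WithdrawReasons` bitmask
}

// `set_lock` semantics: the new parameters replace the old ones.
fn set_lock(existing: &mut Lock, amount: u64, reasons: u8) {
    *existing = Lock { amount, reasons };
}

// `extend_lock` semantics: keep the most severe constraints of both,
// i.e. the maximum `amount` and the union of the `reasons` bits.
fn extend_lock(existing: &mut Lock, amount: u64, reasons: u8) {
    existing.amount = existing.amount.max(amount);
    existing.reasons |= reasons;
}

fn main() {
    let mut lock = Lock { amount: 100, reasons: 0b01 };
    extend_lock(&mut lock, 50, 0b10);
    // The amount stays at 100 (the maximum); the reasons are OR-ed together.
    assert_eq!(lock, Lock { amount: 100, reasons: 0b11 });
}
```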
diff --git a/frame/vesting/src/lib.rs b/frame/vesting/src/lib.rs index 226f539a740f8..a92f94baf6cf9 100644 --- a/frame/vesting/src/lib.rs +++ b/frame/vesting/src/lib.rs @@ -62,7 +62,7 @@ use frame_support::{ ensure, storage::bounded_vec::BoundedVec, traits::{ - fungibles, fungibles::Lockable, Currency, ExistenceRequirement, Get, VestingSchedule, + Currency, ExistenceRequirement, Get, LockIdentifier, LockableCurrency, VestingSchedule, WithdrawReasons, }, weights::Weight, @@ -83,12 +83,11 @@ pub use weights::WeightInfo; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -type MaxLocksOf = <::Currency as fungibles::Lockable< - ::AccountId, ->>::MaxLocks; +type MaxLocksOf = + <::Currency as LockableCurrency<::AccountId>>::MaxLocks; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; -const VESTING_ID: fungibles::LockIdentifier = *b"vesting "; +const VESTING_ID: LockIdentifier = *b"vesting "; // A value placed in storage that represents the current version of the Vesting storage. // This value is used by `on_runtime_upgrade` to determine whether we run storage migration logic. @@ -160,7 +159,7 @@ pub mod pallet { type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// The currency trait. - type Currency: fungibles::Lockable; + type Currency: LockableCurrency; /// Convert the block number into a balance. type BlockNumberToBalance: Convert>; From 90ab4fafa0982ad71cecf451e5719a53346fcd32 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 9 Dec 2022 11:31:57 +0100 Subject: [PATCH 05/29] Don't indefinitely block on shutting down Tokio (#12885) * Don't indefinitely block on shutting down Tokio Now we wait at most 60 seconds before we shut down the node. Otherwise tasks may be leaked, leading to some data corruption. * Drink less :thinking_face: --- client/cli/src/runner.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/client/cli/src/runner.rs b/client/cli/src/runner.rs index f6edd8444735a..c976c319708c2 100644 --- a/client/cli/src/runner.rs +++ b/client/cli/src/runner.rs @@ -22,7 +22,7 @@ use futures::{future, future::FutureExt, pin_mut, select, Future}; use log::info; use sc_service::{Configuration, Error as ServiceError, TaskManager}; use sc_utils::metrics::{TOKIO_THREADS_ALIVE, TOKIO_THREADS_TOTAL}; -use std::marker::PhantomData; +use std::{marker::PhantomData, time::Duration}; #[cfg(target_family = "unix")] async fn main(func: F) -> std::result::Result<(), E> @@ -147,7 +147,11 @@ impl Runner { self.print_node_infos(); let mut task_manager = self.tokio_runtime.block_on(initialize(self.config))?; let res = self.tokio_runtime.block_on(main(task_manager.future().fuse())); - Ok(res?) + + // Give all futures 60 seconds to shutdown, before tokio "leaks" them. + self.tokio_runtime.shutdown_timeout(Duration::from_secs(60)); + + res.map_err(Into::into) } /// A helper function that runs a command with the configuration of this node.
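For reference, the shutdown pattern adopted above looks as follows in isolation (a minimal standalone sketch assuming the `tokio` crate; the real code lives in `sc-cli`'s `Runner`):

```rust
use std::time::Duration;

fn main() {
    let runtime = tokio::runtime::Runtime::new().expect("failed to create Tokio runtime");

    runtime.block_on(async {
        // Run the node's main future to completion here.
    });

    // Dropping the runtime instead would block until every remaining task
    // has finished. Bound the wait and accept that stragglers are leaked.
    runtime.shutdown_timeout(Duration::from_secs(60));
}
```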
From 47bd959c1ee12e2083feb38bf118fa906ff6fea5 Mon Sep 17 00:00:00 2001 From: Gavin Wood Date: Fri, 9 Dec 2022 10:38:24 +0000 Subject: [PATCH 06/29] General Message Queue Pallet (#12485) * The message queue * Make fully generic * Refactor * Docs * Refactor * Use iter not slice * Per-origin queues * Multi-queue processing * Introduce MaxReady * Remove MaxReady in favour of ready ring * Cleanups * ReadyRing and tests * Stale page reaping * from_components -> from_parts Signed-off-by: Oliver Tale-Yazdi * Move WeightCounter to sp_weights Signed-off-by: Oliver Tale-Yazdi * Add MockedWeightInfo Signed-off-by: Oliver Tale-Yazdi * Deploy to kitchensink Signed-off-by: Oliver Tale-Yazdi * Use WeightCounter Signed-off-by: Oliver Tale-Yazdi * Small fixes and logging Signed-off-by: Oliver Tale-Yazdi * Add service_page Signed-off-by: Oliver Tale-Yazdi * Typo Signed-off-by: Oliver Tale-Yazdi * Move service_page below service_queue Signed-off-by: Oliver Tale-Yazdi * Add service_message Signed-off-by: Oliver Tale-Yazdi * Use correct weight function Signed-off-by: Oliver Tale-Yazdi * Overweight execution * Refactor * Missing file * Fix WeightCounter usage in scheduler Signed-off-by: Oliver Tale-Yazdi * Fix peek_index Take into account that decoding from a mutable slice modifies it. Signed-off-by: Oliver Tale-Yazdi * Add tests and bench service_page_item Signed-off-by: Oliver Tale-Yazdi * Add debug_info Signed-off-by: Oliver Tale-Yazdi * Add no-progress check to service_queues Signed-off-by: Oliver Tale-Yazdi * Add more benches Signed-off-by: Oliver Tale-Yazdi * Bound from_message and try_append_message Signed-off-by: Oliver Tale-Yazdi * Add PageReaped event Signed-off-by: Oliver Tale-Yazdi * Rename BookStateOf and BookStateFor Signed-off-by: Oliver Tale-Yazdi * Update tests and remove logging Signed-off-by: Oliver Tale-Yazdi * Remove redundant per-message origins; add footprint() and sweep_queue() * Move testing stuff to mock.rs Signed-off-by: Oliver Tale-Yazdi * Add integration test Signed-off-by: Oliver Tale-Yazdi * Fix no-progress check Signed-off-by: Oliver Tale-Yazdi * Fix debug_info Signed-off-by: Oliver Tale-Yazdi * Fixup merge and tests Signed-off-by: Oliver Tale-Yazdi * Fix footprint tracking * Introduce * Formatting * OverweightEnqueued event, auto-servicing config item * Update tests and benchmarks Signed-off-by: Oliver Tale-Yazdi * Clippy Signed-off-by: Oliver Tale-Yazdi * Add tests Signed-off-by: Oliver Tale-Yazdi * Provide change handler * Add missing BookStateFor::insert and call QueueChangeHandler Signed-off-by: Oliver Tale-Yazdi * Docs Signed-off-by: Oliver Tale-Yazdi * Update benchmarks and weights Signed-off-by: Oliver Tale-Yazdi * More tests... Signed-off-by: Oliver Tale-Yazdi * Use weight metering functions Signed-off-by: Oliver Tale-Yazdi * weightInfo::process_message_payload is gone Signed-off-by: Oliver Tale-Yazdi * Add defensive_saturating_accrue Signed-off-by: Oliver Tale-Yazdi * Rename WeightCounter to WeightMeter Ctrl+Shift+H should do the trick. Signed-off-by: Oliver Tale-Yazdi * Test on_initialize Signed-off-by: Oliver Tale-Yazdi * Add module docs Signed-off-by: Oliver Tale-Yazdi * Remove origin from MaxMessageLen The message origin is not encoded into the heap and therefore does not influence the max message length anymore. Signed-off-by: Oliver Tale-Yazdi * Add BoundedVec::as_slice Signed-off-by: Oliver Tale-Yazdi * Test Page::{from_message, try_append_message} Signed-off-by: Oliver Tale-Yazdi * Fixup docs Signed-off-by: Oliver Tale-Yazdi * Docs * Do nothing in sweep_queue if the queue does not exist ... otherwise it inserts default values into the storage. Signed-off-by: Oliver Tale-Yazdi * Test ring (un)knitting Signed-off-by: Oliver Tale-Yazdi * Upgrade stress-test Change the test to not assume that all queued messages will be processed in the next block but split it over multiple blocks. Signed-off-by: Oliver Tale-Yazdi * More tests... Signed-off-by: Oliver Tale-Yazdi * Beauty fixes Signed-off-by: Oliver Tale-Yazdi * clippy Signed-off-by: Oliver Tale-Yazdi * Rename BoundedVec::as_slice to as_bounded_slice Conflicts with deref().as_slice() otherwise. Signed-off-by: Oliver Tale-Yazdi * Fix imports Signed-off-by: Oliver Tale-Yazdi * Remove ReadyRing struct Was used for testing only. Instead use 'fn assert_ring' which also checks the service head and backlinks.
Signed-off-by: Oliver Tale-Yazdi * Beauty fixes Signed-off-by: Oliver Tale-Yazdi * Fix stale page watermark Signed-off-by: Oliver Tale-Yazdi * Cleanup Signed-off-by: Oliver Tale-Yazdi * Fix test feature and clippy Signed-off-by: Oliver Tale-Yazdi * QueueChanged handler is called correctly Signed-off-by: Oliver Tale-Yazdi * Update benches Signed-off-by: Oliver Tale-Yazdi * Abstract testing functions Signed-off-by: Oliver Tale-Yazdi * More tests Signed-off-by: Oliver Tale-Yazdi * Cleanup Signed-off-by: Oliver Tale-Yazdi * Clippy Signed-off-by: Oliver Tale-Yazdi * fmt Signed-off-by: Oliver Tale-Yazdi * Simplify tests Signed-off-by: Oliver Tale-Yazdi * Make stuff compile Signed-off-by: Oliver Tale-Yazdi * Extend overweight execution benchmark Signed-off-by: Oliver Tale-Yazdi * Remove TODOs Signed-off-by: Oliver Tale-Yazdi * Test service queue with faulty MessageProcessor Signed-off-by: Oliver Tale-Yazdi * fmt Signed-off-by: Oliver Tale-Yazdi * Update pallet ui tests to 1.65 Signed-off-by: Oliver Tale-Yazdi * More docs Signed-off-by: Oliver Tale-Yazdi * Review doc fixes Co-authored-by: Robert Klotzner Signed-off-by: Oliver Tale-Yazdi * Add weight_limit to extrinsic weight of execute_overweight * Correctly return unused weight * Return actual weight consumed in do_execute_overweight * Review fixes Signed-off-by: Oliver Tale-Yazdi * Set version 7.0.0-dev Signed-off-by: Oliver Tale-Yazdi * Make it compile Signed-off-by: Oliver Tale-Yazdi * Switch message_size to u64 Signed-off-by: Oliver Tale-Yazdi * Switch message_count to u64 Signed-off-by: Oliver Tale-Yazdi * Fix benchmarks Signed-off-by: Oliver Tale-Yazdi * Make CI green Signed-off-by: Oliver Tale-Yazdi * Docs * Update tests Signed-off-by: Oliver Tale-Yazdi * ".git/.scripts/bench-bot.sh" pallet dev pallet_message_queue * Dont mention README.md in the Cargo.toml Signed-off-by: Oliver Tale-Yazdi * Remove reference to readme Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Oliver Tale-Yazdi Co-authored-by: parity-processbot <> Co-authored-by: Robert Klotzner Co-authored-by: Keith Yeung --- Cargo.lock | 40 +- Cargo.toml | 1 + bin/node/runtime/Cargo.toml | 4 + bin/node/runtime/src/lib.rs | 21 + frame/message-queue/Cargo.toml | 53 + frame/message-queue/src/benchmarking.rs | 204 +++ frame/message-queue/src/integration_test.rs | 224 +++ frame/message-queue/src/lib.rs | 1308 +++++++++++++++++ frame/message-queue/src/mock.rs | 312 ++++ frame/message-queue/src/mock_helpers.rs | 185 +++ frame/message-queue/src/tests.rs | 1092 ++++++++++++++ frame/message-queue/src/weights.rs | 216 +++ frame/scheduler/Cargo.toml | 2 + frame/scheduler/src/lib.rs | 3 +- frame/support/src/traits.rs | 6 + frame/support/src/traits/messages.rs | 202 +++ ...age_ensure_span_are_ok_on_wrong_gen.stderr | 6 +- ...re_span_are_ok_on_wrong_gen_unnamed.stderr | 6 +- primitives/core/src/bounded/bounded_vec.rs | 7 + primitives/weights/src/weight_meter.rs | 6 + 20 files changed, 3883 insertions(+), 15 deletions(-) create mode 100644 frame/message-queue/Cargo.toml create mode 100644 frame/message-queue/src/benchmarking.rs create mode 100644 frame/message-queue/src/integration_test.rs create mode 100644 frame/message-queue/src/lib.rs create mode 100644 frame/message-queue/src/mock.rs create mode 100644 frame/message-queue/src/mock_helpers.rs create mode 100644 frame/message-queue/src/tests.rs create mode 100644 frame/message-queue/src/weights.rs create mode 100644 frame/support/src/traits/messages.rs diff --git a/Cargo.lock b/Cargo.lock index 73effefc48da1..41c641cf05963 100644 --- 
a/Cargo.lock +++ b/Cargo.lock @@ -3117,6 +3117,7 @@ dependencies = [ "pallet-indices", "pallet-lottery", "pallet-membership", + "pallet-message-queue", "pallet-mmr", "pallet-multisig", "pallet-nis", @@ -5322,6 +5323,28 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-message-queue" +version = "7.0.0-dev" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "parity-scale-codec", + "rand 0.8.5", + "rand_distr", + "scale-info", + "serde", + "sp-arithmetic", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", + "sp-tracing", + "sp-weights", +] + [[package]] name = "pallet-mmr" version = "4.0.0-dev" @@ -5709,6 +5732,7 @@ dependencies = [ "sp-io", "sp-runtime", "sp-std", + "sp-weights", "substrate-test-utils", ] @@ -8397,9 +8421,9 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8980cafbe98a7ee7a9cc16b32ebce542c77883f512d83fbf2ddc8f6a85ea74c9" +checksum = "333af15b02563b8182cd863f925bd31ef8fa86a0e095d30c091956057d436153" dependencies = [ "bitvec", "cfg-if", @@ -8411,9 +8435,9 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4260c630e8a8a33429d1688eff2f163f24c65a4e1b1578ef6b565061336e4b6f" +checksum = "53f56acbd0743d29ffa08f911ab5397def774ad01bab3786804cf6ee057fb5e1" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -8570,9 +8594,9 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.136" +version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce31e24b01e1e524df96f1c2fdd054405f8d7376249a5110886fb4b658484789" +checksum = "728eb6351430bccb993660dfffc5a72f91ccc1295abaa8ce19b27ebe4f75568b" dependencies = [ "serde_derive", ] @@ -8589,9 +8613,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.136" +version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08597e7152fcd306f41838ed3e37be9eaeed2b61c42e2117266a554fab4662f9" +checksum = "81fa1584d3d1bcacd84c277a0dfe21f5b0f6accf4a23d04d4c6d61f1af522b4c" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 12f2ced0d1d03..eb78d5e104486 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -121,6 +121,7 @@ members = [ "frame/offences/benchmarking", "frame/preimage", "frame/proxy", + "frame/message-queue", "frame/nomination-pools", "frame/nomination-pools/fuzzer", "frame/nomination-pools/benchmarking", diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index 02a2ae292d83e..477545c9ac332 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -75,6 +75,7 @@ pallet-indices = { version = "4.0.0-dev", default-features = false, path = "../. 
pallet-identity = { version = "4.0.0-dev", default-features = false, path = "../../../frame/identity" } pallet-lottery = { version = "4.0.0-dev", default-features = false, path = "../../../frame/lottery" } pallet-membership = { version = "4.0.0-dev", default-features = false, path = "../../../frame/membership" } +pallet-message-queue = { version = "7.0.0-dev", default-features = false, path = "../../../frame/message-queue" } pallet-mmr = { version = "4.0.0-dev", default-features = false, path = "../../../frame/merkle-mountain-range" } pallet-multisig = { version = "4.0.0-dev", default-features = false, path = "../../../frame/multisig" } pallet-nomination-pools = { version = "1.0.0", default-features = false, path = "../../../frame/nomination-pools"} @@ -150,6 +151,7 @@ std = [ "sp-inherents/std", "pallet-lottery/std", "pallet-membership/std", + "pallet-message-queue/std", "pallet-mmr/std", "pallet-multisig/std", "pallet-nomination-pools/std", @@ -229,6 +231,7 @@ runtime-benchmarks = [ "pallet-indices/runtime-benchmarks", "pallet-lottery/runtime-benchmarks", "pallet-membership/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", "pallet-mmr/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", "pallet-nomination-pools-benchmarking/runtime-benchmarks", @@ -282,6 +285,7 @@ try-runtime = [ "pallet-identity/try-runtime", "pallet-lottery/try-runtime", "pallet-membership/try-runtime", + "pallet-message-queue/try-runtime", "pallet-mmr/try-runtime", "pallet-multisig/try-runtime", "pallet-nomination-pools/try-runtime", diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 1bb4dd6f913a6..7cd42be73a19b 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1135,6 +1135,25 @@ impl pallet_bounties::Config for Runtime { type ChildBountyManager = ChildBounties; } +parameter_types! { + /// Allocate at most 20% of each block for message processing. + /// + /// Is set to 20% since the scheduler can already consume a maximum of 80%. + pub MessageQueueServiceWeight: Option = Some(Perbill::from_percent(20) * RuntimeBlockWeights::get().max_block); +} + +impl pallet_message_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = (); + /// NOTE: Always set this to `NoopMessageProcessor` for benchmarking. + type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor; + type Size = u32; + type QueueChangeHandler = (); + type HeapSize = ConstU32<{ 64 * 1024 }>; + type MaxStale = ConstU32<128>; + type ServiceWeight = MessageQueueServiceWeight; +} + parameter_types! 
{ pub const ChildBountyValueMinimum: Balance = 1 * DOLLARS; } @@ -1699,6 +1718,7 @@ construct_runtime!( RankedPolls: pallet_referenda::, RankedCollective: pallet_ranked_collective, FastUnstake: pallet_fast_unstake, + MessageQueue: pallet_message_queue, } ); @@ -1793,6 +1813,7 @@ mod benches { [pallet_indices, Indices] [pallet_lottery, Lottery] [pallet_membership, TechnicalMembership] + [pallet_message_queue, MessageQueue] [pallet_mmr, Mmr] [pallet_multisig, Multisig] [pallet_nomination_pools, NominationPoolsBench::] diff --git a/frame/message-queue/Cargo.toml b/frame/message-queue/Cargo.toml new file mode 100644 index 0000000000000..47d114902f52c --- /dev/null +++ b/frame/message-queue/Cargo.toml @@ -0,0 +1,53 @@ +[package] +authors = ["Parity Technologies "] +edition = "2021" +name = "pallet-message-queue" +version = "7.0.0-dev" +license = "Apache-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME pallet to queue and process messages" + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.1.2", default-features = false, features = ["derive"] } +serde = { version = "1.0.137", optional = true, features = ["derive"] } +log = { version = "0.4.17", default-features = false } + +sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-arithmetic = { version = "6.0.0", default-features = false, path = "../../primitives/arithmetic" } +sp-weights = { version = "4.0.0", default-features = false, path = "../../primitives/weights" } + +frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } + +[dev-dependencies] +sp-tracing = { version = "6.0.0", path = "../../primitives/tracing" } +rand = "0.8.5" +rand_distr = "0.4.3" + +[features] +default = ["std"] +std = [ + "codec/std", + "scale-info/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", + "sp-arithmetic/std", + "sp-weights/std", + "frame-benchmarking?/std", + "frame-support/std", + "frame-system/std", +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", +] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/message-queue/src/benchmarking.rs b/frame/message-queue/src/benchmarking.rs new file mode 100644 index 0000000000000..c0ff20431d00e --- /dev/null +++ b/frame/message-queue/src/benchmarking.rs @@ -0,0 +1,204 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Benchmarking for the message queue pallet. + +#![cfg(feature = "runtime-benchmarks")] +#![allow(unused_assignments)] // Needed for `ready_ring_knit`. + +use super::{mock_helpers::*, Pallet as MessageQueue, *}; + +use frame_benchmarking::{benchmarks, whitelisted_caller}; +use frame_support::traits::Get; +use frame_system::RawOrigin; +use sp_std::prelude::*; + +benchmarks! { + where_clause { + where + // NOTE: We need to generate multiple origins, therefore Origin is `From`. The + // `PartialEq` is for asserting the outcome of the ring (un)knitting and *could* be + // removed if really necessary. + <::MessageProcessor as ProcessMessage>::Origin: From + PartialEq, + ::Size: From, + } + + // Worst case path of `ready_ring_knit`. + ready_ring_knit { + let mid: MessageOriginOf:: = 1.into(); + build_ring::(&[0.into(), mid.clone(), 2.into()]); + unknit::(&mid); + assert_ring::(&[0.into(), 2.into()]); + let mut neighbours = None; + }: { + neighbours = MessageQueue::::ready_ring_knit(&mid).ok(); + } verify { + // The neighbours needs to be modified manually. + BookStateFor::::mutate(&mid, |b| { b.ready_neighbours = neighbours }); + assert_ring::(&[0.into(), 2.into(), mid]); + } + + // Worst case path of `ready_ring_unknit`. + ready_ring_unknit { + build_ring::(&[0.into(), 1.into(), 2.into()]); + assert_ring::(&[0.into(), 1.into(), 2.into()]); + let o: MessageOriginOf:: = 0.into(); + let neighbours = BookStateFor::::get(&o).ready_neighbours.unwrap(); + }: { + MessageQueue::::ready_ring_unknit(&o, neighbours); + } verify { + assert_ring::(&[1.into(), 2.into()]); + } + + // `service_queues` without any queue processing. + service_queue_base { + }: { + MessageQueue::::service_queue(0.into(), &mut WeightMeter::max_limit(), Weight::MAX) + } + + // `service_page` without any message processing but with page completion. + service_page_base_completion { + let origin: MessageOriginOf = 0.into(); + let page = PageOf::::default(); + Pages::::insert(&origin, 0, &page); + let mut book_state = single_page_book::(); + let mut meter = WeightMeter::max_limit(); + let limit = Weight::MAX; + }: { + MessageQueue::::service_page(&origin, &mut book_state, &mut meter, limit) + } + + // `service_page` without any message processing and without page completion. + service_page_base_no_completion { + let origin: MessageOriginOf = 0.into(); + let mut page = PageOf::::default(); + // Mock the storage such that `is_complete` returns `false` but `peek_first` returns `None`. + page.first = 1.into(); + page.remaining = 1.into(); + Pages::::insert(&origin, 0, &page); + let mut book_state = single_page_book::(); + let mut meter = WeightMeter::max_limit(); + let limit = Weight::MAX; + }: { + MessageQueue::::service_page(&origin, &mut book_state, &mut meter, limit) + } + + // Processing a single message from a page. 
+ service_page_item { + let msg = vec![1u8; MaxMessageLenOf::::get() as usize]; + let mut page = page::(&msg.clone()); + let mut book = book_for::(&page); + assert!(page.peek_first().is_some(), "There is one message"); + let mut weight = WeightMeter::max_limit(); + }: { + let status = MessageQueue::::service_page_item(&0u32.into(), 0, &mut book, &mut page, &mut weight, Weight::MAX); + assert_eq!(status, ItemExecutionStatus::Executed(true)); + } verify { + // Check that it was processed. + assert_last_event::(Event::Processed { + hash: T::Hashing::hash(&msg), origin: 0.into(), + weight_used: 1.into_weight(), success: true + }.into()); + let (_, processed, _) = page.peek_index(0).unwrap(); + assert!(processed); + assert_eq!(book.message_count, 0); + } + + // Worst case for calling `bump_service_head`. + bump_service_head { + setup_bump_service_head::(0.into(), 10.into()); + let mut weight = WeightMeter::max_limit(); + }: { + MessageQueue::::bump_service_head(&mut weight); + } verify { + assert_eq!(ServiceHead::::get().unwrap(), 10u32.into()); + assert_eq!(weight.consumed, T::WeightInfo::bump_service_head()); + } + + reap_page { + // Mock the storage to get a *cullable* but not *reapable* page. + let origin: MessageOriginOf = 0.into(); + let mut book = single_page_book::(); + let (page, msgs) = full_page::(); + + for p in 0 .. T::MaxStale::get() * T::MaxStale::get() { + if p == 0 { + Pages::::insert(&origin, p, &page); + } + book.end += 1; + book.count += 1; + book.message_count += msgs as u64; + book.size += page.remaining_size.into() as u64; + } + book.begin = book.end - T::MaxStale::get(); + BookStateFor::::insert(&origin, &book); + assert!(Pages::::contains_key(&origin, 0)); + + }: _(RawOrigin::Signed(whitelisted_caller()), 0u32.into(), 0) + verify { + assert_last_event::(Event::PageReaped{ origin: 0.into(), index: 0 }.into()); + assert!(!Pages::::contains_key(&origin, 0)); + } + + // Worst case for `execute_overweight` where the page is removed as completed. + // + // The worst case occurs when executing the last message in a page after all other messages have been skipped, since `peek_index` is used, which has linear complexity. + execute_overweight_page_removed { + let origin: MessageOriginOf = 0.into(); + let (mut page, msgs) = full_page::(); + // Skip all messages. + for _ in 1..msgs { + page.skip_first(true); + } + page.skip_first(false); + let book = book_for::(&page); + Pages::::insert(&origin, 0, &page); + BookStateFor::::insert(&origin, &book); + }: { + MessageQueue::::execute_overweight(RawOrigin::Signed(whitelisted_caller()).into(), 0u32.into(), 0u32, ((msgs - 1) as u32).into(), Weight::MAX).unwrap() + } + verify { + assert_last_event::(Event::Processed { + hash: T::Hashing::hash(&((msgs - 1) as u32).encode()), origin: 0.into(), + weight_used: Weight::from_parts(1, 1), success: true + }.into()); + assert!(!Pages::::contains_key(&origin, 0), "Page must be removed"); + } + + // Worst case for `execute_overweight` where the page is updated. + execute_overweight_page_updated { + let origin: MessageOriginOf = 0.into(); + let (mut page, msgs) = full_page::(); + // Skip all messages.
+ for _ in 0..msgs { + page.skip_first(false); + } + let book = book_for::(&page); + Pages::::insert(&origin, 0, &page); + BookStateFor::::insert(&origin, &book); + }: { + MessageQueue::::execute_overweight(RawOrigin::Signed(whitelisted_caller()).into(), 0u32.into(), 0u32, ((msgs - 1) as u32).into(), Weight::MAX).unwrap() + } + verify { + assert_last_event::(Event::Processed { + hash: T::Hashing::hash(&((msgs - 1) as u32).encode()), origin: 0.into(), + weight_used: Weight::from_parts(1, 1), success: true + }.into()); + assert!(Pages::::contains_key(&origin, 0), "Page must be updated"); + } + + impl_benchmark_test_suite!(MessageQueue, crate::mock::new_test_ext::(), crate::integration_test::Test); +} diff --git a/frame/message-queue/src/integration_test.rs b/frame/message-queue/src/integration_test.rs new file mode 100644 index 0000000000000..a9b6ee9bd2214 --- /dev/null +++ b/frame/message-queue/src/integration_test.rs @@ -0,0 +1,224 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Stress tests pallet-message-queue. Defines its own runtime config to use larger constants for +//! `HeapSize` and `MaxStale`. + +#![cfg(test)] + +use crate::{ + mock::{ + new_test_ext, CountingMessageProcessor, IntoWeight, MockedWeightInfo, NumMessagesProcessed, + }, + *, +}; + +use crate as pallet_message_queue; +use frame_support::{ + parameter_types, + traits::{ConstU32, ConstU64}, +}; +use rand::{rngs::StdRng, Rng, SeedableRng}; +use rand_distr::Pareto; +use sp_core::H256; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, +}; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event}, + } +); + +parameter_types! 
{ + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); +} +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type RuntimeOrigin = RuntimeOrigin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type RuntimeCall = RuntimeCall; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = ConstU64<250>; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = ConstU32<16>; +} + +parameter_types! { + pub const HeapSize: u32 = 32 * 1024; + pub const MaxStale: u32 = 32; + pub static ServiceWeight: Option = Some(Weight::from_parts(100, 100)); +} + +impl Config for Test { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = MockedWeightInfo; + type MessageProcessor = CountingMessageProcessor; + type Size = u32; + type QueueChangeHandler = (); + type HeapSize = HeapSize; + type MaxStale = MaxStale; + type ServiceWeight = ServiceWeight; +} + +/// Simulates heavy usage by enqueueing and processing large amounts of messages. +/// +/// Best to run with `-r`, `RUST_LOG=info` and `RUSTFLAGS='-Cdebug-assertions=y'`. +/// +/// # Example output +/// +/// ```pre +/// Enqueued 1189 messages across 176 queues. Payload 46.97 KiB +/// Processing 772 of 1189 messages +/// Enqueued 9270 messages across 1559 queues. Payload 131.85 KiB +/// Processing 6262 of 9687 messages +/// Enqueued 5025 messages across 1225 queues. Payload 100.23 KiB +/// Processing 1739 of 8450 messages +/// Enqueued 42061 messages across 6357 queues. Payload 536.29 KiB +/// Processing 11675 of 48772 messages +/// Enqueued 20253 messages across 2420 queues. Payload 288.34 KiB +/// Processing 28711 of 57350 messages +/// Processing all remaining 28639 messages +/// ``` +#[test] +#[ignore] // Only run in the CI. +fn stress_test_enqueue_and_service() { + let blocks = 20; + let max_queues = 10_000; + let max_messages_per_queue = 10_000; + let max_msg_len = MaxMessageLenOf::::get(); + let mut rng = StdRng::seed_from_u64(42); + + new_test_ext::().execute_with(|| { + let mut msgs_remaining = 0; + for _ in 0..blocks { + // Start by enqueuing a large number of messages. + let (enqueued, _) = + enqueue_messages(max_queues, max_messages_per_queue, max_msg_len, &mut rng); + msgs_remaining += enqueued; + + // Pick a fraction of all messages currently in queue and process them. + let processed = rng.gen_range(1..=msgs_remaining); + log::info!("Processing {} of all messages {}", processed, msgs_remaining); + process_messages(processed); // This also advances the block. + msgs_remaining -= processed; + } + log::info!("Processing all remaining {} messages", msgs_remaining); + process_messages(msgs_remaining); + post_conditions(); + }); +} + +/// Enqueue a random number of random messages into a random number of queues. 
+fn enqueue_messages( + max_queues: u32, + max_per_queue: u32, + max_msg_len: u32, + rng: &mut StdRng, +) -> (u32, usize) { + let num_queues = rng.gen_range(1..max_queues); + let mut num_messages = 0; + let mut total_msg_len = 0; + for origin in 0..num_queues { + let num_messages_per_queue = + (rng.sample(Pareto::new(1.0, 1.1).unwrap()) as u32).min(max_per_queue); + + for m in 0..num_messages_per_queue { + let mut message = format!("{}:{}", &origin, &m).into_bytes(); + let msg_len = (rng.sample(Pareto::new(1.0, 1.0).unwrap()) as u32) + .clamp(message.len() as u32, max_msg_len); + message.resize(msg_len as usize, 0); + MessageQueue::enqueue_message( + BoundedSlice::defensive_truncate_from(&message), + origin.into(), + ); + total_msg_len += msg_len; + } + num_messages += num_messages_per_queue; + } + log::info!( + "Enqueued {} messages across {} queues. Payload {:.2} KiB", + num_messages, + num_queues, + total_msg_len as f64 / 1024.0 + ); + (num_messages, total_msg_len as usize) +} + +/// Process the number of messages. +fn process_messages(num_msgs: u32) { + let weight = (num_msgs as u64).into_weight(); + ServiceWeight::set(Some(weight)); + let consumed = next_block(); + + assert_eq!(consumed, weight, "\n{}", MessageQueue::debug_info()); + assert_eq!(NumMessagesProcessed::take(), num_msgs as usize); +} + +/// Returns the weight consumed by `MessageQueue::on_initialize()`. +fn next_block() -> Weight { + MessageQueue::on_finalize(System::block_number()); + System::on_finalize(System::block_number()); + System::set_block_number(System::block_number() + 1); + System::on_initialize(System::block_number()); + MessageQueue::on_initialize(System::block_number()) +} + +/// Assert that the pallet is in the expected post state. +fn post_conditions() { + // All queues are empty. + for (_, book) in BookStateFor::::iter() { + assert!(book.end >= book.begin); + assert_eq!(book.count, 0); + assert_eq!(book.size, 0); + assert_eq!(book.message_count, 0); + assert!(book.ready_neighbours.is_none()); + } + // No pages remain. + assert_eq!(Pages::::iter().count(), 0); + // Service head is gone. + assert!(ServiceHead::::get().is_none()); + // This still works fine. + assert_eq!(MessageQueue::service_queues(Weight::MAX), Weight::zero(), "Nothing left"); + next_block(); +} diff --git a/frame/message-queue/src/lib.rs b/frame/message-queue/src/lib.rs new file mode 100644 index 0000000000000..9b976c48245c9 --- /dev/null +++ b/frame/message-queue/src/lib.rs @@ -0,0 +1,1308 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! # Generalized Message Queue Pallet +//! +//! Provides generalized message queuing and processing capabilities on a per-queue basis for +//! arbitrary use-cases. +//! +//! # Design Goals +//! +//! 1. Minimal assumptions about `Message`s and `MessageOrigin`s. Both should be MEL bounded blobs. +//! This ensures the generality and reusability of the pallet. +//! 
2. Well known and tightly limited pre-dispatch PoV weights, especially for message execution. +//! This is paramount for the success of the pallet since message execution is done in +//! `on_initialize` which must _never_ under-estimate its PoV weight. It also needs a frugal PoV +//! footprint since PoV is scarce and this is (possibly) done in every block. This must also hold +//! in the presence of unpredictable message size distributions. +//! 3. Usable as XCMP, DMP and UMP message/dispatch queue - possibly through adapter types. +//! +//! # Design +//! +//! The pallet has means to enqueue, store and process messages. This is implemented by having +//! *queues* which store enqueued messages and can be *served* to process said messages. A queue is +//! identified by its origin in the `BookStateFor`. Each message has an origin which defines into +//! which queue it will be stored. Messages are stored by being appended to the last [`Page`] of a +//! book. Each book keeps track of its pages by indexing `Pages`. The `ReadyRing` contains all +//! queues which hold at least one unprocessed message and are thereby *ready* to be serviced. The +//! `ServiceHead` indicates which *ready* queue is the next to be serviced. +//! The pallet implements [`frame_support::traits::EnqueueMessage`], +//! [`frame_support::traits::ServiceQueues`] and has [`frame_support::traits::ProcessMessage`] and +//! [`OnQueueChanged`] hooks to communicate with the outside world. +//! +//! NOTE: The storage items are not linked since they are not public. +//! +//! **Message Execution** +//! +//! Executing a message is offloaded to the [`Config::MessageProcessor`] which contains the actual +//! logic of how to handle the message since they are blobs. A message can be temporarily or +//! permanently overweight. The pallet will perpetually try to execute a temporarily overweight +//! message. A permanently overweight message is skipped and must be executed manually. +//! +//! **Pagination** +//! +//! Queues are stored in a *paged* manner by splitting their messages into [`Page`]s. This results +//! in a lot of complexity when implementing the pallet but is completely necessary to achieve the +//! second [Design Goal](#design-goals). The problem comes from the fact that a message can +//! *possibly* be quite large, let's say 64KiB. This then results in a *MEL* of at least 64KiB which +//! results in a PoV of at least 64KiB. Now we have the assumption that most messages are much +//! shorter than their maximum allowed length. This would result in most messages having a +//! pre-dispatch PoV size which is much larger than their post-dispatch PoV size, possibly by a +//! factor of a thousand. Disregarding this observation would cripple the processing power of the +//! pallet since it cannot straighten out this discrepancy at runtime. Conceptually, the +//! implementation is packing as many messages into a single bounded vec as actually fit into the +//! bounds. This reduces the wasted PoV. +//! +//! **Page Data Layout** +//! +//! A Page contains a heap which holds all its messages. The heap is built by concatenating +//! `(ItemHeader, Message)` pairs. The [`ItemHeader`] contains the length of the message which is +//! needed for retrieving it. This layout allows for constant access time of the next message and +//! linear access time for any message in the page. The header must remain minimal to reduce its +//! PoV impact. +//!
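To make the page layout concrete, here is a self-contained sketch of the `(ItemHeader, Message)` concatenation (using a hand-rolled 5-byte header of little-endian length plus a processed flag; the real `ItemHeader` is SCALE-encoded):

```rust
// Append one `(header, payload)` pair to the heap, as described above.
fn append(heap: &mut Vec<u8>, msg: &[u8]) {
    heap.extend_from_slice(&(msg.len() as u32).to_le_bytes());
    heap.push(0); // `is_processed = false`
    heap.extend_from_slice(msg);
}

// Walk the heap linearly: constant time to the next item, linear to any item.
fn walk(heap: &[u8]) -> Vec<&[u8]> {
    let (mut pos, mut out) = (0, Vec::new());
    while pos + 5 <= heap.len() {
        let len = u32::from_le_bytes(heap[pos..pos + 4].try_into().unwrap()) as usize;
        out.push(&heap[pos + 5..pos + 5 + len]);
        pos += 5 + len;
    }
    out
}

fn main() {
    let mut heap = Vec::new();
    append(&mut heap, b"first");
    append(&mut heap, b"second");
    assert_eq!(walk(&heap), vec![b"first".as_slice(), b"second".as_slice()]);
}
```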
+//! **Weight Metering** +//! +//! The pallet utilizes the [`sp_weights::WeightMeter`] to manually track its consumption to always +//! stay within the required limit. This implies that the message processor hook can calculate the +//! weight of a message without executing it. This restricts the possible use-cases but is necessary +//! since the pallet runs in `on_initialize` which has a hard weight limit. The weight meter is used +//! in a way that `can_accrue` and `check_accrue` are always used to check the remaining weight of +//! an operation before committing to it. The process of exiting due to insufficient weight is +//! termed "bailing". +//! +//! # Scenario: Message enqueuing +//! +//! A message `m` is enqueued for origin `o` into queue `Q[o]` through +//! [`frame_support::traits::EnqueueMessage::enqueue_message`]`(m, o)`. +//! +//! First the queue is either loaded if it exists or otherwise created with empty default values. +//! The message is then inserted into the queue by appending it to its last `Page` or by creating a +//! new `Page` just for `m` if it does not fit in there. The number of messages in the `Book` is +//! incremented. +//! +//! `Q[o]` is now *ready* which will eventually result in `m` being processed. +//! +//! # Scenario: Message processing +//! +//! The pallet runs each block in `on_initialize` or when being manually called through +//! [`frame_support::traits::ServiceQueues::service_queues`]. +//! +//! First it tries to "rotate" the `ReadyRing` by one through advancing the `ServiceHead` to the +//! next *ready* queue. It then starts to service this queue by servicing as many pages of it as +//! possible. Servicing a page means to execute as many messages of it as possible. Each executed +//! message is marked as *processed* if the [`Config::MessageProcessor`] returns `Ok`. An event +//! [`Event::Processed`] is emitted afterwards. It is possible that the weight limit of the pallet +//! will never allow a specific message to be executed. In this case it remains as unprocessed and +//! is skipped. This process stops if either there are no more messages in the queue or the +//! remaining weight becomes insufficient to service this queue. If there is enough weight it tries +//! to advance to the next *ready* queue and service it. This continues until there are no more +//! queues on which it can make progress or not enough weight remains to check that. +//!
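The processing loop and the weight metering described above combine roughly as in the following self-contained sketch (toy types with `u64` weights and a `Vec` standing in for the ready ring; the pallet's real `service_queues` additionally rotates pages, marks messages processed and handles overweight ones):

```rust
/// Toy weight meter, mirroring the `check_accrue` idiom described above.
struct Meter { consumed: u64, limit: u64 }

impl Meter {
    /// Consume `w` if and only if it fits entirely; the caller "bails" on `false`.
    fn check_accrue(&mut self, w: u64) -> bool {
        match self.consumed.checked_add(w) {
            Some(total) if total <= self.limit => { self.consumed = total; true },
            _ => false,
        }
    }
}

/// Service ready queues round-robin until the next message no longer fits.
fn service_queues(queues: &mut Vec<Vec<u64>>, limit: u64) -> u64 {
    let mut meter = Meter { consumed: 0, limit };
    let mut ready = 0;
    while !queues.is_empty() {
        // Cost of the next message of the queue under the service head.
        let Some(&cost) = queues[ready].first() else {
            queues.remove(ready); // queue exhausted: "unknit" it
            if queues.is_empty() { break }
            ready %= queues.len();
            continue;
        };
        if !meter.check_accrue(cost) { break } // bail: insufficient weight
        queues[ready].remove(0); // "process" the message
        ready = (ready + 1) % queues.len(); // rotate the ready ring
    }
    meter.consumed
}

fn main() {
    let mut queues = vec![vec![3, 3], vec![4]];
    assert_eq!(service_queues(&mut queues, 7), 7); // processes 3 + 4, then bails
}
```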
+//! # Scenario: Overweight execution +//! +//! A permanently overweight message which was skipped by the message processing will never be +//! executed automatically through `on_initialize` nor by calling +//! [`frame_support::traits::ServiceQueues::service_queues`]. +//! +//! Manual intervention in the form of +//! [`frame_support::traits::ServiceQueues::execute_overweight`] is necessary. Overweight messages +//! emit an [`Event::OverweightEnqueued`] event which can be used to extract the arguments for +//! manual execution. This only works on permanently overweight messages. There is no guarantee that +//! this will work since the message could be part of a stale page and be reaped before execution +//! commences. +//! +//! # Terminology +//! +//! - `Message`: A blob of data into which the pallet has no introspection, defined as +//! [`BoundedSlice>`]. The message length is limited by [`MaxMessageLenOf`] +//! which is calculated from [`Config::HeapSize`] and [`ItemHeader::max_encoded_len()`]. +//! - `MessageOrigin`: A generic *origin* of a message, defined as [`MessageOriginOf`]. The +//! requirements for it are kept minimal to remain as generic as possible. The type is defined in +//! [`frame_support::traits::ProcessMessage::Origin`]. +//! - `Page`: An array of `Message`s, see [`Page`]. Can never be empty. +//! - `Book`: A list of `Page`s, see [`BookState`]. Can be empty. +//! - `Queue`: A `Book` together with a `MessageOrigin` which can be part of the `ReadyRing`. Can +//! be empty. +//! - `ReadyRing`: A double-linked list which contains all *ready* `Queue`s. It chains together the +//! queues via their `ready_neighbours` fields. A `Queue` is *ready* if it contains at least one +//! `Message` which can be processed. Can be empty. +//! - `ServiceHead`: A pointer into the `ReadyRing` to the next `Queue` to be serviced. +//! - (`un`)`processed`: A message is marked as *processed* after it was executed by the pallet. A +//! message which was either not yet executed or could not be executed remains `unprocessed`, +//! which is the default state for a message after being enqueued. +//! - `knitting`/`unknitting`: The means of adding or removing a `Queue` from the `ReadyRing`. +//! - `MEL`: The Max Encoded Length of a type, see [`codec::MaxEncodedLen`]. +//! +//! # Properties +//! +//! **Liveness - Enqueueing** +//! +//! It is always possible to enqueue any message for any `MessageOrigin`. +//! +//! **Liveness - Processing** +//! +//! `on_initialize` always respects its finite weight-limit. +//! +//! **Progress - Enqueueing** +//! +//! An enqueued message immediately becomes *unprocessed* and thereby eligible for execution. +//! +//! **Progress - Processing** +//! +//! The pallet will execute at least one unprocessed message per block, if there is any. Ensuring +//! this property needs careful consideration of the concrete weights, since it is possible that the +//! weight limit of `on_initialize` never allows for the execution of even one message; trivially if +//! the limit is set to zero. `integrity_test` can be used to ensure that this property holds. +//! +//! **Fairness - Enqueueing** +//! +//! Enqueueing a message for a specific `MessageOrigin` does not influence the ability to enqueue a +//! message for the same or any other `MessageOrigin`; guaranteed by **Liveness - Enqueueing**. +//! +//! **Fairness - Processing** +//! +//! The average amount of weight available for message processing is the same for each queue if the +//! number of queues is constant. Creating a new queue must therefore be, possibly economically, +//! expensive. Currently this is achieved by having one queue per para-chain/thread, which keeps the +//! number of queues within `O(n)` and should be "good enough". + +#![cfg_attr(not(feature = "std"), no_std)] + +mod benchmarking; +mod integration_test; +mod mock; +pub mod mock_helpers; +mod tests; +pub mod weights; + +use codec::{Codec, Decode, Encode, MaxEncodedLen}; +use frame_support::{ + defensive, + pallet_prelude::*, + traits::{ + DefensiveTruncateFrom, EnqueueMessage, ExecuteOverweightError, Footprint, ProcessMessage, + ProcessMessageError, ServiceQueues, + }, + BoundedSlice, CloneNoBound, DefaultNoBound, +}; +use frame_system::pallet_prelude::*; +pub use pallet::*; +use scale_info::TypeInfo; +use sp_arithmetic::traits::{BaseArithmetic, Unsigned}; +use sp_runtime::{ + traits::{Hash, One, Zero}, + SaturatedConversion, Saturating, +}; +use sp_std::{fmt::Debug, ops::Deref, prelude::*, vec}; +use sp_weights::WeightMeter; +pub use weights::WeightInfo; + +/// Type for identifying a page.
+type PageIndex = u32; + +/// Data encoded and prefixed to the encoded `MessageItem`. +#[derive(Encode, Decode, PartialEq, MaxEncodedLen, Debug)] +pub struct ItemHeader { + /// The length of this item, not including the size of this header. The next item of the page + /// follows immediately after the payload of this item. + payload_len: Size, + /// Whether this item has been processed. + is_processed: bool, +} + +/// A page of messages. Pages always contain at least one item. +#[derive( + CloneNoBound, Encode, Decode, RuntimeDebugNoBound, DefaultNoBound, TypeInfo, MaxEncodedLen, +)] +#[scale_info(skip_type_params(HeapSize))] +#[codec(mel_bound(Size: MaxEncodedLen))] +pub struct Page + Debug + Clone + Default, HeapSize: Get> { + /// Messages remaining to be processed; this includes overweight messages which have been + /// skipped. + remaining: Size, + /// The size of all remaining messages to be processed. + /// + /// Includes overweight messages outside of the `first` to `last` window. + remaining_size: Size, + /// The number of items before the `first` item in this page. + first_index: Size, + /// The heap-offset of the header of the first message item in this page which is ready for + /// processing. + first: Size, + /// The heap-offset of the header of the last message item in this page. + last: Size, + /// The heap. If `self.offset == self.heap.len()` then the page is empty and should be deleted. + heap: BoundedVec>, +} + +impl< + Size: BaseArithmetic + Unsigned + Copy + Into + Codec + MaxEncodedLen + Debug + Default, + HeapSize: Get, + > Page +{ + /// Create a [`Page`] from one unprocessed message. + fn from_message(message: BoundedSlice>) -> Self { + let payload_len = message.len(); + let data_len = ItemHeader::::max_encoded_len().saturating_add(payload_len); + let payload_len = payload_len.saturated_into(); + let header = ItemHeader:: { payload_len, is_processed: false }; + + let mut heap = Vec::with_capacity(data_len); + header.using_encoded(|h| heap.extend_from_slice(h)); + heap.extend_from_slice(message.deref()); + + Page { + remaining: One::one(), + remaining_size: payload_len, + first_index: Zero::zero(), + first: Zero::zero(), + last: Zero::zero(), + heap: BoundedVec::defensive_truncate_from(heap), + } + } + + /// Try to append one message to a page. + fn try_append_message( + &mut self, + message: BoundedSlice>, + ) -> Result<(), ()> { + let pos = self.heap.len(); + let payload_len = message.len(); + let data_len = ItemHeader::::max_encoded_len().saturating_add(payload_len); + let payload_len = payload_len.saturated_into(); + let header = ItemHeader:: { payload_len, is_processed: false }; + let heap_size: u32 = HeapSize::get().into(); + if (heap_size as usize).saturating_sub(self.heap.len()) < data_len { + // Can't fit. + return Err(()) + } + + let mut heap = sp_std::mem::take(&mut self.heap).into_inner(); + header.using_encoded(|h| heap.extend_from_slice(h)); + heap.extend_from_slice(message.deref()); + self.heap = BoundedVec::defensive_truncate_from(heap); + self.last = pos.saturated_into(); + self.remaining.saturating_inc(); + self.remaining_size.saturating_accrue(payload_len); + Ok(()) + } + + /// Returns the first message in the page without removing it. + /// + /// SAFETY: Does not panic even on corrupted storage. 
+ fn peek_first(&self) -> Option>> { + if self.first > self.last { + return None + } + let f = (self.first.into() as usize).min(self.heap.len()); + let mut item_slice = &self.heap[f..]; + if let Ok(h) = ItemHeader::::decode(&mut item_slice) { + let payload_len = h.payload_len.into() as usize; + if payload_len <= item_slice.len() { + // impossible to truncate since it is sliced up from `self.heap: BoundedVec` + return Some(BoundedSlice::defensive_truncate_from(&item_slice[..payload_len])) + } + } + defensive!("message-queue: heap corruption"); + None + } + + /// Point `first` at the next message, marking the first as processed if `is_processed` is true. + fn skip_first(&mut self, is_processed: bool) { + let f = (self.first.into() as usize).min(self.heap.len()); + if let Ok(mut h) = ItemHeader::decode(&mut &self.heap[f..]) { + if is_processed && !h.is_processed { + h.is_processed = true; + h.using_encoded(|d| self.heap[f..f + d.len()].copy_from_slice(d)); + self.remaining.saturating_dec(); + self.remaining_size.saturating_reduce(h.payload_len); + } + self.first + .saturating_accrue(ItemHeader::::max_encoded_len().saturated_into()); + self.first.saturating_accrue(h.payload_len); + self.first_index.saturating_inc(); + } + } + + /// Return the message with index `index` in the form of `(position, processed, message)`. + fn peek_index(&self, index: usize) -> Option<(usize, bool, &[u8])> { + let mut pos = 0; + let mut item_slice = &self.heap[..]; + let header_len: usize = ItemHeader::::max_encoded_len().saturated_into(); + for _ in 0..index { + let h = ItemHeader::::decode(&mut item_slice).ok()?; + let item_len = h.payload_len.into() as usize; + if item_slice.len() < item_len { + return None + } + item_slice = &item_slice[item_len..]; + pos.saturating_accrue(header_len.saturating_add(item_len)); + } + let h = ItemHeader::::decode(&mut item_slice).ok()?; + if item_slice.len() < h.payload_len.into() as usize { + return None + } + item_slice = &item_slice[..h.payload_len.into() as usize]; + Some((pos, h.is_processed, item_slice)) + } + + /// Set the `is_processed` flag for the item at `pos` to be `true` if not already and decrement + /// the `remaining` counter of the page. + /// + /// Does nothing if no [`ItemHeader`] could be decoded at the given position. + fn note_processed_at_pos(&mut self, pos: usize) { + if let Ok(mut h) = ItemHeader::::decode(&mut &self.heap[pos..]) { + if !h.is_processed { + h.is_processed = true; + h.using_encoded(|d| self.heap[pos..pos + d.len()].copy_from_slice(d)); + self.remaining.saturating_dec(); + self.remaining_size.saturating_reduce(h.payload_len); + } + } + } + + /// Returns whether the page is *complete* which means that no messages remain. + fn is_complete(&self) -> bool { + self.remaining.is_zero() + } +} + +/// A single link in the double-linked Ready Ring list. +#[derive(Clone, Encode, Decode, MaxEncodedLen, TypeInfo, RuntimeDebug, PartialEq)] +pub struct Neighbours { + /// The previous queue. + prev: MessageOrigin, + /// The next queue. + next: MessageOrigin, +}
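Since the ready ring exists only through these `Neighbours` links, "knitting" a queue into the ring (see the terminology above) boils down to three pointer updates. A self-contained sketch with local types (the pallet's `ready_ring_knit` additionally handles the empty-ring case and persists the links to storage):

```rust
use std::collections::BTreeMap;

#[derive(Clone, Copy, Debug, PartialEq)]
struct Neighbours { prev: u32, next: u32 }

/// Knit queue `new` into the ring just before `head` (the service head),
/// i.e. make it the last queue to be serviced: rewire three links.
fn knit(ring: &mut BTreeMap<u32, Neighbours>, head: u32, new: u32) {
    let prev = ring[&head].prev;
    ring.get_mut(&prev).expect("prev exists").next = new;
    ring.get_mut(&head).expect("head exists").prev = new;
    ring.insert(new, Neighbours { prev, next: head });
}

fn main() {
    // A ring of two queues: 0 <-> 2.
    let mut ring = BTreeMap::from([
        (0, Neighbours { prev: 2, next: 2 }),
        (2, Neighbours { prev: 0, next: 0 }),
    ]);
    knit(&mut ring, 0, 1); // knit queue 1 in just before the head 0
    assert_eq!(ring[&2], Neighbours { prev: 0, next: 1 });
    assert_eq!(ring[&1], Neighbours { prev: 2, next: 0 });
    assert_eq!(ring[&0], Neighbours { prev: 1, next: 2 });
}
```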
If this is `>= end`, then there are
+    /// no pages with items to be processed in them.
+    begin: PageIndex,
+    /// One more than the last page with some items to be processed in it.
+    end: PageIndex,
+    /// The number of pages stored at present.
+    ///
+    /// This might be larger than `end-begin`, because we keep pages with unprocessed overweight
+    /// messages outside of the end/begin window.
+    count: PageIndex,
+    /// If this book has any ready pages, then this will be `Some` with the previous and next
+    /// neighbours. This wraps around.
+    ready_neighbours: Option<Neighbours<MessageOrigin>>,
+    /// The number of unprocessed messages stored at present.
+    message_count: u64,
+    /// The total size of all unprocessed messages stored at present.
+    size: u64,
+}
+
+impl<MessageOrigin> Default for BookState<MessageOrigin> {
+    fn default() -> Self {
+        Self { begin: 0, end: 0, count: 0, ready_neighbours: None, message_count: 0, size: 0 }
+    }
+}
+
+/// Handler code for when the items in a queue change.
+pub trait OnQueueChanged<Id> {
+    /// Note that the queue `id` now has `items_count` items in it, taking up `items_size` bytes.
+    fn on_queue_changed(id: Id, items_count: u64, items_size: u64);
+}
+
+impl<Id> OnQueueChanged<Id> for () {
+    fn on_queue_changed(_: Id, _: u64, _: u64) {}
+}
+
+#[frame_support::pallet]
+pub mod pallet {
+    use super::*;
+
+    #[pallet::pallet]
+    #[pallet::generate_store(pub(super) trait Store)]
+    pub struct Pallet<T>(_);
+
+    /// The module configuration trait.
+    #[pallet::config]
+    pub trait Config: frame_system::Config {
+        /// The overarching event type.
+        type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
+
+        /// Weight information for extrinsics in this pallet.
+        type WeightInfo: WeightInfo;
+
+        /// Processor for a message.
+        ///
+        /// Must be set to [`mock_helpers::NoopMessageProcessor`] for benchmarking.
+        /// Other message processors that consume exactly (1, 1) weight for any given message will
+        /// work as well. Otherwise the benchmarking will also measure the weight of the message
+        /// processor, which is not desired.
+        type MessageProcessor: ProcessMessage;
+
+        /// Page/heap size type.
+        type Size: BaseArithmetic
+            + Unsigned
+            + Copy
+            + Into<u32>
+            + Member
+            + Encode
+            + Decode
+            + MaxEncodedLen
+            + TypeInfo
+            + Default;
+
+        /// Code to be called when a message queue changes - either with items introduced or
+        /// removed.
+        type QueueChangeHandler: OnQueueChanged<<Self::MessageProcessor as ProcessMessage>::Origin>;
+
+        /// The size of the page; this implies the maximum message size which can be sent.
+        ///
+        /// A good value depends on the expected message sizes, their weights, the weight that is
+        /// available for processing them and the maximal needed message size. The maximal message
+        /// size is slightly lower than this as defined by [`MaxMessageLenOf`].
+        #[pallet::constant]
+        type HeapSize: Get<Self::Size>;
+
+        /// The maximum number of stale pages (i.e. of overweight messages) allowed before culling
+        /// can happen. Once there are more stale pages than this, then historical pages may be
+        /// dropped, even if they contain unprocessed overweight messages.
+        #[pallet::constant]
+        type MaxStale: Get<u32>;
+
+        /// The amount of weight (if any) which should be provided to the message queue for
+        /// servicing enqueued items.
+        ///
+        /// This may be legitimately `None` in the case that you will call
+        /// `ServiceQueues::service_queues` manually.
+        #[pallet::constant]
+        type ServiceWeight: Get<Option<Weight>>;
+    }
+
+    #[pallet::event]
+    #[pallet::generate_deposit(pub(super) fn deposit_event)]
+    pub enum Event<T: Config> {
+        /// Message discarded due to an inability to decode the item. 
Usually caused by state + /// corruption. + Discarded { hash: T::Hash }, + /// Message discarded due to an error in the `MessageProcessor` (usually a format error). + ProcessingFailed { hash: T::Hash, origin: MessageOriginOf, error: ProcessMessageError }, + /// Message is processed. + Processed { hash: T::Hash, origin: MessageOriginOf, weight_used: Weight, success: bool }, + /// Message placed in overweight queue. + OverweightEnqueued { + hash: T::Hash, + origin: MessageOriginOf, + page_index: PageIndex, + message_index: T::Size, + }, + /// This page was reaped. + PageReaped { origin: MessageOriginOf, index: PageIndex }, + } + + #[pallet::error] + pub enum Error { + /// Page is not reapable because it has items remaining to be processed and is not old + /// enough. + NotReapable, + /// Page to be reaped does not exist. + NoPage, + /// The referenced message could not be found. + NoMessage, + /// The message was already processed and cannot be processed again. + AlreadyProcessed, + /// The message is queued for future execution. + Queued, + /// There is temporarily not enough weight to continue servicing messages. + InsufficientWeight, + } + + /// The index of the first and last (non-empty) pages. + #[pallet::storage] + pub(super) type BookStateFor = + StorageMap<_, Twox64Concat, MessageOriginOf, BookState>, ValueQuery>; + + /// The origin at which we should begin servicing. + #[pallet::storage] + pub(super) type ServiceHead = StorageValue<_, MessageOriginOf, OptionQuery>; + + /// The map of page indices to pages. + #[pallet::storage] + pub(super) type Pages = StorageDoubleMap< + _, + Twox64Concat, + MessageOriginOf, + Twox64Concat, + PageIndex, + Page, + OptionQuery, + >; + + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_initialize(_n: BlockNumberFor) -> Weight { + if let Some(weight_limit) = T::ServiceWeight::get() { + Self::service_queues(weight_limit) + } else { + Weight::zero() + } + } + + /// Check all assumptions about [`crate::Config`]. + fn integrity_test() { + assert!(!MaxMessageLenOf::::get().is_zero(), "HeapSize too low"); + } + } + + #[pallet::call] + impl Pallet { + /// Remove a page which has no more messages remaining to be processed or is stale. + #[pallet::weight(T::WeightInfo::reap_page())] + pub fn reap_page( + origin: OriginFor, + message_origin: MessageOriginOf, + page_index: PageIndex, + ) -> DispatchResult { + let _ = ensure_signed(origin)?; + Self::do_reap_page(&message_origin, page_index) + } + + /// Execute an overweight message. + /// + /// - `origin`: Must be `Signed`. + /// - `message_origin`: The origin from which the message to be executed arrived. + /// - `page`: The page in the queue in which the message to be executed is sitting. + /// - `index`: The index into the queue of the message to be executed. + /// - `weight_limit`: The maximum amount of weight allowed to be consumed in the execution + /// of the message. + /// + /// Benchmark complexity considerations: O(index + weight_limit). 
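+        ///
+        /// A dispatch sketch, assuming a runtime where this pallet is named `MessageQueue` and
+        /// the mock `MessageOrigin` type (illustrative only, not part of this diff):
+        ///
+        /// ```ignore
+        /// // Retry the message at page 0, index 0 of the `Here` queue with 10 units of weight.
+        /// MessageQueue::execute_overweight(
+        ///     RuntimeOrigin::signed(1),
+        ///     MessageOrigin::Here,
+        ///     0,
+        ///     0,
+        ///     Weight::from_parts(10, 10),
+        /// )?;
+        /// ```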
+ #[pallet::weight( + T::WeightInfo::execute_overweight_page_updated().max( + T::WeightInfo::execute_overweight_page_removed()).saturating_add(*weight_limit) + )] + pub fn execute_overweight( + origin: OriginFor, + message_origin: MessageOriginOf, + page: PageIndex, + index: T::Size, + weight_limit: Weight, + ) -> DispatchResultWithPostInfo { + let _ = ensure_signed(origin)?; + let actual_weight = + Self::do_execute_overweight(message_origin, page, index, weight_limit)?; + Ok(Some(actual_weight).into()) + } + } +} + +/// The status of a page after trying to execute its next message. +#[derive(PartialEq, Debug)] +enum PageExecutionStatus { + /// The execution bailed because there was not enough weight remaining. + Bailed, + /// No more messages could be loaded. This does _not_ imply `page.is_complete()`. + /// + /// The reasons for this status are: + /// - The end of the page is reached but there could still be skipped messages. + /// - The storage is corrupted. + NoMore, +} + +/// The status after trying to execute the next item of a [`Page`]. +#[derive(PartialEq, Debug)] +enum ItemExecutionStatus { + /// The execution bailed because there was not enough weight remaining. + Bailed, + /// The item was not found. + NoItem, + /// Whether the execution of an item resulted in it being processed. + /// + /// One reason for `false` would be permanently overweight. + Executed(bool), +} + +/// The status of an attempt to process a message. +#[derive(PartialEq)] +enum MessageExecutionStatus { + /// There is not enough weight remaining at present. + InsufficientWeight, + /// There will never be enough weight. + Overweight, + /// The message was processed successfully. + Processed, + /// The message was processed and resulted in a permanent error. + Unprocessable, +} + +impl Pallet { + /// Knit `origin` into the ready ring right at the end. + /// + /// Return the two ready ring neighbours of `origin`. + fn ready_ring_knit(origin: &MessageOriginOf) -> Result>, ()> { + if let Some(head) = ServiceHead::::get() { + let mut head_book_state = BookStateFor::::get(&head); + let mut head_neighbours = head_book_state.ready_neighbours.take().ok_or(())?; + let tail = head_neighbours.prev; + head_neighbours.prev = origin.clone(); + head_book_state.ready_neighbours = Some(head_neighbours); + BookStateFor::::insert(&head, head_book_state); + + let mut tail_book_state = BookStateFor::::get(&tail); + let mut tail_neighbours = tail_book_state.ready_neighbours.take().ok_or(())?; + tail_neighbours.next = origin.clone(); + tail_book_state.ready_neighbours = Some(tail_neighbours); + BookStateFor::::insert(&tail, tail_book_state); + + Ok(Neighbours { next: head, prev: tail }) + } else { + ServiceHead::::put(origin); + Ok(Neighbours { next: origin.clone(), prev: origin.clone() }) + } + } + + fn ready_ring_unknit(origin: &MessageOriginOf, neighbours: Neighbours>) { + if origin == &neighbours.next { + debug_assert!( + origin == &neighbours.prev, + "unknitting from single item ring; outgoing must be only item" + ); + // Service queue empty. 
+            ServiceHead::<T>::kill();
+        } else {
+            BookStateFor::<T>::mutate(&neighbours.next, |book_state| {
+                if let Some(ref mut n) = book_state.ready_neighbours {
+                    n.prev = neighbours.prev.clone()
+                }
+            });
+            BookStateFor::<T>::mutate(&neighbours.prev, |book_state| {
+                if let Some(ref mut n) = book_state.ready_neighbours {
+                    n.next = neighbours.next.clone()
+                }
+            });
+            if let Some(head) = ServiceHead::<T>::get() {
+                if &head == origin {
+                    ServiceHead::<T>::put(neighbours.next);
+                }
+            } else {
+                defensive!("`ServiceHead` must be some if there was a ready queue");
+            }
+        }
+    }
+
+    /// Tries to bump the current `ServiceHead` to the next ready queue.
+    ///
+    /// Returns the current head if it got bumped and `None` otherwise.
+    fn bump_service_head(weight: &mut WeightMeter) -> Option<MessageOriginOf<T>> {
+        if !weight.check_accrue(T::WeightInfo::bump_service_head()) {
+            return None
+        }
+
+        if let Some(head) = ServiceHead::<T>::get() {
+            let mut head_book_state = BookStateFor::<T>::get(&head);
+            if let Some(head_neighbours) = head_book_state.ready_neighbours.take() {
+                ServiceHead::<T>::put(&head_neighbours.next);
+                Some(head)
+            } else {
+                None
+            }
+        } else {
+            None
+        }
+    }
+
+    fn do_enqueue_message(
+        origin: &MessageOriginOf<T>,
+        message: BoundedSlice<u8, MaxMessageLenOf<T>>,
+    ) {
+        let mut book_state = BookStateFor::<T>::get(origin);
+        book_state.message_count.saturating_inc();
+        book_state
+            .size
+            // This should be payload size, but here the payload *is* the message.
+            .saturating_accrue(message.len() as u64);
+
+        if book_state.end > book_state.begin {
+            debug_assert!(book_state.ready_neighbours.is_some(), "Must be in ready ring if ready");
+            // Already have a page in progress - attempt to append.
+            let last = book_state.end - 1;
+            let mut page = match Pages::<T>::get(origin, last) {
+                Some(p) => p,
+                None => {
+                    defensive!("Corruption: referenced page doesn't exist.");
+                    return
+                },
+            };
+            if page.try_append_message::<T>(message).is_ok() {
+                Pages::<T>::insert(origin, last, &page);
+                BookStateFor::<T>::insert(origin, book_state);
+                return
+            }
+        } else {
+            debug_assert!(
+                book_state.ready_neighbours.is_none(),
+                "Must not be in ready ring if not ready"
+            );
+            // insert into ready queue.
+            match Self::ready_ring_knit(origin) {
+                Ok(neighbours) => book_state.ready_neighbours = Some(neighbours),
+                Err(()) => {
+                    defensive!("Ring state invalid when knitting");
+                },
+            }
+        }
+        // No room on the page or no page - link in a new page.
+        book_state.end.saturating_inc();
+        book_state.count.saturating_inc();
+        let page = Page::from_message::<T>(message);
+        Pages::<T>::insert(origin, book_state.end - 1, page);
+        // NOTE: `T::QueueChangeHandler` is called by the caller.
+        BookStateFor::<T>::insert(origin, book_state);
+    }
+
+    /// Try to execute a single message that was marked as overweight.
+    ///
+    /// The `weight_limit` is the weight that can be consumed to execute the message. The base
+    /// weight of the function itself must be measured by the caller.
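+    ///
+    /// A hedged sketch of the caller-side accounting (`origin`, `page`, `index` and `limit` are
+    /// illustrative bindings, not part of this diff):
+    ///
+    /// ```ignore
+    /// let mut meter = WeightMeter::from_limit(limit);
+    /// // The caller charges the base weight first...
+    /// meter.check_accrue(T::WeightInfo::execute_overweight_page_updated());
+    /// // ...and only then lets the message consume the remainder.
+    /// let consumed = Pallet::<T>::do_execute_overweight(origin, page, index, meter.remaining())?;
+    /// ```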
+ pub fn do_execute_overweight( + origin: MessageOriginOf, + page_index: PageIndex, + index: T::Size, + weight_limit: Weight, + ) -> Result> { + let mut book_state = BookStateFor::::get(&origin); + let mut page = Pages::::get(&origin, page_index).ok_or(Error::::NoPage)?; + let (pos, is_processed, payload) = + page.peek_index(index.into() as usize).ok_or(Error::::NoMessage)?; + let payload_len = payload.len() as u64; + ensure!( + page_index < book_state.begin || + (page_index == book_state.begin && pos < page.first.into() as usize), + Error::::Queued + ); + ensure!(!is_processed, Error::::AlreadyProcessed); + use MessageExecutionStatus::*; + let mut weight_counter = WeightMeter::from_limit(weight_limit); + match Self::process_message_payload( + origin.clone(), + page_index, + index, + payload, + &mut weight_counter, + Weight::MAX, + // ^^^ We never recognise it as permanently overweight, since that would result in an + // additional overweight event being deposited. + ) { + Overweight | InsufficientWeight => Err(Error::::InsufficientWeight), + Unprocessable | Processed => { + page.note_processed_at_pos(pos); + book_state.message_count.saturating_dec(); + book_state.size.saturating_reduce(payload_len); + let page_weight = if page.remaining.is_zero() { + debug_assert!( + page.remaining_size.is_zero(), + "no messages remaining; no space taken; qed" + ); + Pages::::remove(&origin, page_index); + debug_assert!(book_state.count >= 1, "page exists, so book must have pages"); + book_state.count.saturating_dec(); + T::WeightInfo::execute_overweight_page_removed() + // no need to consider .first or ready ring since processing an overweight page + // would not alter that state. + } else { + Pages::::insert(&origin, page_index, page); + T::WeightInfo::execute_overweight_page_updated() + }; + BookStateFor::::insert(&origin, &book_state); + T::QueueChangeHandler::on_queue_changed( + origin, + book_state.message_count, + book_state.size, + ); + Ok(weight_counter.consumed.saturating_add(page_weight)) + }, + } + } + + /// Remove a stale page or one which has no more messages remaining to be processed. + fn do_reap_page(origin: &MessageOriginOf, page_index: PageIndex) -> DispatchResult { + let mut book_state = BookStateFor::::get(origin); + // definitely not reapable if the page's index is no less than the `begin`ning of ready + // pages. + ensure!(page_index < book_state.begin, Error::::NotReapable); + + let page = Pages::::get(origin, page_index).ok_or(Error::::NoPage)?; + + // definitely reapable if the page has no messages in it. + let reapable = page.remaining.is_zero(); + + // also reapable if the page index has dropped below our watermark. + let cullable = || { + let total_pages = book_state.count; + let ready_pages = book_state.end.saturating_sub(book_state.begin).min(total_pages); + + // The number of stale pages - i.e. pages which contain unprocessed overweight messages. + // We would prefer to keep these around but will restrict how far into history they can + // extend if we notice that there's too many of them. + // + // We don't know *where* in history these pages are so we use a dynamic formula which + // reduces the historical time horizon as the stale pages pile up and increases it as + // they reduce. + let stale_pages = total_pages - ready_pages; + + // The maximum number of stale pages (i.e. of overweight messages) allowed before + // culling can happen at all. 
Once there are more stale pages than this, then historical
+            // pages may be dropped, even if they contain unprocessed overweight messages.
+            let max_stale = T::MaxStale::get();
+
+            // The number of stale pages beyond the maximum. If we are not beyond the maximum
+            // then we exit now, since no culling is needed.
+            let overflow = match stale_pages.checked_sub(max_stale + 1) {
+                Some(x) => x + 1,
+                None => return false,
+            };
+
+            // The special formula which tells us how deep into index-history we will keep pages.
+            // As the overflow is greater (and thus the need to drop items from storage is more
+            // urgent) this is reduced, allowing a greater range of pages to be culled.
+            // With a minimum `overflow` (`1`), this returns `max_stale ** 2`, indicating we only
+            // cull beyond that number of indices deep into history.
+            // As the overflow increases, our depth reduces down to a limit of `max_stale`. We
+            // never want to reduce below this since this will certainly allow enough pages to be
+            // culled in order to bring `overflow` back to zero.
+            // For example, with `max_stale = 2` an overflow of `1` yields a backlog of `4`,
+            // while any larger overflow pins the backlog at its minimum of `2`.
+            let backlog = (max_stale * max_stale / overflow).max(max_stale);
+
+            let watermark = book_state.begin.saturating_sub(backlog);
+            page_index < watermark
+        };
+        ensure!(reapable || cullable(), Error::<T>::NotReapable);
+
+        Pages::<T>::remove(origin, page_index);
+        debug_assert!(book_state.count > 0, "reaping a page implies there are pages");
+        book_state.count.saturating_dec();
+        book_state.message_count.saturating_reduce(page.remaining.into() as u64);
+        book_state.size.saturating_reduce(page.remaining_size.into() as u64);
+        BookStateFor::<T>::insert(origin, &book_state);
+        T::QueueChangeHandler::on_queue_changed(
+            origin.clone(),
+            book_state.message_count,
+            book_state.size,
+        );
+        Self::deposit_event(Event::PageReaped { origin: origin.clone(), index: page_index });
+
+        Ok(())
+    }
+
+    /// Execute any messages remaining to be processed in the queue of `origin`, using up to
+    /// `weight_limit` to do so. Any messages which would take more than `overweight_limit` to
+    /// execute are deemed overweight and ignored.
+    fn service_queue(
+        origin: MessageOriginOf<T>,
+        weight: &mut WeightMeter,
+        overweight_limit: Weight,
+    ) -> (bool, Option<MessageOriginOf<T>>) {
+        if !weight.check_accrue(
+            T::WeightInfo::service_queue_base().saturating_add(T::WeightInfo::ready_ring_unknit()),
+        ) {
+            return (false, None)
+        }
+
+        let mut book_state = BookStateFor::<T>::get(&origin);
+        let mut total_processed = 0;
+
+        while book_state.end > book_state.begin {
+            let (processed, status) =
+                Self::service_page(&origin, &mut book_state, weight, overweight_limit);
+            total_processed.saturating_accrue(processed);
+            match status {
+                // Store the page progress and do not go to the next one.
+                PageExecutionStatus::Bailed => break,
+                // Go to the next page if this one is at the end.
+                PageExecutionStatus::NoMore => (),
+            };
+            book_state.begin.saturating_inc();
+        }
+        let next_ready = book_state.ready_neighbours.as_ref().map(|x| x.next.clone());
+        if book_state.begin >= book_state.end && total_processed > 0 {
+            // No longer ready - unknit.
+            if let Some(neighbours) = book_state.ready_neighbours.take() {
+                Self::ready_ring_unknit(&origin, neighbours);
+            } else {
+                defensive!("Freshly processed queue must have been ready");
+            }
+        }
+        BookStateFor::<T>::insert(&origin, &book_state);
+        if total_processed > 0 {
+            T::QueueChangeHandler::on_queue_changed(
+                origin,
+                book_state.message_count,
+                book_state.size,
+            );
+        }
+        (total_processed > 0, next_ready)
+    }
+
+    /// Service as many messages of a page as possible.
+ /// + /// Returns how many messages were processed and the page's status. + fn service_page( + origin: &MessageOriginOf, + book_state: &mut BookStateOf, + weight: &mut WeightMeter, + overweight_limit: Weight, + ) -> (u32, PageExecutionStatus) { + use PageExecutionStatus::*; + if !weight.check_accrue( + T::WeightInfo::service_page_base_completion() + .max(T::WeightInfo::service_page_base_no_completion()), + ) { + return (0, Bailed) + } + + let page_index = book_state.begin; + let mut page = match Pages::::get(origin, page_index) { + Some(p) => p, + None => { + defensive!("message-queue: referenced page not found"); + return (0, NoMore) + }, + }; + + let mut total_processed = 0; + + // Execute as many messages as possible. + let status = loop { + use ItemExecutionStatus::*; + match Self::service_page_item( + origin, + page_index, + book_state, + &mut page, + weight, + overweight_limit, + ) { + Bailed => break PageExecutionStatus::Bailed, + NoItem => break PageExecutionStatus::NoMore, + // Keep going as long as we make progress... + Executed(true) => total_processed.saturating_inc(), + Executed(false) => (), + } + }; + + if page.is_complete() { + debug_assert!(status != Bailed, "we never bail if a page became complete"); + Pages::::remove(origin, page_index); + debug_assert!(book_state.count > 0, "completing a page implies there are pages"); + book_state.count.saturating_dec(); + } else { + Pages::::insert(origin, page_index, page); + } + (total_processed, status) + } + + /// Execute the next message of a page. + pub(crate) fn service_page_item( + origin: &MessageOriginOf, + page_index: PageIndex, + book_state: &mut BookStateOf, + page: &mut PageOf, + weight: &mut WeightMeter, + overweight_limit: Weight, + ) -> ItemExecutionStatus { + // This ugly pre-checking is needed for the invariant + // "we never bail if a page became complete". + if page.is_complete() { + return ItemExecutionStatus::NoItem + } + if !weight.check_accrue(T::WeightInfo::service_page_item()) { + return ItemExecutionStatus::Bailed + } + + let payload = &match page.peek_first() { + Some(m) => m, + None => return ItemExecutionStatus::NoItem, + }[..]; + + use MessageExecutionStatus::*; + let is_processed = match Self::process_message_payload( + origin.clone(), + page_index, + page.first_index, + payload.deref(), + weight, + overweight_limit, + ) { + InsufficientWeight => return ItemExecutionStatus::Bailed, + Processed | Unprocessable => true, + Overweight => false, + }; + + if is_processed { + book_state.message_count.saturating_dec(); + book_state.size.saturating_reduce(payload.len() as u64); + } + page.skip_first(is_processed); + ItemExecutionStatus::Executed(is_processed) + } + + /// Print the pages in each queue and the messages in each page. + /// + /// Processed messages are prefixed with a `*` and the current `begin`ning page with a `>`. 
+ /// + /// # Example output + /// + /// ```text + /// queue Here: + /// page 0: [] + /// > page 1: [] + /// page 2: ["\0weight=4", "\0c", ] + /// page 3: ["\0bigbig 1", ] + /// page 4: ["\0bigbig 2", ] + /// page 5: ["\0bigbig 3", ] + /// ``` + #[cfg(feature = "std")] + pub fn debug_info() -> String { + let mut info = String::new(); + for (origin, book_state) in BookStateFor::::iter() { + let mut queue = format!("queue {:?}:\n", &origin); + let mut pages = Pages::::iter_prefix(&origin).collect::>(); + pages.sort_by(|(a, _), (b, _)| a.cmp(b)); + for (page_index, mut page) in pages.into_iter() { + let page_info = if book_state.begin == page_index { ">" } else { " " }; + let mut page_info = format!( + "{} page {} ({:?} first, {:?} last, {:?} remain): [ ", + page_info, page_index, page.first, page.last, page.remaining + ); + for i in 0..u32::MAX { + if let Some((_, processed, message)) = + page.peek_index(i.try_into().expect("std-only code")) + { + let msg = String::from_utf8_lossy(message.deref()); + if processed { + page_info.push('*'); + } + page_info.push_str(&format!("{:?}, ", msg)); + page.skip_first(true); + } else { + break + } + } + page_info.push_str("]\n"); + queue.push_str(&page_info); + } + info.push_str(&queue); + } + info + } + + /// Process a single message. + /// + /// The base weight of this function needs to be accounted for by the caller. `weight` is the + /// remaining weight to process the message. `overweight_limit` is the maximum weight that a + /// message can ever consume. Messages above this limit are marked as permanently overweight. + fn process_message_payload( + origin: MessageOriginOf, + page_index: PageIndex, + message_index: T::Size, + message: &[u8], + weight: &mut WeightMeter, + overweight_limit: Weight, + ) -> MessageExecutionStatus { + let hash = T::Hashing::hash(message); + use ProcessMessageError::Overweight; + match T::MessageProcessor::process_message(message, origin.clone(), weight.remaining()) { + Err(Overweight(w)) if w.any_gt(overweight_limit) => { + // Permanently overweight. + Self::deposit_event(Event::::OverweightEnqueued { + hash, + origin, + page_index, + message_index, + }); + MessageExecutionStatus::Overweight + }, + Err(Overweight(_)) => { + // Temporarily overweight - save progress and stop processing this + // queue. + MessageExecutionStatus::InsufficientWeight + }, + Err(error) => { + // Permanent error - drop + Self::deposit_event(Event::::ProcessingFailed { hash, origin, error }); + MessageExecutionStatus::Unprocessable + }, + Ok((success, weight_used)) => { + // Success + weight.defensive_saturating_accrue(weight_used); + let event = Event::::Processed { hash, origin, weight_used, success }; + Self::deposit_event(event); + MessageExecutionStatus::Processed + }, + } + } +} + +/// Provides a [`sp_core::Get`] to access the `MEL` of a [`codec::MaxEncodedLen`] type. +pub struct MaxEncodedLenOf(sp_std::marker::PhantomData); +impl Get for MaxEncodedLenOf { + fn get() -> u32 { + T::max_encoded_len() as u32 + } +} + +/// Calculates the maximum message length and exposed it through the [`codec::MaxEncodedLen`] trait. +pub struct MaxMessageLen( + sp_std::marker::PhantomData<(Origin, Size, HeapSize)>, +); +impl, HeapSize: Get> Get + for MaxMessageLen +{ + fn get() -> u32 { + (HeapSize::get().into()).saturating_sub(ItemHeader::::max_encoded_len() as u32) + } +} + +/// The maximal message length. +pub type MaxMessageLenOf = + MaxMessageLen, ::Size, ::HeapSize>; +/// The maximal encoded origin length. 
+pub type MaxOriginLenOf<T> = MaxEncodedLenOf<MessageOriginOf<T>>;
+/// The `MessageOrigin` of this pallet.
+pub type MessageOriginOf<T> = <<T as Config>::MessageProcessor as ProcessMessage>::Origin;
+/// The maximal heap size of a page.
+pub type HeapSizeU32Of<T> = IntoU32<<T as Config>::HeapSize, <T as Config>::Size>;
+/// The [`Page`] of this pallet.
+pub type PageOf<T> = Page<<T as Config>::Size, <T as Config>::HeapSize>;
+/// The [`BookState`] of this pallet.
+pub type BookStateOf<T> = BookState<MessageOriginOf<T>>;
+
+/// Converts a [`sp_core::Get`] which returns a type that can be cast into a `u32` into a `Get`
+/// which returns a `u32`.
+pub struct IntoU32<T, O>(sp_std::marker::PhantomData<(T, O)>);
+impl<T: Get<O>, O: Into<u32>> Get<u32> for IntoU32<T, O> {
+    fn get() -> u32 {
+        T::get().into()
+    }
+}
+
+impl<T: Config> ServiceQueues for Pallet<T> {
+    type OverweightMessageAddress = (MessageOriginOf<T>, PageIndex, T::Size);
+
+    fn service_queues(weight_limit: Weight) -> Weight {
+        // The maximum weight that processing a single message may take.
+        let overweight_limit = weight_limit;
+        let mut weight = WeightMeter::from_limit(weight_limit);
+
+        let mut next = match Self::bump_service_head(&mut weight) {
+            Some(h) => h,
+            None => return weight.consumed,
+        };
+        // The last queue that did not make any progress.
+        // The loop aborts as soon as it arrives at this queue again without making any progress
+        // on other queues in between.
+        let mut last_no_progress = None;
+
+        loop {
+            let (progressed, n) = Self::service_queue(next.clone(), &mut weight, overweight_limit);
+            next = match n {
+                Some(n) =>
+                    if !progressed {
+                        if last_no_progress == Some(n.clone()) {
+                            break
+                        }
+                        if last_no_progress.is_none() {
+                            last_no_progress = Some(next.clone())
+                        }
+                        n
+                    } else {
+                        last_no_progress = None;
+                        n
+                    },
+                None => break,
+            }
+        }
+        weight.consumed
+    }
+
+    /// Execute a single overweight message.
+    ///
+    /// The weight limit must be enough for `execute_overweight` and the message execution itself.
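+    ///
+    /// A hedged sketch with the mock types from the tests (values are illustrative only):
+    ///
+    /// ```ignore
+    /// // (queue origin, page index, message index), as reported by `OverweightEnqueued`.
+    /// let addr = (MessageOrigin::Here, 0, 0);
+    /// let consumed =
+    ///     <MessageQueue as ServiceQueues>::execute_overweight(Weight::from_parts(100, 100), addr)?;
+    /// ```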
+ fn execute_overweight( + weight_limit: Weight, + (message_origin, page, index): Self::OverweightMessageAddress, + ) -> Result { + let mut weight = WeightMeter::from_limit(weight_limit); + if !weight.check_accrue( + T::WeightInfo::execute_overweight_page_removed() + .max(T::WeightInfo::execute_overweight_page_updated()), + ) { + return Err(ExecuteOverweightError::InsufficientWeight) + } + + Pallet::::do_execute_overweight(message_origin, page, index, weight.remaining()).map_err( + |e| match e { + Error::::InsufficientWeight => ExecuteOverweightError::InsufficientWeight, + _ => ExecuteOverweightError::NotFound, + }, + ) + } +} + +impl EnqueueMessage> for Pallet { + type MaxMessageLen = + MaxMessageLen<::Origin, T::Size, T::HeapSize>; + + fn enqueue_message( + message: BoundedSlice, + origin: ::Origin, + ) { + Self::do_enqueue_message(&origin, message); + let book_state = BookStateFor::::get(&origin); + T::QueueChangeHandler::on_queue_changed(origin, book_state.message_count, book_state.size); + } + + fn enqueue_messages<'a>( + messages: impl Iterator>, + origin: ::Origin, + ) { + for message in messages { + Self::do_enqueue_message(&origin, message); + } + let book_state = BookStateFor::::get(&origin); + T::QueueChangeHandler::on_queue_changed(origin, book_state.message_count, book_state.size); + } + + fn sweep_queue(origin: MessageOriginOf) { + if !BookStateFor::::contains_key(&origin) { + return + } + let mut book_state = BookStateFor::::get(&origin); + book_state.begin = book_state.end; + if let Some(neighbours) = book_state.ready_neighbours.take() { + Self::ready_ring_unknit(&origin, neighbours); + } + BookStateFor::::insert(&origin, &book_state); + } + + fn footprint(origin: MessageOriginOf) -> Footprint { + let book_state = BookStateFor::::get(&origin); + Footprint { count: book_state.message_count, size: book_state.size } + } +} diff --git a/frame/message-queue/src/mock.rs b/frame/message-queue/src/mock.rs new file mode 100644 index 0000000000000..bb9942443e226 --- /dev/null +++ b/frame/message-queue/src/mock.rs @@ -0,0 +1,312 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +#![cfg(test)] + +pub use super::mock_helpers::*; +use super::*; + +use crate as pallet_message_queue; +use frame_support::{ + parameter_types, + traits::{ConstU32, ConstU64}, +}; +use sp_core::H256; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, +}; +use sp_std::collections::btree_map::BTreeMap; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event}, + } +); +parameter_types! 
{
+    pub BlockWeights: frame_system::limits::BlockWeights =
+        frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024));
+}
+impl frame_system::Config for Test {
+    type BaseCallFilter = frame_support::traits::Everything;
+    type BlockWeights = ();
+    type BlockLength = ();
+    type DbWeight = ();
+    type RuntimeOrigin = RuntimeOrigin;
+    type Index = u64;
+    type BlockNumber = u64;
+    type Hash = H256;
+    type RuntimeCall = RuntimeCall;
+    type Hashing = BlakeTwo256;
+    type AccountId = u64;
+    type Lookup = IdentityLookup<Self::AccountId>;
+    type Header = Header;
+    type RuntimeEvent = RuntimeEvent;
+    type BlockHashCount = ConstU64<250>;
+    type Version = ();
+    type PalletInfo = PalletInfo;
+    type AccountData = ();
+    type OnNewAccount = ();
+    type OnKilledAccount = ();
+    type SystemWeightInfo = ();
+    type SS58Prefix = ();
+    type OnSetCode = ();
+    type MaxConsumers = ConstU32<16>;
+}
+parameter_types! {
+    pub const HeapSize: u32 = 24;
+    pub const MaxStale: u32 = 2;
+    pub const ServiceWeight: Option<Weight> = Some(Weight::from_parts(10, 10));
+}
+impl Config for Test {
+    type RuntimeEvent = RuntimeEvent;
+    type WeightInfo = MockedWeightInfo;
+    type MessageProcessor = RecordingMessageProcessor;
+    type Size = u32;
+    type QueueChangeHandler = RecordingQueueChangeHandler;
+    type HeapSize = HeapSize;
+    type MaxStale = MaxStale;
+    type ServiceWeight = ServiceWeight;
+}
+
+/// Mocked `WeightInfo` impl which allows setting the weight per call.
+pub struct MockedWeightInfo;
+
+parameter_types! {
+    /// Storage for `MockedWeightInfo`, do not use directly.
+    pub static WeightForCall: BTreeMap<String, Weight> = Default::default();
+}
+
+/// Set the return value for a function from the `WeightInfo` trait.
+impl MockedWeightInfo {
+    /// Set the weight of a specific weight function.
+    pub fn set_weight<T: Config>(call_name: &str, weight: Weight) {
+        let mut calls = WeightForCall::get();
+        calls.insert(call_name.into(), weight);
+        WeightForCall::set(calls);
+    }
+}
+
+impl crate::weights::WeightInfo for MockedWeightInfo {
+    fn reap_page() -> Weight {
+        WeightForCall::get().get("reap_page").copied().unwrap_or_default()
+    }
+    fn execute_overweight_page_updated() -> Weight {
+        WeightForCall::get()
+            .get("execute_overweight_page_updated")
+            .copied()
+            .unwrap_or_default()
+    }
+    fn execute_overweight_page_removed() -> Weight {
+        WeightForCall::get()
+            .get("execute_overweight_page_removed")
+            .copied()
+            .unwrap_or_default()
+    }
+    fn service_page_base_completion() -> Weight {
+        WeightForCall::get()
+            .get("service_page_base_completion")
+            .copied()
+            .unwrap_or_default()
+    }
+    fn service_page_base_no_completion() -> Weight {
+        WeightForCall::get()
+            .get("service_page_base_no_completion")
+            .copied()
+            .unwrap_or_default()
+    }
+    fn service_queue_base() -> Weight {
+        WeightForCall::get().get("service_queue_base").copied().unwrap_or_default()
+    }
+    fn bump_service_head() -> Weight {
+        WeightForCall::get().get("bump_service_head").copied().unwrap_or_default()
+    }
+    fn service_page_item() -> Weight {
+        WeightForCall::get().get("service_page_item").copied().unwrap_or_default()
+    }
+    fn ready_ring_knit() -> Weight {
+        WeightForCall::get().get("ready_ring_knit").copied().unwrap_or_default()
+    }
+    fn ready_ring_unknit() -> Weight {
+        WeightForCall::get().get("ready_ring_unknit").copied().unwrap_or_default()
+    }
+}
+
+parameter_types! {
+    pub static MessagesProcessed: Vec<(Vec<u8>, MessageOrigin)> = vec![];
+}
+
+/// A message processor which records all processed messages into [`MessagesProcessed`].
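+///
+/// Usage sketch (mock-only; assumes the parameter types above):
+///
+/// ```ignore
+/// let r = RecordingMessageProcessor::process_message(b"a", MessageOrigin::Here, Weight::MAX);
+/// assert_eq!(r, Ok((true, Weight::from_parts(1, 1))));
+/// assert_eq!(MessagesProcessed::take(), vec![(b"a".to_vec(), MessageOrigin::Here)]);
+/// ```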
+pub struct RecordingMessageProcessor;
+impl ProcessMessage for RecordingMessageProcessor {
+    /// The transport from where a message originates.
+    type Origin = MessageOrigin;
+
+    /// Process the given message, using no more than `weight_limit` in weight to do so.
+    ///
+    /// Consumes exactly `n` weight of all components if the message starts with `weight=n`,
+    /// and `1` otherwise.
+    /// Errors if the given `weight_limit` is insufficient to process the message, or with the
+    /// respective error if the message is `badformat`, `corrupt` or `unsupported`.
+    fn process_message(
+        message: &[u8],
+        origin: Self::Origin,
+        weight_limit: Weight,
+    ) -> Result<(bool, Weight), ProcessMessageError> {
+        processing_message(message)?;
+
+        let weight = if message.starts_with(&b"weight="[..]) {
+            let mut w: u64 = 0;
+            for &c in &message[7..] {
+                if (b'0'..=b'9').contains(&c) {
+                    w = w * 10 + (c - b'0') as u64;
+                } else {
+                    break
+                }
+            }
+            w
+        } else {
+            1
+        };
+        let weight = Weight::from_parts(weight, weight);
+
+        if weight.all_lte(weight_limit) {
+            let mut m = MessagesProcessed::get();
+            m.push((message.to_vec(), origin));
+            MessagesProcessed::set(m);
+            Ok((true, weight))
+        } else {
+            Err(ProcessMessageError::Overweight(weight))
+        }
+    }
+}
+
+/// Process a mocked message. Messages that end with `badformat`, `corrupt` or `unsupported` will
+/// fail with the respective error.
+fn processing_message(msg: &[u8]) -> Result<(), ProcessMessageError> {
+    let msg = String::from_utf8_lossy(msg);
+    if msg.ends_with("badformat") {
+        Err(ProcessMessageError::BadFormat)
+    } else if msg.ends_with("corrupt") {
+        Err(ProcessMessageError::Corrupt)
+    } else if msg.ends_with("unsupported") {
+        Err(ProcessMessageError::Unsupported)
+    } else {
+        Ok(())
+    }
+}
+
+parameter_types! {
+    pub static NumMessagesProcessed: usize = 0;
+    pub static NumMessagesErrored: usize = 0;
+}
+
+/// Similar to [`RecordingMessageProcessor`] but only counts the number of messages processed and
+/// always consumes one weight per message.
+///
+/// The [`RecordingMessageProcessor`] is a bit too slow for the integration tests.
+pub struct CountingMessageProcessor;
+impl ProcessMessage for CountingMessageProcessor {
+    type Origin = MessageOrigin;
+
+    fn process_message(
+        message: &[u8],
+        _origin: Self::Origin,
+        weight_limit: Weight,
+    ) -> Result<(bool, Weight), ProcessMessageError> {
+        if let Err(e) = processing_message(message) {
+            NumMessagesErrored::set(NumMessagesErrored::get() + 1);
+            return Err(e)
+        }
+        let weight = Weight::from_parts(1, 1);
+
+        if weight.all_lte(weight_limit) {
+            NumMessagesProcessed::set(NumMessagesProcessed::get() + 1);
+            Ok((true, weight))
+        } else {
+            Err(ProcessMessageError::Overweight(weight))
+        }
+    }
+}
+
+parameter_types! {
+    /// Storage for `RecordingQueueChangeHandler`, do not use directly.
+    pub static QueueChanges: Vec<(MessageOrigin, u64, u64)> = vec![];
+}
+
+/// Records all queue changes into [`QueueChanges`].
+pub struct RecordingQueueChangeHandler;
+impl OnQueueChanged<MessageOrigin> for RecordingQueueChangeHandler {
+    fn on_queue_changed(id: MessageOrigin, items_count: u64, items_size: u64) {
+        QueueChanges::mutate(|cs| cs.push((id, items_count, items_size)));
+    }
+}
+
+/// Create new test externalities.
+///
+/// Is generic since it is used by the unit test, integration tests and benchmarks.
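+///
+/// For example (sketch):
+///
+/// ```ignore
+/// new_test_ext::<Test>().execute_with(|| {
+///     // Fresh mock state, block number already set to 1.
+///     assert!(MessagesProcessed::get().is_empty());
+/// });
+/// ```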
+pub fn new_test_ext() -> sp_io::TestExternalities +where + ::BlockNumber: From, +{ + sp_tracing::try_init_simple(); + WeightForCall::take(); + QueueChanges::take(); + NumMessagesErrored::take(); + let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| frame_system::Pallet::::set_block_number(1.into())); + ext +} + +/// Set the weight of a specific weight function. +pub fn set_weight(name: &str, w: Weight) { + MockedWeightInfo::set_weight::(name, w); +} + +/// Assert that exactly these pages are present. Assumes `Here` origin. +pub fn assert_pages(indices: &[u32]) { + assert_eq!(Pages::::iter().count(), indices.len()); + for i in indices { + assert!(Pages::::contains_key(MessageOrigin::Here, i)); + } +} + +/// Build a ring with three queues: `Here`, `There` and `Everywhere(0)`. +pub fn build_triple_ring() { + use MessageOrigin::*; + build_ring::(&[Here, There, Everywhere(0)]) +} + +/// Shim to get rid of the annoying `::` everywhere. +pub fn assert_ring(queues: &[MessageOrigin]) { + super::mock_helpers::assert_ring::(queues); +} + +pub fn knit(queue: &MessageOrigin) { + super::mock_helpers::knit::(queue); +} + +pub fn unknit(queue: &MessageOrigin) { + super::mock_helpers::unknit::(queue); +} diff --git a/frame/message-queue/src/mock_helpers.rs b/frame/message-queue/src/mock_helpers.rs new file mode 100644 index 0000000000000..39d961d8fc558 --- /dev/null +++ b/frame/message-queue/src/mock_helpers.rs @@ -0,0 +1,185 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Std setup helpers for testing and benchmarking. +//! +//! Cannot be put into mock.rs since benchmarks require no-std and mock.rs is std. + +use crate::*; +use frame_support::traits::Defensive; + +/// Converts `Self` into a `Weight` by using `Self` for all components. +pub trait IntoWeight { + fn into_weight(self) -> Weight; +} + +impl IntoWeight for u64 { + fn into_weight(self) -> Weight { + Weight::from_parts(self, self) + } +} + +/// Mocked message origin for testing. +#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, MaxEncodedLen, TypeInfo, Debug)] +pub enum MessageOrigin { + Here, + There, + Everywhere(u32), +} + +impl From for MessageOrigin { + fn from(i: u32) -> Self { + Self::Everywhere(i) + } +} + +/// Processes any message and consumes (1, 1) weight per message. +pub struct NoopMessageProcessor; +impl ProcessMessage for NoopMessageProcessor { + type Origin = MessageOrigin; + + fn process_message( + _message: &[u8], + _origin: Self::Origin, + weight_limit: Weight, + ) -> Result<(bool, Weight), ProcessMessageError> { + let weight = Weight::from_parts(1, 1); + + if weight.all_lte(weight_limit) { + Ok((true, weight)) + } else { + Err(ProcessMessageError::Overweight(weight)) + } + } +} + +/// Create a message from the given data. 
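+///
+/// Defensively truncates if `x` is longer than `N` bytes, so keep test payloads short. For
+/// example (sketch, as used by the tests):
+///
+/// ```ignore
+/// MessageQueue::enqueue_message(msg("weight=2"), MessageOrigin::Here);
+/// ```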
+pub fn msg<N: Get<u32>>(x: &'static str) -> BoundedSlice<'static, u8, N> {
+    BoundedSlice::defensive_truncate_from(x.as_bytes())
+}
+
+pub fn vmsg(x: &'static str) -> Vec<u8> {
+    x.as_bytes().to_vec()
+}
+
+/// Create a page from a single message.
+pub fn page<T: Config>(msg: &[u8]) -> PageOf<T> {
+    PageOf::<T>::from_message::<T>(msg.try_into().unwrap())
+}
+
+pub fn single_page_book<T: Config>() -> BookStateOf<T> {
+    BookState { begin: 0, end: 1, count: 1, ready_neighbours: None, message_count: 0, size: 0 }
+}
+
+pub fn empty_book<T: Config>() -> BookStateOf<T> {
+    BookState { begin: 0, end: 1, count: 1, ready_neighbours: None, message_count: 0, size: 0 }
+}
+
+/// Returns a full page of messages with their index as payload and the number of messages.
+pub fn full_page<T: Config>() -> (PageOf<T>, usize) {
+    let mut msgs = 0;
+    let mut page = PageOf::<T>::default();
+    for i in 0..u32::MAX {
+        let r = i.using_encoded(|d| page.try_append_message::<T>(d.try_into().unwrap()));
+        if r.is_err() {
+            break
+        } else {
+            msgs += 1;
+        }
+    }
+    assert!(msgs > 0, "page must hold at least one message");
+    (page, msgs)
+}
+
+/// Returns a book state describing a single ready page with the given page's message count and
+/// size.
+pub fn book_for<T: Config>(page: &PageOf<T>) -> BookStateOf<T> {
+    BookState {
+        count: 1,
+        begin: 0,
+        end: 1,
+        ready_neighbours: None,
+        message_count: page.remaining.into() as u64,
+        size: page.remaining_size.into() as u64,
+    }
+}
+
+/// Assert the last event that was emitted.
+#[cfg(any(feature = "std", feature = "runtime-benchmarks", test))]
+pub fn assert_last_event<T: Config>(generic_event: <T as Config>::RuntimeEvent) {
+    assert!(
+        !frame_system::Pallet::<T>::block_number().is_zero(),
+        "The genesis block has no events"
+    );
+    frame_system::Pallet::<T>::assert_last_event(generic_event.into());
+}
+
+/// Provide a setup for `bump_service_head`.
+pub fn setup_bump_service_head<T: Config>(
+    current: <<T as Config>::MessageProcessor as ProcessMessage>::Origin,
+    next: <<T as Config>::MessageProcessor as ProcessMessage>::Origin,
+) {
+    let mut book = single_page_book::<T>();
+    book.ready_neighbours = Some(Neighbours::<MessageOriginOf<T>> { prev: next.clone(), next });
+    ServiceHead::<T>::put(&current);
+    BookStateFor::<T>::insert(&current, &book);
+}
+
+/// Knit a queue into the ready-ring and write it back to storage.
+pub fn knit<T: Config>(o: &<<T as Config>::MessageProcessor as ProcessMessage>::Origin) {
+    let mut b = BookStateFor::<T>::get(o);
+    b.ready_neighbours = crate::Pallet::<T>::ready_ring_knit(o).ok().defensive();
+    BookStateFor::<T>::insert(o, b);
+}
+
+/// Unknit a queue from the ready-ring and write it back to storage.
+pub fn unknit<T: Config>(o: &<<T as Config>::MessageProcessor as ProcessMessage>::Origin) {
+    let mut b = BookStateFor::<T>::get(o);
+    crate::Pallet::<T>::ready_ring_unknit(o, b.ready_neighbours.unwrap());
+    b.ready_neighbours = None;
+    BookStateFor::<T>::insert(o, b);
+}
+
+/// Build a ring with the given queues.
+pub fn build_ring<T: Config>(
+    queues: &[<<T as Config>::MessageProcessor as ProcessMessage>::Origin],
+) {
+    for queue in queues {
+        BookStateFor::<T>::insert(queue, empty_book::<T>());
+    }
+    for queue in queues {
+        knit::<T>(queue);
+    }
+    assert_ring::<T>(queues);
+}
+
+/// Check that the Ready Ring consists of `queues` in that exact order.
+///
+/// Also check that all backlinks are valid and that the first element is the service head.
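+///
+/// For example (sketch, with the mock `MessageOrigin` variants in scope):
+///
+/// ```ignore
+/// build_ring::<Test>(&[Here, There, Everywhere(0)]);
+/// assert_ring::<Test>(&[Here, There, Everywhere(0)]); // `Here` is the service head.
+/// ```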
+pub fn assert_ring( + queues: &[<::MessageProcessor as ProcessMessage>::Origin], +) { + for (i, origin) in queues.iter().enumerate() { + let book = BookStateFor::::get(origin); + assert_eq!( + book.ready_neighbours, + Some(Neighbours { + prev: queues[(i + queues.len() - 1) % queues.len()].clone(), + next: queues[(i + 1) % queues.len()].clone(), + }) + ); + } + assert_eq!(ServiceHead::::get(), queues.first().cloned()); +} diff --git a/frame/message-queue/src/tests.rs b/frame/message-queue/src/tests.rs new file mode 100644 index 0000000000000..103fb690ddba7 --- /dev/null +++ b/frame/message-queue/src/tests.rs @@ -0,0 +1,1092 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for Message Queue Pallet. + +#![cfg(test)] + +use crate::{mock::*, *}; + +use frame_support::{assert_noop, assert_ok, assert_storage_noop, StorageNoopGuard}; +use rand::{rngs::StdRng, Rng, SeedableRng}; + +#[test] +fn mocked_weight_works() { + new_test_ext::().execute_with(|| { + assert!(::WeightInfo::service_queue_base().is_zero()); + }); + new_test_ext::().execute_with(|| { + set_weight("service_queue_base", Weight::MAX); + assert_eq!(::WeightInfo::service_queue_base(), Weight::MAX); + }); + // The externalities reset it. 
+    new_test_ext::<Test>().execute_with(|| {
+        assert!(<Test as Config>::WeightInfo::service_queue_base().is_zero());
+    });
+}
+
+#[test]
+fn enqueue_within_one_page_works() {
+    new_test_ext::<Test>().execute_with(|| {
+        use MessageOrigin::*;
+        MessageQueue::enqueue_message(msg("a"), Here);
+        MessageQueue::enqueue_message(msg("b"), Here);
+        MessageQueue::enqueue_message(msg("c"), Here);
+        assert_eq!(MessageQueue::service_queues(2.into_weight()), 2.into_weight());
+        assert_eq!(MessagesProcessed::take(), vec![(b"a".to_vec(), Here), (b"b".to_vec(), Here)]);
+
+        assert_eq!(MessageQueue::service_queues(2.into_weight()), 1.into_weight());
+        assert_eq!(MessagesProcessed::take(), vec![(b"c".to_vec(), Here)]);
+
+        assert_eq!(MessageQueue::service_queues(2.into_weight()), 0.into_weight());
+        assert!(MessagesProcessed::get().is_empty());
+
+        MessageQueue::enqueue_messages([msg("a"), msg("b"), msg("c")].into_iter(), There);
+
+        assert_eq!(MessageQueue::service_queues(2.into_weight()), 2.into_weight());
+        assert_eq!(
+            MessagesProcessed::take(),
+            vec![(b"a".to_vec(), There), (b"b".to_vec(), There),]
+        );
+
+        MessageQueue::enqueue_message(msg("d"), Everywhere(1));
+
+        assert_eq!(MessageQueue::service_queues(2.into_weight()), 2.into_weight());
+        assert_eq!(MessageQueue::service_queues(2.into_weight()), 0.into_weight());
+        assert_eq!(
+            MessagesProcessed::take(),
+            vec![(b"c".to_vec(), There), (b"d".to_vec(), Everywhere(1))]
+        );
+    });
+}
+
+#[test]
+fn queue_priority_retains() {
+    new_test_ext::<Test>().execute_with(|| {
+        use MessageOrigin::*;
+        assert_ring(&[]);
+        MessageQueue::enqueue_message(msg("a"), Everywhere(1));
+        assert_ring(&[Everywhere(1)]);
+        MessageQueue::enqueue_message(msg("b"), Everywhere(2));
+        assert_ring(&[Everywhere(1), Everywhere(2)]);
+        MessageQueue::enqueue_message(msg("c"), Everywhere(3));
+        assert_ring(&[Everywhere(1), Everywhere(2), Everywhere(3)]);
+        MessageQueue::enqueue_message(msg("d"), Everywhere(2));
+        assert_ring(&[Everywhere(1), Everywhere(2), Everywhere(3)]);
+        // service head is 1, it will process a, leaving service head at 2. it also processes b but
+        // does not empty queue 2, so service head will end at 2.
+        assert_eq!(MessageQueue::service_queues(2.into_weight()), 2.into_weight());
+        assert_eq!(
+            MessagesProcessed::take(),
+            vec![(vmsg("a"), Everywhere(1)), (vmsg("b"), Everywhere(2)),]
+        );
+        assert_ring(&[Everywhere(2), Everywhere(3)]);
+        // service head is 2, so will process d first, then c.
+        assert_eq!(MessageQueue::service_queues(2.into_weight()), 2.into_weight());
+        assert_eq!(
+            MessagesProcessed::get(),
+            vec![(vmsg("d"), Everywhere(2)), (vmsg("c"), Everywhere(3)),]
+        );
+        assert_ring(&[]);
+    });
+}
+
+#[test]
+fn queue_priority_reset_once_serviced() {
+    new_test_ext::<Test>().execute_with(|| {
+        use MessageOrigin::*;
+        MessageQueue::enqueue_message(msg("a"), Everywhere(1));
+        MessageQueue::enqueue_message(msg("b"), Everywhere(2));
+        MessageQueue::enqueue_message(msg("c"), Everywhere(3));
+        // service head is 1, it will process a, leaving service head at 2. it also processes b and
+        // empties queue 2, so service head will end at 3.
+        assert_eq!(MessageQueue::service_queues(2.into_weight()), 2.into_weight());
+        MessageQueue::enqueue_message(msg("d"), Everywhere(2));
+        // service head is 3, so will process c first, then d.
+ assert_eq!(MessageQueue::service_queues(2.into_weight()), 2.into_weight()); + + assert_eq!( + MessagesProcessed::get(), + vec![ + (vmsg("a"), Everywhere(1)), + (vmsg("b"), Everywhere(2)), + (vmsg("c"), Everywhere(3)), + (vmsg("d"), Everywhere(2)), + ] + ); + }); +} + +#[test] +fn service_queues_basic_works() { + use MessageOrigin::*; + new_test_ext::().execute_with(|| { + MessageQueue::enqueue_messages(vec![msg("a"), msg("ab"), msg("abc")].into_iter(), Here); + MessageQueue::enqueue_messages(vec![msg("x"), msg("xy"), msg("xyz")].into_iter(), There); + assert_eq!(QueueChanges::take(), vec![(Here, 3, 6), (There, 3, 6)]); + + // Service one message from `Here`. + assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); + assert_eq!(MessagesProcessed::take(), vec![(vmsg("a"), Here)]); + assert_eq!(QueueChanges::take(), vec![(Here, 2, 5)]); + + // Service one message from `There`. + ServiceHead::::set(There.into()); + assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); + assert_eq!(MessagesProcessed::take(), vec![(vmsg("x"), There)]); + assert_eq!(QueueChanges::take(), vec![(There, 2, 5)]); + + // Service the remaining from `Here`. + ServiceHead::::set(Here.into()); + assert_eq!(MessageQueue::service_queues(2.into_weight()), 2.into_weight()); + assert_eq!(MessagesProcessed::take(), vec![(vmsg("ab"), Here), (vmsg("abc"), Here)]); + assert_eq!(QueueChanges::take(), vec![(Here, 0, 0)]); + + // Service all remaining messages. + assert_eq!(MessageQueue::service_queues(Weight::MAX), 2.into_weight()); + assert_eq!(MessagesProcessed::take(), vec![(vmsg("xy"), There), (vmsg("xyz"), There)]); + assert_eq!(QueueChanges::take(), vec![(There, 0, 0)]); + }); +} + +#[test] +fn service_queues_failing_messages_works() { + use MessageOrigin::*; + new_test_ext::().execute_with(|| { + set_weight("service_page_item", 1.into_weight()); + MessageQueue::enqueue_message(msg("badformat"), Here); + MessageQueue::enqueue_message(msg("corrupt"), Here); + MessageQueue::enqueue_message(msg("unsupported"), Here); + // Starts with three pages. + assert_pages(&[0, 1, 2]); + + assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); + assert_last_event::( + Event::ProcessingFailed { + hash: ::Hashing::hash(b"badformat"), + origin: MessageOrigin::Here, + error: ProcessMessageError::BadFormat, + } + .into(), + ); + assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); + assert_last_event::( + Event::ProcessingFailed { + hash: ::Hashing::hash(b"corrupt"), + origin: MessageOrigin::Here, + error: ProcessMessageError::Corrupt, + } + .into(), + ); + assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); + assert_last_event::( + Event::ProcessingFailed { + hash: ::Hashing::hash(b"unsupported"), + origin: MessageOrigin::Here, + error: ProcessMessageError::Unsupported, + } + .into(), + ); + // All pages removed. + assert_pages(&[]); + }); +} + +#[test] +fn reap_page_permanent_overweight_works() { + use MessageOrigin::*; + new_test_ext::().execute_with(|| { + // Create 10 pages more than the stale limit. + let n = (MaxStale::get() + 10) as usize; + for _ in 0..n { + MessageQueue::enqueue_message(msg("weight=2"), Here); + } + assert_eq!(Pages::::iter().count(), n); + assert_eq!(QueueChanges::take().len(), n); + // Mark all pages as stale since their message is permanently overweight. + MessageQueue::service_queues(1.into_weight()); + + // Check that we can reap everything below the watermark. 
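+        // (Worked example of the culling formula, assuming all `n = 12` pages went stale and
+        // `begin == 12`: overflow = 12 - (2 + 1) + 1 = 10, backlog = max(2 * 2 / 10, 2) = 2,
+        // watermark = 12 - 2 = 10, so pages 0..10 are reapable.)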
+ let max_stale = MaxStale::get(); + for i in 0..n as u32 { + let b = BookStateFor::::get(Here); + let stale_pages = n as u32 - i; + let overflow = stale_pages.saturating_sub(max_stale + 1) + 1; + let backlog = (max_stale * max_stale / overflow).max(max_stale); + let watermark = b.begin.saturating_sub(backlog); + + if i >= watermark { + break + } + assert_ok!(MessageQueue::do_reap_page(&Here, i)); + assert_eq!(QueueChanges::take(), vec![(Here, b.message_count - 1, b.size - 8)]); + } + + // Cannot reap any more pages. + for (o, i, _) in Pages::::iter() { + assert_noop!(MessageQueue::do_reap_page(&o, i), Error::::NotReapable); + assert!(QueueChanges::take().is_empty()); + } + }); +} + +#[test] +fn reaping_overweight_fails_properly() { + use MessageOrigin::*; + assert_eq!(MaxStale::get(), 2, "The stale limit is two"); + + new_test_ext::().execute_with(|| { + // page 0 + MessageQueue::enqueue_message(msg("weight=4"), Here); + MessageQueue::enqueue_message(msg("a"), Here); + // page 1 + MessageQueue::enqueue_message(msg("weight=4"), Here); + MessageQueue::enqueue_message(msg("b"), Here); + // page 2 + MessageQueue::enqueue_message(msg("weight=4"), Here); + MessageQueue::enqueue_message(msg("c"), Here); + // page 3 + MessageQueue::enqueue_message(msg("bigbig 1"), Here); + // page 4 + MessageQueue::enqueue_message(msg("bigbig 2"), Here); + // page 5 + MessageQueue::enqueue_message(msg("bigbig 3"), Here); + // Double-check that exactly these pages exist. + assert_pages(&[0, 1, 2, 3, 4, 5]); + + assert_eq!(MessageQueue::service_queues(2.into_weight()), 2.into_weight()); + assert_eq!(MessagesProcessed::take(), vec![(vmsg("a"), Here), (vmsg("b"), Here)]); + // 2 stale now. + + // Nothing reapable yet, because we haven't hit the stale limit. + for (o, i, _) in Pages::::iter() { + assert_noop!(MessageQueue::do_reap_page(&o, i), Error::::NotReapable); + } + assert_pages(&[0, 1, 2, 3, 4, 5]); + + assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); + assert_eq!(MessagesProcessed::take(), vec![(vmsg("c"), Here)]); + // 3 stale now: can take something 4 pages in history. + + assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); + assert_eq!(MessagesProcessed::take(), vec![(vmsg("bigbig 1"), Here)]); + + // Nothing reapable yet, because we haven't hit the stale limit. + for (o, i, _) in Pages::::iter() { + assert_noop!(MessageQueue::do_reap_page(&o, i), Error::::NotReapable); + } + assert_pages(&[0, 1, 2, 4, 5]); + + assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); + assert_eq!(MessagesProcessed::take(), vec![(vmsg("bigbig 2"), Here)]); + assert_pages(&[0, 1, 2, 5]); + + // First is now reapable as it is too far behind the first ready page (5). + assert_ok!(MessageQueue::do_reap_page(&Here, 0)); + // Others not reapable yet, because we haven't hit the stale limit. + for (o, i, _) in Pages::::iter() { + assert_noop!(MessageQueue::do_reap_page(&o, i), Error::::NotReapable); + } + assert_pages(&[1, 2, 5]); + + assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); + assert_eq!(MessagesProcessed::take(), vec![(vmsg("bigbig 3"), Here)]); + + assert_noop!(MessageQueue::do_reap_page(&Here, 0), Error::::NoPage); + assert_noop!(MessageQueue::do_reap_page(&Here, 3), Error::::NoPage); + assert_noop!(MessageQueue::do_reap_page(&Here, 4), Error::::NoPage); + // Still not reapable, since the number of stale pages is only 2. 
+ for (o, i, _) in Pages::::iter() { + assert_noop!(MessageQueue::do_reap_page(&o, i), Error::::NotReapable); + } + }); +} + +#[test] +fn service_queue_bails() { + // Not enough weight for `service_queue_base`. + new_test_ext::().execute_with(|| { + set_weight("service_queue_base", 2.into_weight()); + let mut meter = WeightMeter::from_limit(1.into_weight()); + + assert_storage_noop!(MessageQueue::service_queue(0u32.into(), &mut meter, Weight::MAX)); + assert!(meter.consumed.is_zero()); + }); + // Not enough weight for `ready_ring_unknit`. + new_test_ext::().execute_with(|| { + set_weight("ready_ring_unknit", 2.into_weight()); + let mut meter = WeightMeter::from_limit(1.into_weight()); + + assert_storage_noop!(MessageQueue::service_queue(0u32.into(), &mut meter, Weight::MAX)); + assert!(meter.consumed.is_zero()); + }); + // Not enough weight for `service_queue_base` and `ready_ring_unknit`. + new_test_ext::().execute_with(|| { + set_weight("service_queue_base", 2.into_weight()); + set_weight("ready_ring_unknit", 2.into_weight()); + + let mut meter = WeightMeter::from_limit(3.into_weight()); + assert_storage_noop!(MessageQueue::service_queue(0.into(), &mut meter, Weight::MAX)); + assert!(meter.consumed.is_zero()); + }); +} + +#[test] +fn service_page_works() { + use super::integration_test::Test; // Run with larger page size. + use MessageOrigin::*; + use PageExecutionStatus::*; + new_test_ext::().execute_with(|| { + set_weight("service_page_base_completion", 2.into_weight()); + set_weight("service_page_item", 3.into_weight()); + + let (page, mut msgs) = full_page::(); + assert!(msgs >= 10, "pre-condition: need at least 10 msgs per page"); + let mut book = book_for::(&page); + Pages::::insert(Here, 0, page); + + // Call it a few times each with a random weight limit. + let mut rng = rand::rngs::StdRng::seed_from_u64(42); + while msgs > 0 { + let process = rng.gen_range(0..=msgs); + msgs -= process; + + // Enough weight to process `process` messages. + let mut meter = WeightMeter::from_limit(((2 + (3 + 1) * process) as u64).into_weight()); + System::reset_events(); + let (processed, status) = + crate::Pallet::::service_page(&Here, &mut book, &mut meter, Weight::MAX); + assert_eq!(processed as usize, process); + assert_eq!(NumMessagesProcessed::take(), process); + assert_eq!(System::events().len(), process); + if msgs == 0 { + assert_eq!(status, NoMore); + } else { + assert_eq!(status, Bailed); + } + } + assert!(!Pages::::contains_key(Here, 0), "The page got removed"); + }); +} + +// `service_page` does nothing when called with an insufficient weight limit. +#[test] +fn service_page_bails() { + // Not enough weight for `service_page_base_completion`. + new_test_ext::().execute_with(|| { + set_weight("service_page_base_completion", 2.into_weight()); + let mut meter = WeightMeter::from_limit(1.into_weight()); + + let (page, _) = full_page::(); + let mut book = book_for::(&page); + Pages::::insert(MessageOrigin::Here, 0, page); + + assert_storage_noop!(MessageQueue::service_page( + &MessageOrigin::Here, + &mut book, + &mut meter, + Weight::MAX + )); + assert!(meter.consumed.is_zero()); + }); + // Not enough weight for `service_page_base_no_completion`. 
+ new_test_ext::().execute_with(|| { + set_weight("service_page_base_no_completion", 2.into_weight()); + let mut meter = WeightMeter::from_limit(1.into_weight()); + + let (page, _) = full_page::(); + let mut book = book_for::(&page); + Pages::::insert(MessageOrigin::Here, 0, page); + + assert_storage_noop!(MessageQueue::service_page( + &MessageOrigin::Here, + &mut book, + &mut meter, + Weight::MAX + )); + assert!(meter.consumed.is_zero()); + }); +} + +#[test] +fn service_page_item_bails() { + new_test_ext::().execute_with(|| { + let _guard = StorageNoopGuard::default(); + let (mut page, _) = full_page::(); + let mut weight = WeightMeter::from_limit(10.into_weight()); + let overweight_limit = 10.into_weight(); + set_weight("service_page_item", 11.into_weight()); + + assert_eq!( + MessageQueue::service_page_item( + &MessageOrigin::Here, + 0, + &mut book_for::(&page), + &mut page, + &mut weight, + overweight_limit, + ), + ItemExecutionStatus::Bailed + ); + }); +} + +#[test] +fn bump_service_head_works() { + use MessageOrigin::*; + new_test_ext::().execute_with(|| { + // Create a ready ring with three queues. + BookStateFor::::insert(Here, empty_book::()); + knit(&Here); + BookStateFor::::insert(There, empty_book::()); + knit(&There); + BookStateFor::::insert(Everywhere(0), empty_book::()); + knit(&Everywhere(0)); + + // Bump 99 times. + for i in 0..99 { + let current = MessageQueue::bump_service_head(&mut WeightMeter::max_limit()).unwrap(); + assert_eq!(current, [Here, There, Everywhere(0)][i % 3]); + } + + // The ready ring is intact and the service head is still `Here`. + assert_ring(&[Here, There, Everywhere(0)]); + }); +} + +/// `bump_service_head` does nothing when called with an insufficient weight limit. +#[test] +fn bump_service_head_bails() { + new_test_ext::().execute_with(|| { + set_weight("bump_service_head", 2.into_weight()); + setup_bump_service_head::(0.into(), 10.into()); + + let _guard = StorageNoopGuard::default(); + let mut meter = WeightMeter::from_limit(1.into_weight()); + assert!(MessageQueue::bump_service_head(&mut meter).is_none()); + assert_eq!(meter.consumed, 0.into_weight()); + }); +} + +#[test] +fn bump_service_head_trivial_works() { + new_test_ext::().execute_with(|| { + set_weight("bump_service_head", 2.into_weight()); + let mut meter = WeightMeter::max_limit(); + + assert_eq!(MessageQueue::bump_service_head(&mut meter), None, "Cannot bump"); + assert_eq!(meter.consumed, 2.into_weight()); + + setup_bump_service_head::(0.into(), 1.into()); + + assert_eq!(MessageQueue::bump_service_head(&mut meter), Some(0.into())); + assert_eq!(ServiceHead::::get().unwrap(), 1.into(), "Bumped the head"); + assert_eq!(meter.consumed, 4.into_weight()); + + assert_eq!(MessageQueue::bump_service_head(&mut meter), None, "Cannot bump"); + assert_eq!(meter.consumed, 6.into_weight()); + }); +} + +#[test] +fn bump_service_head_no_head_noops() { + use MessageOrigin::*; + new_test_ext::().execute_with(|| { + // Create a ready ring with three queues. + BookStateFor::::insert(Here, empty_book::()); + knit(&Here); + BookStateFor::::insert(There, empty_book::()); + knit(&There); + BookStateFor::::insert(Everywhere(0), empty_book::()); + knit(&Everywhere(0)); + + // But remove the service head. + ServiceHead::::kill(); + + // Nothing happens. 
+		assert_storage_noop!(MessageQueue::bump_service_head(&mut WeightMeter::max_limit()));
+	});
+}
+
+#[test]
+fn service_page_item_consumes_correct_weight() {
+	new_test_ext::().execute_with(|| {
+		let mut page = page::(b"weight=3");
+		let mut weight = WeightMeter::from_limit(10.into_weight());
+		let overweight_limit = 0.into_weight();
+		set_weight("service_page_item", 2.into_weight());
+
+		assert_eq!(
+			MessageQueue::service_page_item(
+				&MessageOrigin::Here,
+				0,
+				&mut book_for::(&page),
+				&mut page,
+				&mut weight,
+				overweight_limit
+			),
+			ItemExecutionStatus::Executed(true)
+		);
+		assert_eq!(weight.consumed, 5.into_weight());
+	});
+}
+
+/// `service_page_item` skips a permanently `Overweight` message and marks it as `unprocessed`.
+#[test]
+fn service_page_item_skips_perm_overweight_message() {
+	new_test_ext::().execute_with(|| {
+		let mut page = page::(b"TooMuch");
+		let mut weight = WeightMeter::from_limit(2.into_weight());
+		let overweight_limit = 0.into_weight();
+		set_weight("service_page_item", 2.into_weight());
+
+		assert_eq!(
+			crate::Pallet::::service_page_item(
+				&MessageOrigin::Here,
+				0,
+				&mut book_for::(&page),
+				&mut page,
+				&mut weight,
+				overweight_limit
+			),
+			ItemExecutionStatus::Executed(false)
+		);
+		assert_eq!(weight.consumed, 2.into_weight());
+		assert_last_event::(
+			Event::OverweightEnqueued {
+				hash: ::Hashing::hash(b"TooMuch"),
+				origin: MessageOrigin::Here,
+				message_index: 0,
+				page_index: 0,
+			}
+			.into(),
+		);
+
+		// Check that the message was skipped.
+		let (pos, processed, payload) = page.peek_index(0).unwrap();
+		assert_eq!(pos, 0);
+		assert!(!processed);
+		assert_eq!(payload, b"TooMuch".encode());
+	});
+}
+
+#[test]
+fn peek_index_works() {
+	use super::integration_test::Test; // Run with larger page size.
+	new_test_ext::().execute_with(|| {
+		// Fill a page with messages.
+		let (mut page, msgs) = full_page::();
+		let msg_enc_len = ItemHeader::<::Size>::max_encoded_len() + 4;
+
+		for i in 0..msgs {
+			// Skip all even messages.
+			page.skip_first(i % 2 == 0);
+			// Peek each message and check that it is correct.
+			let (pos, processed, payload) = page.peek_index(i).unwrap();
+			assert_eq!(pos, msg_enc_len * i);
+			assert_eq!(processed, i % 2 == 0);
+			// `full_page` uses the index as payload.
+			assert_eq!(payload, (i as u32).encode());
+		}
+	});
+}
+
+#[test]
+fn peek_first_and_skip_first_works() {
+	use super::integration_test::Test; // Run with larger page size.
+	new_test_ext::().execute_with(|| {
+		// Fill a page with messages.
+		let (mut page, msgs) = full_page::();
+
+		for i in 0..msgs {
+			let msg = page.peek_first().unwrap();
+			// `full_page` uses the index as payload.
+			assert_eq!(msg.deref(), (i as u32).encode());
+			page.skip_first(i % 2 == 0); // True or false should not matter here.
+		}
+		assert!(page.peek_first().is_none(), "Page must be at the end");
+
+		// Check that all messages were correctly marked as (un)processed.
+		for i in 0..msgs {
+			let (_, processed, _) = page.peek_index(i).unwrap();
+			assert_eq!(processed, i % 2 == 0);
+		}
+	});
+}
+
+#[test]
+fn note_processed_at_pos_works() {
+	use super::integration_test::Test; // Run with larger page size.
+	new_test_ext::().execute_with(|| {
+		let (mut page, msgs) = full_page::();
+
+		for i in 0..msgs {
+			let (pos, processed, _) = page.peek_index(i).unwrap();
+			assert!(!processed);
+			assert_eq!(page.remaining as usize, msgs - i);
+
+			page.note_processed_at_pos(pos);
+
+			let (_, processed, _) = page.peek_index(i).unwrap();
+			assert!(processed);
+			assert_eq!(page.remaining as usize, msgs - i - 1);
+		}
+		// `skip_first` still works fine.
+		for _ in 0..msgs {
+			page.peek_first().unwrap();
+			page.skip_first(false);
+		}
+		assert!(page.peek_first().is_none());
+	});
+}
+
+#[test]
+fn note_processed_at_pos_idempotent() {
+	let (mut page, _) = full_page::();
+	page.note_processed_at_pos(0);
+
+	let original = page.clone();
+	page.note_processed_at_pos(0);
+	assert_eq!(page.heap, original.heap);
+}
+
+#[test]
+fn is_complete_works() {
+	use super::integration_test::Test; // Run with larger page size.
+	new_test_ext::().execute_with(|| {
+		let (mut page, msgs) = full_page::();
+		assert!(msgs > 3, "Boring");
+		let msg_enc_len = ItemHeader::<::Size>::max_encoded_len() + 4;
+
+		assert!(!page.is_complete());
+		for i in 0..msgs {
+			if i % 2 == 0 {
+				page.skip_first(false);
+			} else {
+				page.note_processed_at_pos(msg_enc_len * i);
+			}
+		}
+		// Not complete since `skip_first` was called with `false`.
+		assert!(!page.is_complete());
+		for i in 0..msgs {
+			if i % 2 == 0 {
+				assert!(!page.is_complete());
+				let (pos, _, _) = page.peek_index(i).unwrap();
+				page.note_processed_at_pos(pos);
+			}
+		}
+		assert!(page.is_complete());
+		assert_eq!(page.remaining_size, 0);
+		// Each message is marked as processed.
+		for i in 0..msgs {
+			let (_, processed, _) = page.peek_index(i).unwrap();
+			assert!(processed);
+		}
+	});
+}
+
+#[test]
+fn page_from_message_basic_works() {
+	assert!(MaxMessageLenOf::::get() > 0, "pre-condition unmet");
+	let mut msg: BoundedVec> = Default::default();
+	msg.bounded_resize(MaxMessageLenOf::::get() as usize, 123);
+
+	let page = PageOf::::from_message::(msg.as_bounded_slice());
+	assert_eq!(page.remaining, 1);
+	assert_eq!(page.remaining_size as usize, msg.len());
+	assert!(page.first_index == 0 && page.first == 0 && page.last == 0);
+
+	// Verify the content of the heap.
+	let mut heap = Vec::::new();
+	let header =
+		ItemHeader::<::Size> { payload_len: msg.len() as u32, is_processed: false };
+	heap.extend(header.encode());
+	heap.extend(msg.deref());
+	assert_eq!(page.heap, heap);
+}
+
+#[test]
+fn page_try_append_message_basic_works() {
+	use super::integration_test::Test; // Run with larger page size.
+
+	let mut page = PageOf::::default();
+	let mut msgs = 0;
+	// Append as many 4-byte messages as possible.
+	for i in 0..u32::MAX {
+		let r = i.using_encoded(|i| page.try_append_message::(i.try_into().unwrap()));
+		if r.is_err() {
+			break
+		} else {
+			msgs += 1;
+		}
+	}
+	let expected_msgs = (::HeapSize::get()) /
+		(ItemHeader::<::Size>::max_encoded_len() as u32 + 4);
+	assert_eq!(expected_msgs, msgs, "Wrong number of messages");
+	assert_eq!(page.remaining, msgs);
+	assert_eq!(page.remaining_size, msgs * 4);
+
+	// Verify that the heap content is correct.
+	let mut heap = Vec::::new();
+	for i in 0..msgs {
+		let header = ItemHeader::<::Size> { payload_len: 4, is_processed: false };
+		heap.extend(header.encode());
+		heap.extend(i.encode());
+	}
+	assert_eq!(page.heap, heap);
+}
+
+#[test]
+fn page_try_append_message_max_msg_len_works_works() {
+	use super::integration_test::Test; // Run with larger page size.
+
+	// We start off with an empty page.
+	let mut page = PageOf::::default();
+	// … and append a message with maximum possible length.
+	let msg = vec![123u8; MaxMessageLenOf::::get() as usize];
+	// … which works.
+	page.try_append_message::(BoundedSlice::defensive_truncate_from(&msg))
+		.unwrap();
+	// Now we cannot append *anything* since the heap is full.
+	page.try_append_message::(BoundedSlice::defensive_truncate_from(&[]))
+		.unwrap_err();
+	assert_eq!(page.heap.len(), ::HeapSize::get() as usize);
+}
+
+#[test]
+fn page_try_append_message_with_remaining_size_works_works() {
+	use super::integration_test::Test; // Run with larger page size.
+	let header_size = ItemHeader::<::Size>::max_encoded_len();
+
+	// We start off with an empty page.
+	let mut page = PageOf::::default();
+	let mut remaining = ::HeapSize::get() as usize;
+	let mut msgs = Vec::new();
+	let mut rng = StdRng::seed_from_u64(42);
+	// Now we keep appending messages with different lengths.
+	while remaining >= header_size {
+		let take = rng.gen_range(0..=(remaining - header_size));
+		let msg = vec![123u8; take];
+		page.try_append_message::(BoundedSlice::defensive_truncate_from(&msg))
+			.unwrap();
+		remaining -= take + header_size;
+		msgs.push(msg);
+	}
+	// Cannot even fit a single header in there now.
+	assert!(remaining < header_size);
+	assert_eq!(::HeapSize::get() as usize - page.heap.len(), remaining);
+	assert_eq!(page.remaining as usize, msgs.len());
+	assert_eq!(
+		page.remaining_size as usize,
+		msgs.iter().fold(0, |mut a, m| {
+			a += m.len();
+			a
+		})
+	);
+	// Verify the heap content.
+	let mut heap = Vec::new();
+	for msg in msgs.into_iter() {
+		let header = ItemHeader::<::Size> {
+			payload_len: msg.len() as u32,
+			is_processed: false,
+		};
+		heap.extend(header.encode());
+		heap.extend(msg);
+	}
+	assert_eq!(page.heap, heap);
+}
+
+// `Page::from_message` does not panic when called with the maximum message and origin lengths.
+#[test]
+fn page_from_message_max_len_works() {
+	let max_msg_len: usize = MaxMessageLenOf::::get() as usize;
+
+	let page = PageOf::::from_message::(vec![1; max_msg_len][..].try_into().unwrap());
+
+	assert_eq!(page.remaining, 1);
+}
+
+#[test]
+fn sweep_queue_works() {
+	use MessageOrigin::*;
+	new_test_ext::().execute_with(|| {
+		build_triple_ring();
+
+		let book = BookStateFor::::get(Here);
+		assert!(book.begin != book.end);
+		// Removing the service head works.
+		assert_eq!(ServiceHead::::get(), Some(Here));
+		MessageQueue::sweep_queue(Here);
+		assert_ring(&[There, Everywhere(0)]);
+		// The book still exists, but has updated begin and end.
+		let book = BookStateFor::::get(Here);
+		assert_eq!(book.begin, book.end);
+
+		// Removing something that is not the service head works.
+		assert!(ServiceHead::::get() != Some(Everywhere(0)));
+		MessageQueue::sweep_queue(Everywhere(0));
+		assert_ring(&[There]);
+		// The book still exists, but has updated begin and end.
+		let book = BookStateFor::::get(Everywhere(0));
+		assert_eq!(book.begin, book.end);
+
+		MessageQueue::sweep_queue(There);
+		// The book still exists, but has updated begin and end.
+		let book = BookStateFor::::get(There);
+		assert_eq!(book.begin, book.end);
+		assert_ring(&[]);
+
+		// Sweeping a queue never calls OnQueueChanged.
+		assert!(QueueChanges::take().is_empty());
+	})
+}
+
+/// Test that `sweep_queue` also works if the ReadyRing wraps around.
+#[test]
+fn sweep_queue_wraps_works() {
+	use MessageOrigin::*;
+	new_test_ext::().execute_with(|| {
+		BookStateFor::::insert(Here, empty_book::());
+		knit(&Here);
+
+		MessageQueue::sweep_queue(Here);
+		let book = BookStateFor::::get(Here);
+		assert!(book.ready_neighbours.is_none());
+	});
+}
+
+#[test]
+fn sweep_queue_invalid_noops() {
+	use MessageOrigin::*;
+	new_test_ext::().execute_with(|| {
+		assert_storage_noop!(MessageQueue::sweep_queue(Here));
+	});
+}
+
+#[test]
+fn footprint_works() {
+	new_test_ext::().execute_with(|| {
+		let origin = MessageOrigin::Here;
+		let (page, msgs) = full_page::();
+		let book = book_for::(&page);
+		BookStateFor::::insert(origin, book);
+
+		let info = MessageQueue::footprint(origin);
+		assert_eq!(info.count as usize, msgs);
+		assert_eq!(info.size, page.remaining_size as u64);
+
+		// Sweeping a queue never calls OnQueueChanged.
+		assert!(QueueChanges::take().is_empty());
+	})
+}
+
+/// The footprint of an invalid queue is the default footprint.
+#[test]
+fn footprint_invalid_works() {
+	new_test_ext::().execute_with(|| {
+		let origin = MessageOrigin::Here;
+		assert_eq!(MessageQueue::footprint(origin), Default::default());
+	})
+}
+
+/// The footprint of a swept queue is still correct.
+#[test]
+fn footprint_on_swept_works() {
+	use MessageOrigin::*;
+	new_test_ext::().execute_with(|| {
+		let mut book = empty_book::();
+		book.message_count = 3;
+		book.size = 10;
+		BookStateFor::::insert(Here, &book);
+		knit(&Here);
+
+		MessageQueue::sweep_queue(Here);
+		let fp = MessageQueue::footprint(Here);
+		assert_eq!(fp.count, 3);
+		assert_eq!(fp.size, 10);
+	})
+}
+
+#[test]
+fn execute_overweight_works() {
+	new_test_ext::().execute_with(|| {
+		set_weight("bump_service_head", 1.into_weight());
+		set_weight("service_queue_base", 1.into_weight());
+		set_weight("service_page_base_completion", 1.into_weight());
+
+		// Enqueue a message.
+		let origin = MessageOrigin::Here;
+		MessageQueue::enqueue_message(msg("weight=6"), origin);
+		// Load the current book.
+		let book = BookStateFor::::get(origin);
+		assert_eq!(book.message_count, 1);
+		assert!(Pages::::contains_key(origin, 0));
+
+		// Mark the message as permanently overweight.
+		assert_eq!(MessageQueue::service_queues(4.into_weight()), 4.into_weight());
+		assert_eq!(QueueChanges::take(), vec![(origin, 1, 8)]);
+		assert_last_event::(
+			Event::OverweightEnqueued {
+				hash: ::Hashing::hash(b"weight=6"),
+				origin: MessageOrigin::Here,
+				message_index: 0,
+				page_index: 0,
+			}
+			.into(),
+		);
+
+		// Now try to execute it with too little weight.
+		let consumed =
+			::execute_overweight(5.into_weight(), (origin, 0, 0));
+		assert_eq!(consumed, Err(ExecuteOverweightError::InsufficientWeight));
+
+		// Execute it with enough weight.
+		assert_eq!(Pages::::iter().count(), 1);
+		assert!(QueueChanges::take().is_empty());
+		let consumed =
+			::execute_overweight(7.into_weight(), (origin, 0, 0))
+				.unwrap();
+		assert_eq!(consumed, 6.into_weight());
+		assert_eq!(QueueChanges::take(), vec![(origin, 0, 0)]);
+		// There is no message left in the book.
+		let book = BookStateFor::::get(origin);
+		assert_eq!(book.message_count, 0);
+		// And no more pages.
+		assert_eq!(Pages::::iter().count(), 0);
+
+		// Doing it again with enough weight will error.
+		let consumed =
+			::execute_overweight(70.into_weight(), (origin, 0, 0));
+		assert_eq!(consumed, Err(ExecuteOverweightError::NotFound));
+		assert!(QueueChanges::take().is_empty());
+		assert!(!Pages::::contains_key(origin, 0), "Page is gone");
+	});
+}
+
+/// Checks that (un)knitting the ready ring works with just one queue.
+///
+/// This case is interesting since the ring wraps around and a lot of `mutate` calls now operate
+/// on the same object.
+#[test]
+fn ready_ring_knit_basic_works() {
+	use MessageOrigin::*;
+
+	new_test_ext::().execute_with(|| {
+		BookStateFor::::insert(Here, empty_book::());
+
+		for i in 0..10 {
+			if i % 2 == 0 {
+				knit(&Here);
+				assert_ring(&[Here]);
+			} else {
+				unknit(&Here);
+				assert_ring(&[]);
+			}
+		}
+		assert_ring(&[]);
+	});
+}
+
+#[test]
+fn ready_ring_knit_and_unknit_works() {
+	use MessageOrigin::*;
+
+	new_test_ext::().execute_with(|| {
+		// Place three queues into the storage.
+		BookStateFor::::insert(Here, empty_book::());
+		BookStateFor::::insert(There, empty_book::());
+		BookStateFor::::insert(Everywhere(0), empty_book::());
+
+		// Knit them into the ready ring.
+		assert_ring(&[]);
+		knit(&Here);
+		assert_ring(&[Here]);
+		knit(&There);
+		assert_ring(&[Here, There]);
+		knit(&Everywhere(0));
+		assert_ring(&[Here, There, Everywhere(0)]);
+
+		// Now unknit…
+		unknit(&Here);
+		assert_ring(&[There, Everywhere(0)]);
+		unknit(&There);
+		assert_ring(&[Everywhere(0)]);
+		unknit(&Everywhere(0));
+		assert_ring(&[]);
+	});
+}
+
+#[test]
+fn enqueue_message_works() {
+	use MessageOrigin::*;
+	let max_msg_per_page = ::HeapSize::get() as u64 /
+		(ItemHeader::<::Size>::max_encoded_len() as u64 + 1);
+
+	new_test_ext::().execute_with(|| {
+		// Enqueue messages which should fill three pages.
+		let n = max_msg_per_page * 3;
+		for i in 1..=n {
+			MessageQueue::enqueue_message(msg("a"), Here);
+			assert_eq!(QueueChanges::take(), vec![(Here, i, i)], "OnQueueChanged not called");
+		}
+		assert_eq!(Pages::::iter().count(), 3);
+
+		// Enqueue one more onto page 4.
+		MessageQueue::enqueue_message(msg("abc"), Here);
+		assert_eq!(QueueChanges::take(), vec![(Here, n + 1, n + 3)]);
+		assert_eq!(Pages::::iter().count(), 4);
+
+		// Check the state.
+		assert_eq!(BookStateFor::::iter().count(), 1);
+		let book = BookStateFor::::get(Here);
+		assert_eq!(book.message_count, n + 1);
+		assert_eq!(book.size, n + 3);
+		assert_eq!((book.begin, book.end), (0, 4));
+		assert_eq!(book.count as usize, Pages::::iter().count());
+	});
+}
+
+#[test]
+fn enqueue_messages_works() {
+	use MessageOrigin::*;
+	let max_msg_per_page = ::HeapSize::get() as u64 /
+		(ItemHeader::<::Size>::max_encoded_len() as u64 + 1);
+
+	new_test_ext::().execute_with(|| {
+		// Enqueue messages which should fill three pages.
+		let n = max_msg_per_page * 3;
+		let msgs = vec![msg("a"); n as usize];
+
+		// Now queue all messages at once.
+		MessageQueue::enqueue_messages(msgs.into_iter(), Here);
+		// The changed handler should only be called once.
+		assert_eq!(QueueChanges::take(), vec![(Here, n, n)], "OnQueueChanged not called");
+		assert_eq!(Pages::::iter().count(), 3);
+
+		// Enqueue one more onto page 4.
+		MessageQueue::enqueue_message(msg("abc"), Here);
+		assert_eq!(QueueChanges::take(), vec![(Here, n + 1, n + 3)]);
+		assert_eq!(Pages::::iter().count(), 4);
+
+		// Check the state.
+ assert_eq!(BookStateFor::::iter().count(), 1); + let book = BookStateFor::::get(Here); + assert_eq!(book.message_count, n + 1); + assert_eq!(book.size, n + 3); + assert_eq!((book.begin, book.end), (0, 4)); + assert_eq!(book.count as usize, Pages::::iter().count()); + }); +} diff --git a/frame/message-queue/src/weights.rs b/frame/message-queue/src/weights.rs new file mode 100644 index 0000000000000..cd9268ffde224 --- /dev/null +++ b/frame/message-queue/src/weights.rs @@ -0,0 +1,216 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_message_queue +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2022-12-08, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 + +// Executed Command: +// /home/benchbot/cargo_target_dir/production/substrate +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/substrate/.git/.artifacts/bench.json +// --pallet=pallet_message_queue +// --chain=dev +// --header=./HEADER-APACHE2 +// --output=./frame/message-queue/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_message_queue. +pub trait WeightInfo { + fn ready_ring_knit() -> Weight; + fn ready_ring_unknit() -> Weight; + fn service_queue_base() -> Weight; + fn service_page_base_completion() -> Weight; + fn service_page_base_no_completion() -> Weight; + fn service_page_item() -> Weight; + fn bump_service_head() -> Weight; + fn reap_page() -> Weight; + fn execute_overweight_page_removed() -> Weight; + fn execute_overweight_page_updated() -> Weight; +} + +/// Weights for pallet_message_queue using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + // Storage: MessageQueue ServiceHead (r:1 w:0) + // Storage: MessageQueue BookStateFor (r:2 w:2) + fn ready_ring_knit() -> Weight { + // Minimum execution time: 12_330 nanoseconds. + Weight::from_ref_time(12_711_000) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + // Storage: MessageQueue BookStateFor (r:2 w:2) + // Storage: MessageQueue ServiceHead (r:1 w:1) + fn ready_ring_unknit() -> Weight { + // Minimum execution time: 12_322 nanoseconds. 
+ Weight::from_ref_time(12_560_000) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + // Storage: MessageQueue BookStateFor (r:1 w:1) + fn service_queue_base() -> Weight { + // Minimum execution time: 4_652 nanoseconds. + Weight::from_ref_time(4_848_000) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + // Storage: MessageQueue Pages (r:1 w:1) + fn service_page_base_completion() -> Weight { + // Minimum execution time: 7_115 nanoseconds. + Weight::from_ref_time(7_407_000) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + // Storage: MessageQueue Pages (r:1 w:1) + fn service_page_base_no_completion() -> Weight { + // Minimum execution time: 6_974 nanoseconds. + Weight::from_ref_time(7_200_000) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn service_page_item() -> Weight { + // Minimum execution time: 79_657 nanoseconds. + Weight::from_ref_time(80_050_000) + } + // Storage: MessageQueue ServiceHead (r:1 w:1) + // Storage: MessageQueue BookStateFor (r:1 w:0) + fn bump_service_head() -> Weight { + // Minimum execution time: 7_598 nanoseconds. + Weight::from_ref_time(8_118_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + // Storage: MessageQueue BookStateFor (r:1 w:1) + // Storage: MessageQueue Pages (r:1 w:1) + fn reap_page() -> Weight { + // Minimum execution time: 60_562 nanoseconds. + Weight::from_ref_time(61_430_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + // Storage: MessageQueue BookStateFor (r:1 w:1) + // Storage: MessageQueue Pages (r:1 w:1) + fn execute_overweight_page_removed() -> Weight { + // Minimum execution time: 74_582 nanoseconds. + Weight::from_ref_time(75_445_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + // Storage: MessageQueue BookStateFor (r:1 w:1) + // Storage: MessageQueue Pages (r:1 w:1) + fn execute_overweight_page_updated() -> Weight { + // Minimum execution time: 87_526 nanoseconds. + Weight::from_ref_time(88_055_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + // Storage: MessageQueue ServiceHead (r:1 w:0) + // Storage: MessageQueue BookStateFor (r:2 w:2) + fn ready_ring_knit() -> Weight { + // Minimum execution time: 12_330 nanoseconds. + Weight::from_ref_time(12_711_000) + .saturating_add(RocksDbWeight::get().reads(3)) + .saturating_add(RocksDbWeight::get().writes(2)) + } + // Storage: MessageQueue BookStateFor (r:2 w:2) + // Storage: MessageQueue ServiceHead (r:1 w:1) + fn ready_ring_unknit() -> Weight { + // Minimum execution time: 12_322 nanoseconds. + Weight::from_ref_time(12_560_000) + .saturating_add(RocksDbWeight::get().reads(3)) + .saturating_add(RocksDbWeight::get().writes(3)) + } + // Storage: MessageQueue BookStateFor (r:1 w:1) + fn service_queue_base() -> Weight { + // Minimum execution time: 4_652 nanoseconds. + Weight::from_ref_time(4_848_000) + .saturating_add(RocksDbWeight::get().reads(1)) + .saturating_add(RocksDbWeight::get().writes(1)) + } + // Storage: MessageQueue Pages (r:1 w:1) + fn service_page_base_completion() -> Weight { + // Minimum execution time: 7_115 nanoseconds. 
+ Weight::from_ref_time(7_407_000) + .saturating_add(RocksDbWeight::get().reads(1)) + .saturating_add(RocksDbWeight::get().writes(1)) + } + // Storage: MessageQueue Pages (r:1 w:1) + fn service_page_base_no_completion() -> Weight { + // Minimum execution time: 6_974 nanoseconds. + Weight::from_ref_time(7_200_000) + .saturating_add(RocksDbWeight::get().reads(1)) + .saturating_add(RocksDbWeight::get().writes(1)) + } + fn service_page_item() -> Weight { + // Minimum execution time: 79_657 nanoseconds. + Weight::from_ref_time(80_050_000) + } + // Storage: MessageQueue ServiceHead (r:1 w:1) + // Storage: MessageQueue BookStateFor (r:1 w:0) + fn bump_service_head() -> Weight { + // Minimum execution time: 7_598 nanoseconds. + Weight::from_ref_time(8_118_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(1)) + } + // Storage: MessageQueue BookStateFor (r:1 w:1) + // Storage: MessageQueue Pages (r:1 w:1) + fn reap_page() -> Weight { + // Minimum execution time: 60_562 nanoseconds. + Weight::from_ref_time(61_430_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(2)) + } + // Storage: MessageQueue BookStateFor (r:1 w:1) + // Storage: MessageQueue Pages (r:1 w:1) + fn execute_overweight_page_removed() -> Weight { + // Minimum execution time: 74_582 nanoseconds. + Weight::from_ref_time(75_445_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(2)) + } + // Storage: MessageQueue BookStateFor (r:1 w:1) + // Storage: MessageQueue Pages (r:1 w:1) + fn execute_overweight_page_updated() -> Weight { + // Minimum execution time: 87_526 nanoseconds. + Weight::from_ref_time(88_055_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(2)) + } +} diff --git a/frame/scheduler/Cargo.toml b/frame/scheduler/Cargo.toml index 86ca63c753bea..25ac602681cc0 100644 --- a/frame/scheduler/Cargo.toml +++ b/frame/scheduler/Cargo.toml @@ -19,6 +19,7 @@ frame-system = { version = "4.0.0-dev", default-features = false, path = "../sys sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-weights = { version = "4.0.0", default-features = false, path = "../../primitives/weights" } [dev-dependencies] pallet-preimage = { version = "4.0.0-dev", path = "../preimage" } @@ -42,5 +43,6 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-std/std", + "sp-weights/std", ] try-runtime = ["frame-support/try-runtime"] diff --git a/frame/scheduler/src/lib.rs b/frame/scheduler/src/lib.rs index 78533540be98f..2e0d0c6be1db5 100644 --- a/frame/scheduler/src/lib.rs +++ b/frame/scheduler/src/lib.rs @@ -73,7 +73,6 @@ use frame_support::{ weights::{Weight, WeightMeter}, }; use frame_system::{self as system}; -pub use pallet::*; use scale_info::TypeInfo; use sp_io::hashing::blake2_256; use sp_runtime::{ @@ -81,6 +80,8 @@ use sp_runtime::{ BoundedVec, RuntimeDebug, }; use sp_std::{borrow::Borrow, cmp::Ordering, marker::PhantomData, prelude::*}; + +pub use pallet::*; pub use weights::WeightInfo; /// Just a simple index for naming period tasks. 
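For context, the `WeightInfo` functions generated above are not called directly by users; the pallet pulls them in through its `Config` trait and combines them while servicing queues. The following is a minimal sketch of how a weight budget could be derived from the benchmarked pieces; the helper name is illustrative and not part of this patch:

use frame_support::weights::Weight;
use pallet_message_queue::weights::WeightInfo;

/// Hypothetical helper: a worst-case weight budget for fully servicing one
/// page holding `n` messages, built from the benchmarked components above.
fn page_service_budget<W: WeightInfo>(n: u64) -> Weight {
	// One base cost for touching the page plus the per-item processing cost.
	W::service_page_base_completion()
		.saturating_add(W::service_page_item().saturating_mul(n))
}

The same pattern (base cost plus per-item cost, combined with `saturating_add`/`saturating_mul`) is what the pallet itself uses when it checks a `WeightMeter` against these functions.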
diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index e5ba98fe0c5bb..63c86c1f68459 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -112,6 +112,12 @@ pub use voting::{ mod preimages; pub use preimages::{Bounded, BoundedInline, FetchResult, Hash, QueryPreimage, StorePreimage}; +mod messages; +pub use messages::{ + EnqueueMessage, ExecuteOverweightError, Footprint, ProcessMessage, ProcessMessageError, + ServiceQueues, +}; + #[cfg(feature = "try-runtime")] mod try_runtime; #[cfg(feature = "try-runtime")] diff --git a/frame/support/src/traits/messages.rs b/frame/support/src/traits/messages.rs new file mode 100644 index 0000000000000..9b86c421ad9e0 --- /dev/null +++ b/frame/support/src/traits/messages.rs @@ -0,0 +1,202 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits for managing message queuing and handling. + +use codec::{Decode, Encode, FullCodec, MaxEncodedLen}; +use scale_info::TypeInfo; +use sp_core::{ConstU32, Get, TypedGet}; +use sp_runtime::{traits::Convert, BoundedSlice, RuntimeDebug}; +use sp_std::{fmt::Debug, marker::PhantomData, prelude::*}; +use sp_weights::Weight; + +/// Errors that can happen when attempting to process a message with +/// [`ProcessMessage::process_message()`]. +#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, TypeInfo, RuntimeDebug)] +pub enum ProcessMessageError { + /// The message data format is unknown (e.g. unrecognised header) + BadFormat, + /// The message data is bad (e.g. decoding returns an error). + Corrupt, + /// The message format is unsupported (e.g. old XCM version). + Unsupported, + /// Message processing was not attempted because it was not certain that the weight limit + /// would be respected. The parameter gives the maximum weight which the message could take + /// to process. + Overweight(Weight), +} + +/// Can process messages from a specific origin. +pub trait ProcessMessage { + /// The transport from where a message originates. + type Origin: FullCodec + MaxEncodedLen + Clone + Eq + PartialEq + TypeInfo + Debug; + + /// Process the given message, using no more than `weight_limit` in weight to do so. + fn process_message( + message: &[u8], + origin: Self::Origin, + weight_limit: Weight, + ) -> Result<(bool, Weight), ProcessMessageError>; +} + +/// Errors that can happen when attempting to execute an overweight message with +/// [`ServiceQueues::execute_overweight()`]. +#[derive(Eq, PartialEq, RuntimeDebug)] +pub enum ExecuteOverweightError { + /// The referenced message was not found. + NotFound, + /// The available weight was insufficient to execute the message. + InsufficientWeight, +} + +/// Can service queues and execute overweight messages. +pub trait ServiceQueues { + /// Addresses a specific overweight message. + type OverweightMessageAddress; + + /// Service all message queues in some fair manner. 
+	///
+	/// - `weight_limit`: The maximum amount of dynamic weight that this call can use.
+	///
+	/// Returns the dynamic weight used by this call; it is never greater than `weight_limit`.
+	fn service_queues(weight_limit: Weight) -> Weight;
+
+	/// Executes a message that could not be executed by [`Self::service_queues()`] because it was
+	/// temporarily overweight.
+	fn execute_overweight(
+		_weight_limit: Weight,
+		_address: Self::OverweightMessageAddress,
+	) -> Result {
+		Err(ExecuteOverweightError::NotFound)
+	}
+}
+
+/// The resource footprint of a queue.
+#[derive(Default, Copy, Clone, Eq, PartialEq, RuntimeDebug)]
+pub struct Footprint {
+	pub count: u64,
+	pub size: u64,
+}
+
+/// Can enqueue messages for multiple origins.
+pub trait EnqueueMessage {
+	/// The maximal length any enqueued message may have.
+	type MaxMessageLen: Get;
+
+	/// Enqueue a single `message` from a specific `origin`.
+	fn enqueue_message(message: BoundedSlice, origin: Origin);
+
+	/// Enqueue multiple `messages` from a specific `origin`.
+	fn enqueue_messages<'a>(
+		messages: impl Iterator>,
+		origin: Origin,
+	);
+
+	/// Remove the queue; processing of any remaining messages should happen only lazily, not
+	/// proactively.
+	fn sweep_queue(origin: Origin);
+
+	/// Return the state footprint of the given queue.
+	fn footprint(origin: Origin) -> Footprint;
+}
+
+impl EnqueueMessage for () {
+	type MaxMessageLen = ConstU32<0>;
+	fn enqueue_message(_: BoundedSlice, _: Origin) {}
+	fn enqueue_messages<'a>(
+		_: impl Iterator>,
+		_: Origin,
+	) {
+	}
+	fn sweep_queue(_: Origin) {}
+	fn footprint(_: Origin) -> Footprint {
+		Footprint::default()
+	}
+}
+
+/// Transform the origin of an [`EnqueueMessage`] via `C::convert`.
+pub struct TransformOrigin(PhantomData<(E, O, N, C)>);
+impl, O: MaxEncodedLen, N: MaxEncodedLen, C: Convert> EnqueueMessage
+	for TransformOrigin
+{
+	type MaxMessageLen = E::MaxMessageLen;
+
+	fn enqueue_message(message: BoundedSlice, origin: N) {
+		E::enqueue_message(message, C::convert(origin));
+	}
+
+	fn enqueue_messages<'a>(
+		messages: impl Iterator>,
+		origin: N,
+	) {
+		E::enqueue_messages(messages, C::convert(origin));
+	}
+
+	fn sweep_queue(origin: N) {
+		E::sweep_queue(C::convert(origin));
+	}
+
+	fn footprint(origin: N) -> Footprint {
+		E::footprint(C::convert(origin))
+	}
+}
+
+/// Handles incoming messages for a single origin.
+pub trait HandleMessage {
+	/// The maximal length any enqueued message may have.
+	type MaxMessageLen: Get;
+
+	/// Enqueue a single `message` with an implied origin.
+	fn handle_message(message: BoundedSlice);
+
+	/// Enqueue multiple `messages` from an implied origin.
+	fn handle_messages<'a>(
+		messages: impl Iterator>,
+	);
+
+	/// Remove the queue; processing of any remaining messages should happen only lazily, not
+	/// proactively.
+	fn sweep_queue();
+
+	/// Return the state footprint of the queue.
+	fn footprint() -> Footprint;
+}
+
+/// Adapter type to transform an [`EnqueueMessage`] with an origin into a [`HandleMessage`] impl.
+pub struct EnqueueWithOrigin(PhantomData<(E, O)>); +impl, O: TypedGet> HandleMessage for EnqueueWithOrigin +where + O::Type: MaxEncodedLen, +{ + type MaxMessageLen = E::MaxMessageLen; + + fn handle_message(message: BoundedSlice) { + E::enqueue_message(message, O::get()); + } + + fn handle_messages<'a>( + messages: impl Iterator>, + ) { + E::enqueue_messages(messages, O::get()); + } + + fn sweep_queue() { + E::sweep_queue(O::get()); + } + + fn footprint() -> Footprint { + E::footprint(O::get()) + } +} diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr index 42ef5a34e4c30..999d8585c221a 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr @@ -28,7 +28,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 278 others + and 279 others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `PartialStorageInfoTrait` @@ -69,7 +69,7 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied (A, B, C, D) (A, B, C, D, E) (A, B, C, D, E, F) - and 161 others + and 162 others = note: required for `Bar` to implement `StaticTypeInfo` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` @@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 278 others + and 279 others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr index 461d63ebb0d9c..e2870ffb9e86f 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr @@ -28,7 +28,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 278 others + and 279 others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `PartialStorageInfoTrait` @@ -69,7 +69,7 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied (A, B, C, D) (A, B, C, D, E) (A, B, C, D, E, F) - and 161 others + and 162 others = note: required for `Bar` to implement `StaticTypeInfo` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` @@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 278 others + 
and 279 others
  = note: required for `Bar` to implement `FullEncode`
  = note: required for `Bar` to implement `FullCodec`
  = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder`
diff --git a/primitives/core/src/bounded/bounded_vec.rs b/primitives/core/src/bounded/bounded_vec.rs
index 2f39f3340ce50..6e1e1c7cfda64 100644
--- a/primitives/core/src/bounded/bounded_vec.rs
+++ b/primitives/core/src/bounded/bounded_vec.rs
@@ -675,6 +675,13 @@ impl> BoundedVec {
 	}
 }
 
+impl BoundedVec {
+	/// Return a [`BoundedSlice`] with the content and bound of [`Self`].
+	pub fn as_bounded_slice(&self) -> BoundedSlice {
+		BoundedSlice(&self.0[..], PhantomData::default())
+	}
+}
+
 impl Default for BoundedVec {
 	fn default() -> Self {
 		// the bound cannot be below 0, which is satisfied by an empty vector
diff --git a/primitives/weights/src/weight_meter.rs b/primitives/weights/src/weight_meter.rs
index d03e72968bb09..17c5da1502e9e 100644
--- a/primitives/weights/src/weight_meter.rs
+++ b/primitives/weights/src/weight_meter.rs
@@ -71,6 +71,12 @@ impl WeightMeter {
 		time.max(pov)
 	}
 
+	/// Consume some weight and defensively fail if it is over the limit. Saturate in any case.
+	pub fn defensive_saturating_accrue(&mut self, w: Weight) {
+		self.consumed.saturating_accrue(w);
+		debug_assert!(self.consumed.all_lte(self.limit), "Weight counter overflow");
+	}
+
 	/// Consume the given weight after checking that it can be consumed. Otherwise do nothing.
 	pub fn check_accrue(&mut self, w: Weight) -> bool {
 		self.consumed.checked_add(&w).map_or(false, |test| {
From f0b6e79a8d21856743acc5370b5d5c3271048cb9 Mon Sep 17 00:00:00 2001
From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com>
Date: Fri, 9 Dec 2022 13:47:04 +0100
Subject: [PATCH 07/29] zombienet timings adjusted (#12890)

* zombienet tests: add some timeout to allow net spin-up

Sometimes tests fail on the first try, as the pods are not up yet. Adding a
timeout should allow the network to spin up properly.
* initial timeout increased to 30s
---
 zombienet/0000-block-building/block-building.zndsl  | 4 ++--
 zombienet/0001-basic-warp-sync/test-warp-sync.zndsl | 8 ++++----
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/zombienet/0000-block-building/block-building.zndsl b/zombienet/0000-block-building/block-building.zndsl
index c53e50915c202..86a54773484b3 100644
--- a/zombienet/0000-block-building/block-building.zndsl
+++ b/zombienet/0000-block-building/block-building.zndsl
@@ -2,8 +2,8 @@ Description: Block building
 Network: ./block-building.toml
 Creds: config
 
-alice: is up
-bob: is up
+alice: is up within 30 seconds
+bob: is up within 30 seconds
 alice: reports node_roles is 4
 bob: reports node_roles is 4
 
diff --git a/zombienet/0001-basic-warp-sync/test-warp-sync.zndsl b/zombienet/0001-basic-warp-sync/test-warp-sync.zndsl
index 1ccacb2e6d038..8ceb61c8b039d 100644
--- a/zombienet/0001-basic-warp-sync/test-warp-sync.zndsl
+++ b/zombienet/0001-basic-warp-sync/test-warp-sync.zndsl
@@ -2,10 +2,10 @@ Description: Warp sync
 Network: ./test-warp-sync.toml
 Creds: config
 
-alice: is up
-bob: is up
-charlie: is up
-dave: is up
+alice: is up within 30 seconds
+bob: is up within 30 seconds
+charlie: is up within 30 seconds
+dave: is up within 30 seconds
 alice: reports node_roles is 1
 bob: reports node_roles is 1
 
From 9931220910f9fb65227fe4571842f800d61c7b95 Mon Sep 17 00:00:00 2001
From: Aaro Altonen <48052676+altonen@users.noreply.github.com>
Date: Fri, 9 Dec 2022 21:50:57 +0200
Subject: [PATCH 08/29] Move import queue out of `sc-network` (#12764)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Move import queue out of `sc-network`

Add a supplementary asynchronous API for the import queue, which means it can
be run as an independent task and communicated with through the
`ImportQueueService`.

This commit removes block and justification imports from `sc-network` and
provides `ChainSync` with a handle to the import queue so it can import blocks
and justifications. Polling of the import queue is moved completely out of
`sc-network`, and `sc_consensus::Link` is implemented for
`ChainSyncInterfaceHandled` so the import queue can still influence the
syncing process.
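To make the flow described above concrete, here is a rough sketch of the intended wiring; the helper name and the spawning details are illustrative assumptions, not code taken from this patch:

// Rough sketch: run the import queue as its own task and keep a service
// handle for everyone else (assumes `sc_consensus` re-exports these traits).
use sc_consensus::{ImportQueue, ImportQueueService, Link};
use sp_core::traits::SpawnNamed;
use sp_runtime::traits::Block as BlockT;

fn start_import_queue<B: BlockT>(
	queue: impl ImportQueue<B> + 'static,
	link: Box<dyn Link<B>>,
	spawner: &dyn SpawnNamed,
) -> Box<dyn ImportQueueService<B>> {
	// Take a cloneable service handle first, since `run` consumes the queue.
	let service = queue.service();
	// The queue now runs independently and influences syncing through `link`
	// instead of being polled by `sc-network`.
	spawner.spawn("import-queue", None, Box::pin(queue.run(link)));
	service
}

Components that previously pushed blocks into the network (e.g. `ChainSync`) would instead hold the returned `ImportQueueService` handle and call `import_blocks`/`import_justifications` on it.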
* Fix tests * Apply review comments * Apply suggestions from code review Co-authored-by: Bastian Köcher * Update client/network/sync/src/lib.rs Co-authored-by: Bastian Köcher Co-authored-by: Bastian Köcher --- Cargo.lock | 2 + client/consensus/common/Cargo.toml | 1 + client/consensus/common/src/import_queue.rs | 23 +- .../common/src/import_queue/basic_queue.rs | 69 ++- .../common/src/import_queue/buffered_link.rs | 32 +- .../consensus/common/src/import_queue/mock.rs | 46 ++ client/network/common/src/sync.rs | 28 +- client/network/src/behaviour.rs | 31 +- client/network/src/config.rs | 7 - client/network/src/lib.rs | 9 +- client/network/src/protocol.rs | 107 +--- client/network/src/service.rs | 88 +--- client/network/src/service/metrics.rs | 10 - .../network/src/service/tests/chain_sync.rs | 106 ++-- client/network/src/service/tests/mod.rs | 40 +- client/network/sync/Cargo.toml | 1 + client/network/sync/src/lib.rs | 483 ++++++++++++------ client/network/sync/src/mock.rs | 14 +- client/network/sync/src/service/chain_sync.rs | 53 ++ client/network/sync/src/service/mock.rs | 33 +- client/network/sync/src/tests.rs | 3 + client/network/test/src/lib.rs | 12 +- client/service/src/builder.rs | 6 +- client/service/src/chain_ops/import_blocks.rs | 2 +- 24 files changed, 716 insertions(+), 490 deletions(-) create mode 100644 client/consensus/common/src/import_queue/mock.rs diff --git a/Cargo.lock b/Cargo.lock index 41c641cf05963..8225e557141d1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7328,6 +7328,7 @@ dependencies = [ "futures-timer", "libp2p", "log", + "mockall", "parking_lot 0.12.1", "sc-client-api", "sc-utils", @@ -7929,6 +7930,7 @@ dependencies = [ "sp-runtime", "sp-test-primitives", "sp-tracing", + "substrate-prometheus-endpoint", "substrate-test-runtime-client", "thiserror", "tokio", diff --git a/client/consensus/common/Cargo.toml b/client/consensus/common/Cargo.toml index 971ee71ab8040..b61c6a4334285 100644 --- a/client/consensus/common/Cargo.toml +++ b/client/consensus/common/Cargo.toml @@ -18,6 +18,7 @@ futures = { version = "0.3.21", features = ["thread-pool"] } futures-timer = "3.0.1" libp2p = { version = "0.49.0", default-features = false } log = "0.4.17" +mockall = "0.11.2" parking_lot = "0.12.1" serde = { version = "1.0", features = ["derive"] } thiserror = "1.0.30" diff --git a/client/consensus/common/src/import_queue.rs b/client/consensus/common/src/import_queue.rs index 3741fa99663cd..d49b240ef3489 100644 --- a/client/consensus/common/src/import_queue.rs +++ b/client/consensus/common/src/import_queue.rs @@ -53,6 +53,7 @@ pub type DefaultImportQueue = mod basic_queue; pub mod buffered_link; +pub mod mock; /// Shared block import struct used by the queue. pub type BoxBlockImport = @@ -105,10 +106,10 @@ pub trait Verifier: Send + Sync { /// Blocks import queue API. /// /// The `import_*` methods can be called in order to send elements for the import queue to verify. -/// Afterwards, call `poll_actions` to determine how to respond to these elements. -pub trait ImportQueue: Send { +pub trait ImportQueueService: Send { /// Import bunch of blocks. fn import_blocks(&mut self, origin: BlockOrigin, blocks: Vec>); + /// Import block justifications. fn import_justifications( &mut self, @@ -117,12 +118,26 @@ pub trait ImportQueue: Send { number: NumberFor, justifications: Justifications, ); - /// Polls for actions to perform on the network. - /// +} + +#[async_trait::async_trait] +pub trait ImportQueue: Send { + /// Get a copy of the handle to [`ImportQueueService`]. 
+	fn service(&self) -> Box>;
+
+	/// Get a reference to the handle to [`ImportQueueService`].
+	fn service_ref(&mut self) -> &mut dyn ImportQueueService;
+
 	/// This method should behave in a way similar to `Future::poll`. It can register the current
 	/// task and notify later when more actions are ready to be polled. To continue the comparison,
 	/// it is as if this method always returned `Poll::Pending`.
 	fn poll_actions(&mut self, cx: &mut futures::task::Context, link: &mut dyn Link);
+
+	/// Start asynchronous runner for import queue.
+	///
+	/// Takes an object implementing [`Link`] which allows the import queue to
+	/// influence the synchronization process.
+	async fn run(self, link: Box>);
 }
 
 /// Hooks that the verification queue can use to influence the synchronization
diff --git a/client/consensus/common/src/import_queue/basic_queue.rs b/client/consensus/common/src/import_queue/basic_queue.rs
index 0e607159b75c3..20e8d262cacda 100644
--- a/client/consensus/common/src/import_queue/basic_queue.rs
+++ b/client/consensus/common/src/import_queue/basic_queue.rs
@@ -34,7 +34,8 @@ use crate::{
 	import_queue::{
 		buffered_link::{self, BufferedLinkReceiver, BufferedLinkSender},
 		import_single_block_metered, BlockImportError, BlockImportStatus, BoxBlockImport,
-		BoxJustificationImport, ImportQueue, IncomingBlock, Link, RuntimeOrigin, Verifier,
+		BoxJustificationImport, ImportQueue, ImportQueueService, IncomingBlock, Link,
+		RuntimeOrigin, Verifier,
 	},
 	metrics::Metrics,
 };
@@ -42,10 +43,8 @@ use crate::{
 /// Interface to a basic block import queue that is importing blocks sequentially in a separate
 /// task, with plugable verification.
 pub struct BasicQueue {
-	/// Channel to send justification import messages to the background task.
-	justification_sender: TracingUnboundedSender>,
-	/// Channel to send block import messages to the background task.
-	block_import_sender: TracingUnboundedSender>,
+	/// Handle for sending justification and block import messages to the background task.
+	handle: BasicQueueHandle,
 	/// Results coming from the worker task.
 	result_port: BufferedLinkReceiver,
 	_phantom: PhantomData,
@@ -54,8 +53,7 @@ pub struct BasicQueue {
 impl Drop for BasicQueue {
 	fn drop(&mut self) {
 		// Flush the queue and close the receiver to terminate the future.
-		self.justification_sender.close_channel();
-		self.block_import_sender.close_channel();
+		self.handle.close();
 		self.result_port.close();
 	}
 }
@@ -95,11 +93,37 @@ impl BasicQueue {
 			future.boxed(),
 		);
 
-		Self { justification_sender, block_import_sender, result_port, _phantom: PhantomData }
+		Self {
+			handle: BasicQueueHandle::new(justification_sender, block_import_sender),
+			result_port,
+			_phantom: PhantomData,
+		}
 	}
 }
 
-impl ImportQueue for BasicQueue {
+#[derive(Clone)]
+struct BasicQueueHandle {
+	/// Channel to send justification import messages to the background task.
+	justification_sender: TracingUnboundedSender>,
+	/// Channel to send block import messages to the background task.
+	block_import_sender: TracingUnboundedSender>,
+}
+
+impl BasicQueueHandle {
+	pub fn new(
+		justification_sender: TracingUnboundedSender>,
+		block_import_sender: TracingUnboundedSender>,
+	) -> Self {
+		Self { justification_sender, block_import_sender }
+	}
+
+	pub fn close(&mut self) {
+		self.justification_sender.close_channel();
+		self.block_import_sender.close_channel();
+	}
+}
+
+impl ImportQueueService for BasicQueueHandle {
 	fn import_blocks(&mut self, origin: BlockOrigin, blocks: Vec>) {
 		if blocks.is_empty() {
 			return
@@ -138,12 +162,39 @@ impl ImportQueue for BasicQueue
 			}
 		}
 	}
+}
+
+#[async_trait::async_trait]
+impl ImportQueue for BasicQueue {
+	/// Get handle to [`ImportQueueService`].
+	fn service(&self) -> Box> {
+		Box::new(self.handle.clone())
+	}
+
+	/// Get a reference to the handle to [`ImportQueueService`].
+	fn service_ref(&mut self) -> &mut dyn ImportQueueService {
+		&mut self.handle
+	}
+
+	/// Poll actions from the network.
 	fn poll_actions(&mut self, cx: &mut Context, link: &mut dyn Link) {
 		if self.result_port.poll_actions(cx, link).is_err() {
 			log::error!(target: "sync", "poll_actions: Background import task is no longer alive");
 		}
 	}
+
+	/// Start asynchronous runner for import queue.
+	///
+	/// Takes an object implementing [`Link`] which allows the import queue to
+	/// influence the synchronization process.
+	async fn run(mut self, mut link: Box>) {
+		loop {
+			if let Err(_) = self.result_port.next_action(&mut *link).await {
+				log::error!(target: "sync", "poll_actions: Background import task is no longer alive");
+				return
+			}
+		}
+	}
 }
 
 /// Messages destinated to the background worker.
diff --git a/client/consensus/common/src/import_queue/buffered_link.rs b/client/consensus/common/src/import_queue/buffered_link.rs
index 5d418dddf0853..e6d3b212fdbac 100644
--- a/client/consensus/common/src/import_queue/buffered_link.rs
+++ b/client/consensus/common/src/import_queue/buffered_link.rs
@@ -80,7 +80,7 @@ impl Clone for BufferedLinkSender {
 }
 
 /// Internal buffered message.
-enum BlockImportWorkerMsg {
+pub enum BlockImportWorkerMsg {
 	BlocksProcessed(usize, usize, Vec<(BlockImportResult, B::Hash)>),
 	JustificationImported(RuntimeOrigin, B::Hash, NumberFor, bool),
 	RequestJustification(B::Hash, NumberFor),
@@ -122,6 +122,18 @@ pub struct BufferedLinkReceiver {
 }
 
 impl BufferedLinkReceiver {
+	/// Send action for the synchronization to perform.
+	pub fn send_actions(&mut self, msg: BlockImportWorkerMsg, link: &mut dyn Link) {
+		match msg {
+			BlockImportWorkerMsg::BlocksProcessed(imported, count, results) =>
+				link.blocks_processed(imported, count, results),
+			BlockImportWorkerMsg::JustificationImported(who, hash, number, success) =>
+				link.justification_imported(who, &hash, number, success),
+			BlockImportWorkerMsg::RequestJustification(hash, number) =>
+				link.request_justification(&hash, number),
+		}
+	}
+
 	/// Polls for the buffered link actions. Any enqueued action will be propagated to the link
 	/// passed as parameter.
 ///
@@ -138,15 +150,17 @@ impl BufferedLinkReceiver {
 				Poll::Pending => break Ok(()),
 			};
 
-			match msg {
-				BlockImportWorkerMsg::BlocksProcessed(imported, count, results) =>
-					link.blocks_processed(imported, count, results),
-				BlockImportWorkerMsg::JustificationImported(who, hash, number, success) =>
-					link.justification_imported(who, &hash, number, success),
-				BlockImportWorkerMsg::RequestJustification(hash, number) =>
-					link.request_justification(&hash, number),
-			}
+			self.send_actions(msg, &mut *link);
+		}
+	}
+
+	/// Poll next element from import queue and send the corresponding action command over the link.
+	pub async fn next_action(&mut self, link: &mut dyn Link) -> Result<(), ()> {
+		if let Some(msg) = self.rx.next().await {
+			self.send_actions(msg, link);
+			return Ok(())
 		}
+		Err(())
 	}
 
 	/// Close the channel.
diff --git a/client/consensus/common/src/import_queue/mock.rs b/client/consensus/common/src/import_queue/mock.rs
new file mode 100644
index 0000000000000..67deee9514a1c
--- /dev/null
+++ b/client/consensus/common/src/import_queue/mock.rs
@@ -0,0 +1,46 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2022 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see .
+
+use super::*;
+
+mockall::mock! {
+	pub ImportQueueHandle {}
+
+	impl ImportQueueService for ImportQueueHandle {
+		fn import_blocks(&mut self, origin: BlockOrigin, blocks: Vec>);
+		fn import_justifications(
+			&mut self,
+			who: RuntimeOrigin,
+			hash: B::Hash,
+			number: NumberFor,
+			justifications: Justifications,
+		);
+	}
+}
+
+mockall::mock! {
+	pub ImportQueue {}
+
+	#[async_trait::async_trait]
+	impl ImportQueue for ImportQueue {
+		fn service(&self) -> Box>;
+		fn service_ref(&mut self) -> &mut dyn ImportQueueService;
+		fn poll_actions<'a>(&mut self, cx: &mut futures::task::Context<'a>, link: &mut dyn Link);
+		async fn run(self, link: Box>);
+	}
+}
diff --git a/client/network/common/src/sync.rs b/client/network/common/src/sync.rs
index bed9935698769..5e8219c550d19 100644
--- a/client/network/common/src/sync.rs
+++ b/client/network/common/src/sync.rs
@@ -24,9 +24,7 @@ pub mod warp;
 
 use libp2p::PeerId;
 use message::{BlockAnnounce, BlockData, BlockRequest, BlockResponse};
-use sc_consensus::{
-	import_queue::RuntimeOrigin, BlockImportError, BlockImportStatus, IncomingBlock,
-};
+use sc_consensus::{import_queue::RuntimeOrigin, IncomingBlock};
 use sp_consensus::BlockOrigin;
 use sp_runtime::{
 	traits::{Block as BlockT, NumberFor},
@@ -317,6 +315,12 @@ pub trait ChainSync: Send {
 		response: BlockResponse,
 	) -> Result, BadPeer>;
 
+	/// Process received block data.
+	fn process_block_response_data(
+		&mut self,
+		blocks_to_import: Result, BadPeer>,
+	);
+
 	/// Handle a response from the remote to a justification request that we made.
 	///
 	/// `request` must be the original request that triggered `response`.
@@ -326,17 +330,6 @@ pub trait ChainSync: Send { response: BlockResponse, ) -> Result, BadPeer>; - /// A batch of blocks have been processed, with or without errors. - /// - /// Call this when a batch of blocks have been processed by the import - /// queue, with or without errors. - fn on_blocks_processed( - &mut self, - imported: usize, - count: usize, - results: Vec<(Result>, BlockImportError>, Block::Hash)>, - ) -> Box), BadPeer>>>; - /// Call this when a justification has been processed by the import queue, /// with or without errors. fn on_justification_import( @@ -378,7 +371,7 @@ pub trait ChainSync: Send { /// Call when a peer has disconnected. /// Canceled obsolete block request may result in some blocks being ready for /// import, so this functions checks for such blocks and returns them. - fn peer_disconnected(&mut self, who: &PeerId) -> Option>; + fn peer_disconnected(&mut self, who: &PeerId); /// Return some key metrics. fn metrics(&self) -> Metrics; @@ -395,7 +388,10 @@ pub trait ChainSync: Send { /// Internally calls [`ChainSync::poll_block_announce_validation()`] and /// this function should be polled until it returns [`Poll::Pending`] to /// consume all pending events. - fn poll(&mut self, cx: &mut std::task::Context) -> Poll>; + fn poll( + &mut self, + cx: &mut std::task::Context, + ) -> Poll>; /// Send block request to peer fn send_block_request(&mut self, who: PeerId, request: BlockRequest); diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index 48d6127f642c3..3a977edbca574 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -32,7 +32,6 @@ use libp2p::{ NetworkBehaviour, }; -use sc_consensus::import_queue::{IncomingBlock, RuntimeOrigin}; use sc_network_common::{ protocol::{ event::DhtEvent, @@ -43,18 +42,14 @@ use sc_network_common::{ }; use sc_peerset::{PeersetHandle, ReputationChange}; use sp_blockchain::HeaderBackend; -use sp_consensus::BlockOrigin; -use sp_runtime::{ - traits::{Block as BlockT, NumberFor}, - Justifications, -}; +use sp_runtime::traits::Block as BlockT; use std::{collections::HashSet, time::Duration}; pub use crate::request_responses::{InboundFailure, OutboundFailure, RequestId, ResponseFailure}; /// General behaviour of the network. Combines all protocols together. #[derive(NetworkBehaviour)] -#[behaviour(out_event = "BehaviourOut")] +#[behaviour(out_event = "BehaviourOut")] pub struct Behaviour where B: BlockT, @@ -72,10 +67,7 @@ where } /// Event generated by `Behaviour`. -pub enum BehaviourOut { - BlockImport(BlockOrigin, Vec>), - JustificationImport(RuntimeOrigin, B::Hash, NumberFor, Justifications), - +pub enum BehaviourOut { /// Started a random iterative Kademlia discovery query. RandomKademliaStarted, @@ -107,10 +99,7 @@ pub enum BehaviourOut { }, /// A request protocol handler issued reputation changes for the given peer. - ReputationChanges { - peer: PeerId, - changes: Vec, - }, + ReputationChanges { peer: PeerId, changes: Vec }, /// Opened a substream with the given node with the given notifications protocol. 
/// @@ -306,13 +295,9 @@ fn reported_roles_to_observed_role(roles: Roles) -> ObservedRole { } } -impl From> for BehaviourOut { +impl From> for BehaviourOut { fn from(event: CustomMessageOutcome) -> Self { match event { - CustomMessageOutcome::BlockImport(origin, blocks) => - BehaviourOut::BlockImport(origin, blocks), - CustomMessageOutcome::JustificationImport(origin, hash, nb, justification) => - BehaviourOut::JustificationImport(origin, hash, nb, justification), CustomMessageOutcome::NotificationStreamOpened { remote, protocol, @@ -344,7 +329,7 @@ impl From> for BehaviourOut { } } -impl From for BehaviourOut { +impl From for BehaviourOut { fn from(event: request_responses::Event) -> Self { match event { request_responses::Event::InboundRequest { peer, protocol, result } => @@ -357,14 +342,14 @@ impl From for BehaviourOut { } } -impl From for BehaviourOut { +impl From for BehaviourOut { fn from(event: peer_info::PeerInfoEvent) -> Self { let peer_info::PeerInfoEvent::Identified { peer_id, info } = event; BehaviourOut::PeerIdentify { peer_id, info } } } -impl From for BehaviourOut { +impl From for BehaviourOut { fn from(event: DiscoveryOut) -> Self { match event { DiscoveryOut::UnroutablePeer(_peer_id) => { diff --git a/client/network/src/config.rs b/client/network/src/config.rs index b10612dd17094..52993e2519400 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -40,7 +40,6 @@ use libp2p::{ multiaddr, Multiaddr, }; use prometheus_endpoint::Registry; -use sc_consensus::ImportQueue; use sc_network_common::{ config::{MultiaddrWithPeerId, NonDefaultSetConfig, SetConfig, TransportConfig}, sync::ChainSync, @@ -82,12 +81,6 @@ where /// name on the wire. pub fork_id: Option, - /// Import queue to use. - /// - /// The import queue is the component that verifies that blocks received from other nodes are - /// valid. - pub import_queue: Box>, - /// Instance of chain sync implementation. 
pub chain_sync: Box>, diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index f3faa44ee6dbd..f185458e0dace 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -258,6 +258,7 @@ pub mod network_state; #[doc(inline)] pub use libp2p::{multiaddr, Multiaddr, PeerId}; pub use protocol::PeerInfo; +use sc_consensus::{JustificationSyncLink, Link}; pub use sc_network_common::{ protocol::{ event::{DhtEvent, Event}, @@ -297,11 +298,15 @@ const MAX_CONNECTIONS_ESTABLISHED_INCOMING: u32 = 10_000; /// Abstraction over syncing-related services pub trait ChainSyncInterface: - NetworkSyncForkRequest> + Send + Sync + NetworkSyncForkRequest> + JustificationSyncLink + Link + Send + Sync { } impl ChainSyncInterface for T where - T: NetworkSyncForkRequest> + Send + Sync + T: NetworkSyncForkRequest> + + JustificationSyncLink + + Link + + Send + + Sync { } diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 8c1dd39b49be3..10eb31b595253 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -29,32 +29,26 @@ use libp2p::{ }, Multiaddr, PeerId, }; -use log::{debug, error, info, log, trace, warn, Level}; +use log::{debug, error, log, trace, warn, Level}; use lru::LruCache; use message::{generic::Message as GenericMessage, Message}; use notifications::{Notifications, NotificationsOut}; use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64}; use sc_client_api::HeaderBackend; -use sc_consensus::import_queue::{ - BlockImportError, BlockImportStatus, IncomingBlock, RuntimeOrigin, -}; use sc_network_common::{ config::NonReservedPeerMode, error, protocol::{role::Roles, ProtocolName}, sync::{ message::{BlockAnnounce, BlockAnnouncesHandshake, BlockData, BlockResponse, BlockState}, - BadPeer, ChainSync, ImportResult, OnBlockData, PollBlockAnnounceValidation, PollResult, - SyncStatus, + BadPeer, ChainSync, PollBlockAnnounceValidation, SyncStatus, }, utils::{interval, LruHashSet}, }; use sp_arithmetic::traits::SaturatedConversion; -use sp_consensus::BlockOrigin; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, CheckedSub, Header as HeaderT, NumberFor, Zero}, - Justifications, }; use std::{ collections::{HashMap, HashSet, VecDeque}, @@ -481,12 +475,7 @@ where } if let Some(_peer_data) = self.peers.remove(&peer) { - if let Some(OnBlockData::Import(origin, blocks)) = - self.chain_sync.peer_disconnected(&peer) - { - self.pending_messages - .push_back(CustomMessageOutcome::BlockImport(origin, blocks)); - } + self.chain_sync.peer_disconnected(&peer); self.default_peers_set_no_slot_connected_peers.remove(&peer); Ok(()) } else { @@ -785,25 +774,13 @@ where }], }, ); + self.chain_sync.process_block_response_data(blocks_to_import); if is_best { self.pending_messages.push_back(CustomMessageOutcome::PeerNewBest(who, number)); } - match blocks_to_import { - Ok(OnBlockData::Import(origin, blocks)) => - CustomMessageOutcome::BlockImport(origin, blocks), - Ok(OnBlockData::Request(peer, req)) => { - self.chain_sync.send_block_request(peer, req); - CustomMessageOutcome::None - }, - Ok(OnBlockData::Continue) => CustomMessageOutcome::None, - Err(BadPeer(id, repu)) => { - self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); - self.peerset_handle.report_peer(id, repu); - CustomMessageOutcome::None - }, - } + CustomMessageOutcome::None } /// Call this when a block has been finalized. 
The sync layer may have some additional @@ -812,58 +789,6 @@ where self.chain_sync.on_block_finalized(&hash, *header.number()) } - /// Request a justification for the given block. - /// - /// Uses `protocol` to queue a new justification request and tries to dispatch all pending - /// requests. - pub fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { - self.chain_sync.request_justification(hash, number) - } - - /// Clear all pending justification requests. - pub fn clear_justification_requests(&mut self) { - self.chain_sync.clear_justification_requests(); - } - - /// A batch of blocks have been processed, with or without errors. - /// Call this when a batch of blocks have been processed by the importqueue, with or without - /// errors. - pub fn on_blocks_processed( - &mut self, - imported: usize, - count: usize, - results: Vec<(Result>, BlockImportError>, B::Hash)>, - ) { - let results = self.chain_sync.on_blocks_processed(imported, count, results); - for result in results { - match result { - Ok((id, req)) => self.chain_sync.send_block_request(id, req), - Err(BadPeer(id, repu)) => { - self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); - self.peerset_handle.report_peer(id, repu) - }, - } - } - } - - /// Call this when a justification has been processed by the import queue, with or without - /// errors. - pub fn justification_import_result( - &mut self, - who: PeerId, - hash: B::Hash, - number: NumberFor, - success: bool, - ) { - self.chain_sync.on_justification_import(hash, number, success); - if !success { - info!("💔 Invalid justification provided by {} for #{}", who, hash); - self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC); - self.peerset_handle - .report_peer(who, sc_peerset::ReputationChange::new_fatal("Invalid justification")); - } - } - /// Set whether the syncing peers set is in reserved-only mode. pub fn set_reserved_only(&self, reserved_only: bool) { self.peerset_handle.set_reserved_only(HARDCODED_PEERSETS_SYNC, reserved_only); @@ -997,8 +922,6 @@ where #[derive(Debug)] #[must_use] pub enum CustomMessageOutcome { - BlockImport(BlockOrigin, Vec>), - JustificationImport(RuntimeOrigin, B::Hash, NumberFor, Justifications), /// Notification protocols have been opened with a remote. NotificationStreamOpened { remote: PeerId, @@ -1106,23 +1029,9 @@ where // Process any received requests received from `NetworkService` and // check if there is any block announcement validation finished. 
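 // After this change `ChainSync::poll()` only yields block-announce validation
 // results; block and justification imports are submitted to the import queue
 // by `ChainSync` itself, so no import handling is needed here.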
while let Poll::Ready(result) = self.chain_sync.poll(cx) { - match result { - PollResult::Import(import) => self.pending_messages.push_back(match import { - ImportResult::BlockImport(origin, blocks) => - CustomMessageOutcome::BlockImport(origin, blocks), - ImportResult::JustificationImport(origin, hash, number, justifications) => - CustomMessageOutcome::JustificationImport( - origin, - hash, - number, - justifications, - ), - }), - PollResult::Announce(announce) => - match self.process_block_announce_validation_result(announce) { - CustomMessageOutcome::None => {}, - outcome => self.pending_messages.push_back(outcome), - }, + match self.process_block_announce_validation_result(result) { + CustomMessageOutcome::None => {}, + outcome => self.pending_messages.push_back(outcome), } } diff --git a/client/network/src/service.rs b/client/network/src/service.rs index d35594a07e38a..08e498299a1d3 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -54,7 +54,6 @@ use libp2p::{ use log::{debug, error, info, trace, warn}; use metrics::{Histogram, HistogramVec, MetricSources, Metrics}; use parking_lot::Mutex; -use sc_consensus::{BlockImportError, BlockImportStatus, ImportQueue, Link}; use sc_network_common::{ config::{MultiaddrWithPeerId, TransportConfig}, error::Error, @@ -450,7 +449,6 @@ where is_major_syncing, network_service: swarm, service, - import_queue: params.import_queue, from_service, event_streams: out_events::OutChannels::new(params.metrics_registry.as_ref())?, peers_notifications_sinks, @@ -748,13 +746,11 @@ impl sc_consensus::JustificationSyncLink for NetworkSe /// On success, the justification will be passed to the import queue that was part at /// initialization as part of the configuration. fn request_justification(&self, hash: &B::Hash, number: NumberFor) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::RequestJustification(*hash, number)); + let _ = self.chain_sync_service.request_justification(hash, number); } fn clear_justification_requests(&self) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::ClearJustificationRequests); + let _ = self.chain_sync_service.clear_justification_requests(); } } @@ -1208,8 +1204,6 @@ impl<'a> NotificationSenderReadyT for NotificationSenderReady<'a> { /// /// Each entry corresponds to a method of `NetworkService`. enum ServiceToWorkerMsg { - RequestJustification(B::Hash, NumberFor), - ClearJustificationRequests, AnnounceBlock(B::Hash, Option>), GetValue(KademliaKey), PutValue(KademliaKey, Vec), @@ -1261,8 +1255,6 @@ where service: Arc>, /// The *actual* network. network_service: Swarm>, - /// The import queue that was passed at initialization. - import_queue: Box>, /// Messages from the [`NetworkService`] that must be processed. from_service: TracingUnboundedReceiver>, /// Senders for events that happen on the network. @@ -1290,10 +1282,6 @@ where fn poll(mut self: Pin<&mut Self>, cx: &mut std::task::Context) -> Poll { let this = &mut *self; - // Poll the import queue for actions to perform. - this.import_queue - .poll_actions(cx, &mut NetworkLink { protocol: &mut this.network_service }); - // At the time of writing of this comment, due to a high volume of messages, the network // worker sometimes takes a long time to process the loop below. When that happens, the // rest of the polling is frozen. 
In order to avoid negative side-effects caused by this @@ -1322,16 +1310,6 @@ where .behaviour_mut() .user_protocol_mut() .announce_block(hash, data), - ServiceToWorkerMsg::RequestJustification(hash, number) => this - .network_service - .behaviour_mut() - .user_protocol_mut() - .request_justification(&hash, number), - ServiceToWorkerMsg::ClearJustificationRequests => this - .network_service - .behaviour_mut() - .user_protocol_mut() - .clear_justification_requests(), ServiceToWorkerMsg::GetValue(key) => this.network_service.behaviour_mut().get_value(key), ServiceToWorkerMsg::PutValue(key, value) => @@ -1435,23 +1413,6 @@ where match poll_value { Poll::Pending => break, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::BlockImport(origin, blocks))) => { - if let Some(metrics) = this.metrics.as_ref() { - metrics.import_queue_blocks_submitted.inc(); - } - this.import_queue.import_blocks(origin, blocks); - }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::JustificationImport( - origin, - hash, - nb, - justifications, - ))) => { - if let Some(metrics) = this.metrics.as_ref() { - metrics.import_queue_justifications_submitted.inc(); - } - this.import_queue.import_justifications(origin, hash, nb, justifications); - }, Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::InboundRequest { protocol, result, @@ -1952,51 +1913,6 @@ where { } -// Implementation of `import_queue::Link` trait using the available local variables. -struct NetworkLink<'a, B, Client> -where - B: BlockT, - Client: HeaderBackend + 'static, -{ - protocol: &'a mut Swarm>, -} - -impl<'a, B, Client> Link for NetworkLink<'a, B, Client> -where - B: BlockT, - Client: HeaderBackend + 'static, -{ - fn blocks_processed( - &mut self, - imported: usize, - count: usize, - results: Vec<(Result>, BlockImportError>, B::Hash)>, - ) { - self.protocol - .behaviour_mut() - .user_protocol_mut() - .on_blocks_processed(imported, count, results) - } - fn justification_imported( - &mut self, - who: PeerId, - hash: &B::Hash, - number: NumberFor, - success: bool, - ) { - self.protocol - .behaviour_mut() - .user_protocol_mut() - .justification_import_result(who, *hash, number, success); - } - fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { - self.protocol - .behaviour_mut() - .user_protocol_mut() - .request_justification(hash, number) - } -} - fn ensure_addresses_consistent_with_transport<'a>( addresses: impl Iterator, transport: &TransportConfig, diff --git a/client/network/src/service/metrics.rs b/client/network/src/service/metrics.rs index db1b6f7f6500d..a099bba716eb9 100644 --- a/client/network/src/service/metrics.rs +++ b/client/network/src/service/metrics.rs @@ -53,8 +53,6 @@ pub struct Metrics { pub connections_opened_total: CounterVec, pub distinct_peers_connections_closed_total: Counter, pub distinct_peers_connections_opened_total: Counter, - pub import_queue_blocks_submitted: Counter, - pub import_queue_justifications_submitted: Counter, pub incoming_connections_errors_total: CounterVec, pub incoming_connections_total: Counter, pub issued_light_requests: Counter, @@ -103,14 +101,6 @@ impl Metrics { "substrate_sub_libp2p_distinct_peers_connections_opened_total", "Total number of connections opened with distinct peers" )?, registry)?, - import_queue_blocks_submitted: prometheus::register(Counter::new( - "substrate_import_queue_blocks_submitted", - "Number of blocks submitted to the import queue.", - )?, registry)?, - import_queue_justifications_submitted: prometheus::register(Counter::new( - 
"substrate_import_queue_justifications_submitted", - "Number of justifications submitted to the import queue.", - )?, registry)?, incoming_connections_errors_total: prometheus::register(CounterVec::new( Opts::new( "substrate_sub_libp2p_incoming_connections_handshake_errors_total", diff --git a/client/network/src/service/tests/chain_sync.rs b/client/network/src/service/tests/chain_sync.rs index bd4967f25973a..0f47b64c352f2 100644 --- a/client/network/src/service/tests/chain_sync.rs +++ b/client/network/src/service/tests/chain_sync.rs @@ -86,27 +86,26 @@ async fn normal_network_poll_no_peers() { #[tokio::test] async fn request_justification() { - // build `ChainSyncInterface` provider and set no expecations for it (i.e., it cannot be - // called) - let chain_sync_service = - Box::new(MockChainSyncInterface::::new()); - - // build `ChainSync` and verify that call to `request_justification()` is made - let mut chain_sync = - Box::new(MockChainSync::::new()); - let hash = H256::random(); let number = 1337u64; - chain_sync - .expect_request_justification() + // build `ChainSyncInterface` provider and and expect + // `JustificationSyncLink::request_justification() to be called once + let mut chain_sync_service = + Box::new(MockChainSyncInterface::::new()); + + chain_sync_service + .expect_justification_sync_link_request_justification() .withf(move |in_hash, in_number| &hash == in_hash && &number == in_number) .once() .returning(|_, _| ()); + // build `ChainSync` and set default expecations for it + let mut chain_sync = MockChainSync::::new(); + set_default_expecations_no_peers(&mut chain_sync); let mut network = TestNetworkBuilder::new(Handle::current()) - .with_chain_sync((chain_sync, chain_sync_service)) + .with_chain_sync((Box::new(chain_sync), chain_sync_service)) .build(); // send "request justifiction" message and poll the network @@ -121,17 +120,20 @@ async fn request_justification() { #[tokio::test] async fn clear_justification_requests() { - // build `ChainSyncInterface` provider and set no expecations for it (i.e., it cannot be - // called) - let chain_sync_service = + // build `ChainSyncInterface` provider and expect + // `JustificationSyncLink::clear_justification_requests()` to be called + let mut chain_sync_service = Box::new(MockChainSyncInterface::::new()); - // build `ChainSync` and verify that call to `clear_justification_requests()` is made + chain_sync_service + .expect_justification_sync_link_clear_justification_requests() + .once() + .returning(|| ()); + + // build `ChainSync` and set default expecations for it let mut chain_sync = Box::new(MockChainSync::::new()); - chain_sync.expect_clear_justification_requests().once().returning(|| ()); - set_default_expecations_no_peers(&mut chain_sync); let mut network = TestNetworkBuilder::new(Handle::current()) .with_chain_sync((chain_sync, chain_sync_service)) @@ -235,19 +237,13 @@ async fn on_block_finalized() { // and verify that connection to the peer is closed #[tokio::test] async fn invalid_justification_imported() { - struct DummyImportQueue( - Arc< - RwLock< - Option<( - PeerId, - substrate_test_runtime_client::runtime::Hash, - sp_runtime::traits::NumberFor, - )>, - >, - >, - ); + struct DummyImportQueueHandle; - impl sc_consensus::ImportQueue for DummyImportQueue { + impl + sc_consensus::import_queue::ImportQueueService< + substrate_test_runtime_client::runtime::Block, + > for DummyImportQueueHandle + { fn import_blocks( &mut self, _origin: sp_consensus::BlockOrigin, @@ -265,7 +261,23 @@ async fn 
invalid_justification_imported() { _justifications: sp_runtime::Justifications, ) { } + } + struct DummyImportQueue( + Arc< + RwLock< + Option<( + PeerId, + substrate_test_runtime_client::runtime::Hash, + sp_runtime::traits::NumberFor, + )>, + >, + >, + DummyImportQueueHandle, + ); + + #[async_trait::async_trait] + impl sc_consensus::ImportQueue for DummyImportQueue { fn poll_actions( &mut self, _cx: &mut futures::task::Context, @@ -275,13 +287,40 @@ async fn invalid_justification_imported() { link.justification_imported(peer, &hash, number, false); } } + + fn service( + &self, + ) -> Box< + dyn sc_consensus::import_queue::ImportQueueService< + substrate_test_runtime_client::runtime::Block, + >, + > { + Box::new(DummyImportQueueHandle {}) + } + + fn service_ref( + &mut self, + ) -> &mut dyn sc_consensus::import_queue::ImportQueueService< + substrate_test_runtime_client::runtime::Block, + > { + &mut self.1 + } + + async fn run( + self, + _link: Box>, + ) { + } } let justification_info = Arc::new(RwLock::new(None)); let listen_addr = config::build_multiaddr![Memory(rand::random::())]; let (service1, mut event_stream1) = TestNetworkBuilder::new(Handle::current()) - .with_import_queue(Box::new(DummyImportQueue(justification_info.clone()))) + .with_import_queue(Box::new(DummyImportQueue( + justification_info.clone(), + DummyImportQueueHandle {}, + ))) .with_listen_addresses(vec![listen_addr.clone()]) .build() .start_network(); @@ -331,6 +370,7 @@ async fn disconnect_peer_using_chain_sync_handle() { let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0); let listen_addr = config::build_multiaddr![Memory(rand::random::())]; + let import_queue = Box::new(sc_consensus::import_queue::mock::MockImportQueueHandle::new()); let (chain_sync_network_provider, chain_sync_network_handle) = sc_network_sync::service::network::NetworkServiceProvider::new(); let handle_clone = chain_sync_network_handle.clone(); @@ -344,7 +384,9 @@ async fn disconnect_peer_using_chain_sync_handle() { Box::new(sp_consensus::block_validation::DefaultBlockAnnounceValidator), 1u32, None, + None, chain_sync_network_handle.clone(), + import_queue, ProtocolName::from("block-request"), ProtocolName::from("state-request"), None, @@ -353,7 +395,7 @@ async fn disconnect_peer_using_chain_sync_handle() { let (node1, mut event_stream1) = TestNetworkBuilder::new(Handle::current()) .with_listen_addresses(vec![listen_addr.clone()]) - .with_chain_sync((Box::new(chain_sync), chain_sync_service)) + .with_chain_sync((Box::new(chain_sync), Box::new(chain_sync_service))) .with_chain_sync_network((chain_sync_network_provider, chain_sync_network_handle)) .with_client(client.clone()) .build() diff --git a/client/network/src/service/tests/mod.rs b/client/network/src/service/tests/mod.rs index f8635e39e9da9..fa1486a791213 100644 --- a/client/network/src/service/tests/mod.rs +++ b/client/network/src/service/tests/mod.rs @@ -21,7 +21,7 @@ use crate::{config, ChainSyncInterface, NetworkService, NetworkWorker}; use futures::prelude::*; use libp2p::Multiaddr; use sc_client_api::{BlockBackend, HeaderBackend}; -use sc_consensus::ImportQueue; +use sc_consensus::{ImportQueue, Link}; use sc_network_common::{ config::{ NonDefaultSetConfig, NonReservedPeerMode, NotificationHandshake, ProtocolId, SetConfig, @@ -93,6 +93,7 @@ impl TestNetwork { struct TestNetworkBuilder { import_queue: Option>>, + link: Option>>, client: Option>, listen_addresses: Vec, set_config: Option, @@ -106,6 +107,7 @@ impl TestNetworkBuilder { pub fn 
new(rt_handle: Handle) -> Self { Self { import_queue: None, + link: None, client: None, listen_addresses: Vec::new(), set_config: None, @@ -212,13 +214,14 @@ impl TestNetworkBuilder { } } - let import_queue = self.import_queue.unwrap_or(Box::new(sc_consensus::BasicQueue::new( - PassThroughVerifier(false), - Box::new(client.clone()), - None, - &sp_core::testing::TaskExecutor::new(), - None, - ))); + let mut import_queue = + self.import_queue.unwrap_or(Box::new(sc_consensus::BasicQueue::new( + PassThroughVerifier(false), + Box::new(client.clone()), + None, + &sp_core::testing::TaskExecutor::new(), + None, + ))); let protocol_id = ProtocolId::from("test-protocol-name"); let fork_id = Some(String::from("test-fork-id")); @@ -289,15 +292,23 @@ impl TestNetworkBuilder { Box::new(sp_consensus::block_validation::DefaultBlockAnnounceValidator), network_config.max_parallel_downloads, None, + None, chain_sync_network_handle, + import_queue.service(), block_request_protocol_config.name.clone(), state_request_protocol_config.name.clone(), None, ) .unwrap(); - (Box::new(chain_sync), chain_sync_service) + if let None = self.link { + self.link = Some(Box::new(chain_sync_service.clone())); + } + (Box::new(chain_sync), Box::new(chain_sync_service)) }); + let mut link = self + .link + .unwrap_or(Box::new(sc_network_sync::service::mock::MockChainSyncInterface::new())); let handle = self.rt_handle.clone(); let executor = move |f| { @@ -316,7 +327,6 @@ impl TestNetworkBuilder { chain: client.clone(), protocol_id, fork_id, - import_queue, chain_sync, chain_sync_service, metrics_registry: None, @@ -333,6 +343,16 @@ impl TestNetworkBuilder { self.rt_handle.spawn(async move { let _ = chain_sync_network_provider.run(service).await; }); + self.rt_handle.spawn(async move { + loop { + futures::future::poll_fn(|cx| { + import_queue.poll_actions(cx, &mut *link); + std::task::Poll::Ready(()) + }) + .await; + tokio::time::sleep(std::time::Duration::from_millis(250)).await; + } + }); TestNetwork::new(worker, self.rt_handle) } diff --git a/client/network/sync/Cargo.toml b/client/network/sync/Cargo.toml index 086ab3c30cc25..e29d8047161ce 100644 --- a/client/network/sync/Cargo.toml +++ b/client/network/sync/Cargo.toml @@ -28,6 +28,7 @@ prost = "0.11" smallvec = "1.8.0" thiserror = "1.0" fork-tree = { version = "3.0.0", path = "../../../utils/fork-tree" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../../utils/prometheus" } sc-client-api = { version = "4.0.0-dev", path = "../../api" } sc-consensus = { version = "0.10.0-dev", path = "../../consensus/common" } sc-network-common = { version = "0.10.0-dev", path = "../common" } diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs index 697445334a073..75eda91219ec8 100644 --- a/client/network/sync/src/lib.rs +++ b/client/network/sync/src/lib.rs @@ -54,9 +54,12 @@ use futures::{ }; use libp2p::{request_response::OutboundFailure, PeerId}; use log::{debug, error, info, trace, warn}; +use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; use prost::Message; use sc_client_api::{BlockBackend, ProofProvider}; -use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock}; +use sc_consensus::{ + import_queue::ImportQueueService, BlockImportError, BlockImportStatus, IncomingBlock, +}; use sc_network_common::{ config::{ NonDefaultSetConfig, NonReservedPeerMode, NotificationHandshake, ProtocolId, SetConfig, @@ -71,8 +74,8 @@ use sc_network_common::{ warp::{EncodedProof, 
WarpProofRequest, WarpSyncPhase, WarpSyncProgress, WarpSyncProvider}, BadPeer, ChainSync as ChainSyncT, ImportResult, Metrics, OnBlockData, OnBlockJustification, OnStateData, OpaqueBlockRequest, OpaqueBlockResponse, OpaqueStateRequest, - OpaqueStateResponse, PeerInfo, PeerRequest, PollBlockAnnounceValidation, PollResult, - SyncMode, SyncState, SyncStatus, + OpaqueStateResponse, PeerInfo, PeerRequest, PollBlockAnnounceValidation, SyncMode, + SyncState, SyncStatus, }, }; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver}; @@ -233,6 +236,32 @@ impl Default for AllowedRequests { } } +struct SyncingMetrics { + pub import_queue_blocks_submitted: Counter, + pub import_queue_justifications_submitted: Counter, +} + +impl SyncingMetrics { + fn register(registry: &Registry) -> Result { + Ok(Self { + import_queue_blocks_submitted: register( + Counter::new( + "substrate_sync_import_queue_blocks_submitted", + "Number of blocks submitted to the import queue.", + )?, + registry, + )?, + import_queue_justifications_submitted: register( + Counter::new( + "substrate_sync_import_queue_justifications_submitted", + "Number of justifications submitted to the import queue.", + )?, + registry, + )?, + }) + } +} + struct GapSync { blocks: BlockCollection, best_queued_number: NumberFor, @@ -311,6 +340,10 @@ pub struct ChainSync { warp_sync_protocol_name: Option, /// Pending responses pending_responses: FuturesUnordered>, + /// Handle to import queue. + import_queue: Box>, + /// Metrics. + metrics: Option, } /// All the data we have about a Peer that we are trying to sync with @@ -961,6 +994,19 @@ where Ok(self.validate_and_queue_blocks(new_blocks, gap)) } + fn process_block_response_data(&mut self, blocks_to_import: Result, BadPeer>) { + match blocks_to_import { + Ok(OnBlockData::Import(origin, blocks)) => self.import_blocks(origin, blocks), + Ok(OnBlockData::Request(peer, req)) => self.send_block_request(peer, req), + Ok(OnBlockData::Continue) => {}, + Err(BadPeer(id, repu)) => { + self.network_service + .disconnect_peer(id, self.block_announce_protocol_name.clone()); + self.network_service.report_peer(id, repu); + }, + } + } + fn on_block_justification( &mut self, who: PeerId, @@ -1016,156 +1062,6 @@ where Ok(OnBlockJustification::Nothing) } - fn on_blocks_processed( - &mut self, - imported: usize, - count: usize, - results: Vec<(Result>, BlockImportError>, B::Hash)>, - ) -> Box), BadPeer>>> { - trace!(target: "sync", "Imported {} of {}", imported, count); - - let mut output = Vec::new(); - - let mut has_error = false; - for (_, hash) in &results { - self.queue_blocks.remove(hash); - self.blocks.clear_queued(hash); - if let Some(gap_sync) = &mut self.gap_sync { - gap_sync.blocks.clear_queued(hash); - } - } - for (result, hash) in results { - if has_error { - break - } - - if result.is_err() { - has_error = true; - } - - match result { - Ok(BlockImportStatus::ImportedKnown(number, who)) => - if let Some(peer) = who { - self.update_peer_common_number(&peer, number); - }, - Ok(BlockImportStatus::ImportedUnknown(number, aux, who)) => { - if aux.clear_justification_requests { - trace!( - target: "sync", - "Block imported clears all pending justification requests {}: {:?}", - number, - hash, - ); - self.clear_justification_requests(); - } - - if aux.needs_justification { - trace!( - target: "sync", - "Block imported but requires justification {}: {:?}", - number, - hash, - ); - self.request_justification(&hash, number); - } - - if aux.bad_justification { - if let Some(ref peer) = who { - warn!("💔 Sent 
block with bad justification to import"); - output.push(Err(BadPeer(*peer, rep::BAD_JUSTIFICATION))); - } - } - - if let Some(peer) = who { - self.update_peer_common_number(&peer, number); - } - let state_sync_complete = - self.state_sync.as_ref().map_or(false, |s| s.target() == hash); - if state_sync_complete { - info!( - target: "sync", - "State sync is complete ({} MiB), restarting block sync.", - self.state_sync.as_ref().map_or(0, |s| s.progress().size / (1024 * 1024)), - ); - self.state_sync = None; - self.mode = SyncMode::Full; - output.extend(self.restart()); - } - let warp_sync_complete = self - .warp_sync - .as_ref() - .map_or(false, |s| s.target_block_hash() == Some(hash)); - if warp_sync_complete { - info!( - target: "sync", - "Warp sync is complete ({} MiB), restarting block sync.", - self.warp_sync.as_ref().map_or(0, |s| s.progress().total_bytes / (1024 * 1024)), - ); - self.warp_sync = None; - self.mode = SyncMode::Full; - output.extend(self.restart()); - } - let gap_sync_complete = - self.gap_sync.as_ref().map_or(false, |s| s.target == number); - if gap_sync_complete { - info!( - target: "sync", - "Block history download is complete." - ); - self.gap_sync = None; - } - }, - Err(BlockImportError::IncompleteHeader(who)) => - if let Some(peer) = who { - warn!( - target: "sync", - "💔 Peer sent block with incomplete header to import", - ); - output.push(Err(BadPeer(peer, rep::INCOMPLETE_HEADER))); - output.extend(self.restart()); - }, - Err(BlockImportError::VerificationFailed(who, e)) => - if let Some(peer) = who { - warn!( - target: "sync", - "💔 Verification failed for block {:?} received from peer: {}, {:?}", - hash, - peer, - e, - ); - output.push(Err(BadPeer(peer, rep::VERIFICATION_FAIL))); - output.extend(self.restart()); - }, - Err(BlockImportError::BadBlock(who)) => - if let Some(peer) = who { - warn!( - target: "sync", - "💔 Block {:?} received from peer {} has been blacklisted", - hash, - peer, - ); - output.push(Err(BadPeer(peer, rep::BAD_BLOCK))); - }, - Err(BlockImportError::MissingState) => { - // This may happen if the chain we were requesting upon has been discarded - // in the meantime because other chain has been finalized. - // Don't mark it as bad as it still may be synced if explicitly requested. 
- trace!(target: "sync", "Obsolete block {:?}", hash); - }, - e @ Err(BlockImportError::UnknownParent) | e @ Err(BlockImportError::Other(_)) => { - warn!(target: "sync", "💔 Error importing block {:?}: {}", hash, e.unwrap_err()); - self.state_sync = None; - self.warp_sync = None; - output.extend(self.restart()); - }, - Err(BlockImportError::Cancelled) => {}, - }; - } - - self.allowed_requests.set_all(); - Box::new(output.into_iter()) - } - fn on_justification_import(&mut self, hash: B::Hash, number: NumberFor, success: bool) { let finalization_result = if success { Ok((hash, number)) } else { Err(()) }; self.extra_justifications @@ -1331,7 +1227,7 @@ where } } - fn peer_disconnected(&mut self, who: &PeerId) -> Option> { + fn peer_disconnected(&mut self, who: &PeerId) { self.blocks.clear_peer_download(who); if let Some(gap_sync) = &mut self.gap_sync { gap_sync.blocks.clear_peer_download(who) @@ -1343,8 +1239,13 @@ where target.peers.remove(who); !target.peers.is_empty() }); + let blocks = self.ready_blocks(); - (!blocks.is_empty()).then(|| self.validate_and_queue_blocks(blocks, false)) + if let Some(OnBlockData::Import(origin, blocks)) = + (!blocks.is_empty()).then(|| self.validate_and_queue_blocks(blocks, false)) + { + self.import_blocks(origin, blocks); + } } fn metrics(&self) -> Metrics { @@ -1421,22 +1322,56 @@ where .map_err(|error: codec::Error| error.to_string()) } - fn poll(&mut self, cx: &mut std::task::Context) -> Poll> { + fn poll( + &mut self, + cx: &mut std::task::Context, + ) -> Poll> { while let Poll::Ready(Some(event)) = self.service_rx.poll_next_unpin(cx) { match event { ToServiceCommand::SetSyncForkRequest(peers, hash, number) => { self.set_sync_fork_request(peers, &hash, number); }, + ToServiceCommand::RequestJustification(hash, number) => + self.request_justification(&hash, number), + ToServiceCommand::ClearJustificationRequests => self.clear_justification_requests(), + ToServiceCommand::BlocksProcessed(imported, count, results) => { + for result in self.on_blocks_processed(imported, count, results) { + match result { + Ok((id, req)) => self.send_block_request(id, req), + Err(BadPeer(id, repu)) => { + self.network_service + .disconnect_peer(id, self.block_announce_protocol_name.clone()); + self.network_service.report_peer(id, repu) + }, + } + } + }, + ToServiceCommand::JustificationImported(peer, hash, number, success) => { + self.on_justification_import(hash, number, success); + if !success { + info!(target: "sync", "💔 Invalid justification provided by {} for #{}", peer, hash); + self.network_service + .disconnect_peer(peer, self.block_announce_protocol_name.clone()); + self.network_service.report_peer( + peer, + sc_peerset::ReputationChange::new_fatal("Invalid justification"), + ); + } + }, } } self.process_outbound_requests(); - if let Poll::Ready(result) = self.poll_pending_responses(cx) { - return Poll::Ready(PollResult::Import(result)) + while let Poll::Ready(result) = self.poll_pending_responses(cx) { + match result { + ImportResult::BlockImport(origin, blocks) => self.import_blocks(origin, blocks), + ImportResult::JustificationImport(who, hash, number, justifications) => + self.import_justifications(who, hash, number, justifications), + } } if let Poll::Ready(announce) = self.poll_block_announce_validation(cx) { - return Poll::Ready(PollResult::Announce(announce)) + return Poll::Ready(announce) } Poll::Pending @@ -1494,11 +1429,13 @@ where block_announce_validator: Box + Send>, max_parallel_downloads: u32, warp_sync_provider: Option>>, + metrics_registry: 
Option<&Registry>, network_service: service::network::NetworkServiceHandle, + import_queue: Box>, block_request_protocol_name: ProtocolName, state_request_protocol_name: ProtocolName, warp_sync_protocol_name: Option, - ) -> Result<(Self, Box>, NonDefaultSetConfig), ClientError> { + ) -> Result<(Self, ChainSyncInterfaceHandle, NonDefaultSetConfig), ClientError> { let (tx, service_rx) = tracing_unbounded("mpsc_chain_sync"); let block_announce_config = Self::get_block_announce_proto_config( protocol_id, @@ -1544,10 +1481,22 @@ where .clone() .into(), pending_responses: Default::default(), + import_queue, + metrics: if let Some(r) = &metrics_registry { + match SyncingMetrics::register(r) { + Ok(metrics) => Some(metrics), + Err(err) => { + error!(target: "sync", "Failed to register metrics for ChainSync: {err:?}"); + None + }, + } + } else { + None + }, }; sync.reset_sync_start_point()?; - Ok((sync, Box::new(ChainSyncInterfaceHandle::new(tx)), block_announce_config)) + Ok((sync, ChainSyncInterfaceHandle::new(tx), block_announce_config)) } /// Returns the median seen block number. @@ -2173,8 +2122,10 @@ where if request.fields == BlockAttributes::JUSTIFICATION { match self.on_block_justification(peer_id, block_response) { Ok(OnBlockJustification::Nothing) => None, - Ok(OnBlockJustification::Import { peer, hash, number, justifications }) => - Some(ImportResult::JustificationImport(peer, hash, number, justifications)), + Ok(OnBlockJustification::Import { peer, hash, number, justifications }) => { + self.import_justifications(peer, hash, number, justifications); + None + }, Err(BadPeer(id, repu)) => { self.network_service .disconnect_peer(id, self.block_announce_protocol_name.clone()); @@ -2184,8 +2135,10 @@ where } } else { match self.on_block_data(&peer_id, Some(request), block_response) { - Ok(OnBlockData::Import(origin, blocks)) => - Some(ImportResult::BlockImport(origin, blocks)), + Ok(OnBlockData::Import(origin, blocks)) => { + self.import_blocks(origin, blocks); + None + }, Ok(OnBlockData::Request(peer, req)) => { self.send_block_request(peer, req); None @@ -2712,6 +2665,182 @@ where }, } } + + fn import_blocks(&mut self, origin: BlockOrigin, blocks: Vec>) { + if let Some(metrics) = &self.metrics { + metrics.import_queue_blocks_submitted.inc(); + } + + self.import_queue.import_blocks(origin, blocks); + } + + fn import_justifications( + &mut self, + peer: PeerId, + hash: B::Hash, + number: NumberFor, + justifications: Justifications, + ) { + if let Some(metrics) = &self.metrics { + metrics.import_queue_justifications_submitted.inc(); + } + + self.import_queue.import_justifications(peer, hash, number, justifications); + } + + /// A batch of blocks have been processed, with or without errors. + /// + /// Call this when a batch of blocks have been processed by the import + /// queue, with or without errors. 
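+ ///
+ /// Returns any follow-up actions: either new block requests to send out, or
+ /// `BadPeer` entries for peers that should be disconnected and reported.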
+ fn on_blocks_processed( + &mut self, + imported: usize, + count: usize, + results: Vec<(Result>, BlockImportError>, B::Hash)>, + ) -> Box), BadPeer>>> { + trace!(target: "sync", "Imported {} of {}", imported, count); + + let mut output = Vec::new(); + + let mut has_error = false; + for (_, hash) in &results { + self.queue_blocks.remove(hash); + self.blocks.clear_queued(hash); + if let Some(gap_sync) = &mut self.gap_sync { + gap_sync.blocks.clear_queued(hash); + } + } + for (result, hash) in results { + if has_error { + break + } + + if result.is_err() { + has_error = true; + } + + match result { + Ok(BlockImportStatus::ImportedKnown(number, who)) => + if let Some(peer) = who { + self.update_peer_common_number(&peer, number); + }, + Ok(BlockImportStatus::ImportedUnknown(number, aux, who)) => { + if aux.clear_justification_requests { + trace!( + target: "sync", + "Block imported clears all pending justification requests {}: {:?}", + number, + hash, + ); + self.clear_justification_requests(); + } + + if aux.needs_justification { + trace!( + target: "sync", + "Block imported but requires justification {}: {:?}", + number, + hash, + ); + self.request_justification(&hash, number); + } + + if aux.bad_justification { + if let Some(ref peer) = who { + warn!("💔 Sent block with bad justification to import"); + output.push(Err(BadPeer(*peer, rep::BAD_JUSTIFICATION))); + } + } + + if let Some(peer) = who { + self.update_peer_common_number(&peer, number); + } + let state_sync_complete = + self.state_sync.as_ref().map_or(false, |s| s.target() == hash); + if state_sync_complete { + info!( + target: "sync", + "State sync is complete ({} MiB), restarting block sync.", + self.state_sync.as_ref().map_or(0, |s| s.progress().size / (1024 * 1024)), + ); + self.state_sync = None; + self.mode = SyncMode::Full; + output.extend(self.restart()); + } + let warp_sync_complete = self + .warp_sync + .as_ref() + .map_or(false, |s| s.target_block_hash() == Some(hash)); + if warp_sync_complete { + info!( + target: "sync", + "Warp sync is complete ({} MiB), restarting block sync.", + self.warp_sync.as_ref().map_or(0, |s| s.progress().total_bytes / (1024 * 1024)), + ); + self.warp_sync = None; + self.mode = SyncMode::Full; + output.extend(self.restart()); + } + let gap_sync_complete = + self.gap_sync.as_ref().map_or(false, |s| s.target == number); + if gap_sync_complete { + info!( + target: "sync", + "Block history download is complete." + ); + self.gap_sync = None; + } + }, + Err(BlockImportError::IncompleteHeader(who)) => + if let Some(peer) = who { + warn!( + target: "sync", + "💔 Peer sent block with incomplete header to import", + ); + output.push(Err(BadPeer(peer, rep::INCOMPLETE_HEADER))); + output.extend(self.restart()); + }, + Err(BlockImportError::VerificationFailed(who, e)) => + if let Some(peer) = who { + warn!( + target: "sync", + "💔 Verification failed for block {:?} received from peer: {}, {:?}", + hash, + peer, + e, + ); + output.push(Err(BadPeer(peer, rep::VERIFICATION_FAIL))); + output.extend(self.restart()); + }, + Err(BlockImportError::BadBlock(who)) => + if let Some(peer) = who { + warn!( + target: "sync", + "💔 Block {:?} received from peer {} has been blacklisted", + hash, + peer, + ); + output.push(Err(BadPeer(peer, rep::BAD_BLOCK))); + }, + Err(BlockImportError::MissingState) => { + // This may happen if the chain we were requesting upon has been discarded + // in the meantime because other chain has been finalized. + // Don't mark it as bad as it still may be synced if explicitly requested. 
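+ // The peer is not penalised here; the block is only logged below.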
+ trace!(target: "sync", "Obsolete block {:?}", hash); + }, + e @ Err(BlockImportError::UnknownParent) | e @ Err(BlockImportError::Other(_)) => { + warn!(target: "sync", "💔 Error importing block {:?}: {}", hash, e.unwrap_err()); + self.state_sync = None; + self.warp_sync = None; + output.extend(self.restart()); + }, + Err(BlockImportError::Cancelled) => {}, + }; + } + + self.allowed_requests.set_all(); + Box::new(output.into_iter()) + } } // This is purely during a backwards compatible transitionary period and should be removed @@ -3089,6 +3218,7 @@ mod test { let block_announce_validator = Box::new(DefaultBlockAnnounceValidator); let peer_id = PeerId::random(); + let import_queue = Box::new(sc_consensus::import_queue::mock::MockImportQueueHandle::new()); let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); let (mut sync, _, _) = ChainSync::new( @@ -3100,7 +3230,9 @@ mod test { block_announce_validator, 1, None, + None, chain_sync_network_handle, + import_queue, ProtocolName::from("block-request"), ProtocolName::from("state-request"), None, @@ -3151,6 +3283,7 @@ mod test { #[test] fn restart_doesnt_affect_peers_downloading_finality_data() { let mut client = Arc::new(TestClientBuilder::new().build()); + let import_queue = Box::new(sc_consensus::import_queue::mock::MockImportQueueHandle::new()); let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); @@ -3163,7 +3296,9 @@ mod test { Box::new(DefaultBlockAnnounceValidator), 1, None, + None, chain_sync_network_handle, + import_queue, ProtocolName::from("block-request"), ProtocolName::from("state-request"), None, @@ -3330,6 +3465,7 @@ mod test { sp_tracing::try_init_simple(); let mut client = Arc::new(TestClientBuilder::new().build()); + let import_queue = Box::new(sc_consensus::import_queue::mock::MockImportQueueHandle::new()); let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); @@ -3342,7 +3478,9 @@ mod test { Box::new(DefaultBlockAnnounceValidator), 5, None, + None, chain_sync_network_handle, + import_queue, ProtocolName::from("block-request"), ProtocolName::from("state-request"), None, @@ -3453,6 +3591,7 @@ mod test { }; let mut client = Arc::new(TestClientBuilder::new().build()); + let import_queue = Box::new(sc_consensus::import_queue::mock::MockImportQueueHandle::new()); let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); let info = client.info(); @@ -3466,7 +3605,9 @@ mod test { Box::new(DefaultBlockAnnounceValidator), 5, None, + None, chain_sync_network_handle, + import_queue, ProtocolName::from("block-request"), ProtocolName::from("state-request"), None, @@ -3584,6 +3725,7 @@ mod test { fn can_sync_huge_fork() { sp_tracing::try_init_simple(); + let import_queue = Box::new(sc_consensus::import_queue::mock::MockImportQueueHandle::new()); let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); let mut client = Arc::new(TestClientBuilder::new().build()); @@ -3619,7 +3761,9 @@ mod test { Box::new(DefaultBlockAnnounceValidator), 5, None, + None, chain_sync_network_handle, + import_queue, ProtocolName::from("block-request"), ProtocolName::from("state-request"), None, @@ -3722,6 +3866,7 @@ mod test { fn syncs_fork_without_duplicate_requests() { sp_tracing::try_init_simple(); + let import_queue = Box::new(sc_consensus::import_queue::mock::MockImportQueueHandle::new()); let (_chain_sync_network_provider, chain_sync_network_handle) = 
NetworkServiceProvider::new(); let mut client = Arc::new(TestClientBuilder::new().build()); @@ -3757,7 +3902,9 @@ mod test { Box::new(DefaultBlockAnnounceValidator), 5, None, + None, chain_sync_network_handle, + import_queue, ProtocolName::from("block-request"), ProtocolName::from("state-request"), None, @@ -3881,6 +4028,7 @@ mod test { #[test] fn removes_target_fork_on_disconnect() { sp_tracing::try_init_simple(); + let import_queue = Box::new(sc_consensus::import_queue::mock::MockImportQueueHandle::new()); let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); let mut client = Arc::new(TestClientBuilder::new().build()); @@ -3895,7 +4043,9 @@ mod test { Box::new(DefaultBlockAnnounceValidator), 1, None, + None, chain_sync_network_handle, + import_queue, ProtocolName::from("block-request"), ProtocolName::from("state-request"), None, @@ -3921,6 +4071,7 @@ mod test { #[test] fn can_import_response_with_missing_blocks() { sp_tracing::try_init_simple(); + let import_queue = Box::new(sc_consensus::import_queue::mock::MockImportQueueHandle::new()); let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); let mut client2 = Arc::new(TestClientBuilder::new().build()); @@ -3937,7 +4088,9 @@ mod test { Box::new(DefaultBlockAnnounceValidator), 1, None, + None, chain_sync_network_handle, + import_queue, ProtocolName::from("block-request"), ProtocolName::from("state-request"), None, diff --git a/client/network/sync/src/mock.rs b/client/network/sync/src/mock.rs index 48d72c425bd03..b59ea7e4fea70 100644 --- a/client/network/sync/src/mock.rs +++ b/client/network/sync/src/mock.rs @@ -21,11 +21,10 @@ use futures::task::Poll; use libp2p::PeerId; -use sc_consensus::{BlockImportError, BlockImportStatus}; use sc_network_common::sync::{ message::{BlockAnnounce, BlockData, BlockRequest, BlockResponse}, BadPeer, ChainSync as ChainSyncT, Metrics, OnBlockData, OnBlockJustification, - OpaqueBlockResponse, PeerInfo, PollBlockAnnounceValidation, PollResult, SyncStatus, + OpaqueBlockResponse, PeerInfo, PollBlockAnnounceValidation, SyncStatus, }; use sp_runtime::traits::{Block as BlockT, NumberFor}; @@ -60,17 +59,12 @@ mockall::mock! { request: Option>, response: BlockResponse, ) -> Result, BadPeer>; + fn process_block_response_data(&mut self, blocks_to_import: Result, BadPeer>); fn on_block_justification( &mut self, who: PeerId, response: BlockResponse, ) -> Result, BadPeer>; - fn on_blocks_processed( - &mut self, - imported: usize, - count: usize, - results: Vec<(Result>, BlockImportError>, Block::Hash)>, - ) -> Box), BadPeer>>>; fn on_justification_import( &mut self, hash: Block::Hash, @@ -89,7 +83,7 @@ mockall::mock! { &mut self, cx: &mut std::task::Context<'a>, ) -> Poll>; - fn peer_disconnected(&mut self, who: &PeerId) -> Option>; + fn peer_disconnected(&mut self, who: &PeerId); fn metrics(&self) -> Metrics; fn block_response_into_blocks( &self, @@ -99,7 +93,7 @@ mockall::mock! { fn poll<'a>( &mut self, cx: &mut std::task::Context<'a>, - ) -> Poll>; + ) -> Poll>; fn send_block_request( &mut self, who: PeerId, diff --git a/client/network/sync/src/service/chain_sync.rs b/client/network/sync/src/service/chain_sync.rs index cf07c65ee3109..50ded5b643dea 100644 --- a/client/network/sync/src/service/chain_sync.rs +++ b/client/network/sync/src/service/chain_sync.rs @@ -17,6 +17,7 @@ // along with this program. If not, see . 
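The handle defined in this file now also implements the import queue's `Link`, so
callbacks recorded by the import queue are forwarded over the service channel and
executed by `ChainSync::poll()`. A condensed sketch of that round trip, using only
names introduced in this patch:

    // In the import queue's background task:
    link.blocks_processed(imported, count, results);
    // `impl Link for ChainSyncInterfaceHandle` turns the call into a message:
    // ToServiceCommand::BlocksProcessed(imported, count, results)
    // which `ChainSync::poll()` drains and dispatches to `on_blocks_processed`.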
use libp2p::PeerId; +use sc_consensus::{BlockImportError, BlockImportStatus, JustificationSyncLink, Link}; use sc_network_common::service::NetworkSyncForkRequest; use sc_utils::mpsc::TracingUnboundedSender; use sp_runtime::traits::{Block as BlockT, NumberFor}; @@ -25,9 +26,18 @@ use sp_runtime::traits::{Block as BlockT, NumberFor}; #[derive(Debug)] pub enum ToServiceCommand { SetSyncForkRequest(Vec, B::Hash, NumberFor), + RequestJustification(B::Hash, NumberFor), + ClearJustificationRequests, + BlocksProcessed( + usize, + usize, + Vec<(Result>, BlockImportError>, B::Hash)>, + ), + JustificationImported(PeerId, B::Hash, NumberFor, bool), } /// Handle for communicating with `ChainSync` asynchronously +#[derive(Clone)] pub struct ChainSyncInterfaceHandle { tx: TracingUnboundedSender>, } @@ -56,3 +66,46 @@ impl NetworkSyncForkRequest> .unbounded_send(ToServiceCommand::SetSyncForkRequest(peers, hash, number)); } } + +impl JustificationSyncLink for ChainSyncInterfaceHandle { + /// Request a justification for the given block from the network. + /// + /// On success, the justification will be passed to the import queue that was part at + /// initialization as part of the configuration. + fn request_justification(&self, hash: &B::Hash, number: NumberFor) { + let _ = self.tx.unbounded_send(ToServiceCommand::RequestJustification(*hash, number)); + } + + fn clear_justification_requests(&self) { + let _ = self.tx.unbounded_send(ToServiceCommand::ClearJustificationRequests); + } +} + +impl Link for ChainSyncInterfaceHandle { + fn blocks_processed( + &mut self, + imported: usize, + count: usize, + results: Vec<(Result>, BlockImportError>, B::Hash)>, + ) { + let _ = self + .tx + .unbounded_send(ToServiceCommand::BlocksProcessed(imported, count, results)); + } + + fn justification_imported( + &mut self, + who: PeerId, + hash: &B::Hash, + number: NumberFor, + success: bool, + ) { + let _ = self + .tx + .unbounded_send(ToServiceCommand::JustificationImported(who, *hash, number, success)); + } + + fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { + let _ = self.tx.unbounded_send(ToServiceCommand::RequestJustification(*hash, number)); + } +} diff --git a/client/network/sync/src/service/mock.rs b/client/network/sync/src/service/mock.rs index c8a29e1fba8ea..d8aad2fa7bac1 100644 --- a/client/network/sync/src/service/mock.rs +++ b/client/network/sync/src/service/mock.rs @@ -18,6 +18,7 @@ use futures::channel::oneshot; use libp2p::{Multiaddr, PeerId}; +use sc_consensus::{BlockImportError, BlockImportStatus}; use sc_network_common::{ config::MultiaddrWithPeerId, protocol::ProtocolName, @@ -29,13 +30,43 @@ use sp_runtime::traits::{Block as BlockT, NumberFor}; use std::collections::HashSet; mockall::mock! 
{ - pub ChainSyncInterface {} + pub ChainSyncInterface { + pub fn justification_sync_link_request_justification(&self, hash: &B::Hash, number: NumberFor); + pub fn justification_sync_link_clear_justification_requests(&self); + } impl NetworkSyncForkRequest> for ChainSyncInterface { fn set_sync_fork_request(&self, peers: Vec, hash: B::Hash, number: NumberFor); } + + impl sc_consensus::Link for ChainSyncInterface { + fn blocks_processed( + &mut self, + imported: usize, + count: usize, + results: Vec<(Result>, BlockImportError>, B::Hash)>, + ); + fn justification_imported( + &mut self, + who: PeerId, + hash: &B::Hash, + number: NumberFor, + success: bool, + ); + fn request_justification(&mut self, hash: &B::Hash, number: NumberFor); + } +} + +impl sc_consensus::JustificationSyncLink for MockChainSyncInterface { + fn request_justification(&self, hash: &B::Hash, number: NumberFor) { + self.justification_sync_link_request_justification(hash, number); + } + + fn clear_justification_requests(&self) { + self.justification_sync_link_clear_justification_requests(); + } } mockall::mock! { diff --git a/client/network/sync/src/tests.rs b/client/network/sync/src/tests.rs index a03e657f03ab2..61de08443a6c2 100644 --- a/client/network/sync/src/tests.rs +++ b/client/network/sync/src/tests.rs @@ -37,6 +37,7 @@ use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _ // poll `ChainSync` and verify that a new sync fork request has been registered #[tokio::test] async fn delegate_to_chainsync() { + let import_queue = Box::new(sc_consensus::import_queue::mock::MockImportQueueHandle::new()); let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); let (mut chain_sync, chain_sync_service, _) = ChainSync::new( sc_network_common::sync::SyncMode::Full, @@ -47,7 +48,9 @@ async fn delegate_to_chainsync() { Box::new(DefaultBlockAnnounceValidator), 1u32, None, + None, chain_sync_network_handle, + import_queue, ProtocolName::from("block-request"), ProtocolName::from("state-request"), None, diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index d3642e69cb632..173ca81653b1a 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -43,8 +43,8 @@ use sc_client_api::{ }; use sc_consensus::{ BasicQueue, BlockCheckParams, BlockImport, BlockImportParams, BoxJustificationImport, - ForkChoiceStrategy, ImportResult, JustificationImport, JustificationSyncLink, LongestChain, - Verifier, + ForkChoiceStrategy, ImportQueue, ImportResult, JustificationImport, JustificationSyncLink, + LongestChain, Verifier, }; use sc_network::{ config::{NetworkConfiguration, RequestResponseConfig, Role, SyncMode}, @@ -896,7 +896,9 @@ where block_announce_validator, network_config.max_parallel_downloads, Some(warp_sync), + None, chain_sync_network_handle, + import_queue.service(), block_request_protocol_config.name.clone(), state_request_protocol_config.name.clone(), Some(warp_protocol_config.name.clone()), @@ -915,9 +917,8 @@ where chain: client.clone(), protocol_id, fork_id, - import_queue, chain_sync: Box::new(chain_sync), - chain_sync_service, + chain_sync_service: Box::new(chain_sync_service.clone()), metrics_registry: None, block_announce_config, request_response_protocol_configs: [ @@ -936,6 +937,9 @@ where self.rt_handle().spawn(async move { chain_sync_network_provider.run(service).await; }); + self.rt_handle().spawn(async move { + import_queue.run(Box::new(chain_sync_service)).await; + }); self.mut_peers(move |peers| { for peer in 
peers.iter_mut() { diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index dd89ce6dff10a..7153672030d6a 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -853,7 +853,9 @@ where block_announce_validator, config.network.max_parallel_downloads, warp_sync_provider, + config.prometheus_config.as_ref().map(|config| config.registry.clone()).as_ref(), chain_sync_network_handle, + import_queue.service(), block_request_protocol_config.name.clone(), state_request_protocol_config.name.clone(), warp_sync_protocol_config.as_ref().map(|config| config.name.clone()), @@ -877,9 +879,8 @@ where chain: client.clone(), protocol_id: protocol_id.clone(), fork_id: config.chain_spec.fork_id().map(ToOwned::to_owned), - import_queue: Box::new(import_queue), chain_sync: Box::new(chain_sync), - chain_sync_service, + chain_sync_service: Box::new(chain_sync_service.clone()), metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()), block_announce_config, request_response_protocol_configs: request_response_protocol_configs @@ -925,6 +926,7 @@ where Some("networking"), chain_sync_network_provider.run(network.clone()), ); + spawn_handle.spawn("import-queue", None, import_queue.run(Box::new(chain_sync_service))); let (system_rpc_tx, system_rpc_rx) = tracing_unbounded("mpsc_system_rpc"); diff --git a/client/service/src/chain_ops/import_blocks.rs b/client/service/src/chain_ops/import_blocks.rs index c0612124dd0c2..ca09c1658d72f 100644 --- a/client/service/src/chain_ops/import_blocks.rs +++ b/client/service/src/chain_ops/import_blocks.rs @@ -157,7 +157,7 @@ fn import_block_to_queue( let (header, extrinsics) = signed_block.block.deconstruct(); let hash = header.hash(); // import queue handles verification and importing it into the client. - queue.import_blocks( + queue.service_ref().import_blocks( BlockOrigin::File, vec![IncomingBlock:: { hash, From 15cfd9c5dbbfa1f3ed49623eb55b67354d1645e9 Mon Sep 17 00:00:00 2001 From: tgmichel Date: Fri, 9 Dec 2022 20:55:56 +0100 Subject: [PATCH 09/29] Trace response payload in default `jsonrpsee` middleware (#12886) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Trace result in default `jsonrpsee` middleware * `rpc_metrics::extra` Co-authored-by: Bastian Köcher Co-authored-by: Bastian Köcher --- client/rpc-servers/src/middleware.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/client/rpc-servers/src/middleware.rs b/client/rpc-servers/src/middleware.rs index 0d77442323241..9e0d422b2350e 100644 --- a/client/rpc-servers/src/middleware.rs +++ b/client/rpc-servers/src/middleware.rs @@ -204,8 +204,9 @@ impl RpcMiddleware { } /// Called once the JSON-RPC request is finished and response is sent to the output buffer. 
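The hunk just below completes the response side of the metrics middleware: the full result payload is logged under a dedicated `rpc_metrics::extra` target, so operators can enable the potentially very large payload traces separately from the ordinary `rpc_metrics` timing traces. With the usual Substrate log-filter syntax that would look like the following (binary name illustrative):

	./substrate --log rpc_metrics=trace,rpc_metrics::extra=trace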
-	fn on_response(&self, _result: &str, started_at: std::time::Instant) {
+	fn on_response(&self, result: &str, started_at: std::time::Instant) {
 		log::trace!(target: "rpc_metrics", "[{}] on_response started_at={:?}", self.transport_label, started_at);
+		log::trace!(target: "rpc_metrics::extra", "[{}] result={:?}", self.transport_label, result);
 		self.metrics.requests_finished.with_label_values(&[self.transport_label]).inc();
 	}
 }

From 33e6029e4c75bab850aac1d10925e2b327e9c4b2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bastian=20K=C3=B6cher?=
Date: Sat, 10 Dec 2022 14:28:36 +0100
Subject: [PATCH 10/29] Ensure that we inform all tasks to stop before starting
 the 60 seconds shutdown (#12897)

* Ensure that we inform all tasks to stop before starting the 60 seconds shutdown

The change that waits at most 60 seconds for the node to shut down actually
introduced a bug: we always waited the full 60 seconds, because we never
informed our tasks that they should shut down. The solution is to drop the
task manager, as this informs all tasks to end. This also adds tests to
ensure that the behavior works as expected. (This should already have been
done in the first PR! :()

* ".git/.scripts/fmt.sh" 1

Co-authored-by: command-bot <>
---
 Cargo.lock               |   1 +
 client/cli/Cargo.toml    |   1 +
 client/cli/src/runner.rs | 211 +++++++++++++++++++++++++++++++++++++++
 3 files changed, 213 insertions(+)

diff --git a/Cargo.lock b/Cargo.lock
index 8225e557141d1..110f6fcdc9a19 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -7227,6 +7227,7 @@ dependencies = [
  "clap 4.0.11",
  "fdlimit",
  "futures",
+ "futures-timer",
  "libp2p",
  "log",
  "names",
diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml
index 2f079a0c7c56f..fd84ff4d4574b 100644
--- a/client/cli/Cargo.toml
+++ b/client/cli/Cargo.toml
@@ -49,6 +49,7 @@ sp-version = { version = "5.0.0", path = "../../primitives/version" }
 
 [dev-dependencies]
 tempfile = "3.1.0"
+futures-timer = "3.0.1"
 
 [features]
 default = ["rocksdb"]
diff --git a/client/cli/src/runner.rs b/client/cli/src/runner.rs
index c976c319708c2..d4191feddfa90 100644
--- a/client/cli/src/runner.rs
+++ b/client/cli/src/runner.rs
@@ -145,8 +145,14 @@ impl<C: SubstrateCli> Runner<C> {
 		E: std::error::Error + Send + Sync + 'static + From<ServiceError>,
 	{
 		self.print_node_infos();
+
 		let mut task_manager = self.tokio_runtime.block_on(initialize(self.config))?;
 		let res = self.tokio_runtime.block_on(main(task_manager.future().fuse()));
+		// We need to drop the task manager here to inform all tasks that they should shut down.
+		//
+		// This is important to be done before we instruct the tokio runtime to shut down.
+		// Otherwise the tokio runtime will wait the full 60 seconds for all tasks to stop.
+		drop(task_manager);
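Dropping the `TaskManager` before calling `shutdown_timeout` is the whole fix: it turns the 60 seconds into an upper bound instead of a guaranteed wait. A self-contained sketch of the same ordering with plain tokio (illustrative only; assumes the tokio crate with its full feature set, and an atomic flag standing in for the task manager):

use std::{
	sync::{
		atomic::{AtomicBool, Ordering},
		Arc,
	},
	time::Duration,
};

fn main() {
	let rt = tokio::runtime::Runtime::new().unwrap();

	// The flag plays the role of the task manager: "dropping" it is how
	// tasks learn that they should stop.
	let stop = Arc::new(AtomicBool::new(false));
	let stop2 = stop.clone();

	// A blocking task occupies a dedicated thread, so tokio cannot cancel it
	// at an `.await` point; it has to be told to stop.
	rt.spawn_blocking(move || {
		while !stop2.load(Ordering::Relaxed) {
			std::thread::sleep(Duration::from_millis(50));
		}
	});

	// Signal shutdown first (the patch does this by dropping the `TaskManager`)...
	stop.store(true, Ordering::Relaxed);
	// ...so the 60 seconds below are an upper bound, not a fixed wait.
	rt.shutdown_timeout(Duration::from_secs(60));
}

 		// Give all futures 60 seconds to shut down, before tokio "leaks" them.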
self.tokio_runtime.shutdown_timeout(Duration::from_secs(60)); @@ -208,3 +214,208 @@ pub fn print_node_infos(config: &Configuration) { ); info!("⛓ Native runtime: {}", C::native_runtime_version(&config.chain_spec)); } + +#[cfg(test)] +mod tests { + use std::{ + path::PathBuf, + sync::atomic::{AtomicU64, Ordering}, + }; + + use sc_network::config::NetworkConfiguration; + use sc_service::{Arc, ChainType, GenericChainSpec, NoExtension}; + use sp_runtime::create_runtime_str; + use sp_version::create_apis_vec; + + use super::*; + + struct Cli; + + impl SubstrateCli for Cli { + fn author() -> String { + "test".into() + } + + fn impl_name() -> String { + "yep".into() + } + + fn impl_version() -> String { + "version".into() + } + + fn description() -> String { + "desc".into() + } + + fn support_url() -> String { + "no.pe".into() + } + + fn copyright_start_year() -> i32 { + 2042 + } + + fn load_spec( + &self, + _: &str, + ) -> std::result::Result, String> { + Err("nope".into()) + } + + fn native_runtime_version( + _: &Box, + ) -> &'static sp_version::RuntimeVersion { + const VERSION: sp_version::RuntimeVersion = sp_version::RuntimeVersion { + spec_name: create_runtime_str!("spec"), + impl_name: create_runtime_str!("name"), + authoring_version: 0, + spec_version: 0, + impl_version: 0, + apis: create_apis_vec!([]), + transaction_version: 2, + state_version: 0, + }; + + &VERSION + } + } + + fn create_runner() -> Runner { + let runtime = build_runtime().unwrap(); + + let runner = Runner::new( + Configuration { + impl_name: "spec".into(), + impl_version: "3".into(), + role: sc_service::Role::Authority, + tokio_handle: runtime.handle().clone(), + transaction_pool: Default::default(), + network: NetworkConfiguration::new_memory(), + keystore: sc_service::config::KeystoreConfig::InMemory, + keystore_remote: None, + database: sc_client_db::DatabaseSource::ParityDb { path: PathBuf::from("db") }, + trie_cache_maximum_size: None, + state_pruning: None, + blocks_pruning: sc_client_db::BlocksPruning::KeepAll, + chain_spec: Box::new(GenericChainSpec::from_genesis( + "test", + "test_id", + ChainType::Development, + || unimplemented!("Not required in tests"), + Vec::new(), + None, + None, + None, + None, + NoExtension::None, + )), + wasm_method: Default::default(), + wasm_runtime_overrides: None, + execution_strategies: Default::default(), + rpc_http: None, + rpc_ws: None, + rpc_ipc: None, + rpc_ws_max_connections: None, + rpc_cors: None, + rpc_methods: Default::default(), + rpc_max_payload: None, + rpc_max_request_size: None, + rpc_max_response_size: None, + rpc_id_provider: None, + rpc_max_subs_per_conn: None, + ws_max_out_buffer_capacity: None, + prometheus_config: None, + telemetry_endpoints: None, + default_heap_pages: None, + offchain_worker: Default::default(), + force_authoring: false, + disable_grandpa: false, + dev_key_seed: None, + tracing_targets: None, + tracing_receiver: Default::default(), + max_runtime_instances: 8, + announce_block: true, + base_path: None, + informant_output_format: Default::default(), + runtime_cache_size: 2, + }, + runtime, + ) + .unwrap(); + + runner + } + + #[test] + fn ensure_run_until_exit_informs_tasks_to_end() { + let runner = create_runner(); + + let counter = Arc::new(AtomicU64::new(0)); + let counter2 = counter.clone(); + + runner + .run_node_until_exit(move |cfg| async move { + let task_manager = TaskManager::new(cfg.tokio_handle.clone(), None).unwrap(); + let (sender, receiver) = futures::channel::oneshot::channel(); + + // We need to use `spawn_blocking` here so that 
we get a dedicated thread for our
+			// future. This is important for this test, as otherwise tokio can just "drop" the
+			// future.
+			task_manager.spawn_handle().spawn_blocking("test", None, async move {
+				let _ = sender.send(());
+				loop {
+					counter2.fetch_add(1, Ordering::Relaxed);
+					futures_timer::Delay::new(Duration::from_millis(50)).await;
+				}
+			});
+
+			task_manager.spawn_essential_handle().spawn_blocking("test2", None, async {
+				// Stop this essential task as soon as our other task has started.
+				// It will signal that the task manager should end.
+				let _ = receiver.await;
+			});
+
+			Ok::<_, sc_service::Error>(task_manager)
+		})
+		.unwrap_err();
+
+		let count = counter.load(Ordering::Relaxed);
+
+		// Ensure that our counting task was running for less than 30 seconds.
+		// It should be killed right away, but to stay robust on slow CI machines we are
+		// a little bit more "relaxed" here.
+		assert!((count as u128) < (Duration::from_secs(30).as_millis() / 50));
+	}
+
+	/// This test ensures that `run_node_until_exit` aborts waiting for "stuck" tasks after 60
+	/// seconds, i.e. it does not wait until they are finished (which may never happen).
+	#[test]
+	fn ensure_run_until_exit_is_not_blocking_indefinitely() {
+		let runner = create_runner();
+
+		runner
+			.run_node_until_exit(move |cfg| async move {
+				let task_manager = TaskManager::new(cfg.tokio_handle.clone(), None).unwrap();
+				let (sender, receiver) = futures::channel::oneshot::channel();
+
+				// We need to use `spawn_blocking` here so that we get a dedicated thread for our
+				// future. This future is blocking code that will never end on its own.
+				task_manager.spawn_handle().spawn_blocking("test", None, async move {
+					let _ = sender.send(());
+					loop {
+						std::thread::sleep(Duration::from_secs(30));
+					}
+				});
+
+				task_manager.spawn_essential_handle().spawn_blocking("test2", None, async {
+					// Stop this essential task as soon as our other task has started.
+					// It will signal that the task manager should end.
+ let _ = receiver.await; + }); + + Ok::<_, sc_service::Error>(task_manager) + }) + .unwrap_err(); + } +} From 2f0d59d766ff79ca2b552216a411c8e2b1762da4 Mon Sep 17 00:00:00 2001 From: Ankan <10196091+Ank4n@users.noreply.github.com> Date: Sat, 10 Dec 2022 23:52:23 +0100 Subject: [PATCH 11/29] Safe desired targets call (#12826) * checked call for desired targets * fix compile * fmt * fix tests * cleaner with and_then --- frame/election-provider-multi-phase/src/lib.rs | 10 +++++----- .../election-provider-multi-phase/src/signed.rs | 10 ++++++---- frame/election-provider-support/src/lib.rs | 16 +++++++--------- 3 files changed, 18 insertions(+), 18 deletions(-) diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index 2d49cd79dbcad..cd70514fd3461 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -1409,12 +1409,12 @@ impl Pallet { return Err(ElectionError::DataProvider("Snapshot too big for submission.")) } - let mut desired_targets = - T::DataProvider::desired_targets().map_err(ElectionError::DataProvider)?; + let mut desired_targets = as ElectionProviderBase>::desired_targets_checked() + .map_err(|e| ElectionError::DataProvider(e))?; - // If `desired_targets` > `targets.len()`, cap `desired_targets` to that - // level and emit a warning - let max_desired_targets: u32 = (targets.len() as u32).min(T::MaxWinners::get()); + // If `desired_targets` > `targets.len()`, cap `desired_targets` to that level and emit a + // warning + let max_desired_targets: u32 = targets.len() as u32; if desired_targets > max_desired_targets { log!( warn, diff --git a/frame/election-provider-multi-phase/src/signed.rs b/frame/election-provider-multi-phase/src/signed.rs index 9d629ad77fd79..12d39e83b6c09 100644 --- a/frame/election-provider-multi-phase/src/signed.rs +++ b/frame/election-provider-multi-phase/src/signed.rs @@ -594,10 +594,12 @@ mod tests { DesiredTargets::set(4); MaxWinners::set(3); - let (_, _, actual_desired_targets) = MultiPhase::create_snapshot_external().unwrap(); - - // snapshot is created with min of desired_targets and MaxWinners - assert_eq!(actual_desired_targets, 3); + // snapshot not created because data provider returned an unexpected number of + // desired_targets + assert_noop!( + MultiPhase::create_snapshot_external(), + ElectionError::DataProvider("desired_targets must not be greater than MaxWinners."), + ); }) } diff --git a/frame/election-provider-support/src/lib.rs b/frame/election-provider-support/src/lib.rs index 38924a18e2f54..8b26148844c39 100644 --- a/frame/election-provider-support/src/lib.rs +++ b/frame/election-provider-support/src/lib.rs @@ -386,15 +386,13 @@ pub trait ElectionProviderBase { /// checked call to `Self::DataProvider::desired_targets()` ensuring the value never exceeds /// [`Self::MaxWinners`]. 
fn desired_targets_checked() -> data_provider::Result { - match Self::DataProvider::desired_targets() { - Ok(desired_targets) => - if desired_targets <= Self::MaxWinners::get() { - Ok(desired_targets) - } else { - Err("desired_targets should not be greater than MaxWinners") - }, - Err(e) => Err(e), - } + Self::DataProvider::desired_targets().and_then(|desired_targets| { + if desired_targets <= Self::MaxWinners::get() { + Ok(desired_targets) + } else { + Err("desired_targets must not be greater than MaxWinners.") + } + }) } } From 0ba5206f670d7029dcab20d4a6df05e796fe6716 Mon Sep 17 00:00:00 2001 From: Luke Schoen Date: Mon, 12 Dec 2022 09:10:18 +1100 Subject: [PATCH 12/29] Fix typo (#12900) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 02f8a7591acc5..7d8c7e575581c 100644 --- a/README.md +++ b/README.md @@ -12,7 +12,7 @@ Then try out one of the [tutorials](https://docs.substrate.io/tutorials/). ## Community & Support -Join the highly active and supportive community on the [Susbstrate Stack Exchange](https://substrate.stackexchange.com/) to ask questions about use and problems you run into using this software. +Join the highly active and supportive community on the [Substrate Stack Exchange](https://substrate.stackexchange.com/) to ask questions about use and problems you run into using this software. Please do report bugs and [issues here](https://github.com/paritytech/substrate/issues) for anything you suspect requires action in the source. ## Contributions & Code of Conduct From 9772209ee962fe36c3502afb638e1bd835c23d9e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sun, 11 Dec 2022 23:18:59 +0100 Subject: [PATCH 13/29] ValidateUnsigned: Improve docs. (#12870) * ValidateUnsigned: Improve docs. * Review comments --- primitives/runtime/src/traits.rs | 33 ++++++++++++++++++++------------ 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/primitives/runtime/src/traits.rs b/primitives/runtime/src/traits.rs index 6af711cba8e50..c69f8616b4be5 100644 --- a/primitives/runtime/src/traits.rs +++ b/primitives/runtime/src/traits.rs @@ -1335,12 +1335,13 @@ pub trait GetNodeBlockType { type NodeBlock: self::Block; } -/// Something that can validate unsigned extrinsics for the transaction pool. +/// Provide validation for unsigned extrinsics. /// -/// Note that any checks done here are only used for determining the validity of -/// the transaction for the transaction pool. -/// During block execution phase one need to perform the same checks anyway, -/// since this function is not being called. +/// This trait provides two functions [`pre_dispatch`](Self::pre_dispatch) and +/// [`validate_unsigned`](Self::validate_unsigned). The [`pre_dispatch`](Self::pre_dispatch) +/// function is called right before dispatching the call wrapped by an unsigned extrinsic. The +/// [`validate_unsigned`](Self::validate_unsigned) function is mainly being used in the context of +/// the transaction pool to check the validity of the call wrapped by an unsigned extrinsic. pub trait ValidateUnsigned { /// The call to validate type Call; @@ -1348,13 +1349,15 @@ pub trait ValidateUnsigned { /// Validate the call right before dispatch. /// /// This method should be used to prevent transactions already in the pool - /// (i.e. passing `validate_unsigned`) from being included in blocks - /// in case we know they now became invalid. + /// (i.e. 
passing [`validate_unsigned`](Self::validate_unsigned)) from being included in blocks
+	/// in case they became invalid since being added to the pool.
 	///
-	/// By default it's a good idea to call `validate_unsigned` from within
-	/// this function again to make sure we never include an invalid transaction.
+	/// By default it's a good idea to call [`validate_unsigned`](Self::validate_unsigned) from
+	/// within this function again to make sure we never include an invalid transaction. Otherwise
+	/// the implementation of the call or this method will need to provide proper validation to
+	/// ensure that the transaction is valid.
 	///
-	/// Changes made to storage WILL be persisted if the call returns `Ok`.
+	/// Changes made to storage *WILL* be persisted if the call returns `Ok`.
 	fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> {
 		Self::validate_unsigned(TransactionSource::InBlock, call)
 			.map(|_| ())
@@ -1363,8 +1366,14 @@ pub trait ValidateUnsigned {
 	/// Return the validity of the call
 	///
-	/// This doesn't execute any side-effects; it merely checks
-	/// whether the transaction would panic if it were included or not.
+	/// This method has no side-effects. It merely checks whether the call would be rejected
+	/// by the runtime in an unsigned extrinsic.
+	///
+	/// The validity checks should be as lightweight as possible because every node will execute
+	/// this code before the unsigned extrinsic enters the transaction pool and also periodically
+	/// afterwards to ensure the validity. To prevent DoS-ing a network with unsigned
+	/// extrinsics, these validity checks should include some checks around uniqueness, for
+	/// example checking that the unsigned extrinsic was sent by an authority in the active set.
 	///
 	/// Changes made to storage should be discarded by the caller.
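To make the uniqueness advice above concrete, here is a minimal sketch of such an implementation in FRAME style. The pallet, the `submit_heartbeat` call, and the tag prefix are invented for illustration; only the `ValidTransaction` builder API is taken as given:

use sp_runtime::{
	traits::ValidateUnsigned,
	transaction_validity::{
		InvalidTransaction, TransactionSource, TransactionValidity, ValidTransaction,
	},
};

// Assumes a pallet with `Pallet<T>`, a `Config` trait and a `Call::submit_heartbeat` variant.
impl<T: Config> ValidateUnsigned for Pallet<T> {
	type Call = Call<T>;

	fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity {
		// Only one specific unsigned call is accepted into the pool.
		if let Call::submit_heartbeat { authority_index, .. } = call {
			// Keep the check lightweight, and enforce uniqueness via `and_provides`:
			// two heartbeats from the same authority cannot both sit in the pool.
			ValidTransaction::with_tag_prefix("ExampleHeartbeat")
				.priority(u64::MAX)
				.and_provides(authority_index)
				.longevity(64)
				.propagate(true)
				.build()
		} else {
			InvalidTransaction::Call.into()
		}
	}
}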
fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity; From 06090ab35255833d892016dfc74fdc9d2d2fe06f Mon Sep 17 00:00:00 2001 From: Niklas Adolfsson Date: Mon, 12 Dec 2022 11:32:55 +0100 Subject: [PATCH 14/29] rpc server with HTTP/WS on the same socket (#12663) * jsonrpsee v0.16 add backwards compatibility run old http server on http only * cargo fmt * update jsonrpsee 0.16.1 * less verbose cors log * fix nit in log: WS -> HTTP * revert needless changes in Cargo.lock * remove unused features in tower * fix nits; add client-core feature * jsonrpsee v0.16.2 --- Cargo.lock | 165 ++++++++++-------- bin/node-template/node/Cargo.toml | 2 +- bin/node/cli/Cargo.toml | 2 +- bin/node/rpc/Cargo.toml | 2 +- client/beefy/rpc/Cargo.toml | 2 +- client/beefy/rpc/src/lib.rs | 2 +- client/consensus/babe/rpc/Cargo.toml | 2 +- client/consensus/manual-seal/Cargo.toml | 2 +- client/finality-grandpa/rpc/Cargo.toml | 2 +- client/finality-grandpa/rpc/src/lib.rs | 2 +- client/merkle-mountain-range/rpc/Cargo.toml | 2 +- client/rpc-api/Cargo.toml | 2 +- client/rpc-servers/Cargo.toml | 5 +- client/rpc-servers/src/lib.rs | 142 ++++++++------- client/rpc-servers/src/middleware.rs | 130 +++++--------- client/rpc-spec-v2/Cargo.toml | 2 +- client/rpc-spec-v2/src/chain_spec/tests.rs | 2 +- client/rpc/Cargo.toml | 2 +- client/rpc/src/author/tests.rs | 2 +- client/rpc/src/chain/tests.rs | 2 +- client/rpc/src/state/mod.rs | 3 +- client/rpc/src/state/tests.rs | 2 +- client/rpc/src/system/tests.rs | 2 +- client/service/Cargo.toml | 2 +- client/service/src/lib.rs | 50 ++---- client/sync-state-rpc/Cargo.toml | 2 +- frame/transaction-payment/rpc/Cargo.toml | 2 +- utils/frame/remote-externalities/src/lib.rs | 120 ++++++++----- utils/frame/rpc/client/Cargo.toml | 2 +- utils/frame/rpc/client/src/lib.rs | 5 +- .../rpc/state-trie-migration-rpc/Cargo.toml | 2 +- utils/frame/rpc/support/Cargo.toml | 4 +- utils/frame/rpc/system/Cargo.toml | 2 +- 33 files changed, 355 insertions(+), 317 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 110f6fcdc9a19..2f2b4aa7cf35d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -288,7 +288,7 @@ dependencies = [ "futures-sink", "futures-util", "memchr", - "pin-project-lite 0.2.6", + "pin-project-lite 0.2.9", ] [[package]] @@ -2293,7 +2293,7 @@ dependencies = [ "futures-io", "memchr", "parking", - "pin-project-lite 0.2.6", + "pin-project-lite 0.2.9", "waker-fn", ] @@ -2350,7 +2350,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.6", + "pin-project-lite 0.2.9", "pin-utils", "slab", ] @@ -2667,15 +2667,21 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.2" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60daa14be0e0786db0f03a9e57cb404c9d756eed2b6c62b9ea98ec5743ec75a9" +checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", "http", - "pin-project-lite 0.2.6", + "pin-project-lite 0.2.9", ] +[[package]] +name = "http-range-header" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" + [[package]] name = "httparse" version = "1.8.0" @@ -2710,7 +2716,7 @@ dependencies = [ "httparse", "httpdate", "itoa 1.0.4", - "pin-project-lite 0.2.6", + "pin-project-lite 0.2.9", "socket2", "tokio", "tower-service", @@ -2907,24 +2913,23 @@ dependencies = [ [[package]] name = "jsonrpsee" -version = "0.15.1" +version = "0.16.2" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bd0d559d5e679b1ab2f869b486a11182923863b1b3ee8b421763cdd707b783a" +checksum = "7d291e3a5818a2384645fd9756362e6d89cf0541b0b916fa7702ea4a9833608e" dependencies = [ "jsonrpsee-core", - "jsonrpsee-http-server", "jsonrpsee-proc-macros", + "jsonrpsee-server", "jsonrpsee-types", "jsonrpsee-ws-client", - "jsonrpsee-ws-server", "tracing", ] [[package]] name = "jsonrpsee-client-transport" -version = "0.15.1" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8752740ecd374bcbf8b69f3e80b0327942df76f793f8d4e60d3355650c31fb74" +checksum = "965de52763f2004bc91ac5bcec504192440f0b568a5d621c59d9dbd6f886c3fb" dependencies = [ "futures-util", "http", @@ -2943,9 +2948,9 @@ dependencies = [ [[package]] name = "jsonrpsee-core" -version = "0.15.1" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3dc3e9cf2ba50b7b1d7d76a667619f82846caa39e8e8daa8a4962d74acaddca" +checksum = "a4e70b4439a751a5de7dd5ed55eacff78ebf4ffe0fc009cb1ebb11417f5b536b" dependencies = [ "anyhow", "arrayvec 0.7.2", @@ -2956,10 +2961,8 @@ dependencies = [ "futures-timer", "futures-util", "globset", - "http", "hyper", "jsonrpsee-types", - "lazy_static", "parking_lot 0.12.1", "rand 0.8.5", "rustc-hash", @@ -2969,45 +2972,48 @@ dependencies = [ "thiserror", "tokio", "tracing", - "tracing-futures", - "unicase", ] [[package]] -name = "jsonrpsee-http-server" -version = "0.15.1" +name = "jsonrpsee-proc-macros" +version = "0.16.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baa6da1e4199c10d7b1d0a6e5e8bd8e55f351163b6f4b3cbb044672a69bd4c1c" +dependencies = [ + "heck", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "jsonrpsee-server" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03802f0373a38c2420c70b5144742d800b509e2937edc4afb116434f07120117" +checksum = "1fb69dad85df79527c019659a992498d03f8495390496da2f07e6c24c2b356fc" dependencies = [ "futures-channel", "futures-util", + "http", "hyper", "jsonrpsee-core", "jsonrpsee-types", "serde", "serde_json", + "soketto", "tokio", + "tokio-stream", + "tokio-util", + "tower", "tracing", - "tracing-futures", -] - -[[package]] -name = "jsonrpsee-proc-macros" -version = "0.15.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd67957d4280217247588ac86614ead007b301ca2fa9f19c19f880a536f029e3" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", ] [[package]] name = "jsonrpsee-types" -version = "0.15.1" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e290bba767401b646812f608c099b922d8142603c9e73a50fb192d3ac86f4a0d" +checksum = "5bd522fe1ce3702fd94812965d7bb7a3364b1c9aba743944c5a00529aae80f8c" dependencies = [ "anyhow", "beef", @@ -3019,9 +3025,9 @@ dependencies = [ [[package]] name = "jsonrpsee-ws-client" -version = "0.15.1" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ee5feddd5188e62ac08fcf0e56478138e581509d4730f3f7be9b57dd402a4ff" +checksum = "0b83daeecfc6517cfe210df24e570fb06213533dfb990318fae781f4c7119dd9" dependencies = [ "http", "jsonrpsee-client-transport", @@ -3029,26 +3035,6 @@ dependencies = [ "jsonrpsee-types", ] -[[package]] -name = "jsonrpsee-ws-server" -version = "0.15.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d488ba74fb369e5ab68926feb75a483458b88e768d44319f37e4ecad283c7325" -dependencies = [ - "futures-channel", - "futures-util", - "http", - "jsonrpsee-core", - "jsonrpsee-types", - "serde_json", - "soketto", - "tokio", - "tokio-stream", - "tokio-util", - "tracing", - "tracing-futures", -] - [[package]] name = "k256" version = "0.11.5" @@ -6334,9 +6320,9 @@ checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" [[package]] name = "pin-project-lite" -version = "0.2.6" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc0e1f259c92177c30a4c9d177246edd0a3568b25756a977d0632cf8fa37e905" +checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" [[package]] name = "pin-utils" @@ -8110,11 +8096,14 @@ name = "sc-rpc-server" version = "4.0.0-dev" dependencies = [ "futures", + "http", "jsonrpsee", "log", "serde_json", "substrate-prometheus-endpoint", "tokio", + "tower", + "tower-http", ] [[package]] @@ -8822,6 +8811,7 @@ dependencies = [ "bytes", "flate2", "futures", + "http", "httparse", "log", "rand 0.8.5", @@ -10297,7 +10287,7 @@ dependencies = [ "mio", "num_cpus", "parking_lot 0.12.1", - "pin-project-lite 0.2.6", + "pin-project-lite 0.2.9", "signal-hook-registry", "socket2", "tokio-macros", @@ -10333,7 +10323,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b2f3f698253f03119ac0102beaa64f67a67e08074d03a22d18784104543727f" dependencies = [ "futures-core", - "pin-project-lite 0.2.6", + "pin-project-lite 0.2.9", "tokio", ] @@ -10360,7 +10350,7 @@ dependencies = [ "futures-core", "futures-io", "futures-sink", - "pin-project-lite 0.2.6", + "pin-project-lite 0.2.9", "tokio", "tracing", ] @@ -10374,6 +10364,41 @@ dependencies = [ "serde", ] +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-http" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f873044bf02dd1e8239e9c1293ea39dad76dc594ec16185d0a1bf31d8dc8d858" +dependencies = [ + "bitflags", + "bytes", + "futures-core", + "futures-util", + "http", + "http-body", + "http-range-header", + "pin-project-lite 0.2.9", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" + [[package]] name = "tower-service" version = "0.3.1" @@ -10387,7 +10412,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a400e31aa60b9d44a52a8ee0343b5b18566b03a8321e0d321f695cf56e940160" dependencies = [ "cfg-if", - "pin-project-lite 0.2.6", + "log", + "pin-project-lite 0.2.9", "tracing-attributes", "tracing-core", ] @@ -10657,15 +10683,6 @@ dependencies = [ "static_assertions", ] -[[package]] -name = "unicase" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" -dependencies = [ - "version_check", -] - [[package]] name = "unicode-bidi" version = "0.3.4" diff --git a/bin/node-template/node/Cargo.toml b/bin/node-template/node/Cargo.toml index d609edc88401d..2ea841093d0e2 100644 --- a/bin/node-template/node/Cargo.toml +++ b/bin/node-template/node/Cargo.toml @@ -43,7 
+43,7 @@ frame-system = { version = "4.0.0-dev", path = "../../../frame/system" } pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-payment" } # These dependencies are used for the node template's RPCs -jsonrpsee = { version = "0.15.1", features = ["server"] } +jsonrpsee = { version = "0.16.2", features = ["server"] } sc-rpc = { version = "4.0.0-dev", path = "../../../client/rpc" } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } sc-rpc-api = { version = "0.10.0-dev", path = "../../../client/rpc-api" } diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 2ca37e7febe16..4ee4bcd033921 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -39,7 +39,7 @@ array-bytes = "4.1" clap = { version = "4.0.9", features = ["derive"], optional = true } codec = { package = "parity-scale-codec", version = "3.0.0" } serde = { version = "1.0.136", features = ["derive"] } -jsonrpsee = { version = "0.15.1", features = ["server"] } +jsonrpsee = { version = "0.16.2", features = ["server"] } futures = "0.3.21" log = "0.4.17" rand = "0.8" diff --git a/bin/node/rpc/Cargo.toml b/bin/node/rpc/Cargo.toml index 9d2810413613f..f34922a287dfe 100644 --- a/bin/node/rpc/Cargo.toml +++ b/bin/node/rpc/Cargo.toml @@ -13,7 +13,7 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpsee = { version = "0.15.1", features = ["server"] } +jsonrpsee = { version = "0.16.2", features = ["server"] } node-primitives = { version = "2.0.0", path = "../primitives" } pallet-transaction-payment-rpc = { version = "4.0.0-dev", path = "../../../frame/transaction-payment/rpc/" } mmr-rpc = { version = "4.0.0-dev", path = "../../../client/merkle-mountain-range/rpc/" } diff --git a/client/beefy/rpc/Cargo.toml b/client/beefy/rpc/Cargo.toml index d27225824539a..f5b5770153477 100644 --- a/client/beefy/rpc/Cargo.toml +++ b/client/beefy/rpc/Cargo.toml @@ -11,7 +11,7 @@ homepage = "https://substrate.io" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } futures = "0.3.21" -jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } +jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } log = "0.4" parking_lot = "0.12.1" serde = { version = "1.0.136", features = ["derive"] } diff --git a/client/beefy/rpc/src/lib.rs b/client/beefy/rpc/src/lib.rs index d29ed433c38db..59a133b86214e 100644 --- a/client/beefy/rpc/src/lib.rs +++ b/client/beefy/rpc/src/lib.rs @@ -172,7 +172,7 @@ mod tests { }; use beefy_primitives::{known_payloads, Payload, SignedCommitment}; use codec::{Decode, Encode}; - use jsonrpsee::{types::EmptyParams, RpcModule}; + use jsonrpsee::{types::EmptyServerParams as EmptyParams, RpcModule}; use sp_runtime::traits::{BlakeTwo256, Hash}; use substrate_test_runtime_client::runtime::Block; diff --git a/client/consensus/babe/rpc/Cargo.toml b/client/consensus/babe/rpc/Cargo.toml index d0a65a3fc3193..4f5aaf85494b9 100644 --- a/client/consensus/babe/rpc/Cargo.toml +++ b/client/consensus/babe/rpc/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } +jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } futures = "0.3.21" serde = { version = "1.0.136", features = ["derive"] } thiserror = "1.0" diff --git a/client/consensus/manual-seal/Cargo.toml b/client/consensus/manual-seal/Cargo.toml 
index cf151424c2ee5..fb89445a97002 100644 --- a/client/consensus/manual-seal/Cargo.toml +++ b/client/consensus/manual-seal/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } +jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } assert_matches = "1.3.0" async-trait = "0.1.57" codec = { package = "parity-scale-codec", version = "3.0.0" } diff --git a/client/finality-grandpa/rpc/Cargo.toml b/client/finality-grandpa/rpc/Cargo.toml index 7be77c122bab2..252c5e3871a64 100644 --- a/client/finality-grandpa/rpc/Cargo.toml +++ b/client/finality-grandpa/rpc/Cargo.toml @@ -12,7 +12,7 @@ homepage = "https://substrate.io" [dependencies] finality-grandpa = { version = "0.16.0", features = ["derive-codec"] } futures = "0.3.16" -jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } +jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } log = "0.4.8" parity-scale-codec = { version = "3.0.0", features = ["derive"] } serde = { version = "1.0.105", features = ["derive"] } diff --git a/client/finality-grandpa/rpc/src/lib.rs b/client/finality-grandpa/rpc/src/lib.rs index 85df72de77b54..dfdad666ba8f3 100644 --- a/client/finality-grandpa/rpc/src/lib.rs +++ b/client/finality-grandpa/rpc/src/lib.rs @@ -138,7 +138,7 @@ mod tests { use std::{collections::HashSet, convert::TryInto, sync::Arc}; use jsonrpsee::{ - types::{EmptyParams, SubscriptionId}, + types::{EmptyServerParams as EmptyParams, SubscriptionId}, RpcModule, }; use parity_scale_codec::{Decode, Encode}; diff --git a/client/merkle-mountain-range/rpc/Cargo.toml b/client/merkle-mountain-range/rpc/Cargo.toml index ca14544000bdb..dcc5e49c52051 100644 --- a/client/merkle-mountain-range/rpc/Cargo.toml +++ b/client/merkle-mountain-range/rpc/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0" } -jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } +jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } serde = { version = "1.0.136", features = ["derive"] } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } diff --git a/client/rpc-api/Cargo.toml b/client/rpc-api/Cargo.toml index cb82a3b26706b..c46488db2d8e1 100644 --- a/client/rpc-api/Cargo.toml +++ b/client/rpc-api/Cargo.toml @@ -28,4 +28,4 @@ sp-rpc = { version = "6.0.0", path = "../../primitives/rpc" } sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" } sp-tracing = { version = "6.0.0", path = "../../primitives/tracing" } sp-version = { version = "5.0.0", path = "../../primitives/version" } -jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } +jsonrpsee = { version = "0.16.2", features = ["server", "client-core", "macros"] } diff --git a/client/rpc-servers/Cargo.toml b/client/rpc-servers/Cargo.toml index a3e64c367afb6..b494749ffd26a 100644 --- a/client/rpc-servers/Cargo.toml +++ b/client/rpc-servers/Cargo.toml @@ -14,8 +14,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.3.21" -jsonrpsee = { version = "0.15.1", features = ["server"] } +jsonrpsee = { version = "0.16.2", features = ["server"] } log = "0.4.17" serde_json = "1.0.85" tokio = { version = "1.22.0", features = ["parking_lot"] } prometheus-endpoint = { package = 
"substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } +tower-http = { version = "0.3.4", features = ["cors"] } +tower = "0.4.13" +http = "0.2.8" diff --git a/client/rpc-servers/src/lib.rs b/client/rpc-servers/src/lib.rs index 7eb825e169bfa..1fa2ba81d8672 100644 --- a/client/rpc-servers/src/lib.rs +++ b/client/rpc-servers/src/lib.rs @@ -21,17 +21,21 @@ #![warn(missing_docs)] use jsonrpsee::{ - http_server::{AccessControlBuilder, HttpServerBuilder, HttpServerHandle}, - ws_server::{WsServerBuilder, WsServerHandle}, + server::{ + middleware::proxy_get_request::ProxyGetRequestLayer, AllowHosts, ServerBuilder, + ServerHandle, + }, RpcModule, }; use std::{error::Error as StdError, net::SocketAddr}; -pub use crate::middleware::{RpcMetrics, RpcMiddleware}; +pub use crate::middleware::RpcMetrics; +use http::header::HeaderValue; pub use jsonrpsee::core::{ id_providers::{RandomIntegerIdProvider, RandomStringIdProvider}, traits::IdProvider, }; +use tower_http::cors::{AllowOrigin, CorsLayer}; const MEGABYTE: usize = 1024 * 1024; @@ -46,12 +50,11 @@ const WS_MAX_SUBS_PER_CONN: usize = 1024; pub mod middleware; -/// Type alias for http server -pub type HttpServer = HttpServerHandle; -/// Type alias for ws server -pub type WsServer = WsServerHandle; +/// Type alias JSON-RPC server +pub type Server = ServerHandle; -/// WebSocket specific settings on the server. +/// Server config. +#[derive(Debug, Clone)] pub struct WsConfig { /// Maximum connections. pub max_connections: Option, @@ -67,8 +70,8 @@ impl WsConfig { // Deconstructs the config to get the finalized inner values. // // `Payload size` or `max subs per connection` bigger than u32::MAX will be truncated. - fn deconstruct(self) -> (u32, u32, u64, u32) { - let max_conns = self.max_connections.unwrap_or(WS_MAX_CONNECTIONS) as u64; + fn deconstruct(self) -> (u32, u32, u32, u32) { + let max_conns = self.max_connections.unwrap_or(WS_MAX_CONNECTIONS) as u32; let max_payload_in_mb = payload_size_or_default(self.max_payload_in_mb) as u32; let max_payload_out_mb = payload_size_or_default(self.max_payload_out_mb) as u32; let max_subs_per_conn = self.max_subs_per_conn.unwrap_or(WS_MAX_SUBS_PER_CONN) as u32; @@ -86,31 +89,27 @@ pub async fn start_http( metrics: Option, rpc_api: RpcModule, rt: tokio::runtime::Handle, -) -> Result> { - let max_payload_in = payload_size_or_default(max_payload_in_mb); - let max_payload_out = payload_size_or_default(max_payload_out_mb); +) -> Result> { + let max_payload_in = payload_size_or_default(max_payload_in_mb) as u32; + let max_payload_out = payload_size_or_default(max_payload_out_mb) as u32; + let host_filter = hosts_filter(cors.is_some(), &addrs); - let mut acl = AccessControlBuilder::new(); + let middleware = tower::ServiceBuilder::new() + // Proxy `GET /health` requests to internal `system_health` method. + .layer(ProxyGetRequestLayer::new("/health", "system_health")?) + .layer(try_into_cors(cors)?); - if let Some(cors) = cors { - // Whitelist listening address. - // NOTE: set_allowed_hosts will whitelist both ports but only one will used. - acl = acl.set_allowed_hosts(format_allowed_hosts(&addrs[..]))?; - acl = acl.set_allowed_origins(cors)?; - }; - - let builder = HttpServerBuilder::new() - .max_request_body_size(max_payload_in as u32) - .max_response_body_size(max_payload_out as u32) - .set_access_control(acl.build()) - .health_api("/health", "system_health")? 
- .custom_tokio_runtime(rt); + let builder = ServerBuilder::new() + .max_request_body_size(max_payload_in) + .max_response_body_size(max_payload_out) + .set_host_filtering(host_filter) + .set_middleware(middleware) + .custom_tokio_runtime(rt) + .http_only(); let rpc_api = build_rpc_api(rpc_api); let (handle, addr) = if let Some(metrics) = metrics { - let middleware = RpcMiddleware::new(metrics, "http".into()); - let builder = builder.set_middleware(middleware); - let server = builder.build(&addrs[..]).await?; + let server = builder.set_logger(metrics).build(&addrs[..]).await?; let addr = server.local_addr(); (server.start(rpc_api)?, addr) } else { @@ -120,16 +119,16 @@ pub async fn start_http( }; log::info!( - "Running JSON-RPC HTTP server: addr={}, allowed origins={:?}", + "Running JSON-RPC HTTP server: addr={}, allowed origins={}", addr.map_or_else(|_| "unknown".to_string(), |a| a.to_string()), - cors + format_cors(cors) ); Ok(handle) } -/// Start WS server listening on given address. -pub async fn start_ws( +/// Start a JSON-RPC server listening on given address that supports both HTTP and WS. +pub async fn start( addrs: [SocketAddr; 2], cors: Option<&Vec>, ws_config: WsConfig, @@ -137,27 +136,26 @@ pub async fn start_ws( rpc_api: RpcModule, rt: tokio::runtime::Handle, id_provider: Option>, -) -> Result> { +) -> Result> { let (max_payload_in, max_payload_out, max_connections, max_subs_per_conn) = ws_config.deconstruct(); - let mut acl = AccessControlBuilder::new(); + let host_filter = hosts_filter(cors.is_some(), &addrs); - if let Some(cors) = cors { - // Whitelist listening address. - // NOTE: set_allowed_hosts will whitelist both ports but only one will used. - acl = acl.set_allowed_hosts(format_allowed_hosts(&addrs[..]))?; - acl = acl.set_allowed_origins(cors)?; - }; + let middleware = tower::ServiceBuilder::new() + // Proxy `GET /health` requests to internal `system_health` method. + .layer(ProxyGetRequestLayer::new("/health", "system_health")?) 
+ .layer(try_into_cors(cors)?); - let mut builder = WsServerBuilder::new() + let mut builder = ServerBuilder::new() .max_request_body_size(max_payload_in) .max_response_body_size(max_payload_out) .max_connections(max_connections) .max_subscriptions_per_connection(max_subs_per_conn) .ping_interval(std::time::Duration::from_secs(30)) - .custom_tokio_runtime(rt) - .set_access_control(acl.build()); + .set_host_filtering(host_filter) + .set_middleware(middleware) + .custom_tokio_runtime(rt); if let Some(provider) = id_provider { builder = builder.set_id_provider(provider); @@ -167,9 +165,7 @@ pub async fn start_ws( let rpc_api = build_rpc_api(rpc_api); let (handle, addr) = if let Some(metrics) = metrics { - let middleware = RpcMiddleware::new(metrics, "ws".into()); - let builder = builder.set_middleware(middleware); - let server = builder.build(&addrs[..]).await?; + let server = builder.set_logger(metrics).build(&addrs[..]).await?; let addr = server.local_addr(); (server.start(rpc_api)?, addr) } else { @@ -179,23 +175,14 @@ pub async fn start_ws( }; log::info!( - "Running JSON-RPC WS server: addr={}, allowed origins={:?}", + "Running JSON-RPC WS server: addr={}, allowed origins={}", addr.map_or_else(|_| "unknown".to_string(), |a| a.to_string()), - cors + format_cors(cors) ); Ok(handle) } -fn format_allowed_hosts(addrs: &[SocketAddr]) -> Vec { - let mut hosts = Vec::with_capacity(addrs.len() * 2); - for addr in addrs { - hosts.push(format!("localhost:{}", addr.port())); - hosts.push(format!("127.0.0.1:{}", addr.port())); - } - hosts -} - fn build_rpc_api(mut rpc_api: RpcModule) -> RpcModule { let mut available_methods = rpc_api.method_names().collect::>(); available_methods.sort(); @@ -214,3 +201,40 @@ fn build_rpc_api(mut rpc_api: RpcModule) -> RpcModu fn payload_size_or_default(size_mb: Option) -> usize { size_mb.map_or(RPC_MAX_PAYLOAD_DEFAULT, |mb| mb.saturating_mul(MEGABYTE)) } + +fn hosts_filter(enabled: bool, addrs: &[SocketAddr]) -> AllowHosts { + if enabled { + // NOTE The listening addresses are whitelisted by default. + let mut hosts = Vec::with_capacity(addrs.len() * 2); + for addr in addrs { + hosts.push(format!("localhost:{}", addr.port()).into()); + hosts.push(format!("127.0.0.1:{}", addr.port()).into()); + } + AllowHosts::Only(hosts) + } else { + AllowHosts::Any + } +} + +fn try_into_cors( + maybe_cors: Option<&Vec>, +) -> Result> { + if let Some(cors) = maybe_cors { + let mut list = Vec::new(); + for origin in cors { + list.push(HeaderValue::from_str(origin)?); + } + Ok(CorsLayer::new().allow_origin(AllowOrigin::list(list))) + } else { + // allow all cors + Ok(CorsLayer::permissive()) + } +} + +fn format_cors(maybe_cors: Option<&Vec>) -> String { + if let Some(cors) = maybe_cors { + format!("{:?}", cors) + } else { + format!("{:?}", ["*"]) + } +} diff --git a/client/rpc-servers/src/middleware.rs b/client/rpc-servers/src/middleware.rs index 9e0d422b2350e..1c25ac1dfd1b3 100644 --- a/client/rpc-servers/src/middleware.rs +++ b/client/rpc-servers/src/middleware.rs @@ -16,9 +16,9 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! RPC middlware to collect prometheus metrics on RPC calls. +//! RPC middleware to collect prometheus metrics on RPC calls. 
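For reference, the same building blocks in isolation: a minimal jsonrpsee 0.16 server combining the `ProxyGetRequestLayer` health endpoint with a tower-http CORS layer, as the new `start_http`/`start` functions above do. This is a hedged sketch, assuming jsonrpsee 0.16 with the "server" feature plus tower, tower-http (with "cors") and tokio; the address, origin and method are arbitrary:

use jsonrpsee::{
	server::{middleware::proxy_get_request::ProxyGetRequestLayer, ServerBuilder},
	RpcModule,
};
use tower_http::cors::{AllowOrigin, CorsLayer};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
	// Explicit allow-list, mirroring the non-permissive branch of `try_into_cors`.
	let cors = CorsLayer::new()
		.allow_origin(AllowOrigin::list(vec!["http://localhost:3000".parse()?]));

	let middleware = tower::ServiceBuilder::new()
		// `GET /health` is answered by calling the JSON-RPC method `system_health`.
		.layer(ProxyGetRequestLayer::new("/health", "system_health")?)
		.layer(cors);

	let server = ServerBuilder::new()
		.set_middleware(middleware)
		.build("127.0.0.1:9944")
		.await?;

	let mut module = RpcModule::new(());
	module.register_method("system_health", |_, _| Ok("ok"))?;

	let handle = server.start(module)?;
	handle.stopped().await;
	Ok(())
}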
-use jsonrpsee::core::middleware::{Headers, HttpMiddleware, MethodKind, Params, WsMiddleware}; +use jsonrpsee::server::logger::{HttpRequest, Logger, MethodKind, Params, TransportProtocol}; use prometheus_endpoint::{ register, Counter, CounterVec, HistogramOpts, HistogramVec, Opts, PrometheusError, Registry, U64, @@ -54,9 +54,9 @@ pub struct RpcMetrics { calls_started: CounterVec, /// Number of calls completed. calls_finished: CounterVec, - /// Number of Websocket sessions opened (Websocket only). + /// Number of Websocket sessions opened. ws_sessions_opened: Option>, - /// Number of Websocket sessions closed (Websocket only). + /// Number of Websocket sessions closed. ws_sessions_closed: Option>, } @@ -139,62 +139,61 @@ impl RpcMetrics { } } -#[derive(Clone)] -/// Middleware for RPC calls -pub struct RpcMiddleware { - metrics: RpcMetrics, - transport_label: &'static str, -} +impl Logger for RpcMetrics { + type Instant = std::time::Instant; -impl RpcMiddleware { - /// Create a new [`RpcMiddleware`] with the provided [`RpcMetrics`]. - pub fn new(metrics: RpcMetrics, transport_label: &'static str) -> Self { - Self { metrics, transport_label } + fn on_connect( + &self, + _remote_addr: SocketAddr, + _request: &HttpRequest, + transport: TransportProtocol, + ) { + if let TransportProtocol::WebSocket = transport { + self.ws_sessions_opened.as_ref().map(|counter| counter.inc()); + } } - /// Called when a new JSON-RPC request comes to the server. - fn on_request(&self) -> std::time::Instant { + fn on_request(&self, transport: TransportProtocol) -> Self::Instant { + let transport_label = transport_label_str(transport); let now = std::time::Instant::now(); - self.metrics.requests_started.with_label_values(&[self.transport_label]).inc(); + self.requests_started.with_label_values(&[transport_label]).inc(); now } - /// Called on each JSON-RPC method call, batch requests will trigger `on_call` multiple times. - fn on_call(&self, name: &str, params: Params, kind: MethodKind) { + fn on_call(&self, name: &str, params: Params, kind: MethodKind, transport: TransportProtocol) { + let transport_label = transport_label_str(transport); log::trace!( target: "rpc_metrics", "[{}] on_call name={} params={:?} kind={}", - self.transport_label, + transport_label, name, params, kind, ); - self.metrics - .calls_started - .with_label_values(&[self.transport_label, name]) - .inc(); + self.calls_started.with_label_values(&[transport_label, name]).inc(); } - /// Called on each JSON-RPC method completion, batch requests will trigger `on_result` multiple - /// times. - fn on_result(&self, name: &str, success: bool, started_at: std::time::Instant) { + fn on_result( + &self, + name: &str, + success: bool, + started_at: Self::Instant, + transport: TransportProtocol, + ) { + let transport_label = transport_label_str(transport); let micros = started_at.elapsed().as_micros(); log::debug!( target: "rpc_metrics", "[{}] {} call took {} μs", - self.transport_label, + transport_label, name, micros, ); - self.metrics - .calls_time - .with_label_values(&[self.transport_label, name]) - .observe(micros as _); + self.calls_time.with_label_values(&[transport_label, name]).observe(micros as _); - self.metrics - .calls_finished + self.calls_finished .with_label_values(&[ - self.transport_label, + transport_label, name, // the label "is_error", so `success` should be regarded as false // and vice-versa to be registrered correctly. 
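With jsonrpsee 0.16 the separate `HttpMiddleware`/`WsMiddleware` traits collapse into the single `Logger` shown here, with `TransportProtocol` distinguishing the transports. A minimal sketch of a custom logger against this trait, a toy stdout timer rather than the patch's Prometheus-backed `RpcMetrics` (it would be attached with `builder.set_logger(...)`, as in the server changes above):

use std::{net::SocketAddr, time::Instant};

use jsonrpsee::server::logger::{HttpRequest, Logger, MethodKind, Params, TransportProtocol};

// One impl now serves HTTP and WebSocket connections alike.
#[derive(Clone)]
struct CallTimer;

impl Logger for CallTimer {
	type Instant = Instant;

	fn on_connect(&self, remote: SocketAddr, _request: &HttpRequest, t: TransportProtocol) {
		println!("connect from {remote} over {t:?}");
	}

	fn on_request(&self, _t: TransportProtocol) -> Self::Instant {
		// The returned instant is threaded through to `on_result`/`on_response`.
		Instant::now()
	}

	fn on_call(&self, name: &str, _params: Params, _kind: MethodKind, _t: TransportProtocol) {
		println!("call {name}");
	}

	fn on_result(&self, name: &str, success: bool, started: Self::Instant, _t: TransportProtocol) {
		println!("{name} success={success} took {:?}", started.elapsed());
	}

	fn on_response(&self, _result: &str, started: Self::Instant, _t: TransportProtocol) {
		println!("response sent after {:?}", started.elapsed());
	}

	fn on_disconnect(&self, remote: SocketAddr, t: TransportProtocol) {
		println!("disconnect {remote} over {t:?}");
	}
}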
@@ -203,58 +202,23 @@ impl RpcMiddleware { .inc(); } - /// Called once the JSON-RPC request is finished and response is sent to the output buffer. - fn on_response(&self, result: &str, started_at: std::time::Instant) { - log::trace!(target: "rpc_metrics", "[{}] on_response started_at={:?}", self.transport_label, started_at); - log::trace!(target: "rpc_metrics::extra", "[{}] result={:?}", self.transport_label, result); - self.metrics.requests_finished.with_label_values(&[self.transport_label]).inc(); - } -} - -impl WsMiddleware for RpcMiddleware { - type Instant = std::time::Instant; - - fn on_connect(&self, _remote_addr: SocketAddr, _headers: &Headers) { - self.metrics.ws_sessions_opened.as_ref().map(|counter| counter.inc()); - } - - fn on_request(&self) -> Self::Instant { - self.on_request() - } - - fn on_call(&self, name: &str, params: Params, kind: MethodKind) { - self.on_call(name, params, kind) + fn on_response(&self, result: &str, started_at: Self::Instant, transport: TransportProtocol) { + let transport_label = transport_label_str(transport); + log::trace!(target: "rpc_metrics", "[{}] on_response started_at={:?}", transport_label, started_at); + log::trace!(target: "rpc_metrics::extra", "[{}] result={:?}", transport_label, result); + self.requests_finished.with_label_values(&[transport_label]).inc(); } - fn on_result(&self, name: &str, success: bool, started_at: Self::Instant) { - self.on_result(name, success, started_at) - } - - fn on_response(&self, _result: &str, started_at: Self::Instant) { - self.on_response(_result, started_at) - } - - fn on_disconnect(&self, _remote_addr: SocketAddr) { - self.metrics.ws_sessions_closed.as_ref().map(|counter| counter.inc()); + fn on_disconnect(&self, _remote_addr: SocketAddr, transport: TransportProtocol) { + if let TransportProtocol::WebSocket = transport { + self.ws_sessions_closed.as_ref().map(|counter| counter.inc()); + } } } -impl HttpMiddleware for RpcMiddleware { - type Instant = std::time::Instant; - - fn on_request(&self, _remote_addr: SocketAddr, _headers: &Headers) -> Self::Instant { - self.on_request() - } - - fn on_call(&self, name: &str, params: Params, kind: MethodKind) { - self.on_call(name, params, kind) - } - - fn on_result(&self, name: &str, success: bool, started_at: Self::Instant) { - self.on_result(name, success, started_at) - } - - fn on_response(&self, _result: &str, started_at: Self::Instant) { - self.on_response(_result, started_at) +fn transport_label_str(t: TransportProtocol) -> &'static str { + match t { + TransportProtocol::Http => "http", + TransportProtocol::WebSocket => "ws", } } diff --git a/client/rpc-spec-v2/Cargo.toml b/client/rpc-spec-v2/Cargo.toml index a0ae3038378ff..930aeb4bd8956 100644 --- a/client/rpc-spec-v2/Cargo.toml +++ b/client/rpc-spec-v2/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } +jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } # Internal chain structures for "chain_spec". sc-chain-spec = { version = "4.0.0-dev", path = "../chain-spec" } # Pool for submitting extrinsics required by "transaction" diff --git a/client/rpc-spec-v2/src/chain_spec/tests.rs b/client/rpc-spec-v2/src/chain_spec/tests.rs index 6c078b2974e98..6f662ba422bc4 100644 --- a/client/rpc-spec-v2/src/chain_spec/tests.rs +++ b/client/rpc-spec-v2/src/chain_spec/tests.rs @@ -17,7 +17,7 @@ // along with this program. If not, see . 
use super::*; -use jsonrpsee::{types::EmptyParams, RpcModule}; +use jsonrpsee::{types::EmptyServerParams as EmptyParams, RpcModule}; use sc_chain_spec::Properties; const CHAIN_NAME: &'static str = "TEST_CHAIN_NAME"; diff --git a/client/rpc/Cargo.toml b/client/rpc/Cargo.toml index d690e2c7b4cf1..a241807cc242b 100644 --- a/client/rpc/Cargo.toml +++ b/client/rpc/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.0.0" } futures = "0.3.21" hash-db = { version = "0.15.2", default-features = false } -jsonrpsee = { version = "0.15.1", features = ["server"] } +jsonrpsee = { version = "0.16.2", features = ["server"] } lazy_static = { version = "1.4.0", optional = true } log = "0.4.17" parking_lot = "0.12.1" diff --git a/client/rpc/src/author/tests.rs b/client/rpc/src/author/tests.rs index f969812e5b14c..573d01630de32 100644 --- a/client/rpc/src/author/tests.rs +++ b/client/rpc/src/author/tests.rs @@ -23,7 +23,7 @@ use assert_matches::assert_matches; use codec::Encode; use jsonrpsee::{ core::Error as RpcError, - types::{error::CallError, EmptyParams}, + types::{error::CallError, EmptyServerParams as EmptyParams}, RpcModule, }; use sc_transaction_pool::{BasicPool, FullChainApi}; diff --git a/client/rpc/src/chain/tests.rs b/client/rpc/src/chain/tests.rs index 1e6dbd5aca148..224d021f9409e 100644 --- a/client/rpc/src/chain/tests.rs +++ b/client/rpc/src/chain/tests.rs @@ -19,7 +19,7 @@ use super::*; use crate::testing::{test_executor, timeout_secs}; use assert_matches::assert_matches; -use jsonrpsee::types::EmptyParams; +use jsonrpsee::types::EmptyServerParams as EmptyParams; use sc_block_builder::BlockBuilderProvider; use sp_consensus::BlockOrigin; use sp_rpc::list::ListOrValue; diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index 7213e4360ae2b..fd802e5a80391 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -28,9 +28,8 @@ use std::sync::Arc; use crate::SubscriptionTaskExecutor; use jsonrpsee::{ - core::{Error as JsonRpseeError, RpcResult}, + core::{server::rpc_module::SubscriptionSink, Error as JsonRpseeError, RpcResult}, types::SubscriptionResult, - ws_server::SubscriptionSink, }; use sc_rpc_api::{state::ReadProof, DenyUnsafe}; diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index 53dd8ebf50499..3ef59e5ca9a7c 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -23,7 +23,7 @@ use assert_matches::assert_matches; use futures::executor; use jsonrpsee::{ core::Error as RpcError, - types::{error::CallError as RpcCallError, EmptyParams, ErrorObject}, + types::{error::CallError as RpcCallError, EmptyServerParams as EmptyParams, ErrorObject}, }; use sc_block_builder::BlockBuilderProvider; use sc_rpc_api::DenyUnsafe; diff --git a/client/rpc/src/system/tests.rs b/client/rpc/src/system/tests.rs index 2f91648008ff7..00ab9c46861e2 100644 --- a/client/rpc/src/system/tests.rs +++ b/client/rpc/src/system/tests.rs @@ -21,7 +21,7 @@ use assert_matches::assert_matches; use futures::prelude::*; use jsonrpsee::{ core::Error as RpcError, - types::{error::CallError, EmptyParams}, + types::{error::CallError, EmptyServerParams as EmptyParams}, RpcModule, }; use sc_network::{self, config::Role, PeerId}; diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 87949ef12d888..4d1d267d45c97 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -22,7 +22,7 @@ test-helpers = [] runtime-benchmarks = 
["sc-client-db/runtime-benchmarks"] [dependencies] -jsonrpsee = { version = "0.15.1", features = ["server"] } +jsonrpsee = { version = "0.16.2", features = ["server"] } thiserror = "1.0.30" futures = "0.3.21" rand = "0.7.3" diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 091b4bbe9fe5f..f0e3f72510c28 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -43,7 +43,6 @@ use log::{debug, error, warn}; use sc_client_api::{blockchain::HeaderBackend, BlockBackend, BlockchainEvents, ProofProvider}; use sc_network::PeerId; use sc_network_common::{config::MultiaddrWithPeerId, service::NetworkBlock}; -use sc_rpc_server::WsConfig; use sc_utils::mpsc::TracingUnboundedReceiver; use sp_blockchain::HeaderMetadata; use sp_consensus::SyncOracle; @@ -294,20 +293,9 @@ async fn build_network_future< // Wrapper for HTTP and WS servers that makes sure they are properly shut down. mod waiting { - pub struct HttpServer(pub Option); + pub struct Server(pub Option); - impl Drop for HttpServer { - fn drop(&mut self) { - if let Some(server) = self.0.take() { - // This doesn't not wait for the server to be stopped but fires the signal. - let _ = server.stop(); - } - } - } - - pub struct WsServer(pub Option); - - impl Drop for WsServer { + impl Drop for Server { fn drop(&mut self) { if let Some(server) = self.0.take() { // This doesn't not wait for the server to be stopped but fires the signal. @@ -326,9 +314,6 @@ fn start_rpc_servers( where R: Fn(sc_rpc::DenyUnsafe) -> Result, Error>, { - let (max_request_size, ws_max_response_size, http_max_response_size) = - legacy_cli_parsing(config); - fn deny_unsafe(addr: SocketAddr, methods: &RpcMethods) -> sc_rpc::DenyUnsafe { let is_exposed_addr = !addr.ip().is_loopback(); match (is_exposed_addr, methods) { @@ -337,6 +322,9 @@ where } } + let (max_request_size, ws_max_response_size, http_max_response_size) = + legacy_cli_parsing(config); + let random_port = |mut addr: SocketAddr| { addr.set_port(0); addr @@ -346,6 +334,7 @@ where .rpc_ws .unwrap_or_else(|| "127.0.0.1:9944".parse().expect("valid sockaddr; qed")); let ws_addr2 = random_port(ws_addr); + let http_addr = config .rpc_http .unwrap_or_else(|| "127.0.0.1:9933".parse().expect("valid sockaddr; qed")); @@ -353,29 +342,29 @@ where let metrics = sc_rpc_server::RpcMetrics::new(config.prometheus_registry())?; + let server_config = sc_rpc_server::WsConfig { + max_connections: config.rpc_ws_max_connections, + max_payload_in_mb: max_request_size, + max_payload_out_mb: ws_max_response_size, + max_subs_per_conn: config.rpc_max_subs_per_conn, + }; + let http_fut = sc_rpc_server::start_http( [http_addr, http_addr2], config.rpc_cors.as_ref(), max_request_size, http_max_response_size, metrics.clone(), - gen_rpc_module(deny_unsafe(ws_addr, &config.rpc_methods))?, + gen_rpc_module(deny_unsafe(http_addr, &config.rpc_methods))?, config.tokio_handle.clone(), ); - let ws_config = WsConfig { - max_connections: config.rpc_ws_max_connections, - max_payload_in_mb: max_request_size, - max_payload_out_mb: ws_max_response_size, - max_subs_per_conn: config.rpc_max_subs_per_conn, - }; - - let ws_fut = sc_rpc_server::start_ws( + let ws_fut = sc_rpc_server::start( [ws_addr, ws_addr2], config.rpc_cors.as_ref(), - ws_config, - metrics, - gen_rpc_module(deny_unsafe(http_addr, &config.rpc_methods))?, + server_config.clone(), + metrics.clone(), + gen_rpc_module(deny_unsafe(ws_addr, &config.rpc_methods))?, config.tokio_handle.clone(), rpc_id_provider, ); @@ -383,8 +372,7 @@ where match 
tokio::task::block_in_place(|| { config.tokio_handle.block_on(futures::future::try_join(http_fut, ws_fut)) }) { - Ok((http, ws)) => - Ok(Box::new((waiting::HttpServer(Some(http)), waiting::WsServer(Some(ws))))), + Ok((http, ws)) => Ok(Box::new((waiting::Server(Some(http)), waiting::Server(Some(ws))))), Err(e) => Err(Error::Application(e)), } } diff --git a/client/sync-state-rpc/Cargo.toml b/client/sync-state-rpc/Cargo.toml index 9730eb56e9bd6..a72b4106ba873 100644 --- a/client/sync-state-rpc/Cargo.toml +++ b/client/sync-state-rpc/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0" } -jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } +jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } serde = { version = "1.0.136", features = ["derive"] } serde_json = "1.0.85" thiserror = "1.0.30" diff --git a/frame/transaction-payment/rpc/Cargo.toml b/frame/transaction-payment/rpc/Cargo.toml index 06dcaca937381..b77143201ffd4 100644 --- a/frame/transaction-payment/rpc/Cargo.toml +++ b/frame/transaction-payment/rpc/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0" } -jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } +jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } pallet-transaction-payment-rpc-runtime-api = { version = "4.0.0-dev", path = "./runtime-api" } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } diff --git a/utils/frame/remote-externalities/src/lib.rs b/utils/frame/remote-externalities/src/lib.rs index 86cfc767bf3b5..4f95331c03bc8 100644 --- a/utils/frame/remote-externalities/src/lib.rs +++ b/utils/frame/remote-externalities/src/lib.rs @@ -39,7 +39,9 @@ use std::{ path::{Path, PathBuf}, sync::Arc, }; -use substrate_rpc_client::{rpc_params, ws_client, ChainApi, ClientT, StateApi, WsClient}; +use substrate_rpc_client::{ + rpc_params, ws_client, BatchRequestBuilder, ChainApi, ClientT, StateApi, WsClient, +}; type KeyValue = (StorageKey, StorageData); type TopKeyValues = Vec<KeyValue>; @@ -334,33 +336,50 @@ where log::debug!(target: LOG_TARGET, "Querying a total of {} keys", keys.len()); let mut key_values: Vec<KeyValue> = vec![]; + let mut batch_success = true; + let client = self.as_online().rpc_client(); for chunk_keys in keys.chunks(BATCH_SIZE) { - let batch = chunk_keys - .iter() - .cloned() - .map(|key| ("state_getStorage", rpc_params![key, at])) - .collect::<Vec<_>>(); - - let values = client.batch_request::<Option<StorageData>>(batch).await.map_err(|e| { - log::error!( - target: LOG_TARGET, - "failed to execute batch: {:?}. Error: {:?}", - chunk_keys.iter().map(HexDisplay::from).collect::<Vec<_>>(), - e - ); - "batch failed." - })?; + let mut batch = BatchRequestBuilder::new(); - assert_eq!(chunk_keys.len(), values.len()); + for key in chunk_keys.iter() { + batch + .insert("state_getStorage", rpc_params![key, at]) + .map_err(|_| "Invalid batch params")?; + } + + let batch_response = + client.batch_request::<Option<StorageData>>(batch).await.map_err(|e| { + log::error!( + target: LOG_TARGET, + "failed to execute batch: {:?}. Error: {:?}", + chunk_keys.iter().map(HexDisplay::from).collect::<Vec<_>>(), + e + ); + "batch failed." 
+ })?; + + assert_eq!(chunk_keys.len(), batch_response.len()); + + for (key, maybe_value) in chunk_keys.into_iter().zip(batch_response) { + match maybe_value { + Ok(Some(v)) => { + key_values.push((key.clone(), v)); + }, + Ok(None) => { + log::warn!( + target: LOG_TARGET, + "key {:?} had none corresponding value.", + &key + ); + key_values.push((key.clone(), StorageData(vec![]))); + }, + Err(e) => { + log::error!(target: LOG_TARGET, "key {:?} failed: {:?}", &key, e); + batch_success = false; + }, + }; - for (idx, key) in chunk_keys.iter().enumerate() { - let maybe_value = values[idx].clone(); - let value = maybe_value.unwrap_or_else(|| { - log::warn!(target: LOG_TARGET, "key {:?} had none corresponding value.", &key); - StorageData(vec![]) - }); - key_values.push((key.clone(), value)); if key_values.len() % (10 * BATCH_SIZE) == 0 { let ratio: f64 = key_values.len() as f64 / keys_count as f64; log::debug!( @@ -374,7 +393,11 @@ where } } - Ok(key_values) + if batch_success { + Ok(key_values) + } else { + Err("batch failed.") + } } /// Get the values corresponding to `child_keys` at the given `prefixed_top_key`. @@ -385,12 +408,14 @@ where at: B::Hash, ) -> Result<Vec<KeyValue>, &'static str> { let mut child_kv_inner = vec![]; + let mut batch_success = true; + for batch_child_key in child_keys.chunks(BATCH_SIZE) { - let batch_request = batch_child_key - .iter() - .cloned() - .map(|key| { - ( + let mut batch_request = BatchRequestBuilder::new(); + + for key in batch_child_key { + batch_request + .insert( "childstate_getStorage", rpc_params![ PrefixedStorageKey::new(prefixed_top_key.as_ref().to_vec()), + key, at ], ) - }) - .collect::<Vec<_>>(); + .map_err(|_| "Invalid batch params")?; + } let batch_response = self .as_online() @@ -418,17 +443,32 @@ where assert_eq!(batch_child_key.len(), batch_response.len()); - for (idx, key) in batch_child_key.iter().enumerate() { - let maybe_value = batch_response[idx].clone(); - let value = maybe_value.unwrap_or_else(|| { - log::warn!(target: LOG_TARGET, "key {:?} had none corresponding value.", &key); - StorageData(vec![]) - }); - child_kv_inner.push((key.clone(), value)); + for (key, maybe_value) in batch_child_key.iter().zip(batch_response) { + match maybe_value { + Ok(Some(v)) => { + child_kv_inner.push((key.clone(), v)); + }, + Ok(None) => { + log::warn!( + target: LOG_TARGET, + "key {:?} had none corresponding value.", + &key + ); + child_kv_inner.push((key.clone(), StorageData(vec![]))); + }, + Err(e) => { + log::error!(target: LOG_TARGET, "key {:?} failed: {:?}", &key, e); + batch_success = false; + }, + }; } } - Ok(child_kv_inner) + if batch_success { + Ok(child_kv_inner) + } else { + Err("batch failed.") + } } pub(crate) async fn rpc_child_get_keys( diff --git a/utils/frame/rpc/client/Cargo.toml b/utils/frame/rpc/client/Cargo.toml index bbe8879818092..ee9982971cee3 100644 --- a/utils/frame/rpc/client/Cargo.toml +++ b/utils/frame/rpc/client/Cargo.toml @@ -12,7 +12,7 @@ description = "Shared JSON-RPC client" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpsee = { version = "0.15.1", features = ["ws-client"] } +jsonrpsee = { version = "0.16.2", features = ["ws-client"] } sc-rpc-api = { version = "0.10.0-dev", path = "../../../../client/rpc-api" } async-trait = "0.1.57" serde = "1" diff --git a/utils/frame/rpc/client/src/lib.rs b/utils/frame/rpc/client/src/lib.rs index 254cc193c0e67..a211fc6c6983e 100644 --- a/utils/frame/rpc/client/src/lib.rs +++ b/utils/frame/rpc/client/src/lib.rs @@ -43,7 +43,10 @@ use sp_runtime::traits::{Block as 
BlockT, Header as HeaderT}; use std::collections::VecDeque; pub use jsonrpsee::{ - core::client::{ClientT, Subscription, SubscriptionClientT}, + core::{ + client::{ClientT, Subscription, SubscriptionClientT}, + params::BatchRequestBuilder, + }, rpc_params, ws_client::{WsClient, WsClientBuilder}, }; diff --git a/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml b/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml index 4886563a99440..3a1b4b8b6cbf8 100644 --- a/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml +++ b/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml @@ -25,7 +25,7 @@ sp-state-machine = { path = "../../../../primitives/state-machine" } sp-trie = { path = "../../../../primitives/trie" } trie-db = "0.24.0" -jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } +jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } # Substrate Dependencies sc-client-api = { version = "4.0.0-dev", path = "../../../../client/api" } diff --git a/utils/frame/rpc/support/Cargo.toml b/utils/frame/rpc/support/Cargo.toml index 119acbd937c8a..d098877e7302c 100644 --- a/utils/frame/rpc/support/Cargo.toml +++ b/utils/frame/rpc/support/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0" } futures = "0.3.21" -jsonrpsee = { version = "0.15.1", features = ["jsonrpsee-types"] } +jsonrpsee = { version = "0.16.2", features = ["jsonrpsee-types"] } serde = "1" frame-support = { version = "4.0.0-dev", path = "../../../../frame/support" } sc-rpc-api = { version = "0.10.0-dev", path = "../../../../client/rpc-api" } @@ -25,7 +25,7 @@ sp-storage = { version = "7.0.0", path = "../../../../primitives/storage" } [dev-dependencies] scale-info = "2.1.1" -jsonrpsee = { version = "0.15.1", features = ["ws-client", "jsonrpsee-types"] } +jsonrpsee = { version = "0.16.2", features = ["ws-client", "jsonrpsee-types"] } tokio = "1.22.0" sp-core = { version = "7.0.0", path = "../../../../primitives/core" } sp-runtime = { version = "7.0.0", path = "../../../../primitives/runtime" } diff --git a/utils/frame/rpc/system/Cargo.toml b/utils/frame/rpc/system/Cargo.toml index 56b8a79f8c080..92cf6882a10f1 100644 --- a/utils/frame/rpc/system/Cargo.toml +++ b/utils/frame/rpc/system/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde_json = "1" codec = { package = "parity-scale-codec", version = "3.0.0" } -jsonrpsee = { version = "0.15.1", features = ["server"] } +jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } futures = "0.3.21" log = "0.4.17" frame-system-rpc-runtime-api = { version = "4.0.0-dev", path = "../../../../frame/system/rpc/runtime-api" } From e5d5d88d0d0e79042393c8bc85e5e9ebe6a24000 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Mon, 12 Dec 2022 11:48:43 +0100 Subject: [PATCH 15/29] `pallet-message-queue`: Fix license (#12895) * Fix license Signed-off-by: Oliver Tale-Yazdi * Add mock doc Signed-off-by: Oliver Tale-Yazdi Signed-off-by: Oliver Tale-Yazdi --- frame/message-queue/src/benchmarking.rs | 31 +++++++++++---------- frame/message-queue/src/integration_test.rs | 31 +++++++++++---------- frame/message-queue/src/lib.rs | 31 +++++++++++---------- frame/message-queue/src/mock.rs | 27 ++++++++++-------- frame/message-queue/src/mock_helpers.rs | 31 +++++++++++---------- 5 files changed, 79 insertions(+), 72 deletions(-) diff --git a/frame/message-queue/src/benchmarking.rs b/frame/message-queue/src/benchmarking.rs 
index c0ff20431d00e..9cd6b75e4d0ae 100644 --- a/frame/message-queue/src/benchmarking.rs +++ b/frame/message-queue/src/benchmarking.rs @@ -1,18 +1,19 @@ -// Copyright 2022 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see <https://www.gnu.org/licenses/>. +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Benchmarking for the message queue pallet. diff --git a/frame/message-queue/src/integration_test.rs b/frame/message-queue/src/integration_test.rs index a9b6ee9bd2214..f4b1b7a125449 100644 --- a/frame/message-queue/src/integration_test.rs +++ b/frame/message-queue/src/integration_test.rs @@ -1,18 +1,19 @@ -// Copyright 2022 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see <https://www.gnu.org/licenses/>. +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Stress tests pallet-message-queue. Defines its own runtime config to use larger constants for //! `HeapSize` and `MaxStale`. 
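Those two constants are ordinary `Get` parameters on the pallet's `Config` trait, so a stress-test runtime enlarges them in the usual way. A minimal sketch with `parameter_types!` follows; the concrete numbers are illustrative assumptions, not the values the integration test actually pins:

    use frame_support::parameter_types;

    parameter_types! {
    	// Byte size of a single page of queued messages; a larger page lets the
    	// stress test pack many more messages per page. Illustrative value.
    	pub const HeapSize: u32 = 32 * 1024;
    	// Maximum number of stale pages tolerated before cleanup is expected.
    	// Illustrative value.
    	pub const MaxStale: u32 = 32;
    }

The test runtime then wires these in with `type HeapSize = HeapSize;` and `type MaxStale = MaxStale;` in its `impl` of the pallet's `Config`.
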
diff --git a/frame/message-queue/src/lib.rs b/frame/message-queue/src/lib.rs index 9b976c48245c9..6945ff1b1e549 100644 --- a/frame/message-queue/src/lib.rs +++ b/frame/message-queue/src/lib.rs @@ -1,18 +1,19 @@ -// Copyright 2022 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see <https://www.gnu.org/licenses/>. +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! # Generalized Message Queue Pallet //! diff --git a/frame/message-queue/src/mock.rs b/frame/message-queue/src/mock.rs index bb9942443e226..7159840d1c01b 100644 --- a/frame/message-queue/src/mock.rs +++ b/frame/message-queue/src/mock.rs @@ -1,18 +1,21 @@ -// Copyright 2022 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. +// This file is part of Substrate. -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see <https://www.gnu.org/licenses/>. +//! Test helpers and runtime setup for the message queue pallet. 
#![cfg(test)] diff --git a/frame/message-queue/src/mock_helpers.rs b/frame/message-queue/src/mock_helpers.rs index 39d961d8fc558..f12cf4cc41073 100644 --- a/frame/message-queue/src/mock_helpers.rs +++ b/frame/message-queue/src/mock_helpers.rs @@ -1,18 +1,19 @@ -// Copyright 2022 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see <https://www.gnu.org/licenses/>. +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Std setup helpers for testing and benchmarking. //! From b3d9f3c57e7f799d49442e3456614431f2d0e51a Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Mon, 12 Dec 2022 15:32:41 +0100 Subject: [PATCH 16/29] Use explicit call indices (#12891) * frame-system: explicit call index Signed-off-by: Oliver Tale-Yazdi * Use explicit call indices Signed-off-by: Oliver Tale-Yazdi * pallet-template: explicit call index Signed-off-by: Oliver Tale-Yazdi * DNM: Temporarily require call_index Signed-off-by: Oliver Tale-Yazdi * Revert "DNM: Temporarily require call_index" This reverts commit c4934e312e12af72ca05a8029d7da753a9c99346. 
Signed-off-by: Oliver Tale-Yazdi --- bin/node-template/pallets/template/src/lib.rs | 2 ++ frame/alliance/src/lib.rs | 18 ++++++++++++ frame/assets/src/lib.rs | 28 +++++++++++++++++++ frame/atomic-swap/src/lib.rs | 3 ++ frame/authorship/src/lib.rs | 1 + frame/babe/src/lib.rs | 3 ++ frame/bags-list/src/lib.rs | 2 ++ frame/balances/src/lib.rs | 6 ++++ frame/benchmarking/src/tests.rs | 3 ++ frame/benchmarking/src/tests_instance.rs | 2 ++ frame/bounties/src/lib.rs | 9 ++++++ frame/child-bounties/src/lib.rs | 7 +++++ frame/collective/src/lib.rs | 7 +++++ frame/collective/src/tests.rs | 1 + frame/contracts/src/lib.rs | 9 ++++++ frame/conviction-voting/src/lib.rs | 6 ++++ frame/democracy/src/lib.rs | 18 ++++++++++++ .../election-provider-multi-phase/src/lib.rs | 5 ++++ frame/elections-phragmen/src/lib.rs | 6 ++++ frame/examples/basic/src/lib.rs | 2 ++ frame/examples/offchain-worker/src/lib.rs | 3 ++ frame/executive/src/lib.rs | 7 +++++ frame/fast-unstake/src/lib.rs | 3 ++ frame/grandpa/src/lib.rs | 3 ++ frame/identity/src/lib.rs | 15 ++++++++++ frame/im-online/src/lib.rs | 1 + frame/indices/src/lib.rs | 5 ++++ frame/lottery/src/lib.rs | 4 +++ frame/membership/src/lib.rs | 7 +++++ frame/message-queue/src/lib.rs | 2 ++ frame/multisig/src/lib.rs | 4 +++ frame/nicks/src/lib.rs | 4 +++ frame/nis/src/lib.rs | 4 +++ frame/node-authorization/src/lib.rs | 9 ++++++ frame/nomination-pools/src/lib.rs | 14 ++++++++++ frame/preimage/src/lib.rs | 4 +++ frame/proxy/src/lib.rs | 10 +++++++ frame/ranked-collective/src/lib.rs | 6 ++++ frame/recovery/src/lib.rs | 9 ++++++ frame/referenda/src/lib.rs | 8 ++++++ frame/remark/src/lib.rs | 1 + frame/root-offences/src/lib.rs | 1 + frame/root-testing/src/lib.rs | 1 + frame/scheduler/src/lib.rs | 6 ++++ frame/scheduler/src/mock.rs | 2 ++ frame/scored-pool/src/lib.rs | 5 ++++ frame/session/src/lib.rs | 2 ++ frame/society/src/lib.rs | 12 ++++++++ frame/staking/src/pallet/mod.rs | 25 +++++++++++++++++ frame/state-trie-migration/src/lib.rs | 6 ++++ frame/sudo/src/lib.rs | 4 +++ frame/sudo/src/mock.rs | 2 ++ frame/support/test/tests/pallet.rs | 3 ++ .../test/tests/pallet_compatibility.rs | 1 + .../tests/pallet_compatibility_instance.rs | 1 + frame/support/test/tests/pallet_instance.rs | 2 ++ frame/support/test/tests/storage_layers.rs | 1 + frame/system/src/lib.rs | 8 ++++++ frame/timestamp/src/lib.rs | 1 + frame/tips/src/lib.rs | 6 ++++ frame/transaction-storage/src/lib.rs | 3 ++ frame/treasury/src/lib.rs | 5 ++++ frame/uniques/src/lib.rs | 26 +++++++++++++++++ frame/utility/src/lib.rs | 6 ++++ frame/utility/src/tests.rs | 4 +++ frame/vesting/src/lib.rs | 5 ++++ frame/whitelist/src/lib.rs | 4 +++ 67 files changed, 403 insertions(+) diff --git a/bin/node-template/pallets/template/src/lib.rs b/bin/node-template/pallets/template/src/lib.rs index 28d36ac2c6321..4630e344add31 100644 --- a/bin/node-template/pallets/template/src/lib.rs +++ b/bin/node-template/pallets/template/src/lib.rs @@ -64,6 +64,7 @@ pub mod pallet { impl Pallet { /// An example dispatchable that takes a singles value as a parameter, writes the value to /// storage and emits an event. This function must be dispatched by a signed extrinsic. + #[pallet::call_index(0)] #[pallet::weight(10_000 + T::DbWeight::get().writes(1).ref_time())] pub fn do_something(origin: OriginFor, something: u32) -> DispatchResult { // Check that the extrinsic was signed and get the signer. @@ -81,6 +82,7 @@ pub mod pallet { } /// An example dispatchable that may throw a custom error. 
+ #[pallet::call_index(1)] #[pallet::weight(10_000 + T::DbWeight::get().reads_writes(1,1).ref_time())] pub fn cause_error(origin: OriginFor) -> DispatchResult { let _who = ensure_signed(origin)?; diff --git a/frame/alliance/src/lib.rs b/frame/alliance/src/lib.rs index 7e03da9ac1c7b..790c3c384e701 100644 --- a/frame/alliance/src/lib.rs +++ b/frame/alliance/src/lib.rs @@ -503,6 +503,7 @@ pub mod pallet { /// Add a new proposal to be voted on. /// /// Must be called by a Fellow. + #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::propose_proposed( *length_bound, // B T::MaxFellows::get(), // M @@ -524,6 +525,7 @@ pub mod pallet { /// Add an aye or nay vote for the sender to the given proposal. /// /// Must be called by a Fellow. + #[pallet::call_index(1)] #[pallet::weight(T::WeightInfo::vote(T::MaxFellows::get()))] pub fn vote( origin: OriginFor, @@ -541,6 +543,7 @@ pub mod pallet { /// Close a vote that is either approved, disapproved, or whose voting period has ended. /// /// Must be called by a Fellow. + #[pallet::call_index(2)] #[pallet::weight({ let b = *length_bound; let m = T::MaxFellows::get(); @@ -573,6 +576,7 @@ pub mod pallet { /// The Alliance must be empty, and the call must provide some founding members. /// /// Must be called by the Root origin. + #[pallet::call_index(3)] #[pallet::weight(T::WeightInfo::init_members( fellows.len() as u32, allies.len() as u32, @@ -623,6 +627,7 @@ pub mod pallet { /// Disband the Alliance, remove all active members and unreserve deposits. /// /// Witness data must be set. + #[pallet::call_index(4)] #[pallet::weight(T::WeightInfo::disband( witness.fellow_members, witness.ally_members, @@ -673,6 +678,7 @@ pub mod pallet { } /// Set a new IPFS CID to the alliance rule. + #[pallet::call_index(5)] #[pallet::weight(T::WeightInfo::set_rule())] pub fn set_rule(origin: OriginFor, rule: Cid) -> DispatchResult { T::AdminOrigin::ensure_origin(origin)?; @@ -684,6 +690,7 @@ pub mod pallet { } /// Make an announcement of a new IPFS CID about alliance issues. + #[pallet::call_index(6)] #[pallet::weight(T::WeightInfo::announce())] pub fn announce(origin: OriginFor, announcement: Cid) -> DispatchResult { T::AnnouncementOrigin::ensure_origin(origin)?; @@ -699,6 +706,7 @@ pub mod pallet { } /// Remove an announcement. + #[pallet::call_index(7)] #[pallet::weight(T::WeightInfo::remove_announcement())] pub fn remove_announcement(origin: OriginFor, announcement: Cid) -> DispatchResult { T::AnnouncementOrigin::ensure_origin(origin)?; @@ -716,6 +724,7 @@ pub mod pallet { } /// Submit oneself for candidacy. A fixed deposit is reserved. + #[pallet::call_index(8)] #[pallet::weight(T::WeightInfo::join_alliance())] pub fn join_alliance(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; @@ -752,6 +761,7 @@ pub mod pallet { /// A Fellow can nominate someone to join the alliance as an Ally. There is no deposit /// required from the nominator or nominee. + #[pallet::call_index(9)] #[pallet::weight(T::WeightInfo::nominate_ally())] pub fn nominate_ally(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { let nominator = ensure_signed(origin)?; @@ -776,6 +786,7 @@ pub mod pallet { } /// Elevate an Ally to Fellow. 
+ #[pallet::call_index(10)] #[pallet::weight(T::WeightInfo::elevate_ally())] pub fn elevate_ally(origin: OriginFor, ally: AccountIdLookupOf) -> DispatchResult { T::MembershipManager::ensure_origin(origin)?; @@ -792,6 +803,7 @@ pub mod pallet { /// As a member, give a retirement notice and start a retirement period required to pass in /// order to retire. + #[pallet::call_index(11)] #[pallet::weight(T::WeightInfo::give_retirement_notice())] pub fn give_retirement_notice(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; @@ -814,6 +826,7 @@ pub mod pallet { /// /// This can only be done once you have called `give_retirement_notice` and the /// `RetirementPeriod` has passed. + #[pallet::call_index(12)] #[pallet::weight(T::WeightInfo::retire())] pub fn retire(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; @@ -836,6 +849,7 @@ pub mod pallet { } /// Kick a member from the Alliance and slash its deposit. + #[pallet::call_index(13)] #[pallet::weight(T::WeightInfo::kick_member())] pub fn kick_member(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { T::MembershipManager::ensure_origin(origin)?; @@ -853,6 +867,7 @@ pub mod pallet { } /// Add accounts or websites to the list of unscrupulous items. + #[pallet::call_index(14)] #[pallet::weight(T::WeightInfo::add_unscrupulous_items(items.len() as u32, T::MaxWebsiteUrlLength::get()))] pub fn add_unscrupulous_items( origin: OriginFor, @@ -882,6 +897,7 @@ pub mod pallet { } /// Deem some items no longer unscrupulous. + #[pallet::call_index(15)] #[pallet::weight(>::WeightInfo::remove_unscrupulous_items( items.len() as u32, T::MaxWebsiteUrlLength::get() ))] @@ -907,6 +923,7 @@ pub mod pallet { /// Close a vote that is either approved, disapproved, or whose voting period has ended. /// /// Must be called by a Fellow. + #[pallet::call_index(16)] #[pallet::weight({ let b = *length_bound; let m = T::MaxFellows::get(); @@ -934,6 +951,7 @@ pub mod pallet { /// Abdicate one's position as a voting member and just be an Ally. May be used by Fellows /// who do not want to leave the Alliance but do not have the capacity to participate /// operationally for some time. + #[pallet::call_index(17)] #[pallet::weight(T::WeightInfo::abdicate_fellow_status())] pub fn abdicate_fellow_status(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; diff --git a/frame/assets/src/lib.rs b/frame/assets/src/lib.rs index 2902477d0f2b5..629a0243cfc80 100644 --- a/frame/assets/src/lib.rs +++ b/frame/assets/src/lib.rs @@ -563,6 +563,7 @@ pub mod pallet { /// Emits `Created` event when successful. /// /// Weight: `O(1)` + #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::create())] pub fn create( origin: OriginFor, @@ -620,6 +621,7 @@ pub mod pallet { /// Emits `ForceCreated` event when successful. /// /// Weight: `O(1)` + #[pallet::call_index(1)] #[pallet::weight(T::WeightInfo::force_create())] pub fn force_create( origin: OriginFor, @@ -645,6 +647,7 @@ pub mod pallet { /// asset. /// /// The asset class must be frozen before calling `start_destroy`. + #[pallet::call_index(2)] #[pallet::weight(T::WeightInfo::start_destroy())] pub fn start_destroy(origin: OriginFor, id: T::AssetIdParameter) -> DispatchResult { let maybe_check_owner = match T::ForceOrigin::try_origin(origin) { @@ -667,6 +670,7 @@ pub mod pallet { /// asset. /// /// Each call emits the `Event::DestroyedAccounts` event. 
+ #[pallet::call_index(3)] #[pallet::weight(T::WeightInfo::destroy_accounts(T::RemoveItemsLimit::get()))] pub fn destroy_accounts( origin: OriginFor, @@ -690,6 +694,7 @@ pub mod pallet { /// asset. /// /// Each call emits the `Event::DestroyedApprovals` event. + #[pallet::call_index(4)] #[pallet::weight(T::WeightInfo::destroy_approvals(T::RemoveItemsLimit::get()))] pub fn destroy_approvals( origin: OriginFor, @@ -711,6 +716,7 @@ pub mod pallet { /// asset. /// /// Each successful call emits the `Event::Destroyed` event. + #[pallet::call_index(5)] #[pallet::weight(T::WeightInfo::finish_destroy())] pub fn finish_destroy(origin: OriginFor, id: T::AssetIdParameter) -> DispatchResult { let _ = ensure_signed(origin)?; @@ -730,6 +736,7 @@ pub mod pallet { /// /// Weight: `O(1)` /// Modes: Pre-existing balance of `beneficiary`; Account pre-existence of `beneficiary`. + #[pallet::call_index(6)] #[pallet::weight(T::WeightInfo::mint())] pub fn mint( origin: OriginFor, @@ -759,6 +766,7 @@ pub mod pallet { /// /// Weight: `O(1)` /// Modes: Post-existence of `who`; Pre & post Zombie-status of `who`. + #[pallet::call_index(7)] #[pallet::weight(T::WeightInfo::burn())] pub fn burn( origin: OriginFor, @@ -793,6 +801,7 @@ pub mod pallet { /// Weight: `O(1)` /// Modes: Pre-existence of `target`; Post-existence of sender; Account pre-existence of /// `target`. + #[pallet::call_index(8)] #[pallet::weight(T::WeightInfo::transfer())] pub fn transfer( origin: OriginFor, @@ -826,6 +835,7 @@ pub mod pallet { /// Weight: `O(1)` /// Modes: Pre-existence of `target`; Post-existence of sender; Account pre-existence of /// `target`. + #[pallet::call_index(9)] #[pallet::weight(T::WeightInfo::transfer_keep_alive())] pub fn transfer_keep_alive( origin: OriginFor, @@ -860,6 +870,7 @@ pub mod pallet { /// Weight: `O(1)` /// Modes: Pre-existence of `dest`; Post-existence of `source`; Account pre-existence of /// `dest`. + #[pallet::call_index(10)] #[pallet::weight(T::WeightInfo::force_transfer())] pub fn force_transfer( origin: OriginFor, @@ -887,6 +898,7 @@ pub mod pallet { /// Emits `Frozen`. /// /// Weight: `O(1)` + #[pallet::call_index(11)] #[pallet::weight(T::WeightInfo::freeze())] pub fn freeze( origin: OriginFor, @@ -923,6 +935,7 @@ pub mod pallet { /// Emits `Thawed`. /// /// Weight: `O(1)` + #[pallet::call_index(12)] #[pallet::weight(T::WeightInfo::thaw())] pub fn thaw( origin: OriginFor, @@ -958,6 +971,7 @@ pub mod pallet { /// Emits `Frozen`. /// /// Weight: `O(1)` + #[pallet::call_index(13)] #[pallet::weight(T::WeightInfo::freeze_asset())] pub fn freeze_asset(origin: OriginFor, id: T::AssetIdParameter) -> DispatchResult { let origin = ensure_signed(origin)?; @@ -984,6 +998,7 @@ pub mod pallet { /// Emits `Thawed`. /// /// Weight: `O(1)` + #[pallet::call_index(14)] #[pallet::weight(T::WeightInfo::thaw_asset())] pub fn thaw_asset(origin: OriginFor, id: T::AssetIdParameter) -> DispatchResult { let origin = ensure_signed(origin)?; @@ -1011,6 +1026,7 @@ pub mod pallet { /// Emits `OwnerChanged`. /// /// Weight: `O(1)` + #[pallet::call_index(15)] #[pallet::weight(T::WeightInfo::transfer_ownership())] pub fn transfer_ownership( origin: OriginFor, @@ -1054,6 +1070,7 @@ pub mod pallet { /// Emits `TeamChanged`. /// /// Weight: `O(1)` + #[pallet::call_index(16)] #[pallet::weight(T::WeightInfo::set_team())] pub fn set_team( origin: OriginFor, @@ -1098,6 +1115,7 @@ pub mod pallet { /// Emits `MetadataSet`. 
/// /// Weight: `O(1)` + #[pallet::call_index(17)] #[pallet::weight(T::WeightInfo::set_metadata(name.len() as u32, symbol.len() as u32))] pub fn set_metadata( origin: OriginFor, @@ -1122,6 +1140,7 @@ pub mod pallet { /// Emits `MetadataCleared`. /// /// Weight: `O(1)` + #[pallet::call_index(18)] #[pallet::weight(T::WeightInfo::clear_metadata())] pub fn clear_metadata(origin: OriginFor, id: T::AssetIdParameter) -> DispatchResult { let origin = ensure_signed(origin)?; @@ -1153,6 +1172,7 @@ pub mod pallet { /// Emits `MetadataSet`. /// /// Weight: `O(N + S)` where N and S are the length of the name and symbol respectively. + #[pallet::call_index(19)] #[pallet::weight(T::WeightInfo::force_set_metadata(name.len() as u32, symbol.len() as u32))] pub fn force_set_metadata( origin: OriginFor, @@ -1204,6 +1224,7 @@ pub mod pallet { /// Emits `MetadataCleared`. /// /// Weight: `O(1)` + #[pallet::call_index(20)] #[pallet::weight(T::WeightInfo::force_clear_metadata())] pub fn force_clear_metadata( origin: OriginFor, @@ -1243,6 +1264,7 @@ pub mod pallet { /// Emits `AssetStatusChanged` with the identity of the asset. /// /// Weight: `O(1)` + #[pallet::call_index(21)] #[pallet::weight(T::WeightInfo::force_asset_status())] pub fn force_asset_status( origin: OriginFor, @@ -1299,6 +1321,7 @@ pub mod pallet { /// Emits `ApprovedTransfer` on success. /// /// Weight: `O(1)` + #[pallet::call_index(22)] #[pallet::weight(T::WeightInfo::approve_transfer())] pub fn approve_transfer( origin: OriginFor, @@ -1325,6 +1348,7 @@ pub mod pallet { /// Emits `ApprovalCancelled` on success. /// /// Weight: `O(1)` + #[pallet::call_index(23)] #[pallet::weight(T::WeightInfo::cancel_approval())] pub fn cancel_approval( origin: OriginFor, @@ -1361,6 +1385,7 @@ pub mod pallet { /// Emits `ApprovalCancelled` on success. /// /// Weight: `O(1)` + #[pallet::call_index(24)] #[pallet::weight(T::WeightInfo::force_cancel_approval())] pub fn force_cancel_approval( origin: OriginFor, @@ -1410,6 +1435,7 @@ pub mod pallet { /// Emits `TransferredApproved` on success. /// /// Weight: `O(1)` + #[pallet::call_index(25)] #[pallet::weight(T::WeightInfo::transfer_approved())] pub fn transfer_approved( origin: OriginFor, @@ -1434,6 +1460,7 @@ pub mod pallet { /// - `id`: The identifier of the asset for the account to be created. /// /// Emits `Touched` event when successful. + #[pallet::call_index(26)] #[pallet::weight(T::WeightInfo::mint())] pub fn touch(origin: OriginFor, id: T::AssetIdParameter) -> DispatchResult { let id: T::AssetId = id.into(); @@ -1448,6 +1475,7 @@ pub mod pallet { /// - `allow_burn`: If `true` then assets may be destroyed in order to complete the refund. /// /// Emits `Refunded` event when successful. + #[pallet::call_index(27)] #[pallet::weight(T::WeightInfo::mint())] pub fn refund( origin: OriginFor, diff --git a/frame/atomic-swap/src/lib.rs b/frame/atomic-swap/src/lib.rs index 9c6056497118c..66628e8e6f242 100644 --- a/frame/atomic-swap/src/lib.rs +++ b/frame/atomic-swap/src/lib.rs @@ -243,6 +243,7 @@ pub mod pallet { /// - `duration`: Locked duration of the atomic swap. For safety reasons, it is recommended /// that the revealer uses a shorter duration than the counterparty, to prevent the /// situation where the revealer reveals the proof too late around the end block. + #[pallet::call_index(0)] #[pallet::weight(T::DbWeight::get().reads_writes(1, 1).ref_time().saturating_add(40_000_000))] pub fn create_swap( origin: OriginFor, @@ -278,6 +279,7 @@ pub mod pallet { /// - `proof`: Revealed proof of the claim. 
/// - `action`: Action defined in the swap, it must match the entry in blockchain. Otherwise /// the operation fails. This is used for weight calculation. + #[pallet::call_index(1)] #[pallet::weight( T::DbWeight::get().reads_writes(1, 1) .saturating_add(action.weight()) @@ -318,6 +320,7 @@ pub mod pallet { /// /// - `target`: Target of the original atomic swap. /// - `hashed_proof`: Hashed proof of the original atomic swap. + #[pallet::call_index(2)] #[pallet::weight(T::DbWeight::get().reads_writes(1, 1).ref_time().saturating_add(40_000_000))] pub fn cancel_swap( origin: OriginFor, diff --git a/frame/authorship/src/lib.rs b/frame/authorship/src/lib.rs index c08e773abe3a7..a40f32d36c265 100644 --- a/frame/authorship/src/lib.rs +++ b/frame/authorship/src/lib.rs @@ -237,6 +237,7 @@ pub mod pallet { #[pallet::call] impl Pallet { /// Provide a set of uncles. + #[pallet::call_index(0)] #[pallet::weight((0, DispatchClass::Mandatory))] pub fn set_uncles(origin: OriginFor, new_uncles: Vec) -> DispatchResult { ensure_none(origin)?; diff --git a/frame/babe/src/lib.rs b/frame/babe/src/lib.rs index eadaa036332fa..1a9b3200087ae 100644 --- a/frame/babe/src/lib.rs +++ b/frame/babe/src/lib.rs @@ -403,6 +403,7 @@ pub mod pallet { /// the equivocation proof and validate the given key ownership proof /// against the extracted offender. If both are valid, the offence will /// be reported. + #[pallet::call_index(0)] #[pallet::weight(::WeightInfo::report_equivocation( key_owner_proof.validator_count(), ))] @@ -424,6 +425,7 @@ pub mod pallet { /// block authors will call it (validated in `ValidateUnsigned`), as such /// if the block author is defined it will be defined as the equivocation /// reporter. + #[pallet::call_index(1)] #[pallet::weight(::WeightInfo::report_equivocation( key_owner_proof.validator_count(), ))] @@ -445,6 +447,7 @@ pub mod pallet { /// the next call to `enact_epoch_change`. The config will be activated one epoch after. /// Multiple calls to this method will replace any existing planned config change that had /// not been enacted yet. + #[pallet::call_index(2)] #[pallet::weight(::WeightInfo::plan_config_change())] pub fn plan_config_change( origin: OriginFor, diff --git a/frame/bags-list/src/lib.rs b/frame/bags-list/src/lib.rs index 2b48fbf0ca630..1ffdf29345513 100644 --- a/frame/bags-list/src/lib.rs +++ b/frame/bags-list/src/lib.rs @@ -224,6 +224,7 @@ pub mod pallet { /// `ScoreProvider`. /// /// If `dislocated` does not exists, it returns an error. + #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::rebag_non_terminal().max(T::WeightInfo::rebag_terminal()))] pub fn rebag(origin: OriginFor, dislocated: AccountIdLookupOf) -> DispatchResult { ensure_signed(origin)?; @@ -242,6 +243,7 @@ pub mod pallet { /// Only works if /// - both nodes are within the same bag, /// - and `origin` has a greater `Score` than `lighter`. + #[pallet::call_index(1)] #[pallet::weight(T::WeightInfo::put_in_front_of())] pub fn put_in_front_of( origin: OriginFor, diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index 381a0ffceeb85..57f76b1ff679d 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -282,6 +282,7 @@ pub mod pallet { /// --------------------------------- /// - Origin account is already in memory, so no DB operations for them. /// # + #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::transfer())] pub fn transfer( origin: OriginFor, @@ -307,6 +308,7 @@ pub mod pallet { /// it will reset the account nonce (`frame_system::AccountNonce`). 
/// /// The dispatch origin for this call is `root`. + #[pallet::call_index(1)] #[pallet::weight( T::WeightInfo::set_balance_creating() // Creates a new account. .max(T::WeightInfo::set_balance_killing()) // Kills an existing account. @@ -360,6 +362,7 @@ pub mod pallet { /// - Same as transfer, but additional read and write because the source account is not /// assumed to be in the overlay. /// # + #[pallet::call_index(2)] #[pallet::weight(T::WeightInfo::force_transfer())] pub fn force_transfer( origin: OriginFor, @@ -385,6 +388,7 @@ pub mod pallet { /// 99% of the time you want [`transfer`] instead. /// /// [`transfer`]: struct.Pallet.html#method.transfer + #[pallet::call_index(3)] #[pallet::weight(T::WeightInfo::transfer_keep_alive())] pub fn transfer_keep_alive( origin: OriginFor, @@ -414,6 +418,7 @@ pub mod pallet { /// keep the sender account alive (true). # /// - O(1). Just like transfer, but reading the user's transferable balance first. /// # + #[pallet::call_index(4)] #[pallet::weight(T::WeightInfo::transfer_all())] pub fn transfer_all( origin: OriginFor, @@ -432,6 +437,7 @@ pub mod pallet { /// Unreserve some balance from a user by force. /// /// Can only be called by ROOT. + #[pallet::call_index(5)] #[pallet::weight(T::WeightInfo::force_unreserve())] pub fn force_unreserve( origin: OriginFor, diff --git a/frame/benchmarking/src/tests.rs b/frame/benchmarking/src/tests.rs index 88a7d6d0286b2..1499f9c182fce 100644 --- a/frame/benchmarking/src/tests.rs +++ b/frame/benchmarking/src/tests.rs @@ -51,6 +51,7 @@ mod pallet_test { #[pallet::call] impl Pallet { + #[pallet::call_index(0)] #[pallet::weight(0)] pub fn set_value(origin: OriginFor, n: u32) -> DispatchResult { let _sender = ensure_signed(origin)?; @@ -58,12 +59,14 @@ mod pallet_test { Ok(()) } + #[pallet::call_index(1)] #[pallet::weight(0)] pub fn dummy(origin: OriginFor, _n: u32) -> DispatchResult { let _sender = ensure_none(origin)?; Ok(()) } + #[pallet::call_index(2)] #[pallet::weight(0)] pub fn always_error(_origin: OriginFor) -> DispatchResult { return Err("I always fail".into()) diff --git a/frame/benchmarking/src/tests_instance.rs b/frame/benchmarking/src/tests_instance.rs index 7e1cd48840687..ecc0a78a199b9 100644 --- a/frame/benchmarking/src/tests_instance.rs +++ b/frame/benchmarking/src/tests_instance.rs @@ -61,6 +61,7 @@ mod pallet_test { where ::OtherEvent: Into<>::RuntimeEvent>, { + #[pallet::call_index(0)] #[pallet::weight(0)] pub fn set_value(origin: OriginFor, n: u32) -> DispatchResult { let _sender = ensure_signed(origin)?; @@ -68,6 +69,7 @@ mod pallet_test { Ok(()) } + #[pallet::call_index(1)] #[pallet::weight(0)] pub fn dummy(origin: OriginFor, _n: u32) -> DispatchResult { let _sender = ensure_none(origin)?; diff --git a/frame/bounties/src/lib.rs b/frame/bounties/src/lib.rs index 2e350dd1e2484..eb92c774f86e3 100644 --- a/frame/bounties/src/lib.rs +++ b/frame/bounties/src/lib.rs @@ -333,6 +333,7 @@ pub mod pallet { /// - `fee`: The curator fee. /// - `value`: The total payment amount of this bounty, curator fee included. /// - `description`: The description of this bounty. + #[pallet::call_index(0)] #[pallet::weight(>::WeightInfo::propose_bounty(description.len() as u32))] pub fn propose_bounty( origin: OriginFor, @@ -352,6 +353,7 @@ pub mod pallet { /// # /// - O(1). /// # + #[pallet::call_index(1)] #[pallet::weight(>::WeightInfo::approve_bounty())] pub fn approve_bounty( origin: OriginFor, @@ -383,6 +385,7 @@ pub mod pallet { /// # /// - O(1). 
/// # + #[pallet::call_index(2)] #[pallet::weight(>::WeightInfo::propose_curator())] pub fn propose_curator( origin: OriginFor, @@ -432,6 +435,7 @@ pub mod pallet { /// # /// - O(1). /// # + #[pallet::call_index(3)] #[pallet::weight(>::WeightInfo::unassign_curator())] pub fn unassign_curator( origin: OriginFor, @@ -517,6 +521,7 @@ pub mod pallet { /// # /// - O(1). /// # + #[pallet::call_index(4)] #[pallet::weight(>::WeightInfo::accept_curator())] pub fn accept_curator( origin: OriginFor, @@ -559,6 +564,7 @@ pub mod pallet { /// # /// - O(1). /// # + #[pallet::call_index(5)] #[pallet::weight(>::WeightInfo::award_bounty())] pub fn award_bounty( origin: OriginFor, @@ -606,6 +612,7 @@ pub mod pallet { /// # /// - O(1). /// # + #[pallet::call_index(6)] #[pallet::weight(>::WeightInfo::claim_bounty())] pub fn claim_bounty( origin: OriginFor, @@ -669,6 +676,7 @@ pub mod pallet { /// # /// - O(1). /// # + #[pallet::call_index(7)] #[pallet::weight(>::WeightInfo::close_bounty_proposed() .max(>::WeightInfo::close_bounty_active()))] pub fn close_bounty( @@ -760,6 +768,7 @@ pub mod pallet { /// # /// - O(1). /// # + #[pallet::call_index(8)] #[pallet::weight(>::WeightInfo::extend_bounty_expiry())] pub fn extend_bounty_expiry( origin: OriginFor, diff --git a/frame/child-bounties/src/lib.rs b/frame/child-bounties/src/lib.rs index 2dfe0660ad68e..9eb784eaccd23 100644 --- a/frame/child-bounties/src/lib.rs +++ b/frame/child-bounties/src/lib.rs @@ -237,6 +237,7 @@ pub mod pallet { /// - `parent_bounty_id`: Index of parent bounty for which child-bounty is being added. /// - `value`: Value for executing the proposal. /// - `description`: Text description for the child-bounty. + #[pallet::call_index(0)] #[pallet::weight(::WeightInfo::add_child_bounty(description.len() as u32))] pub fn add_child_bounty( origin: OriginFor, @@ -311,6 +312,7 @@ pub mod pallet { /// - `child_bounty_id`: Index of child bounty. /// - `curator`: Address of child-bounty curator. /// - `fee`: payment fee to child-bounty curator for execution. + #[pallet::call_index(1)] #[pallet::weight(::WeightInfo::propose_curator())] pub fn propose_curator( origin: OriginFor, @@ -380,6 +382,7 @@ pub mod pallet { /// /// - `parent_bounty_id`: Index of parent bounty. /// - `child_bounty_id`: Index of child bounty. + #[pallet::call_index(2)] #[pallet::weight(::WeightInfo::accept_curator())] pub fn accept_curator( origin: OriginFor, @@ -456,6 +459,7 @@ pub mod pallet { /// /// - `parent_bounty_id`: Index of parent bounty. /// - `child_bounty_id`: Index of child bounty. + #[pallet::call_index(3)] #[pallet::weight(::WeightInfo::unassign_curator())] pub fn unassign_curator( origin: OriginFor, @@ -570,6 +574,7 @@ pub mod pallet { /// - `parent_bounty_id`: Index of parent bounty. /// - `child_bounty_id`: Index of child bounty. /// - `beneficiary`: Beneficiary account. + #[pallet::call_index(4)] #[pallet::weight(::WeightInfo::award_child_bounty())] pub fn award_child_bounty( origin: OriginFor, @@ -636,6 +641,7 @@ pub mod pallet { /// /// - `parent_bounty_id`: Index of parent bounty. /// - `child_bounty_id`: Index of child bounty. + #[pallet::call_index(5)] #[pallet::weight(::WeightInfo::claim_child_bounty())] pub fn claim_child_bounty( origin: OriginFor, @@ -745,6 +751,7 @@ pub mod pallet { /// /// - `parent_bounty_id`: Index of parent bounty. /// - `child_bounty_id`: Index of child bounty. 
+ #[pallet::call_index(6)] #[pallet::weight(::WeightInfo::close_child_bounty_added() .max(::WeightInfo::close_child_bounty_active()))] pub fn close_child_bounty( diff --git a/frame/collective/src/lib.rs b/frame/collective/src/lib.rs index 06d5b1fab78e7..c522b71891b3c 100644 --- a/frame/collective/src/lib.rs +++ b/frame/collective/src/lib.rs @@ -372,6 +372,7 @@ pub mod pallet { /// - `P` storage mutations (codec `O(M)`) for updating the votes for each proposal /// - 1 storage write (codec `O(1)`) for deleting the old `prime` and setting the new one /// # + #[pallet::call_index(0)] #[pallet::weight(( T::WeightInfo::set_members( *old_count, // M @@ -429,6 +430,7 @@ pub mod pallet { /// - DB: 1 read (codec `O(M)`) + DB access of `proposal` /// - 1 event /// # + #[pallet::call_index(1)] #[pallet::weight(( T::WeightInfo::execute( *length_bound, // B @@ -492,6 +494,7 @@ pub mod pallet { /// - 1 storage write `Voting` (codec `O(M)`) /// - 1 event /// # + #[pallet::call_index(2)] #[pallet::weight(( if *threshold < 2 { T::WeightInfo::propose_execute( @@ -557,6 +560,7 @@ pub mod pallet { /// - 1 storage mutation `Voting` (codec `O(M)`) /// - 1 event /// # + #[pallet::call_index(3)] #[pallet::weight((T::WeightInfo::vote(T::MaxMembers::get()), DispatchClass::Operational))] pub fn vote( origin: OriginFor, @@ -610,6 +614,7 @@ pub mod pallet { /// - any mutations done while executing `proposal` (`P1`) /// - up to 3 events /// # + #[pallet::call_index(4)] #[pallet::weight(( { let b = *length_bound; @@ -653,6 +658,7 @@ pub mod pallet { /// * Reads: Proposals /// * Writes: Voting, Proposals, ProposalOf /// # + #[pallet::call_index(5)] #[pallet::weight(T::WeightInfo::disapprove_proposal(T::MaxProposals::get()))] pub fn disapprove_proposal( origin: OriginFor, @@ -695,6 +701,7 @@ pub mod pallet { /// - any mutations done while executing `proposal` (`P1`) /// - up to 3 events /// # + #[pallet::call_index(6)] #[pallet::weight(( { let b = *length_bound; diff --git a/frame/collective/src/tests.rs b/frame/collective/src/tests.rs index 3d1540a8c3b5c..648b6f88ec86c 100644 --- a/frame/collective/src/tests.rs +++ b/frame/collective/src/tests.rs @@ -69,6 +69,7 @@ mod mock_democracy { #[pallet::call] impl Pallet { + #[pallet::call_index(0)] #[pallet::weight(0)] pub fn external_propose_majority(origin: OriginFor) -> DispatchResult { T::ExternalMajorityOrigin::ensure_origin(origin)?; diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 4bbb311313d61..06d817785cc39 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -385,6 +385,7 @@ pub mod pallet { as HasCompact>::Type: Clone + Eq + PartialEq + Debug + TypeInfo + Encode, { /// Deprecated version if [`Self::call`] for use in an in-storage `Call`. + #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::call().saturating_add(>::compat_weight(*gas_limit)))] #[allow(deprecated)] #[deprecated(note = "1D weight is used in this extrinsic, please migrate to `call`")] @@ -407,6 +408,7 @@ pub mod pallet { } /// Deprecated version if [`Self::instantiate_with_code`] for use in an in-storage `Call`. + #[pallet::call_index(1)] #[pallet::weight( T::WeightInfo::instantiate_with_code(code.len() as u32, salt.len() as u32) .saturating_add(>::compat_weight(*gas_limit)) @@ -436,6 +438,7 @@ pub mod pallet { } /// Deprecated version if [`Self::instantiate`] for use in an in-storage `Call`. 
+ #[pallet::call_index(2)] #[pallet::weight( T::WeightInfo::instantiate(salt.len() as u32).saturating_add(>::compat_weight(*gas_limit)) )] @@ -481,6 +484,7 @@ pub mod pallet { /// To avoid this situation a constructor could employ access control so that it can /// only be instantiated by permissioned entities. The same is true when uploading /// through [`Self::instantiate_with_code`]. + #[pallet::call_index(3)] #[pallet::weight(T::WeightInfo::upload_code(code.len() as u32))] pub fn upload_code( origin: OriginFor, @@ -497,6 +501,7 @@ pub mod pallet { /// /// A code can only be removed by its original uploader (its owner) and only if it is /// not used by any contract. + #[pallet::call_index(4)] #[pallet::weight(T::WeightInfo::remove_code())] pub fn remove_code( origin: OriginFor, @@ -518,6 +523,7 @@ pub mod pallet { /// This does **not** change the address of the contract in question. This means /// that the contract address is no longer derived from its code hash after calling /// this dispatchable. + #[pallet::call_index(5)] #[pallet::weight(T::WeightInfo::set_code())] pub fn set_code( origin: OriginFor, @@ -563,6 +569,7 @@ pub mod pallet { /// * If the account is a regular account, any value will be transferred. /// * If no account exists and the call value is not less than `existential_deposit`, /// a regular account will be created and any value will be transferred. + #[pallet::call_index(6)] #[pallet::weight(T::WeightInfo::call().saturating_add(*gas_limit))] pub fn call( origin: OriginFor, @@ -619,6 +626,7 @@ pub mod pallet { /// - The smart-contract account is created at the computed address. /// - The `value` is transferred to the new account. /// - The `deploy` function is executed in the context of the newly-created account. + #[pallet::call_index(7)] #[pallet::weight( T::WeightInfo::instantiate_with_code(code.len() as u32, salt.len() as u32) .saturating_add(*gas_limit) @@ -661,6 +669,7 @@ pub mod pallet { /// This function is identical to [`Self::instantiate_with_code`] but without the /// code deployment step. Instead, the `code_hash` of an on-chain deployed wasm binary /// must be supplied. + #[pallet::call_index(8)] #[pallet::weight( T::WeightInfo::instantiate(salt.len() as u32).saturating_add(*gas_limit) )] diff --git a/frame/conviction-voting/src/lib.rs b/frame/conviction-voting/src/lib.rs index 3ecc6e56be94e..141e9690fa29d 100644 --- a/frame/conviction-voting/src/lib.rs +++ b/frame/conviction-voting/src/lib.rs @@ -208,6 +208,7 @@ pub mod pallet { /// - `vote`: The vote configuration. /// /// Weight: `O(R)` where R is the number of polls the voter has voted on. + #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::vote_new().max(T::WeightInfo::vote_existing()))] pub fn vote( origin: OriginFor, @@ -243,6 +244,7 @@ pub mod pallet { /// voted on. Weight is initially charged as if maximum votes, but is refunded later. // NOTE: weight must cover an incorrect voting of origin with max votes, this is ensure // because a valid delegation cover decoding a direct voting with max votes. + #[pallet::call_index(1)] #[pallet::weight(T::WeightInfo::delegate(T::MaxVotes::get()))] pub fn delegate( origin: OriginFor, @@ -274,6 +276,7 @@ pub mod pallet { /// voted on. Weight is initially charged as if maximum votes, but is refunded later. // NOTE: weight must cover an incorrect voting of origin with max votes, this is ensure // because a valid delegation cover decoding a direct voting with max votes. 
+ #[pallet::call_index(2)] #[pallet::weight(T::WeightInfo::undelegate(T::MaxVotes::get().into()))] pub fn undelegate( origin: OriginFor, @@ -293,6 +296,7 @@ pub mod pallet { /// - `target`: The account to remove the lock on. /// /// Weight: `O(R)` with R number of vote of target. + #[pallet::call_index(3)] #[pallet::weight(T::WeightInfo::unlock())] pub fn unlock( origin: OriginFor, @@ -334,6 +338,7 @@ pub mod pallet { /// /// Weight: `O(R + log R)` where R is the number of polls that `target` has voted on. /// Weight is calculated for the maximum number of vote. + #[pallet::call_index(4)] #[pallet::weight(T::WeightInfo::remove_vote())] pub fn remove_vote( origin: OriginFor, @@ -360,6 +365,7 @@ pub mod pallet { /// /// Weight: `O(R + log R)` where R is the number of polls that `target` has voted on. /// Weight is calculated for the maximum number of vote. + #[pallet::call_index(5)] #[pallet::weight(T::WeightInfo::remove_other_vote())] pub fn remove_other_vote( origin: OriginFor, diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs index cf954d4800eee..2c65e5d94bc46 100644 --- a/frame/democracy/src/lib.rs +++ b/frame/democracy/src/lib.rs @@ -549,6 +549,7 @@ pub mod pallet { /// - `value`: The amount of deposit (must be at least `MinimumDeposit`). /// /// Emits `Proposed`. + #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::propose())] pub fn propose( origin: OriginFor, @@ -591,6 +592,7 @@ pub mod pallet { /// must have funds to cover the deposit, equal to the original deposit. /// /// - `proposal`: The index of the proposal to second. + #[pallet::call_index(1)] #[pallet::weight(T::WeightInfo::second())] pub fn second( origin: OriginFor, @@ -616,6 +618,7 @@ pub mod pallet { /// /// - `ref_index`: The index of the referendum to vote for. /// - `vote`: The vote configuration. + #[pallet::call_index(2)] #[pallet::weight(T::WeightInfo::vote_new().max(T::WeightInfo::vote_existing()))] pub fn vote( origin: OriginFor, @@ -634,6 +637,7 @@ pub mod pallet { /// -`ref_index`: The index of the referendum to cancel. /// /// Weight: `O(1)`. + #[pallet::call_index(3)] #[pallet::weight((T::WeightInfo::emergency_cancel(), DispatchClass::Operational))] pub fn emergency_cancel( origin: OriginFor, @@ -656,6 +660,7 @@ pub mod pallet { /// The dispatch origin of this call must be `ExternalOrigin`. /// /// - `proposal_hash`: The preimage hash of the proposal. + #[pallet::call_index(4)] #[pallet::weight(T::WeightInfo::external_propose())] pub fn external_propose( origin: OriginFor, @@ -684,6 +689,7 @@ pub mod pallet { /// pre-scheduled `external_propose` call. /// /// Weight: `O(1)` + #[pallet::call_index(5)] #[pallet::weight(T::WeightInfo::external_propose_majority())] pub fn external_propose_majority( origin: OriginFor, @@ -705,6 +711,7 @@ pub mod pallet { /// pre-scheduled `external_propose` call. /// /// Weight: `O(1)` + #[pallet::call_index(6)] #[pallet::weight(T::WeightInfo::external_propose_default())] pub fn external_propose_default( origin: OriginFor, @@ -731,6 +738,7 @@ pub mod pallet { /// Emits `Started`. /// /// Weight: `O(1)` + #[pallet::call_index(7)] #[pallet::weight(T::WeightInfo::fast_track())] pub fn fast_track( origin: OriginFor, @@ -783,6 +791,7 @@ pub mod pallet { /// Emits `Vetoed`. 
/// /// Weight: `O(V + log(V))` where V is the number of existing vetoers + #[pallet::call_index(8)] #[pallet::weight(T::WeightInfo::veto_external())] pub fn veto_external(origin: OriginFor, proposal_hash: H256) -> DispatchResult { let who = T::VetoOrigin::ensure_origin(origin)?; @@ -817,6 +826,7 @@ pub mod pallet { /// - `ref_index`: The index of the referendum to cancel. /// /// # Weight: `O(1)`. + #[pallet::call_index(9)] #[pallet::weight(T::WeightInfo::cancel_referendum())] pub fn cancel_referendum( origin: OriginFor, @@ -849,6 +859,7 @@ pub mod pallet { /// voted on. Weight is charged as if maximum votes. // NOTE: weight must cover an incorrect voting of origin with max votes, this is ensured // because a valid delegation covers decoding a direct voting with max votes. + #[pallet::call_index(10)] #[pallet::weight(T::WeightInfo::delegate(T::MaxVotes::get()))] pub fn delegate( origin: OriginFor, @@ -877,6 +888,7 @@ pub mod pallet { /// voted on. Weight is charged as if maximum votes. // NOTE: weight must cover an incorrect voting of origin with max votes, this is ensured // because a valid delegation covers decoding a direct voting with max votes. + #[pallet::call_index(11)] #[pallet::weight(T::WeightInfo::undelegate(T::MaxVotes::get()))] pub fn undelegate(origin: OriginFor) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; @@ -889,6 +901,7 @@ pub mod pallet { /// The dispatch origin of this call must be _Root_. /// /// Weight: `O(1)`. + #[pallet::call_index(12)] #[pallet::weight(T::WeightInfo::clear_public_proposals())] pub fn clear_public_proposals(origin: OriginFor) -> DispatchResult { ensure_root(origin)?; @@ -903,6 +916,7 @@ pub mod pallet { /// - `target`: The account to remove the lock on. /// /// Weight: `O(R)` where R is the number of votes of `target`. + #[pallet::call_index(13)] #[pallet::weight(T::WeightInfo::unlock_set(T::MaxVotes::get()).max(T::WeightInfo::unlock_remove(T::MaxVotes::get())))] pub fn unlock(origin: OriginFor, target: AccountIdLookupOf) -> DispatchResult { ensure_signed(origin)?; @@ -938,6 +952,7 @@ pub mod pallet { /// /// Weight: `O(R + log R)` where R is the number of referenda that `target` has voted on. /// Weight is calculated for the maximum number of votes. + #[pallet::call_index(14)] #[pallet::weight(T::WeightInfo::remove_vote(T::MaxVotes::get()))] pub fn remove_vote(origin: OriginFor, index: ReferendumIndex) -> DispatchResult { let who = ensure_signed(origin)?; @@ -959,6 +974,7 @@ pub mod pallet { /// /// Weight: `O(R + log R)` where R is the number of referenda that `target` has voted on. /// Weight is calculated for the maximum number of votes. + #[pallet::call_index(15)] #[pallet::weight(T::WeightInfo::remove_other_vote(T::MaxVotes::get()))] pub fn remove_other_vote( origin: OriginFor, @@ -987,6 +1003,7 @@ pub mod pallet { /// /// Weight: `O(p)` (though as this is a high-privilege dispatch, we assume it has a /// reasonable value). + #[pallet::call_index(16)] #[pallet::weight((T::WeightInfo::blacklist(), DispatchClass::Operational))] pub fn blacklist( origin: OriginFor, @@ -1036,6 +1053,7 @@ pub mod pallet { /// - `prop_index`: The index of the proposal to cancel.
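The delegate/undelegate annotations above charge for `T::MaxVotes::get()` up front and rely on a post-dispatch refund. A minimal sketch of that refund pattern, assuming a hypothetical `Self::try_undelegate` helper that returns the number of votes actually decoded:

    #[pallet::call_index(11)]
    #[pallet::weight(T::WeightInfo::undelegate(T::MaxVotes::get()))]
    pub fn undelegate(origin: OriginFor<T>) -> DispatchResultWithPostInfo {
        let who = ensure_signed(origin)?;
        // Hypothetical helper: performs the undelegation and reports how
        // many votes it actually had to touch.
        let votes = Self::try_undelegate(&who)?;
        // Returning the real weight refunds the difference between it and
        // the worst-case amount charged by the annotation.
        Ok(Some(T::WeightInfo::undelegate(votes)).into())
    }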
/// /// Weight: `O(p)` where `p = PublicProps::::decode_len()` + #[pallet::call_index(17)] #[pallet::weight(T::WeightInfo::cancel_proposal())] pub fn cancel_proposal( origin: OriginFor, diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index cd70514fd3461..4704eaffa0bfe 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -892,6 +892,7 @@ pub mod pallet { /// putting their authoring reward at risk. /// /// No deposit or reward is associated with this submission. + #[pallet::call_index(0)] #[pallet::weight(( T::WeightInfo::submit_unsigned( witness.voters, @@ -941,6 +942,7 @@ pub mod pallet { /// Dispatch origin must be aligned with `T::ForceOrigin`. /// /// This check can be turned off by setting the value to `None`. + #[pallet::call_index(1)] #[pallet::weight(T::DbWeight::get().writes(1))] pub fn set_minimum_untrusted_score( origin: OriginFor, @@ -959,6 +961,7 @@ pub mod pallet { /// The solution is not checked for any feasibility and is assumed to be trustworthy, as any /// feasibility check itself can in principle cause the election process to fail (due to /// memory/weight constraints). + #[pallet::call_index(2)] #[pallet::weight(T::DbWeight::get().reads_writes(1, 1))] pub fn set_emergency_election_result( origin: OriginFor, @@ -996,6 +999,7 @@ pub mod pallet { /// /// A deposit is reserved and recorded for the solution. Based on the outcome, the solution /// might be rewarded, slashed, or get all or a part of the deposit back. + #[pallet::call_index(3)] #[pallet::weight(T::WeightInfo::submit())] pub fn submit( origin: OriginFor, @@ -1065,6 +1069,7 @@ pub mod pallet { /// /// This can only be called when [`Phase::Emergency`] is enabled, as an alternative to /// calling [`Call::set_emergency_election_result`]. + #[pallet::call_index(4)] #[pallet::weight(T::DbWeight::get().reads_writes(1, 1))] pub fn governance_fallback( origin: OriginFor, diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index 165a8fcab429b..1cfdc25fd9b47 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -309,6 +309,7 @@ pub mod pallet { /// # /// We assume the maximum weight among all 3 cases: vote_equal, vote_more and vote_less. /// # + #[pallet::call_index(0)] #[pallet::weight( T::WeightInfo::vote_more(votes.len() as u32) .max(T::WeightInfo::vote_less(votes.len() as u32)) @@ -371,6 +372,7 @@ pub mod pallet { /// This removes the lock and returns the deposit. /// /// The dispatch origin of this call must be signed and be a voter. + #[pallet::call_index(1)] #[pallet::weight(T::WeightInfo::remove_voter())] pub fn remove_voter(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; @@ -394,6 +396,7 @@ pub mod pallet { /// # /// The number of current candidates must be provided as witness data. /// # + #[pallet::call_index(2)] #[pallet::weight(T::WeightInfo::submit_candidacy(*candidate_count))] pub fn submit_candidacy( origin: OriginFor, @@ -438,6 +441,7 @@ pub mod pallet { /// # /// The type of renouncing must be provided as witness data. /// # + #[pallet::call_index(3)] #[pallet::weight(match *renouncing { Renouncing::Candidate(count) => T::WeightInfo::renounce_candidacy_candidate(count), Renouncing::Member => T::WeightInfo::renounce_candidacy_members(), @@ -500,6 +504,7 @@ pub mod pallet { /// If we have a replacement, we use a small weight.
Else, since this is a root call and /// will go into phragmen, we assume full block for now. /// # + #[pallet::call_index(4)] #[pallet::weight(if *rerun_election { T::WeightInfo::remove_member_without_replacement() } else { @@ -535,6 +540,7 @@ pub mod pallet { /// # /// The total number of voters and those that are defunct must be provided as witness data. /// # + #[pallet::call_index(5)] #[pallet::weight(T::WeightInfo::clean_defunct_voters(*_num_voters, *_num_defunct))] pub fn clean_defunct_voters( origin: OriginFor, diff --git a/frame/examples/basic/src/lib.rs b/frame/examples/basic/src/lib.rs index 256529421caae..d5045157dade7 100644 --- a/frame/examples/basic/src/lib.rs +++ b/frame/examples/basic/src/lib.rs @@ -497,6 +497,7 @@ pub mod pallet { // // For the weight of this extrinsic, we rely on the auto-generated `WeightInfo` from the // benchmark toolchain. + #[pallet::call_index(0)] #[pallet::weight( ::WeightInfo::accumulate_dummy() )] @@ -541,6 +542,7 @@ pub mod pallet { // // For the weight of this extrinsic, we use our own weight object `WeightForSetDummy` to // determine its weight + #[pallet::call_index(1)] #[pallet::weight(WeightForSetDummy::(>::from(100u32)))] pub fn set_dummy( origin: OriginFor, diff --git a/frame/examples/offchain-worker/src/lib.rs b/frame/examples/offchain-worker/src/lib.rs index fdf8b61a01acd..46ff7725e3b16 100644 --- a/frame/examples/offchain-worker/src/lib.rs +++ b/frame/examples/offchain-worker/src/lib.rs @@ -229,6 +229,7 @@ pub mod pallet { /// working and receives (and provides) meaningful data. /// This example is not focused on correctness of the oracle itself, but rather its /// purpose is to showcase offchain worker capabilities. + #[pallet::call_index(0)] #[pallet::weight(0)] pub fn submit_price(origin: OriginFor, price: u32) -> DispatchResultWithPostInfo { // Retrieve sender of the transaction. @@ -254,6 +255,7 @@ pub mod pallet { /// /// This example is not focused on correctness of the oracle itself, but rather its /// purpose is to showcase offchain worker capabilities. + #[pallet::call_index(1)] #[pallet::weight(0)] pub fn submit_price_unsigned( origin: OriginFor, @@ -270,6 +272,7 @@ pub mod pallet { Ok(().into()) } + #[pallet::call_index(2)] #[pallet::weight(0)] pub fn submit_price_unsigned_with_signed_payload( origin: OriginFor, diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index 5a4ef92b1c874..6f59ac72eb2fd 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -674,6 +674,7 @@ mod tests { #[pallet::call] impl Pallet { + #[pallet::call_index(0)] #[pallet::weight(100)] pub fn some_function(origin: OriginFor) -> DispatchResult { // NOTE: does not make any difference.
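The `WeightInfo` types referenced in these annotations follow one pattern across pallets: a trait whose methods are generated from benchmarks, plus a unit implementation for tests. A rough sketch of the shape, with illustrative numbers rather than the actual generated values:

    use frame_support::weights::{constants::RocksDbWeight, Weight};

    pub trait WeightInfo {
        fn accumulate_dummy() -> Weight;
    }

    impl WeightInfo for () {
        fn accumulate_dummy() -> Weight {
            // A benchmarked impl combines a measured ref-time base with
            // the DB operations the call performs.
            Weight::from_ref_time(10_000_000)
                .saturating_add(RocksDbWeight::get().reads_writes(1, 1))
        }
    }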
@@ -681,36 +682,42 @@ mod tests { Ok(()) } + #[pallet::call_index(1)] #[pallet::weight((200, DispatchClass::Operational))] pub fn some_root_operation(origin: OriginFor) -> DispatchResult { frame_system::ensure_root(origin)?; Ok(()) } + #[pallet::call_index(2)] #[pallet::weight(0)] pub fn some_unsigned_message(origin: OriginFor) -> DispatchResult { frame_system::ensure_none(origin)?; Ok(()) } + #[pallet::call_index(3)] #[pallet::weight(0)] pub fn allowed_unsigned(origin: OriginFor) -> DispatchResult { frame_system::ensure_root(origin)?; Ok(()) } + #[pallet::call_index(4)] #[pallet::weight(0)] pub fn unallowed_unsigned(origin: OriginFor) -> DispatchResult { frame_system::ensure_root(origin)?; Ok(()) } + #[pallet::call_index(5)] #[pallet::weight((0, DispatchClass::Mandatory))] pub fn inherent_call(origin: OriginFor) -> DispatchResult { frame_system::ensure_none(origin)?; Ok(()) } + #[pallet::call_index(6)] #[pallet::weight(0)] pub fn calculate_storage_root(_origin: OriginFor) -> DispatchResult { let root = sp_io::storage::root(sp_runtime::StateVersion::V1); diff --git a/frame/fast-unstake/src/lib.rs b/frame/fast-unstake/src/lib.rs index 618afa63c2c4c..7f226826cbc53 100644 --- a/frame/fast-unstake/src/lib.rs +++ b/frame/fast-unstake/src/lib.rs @@ -228,6 +228,7 @@ pub mod pallet { /// If the check fails, the stash remains chilled and waits to be unbonded, as with /// the normal staking system, but they lose part of their unbonding chunks due to consuming /// the chain's resources. + #[pallet::call_index(0)] #[pallet::weight(::WeightInfo::register_fast_unstake())] pub fn register_fast_unstake(origin: OriginFor) -> DispatchResult { let ctrl = ensure_signed(origin)?; @@ -257,6 +258,7 @@ pub mod pallet { /// Note that the associated stash is still fully unbonded and chilled as a consequence of /// calling `register_fast_unstake`. This should probably be followed by a call to /// `Staking::rebond`. + #[pallet::call_index(1)] #[pallet::weight(::WeightInfo::deregister())] pub fn deregister(origin: OriginFor) -> DispatchResult { let ctrl = ensure_signed(origin)?; @@ -282,6 +284,7 @@ pub mod pallet { /// Control the operation of this pallet. /// /// Dispatch origin must be signed by the [`Config::ControlOrigin`]. + #[pallet::call_index(2)] #[pallet::weight(::WeightInfo::control())] pub fn control(origin: OriginFor, unchecked_eras_to_check: EraIndex) -> DispatchResult { let _ = T::ControlOrigin::ensure_origin(origin)?; diff --git a/frame/grandpa/src/lib.rs b/frame/grandpa/src/lib.rs index fe5b9861853bf..c6b7fd251661f 100644 --- a/frame/grandpa/src/lib.rs +++ b/frame/grandpa/src/lib.rs @@ -193,6 +193,7 @@ pub mod pallet { /// equivocation proof and validate the given key ownership proof /// against the extracted offender. If both are valid, the offence /// will be reported. + #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::report_equivocation(key_owner_proof.validator_count()))] pub fn report_equivocation( origin: OriginFor, @@ -213,6 +214,7 @@ pub mod pallet { /// block authors will call it (validated in `ValidateUnsigned`), as such /// if the block author is defined it will be defined as the equivocation /// reporter. + #[pallet::call_index(1)] #[pallet::weight(T::WeightInfo::report_equivocation(key_owner_proof.validator_count()))] pub fn report_equivocation_unsigned( origin: OriginFor, @@ -240,6 +242,7 @@ pub mod pallet { /// block of all validators of the new authority set. /// /// Only callable by root.
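The test dispatchables above use the shorthand forms of `#[pallet::weight]`: a bare integer is a ref-time weight, and a tuple appends a `DispatchClass` and optionally a `Pays` to it. A sketch of `(200, DispatchClass::Operational)` spelled out in full, assuming the surrounding test pallet:

    use frame_support::dispatch::{DispatchClass, Pays};

    #[pallet::call_index(1)]
    #[pallet::weight((Weight::from_ref_time(200), DispatchClass::Operational, Pays::Yes))]
    pub fn some_root_operation(origin: OriginFor<T>) -> DispatchResult {
        frame_system::ensure_root(origin)?;
        Ok(())
    }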
+ #[pallet::call_index(2)] #[pallet::weight(T::WeightInfo::note_stalled())] pub fn note_stalled( origin: OriginFor, diff --git a/frame/identity/src/lib.rs b/frame/identity/src/lib.rs index 95f5a84d8abb7..8eab2c67418a1 100644 --- a/frame/identity/src/lib.rs +++ b/frame/identity/src/lib.rs @@ -284,6 +284,7 @@ pub mod pallet { /// - One storage mutation (codec `O(R)`). /// - One event. /// # + #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::add_registrar(T::MaxRegistrars::get()))] pub fn add_registrar( origin: OriginFor, @@ -329,6 +330,7 @@ pub mod pallet { /// - One storage mutation (codec-read `O(X' + R)`, codec-write `O(X + R)`). /// - One event. /// # + #[pallet::call_index(1)] #[pallet::weight( T::WeightInfo::set_identity( T::MaxRegistrars::get(), // R T::MaxAdditionalFields::get(), // X @@ -404,6 +406,7 @@ pub mod pallet { // N storage items for N sub accounts. Right now the weight on this function // is a large overestimate due to the fact that it could potentially write // to 2 x T::MaxSubAccounts::get(). + #[pallet::call_index(2)] #[pallet::weight(T::WeightInfo::set_subs_old(T::MaxSubAccounts::get()) // P: Assume max sub accounts removed. .saturating_add(T::WeightInfo::set_subs_new(subs.len() as u32)) // S: Assume all subs are new. )] @@ -475,6 +478,7 @@ pub mod pallet { /// - `2` storage reads and `S + 2` storage deletions. /// - One event. /// # + #[pallet::call_index(3)] #[pallet::weight(T::WeightInfo::clear_identity( T::MaxRegistrars::get(), // R T::MaxSubAccounts::get(), // S @@ -526,6 +530,7 @@ pub mod pallet { /// - Storage: 1 read `O(R)`, 1 mutate `O(X + R)`. /// - One event. /// # + #[pallet::call_index(4)] #[pallet::weight(T::WeightInfo::request_judgement( T::MaxRegistrars::get(), // R T::MaxAdditionalFields::get(), // X @@ -588,6 +593,7 @@ pub mod pallet { /// - One storage mutation `O(R + X)`. /// - One event /// # + #[pallet::call_index(5)] #[pallet::weight(T::WeightInfo::cancel_request( T::MaxRegistrars::get(), // R T::MaxAdditionalFields::get(), // X @@ -636,6 +642,7 @@ pub mod pallet { /// - One storage mutation `O(R)`. /// - Benchmark: 7.315 + R * 0.329 µs (min squares analysis) /// # + #[pallet::call_index(6)] #[pallet::weight(T::WeightInfo::set_fee(T::MaxRegistrars::get()))] // R pub fn set_fee( origin: OriginFor, @@ -674,6 +681,7 @@ pub mod pallet { /// - One storage mutation `O(R)`. /// - Benchmark: 8.823 + R * 0.32 µs (min squares analysis) /// # + #[pallet::call_index(7)] #[pallet::weight(T::WeightInfo::set_account_id(T::MaxRegistrars::get()))] // R pub fn set_account_id( origin: OriginFor, @@ -713,6 +721,7 @@ pub mod pallet { /// - One storage mutation `O(R)`. /// - Benchmark: 7.464 + R * 0.325 µs (min squares analysis) /// # + #[pallet::call_index(8)] #[pallet::weight(T::WeightInfo::set_fields(T::MaxRegistrars::get()))] // R pub fn set_fields( origin: OriginFor, @@ -761,6 +770,7 @@ pub mod pallet { /// - Storage: 1 read `O(R)`, 1 mutate `O(R + X)`. /// - One event. /// # + #[pallet::call_index(9)] #[pallet::weight(T::WeightInfo::provide_judgement( T::MaxRegistrars::get(), // R T::MaxAdditionalFields::get(), // X @@ -834,6 +844,7 @@ pub mod pallet { /// - `S + 2` storage mutations. /// - One event. /// # + #[pallet::call_index(10)] #[pallet::weight(T::WeightInfo::kill_identity( T::MaxRegistrars::get(), // R T::MaxSubAccounts::get(), // S @@ -874,6 +885,7 @@ pub mod pallet { /// /// The dispatch origin for this call must be _Signed_ and the sender must have a registered /// sub identity of `sub`. 
+ #[pallet::call_index(11)] #[pallet::weight(T::WeightInfo::add_sub(T::MaxSubAccounts::get()))] pub fn add_sub( origin: OriginFor, @@ -909,6 +921,7 @@ pub mod pallet { /// /// The dispatch origin for this call must be _Signed_ and the sender must have a registered /// sub identity of `sub`. + #[pallet::call_index(12)] #[pallet::weight(T::WeightInfo::rename_sub(T::MaxSubAccounts::get()))] pub fn rename_sub( origin: OriginFor, @@ -930,6 +943,7 @@ pub mod pallet { /// /// The dispatch origin for this call must be _Signed_ and the sender must have a registered /// sub identity of `sub`. + #[pallet::call_index(13)] #[pallet::weight(T::WeightInfo::remove_sub(T::MaxSubAccounts::get()))] pub fn remove_sub(origin: OriginFor, sub: AccountIdLookupOf) -> DispatchResult { let sender = ensure_signed(origin)?; @@ -959,6 +973,7 @@ pub mod pallet { /// /// NOTE: This should not normally be used, but is provided in the case that the non- /// controller of an account is maliciously registered as a sub-account. + #[pallet::call_index(14)] #[pallet::weight(T::WeightInfo::quit_sub(T::MaxSubAccounts::get()))] pub fn quit_sub(origin: OriginFor) -> DispatchResult { let sender = ensure_signed(origin)?; diff --git a/frame/im-online/src/lib.rs b/frame/im-online/src/lib.rs index 342522ff29b19..f23e610a4874d 100644 --- a/frame/im-online/src/lib.rs +++ b/frame/im-online/src/lib.rs @@ -474,6 +474,7 @@ pub mod pallet { /// # // NOTE: the weight includes the cost of validate_unsigned as it is part of the cost to // import block with such an extrinsic. + #[pallet::call_index(0)] #[pallet::weight(::WeightInfo::validate_unsigned_and_then_heartbeat( heartbeat.validators_len as u32, heartbeat.network_state.external_addresses.len() as u32, diff --git a/frame/indices/src/lib.rs b/frame/indices/src/lib.rs index 41893254c3c97..95d3cf4b2eed1 100644 --- a/frame/indices/src/lib.rs +++ b/frame/indices/src/lib.rs @@ -98,6 +98,7 @@ pub mod pallet { /// ------------------- /// - DB Weight: 1 Read/Write (Accounts) /// # + #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::claim())] pub fn claim(origin: OriginFor, index: T::AccountIndex) -> DispatchResult { let who = ensure_signed(origin)?; @@ -131,6 +132,7 @@ pub mod pallet { /// - Reads: Indices Accounts, System Account (recipient) /// - Writes: Indices Accounts, System Account (recipient) /// # + #[pallet::call_index(1)] #[pallet::weight(T::WeightInfo::transfer())] pub fn transfer( origin: OriginFor, @@ -171,6 +173,7 @@ pub mod pallet { /// ------------------- /// - DB Weight: 1 Read/Write (Accounts) /// # + #[pallet::call_index(2)] #[pallet::weight(T::WeightInfo::free())] pub fn free(origin: OriginFor, index: T::AccountIndex) -> DispatchResult { let who = ensure_signed(origin)?; @@ -207,6 +210,7 @@ pub mod pallet { /// - Reads: Indices Accounts, System Account (original owner) /// - Writes: Indices Accounts, System Account (original owner) /// # + #[pallet::call_index(3)] #[pallet::weight(T::WeightInfo::force_transfer())] pub fn force_transfer( origin: OriginFor, @@ -245,6 +249,7 @@ pub mod pallet { /// ------------------- /// - DB Weight: 1 Read/Write (Accounts) /// # + #[pallet::call_index(4)] #[pallet::weight(T::WeightInfo::freeze())] pub fn freeze(origin: OriginFor, index: T::AccountIndex) -> DispatchResult { let who = ensure_signed(origin)?; diff --git a/frame/lottery/src/lib.rs b/frame/lottery/src/lib.rs index c501a30ef5f4a..3255062e3fe7f 100644 --- a/frame/lottery/src/lib.rs +++ b/frame/lottery/src/lib.rs @@ -296,6 +296,7 @@ pub mod pallet { /// should listen for the 
`TicketBought` event. /// /// This extrinsic must be called by a signed origin. + #[pallet::call_index(0)] #[pallet::weight( T::WeightInfo::buy_ticket() .saturating_add(call.get_dispatch_info().weight) @@ -317,6 +318,7 @@ pub mod pallet { /// provided by this pallet, which uses storage to determine the valid calls. /// /// This extrinsic must be called by the Manager origin. + #[pallet::call_index(1)] #[pallet::weight(T::WeightInfo::set_calls(calls.len() as u32))] pub fn set_calls( origin: OriginFor, @@ -344,6 +346,7 @@ pub mod pallet { /// * `length`: How long the lottery should run for starting at the current block. /// * `delay`: How long after the lottery end we should wait before picking a winner. /// * `repeat`: If the lottery should repeat when completed. + #[pallet::call_index(2)] #[pallet::weight(T::WeightInfo::start_lottery())] pub fn start_lottery( origin: OriginFor, @@ -376,6 +379,7 @@ pub mod pallet { /// The lottery will continue to run to completion. /// /// This extrinsic must be called by the `ManagerOrigin`. + #[pallet::call_index(3)] #[pallet::weight(T::WeightInfo::stop_repeat())] pub fn stop_repeat(origin: OriginFor) -> DispatchResult { T::ManagerOrigin::ensure_origin(origin)?; diff --git a/frame/membership/src/lib.rs b/frame/membership/src/lib.rs index 4191bbcc5d86e..77b53aa72dad8 100644 --- a/frame/membership/src/lib.rs +++ b/frame/membership/src/lib.rs @@ -166,6 +166,7 @@ pub mod pallet { /// Add a member `who` to the set. /// /// May only be called from `T::AddOrigin`. + #[pallet::call_index(0)] #[pallet::weight(50_000_000)] pub fn add_member(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { T::AddOrigin::ensure_origin(origin)?; @@ -188,6 +189,7 @@ pub mod pallet { /// Remove a member `who` from the set. /// /// May only be called from `T::RemoveOrigin`. + #[pallet::call_index(1)] #[pallet::weight(50_000_000)] pub fn remove_member(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { T::RemoveOrigin::ensure_origin(origin)?; @@ -211,6 +213,7 @@ pub mod pallet { /// May only be called from `T::SwapOrigin`. /// /// Prime membership is *not* passed from `remove` to `add`, if extant. + #[pallet::call_index(2)] #[pallet::weight(50_000_000)] pub fn swap_member( origin: OriginFor, @@ -244,6 +247,7 @@ pub mod pallet { /// pass `members` pre-sorted. /// /// May only be called from `T::ResetOrigin`. + #[pallet::call_index(3)] #[pallet::weight(50_000_000)] pub fn reset_members(origin: OriginFor, members: Vec) -> DispatchResult { T::ResetOrigin::ensure_origin(origin)?; @@ -266,6 +270,7 @@ pub mod pallet { /// May only be called from `Signed` origin of a current member. /// /// Prime membership is passed from the origin account to `new`, if extant. + #[pallet::call_index(4)] #[pallet::weight(50_000_000)] pub fn change_key(origin: OriginFor, new: AccountIdLookupOf) -> DispatchResult { let remove = ensure_signed(origin)?; @@ -300,6 +305,7 @@ pub mod pallet { /// Set the prime member. Must be a current member. /// /// May only be called from `T::PrimeOrigin`. + #[pallet::call_index(5)] #[pallet::weight(50_000_000)] pub fn set_prime(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { T::PrimeOrigin::ensure_origin(origin)?; @@ -313,6 +319,7 @@ pub mod pallet { /// Remove the prime member if it exists. /// /// May only be called from `T::PrimeOrigin`. 
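`buy_ticket` above charges its own benchmarked base cost plus whatever the wrapped call declares for itself. The composition goes through `GetDispatchInfo`, which every runtime call implements; a sketch of the arithmetic, assuming the lottery pallet's `Config` and the `GetDispatchInfo` import from `frame_support::dispatch`:

    use frame_support::{dispatch::GetDispatchInfo, weights::Weight};

    // Pre-dispatch weight for buying a ticket that wraps `call`.
    fn buy_ticket_weight<T: Config>(call: &<T as Config>::RuntimeCall) -> Weight {
        T::WeightInfo::buy_ticket().saturating_add(call.get_dispatch_info().weight)
    }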
+ #[pallet::call_index(6)] #[pallet::weight(50_000_000)] pub fn clear_prime(origin: OriginFor) -> DispatchResult { T::PrimeOrigin::ensure_origin(origin)?; diff --git a/frame/message-queue/src/lib.rs b/frame/message-queue/src/lib.rs index 6945ff1b1e549..8d9faebe0517f 100644 --- a/frame/message-queue/src/lib.rs +++ b/frame/message-queue/src/lib.rs @@ -575,6 +575,7 @@ pub mod pallet { #[pallet::call] impl Pallet { /// Remove a page which has no more messages remaining to be processed or is stale. + #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::reap_page())] pub fn reap_page( origin: OriginFor, @@ -595,6 +596,7 @@ pub mod pallet { /// of the message. /// /// Benchmark complexity considerations: O(index + weight_limit). + #[pallet::call_index(1)] #[pallet::weight( T::WeightInfo::execute_overweight_page_updated().max( T::WeightInfo::execute_overweight_page_removed()).saturating_add(*weight_limit) diff --git a/frame/multisig/src/lib.rs b/frame/multisig/src/lib.rs index ae4efb76335a0..076a289e06519 100644 --- a/frame/multisig/src/lib.rs +++ b/frame/multisig/src/lib.rs @@ -272,6 +272,7 @@ pub mod pallet { /// - DB Weight: None /// - Plus Call Weight /// # + #[pallet::call_index(0)] #[pallet::weight({ let dispatch_info = call.get_dispatch_info(); ( @@ -365,6 +366,7 @@ pub mod pallet { /// - Writes: Multisig Storage, [Caller Account] /// - Plus Call Weight /// # + #[pallet::call_index(1)] #[pallet::weight({ let s = other_signatories.len() as u32; let z = call.using_encoded(|d| d.len()) as u32; @@ -428,6 +430,7 @@ pub mod pallet { /// - Read: Multisig Storage, [Caller Account] /// - Write: Multisig Storage, [Caller Account] /// # + #[pallet::call_index(2)] #[pallet::weight({ let s = other_signatories.len() as u32; @@ -480,6 +483,7 @@ pub mod pallet { /// - Read: Multisig Storage, [Caller Account], Refund Account /// - Write: Multisig Storage, [Caller Account], Refund Account /// # + #[pallet::call_index(3)] #[pallet::weight(T::WeightInfo::cancel_as_multi(other_signatories.len() as u32))] pub fn cancel_as_multi( origin: OriginFor, diff --git a/frame/nicks/src/lib.rs b/frame/nicks/src/lib.rs index b3238630d3174..79daeb9bdb9a8 100644 --- a/frame/nicks/src/lib.rs +++ b/frame/nicks/src/lib.rs @@ -135,6 +135,7 @@ pub mod pallet { /// - One storage read/write. /// - One event. /// # + #[pallet::call_index(0)] #[pallet::weight(50_000_000)] pub fn set_name(origin: OriginFor, name: Vec) -> DispatchResult { let sender = ensure_signed(origin)?; @@ -167,6 +168,7 @@ pub mod pallet { /// - One storage read/write. /// - One event. /// # + #[pallet::call_index(1)] #[pallet::weight(70_000_000)] pub fn clear_name(origin: OriginFor) -> DispatchResult { let sender = ensure_signed(origin)?; @@ -193,6 +195,7 @@ pub mod pallet { /// - One storage read/write. /// - One event. /// # + #[pallet::call_index(2)] #[pallet::weight(70_000_000)] pub fn kill_name(origin: OriginFor, target: AccountIdLookupOf) -> DispatchResult { T::ForceOrigin::ensure_origin(origin)?; @@ -220,6 +223,7 @@ pub mod pallet { /// - One storage read/write. /// - One event. /// # + #[pallet::call_index(3)] #[pallet::weight(70_000_000)] pub fn force_name( origin: OriginFor, diff --git a/frame/nis/src/lib.rs b/frame/nis/src/lib.rs index 97f727c241479..dff64625a3654 100644 --- a/frame/nis/src/lib.rs +++ b/frame/nis/src/lib.rs @@ -520,6 +520,7 @@ pub mod pallet { /// /// Complexities: /// - `Queues[duration].len()` (just take max). 
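The multisig annotations above are closures over witness data: the weight is sized from the number of co-signatories and the encoded call length, taking the maximum over the branches the dispatch may follow. A condensed sketch, assuming the multisig pallet's benchmarked branch functions; cheaper paths are refunded post-dispatch:

    #[pallet::call_index(1)]
    #[pallet::weight({
        let s = other_signatories.len() as u32;
        let z = call.using_encoded(|d| d.len()) as u32;
        // Charge the most expensive branch the call might take, plus the
        // weight limit reserved for dispatching the inner call.
        T::WeightInfo::as_multi_create(s, z)
            .max(T::WeightInfo::as_multi_approve(s, z))
            .max(T::WeightInfo::as_multi_complete(s, z))
            .saturating_add(*max_weight)
    })]
    pub fn as_multi(
        // origin, threshold, other_signatories, maybe_timepoint, call,
        // max_weight: parameters elided in this sketch.
    ) -> DispatchResultWithPostInfo {
        todo!("body elided in this sketch")
    }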
+ #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::place_bid_max())] pub fn place_bid( origin: OriginFor, @@ -581,6 +582,7 @@ pub mod pallet { /// /// - `amount`: The amount of the previous bid. /// - `duration`: The duration of the previous bid. + #[pallet::call_index(1)] #[pallet::weight(T::WeightInfo::retract_bid(T::MaxQueueLen::get()))] pub fn retract_bid( origin: OriginFor, @@ -615,6 +617,7 @@ pub mod pallet { /// Ensure we have sufficient funding for all potential payouts. /// /// - `origin`: Must be accepted by `FundOrigin`. + #[pallet::call_index(2)] #[pallet::weight(T::WeightInfo::fund_deficit())] pub fn fund_deficit(origin: OriginFor) -> DispatchResult { T::FundOrigin::ensure_origin(origin)?; @@ -636,6 +639,7 @@ pub mod pallet { /// - `index`: The index of the receipt. /// - `portion`: If `Some`, then only the given portion of the receipt should be thawed. If /// `None`, then all of it should be. + #[pallet::call_index(3)] #[pallet::weight(T::WeightInfo::thaw())] pub fn thaw( origin: OriginFor, diff --git a/frame/node-authorization/src/lib.rs b/frame/node-authorization/src/lib.rs index bd1b14d10b013..543ba24500ebc 100644 --- a/frame/node-authorization/src/lib.rs +++ b/frame/node-authorization/src/lib.rs @@ -210,6 +210,7 @@ pub mod pallet { /// May only be called from `T::AddOrigin`. /// /// - `node`: identifier of the node. + #[pallet::call_index(0)] #[pallet::weight((T::WeightInfo::add_well_known_node(), DispatchClass::Operational))] pub fn add_well_known_node( origin: OriginFor, @@ -239,6 +240,7 @@ pub mod pallet { /// May only be called from `T::RemoveOrigin`. /// /// - `node`: identifier of the node. + #[pallet::call_index(1)] #[pallet::weight((T::WeightInfo::remove_well_known_node(), DispatchClass::Operational))] pub fn remove_well_known_node(origin: OriginFor, node: PeerId) -> DispatchResult { T::RemoveOrigin::ensure_origin(origin)?; @@ -264,6 +266,7 @@ pub mod pallet { /// /// - `remove`: the node which will be moved out from the list. /// - `add`: the node which will be put in the list. + #[pallet::call_index(2)] #[pallet::weight((T::WeightInfo::swap_well_known_node(), DispatchClass::Operational))] pub fn swap_well_known_node( origin: OriginFor, @@ -300,6 +303,7 @@ pub mod pallet { /// May only be called from `T::ResetOrigin`. /// /// - `nodes`: the new nodes for the allow list. + #[pallet::call_index(3)] #[pallet::weight((T::WeightInfo::reset_well_known_nodes(), DispatchClass::Operational))] pub fn reset_well_known_nodes( origin: OriginFor, @@ -318,6 +322,7 @@ pub mod pallet { /// PeerId, so claim it right away! /// /// - `node`: identifier of the node. + #[pallet::call_index(4)] #[pallet::weight(T::WeightInfo::claim_node())] pub fn claim_node(origin: OriginFor, node: PeerId) -> DispatchResult { let sender = ensure_signed(origin)?; @@ -335,6 +340,7 @@ pub mod pallet { /// needs to reach consensus among the network participants. /// /// - `node`: identifier of the node. + #[pallet::call_index(5)] #[pallet::weight(T::WeightInfo::remove_claim())] pub fn remove_claim(origin: OriginFor, node: PeerId) -> DispatchResult { let sender = ensure_signed(origin)?; @@ -355,6 +361,7 @@ pub mod pallet { /// /// - `node`: identifier of the node. /// - `owner`: new owner of the node. + #[pallet::call_index(6)] #[pallet::weight(T::WeightInfo::transfer_node())] pub fn transfer_node( origin: OriginFor, @@ -378,6 +385,7 @@ pub mod pallet { /// /// - `node`: identifier of the node. /// - `connections`: additional nodes from which the connections are allowed.
+ #[pallet::call_index(7)] #[pallet::weight(T::WeightInfo::add_connections())] pub fn add_connections( origin: OriginFor, @@ -412,6 +420,7 @@ pub mod pallet { /// /// - `node`: identifier of the node. /// - `connections`: additional nodes from which the connections are not allowed anymore. + #[pallet::call_index(8)] #[pallet::weight(T::WeightInfo::remove_connections())] pub fn remove_connections( origin: OriginFor, diff --git a/frame/nomination-pools/src/lib.rs b/frame/nomination-pools/src/lib.rs index 9ca9539b3dca8..fd533ee3762b4 100644 --- a/frame/nomination-pools/src/lib.rs +++ b/frame/nomination-pools/src/lib.rs @@ -1506,6 +1506,7 @@ pub mod pallet { /// * This call will *not* dust the member account, so the member must have at least /// `existential deposit + amount` in their account. /// * Only a pool with [`PoolState::Open`] can be joined + #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::join())] pub fn join( origin: OriginFor, @@ -1563,6 +1564,7 @@ pub mod pallet { // NOTE: this transaction is implemented with the sole purpose of readability and // correctness, not optimization. We read/write several storage items multiple times instead // of just once, in the spirit of reusing code. + #[pallet::call_index(1)] #[pallet::weight( T::WeightInfo::bond_extra_transfer() .max(T::WeightInfo::bond_extra_reward()) @@ -1605,6 +1607,7 @@ pub mod pallet { /// /// The member will earn rewards pro rata based on the member's stake vs the sum of the /// members in the pool's stake. Rewards do not "expire". + #[pallet::call_index(2)] #[pallet::weight(T::WeightInfo::claim_payout())] pub fn claim_payout(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; @@ -1644,6 +1647,7 @@ pub mod pallet { /// [`Call::pool_withdraw_unbonded`] can be called to try and minimize unlocking chunks. If /// there are too many unlocking chunks, the result of this call will likely be the /// `NoMoreChunks` error from the staking system. + #[pallet::call_index(3)] #[pallet::weight(T::WeightInfo::unbond())] pub fn unbond( origin: OriginFor, @@ -1719,6 +1723,7 @@ pub mod pallet { /// can be cleared by withdrawing. In the case there are too many unlocking chunks, the user /// would probably see an error like `NoMoreChunks` emitted from the staking system when /// they attempt to unbond. + #[pallet::call_index(4)] #[pallet::weight(T::WeightInfo::pool_withdraw_unbonded(*num_slashing_spans))] pub fn pool_withdraw_unbonded( origin: OriginFor, @@ -1753,6 +1758,7 @@ pub mod pallet { /// # Note /// /// If the target is the depositor, the pool will be destroyed. + #[pallet::call_index(5)] #[pallet::weight( T::WeightInfo::withdraw_unbonded_kill(*num_slashing_spans) )] @@ -1874,6 +1880,7 @@ pub mod pallet { /// /// In addition to `amount`, the caller will transfer the existential deposit; so the caller /// needs to have at least `amount + existential_deposit` transferable. + #[pallet::call_index(6)] #[pallet::weight(T::WeightInfo::create())] pub fn create( origin: OriginFor, @@ -1898,6 +1905,7 @@ pub mod pallet { /// /// Same as `create` with the inclusion of /// * `pool_id` - A valid PoolId. + #[pallet::call_index(7)] #[pallet::weight(T::WeightInfo::create())] pub fn create_with_pool_id( origin: OriginFor, @@ -1922,6 +1930,7 @@ pub mod pallet { /// /// This directly forwards the call to the staking pallet, on behalf of the pool bonded /// account.
+ #[pallet::call_index(8)] #[pallet::weight(T::WeightInfo::nominate(validators.len() as u32))] pub fn nominate( origin: OriginFor, @@ -1944,6 +1953,7 @@ pub mod pallet { /// 1. signed by the state toggler, or the root role of the pool, /// 2. if the pool conditions to be open are NOT met (as described by `ok_to_be_open`), and /// then the state of the pool can be permissionlessly changed to `Destroying`. + #[pallet::call_index(9)] #[pallet::weight(T::WeightInfo::set_state())] pub fn set_state( origin: OriginFor, @@ -1972,6 +1982,7 @@ pub mod pallet { /// /// The dispatch origin of this call must be signed by the state toggler, or the root role /// of the pool. + #[pallet::call_index(10)] #[pallet::weight(T::WeightInfo::set_metadata(metadata.len() as u32))] pub fn set_metadata( origin: OriginFor, @@ -2003,6 +2014,7 @@ pub mod pallet { /// * `max_pools` - Set [`MaxPools`]. /// * `max_members` - Set [`MaxPoolMembers`]. /// * `max_members_per_pool` - Set [`MaxPoolMembersPerPool`]. + #[pallet::call_index(11)] #[pallet::weight(T::WeightInfo::set_configs())] pub fn set_configs( origin: OriginFor, @@ -2039,6 +2051,7 @@ pub mod pallet { /// /// It emits an event, notifying UIs of the role change. This event is quite relevant to /// most pool members and they should be informed of changes to pool roles. + #[pallet::call_index(12)] #[pallet::weight(T::WeightInfo::update_roles())] pub fn update_roles( origin: OriginFor, @@ -2091,6 +2104,7 @@ pub mod pallet { /// /// This directly forwards the call to the staking pallet, on behalf of the pool bonded /// account. + #[pallet::call_index(13)] #[pallet::weight(T::WeightInfo::chill())] pub fn chill(origin: OriginFor, pool_id: PoolId) -> DispatchResult { let who = ensure_signed(origin)?; diff --git a/frame/preimage/src/lib.rs b/frame/preimage/src/lib.rs index 6549832c11f5d..bf7d602057cac 100644 --- a/frame/preimage/src/lib.rs +++ b/frame/preimage/src/lib.rs @@ -153,6 +153,7 @@ pub mod pallet { /// /// If the preimage was previously requested, no fees or deposits are taken for providing /// the preimage. Otherwise, a deposit is taken proportional to the size of the preimage. + #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::note_preimage(bytes.len() as u32))] pub fn note_preimage(origin: OriginFor, bytes: Vec) -> DispatchResultWithPostInfo { // We accept a signed origin which will pay a deposit, or a root origin where a deposit @@ -172,6 +173,7 @@ pub mod pallet { /// /// - `hash`: The hash of the preimage to be removed from the store. /// - `len`: The length of the preimage of `hash`. + #[pallet::call_index(1)] #[pallet::weight(T::WeightInfo::unnote_preimage())] pub fn unnote_preimage(origin: OriginFor, hash: T::Hash) -> DispatchResult { let maybe_sender = Self::ensure_signed_or_manager(origin)?; @@ -182,6 +184,7 @@ pub mod pallet { /// /// If the preimage request has already been provided on-chain, we unreserve any deposit /// a user may have paid, and take control of the preimage out of their hands. + #[pallet::call_index(2)] #[pallet::weight(T::WeightInfo::request_preimage())] pub fn request_preimage(origin: OriginFor, hash: T::Hash) -> DispatchResult { T::ManagerOrigin::ensure_origin(origin)?; @@ -192,6 +195,7 @@ pub mod pallet { /// Clear a previously made request for a preimage. /// /// NOTE: THIS MUST NOT BE CALLED ON `hash` MORE TIMES THAN `request_preimage`.
+ #[pallet::call_index(3)] #[pallet::weight(T::WeightInfo::unrequest_preimage())] pub fn unrequest_preimage(origin: OriginFor, hash: T::Hash) -> DispatchResult { T::ManagerOrigin::ensure_origin(origin)?; diff --git a/frame/proxy/src/lib.rs b/frame/proxy/src/lib.rs index 5c07a2b012243..d98534d16a21b 100644 --- a/frame/proxy/src/lib.rs +++ b/frame/proxy/src/lib.rs @@ -191,6 +191,7 @@ pub mod pallet { /// - `real`: The account that the proxy will make a call on behalf of. /// - `force_proxy_type`: Specify the exact proxy type to be used and checked for this call. /// - `call`: The call to be made by the `real` account. + #[pallet::call_index(0)] #[pallet::weight({ let di = call.get_dispatch_info(); (T::WeightInfo::proxy(T::MaxProxies::get()) @@ -224,6 +225,7 @@ pub mod pallet { /// - `proxy_type`: The permissions allowed for this proxy account. /// - `delay`: The announcement period required of the initial proxy. Will generally be /// zero. + #[pallet::call_index(1)] #[pallet::weight(T::WeightInfo::add_proxy(T::MaxProxies::get()))] pub fn add_proxy( origin: OriginFor, @@ -243,6 +245,7 @@ pub mod pallet { /// Parameters: /// - `proxy`: The account that the `caller` would like to remove as a proxy. /// - `proxy_type`: The permissions currently enabled for the removed proxy account. + #[pallet::call_index(2)] #[pallet::weight(T::WeightInfo::remove_proxy(T::MaxProxies::get()))] pub fn remove_proxy( origin: OriginFor, @@ -261,6 +264,7 @@ pub mod pallet { /// /// WARNING: This may be called on accounts created by `pure`, however if done, then /// the unreserved fees will be inaccessible. **All access to this account will be lost.** + #[pallet::call_index(3)] #[pallet::weight(T::WeightInfo::remove_proxies(T::MaxProxies::get()))] pub fn remove_proxies(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; @@ -288,6 +292,7 @@ pub mod pallet { /// same sender, with the same parameters. /// /// Fails if there are insufficient funds to pay for deposit. + #[pallet::call_index(4)] #[pallet::weight(T::WeightInfo::create_pure(T::MaxProxies::get()))] pub fn create_pure( origin: OriginFor, @@ -335,6 +340,7 @@ pub mod pallet { /// /// Fails with `NoPermission` in case the caller is not a previously created pure /// account whose `pure` call has corresponding parameters. + #[pallet::call_index(5)] #[pallet::weight(T::WeightInfo::kill_pure(T::MaxProxies::get()))] pub fn kill_pure( origin: OriginFor, @@ -372,6 +378,7 @@ pub mod pallet { /// Parameters: /// - `real`: The account that the proxy will make a call on behalf of. /// - `call_hash`: The hash of the call to be made by the `real` account. + #[pallet::call_index(6)] #[pallet::weight(T::WeightInfo::announce(T::MaxPending::get(), T::MaxProxies::get()))] pub fn announce( origin: OriginFor, @@ -421,6 +428,7 @@ pub mod pallet { /// Parameters: /// - `real`: The account that the proxy will make a call on behalf of. /// - `call_hash`: The hash of the call to be made by the `real` account. + #[pallet::call_index(7)] #[pallet::weight(T::WeightInfo::remove_announcement( T::MaxPending::get(), T::MaxProxies::get() @@ -447,6 +455,7 @@ pub mod pallet { /// Parameters: /// - `delegate`: The account that previously announced the call. /// - `call_hash`: The hash of the call to be made. + #[pallet::call_index(8)] #[pallet::weight(T::WeightInfo::reject_announcement( T::MaxPending::get(), T::MaxProxies::get() @@ -476,6 +485,7 @@ pub mod pallet { /// - `real`: The account that the proxy will make a call on behalf of. 
/// - `force_proxy_type`: Specify the exact proxy type to be used and checked for this call. /// - `call`: The call to be made by the `real` account. + #[pallet::call_index(9)] #[pallet::weight({ let di = call.get_dispatch_info(); (T::WeightInfo::proxy_announced(T::MaxPending::get(), T::MaxProxies::get()) diff --git a/frame/ranked-collective/src/lib.rs b/frame/ranked-collective/src/lib.rs index 33aed2704918c..b057a57508023 100644 --- a/frame/ranked-collective/src/lib.rs +++ b/frame/ranked-collective/src/lib.rs @@ -470,6 +470,7 @@ pub mod pallet { /// - `rank`: The rank to give the new member. /// /// Weight: `O(1)` + #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::add_member())] pub fn add_member(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { let _ = T::PromoteOrigin::ensure_origin(origin)?; @@ -483,6 +484,7 @@ pub mod pallet { /// - `who`: Account of existing member. /// /// Weight: `O(1)` + #[pallet::call_index(1)] #[pallet::weight(T::WeightInfo::promote_member(0))] pub fn promote_member(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { let max_rank = T::PromoteOrigin::ensure_origin(origin)?; @@ -497,6 +499,7 @@ pub mod pallet { /// - `who`: Account of existing member of rank greater than zero. /// /// Weight: `O(1)`, less if the member's index is highest in its rank. + #[pallet::call_index(2)] #[pallet::weight(T::WeightInfo::demote_member(0))] pub fn demote_member(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { let max_rank = T::DemoteOrigin::ensure_origin(origin)?; @@ -528,6 +531,7 @@ pub mod pallet { /// - `min_rank`: The rank of the member or greater. /// /// Weight: `O(min_rank)`. + #[pallet::call_index(3)] #[pallet::weight(T::WeightInfo::remove_member(*min_rank as u32))] pub fn remove_member( origin: OriginFor, @@ -562,6 +566,7 @@ pub mod pallet { /// fee. /// /// Weight: `O(1)`, less if there was no previous vote on the poll by the member. + #[pallet::call_index(4)] #[pallet::weight(T::WeightInfo::vote())] pub fn vote( origin: OriginFor, @@ -618,6 +623,7 @@ pub mod pallet { /// Transaction fees are waived if the operation is successful. /// /// Weight `O(max)` (less if there are fewer items to remove than `max`). + #[pallet::call_index(5)] #[pallet::weight(T::WeightInfo::cleanup_poll(*max))] pub fn cleanup_poll( origin: OriginFor, diff --git a/frame/recovery/src/lib.rs b/frame/recovery/src/lib.rs index 18d3d48dc024c..9c57ca79d2e47 100644 --- a/frame/recovery/src/lib.rs +++ b/frame/recovery/src/lib.rs @@ -374,6 +374,7 @@ pub mod pallet { /// Parameters: /// - `account`: The recovered account you want to make a call on-behalf-of. /// - `call`: The call you want to make with the recovered account. + #[pallet::call_index(0)] #[pallet::weight({ let dispatch_info = call.get_dispatch_info(); ( @@ -403,6 +404,7 @@ pub mod pallet { /// Parameters: /// - `lost`: The "lost account" to be recovered. /// - `rescuer`: The "rescuer account" which can call as the lost account. + #[pallet::call_index(1)] #[pallet::weight(T::WeightInfo::set_recovered())] pub fn set_recovered( origin: OriginFor, @@ -437,6 +439,7 @@ pub mod pallet { /// friends. /// - `delay_period`: The number of blocks after a recovery attempt is initialized that /// needs to pass before the account can be recovered. + #[pallet::call_index(2)] #[pallet::weight(T::WeightInfo::create_recovery(friends.len() as u32))] pub fn create_recovery( origin: OriginFor, @@ -488,6 +491,7 @@ pub mod pallet { /// Parameters: /// - `account`: The lost account that you want to recover. 
This account needs to be /// recoverable (i.e. have a recovery configuration). + #[pallet::call_index(3)] #[pallet::weight(T::WeightInfo::initiate_recovery())] pub fn initiate_recovery( origin: OriginFor, @@ -532,6 +536,7 @@ pub mod pallet { /// /// The combination of these two parameters must point to an active recovery /// process. + #[pallet::call_index(4)] #[pallet::weight(T::WeightInfo::vouch_recovery(T::MaxFriends::get()))] pub fn vouch_recovery( origin: OriginFor, @@ -575,6 +580,7 @@ pub mod pallet { /// Parameters: /// - `account`: The lost account that you want to claim has been successfully recovered by /// you. + #[pallet::call_index(5)] #[pallet::weight(T::WeightInfo::claim_recovery(T::MaxFriends::get()))] pub fn claim_recovery( origin: OriginFor, @@ -622,6 +628,7 @@ pub mod pallet { /// /// Parameters: /// - `rescuer`: The account trying to rescue this recoverable account. + #[pallet::call_index(6)] #[pallet::weight(T::WeightInfo::close_recovery(T::MaxFriends::get()))] pub fn close_recovery( origin: OriginFor, @@ -659,6 +666,7 @@ pub mod pallet { /// /// The dispatch origin for this call must be _Signed_ and must be a /// recoverable account (i.e. has a recovery configuration). + #[pallet::call_index(7)] #[pallet::weight(T::WeightInfo::remove_recovery(T::MaxFriends::get()))] pub fn remove_recovery(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; @@ -681,6 +689,7 @@ pub mod pallet { /// /// Parameters: /// - `account`: The recovered account you are able to call on-behalf-of. + #[pallet::call_index(8)] #[pallet::weight(T::WeightInfo::cancel_recovered())] pub fn cancel_recovered( origin: OriginFor, diff --git a/frame/referenda/src/lib.rs b/frame/referenda/src/lib.rs index 2bb01baa0cd3a..0b846faf88558 100644 --- a/frame/referenda/src/lib.rs +++ b/frame/referenda/src/lib.rs @@ -397,6 +397,7 @@ pub mod pallet { /// - `enactment_moment`: The moment that the proposal should be enacted. /// /// Emits `Submitted`. + #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::submit())] pub fn submit( origin: OriginFor, @@ -444,6 +445,7 @@ pub mod pallet { /// posted. /// /// Emits `DecisionDepositPlaced`. + #[pallet::call_index(1)] #[pallet::weight(ServiceBranch::max_weight_of_deposit::())] pub fn place_decision_deposit( origin: OriginFor, @@ -471,6 +473,7 @@ pub mod pallet { /// refunded. /// /// Emits `DecisionDepositRefunded`. + #[pallet::call_index(2)] #[pallet::weight(T::WeightInfo::refund_decision_deposit())] pub fn refund_decision_deposit( origin: OriginFor, @@ -500,6 +503,7 @@ pub mod pallet { /// - `index`: The index of the referendum to be cancelled. /// /// Emits `Cancelled`. + #[pallet::call_index(3)] #[pallet::weight(T::WeightInfo::cancel())] pub fn cancel(origin: OriginFor, index: ReferendumIndex) -> DispatchResult { T::CancelOrigin::ensure_origin(origin)?; @@ -524,6 +528,7 @@ pub mod pallet { /// - `index`: The index of the referendum to be cancelled. /// /// Emits `Killed` and `DepositSlashed`. + #[pallet::call_index(4)] #[pallet::weight(T::WeightInfo::kill())] pub fn kill(origin: OriginFor, index: ReferendumIndex) -> DispatchResult { T::KillOrigin::ensure_origin(origin)?; @@ -544,6 +549,7 @@ pub mod pallet { /// /// - `origin`: must be `Root`. /// - `index`: the referendum to be advanced. + #[pallet::call_index(5)] #[pallet::weight(ServiceBranch::max_weight_of_nudge::())] pub fn nudge_referendum( origin: OriginFor, @@ -570,6 +576,7 @@ pub mod pallet { /// `DecidingCount` is not yet updated. 
This means that we should either: /// - begin deciding another referendum (and leave `DecidingCount` alone); or /// - decrement `DecidingCount`. + #[pallet::call_index(6)] #[pallet::weight(OneFewerDecidingBranch::max_weight::())] pub fn one_fewer_deciding( origin: OriginFor, @@ -603,6 +610,7 @@ pub mod pallet { /// refunded. /// /// Emits `SubmissionDepositRefunded`. + #[pallet::call_index(7)] #[pallet::weight(T::WeightInfo::refund_submission_deposit())] pub fn refund_submission_deposit( origin: OriginFor, diff --git a/frame/remark/src/lib.rs b/frame/remark/src/lib.rs index b61c79f7f273d..80fe393c20f4a 100644 --- a/frame/remark/src/lib.rs +++ b/frame/remark/src/lib.rs @@ -62,6 +62,7 @@ pub mod pallet { #[pallet::call] impl Pallet { /// Index and store data off chain. + #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::store(remark.len() as u32))] pub fn store(origin: OriginFor, remark: Vec) -> DispatchResultWithPostInfo { ensure!(!remark.is_empty(), Error::::Empty); diff --git a/frame/root-offences/src/lib.rs b/frame/root-offences/src/lib.rs index 298fe0078a6a6..ed039f46becc8 100644 --- a/frame/root-offences/src/lib.rs +++ b/frame/root-offences/src/lib.rs @@ -81,6 +81,7 @@ pub mod pallet { #[pallet::call] impl Pallet { /// Allows the `root`, for example sudo to create an offence. + #[pallet::call_index(0)] #[pallet::weight(T::DbWeight::get().reads(2))] pub fn create_offence( origin: OriginFor, diff --git a/frame/root-testing/src/lib.rs b/frame/root-testing/src/lib.rs index 25d66cfac202d..da67904967853 100644 --- a/frame/root-testing/src/lib.rs +++ b/frame/root-testing/src/lib.rs @@ -45,6 +45,7 @@ pub mod pallet { #[pallet::call] impl Pallet { /// A dispatch that will fill the block weight up to the given ratio. + #[pallet::call_index(0)] #[pallet::weight(*_ratio * T::BlockWeights::get().max_block)] pub fn fill_block(origin: OriginFor, _ratio: Perbill) -> DispatchResult { ensure_root(origin)?; diff --git a/frame/scheduler/src/lib.rs b/frame/scheduler/src/lib.rs index 2e0d0c6be1db5..d6a66c5e2cb2c 100644 --- a/frame/scheduler/src/lib.rs +++ b/frame/scheduler/src/lib.rs @@ -297,6 +297,7 @@ pub mod pallet { #[pallet::call] impl Pallet { /// Anonymously schedule a task. + #[pallet::call_index(0)] #[pallet::weight(::WeightInfo::schedule(T::MaxScheduledPerBlock::get()))] pub fn schedule( origin: OriginFor, @@ -318,6 +319,7 @@ pub mod pallet { } /// Cancel an anonymously scheduled task. + #[pallet::call_index(1)] #[pallet::weight(::WeightInfo::cancel(T::MaxScheduledPerBlock::get()))] pub fn cancel(origin: OriginFor, when: T::BlockNumber, index: u32) -> DispatchResult { T::ScheduleOrigin::ensure_origin(origin.clone())?; @@ -327,6 +329,7 @@ pub mod pallet { } /// Schedule a named task. + #[pallet::call_index(2)] #[pallet::weight(::WeightInfo::schedule_named(T::MaxScheduledPerBlock::get()))] pub fn schedule_named( origin: OriginFor, @@ -350,6 +353,7 @@ pub mod pallet { } /// Cancel a named scheduled task. + #[pallet::call_index(3)] #[pallet::weight(::WeightInfo::cancel_named(T::MaxScheduledPerBlock::get()))] pub fn cancel_named(origin: OriginFor, id: TaskName) -> DispatchResult { T::ScheduleOrigin::ensure_origin(origin.clone())?; @@ -363,6 +367,7 @@ pub mod pallet { /// # /// Same as [`schedule`]. /// # + #[pallet::call_index(4)] #[pallet::weight(::WeightInfo::schedule(T::MaxScheduledPerBlock::get()))] pub fn schedule_after( origin: OriginFor, @@ -388,6 +393,7 @@ pub mod pallet { /// # /// Same as [`schedule_named`](Self::schedule_named). 
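`fill_block` above multiplies a `Perbill` straight into the maximum block weight: the per-thing types implement `Mul<Weight>`, scaling both the ref-time and proof-size components. A small sketch of the same arithmetic outside an annotation:

    use frame_support::weights::Weight;
    use sp_runtime::Perbill;

    // Half of a given block budget, e.g. for a `fill_block(50%)` call.
    fn half_of(max_block: Weight) -> Weight {
        Perbill::from_percent(50) * max_block
    }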
/// # + #[pallet::call_index(5)] #[pallet::weight(::WeightInfo::schedule_named(T::MaxScheduledPerBlock::get()))] pub fn schedule_named_after( origin: OriginFor, diff --git a/frame/scheduler/src/mock.rs b/frame/scheduler/src/mock.rs index 61efdfb67b73e..0aaac56667dcb 100644 --- a/frame/scheduler/src/mock.rs +++ b/frame/scheduler/src/mock.rs @@ -72,6 +72,7 @@ pub mod logger { where ::RuntimeOrigin: OriginTrait, { + #[pallet::call_index(0)] #[pallet::weight(*weight)] pub fn log(origin: OriginFor, i: u32, weight: Weight) -> DispatchResult { Self::deposit_event(Event::Logged(i, weight)); @@ -81,6 +82,7 @@ pub mod logger { Ok(()) } + #[pallet::call_index(1)] #[pallet::weight(*weight)] pub fn log_without_filter(origin: OriginFor, i: u32, weight: Weight) -> DispatchResult { Self::deposit_event(Event::Logged(i, weight)); diff --git a/frame/scored-pool/src/lib.rs b/frame/scored-pool/src/lib.rs index a015c1c568153..5db9c6506d770 100644 --- a/frame/scored-pool/src/lib.rs +++ b/frame/scored-pool/src/lib.rs @@ -311,6 +311,7 @@ pub mod pallet { /// /// The `index` parameter of this function must be set to /// the index of the transactor in the `Pool`. + #[pallet::call_index(0)] #[pallet::weight(0)] pub fn submit_candidacy(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; @@ -340,6 +341,7 @@ pub mod pallet { /// /// The `index` parameter of this function must be set to /// the index of the transactor in the `Pool`. + #[pallet::call_index(1)] #[pallet::weight(0)] pub fn withdraw_candidacy(origin: OriginFor, index: u32) -> DispatchResult { let who = ensure_signed(origin)?; @@ -358,6 +360,7 @@ pub mod pallet { /// /// The `index` parameter of this function must be set to /// the index of `dest` in the `Pool`. + #[pallet::call_index(2)] #[pallet::weight(0)] pub fn kick( origin: OriginFor, @@ -382,6 +385,7 @@ pub mod pallet { /// /// The `index` parameter of this function must be set to /// the index of the `dest` in the `Pool`. + #[pallet::call_index(3)] #[pallet::weight(0)] pub fn score( origin: OriginFor, @@ -421,6 +425,7 @@ pub mod pallet { /// (this happens each `Period`). /// /// May only be called from root. 
+ #[pallet::call_index(4)] #[pallet::weight(0)] pub fn change_member_count(origin: OriginFor, count: u32) -> DispatchResult { ensure_root(origin)?; diff --git a/frame/session/src/lib.rs b/frame/session/src/lib.rs index 7b97a20860175..4e2caf5e0874e 100644 --- a/frame/session/src/lib.rs +++ b/frame/session/src/lib.rs @@ -595,6 +595,7 @@ pub mod pallet { /// - DbReads per key id: `KeyOwner` /// - DbWrites per key id: `KeyOwner` /// # + #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::set_keys())] pub fn set_keys(origin: OriginFor, keys: T::Keys, proof: Vec) -> DispatchResult { let who = ensure_signed(origin)?; @@ -620,6 +621,7 @@ pub mod pallet { /// - DbWrites: `NextKeys`, `origin account` /// - DbWrites per key id: `KeyOwner` /// # + #[pallet::call_index(1)] #[pallet::weight(T::WeightInfo::purge_keys())] pub fn purge_keys(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; diff --git a/frame/society/src/lib.rs b/frame/society/src/lib.rs index 73a09490ea579..0edf00ff80f6e 100644 --- a/frame/society/src/lib.rs +++ b/frame/society/src/lib.rs @@ -711,6 +711,7 @@ pub mod pallet { /// /// Total Complexity: O(M + B + C + logM + logB + X) /// # + #[pallet::call_index(0)] #[pallet::weight(T::BlockWeights::get().max_block / 10)] pub fn bid(origin: OriginFor, value: BalanceOf) -> DispatchResult { let who = ensure_signed(origin)?; @@ -750,6 +751,7 @@ pub mod pallet { /// /// Total Complexity: O(B + X) /// # + #[pallet::call_index(1)] #[pallet::weight(T::BlockWeights::get().max_block / 10)] pub fn unbid(origin: OriginFor, pos: u32) -> DispatchResult { let who = ensure_signed(origin)?; @@ -822,6 +824,7 @@ pub mod pallet { /// /// Total Complexity: O(M + B + C + logM + logB + X) /// # + #[pallet::call_index(2)] #[pallet::weight(T::BlockWeights::get().max_block / 10)] pub fn vouch( origin: OriginFor, @@ -873,6 +876,7 @@ pub mod pallet { /// /// Total Complexity: O(B) /// # + #[pallet::call_index(3)] #[pallet::weight(T::BlockWeights::get().max_block / 10)] pub fn unvouch(origin: OriginFor, pos: u32) -> DispatchResult { let voucher = ensure_signed(origin)?; @@ -914,6 +918,7 @@ pub mod pallet { /// /// Total Complexity: O(M + logM + C) /// # + #[pallet::call_index(4)] #[pallet::weight(T::BlockWeights::get().max_block / 10)] pub fn vote( origin: OriginFor, @@ -950,6 +955,7 @@ pub mod pallet { /// /// Total Complexity: O(M + logM) /// # + #[pallet::call_index(5)] #[pallet::weight(T::BlockWeights::get().max_block / 10)] pub fn defender_vote(origin: OriginFor, approve: bool) -> DispatchResult { let voter = ensure_signed(origin)?; @@ -984,6 +990,7 @@ pub mod pallet { /// /// Total Complexity: O(M + logM + P + X) /// # + #[pallet::call_index(6)] #[pallet::weight(T::BlockWeights::get().max_block / 10)] pub fn payout(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; @@ -1026,6 +1033,7 @@ pub mod pallet { /// /// Total Complexity: O(1) /// # + #[pallet::call_index(7)] #[pallet::weight(T::BlockWeights::get().max_block / 10)] pub fn found( origin: OriginFor, @@ -1060,6 +1068,7 @@ pub mod pallet { /// /// Total Complexity: O(1) /// # + #[pallet::call_index(8)] #[pallet::weight(T::BlockWeights::get().max_block / 10)] pub fn unfound(origin: OriginFor) -> DispatchResult { let founder = ensure_signed(origin)?; @@ -1105,6 +1114,7 @@ pub mod pallet { /// /// Total Complexity: O(M + logM + B) /// # + #[pallet::call_index(9)] #[pallet::weight(T::BlockWeights::get().max_block / 10)] pub fn judge_suspended_member( origin: OriginFor, @@ -1182,6 +1192,7 @@ pub mod pallet { /// /// 
Total Complexity: O(M + logM + B + X) /// # + #[pallet::call_index(10)] #[pallet::weight(T::BlockWeights::get().max_block / 10)] pub fn judge_suspended_candidate( origin: OriginFor, @@ -1255,6 +1266,7 @@ pub mod pallet { /// /// Total Complexity: O(1) /// # + #[pallet::call_index(11)] #[pallet::weight(T::BlockWeights::get().max_block / 10)] pub fn set_max_members(origin: OriginFor, max: u32) -> DispatchResult { ensure_root(origin)?; diff --git a/frame/staking/src/pallet/mod.rs b/frame/staking/src/pallet/mod.rs index 8fddba2150370..9dc39dd4a2116 100644 --- a/frame/staking/src/pallet/mod.rs +++ b/frame/staking/src/pallet/mod.rs @@ -831,6 +831,7 @@ pub mod pallet { /// unless the `origin` falls below _existential deposit_ and gets removed as dust. /// ------------------ /// # + #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::bond())] pub fn bond( origin: OriginFor, @@ -900,6 +901,7 @@ pub mod pallet { /// - Independent of the arguments. Insignificant complexity. /// - O(1). /// # + #[pallet::call_index(1)] #[pallet::weight(T::WeightInfo::bond_extra())] pub fn bond_extra( origin: OriginFor, @@ -953,6 +955,7 @@ pub mod pallet { /// Emits `Unbonded`. /// /// See also [`Call::withdraw_unbonded`]. + #[pallet::call_index(2)] #[pallet::weight(T::WeightInfo::unbond())] pub fn unbond( origin: OriginFor, @@ -1032,6 +1035,7 @@ pub mod pallet { /// Complexity O(S) where S is the number of slashing spans to remove /// NOTE: Weight annotation is the kill scenario, we refund otherwise. /// # + #[pallet::call_index(3)] #[pallet::weight(T::WeightInfo::withdraw_unbonded_kill(*num_slashing_spans))] pub fn withdraw_unbonded( origin: OriginFor, @@ -1079,6 +1083,7 @@ pub mod pallet { /// Effects will be felt at the beginning of the next era. /// /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. + #[pallet::call_index(4)] #[pallet::weight(T::WeightInfo::validate())] pub fn validate(origin: OriginFor, prefs: ValidatorPrefs) -> DispatchResult { let controller = ensure_signed(origin)?; @@ -1122,6 +1127,7 @@ pub mod pallet { /// which is capped at CompactAssignments::LIMIT (T::MaxNominations). /// - Both the reads and writes follow a similar pattern. /// # + #[pallet::call_index(5)] #[pallet::weight(T::WeightInfo::nominate(targets.len() as u32))] pub fn nominate( origin: OriginFor, @@ -1190,6 +1196,7 @@ pub mod pallet { /// - Contains one read. /// - Writes are limited to the `origin` account key. /// # + #[pallet::call_index(6)] #[pallet::weight(T::WeightInfo::chill())] pub fn chill(origin: OriginFor) -> DispatchResult { let controller = ensure_signed(origin)?; @@ -1214,6 +1221,7 @@ pub mod pallet { /// - Read: Ledger /// - Write: Payee /// # + #[pallet::call_index(7)] #[pallet::weight(T::WeightInfo::set_payee())] pub fn set_payee( origin: OriginFor, @@ -1242,6 +1250,7 @@ pub mod pallet { /// - Read: Bonded, Ledger New Controller, Ledger Old Controller /// - Write: Bonded, Ledger New Controller, Ledger Old Controller /// # + #[pallet::call_index(8)] #[pallet::weight(T::WeightInfo::set_controller())] pub fn set_controller( origin: OriginFor, @@ -1270,6 +1279,7 @@ pub mod pallet { /// Weight: O(1) /// Write: Validator Count /// # + #[pallet::call_index(9)] #[pallet::weight(T::WeightInfo::set_validator_count())] pub fn set_validator_count( origin: OriginFor, @@ -1294,6 +1304,7 @@ pub mod pallet { /// # /// Same as [`Self::set_validator_count`]. 
/// # + #[pallet::call_index(10)] #[pallet::weight(T::WeightInfo::set_validator_count())] pub fn increase_validator_count( origin: OriginFor, @@ -1319,6 +1330,7 @@ pub mod pallet { /// # /// Same as [`Self::set_validator_count`]. /// # + #[pallet::call_index(11)] #[pallet::weight(T::WeightInfo::set_validator_count())] pub fn scale_validator_count(origin: OriginFor, factor: Percent) -> DispatchResult { ensure_root(origin)?; @@ -1349,6 +1361,7 @@ pub mod pallet { /// - Weight: O(1) /// - Write: ForceEra /// # + #[pallet::call_index(12)] #[pallet::weight(T::WeightInfo::force_no_eras())] pub fn force_no_eras(origin: OriginFor) -> DispatchResult { ensure_root(origin)?; @@ -1372,6 +1385,7 @@ pub mod pallet { /// - Weight: O(1) /// - Write ForceEra /// # + #[pallet::call_index(13)] #[pallet::weight(T::WeightInfo::force_new_era())] pub fn force_new_era(origin: OriginFor) -> DispatchResult { ensure_root(origin)?; @@ -1382,6 +1396,7 @@ pub mod pallet { /// Set the validators who cannot be slashed (if any). /// /// The dispatch origin must be Root. + #[pallet::call_index(14)] #[pallet::weight(T::WeightInfo::set_invulnerables(invulnerables.len() as u32))] pub fn set_invulnerables( origin: OriginFor, @@ -1395,6 +1410,7 @@ pub mod pallet { /// Force a current staker to become completely unstaked, immediately. /// /// The dispatch origin must be Root. + #[pallet::call_index(15)] #[pallet::weight(T::WeightInfo::force_unstake(*num_slashing_spans))] pub fn force_unstake( origin: OriginFor, @@ -1420,6 +1436,7 @@ pub mod pallet { /// The election process starts multiple blocks before the end of the era. /// If this is called just before a new era is triggered, the election process may not /// have enough blocks to get a result. + #[pallet::call_index(16)] #[pallet::weight(T::WeightInfo::force_new_era_always())] pub fn force_new_era_always(origin: OriginFor) -> DispatchResult { ensure_root(origin)?; @@ -1432,6 +1449,7 @@ pub mod pallet { /// Can be called by the `T::SlashCancelOrigin`. /// /// Parameters: era and indices of the slashes for that era to kill. + #[pallet::call_index(17)] #[pallet::weight(T::WeightInfo::cancel_deferred_slash(slash_indices.len() as u32))] pub fn cancel_deferred_slash( origin: OriginFor, @@ -1477,6 +1495,7 @@ pub mod pallet { /// NOTE: weights are assuming that payouts are made to alive stash account (Staked). /// Paying even a dead controller is cheaper weight-wise. We don't do any refunds here. /// # + #[pallet::call_index(18)] #[pallet::weight(T::WeightInfo::payout_stakers_alive_staked( T::MaxNominatorRewardedPerValidator::get() ))] @@ -1498,6 +1517,7 @@ pub mod pallet { /// - Bounded by `MaxUnlockingChunks`. /// - Storage changes: Can't increase storage, only decrease it. /// # + #[pallet::call_index(19)] #[pallet::weight(T::WeightInfo::rebond(T::MaxUnlockingChunks::get() as u32))] pub fn rebond( origin: OriginFor, @@ -1542,6 +1562,7 @@ pub mod pallet { /// It can be called by anyone, as long as `stash` meets the above requirements. /// /// Refunds the transaction fees upon successful execution. + #[pallet::call_index(20)] #[pallet::weight(T::WeightInfo::reap_stash(*num_slashing_spans))] pub fn reap_stash( origin: OriginFor, @@ -1574,6 +1595,7 @@ pub mod pallet { /// /// Note: Making this call only makes sense if you first set the validator preferences to /// block any further nominations. 
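The one change repeated across all of these hunks is the new `#[pallet::call_index(N)]` attribute. It pins the variant tag under which each dispatchable is encoded in the pallet's generated `Call` enum; previously the tag was implied by source order, so reordering or removing a function silently changed the transaction format. A minimal plain-Rust sketch of the idea (an illustrative enum and names, not the FRAME macro output):

// Sketch: a dispatchable is identified on the wire by a stable one-byte tag.
// Pinning the tag explicitly (as `#[pallet::call_index]` does) keeps
// encodings stable across refactors. Names and indices are illustrative.
#[derive(Debug, Clone, Copy, PartialEq)]
enum Call {
    Bond = 0,
    BondExtra = 1,
    Unbond = 2,
}

fn encode(call: Call) -> u8 {
    call as u8 // first byte of an encoded call: the pinned index
}

fn decode(tag: u8) -> Option<Call> {
    match tag {
        0 => Some(Call::Bond),
        1 => Some(Call::BondExtra),
        2 => Some(Call::Unbond),
        _ => None,
    }
}

fn main() {
    // Swapping the source order of `BondExtra` and `Unbond` would not change
    // their tags, because the discriminants are pinned.
    assert_eq!(encode(Call::Unbond), 2);
    assert_eq!(decode(1), Some(Call::BondExtra));
}

With explicit indices, a changed or duplicated tag becomes a visible, reviewable diff rather than an accident of ordering.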
+ #[pallet::call_index(21)] #[pallet::weight(T::WeightInfo::kick(who.len() as u32))] pub fn kick(origin: OriginFor, who: Vec>) -> DispatchResult { let controller = ensure_signed(origin)?; @@ -1621,6 +1643,7 @@ pub mod pallet { /// to kick people under the new limits, `chill_other` should be called. // We assume the worst case for this call is either: all items are set or all items are // removed. + #[pallet::call_index(22)] #[pallet::weight( T::WeightInfo::set_staking_configs_all_set() .max(T::WeightInfo::set_staking_configs_all_remove()) @@ -1681,6 +1704,7 @@ pub mod pallet { /// /// This can be helpful if bond requirements are updated, and we need to remove old users /// who do not satisfy these requirements. + #[pallet::call_index(23)] #[pallet::weight(T::WeightInfo::chill_other())] pub fn chill_other(origin: OriginFor, controller: T::AccountId) -> DispatchResult { // Anyone can call this function. @@ -1743,6 +1767,7 @@ pub mod pallet { /// Force a validator to have at least the minimum commission. This will not affect a /// validator who already has a commission greater than or equal to the minimum. Any account /// can call this. + #[pallet::call_index(24)] #[pallet::weight(T::WeightInfo::force_apply_min_commission())] pub fn force_apply_min_commission( origin: OriginFor, diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index aab92e678e88c..823ea08a0b573 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -546,6 +546,7 @@ pub mod pallet { /// Control the automatic migration. /// /// The dispatch origin of this call must be [`Config::ControlOrigin`]. + #[pallet::call_index(0)] #[pallet::weight(T::DbWeight::get().reads_writes(1, 1))] pub fn control_auto_migration( origin: OriginFor, @@ -577,6 +578,7 @@ pub mod pallet { /// Based on the documentation of [`MigrationTask::migrate_until_exhaustion`], the /// recommended way of doing this is to pass a `limit` that only bounds `count`, as the /// `size` limit can always be overwritten. + #[pallet::call_index(1)] #[pallet::weight( // the migration process Pallet::::dynamic_weight(limits.item, * real_size_upper) @@ -648,6 +650,7 @@ pub mod pallet { /// /// This does not affect the global migration process tracker ([`MigrationProcess`]), and /// should only be used in case any keys are leftover due to a bug. + #[pallet::call_index(2)] #[pallet::weight( T::WeightInfo::migrate_custom_top_success() .max(T::WeightInfo::migrate_custom_top_fail()) @@ -704,6 +707,7 @@ pub mod pallet { /// /// This does not affect the global migration process tracker ([`MigrationProcess`]), and /// should only be used in case any keys are leftover due to a bug. + #[pallet::call_index(3)] #[pallet::weight( T::WeightInfo::migrate_custom_child_success() .max(T::WeightInfo::migrate_custom_child_fail()) @@ -764,6 +768,7 @@ pub mod pallet { } /// Set the maximum limit of the signed migration. + #[pallet::call_index(4)] #[pallet::weight(T::DbWeight::get().reads_writes(1, 1))] pub fn set_signed_max_limits( origin: OriginFor, @@ -783,6 +788,7 @@ pub mod pallet { /// /// In case you mess things up, you can also, in principle, use this to reset the migration /// process. 
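A note on the weight expressions above: both the staking `set_staking_configs` hunk and the `migrate_custom_top`/`migrate_custom_child` hunks charge the maximum of two benchmarked paths, `success.max(fail)`, because which path runs is only known after dispatch. A standalone sketch of that pattern, assuming a two-dimensional weight modelled on Substrate's (`ref_time`, `proof_size`) pair; all figures are invented:

#[derive(Clone, Copy, Debug, PartialEq)]
struct Weight {
    ref_time: u64,
    proof_size: u64,
}

impl Weight {
    const fn from_parts(ref_time: u64, proof_size: u64) -> Self {
        Self { ref_time, proof_size }
    }
    // Component-wise maximum: a pre-dispatch bound must hold per dimension.
    fn max(self, other: Self) -> Self {
        Self {
            ref_time: self.ref_time.max(other.ref_time),
            proof_size: self.proof_size.max(other.proof_size),
        }
    }
    fn saturating_sub(self, other: Self) -> Self {
        Self {
            ref_time: self.ref_time.saturating_sub(other.ref_time),
            proof_size: self.proof_size.saturating_sub(other.proof_size),
        }
    }
}

fn main() {
    // Hypothetical benchmark results for the success and failure paths.
    let success = Weight::from_parts(1_200_000, 3_000);
    let fail = Weight::from_parts(900_000, 5_000);

    // Charged up front: the worst case in each dimension.
    let charged = success.max(fail);
    assert_eq!(charged, Weight::from_parts(1_200_000, 5_000));

    // If the success path actually ran, the difference can be refunded
    // post-dispatch (FRAME does this via the actual weight reported back).
    assert_eq!(charged.saturating_sub(success), Weight::from_parts(0, 2_000));
}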
+ #[pallet::call_index(5)] #[pallet::weight(T::DbWeight::get().reads_writes(1, 1))] pub fn force_set_progress( origin: OriginFor, diff --git a/frame/sudo/src/lib.rs b/frame/sudo/src/lib.rs index c18ced8911193..0867f24b1691e 100644 --- a/frame/sudo/src/lib.rs +++ b/frame/sudo/src/lib.rs @@ -148,6 +148,7 @@ pub mod pallet { /// - One DB write (event). /// - Weight of derivative `call` execution + 10,000. /// # + #[pallet::call_index(0)] #[pallet::weight({ let dispatch_info = call.get_dispatch_info(); (dispatch_info.weight, dispatch_info.class) @@ -176,6 +177,7 @@ pub mod pallet { /// - O(1). /// - The weight of this call is defined by the caller. /// # + #[pallet::call_index(1)] #[pallet::weight((*_weight, call.get_dispatch_info().class))] pub fn sudo_unchecked_weight( origin: OriginFor, @@ -202,6 +204,7 @@ pub mod pallet { /// - Limited storage reads. /// - One DB change. /// # + #[pallet::call_index(2)] #[pallet::weight(0)] pub fn set_key( origin: OriginFor, @@ -229,6 +232,7 @@ pub mod pallet { /// - One DB write (event). /// - Weight of derivative `call` execution + 10,000. /// # + #[pallet::call_index(3)] #[pallet::weight({ let dispatch_info = call.get_dispatch_info(); ( diff --git a/frame/sudo/src/mock.rs b/frame/sudo/src/mock.rs index db2ad4d563910..639e81ceaa308 100644 --- a/frame/sudo/src/mock.rs +++ b/frame/sudo/src/mock.rs @@ -49,6 +49,7 @@ pub mod logger { #[pallet::call] impl Pallet { + #[pallet::call_index(0)] #[pallet::weight(*weight)] pub fn privileged_i32_log( origin: OriginFor, @@ -62,6 +63,7 @@ pub mod logger { Ok(().into()) } + #[pallet::call_index(1)] #[pallet::weight(*weight)] pub fn non_privileged_log( origin: OriginFor, diff --git a/frame/support/test/tests/pallet.rs b/frame/support/test/tests/pallet.rs index 0fd32dad2242a..c0376d5aa450f 100644 --- a/frame/support/test/tests/pallet.rs +++ b/frame/support/test/tests/pallet.rs @@ -192,6 +192,7 @@ pub mod pallet { T::AccountId: From + From + SomeAssociation1, { /// Doc comment put in metadata + #[pallet::call_index(0)] #[pallet::weight(Weight::from_ref_time(*_foo as u64))] pub fn foo( origin: OriginFor, @@ -206,6 +207,7 @@ pub mod pallet { } /// Doc comment put in metadata + #[pallet::call_index(1)] #[pallet::weight(1)] pub fn foo_storage_layer( _origin: OriginFor, @@ -220,6 +222,7 @@ pub mod pallet { } // Test for DispatchResult return type + #[pallet::call_index(2)] #[pallet::weight(1)] pub fn foo_no_post_info(_origin: OriginFor) -> DispatchResult { Ok(()) diff --git a/frame/support/test/tests/pallet_compatibility.rs b/frame/support/test/tests/pallet_compatibility.rs index 398137d644ee4..300fb9a40cf4e 100644 --- a/frame/support/test/tests/pallet_compatibility.rs +++ b/frame/support/test/tests/pallet_compatibility.rs @@ -141,6 +141,7 @@ pub mod pallet { #[pallet::call] impl Pallet { + #[pallet::call_index(0)] #[pallet::weight(>::into(new_value.clone()))] pub fn set_dummy( origin: OriginFor, diff --git a/frame/support/test/tests/pallet_compatibility_instance.rs b/frame/support/test/tests/pallet_compatibility_instance.rs index e8b5fe9fa33d4..79370d911b943 100644 --- a/frame/support/test/tests/pallet_compatibility_instance.rs +++ b/frame/support/test/tests/pallet_compatibility_instance.rs @@ -127,6 +127,7 @@ pub mod pallet { #[pallet::call] impl, I: 'static> Pallet { + #[pallet::call_index(0)] #[pallet::weight(>::into(new_value.clone()))] pub fn set_dummy( origin: OriginFor, diff --git a/frame/support/test/tests/pallet_instance.rs b/frame/support/test/tests/pallet_instance.rs index 7e05e2ecf783b..d8ad13ceda1dd 100644 --- 
a/frame/support/test/tests/pallet_instance.rs +++ b/frame/support/test/tests/pallet_instance.rs @@ -82,6 +82,7 @@ pub mod pallet { #[pallet::call] impl, I: 'static> Pallet { /// Doc comment put in metadata + #[pallet::call_index(0)] #[pallet::weight(Weight::from_ref_time(*_foo as u64))] pub fn foo( origin: OriginFor, @@ -93,6 +94,7 @@ pub mod pallet { } /// Doc comment put in metadata + #[pallet::call_index(1)] #[pallet::weight(1)] pub fn foo_storage_layer( origin: OriginFor, diff --git a/frame/support/test/tests/storage_layers.rs b/frame/support/test/tests/storage_layers.rs index 6fbbb8ac67bd7..cff81c0bea2ed 100644 --- a/frame/support/test/tests/storage_layers.rs +++ b/frame/support/test/tests/storage_layers.rs @@ -46,6 +46,7 @@ pub mod pallet { #[pallet::call] impl Pallet { + #[pallet::call_index(0)] #[pallet::weight(1)] pub fn set_value(_origin: OriginFor, value: u32) -> DispatchResult { Value::::put(value); diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index f7e3849beeb8d..b41083538a325 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -372,6 +372,7 @@ pub mod pallet { /// # /// - `O(1)` /// # + #[pallet::call_index(0)] #[pallet::weight(T::SystemWeightInfo::remark(_remark.len() as u32))] pub fn remark(origin: OriginFor, _remark: Vec) -> DispatchResultWithPostInfo { ensure_signed_or_root(origin)?; @@ -379,6 +380,7 @@ pub mod pallet { } /// Set the number of pages in the WebAssembly environment's heap. + #[pallet::call_index(1)] #[pallet::weight((T::SystemWeightInfo::set_heap_pages(), DispatchClass::Operational))] pub fn set_heap_pages(origin: OriginFor, pages: u64) -> DispatchResultWithPostInfo { ensure_root(origin)?; @@ -399,6 +401,7 @@ pub mod pallet { /// The weight of this function is dependent on the runtime, but generally this is very /// expensive. We will treat this as a full block. /// # + #[pallet::call_index(2)] #[pallet::weight((T::BlockWeights::get().max_block, DispatchClass::Operational))] pub fn set_code(origin: OriginFor, code: Vec) -> DispatchResultWithPostInfo { ensure_root(origin)?; @@ -416,6 +419,7 @@ pub mod pallet { /// - 1 event. /// The weight of this function is dependent on the runtime. We will treat this as a full /// block. # + #[pallet::call_index(3)] #[pallet::weight((T::BlockWeights::get().max_block, DispatchClass::Operational))] pub fn set_code_without_checks( origin: OriginFor, @@ -427,6 +431,7 @@ pub mod pallet { } /// Set some items of storage. + #[pallet::call_index(4)] #[pallet::weight(( T::SystemWeightInfo::set_storage(items.len() as u32), DispatchClass::Operational, @@ -443,6 +448,7 @@ pub mod pallet { } /// Kill some items from storage. + #[pallet::call_index(5)] #[pallet::weight(( T::SystemWeightInfo::kill_storage(keys.len() as u32), DispatchClass::Operational, @@ -459,6 +465,7 @@ pub mod pallet { /// /// **NOTE:** We rely on the Root origin to provide us the number of subkeys under /// the prefix we are removing to accurately calculate the weight of this function. + #[pallet::call_index(6)] #[pallet::weight(( T::SystemWeightInfo::kill_prefix(_subkeys.saturating_add(1)), DispatchClass::Operational, @@ -474,6 +481,7 @@ pub mod pallet { } /// Make some on-chain remark and emit event. 
+ #[pallet::call_index(7)] #[pallet::weight(T::SystemWeightInfo::remark_with_event(remark.len() as u32))] pub fn remark_with_event( origin: OriginFor, diff --git a/frame/timestamp/src/lib.rs b/frame/timestamp/src/lib.rs index 6a7f849d1329a..e859474c2cb9e 100644 --- a/frame/timestamp/src/lib.rs +++ b/frame/timestamp/src/lib.rs @@ -198,6 +198,7 @@ pub mod pallet { /// `on_finalize`) /// - 1 event handler `on_timestamp_set`. Must be `O(1)`. /// # + #[pallet::call_index(0)] #[pallet::weight(( T::WeightInfo::set(), DispatchClass::Mandatory diff --git a/frame/tips/src/lib.rs b/frame/tips/src/lib.rs index 9313a26e52e00..dd9ebc9813233 100644 --- a/frame/tips/src/lib.rs +++ b/frame/tips/src/lib.rs @@ -235,6 +235,7 @@ pub mod pallet { /// - DbReads: `Reasons`, `Tips` /// - DbWrites: `Reasons`, `Tips` /// # + #[pallet::call_index(0)] #[pallet::weight(>::WeightInfo::report_awesome(reason.len() as u32))] pub fn report_awesome( origin: OriginFor, @@ -292,6 +293,7 @@ pub mod pallet { /// - DbReads: `Tips`, `origin account` /// - DbWrites: `Reasons`, `Tips`, `origin account` /// # + #[pallet::call_index(1)] #[pallet::weight(>::WeightInfo::retract_tip())] pub fn retract_tip(origin: OriginFor, hash: T::Hash) -> DispatchResult { let who = ensure_signed(origin)?; @@ -330,6 +332,7 @@ pub mod pallet { /// - DbReads: `Tippers`, `Reasons` /// - DbWrites: `Reasons`, `Tips` /// # + #[pallet::call_index(2)] #[pallet::weight(>::WeightInfo::tip_new(reason.len() as u32, T::Tippers::max_len() as u32))] pub fn tip_new( origin: OriginFor, @@ -384,6 +387,7 @@ pub mod pallet { /// - DbReads: `Tippers`, `Tips` /// - DbWrites: `Tips` /// # + #[pallet::call_index(3)] #[pallet::weight(>::WeightInfo::tip(T::Tippers::max_len() as u32))] pub fn tip( origin: OriginFor, @@ -417,6 +421,7 @@ pub mod pallet { /// - DbReads: `Tips`, `Tippers`, `tip finder` /// - DbWrites: `Reasons`, `Tips`, `Tippers`, `tip finder` /// # + #[pallet::call_index(4)] #[pallet::weight(>::WeightInfo::close_tip(T::Tippers::max_len() as u32))] pub fn close_tip(origin: OriginFor, hash: T::Hash) -> DispatchResult { ensure_signed(origin)?; @@ -443,6 +448,7 @@ pub mod pallet { /// `T` is charged as upper bound given by `ContainsLengthBound`. /// The actual cost depends on the implementation of `T::Tippers`. /// # + #[pallet::call_index(5)] #[pallet::weight(>::WeightInfo::slash_tip(T::Tippers::max_len() as u32))] pub fn slash_tip(origin: OriginFor, hash: T::Hash) -> DispatchResult { T::RejectOrigin::ensure_origin(origin)?; diff --git a/frame/transaction-storage/src/lib.rs b/frame/transaction-storage/src/lib.rs index 07144c5617113..cda7610efdf87 100644 --- a/frame/transaction-storage/src/lib.rs +++ b/frame/transaction-storage/src/lib.rs @@ -188,6 +188,7 @@ pub mod pallet { /// - n*log(n) of data size, as all data is pushed to an in-memory trie. /// Additionally contains a DB write. /// # + #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::store(data.len() as u32))] pub fn store(origin: OriginFor, data: Vec) -> DispatchResult { ensure!(data.len() > 0, Error::::EmptyTransaction); @@ -236,6 +237,7 @@ pub mod pallet { /// # /// - Constant. /// # + #[pallet::call_index(1)] #[pallet::weight(T::WeightInfo::renew())] pub fn renew( origin: OriginFor, @@ -281,6 +283,7 @@ pub mod pallet { /// There's a DB read for each transaction. /// Here we assume a maximum of 100 probed transactions. 
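Several annotations in the surrounding hunks pair a weight with a dispatch class, e.g. `(T::WeightInfo::set(), DispatchClass::Mandatory)` on `timestamp::set` and `DispatchClass::Operational` on `set_heap_pages`. The class decides which block limit the weight is counted against. A standalone sketch with invented limits (the real accounting lives in `frame_system::limits`):

#[derive(Clone, Copy, Debug)]
enum DispatchClass {
    Normal,      // ordinary transactions, capped at a ratio of the block
    Operational, // privileged operations, may fill the whole block
    Mandatory,   // applied unconditionally, e.g. inherents like `timestamp::set`
}

fn fits_in_block(used: u64, weight: u64, class: DispatchClass) -> bool {
    const MAX_BLOCK: u64 = 2_000_000_000_000; // hypothetical: ~2s of ref_time
    let limit = match class {
        // Assume 75% of the block is reserved for normal traffic, a common
        // default ratio in Substrate runtimes.
        DispatchClass::Normal => MAX_BLOCK / 100 * 75,
        DispatchClass::Operational => MAX_BLOCK,
        // Mandatory extrinsics must be included even if they exceed limits.
        DispatchClass::Mandatory => return true,
    };
    used.saturating_add(weight) <= limit
}

fn main() {
    let used = 1_400_000_000_000;
    // Past 75% of the block a normal extrinsic no longer fits...
    assert!(!fits_in_block(used, 200_000_000_000, DispatchClass::Normal));
    // ...but an operational one still does, and a mandatory one always goes in.
    assert!(fits_in_block(used, 200_000_000_000, DispatchClass::Operational));
    assert!(fits_in_block(used, u64::MAX, DispatchClass::Mandatory));
}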
/// # + #[pallet::call_index(2)] #[pallet::weight((T::WeightInfo::check_proof_max(), DispatchClass::Mandatory))] pub fn check_proof( origin: OriginFor, diff --git a/frame/treasury/src/lib.rs b/frame/treasury/src/lib.rs index 4aa00c348585c..0ffc53d8b7978 100644 --- a/frame/treasury/src/lib.rs +++ b/frame/treasury/src/lib.rs @@ -350,6 +350,7 @@ pub mod pallet { /// - DbReads: `ProposalCount`, `origin account` /// - DbWrites: `ProposalCount`, `Proposals`, `origin account` /// # + #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::propose_spend())] pub fn propose_spend( origin: OriginFor, @@ -380,6 +381,7 @@ pub mod pallet { /// - DbReads: `Proposals`, `rejected proposer account` /// - DbWrites: `Proposals`, `rejected proposer account` /// # + #[pallet::call_index(1)] #[pallet::weight((T::WeightInfo::reject_proposal(), DispatchClass::Operational))] pub fn reject_proposal( origin: OriginFor, @@ -410,6 +412,7 @@ pub mod pallet { /// - DbReads: `Proposals`, `Approvals` /// - DbWrite: `Approvals` /// # + #[pallet::call_index(2)] #[pallet::weight((T::WeightInfo::approve_proposal(T::MaxApprovals::get()), DispatchClass::Operational))] pub fn approve_proposal( origin: OriginFor, @@ -431,6 +434,7 @@ pub mod pallet { /// /// NOTE: For record-keeping purposes, the proposer is deemed to be equivalent to the /// beneficiary. + #[pallet::call_index(3)] #[pallet::weight(T::WeightInfo::spend())] pub fn spend( origin: OriginFor, @@ -472,6 +476,7 @@ pub mod pallet { /// - `ProposalNotApproved`: The `proposal_id` supplied was not found in the approval queue, /// i.e., the proposal has not been approved. This could also mean the proposal does not /// exist altogether, thus there is no way it would have been approved in the first place. + #[pallet::call_index(4)] #[pallet::weight((T::WeightInfo::remove_approval(), DispatchClass::Operational))] pub fn remove_approval( origin: OriginFor, diff --git a/frame/uniques/src/lib.rs b/frame/uniques/src/lib.rs index 185d8fc0c8edd..8157817d4166e 100644 --- a/frame/uniques/src/lib.rs +++ b/frame/uniques/src/lib.rs @@ -449,6 +449,7 @@ pub mod pallet { /// Emits `Created` event when successful. /// /// Weight: `O(1)` + #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::create())] pub fn create( origin: OriginFor, @@ -485,6 +486,7 @@ pub mod pallet { /// Emits `ForceCreated` event when successful. /// /// Weight: `O(1)` + #[pallet::call_index(1)] #[pallet::weight(T::WeightInfo::force_create())] pub fn force_create( origin: OriginFor, @@ -520,6 +522,7 @@ pub mod pallet { /// - `n = witness.items` /// - `m = witness.item_metadatas` /// - `a = witness.attributes` + #[pallet::call_index(2)] #[pallet::weight(T::WeightInfo::destroy( witness.items, witness.item_metadatas, @@ -555,6 +558,7 @@ pub mod pallet { /// Emits `Issued` event when successful. /// /// Weight: `O(1)` + #[pallet::call_index(3)] #[pallet::weight(T::WeightInfo::mint())] pub fn mint( origin: OriginFor, @@ -584,6 +588,7 @@ pub mod pallet { /// /// Weight: `O(1)` /// Modes: `check_owner.is_some()`. + #[pallet::call_index(4)] #[pallet::weight(T::WeightInfo::burn())] pub fn burn( origin: OriginFor, @@ -622,6 +627,7 @@ pub mod pallet { /// Emits `Transferred`. /// /// Weight: `O(1)` + #[pallet::call_index(5)] #[pallet::weight(T::WeightInfo::transfer())] pub fn transfer( origin: OriginFor, @@ -658,6 +664,7 @@ pub mod pallet { /// is not permitted to call it. 
/// /// Weight: `O(items.len())` + #[pallet::call_index(6)] #[pallet::weight(T::WeightInfo::redeposit(items.len() as u32))] pub fn redeposit( origin: OriginFor, @@ -718,6 +725,7 @@ pub mod pallet { /// Emits `Frozen`. /// /// Weight: `O(1)` + #[pallet::call_index(7)] #[pallet::weight(T::WeightInfo::freeze())] pub fn freeze( origin: OriginFor, @@ -749,6 +757,7 @@ pub mod pallet { /// Emits `Thawed`. /// /// Weight: `O(1)` + #[pallet::call_index(8)] #[pallet::weight(T::WeightInfo::thaw())] pub fn thaw( origin: OriginFor, @@ -779,6 +788,7 @@ pub mod pallet { /// Emits `CollectionFrozen`. /// /// Weight: `O(1)` + #[pallet::call_index(9)] #[pallet::weight(T::WeightInfo::freeze_collection())] pub fn freeze_collection( origin: OriginFor, @@ -806,6 +816,7 @@ pub mod pallet { /// Emits `CollectionThawed`. /// /// Weight: `O(1)` + #[pallet::call_index(10)] #[pallet::weight(T::WeightInfo::thaw_collection())] pub fn thaw_collection( origin: OriginFor, @@ -835,6 +846,7 @@ pub mod pallet { /// Emits `OwnerChanged`. /// /// Weight: `O(1)` + #[pallet::call_index(11)] #[pallet::weight(T::WeightInfo::transfer_ownership())] pub fn transfer_ownership( origin: OriginFor, @@ -883,6 +895,7 @@ pub mod pallet { /// Emits `TeamChanged`. /// /// Weight: `O(1)` + #[pallet::call_index(12)] #[pallet::weight(T::WeightInfo::set_team())] pub fn set_team( origin: OriginFor, @@ -923,6 +936,7 @@ pub mod pallet { /// Emits `ApprovedTransfer` on success. /// /// Weight: `O(1)` + #[pallet::call_index(13)] #[pallet::weight(T::WeightInfo::approve_transfer())] pub fn approve_transfer( origin: OriginFor, @@ -976,6 +990,7 @@ pub mod pallet { /// Emits `ApprovalCancelled` on success. /// /// Weight: `O(1)` + #[pallet::call_index(14)] #[pallet::weight(T::WeightInfo::cancel_approval())] pub fn cancel_approval( origin: OriginFor, @@ -1028,6 +1043,7 @@ pub mod pallet { /// Emits `ItemStatusChanged` with the identity of the item. /// /// Weight: `O(1)` + #[pallet::call_index(15)] #[pallet::weight(T::WeightInfo::force_item_status())] pub fn force_item_status( origin: OriginFor, @@ -1077,6 +1093,7 @@ pub mod pallet { /// Emits `AttributeSet`. /// /// Weight: `O(1)` + #[pallet::call_index(16)] #[pallet::weight(T::WeightInfo::set_attribute())] pub fn set_attribute( origin: OriginFor, @@ -1139,6 +1156,7 @@ pub mod pallet { /// Emits `AttributeCleared`. /// /// Weight: `O(1)` + #[pallet::call_index(17)] #[pallet::weight(T::WeightInfo::clear_attribute())] pub fn clear_attribute( origin: OriginFor, @@ -1188,6 +1206,7 @@ pub mod pallet { /// Emits `MetadataSet`. /// /// Weight: `O(1)` + #[pallet::call_index(18)] #[pallet::weight(T::WeightInfo::set_metadata())] pub fn set_metadata( origin: OriginFor, @@ -1250,6 +1269,7 @@ pub mod pallet { /// Emits `MetadataCleared`. /// /// Weight: `O(1)` + #[pallet::call_index(19)] #[pallet::weight(T::WeightInfo::clear_metadata())] pub fn clear_metadata( origin: OriginFor, @@ -1299,6 +1319,7 @@ pub mod pallet { /// Emits `CollectionMetadataSet`. /// /// Weight: `O(1)` + #[pallet::call_index(20)] #[pallet::weight(T::WeightInfo::set_collection_metadata())] pub fn set_collection_metadata( origin: OriginFor, @@ -1356,6 +1377,7 @@ pub mod pallet { /// Emits `CollectionMetadataCleared`. /// /// Weight: `O(1)` + #[pallet::call_index(21)] #[pallet::weight(T::WeightInfo::clear_collection_metadata())] pub fn clear_collection_metadata( origin: OriginFor, @@ -1392,6 +1414,7 @@ pub mod pallet { /// ownership transferal. /// /// Emits `OwnershipAcceptanceChanged`. 
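The `uniques::destroy` hunk above is the one place in this pallet where the weight comes from a caller-supplied witness: `T::WeightInfo::destroy(witness.items, witness.item_metadatas, witness.attributes)`, linear in each count because every entry has to be deleted from storage. A sketch of such a witness-linear formula, with made-up coefficients:

#[derive(Clone, Copy, Debug)]
struct DestroyWitness {
    items: u32,
    item_metadatas: u32,
    attributes: u32,
}

// base + per-entry slopes; all constants here are illustrative, real ones
// come out of the benchmarking CLI.
fn destroy_ref_time(w: DestroyWitness) -> u64 {
    const BASE: u64 = 50_000_000;
    const PER_ITEM: u64 = 10_000_000;
    const PER_METADATA: u64 = 5_000_000;
    const PER_ATTRIBUTE: u64 = 3_000_000;
    BASE.saturating_add(PER_ITEM.saturating_mul(w.items as u64))
        .saturating_add(PER_METADATA.saturating_mul(w.item_metadatas as u64))
        .saturating_add(PER_ATTRIBUTE.saturating_mul(w.attributes as u64))
}

fn main() {
    let w = DestroyWitness { items: 3, item_metadatas: 2, attributes: 1 };
    assert_eq!(destroy_ref_time(w), 93_000_000);
}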
+ #[pallet::call_index(22)] #[pallet::weight(T::WeightInfo::set_accept_ownership())] pub fn set_accept_ownership( origin: OriginFor, @@ -1428,6 +1451,7 @@ pub mod pallet { /// - `max_supply`: The maximum amount of items a collection could have. /// /// Emits `CollectionMaxSupplySet` event when successful. + #[pallet::call_index(23)] #[pallet::weight(T::WeightInfo::set_collection_max_supply())] pub fn set_collection_max_supply( origin: OriginFor, @@ -1467,6 +1491,7 @@ pub mod pallet { /// /// Emits `ItemPriceSet` on success if the price is not `None`. /// Emits `ItemPriceRemoved` on success if the price is `None`. + #[pallet::call_index(24)] #[pallet::weight(T::WeightInfo::set_price())] pub fn set_price( origin: OriginFor, @@ -1489,6 +1514,7 @@ pub mod pallet { /// - `bid_price`: The price the sender is willing to pay. /// /// Emits `ItemBought` on success. + #[pallet::call_index(25)] #[pallet::weight(T::WeightInfo::buy_item())] #[transactional] pub fn buy_item( diff --git a/frame/utility/src/lib.rs b/frame/utility/src/lib.rs index 00cb18e1b23aa..2d60ae15679d5 100644 --- a/frame/utility/src/lib.rs +++ b/frame/utility/src/lib.rs @@ -181,6 +181,7 @@ pub mod pallet { /// `BatchInterrupted` event is deposited, along with the number of successful calls made /// and the error of the failed call. If all were successful, then the `BatchCompleted` /// event is deposited. + #[pallet::call_index(0)] #[pallet::weight({ let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::>(); let dispatch_weight = dispatch_infos.iter() @@ -254,6 +255,7 @@ pub mod pallet { /// NOTE: Prior to version *12, this was called `as_limited_sub`. /// /// The dispatch origin for this call must be _Signed_. + #[pallet::call_index(1)] #[pallet::weight({ let dispatch_info = call.get_dispatch_info(); ( @@ -302,6 +304,7 @@ pub mod pallet { /// # /// - Complexity: O(C) where C is the number of calls to be batched. /// # + #[pallet::call_index(2)] #[pallet::weight({ let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::>(); let dispatch_weight = dispatch_infos.iter() @@ -377,6 +380,7 @@ pub mod pallet { /// - One DB write (event). /// - Weight of derivative `call` execution + T::WeightInfo::dispatch_as(). /// # + #[pallet::call_index(3)] #[pallet::weight({ let dispatch_info = call.get_dispatch_info(); ( @@ -414,6 +418,7 @@ pub mod pallet { /// # /// - Complexity: O(C) where C is the number of calls to be batched. /// # + #[pallet::call_index(4)] #[pallet::weight({ let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::>(); let dispatch_weight = dispatch_infos.iter() @@ -481,6 +486,7 @@ pub mod pallet { /// Root origin to specify the weight of the call. /// /// The dispatch origin for this call must be _Root_. 
+ #[pallet::call_index(5)] #[pallet::weight((*_weight, call.get_dispatch_info().class))] pub fn with_weight( origin: OriginFor, diff --git a/frame/utility/src/tests.rs b/frame/utility/src/tests.rs index d48ce139d839c..f9d6a16c1a0d4 100644 --- a/frame/utility/src/tests.rs +++ b/frame/utility/src/tests.rs @@ -53,11 +53,13 @@ pub mod example { #[pallet::call] impl Pallet { + #[pallet::call_index(0)] #[pallet::weight(*_weight)] pub fn noop(_origin: OriginFor, _weight: Weight) -> DispatchResult { Ok(()) } + #[pallet::call_index(1)] #[pallet::weight(*_start_weight)] pub fn foobar( origin: OriginFor, @@ -78,6 +80,7 @@ pub mod example { } } + #[pallet::call_index(2)] #[pallet::weight(0)] pub fn big_variant(_origin: OriginFor, _arg: [u8; 400]) -> DispatchResult { Ok(()) @@ -105,6 +108,7 @@ mod mock_democracy { #[pallet::call] impl Pallet { + #[pallet::call_index(3)] #[pallet::weight(0)] pub fn external_propose_majority(origin: OriginFor) -> DispatchResult { T::ExternalMajorityOrigin::ensure_origin(origin)?; diff --git a/frame/vesting/src/lib.rs b/frame/vesting/src/lib.rs index a92f94baf6cf9..3439608af3ce4 100644 --- a/frame/vesting/src/lib.rs +++ b/frame/vesting/src/lib.rs @@ -303,6 +303,7 @@ pub mod pallet { /// - Reads: Vesting Storage, Balances Locks, [Sender Account] /// - Writes: Vesting Storage, Balances Locks, [Sender Account] /// # + #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::vest_locked(MaxLocksOf::::get(), T::MAX_VESTING_SCHEDULES) .max(T::WeightInfo::vest_unlocked(MaxLocksOf::::get(), T::MAX_VESTING_SCHEDULES)) )] @@ -326,6 +327,7 @@ pub mod pallet { /// - Reads: Vesting Storage, Balances Locks, Target Account /// - Writes: Vesting Storage, Balances Locks, Target Account /// # + #[pallet::call_index(1)] #[pallet::weight(T::WeightInfo::vest_other_locked(MaxLocksOf::::get(), T::MAX_VESTING_SCHEDULES) .max(T::WeightInfo::vest_other_unlocked(MaxLocksOf::::get(), T::MAX_VESTING_SCHEDULES)) )] @@ -352,6 +354,7 @@ pub mod pallet { /// - Reads: Vesting Storage, Balances Locks, Target Account, [Sender Account] /// - Writes: Vesting Storage, Balances Locks, Target Account, [Sender Account] /// # + #[pallet::call_index(2)] #[pallet::weight( T::WeightInfo::vested_transfer(MaxLocksOf::::get(), T::MAX_VESTING_SCHEDULES) )] @@ -383,6 +386,7 @@ pub mod pallet { /// - Reads: Vesting Storage, Balances Locks, Target Account, Source Account /// - Writes: Vesting Storage, Balances Locks, Target Account, Source Account /// # + #[pallet::call_index(3)] #[pallet::weight( T::WeightInfo::force_vested_transfer(MaxLocksOf::::get(), T::MAX_VESTING_SCHEDULES) )] @@ -417,6 +421,7 @@ pub mod pallet { /// /// - `schedule1_index`: index of the first schedule to merge. /// - `schedule2_index`: index of the second schedule to merge. 
+ #[pallet::call_index(4)] #[pallet::weight( T::WeightInfo::not_unlocking_merge_schedules(MaxLocksOf::<T>::get(), T::MAX_VESTING_SCHEDULES) .max(T::WeightInfo::unlocking_merge_schedules(MaxLocksOf::<T>::get(), T::MAX_VESTING_SCHEDULES)) diff --git a/frame/whitelist/src/lib.rs b/frame/whitelist/src/lib.rs index 1b2dc9415607e..8a5666331c7e9 100644 --- a/frame/whitelist/src/lib.rs +++ b/frame/whitelist/src/lib.rs @@ -119,6 +119,7 @@ pub mod pallet { #[pallet::call] impl<T: Config> Pallet<T> { + #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::whitelist_call())] pub fn whitelist_call(origin: OriginFor<T>, call_hash: PreimageHash) -> DispatchResult { T::WhitelistOrigin::ensure_origin(origin)?; @@ -136,6 +137,7 @@ pub mod pallet { Ok(()) } + #[pallet::call_index(1)] #[pallet::weight(T::WeightInfo::remove_whitelisted_call())] pub fn remove_whitelisted_call( origin: OriginFor<T>, @@ -152,6 +154,7 @@ pub mod pallet { Ok(()) } + #[pallet::call_index(2)] #[pallet::weight( T::WeightInfo::dispatch_whitelisted_call(*call_encoded_len) .saturating_add(*call_weight_witness) @@ -190,6 +193,7 @@ pub mod pallet { Ok(actual_weight.into()) } + #[pallet::call_index(3)] #[pallet::weight({ let call_weight = call.get_dispatch_info().weight; let call_len = call.encoded_size() as u32; From f3c95e63ab92154a102aca61b6e5a206c7e3b32c Mon Sep 17 00:00:00 2001 From: Arkadiy Paronyan Date: Mon, 12 Dec 2022 16:22:47 +0100 Subject: [PATCH 17/29] Pin canonicalized block (#12902) --- client/state-db/src/noncanonical.rs | 28 +++++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/client/state-db/src/noncanonical.rs b/client/state-db/src/noncanonical.rs index df09a9c017747..7cb3017966b0f 100644 --- a/client/state-db/src/noncanonical.rs +++ b/client/state-db/src/noncanonical.rs @@ -38,6 +38,7 @@ pub struct NonCanonicalOverlay<BlockHash: Hash, Key: Hash> { // would be deleted but kept around because block is pinned, ref counted. pinned: HashMap<BlockHash, u32>, pinned_insertions: HashMap<BlockHash, (Vec<Key>, u32)>, + last_canon_pinned: Option<BlockHash>, } #[cfg_attr(test, derive(PartialEq, Debug))] @@ -225,6 +226,7 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> { pinned: Default::default(), pinned_insertions: Default::default(), values, + last_canon_pinned: None, }) } @@ -367,6 +369,16 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> { .position(|overlay| overlay.hash == *hash) .ok_or(StateDbError::InvalidBlock)?; + // No failures are possible beyond this point.
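The hunk continuing below is the heart of this patch: canonicalization now force-pins the newly canonical block, remembers it in `last_canon_pinned`, and unpins the previously remembered one, so the just-canonicalized state stays reachable until the next canonicalization. A standalone sketch of that ref-counting discipline (`u64` stands in for a block hash; the real overlay also journals insertions per pinned block):

use std::collections::HashMap;

#[derive(Default)]
struct Pinner {
    pinned: HashMap<u64, u32>, // block hash -> pin reference count
    last_canon_pinned: Option<u64>,
}

impl Pinner {
    fn pin(&mut self, hash: u64) {
        *self.pinned.entry(hash).or_insert(0) += 1;
    }
    fn unpin(&mut self, hash: u64) {
        if let Some(refs) = self.pinned.get_mut(&hash) {
            *refs -= 1;
            if *refs == 0 {
                // No pins left: the block's data may now be pruned.
                self.pinned.remove(&hash);
            }
        }
    }
    fn canonicalize(&mut self, hash: u64) {
        // Unpin whatever the previous canonicalization force-pinned...
        if let Some(prev) = self.last_canon_pinned.take() {
            self.unpin(prev);
        }
        // ...and pin the new canonical block so it is not discarded at once.
        self.pin(hash);
        self.last_canon_pinned = Some(hash);
    }
}

fn main() {
    let mut p = Pinner::default();
    p.canonicalize(1);
    assert!(p.pinned.contains_key(&1));
    p.canonicalize(2);
    // Block 1 is released as soon as block 2 becomes canonical.
    assert!(!p.pinned.contains_key(&1) && p.pinned.contains_key(&2));
}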
+ + // Unpin previously canonicalized block + if let Some(prev_hash) = self.last_canon_pinned.take() { + self.unpin(&prev_hash); + } + // Force pin canonicalized block so that it is not discarded immediately + self.pin(hash); + self.last_canon_pinned = Some(hash.clone()); + let mut discarded_journals = Vec::new(); let mut discarded_blocks = Vec::new(); for (i, overlay) in level.blocks.into_iter().enumerate() { @@ -680,6 +692,7 @@ mod tests { db.commit(&overlay.insert(&h2, 11, &h1, make_changeset(&[5], &[3])).unwrap()); let mut commit = CommitSet::default(); overlay.canonicalize(&h1, &mut commit).unwrap(); + overlay.unpin(&h1); db.commit(&commit); assert_eq!(overlay.levels.len(), 1); @@ -707,15 +720,16 @@ mod tests { let mut commit = CommitSet::default(); overlay.canonicalize(&h1, &mut commit).unwrap(); db.commit(&commit); - assert!(!contains(&overlay, 5)); + assert!(contains(&overlay, 5)); assert!(contains(&overlay, 7)); assert_eq!(overlay.levels.len(), 1); - assert_eq!(overlay.parents.len(), 1); + assert_eq!(overlay.parents.len(), 2); let mut commit = CommitSet::default(); overlay.canonicalize(&h2, &mut commit).unwrap(); + assert!(!contains(&overlay, 5)); db.commit(&commit); assert_eq!(overlay.levels.len(), 0); - assert_eq!(overlay.parents.len(), 0); + assert_eq!(overlay.parents.len(), 1); assert!(db.data_eq(&make_db(&[1, 4, 6, 7, 8]))); } @@ -732,6 +746,8 @@ mod tests { let mut commit = CommitSet::default(); overlay.canonicalize(&h_1, &mut commit).unwrap(); db.commit(&commit); + // explicitly unpin last block + overlay.unpin(&h_1); assert!(!contains(&overlay, 1)); } @@ -818,6 +834,8 @@ mod tests { // canonicalize 1. 2 and all its children should be discarded let mut commit = CommitSet::default(); overlay.canonicalize(&h_1, &mut commit).unwrap(); + // explicitly unpin last block + overlay.unpin(&h_1); db.commit(&commit); assert_eq!(overlay.levels.len(), 2); assert_eq!(overlay.parents.len(), 6); @@ -838,6 +856,7 @@ mod tests { // canonicalize 1_2. 1_1 and all its children should be discarded let mut commit = CommitSet::default(); overlay.canonicalize(&h_1_2, &mut commit).unwrap(); + overlay.unpin(&h_1_2); db.commit(&commit); assert_eq!(overlay.levels.len(), 1); assert_eq!(overlay.parents.len(), 3); @@ -854,6 +873,7 @@ mod tests { // canonicalize 1_2_2 let mut commit = CommitSet::default(); overlay.canonicalize(&h_1_2_2, &mut commit).unwrap(); + overlay.unpin(&h_1_2_2); db.commit(&commit); assert_eq!(overlay.levels.len(), 0); assert_eq!(overlay.parents.len(), 0); @@ -964,6 +984,7 @@ mod tests { assert!(contains(&overlay, 1)); overlay.unpin(&h_21); assert!(!contains(&overlay, 1)); + overlay.unpin(&h_12); assert!(overlay.pinned.is_empty()); } @@ -998,6 +1019,7 @@ mod tests { let mut commit = CommitSet::default(); overlay.canonicalize(&h21, &mut commit).unwrap(); // h11 should stay in the DB + overlay.unpin(&h21); db.commit(&commit); assert!(!contains(&overlay, 21)); } From d4837cb5ede0c4a17a1aca8a72a48fbac11e9ef7 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Mon, 12 Dec 2022 17:05:13 +0000 Subject: [PATCH 18/29] Remove implicit approval chilling upon slash.
(#12420) * don't read slashing spans when taking election snapshot * update cargo.toml * bring back remote test * fix merge stuff * fix npos-voters function sig * remove as much redundant diff as you can * Update frame/staking/src/pallet/mod.rs Co-authored-by: Andronik * fix * Update frame/staking/src/pallet/impls.rs * update lock * fix all tests * review comments * fmt * fix offence bench * clippy * ".git/.scripts/bench-bot.sh" pallet dev pallet_staking Co-authored-by: Andronik Co-authored-by: Ankan Co-authored-by: command-bot <> --- .../election-provider-multi-phase/src/lib.rs | 3 + frame/offences/benchmarking/src/lib.rs | 14 +- frame/staking/src/benchmarking.rs | 11 +- frame/staking/src/pallet/impls.rs | 37 +- frame/staking/src/pallet/mod.rs | 7 +- frame/staking/src/slashing.rs | 10 +- frame/staking/src/tests.rs | 330 +++++---- frame/staking/src/weights.rs | 631 +++++++++--------- utils/frame/remote-externalities/src/lib.rs | 29 +- 9 files changed, 526 insertions(+), 546 deletions(-) diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index 4704eaffa0bfe..6c4a55800f7e8 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -2293,6 +2293,8 @@ mod tests { assert_eq!(MultiPhase::elect().unwrap_err(), ElectionError::Fallback("NoFallback.")); // phase is now emergency. assert_eq!(MultiPhase::current_phase(), Phase::Emergency); + // snapshot is still there until election finalizes. + assert!(MultiPhase::snapshot().is_some()); assert_eq!( multi_phase_events(), @@ -2318,6 +2320,7 @@ mod tests { // phase is now emergency. assert_eq!(MultiPhase::current_phase(), Phase::Emergency); assert!(MultiPhase::queued_solution().is_none()); + assert!(MultiPhase::snapshot().is_some()); // no single account can trigger this assert_noop!( diff --git a/frame/offences/benchmarking/src/lib.rs b/frame/offences/benchmarking/src/lib.rs index 555ec42882ee1..e5ec2952f8114 100644 --- a/frame/offences/benchmarking/src/lib.rs +++ b/frame/offences/benchmarking/src/lib.rs @@ -308,17 +308,20 @@ benchmarks! { let slash_amount = slash_fraction * bond_amount; let reward_amount = slash_amount.saturating_mul(1 + n) / 2; let reward = reward_amount / r; + let slash_report = |id| core::iter::once( + ::RuntimeEvent::from(StakingEvent::::SlashReported{ validator: id, fraction: slash_fraction, slash_era: 0}) + ); let slash = |id| core::iter::once( - ::RuntimeEvent::from(StakingEvent::::Slashed{staker: id, amount: BalanceOf::::from(slash_amount)}) + ::RuntimeEvent::from(StakingEvent::::Slashed{ staker: id, amount: BalanceOf::::from(slash_amount) }) ); let balance_slash = |id| core::iter::once( - ::RuntimeEvent::from(pallet_balances::Event::::Slashed{who: id, amount: slash_amount.into()}) + ::RuntimeEvent::from(pallet_balances::Event::::Slashed{ who: id, amount: slash_amount.into() }) ); let chill = |id| core::iter::once( - ::RuntimeEvent::from(StakingEvent::::Chilled{stash: id}) + ::RuntimeEvent::from(StakingEvent::::Chilled{ stash: id }) ); let balance_deposit = |id, amount: u32| - ::RuntimeEvent::from(pallet_balances::Event::::Deposit{who: id, amount: amount.into()}); + ::RuntimeEvent::from(pallet_balances::Event::::Deposit{ who: id, amount: amount.into() }); let mut first = true; let slash_events = raw_offenders.into_iter() .flat_map(|offender| { @@ -328,6 +331,7 @@ benchmarks! 
{ }); let mut events = chill(offender.stash.clone()).map(Into::into) + .chain(slash_report(offender.stash.clone()).map(Into::into)) .chain(balance_slash(offender.stash.clone()).map(Into::into)) .chain(slash(offender.stash).map(Into::into)) .chain(nom_slashes) @@ -407,6 +411,7 @@ benchmarks! { System::::event_count(), 0 + 1 // offence + 3 // reporter (reward + endowment) + + 1 // offenders reported + 2 // offenders slashed + 1 // offenders chilled + 2 * n // nominators slashed @@ -443,6 +448,7 @@ benchmarks! { System::::event_count(), 0 + 1 // offence + 3 // reporter (reward + endowment) + + 1 // offenders reported + 2 // offenders slashed + 1 // offenders chilled + 2 * n // nominators slashed diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index dcb861e2ce419..8409b5413f992 100644 --- a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -792,12 +792,10 @@ benchmarks! { } get_npos_voters { - // number of validator intention. + // number of validator intention. we will iterate all of them. let v in (MaxValidators::::get() / 2) .. MaxValidators::::get(); - // number of nominator intention. + // number of nominator intention. we will iterate all of them. let n in (MaxNominators::::get() / 2) .. MaxNominators::::get(); - // total number of slashing spans. Assigned to validators randomly. - let s in 1 .. 20; let validators = create_validators_with_nominators_for_era::( v, n, T::MaxNominations::get() as usize, false, None @@ -806,9 +804,8 @@ benchmarks! { .map(|v| T::Lookup::lookup(v).unwrap()) .collect::>(); - (0..s).for_each(|index| { - add_slashing_spans::(&validators[index as usize], 10); - }); + assert_eq!(Validators::::count(), v); + assert_eq!(Nominators::::count(), n); let num_voters = (v + n) as usize; }: { diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs index c22a2bd2d1f77..6729a2ca32ecc 100644 --- a/frame/staking/src/pallet/impls.rs +++ b/frame/staking/src/pallet/impls.rs @@ -40,7 +40,7 @@ use sp_staking::{ offence::{DisableStrategy, OffenceDetails, OnOffenceHandler}, EraIndex, SessionIndex, Stake, StakingInterface, }; -use sp_std::{collections::btree_map::BTreeMap, prelude::*}; +use sp_std::prelude::*; use crate::{ log, slashing, weights::WeightInfo, ActiveEraInfo, BalanceOf, EraPayout, Exposure, ExposureOf, @@ -351,6 +351,7 @@ impl Pallet { } } + /// Start a new era. It does: /// /// * Increment `active_era.index`, /// * reset `active_era.start`, @@ -704,11 +705,6 @@ impl Pallet { /// `maybe_max_len` can imposes a cap on the number of voters returned; /// /// This function is self-weighing as [`DispatchClass::Mandatory`]. - /// - /// ### Slashing - /// - /// All votes that have been submitted before the last non-zero slash of the corresponding - /// target are *auto-chilled*, but still count towards the limit imposed by `maybe_max_len`. pub fn get_npos_voters(maybe_max_len: Option) -> Vec> { let max_allowed_len = { let all_voter_count = T::VoterList::count() as usize; @@ -719,7 +715,6 @@ impl Pallet { // cache a few things. 
let weight_of = Self::weight_of_fn(); - let slashing_spans = <SlashingSpans<T>>::iter().collect::<BTreeMap<_, _>>(); let mut voters_seen = 0u32; let mut validators_taken = 0u32; @@ -737,18 +732,12 @@ impl<T: Config> Pallet<T> { None => break, }; - if let Some(Nominations { submitted_in, mut targets, suppressed: _ }) = - <Nominators<T>>::get(&voter) - { - // if this voter is a nominator: - targets.retain(|stash| { - slashing_spans - .get(stash) - .map_or(true, |spans| submitted_in >= spans.last_nonzero_slash()) - }); - if !targets.len().is_zero() { + if let Some(Nominations { targets, .. }) = <Nominators<T>>::get(&voter) { + if !targets.is_empty() { all_voters.push((voter.clone(), weight_of(&voter), targets)); nominators_taken.saturating_inc(); + } else { + // Technically should never happen, but not much we can do about it. } } else if Validators::<T>::contains_key(&voter) { // if this voter is a validator: @@ -771,18 +760,14 @@ impl<T: Config> Pallet<T> { warn, "DEFENSIVE: invalid item in `VoterList`: {:?}, this nominator probably has too many nominations now", voter - ) + ); } } // all_voters should have not re-allocated. debug_assert!(all_voters.capacity() == max_allowed_len); - Self::register_weight(T::WeightInfo::get_npos_voters( - validators_taken, - nominators_taken, - slashing_spans.len() as u32, - )); + Self::register_weight(T::WeightInfo::get_npos_voters(validators_taken, nominators_taken)); log!( info, @@ -1285,6 +1270,12 @@ where disable_strategy, }); + Self::deposit_event(Event::<T>::SlashReported { + validator: stash.clone(), + fraction: *slash_fraction, + slash_era, + }); + if let Some(mut unapplied) = unapplied { let nominators_len = unapplied.others.len() as u64; let reporters_len = details.reporters.len() as u64; diff --git a/frame/staking/src/pallet/mod.rs b/frame/staking/src/pallet/mod.rs index 9dc39dd4a2116..fda455ca3c166 100644 --- a/frame/staking/src/pallet/mod.rs +++ b/frame/staking/src/pallet/mod.rs @@ -517,7 +517,7 @@ pub mod pallet { #[pallet::storage] #[pallet::getter(fn slashing_spans)] #[pallet::unbounded] - pub(crate) type SlashingSpans<T: Config> = + pub type SlashingSpans<T: Config> = StorageMap<_, Twox64Concat, T::AccountId, slashing::SlashingSpans>; /// Records information about the maximum slash of a stash within a slashing span, @@ -671,8 +671,11 @@ pub mod pallet { EraPaid { era_index: EraIndex, validator_payout: BalanceOf<T>, remainder: BalanceOf<T> }, /// The nominator has been rewarded by this amount. Rewarded { stash: T::AccountId, amount: BalanceOf<T> }, - /// One staker (and potentially its nominators) has been slashed by the given amount. + /// A staker (validator or nominator) has been slashed by the given amount. Slashed { staker: T::AccountId, amount: BalanceOf<T> }, + /// A slash for the given validator, for the given percentage of their stake, at the given + /// era has been reported. + SlashReported { validator: T::AccountId, fraction: Perbill, slash_era: EraIndex }, /// An old slashing report from a prior era was discarded because it could /// not be processed.
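With the slashing-span `retain` removed in the hunk above, nominations enter the snapshot as submitted; only the length cap and a defensive skip of empty target lists remain. A toy sketch of the resulting loop (simplified types; the real code walks `T::VoterList` and computes each stash's weight):

struct Voter {
    who: u32,
    weight: u64,
    targets: Vec<u32>, // for a validator, this is vec![who]
}

fn electing_voters(list: &[(u32, u64, Vec<u32>)], max_allowed_len: usize) -> Vec<Voter> {
    let mut all_voters = Vec::with_capacity(max_allowed_len);
    for (who, weight, targets) in list {
        if all_voters.len() >= max_allowed_len {
            break
        }
        // Should never happen, but skip defensively, as the hunk above notes.
        if targets.is_empty() {
            continue
        }
        all_voters.push(Voter { who: *who, weight: *weight, targets: targets.clone() });
    }
    all_voters
}

fn main() {
    let list = vec![
        (11, 1_000, vec![11]),    // a validator voting for itself
        (101, 500, vec![11, 21]), // a nominator: kept even if 11 was slashed
        (61, 2_000, vec![]),      // defensively skipped
    ];
    let snapshot = electing_voters(&list, 10);
    assert_eq!(snapshot.len(), 2);
    assert_eq!(snapshot[0].who, 11);
    assert_eq!(snapshot[1].weight, 500);
    assert_eq!(snapshot[1].targets, vec![11, 21]);
}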
OldSlashingReportDiscarded { session_index: SessionIndex }, diff --git a/frame/staking/src/slashing.rs b/frame/staking/src/slashing.rs index a1900136d64fd..aeea0a1a58c63 100644 --- a/frame/staking/src/slashing.rs +++ b/frame/staking/src/slashing.rs @@ -239,9 +239,9 @@ pub(crate) fn compute_slash( return None } - let (prior_slash_p, _era_slash) = + let prior_slash_p = as Store>::ValidatorSlashInEra::get(¶ms.slash_era, params.stash) - .unwrap_or((Perbill::zero(), Zero::zero())); + .map_or(Zero::zero(), |(prior_slash_proportion, _)| prior_slash_proportion); // compare slash proportions rather than slash values to avoid issues due to rounding // error. @@ -390,9 +390,7 @@ fn slash_nominators( let mut era_slash = as Store>::NominatorSlashInEra::get(¶ms.slash_era, stash) .unwrap_or_else(Zero::zero); - era_slash += own_slash_difference; - as Store>::NominatorSlashInEra::insert(¶ms.slash_era, stash, &era_slash); era_slash @@ -411,12 +409,10 @@ fn slash_nominators( let target_span = spans.compare_and_update_span_slash(params.slash_era, era_slash); if target_span == Some(spans.span_index()) { - // End the span, but don't chill the nominator. its nomination - // on this validator will be ignored in the future. + // end the span, but don't chill the nominator. spans.end_span(params.now); } } - nominators_slashed.push((stash.clone(), nom_slashed)); } diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 78429122d00f1..3e0a62f53d886 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -2845,6 +2845,8 @@ fn deferred_slashes_are_deferred() { assert_eq!(Balances::free_balance(101), 2000); let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; + System::reset_events(); + on_offence_now( &[OffenceDetails { offender: (11, Staking::eras_stakers(active_era(), 11)), @@ -2853,6 +2855,9 @@ fn deferred_slashes_are_deferred() { &[Perbill::from_percent(10)], ); + // nominations are not removed regardless of the deferring. + assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + assert_eq!(Balances::free_balance(11), 1000); assert_eq!(Balances::free_balance(101), 2000); @@ -2866,8 +2871,6 @@ fn deferred_slashes_are_deferred() { assert_eq!(Balances::free_balance(11), 1000); assert_eq!(Balances::free_balance(101), 2000); - System::reset_events(); - // at the start of era 4, slashes from era 1 are processed, // after being deferred for at least 2 full eras. mock::start_active_era(4); @@ -2875,15 +2878,16 @@ fn deferred_slashes_are_deferred() { assert_eq!(Balances::free_balance(11), 900); assert_eq!(Balances::free_balance(101), 2000 - (nominated_value / 10)); - assert_eq!( - staking_events_since_last_call(), - vec![ - Event::StakersElected, - Event::EraPaid { era_index: 3, validator_payout: 11075, remainder: 33225 }, + assert!(matches!( + staking_events_since_last_call().as_slice(), + &[ + Event::Chilled { stash: 11 }, + Event::SlashReported { validator: 11, slash_era: 1, .. 
}, + .., Event::Slashed { staker: 11, amount: 100 }, Event::Slashed { staker: 101, amount: 12 } ] - ); + )); }) } @@ -2896,25 +2900,29 @@ fn retroactive_deferred_slashes_two_eras_before() { let exposure_11_at_era1 = Staking::eras_stakers(active_era(), 11); mock::start_active_era(3); + + assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + + System::reset_events(); on_offence_in_era( &[OffenceDetails { offender: (11, exposure_11_at_era1), reporters: vec![] }], &[Perbill::from_percent(10)], 1, // should be deferred for two full eras, and applied at the beginning of era 4. DisableStrategy::Never, ); - System::reset_events(); mock::start_active_era(4); - assert_eq!( - staking_events_since_last_call(), - vec![ - Event::StakersElected, - Event::EraPaid { era_index: 3, validator_payout: 7100, remainder: 21300 }, + assert!(matches!( + staking_events_since_last_call().as_slice(), + &[ + Event::Chilled { stash: 11 }, + Event::SlashReported { validator: 11, slash_era: 1, .. }, + .., Event::Slashed { staker: 11, amount: 100 }, - Event::Slashed { staker: 101, amount: 12 }, + Event::Slashed { staker: 101, amount: 12 } ] - ); + )); }) } @@ -2932,35 +2940,29 @@ fn retroactive_deferred_slashes_one_before() { assert_ok!(Staking::unbond(RuntimeOrigin::signed(10), 100)); mock::start_active_era(3); + System::reset_events(); on_offence_in_era( &[OffenceDetails { offender: (11, exposure_11_at_era1), reporters: vec![] }], &[Perbill::from_percent(10)], 2, // should be deferred for two full eras, and applied at the beginning of era 5. DisableStrategy::Never, ); - System::reset_events(); mock::start_active_era(4); - assert_eq!( - staking_events_since_last_call(), - vec![ - Event::StakersElected, - Event::EraPaid { era_index: 3, validator_payout: 11075, remainder: 33225 } - ] - ); assert_eq!(Staking::ledger(10).unwrap().total, 1000); // slash happens after the next line. + mock::start_active_era(5); - assert_eq!( - staking_events_since_last_call(), - vec![ - Event::StakersElected, - Event::EraPaid { era_index: 4, validator_payout: 11075, remainder: 33225 }, + assert!(matches!( + staking_events_since_last_call().as_slice(), + &[ + Event::SlashReported { validator: 11, slash_era: 2, .. }, + .., Event::Slashed { staker: 11, amount: 100 }, Event::Slashed { staker: 101, amount: 12 } ] - ); + )); // their ledger has already been slashed. assert_eq!(Staking::ledger(10).unwrap().total, 900); @@ -3068,6 +3070,7 @@ fn remove_deferred() { mock::start_active_era(2); // reported later, but deferred to start of era 4 as well. + System::reset_events(); on_offence_in_era( &[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }], &[Perbill::from_percent(15)], @@ -3094,19 +3097,18 @@ fn remove_deferred() { // at the start of era 4, slashes from era 1 are processed, // after being deferred for at least 2 full eras. - System::reset_events(); mock::start_active_era(4); - // the first slash for 10% was cancelled, but the 15% one - assert_eq!( - staking_events_since_last_call(), - vec![ - Event::StakersElected, - Event::EraPaid { era_index: 3, validator_payout: 11075, remainder: 33225 }, + // the first slash for 10% was cancelled, but the 15% one not. + assert!(matches!( + staking_events_since_last_call().as_slice(), + &[ + Event::SlashReported { validator: 11, slash_era: 1, .. 
}, + .., Event::Slashed { staker: 11, amount: 50 }, Event::Slashed { staker: 101, amount: 7 } ] - ); + )); let slash_10 = Perbill::from_percent(10); let slash_15 = Perbill::from_percent(15); @@ -3196,6 +3198,9 @@ fn slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_valid assert_eq!(Balances::free_balance(11), 1000); assert_eq!(Balances::free_balance(101), 2000); + // 100 has approval for 11 as of now + assert!(Staking::nominators(101).unwrap().targets.contains(&11)); + // 11 and 21 both have the support of 100 let exposure_11 = Staking::eras_stakers(active_era(), &11); let exposure_21 = Staking::eras_stakers(active_era(), &21); @@ -3208,23 +3213,29 @@ fn slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_valid &[Perbill::from_percent(10)], ); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::StakersElected, + Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 }, + Event::Chilled { stash: 11 }, + Event::SlashReported { + validator: 11, + fraction: Perbill::from_percent(10), + slash_era: 1 + }, + Event::Slashed { staker: 11, amount: 100 }, + Event::Slashed { staker: 101, amount: 12 }, + ] + ); + // post-slash balance let nominator_slash_amount_11 = 125 / 10; assert_eq!(Balances::free_balance(11), 900); assert_eq!(Balances::free_balance(101), 2000 - nominator_slash_amount_11); - // This is the best way to check that the validator was chilled; `get` will - // return default value. - for (stash, _) in ::Validators::iter() { - assert!(stash != 11); - } - - let nominations = ::Nominators::get(&101).unwrap(); - - // and make sure that the vote will be ignored even if the validator - // re-registers. - let last_slash = ::SlashingSpans::get(&11).unwrap().last_nonzero_slash(); - assert!(nominations.submitted_in < last_slash); + // check that validator was chilled. + assert!(::Validators::iter().all(|(stash, _)| stash != 11)); // actually re-bond the slashed validator assert_ok!(Staking::validate(RuntimeOrigin::signed(10), Default::default())); @@ -3233,11 +3244,12 @@ fn slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_valid let exposure_11 = Staking::eras_stakers(active_era(), &11); let exposure_21 = Staking::eras_stakers(active_era(), &21); - // 10 is re-elected, but without the support of 100 - assert_eq!(exposure_11.total, 900); - - // 20 is re-elected, with the (almost) entire support of 100 - assert_eq!(exposure_21.total, 1000 + 500 - nominator_slash_amount_11); + // 11's own expo is reduced. sum of support from 11 is less (448), which is 500 + // 900 + 146 + assert!(matches!(exposure_11, Exposure { own: 900, total: 1046, .. })); + // 1000 + 342 + assert!(matches!(exposure_21, Exposure { own: 1000, total: 1342, .. })); + assert_eq!(500 - 146 - 342, nominator_slash_amount_11); }); } @@ -3256,12 +3268,40 @@ fn non_slashable_offence_doesnt_disable_validator() { &[Perbill::zero()], ); + // it does NOT affect the nominator. + assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + // offence that slashes 25% of the bond on_offence_now( &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], &[Perbill::from_percent(25)], ); + // it DOES NOT affect the nominator. 
+ assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::StakersElected, + Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 }, + Event::Chilled { stash: 11 }, + Event::SlashReported { + validator: 11, + fraction: Perbill::from_percent(0), + slash_era: 1 + }, + Event::Chilled { stash: 21 }, + Event::SlashReported { + validator: 21, + fraction: Perbill::from_percent(25), + slash_era: 1 + }, + Event::Slashed { staker: 21, amount: 250 }, + Event::Slashed { staker: 101, amount: 94 } + ] + ); + // the offence for validator 10 wasn't slashable so it wasn't disabled assert!(!is_disabled(10)); // whereas validator 20 gets disabled @@ -3288,6 +3328,9 @@ fn slashing_independent_of_disabling_validator() { DisableStrategy::Always, ); + // nomination remains untouched. + assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + // offence that slashes 25% of the bond, BUT not disabling on_offence_in_era( &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], @@ -3296,6 +3339,31 @@ fn slashing_independent_of_disabling_validator() { DisableStrategy::Never, ); + // nomination remains untouched. + assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::StakersElected, + Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 }, + Event::Chilled { stash: 11 }, + Event::SlashReported { + validator: 11, + fraction: Perbill::from_percent(0), + slash_era: 1 + }, + Event::Chilled { stash: 21 }, + Event::SlashReported { + validator: 21, + fraction: Perbill::from_percent(25), + slash_era: 1 + }, + Event::Slashed { staker: 21, amount: 250 }, + Event::Slashed { staker: 101, amount: 94 } + ] + ); + // the offence for validator 10 was explicitly disabled assert!(is_disabled(10)); // whereas validator 20 is explicitly not disabled @@ -3370,6 +3438,9 @@ fn disabled_validators_are_kept_disabled_for_whole_era() { &[Perbill::from_percent(25)], ); + // nominations are not updated. + assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + // validator 10 should not be disabled since the offence wasn't slashable assert!(!is_disabled(10)); // validator 20 gets disabled since it got slashed @@ -3387,6 +3458,9 @@ fn disabled_validators_are_kept_disabled_for_whole_era() { &[Perbill::from_percent(25)], ); + // nominations are not updated. + assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + advance_session(); // and both are disabled in the last session of the era @@ -3503,18 +3577,10 @@ fn zero_slash_keeps_nominators() { assert_eq!(Balances::free_balance(11), 1000); assert_eq!(Balances::free_balance(101), 2000); - // This is the best way to check that the validator was chilled; `get` will - // return default value. - for (stash, _) in ::Validators::iter() { - assert!(stash != 11); - } - - let nominations = ::Nominators::get(&101).unwrap(); - - // and make sure that the vote will not be ignored, because the slash was - // zero. - let last_slash = ::SlashingSpans::get(&11).unwrap().last_nonzero_slash(); - assert!(nominations.submitted_in >= last_slash); + // 11 is still removed.. + assert!(::Validators::iter().all(|(stash, _)| stash != 11)); + // but their nominations are kept. 
+ assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); }); } @@ -4380,15 +4446,14 @@ mod election_data_provider { // we assume a network only wants up to 1000 validators in most cases, thus having 2000 // candidates is as high as it gets. let validators = 2000; - // we assume the worse case: each validator also has a slashing span. - let slashing_spans = validators; let mut nominators = 1000; - while ::WeightInfo::get_npos_voters(validators, nominators, slashing_spans) - .all_lt(Weight::from_parts( + while ::WeightInfo::get_npos_voters(validators, nominators).all_lt( + Weight::from_parts( 2u64 * frame_support::weights::constants::WEIGHT_REF_TIME_PER_SECOND, u64::MAX, - )) { + ), + ) { nominators += 1; } @@ -4410,49 +4475,6 @@ mod election_data_provider { }) } - #[test] - fn voters_exclude_slashed() { - ExtBuilder::default().build_and_execute(|| { - assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); - assert_eq!( - ::electing_voters(None) - .unwrap() - .iter() - .find(|x| x.0 == 101) - .unwrap() - .2, - vec![11, 21] - ); - - start_active_era(1); - add_slash(&11); - - // 11 is gone. - start_active_era(2); - assert_eq!( - ::electing_voters(None) - .unwrap() - .iter() - .find(|x| x.0 == 101) - .unwrap() - .2, - vec![21] - ); - - // resubmit and it is back - assert_ok!(Staking::nominate(RuntimeOrigin::signed(100), vec![11, 21])); - assert_eq!( - ::electing_voters(None) - .unwrap() - .iter() - .find(|x| x.0 == 101) - .unwrap() - .2, - vec![11, 21] - ); - }) - } - #[test] fn respects_snapshot_len_limits() { ExtBuilder::default() @@ -4489,10 +4511,26 @@ mod election_data_provider { fn only_iterates_max_2_times_max_allowed_len() { ExtBuilder::default() .nominate(false) - // the other nominators only nominate 21 - .add_staker(61, 60, 2_000, StakerStatus::::Nominator(vec![21])) - .add_staker(71, 70, 2_000, StakerStatus::::Nominator(vec![21])) - .add_staker(81, 80, 2_000, StakerStatus::::Nominator(vec![21])) + // the best way to invalidate a bunch of nominators is to have them nominate a lot of + // ppl, but then lower the MaxNomination limit. + .add_staker( + 61, + 60, + 2_000, + StakerStatus::::Nominator(vec![21, 22, 23, 24, 25]), + ) + .add_staker( + 71, + 70, + 2_000, + StakerStatus::::Nominator(vec![21, 22, 23, 24, 25]), + ) + .add_staker( + 81, + 80, + 2_000, + StakerStatus::::Nominator(vec![21, 22, 23, 24, 25]), + ) .build_and_execute(|| { // all voters ordered by stake, assert_eq!( @@ -4500,10 +4538,7 @@ mod election_data_provider { vec![61, 71, 81, 11, 21, 31] ); - run_to_block(25); - - // slash 21, the only validator nominated by our first 3 nominators - add_slash(&21); + MaxNominations::set(2); // we want 2 voters now, and in maximum we allow 4 iterations. This is what happens: // 61 is pruned; @@ -4523,55 +4558,6 @@ mod election_data_provider { }); } - // Even if some of the higher staked nominators are slashed, we still get up to max len voters - // by adding more lower staked nominators. In other words, we assert that we keep on adding - // valid nominators until we reach max len voters; which is opposed to simply stopping after we - // have iterated max len voters, but not adding all of them to voters due to some nominators not - // having valid targets. 
- #[test] - fn get_max_len_voters_even_if_some_nominators_are_slashed() { - ExtBuilder::default() - .nominate(false) - .add_staker(61, 60, 20, StakerStatus::::Nominator(vec![21])) - .add_staker(71, 70, 10, StakerStatus::::Nominator(vec![11, 21])) - .add_staker(81, 80, 10, StakerStatus::::Nominator(vec![11, 21])) - .build_and_execute(|| { - // given our voters ordered by stake, - assert_eq!( - ::VoterList::iter().collect::>(), - vec![11, 21, 31, 61, 71, 81] - ); - - // we take 4 voters - assert_eq!( - Staking::electing_voters(Some(4)) - .unwrap() - .iter() - .map(|(stash, _, _)| stash) - .copied() - .collect::>(), - vec![11, 21, 31, 61], - ); - - // roll to session 5 - run_to_block(25); - - // slash 21, the only validator nominated by 61. - add_slash(&21); - - // we take 4 voters; 71 and 81 are replacing the ejected ones. - assert_eq!( - Staking::electing_voters(Some(4)) - .unwrap() - .iter() - .map(|(stash, _, _)| stash) - .copied() - .collect::>(), - vec![11, 31, 71, 81], - ); - }); - } - #[test] fn estimate_next_election_works() { ExtBuilder::default().session_per_era(5).period(5).build_and_execute(|| { diff --git a/frame/staking/src/weights.rs b/frame/staking/src/weights.rs index 56374ffbc4b62..21fc3d6f077bc 100644 --- a/frame/staking/src/weights.rs +++ b/frame/staking/src/weights.rs @@ -18,24 +18,25 @@ //! Autogenerated weights for pallet_staking //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-12-12, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/substrate +// /home/benchbot/cargo_target_dir/production/substrate // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_staking // --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/staking/src/weights.rs +// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/substrate/.git/.artifacts/bench.json +// --pallet=pallet_staking +// --chain=dev // --header=./HEADER-APACHE2 +// --output=./frame/staking/src/weights.rs // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -70,7 +71,7 @@ pub trait WeightInfo { fn rebond(l: u32, ) -> Weight; fn reap_stash(s: u32, ) -> Weight; fn new_era(v: u32, n: u32, ) -> Weight; - fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight; + fn get_npos_voters(v: u32, n: u32, ) -> Weight; fn get_npos_targets(v: u32, ) -> Weight; fn set_staking_configs_all_set() -> Weight; fn set_staking_configs_all_remove() -> Weight; @@ -87,10 +88,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) fn bond() -> Weight { - // Minimum execution time: 53_097 nanoseconds. - Weight::from_ref_time(53_708_000 as u64) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + // Minimum execution time: 56_034 nanoseconds. 
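+		// (as in every generated body in this file: a benchmarked base
+		// `ref_time`, plus a per-component slope where the extrinsic has one,
+		// plus `T::DbWeight` charges for the storage reads/writes listed in
+		// the comments above each function.)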
+ Weight::from_ref_time(56_646_000) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) } // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) @@ -98,10 +99,10 @@ impl WeightInfo for SubstrateWeight { // Storage: VoterList ListNodes (r:3 w:3) // Storage: VoterList ListBags (r:2 w:2) fn bond_extra() -> Weight { - // Minimum execution time: 92_199 nanoseconds. - Weight::from_ref_time(93_541_000 as u64) - .saturating_add(T::DbWeight::get().reads(8 as u64)) - .saturating_add(T::DbWeight::get().writes(7 as u64)) + // Minimum execution time: 94_354 nanoseconds. + Weight::from_ref_time(95_318_000) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(7)) } // Storage: Staking Ledger (r:1 w:1) // Storage: Staking Nominators (r:1 w:0) @@ -113,10 +114,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Bonded (r:1 w:0) // Storage: VoterList ListBags (r:2 w:2) fn unbond() -> Weight { - // Minimum execution time: 98_227 nanoseconds. - Weight::from_ref_time(99_070_000 as u64) - .saturating_add(T::DbWeight::get().reads(12 as u64)) - .saturating_add(T::DbWeight::get().writes(8 as u64)) + // Minimum execution time: 99_960 nanoseconds. + Weight::from_ref_time(101_022_000) + .saturating_add(T::DbWeight::get().reads(12)) + .saturating_add(T::DbWeight::get().writes(8)) } // Storage: Staking Ledger (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) @@ -124,12 +125,12 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { - // Minimum execution time: 45_058 nanoseconds. - Weight::from_ref_time(46_592_713 as u64) - // Standard Error: 413 - .saturating_add(Weight::from_ref_time(63_036 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + // Minimum execution time: 45_819 nanoseconds. + Weight::from_ref_time(48_073_614) + // Standard Error: 1_410 + .saturating_add(Weight::from_ref_time(62_881).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) } // Storage: Staking Ledger (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) @@ -146,10 +147,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Payee (r:0 w:1) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_kill(_s: u32, ) -> Weight { - // Minimum execution time: 86_087 nanoseconds. - Weight::from_ref_time(87_627_894 as u64) - .saturating_add(T::DbWeight::get().reads(13 as u64)) - .saturating_add(T::DbWeight::get().writes(11 as u64)) + // Minimum execution time: 86_035 nanoseconds. + Weight::from_ref_time(89_561_735) + .saturating_add(T::DbWeight::get().reads(13)) + .saturating_add(T::DbWeight::get().writes(11)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking MinValidatorBond (r:1 w:0) @@ -163,22 +164,22 @@ impl WeightInfo for SubstrateWeight { // Storage: VoterList CounterForListNodes (r:1 w:1) // Storage: Staking CounterForValidators (r:1 w:1) fn validate() -> Weight { - // Minimum execution time: 67_690 nanoseconds. - Weight::from_ref_time(68_348_000 as u64) - .saturating_add(T::DbWeight::get().reads(11 as u64)) - .saturating_add(T::DbWeight::get().writes(5 as u64)) + // Minimum execution time: 68_748 nanoseconds. 
+ Weight::from_ref_time(69_285_000) + .saturating_add(T::DbWeight::get().reads(11)) + .saturating_add(T::DbWeight::get().writes(5)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) /// The range of component `k` is `[1, 128]`. fn kick(k: u32, ) -> Weight { - // Minimum execution time: 43_512 nanoseconds. - Weight::from_ref_time(47_300_477 as u64) - // Standard Error: 11_609 - .saturating_add(Weight::from_ref_time(6_770_405 as u64).saturating_mul(k as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(k as u64))) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(k as u64))) + // Minimum execution time: 41_641 nanoseconds. + Weight::from_ref_time(48_919_231) + // Standard Error: 11_548 + .saturating_add(Weight::from_ref_time(6_901_201).saturating_mul(k.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking MinNominatorBond (r:1 w:0) @@ -193,13 +194,13 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking CounterForNominators (r:1 w:1) /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { - // Minimum execution time: 74_296 nanoseconds. - Weight::from_ref_time(73_201_782 as u64) - // Standard Error: 5_007 - .saturating_add(Weight::from_ref_time(2_810_370 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(12 as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(6 as u64)) + // Minimum execution time: 75_097 nanoseconds. + Weight::from_ref_time(74_052_497) + // Standard Error: 6_784 + .saturating_add(Weight::from_ref_time(2_842_146).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(12)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes(6)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Validators (r:1 w:0) @@ -209,59 +210,59 @@ impl WeightInfo for SubstrateWeight { // Storage: VoterList ListBags (r:1 w:1) // Storage: VoterList CounterForListNodes (r:1 w:1) fn chill() -> Weight { - // Minimum execution time: 66_605 nanoseconds. - Weight::from_ref_time(67_279_000 as u64) - .saturating_add(T::DbWeight::get().reads(8 as u64)) - .saturating_add(T::DbWeight::get().writes(6 as u64)) + // Minimum execution time: 67_307 nanoseconds. + Weight::from_ref_time(67_838_000) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(6)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Payee (r:0 w:1) fn set_payee() -> Weight { - // Minimum execution time: 18_897 nanoseconds. - Weight::from_ref_time(19_357_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + // Minimum execution time: 18_831 nanoseconds. + Weight::from_ref_time(19_047_000) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) } // Storage: Staking Bonded (r:1 w:1) // Storage: Staking Ledger (r:2 w:2) fn set_controller() -> Weight { - // Minimum execution time: 26_509 nanoseconds. 
- Weight::from_ref_time(26_961_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + // Minimum execution time: 27_534 nanoseconds. + Weight::from_ref_time(27_806_000) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) } // Storage: Staking ValidatorCount (r:0 w:1) fn set_validator_count() -> Weight { - // Minimum execution time: 5_025 nanoseconds. - Weight::from_ref_time(5_240_000 as u64) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + // Minimum execution time: 5_211 nanoseconds. + Weight::from_ref_time(5_372_000) + .saturating_add(T::DbWeight::get().writes(1)) } // Storage: Staking ForceEra (r:0 w:1) fn force_no_eras() -> Weight { - // Minimum execution time: 5_107 nanoseconds. - Weight::from_ref_time(5_320_000 as u64) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + // Minimum execution time: 5_382 nanoseconds. + Weight::from_ref_time(5_654_000) + .saturating_add(T::DbWeight::get().writes(1)) } // Storage: Staking ForceEra (r:0 w:1) fn force_new_era() -> Weight { - // Minimum execution time: 5_094 nanoseconds. - Weight::from_ref_time(5_377_000 as u64) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + // Minimum execution time: 5_618 nanoseconds. + Weight::from_ref_time(5_714_000) + .saturating_add(T::DbWeight::get().writes(1)) } // Storage: Staking ForceEra (r:0 w:1) fn force_new_era_always() -> Weight { - // Minimum execution time: 5_219 nanoseconds. - Weight::from_ref_time(5_434_000 as u64) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + // Minimum execution time: 5_589 nanoseconds. + Weight::from_ref_time(5_776_000) + .saturating_add(T::DbWeight::get().writes(1)) } // Storage: Staking Invulnerables (r:0 w:1) /// The range of component `v` is `[0, 1000]`. fn set_invulnerables(v: u32, ) -> Weight { - // Minimum execution time: 5_122 nanoseconds. - Weight::from_ref_time(5_977_533 as u64) - // Standard Error: 34 - .saturating_add(Weight::from_ref_time(10_205 as u64).saturating_mul(v as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + // Minimum execution time: 5_541 nanoseconds. + Weight::from_ref_time(6_479_253) + // Standard Error: 49 + .saturating_add(Weight::from_ref_time(10_125).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().writes(1)) } // Storage: Staking Bonded (r:1 w:1) // Storage: Staking SlashingSpans (r:1 w:0) @@ -278,23 +279,23 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking SpanSlash (r:0 w:2) /// The range of component `s` is `[0, 100]`. fn force_unstake(s: u32, ) -> Weight { - // Minimum execution time: 80_216 nanoseconds. - Weight::from_ref_time(86_090_609 as u64) - // Standard Error: 2_006 - .saturating_add(Weight::from_ref_time(1_039_308 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(11 as u64)) - .saturating_add(T::DbWeight::get().writes(12 as u64)) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(s as u64))) + // Minimum execution time: 81_041 nanoseconds. + Weight::from_ref_time(88_526_481) + // Standard Error: 11_494 + .saturating_add(Weight::from_ref_time(1_095_933).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(11)) + .saturating_add(T::DbWeight::get().writes(12)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) } // Storage: Staking UnappliedSlashes (r:1 w:1) /// The range of component `s` is `[1, 1000]`. 
fn cancel_deferred_slash(s: u32, ) -> Weight { - // Minimum execution time: 92_034 nanoseconds. - Weight::from_ref_time(896_585_370 as u64) - // Standard Error: 58_231 - .saturating_add(Weight::from_ref_time(4_908_277 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + // Minimum execution time: 92_308 nanoseconds. + Weight::from_ref_time(900_351_007) + // Standard Error: 59_145 + .saturating_add(Weight::from_ref_time(4_944_988).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) } // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking ErasValidatorReward (r:1 w:0) @@ -307,14 +308,14 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) /// The range of component `n` is `[0, 256]`. fn payout_stakers_dead_controller(n: u32, ) -> Weight { - // Minimum execution time: 127_936 nanoseconds. - Weight::from_ref_time(184_556_084 as u64) - // Standard Error: 26_981 - .saturating_add(Weight::from_ref_time(21_786_304 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(9 as u64)) - .saturating_add(T::DbWeight::get().reads((3 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(2 as u64)) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(n as u64))) + // Minimum execution time: 131_855 nanoseconds. + Weight::from_ref_time(197_412_779) + // Standard Error: 21_283 + .saturating_add(Weight::from_ref_time(22_093_758).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) } // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking ErasValidatorReward (r:1 w:0) @@ -328,14 +329,14 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) /// The range of component `n` is `[0, 256]`. fn payout_stakers_alive_staked(n: u32, ) -> Weight { - // Minimum execution time: 157_778 nanoseconds. - Weight::from_ref_time(223_306_359 as u64) - // Standard Error: 27_216 - .saturating_add(Weight::from_ref_time(30_612_663 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(10 as u64)) - .saturating_add(T::DbWeight::get().reads((5 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(3 as u64)) - .saturating_add(T::DbWeight::get().writes((3 as u64).saturating_mul(n as u64))) + // Minimum execution time: 163_118 nanoseconds. + Weight::from_ref_time(229_356_697) + // Standard Error: 30_740 + .saturating_add(Weight::from_ref_time(31_575_360).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(n.into()))) } // Storage: Staking Ledger (r:1 w:1) // Storage: Balances Locks (r:1 w:1) @@ -345,12 +346,12 @@ impl WeightInfo for SubstrateWeight { // Storage: VoterList ListBags (r:2 w:2) /// The range of component `l` is `[1, 32]`. fn rebond(l: u32, ) -> Weight { - // Minimum execution time: 92_880 nanoseconds. 
- Weight::from_ref_time(94_434_663 as u64) - // Standard Error: 1_734 - .saturating_add(Weight::from_ref_time(34_453 as u64).saturating_mul(l as u64)) - .saturating_add(T::DbWeight::get().reads(9 as u64)) - .saturating_add(T::DbWeight::get().writes(8 as u64)) + // Minimum execution time: 94_048 nanoseconds. + Weight::from_ref_time(95_784_236) + // Standard Error: 2_313 + .saturating_add(Weight::from_ref_time(52_798).saturating_mul(l.into())) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(8)) } // Storage: System Account (r:1 w:1) // Storage: Staking Bonded (r:1 w:1) @@ -367,16 +368,15 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking SpanSlash (r:0 w:1) /// The range of component `s` is `[1, 100]`. fn reap_stash(s: u32, ) -> Weight { - // Minimum execution time: 92_334 nanoseconds. - Weight::from_ref_time(95_207_614 as u64) - // Standard Error: 1_822 - .saturating_add(Weight::from_ref_time(1_036_787 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(12 as u64)) - .saturating_add(T::DbWeight::get().writes(12 as u64)) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(s as u64))) + // Minimum execution time: 93_342 nanoseconds. + Weight::from_ref_time(95_756_184) + // Standard Error: 2_067 + .saturating_add(Weight::from_ref_time(1_090_785).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(12)) + .saturating_add(T::DbWeight::get().writes(12)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) } // Storage: VoterList CounterForListNodes (r:1 w:0) - // Storage: Staking SlashingSpans (r:1 w:0) // Storage: VoterList ListBags (r:200 w:0) // Storage: VoterList ListNodes (r:101 w:0) // Storage: Staking Nominators (r:101 w:0) @@ -395,20 +395,19 @@ impl WeightInfo for SubstrateWeight { /// The range of component `v` is `[1, 10]`. /// The range of component `n` is `[0, 100]`. fn new_era(v: u32, n: u32, ) -> Weight { - // Minimum execution time: 535_169 nanoseconds. - Weight::from_ref_time(548_667_000 as u64) - // Standard Error: 1_759_252 - .saturating_add(Weight::from_ref_time(58_283_319 as u64).saturating_mul(v as u64)) - // Standard Error: 175_299 - .saturating_add(Weight::from_ref_time(13_578_512 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(207 as u64)) - .saturating_add(T::DbWeight::get().reads((5 as u64).saturating_mul(v as u64))) - .saturating_add(T::DbWeight::get().reads((4 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(3 as u64)) - .saturating_add(T::DbWeight::get().writes((3 as u64).saturating_mul(v as u64))) + // Minimum execution time: 506_874 nanoseconds. 
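+		// (the `Staking SlashingSpans` read removed from the storage comments
+		// above is why the base reads drop from 207 to 206 here.)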
+ Weight::from_ref_time(507_798_000) + // Standard Error: 1_802_261 + .saturating_add(Weight::from_ref_time(59_874_736).saturating_mul(v.into())) + // Standard Error: 179_585 + .saturating_add(Weight::from_ref_time(13_668_574).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(206)) + .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) + .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(v.into()))) } // Storage: VoterList CounterForListNodes (r:1 w:0) - // Storage: Staking SlashingSpans (r:21 w:0) // Storage: VoterList ListBags (r:200 w:0) // Storage: VoterList ListNodes (r:1500 w:0) // Storage: Staking Nominators (r:1500 w:0) @@ -417,29 +416,27 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Ledger (r:1500 w:0) /// The range of component `v` is `[500, 1000]`. /// The range of component `n` is `[500, 1000]`. - /// The range of component `s` is `[1, 20]`. - fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight { - // Minimum execution time: 25_323_129 nanoseconds. - Weight::from_ref_time(25_471_672_000 as u64) - // Standard Error: 266_391 - .saturating_add(Weight::from_ref_time(6_665_504 as u64).saturating_mul(v as u64)) - // Standard Error: 266_391 - .saturating_add(Weight::from_ref_time(6_956_606 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(202 as u64)) - .saturating_add(T::DbWeight::get().reads((5 as u64).saturating_mul(v as u64))) - .saturating_add(T::DbWeight::get().reads((4 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(s as u64))) + fn get_npos_voters(v: u32, n: u32, ) -> Weight { + // Minimum execution time: 24_634_585 nanoseconds. + Weight::from_ref_time(24_718_377_000) + // Standard Error: 324_839 + .saturating_add(Weight::from_ref_time(3_654_508).saturating_mul(v.into())) + // Standard Error: 324_839 + .saturating_add(Weight::from_ref_time(2_927_535).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(201)) + .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) + .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) } // Storage: Staking CounterForValidators (r:1 w:0) // Storage: Staking Validators (r:501 w:0) /// The range of component `v` is `[500, 1000]`. fn get_npos_targets(v: u32, ) -> Weight { - // Minimum execution time: 4_905_036 nanoseconds. - Weight::from_ref_time(78_163_554 as u64) - // Standard Error: 23_723 - .saturating_add(Weight::from_ref_time(9_784_870 as u64).saturating_mul(v as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(v as u64))) + // Minimum execution time: 4_805_490 nanoseconds. + Weight::from_ref_time(118_475_494) + // Standard Error: 26_332 + .saturating_add(Weight::from_ref_time(9_635_188).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) } // Storage: Staking MinCommission (r:0 w:1) // Storage: Staking MinValidatorBond (r:0 w:1) @@ -448,9 +445,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking MaxNominatorsCount (r:0 w:1) // Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_configs_all_set() -> Weight { - // Minimum execution time: 10_096 nanoseconds. 
- Weight::from_ref_time(10_538_000 as u64) - .saturating_add(T::DbWeight::get().writes(6 as u64)) + // Minimum execution time: 10_816 nanoseconds. + Weight::from_ref_time(11_242_000) + .saturating_add(T::DbWeight::get().writes(6)) } // Storage: Staking MinCommission (r:0 w:1) // Storage: Staking MinValidatorBond (r:0 w:1) @@ -459,9 +456,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking MaxNominatorsCount (r:0 w:1) // Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_configs_all_remove() -> Weight { - // Minimum execution time: 9_045 nanoseconds. - Weight::from_ref_time(9_379_000 as u64) - .saturating_add(T::DbWeight::get().writes(6 as u64)) + // Minimum execution time: 9_581 nanoseconds. + Weight::from_ref_time(10_383_000) + .saturating_add(T::DbWeight::get().writes(6)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) @@ -474,18 +471,18 @@ impl WeightInfo for SubstrateWeight { // Storage: VoterList ListBags (r:1 w:1) // Storage: VoterList CounterForListNodes (r:1 w:1) fn chill_other() -> Weight { - // Minimum execution time: 81_457 nanoseconds. - Weight::from_ref_time(82_410_000 as u64) - .saturating_add(T::DbWeight::get().reads(11 as u64)) - .saturating_add(T::DbWeight::get().writes(6 as u64)) + // Minimum execution time: 83_669 nanoseconds. + Weight::from_ref_time(84_772_000) + .saturating_add(T::DbWeight::get().reads(11)) + .saturating_add(T::DbWeight::get().writes(6)) } // Storage: Staking MinCommission (r:1 w:0) // Storage: Staking Validators (r:1 w:1) fn force_apply_min_commission() -> Weight { - // Minimum execution time: 19_684 nanoseconds. - Weight::from_ref_time(20_059_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + // Minimum execution time: 20_553 nanoseconds. + Weight::from_ref_time(20_933_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) } } @@ -497,10 +494,10 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) fn bond() -> Weight { - // Minimum execution time: 53_097 nanoseconds. - Weight::from_ref_time(53_708_000 as u64) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + // Minimum execution time: 56_034 nanoseconds. + Weight::from_ref_time(56_646_000) + .saturating_add(RocksDbWeight::get().reads(4)) + .saturating_add(RocksDbWeight::get().writes(4)) } // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) @@ -508,10 +505,10 @@ impl WeightInfo for () { // Storage: VoterList ListNodes (r:3 w:3) // Storage: VoterList ListBags (r:2 w:2) fn bond_extra() -> Weight { - // Minimum execution time: 92_199 nanoseconds. - Weight::from_ref_time(93_541_000 as u64) - .saturating_add(RocksDbWeight::get().reads(8 as u64)) - .saturating_add(RocksDbWeight::get().writes(7 as u64)) + // Minimum execution time: 94_354 nanoseconds. + Weight::from_ref_time(95_318_000) + .saturating_add(RocksDbWeight::get().reads(8)) + .saturating_add(RocksDbWeight::get().writes(7)) } // Storage: Staking Ledger (r:1 w:1) // Storage: Staking Nominators (r:1 w:0) @@ -523,10 +520,10 @@ impl WeightInfo for () { // Storage: Staking Bonded (r:1 w:0) // Storage: VoterList ListBags (r:2 w:2) fn unbond() -> Weight { - // Minimum execution time: 98_227 nanoseconds. 
- Weight::from_ref_time(99_070_000 as u64) - .saturating_add(RocksDbWeight::get().reads(12 as u64)) - .saturating_add(RocksDbWeight::get().writes(8 as u64)) + // Minimum execution time: 99_960 nanoseconds. + Weight::from_ref_time(101_022_000) + .saturating_add(RocksDbWeight::get().reads(12)) + .saturating_add(RocksDbWeight::get().writes(8)) } // Storage: Staking Ledger (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) @@ -534,12 +531,12 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:1) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { - // Minimum execution time: 45_058 nanoseconds. - Weight::from_ref_time(46_592_713 as u64) - // Standard Error: 413 - .saturating_add(Weight::from_ref_time(63_036 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + // Minimum execution time: 45_819 nanoseconds. + Weight::from_ref_time(48_073_614) + // Standard Error: 1_410 + .saturating_add(Weight::from_ref_time(62_881).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(4)) + .saturating_add(RocksDbWeight::get().writes(3)) } // Storage: Staking Ledger (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) @@ -556,10 +553,10 @@ impl WeightInfo for () { // Storage: Staking Payee (r:0 w:1) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_kill(_s: u32, ) -> Weight { - // Minimum execution time: 86_087 nanoseconds. - Weight::from_ref_time(87_627_894 as u64) - .saturating_add(RocksDbWeight::get().reads(13 as u64)) - .saturating_add(RocksDbWeight::get().writes(11 as u64)) + // Minimum execution time: 86_035 nanoseconds. + Weight::from_ref_time(89_561_735) + .saturating_add(RocksDbWeight::get().reads(13)) + .saturating_add(RocksDbWeight::get().writes(11)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking MinValidatorBond (r:1 w:0) @@ -573,22 +570,22 @@ impl WeightInfo for () { // Storage: VoterList CounterForListNodes (r:1 w:1) // Storage: Staking CounterForValidators (r:1 w:1) fn validate() -> Weight { - // Minimum execution time: 67_690 nanoseconds. - Weight::from_ref_time(68_348_000 as u64) - .saturating_add(RocksDbWeight::get().reads(11 as u64)) - .saturating_add(RocksDbWeight::get().writes(5 as u64)) + // Minimum execution time: 68_748 nanoseconds. + Weight::from_ref_time(69_285_000) + .saturating_add(RocksDbWeight::get().reads(11)) + .saturating_add(RocksDbWeight::get().writes(5)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) /// The range of component `k` is `[1, 128]`. fn kick(k: u32, ) -> Weight { - // Minimum execution time: 43_512 nanoseconds. - Weight::from_ref_time(47_300_477 as u64) - // Standard Error: 11_609 - .saturating_add(Weight::from_ref_time(6_770_405 as u64).saturating_mul(k as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(k as u64))) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(k as u64))) + // Minimum execution time: 41_641 nanoseconds. 
+ Weight::from_ref_time(48_919_231) + // Standard Error: 11_548 + .saturating_add(Weight::from_ref_time(6_901_201).saturating_mul(k.into())) + .saturating_add(RocksDbWeight::get().reads(1)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking MinNominatorBond (r:1 w:0) @@ -603,13 +600,13 @@ impl WeightInfo for () { // Storage: Staking CounterForNominators (r:1 w:1) /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { - // Minimum execution time: 74_296 nanoseconds. - Weight::from_ref_time(73_201_782 as u64) - // Standard Error: 5_007 - .saturating_add(Weight::from_ref_time(2_810_370 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(12 as u64)) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(6 as u64)) + // Minimum execution time: 75_097 nanoseconds. + Weight::from_ref_time(74_052_497) + // Standard Error: 6_784 + .saturating_add(Weight::from_ref_time(2_842_146).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(12)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(RocksDbWeight::get().writes(6)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Validators (r:1 w:0) @@ -619,59 +616,59 @@ impl WeightInfo for () { // Storage: VoterList ListBags (r:1 w:1) // Storage: VoterList CounterForListNodes (r:1 w:1) fn chill() -> Weight { - // Minimum execution time: 66_605 nanoseconds. - Weight::from_ref_time(67_279_000 as u64) - .saturating_add(RocksDbWeight::get().reads(8 as u64)) - .saturating_add(RocksDbWeight::get().writes(6 as u64)) + // Minimum execution time: 67_307 nanoseconds. + Weight::from_ref_time(67_838_000) + .saturating_add(RocksDbWeight::get().reads(8)) + .saturating_add(RocksDbWeight::get().writes(6)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Payee (r:0 w:1) fn set_payee() -> Weight { - // Minimum execution time: 18_897 nanoseconds. - Weight::from_ref_time(19_357_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + // Minimum execution time: 18_831 nanoseconds. + Weight::from_ref_time(19_047_000) + .saturating_add(RocksDbWeight::get().reads(1)) + .saturating_add(RocksDbWeight::get().writes(1)) } // Storage: Staking Bonded (r:1 w:1) // Storage: Staking Ledger (r:2 w:2) fn set_controller() -> Weight { - // Minimum execution time: 26_509 nanoseconds. - Weight::from_ref_time(26_961_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + // Minimum execution time: 27_534 nanoseconds. + Weight::from_ref_time(27_806_000) + .saturating_add(RocksDbWeight::get().reads(3)) + .saturating_add(RocksDbWeight::get().writes(3)) } // Storage: Staking ValidatorCount (r:0 w:1) fn set_validator_count() -> Weight { - // Minimum execution time: 5_025 nanoseconds. - Weight::from_ref_time(5_240_000 as u64) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + // Minimum execution time: 5_211 nanoseconds. + Weight::from_ref_time(5_372_000) + .saturating_add(RocksDbWeight::get().writes(1)) } // Storage: Staking ForceEra (r:0 w:1) fn force_no_eras() -> Weight { - // Minimum execution time: 5_107 nanoseconds. 
- Weight::from_ref_time(5_320_000 as u64) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + // Minimum execution time: 5_382 nanoseconds. + Weight::from_ref_time(5_654_000) + .saturating_add(RocksDbWeight::get().writes(1)) } // Storage: Staking ForceEra (r:0 w:1) fn force_new_era() -> Weight { - // Minimum execution time: 5_094 nanoseconds. - Weight::from_ref_time(5_377_000 as u64) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + // Minimum execution time: 5_618 nanoseconds. + Weight::from_ref_time(5_714_000) + .saturating_add(RocksDbWeight::get().writes(1)) } // Storage: Staking ForceEra (r:0 w:1) fn force_new_era_always() -> Weight { - // Minimum execution time: 5_219 nanoseconds. - Weight::from_ref_time(5_434_000 as u64) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + // Minimum execution time: 5_589 nanoseconds. + Weight::from_ref_time(5_776_000) + .saturating_add(RocksDbWeight::get().writes(1)) } // Storage: Staking Invulnerables (r:0 w:1) /// The range of component `v` is `[0, 1000]`. fn set_invulnerables(v: u32, ) -> Weight { - // Minimum execution time: 5_122 nanoseconds. - Weight::from_ref_time(5_977_533 as u64) - // Standard Error: 34 - .saturating_add(Weight::from_ref_time(10_205 as u64).saturating_mul(v as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + // Minimum execution time: 5_541 nanoseconds. + Weight::from_ref_time(6_479_253) + // Standard Error: 49 + .saturating_add(Weight::from_ref_time(10_125).saturating_mul(v.into())) + .saturating_add(RocksDbWeight::get().writes(1)) } // Storage: Staking Bonded (r:1 w:1) // Storage: Staking SlashingSpans (r:1 w:0) @@ -688,23 +685,23 @@ impl WeightInfo for () { // Storage: Staking SpanSlash (r:0 w:2) /// The range of component `s` is `[0, 100]`. fn force_unstake(s: u32, ) -> Weight { - // Minimum execution time: 80_216 nanoseconds. - Weight::from_ref_time(86_090_609 as u64) - // Standard Error: 2_006 - .saturating_add(Weight::from_ref_time(1_039_308 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(11 as u64)) - .saturating_add(RocksDbWeight::get().writes(12 as u64)) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(s as u64))) + // Minimum execution time: 81_041 nanoseconds. + Weight::from_ref_time(88_526_481) + // Standard Error: 11_494 + .saturating_add(Weight::from_ref_time(1_095_933).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(11)) + .saturating_add(RocksDbWeight::get().writes(12)) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) } // Storage: Staking UnappliedSlashes (r:1 w:1) /// The range of component `s` is `[1, 1000]`. fn cancel_deferred_slash(s: u32, ) -> Weight { - // Minimum execution time: 92_034 nanoseconds. - Weight::from_ref_time(896_585_370 as u64) - // Standard Error: 58_231 - .saturating_add(Weight::from_ref_time(4_908_277 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + // Minimum execution time: 92_308 nanoseconds. 
+ Weight::from_ref_time(900_351_007) + // Standard Error: 59_145 + .saturating_add(Weight::from_ref_time(4_944_988).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(1)) + .saturating_add(RocksDbWeight::get().writes(1)) } // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking ErasValidatorReward (r:1 w:0) @@ -717,14 +714,14 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:1) /// The range of component `n` is `[0, 256]`. fn payout_stakers_dead_controller(n: u32, ) -> Weight { - // Minimum execution time: 127_936 nanoseconds. - Weight::from_ref_time(184_556_084 as u64) - // Standard Error: 26_981 - .saturating_add(Weight::from_ref_time(21_786_304 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(9 as u64)) - .saturating_add(RocksDbWeight::get().reads((3 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(n as u64))) + // Minimum execution time: 131_855 nanoseconds. + Weight::from_ref_time(197_412_779) + // Standard Error: 21_283 + .saturating_add(Weight::from_ref_time(22_093_758).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(9)) + .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(n.into()))) + .saturating_add(RocksDbWeight::get().writes(2)) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) } // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking ErasValidatorReward (r:1 w:0) @@ -738,14 +735,14 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) /// The range of component `n` is `[0, 256]`. fn payout_stakers_alive_staked(n: u32, ) -> Weight { - // Minimum execution time: 157_778 nanoseconds. - Weight::from_ref_time(223_306_359 as u64) - // Standard Error: 27_216 - .saturating_add(Weight::from_ref_time(30_612_663 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(10 as u64)) - .saturating_add(RocksDbWeight::get().reads((5 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) - .saturating_add(RocksDbWeight::get().writes((3 as u64).saturating_mul(n as u64))) + // Minimum execution time: 163_118 nanoseconds. + Weight::from_ref_time(229_356_697) + // Standard Error: 30_740 + .saturating_add(Weight::from_ref_time(31_575_360).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(10)) + .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(n.into()))) + .saturating_add(RocksDbWeight::get().writes(3)) + .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(n.into()))) } // Storage: Staking Ledger (r:1 w:1) // Storage: Balances Locks (r:1 w:1) @@ -755,12 +752,12 @@ impl WeightInfo for () { // Storage: VoterList ListBags (r:2 w:2) /// The range of component `l` is `[1, 32]`. fn rebond(l: u32, ) -> Weight { - // Minimum execution time: 92_880 nanoseconds. - Weight::from_ref_time(94_434_663 as u64) - // Standard Error: 1_734 - .saturating_add(Weight::from_ref_time(34_453 as u64).saturating_mul(l as u64)) - .saturating_add(RocksDbWeight::get().reads(9 as u64)) - .saturating_add(RocksDbWeight::get().writes(8 as u64)) + // Minimum execution time: 94_048 nanoseconds. 
+ Weight::from_ref_time(95_784_236) + // Standard Error: 2_313 + .saturating_add(Weight::from_ref_time(52_798).saturating_mul(l.into())) + .saturating_add(RocksDbWeight::get().reads(9)) + .saturating_add(RocksDbWeight::get().writes(8)) } // Storage: System Account (r:1 w:1) // Storage: Staking Bonded (r:1 w:1) @@ -777,16 +774,15 @@ impl WeightInfo for () { // Storage: Staking SpanSlash (r:0 w:1) /// The range of component `s` is `[1, 100]`. fn reap_stash(s: u32, ) -> Weight { - // Minimum execution time: 92_334 nanoseconds. - Weight::from_ref_time(95_207_614 as u64) - // Standard Error: 1_822 - .saturating_add(Weight::from_ref_time(1_036_787 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(12 as u64)) - .saturating_add(RocksDbWeight::get().writes(12 as u64)) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(s as u64))) + // Minimum execution time: 93_342 nanoseconds. + Weight::from_ref_time(95_756_184) + // Standard Error: 2_067 + .saturating_add(Weight::from_ref_time(1_090_785).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(12)) + .saturating_add(RocksDbWeight::get().writes(12)) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) } // Storage: VoterList CounterForListNodes (r:1 w:0) - // Storage: Staking SlashingSpans (r:1 w:0) // Storage: VoterList ListBags (r:200 w:0) // Storage: VoterList ListNodes (r:101 w:0) // Storage: Staking Nominators (r:101 w:0) @@ -805,20 +801,19 @@ impl WeightInfo for () { /// The range of component `v` is `[1, 10]`. /// The range of component `n` is `[0, 100]`. fn new_era(v: u32, n: u32, ) -> Weight { - // Minimum execution time: 535_169 nanoseconds. - Weight::from_ref_time(548_667_000 as u64) - // Standard Error: 1_759_252 - .saturating_add(Weight::from_ref_time(58_283_319 as u64).saturating_mul(v as u64)) - // Standard Error: 175_299 - .saturating_add(Weight::from_ref_time(13_578_512 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(207 as u64)) - .saturating_add(RocksDbWeight::get().reads((5 as u64).saturating_mul(v as u64))) - .saturating_add(RocksDbWeight::get().reads((4 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) - .saturating_add(RocksDbWeight::get().writes((3 as u64).saturating_mul(v as u64))) + // Minimum execution time: 506_874 nanoseconds. + Weight::from_ref_time(507_798_000) + // Standard Error: 1_802_261 + .saturating_add(Weight::from_ref_time(59_874_736).saturating_mul(v.into())) + // Standard Error: 179_585 + .saturating_add(Weight::from_ref_time(13_668_574).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(206)) + .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(v.into()))) + .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(n.into()))) + .saturating_add(RocksDbWeight::get().writes(3)) + .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(v.into()))) } // Storage: VoterList CounterForListNodes (r:1 w:0) - // Storage: Staking SlashingSpans (r:21 w:0) // Storage: VoterList ListBags (r:200 w:0) // Storage: VoterList ListNodes (r:1500 w:0) // Storage: Staking Nominators (r:1500 w:0) @@ -827,29 +822,27 @@ impl WeightInfo for () { // Storage: Staking Ledger (r:1500 w:0) /// The range of component `v` is `[500, 1000]`. /// The range of component `n` is `[500, 1000]`. - /// The range of component `s` is `[1, 20]`. 
- fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight { - // Minimum execution time: 25_323_129 nanoseconds. - Weight::from_ref_time(25_471_672_000 as u64) - // Standard Error: 266_391 - .saturating_add(Weight::from_ref_time(6_665_504 as u64).saturating_mul(v as u64)) - // Standard Error: 266_391 - .saturating_add(Weight::from_ref_time(6_956_606 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(202 as u64)) - .saturating_add(RocksDbWeight::get().reads((5 as u64).saturating_mul(v as u64))) - .saturating_add(RocksDbWeight::get().reads((4 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(s as u64))) + fn get_npos_voters(v: u32, n: u32, ) -> Weight { + // Minimum execution time: 24_634_585 nanoseconds. + Weight::from_ref_time(24_718_377_000) + // Standard Error: 324_839 + .saturating_add(Weight::from_ref_time(3_654_508).saturating_mul(v.into())) + // Standard Error: 324_839 + .saturating_add(Weight::from_ref_time(2_927_535).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(201)) + .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(v.into()))) + .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(n.into()))) } // Storage: Staking CounterForValidators (r:1 w:0) // Storage: Staking Validators (r:501 w:0) /// The range of component `v` is `[500, 1000]`. fn get_npos_targets(v: u32, ) -> Weight { - // Minimum execution time: 4_905_036 nanoseconds. - Weight::from_ref_time(78_163_554 as u64) - // Standard Error: 23_723 - .saturating_add(Weight::from_ref_time(9_784_870 as u64).saturating_mul(v as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(v as u64))) + // Minimum execution time: 4_805_490 nanoseconds. + Weight::from_ref_time(118_475_494) + // Standard Error: 26_332 + .saturating_add(Weight::from_ref_time(9_635_188).saturating_mul(v.into())) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(v.into()))) } // Storage: Staking MinCommission (r:0 w:1) // Storage: Staking MinValidatorBond (r:0 w:1) @@ -858,9 +851,9 @@ impl WeightInfo for () { // Storage: Staking MaxNominatorsCount (r:0 w:1) // Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_configs_all_set() -> Weight { - // Minimum execution time: 10_096 nanoseconds. - Weight::from_ref_time(10_538_000 as u64) - .saturating_add(RocksDbWeight::get().writes(6 as u64)) + // Minimum execution time: 10_816 nanoseconds. + Weight::from_ref_time(11_242_000) + .saturating_add(RocksDbWeight::get().writes(6)) } // Storage: Staking MinCommission (r:0 w:1) // Storage: Staking MinValidatorBond (r:0 w:1) @@ -869,9 +862,9 @@ impl WeightInfo for () { // Storage: Staking MaxNominatorsCount (r:0 w:1) // Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_configs_all_remove() -> Weight { - // Minimum execution time: 9_045 nanoseconds. - Weight::from_ref_time(9_379_000 as u64) - .saturating_add(RocksDbWeight::get().writes(6 as u64)) + // Minimum execution time: 9_581 nanoseconds. + Weight::from_ref_time(10_383_000) + .saturating_add(RocksDbWeight::get().writes(6)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) @@ -884,17 +877,17 @@ impl WeightInfo for () { // Storage: VoterList ListBags (r:1 w:1) // Storage: VoterList CounterForListNodes (r:1 w:1) fn chill_other() -> Weight { - // Minimum execution time: 81_457 nanoseconds. 
-		Weight::from_ref_time(82_410_000 as u64)
-			.saturating_add(RocksDbWeight::get().reads(11 as u64))
-			.saturating_add(RocksDbWeight::get().writes(6 as u64))
+		// Minimum execution time: 83_669 nanoseconds.
+		Weight::from_ref_time(84_772_000)
+			.saturating_add(RocksDbWeight::get().reads(11))
+			.saturating_add(RocksDbWeight::get().writes(6))
 	}
 	// Storage: Staking MinCommission (r:1 w:0)
 	// Storage: Staking Validators (r:1 w:1)
 	fn force_apply_min_commission() -> Weight {
-		// Minimum execution time: 19_684 nanoseconds.
-		Weight::from_ref_time(20_059_000 as u64)
-			.saturating_add(RocksDbWeight::get().reads(2 as u64))
-			.saturating_add(RocksDbWeight::get().writes(1 as u64))
+		// Minimum execution time: 20_553 nanoseconds.
+		Weight::from_ref_time(20_933_000)
+			.saturating_add(RocksDbWeight::get().reads(2))
+			.saturating_add(RocksDbWeight::get().writes(1))
 	}
 }
diff --git a/utils/frame/remote-externalities/src/lib.rs b/utils/frame/remote-externalities/src/lib.rs
index 4f95331c03bc8..db062e246ceef 100644
--- a/utils/frame/remote-externalities/src/lib.rs
+++ b/utils/frame/remote-externalities/src/lib.rs
@@ -776,6 +776,8 @@ impl Builder {
 	/// Inject a hashed prefix. This is treated as-is, and should be pre-hashed.
 	///
+	/// Only relevant if `Mode::Online` is being used. No-op otherwise.
+	///
 	/// This should be used to inject a "PREFIX", like a storage (double) map.
 	pub fn inject_hashed_prefix(mut self, hashed: &[u8]) -> Self {
 		self.hashed_prefixes.push(hashed.to_vec());
@@ -785,6 +787,8 @@ impl Builder {
 	/// Just a utility wrapper of [`Self::inject_hashed_prefix`] that injects
 	/// [`DEFAULT_CHILD_STORAGE_KEY_PREFIX`] as a prefix.
 	///
+	/// Only relevant if `Mode::Online` is being used. No-op otherwise.
+	///
 	/// If set, this will guarantee that the child-tree data of ALL pallets will be downloaded.
 	///
 	/// This is not needed if the entire state is being downloaded.
@@ -800,6 +804,8 @@ impl Builder {
 	/// Inject a hashed key to scrape. This is treated as-is, and should be pre-hashed.
 	///
+	/// Only relevant if `Mode::Online` is being used. No-op otherwise.
+	///
 	/// This should be used to inject a "KEY", like a storage value.
 	pub fn inject_hashed_key(mut self, hashed: &[u8]) -> Self {
 		self.hashed_keys.push(hashed.to_vec());
@@ -951,7 +957,6 @@ mod tests {
 #[cfg(all(test, feature = "remote-test"))]
 mod remote_tests {
 	use super::test_prelude::*;
-	const REMOTE_INACCESSIBLE: &'static str = "Can't reach the remote node.
Is it running?"; #[tokio::test] async fn offline_else_online_works() { @@ -970,7 +975,7 @@ mod remote_tests { )) .build() .await - .expect(REMOTE_INACCESSIBLE) + .unwrap() .execute_with(|| {}); // this shows that in the second run, we are not using the remote @@ -988,7 +993,7 @@ mod remote_tests { )) .build() .await - .expect(REMOTE_INACCESSIBLE) + .unwrap() .execute_with(|| {}); let to_delete = std::fs::read_dir(Path::new(".")) @@ -1018,7 +1023,7 @@ mod remote_tests { })) .build() .await - .expect(REMOTE_INACCESSIBLE) + .unwrap() .execute_with(|| {}); } @@ -1033,7 +1038,7 @@ mod remote_tests { })) .build() .await - .expect(REMOTE_INACCESSIBLE) + .unwrap() .execute_with(|| {}); Builder::::new() @@ -1044,7 +1049,7 @@ mod remote_tests { })) .build() .await - .expect(REMOTE_INACCESSIBLE) + .unwrap() .execute_with(|| {}); } @@ -1059,7 +1064,7 @@ mod remote_tests { })) .build() .await - .expect(REMOTE_INACCESSIBLE) + .unwrap() .execute_with(|| {}); Builder::::new() @@ -1070,7 +1075,7 @@ mod remote_tests { })) .build() .await - .expect(REMOTE_INACCESSIBLE) + .unwrap() .execute_with(|| {}); } @@ -1085,7 +1090,7 @@ mod remote_tests { })) .build() .await - .expect(REMOTE_INACCESSIBLE) + .unwrap() .execute_with(|| {}); let to_delete = std::fs::read_dir(Path::new(".")) @@ -1126,7 +1131,7 @@ mod remote_tests { .inject_default_child_tree_prefix() .build() .await - .expect(REMOTE_INACCESSIBLE) + .unwrap() .execute_with(|| {}); let to_delete = std::fs::read_dir(Path::new(".")) @@ -1164,7 +1169,7 @@ mod remote_tests { })) .build() .await - .expect(REMOTE_INACCESSIBLE) + .unwrap() .execute_with(|| {}); let to_delete = std::fs::read_dir(Path::new(".")) @@ -1203,7 +1208,7 @@ mod remote_tests { })) .build() .await - .expect(REMOTE_INACCESSIBLE) + .unwrap() .execute_with(|| {}); } } From 2a0eeff4008573f6ead70eb9bd43cf6d268d2e7d Mon Sep 17 00:00:00 2001 From: Muharem Ismailov Date: Mon, 12 Dec 2022 23:20:21 +0100 Subject: [PATCH 19/29] bounties calls docs fix (#12909) Co-authored-by: parity-processbot <> --- frame/bounties/src/lib.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/frame/bounties/src/lib.rs b/frame/bounties/src/lib.rs index eb92c774f86e3..c3c2c08d24b2a 100644 --- a/frame/bounties/src/lib.rs +++ b/frame/bounties/src/lib.rs @@ -151,8 +151,7 @@ pub enum BountyStatus { Approved, /// The bounty is funded and waiting for curator assignment. Funded, - /// A curator has been proposed by the `ApproveOrigin`. Waiting for acceptance from the - /// curator. + /// A curator has been proposed. Waiting for acceptance from the curator. CuratorProposed { /// The assigned curator of this bounty. curator: AccountId, @@ -348,7 +347,7 @@ pub mod pallet { /// Approve a bounty proposal. At a later time, the bounty will be funded and become active /// and the original deposit will be returned. /// - /// May only be called from `T::ApproveOrigin`. + /// May only be called from `T::SpendOrigin`. /// /// # /// - O(1). @@ -380,7 +379,7 @@ pub mod pallet { /// Assign a curator to a funded bounty. /// - /// May only be called from `T::ApproveOrigin`. + /// May only be called from `T::SpendOrigin`. /// /// # /// - O(1). 
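For context on the origin referenced by the corrected docs: pallet-bounties is configured on top of `pallet_treasury::Config`, so `T::SpendOrigin` is the treasury's spend origin, whose `Success` value caps the amount an approval may commit. A minimal sketch of one way a runtime could satisfy it, assuming the `frame_system` origin helpers and an illustrative `MaxBalance` constant (these names are not taken from this patch):

	parameter_types! {
		// Illustrative cap: the origin's `Success` value bounds how much a
		// single approval may spend; here root is allowed everything.
		pub const MaxBalance: Balance = Balance::MAX;
	}

	impl pallet_treasury::Config for Runtime {
		// ... other associated types elided ...
		// Admits only root, yielding `MaxBalance` as the permitted amount.
		type SpendOrigin = frame_system::EnsureRootWithSuccess<AccountId, MaxBalance>;
	}

Under such a wiring, `approve_bounty` and `propose_curator` succeed only for root and fail with `BadOrigin` for any other caller.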
From 01efa856c6bb6fd120203afddd06c7b9719eddc4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dino=20Pa=C4=8Dandi?= <3002868+Dinonard@users.noreply.github.com> Date: Tue, 13 Dec 2022 12:09:26 +0100 Subject: [PATCH 20/29] pallet-contracts migration pre-upgrade fix for v8 (#12905) * Only run pre-v8 migration check for versions older than 8 * Logix fix --- frame/contracts/src/migration.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/contracts/src/migration.rs b/frame/contracts/src/migration.rs index aa04d8b9b1084..56d688abc7309 100644 --- a/frame/contracts/src/migration.rs +++ b/frame/contracts/src/migration.rs @@ -69,7 +69,7 @@ impl OnRuntimeUpgrade for Migration { fn pre_upgrade() -> Result, &'static str> { let version = >::on_chain_storage_version(); - if version == 8 { + if version == 7 { v8::pre_upgrade::()?; } From 13664c388d59b4c83dcb90a27700a9b68abe0305 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Paulo=20Silva=20de=20Souza?= <77391175+joao-paulo-parity@users.noreply.github.com> Date: Tue, 13 Dec 2022 08:42:50 -0300 Subject: [PATCH 21/29] use custom environment for publishing crates (#12912) --- scripts/ci/gitlab/pipeline/publish.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/scripts/ci/gitlab/pipeline/publish.yml b/scripts/ci/gitlab/pipeline/publish.yml index 9053035a61cdb..381a1bc420ef3 100644 --- a/scripts/ci/gitlab/pipeline/publish.yml +++ b/scripts/ci/gitlab/pipeline/publish.yml @@ -211,6 +211,9 @@ update-node-template: # to roughly 202 minutes of delay, or 3h and 22 minutes. As such, the job needs to have a much # higher timeout than average. timeout: 5h + # A custom publishing environment is used for us to be able to set up protected secrets + # specifically for it + environment: publish-crates script: - rusty-cachier snapshot create - git clone From 93fa104fd4d4762c4d5c01257820bd3acf24a17b Mon Sep 17 00:00:00 2001 From: Sasha Gryaznov Date: Tue, 13 Dec 2022 17:54:50 +0200 Subject: [PATCH 22/29] [contracts] Add debug buffer limit + enforcement (#12845) * Add debug buffer limit + enforcement Add debug buffer limit + enforcement * use BoundedVec for the debug buffer * revert schedule (debug buf len limit not needed anymore) * return DispatchError * addressed review comments --- bin/node/runtime/src/lib.rs | 1 + frame/contracts/src/exec.rs | 84 ++++++++++++++++++++++------- frame/contracts/src/lib.rs | 22 +++++--- frame/contracts/src/tests.rs | 1 + frame/contracts/src/wasm/mod.rs | 4 +- frame/contracts/src/wasm/runtime.rs | 4 +- 6 files changed, 86 insertions(+), 30 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 7cd42be73a19b..00d2a54d1e774 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1216,6 +1216,7 @@ impl pallet_contracts::Config for Runtime { type MaxCodeLen = ConstU32<{ 128 * 1024 }>; type MaxStorageKeyLen = ConstU32<128>; type UnsafeUnstableInterface = ConstBool; + type MaxDebugBufferLen = ConstU32<{ 2 * 1024 * 1024 }>; } impl pallet_sudo::Config for Runtime { diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index c0cf6a9f4c4c4..945095dc20329 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -18,8 +18,8 @@ use crate::{ gas::GasMeter, storage::{self, Storage, WriteOutcome}, - BalanceOf, CodeHash, Config, ContractInfo, ContractInfoOf, Determinism, Error, Event, Nonce, - Pallet as Contracts, Schedule, + BalanceOf, CodeHash, Config, ContractInfo, ContractInfoOf, DebugBufferVec, Determinism, Error, + Event, Nonce, 
Pallet as Contracts, Schedule, }; use frame_support::{ crypto::ecdsa::ECDSAExt, @@ -279,7 +279,7 @@ pub trait Ext: sealing::Sealed { /// when the code is executing on-chain. /// /// Returns `true` if debug message recording is enabled. Otherwise `false` is returned. - fn append_debug_buffer(&mut self, msg: &str) -> bool; + fn append_debug_buffer(&mut self, msg: &str) -> Result; /// Call some dispatchable and return the result. fn call_runtime(&self, call: ::RuntimeCall) -> DispatchResultWithPostInfo; @@ -409,7 +409,7 @@ pub struct Stack<'a, T: Config, E> { /// /// All the bytes added to this field should be valid UTF-8. The buffer has no defined /// structure and is intended to be shown to users as-is for debugging purposes. - debug_message: Option<&'a mut Vec>, + debug_message: Option<&'a mut DebugBufferVec>, /// The determinism requirement of this call stack. determinism: Determinism, /// No executable is held by the struct but influences its behaviour. @@ -617,7 +617,7 @@ where schedule: &'a Schedule, value: BalanceOf, input_data: Vec, - debug_message: Option<&'a mut Vec>, + debug_message: Option<&'a mut DebugBufferVec>, determinism: Determinism, ) -> Result { let (mut stack, executable) = Self::new( @@ -652,7 +652,7 @@ where value: BalanceOf, input_data: Vec, salt: &[u8], - debug_message: Option<&'a mut Vec>, + debug_message: Option<&'a mut DebugBufferVec>, ) -> Result<(T::AccountId, ExecReturnValue), ExecError> { let (mut stack, executable) = Self::new( FrameArgs::Instantiate { @@ -681,7 +681,7 @@ where storage_meter: &'a mut storage::meter::Meter, schedule: &'a Schedule, value: BalanceOf, - debug_message: Option<&'a mut Vec>, + debug_message: Option<&'a mut DebugBufferVec>, determinism: Determinism, ) -> Result<(Self, E), ExecError> { let (first_frame, executable, nonce) = Self::new_frame( @@ -1328,14 +1328,16 @@ where &mut self.top_frame_mut().nested_gas } - fn append_debug_buffer(&mut self, msg: &str) -> bool { + fn append_debug_buffer(&mut self, msg: &str) -> Result { if let Some(buffer) = &mut self.debug_message { if !msg.is_empty() { - buffer.extend(msg.as_bytes()); + buffer + .try_extend(&mut msg.bytes()) + .map_err(|_| Error::::DebugBufferExhausted)?; } - true + Ok(true) } else { - false + Ok(false) } } @@ -2503,12 +2505,16 @@ mod tests { #[test] fn printing_works() { let code_hash = MockLoader::insert(Call, |ctx, _| { - ctx.ext.append_debug_buffer("This is a test"); - ctx.ext.append_debug_buffer("More text"); + ctx.ext + .append_debug_buffer("This is a test") + .expect("Maximum allowed debug buffer size exhausted!"); + ctx.ext + .append_debug_buffer("More text") + .expect("Maximum allowed debug buffer size exhausted!"); exec_success() }); - let mut debug_buffer = Vec::new(); + let mut debug_buffer = DebugBufferVec::::try_from(Vec::new()).unwrap(); ExtBuilder::default().build().execute_with(|| { let min_balance = ::Currency::minimum_balance(); @@ -2531,18 +2537,22 @@ mod tests { .unwrap(); }); - assert_eq!(&String::from_utf8(debug_buffer).unwrap(), "This is a testMore text"); + assert_eq!(&String::from_utf8(debug_buffer.to_vec()).unwrap(), "This is a testMore text"); } #[test] fn printing_works_on_fail() { let code_hash = MockLoader::insert(Call, |ctx, _| { - ctx.ext.append_debug_buffer("This is a test"); - ctx.ext.append_debug_buffer("More text"); + ctx.ext + .append_debug_buffer("This is a test") + .expect("Maximum allowed debug buffer size exhausted!"); + ctx.ext + .append_debug_buffer("More text") + .expect("Maximum allowed debug buffer size exhausted!"); exec_trapped() 
}); - let mut debug_buffer = Vec::new(); + let mut debug_buffer = DebugBufferVec::::try_from(Vec::new()).unwrap(); ExtBuilder::default().build().execute_with(|| { let min_balance = ::Currency::minimum_balance(); @@ -2565,7 +2575,43 @@ assert!(result.is_err()); }); - assert_eq!(&String::from_utf8(debug_buffer).unwrap(), "This is a testMore text"); + assert_eq!(&String::from_utf8(debug_buffer.to_vec()).unwrap(), "This is a testMore text"); + } + + #[test] + fn debug_buffer_is_limited() { + let code_hash = MockLoader::insert(Call, move |ctx, _| { + ctx.ext.append_debug_buffer("overflowing bytes")?; + exec_success() + }); + + // Pre-fill the buffer up to its limit + let mut debug_buffer = + DebugBufferVec::::try_from(vec![0u8; DebugBufferVec::::bound()]).unwrap(); + + ExtBuilder::default().build().execute_with(|| { + let schedule: Schedule = ::Schedule::get(); + let min_balance = ::Currency::minimum_balance(); + let mut gas_meter = GasMeter::::new(GAS_LIMIT); + set_balance(&ALICE, min_balance * 10); + place_contract(&BOB, code_hash); + let mut storage_meter = storage::meter::Meter::new(&ALICE, Some(0), 0).unwrap(); + assert_err!( + MockStack::run_call( + ALICE, + BOB, + &mut gas_meter, + &mut storage_meter, + &schedule, + 0, + vec![], + Some(&mut debug_buffer), + Determinism::Deterministic, + ) + .map_err(|e| e.error), + Error::::DebugBufferExhausted + ); + }); } #[test] diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 06d817785cc39..b76acf9d1db08 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -142,6 +142,7 @@ type BalanceOf = type CodeVec = BoundedVec::MaxCodeLen>; type RelaxedCodeVec = WeakBoundedVec::MaxCodeLen>; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; +type DebugBufferVec = BoundedVec::MaxDebugBufferLen>; /// Used as a sentinel value when reading and writing contract memory. /// @@ -344,6 +345,10 @@ pub mod pallet { /// Do **not** set to `true` on production chains. #[pallet::constant] type UnsafeUnstableInterface: Get; + + /// The maximum length of the debug buffer in bytes. + #[pallet::constant] + type MaxDebugBufferLen: Get; } #[pallet::hooks] @@ -863,6 +868,9 @@ pub mod pallet { CodeRejected, /// An indeterministic code was used in a context where this is not permitted. Indeterministic, + /// The debug buffer size used during contract execution exceeded the limit determined by + /// the `MaxDebugBufferLen` pallet config parameter. + DebugBufferExhausted, } /// A mapping from an original code hash to the original code, untouched by instrumentation.
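To see the mechanism behind the new `DebugBufferExhausted` error in isolation: the buffer is now a `BoundedVec`, and `try_extend` fails once the bound is hit. A minimal sketch, assuming `frame_support`'s re-exported bounded types behave as they do in this diff; the 8-byte bound is purely illustrative (the runtime above configures `MaxDebugBufferLen` to 2 MiB).

// `try_extend` refuses bytes past the bound; the pallet surfaces this as
// `Error::<T>::DebugBufferExhausted`.
use frame_support::{traits::ConstU32, BoundedVec};

fn main() {
    let mut buf: BoundedVec<u8, ConstU32<8>> = BoundedVec::default();
    assert!(buf.try_extend(&mut "hello".bytes()).is_ok()); // 5 of 8 bytes used
    assert!(buf.try_extend(&mut "world!".bytes()).is_err()); // would exceed the bound
}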
@@ -961,7 +969,7 @@ where debug: bool, determinism: Determinism, ) -> ContractExecResult> { - let mut debug_message = if debug { Some(Vec::new()) } else { None }; + let mut debug_message = if debug { Some(DebugBufferVec::::default()) } else { None }; let output = Self::internal_call( origin, dest, @@ -977,7 +985,7 @@ where gas_consumed: output.gas_meter.gas_consumed(), gas_required: output.gas_meter.gas_required(), storage_deposit: output.storage_deposit, - debug_message: debug_message.unwrap_or_default(), + debug_message: debug_message.unwrap_or_default().to_vec(), } } @@ -1003,7 +1011,7 @@ where salt: Vec, debug: bool, ) -> ContractInstantiateResult> { - let mut debug_message = if debug { Some(Vec::new()) } else { None }; + let mut debug_message = if debug { Some(DebugBufferVec::::default()) } else { None }; let output = Self::internal_instantiate( origin, value, @@ -1022,7 +1030,7 @@ where gas_consumed: output.gas_meter.gas_consumed(), gas_required: output.gas_meter.gas_required(), storage_deposit: output.storage_deposit, - debug_message: debug_message.unwrap_or_default(), + debug_message: debug_message.unwrap_or_default().to_vec(), } } @@ -1113,7 +1121,7 @@ where gas_limit: Weight, storage_deposit_limit: Option>, data: Vec, - debug_message: Option<&mut Vec>, + debug_message: Option<&mut DebugBufferVec>, determinism: Determinism, ) -> InternalCallOutput { let mut gas_meter = GasMeter::new(gas_limit); @@ -1156,7 +1164,7 @@ where code: Code>, data: Vec, salt: Vec, - mut debug_message: Option<&mut Vec>, + mut debug_message: Option<&mut DebugBufferVec>, ) -> InternalInstantiateOutput { let mut storage_deposit = Default::default(); let mut gas_meter = GasMeter::new(gas_limit); @@ -1172,7 +1180,7 @@ where TryInstantiate::Skip, ) .map_err(|(err, msg)| { - debug_message.as_mut().map(|buffer| buffer.extend(msg.as_bytes())); + debug_message.as_mut().map(|buffer| buffer.try_extend(&mut msg.bytes())); err })?; // The open deposit will be charged during execution when the diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index a467800dfe15b..6121d880ca8c5 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -415,6 +415,7 @@ impl Config for Test { type MaxCodeLen = ConstU32<{ 128 * 1024 }>; type MaxStorageKeyLen = ConstU32<128>; type UnsafeUnstableInterface = UnstableInterface; + type MaxDebugBufferLen = ConstU32<{ 2 * 1024 * 1024 }>; } pub const ALICE: AccountId32 = AccountId32::new([1u8; 32]); diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index e9e6b42dc3f8a..d85dac95cc712 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -594,9 +594,9 @@ mod tests { fn gas_meter(&mut self) -> &mut GasMeter { &mut self.gas_meter } - fn append_debug_buffer(&mut self, msg: &str) -> bool { + fn append_debug_buffer(&mut self, msg: &str) -> Result { self.debug_buffer.extend(msg.as_bytes()); - true + Ok(true) } fn call_runtime( &self, diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index b933688eb61ec..50ad9996e6eb6 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -2395,11 +2395,11 @@ pub mod env { str_len: u32, ) -> Result { ctx.charge_gas(RuntimeCosts::DebugMessage)?; - if ctx.ext.append_debug_buffer("") { + if ctx.ext.append_debug_buffer("")? 
{ let data = ctx.read_sandbox_memory(memory, str_ptr, str_len)?; let msg = core::str::from_utf8(&data).map_err(|_| >::DebugMessageInvalidUTF8)?; - ctx.ext.append_debug_buffer(msg); + ctx.ext.append_debug_buffer(msg)?; return Ok(ReturnCode::Success) } Ok(ReturnCode::LoggingDisabled) From 89498c0d756c649d71e82340bee44fcc7cfe8037 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 13 Dec 2022 22:47:51 +0100 Subject: [PATCH 23/29] Fixup some wrong dependencies (#12899) * Fixup some wrong dependencies Dev dependencies should not appear in the feature list. If features are required, they should be directly enabled for the `dev-dependency`. * More fixups * Fix fix * Remove deprecated feature * Make all work properly and nice!! * FMT * Fix formatting --- Cargo.lock | 2 +- frame/assets/Cargo.toml | 3 +- frame/assets/src/lib.rs | 2 + frame/bags-list/src/lib.rs | 58 ++++++++++--------- frame/election-provider-support/Cargo.toml | 3 +- frame/election-provider-support/src/lib.rs | 3 + frame/fast-unstake/Cargo.toml | 6 -- frame/nomination-pools/Cargo.toml | 2 +- .../nomination-pools/benchmarking/Cargo.toml | 2 - frame/staking/Cargo.toml | 8 +-- frame/staking/src/pallet/impls.rs | 41 +++++++------ .../asset-tx-payment/Cargo.toml | 4 -- .../asset-tx-payment/src/tests.rs | 5 +- primitives/core/src/lib.rs | 46 +++++++++++++++ primitives/staking/Cargo.toml | 2 + primitives/staking/src/lib.rs | 2 + utils/wasm-builder/src/wasm_project.rs | 9 +-- 17 files changed, 120 insertions(+), 78 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2f2b4aa7cf35d..ea34146f16f9b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5176,7 +5176,6 @@ dependencies = [ "frame-support", "frame-system", "log", - "pallet-assets", "pallet-balances", "pallet-staking", "pallet-staking-reward-curve", @@ -9475,6 +9474,7 @@ version = "4.0.0-dev" dependencies = [ "parity-scale-codec", "scale-info", + "sp-core", "sp-runtime", "sp-std", ] diff --git a/frame/assets/Cargo.toml b/frame/assets/Cargo.toml index 715149b20c042..84bfd9535a461 100644 --- a/frame/assets/Cargo.toml +++ b/frame/assets/Cargo.toml @@ -23,9 +23,9 @@ frame-support = { version = "4.0.0-dev", default-features = false, path = "../su # `system` module provides us with all sorts of useful stuff and macros depend on it being around. 
frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } +sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } [dev-dependencies] -sp-core = { version = "7.0.0", path = "../../primitives/core" } sp-std = { version = "5.0.0", path = "../../primitives/std" } sp-io = { version = "7.0.0", path = "../../primitives/io" } pallet-balances = { version = "4.0.0-dev", path = "../balances" } @@ -35,6 +35,7 @@ default = ["std"] std = [ "codec/std", "scale-info/std", + "sp-core/std", "sp-std/std", "sp-runtime/std", "frame-support/std", diff --git a/frame/assets/src/lib.rs b/frame/assets/src/lib.rs index 629a0243cfc80..ab589c0eef0f4 100644 --- a/frame/assets/src/lib.rs +++ b/frame/assets/src/lib.rs @@ -1487,3 +1487,5 @@ pub mod pallet { } } } + +sp_core::generate_feature_enabled_macro!(runtime_benchmarks_enabled, feature = "runtime-benchmarks", $); diff --git a/frame/bags-list/src/lib.rs b/frame/bags-list/src/lib.rs index 1ffdf29345513..14f8a613eb798 100644 --- a/frame/bags-list/src/lib.rs +++ b/frame/bags-list/src/lib.rs @@ -359,25 +359,26 @@ impl, I: 'static> SortedListProvider for Pallet List::::unsafe_clear() } - #[cfg(feature = "runtime-benchmarks")] - fn score_update_worst_case(who: &T::AccountId, is_increase: bool) -> Self::Score { - use frame_support::traits::Get as _; - let thresholds = T::BagThresholds::get(); - let node = list::Node::::get(who).unwrap(); - let current_bag_idx = thresholds - .iter() - .chain(sp_std::iter::once(&T::Score::max_value())) - .position(|w| w == &node.bag_upper()) - .unwrap(); - - if is_increase { - let next_threshold_idx = current_bag_idx + 1; - assert!(thresholds.len() > next_threshold_idx); - thresholds[next_threshold_idx] - } else { - assert!(current_bag_idx != 0); - let prev_threshold_idx = current_bag_idx - 1; - thresholds[prev_threshold_idx] + frame_election_provider_support::runtime_benchmarks_enabled! { + fn score_update_worst_case(who: &T::AccountId, is_increase: bool) -> Self::Score { + use frame_support::traits::Get as _; + let thresholds = T::BagThresholds::get(); + let node = list::Node::::get(who).unwrap(); + let current_bag_idx = thresholds + .iter() + .chain(sp_std::iter::once(&T::Score::max_value())) + .position(|w| w == &node.bag_upper) + .unwrap(); + + if is_increase { + let next_threshold_idx = current_bag_idx + 1; + assert!(thresholds.len() > next_threshold_idx); + thresholds[next_threshold_idx] + } else { + assert!(current_bag_idx != 0); + let prev_threshold_idx = current_bag_idx - 1; + thresholds[prev_threshold_idx] + } } } } @@ -389,14 +390,15 @@ impl, I: 'static> ScoreProvider for Pallet { Node::::get(id).map(|node| node.score()).unwrap_or_default() } - #[cfg(any(feature = "runtime-benchmarks", feature = "fuzz", test))] - fn set_score_of(id: &T::AccountId, new_score: T::Score) { - ListNodes::::mutate(id, |maybe_node| { - if let Some(node) = maybe_node.as_mut() { - node.set_score(new_score) - } else { - panic!("trying to mutate {:?} which does not exists", id); - } - }) + frame_election_provider_support::runtime_benchmarks_or_fuzz_enabled! 
{ + fn set_score_of(id: &T::AccountId, new_score: T::Score) { + ListNodes::::mutate(id, |maybe_node| { + if let Some(node) = maybe_node.as_mut() { + node.score = new_score; + } else { + panic!("trying to mutate {:?} which does not exists", id); + } + }) + } } } diff --git a/frame/election-provider-support/Cargo.toml b/frame/election-provider-support/Cargo.toml index b9584c899e1b1..33a6f25ed0822 100644 --- a/frame/election-provider-support/Cargo.toml +++ b/frame/election-provider-support/Cargo.toml @@ -21,10 +21,10 @@ sp-arithmetic = { version = "6.0.0", default-features = false, path = "../../pri sp-npos-elections = { version = "4.0.0-dev", default-features = false, path = "../../primitives/npos-elections" } sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } [dev-dependencies] rand = "0.7.3" -sp-core = { version = "7.0.0", path = "../../primitives/core" } sp-io = { version = "7.0.0", path = "../../primitives/io" } sp-npos-elections = { version = "4.0.0-dev", path = "../../primitives/npos-elections" } @@ -38,6 +38,7 @@ std = [ "scale-info/std", "sp-arithmetic/std", "sp-npos-elections/std", + "sp-core/std", "sp-runtime/std", "sp-std/std", ] diff --git a/frame/election-provider-support/src/lib.rs b/frame/election-provider-support/src/lib.rs index 8b26148844c39..9d5d6c018e5e1 100644 --- a/frame/election-provider-support/src/lib.rs +++ b/frame/election-provider-support/src/lib.rs @@ -671,3 +671,6 @@ pub type BoundedSupportsOf = BoundedSupports< ::AccountId, ::MaxWinners, >; + +sp_core::generate_feature_enabled_macro!(runtime_benchmarks_enabled, feature = "runtime-benchmarks", $); +sp_core::generate_feature_enabled_macro!(runtime_benchmarks_or_fuzz_enabled, any(feature = "runtime-benchmarks", feature = "fuzzing"), $); diff --git a/frame/fast-unstake/Cargo.toml b/frame/fast-unstake/Cargo.toml index 61bc823cc11e5..b61060e775a9f 100644 --- a/frame/fast-unstake/Cargo.toml +++ b/frame/fast-unstake/Cargo.toml @@ -25,10 +25,7 @@ sp-std = { version = "5.0.0", default-features = false, path = "../../primitives sp-staking = { default-features = false, path = "../../primitives/staking" } frame-election-provider-support = { default-features = false, path = "../election-provider-support" } -# optional dependencies for cargo features frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } -pallet-staking = { default-features = false, optional = true, path = "../staking" } -pallet-assets = { default-features = false, optional = true, path = "../assets" } [dev-dependencies] pallet-staking-reward-curve = { version = "4.0.0-dev", path = "../staking/reward-curve" } @@ -38,8 +35,6 @@ sp-tracing = { version = "6.0.0", path = "../../primitives/tracing" } pallet-staking = { path = "../staking" } pallet-balances = { path = "../balances" } pallet-timestamp = { path = "../timestamp" } -pallet-assets = { path = "../assets" } - [features] default = ["std"] @@ -64,6 +59,5 @@ runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-system/runtime-benchmarks", "sp-staking/runtime-benchmarks", - "pallet-staking/runtime-benchmarks" ] try-runtime = ["frame-support/try-runtime"] diff --git a/frame/nomination-pools/Cargo.toml b/frame/nomination-pools/Cargo.toml index 4894e3d97f19a..3eb2d4bc5fd9b 100644 --- 
a/frame/nomination-pools/Cargo.toml +++ b/frame/nomination-pools/Cargo.toml @@ -26,7 +26,7 @@ sp-core = { version = "7.0.0", default-features = false, path = "../../primitive sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } log = { version = "0.4.0", default-features = false } -# Optional: usef for testing and/or fuzzing +# Optional: use for testing and/or fuzzing pallet-balances = { version = "4.0.0-dev", path = "../balances", optional = true } sp-tracing = { version = "6.0.0", path = "../../primitives/tracing", optional = true } diff --git a/frame/nomination-pools/benchmarking/Cargo.toml b/frame/nomination-pools/benchmarking/Cargo.toml index be52d9777ac86..74b71a353fe7f 100644 --- a/frame/nomination-pools/benchmarking/Cargo.toml +++ b/frame/nomination-pools/benchmarking/Cargo.toml @@ -31,7 +31,6 @@ sp-runtime = { version = "7.0.0", default-features = false, path = "../../../pri sp-runtime-interface = { version = "7.0.0", default-features = false, path = "../../../primitives/runtime-interface" } sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/staking" } sp-std = { version = "5.0.0", default-features = false, path = "../../../primitives/std" } -sp-io = { optional = true, default-features = false, path = "../../../primitives/io" } [dev-dependencies] pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../../balances" } @@ -53,7 +52,6 @@ std = [ "pallet-nomination-pools/std", "sp-runtime/std", "sp-runtime-interface/std", - "sp-io/std", "sp-staking/std", "sp-std/std", ] diff --git a/frame/staking/Cargo.toml b/frame/staking/Cargo.toml index 3ad63ad94a08a..a7fca045cc4ba 100644 --- a/frame/staking/Cargo.toml +++ b/frame/staking/Cargo.toml @@ -32,10 +32,9 @@ sp-application-crypto = { version = "7.0.0", default-features = false, path = ". frame-election-provider-support = { version = "4.0.0-dev", default-features = false, path = "../election-provider-support" } log = { version = "0.4.17", default-features = false } -# optional dependencies for cargo features +# Optional imports for benchmarking frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } rand_chacha = { version = "0.2", default-features = false, optional = true } -pallet-bags-list = { default-features = false, optional = true, path = "../bags-list" } [dev-dependencies] sp-tracing = { version = "6.0.0", path = "../../primitives/tracing" } @@ -75,10 +74,5 @@ runtime-benchmarks = [ "frame-election-provider-support/runtime-benchmarks", "rand_chacha", "sp-staking/runtime-benchmarks", - "pallet-bags-list/runtime-benchmarks", ] try-runtime = ["frame-support/try-runtime"] -fuzz = [ - "pallet-bags-list/fuzz", - "frame-election-provider-support/fuzz", -] diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs index 6729a2ca32ecc..1a4086ad2ab11 100644 --- a/frame/staking/src/pallet/impls.rs +++ b/frame/staking/src/pallet/impls.rs @@ -1329,7 +1329,7 @@ impl ScoreProvider for Pallet { Self::weight_of(who) } - #[cfg(any(feature = "runtime-benchmarks", feature = "fuzz"))] + #[cfg(feature = "runtime-benchmarks")] fn set_score_of(who: &T::AccountId, weight: Self::Score) { // this will clearly results in an inconsistent state, but it should not matter for a // benchmark. 
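A note on the `runtime_benchmarks_enabled!` and `runtime_benchmarks_or_fuzz_enabled!` blocks in this commit: a literal `#[cfg(feature = "runtime-benchmarks")]` written at a call site is resolved against the calling crate's features, while the macros produced by `generate_feature_enabled_macro!` (defined in `primitives/core` later in this diff) are themselves conditionally defined, so the gate follows the declaring crate's feature set. A short sketch of the declare-and-use pattern; `bench_only_helper` is an illustrative name.

// In the declaring crate (e.g. sp-staking): emit a macro that expands its
// input only if *this* crate was compiled with `runtime-benchmarks`.
sp_core::generate_feature_enabled_macro!(runtime_benchmarks_enabled, feature = "runtime-benchmarks", $);

// In a downstream crate: the block is kept or dropped according to the
// declaring crate's feature, not the caller's.
runtime_benchmarks_enabled! {
    fn bench_only_helper() {
        // benchmark-only code goes here
    }
}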
@@ -1594,28 +1594,27 @@ impl StakingInterface for Pallet { Self::nominate(RawOrigin::Signed(ctrl).into(), targets) } - #[cfg(feature = "runtime-benchmarks")] - fn nominations(who: Self::AccountId) -> Option> { - Nominators::::get(who).map(|n| n.targets.into_inner()) - } + sp_staking::runtime_benchmarks_enabled! { + fn nominations(who: Self::AccountId) -> Option> { + Nominators::::get(who).map(|n| n.targets.into_inner()) + } - #[cfg(feature = "runtime-benchmarks")] - fn add_era_stakers( - current_era: &EraIndex, - stash: &T::AccountId, - exposures: Vec<(Self::AccountId, Self::Balance)>, - ) { - let others = exposures - .iter() - .map(|(who, value)| IndividualExposure { who: who.clone(), value: value.clone() }) - .collect::>(); - let exposure = Exposure { total: Default::default(), own: Default::default(), others }; - Self::add_era_stakers(current_era.clone(), stash.clone(), exposure) - } + fn add_era_stakers( + current_era: &EraIndex, + stash: &T::AccountId, + exposures: Vec<(Self::AccountId, Self::Balance)>, + ) { + let others = exposures + .iter() + .map(|(who, value)| IndividualExposure { who: who.clone(), value: value.clone() }) + .collect::>(); + let exposure = Exposure { total: Default::default(), own: Default::default(), others }; + >::insert(&current_era, &stash, &exposure); + } - #[cfg(feature = "runtime-benchmarks")] - fn set_current_era(era: EraIndex) { - CurrentEra::::put(era); + fn set_current_era(era: EraIndex) { + CurrentEra::::put(era); + } } } diff --git a/frame/transaction-payment/asset-tx-payment/Cargo.toml b/frame/transaction-payment/asset-tx-payment/Cargo.toml index b192c4e9cd96e..8e4645a2677f9 100644 --- a/frame/transaction-payment/asset-tx-payment/Cargo.toml +++ b/frame/transaction-payment/asset-tx-payment/Cargo.toml @@ -19,12 +19,10 @@ sp-io = { version = "7.0.0", default-features = false, path = "../../../primitiv sp-runtime = { version = "7.0.0", default-features = false, path = "../../../primitives/runtime" } sp-std = { version = "5.0.0", default-features = false, path = "../../../primitives/std" } -# optional dependencies for cargo features frame-support = { version = "4.0.0-dev", default-features = false, path = "../../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, path = ".."
} frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../benchmarking", optional = true } -pallet-assets = { default-features = false, optional = true, path = "../../assets" } # Other dependencies codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } @@ -40,7 +38,6 @@ pallet-assets = { version = "4.0.0-dev", path = "../../assets" } pallet-authorship = { version = "4.0.0-dev", path = "../../authorship" } pallet-balances = { version = "4.0.0-dev", path = "../../balances" } - [features] default = ["std"] std = [ @@ -60,6 +57,5 @@ runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "frame-system/runtime-benchmarks", - "pallet-assets/runtime-benchmarks", ] try-runtime = ["frame-support/try-runtime"] diff --git a/frame/transaction-payment/asset-tx-payment/src/tests.rs b/frame/transaction-payment/asset-tx-payment/src/tests.rs index 02e15654f3eed..b70a88d02c6e1 100644 --- a/frame/transaction-payment/asset-tx-payment/src/tests.rs +++ b/frame/transaction-payment/asset-tx-payment/src/tests.rs @@ -173,8 +173,9 @@ impl pallet_assets::Config for Runtime { type Extra = (); type WeightInfo = (); type RemoveItemsLimit = ConstU32<1000>; - #[cfg(feature = "runtime-benchmarks")] - type BenchmarkHelper = (); + pallet_assets::runtime_benchmarks_enabled! { + type BenchmarkHelper = (); + } } pub struct HardcodedAuthor; diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index fda7604d5337f..30d0cc199b74d 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -622,3 +622,49 @@ macro_rules! bounded_btree_map { } }; } + +/// Generates a macro for checking if a certain feature is enabled. +/// +/// These feature checking macros can be used to conditionally enable/disable code in a dependent +/// crate based on a feature in the crate where the macro is called. +#[macro_export] +// We need to skip formatting this macro because of this bug: +// https://github.com/rust-lang/rustfmt/issues/5283 +#[rustfmt::skip] +macro_rules! generate_feature_enabled_macro { + ( $macro_name:ident, $feature_name:meta, $d:tt ) => { + /// Enable/disable the given code depending on + #[doc = concat!("`", stringify!($feature_name), "`")] + /// being enabled for the crate or not. + /// + /// # Example + /// + /// ```nocompile + /// // Will add the code depending on the feature being enabled or not. + #[doc = concat!(stringify!($macro_name), "!( println!(\"Hello\") )")] + /// ``` + #[cfg($feature_name)] + #[macro_export] + macro_rules! $macro_name { + ( $d ( $d input:tt )* ) => { + $d ( $d input )* + } + } + + /// Enable/disable the given code depending on + #[doc = concat!("`", stringify!($feature_name), "`")] + /// being enabled for the crate or not. + /// + /// # Example + /// + /// ```nocompile + /// // Will add the code depending on the feature being enabled or not. + #[doc = concat!(stringify!($macro_name), "!( println!(\"Hello\") )")] + /// ``` + #[cfg(not($feature_name))] + #[macro_export] + macro_rules! 
$macro_name { + ( $d ( $d input:tt )* ) => {}; + } + }; +} diff --git a/primitives/staking/Cargo.toml b/primitives/staking/Cargo.toml index 550c1485e992c..35feae43ebb8c 100644 --- a/primitives/staking/Cargo.toml +++ b/primitives/staking/Cargo.toml @@ -15,6 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +sp-core = { version = "7.0.0", default-features = false, path = "../core" } sp-runtime = { version = "7.0.0", default-features = false, path = "../runtime" } sp-std = { version = "5.0.0", default-features = false, path = "../std" } @@ -23,6 +24,7 @@ default = ["std"] std = [ "codec/std", "scale-info/std", + "sp-core/std", "sp-runtime/std", "sp-std/std", ] diff --git a/primitives/staking/src/lib.rs b/primitives/staking/src/lib.rs index 703f0abe80458..9eb4a4890cdf8 100644 --- a/primitives/staking/src/lib.rs +++ b/primitives/staking/src/lib.rs @@ -190,3 +190,5 @@ pub trait StakingInterface { #[cfg(feature = "runtime-benchmarks")] fn set_current_era(era: EraIndex); } + +sp_core::generate_feature_enabled_macro!(runtime_benchmarks_enabled, feature = "runtime-benchmarks", $); diff --git a/utils/wasm-builder/src/wasm_project.rs b/utils/wasm-builder/src/wasm_project.rs index 7688069dd7cca..d17997360deef 100644 --- a/utils/wasm-builder/src/wasm_project.rs +++ b/utils/wasm-builder/src/wasm_project.rs @@ -379,14 +379,15 @@ fn find_package_by_manifest_path<'a>( if let Some(pkg) = crate_metadata.packages.iter().find(|p| p.manifest_path == manifest_path) { return pkg } + let pkgs_by_name = crate_metadata .packages .iter() .filter(|p| p.name == pkg_name) .collect::>(); - let mut pkgs = pkgs_by_name.iter(); - if let Some(pkg) = pkgs.next() { - if pkgs.next().is_some() { + + if let Some(pkg) = pkgs_by_name.first() { + if pkgs_by_name.len() > 1 { panic!( "Found multiple packages matching the name {pkg_name} ({manifest_path:?}): {:?}", pkgs_by_name @@ -395,7 +396,7 @@ fn find_package_by_manifest_path<'a>( return pkg } } else { - panic!("Failed to find entry for package {pkg_name} ({manifest_path:?})"); + panic!("Failed to find entry for package {pkg_name} ({manifest_path:?})."); } } From b65c9f044804b6f401197b2567e1ab10b4776159 Mon Sep 17 00:00:00 2001 From: Alexander Popiak Date: Wed, 14 Dec 2022 10:59:04 +0100 Subject: [PATCH 24/29] add numerator and denominator to Rational128 Debug impl and increase precision of float representation (#12914) --- primitives/arithmetic/src/rational.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/primitives/arithmetic/src/rational.rs b/primitives/arithmetic/src/rational.rs index 54cabfc6214e8..447b37551bb1f 100644 --- a/primitives/arithmetic/src/rational.rs +++ b/primitives/arithmetic/src/rational.rs @@ -94,14 +94,14 @@ pub struct Rational128(u128, u128); #[cfg(feature = "std")] impl sp_std::fmt::Debug for Rational128 { fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { - write!(f, "Rational128({:.4})", self.0 as f32 / self.1 as f32) + write!(f, "Rational128({} / {} ≈ {:.8})", self.0, self.1, self.0 as f64 / self.1 as f64) } } #[cfg(not(feature = "std"))] impl sp_std::fmt::Debug for Rational128 { fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { - write!(f, "Rational128(..)") + write!(f, "Rational128({} / {})", self.0, self.1) } } From 59b590300c368f285e885a5304d231dd994b6020 Mon Sep 17 00:00:00 
2001 From: Arkadiy Paronyan Date: Wed, 14 Dec 2022 12:03:16 +0100 Subject: [PATCH 25/29] Fix state-db pinning (#12927) * Pin all canonicalized blocks * Added a test * Docs --- client/db/src/lib.rs | 1 + client/state-db/src/lib.rs | 10 +++++ client/state-db/src/noncanonical.rs | 65 +++++++++++++++++++++-------- 3 files changed, 58 insertions(+), 18 deletions(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 426876f5cba8c..4452d5dcb3f29 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -2002,6 +2002,7 @@ impl sc_client_api::backend::Backend for Backend { .map_err(sp_blockchain::Error::from_state_db)?; Err(e) } else { + self.storage.state_db.sync(); Ok(()) } } diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index 94d41787701b3..5e01a0e063ac1 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -470,6 +470,10 @@ impl StateDbSync { } } + fn sync(&mut self) { + self.non_canonical.sync(); + } + pub fn get( &self, key: &Q, @@ -573,6 +577,12 @@ impl StateDb { self.db.write().unpin(hash) } + /// Confirm that all changes made to commit sets are on disk. Allows for temporarily pinned + /// blocks to be released. + pub fn sync(&self) { + self.db.write().sync() + } + /// Get a value from non-canonical/pruning overlay or the backing DB. pub fn get( &self, diff --git a/client/state-db/src/noncanonical.rs b/client/state-db/src/noncanonical.rs index 7cb3017966b0f..84ba94c052909 100644 --- a/client/state-db/src/noncanonical.rs +++ b/client/state-db/src/noncanonical.rs @@ -38,7 +38,7 @@ pub struct NonCanonicalOverlay { // would be deleted but kept around because block is pinned, ref counted. pinned: HashMap, pinned_insertions: HashMap, u32)>, - last_canon_pinned: Option, + pinned_canonincalized: Vec, } #[cfg_attr(test, derive(PartialEq, Debug))] @@ -226,7 +226,7 @@ impl NonCanonicalOverlay { pinned: Default::default(), pinned_insertions: Default::default(), values, - last_canon_pinned: None, + pinned_canonincalized: Default::default(), }) } @@ -350,6 +350,18 @@ impl NonCanonicalOverlay { self.last_canonicalized.as_ref().map(|&(_, n)| n) } + /// Confirm that all changes made to commit sets are on disk. Allows for temporarily pinned + /// blocks to be released. + pub fn sync(&mut self) { + let mut pinned = std::mem::take(&mut self.pinned_canonincalized); + for hash in pinned.iter() { + self.unpin(hash) + } + pinned.clear(); + // Reuse the same memory buffer + self.pinned_canonincalized = pinned; + } + /// Select a top-level root and canonicalized it. Discards all sibling subtrees and the root. /// Add a set of changes of the canonicalized block to `CommitSet` /// Return the block number of the canonicalized block @@ -371,13 +383,9 @@ impl NonCanonicalOverlay { // No failures are possible beyond this point. 
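The lifecycle this change introduces, in isolation: `canonicalize()` force-pins the block and remembers it, the backend commits the set to disk, and `sync()` then drains the remembered list and unpins, letting the overlay data be released. A self-contained toy model follows; the `Overlay` struct, `Hash` alias, and ref-count bodies are simplified stand-ins for the real `NonCanonicalOverlay`, and `pinned_canonicalized` mirrors the diff's `pinned_canonincalized` field.

// Toy model of the pin-until-synced protocol; all types are stand-ins.
type Hash = [u8; 32];

#[derive(Default)]
struct Overlay {
    pinned_canonicalized: Vec<Hash>,
    pin_counts: std::collections::HashMap<Hash, u32>,
}

impl Overlay {
    fn canonicalize(&mut self, hash: Hash) {
        // Force-pin so the block's data is not discarded before it is on disk.
        self.pin(hash);
        self.pinned_canonicalized.push(hash);
    }

    fn sync(&mut self) {
        // Called once the commit set is known to be written out.
        let pinned = std::mem::take(&mut self.pinned_canonicalized);
        for hash in pinned {
            self.unpin(hash);
        }
    }

    fn pin(&mut self, hash: Hash) {
        *self.pin_counts.entry(hash).or_insert(0) += 1;
    }

    fn unpin(&mut self, hash: Hash) {
        if let Some(count) = self.pin_counts.get_mut(&hash) {
            *count -= 1;
            if *count == 0 {
                // Data for `hash` may now be released.
                self.pin_counts.remove(&hash);
            }
        }
    }
}

fn main() {
    let mut overlay = Overlay::default();
    overlay.canonicalize([1u8; 32]);
    assert!(overlay.pin_counts.contains_key(&[1u8; 32])); // still pinned
    overlay.sync(); // commit confirmed on disk
    assert!(overlay.pin_counts.is_empty()); // released
}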
- // Unpin previously canonicalized block - if let Some(prev_hash) = self.last_canon_pinned.take() { - self.unpin(&prev_hash); - } // Force pin canonicalized block so that it is no discarded immediately self.pin(hash); - self.last_canon_pinned = Some(hash.clone()); + self.pinned_canonincalized.push(hash.clone()); let mut discarded_journals = Vec::new(); let mut discarded_blocks = Vec::new(); @@ -720,16 +728,17 @@ mod tests { let mut commit = CommitSet::default(); overlay.canonicalize(&h1, &mut commit).unwrap(); db.commit(&commit); - assert!(contains(&overlay, 5)); + overlay.sync(); + assert!(!contains(&overlay, 5)); assert!(contains(&overlay, 7)); assert_eq!(overlay.levels.len(), 1); - assert_eq!(overlay.parents.len(), 2); + assert_eq!(overlay.parents.len(), 1); let mut commit = CommitSet::default(); overlay.canonicalize(&h2, &mut commit).unwrap(); - assert!(!contains(&overlay, 5)); db.commit(&commit); + overlay.sync(); assert_eq!(overlay.levels.len(), 0); - assert_eq!(overlay.parents.len(), 1); + assert_eq!(overlay.parents.len(), 0); assert!(db.data_eq(&make_db(&[1, 4, 6, 7, 8]))); } @@ -746,8 +755,7 @@ mod tests { let mut commit = CommitSet::default(); overlay.canonicalize(&h_1, &mut commit).unwrap(); db.commit(&commit); - // explicitly unpin last block - overlay.unpin(&h_1); + overlay.sync(); assert!(!contains(&overlay, 1)); } @@ -834,9 +842,8 @@ mod tests { // canonicalize 1. 2 and all its children should be discarded let mut commit = CommitSet::default(); overlay.canonicalize(&h_1, &mut commit).unwrap(); - // explicitly unpin last block - overlay.unpin(&h_1); db.commit(&commit); + overlay.sync(); assert_eq!(overlay.levels.len(), 2); assert_eq!(overlay.parents.len(), 6); assert!(!contains(&overlay, 1)); @@ -856,8 +863,8 @@ mod tests { // canonicalize 1_2. 
1_1 and all its children should be discarded let mut commit = CommitSet::default(); overlay.canonicalize(&h_1_2, &mut commit).unwrap(); - overlay.unpin(&h_1_2); db.commit(&commit); + overlay.sync(); assert_eq!(overlay.levels.len(), 1); assert_eq!(overlay.parents.len(), 3); assert!(!contains(&overlay, 11)); @@ -873,8 +880,8 @@ mod tests { // canonicalize 1_2_2 let mut commit = CommitSet::default(); overlay.canonicalize(&h_1_2_2, &mut commit).unwrap(); - overlay.unpin(&h_1_2_2); db.commit(&commit); + overlay.sync(); assert_eq!(overlay.levels.len(), 0); assert_eq!(overlay.parents.len(), 0); assert!(db.data_eq(&make_db(&[1, 12, 122]))); @@ -958,6 +965,28 @@ mod tests { assert!(!contains(&overlay, 1)); } + #[test] + fn pins_canonicalized() { + let mut db = make_db(&[]); + + let (h_1, c_1) = (H256::random(), make_changeset(&[1], &[])); + let (h_2, c_2) = (H256::random(), make_changeset(&[2], &[])); + + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + db.commit(&overlay.insert(&h_1, 1, &H256::default(), c_1).unwrap()); + db.commit(&overlay.insert(&h_2, 2, &h_1, c_2).unwrap()); + + let mut commit = CommitSet::default(); + overlay.canonicalize(&h_1, &mut commit).unwrap(); + overlay.canonicalize(&h_2, &mut commit).unwrap(); + assert!(contains(&overlay, 1)); + assert!(contains(&overlay, 2)); + db.commit(&commit); + overlay.sync(); + assert!(!contains(&overlay, 1)); + assert!(!contains(&overlay, 2)); + } + #[test] fn pin_keeps_parent() { let mut db = make_db(&[]); @@ -1019,8 +1048,8 @@ mod tests { let mut commit = CommitSet::default(); overlay.canonicalize(&h21, &mut commit).unwrap(); // h11 should stay in the DB - overlay.unpin(&h21); db.commit(&commit); + overlay.sync(); assert!(!contains(&overlay, 21)); } From 2e21c35f879e904101d8305eef7a203d95dd6cd6 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Wed, 14 Dec 2022 12:36:33 +0100 Subject: [PATCH 26/29] [ci] add job switcher (#12922) --- .gitlab-ci.yml | 9 ++++++++- scripts/ci/gitlab/pipeline/build.yml | 3 +++ scripts/ci/gitlab/pipeline/publish.yml | 1 + scripts/ci/gitlab/pipeline/test.yml | 2 ++ 4 files changed, 14 insertions(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 25d61cf349615..dcc0cbb7c9693 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -81,8 +81,14 @@ default: paths: - artifacts/ +.job-switcher: + before_script: + - if echo "$CI_DISABLED_JOBS" | grep -xF "$CI_JOB_NAME"; then echo "The job has been cancelled in CI settings"; exit 0; fi + .kubernetes-env: image: "${CI_IMAGE}" + before_script: + - !reference [.job-switcher, before_script] tags: - kubernetes-parity-build @@ -95,6 +101,7 @@ default: .pipeline-stopper-vars: script: + - !reference [.job-switcher, before_script] - echo "Collecting env variables for the cancel-pipeline job" - echo "FAILED_JOB_URL=${CI_JOB_URL}" > pipeline-stopper.env - echo "FAILED_JOB_NAME=${CI_JOB_NAME}" >> pipeline-stopper.env @@ -110,6 +117,7 @@ default: before_script: # TODO: remove unset invocation when we'll be free from 'ENV RUSTC_WRAPPER=sccache' & sccache itself in all images - unset RUSTC_WRAPPER + - !reference [.job-switcher, before_script] - !reference [.rust-info-script, script] - !reference [.rusty-cachier, before_script] - !reference [.pipeline-stopper-vars, script] @@ -300,7 +308,6 @@ rusty-cachier-notify: PR_NUM: "${PR_NUM}" trigger: project: "parity/infrastructure/ci_cd/pipeline-stopper" - branch: "as-improve" remove-cancel-pipeline-message: stage: .post diff --git a/scripts/ci/gitlab/pipeline/build.yml 
b/scripts/ci/gitlab/pipeline/build.yml index 2f8cff7b3ffa6..ba529569d0fc1 100644 --- a/scripts/ci/gitlab/pipeline/build.yml +++ b/scripts/ci/gitlab/pipeline/build.yml @@ -62,6 +62,7 @@ build-linux-substrate: - job: test-linux-stable artifacts: false before_script: + - !reference [.job-switcher, before_script] - mkdir -p ./artifacts/substrate/ - !reference [.rusty-cachier, before_script] # tldr: we need to checkout the branch HEAD explicitly because of our dynamic versioning approach while building the substrate binary @@ -94,6 +95,7 @@ build-linux-substrate: # this variable gets overriden by "rusty-cachier environment inject", use the value as default CARGO_TARGET_DIR: "$CI_PROJECT_DIR/target" before_script: + - !reference [.job-switcher, before_script] - mkdir -p ./artifacts/subkey - !reference [.rusty-cachier, before_script] script: @@ -118,6 +120,7 @@ build-subkey-macos: # duplicating before_script & script sections from .build-subkey hidden job # to overwrite rusty-cachier integration as it doesn't work on macos before_script: + - !reference [.job-switcher, before_script] - mkdir -p ./artifacts/subkey script: - cd ./bin/utils/subkey diff --git a/scripts/ci/gitlab/pipeline/publish.yml b/scripts/ci/gitlab/pipeline/publish.yml index 381a1bc420ef3..6a0d6d6341304 100644 --- a/scripts/ci/gitlab/pipeline/publish.yml +++ b/scripts/ci/gitlab/pipeline/publish.yml @@ -12,6 +12,7 @@ DOCKERFILE: $PRODUCT.Dockerfile IMAGE_NAME: docker.io/$IMAGE_PATH before_script: + - !reference [.job-switcher, before_script] - cd ./artifacts/$PRODUCT/ - VERSION="$(cat ./VERSION)" - echo "${PRODUCT} version = ${VERSION}" diff --git a/scripts/ci/gitlab/pipeline/test.yml b/scripts/ci/gitlab/pipeline/test.yml index ea1240af8bd57..a468a7b04caeb 100644 --- a/scripts/ci/gitlab/pipeline/test.yml +++ b/scripts/ci/gitlab/pipeline/test.yml @@ -65,6 +65,7 @@ cargo-check-benches: before_script: # perform rusty-cachier operations before any further modifications to the git repo to make cargo feel cheated not so much - !reference [.rust-info-script, script] + - !reference [.job-switcher, before_script] - !reference [.rusty-cachier, before_script] - !reference [.pipeline-stopper-vars, script] # merges in the master branch on PRs @@ -414,6 +415,7 @@ cargo-check-each-crate-macos: - .collect-artifacts - .pipeline-stopper-artifacts before_script: + - !reference [.job-switcher, before_script] - !reference [.rust-info-script, script] - !reference [.pipeline-stopper-vars, script] variables: From 2f6105bbc6a230210d022e48c568404acd501a81 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Wed, 14 Dec 2022 14:56:17 +0100 Subject: [PATCH 27/29] Use LOG_TARGET in consensus related crates (#12875) * Use shared LOG_TARGET in consensus related crates * Rename target from "afg" to "grandpa" --- client/consensus/aura/src/import_queue.rs | 7 +- client/consensus/aura/src/lib.rs | 8 +- client/consensus/babe/src/aux_schema.rs | 4 +- client/consensus/babe/src/lib.rs | 86 +++++++++++-------- client/consensus/babe/src/tests.rs | 10 +-- client/consensus/babe/src/verification.rs | 15 ++-- client/consensus/common/src/import_queue.rs | 37 +++++--- .../common/src/import_queue/basic_queue.rs | 25 +++--- .../manual-seal/src/consensus/babe.rs | 6 +- client/consensus/manual-seal/src/lib.rs | 2 + client/consensus/pow/src/lib.rs | 20 +++-- client/consensus/pow/src/worker.rs | 34 ++------ client/consensus/slots/src/lib.rs | 16 ++-- client/consensus/slots/src/slots.rs | 4 +- client/finality-grandpa/src/authorities.rs | 20 +++-- 
client/finality-grandpa/src/aux_schema.rs | 13 +-- .../src/communication/gossip.rs | 47 ++++++---- .../finality-grandpa/src/communication/mod.rs | 39 ++++++--- .../src/communication/periodic.rs | 8 +- client/finality-grandpa/src/environment.rs | 80 ++++++++++++----- client/finality-grandpa/src/finality_proof.rs | 10 +-- client/finality-grandpa/src/import.rs | 17 ++-- client/finality-grandpa/src/lib.rs | 25 ++++-- client/finality-grandpa/src/observer.rs | 6 +- client/finality-grandpa/src/until_imported.rs | 6 +- frame/aura/src/lib.rs | 4 +- frame/babe/src/equivocation.rs | 16 ++-- frame/babe/src/lib.rs | 2 + frame/grandpa/src/equivocation.rs | 16 ++-- frame/grandpa/src/lib.rs | 2 + frame/grandpa/src/migrations/v4.rs | 5 +- 31 files changed, 343 insertions(+), 247 deletions(-) diff --git a/client/consensus/aura/src/import_queue.rs b/client/consensus/aura/src/import_queue.rs index 07f982542c95b..d5cf40f33359e 100644 --- a/client/consensus/aura/src/import_queue.rs +++ b/client/consensus/aura/src/import_queue.rs @@ -20,6 +20,7 @@ use crate::{ aura_err, authorities, find_pre_digest, slot_author, AuthorityId, CompatibilityMode, Error, + LOG_TARGET, }; use codec::{Codec, Decode, Encode}; use log::{debug, info, trace}; @@ -88,7 +89,7 @@ where .map_err(Error::Client)? { info!( - target: "aura", + target: LOG_TARGET, "Slot author is equivocating at slot {} with headers {:?} and {:?}", slot, equivocation_proof.first_header.hash(), @@ -256,7 +257,7 @@ where block.body = Some(inner_body); } - trace!(target: "aura", "Checked {:?}; importing.", pre_header); + trace!(target: LOG_TARGET, "Checked {:?}; importing.", pre_header); telemetry!( self.telemetry; CONSENSUS_TRACE; @@ -272,7 +273,7 @@ where Ok((block, None)) }, CheckedHeader::Deferred(a, b) => { - debug!(target: "aura", "Checking {:?} failed; {:?}, {:?}.", hash, a, b); + debug!(target: LOG_TARGET, "Checking {:?} failed; {:?}, {:?}.", hash, a, b); telemetry!( self.telemetry; CONSENSUS_DEBUG; diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 46b9124f9077f..a8ed80d7c0432 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -72,6 +72,8 @@ pub use sp_consensus_aura::{ AuraApi, ConsensusLog, SlotDuration, AURA_ENGINE_ID, }; +const LOG_TARGET: &str = "aura"; + type AuthorityId
<P> = <P as Pair>
::Public; /// Run `AURA` in a compatibility mode. @@ -530,7 +532,7 @@ where } fn aura_err(error: Error) -> Error { - debug!(target: "aura", "{}", error); + debug!(target: LOG_TARGET, "{}", error); error } @@ -580,10 +582,10 @@ pub fn find_pre_digest(header: &B::Header) -> Resul let mut pre_digest: Option = None; for log in header.digest().logs() { - trace!(target: "aura", "Checking log {:?}", log); + trace!(target: LOG_TARGET, "Checking log {:?}", log); match (CompatibleDigestItem::::as_aura_pre_digest(log), pre_digest.is_some()) { (Some(_), true) => return Err(aura_err(Error::MultipleHeaders)), - (None, _) => trace!(target: "aura", "Ignoring digest not meant for us"), + (None, _) => trace!(target: LOG_TARGET, "Ignoring digest not meant for us"), (s, false) => pre_digest = s, } } diff --git a/client/consensus/babe/src/aux_schema.rs b/client/consensus/babe/src/aux_schema.rs index fef84bda86974..2a09aa738f4ec 100644 --- a/client/consensus/babe/src/aux_schema.rs +++ b/client/consensus/babe/src/aux_schema.rs @@ -21,7 +21,7 @@ use codec::{Decode, Encode}; use log::info; -use crate::{migration::EpochV0, Epoch}; +use crate::{migration::EpochV0, Epoch, LOG_TARGET}; use sc_client_api::backend::AuxStore; use sc_consensus_epochs::{ migration::{EpochChangesV0For, EpochChangesV1For}, @@ -82,7 +82,7 @@ pub fn load_epoch_changes( let epoch_changes = SharedEpochChanges::::new(maybe_epoch_changes.unwrap_or_else(|| { info!( - target: "babe", + target: LOG_TARGET, "👶 Creating empty BABE epoch changes on what appears to be first startup.", ); EpochChangesFor::::default() diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index 84b6893648f49..b50874ae3401d 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -149,6 +149,8 @@ pub mod aux_schema; #[cfg(test)] mod tests; +const LOG_TARGET: &str = "babe"; + /// BABE epoch information #[derive(Decode, Encode, PartialEq, Eq, Clone, Debug)] pub struct Epoch { @@ -323,7 +325,7 @@ impl From> for String { } fn babe_err(error: Error) -> Error { - debug!(target: "babe", "{}", error); + debug!(target: LOG_TARGET, "{}", error); error } @@ -345,7 +347,7 @@ where let block_id = if client.usage_info().chain.finalized_state.is_some() { BlockId::Hash(client.usage_info().chain.best_hash) } else { - debug!(target: "babe", "No finalized state is available. Reading config from genesis"); + debug!(target: LOG_TARGET, "No finalized state is available. 
Reading config from genesis"); BlockId::Hash(client.usage_info().chain.genesis_hash) }; @@ -486,7 +488,7 @@ where telemetry, }; - info!(target: "babe", "👶 Starting BABE Authorship worker"); + info!(target: LOG_TARGET, "👶 Starting BABE Authorship worker"); let slot_worker = sc_consensus_slots::start_slot_worker( babe_link.config.slot_duration(), @@ -523,12 +525,8 @@ fn aux_storage_cleanup + HeaderBackend, Block: B Ok(meta) => { hashes.insert(meta.parent); }, - Err(err) => warn!( - target: "babe", - "Failed to lookup metadata for block `{:?}`: {}", - first, - err, - ), + Err(err) => + warn!(target: LOG_TARGET, "Failed to lookup metadata for block `{:?}`: {}", first, err,), } // Cleans data for finalized block's ancestors @@ -716,7 +714,7 @@ where type AuxData = ViableEpochDescriptor, Epoch>; fn logging_target(&self) -> &'static str { - "babe" + LOG_TARGET } fn block_import(&mut self) -> &mut Self::BlockImport { @@ -749,7 +747,7 @@ where slot: Slot, epoch_descriptor: &ViableEpochDescriptor, Epoch>, ) -> Option { - debug!(target: "babe", "Attempting to claim slot {}", slot); + debug!(target: LOG_TARGET, "Attempting to claim slot {}", slot); let s = authorship::claim_slot( slot, self.epoch_changes @@ -760,7 +758,7 @@ where ); if s.is_some() { - debug!(target: "babe", "Claimed slot {}", slot); + debug!(target: LOG_TARGET, "Claimed slot {}", slot); } s @@ -777,7 +775,7 @@ where Ok(()) => true, Err(e) => if e.is_full() { - warn!(target: "babe", "Trying to notify a slot but the channel is full"); + warn!(target: LOG_TARGET, "Trying to notify a slot but the channel is full"); true } else { false @@ -904,10 +902,10 @@ pub fn find_pre_digest(header: &B::Header) -> Result = None; for log in header.digest().logs() { - trace!(target: "babe", "Checking log {:?}, looking for pre runtime digest", log); + trace!(target: LOG_TARGET, "Checking log {:?}, looking for pre runtime digest", log); match (log.as_babe_pre_digest(), pre_digest.is_some()) { (Some(_), true) => return Err(babe_err(Error::MultiplePreRuntimeDigests)), - (None, _) => trace!(target: "babe", "Ignoring digest not meant for us"), + (None, _) => trace!(target: LOG_TARGET, "Ignoring digest not meant for us"), (s, false) => pre_digest = s, } } @@ -920,13 +918,13 @@ fn find_next_epoch_digest( ) -> Result, Error> { let mut epoch_digest: Option<_> = None; for log in header.digest().logs() { - trace!(target: "babe", "Checking log {:?}, looking for epoch change digest.", log); + trace!(target: LOG_TARGET, "Checking log {:?}, looking for epoch change digest.", log); let log = log.try_to::(OpaqueDigestItemId::Consensus(&BABE_ENGINE_ID)); match (log, epoch_digest.is_some()) { (Some(ConsensusLog::NextEpochData(_)), true) => return Err(babe_err(Error::MultipleEpochChangeDigests)), (Some(ConsensusLog::NextEpochData(epoch)), false) => epoch_digest = Some(epoch), - _ => trace!(target: "babe", "Ignoring digest not meant for us"), + _ => trace!(target: LOG_TARGET, "Ignoring digest not meant for us"), } } @@ -939,13 +937,13 @@ fn find_next_config_digest( ) -> Result, Error> { let mut config_digest: Option<_> = None; for log in header.digest().logs() { - trace!(target: "babe", "Checking log {:?}, looking for epoch change digest.", log); + trace!(target: LOG_TARGET, "Checking log {:?}, looking for epoch change digest.", log); let log = log.try_to::(OpaqueDigestItemId::Consensus(&BABE_ENGINE_ID)); match (log, config_digest.is_some()) { (Some(ConsensusLog::NextConfigData(_)), true) => return Err(babe_err(Error::MultipleConfigChangeDigests)), 
(Some(ConsensusLog::NextConfigData(config)), false) => config_digest = Some(config), - _ => trace!(target: "babe", "Ignoring digest not meant for us"), + _ => trace!(target: LOG_TARGET, "Ignoring digest not meant for us"), } } @@ -1075,7 +1073,10 @@ where None => match generate_key_owner_proof(&best_id)? { Some(proof) => proof, None => { - debug!(target: "babe", "Equivocation offender is not part of the authority set."); + debug!( + target: LOG_TARGET, + "Equivocation offender is not part of the authority set." + ); return Ok(()) }, }, @@ -1091,7 +1092,7 @@ where ) .map_err(Error::RuntimeApi)?; - info!(target: "babe", "Submitted equivocation report for author {:?}", author); + info!(target: LOG_TARGET, "Submitted equivocation report for author {:?}", author); Ok(()) } @@ -1121,7 +1122,7 @@ where mut block: BlockImportParams, ) -> BlockVerificationResult { trace!( - target: "babe", + target: LOG_TARGET, "Verifying origin: {:?} header: {:?} justification(s): {:?} body: {:?}", block.origin, block.header, @@ -1140,7 +1141,11 @@ where return Ok((block, Default::default())) } - debug!(target: "babe", "We have {:?} logs in this header", block.header.digest().logs().len()); + debug!( + target: LOG_TARGET, + "We have {:?} logs in this header", + block.header.digest().logs().len() + ); let create_inherent_data_providers = self .create_inherent_data_providers @@ -1204,7 +1209,10 @@ where ) .await { - warn!(target: "babe", "Error checking/reporting BABE equivocation: {}", err); + warn!( + target: LOG_TARGET, + "Error checking/reporting BABE equivocation: {}", err + ); } if let Some(inner_body) = block.body { @@ -1233,7 +1241,7 @@ where block.body = Some(inner_body); } - trace!(target: "babe", "Checked {:?}; importing.", pre_header); + trace!(target: LOG_TARGET, "Checked {:?}; importing.", pre_header); telemetry!( self.telemetry; CONSENSUS_TRACE; @@ -1252,7 +1260,7 @@ where Ok((block, Default::default())) }, CheckedHeader::Deferred(a, b) => { - debug!(target: "babe", "Checking {:?} failed; {:?}, {:?}.", hash, a, b); + debug!(target: LOG_TARGET, "Checking {:?} failed; {:?}, {:?}.", hash, a, b); telemetry!( self.telemetry; CONSENSUS_DEBUG; @@ -1520,21 +1528,23 @@ where log::Level::Info }; - log!(target: "babe", - log_level, - "👶 New epoch {} launching at block {} (block slot {} >= start slot {}).", - viable_epoch.as_ref().epoch_index, - hash, - slot, - viable_epoch.as_ref().start_slot, + log!( + target: LOG_TARGET, + log_level, + "👶 New epoch {} launching at block {} (block slot {} >= start slot {}).", + viable_epoch.as_ref().epoch_index, + hash, + slot, + viable_epoch.as_ref().start_slot, ); let next_epoch = viable_epoch.increment((next_epoch_descriptor, epoch_config)); - log!(target: "babe", - log_level, - "👶 Next epoch starts at slot {}", - next_epoch.as_ref().start_slot, + log!( + target: LOG_TARGET, + log_level, + "👶 Next epoch starts at slot {}", + next_epoch.as_ref().start_slot, ); // prune the tree of epochs not part of the finalized chain or @@ -1565,7 +1575,7 @@ where }; if let Err(e) = prune_and_import() { - debug!(target: "babe", "Failed to launch next epoch: {}", e); + debug!(target: LOG_TARGET, "Failed to launch next epoch: {}", e); *epoch_changes = old_epoch_changes.expect("set `Some` above and not taken; qed"); return Err(e) diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 7f51eb2c51977..d7691235a550d 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -323,7 +323,7 @@ impl TestNetFactory for BabeTestNet { 
use substrate_test_runtime_client::DefaultTestClientBuilderExt; let client = client.as_client(); - trace!(target: "babe", "Creating a verifier"); + trace!(target: LOG_TARGET, "Creating a verifier"); // ensure block import and verifier are linked correctly. let data = maybe_link @@ -352,12 +352,12 @@ impl TestNetFactory for BabeTestNet { } fn peer(&mut self, i: usize) -> &mut BabePeer { - trace!(target: "babe", "Retrieving a peer"); + trace!(target: LOG_TARGET, "Retrieving a peer"); &mut self.peers[i] } fn peers(&self) -> &Vec { - trace!(target: "babe", "Retrieving peers"); + trace!(target: LOG_TARGET, "Retrieving peers"); &self.peers } @@ -583,7 +583,7 @@ fn can_author_block() { // with secondary slots enabled it should never be empty match claim_slot(i.into(), &epoch, &keystore) { None => i += 1, - Some(s) => debug!(target: "babe", "Authored block {:?}", s.0), + Some(s) => debug!(target: LOG_TARGET, "Authored block {:?}", s.0), } // otherwise with only vrf-based primary slots we might need to try a couple @@ -593,7 +593,7 @@ fn can_author_block() { match claim_slot(i.into(), &epoch, &keystore) { None => i += 1, Some(s) => { - debug!(target: "babe", "Authored block {:?}", s.0); + debug!(target: LOG_TARGET, "Authored block {:?}", s.0); break }, } diff --git a/client/consensus/babe/src/verification.rs b/client/consensus/babe/src/verification.rs index 53ec3002e6a85..e77a70c8e465a 100644 --- a/client/consensus/babe/src/verification.rs +++ b/client/consensus/babe/src/verification.rs @@ -17,9 +17,9 @@ // along with this program. If not, see . //! Verification for BABE headers. -use super::{ +use crate::{ authorship::{calculate_primary_threshold, check_primary_threshold, secondary_slot_author}, - babe_err, find_pre_digest, BlockT, Epoch, Error, + babe_err, find_pre_digest, BlockT, Epoch, Error, LOG_TARGET, }; use log::{debug, trace}; use sc_consensus_slots::CheckedHeader; @@ -67,7 +67,7 @@ pub(super) fn check_header( let authorities = &epoch.authorities; let pre_digest = pre_digest.map(Ok).unwrap_or_else(|| find_pre_digest::(&header))?; - trace!(target: "babe", "Checking header"); + trace!(target: LOG_TARGET, "Checking header"); let seal = header .digest_mut() .pop() @@ -93,7 +93,8 @@ pub(super) fn check_header( match &pre_digest { PreDigest::Primary(primary) => { - debug!(target: "babe", + debug!( + target: LOG_TARGET, "Verifying primary block #{} at slot: {}", header.number(), primary.slot, @@ -104,7 +105,8 @@ pub(super) fn check_header( PreDigest::SecondaryPlain(secondary) if epoch.config.allowed_slots.is_secondary_plain_slots_allowed() => { - debug!(target: "babe", + debug!( + target: LOG_TARGET, "Verifying secondary plain block #{} at slot: {}", header.number(), secondary.slot, @@ -115,7 +117,8 @@ pub(super) fn check_header( PreDigest::SecondaryVRF(secondary) if epoch.config.allowed_slots.is_secondary_vrf_slots_allowed() => { - debug!(target: "babe", + debug!( + target: LOG_TARGET, "Verifying secondary VRF block #{} at slot: {}", header.number(), secondary.slot, diff --git a/client/consensus/common/src/import_queue.rs b/client/consensus/common/src/import_queue.rs index d49b240ef3489..02309cc6a365e 100644 --- a/client/consensus/common/src/import_queue.rs +++ b/client/consensus/common/src/import_queue.rs @@ -45,6 +45,8 @@ use crate::{ pub use basic_queue::BasicQueue; use sp_consensus::{error::Error as ConsensusError, BlockOrigin, CacheKeyId}; +const LOG_TARGET: &str = "sync::import-queue"; + /// A commonly-used Import Queue type. 
/// /// This defines the transaction type of the `BasicQueue` to be the transaction type for a client. @@ -247,15 +249,15 @@ pub(crate) async fn import_single_block_metered< (Some(header), justifications) => (header, justifications), (None, _) => { if let Some(ref peer) = peer { - debug!(target: "sync", "Header {} was not provided by {} ", block.hash, peer); + debug!(target: LOG_TARGET, "Header {} was not provided by {} ", block.hash, peer); } else { - debug!(target: "sync", "Header {} was not provided ", block.hash); + debug!(target: LOG_TARGET, "Header {} was not provided ", block.hash); } return Err(BlockImportError::IncompleteHeader(peer)) }, }; - trace!(target: "sync", "Header {} has {:?} logs", block.hash, header.digest().logs().len()); + trace!(target: LOG_TARGET, "Header {} has {:?} logs", block.hash, header.digest().logs().len()); let number = *header.number(); let hash = block.hash; @@ -263,27 +265,31 @@ pub(crate) async fn import_single_block_metered< let import_handler = |import| match import { Ok(ImportResult::AlreadyInChain) => { - trace!(target: "sync", "Block already in chain {}: {:?}", number, hash); + trace!(target: LOG_TARGET, "Block already in chain {}: {:?}", number, hash); Ok(BlockImportStatus::ImportedKnown(number, peer)) }, Ok(ImportResult::Imported(aux)) => Ok(BlockImportStatus::ImportedUnknown(number, aux, peer)), Ok(ImportResult::MissingState) => { - debug!(target: "sync", "Parent state is missing for {}: {:?}, parent: {:?}", - number, hash, parent_hash); + debug!( + target: LOG_TARGET, + "Parent state is missing for {}: {:?}, parent: {:?}", number, hash, parent_hash + ); Err(BlockImportError::MissingState) }, Ok(ImportResult::UnknownParent) => { - debug!(target: "sync", "Block with unknown parent {}: {:?}, parent: {:?}", - number, hash, parent_hash); + debug!( + target: LOG_TARGET, + "Block with unknown parent {}: {:?}, parent: {:?}", number, hash, parent_hash + ); Err(BlockImportError::UnknownParent) }, Ok(ImportResult::KnownBad) => { - debug!(target: "sync", "Peer gave us a bad block {}: {:?}", number, hash); + debug!(target: LOG_TARGET, "Peer gave us a bad block {}: {:?}", number, hash); Err(BlockImportError::BadBlock(peer)) }, Err(e) => { - debug!(target: "sync", "Error importing block {}: {:?}: {}", number, hash, e); + debug!(target: LOG_TARGET, "Error importing block {}: {:?}: {}", number, hash, e); Err(BlockImportError::Other(e)) }, }; @@ -324,9 +330,16 @@ pub(crate) async fn import_single_block_metered< let (import_block, maybe_keys) = verifier.verify(import_block).await.map_err(|msg| { if let Some(ref peer) = peer { - trace!(target: "sync", "Verifying {}({}) from {} failed: {}", number, hash, peer, msg); + trace!( + target: LOG_TARGET, + "Verifying {}({}) from {} failed: {}", + number, + hash, + peer, + msg + ); } else { - trace!(target: "sync", "Verifying {}({}) failed: {}", number, hash, msg); + trace!(target: LOG_TARGET, "Verifying {}({}) failed: {}", number, hash, msg); } if let Some(metrics) = metrics.as_ref() { metrics.report_verification(false, started.elapsed()); diff --git a/client/consensus/common/src/import_queue/basic_queue.rs b/client/consensus/common/src/import_queue/basic_queue.rs index 20e8d262cacda..b63bc192b2e77 100644 --- a/client/consensus/common/src/import_queue/basic_queue.rs +++ b/client/consensus/common/src/import_queue/basic_queue.rs @@ -35,7 +35,7 @@ use crate::{ buffered_link::{self, BufferedLinkReceiver, BufferedLinkSender}, import_single_block_metered, BlockImportError, BlockImportStatus, BoxBlockImport, 
BoxJustificationImport, ImportQueue, ImportQueueService, IncomingBlock, Link, - RuntimeOrigin, Verifier, + RuntimeOrigin, Verifier, LOG_TARGET, }, metrics::Metrics, }; @@ -129,14 +129,14 @@ impl ImportQueueService for BasicQueueHandle { return } - trace!(target: "sync", "Scheduling {} blocks for import", blocks.len()); + trace!(target: LOG_TARGET, "Scheduling {} blocks for import", blocks.len()); let res = self .block_import_sender .unbounded_send(worker_messages::ImportBlocks(origin, blocks)); if res.is_err() { log::error!( - target: "sync", + target: LOG_TARGET, "import_blocks: Background import task is no longer alive" ); } @@ -156,7 +156,7 @@ impl ImportQueueService for BasicQueueHandle { if res.is_err() { log::error!( - target: "sync", + target: LOG_TARGET, "import_justification: Background import task is no longer alive" ); } @@ -179,7 +179,10 @@ impl ImportQueue for BasicQueue /// Poll actions from network. fn poll_actions(&mut self, cx: &mut Context, link: &mut dyn Link) { if self.result_port.poll_actions(cx, link).is_err() { - log::error!(target: "sync", "poll_actions: Background import task is no longer alive"); + log::error!( + target: LOG_TARGET, + "poll_actions: Background import task is no longer alive" + ); } } @@ -231,7 +234,7 @@ async fn block_import_process( Some(blocks) => blocks, None => { log::debug!( - target: "block-import", + target: LOG_TARGET, "Stopping block import because the import channel was closed!", ); return @@ -305,7 +308,7 @@ impl BlockImportWorker { // down and we should end this future. if worker.result_sender.is_closed() { log::debug!( - target: "block-import", + target: LOG_TARGET, "Stopping block import because result channel was closed!", ); return @@ -318,7 +321,7 @@ impl BlockImportWorker { worker.import_justification(who, hash, number, justification).await, None => { log::debug!( - target: "block-import", + target: LOG_TARGET, "Stopping block import because justification channel was closed!", ); return @@ -353,7 +356,7 @@ impl BlockImportWorker { .await .map_err(|e| { debug!( - target: "sync", + target: LOG_TARGET, "Justification import failed for hash = {:?} with number = {:?} coming from node = {:?} with error: {}", hash, number, @@ -407,7 +410,7 @@ async fn import_many_blocks, Transaction: Send + 'stat _ => Default::default(), }; - trace!(target: "sync", "Starting import of {} blocks {}", count, blocks_range); + trace!(target: LOG_TARGET, "Starting import of {} blocks {}", count, blocks_range); let mut imported = 0; let mut results = vec![]; @@ -447,7 +450,7 @@ async fn import_many_blocks, Transaction: Send + 'stat if import_result.is_ok() { trace!( - target: "sync", + target: LOG_TARGET, "Block imported successfully {:?} ({})", block_number, block_hash, diff --git a/client/consensus/manual-seal/src/consensus/babe.rs b/client/consensus/manual-seal/src/consensus/babe.rs index 206f5163a13cd..d2bea3a3a3656 100644 --- a/client/consensus/manual-seal/src/consensus/babe.rs +++ b/client/consensus/manual-seal/src/consensus/babe.rs @@ -20,7 +20,7 @@ //! that expect babe-specific digests. 
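The `BasicQueue` hunks above also standardize the shutdown logs: the background import task exits when its input channel closes, i.e. once every sender handle has been dropped. A self-contained sketch of that convention, using `futures`' unbounded channel as a stand-in for the `TracingUnbounded` wrappers in the patch:

use futures::{channel::mpsc, StreamExt};

const LOG_TARGET: &str = "sync::import-queue";

async fn block_import_process(mut block_import_receiver: mpsc::UnboundedReceiver<Vec<u64>>) {
    loop {
        match block_import_receiver.next().await {
            Some(blocks) => {
                log::trace!(target: LOG_TARGET, "Scheduling {} blocks for import", blocks.len());
                // ... import the blocks here ...
            },
            // All sender handles are gone: the queue owner shut down, so the
            // background task ends instead of spinning on a dead channel.
            None => {
                log::debug!(
                    target: LOG_TARGET,
                    "Stopping block import because the import channel was closed!",
                );
                return
            },
        }
    }
}

fn main() {
    let (sender, receiver) = mpsc::unbounded();
    sender.unbounded_send(vec![1, 2, 3]).expect("receiver is alive");
    drop(sender); // dropping the last sender is the shutdown signal
    futures::executor::block_on(block_import_process(receiver));
}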
use super::ConsensusDataProvider; -use crate::Error; +use crate::{Error, LOG_TARGET}; use codec::Encode; use sc_client_api::{AuxStore, UsageProvider}; use sc_consensus_babe::{ @@ -179,7 +179,7 @@ where let epoch = epoch_changes .viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&self.config, slot)) .ok_or_else(|| { - log::info!(target: "babe", "create_digest: no viable_epoch :("); + log::info!(target: LOG_TARGET, "create_digest: no viable_epoch :("); sp_consensus::Error::InvalidAuthoritiesSet })?; @@ -290,7 +290,7 @@ where let has_authority = epoch.authorities.iter().any(|(id, _)| *id == *authority); if !has_authority { - log::info!(target: "manual-seal", "authority not found"); + log::info!(target: LOG_TARGET, "authority not found"); let timestamp = inherents .timestamp_inherent_data()? .ok_or_else(|| Error::StringError("No timestamp inherent data".into()))?; diff --git a/client/consensus/manual-seal/src/lib.rs b/client/consensus/manual-seal/src/lib.rs index 09ab139b91c73..700b94cf1d704 100644 --- a/client/consensus/manual-seal/src/lib.rs +++ b/client/consensus/manual-seal/src/lib.rs @@ -49,6 +49,8 @@ pub use self::{ use sc_transaction_pool_api::TransactionPool; use sp_api::{ProvideRuntimeApi, TransactionFor}; +const LOG_TARGET: &str = "manual-seal"; + /// The `ConsensusEngineId` of Manual Seal. pub const MANUAL_SEAL_ENGINE_ID: ConsensusEngineId = [b'm', b'a', b'n', b'l']; diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index ac7ce3b411333..ace00a34459af 100644 --- a/client/consensus/pow/src/lib.rs +++ b/client/consensus/pow/src/lib.rs @@ -67,6 +67,8 @@ use sp_runtime::{ }; use std::{cmp::Ordering, collections::HashMap, marker::PhantomData, sync::Arc, time::Duration}; +const LOG_TARGET: &str = "pow"; + #[derive(Debug, thiserror::Error)] pub enum Error { #[error("Header uses the wrong engine {0:?}")] @@ -531,7 +533,7 @@ where } if sync_oracle.is_major_syncing() { - debug!(target: "pow", "Skipping proposal due to sync."); + debug!(target: LOG_TARGET, "Skipping proposal due to sync."); worker.on_major_syncing(); continue } @@ -540,7 +542,7 @@ where Ok(x) => x, Err(err) => { warn!( - target: "pow", + target: LOG_TARGET, "Unable to pull new block for authoring. \ Select best chain error: {}", err @@ -561,7 +563,7 @@ where Ok(x) => x, Err(err) => { warn!( - target: "pow", + target: LOG_TARGET, "Unable to propose new block for authoring. \ Fetch difficulty failed: {}", err, @@ -577,7 +579,7 @@ where Ok(x) => x, Err(err) => { warn!( - target: "pow", + target: LOG_TARGET, "Unable to propose new block for authoring. \ Creating inherent data providers failed: {}", err, @@ -590,7 +592,7 @@ where Ok(r) => r, Err(e) => { warn!( - target: "pow", + target: LOG_TARGET, "Unable to propose new block for authoring. \ Creating inherent data failed: {}", e, @@ -610,7 +612,7 @@ where Ok(x) => x, Err(err) => { warn!( - target: "pow", + target: LOG_TARGET, "Unable to propose new block for authoring. \ Creating proposer failed: {:?}", err, @@ -624,7 +626,7 @@ where Ok(x) => x, Err(err) => { warn!( - target: "pow", + target: LOG_TARGET, "Unable to propose new block for authoring. 
\ Creating proposal failed: {}", err, @@ -654,14 +656,14 @@ where fn find_pre_digest(header: &B::Header) -> Result>, Error> { let mut pre_digest: Option<_> = None; for log in header.digest().logs() { - trace!(target: "pow", "Checking log {:?}, looking for pre runtime digest", log); + trace!(target: LOG_TARGET, "Checking log {:?}, looking for pre runtime digest", log); match (log, pre_digest.is_some()) { (DigestItem::PreRuntime(POW_ENGINE_ID, _), true) => return Err(Error::MultiplePreRuntimeDigests), (DigestItem::PreRuntime(POW_ENGINE_ID, v), false) => { pre_digest = Some(v.clone()); }, - (_, _) => trace!(target: "pow", "Ignoring digest not meant for us"), + (_, _) => trace!(target: LOG_TARGET, "Ignoring digest not meant for us"), } } diff --git a/client/consensus/pow/src/worker.rs b/client/consensus/pow/src/worker.rs index a00da6e7022fb..b53227bb3ca50 100644 --- a/client/consensus/pow/src/worker.rs +++ b/client/consensus/pow/src/worker.rs @@ -41,7 +41,7 @@ use std::{ time::Duration, }; -use crate::{PowAlgorithm, PowIntermediate, Seal, INTERMEDIATE_KEY, POW_ENGINE_ID}; +use crate::{PowAlgorithm, PowIntermediate, Seal, INTERMEDIATE_KEY, LOG_TARGET, POW_ENGINE_ID}; /// Mining metadata. This is the information needed to start an actual mining loop. #[derive(Clone, Eq, PartialEq)] @@ -159,26 +159,16 @@ where ) { Ok(true) => (), Ok(false) => { - warn!( - target: "pow", - "Unable to import mined block: seal is invalid", - ); + warn!(target: LOG_TARGET, "Unable to import mined block: seal is invalid",); return false }, Err(err) => { - warn!( - target: "pow", - "Unable to import mined block: {}", - err, - ); + warn!(target: LOG_TARGET, "Unable to import mined block: {}", err,); return false }, } } else { - warn!( - target: "pow", - "Unable to import mined block: metadata does not exist", - ); + warn!(target: LOG_TARGET, "Unable to import mined block: metadata does not exist",); return false } @@ -192,10 +182,7 @@ where } { build } else { - warn!( - target: "pow", - "Unable to import mined block: build does not exist", - ); + warn!(target: LOG_TARGET, "Unable to import mined block: build does not exist",); return false }; @@ -225,18 +212,13 @@ where ); info!( - target: "pow", - "✅ Successfully mined block on top of: {}", - build.metadata.best_hash + target: LOG_TARGET, + "✅ Successfully mined block on top of: {}", build.metadata.best_hash ); true }, Err(err) => { - warn!( - target: "pow", - "Unable to import mined block: {}", - err, - ); + warn!(target: LOG_TARGET, "Unable to import mined block: {}", err,); false }, } diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index bc68797dc734e..6126647e6190d 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -48,6 +48,8 @@ use std::{ time::{Duration, Instant}, }; +const LOG_TARGET: &str = "slots"; + /// The changes that need to applied to the storage to create the state for a block. /// /// See [`sp_state_machine::StorageChanges`] for more information. 
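`find_pre_digest` above is an instance of a small reusable check: scan a list for items matching a predicate, keep the first match, and reject the input outright if a second one appears. A standalone sketch of that shape (the generic helper is an illustration, not an API from this patch):

#[derive(Debug, PartialEq)]
enum FindError {
    MultipleMatches,
}

// Keep the first matching item; a second match is an error, mirroring the
// `MultiplePreRuntimeDigests` case above.
fn find_unique<T: Clone>(items: &[T], is_match: impl Fn(&T) -> bool) -> Result<Option<T>, FindError> {
    let mut found = None;
    for item in items {
        match (is_match(item), found.is_some()) {
            (true, true) => return Err(FindError::MultipleMatches),
            (true, false) => found = Some(item.clone()),
            // Anything else is "a digest not meant for us": skip it.
            _ => {},
        }
    }
    Ok(found)
}

fn main() {
    assert_eq!(find_unique(&[1, 2, 3], |x| *x == 2), Ok(Some(2)));
    assert_eq!(find_unique(&[2, 2], |x| *x == 2), Err(FindError::MultipleMatches));
    assert_eq!(find_unique(&[1, 3], |x| *x == 2), Ok(None));
}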
@@ -198,9 +200,9 @@ pub trait SimpleSlotWorker { > { let slot = slot_info.slot; let telemetry = self.telemetry(); - let logging_target = self.logging_target(); + let log_target = self.logging_target(); - let inherent_data = Self::create_inherent_data(&slot_info, &logging_target).await?; + let inherent_data = Self::create_inherent_data(&slot_info, &log_target).await?; let proposing_remaining_duration = self.proposing_remaining_duration(&slot_info); let logs = self.pre_digest_data(slot, claim); @@ -220,19 +222,19 @@ pub trait SimpleSlotWorker { let proposal = match futures::future::select(proposing, proposing_remaining).await { Either::Left((Ok(p), _)) => p, Either::Left((Err(err), _)) => { - warn!(target: logging_target, "Proposing failed: {}", err); + warn!(target: log_target, "Proposing failed: {}", err); return None }, Either::Right(_) => { info!( - target: logging_target, + target: log_target, "⌛️ Discarding proposal for slot {}; block production took too long", slot, ); // If the node was compiled with debug, tell the user to use release optimizations. #[cfg(build_type = "debug")] info!( - target: logging_target, + target: log_target, "👉 Recompile your node in `--release` mode to mitigate this problem.", ); telemetry!( @@ -525,13 +527,13 @@ pub async fn start_slot_worker( let slot_info = match slots.next_slot().await { Ok(r) => r, Err(e) => { - warn!(target: "slots", "Error while polling for next slot: {}", e); + warn!(target: LOG_TARGET, "Error while polling for next slot: {}", e); return }, }; if sync_oracle.is_major_syncing() { - debug!(target: "slots", "Skipping proposal slot due to sync."); + debug!(target: LOG_TARGET, "Skipping proposal slot due to sync."); continue } diff --git a/client/consensus/slots/src/slots.rs b/client/consensus/slots/src/slots.rs index f8f366d89c82c..9bb5650b313d5 100644 --- a/client/consensus/slots/src/slots.rs +++ b/client/consensus/slots/src/slots.rs @@ -20,7 +20,7 @@ //! //! This is used instead of `futures_timer::Interval` because it was unreliable. -use super::{InherentDataProviderExt, Slot}; +use super::{InherentDataProviderExt, Slot, LOG_TARGET}; use sp_consensus::{Error, SelectChain}; use sp_inherents::{CreateInherentDataProviders, InherentDataProvider}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; @@ -146,7 +146,7 @@ where Ok(x) => x, Err(e) => { log::warn!( - target: "slots", + target: LOG_TARGET, "Unable to author block in slot. No best block header: {}", e, ); diff --git a/client/finality-grandpa/src/authorities.rs b/client/finality-grandpa/src/authorities.rs index 0803e6b3c2931..a61c66979bb5c 100644 --- a/client/finality-grandpa/src/authorities.rs +++ b/client/finality-grandpa/src/authorities.rs @@ -29,7 +29,7 @@ use sc_consensus::shared_data::{SharedData, SharedDataLocked}; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_INFO}; use sp_finality_grandpa::{AuthorityId, AuthorityList}; -use crate::SetId; +use crate::{SetId, LOG_TARGET}; /// Error type returned on operations on the `AuthoritySet`. 
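The "Discarding proposal" branch in the slot worker above comes out of a race: `futures::future::select` pits the proposing future against a timer for the slot's remaining duration, and `Either::Right` means the timer won. A minimal standalone sketch of that race, using `futures-timer` as the slots code does (durations and messages here are illustrative):

use futures::future::{self, Either};
use futures_timer::Delay;
use std::time::Duration;

const LOG_TARGET: &str = "slots";

async fn propose() -> Result<&'static str, &'static str> {
    // Pretend block authoring takes 50ms.
    Delay::new(Duration::from_millis(50)).await;
    Ok("proposal")
}

async fn on_slot(proposing_remaining_duration: Duration) -> Option<&'static str> {
    let proposing = Box::pin(propose());
    let proposing_remaining = Box::pin(Delay::new(proposing_remaining_duration));

    match future::select(proposing, proposing_remaining).await {
        Either::Left((Ok(proposal), _)) => Some(proposal),
        Either::Left((Err(err), _)) => {
            log::warn!(target: LOG_TARGET, "Proposing failed: {}", err);
            None
        },
        // The timer finished first: drop the half-built proposal for this slot.
        Either::Right(_) => {
            log::info!(target: LOG_TARGET, "⌛️ Discarding proposal; block production took too long");
            None
        },
    }
}

fn main() {
    // A 10ms budget loses the race against the 50ms proposer above.
    assert!(futures::executor::block_on(on_slot(Duration::from_millis(10))).is_none());
}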
#[derive(Debug, thiserror::Error)] @@ -314,7 +314,7 @@ where let number = pending.canon_height.clone(); debug!( - target: "afg", + target: LOG_TARGET, "Inserting potential standard set change signaled at block {:?} (delayed by {:?} blocks).", (&number, &hash), pending.delay, @@ -323,7 +323,7 @@ where self.pending_standard_changes.import(hash, number, pending, is_descendent_of)?; debug!( - target: "afg", + target: LOG_TARGET, "There are now {} alternatives for the next pending standard change (roots), and a \ total of {} pending standard changes (across all forks).", self.pending_standard_changes.roots().count(), @@ -362,7 +362,7 @@ where .unwrap_or_else(|i| i); debug!( - target: "afg", + target: LOG_TARGET, "Inserting potential forced set change at block {:?} (delayed by {:?} blocks).", (&pending.canon_height, &pending.canon_hash), pending.delay, @@ -370,7 +370,11 @@ where self.pending_forced_changes.insert(idx, pending); - debug!(target: "afg", "There are now {} pending forced changes.", self.pending_forced_changes.len()); + debug!( + target: LOG_TARGET, + "There are now {} pending forced changes.", + self.pending_forced_changes.len() + ); Ok(()) } @@ -475,7 +479,7 @@ where if standard_change.effective_number() <= median_last_finalized && is_descendent_of(&standard_change.canon_hash, &change.canon_hash)? { - log::info!(target: "afg", + log::info!(target: LOG_TARGET, "Not applying authority set change forced at block #{:?}, due to pending standard change at block #{:?}", change.canon_height, standard_change.effective_number(), @@ -488,7 +492,7 @@ where } // apply this change: make the set canonical - afg_log!( + grandpa_log!( initial_sync, "👴 Applying authority set change forced at block #{:?}", change.canon_height, @@ -570,7 +574,7 @@ where } if let Some(change) = change { - afg_log!( + grandpa_log!( initial_sync, "👴 Applying authority set change scheduled at block #{:?}", change.canon_height, diff --git a/client/finality-grandpa/src/aux_schema.rs b/client/finality-grandpa/src/aux_schema.rs index 235453ea35df1..a7357a7fa5e40 100644 --- a/client/finality-grandpa/src/aux_schema.rs +++ b/client/finality-grandpa/src/aux_schema.rs @@ -38,7 +38,7 @@ use crate::{ CompletedRound, CompletedRounds, CurrentRounds, HasVoted, SharedVoterSetState, VoterSetState, }, - GrandpaJustification, NewAuthoritySet, + GrandpaJustification, NewAuthoritySet, LOG_TARGET, }; const VERSION_KEY: &[u8] = b"grandpa_schema_version"; @@ -100,8 +100,8 @@ where // previously we only supported at most one pending change per fork &|_, _| Ok(false), ) { - warn!(target: "afg", "Error migrating pending authority set change: {}", err); - warn!(target: "afg", "Node is in a potentially inconsistent state."); + warn!(target: LOG_TARGET, "Error migrating pending authority set change: {}", err); + warn!(target: LOG_TARGET, "Node is in a potentially inconsistent state."); } } @@ -384,8 +384,11 @@ where } // genesis. - info!(target: "afg", "👴 Loading GRANDPA authority set \ - from genesis on what appears to be first startup."); + info!( + target: LOG_TARGET, + "👴 Loading GRANDPA authority set \ + from genesis on what appears to be first startup." 
+ ); let genesis_authorities = genesis_authorities()?; let genesis_set = AuthoritySet::genesis(genesis_authorities) diff --git a/client/finality-grandpa/src/communication/gossip.rs b/client/finality-grandpa/src/communication/gossip.rs index 218b4b668c10f..408cbda745e56 100644 --- a/client/finality-grandpa/src/communication/gossip.rs +++ b/client/finality-grandpa/src/communication/gossip.rs @@ -99,7 +99,7 @@ use sp_finality_grandpa::AuthorityId; use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; use super::{benefit, cost, Round, SetId, NEIGHBOR_REBROADCAST_PERIOD}; -use crate::{environment, CatchUp, CompactCommit, SignedMessage}; +use crate::{environment, CatchUp, CompactCommit, SignedMessage, LOG_TARGET}; use std::{ collections::{HashSet, VecDeque}, @@ -578,8 +578,13 @@ impl Peers { last_update: Some(now), }; - trace!(target: "afg", "Peer {} updated view. Now at {:?}, {:?}", - who, peer.view.round, peer.view.set_id); + trace!( + target: LOG_TARGET, + "Peer {} updated view. Now at {:?}, {:?}", + who, + peer.view.round, + peer.view.set_id + ); Ok(Some(&peer.view)) } @@ -801,8 +806,12 @@ impl Inner { let set_id = local_view.set_id; - debug!(target: "afg", "Voter {} noting beginning of round {:?} to network.", - self.config.name(), (round, set_id)); + debug!( + target: LOG_TARGET, + "Voter {} noting beginning of round {:?} to network.", + self.config.name(), + (round, set_id) + ); local_view.update_round(round); @@ -824,7 +833,7 @@ impl Inner { authorities.iter().collect::>(); if diff_authorities { - debug!(target: "afg", + debug!(target: LOG_TARGET, "Gossip validator noted set {:?} twice with different authorities. \ Was the authority set hard forked?", set_id, @@ -912,7 +921,7 @@ impl Inner { // ensure authority is part of the set. if !self.authorities.contains(&full.message.id) { - debug!(target: "afg", "Message from unknown voter: {}", full.message.id); + debug!(target: LOG_TARGET, "Message from unknown voter: {}", full.message.id); telemetry!( self.config.telemetry; CONSENSUS_DEBUG; @@ -929,7 +938,7 @@ impl Inner { full.round.0, full.set_id.0, ) { - debug!(target: "afg", "Bad message signature {}", full.message.id); + debug!(target: LOG_TARGET, "Bad message signature {}", full.message.id); telemetry!( self.config.telemetry; CONSENSUS_DEBUG; @@ -964,7 +973,7 @@ impl Inner { if full.message.precommits.len() != full.message.auth_data.len() || full.message.precommits.is_empty() { - debug!(target: "afg", "Malformed compact commit"); + debug!(target: LOG_TARGET, "Malformed compact commit"); telemetry!( self.config.telemetry; CONSENSUS_DEBUG; @@ -1023,9 +1032,9 @@ impl Inner { PendingCatchUp::Processing { .. 
} => { self.pending_catch_up = PendingCatchUp::None; }, - state => debug!(target: "afg", - "Noted processed catch up message when state was: {:?}", - state, + state => debug!( + target: LOG_TARGET, + "Noted processed catch up message when state was: {:?}", state, ), } } @@ -1067,7 +1076,9 @@ impl Inner { return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())) } - trace!(target: "afg", "Replying to catch-up request for round {} from {} with round {}", + trace!( + target: LOG_TARGET, + "Replying to catch-up request for round {} from {} with round {}", request.round.0, who, last_completed_round.number, @@ -1141,9 +1152,9 @@ impl Inner { let (catch_up_allowed, catch_up_report) = self.note_catch_up_request(who, &request); if catch_up_allowed { - debug!(target: "afg", "Sending catch-up request for round {} to {}", - round, - who, + debug!( + target: LOG_TARGET, + "Sending catch-up request for round {} to {}", round, who, ); catch_up = Some(GossipMessage::::CatchUpRequest(request)); @@ -1347,7 +1358,7 @@ impl GossipValidator { let metrics = match prometheus_registry.map(Metrics::register) { Some(Ok(metrics)) => Some(metrics), Some(Err(e)) => { - debug!(target: "afg", "Failed to register metrics: {:?}", e); + debug!(target: LOG_TARGET, "Failed to register metrics: {:?}", e); None }, None => None, @@ -1466,7 +1477,7 @@ impl GossipValidator { }, Err(e) => { message_name = None; - debug!(target: "afg", "Error decoding message: {}", e); + debug!(target: LOG_TARGET, "Error decoding message: {}", e); telemetry!( self.telemetry; CONSENSUS_DEBUG; diff --git a/client/finality-grandpa/src/communication/mod.rs b/client/finality-grandpa/src/communication/mod.rs index 75a7697812c6c..e7e3c12989c96 100644 --- a/client/finality-grandpa/src/communication/mod.rs +++ b/client/finality-grandpa/src/communication/mod.rs @@ -54,7 +54,7 @@ use sp_runtime::traits::{Block as BlockT, Hash as HashT, Header as HeaderT, Numb use crate::{ environment::HasVoted, CatchUp, Commit, CommunicationIn, CommunicationOutH, CompactCommit, - Error, Message, SignedMessage, + Error, Message, SignedMessage, LOG_TARGET, }; use gossip::{ FullCatchUpMessage, FullCommitMessage, GossipMessage, GossipValidator, PeerReport, VoteMessage, @@ -274,7 +274,8 @@ impl> NetworkBridge { gossip_engine.lock().register_gossip_message(topic, message.encode()); } - trace!(target: "afg", + trace!( + target: LOG_TARGET, "Registered {} messages for topic {:?} (round: {}, set_id: {})", round.votes.len(), topic, @@ -340,13 +341,19 @@ impl> NetworkBridge { match decoded { Err(ref e) => { - debug!(target: "afg", "Skipping malformed message {:?}: {}", notification, e); + debug!( + target: LOG_TARGET, + "Skipping malformed message {:?}: {}", notification, e + ); future::ready(None) }, Ok(GossipMessage::Vote(msg)) => { // check signature. if !voters.contains(&msg.message.id) { - debug!(target: "afg", "Skipping message from unknown voter {}", msg.message.id); + debug!( + target: LOG_TARGET, + "Skipping message from unknown voter {}", msg.message.id + ); return future::ready(None) } @@ -388,7 +395,7 @@ impl> NetworkBridge { future::ready(Some(msg.message)) }, _ => { - debug!(target: "afg", "Skipping unknown message type"); + debug!(target: LOG_TARGET, "Skipping unknown message type"); future::ready(None) }, } @@ -631,7 +638,12 @@ fn incoming_global( // this could be optimized by decoding piecewise. 
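One idiom worth calling out from the authority-set hunks above: pending forced changes are kept sorted by inserting at the index `binary_search` reports, with `.unwrap_or_else(|i| i)` collapsing the found (`Ok`) and not-found (`Err`) cases into a single insertion position. A standalone sketch:

fn sorted_insert(heights: &mut Vec<u64>, value: u64) {
    // Ok(i): an equal element already sits at i; Err(i): i is where `value`
    // would need to be inserted to keep the vector sorted. Either way, i is
    // a valid insertion point.
    let idx = heights.binary_search(&value).unwrap_or_else(|i| i);
    heights.insert(idx, value);
}

fn main() {
    let mut heights = vec![10, 30];
    sorted_insert(&mut heights, 20);
    assert_eq!(heights, [10, 20, 30]);
}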
let decoded = GossipMessage::::decode(&mut ¬ification.message[..]); if let Err(ref e) = decoded { - trace!(target: "afg", "Skipping malformed commit message {:?}: {}", notification, e); + trace!( + target: LOG_TARGET, + "Skipping malformed commit message {:?}: {}", + notification, + e + ); } future::ready(decoded.map(move |d| (notification, d)).ok()) }) @@ -642,7 +654,7 @@ fn incoming_global( GossipMessage::CatchUp(msg) => process_catch_up(msg, notification, &gossip_engine, &gossip_validator, &voters), _ => { - debug!(target: "afg", "Skipping unknown message type"); + debug!(target: LOG_TARGET, "Skipping unknown message type"); None }, }) @@ -748,7 +760,7 @@ impl Sink> for OutgoingMessages { }); debug!( - target: "afg", + target: LOG_TARGET, "Announcing block {} to peers which we voted on in round {} in set {}", target_hash, self.round, @@ -813,7 +825,7 @@ fn check_compact_commit( return Err(cost::MALFORMED_COMMIT) } } else { - debug!(target: "afg", "Skipping commit containing unknown voter {}", id); + debug!(target: LOG_TARGET, "Skipping commit containing unknown voter {}", id); return Err(cost::MALFORMED_COMMIT) } } @@ -838,7 +850,7 @@ fn check_compact_commit( set_id.0, &mut buf, ) { - debug!(target: "afg", "Bad commit message signature {}", id); + debug!(target: LOG_TARGET, "Bad commit message signature {}", id); telemetry!( telemetry; CONSENSUS_DEBUG; @@ -886,7 +898,10 @@ fn check_catch_up( return Err(cost::MALFORMED_CATCH_UP) } } else { - debug!(target: "afg", "Skipping catch up message containing unknown voter {}", id); + debug!( + target: LOG_TARGET, + "Skipping catch up message containing unknown voter {}", id + ); return Err(cost::MALFORMED_CATCH_UP) } } @@ -922,7 +937,7 @@ fn check_catch_up( if !sp_finality_grandpa::check_message_signature_with_buffer( &msg, id, sig, round, set_id, buf, ) { - debug!(target: "afg", "Bad catch up message signature {}", id); + debug!(target: LOG_TARGET, "Bad catch up message signature {}", id); telemetry!( telemetry; CONSENSUS_DEBUG; diff --git a/client/finality-grandpa/src/communication/periodic.rs b/client/finality-grandpa/src/communication/periodic.rs index c001796b5ca5d..7e50abb96e441 100644 --- a/client/finality-grandpa/src/communication/periodic.rs +++ b/client/finality-grandpa/src/communication/periodic.rs @@ -21,17 +21,19 @@ use futures::{future::FutureExt as _, prelude::*, ready, stream::Stream}; use futures_timer::Delay; use log::debug; -use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use std::{ pin::Pin, task::{Context, Poll}, time::Duration, }; -use super::gossip::{GossipMessage, NeighborPacket}; use sc_network::PeerId; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_runtime::traits::{Block as BlockT, NumberFor}; +use super::gossip::{GossipMessage, NeighborPacket}; +use crate::LOG_TARGET; + /// A sender used to send neighbor packets to a background job. 
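The gossip validator above registers Prometheus metrics only when a registry is supplied, and degrades to `None` (with a log line) when registration fails, so metrics stay strictly best-effort. A sketch of that `Option`-of-`Result` flattening, with stand-in types in place of the real Prometheus ones:

struct Metrics;
struct Registry;

// Stand-in for `Metrics::register`; real registration can fail, e.g. on
// duplicate metric names.
fn register(_registry: &Registry) -> Result<Metrics, String> {
    Ok(Metrics)
}

fn maybe_register(prometheus_registry: Option<&Registry>) -> Option<Metrics> {
    match prometheus_registry.map(register) {
        Some(Ok(metrics)) => Some(metrics),
        Some(Err(e)) => {
            // Metrics are optional: log the failure and keep running.
            log::debug!(target: "grandpa", "Failed to register metrics: {:?}", e);
            None
        },
        None => None,
    }
}

fn main() {
    assert!(maybe_register(Some(&Registry)).is_some());
    assert!(maybe_register(None).is_none());
}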
#[derive(Clone)] pub(super) struct NeighborPacketSender( @@ -46,7 +48,7 @@ impl NeighborPacketSender { neighbor_packet: NeighborPacket>, ) { if let Err(err) = self.0.unbounded_send((who, neighbor_packet)) { - debug!(target: "afg", "Failed to send neighbor packet: {:?}", err); + debug!(target: LOG_TARGET, "Failed to send neighbor packet: {:?}", err); } } } diff --git a/client/finality-grandpa/src/environment.rs b/client/finality-grandpa/src/environment.rs index f235c3a86c04e..110d0eb2c927e 100644 --- a/client/finality-grandpa/src/environment.rs +++ b/client/finality-grandpa/src/environment.rs @@ -60,7 +60,7 @@ use crate::{ until_imported::UntilVoteTargetImported, voting_rule::VotingRule as VotingRuleT, ClientForGrandpa, CommandOrError, Commit, Config, Error, NewAuthoritySet, Precommit, Prevote, - PrimaryPropose, SignedMessage, VoterCommand, + PrimaryPropose, SignedMessage, VoterCommand, LOG_TARGET, }; type HistoricalVotes = finality_grandpa::HistoricalVotes< @@ -551,7 +551,10 @@ where { Some(proof) => proof, None => { - debug!(target: "afg", "Equivocation offender is not part of the authority set."); + debug!( + target: LOG_TARGET, + "Equivocation offender is not part of the authority set." + ); return Ok(()) }, }; @@ -609,8 +612,13 @@ where let tree_route = match tree_route_res { Ok(tree_route) => tree_route, Err(e) => { - debug!(target: "afg", "Encountered error computing ancestry between block {:?} and base {:?}: {}", - block, base, e); + debug!( + target: LOG_TARGET, + "Encountered error computing ancestry between block {:?} and base {:?}: {}", + block, + base, + e + ); return Err(GrandpaError::NotDescendent) }, @@ -955,7 +963,8 @@ where historical_votes: &HistoricalVotes, ) -> Result<(), Self::Error> { debug!( - target: "afg", "Voter {} completed round {} in set {}. Estimate = {:?}, Finalized in round = {:?}", + target: LOG_TARGET, + "Voter {} completed round {} in set {}. Estimate = {:?}, Finalized in round = {:?}", self.config.name(), round, self.set_id, @@ -1016,7 +1025,8 @@ where historical_votes: &HistoricalVotes, ) -> Result<(), Self::Error> { debug!( - target: "afg", "Voter {} concluded round {} in set {}. Estimate = {:?}, Finalized in round = {:?}", + target: LOG_TARGET, + "Voter {} concluded round {} in set {}. Estimate = {:?}, Finalized in round = {:?}", self.config.name(), round, self.set_id, @@ -1102,9 +1112,12 @@ where Self::Signature, >, ) { - warn!(target: "afg", "Detected prevote equivocation in the finality worker: {:?}", equivocation); + warn!( + target: LOG_TARGET, + "Detected prevote equivocation in the finality worker: {:?}", equivocation + ); if let Err(err) = self.report_equivocation(equivocation.into()) { - warn!(target: "afg", "Error reporting prevote equivocation: {}", err); + warn!(target: LOG_TARGET, "Error reporting prevote equivocation: {}", err); } } @@ -1117,9 +1130,12 @@ where Self::Signature, >, ) { - warn!(target: "afg", "Detected precommit equivocation in the finality worker: {:?}", equivocation); + warn!( + target: LOG_TARGET, + "Detected precommit equivocation in the finality worker: {:?}", equivocation + ); if let Err(err) = self.report_equivocation(equivocation.into()) { - warn!(target: "afg", "Error reporting precommit equivocation: {}", err); + warn!(target: LOG_TARGET, "Error reporting precommit equivocation: {}", err); } } } @@ -1158,7 +1174,8 @@ where let base_header = match client.header(BlockId::Hash(block))? 
{ Some(h) => h, None => { - debug!(target: "afg", + debug!( + target: LOG_TARGET, "Encountered error finding best chain containing {:?}: couldn't find base block", block, ); @@ -1172,7 +1189,10 @@ where // proceed onwards. most of the time there will be no pending transition. the limit, if any, is // guaranteed to be higher than or equal to the given base number. let limit = authority_set.current_limit(*base_header.number()); - debug!(target: "afg", "Finding best chain containing block {:?} with number limit {:?}", block, limit); + debug!( + target: LOG_TARGET, + "Finding best chain containing block {:?} with number limit {:?}", block, limit + ); let result = match select_chain.finality_target(block, None).await { Ok(best_hash) => { @@ -1234,7 +1254,10 @@ where .or_else(|| Some((target_header.hash(), *target_header.number()))) }, Err(e) => { - warn!(target: "afg", "Encountered error finding best chain containing {:?}: {}", block, e); + warn!( + target: LOG_TARGET, + "Encountered error finding best chain containing {:?}: {}", block, e + ); None }, }; @@ -1273,7 +1296,7 @@ where // This can happen after a forced change (triggered manually from the runtime when // finality is stalled), since the voter will be restarted at the median last finalized // block, which can be lower than the local best finalized block. - warn!(target: "afg", "Re-finalized block #{:?} ({:?}) in the canonical chain, current best finalized is #{:?}", + warn!(target: LOG_TARGET, "Re-finalized block #{:?} ({:?}) in the canonical chain, current best finalized is #{:?}", hash, number, status.finalized_number, @@ -1303,7 +1326,10 @@ where ) { if let Some(sender) = justification_sender { if let Err(err) = sender.notify(justification) { - warn!(target: "afg", "Error creating justification for subscriber: {}", err); + warn!( + target: LOG_TARGET, + "Error creating justification for subscriber: {}", err + ); } } } @@ -1354,11 +1380,16 @@ where client .apply_finality(import_op, hash, persisted_justification, true) .map_err(|e| { - warn!(target: "afg", "Error applying finality to block {:?}: {}", (hash, number), e); + warn!( + target: LOG_TARGET, + "Error applying finality to block {:?}: {}", + (hash, number), + e + ); e })?; - debug!(target: "afg", "Finalizing blocks up to ({:?}, {})", number, hash); + debug!(target: LOG_TARGET, "Finalizing blocks up to ({:?}, {})", number, hash); telemetry!( telemetry; @@ -1376,13 +1407,17 @@ where let (new_id, set_ref) = authority_set.current(); if set_ref.len() > 16 { - afg_log!( + grandpa_log!( initial_sync, "👴 Applying GRANDPA set change to new set with {} authorities", set_ref.len(), ); } else { - afg_log!(initial_sync, "👴 Applying GRANDPA set change to new set {:?}", set_ref); + grandpa_log!( + initial_sync, + "👴 Applying GRANDPA set change to new set {:?}", + set_ref + ); } telemetry!( @@ -1411,8 +1446,11 @@ where ); if let Err(e) = write_result { - warn!(target: "afg", "Failed to write updated authority set to disk. Bailing."); - warn!(target: "afg", "Node is in a potentially inconsistent state."); + warn!( + target: LOG_TARGET, + "Failed to write updated authority set to disk. Bailing." 
+ ); + warn!(target: LOG_TARGET, "Node is in a potentially inconsistent state."); return Err(e.into()) } diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index 453b41bc63468..43ed0ed31993e 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -52,7 +52,7 @@ use crate::{ authorities::{AuthoritySetChangeId, AuthoritySetChanges}, best_justification, justification::GrandpaJustification, - SharedAuthoritySet, + SharedAuthoritySet, LOG_TARGET, }; const MAX_UNKNOWN_HEADERS: usize = 100_000; @@ -163,7 +163,7 @@ where "Requested finality proof for descendant of #{} while we only have finalized #{}.", block, info.finalized_number, ); - trace!(target: "afg", "{}", &err); + trace!(target: LOG_TARGET, "{}", &err); return Err(FinalityProofError::BlockNotYetFinalized) } @@ -175,7 +175,7 @@ where justification } else { trace!( - target: "afg", + target: LOG_TARGET, "No justification found for the latest finalized block. \ Returning empty proof.", ); @@ -194,7 +194,7 @@ where grandpa_justification } else { trace!( - target: "afg", + target: LOG_TARGET, "No justification found when making finality proof for {}. \ Returning empty proof.", block, @@ -205,7 +205,7 @@ where }, AuthoritySetChangeId::Unknown => { warn!( - target: "afg", + target: LOG_TARGET, "AuthoritySetChanges does not cover the requested block #{} due to missing data. \ You need to resync to populate AuthoritySetChanges properly.", block, diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index 3715287eea31f..e061c105eeea5 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -45,6 +45,7 @@ use crate::{ justification::GrandpaJustification, notification::GrandpaJustificationSender, AuthoritySetChanges, ClientForGrandpa, CommandOrError, Error, NewAuthoritySet, VoterCommand, + LOG_TARGET, }; /// A block-import handler for GRANDPA. 
@@ -589,18 +590,16 @@ where Ok(ImportResult::Imported(aux)) => aux, Ok(r) => { debug!( - target: "afg", - "Restoring old authority set after block import result: {:?}", - r, + target: LOG_TARGET, + "Restoring old authority set after block import result: {:?}", r, ); pending_changes.revert(); return Ok(r) }, Err(e) => { debug!( - target: "afg", - "Restoring old authority set after block import error: {}", - e, + target: LOG_TARGET, + "Restoring old authority set after block import error: {}", e, ); pending_changes.revert(); return Err(ConsensusError::ClientImport(e.to_string())) @@ -665,7 +664,7 @@ where import_res.unwrap_or_else(|err| { if needs_justification { debug!( - target: "afg", + target: LOG_TARGET, "Requesting justification from peers due to imported block #{} that enacts authority set change with invalid justification: {}", number, err @@ -678,7 +677,7 @@ where None => if needs_justification { debug!( - target: "afg", + target: LOG_TARGET, "Imported unjustified block #{} that enacts authority set change, waiting for finality for enactment.", number, ); @@ -803,7 +802,7 @@ where match result { Err(CommandOrError::VoterCommand(command)) => { - afg_log!( + grandpa_log!( initial_sync, "👴 Imported justification for block #{} that triggers \ command {}, signaling voter.", diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index c1b4962d04a12..efc46d8f93a6d 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -93,10 +93,12 @@ use std::{ time::Duration, }; +const LOG_TARGET: &str = "grandpa"; + // utility logging macro that takes as first argument a conditional to // decide whether to log under debug or info level (useful to restrict // logging under initial sync). -macro_rules! afg_log { +macro_rules! grandpa_log { ($condition:expr, $($msg: expr),+ $(,)?) => { { let log_level = if $condition { @@ -105,7 +107,7 @@ macro_rules! afg_log { log::Level::Info }; - log::log!(target: "afg", log_level, $($msg),+); + log::log!(target: LOG_TARGET, log_level, $($msg),+); } }; } @@ -803,10 +805,11 @@ where ); let voter_work = voter_work.map(|res| match res { - Ok(()) => error!(target: "afg", + Ok(()) => error!( + target: LOG_TARGET, "GRANDPA voter future has concluded naturally, this should be unreachable." ), - Err(e) => error!(target: "afg", "GRANDPA voter error: {}", e), + Err(e) => error!(target: LOG_TARGET, "GRANDPA voter error: {}", e), }); // Make sure that `telemetry_task` doesn't accidentally finish and kill grandpa. @@ -871,7 +874,7 @@ where let metrics = match prometheus_registry.as_ref().map(Metrics::register) { Some(Ok(metrics)) => Some(metrics), Some(Err(e)) => { - debug!(target: "afg", "Failed to register metrics: {:?}", e); + debug!(target: LOG_TARGET, "Failed to register metrics: {:?}", e); None }, None => None, @@ -913,7 +916,12 @@ where /// state. This method should be called when we know that the authority set /// has changed (e.g. as signalled by a voter command). 
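`grandpa_log!` above (the renamed `afg_log!`) leans on a `log` facility feature also used in the BABE epoch logging earlier: `log::log!` accepts the level as a runtime value, so messages can be demoted to `Debug` during initial sync and promoted to `Info` afterwards. A standalone sketch of the idiom:

const LOG_TARGET: &str = "grandpa";

fn log_set_change(initial_sync: bool, new_authorities: usize) {
    // Demote to Debug while the node is initially syncing, where these
    // messages would otherwise flood the output.
    let log_level = if initial_sync { log::Level::Debug } else { log::Level::Info };

    log::log!(
        target: LOG_TARGET,
        log_level,
        "👴 Applying GRANDPA set change to new set with {} authorities",
        new_authorities,
    );
}

fn main() {
    env_logger::init();          // example backend, not part of the patch
    log_set_change(true, 4);     // Debug: usually filtered out
    log_set_change(false, 4);    // Info: visible at the default level
}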
fn rebuild_voter(&mut self) { - debug!(target: "afg", "{}: Starting new voter with set ID {}", self.env.config.name(), self.env.set_id); + debug!( + target: LOG_TARGET, + "{}: Starting new voter with set ID {}", + self.env.config.name(), + self.env.set_id + ); let maybe_authority_id = local_authority_id(&self.env.voters, self.env.config.keystore.as_ref()); @@ -974,7 +982,8 @@ where // Repoint shared_voter_state so that the RPC endpoint can query the state if self.shared_voter_state.reset(voter.voter_state()).is_none() { - info!(target: "afg", + info!( + target: LOG_TARGET, "Timed out trying to update shared GRANDPA voter state. \ RPC endpoints may return stale data." ); @@ -1043,7 +1052,7 @@ where Ok(()) }, VoterCommand::Pause(reason) => { - info!(target: "afg", "Pausing old validator set: {}", reason); + info!(target: LOG_TARGET, "Pausing old validator set: {}", reason); // not racing because old voter is shut down. self.env.update_voter_set_state(|voter_set_state| { diff --git a/client/finality-grandpa/src/observer.rs b/client/finality-grandpa/src/observer.rs index 9bcb03c0555c2..1efb71e5903ec 100644 --- a/client/finality-grandpa/src/observer.rs +++ b/client/finality-grandpa/src/observer.rs @@ -43,7 +43,7 @@ use crate::{ environment, global_communication, notification::GrandpaJustificationSender, ClientForGrandpa, CommandOrError, CommunicationIn, Config, Error, LinkHalf, VoterCommand, - VoterSetState, + VoterSetState, LOG_TARGET, }; struct ObserverChain<'a, Block: BlockT, Client> { @@ -145,7 +145,7 @@ where // proceed processing with new finalized block number future::ok(finalized_number) } else { - debug!(target: "afg", "Received invalid commit: ({:?}, {:?})", round, commit); + debug!(target: LOG_TARGET, "Received invalid commit: ({:?}, {:?})", round, commit); finality_grandpa::process_commit_validation_result(validation_result, callback); @@ -317,7 +317,7 @@ where // update it on-disk in case we restart as validator in the future. self.persistent_data.set_state = match command { VoterCommand::Pause(reason) => { - info!(target: "afg", "Pausing old validator set: {}", reason); + info!(target: LOG_TARGET, "Pausing old validator set: {}", reason); let completed_rounds = self.persistent_data.set_state.read().completed_rounds(); let set_state = VoterSetState::Paused { completed_rounds }; diff --git a/client/finality-grandpa/src/until_imported.rs b/client/finality-grandpa/src/until_imported.rs index df0b63348e94b..95b658e92298a 100644 --- a/client/finality-grandpa/src/until_imported.rs +++ b/client/finality-grandpa/src/until_imported.rs @@ -24,7 +24,7 @@ use super::{ BlockStatus as BlockStatusT, BlockSyncRequester as BlockSyncRequesterT, CommunicationIn, Error, - SignedMessage, + SignedMessage, LOG_TARGET, }; use finality_grandpa::voter; @@ -296,7 +296,7 @@ where let next_log = *last_log + LOG_PENDING_INTERVAL; if Instant::now() >= next_log { debug!( - target: "afg", + target: LOG_TARGET, "Waiting to import block {} before {} {} messages can be imported. \ Requesting network sync service to retrieve block from. 
\ Possible fork?", @@ -346,7 +346,7 @@ where fn warn_authority_wrong_target(hash: H, id: AuthorityId) { warn!( - target: "afg", + target: LOG_TARGET, "Authority {:?} signed GRANDPA message with \ wrong block number for hash {}", id, diff --git a/frame/aura/src/lib.rs b/frame/aura/src/lib.rs index ff2c5df04a453..635e22c5d58aa 100644 --- a/frame/aura/src/lib.rs +++ b/frame/aura/src/lib.rs @@ -58,6 +58,8 @@ mod tests; pub use pallet::*; +const LOG_TARGET: &str = "runtime::aura"; + #[frame_support::pallet] pub mod pallet { use super::*; @@ -222,7 +224,7 @@ impl OneSessionHandler for Pallet { if last_authorities != next_authorities { if next_authorities.len() as u32 > T::MaxAuthorities::get() { log::warn!( - target: "runtime::aura", + target: LOG_TARGET, "next authorities list larger than {}, truncating", T::MaxAuthorities::get(), ); diff --git a/frame/babe/src/equivocation.rs b/frame/babe/src/equivocation.rs index f55bda751887d..70f087fd461f9 100644 --- a/frame/babe/src/equivocation.rs +++ b/frame/babe/src/equivocation.rs @@ -48,7 +48,7 @@ use sp_staking::{ }; use sp_std::prelude::*; -use crate::{Call, Config, Pallet}; +use crate::{Call, Config, Pallet, LOG_TARGET}; /// A trait with utility methods for handling equivocation reports in BABE. /// The trait provides methods for reporting an offence triggered by a valid @@ -161,15 +161,9 @@ where }; match SubmitTransaction::>::submit_unsigned_transaction(call.into()) { - Ok(()) => log::info!( - target: "runtime::babe", - "Submitted BABE equivocation report.", - ), - Err(e) => log::error!( - target: "runtime::babe", - "Error submitting equivocation report: {:?}", - e, - ), + Ok(()) => log::info!(target: LOG_TARGET, "Submitted BABE equivocation report.",), + Err(e) => + log::error!(target: LOG_TARGET, "Error submitting equivocation report: {:?}", e,), } Ok(()) @@ -192,7 +186,7 @@ impl Pallet { TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ }, _ => { log::warn!( - target: "runtime::babe", + target: LOG_TARGET, "rejecting unsigned report equivocation transaction because it is not local/in-block.", ); diff --git a/frame/babe/src/lib.rs b/frame/babe/src/lib.rs index 1a9b3200087ae..16b2b2119793a 100644 --- a/frame/babe/src/lib.rs +++ b/frame/babe/src/lib.rs @@ -50,6 +50,8 @@ use sp_consensus_vrf::schnorrkel; pub use sp_consensus_babe::{AuthorityId, PUBLIC_KEY_LENGTH, RANDOMNESS_LENGTH, VRF_OUTPUT_LENGTH}; +const LOG_TARGET: &str = "runtime::babe"; + mod default_weights; mod equivocation; mod randomness; diff --git a/frame/grandpa/src/equivocation.rs b/frame/grandpa/src/equivocation.rs index 181d22fba545c..23801a4982a82 100644 --- a/frame/grandpa/src/equivocation.rs +++ b/frame/grandpa/src/equivocation.rs @@ -52,7 +52,7 @@ use sp_staking::{ SessionIndex, }; -use super::{Call, Config, Pallet}; +use super::{Call, Config, Pallet, LOG_TARGET}; /// A trait with utility methods for handling equivocation reports in GRANDPA. 
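The `until_imported` warning above is deliberately rate limited: it fires only once `LOG_PENDING_INTERVAL` has elapsed since the previous message, so a long-missing block does not spam the log on every poll. A standalone sketch of the throttle (the 15-second interval and message text are illustrative):

use std::time::{Duration, Instant};

const LOG_PENDING_INTERVAL: Duration = Duration::from_secs(15);

fn note_still_pending(last_log: &mut Instant, block: u64) {
    let next_log = *last_log + LOG_PENDING_INTERVAL;
    if Instant::now() >= next_log {
        log::debug!(
            target: "grandpa",
            "Waiting to import block {} before pending messages can be imported. Possible fork?",
            block,
        );
        *last_log = Instant::now();
    }
    // Inside the throttle window: stay silent.
}

fn main() {
    // Start far enough in the past that the first call logs immediately.
    let mut last_log = Instant::now() - 2 * LOG_PENDING_INTERVAL;
    note_still_pending(&mut last_log, 42); // logs
    note_still_pending(&mut last_log, 42); // throttled
}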
/// The offence type is generic, and the trait provides , reporting an offence @@ -170,15 +170,9 @@ where }; match SubmitTransaction::>::submit_unsigned_transaction(call.into()) { - Ok(()) => log::info!( - target: "runtime::afg", - "Submitted GRANDPA equivocation report.", - ), - Err(e) => log::error!( - target: "runtime::afg", - "Error submitting equivocation report: {:?}", - e, - ), + Ok(()) => log::info!(target: LOG_TARGET, "Submitted GRANDPA equivocation report.",), + Err(e) => + log::error!(target: LOG_TARGET, "Error submitting equivocation report: {:?}", e,), } Ok(()) @@ -211,7 +205,7 @@ impl Pallet { TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ }, _ => { log::warn!( - target: "runtime::afg", + target: LOG_TARGET, "rejecting unsigned report equivocation transaction because it is not local/in-block." ); diff --git a/frame/grandpa/src/lib.rs b/frame/grandpa/src/lib.rs index c6b7fd251661f..716cf54865773 100644 --- a/frame/grandpa/src/lib.rs +++ b/frame/grandpa/src/lib.rs @@ -70,6 +70,8 @@ pub use equivocation::{ pub use pallet::*; +const LOG_TARGET: &str = "runtime::grandpa"; + #[frame_support::pallet] pub mod pallet { use super::*; diff --git a/frame/grandpa/src/migrations/v4.rs b/frame/grandpa/src/migrations/v4.rs index 81dbd3bab4b67..33e200b728336 100644 --- a/frame/grandpa/src/migrations/v4.rs +++ b/frame/grandpa/src/migrations/v4.rs @@ -15,6 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +use crate::LOG_TARGET; use frame_support::{ traits::{Get, StorageVersion}, weights::Weight, @@ -34,14 +35,14 @@ pub const OLD_PREFIX: &[u8] = b"GrandpaFinality"; pub fn migrate>(new_pallet_name: N) -> Weight { if new_pallet_name.as_ref().as_bytes() == OLD_PREFIX { log::info!( - target: "runtime::afg", + target: LOG_TARGET, "New pallet name is equal to the old prefix. No migration needs to be done.", ); return Weight::zero() } let storage_version = StorageVersion::get::>(); log::info!( - target: "runtime::afg", + target: LOG_TARGET, "Running migration to v3.1 for grandpa with storage version {:?}", storage_version, ); From bb94ac73b65a4b21186efaf5a870b5550aa056aa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gon=C3=A7alo=20Pestana?= Date: Wed, 14 Dec 2022 22:55:24 +0000 Subject: [PATCH 28/29] Staking: store last `min-active-bond` on-chain (#12889) * Staking: store last min-active-bond on-chain Storing the `min-active-bond` onchain helps the UIs with minimal on-chain costs. Closes https://github.com/paritytech/substrate/issues/12746 * Avoid relying on sorting to set the * Addresses PR comments --- frame/staking/src/pallet/impls.rs | 14 +++++++++++++- frame/staking/src/pallet/mod.rs | 4 ++++ frame/staking/src/tests.rs | 26 ++++++++++++++++++++++++++ 3 files changed, 43 insertions(+), 1 deletion(-) diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs index 1a4086ad2ab11..830b33ceb69a2 100644 --- a/frame/staking/src/pallet/impls.rs +++ b/frame/staking/src/pallet/impls.rs @@ -704,6 +704,9 @@ impl Pallet { /// /// `maybe_max_len` can imposes a cap on the number of voters returned; /// + /// Sets `MinimumActiveStake` to the minimum active nominator stake in the returned set of + /// nominators. + /// /// This function is self-weighing as [`DispatchClass::Mandatory`]. 
pub fn get_npos_voters(maybe_max_len: Option) -> Vec> { let max_allowed_len = { @@ -719,6 +722,7 @@ impl Pallet { let mut voters_seen = 0u32; let mut validators_taken = 0u32; let mut nominators_taken = 0u32; + let mut min_active_stake = u64::MAX; let mut sorted_voters = T::VoterList::iter(); while all_voters.len() < max_allowed_len && @@ -733,12 +737,15 @@ impl Pallet { }; if let Some(Nominations { targets, .. }) = >::get(&voter) { + let voter_weight = weight_of(&voter); if !targets.is_empty() { - all_voters.push((voter.clone(), weight_of(&voter), targets)); + all_voters.push((voter.clone(), voter_weight, targets)); nominators_taken.saturating_inc(); } else { // Technically should never happen, but not much we can do about it. } + min_active_stake = + if voter_weight < min_active_stake { voter_weight } else { min_active_stake }; } else if Validators::::contains_key(&voter) { // if this voter is a validator: let self_vote = ( @@ -769,6 +776,11 @@ impl Pallet { Self::register_weight(T::WeightInfo::get_npos_voters(validators_taken, nominators_taken)); + let min_active_stake: T::CurrencyBalance = + if all_voters.len() == 0 { 0u64.into() } else { min_active_stake.into() }; + + MinimumActiveStake::::put(min_active_stake); + log!( info, "generated {} npos voters, {} from validators and {} nominators", diff --git a/frame/staking/src/pallet/mod.rs b/frame/staking/src/pallet/mod.rs index fda455ca3c166..2daa992f4ef6e 100644 --- a/frame/staking/src/pallet/mod.rs +++ b/frame/staking/src/pallet/mod.rs @@ -295,6 +295,10 @@ pub mod pallet { #[pallet::storage] pub type MinValidatorBond = StorageValue<_, BalanceOf, ValueQuery>; + /// The minimum active nominator stake of the last successful election. + #[pallet::storage] + pub type MinimumActiveStake = StorageValue<_, BalanceOf, ValueQuery>; + /// The minimum amount of commission that validators can set. /// /// If set to `0`, no limit exists. diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 3e0a62f53d886..74d8dc8a8105c 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -4463,6 +4463,32 @@ mod election_data_provider { ); } + #[test] + fn set_minimum_active_stake_is_correct() { + ExtBuilder::default() + .nominate(false) + .add_staker(61, 60, 2_000, StakerStatus::::Nominator(vec![21])) + .add_staker(71, 70, 10, StakerStatus::::Nominator(vec![21])) + .add_staker(81, 80, 50, StakerStatus::::Nominator(vec![21])) + .build_and_execute(|| { + assert_ok!(::electing_voters(None)); + assert_eq!(MinimumActiveStake::::get(), 10); + + // remove staker with lower bond by limiting the number of voters and check + // `MinimumActiveStake` again after electing voters. 
+ assert_ok!(::electing_voters(Some(5))); + assert_eq!(MinimumActiveStake::::get(), 50); + }); + } + + #[test] + fn set_minimum_active_stake_zero_correct() { + ExtBuilder::default().has_stakers(false).build_and_execute(|| { + assert_ok!(::electing_voters(None)); + assert_eq!(MinimumActiveStake::::get(), 0); + }); + } + #[test] fn voters_include_self_vote() { ExtBuilder::default().nominate(false).build_and_execute(|| { From cd2fdcf85eb96c53ce2a5d418d4338eb92f5d4f5 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Wed, 14 Dec 2022 23:59:01 +0000 Subject: [PATCH 29/29] Try-runtime Revamp and Facelift (#12537) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix online/offline confusion * unified cache file * multi-threaded babyyy * checkpoint for niklas * compiles * all tests pass with --test-threads 1 * child-tree scrape is also multi-threaded now. * better thread splitting * some suggestions (#12532) * some suggestions * tokio multithread * move unused dependencies * snapshot command * fix rem * a bit of cleanup * support optional checks * fix * OCW command migrated to wasm-only, as an example * state-version management fully in remote-ext * almost everything move to wasm executor, some CLI flags reduced * follow-chain works as well * Master.into() * everything builds now * concurrent insertion and download for remote builds * minor fix * fix a bug * checkpoint * some updates * fmt * review comments * fmt * fix * fmt * update * fmt * rename * fix the damn UI tests * fmt * remoe the thread abstraction for the time being * cleanup * fix CI * fmt * fix * fix a few more things * tweak log levels * better error handling * address grumbles: use futures::mpsc * review comments * fmt * Apply suggestions from code review Co-authored-by: Bastian Köcher * Update utils/frame/try-runtime/cli/src/lib.rs Co-authored-by: Bastian Köcher * better api version stuff * some doc update * a whole lot of docs * fmt * fix all docs * fmt * rpc rebase: Try-runtime Revamp and Facelift (#12921) * Introduce sensible weight constants (#12868) * Introduce sensible weight constants * cargo fmt * Remove unused import * Add missing import * ".git/.scripts/bench-bot.sh" pallet dev pallet_lottery Co-authored-by: command-bot <> * Checkout to the branch HEAD explicitly in `build-linux-substrate` (#12876) * cli: Improve pruning documentation (#12819) * cli: Improve pruning documentation Signed-off-by: Alexandru Vasile * cli: Keep `finalized` notation and remove `canonical` one * cli: Fix cargo doc * cli: `PruningModeClap` IR enum Signed-off-by: Alexandru Vasile * cli: Convert PruningModeClap into pruning modes Signed-off-by: Alexandru Vasile * cli: Use `PruningModeClap` Signed-off-by: Alexandru Vasile * cli: Rename to `DatabasePruningMode` Signed-off-by: Alexandru Vasile * cli: Implement `FromStr` instead of `clap::ValueEnum` Signed-off-by: Alexandru Vasile * Update client/cli/src/params/pruning_params.rs Co-authored-by: Bastian Köcher * Fix clippy Signed-off-by: Alexandru Vasile * cli: Add option documentation back Signed-off-by: Alexandru Vasile * Apply suggestions from code review Signed-off-by: Alexandru Vasile Co-authored-by: Bastian Köcher * Revert "Move LockableCurrency trait to fungibles::Lockable and deprecate LockableCurrency (#12798)" (#12882) This reverts commit ea3ca3f757ff9d9559665719a77da81f4cf0f0ce. 
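Back to the staking change a few hunks up: `get_npos_voters` derives `MinimumActiveStake` as a running minimum over the nominators it actually keeps, starting from `u64::MAX` and falling back to zero when the voter set ends up empty. A standalone sketch of that computation (stake figures borrowed from the new test):

fn min_active_stake(nominator_stakes: &[u64]) -> u64 {
    let mut min_active_stake = u64::MAX;
    for &voter_weight in nominator_stakes {
        // Same shape as the pallet code: lower the running minimum per voter.
        min_active_stake =
            if voter_weight < min_active_stake { voter_weight } else { min_active_stake };
    }
    // An empty set would otherwise report u64::MAX; the pallet stores 0 instead.
    if nominator_stakes.is_empty() { 0 } else { min_active_stake }
}

fn main() {
    assert_eq!(min_active_stake(&[2_000, 10, 50]), 10);
    // Dropping the lowest-bonded nominator (as the Some(5) limit does in the
    // test above) raises the stored minimum accordingly.
    assert_eq!(min_active_stake(&[2_000, 50]), 50);
    assert_eq!(min_active_stake(&[]), 0);
}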
* Don't indefinitely block on shutting down Tokio (#12885) * Don't indefinitely on shutting down Tokio Now we wait in maximum 60 seconds before we shutdown the node. Tasks are may be leaked and leading to some data corruption. * Drink less :thinking_face: * General Message Queue Pallet (#12485) * The message queue * Make fully generic * Refactor * Docs * Refactor * Use iter not slice * Per-origin queues * Multi-queue processing * Introduce MaxReady * Remove MaxReady in favour of ready ring * Cleanups * ReadyRing and tests * Stale page reaping * from_components -> from_parts Signed-off-by: Oliver Tale-Yazdi * Move WeightCounter to sp_weights Signed-off-by: Oliver Tale-Yazdi * Add MockedWeightInfo Signed-off-by: Oliver Tale-Yazdi * Deploy to kitchensink Signed-off-by: Oliver Tale-Yazdi * Use WeightCounter Signed-off-by: Oliver Tale-Yazdi * Small fixes and logging Signed-off-by: Oliver Tale-Yazdi * Add service_page Signed-off-by: Oliver Tale-Yazdi * Typo Signed-off-by: Oliver Tale-Yazdi * Move service_page below service_queue Signed-off-by: Oliver Tale-Yazdi * Add service_message Signed-off-by: Oliver Tale-Yazdi * Use correct weight function Signed-off-by: Oliver Tale-Yazdi * Overweight execution * Refactor * Missing file * Fix WeightCounter usage in scheduler Signed-off-by: Oliver Tale-Yazdi * Fix peek_index Take into account that decoding from a mutable slice modifies it. Signed-off-by: Oliver Tale-Yazdi * Add tests and bench service_page_item Signed-off-by: Oliver Tale-Yazdi * Add debug_info Signed-off-by: Oliver Tale-Yazdi * Add no-progress check to service_queues Signed-off-by: Oliver Tale-Yazdi * Add more benches Signed-off-by: Oliver Tale-Yazdi * Bound from_message and try_append_message Signed-off-by: Oliver Tale-Yazdi * Add PageReaped event Signed-off-by: Oliver Tale-Yazdi * Rename BookStateOf and BookStateFor Signed-off-by: Oliver Tale-Yazdi * Update tests and remove logging Signed-off-by: Oliver Tale-Yazdi * Remove redundant per-message origins; add footprint() and sweep_queue() * Move testing stuff to mock.rs Signed-off-by: Oliver Tale-Yazdi * Add integration test Signed-off-by: Oliver Tale-Yazdi * Fix no-progress check Signed-off-by: Oliver Tale-Yazdi * Fix debug_info Signed-off-by: Oliver Tale-Yazdi * Fixup merge and tests Signed-off-by: Oliver Tale-Yazdi * Fix footprint tracking * Introduce * Formatting * OverweightEnqueued event, auto-servicing config item * Update tests and benchmarks Signed-off-by: Oliver Tale-Yazdi * Clippy Signed-off-by: Oliver Tale-Yazdi * Add tests Signed-off-by: Oliver Tale-Yazdi * Provide change handler * Add missing BookStateFor::insert and call QueueChangeHandler Signed-off-by: Oliver Tale-Yazdi * Docs Signed-off-by: Oliver Tale-Yazdi * Update benchmarks and weights Signed-off-by: Oliver Tale-Yazdi * More tests... Signed-off-by: Oliver Tale-Yazdi * Use weight metering functions Signed-off-by: Oliver Tale-Yazdi * weightInfo::process_message_payload is gone Signed-off-by: Oliver Tale-Yazdi * Add defensive_saturating_accrue Signed-off-by: Oliver Tale-Yazdi * Rename WeightCounter to WeightMeter Ctr+Shift+H should do the trick. Signed-off-by: Oliver Tale-Yazdi * Test on_initialize Signed-off-by: Oliver Tale-Yazdi * Add module docs Signed-off-by: Oliver Tale-Yazdi * Remove origin from MaxMessageLen The message origin is not encoded into the heap and does therefore not influence the max message length anymore. 
Signed-off-by: Oliver Tale-Yazdi * Add BoundedVec::as_slice Signed-off-by: Oliver Tale-Yazdi * Test Page::{from_message, try_append_message} Signed-off-by: Oliver Tale-Yazdi * Fixup docs Signed-off-by: Oliver Tale-Yazdi * Docs * Do nothing in sweep_queue if the queue does not exist ... otherwise it inserts default values into the storage. Signed-off-by: Oliver Tale-Yazdi * Test ring (un)knitting Signed-off-by: Oliver Tale-Yazdi * Upgrade stress-test Change the test to not assume that all queued messages will be processed in the next block, but to split it over multiple blocks. Signed-off-by: Oliver Tale-Yazdi * More tests... Signed-off-by: Oliver Tale-Yazdi * Beauty fixes Signed-off-by: Oliver Tale-Yazdi * clippy Signed-off-by: Oliver Tale-Yazdi * Rename BoundedVec::as_slice to as_bounded_slice Conflicts with deref().as_slice() otherwise. Signed-off-by: Oliver Tale-Yazdi * Fix imports Signed-off-by: Oliver Tale-Yazdi * Remove ReadyRing struct Was used for testing only. Instead use 'fn assert_ring' which also checks the service head and backlinks. Signed-off-by: Oliver Tale-Yazdi * Beauty fixes Signed-off-by: Oliver Tale-Yazdi * Fix stale page watermark Signed-off-by: Oliver Tale-Yazdi * Cleanup Signed-off-by: Oliver Tale-Yazdi * Fix test feature and clippy Signed-off-by: Oliver Tale-Yazdi * QueueChanged handler is called correctly Signed-off-by: Oliver Tale-Yazdi * Update benches Signed-off-by: Oliver Tale-Yazdi * Abstract testing functions Signed-off-by: Oliver Tale-Yazdi * More tests Signed-off-by: Oliver Tale-Yazdi * Cleanup Signed-off-by: Oliver Tale-Yazdi * Clippy Signed-off-by: Oliver Tale-Yazdi * fmt Signed-off-by: Oliver Tale-Yazdi * Simplify tests Signed-off-by: Oliver Tale-Yazdi * Make stuff compile Signed-off-by: Oliver Tale-Yazdi * Extend overweight execution benchmark Signed-off-by: Oliver Tale-Yazdi * Remove TODOs Signed-off-by: Oliver Tale-Yazdi * Test service queue with faulty MessageProcessor Signed-off-by: Oliver Tale-Yazdi * fmt Signed-off-by: Oliver Tale-Yazdi * Update pallet ui tests to 1.65 Signed-off-by: Oliver Tale-Yazdi * More docs Signed-off-by: Oliver Tale-Yazdi * Review doc fixes Co-authored-by: Robert Klotzner Signed-off-by: Oliver Tale-Yazdi * Add weight_limit to extrinsic weight of execute_overweight * Correctly return unused weight * Return actual weight consumed in do_execute_overweight * Review fixes Signed-off-by: Oliver Tale-Yazdi * Set version 7.0.0-dev Signed-off-by: Oliver Tale-Yazdi * Make it compile Signed-off-by: Oliver Tale-Yazdi * Switch message_size to u64 Signed-off-by: Oliver Tale-Yazdi * Switch message_count to u64 Signed-off-by: Oliver Tale-Yazdi * Fix benchmarks Signed-off-by: Oliver Tale-Yazdi * Make CI green Signed-off-by: Oliver Tale-Yazdi * Docs * Update tests Signed-off-by: Oliver Tale-Yazdi * ".git/.scripts/bench-bot.sh" pallet dev pallet_message_queue * Don't mention README.md in the Cargo.toml Signed-off-by: Oliver Tale-Yazdi * Remove reference to readme Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Oliver Tale-Yazdi Co-authored-by: parity-processbot <> Co-authored-by: Robert Klotzner Co-authored-by: Keith Yeung * zombienet timings adjusted (#12890) * zombienet tests: add some timeout to allow net spin-up Sometimes tests fail on the first try, as the pods were not up yet. Adding a timeout should allow the network to spin up properly.
* initial timeout increased to 30s * Move import queue out of `sc-network` (#12764) * Move import queue out of `sc-network` Add supplementary asynchronous API for the import queue which means it can be run as an independent task and communicated with through the `ImportQueueService`. This commit removes block and justification imports from `sc-network` and provides `ChainSync` with a handle to the import queue so it can import blocks and justifications. Polling of the import queue is moved completely out of `sc-network` and `sc_consensus::Link` is implemented for `ChainSyncInterfaceHandled` so the import queue can still influence the syncing process. * Fix tests * Apply review comments * Apply suggestions from code review Co-authored-by: Bastian Köcher * Update client/network/sync/src/lib.rs Co-authored-by: Bastian Köcher Co-authored-by: Bastian Köcher * Trace response payload in default `jsonrpsee` middleware (#12886) * Trace result in default `jsonrpsee` middleware * `rpc_metrics::extra` Co-authored-by: Bastian Köcher Co-authored-by: Bastian Köcher * Ensure that we inform all tasks to stop before starting the 60 seconds shutdown (#12897) * Ensure that we inform all tasks to stop before starting the 60 seconds shutdown The change of waiting at most 60 seconds for the node to shut down actually introduced a bug. We were always waiting the full 60 seconds, as we didn't inform our tasks to shut down. The solution to this problem is to drop the task manager as this will then inform all tasks to end. It also adds tests to ensure that the behaviors work as expected. (This should already have been done in the first PR! :() * ".git/.scripts/fmt.sh" 1 Co-authored-by: command-bot <> * Safe desired targets call (#12826) * checked call for desired targets * fix compile * fmt * fix tests * cleaner with and_then * Fix typo (#12900) * ValidateUnsigned: Improve docs. (#12870) * ValidateUnsigned: Improve docs. * Review comments * rpc server with HTTP/WS on the same socket (#12663) * jsonrpsee v0.16 add backwards compatibility run old http server on http only * cargo fmt * update jsonrpsee 0.16.1 * less verbose cors log * fix nit in log: WS -> HTTP * revert needless changes in Cargo.lock * remove unused features in tower * fix nits; add client-core feature * jsonrpsee v0.16.2 * `pallet-message-queue`: Fix license (#12895) * Fix license Signed-off-by: Oliver Tale-Yazdi * Add mock doc Signed-off-by: Oliver Tale-Yazdi Signed-off-by: Oliver Tale-Yazdi * Use explicit call indices (#12891) * frame-system: explicit call index Signed-off-by: Oliver Tale-Yazdi * Use explicit call indices Signed-off-by: Oliver Tale-Yazdi * pallet-template: explicit call index Signed-off-by: Oliver Tale-Yazdi * DNM: Temporarily require call_index Signed-off-by: Oliver Tale-Yazdi * Revert "DNM: Temporarily require call_index" This reverts commit c4934e312e12af72ca05a8029d7da753a9c99346. Signed-off-by: Oliver Tale-Yazdi * Pin canonicalized block (#12902) * Remove implicit approval chilling upon slash.
(#12420) * don't read slashing spans when taking election snapshot * update cargo.toml * bring back remote test * fix merge stuff * fix npos-voters function sig * remove as much redundant diff as you can * Update frame/staking/src/pallet/mod.rs Co-authored-by: Andronik * fix * Update frame/staking/src/pallet/impls.rs * update lock * fix all tests * review comments * fmt * fix offence bench * clippy * ".git/.scripts/bench-bot.sh" pallet dev pallet_staking Co-authored-by: Andronik Co-authored-by: Ankan Co-authored-by: command-bot <> * bounties calls docs fix (#12909) Co-authored-by: parity-processbot <> * pallet-contracts migration pre-upgrade fix for v8 (#12905) * Only run pre-v8 migration check for versions older than 8 * Logic fix * use custom environment for publishing crates (#12912) * [contracts] Add debug buffer limit + enforcement (#12845) * Add debug buffer limit + enforcement Add debug buffer limit + enforcement * use BoundedVec for the debug buffer * revert schedule (debug buf len limit not needed anymore) * return DispatchError * addressed review comments * frame/remote-externalities: Fix clippy Signed-off-by: Alexandru Vasile * frame/rpc: Add previous export Signed-off-by: Alexandru Vasile * Fixup some wrong dependencies (#12899) * Fixup some wrong dependencies Dev dependencies should not appear in the feature list. If features are required, they should be directly enabled for the `dev-dependency`. * More fixups * Fix fix * Remove deprecated feature * Make all work properly and nice!! * FMT * Fix formatting * add numerator and denominator to Rational128 Debug impl and increase precision of float representation (#12914) * Fix state-db pinning (#12927) * Pin all canonicalized blocks * Added a test * Docs * [ci] add job switcher (#12922) Signed-off-by: Alexandru Vasile Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Keith Yeung Co-authored-by: Vlad Co-authored-by: Bastian Köcher Co-authored-by: Anthony Alaribe Co-authored-by: Gavin Wood Co-authored-by: Oliver Tale-Yazdi Co-authored-by: Robert Klotzner Co-authored-by: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Co-authored-by: Aaro Altonen <48052676+altonen@users.noreply.github.com> Co-authored-by: tgmichel Co-authored-by: Ankan <10196091+Ank4n@users.noreply.github.com> Co-authored-by: Luke Schoen Co-authored-by: Niklas Adolfsson Co-authored-by: Arkadiy Paronyan Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: Andronik Co-authored-by: Ankan Co-authored-by: Muharem Ismailov Co-authored-by: Dino Pačandi <3002868+Dinonard@users.noreply.github.com> Co-authored-by: João Paulo Silva de Souza <77391175+joao-paulo-parity@users.noreply.github.com> Co-authored-by: Sasha Gryaznov Co-authored-by: Alexander Popiak Co-authored-by: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> * Revert "rpc rebase: Try-runtime Revamp and Facelift (#12921)" This reverts commit 4ce770a9cb8daf1401529bda7d974b8c703f6b3e.
* Lexnv/kiz revamp try runtime stuff (#12932) * Introduce sensible weight constants (#12868) * Introduce sensible weight constants * cargo fmt * Remove unused import * Add missing import * ".git/.scripts/bench-bot.sh" pallet dev pallet_lottery Co-authored-by: command-bot <> * Checkout to the branch HEAD explicitly in `build-linux-substrate` (#12876) * cli: Improve pruning documentation (#12819) * cli: Improve pruning documentation Signed-off-by: Alexandru Vasile * cli: Keep `finalized` notation and remove `canonical` one * cli: Fix cargo doc * cli: `PruningModeClap` IR enum Signed-off-by: Alexandru Vasile * cli: Convert PruningModeClap into pruning modes Signed-off-by: Alexandru Vasile * cli: Use `PruningModeClap` Signed-off-by: Alexandru Vasile * cli: Rename to `DatabasePruningMode` Signed-off-by: Alexandru Vasile * cli: Implement `FromStr` instead of `clap::ValueEnum` Signed-off-by: Alexandru Vasile * Update client/cli/src/params/pruning_params.rs Co-authored-by: Bastian Köcher * Fix clippy Signed-off-by: Alexandru Vasile * cli: Add option documentation back Signed-off-by: Alexandru Vasile * Apply suggestions from code review Signed-off-by: Alexandru Vasile Co-authored-by: Bastian Köcher * Revert "Move LockableCurrency trait to fungibles::Lockable and deprecate LockableCurrency (#12798)" (#12882) This reverts commit ea3ca3f757ff9d9559665719a77da81f4cf0f0ce. * Don't indefinitely block on shutting down Tokio (#12885) * Don't indefinitely block on shutting down Tokio. Now we wait at most 60 seconds before we shut down the node. Tasks may be leaked, leading to some data corruption. * Drink less :thinking_face: * General Message Queue Pallet (#12485) * The message queue * Make fully generic * Refactor * Docs * Refactor * Use iter not slice * Per-origin queues * Multi-queue processing * Introduce MaxReady * Remove MaxReady in favour of ready ring * Cleanups * ReadyRing and tests * Stale page reaping * from_components -> from_parts Signed-off-by: Oliver Tale-Yazdi * Move WeightCounter to sp_weights Signed-off-by: Oliver Tale-Yazdi * Add MockedWeightInfo Signed-off-by: Oliver Tale-Yazdi * Deploy to kitchensink Signed-off-by: Oliver Tale-Yazdi * Use WeightCounter Signed-off-by: Oliver Tale-Yazdi * Small fixes and logging Signed-off-by: Oliver Tale-Yazdi * Add service_page Signed-off-by: Oliver Tale-Yazdi * Typo Signed-off-by: Oliver Tale-Yazdi * Move service_page below service_queue Signed-off-by: Oliver Tale-Yazdi * Add service_message Signed-off-by: Oliver Tale-Yazdi * Use correct weight function Signed-off-by: Oliver Tale-Yazdi * Overweight execution * Refactor * Missing file * Fix WeightCounter usage in scheduler Signed-off-by: Oliver Tale-Yazdi * Fix peek_index Take into account that decoding from a mutable slice modifies it.
Signed-off-by: Oliver Tale-Yazdi * Add tests and bench service_page_item Signed-off-by: Oliver Tale-Yazdi * Add debug_info Signed-off-by: Oliver Tale-Yazdi * Add no-progress check to service_queues Signed-off-by: Oliver Tale-Yazdi * Add more benches Signed-off-by: Oliver Tale-Yazdi * Bound from_message and try_append_message Signed-off-by: Oliver Tale-Yazdi * Add PageReaped event Signed-off-by: Oliver Tale-Yazdi * Rename BookStateOf and BookStateFor Signed-off-by: Oliver Tale-Yazdi * Update tests and remove logging Signed-off-by: Oliver Tale-Yazdi * Remove redundant per-message origins; add footprint() and sweep_queue() * Move testing stuff to mock.rs Signed-off-by: Oliver Tale-Yazdi * Add integration test Signed-off-by: Oliver Tale-Yazdi * Fix no-progress check Signed-off-by: Oliver Tale-Yazdi * Fix debug_info Signed-off-by: Oliver Tale-Yazdi * Fixup merge and tests Signed-off-by: Oliver Tale-Yazdi * Fix footprint tracking * Introduce * Formatting * OverweightEnqueued event, auto-servicing config item * Update tests and benchmarks Signed-off-by: Oliver Tale-Yazdi * Clippy Signed-off-by: Oliver Tale-Yazdi * Add tests Signed-off-by: Oliver Tale-Yazdi * Provide change handler * Add missing BookStateFor::insert and call QueueChangeHandler Signed-off-by: Oliver Tale-Yazdi * Docs Signed-off-by: Oliver Tale-Yazdi * Update benchmarks and weights Signed-off-by: Oliver Tale-Yazdi * More tests... Signed-off-by: Oliver Tale-Yazdi * Use weight metering functions Signed-off-by: Oliver Tale-Yazdi * weightInfo::process_message_payload is gone Signed-off-by: Oliver Tale-Yazdi * Add defensive_saturating_accrue Signed-off-by: Oliver Tale-Yazdi * Rename WeightCounter to WeightMeter Ctrl+Shift+H should do the trick. Signed-off-by: Oliver Tale-Yazdi * Test on_initialize Signed-off-by: Oliver Tale-Yazdi * Add module docs Signed-off-by: Oliver Tale-Yazdi * Remove origin from MaxMessageLen The message origin is not encoded into the heap and therefore no longer influences the max message length. Signed-off-by: Oliver Tale-Yazdi * Add BoundedVec::as_slice Signed-off-by: Oliver Tale-Yazdi * Test Page::{from_message, try_append_message} Signed-off-by: Oliver Tale-Yazdi * Fixup docs Signed-off-by: Oliver Tale-Yazdi * Docs * Do nothing in sweep_queue if the queue does not exist ... otherwise it inserts default values into the storage. Signed-off-by: Oliver Tale-Yazdi * Test ring (un)knitting Signed-off-by: Oliver Tale-Yazdi * Upgrade stress-test Change the test to not assume that all queued messages will be processed in the next block, but to split it over multiple blocks. Signed-off-by: Oliver Tale-Yazdi * More tests... Signed-off-by: Oliver Tale-Yazdi * Beauty fixes Signed-off-by: Oliver Tale-Yazdi * clippy Signed-off-by: Oliver Tale-Yazdi * Rename BoundedVec::as_slice to as_bounded_slice Conflicts with deref().as_slice() otherwise. Signed-off-by: Oliver Tale-Yazdi * Fix imports Signed-off-by: Oliver Tale-Yazdi * Remove ReadyRing struct Was used for testing only. Instead use 'fn assert_ring' which also checks the service head and backlinks.
Signed-off-by: Oliver Tale-Yazdi * Beauty fixes Signed-off-by: Oliver Tale-Yazdi * Fix stale page watermark Signed-off-by: Oliver Tale-Yazdi * Cleanup Signed-off-by: Oliver Tale-Yazdi * Fix test feature and clippy Signed-off-by: Oliver Tale-Yazdi * QueueChanged handler is called correctly Signed-off-by: Oliver Tale-Yazdi * Update benches Signed-off-by: Oliver Tale-Yazdi * Abstract testing functions Signed-off-by: Oliver Tale-Yazdi * More tests Signed-off-by: Oliver Tale-Yazdi * Cleanup Signed-off-by: Oliver Tale-Yazdi * Clippy Signed-off-by: Oliver Tale-Yazdi * fmt Signed-off-by: Oliver Tale-Yazdi * Simplify tests Signed-off-by: Oliver Tale-Yazdi * Make stuff compile Signed-off-by: Oliver Tale-Yazdi * Extend overweight execution benchmark Signed-off-by: Oliver Tale-Yazdi * Remove TODOs Signed-off-by: Oliver Tale-Yazdi * Test service queue with faulty MessageProcessor Signed-off-by: Oliver Tale-Yazdi * fmt Signed-off-by: Oliver Tale-Yazdi * Update pallet ui tests to 1.65 Signed-off-by: Oliver Tale-Yazdi * More docs Signed-off-by: Oliver Tale-Yazdi * Review doc fixes Co-authored-by: Robert Klotzner Signed-off-by: Oliver Tale-Yazdi * Add weight_limit to extrinsic weight of execute_overweight * Correctly return unused weight * Return actual weight consumed in do_execute_overweight * Review fixes Signed-off-by: Oliver Tale-Yazdi * Set version 7.0.0-dev Signed-off-by: Oliver Tale-Yazdi * Make it compile Signed-off-by: Oliver Tale-Yazdi * Switch message_size to u64 Signed-off-by: Oliver Tale-Yazdi * Switch message_count to u64 Signed-off-by: Oliver Tale-Yazdi * Fix benchmarks Signed-off-by: Oliver Tale-Yazdi * Make CI green Signed-off-by: Oliver Tale-Yazdi * Docs * Update tests Signed-off-by: Oliver Tale-Yazdi * ".git/.scripts/bench-bot.sh" pallet dev pallet_message_queue * Don't mention README.md in the Cargo.toml Signed-off-by: Oliver Tale-Yazdi * Remove reference to readme Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Oliver Tale-Yazdi Co-authored-by: parity-processbot <> Co-authored-by: Robert Klotzner Co-authored-by: Keith Yeung * zombienet timings adjusted (#12890) * zombienet tests: add some timeout to allow net spin-up Sometimes tests fail on the first try, as the pods were not up yet. Adding a timeout should allow the network to spin up properly. * initial timeout increased to 30s * Move import queue out of `sc-network` (#12764) * Move import queue out of `sc-network` Add supplementary asynchronous API for the import queue which means it can be run as an independent task and communicated with through the `ImportQueueService`. This commit removes block and justification imports from `sc-network` and provides `ChainSync` with a handle to the import queue so it can import blocks and justifications. Polling of the import queue is moved completely out of `sc-network` and `sc_consensus::Link` is implemented for `ChainSyncInterfaceHandled` so the import queue can still influence the syncing process.
* Fix tests * Apply review comments * Apply suggestions from code review Co-authored-by: Bastian Köcher * Update client/network/sync/src/lib.rs Co-authored-by: Bastian Köcher Co-authored-by: Bastian Köcher * Trace response payload in default `jsonrpsee` middleware (#12886) * Trace result in default `jsonrpsee` middleware * `rpc_metrics::extra` Co-authored-by: Bastian Köcher Co-authored-by: Bastian Köcher * Ensure that we inform all tasks to stop before starting the 60 seconds shutdown (#12897) * Ensure that we inform all tasks to stop before starting the 60 seconds shutdown The change of waiting at most 60 seconds for the node to shut down actually introduced a bug. We were always waiting the full 60 seconds, as we didn't inform our tasks to shut down. The solution to this problem is to drop the task manager as this will then inform all tasks to end. It also adds tests to ensure that the behaviors work as expected. (This should already have been done in the first PR! :() * ".git/.scripts/fmt.sh" 1 Co-authored-by: command-bot <> * Safe desired targets call (#12826) * checked call for desired targets * fix compile * fmt * fix tests * cleaner with and_then * Fix typo (#12900) * ValidateUnsigned: Improve docs. (#12870) * ValidateUnsigned: Improve docs. * Review comments * rpc server with HTTP/WS on the same socket (#12663) * jsonrpsee v0.16 add backwards compatibility run old http server on http only * cargo fmt * update jsonrpsee 0.16.1 * less verbose cors log * fix nit in log: WS -> HTTP * revert needless changes in Cargo.lock * remove unused features in tower * fix nits; add client-core feature * jsonrpsee v0.16.2 * `pallet-message-queue`: Fix license (#12895) * Fix license Signed-off-by: Oliver Tale-Yazdi * Add mock doc Signed-off-by: Oliver Tale-Yazdi Signed-off-by: Oliver Tale-Yazdi * Use explicit call indices (#12891) * frame-system: explicit call index Signed-off-by: Oliver Tale-Yazdi * Use explicit call indices Signed-off-by: Oliver Tale-Yazdi * pallet-template: explicit call index Signed-off-by: Oliver Tale-Yazdi * DNM: Temporarily require call_index Signed-off-by: Oliver Tale-Yazdi * Revert "DNM: Temporarily require call_index" This reverts commit c4934e312e12af72ca05a8029d7da753a9c99346. Signed-off-by: Oliver Tale-Yazdi * Pin canonicalized block (#12902) * Remove implicit approval chilling upon slash.
(#12420) * don't read slashing spans when taking election snapshot * update cargo.toml * bring back remote test * fix merge stuff * fix npos-voters function sig * remove as much redundant diff as you can * Update frame/staking/src/pallet/mod.rs Co-authored-by: Andronik * fix * Update frame/staking/src/pallet/impls.rs * update lock * fix all tests * review comments * fmt * fix offence bench * clippy * ".git/.scripts/bench-bot.sh" pallet dev pallet_staking Co-authored-by: Andronik Co-authored-by: Ankan Co-authored-by: command-bot <> * bounties calls docs fix (#12909) Co-authored-by: parity-processbot <> * pallet-contracts migration pre-upgrade fix for v8 (#12905) * Only run pre-v8 migration check for versions older than 8 * Logic fix * use custom environment for publishing crates (#12912) * [contracts] Add debug buffer limit + enforcement (#12845) * Add debug buffer limit + enforcement Add debug buffer limit + enforcement * use BoundedVec for the debug buffer * revert schedule (debug buf len limit not needed anymore) * return DispatchError * addressed review comments * frame/remote-externalities: Fix clippy Signed-off-by: Alexandru Vasile * frame/rpc: Add previous export Signed-off-by: Alexandru Vasile * Fixup some wrong dependencies (#12899) * Fixup some wrong dependencies Dev dependencies should not appear in the feature list. If features are required, they should be directly enabled for the `dev-dependency`. * More fixups * Fix fix * Remove deprecated feature * Make all work properly and nice!! * FMT * Fix formatting * add numerator and denominator to Rational128 Debug impl and increase precision of float representation (#12914) * Fix state-db pinning (#12927) * Pin all canonicalized blocks * Added a test * Docs * [ci] add job switcher (#12922) * Use LOG_TARGET in consensus related crates (#12875) * Use shared LOG_TARGET in consensus related crates * Rename target from "afg" to "grandpa" Signed-off-by: Alexandru Vasile Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Keith Yeung Co-authored-by: Vlad Co-authored-by: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Co-authored-by: Bastian Köcher Co-authored-by: Anthony Alaribe Co-authored-by: Gavin Wood Co-authored-by: Oliver Tale-Yazdi Co-authored-by: Robert Klotzner Co-authored-by: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Co-authored-by: Aaro Altonen <48052676+altonen@users.noreply.github.com> Co-authored-by: tgmichel Co-authored-by: Ankan <10196091+Ank4n@users.noreply.github.com> Co-authored-by: Luke Schoen Co-authored-by: Niklas Adolfsson Co-authored-by: Arkadiy Paronyan Co-authored-by: Andronik Co-authored-by: Ankan Co-authored-by: Muharem Ismailov Co-authored-by: Dino Pačandi <3002868+Dinonard@users.noreply.github.com> Co-authored-by: João Paulo Silva de Souza <77391175+joao-paulo-parity@users.noreply.github.com> Co-authored-by: Sasha Gryaznov Co-authored-by: Alexandru Vasile Co-authored-by: Alexander Popiak Co-authored-by: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Co-authored-by: Davide Galassi * Revert "Lexnv/kiz revamp try runtime stuff (#12932)" This reverts commit 378cfb26d984bcde467781f07ef8ddb6998212cb.
* fmt * update * fix publish Signed-off-by: Alexandru Vasile Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Niklas Adolfsson Co-authored-by: Bastian Köcher Co-authored-by: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Co-authored-by: Keith Yeung Co-authored-by: Vlad Co-authored-by: Anthony Alaribe Co-authored-by: Gavin Wood Co-authored-by: Oliver Tale-Yazdi Co-authored-by: Robert Klotzner Co-authored-by: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Co-authored-by: Aaro Altonen <48052676+altonen@users.noreply.github.com> Co-authored-by: tgmichel Co-authored-by: Ankan <10196091+Ank4n@users.noreply.github.com> Co-authored-by: Luke Schoen Co-authored-by: Arkadiy Paronyan Co-authored-by: Andronik Co-authored-by: Ankan Co-authored-by: Muharem Ismailov Co-authored-by: Dino Pačandi <3002868+Dinonard@users.noreply.github.com> Co-authored-by: João Paulo Silva de Souza <77391175+joao-paulo-parity@users.noreply.github.com> Co-authored-by: Sasha Gryaznov Co-authored-by: Alexander Popiak Co-authored-by: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Co-authored-by: Alexandru Vasile Co-authored-by: Davide Galassi --- Cargo.lock | 79 +- bin/node-template/node/Cargo.toml | 1 + bin/node-template/node/src/command.rs | 10 +- bin/node-template/runtime/src/lib.rs | 7 +- bin/node/cli/Cargo.toml | 1 + bin/node/cli/src/command.rs | 9 +- bin/node/runtime/src/lib.rs | 14 +- frame/bags-list/remote-tests/src/migration.rs | 2 +- frame/bags-list/remote-tests/src/snapshot.rs | 18 +- frame/bags-list/remote-tests/src/try_state.rs | 8 +- frame/executive/Cargo.toml | 2 +- frame/executive/src/lib.rs | 94 +- frame/staking/Cargo.toml | 1 - frame/staking/src/pallet/impls.rs | 11 +- frame/state-trie-migration/src/lib.rs | 9 +- .../procedural/src/pallet/expand/hooks.rs | 2 +- frame/support/src/dispatch.rs | 2 +- frame/support/src/traits/hooks.rs | 177 +-- frame/support/src/traits/try_runtime.rs | 2 + ...age_ensure_span_are_ok_on_wrong_gen.stderr | 4 +- ...re_span_are_ok_on_wrong_gen_unnamed.stderr | 4 +- frame/try-runtime/src/lib.rs | 19 +- primitives/runtime/Cargo.toml | 1 + .../src/generic/unchecked_extrinsic.rs | 16 + primitives/runtime/src/testing.rs | 8 + primitives/runtime/src/traits.rs | 22 + primitives/storage/src/lib.rs | 1 + utils/frame/remote-externalities/Cargo.toml | 4 +- utils/frame/remote-externalities/src/lib.rs | 1194 ++++++++++------- .../remote-externalities/test_data/proxy_test | Bin 0 -> 70206 bytes .../test_data/proxy_test.top | Bin 39 -> 0 bytes utils/frame/rpc/client/src/lib.rs | 1 + utils/frame/try-runtime/cli/Cargo.toml | 16 +- .../cli/src/commands/create_snapshot.rs | 78 ++ .../cli/src/commands/execute_block.rs | 197 +-- .../cli/src/commands/follow_chain.rs | 128 +- .../frame/try-runtime/cli/src/commands/mod.rs | 9 +- .../cli/src/commands/offchain_worker.rs | 112 +- .../cli/src/commands/on_runtime_upgrade.rs | 56 +- utils/frame/try-runtime/cli/src/lib.rs | 923 +++++++------ 40 files changed, 1779 insertions(+), 1463 deletions(-) create mode 100644 utils/frame/remote-externalities/test_data/proxy_test delete mode 100644 utils/frame/remote-externalities/test_data/proxy_test.top create mode 100644 utils/frame/try-runtime/cli/src/commands/create_snapshot.rs diff --git a/Cargo.lock b/Cargo.lock index ea34146f16f9b..b840cadce3e5c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -269,9 +269,9 @@ checksum = "e91831deabf0d6d7ec49552e489aed63b7456a7a3c46cff62adad428110b0af0" [[package]] name = "async-trait" -version = "0.1.58" +version = "0.1.59" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e805d94e6b5001b651426cf4cd446b1ab5f319d27bab5c644f61de0a804360c" +checksum = "31e6e93155431f3931513b243d371981bb2770112b370c82745a1d19d2f99364" dependencies = [ "proc-macro2", "quote", @@ -2031,6 +2031,7 @@ version = "0.10.0-dev" dependencies = [ "env_logger", "frame-support", + "futures", "log", "pallet-elections-phragmen", "parity-scale-codec", @@ -2042,6 +2043,7 @@ dependencies = [ "sp-version", "substrate-rpc-client", "tokio", + "tracing-subscriber 0.3.16", ] [[package]] @@ -2874,9 +2876,9 @@ checksum = "879d54834c8c76457ef4293a689b2a8c59b076067ad77b15efafbb05f92a592b" [[package]] name = "itertools" -version = "0.10.3" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9a9d19fa1e79b6215ff29b9d6880b706147f16e9b1dbb1e4e5947b5b02bc5e3" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" dependencies = [ "either", ] @@ -3788,6 +3790,15 @@ dependencies = [ "regex-automata", ] +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata", +] + [[package]] name = "matches" version = "0.1.8" @@ -4271,6 +4282,7 @@ dependencies = [ "sp-core", "sp-finality-grandpa", "sp-inherents", + "sp-io", "sp-keyring", "sp-keystore", "sp-runtime", @@ -4427,6 +4439,7 @@ dependencies = [ "sp-core", "sp-finality-grandpa", "sp-inherents", + "sp-io", "sp-keyring", "sp-runtime", "sp-timestamp", @@ -4529,6 +4542,16 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + [[package]] name = "num-bigint" version = "0.4.3" @@ -4667,6 +4690,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + [[package]] name = "pallet-alliance" version = "4.0.0-dev" @@ -7585,7 +7614,7 @@ dependencies = [ "substrate-test-runtime", "tempfile", "tracing", - "tracing-subscriber", + "tracing-subscriber 0.2.25", "wasmi 0.13.0", "wat", ] @@ -8337,7 +8366,7 @@ dependencies = [ "thiserror", "tracing", "tracing-log", - "tracing-subscriber", + "tracing-subscriber 0.2.25", ] [[package]] @@ -8705,9 +8734,9 @@ dependencies = [ [[package]] name = "sharded-slab" -version = "0.1.1" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79c719719ee05df97490f80a45acfc99e5a30ce98a1e4fb67aee422745ae14e3" +checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" dependencies = [ "lazy_static", ] @@ -8763,9 +8792,9 @@ checksum = "03b634d87b960ab1a38c4fe143b508576f075e7c978bfad18217645ebfdfa2ec" [[package]] name = "smallvec" -version = "1.8.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" +checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "snap" @@ -9555,7 +9584,7 @@ dependencies = [ "sp-std", 
"tracing", "tracing-core", - "tracing-subscriber", + "tracing-subscriber 0.2.25", ] [[package]] @@ -10431,9 +10460,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.28" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b7358be39f2f274f322d2aaed611acc57f382e8eb1e5b48cb9ae30933495ce7" +checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" dependencies = [ "once_cell", "valuable", @@ -10479,7 +10508,7 @@ dependencies = [ "ansi_term", "chrono", "lazy_static", - "matchers", + "matchers 0.0.1", "parking_lot 0.11.2", "regex", "serde", @@ -10493,6 +10522,24 @@ dependencies = [ "tracing-serde", ] +[[package]] +name = "tracing-subscriber" +version = "0.3.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6176eae26dd70d0c919749377897b54a9276bd7061339665dd68777926b5a70" +dependencies = [ + "matchers 0.1.0", + "nu-ansi-term", + "once_cell", + "regex", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", +] + [[package]] name = "treeline" version = "0.1.0" @@ -10605,6 +10652,7 @@ dependencies = [ "clap 4.0.11", "frame-remote-externalities", "frame-try-runtime", + "hex", "log", "parity-scale-codec", "sc-chain-spec", @@ -10612,10 +10660,13 @@ dependencies = [ "sc-executor", "sc-service", "serde", + "sp-api", "sp-core", + "sp-debug-derive", "sp-externalities", "sp-io", "sp-keystore", + "sp-rpc", "sp-runtime", "sp-state-machine", "sp-version", diff --git a/bin/node-template/node/Cargo.toml b/bin/node-template/node/Cargo.toml index 2ea841093d0e2..364cfa25d3c6b 100644 --- a/bin/node-template/node/Cargo.toml +++ b/bin/node-template/node/Cargo.toml @@ -36,6 +36,7 @@ sc-finality-grandpa = { version = "0.10.0-dev", path = "../../../client/finality sp-finality-grandpa = { version = "4.0.0-dev", path = "../../../primitives/finality-grandpa" } sc-client-api = { version = "4.0.0-dev", path = "../../../client/api" } sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } +sp-io = { version = "7.0.0", path = "../../../primitives/io" } sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" } sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } sp-keyring = { version = "7.0.0", path = "../../../primitives/keyring" } diff --git a/bin/node-template/node/src/command.rs b/bin/node-template/node/src/command.rs index 6d293b7b85fcc..15cd69b34b5b2 100644 --- a/bin/node-template/node/src/command.rs +++ b/bin/node-template/node/src/command.rs @@ -174,6 +174,8 @@ pub fn run() -> sc_cli::Result<()> { }, #[cfg(feature = "try-runtime")] Some(Subcommand::TryRuntime(cmd)) => { + use crate::service::ExecutorDispatch; + use sc_executor::{sp_wasm_interface::ExtendedHostFunctions, NativeExecutionDispatch}; let runner = cli.create_runner(cmd)?; runner.async_run(|config| { // we don't need any of the components of new_partial, just a runtime, or a task @@ -182,7 +184,13 @@ pub fn run() -> sc_cli::Result<()> { let task_manager = sc_service::TaskManager::new(config.tokio_handle.clone(), registry) .map_err(|e| sc_cli::Error::Service(sc_service::Error::Prometheus(e)))?; - Ok((cmd.run::(config), task_manager)) + Ok(( + cmd.run::::ExtendHostFunctions, + >>(), + task_manager, + )) }) }, #[cfg(not(feature = "try-runtime"))] diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index 938282c662b5c..f76b2c449ee4a 100644 --- a/bin/node-template/runtime/src/lib.rs +++ 
b/bin/node-template/runtime/src/lib.rs @@ -539,22 +539,23 @@ impl_runtime_apis! { #[cfg(feature = "try-runtime")] impl frame_try_runtime::TryRuntime for Runtime { - fn on_runtime_upgrade() -> (Weight, Weight) { + fn on_runtime_upgrade(checks: bool) -> (Weight, Weight) { // NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to // have a backtrace here. If any of the pre/post migration checks fail, we shall stop // right here and right now. - let weight = Executive::try_runtime_upgrade().unwrap(); + let weight = Executive::try_runtime_upgrade(checks).unwrap(); (weight, BlockWeights::get().max_block) } fn execute_block( block: Block, state_root_check: bool, + signature_check: bool, select: frame_try_runtime::TryStateSelect ) -> Weight { // NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to // have a backtrace here. - Executive::try_execute_block(block, state_root_check, select).expect("execute-block failed") + Executive::try_execute_block(block, state_root_check, signature_check, select).expect("execute-block failed") } } } diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 4ee4bcd033921..6b50115fd9a00 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -59,6 +59,7 @@ sp-keystore = { version = "0.13.0", path = "../../../primitives/keystore" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } sp-transaction-pool = { version = "4.0.0-dev", path = "../../../primitives/transaction-pool" } sp-transaction-storage-proof = { version = "4.0.0-dev", path = "../../../primitives/transaction-storage-proof" } +sp-io = { path = "../../../primitives/io" } # client dependencies sc-client-api = { version = "4.0.0-dev", path = "../../../client/api" } diff --git a/bin/node/cli/src/command.rs b/bin/node/cli/src/command.rs index 108d7743843b6..fd464bbc914a5 100644 --- a/bin/node/cli/src/command.rs +++ b/bin/node/cli/src/command.rs @@ -227,6 +227,7 @@ pub fn run() -> Result<()> { }, #[cfg(feature = "try-runtime")] Some(Subcommand::TryRuntime(cmd)) => { + use sc_executor::{sp_wasm_interface::ExtendedHostFunctions, NativeExecutionDispatch}; let runner = cli.create_runner(cmd)?; runner.async_run(|config| { // we don't need any of the components of new_partial, just a runtime, or a task @@ -236,7 +237,13 @@ pub fn run() -> Result<()> { sc_service::TaskManager::new(config.tokio_handle.clone(), registry) .map_err(|e| sc_cli::Error::Service(sc_service::Error::Prometheus(e)))?; - Ok((cmd.run::(config), task_manager)) + Ok(( + cmd.run::::ExtendHostFunctions, + >>(), + task_manager, + )) }) }, #[cfg(not(feature = "try-runtime"))] diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 00d2a54d1e774..0e3bee8821fc2 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -2158,29 +2158,23 @@ impl_runtime_apis! { #[cfg(feature = "try-runtime")] impl frame_try_runtime::TryRuntime for Runtime { - fn on_runtime_upgrade() -> (Weight, Weight) { + fn on_runtime_upgrade(checks: bool) -> (Weight, Weight) { // NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to // have a backtrace here. If any of the pre/post migration checks fail, we shall stop // right here and right now. 
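For reference, a sketch of the call shape used in both `TryRuntime` subcommand hunks above, with the generics spelled out (assuming the node's own `Block` and `ExecutorDispatch` types): the command is now handed the block type and the native executor's full host-function set rather than the whole service `config`.

    Ok((
        cmd.run::<Block, ExtendedHostFunctions<
            sp_io::SubstrateHostFunctions,
            <ExecutorDispatch as NativeExecutionDispatch>::ExtendHostFunctions,
        >>(),
        task_manager,
    ))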
- let weight = Executive::try_runtime_upgrade().unwrap(); + let weight = Executive::try_runtime_upgrade(checks).unwrap(); (weight, RuntimeBlockWeights::get().max_block) } fn execute_block( block: Block, state_root_check: bool, + signature_check: bool, select: frame_try_runtime::TryStateSelect ) -> Weight { - log::info!( - target: "node-runtime", - "try-runtime: executing block {:?} / root checks: {:?} / try-state-select: {:?}", - block.header.hash(), - state_root_check, - select, - ); // NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to // have a backtrace here. - Executive::try_execute_block(block, state_root_check, select).unwrap() + Executive::try_execute_block(block, state_root_check, signature_check, select).unwrap() } } diff --git a/frame/bags-list/remote-tests/src/migration.rs b/frame/bags-list/remote-tests/src/migration.rs index b013472b4c90e..759906a4ef479 100644 --- a/frame/bags-list/remote-tests/src/migration.rs +++ b/frame/bags-list/remote-tests/src/migration.rs @@ -30,7 +30,7 @@ pub async fn execute( ws_url: String, ) where Runtime: RuntimeT, - Block: BlockT, + Block: BlockT + DeserializeOwned, Block::Header: DeserializeOwned, { let mut ext = Builder::::new() diff --git a/frame/bags-list/remote-tests/src/snapshot.rs b/frame/bags-list/remote-tests/src/snapshot.rs index cfe065924bd92..0163ca200a15d 100644 --- a/frame/bags-list/remote-tests/src/snapshot.rs +++ b/frame/bags-list/remote-tests/src/snapshot.rs @@ -25,7 +25,7 @@ use sp_runtime::{traits::Block as BlockT, DeserializeOwned}; pub async fn execute(voter_limit: Option, currency_unit: u64, ws_url: String) where Runtime: crate::RuntimeT, - Block: BlockT, + Block: BlockT + DeserializeOwned, Block::Header: DeserializeOwned, { use frame_support::storage::generator::StorageMap; @@ -38,14 +38,18 @@ where pallets: vec![pallet_bags_list::Pallet::::name() .to_string()], at: None, + hashed_prefixes: vec![ + >::prefix_hash(), + >::prefix_hash(), + >::map_storage_final_prefix(), + >::map_storage_final_prefix(), + ], + hashed_keys: vec![ + >::counter_storage_final_key().to_vec(), + >::counter_storage_final_key().to_vec(), + ], ..Default::default() })) - .inject_hashed_prefix(&>::prefix_hash()) - .inject_hashed_prefix(&>::prefix_hash()) - .inject_hashed_prefix(&>::map_storage_final_prefix()) - .inject_hashed_prefix(&>::map_storage_final_prefix()) - .inject_hashed_key(&>::counter_storage_final_key()) - .inject_hashed_key(&>::counter_storage_final_key()) .build() .await .unwrap(); diff --git a/frame/bags-list/remote-tests/src/try_state.rs b/frame/bags-list/remote-tests/src/try_state.rs index d3fb63f045a64..514c80d72ab67 100644 --- a/frame/bags-list/remote-tests/src/try_state.rs +++ b/frame/bags-list/remote-tests/src/try_state.rs @@ -31,7 +31,7 @@ pub async fn execute( ws_url: String, ) where Runtime: crate::RuntimeT, - Block: BlockT, + Block: BlockT + DeserializeOwned, Block::Header: DeserializeOwned, { let mut ext = Builder::::new() @@ -39,10 +39,12 @@ pub async fn execute( transport: ws_url.to_string().into(), pallets: vec![pallet_bags_list::Pallet::::name() .to_string()], + hashed_prefixes: vec![ + >::prefix_hash(), + >::prefix_hash(), + ], ..Default::default() })) - .inject_hashed_prefix(&>::prefix_hash()) - .inject_hashed_prefix(&>::prefix_hash()) .build() .await .unwrap(); diff --git a/frame/executive/Cargo.toml b/frame/executive/Cargo.toml index b3e4247445710..a6d16e9b0793f 100644 --- a/frame/executive/Cargo.toml +++ b/frame/executive/Cargo.toml @@ -49,4 +49,4 @@ std = [ "sp-std/std", 
"sp-tracing/std", ] -try-runtime = ["frame-support/try-runtime", "frame-try-runtime/try-runtime" ] +try-runtime = ["frame-support/try-runtime", "frame-try-runtime/try-runtime", "sp-runtime/try-runtime"] diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index 6f59ac72eb2fd..1f20e93f43c30 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -227,27 +227,71 @@ where { /// Execute given block, but don't as strict is the normal block execution. /// - /// Some consensus related checks such as the state root check can be switched off via - /// `try_state_root`. Some additional non-consensus checks can be additionally enabled via - /// `try_state`. + /// Some checks can be disabled via: + /// + /// - `state_root_check` + /// - `signature_check` /// /// Should only be used for testing ONLY. pub fn try_execute_block( block: Block, - try_state_root: bool, + state_root_check: bool, + signature_check: bool, select: frame_try_runtime::TryStateSelect, - ) -> Result { - use frame_support::traits::TryState; + ) -> Result { + frame_support::log::info!( + target: "frame::executive", + "try-runtime: executing block #{:?} / state root check: {:?} / signature check: {:?} / try-state-select: {:?}", + block.header().number(), + state_root_check, + signature_check, + select, + ); Self::initialize_block(block.header()); Self::initial_checks(&block); let (header, extrinsics) = block.deconstruct(); - Self::execute_extrinsics_with_book_keeping(extrinsics, *header.number()); + let try_apply_extrinsic = |uxt: Block::Extrinsic| -> ApplyExtrinsicResult { + sp_io::init_tracing(); + let encoded = uxt.encode(); + let encoded_len = encoded.len(); + + // skip signature verification. + let xt = if signature_check { + uxt.check(&Default::default()) + } else { + uxt.unchecked_into_checked_i_know_what_i_am_doing(&Default::default()) + }?; + >::note_extrinsic(encoded); + + let dispatch_info = xt.get_dispatch_info(); + let r = Applyable::apply::(xt, &dispatch_info, encoded_len)?; - // run the try-state checks of all pallets. - >::try_state( + >::note_applied_extrinsic(&r, dispatch_info); + + Ok(r.map(|_| ()).map_err(|e| e.error)) + }; + + for e in extrinsics { + if let Err(err) = try_apply_extrinsic(e.clone()) { + frame_support::log::error!( + target: "runtime::executive", "executing transaction {:?} failed due to {:?}. Aborting the rest of the block execution.", + e, + err, + ); + break + } + } + + // post-extrinsics book-keeping + >::note_finished_extrinsics(); + Self::idle_and_finalize_hook(*header.number()); + + // run the try-state checks of all pallets, ensuring they don't alter any state. + let _guard = frame_support::StorageNoopGuard::default(); + >::try_state( *header.number(), select, ) @@ -255,6 +299,7 @@ where frame_support::log::error!(target: "runtime::executive", "failure: {:?}", e); e })?; + drop(_guard); // do some of the checks that would normally happen in `final_checks`, but perhaps skip // the state root check. @@ -266,7 +311,7 @@ where assert!(header_item == computed_item, "Digest item must match that calculated."); } - if try_state_root { + if state_root_check { let storage_root = new_header.state_root(); header.state_root().check_equal(storage_root); assert!( @@ -286,9 +331,30 @@ where /// Execute all `OnRuntimeUpgrade` of this runtime, including the pre and post migration checks. /// - /// This should only be used for testing. 
- pub fn try_runtime_upgrade() -> Result { - let weight = Self::execute_on_runtime_upgrade(); + /// Runs the try-state code both before and after the migration function if `checks` is set to + /// `true`. Also, if set to `true`, it runs the `pre_upgrade` and `post_upgrade` hooks. + pub fn try_runtime_upgrade(checks: bool) -> Result { + if checks { + let _guard = frame_support::StorageNoopGuard::default(); + >::try_state( + frame_system::Pallet::::block_number(), + frame_try_runtime::TryStateSelect::All, + )?; + } + + let weight = + <(COnRuntimeUpgrade, AllPalletsWithSystem) as OnRuntimeUpgrade>::try_on_runtime_upgrade( + checks, + )?; + + if checks { + let _guard = frame_support::StorageNoopGuard::default(); + >::try_state( + frame_system::Pallet::::block_number(), + frame_try_runtime::TryStateSelect::All, + )?; + } + Ok(weight) } } @@ -314,7 +380,7 @@ where UnsignedValidator: ValidateUnsigned>, { /// Execute all `OnRuntimeUpgrade` of this runtime, and return the aggregate weight. - pub fn execute_on_runtime_upgrade() -> frame_support::weights::Weight { + pub fn execute_on_runtime_upgrade() -> Weight { <(COnRuntimeUpgrade, AllPalletsWithSystem) as OnRuntimeUpgrade>::on_runtime_upgrade() } diff --git a/frame/staking/Cargo.toml b/frame/staking/Cargo.toml index a7fca045cc4ba..f6b3b95d0beb9 100644 --- a/frame/staking/Cargo.toml +++ b/frame/staking/Cargo.toml @@ -62,7 +62,6 @@ std = [ "sp-runtime/std", "sp-staking/std", "pallet-session/std", - "pallet-bags-list/std", "frame-system/std", "pallet-authorship/std", "sp-application-crypto/std", diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs index 830b33ceb69a2..d3b9b6a2b1e83 100644 --- a/frame/staking/src/pallet/impls.rs +++ b/frame/staking/src/pallet/impls.rs @@ -1636,9 +1636,9 @@ impl Pallet { ensure!( T::VoterList::iter() .all(|x| >::contains_key(&x) || >::contains_key(&x)), - "VoterList contains non-nominators" + "VoterList contains non-staker" ); - T::VoterList::try_state()?; + Self::check_nominators()?; Self::check_exposures()?; Self::check_ledgers()?; @@ -1651,7 +1651,10 @@ impl Pallet { Nominators::::count() + Validators::::count(), "wrong external count" ); - + ensure!( + ::TargetList::count() == Validators::::count(), + "wrong external count" + ); ensure!( ValidatorCount::::get() <= ::MaxWinners::get(), @@ -1692,7 +1695,7 @@ impl Pallet { >::iter() .filter_map( |(nominator, nomination)| { - if nomination.submitted_in > era { + if nomination.submitted_in < era { Some(nominator) } else { None diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 823ea08a0b573..23f73bb56b173 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -1619,7 +1619,10 @@ pub(crate) mod remote_tests { use frame_system::Pallet as System; use remote_externalities::Mode; use sp_core::H256; - use sp_runtime::traits::{Block as BlockT, HashFor, Header as _, One, Zero}; + use sp_runtime::{ + traits::{Block as BlockT, HashFor, Header as _, One, Zero}, + DeserializeOwned, + }; use thousands::Separable; #[allow(dead_code)] @@ -1648,12 +1651,12 @@ pub(crate) mod remote_tests { pub(crate) async fn run_with_limits(limits: MigrationLimits, mode: Mode) where Runtime: crate::Config, - Block: BlockT, + Block: BlockT + DeserializeOwned, Block::Header: serde::de::DeserializeOwned, { let mut ext = remote_externalities::Builder::::new() .mode(mode) - .state_version(sp_core::storage::StateVersion::V0) + .overwrite_state_version(sp_core::storage::StateVersion::V0) 
.build() .await .unwrap(); diff --git a/frame/support/procedural/src/pallet/expand/hooks.rs b/frame/support/procedural/src/pallet/expand/hooks.rs index d8d009cf3c940..0aa7c1e7aaf06 100644 --- a/frame/support/procedural/src/pallet/expand/hooks.rs +++ b/frame/support/procedural/src/pallet/expand/hooks.rs @@ -50,7 +50,7 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { } else { // default. quote::quote! { - #frame_support::log::info!( + #frame_support::log::debug!( target: #frame_support::LOG_TARGET, "✅ no migration for {}", pallet_name, diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index 1696e9a63915e..93cf08c131641 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -2188,7 +2188,7 @@ macro_rules! decl_module { $system::Config >::PalletInfo as $crate::traits::PalletInfo>::name::().unwrap_or(""); - $crate::log::info!( + $crate::log::debug!( target: $crate::LOG_TARGET, "✅ no migration for {}", pallet_name, diff --git a/frame/support/src/traits/hooks.rs b/frame/support/src/traits/hooks.rs index 3415682c0b382..3f7db1fa046bd 100644 --- a/frame/support/src/traits/hooks.rs +++ b/frame/support/src/traits/hooks.rs @@ -22,9 +22,6 @@ use impl_trait_for_tuples::impl_for_tuples; use sp_runtime::traits::AtLeast32BitUnsigned; use sp_std::prelude::*; -#[cfg(all(feature = "try-runtime", test))] -use codec::{Decode, Encode}; - /// The block initialization trait. /// /// Implementing this lets you express what should happen for your pallet when the block is @@ -136,6 +133,29 @@ pub trait OnRuntimeUpgrade { Weight::zero() } + /// Same as `on_runtime_upgrade`, but perform the optional `pre_upgrade` and `post_upgrade` as + /// well. + #[cfg(feature = "try-runtime")] + fn try_on_runtime_upgrade(checks: bool) -> Result { + let maybe_state = if checks { + let _guard = frame_support::StorageNoopGuard::default(); + let state = Self::pre_upgrade()?; + Some(state) + } else { + None + }; + + let weight = Self::on_runtime_upgrade(); + + if let Some(state) = maybe_state { + let _guard = frame_support::StorageNoopGuard::default(); + // we want to panic if any checks fail right here right now. + Self::post_upgrade(state)? + } + + Ok(weight) + } + /// Execute some pre-checks prior to a runtime upgrade. /// /// Return a `Vec` that can contain arbitrary encoded data (usually some pre-upgrade state), @@ -143,6 +163,9 @@ pub trait OnRuntimeUpgrade { /// should be returned if there is no such need. /// /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. + /// + /// This hook must not write to any state, as it would make the main `on_runtime_upgrade` path + /// inaccurate. #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, &'static str> { Ok(Vec::new()) @@ -155,6 +178,9 @@ pub trait OnRuntimeUpgrade { /// be passed in, in such case `post_upgrade` should ignore it. /// /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. + /// + /// This hook must not write to any state, as it would make the main `on_runtime_upgrade` path + /// inaccurate. 
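To illustrate the contract these doc comments spell out, here is a sketch of a single migration that cooperates with `try_on_runtime_upgrade` (all names except the trait methods are illustrative): `pre_upgrade` only reads and encodes state, `post_upgrade` only decodes and verifies it, and writes happen exclusively in `on_runtime_upgrade`.

    use codec::{Decode, Encode};
    use frame_support::{traits::OnRuntimeUpgrade, weights::Weight};
    use sp_std::{marker::PhantomData, vec::Vec};

    pub struct ExampleMigration<T>(PhantomData<T>);

    impl<T: frame_system::Config> OnRuntimeUpgrade for ExampleMigration<T> {
        fn on_runtime_upgrade() -> Weight {
            // The only hook that may write: translate storage here.
            T::DbWeight::get().reads_writes(1, 1)
        }

        #[cfg(feature = "try-runtime")]
        fn pre_upgrade() -> Result<Vec<u8>, &'static str> {
            // Read-only: snapshot whatever post_upgrade needs to verify.
            Ok(frame_system::Pallet::<T>::block_number().encode())
        }

        #[cfg(feature = "try-runtime")]
        fn post_upgrade(state: Vec<u8>) -> Result<(), &'static str> {
            // Read-only: decode the snapshot and check invariants.
            let _at = <T as frame_system::Config>::BlockNumber::decode(&mut &state[..])
                .map_err(|_| "pre_upgrade state corrupted")?;
            Ok(())
        }
    }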
#[cfg(feature = "try-runtime")] fn post_upgrade(_state: Vec) -> Result<(), &'static str> { Ok(()) @@ -165,7 +191,6 @@ pub trait OnRuntimeUpgrade { #[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] #[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] impl OnRuntimeUpgrade for Tuple { - #[cfg(not(feature = "try-runtime"))] fn on_runtime_upgrade() -> Weight { let mut weight = Weight::zero(); for_tuples!( #( weight = weight.saturating_add(Tuple::on_runtime_upgrade()); )* ); @@ -176,40 +201,10 @@ impl OnRuntimeUpgrade for Tuple { /// We are executing pre- and post-checks sequentially in order to be able to test several /// consecutive migrations for the same pallet without errors. Therefore pre and post upgrade /// hooks for tuples are a noop. - fn on_runtime_upgrade() -> Weight { - use scale_info::prelude::format; - + fn try_on_runtime_upgrade(checks: bool) -> Result { let mut weight = Weight::zero(); - // migration index in the tuple, start with 1 for better readability - let mut i = 1; - for_tuples!( #( - let _guard = frame_support::StorageNoopGuard::default(); - // we want to panic if any checks fail right here right now. - let state = Tuple::pre_upgrade().expect(&format!("PreUpgrade failed for migration #{}", i)); - drop(_guard); - - weight = weight.saturating_add(Tuple::on_runtime_upgrade()); - - let _guard = frame_support::StorageNoopGuard::default(); - // we want to panic if any checks fail right here right now. - Tuple::post_upgrade(state).expect(&format!("PostUpgrade failed for migration #{}", i)); - drop(_guard); - - i += 1; - )* ); - weight - } - - #[cfg(feature = "try-runtime")] - /// noop - fn pre_upgrade() -> Result, &'static str> { - Ok(Vec::new()) - } - - #[cfg(feature = "try-runtime")] - /// noop - fn post_upgrade(_state: Vec) -> Result<(), &'static str> { - Ok(()) + for_tuples!( #( weight = weight.saturating_add(Tuple::try_on_runtime_upgrade(checks)?); )* ); + Ok(weight) } } @@ -274,6 +269,8 @@ pub trait Hooks { /// /// It should focus on certain checks to ensure that the state is sensible. This is never /// executed in a consensus code-path, therefore it can consume as much weight as it needs. + /// + /// This hook should not alter any storage. #[cfg(feature = "try-runtime")] fn try_state(_n: BlockNumber) -> Result<(), &'static str> { Ok(()) @@ -440,110 +437,4 @@ mod tests { ON_IDLE_INVOCATION_ORDER.clear(); } } - - #[cfg(feature = "try-runtime")] - #[test] - #[allow(dead_code)] - fn on_runtime_upgrade_tuple() { - use frame_support::parameter_types; - use sp_io::TestExternalities; - - struct Test1; - struct Test2; - struct Test3; - - parameter_types! 
{ - static Test1Assertions: u8 = 0; - static Test2Assertions: u8 = 0; - static Test3Assertions: u8 = 0; - static EnableSequentialTest: bool = false; - static SequentialAssertions: u8 = 0; - } - - impl OnRuntimeUpgrade for Test1 { - fn pre_upgrade() -> Result, &'static str> { - Ok("Test1".encode()) - } - fn post_upgrade(state: Vec) -> Result<(), &'static str> { - let s: String = Decode::decode(&mut state.as_slice()).unwrap(); - Test1Assertions::mutate(|val| *val += 1); - if EnableSequentialTest::get() { - SequentialAssertions::mutate(|val| *val += 1); - } - assert_eq!(s, "Test1"); - Ok(()) - } - } - - impl OnRuntimeUpgrade for Test2 { - fn pre_upgrade() -> Result, &'static str> { - Ok(100u32.encode()) - } - fn post_upgrade(state: Vec) -> Result<(), &'static str> { - let s: u32 = Decode::decode(&mut state.as_slice()).unwrap(); - Test2Assertions::mutate(|val| *val += 1); - if EnableSequentialTest::get() { - assert_eq!(SequentialAssertions::get(), 1); - SequentialAssertions::mutate(|val| *val += 1); - } - assert_eq!(s, 100); - Ok(()) - } - } - - impl OnRuntimeUpgrade for Test3 { - fn pre_upgrade() -> Result, &'static str> { - Ok(true.encode()) - } - fn post_upgrade(state: Vec) -> Result<(), &'static str> { - let s: bool = Decode::decode(&mut state.as_slice()).unwrap(); - Test3Assertions::mutate(|val| *val += 1); - if EnableSequentialTest::get() { - assert_eq!(SequentialAssertions::get(), 2); - SequentialAssertions::mutate(|val| *val += 1); - } - assert_eq!(s, true); - Ok(()) - } - } - - TestExternalities::default().execute_with(|| { - type TestEmpty = (); - let origin_state = ::pre_upgrade().unwrap(); - assert!(origin_state.is_empty()); - ::post_upgrade(origin_state).unwrap(); - - type Test1Tuple = (Test1,); - let origin_state = ::pre_upgrade().unwrap(); - assert!(origin_state.is_empty()); - ::post_upgrade(origin_state).unwrap(); - assert_eq!(Test1Assertions::get(), 0); - ::on_runtime_upgrade(); - assert_eq!(Test1Assertions::take(), 1); - - type Test321 = (Test3, Test2, Test1); - ::on_runtime_upgrade(); - assert_eq!(Test1Assertions::take(), 1); - assert_eq!(Test2Assertions::take(), 1); - assert_eq!(Test3Assertions::take(), 1); - - // enable sequential tests - EnableSequentialTest::mutate(|val| *val = true); - - type Test123 = (Test1, Test2, Test3); - ::on_runtime_upgrade(); - assert_eq!(Test1Assertions::take(), 1); - assert_eq!(Test2Assertions::take(), 1); - assert_eq!(Test3Assertions::take(), 1); - - // reset assertions - SequentialAssertions::take(); - - type TestNested123 = (Test1, (Test2, Test3)); - ::on_runtime_upgrade(); - assert_eq!(Test1Assertions::take(), 1); - assert_eq!(Test2Assertions::take(), 1); - assert_eq!(Test3Assertions::take(), 1); - }); - } } diff --git a/frame/support/src/traits/try_runtime.rs b/frame/support/src/traits/try_runtime.rs index 640bb566a65af..f741ca56a56fc 100644 --- a/frame/support/src/traits/try_runtime.rs +++ b/frame/support/src/traits/try_runtime.rs @@ -85,6 +85,8 @@ impl sp_std::str::FromStr for Select { /// /// Usually, these checks should check all of the invariants that are expected to be held on all of /// the storage items of your pallet. +/// +/// This hook should not alter any storage. pub trait TryState { /// Execute the state checks. 
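As an illustration of that rule, a pallet-side hook might look like the following sketch (the storage items and the invariant are made up); it reads state but never writes:

    #[cfg(feature = "try-runtime")]
    fn try_state(_n: T::BlockNumber) -> Result<(), &'static str> {
        // Read-only invariant: the stored counter must match the number
        // of entries actually present in the map.
        frame_support::ensure!(
            EntryCount::<T>::get() as usize == Entries::<T>::iter().count(),
            "entry counter out of sync"
        );
        Ok(())
    }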
	fn try_state(_: BlockNumber, _: Select) -> Result<(), &'static str>;
diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr
index 999d8585c221a..a3af9897be5c7 100644
--- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr
+++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr
@@ -28,7 +28,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied
             <&[(T,)] as EncodeLike>>
             <&[(T,)] as EncodeLike>>
             <&[T] as EncodeLike>>
-           and 279 others
+           and 280 others
   = note: required for `Bar` to implement `FullEncode`
   = note: required for `Bar` to implement `FullCodec`
   = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `PartialStorageInfoTrait`
@@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied
             <&[(T,)] as EncodeLike>>
             <&[(T,)] as EncodeLike>>
             <&[T] as EncodeLike>>
-           and 279 others
+           and 280 others
   = note: required for `Bar` to implement `FullEncode`
   = note: required for `Bar` to implement `FullCodec`
   = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder`
diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr
index e2870ffb9e86f..9e87f87825b2a 100644
--- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr
+++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr
@@ -28,7 +28,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied
             <&[(T,)] as EncodeLike>>
             <&[(T,)] as EncodeLike>>
             <&[T] as EncodeLike>>
-           and 279 others
+           and 280 others
   = note: required for `Bar` to implement `FullEncode`
   = note: required for `Bar` to implement `FullCodec`
   = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `PartialStorageInfoTrait`
@@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied
             <&[(T,)] as EncodeLike>>
             <&[(T,)] as EncodeLike>>
             <&[T] as EncodeLike>>
-           and 279 others
+           and 280 others
   = note: required for `Bar` to implement `FullEncode`
   = note: required for `Bar` to implement `FullCodec`
   = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder`
diff --git a/frame/try-runtime/src/lib.rs b/frame/try-runtime/src/lib.rs
index ed1247bd8e6f2..99c68d4dc65b8 100644
--- a/frame/try-runtime/src/lib.rs
+++ b/frame/try-runtime/src/lib.rs
@@ -33,12 +33,21 @@ sp_api::decl_runtime_apis! {
 		///
 		/// Returns the consumed weight of the migration in case of a successful one, combined with
 		/// the total allowed block weight of the runtime.
-		fn on_runtime_upgrade() -> (Weight, Weight);
+		///
+		/// If `checks` is `true`, `pre_upgrade` and `post_upgrade` of each migration and
+		/// `try_state` of all pallets will be executed. Otherwise, they are skipped. If checks are
+		/// executed, the PoV tracking is likely inaccurate.
+		fn on_runtime_upgrade(checks: bool) -> (Weight, Weight);
 
-		/// Execute the given block, but don't check that its state root matches that of yours.
+		/// Execute the given block, but optionally disable state-root and signature checks.
/// - /// This is only sensible where the incoming block is from a different network, yet it has - /// the same block format as the runtime implementing this API. - fn execute_block(block: Block, state_root_check: bool, try_state: TryStateSelect) -> Weight; + /// Optionally, a number of `try_state` hooks can also be executed after the block + /// execution. + fn execute_block( + block: Block, + state_root_check: bool, + signature_check: bool, + try_state: TryStateSelect, + ) -> Weight; } } diff --git a/primitives/runtime/Cargo.toml b/primitives/runtime/Cargo.toml index 8ea6ed3eb3b19..4202110cd60c2 100644 --- a/primitives/runtime/Cargo.toml +++ b/primitives/runtime/Cargo.toml @@ -41,6 +41,7 @@ substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/ru [features] runtime-benchmarks = [] +try-runtime = [] default = ["std"] std = [ "codec/std", diff --git a/primitives/runtime/src/generic/unchecked_extrinsic.rs b/primitives/runtime/src/generic/unchecked_extrinsic.rs index 5d378410e4756..bab4411167a7f 100644 --- a/primitives/runtime/src/generic/unchecked_extrinsic.rs +++ b/primitives/runtime/src/generic/unchecked_extrinsic.rs @@ -149,6 +149,22 @@ where None => CheckedExtrinsic { signed: None, function: self.function }, }) } + + #[cfg(feature = "try-runtime")] + fn unchecked_into_checked_i_know_what_i_am_doing( + self, + lookup: &Lookup, + ) -> Result { + Ok(match self.signature { + Some((signed, _, extra)) => { + let signed = lookup.lookup(signed)?; + let raw_payload = SignedPayload::new(self.function, extra)?; + let (function, extra, _) = raw_payload.deconstruct(); + CheckedExtrinsic { signed: Some((signed, extra)), function } + }, + None => CheckedExtrinsic { signed: None, function: self.function }, + }) + } } impl ExtrinsicMetadata diff --git a/primitives/runtime/src/testing.rs b/primitives/runtime/src/testing.rs index 0cd78ba6267dd..81762b3fc4f9f 100644 --- a/primitives/runtime/src/testing.rs +++ b/primitives/runtime/src/testing.rs @@ -326,6 +326,14 @@ impl Checkable for TestXt Result { Ok(self) } + + #[cfg(feature = "try-runtime")] + fn unchecked_into_checked_i_know_what_i_am_doing( + self, + _: &Context, + ) -> Result { + unreachable!() + } } impl traits::Extrinsic for TestXt { diff --git a/primitives/runtime/src/traits.rs b/primitives/runtime/src/traits.rs index c69f8616b4be5..375475141b818 100644 --- a/primitives/runtime/src/traits.rs +++ b/primitives/runtime/src/traits.rs @@ -990,6 +990,20 @@ pub trait Checkable: Sized { /// Check self, given an instance of Context. fn check(self, c: &Context) -> Result; + + /// Blindly check self. + /// + /// ## WARNING + /// + /// DO NOT USE IN PRODUCTION. This is only meant to be used in testing environments. A runtime + /// compiled with `try-runtime` should never be in production. Moreover, the name of this + /// function is deliberately chosen to prevent developers from ever calling it in consensus + /// code-paths. 
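+	///
+	/// In this patch it exists so that try-runtime's `execute_block` can re-check a block's
+	/// extrinsics while skipping signature verification (see the new `signature_check`
+	/// parameter of `execute_block` above).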
+ #[cfg(feature = "try-runtime")] + fn unchecked_into_checked_i_know_what_i_am_doing( + self, + c: &Context, + ) -> Result; } /// A "checkable" piece of information, used by the standard Substrate Executive in order to @@ -1011,6 +1025,14 @@ impl Checkable for T { fn check(self, _c: &Context) -> Result { BlindCheckable::check(self) } + + #[cfg(feature = "try-runtime")] + fn unchecked_into_checked_i_know_what_i_am_doing( + self, + _: &Context, + ) -> Result { + unreachable!(); + } } /// A lazy call (module function and argument values) that can be executed via its `dispatch` diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 79c1012196bde..237787710a7e7 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -407,6 +407,7 @@ impl ChildTrieParentKeyId { /// V0 and V1 uses a same trie implementation, but V1 will write external value node in the trie for /// value with size at least `TRIE_VALUE_NODE_THRESHOLD`. #[derive(Debug, Clone, Copy, Eq, PartialEq)] +#[cfg_attr(feature = "std", derive(Encode, Decode))] pub enum StateVersion { /// Old state version, no value nodes. V0 = 0, diff --git a/utils/frame/remote-externalities/Cargo.toml b/utils/frame/remote-externalities/Cargo.toml index 4cb847867f374..ad8230fe29dcf 100644 --- a/utils/frame/remote-externalities/Cargo.toml +++ b/utils/frame/remote-externalities/Cargo.toml @@ -22,12 +22,14 @@ sp-core = { version = "7.0.0", path = "../../../primitives/core" } sp-io = { version = "7.0.0", path = "../../../primitives/io" } sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } sp-version = { version = "5.0.0", path = "../../../primitives/version" } +tokio = { version = "1.22.0", features = ["macros", "rt-multi-thread"] } substrate-rpc-client = { path = "../rpc/client" } +futures = "0.3" [dev-dependencies] -tokio = { version = "1.22.0", features = ["macros", "rt-multi-thread"] } frame-support = { version = "4.0.0-dev", path = "../../../frame/support" } pallet-elections-phragmen = { version = "5.0.0-dev", path = "../../../frame/elections-phragmen" } +tracing-subscriber = { version = "0.3.16", features = ["env-filter"] } [features] remote-test = ["frame-support"] diff --git a/utils/frame/remote-externalities/src/lib.rs b/utils/frame/remote-externalities/src/lib.rs index db062e246ceef..fb63b4275172d 100644 --- a/utils/frame/remote-externalities/src/lib.rs +++ b/utils/frame/remote-externalities/src/lib.rs @@ -21,7 +21,7 @@ //! based chain, or a local state snapshot file. use codec::{Decode, Encode}; - +use futures::{channel::mpsc, stream::StreamExt}; use log::*; use serde::de::DeserializeOwned; use sp_core::{ @@ -36,8 +36,11 @@ pub use sp_io::TestExternalities; use sp_runtime::{traits::Block as BlockT, StateVersion}; use std::{ fs, + num::NonZeroUsize, + ops::{Deref, DerefMut}, path::{Path, PathBuf}, sync::Arc, + thread, }; use substrate_rpc_client::{ rpc_params, ws_client, BatchRequestBuilder, ChainApi, ClientT, StateApi, WsClient, @@ -48,18 +51,49 @@ type TopKeyValues = Vec; type ChildKeyValues = Vec<(ChildInfo, Vec)>; const LOG_TARGET: &str = "remote-ext"; -const DEFAULT_TARGET: &str = "wss://rpc.polkadot.io:443"; -const BATCH_SIZE: usize = 1000; -const PAGE: u32 = 1000; +const DEFAULT_WS_ENDPOINT: &str = "wss://rpc.polkadot.io:443"; +const DEFAULT_VALUE_DOWNLOAD_BATCH: usize = 4096; +// NOTE: increasing this value does not seem to impact speed all that much. +const DEFAULT_KEY_DOWNLOAD_PAGE: u32 = 1000; +/// The snapshot that we store on disk. 
+#[derive(Decode, Encode)]
+struct Snapshot<B: BlockT> {
+	state_version: StateVersion,
+	block_hash: B::Hash,
+	top: TopKeyValues,
+	child: ChildKeyValues,
+}
+
+/// An externalities that acts exactly the same as [`sp_io::TestExternalities`] but has a few extra
+/// bits and pieces to it, and can be loaded remotely.
+pub struct RemoteExternalities<B: BlockT> {
+	/// The inner externalities.
+	pub inner_ext: TestExternalities,
+	/// The block hash at which we created this externality env.
+	pub block_hash: B::Hash,
+}
+
+impl<B: BlockT> Deref for RemoteExternalities<B> {
+	type Target = TestExternalities;
+	fn deref(&self) -> &Self::Target {
+		&self.inner_ext
+	}
+}
+
+impl<B: BlockT> DerefMut for RemoteExternalities<B> {
+	fn deref_mut(&mut self) -> &mut Self::Target {
+		&mut self.inner_ext
+	}
+}
 
 /// The execution mode.
 #[derive(Clone)]
 pub enum Mode<B: BlockT> {
-	/// Online. Potentially writes to a cache file.
+	/// Online. Potentially writes to a snapshot file.
 	Online(OnlineConfig<B>),
 	/// Offline. Uses a state snapshot file and needs not any client config.
 	Offline(OfflineConfig),
-	/// Prefer using a cache file if it exists, else use a remote server.
+	/// Prefer using a snapshot file if it exists, else use a remote server.
 	OfflineOrElseOnline(OfflineConfig, OnlineConfig<B>),
 }
@@ -95,6 +129,13 @@ impl Transport {
 		}
 	}
 
+	fn as_client_cloned(&self) -> Option<Arc<WsClient>> {
+		match self {
+			Self::RemoteClient(client) => Some(client.clone()),
+			_ => None,
+		}
+	}
+
 	// Open a new WebSocket connection if it's not connected.
 	async fn map_uri(&mut self) -> Result<(), &'static str> {
 		if let Self::Uri(uri) = self {
@@ -134,38 +175,56 @@ pub struct OnlineConfig<B: BlockT> {
 	pub at: Option<B::Hash>,
 	/// An optional state snapshot file to WRITE to, not for reading. Not written if set to `None`.
 	pub state_snapshot: Option<SnapshotConfig>,
-	/// The pallets to scrape. If empty, entire chain state will be scraped.
+	/// The pallets to scrape. These values are hashed and added to `hashed_prefixes`.
 	pub pallets: Vec<String>,
 	/// Transport config.
 	pub transport: Transport,
 	/// Lookout for child-keys, and scrape them as well if set to true.
-	pub scrape_children: bool,
+	pub child_trie: bool,
+	/// Storage entry key prefixes to be injected into the externalities. The *hashed* prefix must
+	/// be given.
+	pub hashed_prefixes: Vec<Vec<u8>>,
+	/// Storage entry keys to be injected into the externalities. The *hashed* key must be given.
+	pub hashed_keys: Vec<Vec<u8>>,
 }
 
 impl<B: BlockT> OnlineConfig<B> {
-	/// Return rpc (ws) client.
+	/// Return rpc (ws) client reference.
 	fn rpc_client(&self) -> &WsClient {
 		self.transport
 			.as_client()
 			.expect("ws client must have been initialized by now; qed.")
 	}
+
+	/// Return a cloned rpc (ws) client, suitable for being moved to threads.
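+	/// (The client is held as an `Arc<WsClient>`, so this clone is just a reference-count
+	/// bump; each worker thread can cheaply hold its own handle.)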
+ fn rpc_client_cloned(&self) -> Arc { + self.transport + .as_client_cloned() + .expect("ws client must have been initialized by now; qed.") + } + + fn at_expected(&self) -> B::Hash { + self.at.expect("block at must be initialized; qed") + } } impl Default for OnlineConfig { fn default() -> Self { Self { - transport: Transport::Uri(DEFAULT_TARGET.to_owned()), + transport: Transport::from(DEFAULT_WS_ENDPOINT.to_owned()), + child_trie: true, at: None, state_snapshot: None, - pallets: vec![], - scrape_children: true, + pallets: Default::default(), + hashed_keys: Default::default(), + hashed_prefixes: Default::default(), } } } impl From for OnlineConfig { - fn from(s: String) -> Self { - Self { transport: s.into(), ..Default::default() } + fn from(t: String) -> Self { + Self { transport: t.into(), ..Default::default() } } } @@ -196,20 +255,18 @@ impl Default for SnapshotConfig { /// Builder for remote-externalities. pub struct Builder { - /// Custom key-pairs to be injected into the externalities. The *hashed* keys and values must - /// be given. + /// Custom key-pairs to be injected into the final externalities. The *hashed* keys and values + /// must be given. hashed_key_values: Vec, - /// Storage entry key prefixes to be injected into the externalities. The *hashed* prefix must - /// be given. - hashed_prefixes: Vec>, - /// Storage entry keys to be injected into the externalities. The *hashed* key must be given. - hashed_keys: Vec>, /// The keys that will be excluded from the final externality. The *hashed* key must be given. hashed_blacklist: Vec>, - /// connectivity mode, online or offline. + /// Connectivity mode, online or offline. mode: Mode, - /// The state version being used. - state_version: StateVersion, + /// If provided, overwrite the state version with this. Otherwise, the state_version of the + /// remote node is used. All cache files also store their state version. + /// + /// Overwrite only with care. + overwrite_state_version: Option, } // NOTE: ideally we would use `DefaultNoBound` here, but not worth bringing in frame-support for @@ -219,10 +276,8 @@ impl Default for Builder { Self { mode: Default::default(), hashed_key_values: Default::default(), - hashed_prefixes: Default::default(), - hashed_keys: Default::default(), hashed_blacklist: Default::default(), - state_version: StateVersion::V1, + overwrite_state_version: None, } } } @@ -252,20 +307,22 @@ where B::Hash: DeserializeOwned, B::Header: DeserializeOwned, { + /// Get the number of threads to use. + fn threads() -> NonZeroUsize { + thread::available_parallelism() + .unwrap_or(NonZeroUsize::new(4usize).expect("4 is non-zero; qed")) + } + async fn rpc_get_storage( &self, key: StorageKey, maybe_at: Option, - ) -> Result { + ) -> Result, &'static str> { trace!(target: LOG_TARGET, "rpc: get_storage"); - match self.as_online().rpc_client().storage(key, maybe_at).await { - Ok(Some(res)) => Ok(res), - Ok(None) => Err("get_storage not found"), - Err(e) => { - error!(target: LOG_TARGET, "Error = {:?}", e); - Err("rpc get_storage failed.") - }, - } + self.as_online().rpc_client().storage(key, maybe_at).await.map_err(|e| { + error!(target: LOG_TARGET, "Error = {:?}", e); + "rpc get_storage failed." + }) } /// Get the latest finalized head. 
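An aside on the `threads()` helper above: every download path below carves its work into at most that many chunks using div-ceil arithmetic. A minimal, self-contained sketch of just that arithmetic, with a made-up key list standing in for the output of `rpc_get_keys_paged`:

use std::{num::NonZeroUsize, thread};

fn main() {
	// Made-up stand-in for the key list downloaded via `rpc_get_keys_paged`.
	let keys: Vec<u32> = (0..10_001).collect();

	// Same fallback as `Builder::threads()`: all available cores, or 4 if unknown.
	let threads = thread::available_parallelism()
		.unwrap_or(NonZeroUsize::new(4).expect("4 is non-zero; qed"))
		.get();

	// div-ceil: at most `threads` chunks of ceil(len / threads) keys each. The patch
	// guards the empty case separately, since `chunks(0)` would panic.
	let chunk_size = (keys.len() + threads - 1) / threads;
	let chunks = keys.chunks(chunk_size).collect::<Vec<_>>();

	assert!(chunks.len() <= threads);
	assert_eq!(chunks.iter().map(|c| c.len()).sum::<usize>(), keys.len());
}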
@@ -293,7 +350,12 @@ where let page = self .as_online() .rpc_client() - .storage_keys_paged(Some(prefix.clone()), PAGE, last_key.clone(), Some(at)) + .storage_keys_paged( + Some(prefix.clone()), + DEFAULT_KEY_DOWNLOAD_PAGE, + last_key.clone(), + Some(at), + ) .await .map_err(|e| { error!(target: LOG_TARGET, "Error = {:?}", e); @@ -303,7 +365,7 @@ where all_keys.extend(page); - if page_len < PAGE as usize { + if page_len < DEFAULT_KEY_DOWNLOAD_PAGE as usize { log::debug!(target: LOG_TARGET, "last page received: {}", page_len); break all_keys } else { @@ -322,7 +384,7 @@ where Ok(keys) } - /// Synonym of `rpc_get_pairs_unsafe` that uses paged queries to first get the keys, and then + /// Synonym of `getPairs` that uses paged queries to first get the keys, and then /// map them to values one by one. /// /// This can work with public nodes. But, expect it to be darn slow. @@ -330,79 +392,169 @@ where &self, prefix: StorageKey, at: B::Hash, + pending_ext: &mut TestExternalities, ) -> Result, &'static str> { - let keys = self.rpc_get_keys_paged(prefix, at).await?; - let keys_count = keys.len(); - log::debug!(target: LOG_TARGET, "Querying a total of {} keys", keys.len()); + let keys = self.rpc_get_keys_paged(prefix.clone(), at).await?; + if keys.is_empty() { + return Ok(Default::default()) + } - let mut key_values: Vec = vec![]; - let mut batch_success = true; + let client = self.as_online().rpc_client_cloned(); + let threads = Self::threads().get(); + let thread_chunk_size = (keys.len() + threads - 1) / threads; - let client = self.as_online().rpc_client(); - for chunk_keys in keys.chunks(BATCH_SIZE) { - let mut batch = BatchRequestBuilder::new(); + log::info!( + target: LOG_TARGET, + "Querying a total of {} keys from prefix {:?}, splitting among {} threads, {} keys per thread", + keys.len(), + HexDisplay::from(&prefix), + threads, + thread_chunk_size, + ); - for key in chunk_keys.iter() { - batch - .insert("state_getStorage", rpc_params![key, at]) - .map_err(|_| "Invalid batch params")?; - } + let mut handles = Vec::new(); + let keys_chunked: Vec> = + keys.chunks(thread_chunk_size).map(|s| s.into()).collect::>(); + + enum Message { + /// This thread completed the assigned work. + Terminated, + /// The thread produced the following batch response. + Batch(Vec<(Vec, Vec)>), + /// A request from the batch failed. + BatchFailed(String), + } - let batch_response = - client.batch_request::>(batch).await.map_err(|e| { - log::error!( - target: LOG_TARGET, - "failed to execute batch: {:?}. Error: {:?}", - chunk_keys.iter().map(HexDisplay::from).collect::>(), - e - ); - "batch failed." - })?; + let (tx, mut rx) = mpsc::unbounded::(); + + for thread_keys in keys_chunked { + let thread_client = client.clone(); + let thread_sender = tx.clone(); + let handle = std::thread::spawn(move || { + let rt = tokio::runtime::Runtime::new().unwrap(); + let mut thread_key_values = Vec::with_capacity(thread_keys.len()); + + for chunk_keys in thread_keys.chunks(DEFAULT_VALUE_DOWNLOAD_BATCH) { + let mut batch = BatchRequestBuilder::new(); + + for key in chunk_keys.iter() { + batch + .insert("state_getStorage", rpc_params![key, at]) + .map_err(|_| "Invalid batch params") + .unwrap(); + } + + let batch_response = rt + .block_on(thread_client.batch_request::>(batch)) + .map_err(|e| { + log::error!( + target: LOG_TARGET, + "failed to execute batch: {:?}. Error: {:?}", + chunk_keys.iter().map(HexDisplay::from).collect::>(), + e + ); + "batch failed." + }) + .unwrap(); + + // Check if we got responses for all submitted requests. 
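+					// (This position-wise pairing is only sound if the batch responses come
+					// back in request order, which jsonrpsee's `batch_request` is expected to
+					// provide; the assert below guards the "one response per key" half of
+					// that assumption.)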
+ assert_eq!(chunk_keys.len(), batch_response.len()); + + let mut batch_kv = Vec::with_capacity(chunk_keys.len()); + for (key, maybe_value) in chunk_keys.into_iter().zip(batch_response) { + match maybe_value { + Ok(Some(data)) => { + thread_key_values.push((key.clone(), data.clone())); + batch_kv.push((key.clone().0, data.0)); + }, + Ok(None) => { + log::warn!( + target: LOG_TARGET, + "key {:?} had none corresponding value.", + &key + ); + let data = StorageData(vec![]); + thread_key_values.push((key.clone(), data.clone())); + batch_kv.push((key.clone().0, data.0)); + }, + Err(e) => { + let reason = format!("key {:?} failed: {:?}", &key, e); + log::error!(target: LOG_TARGET, "Reason: {}", reason); + // Signal failures to the main thread, stop aggregating (key, value) + // pairs and return immediately an error. + thread_sender.unbounded_send(Message::BatchFailed(reason)).unwrap(); + return Default::default() + }, + }; + + if thread_key_values.len() % (thread_keys.len() / 10).max(1) == 0 { + let ratio: f64 = + thread_key_values.len() as f64 / thread_keys.len() as f64; + log::debug!( + target: LOG_TARGET, + "[thread = {:?}] progress = {:.2} [{} / {}]", + std::thread::current().id(), + ratio, + thread_key_values.len(), + thread_keys.len(), + ); + } + } + + // Send this batch to the main thread to start inserting. + thread_sender.unbounded_send(Message::Batch(batch_kv)).unwrap(); + } - assert_eq!(chunk_keys.len(), batch_response.len()); + thread_sender.unbounded_send(Message::Terminated).unwrap(); + thread_key_values + }); - for (key, maybe_value) in chunk_keys.into_iter().zip(batch_response) { - match maybe_value { - Ok(Some(v)) => { - key_values.push((key.clone(), v)); - }, - Ok(None) => { - log::warn!( - target: LOG_TARGET, - "key {:?} had none corresponding value.", - &key - ); - key_values.push((key.clone(), StorageData(vec![]))); - }, - Err(e) => { - log::error!(target: LOG_TARGET, "key {:?} failed: {:?}", &key, e); - batch_success = false; - }, - }; + handles.push(handle); + } - if key_values.len() % (10 * BATCH_SIZE) == 0 { - let ratio: f64 = key_values.len() as f64 / keys_count as f64; - log::debug!( - target: LOG_TARGET, - "progress = {:.2} [{} / {}]", - ratio, - key_values.len(), - keys_count, - ); - } + // first, wait until all threads send a `Terminated` message, in the meantime populate + // `pending_ext`. + let mut terminated = 0usize; + let mut batch_failed = false; + loop { + match rx.next().await.unwrap() { + Message::Batch(kv) => { + for (k, v) in kv { + // skip writing the child root data. + if is_default_child_storage_key(k.as_ref()) { + continue + } + pending_ext.insert(k, v); + } + }, + Message::BatchFailed(error) => { + log::error!(target: LOG_TARGET, "Batch processing failed: {:?}", error); + batch_failed = true; + break + }, + Message::Terminated => { + terminated += 1; + if terminated == handles.len() { + break + } + }, } } - if batch_success { - Ok(key_values) - } else { - Err("batch failed.") + // Ensure all threads finished execution before returning. + let keys_and_values = + handles.into_iter().flat_map(|h| h.join().unwrap()).collect::>(); + + if batch_failed { + return Err("Batch failed.") } + + Ok(keys_and_values) } /// Get the values corresponding to `child_keys` at the given `prefixed_top_key`. 
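+	///
+	/// (This and `rpc_child_get_keys` below become associated functions taking an explicit
+	/// `client` rather than `&self`, so spawned worker threads can call them without
+	/// borrowing the builder.)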
pub(crate) async fn rpc_child_get_storage_paged( - &self, + client: &WsClient, prefixed_top_key: &StorageKey, child_keys: Vec, at: B::Hash, @@ -410,7 +562,7 @@ where let mut child_kv_inner = vec![]; let mut batch_success = true; - for batch_child_key in child_keys.chunks(BATCH_SIZE) { + for batch_child_key in child_keys.chunks(DEFAULT_VALUE_DOWNLOAD_BATCH) { let mut batch_request = BatchRequestBuilder::new(); for key in batch_child_key { @@ -426,12 +578,8 @@ where .map_err(|_| "Invalid batch params")?; } - let batch_response = self - .as_online() - .rpc_client() - .batch_request::>(batch_request) - .await - .map_err(|e| { + let batch_response = + client.batch_request::>(batch_request).await.map_err(|e| { log::error!( target: LOG_TARGET, "failed to execute batch: {:?}. Error: {:?}", @@ -472,7 +620,7 @@ where } pub(crate) async fn rpc_child_get_keys( - &self, + client: &WsClient, prefixed_top_key: &StorageKey, child_prefix: StorageKey, at: B::Hash, @@ -480,7 +628,7 @@ where // This is deprecated and will generate a warning which causes the CI to fail. #[allow(warnings)] let child_keys = substrate_rpc_client::ChildStateApi::storage_keys( - self.as_online().rpc_client(), + client, PrefixedStorageKey::new(prefixed_top_key.as_ref().to_vec()), child_prefix, Some(at), @@ -493,7 +641,8 @@ where debug!( target: LOG_TARGET, - "scraped {} child-keys of the child-bearing top key: {}", + "[thread = {:?}] scraped {} child-keys of the child-bearing top key: {}", + std::thread::current().id(), child_keys.len(), HexDisplay::from(prefixed_top_key) ); @@ -502,214 +651,341 @@ where } } -// Internal methods -impl Builder +impl Builder where B::Hash: DeserializeOwned, B::Header: DeserializeOwned, { - /// Save the given data to the top keys snapshot. - fn save_top_snapshot(&self, data: &[KeyValue], path: &PathBuf) -> Result<(), &'static str> { - let mut path = path.clone(); - let encoded = data.encode(); - path.set_extension("top"); - debug!( - target: LOG_TARGET, - "writing {} bytes to state snapshot file {:?}", - encoded.len(), - path - ); - fs::write(path, encoded).map_err(|_| "fs::write failed.")?; - Ok(()) - } - - /// Save the given data to the child keys snapshot. - fn save_child_snapshot( - &self, - data: &ChildKeyValues, - path: &PathBuf, - ) -> Result<(), &'static str> { - let mut path = path.clone(); - path.set_extension("child"); - let encoded = data.encode(); - debug!( - target: LOG_TARGET, - "writing {} bytes to state snapshot file {:?}", - encoded.len(), - path - ); - fs::write(path, encoded).map_err(|_| "fs::write failed.")?; - Ok(()) - } - - fn load_top_snapshot(&self, path: &PathBuf) -> Result { - let mut path = path.clone(); - path.set_extension("top"); - info!(target: LOG_TARGET, "loading top key-pairs from snapshot {:?}", path); - let bytes = fs::read(path).map_err(|_| "fs::read failed.")?; - Decode::decode(&mut &*bytes).map_err(|e| { - log::error!(target: LOG_TARGET, "{:?}", e); - "decode failed" - }) - } - - fn load_child_snapshot(&self, path: &PathBuf) -> Result { - let mut path = path.clone(); - path.set_extension("child"); - info!(target: LOG_TARGET, "loading child key-pairs from snapshot {:?}", path); - let bytes = fs::read(path).map_err(|_| "fs::read failed.")?; - Decode::decode(&mut &*bytes).map_err(|e| { - log::error!(target: LOG_TARGET, "{:?}", e); - "decode failed" - }) - } - - /// Load all the `top` keys from the remote config, and maybe write then to cache. 
-	async fn load_top_remote_and_maybe_save(&self) -> Result<TopKeyValues, &'static str> {
-		let top_kv = self.load_top_remote().await?;
-		if let Some(c) = &self.as_online().state_snapshot {
-			self.save_top_snapshot(&top_kv, &c.path)?;
-		}
-		Ok(top_kv)
-	}
-
 	/// Load all of the child keys from the remote config, given the already scraped list of top key
 	/// pairs.
 	///
-	/// Stores all values to cache as well, if provided.
-	async fn load_child_remote_and_maybe_save(
+	/// `top_kv` need not be only child-bearing top keys. It should be all of the top keys that are
+	/// included thus far.
+	///
+	/// This function concurrently populates `pending_ext`. The return value is only used for
+	/// writing to the snapshot cache; this could be optimized further.
+	async fn load_child_remote(
 		&self,
 		top_kv: &[KeyValue],
+		pending_ext: &mut TestExternalities,
 	) -> Result<ChildKeyValues, &'static str> {
-		let child_kv = self.load_child_remote(top_kv).await?;
-		if let Some(c) = &self.as_online().state_snapshot {
-			self.save_child_snapshot(&child_kv, &c.path)?;
-		}
-		Ok(child_kv)
-	}
-
-	/// Load all of the child keys from the remote config, given the already scraped list of top key
-	/// pairs.
-	///
-	/// `top_kv` need not be only child-bearing top keys. It should be all of the top keys that are
-	/// included thus far.
-	async fn load_child_remote(&self, top_kv: &[KeyValue]) -> Result<ChildKeyValues, &'static str> {
 		let child_roots = top_kv
-			.iter()
-			.filter_map(|(k, _)| is_default_child_storage_key(k.as_ref()).then(|| k))
+			.into_iter()
+			.filter_map(|(k, _)| is_default_child_storage_key(k.as_ref()).then(|| k.clone()))
 			.collect::<Vec<_>>();
 
+		if child_roots.is_empty() {
+			return Ok(Default::default())
+		}
+
+		// div-ceil simulation.
+		let threads = Self::threads().get();
+		let child_roots_per_thread = (child_roots.len() + threads - 1) / threads;
+
 		info!(
 			target: LOG_TARGET,
-			"👩‍👦 scraping child-tree data from {} top keys",
-			child_roots.len()
+			"👩‍👦 scraping child-tree data from {} top keys, split among {} threads, {} top keys per thread",
+			child_roots.len(),
+			threads,
+			child_roots_per_thread,
 		);
 
-		let mut child_kv = vec![];
-		for prefixed_top_key in child_roots {
-			let at = self.as_online().at.expect("at must be initialized in online mode.");
-			let child_keys =
-				self.rpc_child_get_keys(prefixed_top_key, StorageKey(vec![]), at).await?;
-			let child_kv_inner =
-				self.rpc_child_get_storage_paged(prefixed_top_key, child_keys, at).await?;
-
-			let prefixed_top_key = PrefixedStorageKey::new(prefixed_top_key.clone().0);
-			let un_prefixed = match ChildType::from_prefixed_key(&prefixed_top_key) {
-				Some((ChildType::ParentKeyId, storage_key)) => storage_key,
-				None => {
-					log::error!(target: LOG_TARGET, "invalid key: {:?}", prefixed_top_key);
-					return Err("Invalid child key")
-				},
-			};
+		// NOTE: the threading done here is simple, yet slightly inelegant: we are splitting child
+		// roots among threads, and it is very common for these roots to have vastly different
+		// child tries underneath them, causing some threads to finish much faster than others.
+		// Certainly still better than a single thread, though.
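+		// The workers stream `Message::Batch` chunks back over the unbounded channel while
+		// the receiving end inserts them into `pending_ext`, so the externalities are
+		// populated concurrently with the download rather than after it.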
+ let mut handles = vec![]; + let client = self.as_online().rpc_client_cloned(); + let at = self.as_online().at_expected(); + + enum Message { + Terminated, + Batch((ChildInfo, Vec<(Vec, Vec)>)), + } + let (tx, mut rx) = mpsc::unbounded::(); + + for thread_child_roots in child_roots + .chunks(child_roots_per_thread) + .map(|x| x.into()) + .collect::>>() + { + let thread_client = client.clone(); + let thread_sender = tx.clone(); + let handle = thread::spawn(move || { + let rt = tokio::runtime::Runtime::new().unwrap(); + let mut thread_child_kv = vec![]; + for prefixed_top_key in thread_child_roots { + let child_keys = rt.block_on(Self::rpc_child_get_keys( + &thread_client, + &prefixed_top_key, + StorageKey(vec![]), + at, + ))?; + let child_kv_inner = rt.block_on(Self::rpc_child_get_storage_paged( + &thread_client, + &prefixed_top_key, + child_keys, + at, + ))?; + + let prefixed_top_key = PrefixedStorageKey::new(prefixed_top_key.clone().0); + let un_prefixed = match ChildType::from_prefixed_key(&prefixed_top_key) { + Some((ChildType::ParentKeyId, storage_key)) => storage_key, + None => { + log::error!(target: LOG_TARGET, "invalid key: {:?}", prefixed_top_key); + return Err("Invalid child key") + }, + }; + + thread_sender + .unbounded_send(Message::Batch(( + ChildInfo::new_default(un_prefixed), + child_kv_inner + .iter() + .cloned() + .map(|(k, v)| (k.0, v.0)) + .collect::>(), + ))) + .unwrap(); + thread_child_kv.push((ChildInfo::new_default(un_prefixed), child_kv_inner)); + } + + thread_sender.unbounded_send(Message::Terminated).unwrap(); + Ok(thread_child_kv) + }); + handles.push(handle); + } - child_kv.push((ChildInfo::new_default(un_prefixed), child_kv_inner)); + // first, wait until all threads send a `Terminated` message, in the meantime populate + // `pending_ext`. + let mut terminated = 0usize; + loop { + match rx.next().await.unwrap() { + Message::Batch((info, kvs)) => + for (k, v) in kvs { + pending_ext.insert_child(info.clone(), k, v); + }, + Message::Terminated => { + terminated += 1; + if terminated == handles.len() { + break + } + }, + } } + let child_kv = handles + .into_iter() + .flat_map(|h| h.join().unwrap()) + .flatten() + .collect::>(); Ok(child_kv) } /// Build `Self` from a network node denoted by `uri`. - async fn load_top_remote(&self) -> Result { + /// + /// This function concurrently populates `pending_ext`. the return value is only for writing to + /// cache, we can also optimize further. + async fn load_top_remote( + &self, + pending_ext: &mut TestExternalities, + ) -> Result { let config = self.as_online(); let at = self .as_online() .at .expect("online config must be initialized by this point; qed."); - log::info!(target: LOG_TARGET, "scraping key-pairs from remote @ {:?}", at); - - let mut keys_and_values = if config.pallets.len() > 0 { - let mut filtered_kv = vec![]; - for p in config.pallets.iter() { - let hashed_prefix = StorageKey(twox_128(p.as_bytes()).to_vec()); - let pallet_kv = self.rpc_get_pairs_paged(hashed_prefix.clone(), at).await?; - log::info!( - target: LOG_TARGET, - "downloaded data for module {} (count: {} / prefix: {}).", - p, - pallet_kv.len(), - HexDisplay::from(&hashed_prefix), - ); - filtered_kv.extend(pallet_kv); - } - filtered_kv - } else { - log::info!(target: LOG_TARGET, "downloading data for all pallets."); - self.rpc_get_pairs_paged(StorageKey(vec![]), at).await? 
-		};
+		log::info!(target: LOG_TARGET, "scraping key-pairs from remote at block height {:?}", at);
 
-		for prefix in &self.hashed_prefixes {
+		let mut keys_and_values = Vec::new();
+		for prefix in &config.hashed_prefixes {
+			let now = std::time::Instant::now();
+			let additional_key_values =
+				self.rpc_get_pairs_paged(StorageKey(prefix.to_vec()), at, pending_ext).await?;
+			let elapsed = now.elapsed();
 			log::info!(
 				target: LOG_TARGET,
-				"adding data for hashed prefix: {:?}",
-				HexDisplay::from(prefix)
+				"adding data for hashed prefix: {:?}, took {:?}s",
+				HexDisplay::from(prefix),
+				elapsed.as_secs()
 			);
-			let additional_key_values =
-				self.rpc_get_pairs_paged(StorageKey(prefix.to_vec()), at).await?;
 			keys_and_values.extend(additional_key_values);
 		}
 
-		for key in &self.hashed_keys {
+		for key in &config.hashed_keys {
 			let key = StorageKey(key.to_vec());
 			log::info!(
 				target: LOG_TARGET,
 				"adding data for hashed key: {:?}",
 				HexDisplay::from(&key)
 			);
-			let value = self.rpc_get_storage(key.clone(), Some(at)).await?;
-			keys_and_values.push((key, value));
+			match self.rpc_get_storage(key.clone(), Some(at)).await? {
+				Some(value) => {
+					pending_ext.insert(key.clone().0, value.clone().0);
+					keys_and_values.push((key, value));
+				},
+				None => {
+					log::warn!(
+						target: LOG_TARGET,
+						"no data found for hashed key: {:?}",
+						HexDisplay::from(&key)
+					);
+				},
+			}
 		}
 
 		Ok(keys_and_values)
 	}
 
-	pub(crate) async fn init_remote_client(&mut self) -> Result<(), &'static str> {
+	/// The entry point of execution, if `mode` is online.
+	///
+	/// Initializes the remote client in `transport`, and sets the `at` field, if not specified.
+	async fn init_remote_client(&mut self) -> Result<(), &'static str> {
 		// First, initialize the ws client.
 		self.as_online_mut().transport.map_uri().await?;
 
 		// Then, if `at` is not set, set it.
 		if self.as_online().at.is_none() {
 			let at = self.rpc_get_head().await?;
+			log::info!(
+				target: LOG_TARGET,
+				"since no at is provided, setting it to latest finalized head, {:?}",
+				at
+			);
 			self.as_online_mut().at = Some(at);
 		}
 
+		// Then, a few transformations that we want to perform on the online config:
+		let online_config = self.as_online_mut();
+		online_config
+			.pallets
+			.iter()
+			.for_each(|p| online_config.hashed_prefixes.push(twox_128(p.as_bytes()).to_vec()));
+
+		if online_config.child_trie {
+			online_config.hashed_prefixes.push(DEFAULT_CHILD_STORAGE_KEY_PREFIX.to_vec());
+		}
+
+		// Finally, if by now we have not put any limitations on the prefixes that we are
+		// interested in, we download everything.
+		if online_config
+			.hashed_prefixes
+			.iter()
+			.filter(|p| *p != DEFAULT_CHILD_STORAGE_KEY_PREFIX)
+			.count() == 0
+		{
+			log::info!(
+				target: LOG_TARGET,
+				"since no prefix is filtered, the data for all pallets will be downloaded"
+			);
+			online_config.hashed_prefixes.push(vec![]);
+		}
+
 		Ok(())
 	}
 
-	pub(crate) async fn pre_build(
-		mut self,
-	) -> Result<(TopKeyValues, ChildKeyValues), &'static str> {
-		let mut top_kv = match self.mode.clone() {
-			Mode::Offline(config) => self.load_top_snapshot(&config.state_snapshot.path)?,
-			Mode::Online(_) => {
-				self.init_remote_client().await?;
-				self.load_top_remote_and_maybe_save().await?
-			},
+	/// Load the data from a remote server. The main code path calls into `load_top_remote` and
+	/// `load_child_remote`.
+	///
+	/// Must be called after `init_remote_client`.
+ async fn load_remote_and_maybe_save(&mut self) -> Result { + let state_version = + StateApi::::runtime_version(self.as_online().rpc_client(), None) + .await + .map_err(|e| { + error!(target: LOG_TARGET, "Error = {:?}", e); + "rpc runtime_version failed." + }) + .map(|v| v.state_version())?; + let mut pending_ext = TestExternalities::new_with_code_and_state( + Default::default(), + Default::default(), + self.overwrite_state_version.unwrap_or(state_version), + ); + let top_kv = self.load_top_remote(&mut pending_ext).await?; + let child_kv = self.load_child_remote(&top_kv, &mut pending_ext).await?; + + if let Some(path) = self.as_online().state_snapshot.clone().map(|c| c.path) { + let snapshot = Snapshot:: { + state_version, + top: top_kv, + child: child_kv, + block_hash: self + .as_online() + .at + .expect("set to `Some` in `init_remote_client`; must be called before; qed"), + }; + let encoded = snapshot.encode(); + log::info!( + target: LOG_TARGET, + "writing snapshot of {} bytes to {:?}", + encoded.len(), + path + ); + std::fs::write(path, encoded).map_err(|_| "fs::write failed")?; + } + + Ok(pending_ext) + } + + fn load_snapshot(&mut self, path: PathBuf) -> Result, &'static str> { + info!(target: LOG_TARGET, "loading data from snapshot {:?}", path); + let bytes = fs::read(path).map_err(|_| "fs::read failed.")?; + Decode::decode(&mut &*bytes).map_err(|_| "decode failed") + } + + async fn do_load_remote(&mut self) -> Result, &'static str> { + self.init_remote_client().await?; + let block_hash = self.as_online().at_expected(); + let inner_ext = self.load_remote_and_maybe_save().await?; + Ok(RemoteExternalities { block_hash, inner_ext }) + } + + fn do_load_offline( + &mut self, + config: OfflineConfig, + ) -> Result, &'static str> { + let Snapshot { block_hash, top, child, state_version } = + self.load_snapshot(config.state_snapshot.path.clone())?; + + let mut inner_ext = TestExternalities::new_with_code_and_state( + Default::default(), + Default::default(), + self.overwrite_state_version.unwrap_or(state_version), + ); + + info!(target: LOG_TARGET, "injecting a total of {} top keys", top.len()); + for (k, v) in top { + // skip writing the child root data. + if is_default_child_storage_key(k.as_ref()) { + continue + } + inner_ext.insert(k.0, v.0); + } + + info!( + target: LOG_TARGET, + "injecting a total of {} child keys", + child.iter().flat_map(|(_, kv)| kv).count() + ); + + for (info, key_values) in child { + for (k, v) in key_values { + inner_ext.insert_child(info.clone(), k.0, v.0); + } + } + + Ok(RemoteExternalities { inner_ext, block_hash }) + } + + pub(crate) async fn pre_build(mut self) -> Result, &'static str> { + let mut ext = match self.mode.clone() { + Mode::Offline(config) => self.do_load_offline(config)?, + Mode::Online(_) => self.do_load_remote().await?, Mode::OfflineOrElseOnline(offline_config, _) => { - if let Ok(kv) = self.load_top_snapshot(&offline_config.state_snapshot.path) { - kv - } else { - self.init_remote_client().await?; - self.load_top_remote_and_maybe_save().await? + match self.do_load_offline(offline_config) { + Ok(x) => x, + Err(_) => self.do_load_remote().await?, } }, }; @@ -721,7 +997,9 @@ where "extending externalities with {} manually injected key-values", self.hashed_key_values.len() ); - top_kv.extend(self.hashed_key_values.clone()); + for (k, v) in self.hashed_key_values { + ext.insert(k.0, v.0); + } } // exclude manual key values. 
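With `do_load_remote` and `do_load_offline` in place, the whole flow can be driven through the builder's public surface. A rough usage sketch, not part of the patch: it assumes the crate is imported as `remote_externalities`, reuses the `Block` alias from the test prelude further down, and uses a made-up snapshot file name:

use remote_externalities::{Builder, Mode, OfflineConfig, OnlineConfig, SnapshotConfig};
use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper, H256 as Hash};

type Block = RawBlock<ExtrinsicWrapper<Hash>>;

#[tokio::main(flavor = "multi_thread")]
async fn main() -> Result<(), &'static str> {
	// Prefer the local snapshot; if it is missing or unreadable, scrape the remote
	// node instead and write the snapshot for the next run.
	let mut ext = Builder::<Block>::new()
		.mode(Mode::OfflineOrElseOnline(
			OfflineConfig { state_snapshot: SnapshotConfig::new("proxy_snapshot") },
			OnlineConfig {
				pallets: vec!["Proxy".to_owned()],
				child_trie: false,
				state_snapshot: Some(SnapshotConfig::new("proxy_snapshot")),
				..Default::default()
			},
		))
		.build()
		.await?;

	// `RemoteExternalities` derefs to `TestExternalities` and additionally records
	// the block hash the state was taken at.
	println!("state scraped at block {:?}", ext.block_hash);
	ext.execute_with(|| {
		// Read or write storage here as if running on-chain at `block_hash`.
	});
	Ok(())
}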
@@ -731,87 +1009,34 @@ where "excluding externalities from {} keys", self.hashed_blacklist.len() ); - top_kv.retain(|(k, _)| !self.hashed_blacklist.contains(&k.0)) + for k in self.hashed_blacklist { + ext.execute_with(|| sp_io::storage::clear(&k)); + } } - let child_kv = match self.mode.clone() { - Mode::Online(_) => self.load_child_remote_and_maybe_save(&top_kv).await?, - Mode::OfflineOrElseOnline(offline_config, _) => { - if let Ok(kv) = self.load_child_snapshot(&offline_config.state_snapshot.path) { - kv - } else { - self.load_child_remote_and_maybe_save(&top_kv).await? - } - }, - Mode::Offline(ref config) => self - .load_child_snapshot(&config.state_snapshot.path) - .map_err(|why| { - log::warn!( - target: LOG_TARGET, - "failed to load child-key file due to {:?}.", - why - ) - }) - .unwrap_or_default(), - }; - - Ok((top_kv, child_kv)) + Ok(ext) } } // Public methods -impl Builder { +impl Builder +where + B::Hash: DeserializeOwned, + B::Header: DeserializeOwned, +{ /// Create a new builder. pub fn new() -> Self { Default::default() } /// Inject a manual list of key and values to the storage. - pub fn inject_hashed_key_value(mut self, injections: &[KeyValue]) -> Self { + pub fn inject_hashed_key_value(mut self, injections: Vec) -> Self { for i in injections { self.hashed_key_values.push(i.clone()); } self } - /// Inject a hashed prefix. This is treated as-is, and should be pre-hashed. - /// - /// Only relevant is `Mode::Online` is being used. Noop otherwise. - /// - /// This should be used to inject a "PREFIX", like a storage (double) map. - pub fn inject_hashed_prefix(mut self, hashed: &[u8]) -> Self { - self.hashed_prefixes.push(hashed.to_vec()); - self - } - - /// Just a utility wrapper of [`Self::inject_hashed_prefix`] that injects - /// [`DEFAULT_CHILD_STORAGE_KEY_PREFIX`] as a prefix. - /// - /// Only relevant is `Mode::Online` is being used. Noop otherwise. - /// - /// If set, this will guarantee that the child-tree data of ALL pallets will be downloaded. - /// - /// This is not needed if the entire state is being downloaded. - /// - /// Otherwise, the only other way to make sure a child-tree is manually included is to inject - /// its root (`DEFAULT_CHILD_STORAGE_KEY_PREFIX`, plus some other postfix) into - /// [`Self::inject_hashed_key`]. Unfortunately, there's no federated way of managing child tree - /// roots as of now and each pallet does its own thing. Therefore, it is not possible for this - /// library to automatically include child trees of pallet X, when its top keys are included. - pub fn inject_default_child_tree_prefix(self) -> Self { - self.inject_hashed_prefix(DEFAULT_CHILD_STORAGE_KEY_PREFIX) - } - - /// Inject a hashed key to scrape. This is treated as-is, and should be pre-hashed. - /// - /// Only relevant is `Mode::Online` is being used. Noop otherwise. - /// - /// This should be used to inject a "KEY", like a storage value. - pub fn inject_hashed_key(mut self, hashed: &[u8]) -> Self { - self.hashed_keys.push(hashed.to_vec()); - self - } - /// Blacklist this hashed key from the final externalities. This is treated as-is, and should be /// pre-hashed. pub fn blacklist_hashed_key(mut self, hashed: &[u8]) -> Self { @@ -826,64 +1051,20 @@ impl Builder { } /// The state version to use. - pub fn state_version(mut self, version: StateVersion) -> Self { - self.state_version = version; - self - } - - /// overwrite the `at` value, if `mode` is set to [`Mode::Online`]. 
- /// - /// noop if `mode` is [`Mode::Offline`] - pub fn overwrite_online_at(mut self, at: B::Hash) -> Self { - if let Mode::Online(mut online) = self.mode.clone() { - online.at = Some(at); - self.mode = Mode::Online(online); - } + pub fn overwrite_state_version(mut self, version: StateVersion) -> Self { + self.overwrite_state_version = Some(version); self } -} - -// Public methods -impl Builder -where - B::Header: DeserializeOwned, -{ - /// Build the test externalities. - pub async fn build(self) -> Result { - let state_version = self.state_version; - let (top_kv, child_kv) = self.pre_build().await?; - let mut ext = TestExternalities::new_with_code_and_state( - Default::default(), - Default::default(), - state_version, - ); - - info!(target: LOG_TARGET, "injecting a total of {} top keys", top_kv.len()); - for (k, v) in top_kv { - // skip writing the child root data. - if is_default_child_storage_key(k.as_ref()) { - continue - } - ext.insert(k.0, v.0); - } - - info!( - target: LOG_TARGET, - "injecting a total of {} child keys", - child_kv.iter().flat_map(|(_, kv)| kv).count() - ); - - for (info, key_values) in child_kv { - for (k, v) in key_values { - ext.insert_child(info.clone(), k.0, v.0); - } - } + pub async fn build(self) -> Result, &'static str> { + let mut ext = self.pre_build().await?; ext.commit_all().unwrap(); + info!( target: LOG_TARGET, - "initialized state externalities with storage root {:?}", - ext.as_backend().root() + "initialized state externalities with storage root {:?} and state_version {:?}", + ext.as_backend().root(), + ext.state_version ); Ok(ext) @@ -892,16 +1073,16 @@ where #[cfg(test)] mod test_prelude { + use tracing_subscriber::EnvFilter; + pub(crate) use super::*; pub(crate) use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper, H256 as Hash}; - pub(crate) type Block = RawBlock>; pub(crate) fn init_logger() { - let _ = env_logger::Builder::from_default_env() - .format_module_path(true) - .format_level(true) - .filter_module(LOG_TARGET, log::LevelFilter::Debug) + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .with_level(true) .try_init(); } } @@ -910,7 +1091,7 @@ mod test_prelude { mod tests { use super::test_prelude::*; - #[tokio::test] + #[tokio::test(flavor = "multi_thread")] async fn can_load_state_snapshot() { init_logger(); Builder::::new() @@ -919,15 +1100,15 @@ mod tests { })) .build() .await - .expect("Can't read state snapshot file") + .unwrap() .execute_with(|| {}); } - #[tokio::test] - async fn can_exclude_from_cache() { + #[tokio::test(flavor = "multi_thread")] + async fn can_exclude_from_snapshot() { init_logger(); - // get the first key from the cache file. + // get the first key from the snapshot file. let some_key = Builder::::new() .mode(Mode::Offline(OfflineConfig { state_snapshot: SnapshotConfig::new("test_data/proxy_test"), @@ -957,19 +1138,88 @@ mod tests { #[cfg(all(test, feature = "remote-test"))] mod remote_tests { use super::test_prelude::*; + use std::os::unix::fs::MetadataExt; + + #[tokio::test(flavor = "multi_thread")] + async fn state_version_is_kept_and_can_be_altered() { + const CACHE: &'static str = "state_version_is_kept_and_can_be_altered"; + init_logger(); + + // first, build a snapshot. + let ext = Builder::::new() + .mode(Mode::Online(OnlineConfig { + pallets: vec!["Proxy".to_owned()], + child_trie: false, + state_snapshot: Some(SnapshotConfig::new(CACHE)), + ..Default::default() + })) + .build() + .await + .unwrap(); + + // now re-create the same snapshot. 
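+		// (Strictly speaking, re-load it: offline mode decodes the `Snapshot` written
+		// above, including the state version recorded inside it.)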
+ let cached_ext = Builder::::new() + .mode(Mode::Offline(OfflineConfig { state_snapshot: SnapshotConfig::new(CACHE) })) + .build() + .await + .unwrap(); + + assert_eq!(ext.state_version, cached_ext.state_version); + + // now overwrite it + let other = match ext.state_version { + StateVersion::V0 => StateVersion::V1, + StateVersion::V1 => StateVersion::V0, + }; + let cached_ext = Builder::::new() + .mode(Mode::Offline(OfflineConfig { state_snapshot: SnapshotConfig::new(CACHE) })) + .overwrite_state_version(other) + .build() + .await + .unwrap(); + + assert_eq!(cached_ext.state_version, other); + } + + #[tokio::test(flavor = "multi_thread")] + async fn snapshot_block_hash_works() { + const CACHE: &'static str = "snapshot_block_hash_works"; + init_logger(); + + // first, build a snapshot. + let ext = Builder::::new() + .mode(Mode::Online(OnlineConfig { + pallets: vec!["Proxy".to_owned()], + child_trie: false, + state_snapshot: Some(SnapshotConfig::new(CACHE)), + ..Default::default() + })) + .build() + .await + .unwrap(); - #[tokio::test] + // now re-create the same snapshot. + let cached_ext = Builder::::new() + .mode(Mode::Offline(OfflineConfig { state_snapshot: SnapshotConfig::new(CACHE) })) + .build() + .await + .unwrap(); + + assert_eq!(ext.block_hash, cached_ext.block_hash); + } + + #[tokio::test(flavor = "multi_thread")] async fn offline_else_online_works() { + const CACHE: &'static str = "offline_else_online_works_data"; init_logger(); - // this shows that in the second run, we use the remote and create a cache. + // this shows that in the second run, we use the remote and create a snapshot. Builder::::new() .mode(Mode::OfflineOrElseOnline( - OfflineConfig { - state_snapshot: SnapshotConfig::new("offline_else_online_works_data"), - }, + OfflineConfig { state_snapshot: SnapshotConfig::new(CACHE) }, OnlineConfig { pallets: vec!["Proxy".to_owned()], - state_snapshot: Some(SnapshotConfig::new("offline_else_online_works_data")), + child_trie: false, + state_snapshot: Some(SnapshotConfig::new(CACHE)), ..Default::default() }, )) @@ -981,12 +1231,8 @@ mod remote_tests { // this shows that in the second run, we are not using the remote Builder::::new() .mode(Mode::OfflineOrElseOnline( - OfflineConfig { - state_snapshot: SnapshotConfig::new("offline_else_online_works_data"), - }, + OfflineConfig { state_snapshot: SnapshotConfig::new(CACHE) }, OnlineConfig { - pallets: vec!["Proxy".to_owned()], - state_snapshot: Some(SnapshotConfig::new("offline_else_online_works_data")), transport: "ws://non-existent:666".to_owned().into(), ..Default::default() }, @@ -1000,51 +1246,20 @@ mod remote_tests { .unwrap() .into_iter() .map(|d| d.unwrap()) - .filter(|p| { - p.path().file_name().unwrap_or_default() == "offline_else_online_works_data" || - p.path().extension().unwrap_or_default() == "top" || - p.path().extension().unwrap_or_default() == "child" - }) + .filter(|p| p.path().file_name().unwrap_or_default() == CACHE) .collect::>(); - assert!(to_delete.len() > 0); - for d in to_delete { - std::fs::remove_file(d.path()).unwrap(); - } - } - #[tokio::test] - #[ignore = "too slow"] - async fn can_build_one_big_pallet() { - init_logger(); - Builder::::new() - .mode(Mode::Online(OnlineConfig { - pallets: vec!["System".to_owned()], - ..Default::default() - })) - .build() - .await - .unwrap() - .execute_with(|| {}); + assert!(to_delete.len() == 1); + std::fs::remove_file(to_delete[0].path()).unwrap(); } - #[tokio::test] + #[tokio::test(flavor = "multi_thread")] async fn can_build_one_small_pallet() { init_logger(); 
Builder::::new() .mode(Mode::Online(OnlineConfig { - transport: "wss://kusama-rpc.polkadot.io:443".to_owned().into(), - pallets: vec!["Council".to_owned()], - ..Default::default() - })) - .build() - .await - .unwrap() - .execute_with(|| {}); - - Builder::::new() - .mode(Mode::Online(OnlineConfig { - transport: "wss://rpc.polkadot.io:443".to_owned().into(), - pallets: vec!["Council".to_owned()], + pallets: vec!["Proxy".to_owned()], + child_trie: false, ..Default::default() })) .build() @@ -1053,24 +1268,13 @@ mod remote_tests { .execute_with(|| {}); } - #[tokio::test] + #[tokio::test(flavor = "multi_thread")] async fn can_build_few_pallet() { init_logger(); Builder::::new() .mode(Mode::Online(OnlineConfig { - transport: "wss://kusama-rpc.polkadot.io:443".to_owned().into(), - pallets: vec!["Proxy".to_owned(), "Multisig".to_owned()], - ..Default::default() - })) - .build() - .await - .unwrap() - .execute_with(|| {}); - - Builder::::new() - .mode(Mode::Online(OnlineConfig { - transport: "wss://rpc.polkadot.io:443".to_owned().into(), pallets: vec!["Proxy".to_owned(), "Multisig".to_owned()], + child_trie: false, ..Default::default() })) .build() @@ -1079,13 +1283,16 @@ mod remote_tests { .execute_with(|| {}); } - #[tokio::test] - async fn can_create_top_snapshot() { + #[tokio::test(flavor = "multi_thread")] + async fn can_create_snapshot() { + const CACHE: &'static str = "can_create_snapshot"; init_logger(); + Builder::::new() .mode(Mode::Online(OnlineConfig { - state_snapshot: Some(SnapshotConfig::new("can_create_top_snapshot_data")), + state_snapshot: Some(SnapshotConfig::new(CACHE)), pallets: vec!["Proxy".to_owned()], + child_trie: false, ..Default::default() })) .build() @@ -1097,38 +1304,29 @@ mod remote_tests { .unwrap() .into_iter() .map(|d| d.unwrap()) - .filter(|p| { - p.path().file_name().unwrap_or_default() == "can_create_top_snapshot_data" || - p.path().extension().unwrap_or_default() == "top" || - p.path().extension().unwrap_or_default() == "child" - }) + .filter(|p| p.path().file_name().unwrap_or_default() == CACHE) .collect::>(); - assert!(to_delete.len() > 0); + let snap: Snapshot = Builder::::new().load_snapshot(CACHE.into()).unwrap(); + assert!(matches!(snap, Snapshot { top, child, .. } if top.len() > 0 && child.len() == 0)); - for d in to_delete { - use std::os::unix::fs::MetadataExt; - if d.path().extension().unwrap_or_default() == "top" { - // if this is the top snapshot it must not be empty. - assert!(std::fs::metadata(d.path()).unwrap().size() > 1); - } else { - // the child is empty for this pallet. 
- assert!(std::fs::metadata(d.path()).unwrap().size() == 1); - } - std::fs::remove_file(d.path()).unwrap(); - } + assert!(to_delete.len() == 1); + let to_delete = to_delete.first().unwrap(); + assert!(std::fs::metadata(to_delete.path()).unwrap().size() > 1); + std::fs::remove_file(to_delete.path()).unwrap(); } - #[tokio::test] + #[tokio::test(flavor = "multi_thread")] async fn can_create_child_snapshot() { + const CACHE: &'static str = "can_create_child_snapshot"; init_logger(); Builder::::new() .mode(Mode::Online(OnlineConfig { - state_snapshot: Some(SnapshotConfig::new("can_create_child_snapshot_data")), + state_snapshot: Some(SnapshotConfig::new(CACHE)), pallets: vec!["Crowdloan".to_owned()], + child_trie: true, ..Default::default() })) - .inject_default_child_tree_prefix() .build() .await .unwrap() @@ -1138,72 +1336,46 @@ mod remote_tests { .unwrap() .into_iter() .map(|d| d.unwrap()) - .filter(|p| { - p.path().file_name().unwrap_or_default() == "can_create_child_snapshot_data" || - p.path().extension().unwrap_or_default() == "top" || - p.path().extension().unwrap_or_default() == "child" - }) + .filter(|p| p.path().file_name().unwrap_or_default() == CACHE) .collect::>(); - assert!(to_delete.len() > 0); + let snap: Snapshot = Builder::::new().load_snapshot(CACHE.into()).unwrap(); + assert!(matches!(snap, Snapshot { top, child, .. } if top.len() > 0 && child.len() > 0)); - for d in to_delete { - use std::os::unix::fs::MetadataExt; - // if this is the top snapshot it must not be empty - if d.path().extension().unwrap_or_default() == "child" { - assert!(std::fs::metadata(d.path()).unwrap().size() > 1); - } else { - assert!(std::fs::metadata(d.path()).unwrap().size() > 1); - } - std::fs::remove_file(d.path()).unwrap(); - } + assert!(to_delete.len() == 1); + let to_delete = to_delete.first().unwrap(); + assert!(std::fs::metadata(to_delete.path()).unwrap().size() > 1); + std::fs::remove_file(to_delete.path()).unwrap(); } - #[tokio::test] - async fn can_fetch_all() { + #[tokio::test(flavor = "multi_thread")] + async fn can_build_big_pallet() { + if std::option_env!("TEST_WS").is_none() { + return + } init_logger(); Builder::::new() .mode(Mode::Online(OnlineConfig { - state_snapshot: Some(SnapshotConfig::new("can_fetch_all_data")), + transport: std::option_env!("TEST_WS").unwrap().to_owned().into(), + pallets: vec!["Staking".to_owned()], + child_trie: false, ..Default::default() })) .build() .await .unwrap() .execute_with(|| {}); - - let to_delete = std::fs::read_dir(Path::new(".")) - .unwrap() - .into_iter() - .map(|d| d.unwrap()) - .filter(|p| { - p.path().file_name().unwrap_or_default() == "can_fetch_all_data" || - p.path().extension().unwrap_or_default() == "top" || - p.path().extension().unwrap_or_default() == "child" - }) - .collect::>(); - - assert!(to_delete.len() > 0); - - for d in to_delete { - use std::os::unix::fs::MetadataExt; - // if we download everything, child tree must also be filled. 
-			if d.path().extension().unwrap_or_default() == "child" {
-				assert!(std::fs::metadata(d.path()).unwrap().size() > 1);
-			} else {
-				assert!(std::fs::metadata(d.path()).unwrap().size() > 1);
-			}
-			std::fs::remove_file(d.path()).unwrap();
-		}
 	}
 
-	#[tokio::test]
-	async fn can_build_child_tree() {
+	#[tokio::test(flavor = "multi_thread")]
+	async fn can_fetch_all() {
+		if std::option_env!("TEST_WS").is_none() {
+			return
+		}
 		init_logger();
 		Builder::<Block>::new()
 			.mode(Mode::Online(OnlineConfig {
-				transport: "wss://rpc.polkadot.io:443".to_owned().into(),
-				pallets: vec!["Crowdloan".to_owned()],
+				transport: std::option_env!("TEST_WS").unwrap().to_owned().into(),
 				..Default::default()
 			}))
 			.build()
diff --git a/utils/frame/remote-externalities/test_data/proxy_test b/utils/frame/remote-externalities/test_data/proxy_test
new file mode 100644
index 0000000000000000000000000000000000000000..6673bd6765ad8df2a9e74f210d8795acfeb54a30
GIT binary patch
literal 70206
[binary snapshot data omitted]
z;Wgb@Da?>2|BN4)24FyQ53DH{DS8gVIAd8JH3sZ@H1sK2KzQx&xi z6#xO|sD)7S^!oKvq#CoF3h2mRyJ*=U7Vs^wP7~a){guE?DHyoT8zE9&o51J}u2x9r z2iLO$$_euh<$lxdUu6t_pFJ3n=2wcVSb>1HXReK2rNKkrg~NhN{Lf1CD)=9zyq`@~ zREa5@Ze}&XWHbgpTFMvYxgV5<@k2t*anZS-f>>}ZUzggK23KdUpOL8K$ORSn7bXA! z18>M9rSAx<5%Jl(IaDvnYA`W?Dfh|_f9?F(w@p}`OOb-2+Z)zIF%>=f(GzQ3YbTx~y*2|~e`o{}2-tQx zgk}A%vl5G3J4Q^mdm6T-%>LB$ORrNY zCyW*)m&Km$w;groE-eqOl2xXd7_`fn$d><^!iHtA^hRYzkc!-peUxg61mED>D8^!7 z5QV+l)_m%Ux6%!wpS@aNJ_Zm#d~lf!yVp`T>Zdj?3CS(t{&AUHp5@7Y<@-4uZEMvl zFfoA5WNcW*^7!Q3tpRJVRBof4c)@*aen2$T(ah=zzmm%CSJ(_>Fhcmx3M&pmq}^k% z@XTTPw|@FEA^)rQ;R~=_Nkev<9FW4*lh^u9_Q39yL`NW?!T!jLOHa93iRclBB|46* z!2UL@gK+)Seu#P!mc&>R%?7|XPx=2)?Rwc3hNs6982{!=~ zJ->Kp%n$$sY(`;i{NQ16w^?9aiK05(WH1}l(<)p~$_|}7h`(Nz1=oPZPh5E+7xPe- zkli7O=GSy$@0;l|-#T$=y>g!PmCRs z-3s5Q4FkfOl;{+w6%TF z3`C&4Q*eDieo9?H402P`3co!Q8Uq?lZX{u<%xD zKtjo%;29L%{jhj#i%eY-vQggr<|>~_SRvyCjcs2aSi9z4Yqc()!jZjL;0diWUl2kZGb&;@O^>0R!oLUiT#qd8b89xPT7n%>p=U*l7w6=e}Kun$OzR;ZO*?+be|3cJmC!PQMi^7L*7N4=-l5$A=XN)2s)fa`J zL}8%!KB|&W=lwm&7M6bd6u&MHYu#t(d2Jd9@Fa1!I%cqj2;b8$CVlLo z{ScpP-4Uh2A@#LEcE1Wv8O*d!P<+|*YtD}&a~Cvynfg)TVyTTp3 zXRZ7`i+l!nsFT~}IXFxKz)HXWL1B0Rm-Zox2Aab|NdUB|sTly&Aym+=0q0FN5B63E z>B7f#$miBxQ6*^c=@00GS$&B7RuQPz=BLO-D>u2lR%wdq^x0?p9Np-d@BN$KR0HN) zd?fcB5syHDTKciIEvZ+ID5_BkK{mp5^K`=)N&Ewo!$ivQr*UJF$cqJ6BDl3S2x5I9&p*|t| zJ1oPHg^%?&?Y-xlAhE0oVsgF1Ju||Y4aSWvA(z7j#xm6jT`a1f2>b{h<^J#X!ed0^~t&)Yqer*MvdF^Phi%?m+Z`TMwOs#>tn6yVUA-b?~cveL8^He#A=-yxopv;5dNPed^f_V%5lXc+Im`~}=gn`gYa4oQbW)Qbg#f-<{1P3{ z00J1uaxXBO87gZFaWVuSoK#dhAKAnk^`XY=)*>>f!m#`$t5Rb-Bs2t7Nq8@y72Bzbu=vlf|BjM>TK_xKB>y4O@MH$V#;?xrPcj3^u53-U#gU zr-5X=DoFvzl*^97?9u-OH6T`0gLC01J*JZN5wqk-C4DiR8E6FomgO0XQ5NshD8V#< z4(DO(Dc90wqE)WMZ0OQ>gdzGdgN$1di7Ks5X|<*_TcDu~Q-6vIo2O%RCi}Qsi-)dsa9Kd-QfMzdoQslncwxCoV!kC}$77&DCW!JY?i!+> zH(a2lmCNTSgx~`xrx9wXc(0wMnw;-&61>T4LQP{d)zcgKNYk8~TRvFG`e$NGvb>Mq z5oxdj`P26`$>AfH=!Q_w-o>b?l7m#6I=hyG7pU%*SlEs0uV=02(GO=eIvB;6Z*e=d zIE}2H>{V435WoI|=fBQrbUz>_QMYy8&1+X%L`O9SKZ{|Iq~2+pKxYJ}mv4=BZ7?X6 zSozG5fSM^sG=zlTrT4D9Y|zJa9aIBJ8~};1P6LASQaWx$v9D$?7W~tq7vBGUGewVb zv#1uqmzv{d)4ub)==IUAysMuczh%(lRxMrCs*O0g5ZzX6eKio^FRJ72C192m(`Rh% zNGK!mZZQvcp7QJo|C@;erV)oWn7s_W&PD&*TI8c)){Bzwfu0QAQ;7>>kOYzf@w~4d zZMr_=ipI9{4knWW0Z|o}!}^i7Wd=8GULFdV__7nF>WIQu6k{JAfXlYO<-s(7frTYw zxM+oyl@ynOR>680rk0lqs@Ule6ETog7(MFgE{)y<`?Uux5Wr8AGeAwioTM7SrsWkD zu(qz?bz8D^PI!5|wC7$ZDh+1pWfG)vJFRSZ$P{*1@^tLzdoF(7*_b zYpj-K^g=y71)=_(ib`?1RW$!0dc$uxV)5(Nl{vkgnw=k`b4trV5) zW@Rp~4(m^J1U@0(x{=xor-fp%TP&$j4ZQeim$~R`tMR5UlANfjfvvi(1lFkR`*#?h zdEiOOcVx5vl>5J*Q)G@_An9b5OqTduor~IMK|7?spR{vp9UWkDS0O|`0h&}IKVW3g>tG;ZK_z~R2tY(4>c$d<1~`=i^wsj z4G!(0x^E?PxW3CF^yAfkHz>)%OcI6_liMLJMMvMYTO3S z;xLajUX?Krz;0}l5*#1h8|1D=8t?C-w3sWMh{kh5Tji=94%sL@wvs`LkApZCd zx+B#}b0`vrJyep6^~k@Vuqp1x#}(st0zkX4TChcMT!G?f)^@SX&sbG^bX&ZyP%pGU z7{AjxOY<)xpeI$j!IJiq!Z?-7;n@!htoh~Y+;ZsuCz^j<$5^3h{9IMKzgmb4y-CM_ zQ{zzSqRGM`FKJ~HLgiE(mG0da2xTr{qXYtIMT0alNH}eEVENIj9w5T_`IonjfLdrD zTZ?4|grWW@cz=H?z=p@)XLz>B89L}K6N@dW)6)yvo!KFl!Rt!!+z>2&_Qg{6mrEKY z!GA_5ww|b2DE&I<&t*3eJ|Ezq^_!%>vTP7|Tw#Vqa#CiGt|$IX)nyk29n7HOUKdYJ z6kPe z*MK#(xumst2YP>Ex#+WRrx)>F_i0?A29I7Nm_R?HjTyYbmM1RFYajqN)j903cB&}1 zegL-}1x4QccsIq4uDQL^DYkqklMDk)La`qi=1nW{Oj*V^9K&s)X8?TOjvg2EyrQro z-)h?-vm3idZzmkfp8@T*KPg(FF<^cBDG?M2(8AtCQE~KI6q}_W*C#@4Fqc&U(*O?a z)*>r0l?kXm6X^?t=`Lz-CPusn=GZdir$EN&pNkAZ{Cah%qE4W0&O1U|9l%)s6e_}H zx>t`J&${Jf7pJ>Hhoxep)O z6deAj5`W*#ao8E(BF*;5Cr}07O)76=(f9;D_cv~^pq9_EqFtE`wp&@aYT!_a{xes= ztbUwOO?ZZ6K~uFK8by}%8G)!XE@Z=m-aP4{v%dnEC%6C1Lo*4rZV@d0}eDXcDm`E0%(L2Hy9DuFS%BoEd!` zh7&~)y;pdVvD 
zb=d_CIfsIoASbfJJh>-*KfUATUHp?)kI|p)$zKa-TpuaT83WWm7tB`(C%T}EE_SRZ zSPrB`OcoHEq%lQM_EDfVYRip*+HPw7@|&oRv0uq3j1U#Zj=A(!Qas;!;durH@W0du z(sF~DQ@GAM>ru(rI%#hXi#n?&ZfyDmQG9a79z4w7xEDbSvVC=OP}t1PQ!PSO6DS4j9B>71_t60iyVQQQ@Bk8(2t?LRUf+KrUZ`t7Ajqol-K)$ z7pq^RXF2V~6rl%_-G2LB4Fd!e>>mtVW~+Scjc`zXwH@?$LzIYtPT)6aiusf~>}jq9 zCRDlYKZ!|+WNB&n;EkEA6RzOq%4ROn)ZBAWDV~ zGCi;wne}U``Dj0oSvCG=Cm!5`O$O3ah!C*ULfBWHmK3F_A!8c3T>1kGn(eTwMFLH< zSPOyCil9!hE9#-U&7#K}C2g%a3997T>-3Hwdgn|dh0RK*)X?BPm>}eNL+CftC{oXP zr-*tkA!cPSJx`FzLX&1?6bW%WH$$F!FqNOjbP zOi91TZ~b6==)e%I24)C&Bd96yt4_Q*R}}1R-AKI%ej=5G>7eL={54$7+kkqaNW`#Y5+LB8R)ASC1U=80^9jnoGFMKMF3i<+ zpQYRq&-j%#xxf#^YG$bueHY^+XdOI9WR28HrmX?D=SU|Qq-#hbfv~1 zQtq8n0;%}uFXJd2L>9-lBR z{Uik@3xtcq_A_3`&mzhm-uU?o0vZ-<*zqv z!Zo_p$al8}=WQ(0Z^h83KE-zsY#(0`uN*1Sz^2bi%@iF$b>rhHfxe{%JJ&;}`0*a4 z&>^!@Y_L0!bag-!5)i}bb@6;I0{2Hr|EEQ8|Fg5k8@nLOsW3HJ^>+&HHvR$@*?b(j zUb%H>hIPGuVC)T;-y#Cv+|Z+D%CvN{2Gdbu|FbKTQY=~I2%u9HrZQkubz;LLh9(zL z1-fm+JXBBrSif~3!p@X6QOq#n-u zUu-;HT<}jDkC#P%-_MGwoT4$Gy>h6b_)fQ&^X+roRLb(q=x09})|sM(^}95;XPSt` zMHkQ>`9EqWC203JOhr4Lqo`8EXF3s3lgHtG@SbSuLjcAnnaVtyZ$8>v{q&Ix12xo- z@XaS*%U8Z`W`t?&?dCZ6bQ{)A3Y#@yYti!%c z%A7dqQBiX}?fQ|Ud_bJ&8kVM892BU38cTSIL2V{_ZRDKP;RVuejRjj0Dl&O7-&A6zP47nF-EuC)5d9ZeLFm`5rl-3 z1cgeo+Bl0Lu+^H9#q|oMSy_=7u6R1ld<=+yb>KlJXo~&9X_;NsY^ZYvuz6rwJvH6WUJQT%jgzFkxFJ79y@8}bZgJ5@!ypUxTKz) zCW0mfn$Ke`I&VUmY;#5}6UjB_%@)ncS{t(CBIyJR$Q z&?+t#11Z(~Tt*V(*$l89*Jbj>f{y2<3!p$(t71NAFTc9JSQ;j46)UQ~9+FvLhPiwQ zauw@!4T7(oa(My$-%E&ZvGmotp<7|e=v-7*r9-q_MJ{2rWl{4{yIONQ7vBn&U|2Th z%?*)(xcS*_716>$mO@pHsFkBJ>IlZiQC~#c)%@%0(`?;QLdcg6r!UVll|=F!in=ba zvMYT^LNlf^JE-T2-a$TR4(ulegO^2rpG4ovlrwJ*Jm-JUj=mVL{k#mP?FF6gFuRCO z7p&1v2a+MVKvS*1Ii!F9ASE?FLr#5`L|yK^Hld}fU?HGy!d8&J)LyHJ`U@Ktn1Om* zJ&%fR+EtUAR)g^=OgOsrJz#;k6xnDR4WNA1-Aj7s6R*A_LIni14geR%fdqcIC*X5N z{Z4InPC3jE5#^BnpIEK%TU7GMilE!t_eHwo)a!! zw{%#)nE7S64K?qd=@=iT|H(`Rg`FPMYNMOhX6u z%!O5cOdEAa?bdcXl~|lqmfea|%PGiR5LQK~g?qmS)YglUhqJ~<^-f~%Al=yHsuct; z_;;~=Xq#+tuU7i`Yy&3k;X(pp!s&zd)j|^r}OOPmkZ_4%z3rK&t?mKz* z6j_0|{(Kwy_+-PX=)CnUzAtT)B_1b!ryUQ>04Zu=gqhssvr#lkLZRP$Md%Dc54U>z zOi$r*{H+%IN~`0s>g*h|V;oc&{?%;q5yW6EZ+w)K-tAUDPmbbMUJl0A`BohOF1`Jc zD{ckrPZRUkk2xrXxE}+vc!w!8fxk@1EPLgn1WDvV#Ub z|0A7B>5NMa8!LL!S22XuC#RjRCUx@?X2|6t!+jy32L0Gj&-lS|z=!WG@#*BN%iG+<^^xd;sOPTbDA83X{eZTWRXTkJ#0n0m$dX@b^|?nXx)6L*B1Fl|e{>jX8Hc|8hfR z!J%Ak64mPQVQU0K*&U_|Re>ZX9OEN4G%Gy>1cR1W8RqNf#4-3b%;e3EXtar(nQ2Pz zLYxp22YZe`^A`|*&nQ(B>bTr*vyuzcGHMSRQd4G}1aY5z7Q3)ae|#W?6D}8(xP)SU zFB}U5+?-O%B_%R=TZ}%>5d9=x(Yxo*%M~$BJENIU8PpVq+lU{K41n)wVI=2lWNa7*Ql6Pud%_K4*JS8w%sPTl5&A%Hk0ot;0xKt$BPw^ zoc2#w8Nu|xJIJ6qh*&KQ>R?kyZ^1#Q%R?3|Pz&*^X6fPlzY zg!iVq=<-HnA5Kw?gB#vt@cHT;#Glgojz(H1zGem2fN^VYS3*&^z<>xTU+Pglf^qVgdS!PABRFAm9NqMa32+*=G4B@#@`VjWT1+q!iM!g-Z8&K0@}@ zcXV(Kxa=e9baVLn2}$j3#19_pTi2R_t$E(lz zn7I1F+i$vr9KU8wX*_c?v+lgG)WUjZ`! 
zM!W*rX+1cBR&vy#GZ-By37*xki*y#;JJo8BaapzQ^|43^d8!J^L>1q{ffl52DQvfm zti0=0t0yfVKHXOr9!vrO$isNb(}IoFQ`eV~3|${hn>WzO?x9>F0(^BU?-QNgg6RQu zgW8ShgVHg6neO3b6F99C>F)>)uwLP1Y>huv%t!I6)jx_yFsnlY0Z%nYlEbU94|0>2LH4OOhrgTgi8JW!nfQfHUADhNgX*U8pbVg+#&oX zb2YIaV#_AAOhRv5Ep$QeX}E65v4$>tFF|BO<}0%lMlYw*Hl_w>qG@>l#Bq=owg(d_ znv6sf22viPtmViSA-m@%5+h+57&<>xs`6+iMR^+dqPL50d?8nxL4G1^9&cNre@3NL zz&@nl%V2=MEU9w}3UnJ$#AbTSb)|rw;QsaQq<+bJ>(JerF=o%H~ z=$d^4@e7{*zOrie#4Y+p5lno7)gE)}G&170!$&NK7kZp;>bpVWaUzcE9><^Qhyxu| z0rz1E*LAP1WdPzuOs~+%Xrg*?*$FZPE<*1QUfIHfJD#mIlSQQU;FA(YsUzIi?IuI% zRY9}c*foy^30>F0_jT+HDoSuNof948Cwm$}28@~a>HEIjm)VTi($H_!H^M&(Puk9m z!sigO8N48Rv?{OX+4ME-YS8O?x?5`V+ThY7VkD27`cWrcx`1qSF#V*nHAVe-hQYM~ zO9_4aB%CAWM~PnNyqxg>Dz3khmlKTl3Hx~Uk7g(k@r}&*%;b{g5GGt?>!Pbn^)LbT zPf$M-YNzBh0zo(Gz{Ha-E1_#exuQS%SR)5-1yZ5YjFp=Vr$ggu6Hmow^qcZ-I}EMJ zJq>oyTSj=N{0};PEGh8C`vpxyXq@*k31i8=p0y)8ETYy7T!P`=SnI zf*;CZn?H|aW9AyC2<(*^p=(4E$`VN(C1)*&BTSZo6u*Cx9>7d}`V~62J&{M9T0|OW z9W^fBT)VukGBSG@g;)yoZ6KYsnH{z=dTUHT*T^**!&6q5#|4cXva5v8c=x?pbIb(y zgCV@XeCMoI#rcy`{@w!yF=!w1noHL@5T`Id`A>a2e>aTBe;9c&SlS73UKmRcP#HiJ zg+guzsm%ZX1muMBatbI@=$KMxetZLSq|-X+XUlG6sD7KX4>=2`-BT~1uZU+*q_)r- zkd>Ro9~eoX@t~a_N5ArLc}HZlu)GDE{<7%rRc3?Yh+qAj*^)gA7E7A4v1E1PDVwty zp_9rzdUMoBcRu$k&EK0wctIzaqzWb|m*d{ufqaKeHt2p5On580o_dkt28JFzA_rfE zzzn&e7;RoGK~zxAfNr7O|JiUwfwK8JHxy% z#pHNJwv9)wD%?S{bgs~@i;eqsO}p6!Rtyzqb%;p5-JJXMS)YDM z(qynu{Xu!@O9p7O30;hXE~j_6DWH0iscrq%0@A4~|J|<^U|V0)xmPF-xY69&f6I&_ z@nO3*E`Nhc=`D;tvYw`F!3F=8rxE(QcM-_pkeD{tlb|W~6}TR>?t*TZ{T+&|llOKC zcVBWN8S>97efjM--z(R%f~gxBLC|>>)@NXA8IR~L23}12Y#x$3N`mEn7kKM1WlN*Sd(2Q;iVhEM_pO2OaKxx|hEH zrSF@eI#d*P8 z!xz^5eWo=@?PPd+@W!do>S}w^<~V8e{uFY9U9Bz*l^k)g4!#$~Nv+pI^i?*9_vp-z z2T-edrHwYEf$96GpIS0?LKAlJ{1AsHWuc4rv0up2u6}yDlp~hBDrS7me(xRMl$Rap zM|CM&hpNKBd?Nm5&gmGj2ACT_nJt{LKXS{j5eCl_TH3cAyD;FUsv|G)N#qah{+jnD z1&DODkBo_R02m_mzG1{4_DCGkpA{2GMgkB*3&so@)x17DywL}}|MHs5vnGz$4w+gi z)Y1}@zeD$G1pi#Ml@T;ICk}B*2LO}zCg<88ilPot80pm;H%NCmh@G8H~T~b=^e`t2&4b)$c$@nqQZxKFAU|0f@N>Nt%)2-j7QU)oW{Gr%uMU z(Es=V?%<#quH$hbQMEGPh2*<5k=Pcs;NKRRDGDs(Wl|6ip>Jdd3oZ?WOO-$edjIqW zFPQ1ctkiw%>LkNRgM*To0ad2b>Xzgv)(?okD|Fo#iVan#;4`O>Fnxpk|Fyp;uFH<- z^*mC;u(OOQLMm&eGIufb$#iauZSWh~mV#(|VekY8+^zj1a&3da^CpFu1XOWdE^#a0 zN7c1OOd8re2jiDTf8Sr2ov?pz#FrkCFAX~EO@X1%Mu6OyO0yYA#ySf!uCQaYk7hMJ za9zX&0!*xitkTP)M!JP)zP83Wbp8^Ra535;`i(EksuV%`>;z__GsjufduN#9_ZvoI z3zD_DPegH$l!F$>B<+1&QI;;$+v>jTy{@Y`UC@wPVRs^gO~4$=1AW``od2n<#W@av zata)L!W}-Iege2%y?IriV=Y>EneMBsVeKodux{K>E-4MzJc5fCm6s)^4~^c83W#}0 zQ$WDJsI5AwJxk#(Oa)UP7OdrlJH*V;T!*t_Kkix8NC>#MFwDDS@`?*8uehBht|7Qu ze~`E_zdk-Kct1Qa7@o5bkqC2XXOjH}nG*<*+@LuAi5J}O`c{Yih@pTUpRq1TenD6K zQmc1gAvR1MOwgOpd|s~HbX7=+PyNaEm0Sxa(#R8unGrgKR4jbonc=5EJCS3+k}jyL zS5=aXct32ElatX2_yLWM;s5TF*P8c}MMV%vzGlS<1(+DHV0R0m_h1=qlhA4*Dtyx> zeUq{>EI2>%mi`&efNJ`dNgUx7@`NVnrsRJ#a401aUtnIZSSwH~(YrsK<^` zQ{jRNQGad-Xzefiq3oNUk?b9iLs0G5;vZU`XSAYMkQ*f_3w{?om^exh9(RisI z=AHf9V+@cCvs#ACYb7&ei}rC3Ivz=(`+E;}M>*aw?6n(bOm82!9hbGC!q~;?qCUIh z=H3Dy>KO1gI?DgiDnZGIhgW791$vD<)c{)@-xmn5x%l94&Rkr;QrOCn=(!S>}EtgWMz+I5h*CJ)!ADMA=a+zKYp=ZHT zy$71Lo4DlS_+h^uN0m|`K@4|UdbL+>cbVuq`CbQY-`k*u0n9eZmMEf|z2FOd=wajg zEA@_bm@jirEQF+7p)A%8YUxHEx<4~E)8KnOK-HJH1OaIcnbbFC(7VU5YlvH!u@1Xk zoLf9T+3P75PV~=HN&j@S{DrLlN-FuMVUL&p>F=wrt?GA76u#V*s9g*N-`{U?l_64y zJ$|C~TPlSWueiwx2qO1eP9gY!etYC!9TNY2mv3vig@ZaH=OKB7JoPq5GJN6eu*)|YCVFk(wvgo9 zvCHiRYR#htczTqD7>3|~#(rd(5lvVb;YNjw0<7vH=5J zA=S^5>Jaf}Gtw_JzUY!bKpx%K(0zp%-S`(pW$4{Ix=DvxKG*J zOOoUi$Y;F`g6)15|8VU|4S|dgU(bx8AvbSw8svQcu7!p8(4KkmKP!v9DJfb!zx$Q7 zb!$@U(G@YU;dXW!qzqE={ZxKDDsm+@893Gkv2tMvt|NX!WG83M2eoz0Ziz|5yGY_yc 
zp*|>VHcs{QV8)5HAXW7{<<*PAf{4MPZj0zHF>|N=dE^FPw+JtX+-H3k>J*tfnT6|& z5=@1~@jhStyYKJu^$=yN!t9*YYjPMefue;Dfyn_0?CRu??PP{QhCOH^K!6hi;if%i z)o-%wwTZLZ+0DPAr`_b?8O1c2Wh0lA>Wwz4)T(NG z?b~fcO7Khc4aD-Bc)HQ0P>{gEyWm%c1{FcfhP7qf4mt9i&2gxCL!7sg%xsjPH0J-O z+${)wCTZ3~;GMUkZ9V@Od>wqCH+GDjpJexEcdnMCjM#J(+qVy_DdG2UWX`b}#2trMMX1!re% zP*g(D{p6`HEGhrz-UZD5)unVmkI96~PJe?2yIcUjh}}MH6fSw{jsjP&QeUFjVd;PY zF8A~|5R{&-;VHL9>>@rnSoorhzRn!TiDheA-?(4}R*aB-qjUnZdvmEKMMeM!a;*_E zl57V9SyxV;(|3NI_sdyK*en(?tsv8fr23xci2rAk0j@E6*0RVu>N|M**mM)c>>3z2 z*sU|Ba<5HbY-SzV)iflzdwW6IdY2Du-=ws3H zDheZ*&68WGw?=dsPDtDF`3@2fJ3udza+ZDu+b2ztEe!6mO1`}Lgp9?2*9i2`mPg5@ zy;01u`XL@t*1M1BD8H^`i7@K5Tbti^5R8rJfr$aPvSY{@{$~_x8rvvsBfE|etc&(F zhHlm*>%BzF%-=oG3p+{QkA9i{XAf<;i^EB(OiP;At@~`x16kDUM~IAG{UXzC=I+v! zFxj>Dk+%QJ^u|O96oT}?VCrYmk@YSIC9C8Q;;frNDW0DW#T1Cg#*rCA{)lXU-%Yv4 z7Uqr`i-hTxnQ9M;FU6f5N6o|=FRVKzQH!w7>`v@tV9mu$`V)ahlvIaBR@iI(gXOG8 z6F-fGi2b%ruY6{ASYeD~doL`)31$d9Nb6LO-ue038Kkxly5}C6VNp4hNkTPt=1qln zO6R?=Lyr}mHf)av7F;hA7Yk#i%z5St5 z;N$G|&U}tMWQ&eGGuXbk{R!hsS8R?WqEhI?pI^F~2$IdTMc1@|aT;;?(8EfEht5=t z{V^#gNxFGQfOh%qpKtl=oaV8)s--EiJ>s@dYVnO%-yzfw3k8m5@mq=3kUkz2YlC=d zj%~){K_=)|>2x*Kt~`c{V4U`qFi%+Jns)t@9r#SD_Ifg9v>pVp;%?WNg!{Wla{SChJ^56?9({sqaqQq_-EEUn_*; z^uPcAs)la-P;f+X7@5&Df0)qoN4tviSHmIAB~5{t-n00W$K>@=BG1d?^2B9c%PrVi zrS;>+m;_H_WEh}X(})`5cC9_dA^z!|{`yXjxA)Ig59XjHKD$@Tya;EMD`XT~2O+aY z&Tg#pr z2pDcT3NB!n+I>rAdZ&6!ZgZC9K(gkpVR;}o1fu=t^`6I`gJr!>GkWkQBPjzR(Qjfm zC>f0VGz=OoB~r}61ieo<+Aa&VgT1zNBL~E9?0v7bT$8R(>!ZQ+sF4M5+S52x`El5a z0e=6FxUY<=a_icrr9)6qKP?(XjH?(XjH-gHVxcf+?2e*e#5kH_); zbc}n*+-qGk){JWcD+eUq@amT zv`eudNVkm1`g%80=EsgeL)08JioAe%L>M2TzunhKQ6Saz{pOW>h(cj^I`f zQwGMDf%8uvWKMsNKq>v1M18pcF1d{@CRX@AR7M#6GkY}bWURr@PYh0F`cs*ON ziHu`EpKu<(gSS4hVZ6=^hd{b+=r2h5BK-{+sTf`D3RrB#qFs$2YqxVOuM6H$kaHed zjmG04vosrBhFd~Y7Y(HV(bBrCtRUDE+z7d(py{-{ht(^MQwr^Q6wy2$H9LPi;6K&P z2{^B9BKiP;f}ObR)Nca-_F^sxzjzMXN>s19v&wSXTLr)R$go8XY6`&B$(5%oa4CEJ zy5xAg3D4pSnE(>c`Y`bi&GV7AZ#_7HER!_-jVojaPtxDGBu_4D@sx>d?Z22Frbz-#YcC zqEW?MAJQjl=Eh_4AJNAnJJGbSl!~u@dXGIIB?E9m24oRT5{l8r1>Smcv z`=)JMV;`0VOPRqM8D;9=i!@rLb2(RO!#3htC_-7}(awN5F_169_jR>Zjo%L?{Uhu_ z)y-z;Bk>#fH-M{6Xy(th8XMc0s@Z7 z4Cl3_)Ul$!RF#V0h5%4-!=sy&wE`TPNDMvOKhG$!YMj zW5dMeJFupCl#1-NRDclAoJf&WusVVS@%c455>!FBhOb^sc|e2x#qAPfe}RheCtvfB zM(GnOFC*$>uan(c(XTpU)3^y?5Cw4m!9pK-#-GD~t6dSdLz{}{yqrlIhImL)EpKfi zCdbV(?E96=aTEsV{Se`sNf`p6)z+DFlZ^vX+6$dor+Q9np77>$T@wx^?VhpvZ)u9> zk3zgZ%ta7{5*Qu^X-jRr(qILHfnVc^TcwLaPs_s*`1uF*FZO=!k6gKP>tE_ezC&u> zdo|9yLc)rNWM3c<4i^LDJ~3VI1h;HsAu#>Z5A=or=`Fpf(+SDg78C_~UuqL~_xEv( zidVy`3uCi*>27XrMC?f@{A1S>X5bRI*2! 
zdTMCCWg3fcdEWTzdGUd3)XucU&$_56Y#Z#zT0Cu+dfUt;DqXOq@|8 zK>XDrCQ)QWe5L^8-dVU~qw1`Zi8NMBVH8N=_o%0mouA1BqQ1p`cgg+k4aUkMCgXI- z2vt?fjr*{1A)jrjqk0w=r~^lyz_n!xV}@_t0Ga3X7NU94n^lR$qq*dqcN1@Be#wrv z{Une4fbJA$j`*4qY zsV9WfezBEaEaL!6*M3x#t7kZ!dA&Nq9F(656nii8hD|925v>p{P7{-TYU1^D=d8)e z^G7eQ$$Dj;s2}*wCAm;V(>DET45+WsNyS33XZ(@ndFh&NT!6ZHTifhP0|$UgpuA4% z1Yb6QAUWArbLURG@(gE9sU*cZAU%LR9tFb(5p!M@W>9_HlH3{Du#rrTe$+LD;35M* z)fTzPjJ3!hlZZnFNX(jK@CgA>uY=+CPtq?o-xaBAdGFc!dk9ESwe3PUIk6%}5Lr5c zn)dN(ZG+^A2pT{RXIkW*ek(dYdos@*oOm1=92N4IYe{ZJl#w|8IT{$5{@WKIMy6hq z?quMNt#8UL)RBH+tgg5PzFWA@Ft7km$-Y_NGirlYcf|#-Itc*QhAPujN|BHEp=cA zDO-dCnE!f8R3DI=7?p6~?nc~^tNVfyr6U$;g=}sxjJ3xoJMzaCX5R#;?0MdVbfJ${ z%ccMJC2vIFSzL2qXov9iIjLgvP5iaU)Z2Zs-sqG7m7sR9r!UD7 z%LoCHA_)>s(DM5QFMSqOIY3*f+Hd+AnEZ@lDUjGw^N`R%vt!`!bpbmmOpX{tTULWf zD8LDGsK)?N4SmW3vT{j+EKIPi39acwG22ydI<~X;jM*>Hp(Xq;dmBlHUFbG>x&Eu+ z`?srn))nszs*o_d5Bp&QGchdo;*$MGM7U}kmo}fY3A(6D-T&CMa&xgFL$jY(QG)E> z!PKpxnzLs<%u8w!onFct6G<(!gJ}4Cws4U3&elr%Fmfg&X>BTL(hX*0n%ZT+_FKJ| za%i{5TxExOsPuq6?=fA@Z~23jUHPQmaq#Wk*gd>L`l1X_dA)*1ayR3!V*#St`|1Xw zeu0Jlc|+u#ltV+j#PLbWZ2T_JU{P%&ZOU+2Z~}`aw_}{G9{^=Rgu?Y&Xk*ih{LR;E z%V@sbj<4q_HT1mSVpB^Rs^vEbW<=tOjIfa+gqAcBUO#Z#dIWr zF#b9n(v6P(3B&}8Z;fOS7QyD#rkG7EV-=oE3RGOjojayy(ZQV-Y+%2aIiUO3sOWSH zFvR4jf1lu~O-L-KiJ#Sr&1mz*g+@;!$BHikkLQb8#J%A&p8iuy!^=ZV--#>H(!Bcc zILWsnRPm$57%XTX7~cB;oKcGKI4%WQul+VP>^bDofk*zOsMcnp|INE$eQ*@F#Y4wO z8?sIhO);N9>F6o#jLuUNqczuIX|L|xjy>gB!^F6y!uu>Tu6Uu+rw+!Ll)wn z4YF-~ht&rOBE?;g-#H_wqi!}Tz#0u^)3s;d5%Y?ALqz^qj2B1sewpRf(oC82uhV@7 z7c$+71!hf}Me!zn_@dzntR@X6*=)5zl@&n%him>Vo$&lqUT(Dgk%g-udzG`@pg&${ zR3yViZ@@uvN$KYQZVG!xduZDLV}S`9NPo>i!=r{uAukk2qMh0;m|B?D9~WvAX) z0_oJx+;D^JR>L~L51SS9Wpj6tIX zOSc#OWMfk1soSSmyUX|hZq2dnQG;8&%#K`Y%D?tb4>?)Uib z?@L8TzRsIOMNTkDvnd2>z*^yL*Aoy?B@Kegza4~^XHUrW74@(P-l0IQJ#&%U73wan zUQ=+XZBSkuV`{)Ot5pKhR_ed{&Hu#lSE!>x*M*^48i{ho;RJU0oA7;N1=11~dM6jA!6hS6bBHsK;5`F>Q67HMynN;F3bjZ2q6My-GA&8D9hjlBR zYpCB|#q%tzX?_JI;ok9{m47F4{p!OaI)(*JTi|ZPH878g*}PZr#qe^45#DT+EuzEB zEN6SXOJ!FI5iH62RMrT@D-U8%8Xg-IlMl3kxO7g|HYC?OtBy-$ww1loxXYr``kcm`&Uao3L))t$kAEZU5s$w zR#2cbO^V6RTa)hVf%r%bS)y9YFQi!BStp@+tITNc&|3=KkV}z(CFK3;bOyp_ZFak z7OaH134AMmjZT@j@ynIt73&i+-SbEP7SKKamj8s_mt!upt@cyhYHSDB16EgactrYx zOU!RH4>b-~Il|aUo>+2|InT<@hhIQ~^PlPbE=>b+!vnppkw@3@M(p^K5H`+b%W%b9 z7xs^EE*YdQ5f8S?2&iVX=y4Hztsa6Xyzo$k^PaZp|NcGz zQGjr^8peh}4(xGg+a+ffadHWogja?v?0w!xb>&?~^CnXGyi=Eoh#!Gh&c4L>O3{<^ zqFdLDcU8=5w=#v=TPil4eM^Q6Eu3$-`G0(yhGmW%Pqcj_p@Jdbs01HQz6lRw!UwaN zz5)}tFxpGOd;W7Tzw&U&Q+U>@BY&43r>$wZPe`lR9d}}E465hxW|27v&A#D=B6oN_ z^1i?yjKbI%&7?X|?_d6eAyU+2AL_lG@A7&HPMXnl)YXgW&qA~R9egV%9IH#i=C+)B zxpwS-*M}5(ou7jb@zM%ARyej8qj!S(gKPVY#@lXC6}35yP+66K;n;7;YK}H}zL{h6 ztS3W(A6CF#w9d!S1-2q!kv`__evoE`a*2$?VAV~oG~d#C$dY!LZI#Pvo8$%2szewU z?6sPj?CIX4#ge7|Bx>X=46vQaPnDYlH-J77$TQ;A2R8=Dj{G~uK@6uNyl)BmybFjz z$XC88N-lN8XB6Pf5AJimU)3IkW$trP6RO9%Qew9SKtV{VQ};TkpW|Jjveruu1xVh{ zDZq}HLG0z{Ym`^oyaM$E^6PvBwHG1v^iJfM57$_hobZj~C3A`xqxLMA!**BoAHuX( zHNb$L#cx8vaApa7(nGYP;LcQ3IGQk#G8PAbGld~58LlJnXOq;+3*jU9l_v*fD_kB#8EcN(MS&QYO4E-I|CSud@)0@CmmCgZ)0Raa z5la1w$9raWw24R+uzwc6yj+0DaRw;JP(|bJJFR4-_`UO07tD&}sr}!wkVRtyqTYJW z6g3|zpf}@ zOO2uEVRT9M{!imMmvo+P^fC+h0mEUKOg3-q}7i- z7F3Rq3v93OLoS4wp0$Oj;56B8XOF8c$S~&Tk6vzQqrYx5Iwk9l-g-wy&U;SCY_X38 z{igeR4t;3v7?1m!2_&ZG2@Wg#EZnFDPYDbB&U$u7L=m0w^y_J8X)KH}b=MJhY zjJ^X=?W1)*iwp1NDcxE)XRC2FUTJOyNFSlU@)hiitITCol-}ygtG@d}Yz%-JE3TZ4 z)G$4tHysOt%y!Q*t>=_g*zD)bUz~maK<8SE%5uG_Y?$JRc zp_j2v&!DUFZ9L}t@wy7^O@d?f>C{Khfsw_!LJIuQEdnG-Et)@@eYetJGvjjzDiUYe zm*I^K>2l;|9n{Cz9xldCm1--Cum9-Jiy!6DHeB&}Z}EGH>Q93^X(W`l(kOMqKI@32 
zju|I>`-DN6+<;$%hI=Ou+*PQlk=dSvabsAC?FyIAs9IXT^Ct%1igYBv}; zo+Dyw3+f{yFvX+*sFYv)3_NREDp)ERS%bEY;k{R{lGgn$5O8iSxu3AN+ z)ZtELWBXpn?DLo1v!_&i{lIX;Sk%;p_2oT$1%bu*3 zY3B@^H6}LI@mTS;>Xv*liJ8bWc{3nsgLd@SB8k{_bLxtfD7i3ik9=rhGOIRqz>jDI z$3{p-dazEtL-P~6 z5==hZ`}Wou3ILQ?o+(7o+0la}vwmq~rW#@R>5|!N-bNQCE=l%Xx)7c}mhiy#f2kYyKt&VQ2l_rbck<75@%G5HS!RhUWEyGQyVS z9a&PvmexXhx0N+art?+SBw9h=X_8S(pgC0ZueZR$CSn^Ouh&l+(yr7xm45D5fJXQg z3Rt4z{A1kuR-0M~O%N?%{6eP-hJvRnn0`p#X=C_tIJQn*+V3HUiR#x9Te#|xge^fo zdHO1$fs4@)1?#XvBC!;~^GtR2VM;lR#+_X-DOa}c~bPu`9ylNsQ(&QYn z87~k67Qg6=bnqT8Ykfl~EkA#iWGktW^`mXyO%_v8DHzcQ^#p2&TB{xL=;j2$lj;$=q5>rV5i z41+7pv3ybxL7328GuTh}S$rA%+}G2k#9FD&(c1)5x(Wa2?h=eIHwoZWEe?;?2yF4^ z+k}RX@sUkLkCZzySd}}VZ>H(Lknt%L2@q)0rz1cF5rm09HoLr%;AIyTlKeJQGn5sL zXWou&d@MqK$OO1o<0)ifGyCKDG~xjHM!sjulGgjEeUD{e+ZLlf=?0eJ!)+q z7~S#nVdL03VuxOuXSn7ZI|yKqmU10Au^Vmb5=ciyBf---v_>TqofIb0>q-%-YJ z+mv}BDHqckhrXG7^vOP=eLknV{X;hnl1+KD?h>eGD7B*9(=z@rFJDaVi<1)>T~}EU zqq`*e$}w%Gig_ne`IC>U(_57tn36@~k1P|GfpYsQ zcRZkgrsh=(iLcW=0+a%2v7IP*XadwSgE2Z9yaY*&Z5e`HTKH!vklFQEcVQV&|^yVqx_$6S)A`gJZ~-TFUmFaNiXp8eR%>(=Sa^Bv(n zgnb!NV*9N`shZ9^(D8hy^SU^apJ^~xF|#p5?N2%U7jyroS&f}Mm&4&yOHK`3MnRF1$Lbz*>-8URp9F{QX~%hl1a+z+J;VnSZ4O&7Bd05p zIx%W7l)jb$(?7)kMaVDVN6kgv8&Ts3C{d~gV0?kuvQk(S(N$bxz!<=|DsPS!kCg8* zChuJ6dgAFS0csOVdg6!W9cMMNB0UPYV%(i{Zf_GW!J5qYycK=K@$(5Og6n$&*s}6} z*uhz2ZF?}e`Pjt#w$hLwf=Mm&1uKUYe&F{^+L}`lHvFj*452^}%$5hgO%gv++V5*X zs}WuhGVw)O_hx}PNPaQ|mCZSQtM^-2=9?t=8hBZAtM=OIr0m;lzLPI*^jb zl zuht1I=*Q^CXWdjYb4&Y<`+ah7TW!$+43&Gw*I7q_;Kcnse+M6Fa%u z`u$3%EFD3)cRTEa0A_j44FEFj_vJ9%f$J450->ANjSMdlu%NRQ()YgH|FC`=jtPF# zMZn@Uph4L~#dXVZe8(bYoeD(Xk{MHBu<)Bp=2mOsiVn8wzT!Lxdwn%5SeD?>ool*` z$N5@AeMr^}IBxTonAS7z=ZKg>_w8>x(k>SYiMox6LC+b zjk^GK^Z#<%B`YnBvX^~4{NIY#04Qd+#8;2W?>sH__spXD(R-w@sjV4j3R&6T%E-Xi zvUWYg>E%&K_Q4f z={AM?e%1}aTBomG8T?@cdVHs}b=%nJ`#Gl(kzw%t7#;;$A2MY|F>ytgrks7c$n*d7 z@{leEx4WX7+is7I^}hM%YqlyCvs>wUt8cWUA3B2&KJtXiRp=C@tL0w+pn5tNrb6R+ zSPK%ZBGB}%wfQ=uW$uLKSZk!i84D%VB|$vQ90STnNZkarQ&Fcy*{_r$)XN>h+9j1( zBtLOf&6vYueOm_w)pcgA08nP9l30yi9qFQ%JvpIn9yEg1%5Rn{MoExG?8m03PX0gb z_KOowxzzqULGQrxz%+ANKEY6K+}*|ETV*$SXIu_JN)LJ!B zeJW1As|*@Lrmpi<9)EP;K|oyvb2bxs2p5PigxoM}jqO1>DH?f+wfpe6NsTF$zNf(B zj6Sp9SqA}gm9%wZZ?|HKfpydofttz)HeSXkE*N@{=f$wcY!2Dx+ z|5K~0eM0@^+1p@K)j86)*VwPEeyHPl8Zvz$U0Y<2J%}hYc~;h!$FBw40;}O}v_Ano z4ocpMWc*^UPtxqxW0Qk$8;PLW)-<=(aiTU7@wERMA6`KdJoFM9>Ze=0Z`z^XRWjIY zYDx8C+bUhSBUF7J-*XB}E_9 z7PN*JN8khKTu#(dBYa|S;TC%-sNQ85v`^BeE8m102f9hV1?nb%kjP!X$KSAOI-Esg zwFbSiH{_gssftm0yFV1XFG2^ztsX^QPTdK0i#8=wc3+r{G!#ktd8w`yue(-v?{a6k zO{FoN%m|Vma*zQ~Hq)mg-J623;^OSrY6)NCYmY_9na$>CT~7cX8pp}Bo)7%x-nQsc ztT+=OKp=pdhM9CafB8N=MEN)(x2jT>5sW8U8mXrwF;!=fR}cV|^m8nth3W0qeTwJr zS{Nm|=3uU9b>Wi=ZAOy-8g_MfNU4n<^sa<@+R|So6A+$`gFP4taq-pjuqo_L4uKB7;`!F zQcpgxZ}~3;hNThB3B)7^ur{Tk{j~172pZAScv2NcxmvKjE3G-e4gm9Au@dV0Lx7{&(k>{h4t)2#o4~EnN z`;yVtrgdRh(BLuSWcjv)Iy1(Ld!vDB5AQ8*0b3X0c&h=o8^^hA;Q-DVClJE=k6ABK!5OmN%K+xXVIf~P3JB% zKa&t&$|DnbcG#=Jl~{UAP?VCWJ+ z8xHG0?mM-A71zgi>_&N;Ag*V6xS{N5HQM4b#|L8UR5j#t*S1?__R!G^)dS~1&NkFY zeK;bnL3nXH9?)TPsT!E&7rIC!0f0g+aGQj`$qSi^COv`WAbO8_vydu66h-F*j@#Lv z^j;gp=&@?#Os7S=jTefHpSJqVcX#rRS&axd52i0}72z!RXiHk0=qY<@JHVW*o8m{m z)DqRk`Zay1gedXOM<^jE`NJVGW%=U<(4UkljU6~xKfR#8Rc z?&UOKTjx5%%5rCQX!Z2ovH_qjg$@GxlFZfgQOcac(z?5ipETy#u;c9swD@!TBSS!) 
z{jU}mz<1&pBA9!s^J6o?1uOIUCNHck%}%dv(RB4{3@zC>A#KkfG9w%*_f=V_Bq>_d{}{%x&3KfbVoPlhoo|NVW~0a z_ije4F?MVEEGddWz8ty&nPS7b@L(#TU5q$Gtah}PRb~%u#HQ3*?$ymMbPz#U>!Amc zV=$KY7_Oy50f$<8sLMN>CH|>n7#*2Xsi0Way-7<`PE+D7aInQH#vbZxNaEtEdJTST zFP)%1=h1T2_i$=7r$_^dRb>!8RP8j8^eX+Y$uJ)h-&e!{Tb4CpJ&z&ht5$|%ZqI|D zngyObIvD5J&%pI>hr|S7wY9|_G|6bvY7OdJUz^ta&Y-d%G!SoAXeRnWG(@$hVz+V9 z&>vGUJs5g92>ogKlATM!EbQb3zI4m4Xt*B8iH&bcDcAkAp8LA6=6JW5>DJ1)fbXXS z!bGu{%{~ydMO-L7+(HSvPa(+=6xmQS*k1uq$72>h(q7H*BzzV+wHn^07G$^Ber!+s z4c|;~^n0H4|5vA8{E)A!U2xMy#TlcRE!X~}64Y;|qbu7@sy~dv2WW%}7k&*0g7*(U zM+N3=x!fMPKECtxF2V`G_#k>N9pk>Y9E$X zPf)etNIW{vpO@;O&NCG7IEJ?boL#B)D8~W!Gz4Jk^+)Y9y|NVWJmJ=qojXbDyvLDH z5hnT2+=h|1jSZp})WaD^Syb~D9jgFNZr=3ac=&}A#W$GWtaZ=|hEda0wxo+1cs3aU zGYL_84eqyoo*!!{2(HT@>!zZq?VZ09s+uQSw!G34ZIJ*`3+m%8i_oM-KDZy7;$&aJ zNru+41?9GvTGE5PsZb+W6Ig1|Xty3F83bm=z*6GA=UqX#?sN0`uMJ6h zFc3g(Z*O>W-?wz2Ew*lnB@37x1*DR3mhfB@xr}cY@gH519APR$r~!k}sbKrQgu21yq@@i8 zPQIR`B7q0%Y~9!lj>PG)8*(_vXFUBUBGQ*9B^ovgyBCL?b3=?Yh&P(Rt2RTkZ57KR z^bgnihoeclx-JOEgW)8+RR77H{Fij>f6BDJT+AkDI#mNlvG-#pWSFek)!UsUv;9)y zZIsn5Hf2j&^7fp8oA%d<*pYTU3JMz(MODw7lTU>2 zxJBIlZqjZenF@e94CpJCMK4eZ$kbX7DE+pFnlm^Q^f3@QV>z2aF2$7}MDx^?DUP!v zE`UPx_(d8qCu`56J9eBV|5t>9C_neIzo|p^><#&-@0t^ku7>)76 z8RYP%+%qHgyOX96=U}1k4$YNUe$lVr#}(GM^vCq?W}J(JSG4 zm&QKqb^1m8>gF^2R{;2tZD@$gg$H=^>;~6c*HXSTT>c2pL4F`qS$9@^)%!3=9ZMiZ zI#_-QjpUalS_f4+ZXyL#NPSAie@^FcLi9mgefzar_`yb7M{RTOwLemwLynLiIU5u* zV6=|<`JUnE4&#Rd5h<-ql3@j$=ZJ^jHIYm{DVoc*Lu^B~ zR<~&Ce-?lJC#LAW%h)@n&OAx7-2&AEQ=zMRqJA<$71Rt7L++Nvk=#GU$Cp=FT6esd z)lgc9vP7Yo(K+ZofVsODUJbcgo2@5jVFEU(F1YQ?Y4Mv&fKy5uS6sf_UXr3<8ZcZ( zp2Aci=^8T3ALTqTTG4Mjtq?$r4BOz;zvT!ZuUIW4ph0DR4CMbXqBhXD@e6i>c8%rd zB$@S-u5ey3y5B#!1jC){iR<#hvqt?f!k@$Zg3_Fk3xRkk5BDicM4dit&k|H+LJXAO zLHT##fUTt-hXW%s^PgNSRQ-gFOcMy91=;fem8+e*Q3znUF)P~57bn2Jb%+q=8 zdeE^rFl0k2`q3dpUP>qqXT^PFyc+=jZ&DM-W|$Id8oAn0fwm^ZJx zTrjKhJ8a7WX*pcv0k7VG#eYaMv$frCuu&i0WEoGdY?QV>mfB34 z5$7?2f|c*l7tbH9R`}j|?;pVT3nBZHxC%6|#CG`|eWu=$Q75zP+*yJ8bLTG(UfL6M z-)TJwE>inST(F~Qzv4Jo(z1!yYl6qQ3~ls9N01A5PIzBzSq^}@-Hnr*v96gL$b}}s zVoWP66Th=7X);$e0{OS%(7z>q>k=){zmgexdGn5Y)T&DkpI9Rajl z{3TCOZ>ijBRDe&?juFu!>C_dDX&G4|uL;66$+Pu7u5gA5RbyOU_@8Iai$x`@!h=Cc#4_7t}hwXdDUgKJmCWX4gPQz_JDI*WD%FrRmGuDGB}P zd99*obEzYgWd1!r*E~F%raD16+tzk-7sZaZ#UB< zPq%}Vya?Nm91~$-&_S=~#)x zE6eHkg0F@q1d(UsVVVwyzuo=haT9z%4X(3iPt*H94q$t<42E?@5{PpBtGl#GB_wHJ zQ=8n))K!r*QvoK%^sUBr? z`(7!r#}P#DjPNza_Z6k$GKd+eUp0n8%*XFddf_W1sjg){%cEra9sadEa&I^-JlVCn zgiE=bBidP;uy3kcfh%sCALg$FXexFI__~lPjDe#as6NDKC{re{yvirP-<}KuZc(rW=jSC&)r^H9pC) z9HE`~$$lVI#gJ>4J`<$!u!Anqm z049g={v>TB!fJMi3fGJ}XIH@;`sO%7s^@&L`FZ0My9)XI0NnyG->#~jnBIu<1(IL+ z6rGq~<4Zf_b}EPN4|#hdn#`bPRZ>s|uw<~32Or>NzD|yMo6o*~i{P1S3~4{wx(KMY z_z}|ia8~QT9z9_K92Il8uI{%U`Vz)Ss%_h1ci$UcO<&cIoZn$LFLK~vYz3mO3^?RQ z9wMKwCNRr}q_Q^JU`uLcu2-vyMNSawzq75!d^N9H75mnD7ywl?9o8>yNg4NQP+^C! 
zQWdv!A$b_i+XZ*AI#v)R%v}q_;AS9^w`r3C9p7iMdBoaW*lk4a1M{W9l(R#N{H}~q z(T6pZcm;0E5;mZ+k|jJem8RooNRKZmu9Oan?%2KAN-czaz}{C5K-x0reOA_g8>f2y z=;g{f&~7LPXMZ(!n0N2eGsw~Q!ME5|!+4ZE!{)MJJeIU&nFf7D;mukhuu-l#$@l(^ z$kN-zppa1c=&f{emWwAoMp*-Ny|X|{u2~R6pn>+RrUYLwE^B*1dPDheB!O`Ihy0R- zPlbe;i>aP;#kTkR+~Qb6kih9?KiVtHRu=DM2kXr9Qo1AhKue$DtHzb`(86&^k~K;Y z1sLL#Jodxp)_U(!+Y*+8oY__NZG|=z5aD+uaN*8pJQvg{hVZR18kq9f67LskhyQ53 z<$21oO(8HdgUzvcMf(tB86pyLZ}#RJh)^9ey9O%~<_XYKp81qY|Esl;zZ{NJn{Kg4 zD&#C@=F1OE4XSD;-(DpEl-&i)ssX+umyioFdwej4MGM84-vttqb3*bX!r)gm4iKR_ zR7-g8H%v$aBLbO)d~Qgc{|+9PRITwda1~fiLO@sl5tbPh&Z1|l&8i03q7M*)Q~MYb0T@w1 z#rTO|(+`K>G`+V;9k>p z^trlgqbQZjIAVyGi9@vN>CWMC>H`{3$?-1Ii~i6!9H(?-TeV$Sy4wq8PSg4B z3TiaW>&Ar$8bk&LS&4F){Bemder}IVP9Ggq#LF#@nukSrxNAeRAiwS=iR#-(rMo6( zXu+!7pyA@zXoUqp{o5BH9_F~d1V?ATwt|zH`Ft4#COJ(KEq592>r8*uGdAX?sF?&?eLs!8Y_?k zR!i!2&*tg20FhOZw_2}Gv?=ROji-F1^v%Cod-CJTS%G*06O;Z(N+jl4#9%}14Bp5b zZ?>~2!aBnn^h~cTT&xmTSqDGjj+N*h0HCylOa^JZR1!NrSmO;{kGu)SP&mrmNK0ee|%+UZWY@txl=G7n|e|C1Ih>08opH zb~PJzUFU`d{(|6`Qc@ACA1ex}f(t&LRj+d*#l8hGE}MK!^;_GJUk=vPz(~$@3p|lR zp>48PLHurj_0aWa5O-W1#giCM2e1bIZ}a0Ip1`Ego>##;Rks|^a`@14Yen8c6#OtB zx!r7!dgrKVd!Yzr=lBdpqCvn| zgbHfso^I854bawx#i&hQN?+D-jK?mUx5&3(-WQUH;o*Bvm$v4$ zZL-2>kuufAc4A-DQKk#1d|wx$|0}22$T1OOIW=-* zwk8vwSEyvMNp)}bisyei+p|Vy^)Qsid8CHVW1(9TYJ`KZXM;|z1P)mF0-L{{_v!gC z?_fTzc4k(F*gIXXSbeE*6V^bAd@Nxi9-APAH5YK{{&V6l-kdkp9VaCD-M|aCiiznH zZ6 z5$PtkAFbLgzAr)~@xNwZ{7(xs{%u>wpQGEWLHLu<{PRzIa4fi4Paz8j+0t zLR>$M(F)rW?^D1Qg(f+o_q65Fqb72#R$L>;`YPLusq)81t3iPn{~{|s@7FahkgN*J z8rF3ZK-ac9pg&~k?2L(&>Ig^Vn$#qVL8X(^R0YUxJWVl~cL9}berB&lVP9AnZiltU zLB28cA|iXS!=z~~K7Yi4#gfj~Q#6OlEe7@gI6@x7xb%TUtYJ+bO0v`9R0bR_bt9j6 zjke7Q-d|znZWSCvvo(|yip@>ZcNvK%OwQ)vWq6UHRU0qtHb)V1i8 zYq||hpk4jHYy_BX!jBlyCe0lg$5fe9VJLkOEk-T5AY>8UN*U2+}om~pD)T&E1& z!S2J=cU@i3d>m~)+&}gdTeX0%|H{TH5G%C}PoS2}21KL)7~8=!O3vOt-nhzf$i|gc znwy!>qSRn;B#5PJYAHt$;4hYz|28ZJMq$lcmVT=W3Z|6=uPeoj<$c%wF-Xm{*w}BjgT_+<+KVC~C>75OVph zvaiPV?Asiaw0TbIK5w^Pp=Q~Aaoc9|HX8(!5mI2_1M|^Mg42Ee=;hDNMPWrx$qcj( z2x?Me_m3uwAMrCv-r8i*7knlFKw&fs{@4=VP7~)n&hCL=`2CccXx4=Oi6D!F zCZPIi#u~(nS|&U3H*p5+gnr(d5<3!nq?5vih!yR%wtv_L4!PNs5d;CcaIBa?< z_G?y)uq6q?B>mRnbv)<0Q=k0v1SuEzHzs(iI%?u(ORrnNviWVZoukAZ2U&n+1yb2<)rqF z!S)ACO1w5>n!j9m``&1bqn_hmD|)OfUgqI`L0&aHH6-5`Mwy@3`W$QZ-f!hK|8ES( zJ0Z4uhcE6wVxWP|^CPx_+{u|7$yue`r(}Rz0r{czleVMSoc>s#zf{1|)amEF3Xg?c;k2>q_4K_xW#YnIH zq!t=nm6H;yYM5|^t+Ic|GpKO#-Mr|tSicRT%$Q1*hD@5whSoa~-<@&>x08gI9a35% z*4RX-V+RsW0hS7oS&c+`OwhcxA;Xa*TF2X1nhM|b!CYB0g|v5Qf-1m>PncP|z|-6j zq>y)p>KVGLek(r!94ic)0jgBilhdAU6z0M6@*VItxg2)U?C#gdQ+@ad8>|v@0@?@d zU01y9AXTC>80);KpKr>L`0NatVpLkLYdIvb+D$kC0xNCp!rA&7OcbKIa}*WcG$RcZzb+-tcpP{R{w36+_E{k3f!zbrd=j~K9F z1E9;ef{g22i@bLyb4ZVETA;f(LL0+B`pF=#ty|Rs1^5~$E+-X%$2V}O9fXNU`*UE2 zP>ae`YMO1Wd!7R!vk}DG{N;L&M4e0}t4Fc44-+ia(|LuP0yV&=Ru7^2M9a)gymI5v z6K?+MAGV|a%PF*9Ny6%Ag()o*fRDztUh6X&L^}lT zL-V>iDqDT*IUSQx{X~Q+zy~`ofiFH_*Ps8^S&%Q()Bwa_YU?6|e&De}Sa`#sFu8!sgRe<^;6ZhL|MOKtsr1%_}6!&dfU7JUoyA)3j}*Go`DFG6THGn%%`PN(5c{qFB;eCG5)6GfM2!_8QEymM&SK@$l^t z4~aGXPON*O_j+JHNm=Y!*LM$OLOr4xtqio0nMM0%+;AGjV!(HF0u*&KPMD)nJ@zzR zLzP)+H7tcWb)E{&)#XU#yS9wz_I%SS*pN&Ld3uhPuO2_hjdLM{W=?w-=_~pkn__L~ z>{_u`fctc&yKYi&%5JfpiM33zJ<+rvHPn zV}9B>e7L#|)d~L9IVZwpDjafI0k`#qcDE7)TBRwq@uu{;L_N6w34^(aLn&RAkFYWf z&|>e+izbmJR-N&)5?b7>eWk-h_o_jl{WGs-#pb251&T3c4RabhsBlR2oEz=9i@wk*n-VlK7?U-Vjnc;Wjv^^OMOKg{AptpAKN0M{p z=6+x+7X6}(#}m#{d=cid(wXw;fkCd&xGwO}-QZreR+n3)wLGW%a+^_E|q=e7{&8E1FcM$81b{Ge?_#Cro8H z(E9%cP_N9(E1RQg^SEzSz-}!gqKHeluTbbji70_ zWr&>q=(eesV43ZRXW|q?wQsx0TEBAPS3Bru#2&u)gTPTVyfZ^qdyknm|LJ&!vy>A@ 
z^YI%gdO5wd>Jzh^?C{Qt`2ndE%U?)fP;h-2)X-CHPE=^!tBu`eZX1Xv17{j~0h=V@0^0 z5^IAWYVJfRWf~~XarJP;H&vG5n0B~cPHym7S=_4vu=F6&MmMXo^p#zJ$pfi z4DifWsdUE}MLliJ3Y||vI?vqWZ6*jVc~ccSMi4lb6Tzf9A)Tyfp7Es(@#afop-5lp z9Pl)7BJg1Aw2OiX-I6=y=m=_d~aG zvYflT)7eqf!1mAN^F2-n*G&iTS#SB3j<`w2$kWRC(LVc7_39DdG&ufeK#Sv@nLl;} z+xw#*CPl<=NLrea*wiG};_9DgvwU4-W{RQ1sDZvef5RtDd?MQV{ErE+BisL21++uM zu6QW2VbfBp?}ty6LS7d+TiZrVWH7prCT=LCp}v(VD`ELF$*A~;lXSjUWE zF`q;^Ko$3*3qOxDTB=jrZ=EpdhbwXfN@hIQ-{yPg$0Q04(9{iiFO5Ns<8CssPoM56 zB6ln2NwaK+npfP${-Id;fFmz)Ev$mOf&tN>C26~e=7 zL#?@y9(Ft%jfLygBID#|`Kd&K-*f`=huU>rP)yViDts>!=!trsX<30*b-ouRxk+0* zA|lb5o;+Rc=G`ZJA7-$-v~Yz?RsT>NKqR?**N%sIW+rj)maB%}1YfR?e_(M*oFRAHjz@}vj;_WWUlMJnO}J1~IZpaG^M1y0i8QG<*y zx3PSx>7f6eJqf`6?!jhd;wU?ccjj)mR1Nno;8YmAf)mEb+MZ$QG$249YuFZbPCNl) zljCDaNT(Kj8x8|hHDh5-K$ajU$uis()vfaGtf{g;xOu3bHsJCMpS=a5sDa%9;@Jb) z$7Z;nST@*~`2-7YOj<9G-Oc8$WD$`v#NuAt-zR)68c>fcy{apu!xx1Q)xAivtYaX_ zq7Zl1C2VL9Bw~Bpcw3DDMI+q9_nH}0`(e!I6*U zYVTXiFfr$|>%aSbOxVv)={)rbHQF4E6TK4{InVin6uU1LEr!H` zG}_b{ol2Qg8pBjHSdk_1L#>?VG{|KTQM(uD6)T!l`mMUMc|$Af>CA+)+V!rqihy zYkI0UATqlXABl(F%>(O=`d~q&fTC~iM`-j2g4ofPsi}$F{7#770)%2`T8b(5fl>An zfNA?s?l!*wM>TaM*%Ut}Ls4$n%h9z^BC?!juThVk+`{*TL;414?dbjuq$v94K~1*7 z62}+D1iC%a_@z5HQkdAqEG4mum_i-*MfQt{0phg2&xbmtV1S~|c|#sDA0bRr6%1*_ z^OTZ9z0?+`U{BdX#EOd24ys*=LrRIY9=LR%W`?Dw7->dw^OAmx8M%1kqX3Mjq6W1B z*iA4CVa5hSt&32BJyt z-aF$)VqZq|_t+K7u{RK$l`Yq8l@nF2QC?6CO ziHjLvTDS_`uQ>DD@3X@gwodZnz+ybD;n}&HSwa;YFGW`qr3EY}ic@Q7Q9RAV&Ruv8Gih)v^>Z$I z$vZqa6zf#c2DJ%*a= z-=>ug((oFJ${Zs4-0>uUMf(OpUd0i`J!HP|XGV*Tc|Xda18dTYWyM_w0)G1E=c30u z@`Tv(Pdrc2P4peSi9NO+;<(HX12}rwO0rR=QJZ*EsmrHdR&yJdS#)5|0sT&m++nZ$ zQC;nGd|v_(8DqZ{J4vG>!;ZtxV5cDCc?+OOrAyAN|t1qi&zkA!3SX~%<@Z4WC({#yt+1h!qrD6BZPQZWfs5=4?A?y^vb4X<`bJdwEc!i@#*NHMP*ja_!szF`>5{WIu$MV8x=|o zwtQv_tX!O#4i2LrgV)jqg%OWIVu(ZU8NAQ%PC!8iNkc#gf=pk&*9i#3_(xKL~(sNV(P4>Uk#7ZTHq@KH$2B zm!DNnI3$mXOccJWC8<&cc`N&^=*)rH5O!4dqpPy1I@gBr`}xLcGSjEv^zrgI-YQ%0 zY*;*VU-&oY9P=-8iw`BDpRQ7RE2{e54fI4(ZX>+99bV=`BwdIFM*1YoZ%ahtO~D_q z=|=IOux?7!nw-ia3pUV2(QyQiRKMU`HJg^j`Y52xbi z%=96aclKb*Jkc+78H{O3Oqw+j>PTvGvc0J!0-wU@&3Q!AJwc8?_G~?m@koO0Pkwo{ z)%HUVvn8=Z;zMrYkVfW1@KKYGt4NfUr8QTVH-;BdPcdWU)^A$i%;OScv2xo}{eYEf ztb30&u?P9dnrmSx%Gcqk@5uOFWL^(p>edIHSm2J)?ya|Pade!UMTQTREALPLA}#v; z7dHasjU{>+b1b-iwr;6DK<@F$Y5Wgv`e55cJP}?1o-1 zbsOcf=8>Qloxuk@C!!H1J#&;wjO(QskU9#-G+M0zWx+Wi2Z_l=6a~2Y81845o3zq@ z+t8IHJ`T5f&>NOmTT;YUS|ir$p9{;+qlZIS0ZmAjo39}u^8(R$2Be^mCs8E_bR_MJ z)cm?x6%PY*-0SB5|3_5t{|4-82PBZ;x+?Z;NL9ZIaxdma?uh@tsNO<63qe=fvba4S zRhUb6JH7k*N|(Uog5cjR@p^rO=I=IvmDf1=z7Dm?Yba^TWqKx(L95om^HohE?n;e2 z?GV1BjGLr^_IKX^fF=aiv~?sO$ro6MG}c$?_w~dPUya^x^aaKe^j@-MeEiS9K}fFo zw{lEQXC3A0kIt+!MW~pKWJ1gaw4W2WfgbeLM2Pq;>IiWC8{Y^CpGY%+EoL|~!$YBr z59C3&E2`nD;d{o8E@`PjYDQsrPJb(hu=s>5#*4>a$W`jB@Al40?CB!z+I_Tue9*)& V3ArEafJTFa^6EoE{wI_h?O!-)7>WP@ literal 0 HcmV?d00001 diff --git a/utils/frame/remote-externalities/test_data/proxy_test.top b/utils/frame/remote-externalities/test_data/proxy_test.top deleted file mode 100644 index 548ce9cdba4f157e3ff0018d314f3fecfbed9f8f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 39 vcmZQ+kl?)D>{e98_qB(Af%W>u%I&?*zKN<^Se*X}{?*hKULwHEz`y_iDLxHx diff --git a/utils/frame/rpc/client/src/lib.rs b/utils/frame/rpc/client/src/lib.rs index a211fc6c6983e..a6f73ba6784b2 100644 --- a/utils/frame/rpc/client/src/lib.rs +++ b/utils/frame/rpc/client/src/lib.rs @@ -46,6 +46,7 @@ pub use jsonrpsee::{ core::{ client::{ClientT, Subscription, SubscriptionClientT}, params::BatchRequestBuilder, + Error, RpcResult, }, rpc_params, ws_client::{WsClient, WsClientBuilder}, 
diff --git a/utils/frame/try-runtime/cli/Cargo.toml b/utils/frame/try-runtime/cli/Cargo.toml index 2b095fc9419b9..d6f211392c6cf 100644 --- a/utils/frame/try-runtime/cli/Cargo.toml +++ b/utils/frame/try-runtime/cli/Cargo.toml @@ -12,11 +12,6 @@ description = "Cli command runtime testing and dry-running" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -clap = { version = "4.0.9", features = ["derive"] } -log = "0.4.17" -parity-scale-codec = "3.0.0" -serde = "1.0.136" -zstd = { version = "0.11.2", default-features = false } remote-externalities = { version = "0.10.0-dev", path = "../../remote-externalities", package = "frame-remote-externalities" } sc-chain-spec = { version = "4.0.0-dev", path = "../../../../client/chain-spec" } sc-cli = { version = "0.10.0-dev", path = "../../../../client/cli" } @@ -27,16 +22,27 @@ sp-externalities = { version = "0.13.0", path = "../../../../primitives/external sp-io = { version = "7.0.0", path = "../../../../primitives/io" } sp-keystore = { version = "0.13.0", path = "../../../../primitives/keystore" } sp-runtime = { version = "7.0.0", path = "../../../../primitives/runtime" } +sp-rpc = { version = "6.0.0", path = "../../../../primitives/rpc" } sp-state-machine = { version = "0.13.0", path = "../../../../primitives/state-machine" } sp-version = { version = "5.0.0", path = "../../../../primitives/version" } +sp-debug-derive = { path = "../../../../primitives/debug-derive" } +sp-api = { path = "../../../../primitives/api" } sp-weights = { version = "4.0.0", path = "../../../../primitives/weights" } frame-try-runtime = { optional = true, path = "../../../../frame/try-runtime" } substrate-rpc-client = { path = "../../rpc/client" } +parity-scale-codec = "3.0.0" +hex = "0.4.3" +clap = { version = "4.0.9", features = ["derive"] } +log = "0.4.17" +serde = "1.0.136" +zstd = { version = "0.11.2", default-features = false } + [dev-dependencies] tokio = "1.22.0" [features] try-runtime = [ + "sp-debug-derive/force-debug", "frame-try-runtime/try-runtime", ] diff --git a/utils/frame/try-runtime/cli/src/commands/create_snapshot.rs b/utils/frame/try-runtime/cli/src/commands/create_snapshot.rs new file mode 100644 index 0000000000000..ef39c3d9846ce --- /dev/null +++ b/utils/frame/try-runtime/cli/src/commands/create_snapshot.rs @@ -0,0 +1,78 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{build_executor, LiveState, SharedParams, State, LOG_TARGET}; +use sc_executor::sp_wasm_interface::HostFunctions; +use sp_runtime::traits::{Block as BlockT, NumberFor}; +use std::{fmt::Debug, str::FromStr}; +use substrate_rpc_client::{ws_client, StateApi}; + +/// Configurations of the [`crate::Command::CreateSnapshot`]. +#[derive(Debug, Clone, clap::Parser)] +pub struct CreateSnapshotCmd { + /// The source of the snapshot. Must be a remote node. + #[clap(flatten)] + pub from: LiveState, + + /// The snapshot path to write to. 
+	///
+	/// If not provided `<spec-name>-<spec-version>@<block-hash>.snap` will be used.
+	pub snapshot_path: Option<String>,
+}
+
+/// Inner command for `Command::CreateSnapshot`.
+pub(crate) async fn create_snapshot<Block, HostFns>(
+	shared: SharedParams,
+	command: CreateSnapshotCmd,
+) -> sc_cli::Result<()>
+where
+	Block: BlockT + serde::de::DeserializeOwned,
+	Block::Hash: FromStr + serde::de::DeserializeOwned,
+	Block::Header: serde::de::DeserializeOwned,
+	<Block::Hash as FromStr>::Err: Debug,
+	NumberFor<Block>: FromStr,
+	<NumberFor<Block> as FromStr>::Err: Debug,
+	HostFns: HostFunctions,
+{
+	let snapshot_path = command.snapshot_path;
+	if !matches!(shared.runtime, crate::Runtime::Existing) {
+		return Err("creating a snapshot is only possible with --runtime existing.".into())
+	}
+
+	let path = match snapshot_path {
+		Some(path) => path,
+		None => {
+			let rpc = ws_client(&command.from.uri).await.unwrap();
+			let remote_spec = StateApi::<Block::Hash>::runtime_version(&rpc, None).await.unwrap();
+			let path_str = format!(
+				"{}-{}@{}.snap",
+				remote_spec.spec_name.to_lowercase(),
+				remote_spec.spec_version,
+				command.from.at.clone().unwrap_or("latest".to_owned())
+			);
+			log::info!(target: LOG_TARGET, "snapshot path not provided (-s), using '{}'", path_str);
+			path_str.into()
+		},
+	};
+
+	let executor = build_executor::<HostFns>(&shared);
+	let _ = State::Live(command.from)
+		.into_ext::<Block, HostFns>(&shared, &executor, Some(path.into()))
+		.await?;
+
+	Ok(())
+}
diff --git a/utils/frame/try-runtime/cli/src/commands/execute_block.rs b/utils/frame/try-runtime/cli/src/commands/execute_block.rs
index 56d88b9cb8919..80d34002fa771 100644
--- a/utils/frame/try-runtime/cli/src/commands/execute_block.rs
+++ b/utils/frame/try-runtime/cli/src/commands/execute_block.rs
@@ -16,30 +16,25 @@
 // limitations under the License.

 use crate::{
-	build_executor, ensure_matching_spec, extract_code, full_extensions, hash_of, local_spec,
-	state_machine_call_with_proof, SharedParams, State, LOG_TARGET,
+	build_executor, full_extensions, rpc_err_handler, state_machine_call_with_proof, LiveState,
+	SharedParams, State, LOG_TARGET,
 };
 use parity_scale_codec::Encode;
-use sc_service::{Configuration, NativeExecutionDispatch};
-use sp_core::storage::well_known_keys;
-use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor};
+use sc_executor::sp_wasm_interface::HostFunctions;
+use sp_rpc::{list::ListOrValue, number::NumberOrHex};
+use sp_runtime::{
+	generic::SignedBlock,
+	traits::{Block as BlockT, Header as HeaderT, NumberFor},
+};
 use std::{fmt::Debug, str::FromStr};
 use substrate_rpc_client::{ws_client, ChainApi};

-/// Configurations of the [`Command::ExecuteBlock`].
+/// Configurations of the [`crate::Command::ExecuteBlock`].
 ///
 /// This will always call into `TryRuntime_execute_block`, which can optionally skip the state-root
 /// check (useful for trying an unreleased runtime), and can execute runtime sanity checks as well.
 #[derive(Debug, Clone, clap::Parser)]
 pub struct ExecuteBlockCmd {
-	/// Overwrite the wasm code in state or not.
-	#[arg(long)]
-	overwrite_wasm_code: bool,
-
-	/// If set the state root check is disabled.
-	#[arg(long)]
-	no_state_root_check: bool,
-
 	/// Which try-state targets to execute when running this command.
 	///
 	/// Expected values:
@@ -49,69 +44,28 @@ pub struct ExecuteBlockCmd {
 	///   `Staking, System`).
 	/// - `rr-[x]` where `[x]` is a number. Then, the given number of pallets are checked in a
 	///   round-robin fashion.
-	#[arg(long, default_value = "none")]
-	try_state: frame_try_runtime::TryStateSelect,
-
-	/// The block hash at which to fetch the block.
- /// - /// If the `live` state type is being used, then this can be omitted, and is equal to whatever - /// the `state::at` is. Only use this (with care) when combined with a snapshot. - #[arg( - long, - value_parser = crate::parse::hash - )] - block_at: Option, + #[arg(long, default_value = "all")] + pub try_state: frame_try_runtime::TryStateSelect, /// The ws uri from which to fetch the block. /// - /// If the `live` state type is being used, then this can be omitted, and is equal to whatever - /// the `state::uri` is. Only use this (with care) when combined with a snapshot. + /// This will always fetch the next block of whatever `state` is referring to, because this is + /// the only sensible combination. In other words, if you have the state of block `n`, you + /// should execute block `n+1` on top of it. + /// + /// If `state` is `Live`, this can be ignored and the same uri is used for both. #[arg( long, value_parser = crate::parse::url )] - block_ws_uri: Option, + pub block_ws_uri: Option, /// The state type to use. - /// - /// For this command only, if the `live` is used, then state of the parent block is fetched. - /// - /// If `block_at` is provided, then the [`State::Live::at`] is being ignored. #[command(subcommand)] - state: State, + pub state: State, } impl ExecuteBlockCmd { - async fn block_at(&self, ws_uri: String) -> sc_cli::Result - where - Block::Hash: FromStr + serde::de::DeserializeOwned, - ::Err: Debug, - Block::Header: serde::de::DeserializeOwned, - { - let rpc = ws_client(&ws_uri).await?; - - match (&self.block_at, &self.state) { - (Some(block_at), State::Snap { .. }) => hash_of::(block_at), - (Some(block_at), State::Live { .. }) => { - log::warn!(target: LOG_TARGET, "--block-at is provided while state type is live. the `Live::at` will be ignored"); - hash_of::(block_at) - }, - (None, State::Live { at: None, .. }) => { - log::warn!( - target: LOG_TARGET, - "No --block-at or --at provided, using the latest finalized block instead" - ); - ChainApi::<(), Block::Hash, Block::Header, ()>::finalized_head(&rpc) - .await - .map_err(|e| e.to_string().into()) - }, - (None, State::Live { at: Some(at), .. }) => hash_of::(at), - _ => { - panic!("either `--block-at` must be provided, or state must be `live with a proper `--at``"); - }, - } - } - fn block_ws_uri(&self) -> String where Block::Hash: FromStr, @@ -123,7 +77,7 @@ impl ExecuteBlockCmd { log::error!(target: LOG_TARGET, "--block-uri is provided while state type is live, Are you sure you know what you are doing?"); block_ws_uri.to_owned() }, - (None, State::Live { uri, .. }) => uri.clone(), + (None, State::Live(LiveState { uri, .. })) => uri.clone(), (None, State::Snap { .. 
}) => { panic!("either `--block-uri` must be provided, or state must be `live`"); }, @@ -131,10 +85,9 @@ impl ExecuteBlockCmd { } } -pub(crate) async fn execute_block( +pub(crate) async fn execute_block( shared: SharedParams, command: ExecuteBlockCmd, - config: Configuration, ) -> sc_cli::Result<()> where Block: BlockT + serde::de::DeserializeOwned, @@ -142,79 +95,77 @@ where ::Err: Debug, Block::Hash: serde::de::DeserializeOwned, Block::Header: serde::de::DeserializeOwned, - NumberFor: FromStr, - as FromStr>::Err: Debug, - ExecDispatch: NativeExecutionDispatch + 'static, + as TryInto>::Error: Debug, + HostFns: HostFunctions, { - let executor = build_executor::(&shared, &config); - let execution = shared.execution; + let executor = build_executor::(&shared); + let ext = command.state.into_ext::(&shared, &executor, None).await?; + // get the block number associated with this block. let block_ws_uri = command.block_ws_uri::(); - let block_at = command.block_at::(block_ws_uri.clone()).await?; let rpc = ws_client(&block_ws_uri).await?; - let block: Block = ChainApi::<(), Block::Hash, Block::Header, _>::block(&rpc, Some(block_at)) - .await - .unwrap() - .unwrap(); - let parent_hash = block.header().parent_hash(); - log::info!( - target: LOG_TARGET, - "fetched block #{:?} from {:?}, parent_hash to fetch the state {:?}", - block.header().number(), - block_ws_uri, - parent_hash - ); - - let ext = { - let builder = command - .state - .builder::()? - // make sure the state is being build with the parent hash, if it is online. - .overwrite_online_at(parent_hash.to_owned()) - .state_version(shared.state_version); - - let builder = if command.overwrite_wasm_code { - log::info!( - target: LOG_TARGET, - "replacing the in-storage :code: with the local code from {}'s chain_spec (your local repo)", - config.chain_spec.name(), - ); - let (code_key, code) = extract_code(&config.chain_spec)?; - builder.inject_hashed_key_value(&[(code_key, code)]) - } else { - builder.inject_hashed_key(well_known_keys::CODE) - }; - - builder.build().await? - }; + let next_hash = next_hash_of::(&rpc, ext.block_hash).await?; + + log::info!(target: LOG_TARGET, "fetching next block: {:?} ", next_hash); + + let block = ChainApi::<(), Block::Hash, Block::Header, SignedBlock>::block( + &rpc, + Some(next_hash), + ) + .await + .map_err(rpc_err_handler)? + .expect("header exists, block should also exist; qed") + .block; // A digest item gets added when the runtime is processing the block, so we need to pop // the last one to be consistent with what a gossiped block would contain. let (mut header, extrinsics) = block.deconstruct(); header.digest_mut().pop(); let block = Block::new(header, extrinsics); - let payload = (block.clone(), !command.no_state_root_check, command.try_state).encode(); - - let (expected_spec_name, expected_spec_version, _) = - local_spec::(&ext, &executor); - ensure_matching_spec::( - block_ws_uri.clone(), - expected_spec_name, - expected_spec_version, - shared.no_spec_check_panic, - ) - .await; - let _ = state_machine_call_with_proof::( + // for now, hardcoded for the sake of simplicity. We might customize them one day. 
+ let state_root_check = false; + let signature_check = false; + let payload = (block.clone(), state_root_check, signature_check, command.try_state).encode(); + + let _ = state_machine_call_with_proof::( &ext, &executor, - execution, "TryRuntime_execute_block", &payload, full_extensions(), )?; - log::info!(target: LOG_TARGET, "Core_execute_block executed without errors."); - Ok(()) } + +pub(crate) async fn next_hash_of( + rpc: &substrate_rpc_client::WsClient, + hash: Block::Hash, +) -> sc_cli::Result +where + Block: BlockT + serde::de::DeserializeOwned, + Block::Header: serde::de::DeserializeOwned, +{ + let number = ChainApi::<(), Block::Hash, Block::Header, ()>::header(rpc, Some(hash)) + .await + .map_err(rpc_err_handler) + .and_then(|maybe_header| maybe_header.ok_or("header_not_found").map(|h| *h.number()))?; + + let next = number + sp_runtime::traits::One::one(); + + let next_hash = match ChainApi::<(), Block::Hash, Block::Header, ()>::block_hash( + rpc, + Some(ListOrValue::Value(NumberOrHex::Number( + next.try_into().map_err(|_| "failed to convert number to block number")?, + ))), + ) + .await + .map_err(rpc_err_handler)? + { + ListOrValue::Value(t) => t.expect("value passed in; value comes out; qed"), + _ => unreachable!(), + }; + + Ok(next_hash) +} diff --git a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs index 1cc371c8f22fd..4eb3b3a8f35a9 100644 --- a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs +++ b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs @@ -16,32 +16,33 @@ // limitations under the License. use crate::{ - build_executor, ensure_matching_spec, extract_code, full_extensions, local_spec, parse, - state_machine_call_with_proof, SharedParams, LOG_TARGET, + build_executor, full_extensions, parse, rpc_err_handler, state_machine_call_with_proof, + LiveState, SharedParams, State, LOG_TARGET, }; use parity_scale_codec::{Decode, Encode}; -use remote_externalities::{Builder, Mode, OnlineConfig}; -use sc_executor::NativeExecutionDispatch; -use sc_service::Configuration; +use sc_executor::sp_wasm_interface::HostFunctions; use serde::{de::DeserializeOwned, Serialize}; use sp_core::H256; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; +use sp_runtime::{ + generic::SignedBlock, + traits::{Block as BlockT, Header as HeaderT, NumberFor}, +}; use std::{fmt::Debug, str::FromStr}; use substrate_rpc_client::{ws_client, ChainApi, FinalizedHeaders, Subscription, WsClient}; const SUB: &str = "chain_subscribeFinalizedHeads"; const UN_SUB: &str = "chain_unsubscribeFinalizedHeads"; -/// Configurations of the [`Command::FollowChain`]. +/// Configurations of the [`crate::Command::FollowChain`]. #[derive(Debug, Clone, clap::Parser)] pub struct FollowChainCmd { /// The url to connect to. #[arg(short, long, value_parser = parse::url)] - uri: String, + pub uri: String, /// If set, then the state root check is enabled. #[arg(long)] - state_root_check: bool, + pub state_root_check: bool, /// Which try-state targets to execute when running this command. /// @@ -52,12 +53,12 @@ pub struct FollowChainCmd { /// `Staking, System`). /// - `rr-[x]` where `[x]` is a number. Then, the given number of pallets are checked in a /// round-robin fashion. 
-	#[arg(long, default_value = "none")]
-	try_state: frame_try_runtime::TryStateSelect,
+	#[arg(long, default_value = "all")]
+	pub try_state: frame_try_runtime::TryStateSelect,

 	/// If present, a single connection to a node will be kept and reused for fetching blocks.
 	#[arg(long)]
-	keep_connection: bool,
+	pub keep_connection: bool,
 }

 /// Start listening with `SUB` at `url`.
@@ -77,10 +78,9 @@ async fn start_subscribing(
-pub(crate) async fn follow_chain<Block, ExecDispatch>(
+pub(crate) async fn follow_chain<Block, HostFns>(
 	shared: SharedParams,
 	command: FollowChainCmd,
-	config: Configuration,
 ) -> sc_cli::Result<()>
 where
 	Block: BlockT + DeserializeOwned,
@@ -89,26 +89,35 @@ where
 	<Block::Hash as FromStr>::Err: Debug,
 	NumberFor<Block>: FromStr,
 	<NumberFor<Block> as FromStr>::Err: Debug,
-	ExecDispatch: NativeExecutionDispatch + 'static,
+	HostFns: HostFunctions,
 {
-	let mut maybe_state_ext = None;
 	let (rpc, subscription) = start_subscribing::<Block>(&command.uri).await?;
-
-	let (code_key, code) = extract_code(&config.chain_spec)?;
-	let executor = build_executor::<ExecDispatch>(&shared, &config);
-	let execution = shared.execution;
-
 	let mut finalized_headers: FinalizedHeaders<Block, _, _> =
 		FinalizedHeaders::new(&rpc, subscription);

+	let mut maybe_state_ext = None;
+	let executor = build_executor::<HostFns>(&shared);
+
 	while let Some(header) = finalized_headers.next().await {
 		let hash = header.hash();
 		let number = header.number();

-		let block: Block = ChainApi::<(), Block::Hash, Block::Header, _>::block(&rpc, Some(hash))
-			.await
-			.unwrap()
-			.unwrap();
+		let block =
+			ChainApi::<(), Block::Hash, Block::Header, SignedBlock<Block>>::block(&rpc, Some(hash))
+				.await
+				.or_else(|e| {
+					if matches!(e, substrate_rpc_client::Error::ParseError(_)) {
+						log::error!(
+							"failed to parse the block format of remote against the local \
+							codebase. The block format has changed, and follow-chain cannot run in \
+							this case. Try running this command in a branch of your codebase that has \
+							the same block format as the remote chain. For now, we replace the block with an empty one"
+						);
+					}
+					Err(rpc_err_handler(e))
+				})?
+				.expect("if header exists, block should also exist.")
+				.block;

 		log::debug!(
 			target: LOG_TARGET,
@@ -120,49 +129,40 @@ where

 		// create an ext at the state of this block, whatever is the first subscription event.
 		if maybe_state_ext.is_none() {
-			let builder = Builder::<Block>::new()
-				.mode(Mode::Online(OnlineConfig {
-					transport: command.uri.clone().into(),
-					at: Some(*header.parent_hash()),
-					..Default::default()
-				}))
-				.state_version(shared.state_version);
-
-			let new_ext = builder
-				.inject_hashed_key_value(&[(code_key.clone(), code.clone())])
-				.build()
-				.await?;
-			log::info!(
-				target: LOG_TARGET,
-				"initialized state externalities at {:?}, storage root {:?}",
-				number,
-				new_ext.as_backend().root()
-			);
-
-			let (expected_spec_name, expected_spec_version, spec_state_version) =
-				local_spec::<Block, ExecDispatch>(&new_ext, &executor);
-			ensure_matching_spec::<Block>(
-				command.uri.clone(),
-				expected_spec_name,
-				expected_spec_version,
-				shared.no_spec_check_panic,
-			)
-			.await;
-
-			maybe_state_ext = Some((new_ext, spec_state_version));
+			let state = State::Live(LiveState {
+				uri: command.uri.clone(),
+				// a bit dodgy, we have to un-parse the hash to a string again and re-parse it
+				// inside.
+ at: Some(hex::encode(header.parent_hash().encode())), + pallet: vec![], + child_tree: true, + }); + let ext = state.into_ext::(&shared, &executor, None).await?; + maybe_state_ext = Some(ext); } - let (state_ext, spec_state_version) = + let state_ext = maybe_state_ext.as_mut().expect("state_ext either existed or was just created"); - let (mut changes, encoded_result) = state_machine_call_with_proof::( + let result = state_machine_call_with_proof::( state_ext, &executor, - execution, "TryRuntime_execute_block", (block, command.state_root_check, command.try_state.clone()).encode().as_ref(), full_extensions(), - )?; + ); + + if let Err(why) = result { + log::error!( + target: LOG_TARGET, + "failed to execute block {:?} due to {:?}", + number, + why + ); + continue + } + + let (mut changes, encoded_result) = result.expect("checked to be Ok; qed"); let consumed_weight = ::decode(&mut &*encoded_result) .map_err(|e| format!("failed to decode weight: {:?}", e))?; @@ -171,13 +171,13 @@ where .drain_storage_changes( &state_ext.backend, &mut Default::default(), - // Note that in case a block contains a runtime upgrade, - // state version could potentially be incorrect here, - // this is very niche and would only result in unaligned - // roots, so this use case is ignored for now. - *spec_state_version, + // Note that in case a block contains a runtime upgrade, state version could + // potentially be incorrect here, this is very niche and would only result in + // unaligned roots, so this use case is ignored for now. + state_ext.state_version, ) .unwrap(); + state_ext.backend.apply_transaction( storage_changes.transaction_storage_root, storage_changes.transaction, diff --git a/utils/frame/try-runtime/cli/src/commands/mod.rs b/utils/frame/try-runtime/cli/src/commands/mod.rs index 4861d94f077ce..ab0a066585f6a 100644 --- a/utils/frame/try-runtime/cli/src/commands/mod.rs +++ b/utils/frame/try-runtime/cli/src/commands/mod.rs @@ -15,7 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -pub(crate) mod execute_block; -pub(crate) mod follow_chain; -pub(crate) mod offchain_worker; -pub(crate) mod on_runtime_upgrade; +pub mod create_snapshot; +pub mod execute_block; +pub mod follow_chain; +pub mod offchain_worker; +pub mod on_runtime_upgrade; diff --git a/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs b/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs index 8d2585372b4a8..c55de7da64817 100644 --- a/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs +++ b/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs @@ -16,34 +16,18 @@ // limitations under the License. use crate::{ - build_executor, ensure_matching_spec, extract_code, full_extensions, hash_of, local_spec, - parse, state_machine_call, SharedParams, State, LOG_TARGET, + build_executor, commands::execute_block::next_hash_of, full_extensions, parse, rpc_err_handler, + state_machine_call, LiveState, SharedParams, State, LOG_TARGET, }; use parity_scale_codec::Encode; -use sc_executor::NativeExecutionDispatch; -use sc_service::Configuration; -use sp_core::storage::well_known_keys; -use sp_runtime::traits::{Block as BlockT, Header, NumberFor}; +use sc_executor::sp_wasm_interface::HostFunctions; +use sp_runtime::traits::{Block as BlockT, NumberFor}; use std::{fmt::Debug, str::FromStr}; use substrate_rpc_client::{ws_client, ChainApi}; -/// Configurations of the [`Command::OffchainWorker`]. +/// Configurations of the [`crate::Command::OffchainWorker`]. 
#[derive(Debug, Clone, clap::Parser)] pub struct OffchainWorkerCmd { - /// Overwrite the wasm code in state or not. - #[arg(long)] - overwrite_wasm_code: bool, - - /// The block hash at which to fetch the header. - /// - /// If the `live` state type is being used, then this can be omitted, and is equal to whatever - /// the `state::at` is. Only use this (with care) when combined with a snapshot. - #[arg( - long, - value_parser = parse::hash - )] - header_at: Option, - /// The ws uri from which to fetch the header. /// /// If the `live` state type is being used, then this can be omitted, and is equal to whatever @@ -52,7 +36,7 @@ pub struct OffchainWorkerCmd { long, value_parser = parse::url )] - header_ws_uri: Option, + pub header_ws_uri: Option, /// The state type to use. #[command(subcommand)] @@ -60,24 +44,6 @@ pub struct OffchainWorkerCmd { } impl OffchainWorkerCmd { - fn header_at(&self) -> sc_cli::Result - where - Block::Hash: FromStr, - ::Err: Debug, - { - match (&self.header_at, &self.state) { - (Some(header_at), State::Snap { .. }) => hash_of::(header_at), - (Some(header_at), State::Live { .. }) => { - log::error!(target: LOG_TARGET, "--header-at is provided while state type is live, this will most likely lead to a nonsensical result."); - hash_of::(header_at) - }, - (None, State::Live { at: Some(at), .. }) => hash_of::(at), - _ => { - panic!("either `--header-at` must be provided, or state must be `live` with a proper `--at`"); - }, - } - } - fn header_ws_uri(&self) -> String where Block::Hash: FromStr, @@ -89,7 +55,7 @@ impl OffchainWorkerCmd { log::error!(target: LOG_TARGET, "--header-uri is provided while state type is live, this will most likely lead to a nonsensical result."); header_ws_uri.to_owned() }, - (None, State::Live { uri, .. }) => uri.clone(), + (None, State::Live(LiveState { uri, .. })) => uri.clone(), (None, State::Snap { .. }) => { panic!("either `--header-uri` must be provided, or state must be `live`"); }, @@ -97,76 +63,42 @@ impl OffchainWorkerCmd { } } -pub(crate) async fn offchain_worker( +pub(crate) async fn offchain_worker( shared: SharedParams, command: OffchainWorkerCmd, - config: Configuration, ) -> sc_cli::Result<()> where Block: BlockT + serde::de::DeserializeOwned, - Block::Hash: FromStr, Block::Header: serde::de::DeserializeOwned, + Block::Hash: FromStr, ::Err: Debug, NumberFor: FromStr, as FromStr>::Err: Debug, - ExecDispatch: NativeExecutionDispatch + 'static, + HostFns: HostFunctions, { - let executor = build_executor(&shared, &config); - let execution = shared.execution; + let executor = build_executor(&shared); + // we first build the externalities with the remote code. 
+	let ext = command.state.into_ext::<Block, HostFns>(&shared, &executor, None).await?;

-	let header_at = command.header_at::<Block>()?;
 	let header_ws_uri = command.header_ws_uri::<Block>();

 	let rpc = ws_client(&header_ws_uri).await?;
-	let header = ChainApi::<(), Block::Hash, Block::Header, ()>::header(&rpc, Some(header_at))
-		.await
-		.unwrap()
-		.unwrap();
-	log::info!(
-		target: LOG_TARGET,
-		"fetched header from {:?}, block number: {:?}",
-		header_ws_uri,
-		header.number()
-	);
-
-	let ext = {
-		let builder = command.state.builder::<Block>()?.state_version(shared.state_version);
-
-		let builder = if command.overwrite_wasm_code {
-			log::info!(
-				target: LOG_TARGET,
-				"replacing the in-storage :code: with the local code from {}'s chain_spec (your local repo)",
-				config.chain_spec.name(),
-			);
-			let (code_key, code) = extract_code(&config.chain_spec)?;
-			builder.inject_hashed_key_value(&[(code_key, code)])
-		} else {
-			builder.inject_hashed_key(well_known_keys::CODE)
-		};
+	let next_hash = next_hash_of::<Block>(&rpc, ext.block_hash).await?;
+	log::info!(target: LOG_TARGET, "fetching next header: {:?} ", next_hash);

-		builder.build().await?
-	};
-
-	let (expected_spec_name, expected_spec_version, _) =
-		local_spec::<Block, ExecDispatch>(&ext, &executor);
-	ensure_matching_spec::<Block>(
-		header_ws_uri,
-		expected_spec_name,
-		expected_spec_version,
-		shared.no_spec_check_panic,
-	)
-	.await;
+	let header = ChainApi::<(), Block::Hash, Block::Header, ()>::header(&rpc, Some(next_hash))
+		.await
+		.map_err(rpc_err_handler)
+		.map(|maybe_header| maybe_header.ok_or("Header does not exist"))??;
+	let payload = header.encode();

-	let _ = state_machine_call::<Block, ExecDispatch>(
+	let _ = state_machine_call::<Block, HostFns>(
 		&ext,
 		&executor,
-		execution,
 		"OffchainWorkerApi_offchain_worker",
-		header.encode().as_ref(),
+		&payload,
 		full_extensions(),
 	)?;

-	log::info!(target: LOG_TARGET, "OffchainWorkerApi_offchain_worker executed without errors.");
-
 	Ok(())
 }
diff --git a/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs b/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs
index fba34ddfb5060..80fb5d31f71a9 100644
--- a/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs
+++ b/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs
@@ -15,31 +15,31 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use std::{fmt::Debug, str::FromStr};
-
-use parity_scale_codec::Decode;
-use sc_executor::NativeExecutionDispatch;
-use sc_service::Configuration;
+use crate::{build_executor, state_machine_call_with_proof, SharedParams, State, LOG_TARGET};
+use parity_scale_codec::{Decode, Encode};
+use sc_executor::sp_wasm_interface::HostFunctions;
 use sp_runtime::traits::{Block as BlockT, NumberFor};
 use sp_weights::Weight;
+use std::{fmt::Debug, str::FromStr};

-use crate::{
-	build_executor, ensure_matching_spec, extract_code, local_spec, state_machine_call_with_proof,
-	SharedParams, State, LOG_TARGET,
-};
-
-/// Configurations of the [`Command::OnRuntimeUpgrade`].
+/// Configurations of the [`crate::Command::OnRuntimeUpgrade`].
 #[derive(Debug, Clone, clap::Parser)]
 pub struct OnRuntimeUpgradeCmd {
 	/// The state type to use.
 	#[command(subcommand)]
 	pub state: State,
+
+	/// Execute `try_state`, `pre_upgrade` and `post_upgrade` checks as well.
+	///
+	/// This will perform more checks, but it will also make the reported PoV/Weight
+	/// inaccurate.
+ #[clap(long)] + pub checks: bool, } -pub(crate) async fn on_runtime_upgrade( +pub(crate) async fn on_runtime_upgrade( shared: SharedParams, command: OnRuntimeUpgradeCmd, - config: Configuration, ) -> sc_cli::Result<()> where Block: BlockT + serde::de::DeserializeOwned, @@ -48,40 +48,22 @@ where Block::Header: serde::de::DeserializeOwned, NumberFor: FromStr, as FromStr>::Err: Debug, - ExecDispatch: NativeExecutionDispatch + 'static, + HostFns: HostFunctions, { - let executor = build_executor(&shared, &config); - let execution = shared.execution; - - let ext = { - let builder = command.state.builder::()?.state_version(shared.state_version); - let (code_key, code) = extract_code(&config.chain_spec)?; - builder.inject_hashed_key_value(&[(code_key, code)]).build().await? - }; + let executor = build_executor(&shared); + let ext = command.state.into_ext::(&shared, &executor, None).await?; - if let Some(uri) = command.state.live_uri() { - let (expected_spec_name, expected_spec_version, _) = - local_spec::(&ext, &executor); - ensure_matching_spec::( - uri, - expected_spec_name, - expected_spec_version, - shared.no_spec_check_panic, - ) - .await; - } - - let (_, encoded_result) = state_machine_call_with_proof::( + let (_, encoded_result) = state_machine_call_with_proof::( &ext, &executor, - execution, "TryRuntime_on_runtime_upgrade", - &[], + command.checks.encode().as_ref(), Default::default(), // we don't really need any extensions here. )?; let (weight, total_weight) = <(Weight, Weight) as Decode>::decode(&mut &*encoded_result) .map_err(|e| format!("failed to decode weight: {:?}", e))?; + log::info!( target: LOG_TARGET, "TryRuntime_on_runtime_upgrade executed without errors. Consumed weight = ({} ps, {} byte), total weight = ({} ps, {} byte) ({:.2} %, {:.2} %).", diff --git a/utils/frame/try-runtime/cli/src/lib.rs b/utils/frame/try-runtime/cli/src/lib.rs index f54354342bf28..47a9dfa3f6544 100644 --- a/utils/frame/try-runtime/cli/src/lib.rs +++ b/utils/frame/try-runtime/cli/src/lib.rs @@ -22,7 +22,7 @@ //! > As the name suggests, `try-runtime` is a detailed testing framework that gives you a lot of //! control over what is being executed in which environment. It is recommended that user's first //! familiarize themselves with substrate in depth, particularly the execution model. It is critical -//! to deeply understand how the wasm/native interactions, and the runtime apis work in the +//! to deeply understand how the wasm/client/runtime interactions, and the runtime apis work in the //! substrate runtime, before commencing to working with `try-runtime`. //! //! #### Resources @@ -35,101 +35,102 @@ //! //! --- //! -//! ## Overview +//! ## Background Knowledge //! //! The basis of all try-runtime commands is the same: connect to a live node, scrape its *state* //! and put it inside a `TestExternalities`, then call into a *specific runtime-api* using the given //! state and some *runtime*. //! +//! Alternatively, the state could come from a snapshot file. +//! //! All of the variables in the above statement are made *italic*. Let's look at each of them: //! //! 1. **State** is the key-value pairs of data that comprise the canonical information that any //! blockchain is keeping. A state can be full (all key-value pairs), or be partial (only pairs -//! related to some pallets). Moreover, some keys are special and are not related to specific -//! pallets, known as [`well_known_keys`] in substrate. The most important of these is the -//! 
`:CODE:` key, which contains the code used for execution, when wasm execution is chosen.
+//!    related to some pallets/prefixes). Moreover, some keys are special and are not related to
+//!    specific pallets, known as [`well_known_keys`] in substrate. The most important of these is
+//!    the `:CODE:` key, which contains the code used for execution, when wasm execution is chosen.
 //!
 //! 2. *A runtime-api* call is a call into a function defined in the runtime, *on top of a given
 //!    state*. Each subcommand of `try-runtime` utilizes a specific *runtime-api*.
 //!
 //! 3. Finally, the **runtime** is the actual code that is used to execute the aforementioned
-//!    runtime-api. All substrate based chains always have two runtimes: native and wasm. The
-//!    decision of which one is chosen is non-trivial. First, let's look at the options:
-//!
-//!    1. Native: this means that the runtime that is **in your codebase**, aka whatever you see in
-//!       your editor, is being used. This runtime is easier for diagnostics. We refer to this as
-//!       the "local runtime".
-//!
-//!    2. Wasm: this means that whatever is stored in the `:CODE:` key of the state that your
-//!       scrape is being used. In plain sight, since the entire state (including `:CODE:`) is
-//!       scraped from a remote chain, you could conclude that the wasm runtime, if used, is always
-//!       equal to the canonical runtime of the live chain (i.e. NOT the "local runtime"). That's
-//!       factually true, but then the testing would be quite lame. Typically, with try-runtime,
-//!       you don't want to execute whatever code is already on the live chain. Instead, you want
-//!       your local runtime (which typically includes a non-released feature) to be used. This is
-//!       why try-runtime overwrites the wasm runtime (at `:CODE:`) with the local runtime as well.
-//!       That being said, this behavior can be controlled in certain subcommands with a special
-//!       flag (`--overwrite-wasm-code`).
-//!
-//!    The decision of which runtime is eventually used is based on two facts:
-//!
-//!    1. `--execution` flag. If you specify `wasm`, then it is *always* wasm. If it is `native`, then
-//!       if and ONLY IF the spec versions match, then the native runtime is used. Else, wasm runtime
-//!       is used again.
-//!    2. `--chain` flag (if present in your cli), which determines *which local runtime*, is selected.
-//!       This will specify:
-//!       1. which native runtime is used, if you select `--execution Native`
-//!       2. which wasm runtime is used to replace the `:CODE:`, if try-runtime is instructed to do
-//!          so.
-//!
-//! All in all, if the term "local runtime" is used in the rest of this crate's documentation, it
-//! means either the native runtime, or the wasm runtime when overwritten inside `:CODE:`. In other
-//! words, it means your... well, "local runtime", regardless of wasm or native.
-//!
-//!
-//! See [`Command`] for more information about each command's specific customization flags, and
-//! assumptions regarding the runtime being used.
+//!    runtime-api. Everything in this crate assumes wasm execution, which means the runtime that
+//!    you use is the one stored onchain, namely under the `:CODE:` key.
+//!
+//! To recap, a typical try-runtime command does the following:
+//!
+//! 1. Download the state of a live chain, and write to an `externalities`.
+//! 2. Overwrite the `:CODE:` with a given wasm blob.
+//! 3. Test some functionality via calling a runtime-api.
+//!
+//! ## Usage
+//!
+//! To use any of the provided commands, [`SharedParams`] must be provided. The most important of
+//! which is [`SharedParams::runtime`], which specifies which runtime to use. Furthermore,
+//! [`SharedParams::overwrite_state_version`] can be used to alter the state-version (see
+//!  for more info).
+//!
+//! Then, the specific command has to be specified. See [`Command`] for more information about each
+//! command's specific customization flags, and assumptions regarding the runtime being used.
+//!
+//! Said briefly, this CLI is capable of executing:
+//!
+//! * [`Command::OnRuntimeUpgrade`]: execute all the `on_runtime_upgrade` hooks.
+//! * [`Command::ExecuteBlock`]: re-execute the given block.
+//! * [`Command::OffchainWorker`]: re-execute the given block's offchain worker code path.
+//! * [`Command::FollowChain`]: continuously execute the blocks of a remote chain on top of a given
+//!   runtime.
+//! * [`Command::CreateSnapshot`]: create a snapshot file from a remote node.
 //!
 //! Finally, To make sure there are no errors regarding this, always run any `try-runtime` command
 //! with `executor=trace` logging targets, which will specify which runtime is being used per api
-//! call.
-//!
-//! Furthermore, other relevant log targets are: `try-runtime::cli`, `remote-ext`, and `runtime`.
+//! call. Moreover, the `remote-ext`, `try-runtime` and `runtime` log targets will also be useful.
 //!
 //! ## Spec name check
 //!
 //! A common pitfall is that you might be running some test on top of the state of chain `x`, with
 //! the runtime of chain `y`. To avoid this all commands do a spec-name check before executing
-//! anything by default. This will check the spec name of the remote node your are connected to,
-//! with the spec name of your local runtime and ensure that they match.
+//! anything by default. This will check that, if any alterations are being made to the `:CODE:`,
+//! the spec names match. Spec version mismatches only emit a warning; the versions are not
+//! mandated to match.
 //!
-//! Should you need to disable this on certain occasions, a top level flag of `--no-spec-name-check`
-//! can be used.
+//! > If anything, in most cases, we expect spec-versions to NOT match, because try-runtime is all
+//! > about testing unreleased runtimes.
 //!
-//! The spec version is also always inspected, but if it is a mismatch, it will only emit a warning.
-//!
-//! ## Note nodes that operate with `try-runtime`
+//! ## Note on nodes that respond to `try-runtime` requests
 //!
 //! There are a number of flags that need to be preferably set on a running node in order to work
 //! well with try-runtime's expensive RPC queries:
 //!
-//! - set `--rpc-max-payload 1000` to ensure large RPC queries can work.
-//! - set `--ws-max-out-buffer-capacity 1000` to ensure the websocket connection can handle large
-//!   RPC queries.
+//! - set `--rpc-max-response-size 1000` and `--rpc-max-request-size 1000` to ensure connections
+//!   are not dropped in case the state is large.
 //! - set `--rpc-cors all` to ensure ws connections can come through.
 //!
 //! Note that *none* of the try-runtime operations need unsafe RPCs.
 //!
-//! ## Migration Best Practices
+//! ## Note on signature and state-root checks
+//!
+//! All of the commands calling into `TryRuntime_execute_block` ([`Command::ExecuteBlock`] and
+//! [`Command::FollowChain`]) disable both state root and signature checks. This is because in 99%
+//! of the cases, the runtime that is being tested is different from the one that is stored in the
+//! canonical chain state. This implies:
+//!
+//! 1. the state root will NEVER match, because `:CODE:` is different between the two.
+//! 2. replaying all transactions will fail, because the spec-version is part of the transaction
+//!    signature.
+//!
+//! ## Best Practices
 //!
-//! One of the main use-cases of try-runtime is using it for testing storage migrations. The
-//! following points makes sure you can *effectively* test your migrations with try-runtime.
+//! Try-runtime is all about battle-testing unreleased runtimes. The following list of suggestions
+//! helps developers maximize testing coverage and make the best use of `try-runtime`.
 //!
 //! #### Adding pre/post hooks
 //!
 //! One of the gems that come only in the `try-runtime` feature flag is the `pre_upgrade` and
-//! `post_upgrade` hooks for `OnRuntimeUpgrade`. This trait is implemented either inside the
-//! pallet, or manually in a runtime, to define a migration. In both cases, these functions can be
-//! added, given the right flag:
+//! `post_upgrade` hooks for `OnRuntimeUpgrade`. This trait is implemented either inside the pallet,
+//! or manually in a runtime, to define a migration. In both cases, these functions can be added,
+//! given the right flag:
 //!
 //! ```ignore
 //!
@@ -147,6 +148,19 @@
 //! encoded data (usually some pre-upgrade state) which will be passed to `post_upgrade` after
 //! upgrading and used for post checking.
 //!
+//! #### State Consistency
+//!
+//! Similarly, each pallet can expose a function in its `#[pallet::hooks]` section as follows:
+//!
+//! ```ignore
+//! #[cfg(feature = "try-runtime")]
+//! fn try_state(_: BlockNumber) -> Result<(), &'static str> { Ok(()) }
+//! ```
+//!
+//! which is called on numerous code paths in the try-runtime tool. These checks should ensure that
+//! the state of the pallet is consistent and correct. See `frame_support::try_runtime::TryState`
+//! for more info.
+//!
 //! #### Logging
 //!
 //! It is super helpful to make sure your migration code uses logging (always with a `runtime` log
@@ -161,216 +175,260 @@
 //!
 //! ## Examples
 //!
-//! Run the migrations of the local runtime on the state of polkadot, from the polkadot repo where
-//! we have `--chain polkadot-dev`, on the latest finalized block's state
-//!
-//! ```sh
-//! RUST_LOG=runtime=trace,try-runtime::cli=trace,executor=trace \
-//! cargo run try-runtime \
-//!     --execution Native \
-//!     --chain polkadot-dev \
-//!     on-runtime-upgrade \
-//!     live \
-//!     --uri wss://rpc.polkadot.io
-//! # note that we don't pass any --at, nothing means latest block.
+//! For the following examples, we assume the existence of the following:
+//!
+//! 1. a substrate node compiled without `--features try-runtime`, called `substrate`. This will be
+//! the running node that you connect to. Then, after some changes to this node, you compile it with
+//! `--features try-runtime`. This gives you:
+//! 2. a substrate binary that has the try-runtime sub-command enabled.
+//! 3. a wasm blob that has try-runtime functionality.
+//!
+//! ```bash
+//! # this is like your running deployed node.
+//! cargo build --release && cp target/release/substrate .
+//!
+//! # this is like your WIP branch.
+//! cargo build --release --features try-runtime
+//! cp target/release/substrate substrate-try-runtime
+//! cp ./target/release/wbuild/kitchensink-runtime/kitchensink_runtime.wasm runtime-try-runtime.wasm
 //! ```
 //!
-//! Same as previous one, but let's say we want to run this command from the substrate repo, where
-//! we don't have a matching spec name/version.
+//! > The above example is with `substrate`'s `kitchensink-runtime`, but is applicable to any
+//!
> substrate-based chain that has implemented `try-runtime-cli`. +//! +//! * If you run `try-runtime` subcommand against `substrate` binary listed above, you get the +//! following error. +//! +//! ```bash +//! [substrate] ./substrate try-runtime +//! Error: Input("TryRuntime wasn't enabled when building the node. You can enable it with `--features try-runtime`.") +//! ``` //! -//! ```sh -//! RUST_LOG=runtime=trace,try-runtime::cli=trace,executor=trace \ -//! cargo run try-runtime \ -//! --execution Native \ -//! --chain dev \ -//! --no-spec-name-check \ # mind this one! +//! * If you run the same against `substrate-try-runtime`, it will work. +//! +//! ```bash +//! [substrate] ./substrate-try-runtime try-runtime +//! Try some command against runtime state +//! +//! Usage: substrate-try-runtime try-runtime [OPTIONS] --runtime +//! +//! Commands: +//! on-runtime-upgrade Execute the migrations of the "local runtime" +//! execute-block Executes the given block against some state +//! offchain-worker Executes *the offchain worker hooks* of a given block against some state +//! follow-chain Follow the given chain's finalized blocks and apply all of its extrinsics +//! create-snapshot Create a new snapshot file +//! help Print this message or the help of the given subcommand(s) +//! +//! Options: +//! --chain +//! Specify the chain specification +//! --dev +//! Specify the development chain +//! -d, --base-path +//! Specify custom base path +//! -l, --log ... +//! Sets a custom logging filter. Syntax is `=`, e.g. -lsync=debug +//! --detailed-log-output +//! Enable detailed log output +//! --disable-log-color +//! Disable log color output +//! --enable-log-reloading +//! Enable feature to dynamically update and reload the log filter +//! --tracing-targets +//! Sets a custom profiling filter. Syntax is the same as for logging: `=` +//! --tracing-receiver +//! Receiver to process tracing messages [default: log] [possible values: log] +//! --runtime +//! The runtime to use +//! --wasm-execution +//! Type of wasm execution used [default: compiled] [possible values: interpreted-i-know-what-i-do, compiled] +//! --wasm-instantiation-strategy +//! The WASM instantiation method to use [default: pooling-copy-on-write] [possible values: pooling-copy-on-write, recreate-instance-copy-on-write, pooling, recreate-instance, legacy-instance-reuse] +//! --heap-pages +//! The number of 64KB pages to allocate for Wasm execution. Defaults to [`sc_service::Configuration.default_heap_pages`] +//! --overwrite-state-version +//! Overwrite the `state_version` +//! -h, --help +//! Print help information (use `--help` for more detail) +//! -V, --version +//! Print version information +//! ``` +//! +//! * Run the migrations of a given runtime on top of a live state. +//! +//! ```bash +//! # assuming there's `./substrate --dev --tmp --ws-port 9999` or similar running. +//! ./substrate-try-runtime \ +//! try-runtime \ +//! --runtime kitchensink_runtime.wasm \ +//! -lruntime=debug \ //! on-runtime-upgrade \ -//! live \ -//! --uri wss://rpc.polkadot.io +//! live --uri ws://localhost:9999 //! ``` //! -//! Same as the previous one, but run it at specific block number's state. This means that this +//! * Same as the previous one, but run it at specific block number's state. This means that this //! block hash's state shall not yet have been pruned in `rpc.polkadot.io`. //! -//! ```sh -//! RUST_LOG=runtime=trace,try-runtime::cli=trace,executor=trace \ -//! cargo run try-runtime \ -//! --execution Native \ -//! --chain dev \ -//! 
--no-spec-name-check \ # mind this one! on-runtime-upgrade \ +//! ```bash +//! ./substrate-try-runtime \ +//! try-runtime \ +//! --runtime kitchensink_runtime.wasm \ +//! -lruntime=debug \ //! on-runtime-upgrade \ -//! live \ -//! --uri wss://rpc.polkadot.io \ -//! --at +//! live --uri ws://localhost:9999 \ +//! # replace with your desired block hash! +//! --at 0xa1b16c1efd889a9f17375ec4dd5c1b4351a2be17fa069564fced10d23b9b3836 //! ``` //! -//! Moving to `execute-block` and `offchain-workers`. For these commands, you always needs to -//! specify a block hash. For the rest of these examples, we assume we're in the polkadot repo. -//! -//! First, let's assume you are in a branch that has the same spec name/version as the live polkadot -//! network. -//! -//! ```sh -//! RUST_LOG=runtime=trace,try-runtime::cli=trace,executor=trace \ -//! cargo run try-runtime \ -//! --execution Wasm \ -//! --chain polkadot-dev \ -//! --uri wss://rpc.polkadot.io \ -//! execute-block \ -//! live \ -//! --at +//! * Executing the same command with the [`Runtime::Existing`] will fail because the existing +//! runtime, stored onchain in `substrate` binary that we compiled earlier does not have +//! `try-runtime` feature! +//! +//! ```bash +//! ./substrate-try-runtime try-runtime --runtime existing -lruntime=debug on-runtime-upgrade live --uri ws://localhost:9999 +//! ... +//! Error: Input("given runtime is NOT compiled with try-runtime feature!") //! ``` //! -//! This is wasm, so it will technically execute the code that lives on the live network. Let's say -//! you want to execute your local runtime. Since you have a matching spec versions, you can simply -//! change `--execution Wasm` to `--execution Native` to achieve this. Your logs of `executor=trace` -//! should show something among the lines of: +//! * Now, let's use a snapshot file. First, we create the snapshot: //! -//! ```text -//! Request for native execution succeeded (native: polkadot-9900 (parity-polkadot-0.tx7.au0), chain: polkadot-9900 (parity-polkadot-0.tx7.au0)) +//! ```bash +//! ./substrate-try-runtime try-runtime --runtime existing -lruntime=debug create-snapshot --uri ws://localhost:9999 +//! 2022-12-13 10:28:17.516 INFO main try-runtime::cli: snapshot path not provided (-s), using 'node-268@latest.snap' +//! 2022-12-13 10:28:17.516 INFO main remote-ext: since no at is provided, setting it to latest finalized head, 0xe7d0b614dfe89af65b33577aae46a6f958c974bf52f8a5e865a0f4faeb578d22 +//! 2022-12-13 10:28:17.516 INFO main remote-ext: since no prefix is filtered, the data for all pallets will be downloaded +//! 2022-12-13 10:28:17.550 INFO main remote-ext: writing snapshot of 1611464 bytes to "node-268@latest.snap" +//! 2022-12-13 10:28:17.551 INFO main remote-ext: initialized state externalities with storage root 0x925e4e95de4c08474fb7f976c4472fa9b8a1091619cd7820a793bf796ee6d932 and state_version V1 //! ``` //! -//! If you don't have matching spec versions, then are doomed to execute wasm. In this case, you can -//! manually overwrite the wasm code with your local runtime: -//! -//! ```sh -//! RUST_LOG=runtime=trace,try-runtime::cli=trace,executor=trace \ -//! cargo run try-runtime \ -//! --execution Wasm \ -//! --chain polkadot-dev \ -//! execute-block \ -//! live \ -//! --uri wss://rpc.polkadot.io \ -//! --at \ -//! --overwrite-wasm-code +//! > Note that the snapshot contains the `existing` runtime, which does not have the correct +//! > `try-runtime` feature. In the following commands, we still need to overwrite the runtime. +//! +//! 
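+//! > For reference, this is roughly what the `snap` state source does internally. A minimal
+//! > sketch, assuming the `remote_externalities` builder API used later in this file; the
+//! > snapshot file name is the hypothetical one from the example above:
+//!
+//! ```ignore
+//! use remote_externalities::{Builder, Mode, OfflineConfig, SnapshotConfig};
+//!
+//! // Load the state from a previously created snapshot file, instead of
+//! // scraping it from a live node over RPC.
+//! let ext = Builder::<Block>::new()
+//!     .mode(Mode::Offline(OfflineConfig {
+//!         state_snapshot: SnapshotConfig::new("node-268@latest.snap"),
+//!     }))
+//!     .build()
+//!     .await?;
+//! ```
+//!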
Then, we can use it to run the same command as before, `on-runtime-upgrade`:
+//!
+//! ```bash
+//! ./substrate-try-runtime \
+//!     try-runtime \
+//!     --runtime runtime-try-runtime.wasm \
+//!     -lruntime=debug \
+//!     on-runtime-upgrade \
+//!     snap -s node-268@latest.snap
+//! ```
 //!
-//! For all of these blocks, the block with hash `<hash>` is being used, and the initial state
-//! is the state of the parent hash. This is because by omitting `ExecuteBlockCmd::block_at`, the
-//! `--at` is used for both. This should be good enough for 99% of the cases. The only case where
-//! you need to specify `block-at` and `block-ws-uri` is with snapshots. Let's say you have a file
-//! `snap` and you know it corresponds to the state of the parent block of `X`. Then you'd do:
-//!
-//! ```sh
-//! RUST_LOG=runtime=trace,try-runtime::cli=trace,executor=trace \
-//! cargo run try-runtime \
-//!     --execution Wasm \
-//!     --chain polkadot-dev \
-//!     --uri wss://rpc.polkadot.io \
-//!     execute-block \
-//!     --block-at \
-//!     --block-ws-uri wss://rpc.polkadot.io \
-//!     --overwrite-wasm-code \
-//!     snap \
-//!     -s snap \
+//! * Execute the latest finalized block with the given runtime.
+//!
+//! ```bash
+//! ./substrate-try-runtime try-runtime \
+//!     --runtime runtime-try-runtime.wasm \
+//!     -lruntime=debug \
+//!     execute-block live \
+//!     --uri ws://localhost:9999
+//! ```
+//!
+//! This can still be customized at a given block with `--at`. If you want to use a snapshot, you
+//! can still use `--block-ws-uri` to provide a node from which the block data can be fetched.
+//!
+//! Moreover, this runs the `frame_support::try_runtime::TryState` hooks as well. The hooks to run
+//! can be customized with the `--try-state` flag. For example:
+//!
+//! ```bash
+//! ./substrate-try-runtime try-runtime \
+//!     --runtime runtime-try-runtime.wasm \
+//!     -lruntime=debug \
+//!     execute-block live \
+//!     --try-state System,Staking \
+//!     --uri ws://localhost:9999
+//! ```
+//!
+//! This will only run the `try-state` of the two given pallets. See
+//! [`frame_try_runtime::TryStateSelect`] for more information.
+//!
+//! * Follow our live chain's blocks using `follow-chain`, whilst running the try-state of 3 pallets
+//!   in a round-robin fashion:
+//!
+//! ```bash
+//! ./substrate-try-runtime \
+//!     try-runtime \
+//!     --runtime runtime-try-runtime.wasm \
+//!     -lruntime=debug \
+//!     follow-chain \
+//!     --uri ws://localhost:9999 \
+//!     --try-state rr-3
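+//!     # `rr-3` is shorthand for `TryStateSelect::RoundRobin(3)`: run the `try_state` of
+//!     # three pallets per block, rotating through all pallets over consecutive blocks.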
``` #![cfg(feature = "try-runtime")] use parity_scale_codec::Decode; use remote_externalities::{ - Builder, Mode, OfflineConfig, OnlineConfig, SnapshotConfig, TestExternalities, + Builder, Mode, OfflineConfig, OnlineConfig, RemoteExternalities, SnapshotConfig, + TestExternalities, }; -use sc_chain_spec::ChainSpec; use sc_cli::{ - execution_method_from_cli, CliConfiguration, ExecutionStrategy, WasmExecutionMethod, - WasmtimeInstantiationStrategy, DEFAULT_WASMTIME_INSTANTIATION_STRATEGY, - DEFAULT_WASM_EXECUTION_METHOD, + CliConfiguration, RuntimeVersion, WasmExecutionMethod, WasmtimeInstantiationStrategy, + DEFAULT_WASMTIME_INSTANTIATION_STRATEGY, DEFAULT_WASM_EXECUTION_METHOD, }; -use sc_executor::NativeElseWasmExecutor; -use sc_service::{Configuration, NativeExecutionDispatch}; +use sc_executor::{sp_wasm_interface::HostFunctions, WasmExecutor}; +use sp_api::HashT; use sp_core::{ + hexdisplay::HexDisplay, offchain::{ testing::{TestOffchainExt, TestTransactionPoolExt}, OffchainDbExt, OffchainWorkerExt, TransactionPoolExt, }, - storage::{well_known_keys, StorageData, StorageKey}, + storage::well_known_keys, testing::TaskExecutor, - traits::TaskExecutorExt, + traits::{ReadRuntimeVersion, TaskExecutorExt}, twox_128, H256, }; use sp_externalities::Extensions; use sp_keystore::{testing::KeyStore, KeystoreExt}; use sp_runtime::{ - traits::{Block as BlockT, NumberFor}, + traits::{BlakeTwo256, Block as BlockT, NumberFor}, DeserializeOwned, }; -use sp_state_machine::{OverlayedChanges, StateMachine, TrieBackendBuilder}; +use sp_state_machine::{CompactProof, OverlayedChanges, StateMachine, TrieBackendBuilder}; use sp_version::StateVersion; use std::{fmt::Debug, path::PathBuf, str::FromStr}; -use substrate_rpc_client::{ws_client, StateApi}; -mod commands; +pub mod commands; pub(crate) mod parse; pub(crate) const LOG_TARGET: &str = "try-runtime::cli"; /// Possible commands of `try-runtime`. #[derive(Debug, Clone, clap::Subcommand)] pub enum Command { - /// Execute the migrations of the "local runtime". + /// Execute the migrations of the given runtime /// - /// This uses a custom runtime api call, namely "TryRuntime_on_runtime_upgrade". + /// This uses a custom runtime api call, namely "TryRuntime_on_runtime_upgrade". The code path + /// only triggers all of the `on_runtime_upgrade` hooks in the runtime, and optionally + /// `try_state`. /// - /// This always overwrites the wasm code with the local runtime (specified by `--chain`), to - /// ensure the new migrations are being executed. Re-executing already existing migrations is - /// evidently not very exciting. + /// See [`frame_try_runtime::TryRuntime`] and + /// [`commands::on_runtime_upgrade::OnRuntimeUpgradeCmd`] for more information. OnRuntimeUpgrade(commands::on_runtime_upgrade::OnRuntimeUpgradeCmd), /// Executes the given block against some state. /// - /// Unlike [`Command::OnRuntimeUpgrade`], this command needs two inputs: the state, and the - /// block data. Since the state could be cached (see [`State::Snap`]), different flags are - /// provided for both. `--block-at` and `--block-uri`, if provided, are only used for fetching - /// the block. For convenience, these flags can be both emitted, if the [`State::Live`] is - /// being used. - /// - /// Note that by default, this command does not overwrite the code, so in wasm execution, the - /// live chain's code is used. This can be disabled if desired, see - /// `ExecuteBlockCmd::overwrite_wasm_code`. + /// This uses a custom runtime api call, namely "TryRuntime_execute_block". 
Some checks, such + /// as state-root and signature checks are always disabled, and additional checks like + /// `try-state` can be enabled. /// - /// Note that if you do overwrite the wasm code, or generally use the local runtime for this, - /// you might - /// - not be able to decode the block, if the block format has changed. - /// - quite possibly will get a signature verification failure, since the spec and - /// transaction version are part of the signature's payload, and if they differ between - /// your local runtime and the remote counterparts, the signatures cannot be verified. - /// - almost certainly will get a state root mismatch, since, well, you are executing a - /// different state transition function. - /// - /// To make testing slightly more dynamic, you can disable the state root check by enabling - /// `ExecuteBlockCmd::no_check`. If you get signature verification errors, you should manually - /// tweak your local runtime's spec version to fix this. - /// - /// A subtle detail of execute block is that if you want to execute block 100 of a live chain - /// again, you need to scrape the state of block 99. This is already done automatically if you - /// use [`State::Live`], and the parent hash of the target block is used to scrape the state. - /// If [`State::Snap`] is being used, then this needs to be manually taken into consideration. - /// - /// This does not execute the same runtime api as normal block import do, namely - /// `Core_execute_block`. Instead, it uses `TryRuntime_execute_block`, which can optionally - /// skip state-root check (useful for trying a unreleased runtime), and can execute runtime - /// sanity checks as well. + /// See [`frame_try_runtime::TryRuntime`] and [`commands::execute_block::ExecuteBlockCmd`] for + /// more information. ExecuteBlock(commands::execute_block::ExecuteBlockCmd), /// Executes *the offchain worker hooks* of a given block against some state. /// - /// Similar to [`Command::ExecuteBlock`], this command needs two inputs: the state, and the - /// header data. Likewise, `--header-at` and `--header-uri` can be filled, or omitted if - /// [`State::Live`] is used. - /// - /// Similar to [`Command::ExecuteBlock`], this command does not overwrite the code, so in wasm - /// execution, the live chain's code is used. This can be disabled if desired, see - /// `OffchainWorkerCmd::overwrite_wasm_code`. - /// /// This executes the same runtime api as normal block import, namely /// `OffchainWorkerApi_offchain_worker`. + /// + /// See [`frame_try_runtime::TryRuntime`] and [`commands::offchain_worker::OffchainWorkerCmd`] + /// for more information. OffchainWorker(commands::offchain_worker::OffchainWorkerCmd), /// Follow the given chain's finalized blocks and apply all of its extrinsics. /// - /// This is essentially repeated calls to [`Command::ExecuteBlock`], whilst the local runtime - /// is always at use, the state root check is disabled, and the state is persisted between - /// executions. + /// This is essentially repeated calls to [`Command::ExecuteBlock`]. /// /// This allows the behavior of a new runtime to be inspected over a long period of time, with /// realistic transactions coming as input. @@ -382,7 +440,38 @@ pub enum Command { /// connections, starts listening for finalized block events. Upon first block notification, it /// initializes the state from the remote node, and starts applying that block, plus all the /// blocks that follow, to the same growing state. 
+ /// + /// This can only work if the block format between the remote chain and the new runtime being + /// tested has remained the same, otherwise block decoding might fail. FollowChain(commands::follow_chain::FollowChainCmd), + + /// Create a new snapshot file. + CreateSnapshot(commands::create_snapshot::CreateSnapshotCmd), +} + +#[derive(Debug, Clone)] +pub enum Runtime { + /// Use the given path to the wasm binary file. + /// + /// It must have been compiled with `try-runtime`. + Path(PathBuf), + + /// Use the code of the remote node, or the snapshot. + /// + /// In almost all cases, this is not what you want, because the code in the remote node does + /// not have any of the try-runtime custom runtime APIs. + Existing, +} + +impl FromStr for Runtime { + type Err = String; + + fn from_str(s: &str) -> Result { + Ok(match s.to_lowercase().as_ref() { + "existing" => Runtime::Existing, + x @ _ => Runtime::Path(x.into()), + }) + } } /// Shared parameters of the `try-runtime` commands @@ -390,13 +479,23 @@ pub enum Command { #[group(skip)] pub struct SharedParams { /// Shared parameters of substrate cli. + /// + /// TODO: this is only needed because try-runtime is embedded in the substrate CLI. It should + /// go away. #[allow(missing_docs)] #[clap(flatten)] pub shared_params: sc_cli::SharedParams, - /// The execution strategy that should be used. - #[arg(long, value_name = "STRATEGY", value_enum, ignore_case = true, default_value_t = ExecutionStrategy::Wasm)] - pub execution: ExecutionStrategy, + /// The runtime to use. + /// + /// Must be a path to a wasm blob, compiled with `try-runtime` feature flag. + /// + /// Or, `existing`, indicating that you don't want to overwrite the runtime. This will use + /// whatever comes from the remote node, or the snapshot file. This will most likely not work + /// against a remote node, as no (sane) blockchain should compile its onchain wasm with + /// `try-runtime` feature. + #[arg(long)] + pub runtime: Runtime, /// Type of wasm execution used. #[arg( @@ -424,13 +523,11 @@ pub struct SharedParams { #[arg(long)] pub heap_pages: Option, - /// When enabled, the spec check will not panic, and instead only show a warning. - #[arg(long)] - pub no_spec_check_panic: bool, - - /// State version that is used by the chain. - #[arg(long, default_value_t = StateVersion::V1, value_parser = parse::state_version)] - pub state_version: StateVersion, + /// Overwrite the `state_version`. + /// + /// Otherwise `remote-externalities` will automatically set the correct state version. + #[arg(long, value_parser = parse::state_version)] + pub overwrite_state_version: Option, } /// Our `try-runtime` command. @@ -445,6 +542,41 @@ pub struct TryRuntimeCmd { pub command: Command, } +/// A `Live` variant [`State`] +#[derive(Debug, Clone, clap::Args)] +pub struct LiveState { + /// The url to connect to. + #[arg( + short, + long, + value_parser = parse::url, + )] + uri: String, + + /// The block hash at which to fetch the state. + /// + /// If non provided, then the latest finalized head is used. + #[arg( + short, + long, + value_parser = parse::hash, + )] + at: Option, + + /// A pallet to scrape. Can be provided multiple times. If empty, entire chain state will + /// be scraped. + #[arg(short, long, num_args = 1..)] + pallet: Vec, + + /// Fetch the child-keys as well. + /// + /// Default is `false`, if specific `--pallets` are specified, `true` otherwise. In other + /// words, if you scrape the whole state the child tree data is included out of the box. 
+ /// Otherwise, it must be enabled explicitly using this flag. + #[arg(long)] + child_tree: bool, +} + /// The source of runtime *state* to use. #[derive(Debug, Clone, clap::Subcommand)] pub enum State { @@ -457,128 +589,167 @@ pub enum State { }, /// Use a live chain as the source of runtime state. - Live { - /// The url to connect to. - #[arg( - short, - long, - value_parser = parse::url, - )] - uri: String, - - /// The block hash at which to fetch the state. - /// - /// If non provided, then the latest finalized head is used. This is particularly useful - /// for [`Command::OnRuntimeUpgrade`]. - #[arg( - short, - long, - value_parser = parse::hash, - )] - at: Option, - - /// An optional state snapshot file to WRITE to. Not written if set to `None`. - #[arg(short, long)] - snapshot_path: Option, - - /// A pallet to scrape. Can be provided multiple times. If empty, entire chain state will - /// be scraped. - #[arg(short, long, num_args = 1..)] - pallet: Vec, - - /// Fetch the child-keys as well. - /// - /// Default is `false`, if specific `--pallets` are specified, `true` otherwise. In other - /// words, if you scrape the whole state the child tree data is included out of the box. - /// Otherwise, it must be enabled explicitly using this flag. - #[arg(long)] - child_tree: bool, - }, + Live(LiveState), } impl State { - /// Create the [`remote_externalities::Builder`] from self. - pub(crate) fn builder(&self) -> sc_cli::Result> + /// Create the [`remote_externalities::RemoteExternalities`] using [`remote-externalities`] from + /// self. + /// + /// This will override the code as it sees fit based on [`SharedParams::Runtime`]. It will also + /// check the spec-version and name. + pub(crate) async fn into_ext( + &self, + shared: &SharedParams, + executor: &WasmExecutor, + state_snapshot: Option, + ) -> sc_cli::Result> where Block::Hash: FromStr, + Block::Header: DeserializeOwned, + Block::Hash: DeserializeOwned, ::Err: Debug, { - Ok(match self { + let builder = match self { State::Snap { snapshot_path } => Builder::::new().mode(Mode::Offline(OfflineConfig { state_snapshot: SnapshotConfig::new(snapshot_path), })), - State::Live { snapshot_path, pallet, uri, at, child_tree } => { + State::Live(LiveState { pallet, uri, at, child_tree }) => { let at = match at { Some(at_str) => Some(hash_of::(at_str)?), None => None, }; - let mut builder = Builder::::new() - .mode(Mode::Online(OnlineConfig { - transport: uri.to_owned().into(), - state_snapshot: snapshot_path.as_ref().map(SnapshotConfig::new), - pallets: pallet.clone(), - scrape_children: true, - at, - })) - .inject_hashed_key( - &[twox_128(b"System"), twox_128(b"LastRuntimeUpgrade")].concat(), - ); - if *child_tree { - builder = builder.inject_default_child_tree_prefix(); - } - builder + Builder::::new().mode(Mode::Online(OnlineConfig { + at, + transport: uri.to_owned().into(), + state_snapshot, + pallets: pallet.clone(), + child_trie: *child_tree, + hashed_keys: vec![ + // we always download the code, but we almost always won't use it, based on + // `Runtime`. + well_known_keys::CODE.to_vec(), + // we will always download this key, since it helps detect if we should do + // runtime migration or not. + [twox_128(b"System"), twox_128(b"LastRuntimeUpgrade")].concat(), + [twox_128(b"System"), twox_128(b"Number")].concat(), + ], + hashed_prefixes: vec![], + })) }, - }) - } + }; + + // possibly overwrite the state version, should hardly be needed. 
+ let builder = if let Some(state_version) = shared.overwrite_state_version { + log::warn!( + target: LOG_TARGET, + "overwriting state version to {:?}, you better know what you are doing.", + state_version + ); + builder.overwrite_state_version(state_version) + } else { + builder + }; + + // then, we prepare to replace the code based on what the CLI wishes. + let maybe_code_to_overwrite = match shared.runtime { + Runtime::Path(ref path) => Some(std::fs::read(path).map_err(|e| { + format!("error while reading runtime file from {:?}: {:?}", path, e) + })?), + Runtime::Existing => None, + }; + + // build the main ext. + let mut ext = builder.build().await?; + + // actually replace the code if needed. + if let Some(new_code) = maybe_code_to_overwrite { + let original_code = ext + .execute_with(|| sp_io::storage::get(well_known_keys::CODE)) + .expect("':CODE:' is always downloaded in try-runtime-cli; qed"); + + // NOTE: see the impl notes of `read_runtime_version`, the ext is almost not used here, + // only as a backup. + ext.insert(well_known_keys::CODE.to_vec(), new_code.clone()); + let old_version = ::decode( + &mut &*executor.read_runtime_version(&original_code, &mut ext.ext()).unwrap(), + ) + .unwrap(); + log::info!( + target: LOG_TARGET, + "original spec: {:?}-{:?}, code hash: {:?}", + old_version.spec_name, + old_version.spec_version, + HexDisplay::from(BlakeTwo256::hash(&original_code).as_fixed_bytes()), + ); + let new_version = ::decode( + &mut &*executor.read_runtime_version(&new_code, &mut ext.ext()).unwrap(), + ) + .unwrap(); + log::info!( + target: LOG_TARGET, + "new spec: {:?}-{:?}, code hash: {:?}", + new_version.spec_name, + new_version.spec_version, + HexDisplay::from(BlakeTwo256::hash(&new_code).as_fixed_bytes()) + ); - /// Get the uri, if self is `Live`. - pub(crate) fn live_uri(&self) -> Option { - match self { - State::Live { uri, .. } => Some(uri.clone()), - _ => None, + if new_version.spec_name != old_version.spec_name { + return Err("Spec names must match.".into()) + } + } + + // whatever runtime we have in store now must have been compiled with try-runtime feature. 
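+	// (`ensure_try_runtime` below decodes the `RuntimeVersion` of whatever code now sits at
+	// `:CODE:` and checks that it declares the `TryRuntime` runtime API, via `RuntimeApiInfo::ID`.)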
+ if !ensure_try_runtime::(&executor, &mut ext) { + return Err("given runtime is NOT compiled with try-runtime feature!".into()) } + + Ok(ext) } } impl TryRuntimeCmd { - pub async fn run(&self, config: Configuration) -> sc_cli::Result<()> + pub async fn run(&self) -> sc_cli::Result<()> where Block: BlockT + DeserializeOwned, Block::Header: DeserializeOwned, Block::Hash: FromStr, ::Err: Debug, - NumberFor: FromStr, as FromStr>::Err: Debug, - ExecDispatch: NativeExecutionDispatch + 'static, + as TryInto>::Error: Debug, + NumberFor: FromStr, + HostFns: HostFunctions, { match &self.command { Command::OnRuntimeUpgrade(ref cmd) => - commands::on_runtime_upgrade::on_runtime_upgrade::( + commands::on_runtime_upgrade::on_runtime_upgrade::( self.shared.clone(), cmd.clone(), - config, ) .await, Command::OffchainWorker(cmd) => - commands::offchain_worker::offchain_worker::( + commands::offchain_worker::offchain_worker::( self.shared.clone(), cmd.clone(), - config, ) .await, Command::ExecuteBlock(cmd) => - commands::execute_block::execute_block::( + commands::execute_block::execute_block::( self.shared.clone(), cmd.clone(), - config, ) .await, Command::FollowChain(cmd) => - commands::follow_chain::follow_chain::( + commands::follow_chain::follow_chain::( + self.shared.clone(), + cmd.clone(), + ) + .await, + Command::CreateSnapshot(cmd) => + commands::create_snapshot::create_snapshot::( self.shared.clone(), cmd.clone(), - config, ) .await, } @@ -598,22 +769,6 @@ impl CliConfiguration for TryRuntimeCmd { } } -/// Extract `:code` from the given chain spec and return as `StorageData` along with the -/// corresponding `StorageKey`. -pub(crate) fn extract_code(spec: &Box) -> sc_cli::Result<(StorageKey, StorageData)> { - let genesis_storage = spec.build_storage()?; - let code = StorageData( - genesis_storage - .top - .get(well_known_keys::CODE) - .expect("code key must exist in genesis storage; qed") - .to_vec(), - ); - let code_key = StorageKey(well_known_keys::CODE.to_vec()); - - Ok((code_key, code)) -} - /// Get the hash type of the generic `Block` from a `hash_str`. pub(crate) fn hash_of(hash_str: &str) -> sc_cli::Result where @@ -625,67 +780,6 @@ where .map_err(|e| format!("Could not parse block hash: {:?}", e).into()) } -/// Check the spec_name of an `ext` -/// -/// If the spec names don't match, if `relaxed`, then it emits a warning, else it panics. -/// If the spec versions don't match, it only ever emits a warning. -pub(crate) async fn ensure_matching_spec( - uri: String, - expected_spec_name: String, - expected_spec_version: u32, - relaxed: bool, -) { - let rpc = ws_client(&uri).await.unwrap(); - match StateApi::::runtime_version(&rpc, None) - .await - .map(|version| (String::from(version.spec_name.clone()), version.spec_version)) - .map(|(spec_name, spec_version)| (spec_name.to_lowercase(), spec_version)) - { - Ok((name, version)) => { - // first, deal with spec name - if expected_spec_name.to_lowercase() == name { - log::info!(target: LOG_TARGET, "found matching spec name: {:?}", name); - } else { - let msg = format!( - "version mismatch: remote spec name: '{}', expected (local chain spec, aka. `--chain`): '{}'", - name, - expected_spec_name - ); - if relaxed { - log::warn!(target: LOG_TARGET, "{}", msg); - } else { - panic!("{}", msg); - } - } - - if expected_spec_version == version { - log::info!(target: LOG_TARGET, "found matching spec version: {:?}", version); - } else { - let msg = format!( - "spec version mismatch (local {} != remote {}). 
This could cause some issues.", - expected_spec_version, version - ); - if relaxed { - log::warn!(target: LOG_TARGET, "{}", msg); - } else { - panic!("{}", msg); - } - } - }, - Err(why) => { - let msg = format!( - "failed to fetch runtime version from {}: {:?}. Skipping the check", - uri, why - ); - if relaxed { - log::error!(target: LOG_TARGET, "{}", msg); - } else { - panic!("{}", msg); - } - }, - } -} - /// Build all extensions that we typically use. pub(crate) fn full_extensions() -> Extensions { let mut extensions = Extensions::default(); @@ -700,29 +794,43 @@ pub(crate) fn full_extensions() -> Extensions { extensions } -/// Build a default execution that we typically use. -pub(crate) fn build_executor( - shared: &SharedParams, - config: &sc_service::Configuration, -) -> NativeElseWasmExecutor { - let heap_pages = shared.heap_pages.or(config.default_heap_pages); - let max_runtime_instances = config.max_runtime_instances; - let runtime_cache_size = config.runtime_cache_size; - - NativeElseWasmExecutor::::new( - execution_method_from_cli(shared.wasm_method, shared.wasmtime_instantiation_strategy), +pub(crate) fn build_executor(shared: &SharedParams) -> WasmExecutor { + let heap_pages = shared.heap_pages.or(Some(2048)); + let max_runtime_instances = 8; + let runtime_cache_size = 2; + + WasmExecutor::new( + sc_executor::WasmExecutionMethod::Interpreted, heap_pages, max_runtime_instances, + None, runtime_cache_size, ) } +/// Ensure that the given `ext` is compiled with `try-runtime` +fn ensure_try_runtime( + executor: &WasmExecutor, + ext: &mut TestExternalities, +) -> bool { + use sp_api::RuntimeApiInfo; + let final_code = ext + .execute_with(|| sp_io::storage::get(well_known_keys::CODE)) + .expect("':CODE:' is always downloaded in try-runtime-cli; qed"); + let final_version = ::decode( + &mut &*executor.read_runtime_version(&final_code, &mut ext.ext()).unwrap(), + ) + .unwrap(); + final_version + .api_version(&>::ID) + .is_some() +} + /// Execute the given `method` and `data` on top of `ext`, returning the results (encoded) and the /// state `changes`. 
-pub(crate) fn state_machine_call( +pub(crate) fn state_machine_call( ext: &TestExternalities, - executor: &NativeElseWasmExecutor, - execution: sc_cli::ExecutionStrategy, + executor: &WasmExecutor, method: &'static str, data: &[u8], extensions: Extensions, @@ -738,7 +846,7 @@ pub(crate) fn state_machine_call(Into::into)?; @@ -749,28 +857,23 @@ pub(crate) fn state_machine_call( +pub(crate) fn state_machine_call_with_proof( ext: &TestExternalities, - executor: &NativeElseWasmExecutor, - execution: sc_cli::ExecutionStrategy, + executor: &WasmExecutor, method: &'static str, data: &[u8], extensions: Extensions, ) -> sc_cli::Result<(OverlayedChanges, Vec)> { use parity_scale_codec::Encode; - use sp_core::hexdisplay::HexDisplay; let mut changes = Default::default(); let backend = ext.backend.clone(); let runtime_code_backend = sp_state_machine::backend::BackendRuntimeCode::new(&backend); - let proving_backend = TrieBackendBuilder::wrap(&backend).with_recorder(Default::default()).build(); - let runtime_code = runtime_code_backend.runtime_code()?; let pre_root = *backend.root(); - let encoded_results = StateMachine::new( &proving_backend, &mut changes, @@ -781,7 +884,7 @@ pub(crate) fn state_machine_call_with_proof(Into::into)?; @@ -792,11 +895,24 @@ pub(crate) fn state_machine_call_with_proof(pre_root) - .map_err(|e| format!("failed to generate compact proof {}: {:?}", method, e))?; + .map_err(|e| { + log::error!(target: LOG_TARGET, "failed to generate compact proof {}: {:?}", method, e); + e + }) + .unwrap_or(CompactProof { encoded_nodes: Default::default() }); let compact_proof_size = compact_proof.encoded_size(); let compressed_proof = zstd::stream::encode_all(&compact_proof.encode()[..], 0) - .map_err(|e| format!("failed to generate compact proof {}: {:?}", method, e))?; + .map_err(|e| { + log::error!( + target: LOG_TARGET, + "failed to generate compressed proof {}: {:?}", + method, + e + ); + e + }) + .unwrap_or_default(); let proof_nodes = proof.into_nodes(); @@ -814,8 +930,8 @@ pub(crate) fn state_machine_call_with_proof>()), + "proof: 0x{}... / {} nodes", + HexDisplay::from(&proof_nodes.iter().flatten().cloned().take(10).collect::>()), proof_nodes.len() ); log::debug!(target: LOG_TARGET, "proof size: {}", humanize(proof_size)); @@ -825,28 +941,13 @@ pub(crate) fn state_machine_call_with_proof( - ext: &TestExternalities, - executor: &NativeElseWasmExecutor, -) -> (String, u32, sp_core::storage::StateVersion) { - let (_, encoded) = state_machine_call::( - ext, - executor, - sc_cli::ExecutionStrategy::NativeElseWasm, - "Core_version", - &[], - Default::default(), - ) - .expect("all runtimes should have version; qed"); - ::decode(&mut &*encoded) - .map_err(|e| format!("failed to decode output: {:?}", e)) - .map(|v| { - let state_version = v.state_version(); - (v.spec_name.into(), v.spec_version, state_version) - }) - .expect("all runtimes should have version; qed") +pub(crate) fn rpc_err_handler(error: impl Debug) -> &'static str { + log::error!(target: LOG_TARGET, "rpc error: {:?}", error); + "rpc error." }
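For reference, the `(Weight, Weight)` pair returned by `TryRuntime_on_runtime_upgrade` (decoded in `on_runtime_upgrade` above) can be post-processed as in the following minimal sketch; the helper names here are illustrative and not part of this patch:

```rust
use parity_scale_codec::Decode;
use sp_weights::Weight;

/// Decode the SCALE-encoded result of `TryRuntime_on_runtime_upgrade`: the
/// consumed weight, followed by the total block weight.
fn decode_upgrade_result(mut encoded: &[u8]) -> Result<(Weight, Weight), String> {
	<(Weight, Weight)>::decode(&mut encoded)
		.map_err(|e| format!("failed to decode weight: {:?}", e))
}

/// The two percentages logged by the command: consumed ref-time and proof-size,
/// relative to the block totals.
fn consumed_ratio(weight: Weight, total: Weight) -> (f64, f64) {
	(
		100.0 * weight.ref_time() as f64 / total.ref_time().max(1) as f64,
		100.0 * weight.proof_size() as f64 / total.proof_size().max(1) as f64,
	)
}
```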