From 11d1a3955e4bfd536e7716132f5440d7e75e43e5 Mon Sep 17 00:00:00 2001 From: joe petrowski <25483142+joepetrowski@users.noreply.github.com> Date: Sat, 16 Sep 2023 12:42:43 +0200 Subject: [PATCH 01/69] Include `bitvec` in `std` for Broker Pallet (#1558) When adding this pallet to the [Coretime Chain](https://github.com/paritytech/polkadot-sdk/pull/1479), `bitvec`'s default `std` feature pulls the Rust standard library into the `no_std` Wasm build, producing a duplicate `panic_impl` lang item (rustc error below). Disabling `bitvec`'s default features and enabling `bitvec/std` only from the pallet's own `std` feature fixes it. ``` error: failed to run custom build command for `coretime-rococo-runtime v1.0.0 (/home/joe/parity/polkadot-sdk/cumulus/parachains/runtimes/coretime/coretime-rococo)` Caused by: process didn't exit successfully: `/home/joe/parity/polkadot-sdk/target/debug/build/coretime-rococo-runtime-7943703d2770a119/build-script-build` (exit status: 1) --- stdout Information that should be included in a bug report. Executing build command: RUSTFLAGS="-C target-cpu=mvp -C target-feature=-sign-ext -C link-arg=--export-table -Clink-arg=--export=__heap_base -C link-arg=--import-memory " SKIP_WASM_BUILD="" "/home/joe/.rustup/toolchains/stable-x86_64-unknown-linux-gnu/bin/cargo" "rustc" "--target=wasm32-unknown-unknown" "--manifest-path=/home/joe/parity/polkadot-sdk/target/debug/wbuild/coretime-rococo-runtime/Cargo.toml" "--color=always" "--profile" "release" Using rustc version: rustc 1.71.1 (eb26296b5 2023-08-03) --- stderr Compiling sp-io v23.0.0 (/home/joe/parity/polkadot-sdk/substrate/primitives/io) Compiling coretime-rococo-runtime v1.0.0 (/home/joe/parity/polkadot-sdk/cumulus/parachains/runtimes/coretime/coretime-rococo) error[E0152]: found duplicate lang item `panic_impl` --> /home/joe/parity/polkadot-sdk/substrate/primitives/io/src/lib.rs:1749:1 | 1749 | pub fn panic(info: &core::panic::PanicInfo) -> ! { | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | = note: the lang item is first defined in crate `std` (which `bitvec` depends on) = note: first definition in `std` loaded from /home/joe/.rustup/toolchains/stable-x86_64-unknown-linux-gnu/lib/rustlib/wasm32-unknown-unknown/lib/libstd-67dfbacfb4b441ef.rlib = note: second definition in the local crate (`sp_io`) For more information about this error, try `rustc --explain E0152`. ``` --- substrate/frame/broker/Cargo.toml | 3 ++- substrate/frame/broker/README.md | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/substrate/frame/broker/Cargo.toml b/substrate/frame/broker/Cargo.toml index edb3c5f63bbe..d6c5399d56eb 100644 --- a/substrate/frame/broker/Cargo.toml +++ b/substrate/frame/broker/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive"] } scale-info = { version = "2.0.0", default-features = false, features = ["derive"] } -bitvec = "1" +bitvec = { version = "1.0.0", default-features = false } sp-std = { path = "../../primitives/std", default-features = false} sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false} sp-core = { path = "../../primitives/core", default-features = false} @@ -30,6 +30,7 @@ sp-io = { path = "../../primitives/io" } default = [ "std" ] std = [ + "bitvec/std", "codec/std", "frame-benchmarking?/std", "frame-support/std", diff --git a/substrate/frame/broker/README.md b/substrate/frame/broker/README.md index 65b6179863e3..a3c4b251a452 100644 --- a/substrate/frame/broker/README.md +++ b/substrate/frame/broker/README.md @@ -4,7 +4,7 @@ Brokerage tool for managing Polkadot Core scheduling.
Properly described in RFC-0001 Agile Coretime. -## Implemnentation Specifics +## Implementation Specifics ### Core Mask Bits From d787269cdfba050d5f966c377df645094a53df14 Mon Sep 17 00:00:00 2001 From: Gavin Wood Date: Sun, 17 Sep 2023 12:19:18 +0100 Subject: [PATCH 02/69] FRAME: Revamp Preimage pallet to use Consideration (#1363) Make Preimage pallet use Consideration instead of handling deposits directly. Other half of paritytech/substrate#13666. Depends/based on #1361. Script for the lazy migration that should be run manually once: [migrate-preimage-lazy.py](https://github.com/ggwpez/substrate-scripts/blob/master/migrate-preimage-lazy.py). ## TODO - [x] Migration code. --------- Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Oliver Tale-Yazdi Co-authored-by: command-bot <> Co-authored-by: Francisco Aguirre Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> --- .../collectives-polkadot/src/lib.rs | 18 +- .../src/weights/pallet_preimage.rs | 15 + polkadot/runtime/kusama/src/lib.rs | 16 +- .../kusama/src/weights/pallet_preimage.rs | 15 + polkadot/runtime/polkadot/src/lib.rs | 19 +- .../polkadot/src/weights/pallet_preimage.rs | 15 + polkadot/runtime/rococo/src/lib.rs | 16 +- .../rococo/src/weights/pallet_preimage.rs | 15 + polkadot/runtime/westend/src/lib.rs | 18 +- .../westend/src/weights/pallet_preimage.rs | 15 + substrate/bin/node/runtime/src/lib.rs | 15 +- .../balances/src/tests/currency_tests.rs | 14 +- substrate/frame/democracy/src/tests.rs | 3 +- substrate/frame/preimage/src/benchmarking.rs | 77 +-- substrate/frame/preimage/src/lib.rs | 224 ++++++--- substrate/frame/preimage/src/migration.rs | 33 +- substrate/frame/preimage/src/mock.rs | 35 +- substrate/frame/preimage/src/tests.rs | 68 ++- substrate/frame/preimage/src/weights.rs | 438 +++++++++++------- substrate/frame/referenda/src/mock.rs | 3 +- substrate/frame/scheduler/src/mock.rs | 5 +- substrate/frame/support/src/dispatch.rs | 9 + substrate/frame/support/src/traits.rs | 4 +- substrate/frame/support/src/traits/storage.rs | 40 +- substrate/frame/whitelist/src/mock.rs | 3 +- 25 files changed, 787 insertions(+), 346 deletions(-) diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs index 238db08a0c9e..d9125c2645ef 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs @@ -67,7 +67,10 @@ use frame_support::{ construct_runtime, dispatch::DispatchClass, parameter_types, - traits::{ConstBool, ConstU16, ConstU32, ConstU64, ConstU8, EitherOfDiverse, InstanceFilter}, + traits::{ + fungible::HoldConsideration, ConstBool, ConstU16, ConstU32, ConstU64, ConstU8, + EitherOfDiverse, InstanceFilter, LinearStoragePrice, + }, weights::{ConstantMultiplier, Weight}, PalletId, }; @@ -209,7 +212,7 @@ impl pallet_balances::Config for Runtime { type ReserveIdentifier = [u8; 8]; type RuntimeHoldReason = RuntimeHoldReason; type FreezeIdentifier = (); - type MaxHolds = ConstU32<0>; + type MaxHolds = ConstU32<1>; type MaxFreezes = ConstU32<0>; } @@ -545,6 +548,7 @@ impl pallet_scheduler::Config for Runtime { parameter_types! 
{ pub const PreimageBaseDeposit: Balance = deposit(2, 64); pub const PreimageByteDeposit: Balance = deposit(0, 1); + pub const PreimageHoldReason: RuntimeHoldReason = RuntimeHoldReason::Preimage(pallet_preimage::HoldReason::Preimage); } impl pallet_preimage::Config for Runtime { @@ -552,8 +556,12 @@ impl pallet_preimage::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Currency = Balances; type ManagerOrigin = EnsureRoot; - type BaseDeposit = PreimageBaseDeposit; - type ByteDeposit = PreimageByteDeposit; + type Consideration = HoldConsideration< + AccountId, + Balances, + PreimageHoldReason, + LinearStoragePrice, + >; } // Create the runtime by composing the FRAME pallets that were previously configured. @@ -589,7 +597,7 @@ construct_runtime!( Utility: pallet_utility::{Pallet, Call, Event} = 40, Multisig: pallet_multisig::{Pallet, Call, Storage, Event} = 41, Proxy: pallet_proxy::{Pallet, Call, Storage, Event} = 42, - Preimage: pallet_preimage::{Pallet, Call, Storage, Event} = 43, + Preimage: pallet_preimage::{Pallet, Call, Storage, Event, HoldReason} = 43, Scheduler: pallet_scheduler::{Pallet, Call, Storage, Event} = 44, // The main stage. diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_preimage.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_preimage.rs index cf2f0ae39da6..e9f565d9387d 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_preimage.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_preimage.rs @@ -50,6 +50,21 @@ use core::marker::PhantomData; /// Weight functions for `pallet_preimage`. pub struct WeightInfo(PhantomData); impl pallet_preimage::WeightInfo for WeightInfo { + fn ensure_updated(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `193 + n * (91 ±0)` + // Estimated: `3593 + n * (2566 ±0)` + // Minimum execution time: 2_000_000 picoseconds. + Weight::from_parts(2_000_000, 3593) + // Standard Error: 13_720 + .saturating_add(Weight::from_parts(17_309_199, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes(1_u64)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2566).saturating_mul(n.into())) + } + /// Storage: `Preimage::StatusFor` (r:1 w:1) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:0 w:1) diff --git a/polkadot/runtime/kusama/src/lib.rs b/polkadot/runtime/kusama/src/lib.rs index 659a7052d2b7..fc9fd61790ea 100644 --- a/polkadot/runtime/kusama/src/lib.rs +++ b/polkadot/runtime/kusama/src/lib.rs @@ -63,8 +63,9 @@ use frame_election_provider_support::{ use frame_support::{ construct_runtime, parameter_types, traits::{ - ConstU32, Contains, EitherOf, EitherOfDiverse, InstanceFilter, KeyOwnerProofSystem, - PrivilegeCmp, ProcessMessage, ProcessMessageError, StorageMapShim, WithdrawReasons, + fungible::HoldConsideration, ConstU32, Contains, EitherOf, EitherOfDiverse, InstanceFilter, + KeyOwnerProofSystem, LinearStoragePrice, PrivilegeCmp, ProcessMessage, ProcessMessageError, + StorageMapShim, WithdrawReasons, }, weights::{ConstantMultiplier, WeightMeter}, PalletId, @@ -239,6 +240,7 @@ impl pallet_scheduler::Config for Runtime { parameter_types! 
{ pub const PreimageBaseDeposit: Balance = deposit(2, 64); pub const PreimageByteDeposit: Balance = deposit(0, 1); + pub const PreimageHoldReason: RuntimeHoldReason = RuntimeHoldReason::Preimage(pallet_preimage::HoldReason::Preimage); } impl pallet_preimage::Config for Runtime { @@ -246,8 +248,12 @@ impl pallet_preimage::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Currency = Balances; type ManagerOrigin = EnsureRoot; - type BaseDeposit = PreimageBaseDeposit; - type ByteDeposit = PreimageByteDeposit; + type Consideration = HoldConsideration< + AccountId, + Balances, + PreimageHoldReason, + LinearStoragePrice, + >; } parameter_types! { @@ -1560,7 +1566,7 @@ construct_runtime! { Multisig: pallet_multisig::{Pallet, Call, Storage, Event} = 31, // Preimage registrar. - Preimage: pallet_preimage::{Pallet, Call, Storage, Event} = 32, + Preimage: pallet_preimage::{Pallet, Call, Storage, Event, HoldReason} = 32, // Bounties modules. Bounties: pallet_bounties::{Pallet, Call, Storage, Event} = 35, diff --git a/polkadot/runtime/kusama/src/weights/pallet_preimage.rs b/polkadot/runtime/kusama/src/weights/pallet_preimage.rs index 8c04eb2cd4ea..d0bf20566609 100644 --- a/polkadot/runtime/kusama/src/weights/pallet_preimage.rs +++ b/polkadot/runtime/kusama/src/weights/pallet_preimage.rs @@ -50,6 +50,21 @@ use core::marker::PhantomData; /// Weight functions for `pallet_preimage`. pub struct WeightInfo(PhantomData); impl pallet_preimage::WeightInfo for WeightInfo { + fn ensure_updated(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `193 + n * (91 ±0)` + // Estimated: `3593 + n * (2566 ±0)` + // Minimum execution time: 2_000_000 picoseconds. + Weight::from_parts(2_000_000, 3593) + // Standard Error: 13_720 + .saturating_add(Weight::from_parts(17_309_199, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes(1_u64)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2566).saturating_mul(n.into())) + } + /// Storage: Preimage StatusFor (r:1 w:1) /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) /// Storage: Preimage PreimageFor (r:0 w:1) diff --git a/polkadot/runtime/polkadot/src/lib.rs b/polkadot/runtime/polkadot/src/lib.rs index 45ea561b33fa..6d06ee469333 100644 --- a/polkadot/runtime/polkadot/src/lib.rs +++ b/polkadot/runtime/polkadot/src/lib.rs @@ -47,8 +47,9 @@ use frame_election_provider_support::{ use frame_support::{ construct_runtime, parameter_types, traits::{ - ConstU32, Contains, EitherOf, EitherOfDiverse, InstanceFilter, KeyOwnerProofSystem, - PrivilegeCmp, ProcessMessage, ProcessMessageError, WithdrawReasons, + fungible::HoldConsideration, ConstU32, Contains, EitherOf, EitherOfDiverse, InstanceFilter, + KeyOwnerProofSystem, LinearStoragePrice, PrivilegeCmp, ProcessMessage, ProcessMessageError, + WithdrawReasons, }, weights::{ConstantMultiplier, WeightMeter}, PalletId, @@ -221,9 +222,9 @@ impl pallet_scheduler::Config for Runtime { } parameter_types! 
{ - pub const PreimageMaxSize: u32 = 4096 * 1024; pub const PreimageBaseDeposit: Balance = deposit(2, 64); pub const PreimageByteDeposit: Balance = deposit(0, 1); + pub const PreimageHoldReason: RuntimeHoldReason = RuntimeHoldReason::Preimage(pallet_preimage::HoldReason::Preimage); } impl pallet_preimage::Config for Runtime { @@ -231,8 +232,12 @@ impl pallet_preimage::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Currency = Balances; type ManagerOrigin = EnsureRoot; - type BaseDeposit = PreimageBaseDeposit; - type ByteDeposit = PreimageByteDeposit; + type Consideration = HoldConsideration< + AccountId, + Balances, + PreimageHoldReason, + LinearStoragePrice, + >; } parameter_types! { @@ -297,7 +302,7 @@ impl pallet_balances::Config for Runtime { type WeightInfo = weights::pallet_balances::WeightInfo; type RuntimeHoldReason = RuntimeHoldReason; type FreezeIdentifier = (); - type MaxHolds = ConstU32<0>; + type MaxHolds = ConstU32<1>; type MaxFreezes = ConstU32<0>; } @@ -1326,7 +1331,7 @@ construct_runtime! { // Basic stuff; balances is uncallable initially. System: frame_system::{Pallet, Call, Storage, Config, Event} = 0, Scheduler: pallet_scheduler::{Pallet, Call, Storage, Event} = 1, - Preimage: pallet_preimage::{Pallet, Call, Storage, Event} = 10, + Preimage: pallet_preimage::{Pallet, Call, Storage, Event, HoldReason} = 10, // Babe must be before session. Babe: pallet_babe::{Pallet, Call, Storage, Config, ValidateUnsigned} = 2, diff --git a/polkadot/runtime/polkadot/src/weights/pallet_preimage.rs b/polkadot/runtime/polkadot/src/weights/pallet_preimage.rs index 91605e072f09..a283f09eae5f 100644 --- a/polkadot/runtime/polkadot/src/weights/pallet_preimage.rs +++ b/polkadot/runtime/polkadot/src/weights/pallet_preimage.rs @@ -50,6 +50,21 @@ use core::marker::PhantomData; /// Weight functions for `pallet_preimage`. pub struct WeightInfo(PhantomData); impl pallet_preimage::WeightInfo for WeightInfo { + fn ensure_updated(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `193 + n * (91 ±0)` + // Estimated: `3593 + n * (2566 ±0)` + // Minimum execution time: 2_000_000 picoseconds. 
+ Weight::from_parts(2_000_000, 3593) + // Standard Error: 13_720 + .saturating_add(Weight::from_parts(17_309_199, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes(1_u64)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2566).saturating_mul(n.into())) + } + /// Storage: Preimage StatusFor (r:1 w:1) /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) /// Storage: Preimage PreimageFor (r:0 w:1) diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index e043852901f1..6446ac4f00c1 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -60,8 +60,9 @@ use beefy_primitives::{ use frame_support::{ construct_runtime, parameter_types, traits::{ - Contains, EitherOfDiverse, InstanceFilter, KeyOwnerProofSystem, LockIdentifier, - PrivilegeCmp, ProcessMessage, ProcessMessageError, StorageMapShim, WithdrawReasons, + fungible::HoldConsideration, Contains, EitherOfDiverse, InstanceFilter, + KeyOwnerProofSystem, LinearStoragePrice, LockIdentifier, PrivilegeCmp, ProcessMessage, + ProcessMessageError, StorageMapShim, WithdrawReasons, }, weights::{ConstantMultiplier, WeightMeter}, PalletId, @@ -224,6 +225,7 @@ impl pallet_scheduler::Config for Runtime { parameter_types! { pub const PreimageBaseDeposit: Balance = deposit(2, 64); pub const PreimageByteDeposit: Balance = deposit(0, 1); + pub const PreimageHoldReason: RuntimeHoldReason = RuntimeHoldReason::Preimage(pallet_preimage::HoldReason::Preimage); } impl pallet_preimage::Config for Runtime { @@ -231,8 +233,12 @@ impl pallet_preimage::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Currency = Balances; type ManagerOrigin = EnsureRoot; - type BaseDeposit = PreimageBaseDeposit; - type ByteDeposit = PreimageByteDeposit; + type Consideration = HoldConsideration< + AccountId, + Balances, + PreimageHoldReason, + LinearStoragePrice, + >; } parameter_types! { @@ -1449,7 +1455,7 @@ construct_runtime! { Multisig: pallet_multisig::{Pallet, Call, Storage, Event} = 31, // Preimage registrar. - Preimage: pallet_preimage::{Pallet, Call, Storage, Event} = 32, + Preimage: pallet_preimage::{Pallet, Call, Storage, Event, HoldReason} = 32, // Bounties modules. Bounties: pallet_bounties::{Pallet, Call, Storage, Event} = 35, diff --git a/polkadot/runtime/rococo/src/weights/pallet_preimage.rs b/polkadot/runtime/rococo/src/weights/pallet_preimage.rs index b067e6a6d91e..e051ebd5bbab 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_preimage.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_preimage.rs @@ -47,6 +47,21 @@ use core::marker::PhantomData; /// Weight functions for `pallet_preimage`. pub struct WeightInfo(PhantomData); impl pallet_preimage::WeightInfo for WeightInfo { + fn ensure_updated(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `193 + n * (91 ±0)` + // Estimated: `3593 + n * (2566 ±0)` + // Minimum execution time: 2_000_000 picoseconds. 
+ Weight::from_parts(2_000_000, 3593) + // Standard Error: 13_720 + .saturating_add(Weight::from_parts(17_309_199, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes(1_u64)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2566).saturating_mul(n.into())) + } + /// Storage: Preimage StatusFor (r:1 w:1) /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) /// Storage: Preimage PreimageFor (r:0 w:1) diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 7dfc781d2467..9a7e7c4548c8 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -29,8 +29,8 @@ use frame_election_provider_support::{bounds::ElectionBoundsBuilder, onchain, Se use frame_support::{ construct_runtime, parameter_types, traits::{ - ConstU32, InstanceFilter, KeyOwnerProofSystem, ProcessMessage, ProcessMessageError, - WithdrawReasons, + fungible::HoldConsideration, ConstU32, InstanceFilter, KeyOwnerProofSystem, + LinearStoragePrice, ProcessMessage, ProcessMessageError, WithdrawReasons, }, weights::{ConstantMultiplier, WeightMeter}, PalletId, @@ -193,9 +193,9 @@ impl pallet_scheduler::Config for Runtime { } parameter_types! { - pub const PreimageMaxSize: u32 = 4096 * 1024; pub const PreimageBaseDeposit: Balance = deposit(2, 64); pub const PreimageByteDeposit: Balance = deposit(0, 1); + pub const PreimageHoldReason: RuntimeHoldReason = RuntimeHoldReason::Preimage(pallet_preimage::HoldReason::Preimage); } impl pallet_preimage::Config for Runtime { @@ -203,8 +203,12 @@ impl pallet_preimage::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Currency = Balances; type ManagerOrigin = EnsureRoot; - type BaseDeposit = PreimageBaseDeposit; - type ByteDeposit = PreimageByteDeposit; + type Consideration = HoldConsideration< + AccountId, + Balances, + PreimageHoldReason, + LinearStoragePrice, + >; } parameter_types! { @@ -268,7 +272,7 @@ impl pallet_balances::Config for Runtime { type WeightInfo = weights::pallet_balances::WeightInfo; type RuntimeHoldReason = RuntimeHoldReason; type FreezeIdentifier = (); - type MaxHolds = ConstU32<0>; + type MaxHolds = ConstU32<1>; type MaxFreezes = ConstU32<0>; } @@ -1311,7 +1315,7 @@ construct_runtime! { Scheduler: pallet_scheduler::{Pallet, Call, Storage, Event} = 20, // Preimage registrar. - Preimage: pallet_preimage::{Pallet, Call, Storage, Event} = 28, + Preimage: pallet_preimage::{Pallet, Call, Storage, Event, HoldReason} = 28, // Sudo. Sudo: pallet_sudo::{Pallet, Call, Storage, Event, Config} = 21, diff --git a/polkadot/runtime/westend/src/weights/pallet_preimage.rs b/polkadot/runtime/westend/src/weights/pallet_preimage.rs index 39d3626b189f..0c4677a7d969 100644 --- a/polkadot/runtime/westend/src/weights/pallet_preimage.rs +++ b/polkadot/runtime/westend/src/weights/pallet_preimage.rs @@ -50,6 +50,21 @@ use core::marker::PhantomData; /// Weight functions for `pallet_preimage`. pub struct WeightInfo(PhantomData); impl pallet_preimage::WeightInfo for WeightInfo { + fn ensure_updated(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `193 + n * (91 ±0)` + // Estimated: `3593 + n * (2566 ±0)` + // Minimum execution time: 2_000_000 picoseconds. 
+ Weight::from_parts(2_000_000, 3593) + // Standard Error: 13_720 + .saturating_add(Weight::from_parts(17_309_199, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes(1_u64)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2566).saturating_mul(n.into())) + } + /// Storage: Preimage StatusFor (r:1 w:1) /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) /// Storage: Preimage PreimageFor (r:0 w:1) diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 4f34e4ecd812..a97f2b09118c 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -35,11 +35,12 @@ use frame_support::{ pallet_prelude::Get, parameter_types, traits::{ - fungible::{Balanced, Credit, ItemOf}, + fungible::{Balanced, Credit, HoldConsideration, ItemOf}, tokens::{nonfungibles_v2::Inspect, GetSalary, PayFromAccount}, AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU16, ConstU32, Contains, Currency, EitherOfDiverse, EqualPrivilegeOnly, Imbalance, InsideBoth, InstanceFilter, - KeyOwnerProofSystem, LockIdentifier, Nothing, OnUnbalanced, WithdrawReasons, + KeyOwnerProofSystem, LinearStoragePrice, LockIdentifier, Nothing, OnUnbalanced, + WithdrawReasons, }, weights::{ constants::{ @@ -447,10 +448,10 @@ impl pallet_glutton::Config for Runtime { } parameter_types! { - pub const PreimageMaxSize: u32 = 4096 * 1024; pub const PreimageBaseDeposit: Balance = 1 * DOLLARS; // One cent: $10,000 / MB pub const PreimageByteDeposit: Balance = 1 * CENTS; + pub const PreimageHoldReason: RuntimeHoldReason = RuntimeHoldReason::Preimage(pallet_preimage::HoldReason::Preimage); } impl pallet_preimage::Config for Runtime { @@ -458,8 +459,12 @@ impl pallet_preimage::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Currency = Balances; type ManagerOrigin = EnsureRoot; - type BaseDeposit = PreimageBaseDeposit; - type ByteDeposit = PreimageByteDeposit; + type Consideration = HoldConsideration< + AccountId, + Balances, + PreimageHoldReason, + LinearStoragePrice, + >; } parameter_types! 
{ diff --git a/substrate/frame/balances/src/tests/currency_tests.rs b/substrate/frame/balances/src/tests/currency_tests.rs index 969731b49a23..2449638788dd 100644 --- a/substrate/frame/balances/src/tests/currency_tests.rs +++ b/substrate/frame/balances/src/tests/currency_tests.rs @@ -22,7 +22,7 @@ use crate::NegativeImbalance; use frame_support::traits::{ BalanceStatus::{Free, Reserved}, Currency, - ExistenceRequirement::{self, AllowDeath}, + ExistenceRequirement::{self, AllowDeath, KeepAlive}, Hooks, LockIdentifier, LockableCurrency, NamedReservableCurrency, ReservableCurrency, WithdrawReasons, }; @@ -33,6 +33,18 @@ const ID_2: LockIdentifier = *b"2 "; pub const CALL: &::RuntimeCall = &RuntimeCall::Balances(crate::Call::transfer_allow_death { dest: 0, value: 0 }); +#[test] +fn ed_should_work() { + ExtBuilder::default().existential_deposit(1).build_and_execute_with(|| { + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), 1, 1000)); + assert_noop!( + >::transfer(&1, &10, 1000, KeepAlive), + TokenError::NotExpendable + ); + assert_ok!(>::transfer(&1, &10, 1000, AllowDeath)); + }); +} + #[test] fn set_lock_with_amount_zero_removes_lock() { ExtBuilder::default() diff --git a/substrate/frame/democracy/src/tests.rs b/substrate/frame/democracy/src/tests.rs index e5cfcc5b4002..9b885fb5915b 100644 --- a/substrate/frame/democracy/src/tests.rs +++ b/substrate/frame/democracy/src/tests.rs @@ -111,8 +111,7 @@ impl pallet_preimage::Config for Test { type WeightInfo = (); type Currency = Balances; type ManagerOrigin = EnsureRoot; - type BaseDeposit = ConstU64<0>; - type ByteDeposit = ConstU64<0>; + type Consideration = (); } impl pallet_scheduler::Config for Test { diff --git a/substrate/frame/preimage/src/benchmarking.rs b/substrate/frame/preimage/src/benchmarking.rs index b719c28be641..d7c643ff9042 100644 --- a/substrate/frame/preimage/src/benchmarking.rs +++ b/substrate/frame/preimage/src/benchmarking.rs @@ -18,7 +18,7 @@ //! Preimage pallet benchmarking. use super::*; -use frame_benchmarking::v1::{account, benchmarks, whitelist_account, BenchmarkError}; +use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller, BenchmarkError}; use frame_support::assert_ok; use frame_system::RawOrigin; use sp_runtime::traits::Bounded; @@ -26,11 +26,9 @@ use sp_std::{prelude::*, vec}; use crate::Pallet as Preimage; -const SEED: u32 = 0; - -fn funded_account(name: &'static str, index: u32) -> T::AccountId { - let caller: T::AccountId = account(name, index, SEED); - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); +fn funded_account() -> T::AccountId { + let caller: T::AccountId = whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); caller } @@ -49,8 +47,7 @@ benchmarks! { // Expensive note - will reserve. note_preimage { let s in 0 .. MAX_SIZE; - let caller = funded_account::("caller", 0); - whitelist_account!(caller); + let caller = funded_account::(); let (preimage, hash) = sized_preimage_and_hash::(s); }: _(RawOrigin::Signed(caller), preimage) verify { @@ -59,8 +56,7 @@ benchmarks! { // Cheap note - will not reserve since it was requested. note_requested_preimage { let s in 0 .. MAX_SIZE; - let caller = funded_account::("caller", 0); - whitelist_account!(caller); + let caller = funded_account::(); let (preimage, hash) = sized_preimage_and_hash::(s); assert_ok!(Preimage::::request_preimage( T::ManagerOrigin::try_successful_origin() @@ -89,8 +85,7 @@ benchmarks! { // Expensive unnote - will unreserve. 
unnote_preimage { - let caller = funded_account::("caller", 0); - whitelist_account!(caller); + let caller = funded_account::(); let (preimage, hash) = preimage_and_hash::(); assert_ok!(Preimage::::note_preimage(RawOrigin::Signed(caller.clone()).into(), preimage)); }: _(RawOrigin::Signed(caller), hash) @@ -115,16 +110,15 @@ benchmarks! { // Expensive request - will unreserve the noter's deposit. request_preimage { let (preimage, hash) = preimage_and_hash::(); - let noter = funded_account::("noter", 0); - whitelist_account!(noter); + let noter = funded_account::(); assert_ok!(Preimage::::note_preimage(RawOrigin::Signed(noter.clone()).into(), preimage)); }: _( T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, hash ) verify { - let deposit = T::BaseDeposit::get() + T::ByteDeposit::get() * MAX_SIZE.into(); - let s = RequestStatus::Requested { deposit: Some((noter, deposit)), count: 1, len: Some(MAX_SIZE) }; - assert_eq!(StatusFor::::get(&hash), Some(s)); + let ticket = TicketOf::::new(&noter, Footprint { count: 1, size: MAX_SIZE as u64 }).unwrap(); + let s = RequestStatus::Requested { maybe_ticket: Some((noter, ticket)), count: 1, maybe_len: Some(MAX_SIZE) }; + assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } // Cheap request - would unreserve the deposit but none was held. request_no_deposit_preimage { @@ -138,8 +132,8 @@ benchmarks! { T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, hash ) verify { - let s = RequestStatus::Requested { deposit: None, count: 2, len: Some(MAX_SIZE) }; - assert_eq!(StatusFor::::get(&hash), Some(s)); + let s = RequestStatus::Requested { maybe_ticket: None, count: 2, maybe_len: Some(MAX_SIZE) }; + assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } // Cheap request - the preimage is not yet noted, so no deposit to unreserve. request_unnoted_preimage { @@ -148,8 +142,8 @@ benchmarks! { T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, hash ) verify { - let s = RequestStatus::Requested { deposit: None, count: 1, len: None }; - assert_eq!(StatusFor::::get(&hash), Some(s)); + let s = RequestStatus::Requested { maybe_ticket: None, count: 1, maybe_len: None }; + assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } // Cheap request - the preimage is already requested, so just a counter bump. request_requested_preimage { @@ -163,8 +157,8 @@ benchmarks! { T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, hash ) verify { - let s = RequestStatus::Requested { deposit: None, count: 2, len: None }; - assert_eq!(StatusFor::::get(&hash), Some(s)); + let s = RequestStatus::Requested { maybe_ticket: None, count: 2, maybe_len: None }; + assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } // Expensive unrequest - last reference and it's noted, so will destroy the preimage. @@ -184,7 +178,7 @@ benchmarks! { T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, hash ) verify { - assert_eq!(StatusFor::::get(&hash), None); + assert_eq!(RequestStatusFor::::get(&hash), None); } // Cheap unrequest - last reference, but it's not noted. unrequest_unnoted_preimage { @@ -198,7 +192,7 @@ benchmarks! { T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, hash ) verify { - assert_eq!(StatusFor::::get(&hash), None); + assert_eq!(RequestStatusFor::::get(&hash), None); } // Cheap unrequest - not the last reference. unrequest_multi_referenced_preimage { @@ -217,9 +211,38 @@ benchmarks!
{ T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, hash ) verify { - let s = RequestStatus::Requested { deposit: None, count: 1, len: None }; - assert_eq!(StatusFor::::get(&hash), Some(s)); + let s = RequestStatus::Requested { maybe_ticket: None, count: 1, maybe_len: None }; + assert_eq!(RequestStatusFor::::get(&hash), Some(s)); + } + + ensure_updated { + let n in 0..MAX_HASH_UPGRADE_BULK_COUNT; + + let caller = funded_account::(); + let hashes = (0..n).map(|i| insert_old_unrequested::(i)).collect::>(); + }: _(RawOrigin::Signed(caller), hashes) + verify { + assert_eq!(RequestStatusFor::::iter_keys().count(), n as usize); + #[allow(deprecated)] + let c = StatusFor::::iter_keys().count(); + assert_eq!(c, 0); } impl_benchmark_test_suite!(Preimage, crate::mock::new_test_ext(), crate::mock::Test); } + +fn insert_old_unrequested(s: u32) -> ::Hash { + let acc = account("old", s, 0); + T::Currency::make_free_balance_be(&acc, BalanceOf::::max_value() / 2u32.into()); + + // The preimage size does not matter here as it is not touched. + let preimage = s.to_le_bytes(); + let hash = ::Hashing::hash(&preimage[..]); + + #[allow(deprecated)] + StatusFor::::insert( + &hash, + OldRequestStatus::Unrequested { deposit: (acc, 123u32.into()), len: preimage.len() as u32 }, + ); + hash +} diff --git a/substrate/frame/preimage/src/lib.rs b/substrate/frame/preimage/src/lib.rs index 5ab1e7643b2e..e603f1a32e6a 100644 --- a/substrate/frame/preimage/src/lib.rs +++ b/substrate/frame/preimage/src/lib.rs @@ -37,7 +37,10 @@ mod mock; mod tests; pub mod weights; -use sp_runtime::traits::{BadOrigin, Hash, Saturating}; +use sp_runtime::{ + traits::{BadOrigin, Hash, Saturating}, + Perbill, +}; use sp_std::{borrow::Cow, prelude::*}; use codec::{Decode, Encode, MaxEncodedLen}; @@ -46,8 +49,8 @@ use frame_support::{ ensure, pallet_prelude::Get, traits::{ - Currency, Defensive, FetchResult, Hash as PreimageHash, PreimageProvider, - PreimageRecipient, QueryPreimage, ReservableCurrency, StorePreimage, + Consideration, Currency, Defensive, FetchResult, Footprint, Hash as PreimageHash, + PreimageProvider, PreimageRecipient, QueryPreimage, ReservableCurrency, StorePreimage, }, BoundedSlice, BoundedVec, }; @@ -61,7 +64,7 @@ pub use pallet::*; /// A type to note whether a preimage is owned by a user or the system. #[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo, MaxEncodedLen, RuntimeDebug)] -pub enum RequestStatus { +pub enum OldRequestStatus { /// The associated preimage has not yet been requested by the system. The given deposit (if /// some) is being held until either it becomes requested or the user retracts the preimage. Unrequested { deposit: (AccountId, Balance), len: u32 }, @@ -71,13 +74,31 @@ pub enum RequestStatus { Requested { deposit: Option<(AccountId, Balance)>, count: u32, len: Option }, } +/// A type to note whether a preimage is owned by a user or the system. +#[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo, MaxEncodedLen, RuntimeDebug)] +pub enum RequestStatus { + /// The associated preimage has not yet been requested by the system. The given deposit (if + /// some) is being held until either it becomes requested or the user retracts the preimage. + Unrequested { ticket: (AccountId, Ticket), len: u32 }, + /// There are a non-zero number of outstanding requests for this hash by this chain. If there + /// is a preimage registered, then `len` is `Some` and it may be removed iff this counter + /// becomes zero. 
+ Requested { maybe_ticket: Option<(AccountId, Ticket)>, count: u32, maybe_len: Option }, +} + type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type TicketOf = ::Consideration; /// Maximum size of preimage we can store is 4 MiB. const MAX_SIZE: u32 = 4 * 1024 * 1024; +/// Hard-limit on the number of hashes that can be passed to `ensure_updated`. +/// +/// Exists only for benchmarking purposes. +pub const MAX_HASH_UPGRADE_BULK_COUNT: u32 = 1024; #[frame_support::pallet] +#[allow(deprecated)] pub mod pallet { use super::*; @@ -93,17 +114,15 @@ pub mod pallet { type WeightInfo: weights::WeightInfo; /// Currency type for this pallet. + // TODO#1569: Remove. type Currency: ReservableCurrency; /// An origin that can request a preimage be placed on-chain without a deposit or fee, or /// manage existing preimages. type ManagerOrigin: EnsureOrigin; - /// The base deposit for placing a preimage on chain. - type BaseDeposit: Get>; - - /// The per-byte deposit for placing a preimage on chain. - type ByteDeposit: Get>; + /// A means of providing some cost while data is stored on-chain. + type Consideration: Consideration; } #[pallet::pallet] @@ -135,18 +154,33 @@ pub mod pallet { Requested, /// The preimage request cannot be removed since no outstanding requests exist. NotRequested, + /// More than `MAX_HASH_UPGRADE_BULK_COUNT` hashes were requested to be upgraded at once. + TooMany, + } + + /// A reason for this pallet placing a hold on funds. + #[pallet::composite_enum] + pub enum HoldReason { + /// The funds are held as storage deposit for a preimage. + Preimage, } /// The request status of a given hash. + #[deprecated = "RequestStatusFor"] #[pallet::storage] pub(super) type StatusFor = StorageMap<_, Identity, T::Hash, OldRequestStatus>>; + + /// The request status of a given hash. + #[pallet::storage] + pub(super) type RequestStatusFor = + StorageMap<_, Identity, T::Hash, RequestStatus>>; #[pallet::storage] pub(super) type PreimageFor = StorageMap<_, Identity, (T::Hash, u32), BoundedVec>>; - #[pallet::call] + #[pallet::call(weight = T::WeightInfo)] impl Pallet { /// Register a preimage on-chain. /// @@ -173,7 +207,6 @@ pub mod pallet { /// - `hash`: The hash of the preimage to be removed from the store. /// - `len`: The length of the preimage of `hash`. #[pallet::call_index(1)] - #[pallet::weight(T::WeightInfo::unnote_preimage())] pub fn unnote_preimage(origin: OriginFor, hash: T::Hash) -> DispatchResult { let maybe_sender = Self::ensure_signed_or_manager(origin)?; Self::do_unnote_preimage(&hash, maybe_sender) @@ -184,7 +217,6 @@ pub mod pallet { /// If the preimage has already been provided on-chain, we unreserve any deposit /// a user may have paid, and take control of the preimage out of their hands. #[pallet::call_index(2)] - #[pallet::weight(T::WeightInfo::request_preimage())] pub fn request_preimage(origin: OriginFor, hash: T::Hash) -> DispatchResult { T::ManagerOrigin::ensure_origin(origin)?; Self::do_request_preimage(&hash); @@ -195,15 +227,80 @@ pub mod pallet { /// /// NOTE: THIS MUST NOT BE CALLED ON `hash` MORE TIMES THAN `request_preimage`. #[pallet::call_index(3)] - #[pallet::weight(T::WeightInfo::unrequest_preimage())] pub fn unrequest_preimage(origin: OriginFor, hash: T::Hash) -> DispatchResult { T::ManagerOrigin::ensure_origin(origin)?; Self::do_unrequest_preimage(&hash) } + + /// Ensure that a bulk of pre-images is upgraded.
+ /// + /// The caller pays no fee if at least 90% of pre-images were successfully updated. + #[pallet::call_index(4)] + #[pallet::weight(T::WeightInfo::ensure_updated(hashes.len() as u32))] + pub fn ensure_updated( + origin: OriginFor, + hashes: Vec, + ) -> DispatchResultWithPostInfo { + ensure_signed(origin)?; + ensure!(hashes.len() <= MAX_HASH_UPGRADE_BULK_COUNT as usize, Error::::TooMany); + + let updated = hashes.iter().map(Self::do_ensure_updated).filter(|b| *b).count() as u32; + let ratio = Perbill::from_rational(updated, hashes.len() as u32); + + let pays: Pays = (ratio < Perbill::from_percent(90)).into(); + Ok(pays.into()) + } } } impl Pallet { + fn do_ensure_updated(h: &T::Hash) -> bool { + #[allow(deprecated)] + let r = match StatusFor::::take(h) { + Some(r) => r, + None => return false, + }; + let n = match r { + OldRequestStatus::Unrequested { deposit: (who, amount), len } => { + // unreserve deposit + T::Currency::unreserve(&who, amount); + // take consideration + let Ok(ticket) = + T::Consideration::new(&who, Footprint::from_parts(1, len as usize)) + .defensive_proof("Unexpected inability to take deposit after unreserved") + else { + return true + }; + RequestStatus::Unrequested { ticket: (who, ticket), len } + }, + OldRequestStatus::Requested { deposit: maybe_deposit, count, len: maybe_len } => { + let maybe_ticket = if let Some((who, deposit)) = maybe_deposit { + // unreserve deposit + T::Currency::unreserve(&who, deposit); + // take consideration + if let Some(len) = maybe_len { + let Ok(ticket) = + T::Consideration::new(&who, Footprint::from_parts(1, len as usize)) + .defensive_proof( + "Unexpected inability to take deposit after unreserved", + ) + else { + return true + }; + Some((who, ticket)) + } else { + None + } + } else { + None + }; + RequestStatus::Requested { maybe_ticket, count, maybe_len } + }, + }; + RequestStatusFor::::insert(h, n); + true + } + /// Ensure that the origin is either the `ManagerOrigin` or a signed origin. fn ensure_signed_or_manager( origin: T::RuntimeOrigin, @@ -230,26 +327,29 @@ impl Pallet { let len = preimage.len() as u32; ensure!(len <= MAX_SIZE, Error::::TooBig); + Self::do_ensure_updated(&hash); // We take a deposit only if there is a provided depositor and the preimage was not // previously requested. This also allows the tx to pay no fee. - let status = match (StatusFor::::get(hash), maybe_depositor) { - (Some(RequestStatus::Requested { count, deposit, .. }), _) => - RequestStatus::Requested { count, deposit, len: Some(len) }, + let status = match (RequestStatusFor::::get(hash), maybe_depositor) { + (Some(RequestStatus::Requested { maybe_ticket, count, .. }), _) => + RequestStatus::Requested { maybe_ticket, count, maybe_len: Some(len) }, (Some(RequestStatus::Unrequested { .. 
}), Some(_)) => return Err(Error::::AlreadyNoted.into()), - (Some(RequestStatus::Unrequested { len, deposit }), None) => - RequestStatus::Requested { deposit: Some(deposit), count: 1, len: Some(len) }, - (None, None) => RequestStatus::Requested { count: 1, len: Some(len), deposit: None }, + (Some(RequestStatus::Unrequested { ticket, len }), None) => RequestStatus::Requested { + maybe_ticket: Some(ticket), + count: 1, + maybe_len: Some(len), + }, + (None, None) => + RequestStatus::Requested { maybe_ticket: None, count: 1, maybe_len: Some(len) }, (None, Some(depositor)) => { - let length = preimage.len() as u32; - let deposit = T::BaseDeposit::get() - .saturating_add(T::ByteDeposit::get().saturating_mul(length.into())); - T::Currency::reserve(depositor, deposit)?; - RequestStatus::Unrequested { deposit: (depositor.clone(), deposit), len } + let ticket = + T::Consideration::new(depositor, Footprint::from_parts(1, len as usize))?; + RequestStatus::Unrequested { ticket: (depositor.clone(), ticket), len } }, }; let was_requested = matches!(status, RequestStatus::Requested { .. }); - StatusFor::::insert(hash, status); + RequestStatusFor::::insert(hash, status); let _ = Self::insert(&hash, preimage) .defensive_proof("Unable to insert. Logic error in `note_bytes`?"); @@ -264,15 +364,19 @@ impl Pallet { // If the preimage already exists before the request is made, the deposit for the preimage is // returned to the user, and removed from their management. fn do_request_preimage(hash: &T::Hash) { - let (count, len, deposit) = - StatusFor::::get(hash).map_or((1, None, None), |x| match x { - RequestStatus::Requested { mut count, len, deposit } => { + Self::do_ensure_updated(&hash); + let (count, maybe_len, maybe_ticket) = + RequestStatusFor::::get(hash).map_or((1, None, None), |x| match x { + RequestStatus::Requested { maybe_ticket, mut count, maybe_len } => { count.saturating_inc(); - (count, len, deposit) + (count, maybe_len, maybe_ticket) }, - RequestStatus::Unrequested { deposit, len } => (1, Some(len), Some(deposit)), + RequestStatus::Unrequested { ticket, len } => (1, Some(len), Some(ticket)), }); - StatusFor::::insert(hash, RequestStatus::Requested { count, len, deposit }); + RequestStatusFor::::insert( + hash, + RequestStatus::Requested { maybe_ticket, count, maybe_len }, + ); if count == 1 { Self::deposit_event(Event::Requested { hash: *hash }); } @@ -288,24 +392,25 @@ impl Pallet { hash: &T::Hash, maybe_check_owner: Option, ) -> DispatchResult { - match StatusFor::::get(hash).ok_or(Error::::NotNoted)? { - RequestStatus::Requested { deposit: Some((owner, deposit)), count, len } => { + Self::do_ensure_updated(&hash); + match RequestStatusFor::::get(hash).ok_or(Error::::NotNoted)? { + RequestStatus::Requested { maybe_ticket: Some((owner, ticket)), count, maybe_len } => { ensure!(maybe_check_owner.map_or(true, |c| c == owner), Error::::NotAuthorized); - T::Currency::unreserve(&owner, deposit); - StatusFor::::insert( + let _ = ticket.drop(&owner); + RequestStatusFor::::insert( hash, - RequestStatus::Requested { deposit: None, count, len }, + RequestStatus::Requested { maybe_ticket: None, count, maybe_len }, ); Ok(()) }, - RequestStatus::Requested { deposit: None, .. } => { + RequestStatus::Requested { maybe_ticket: None, .. 
} => { ensure!(maybe_check_owner.is_none(), Error::::NotAuthorized); Self::do_unrequest_preimage(hash) }, - RequestStatus::Unrequested { deposit: (owner, deposit), len } => { + RequestStatus::Unrequested { ticket: (owner, ticket), len } => { ensure!(maybe_check_owner.map_or(true, |c| c == owner), Error::::NotAuthorized); - T::Currency::unreserve(&owner, deposit); - StatusFor::::remove(hash); + let _ = ticket.drop(&owner); + RequestStatusFor::::remove(hash); Self::remove(hash, len); Self::deposit_event(Event::Cleared { hash: *hash }); @@ -316,25 +421,32 @@ impl Pallet { /// Clear a preimage request. fn do_unrequest_preimage(hash: &T::Hash) -> DispatchResult { - match StatusFor::::get(hash).ok_or(Error::::NotRequested)? { - RequestStatus::Requested { mut count, len, deposit } if count > 1 => { + Self::do_ensure_updated(&hash); + match RequestStatusFor::::get(hash).ok_or(Error::::NotRequested)? { + RequestStatus::Requested { mut count, maybe_len, maybe_ticket } if count > 1 => { count.saturating_dec(); - StatusFor::::insert(hash, RequestStatus::Requested { count, len, deposit }); + RequestStatusFor::::insert( + hash, + RequestStatus::Requested { maybe_ticket, count, maybe_len }, + ); }, - RequestStatus::Requested { count, len, deposit } => { + RequestStatus::Requested { count, maybe_len, maybe_ticket } => { debug_assert!(count == 1, "preimage request counter at zero?"); - match (len, deposit) { + match (maybe_len, maybe_ticket) { // Preimage was never noted. - (None, _) => StatusFor::::remove(hash), + (None, _) => RequestStatusFor::::remove(hash), // Preimage was noted without owner - just remove it. (Some(len), None) => { Self::remove(hash, len); - StatusFor::::remove(hash); + RequestStatusFor::::remove(hash); Self::deposit_event(Event::Cleared { hash: *hash }); }, // Preimage was noted with owner - move to unrequested so they can get refund. - (Some(len), Some(deposit)) => { - StatusFor::::insert(hash, RequestStatus::Unrequested { deposit, len }); + (Some(len), Some(ticket)) => { + RequestStatusFor::::insert( + hash, + RequestStatus::Unrequested { ticket, len }, + ); }, } }, @@ -359,8 +471,10 @@ impl Pallet { fn len(hash: &T::Hash) -> Option { use RequestStatus::*; - match StatusFor::::get(hash) { - Some(Requested { len: Some(len), .. }) | Some(Unrequested { len, .. }) => Some(len), + Self::do_ensure_updated(&hash); + match RequestStatusFor::::get(hash) { + Some(Requested { maybe_len: Some(len), .. }) | Some(Unrequested { len, .. }) => + Some(len), _ => None, } } @@ -380,7 +494,8 @@ impl PreimageProvider for Pallet { } fn preimage_requested(hash: &T::Hash) -> bool { - matches!(StatusFor::::get(hash), Some(RequestStatus::Requested { .. })) + Self::do_ensure_updated(hash); + matches!(RequestStatusFor::::get(hash), Some(RequestStatus::Requested { .. })) } fn get_preimage(hash: &T::Hash) -> Option> { @@ -423,7 +538,8 @@ impl> QueryPreimage for Pallet { } fn is_requested(hash: &T::Hash) -> bool { - matches!(StatusFor::::get(hash), Some(RequestStatus::Requested { .. })) + Self::do_ensure_updated(&hash); + matches!(RequestStatusFor::::get(hash), Some(RequestStatus::Requested { .. 
})) } fn request(hash: &T::Hash) { diff --git a/substrate/frame/preimage/src/migration.rs b/substrate/frame/preimage/src/migration.rs index 0f3337e39bf5..821cb01bbaae 100644 --- a/substrate/frame/preimage/src/migration.rs +++ b/substrate/frame/preimage/src/migration.rs @@ -37,7 +37,7 @@ mod v0 { use super::*; #[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo, MaxEncodedLen, RuntimeDebug)] - pub enum RequestStatus { + pub enum OldRequestStatus { Unrequested(Option<(AccountId, Balance)>), Requested(u32), } @@ -55,7 +55,7 @@ mod v0 { Pallet, Identity, ::Hash, - RequestStatus<::AccountId, BalanceOf>, + OldRequestStatus<::AccountId, BalanceOf>, >; /// Returns the number of images or `None` if the storage is corrupted. @@ -127,21 +127,22 @@ pub mod v1 { } let status = match status { - v0::RequestStatus::Unrequested(deposit) => match deposit { - Some(deposit) => RequestStatus::Unrequested { deposit, len }, + v0::OldRequestStatus::Unrequested(deposit) => match deposit { + Some(deposit) => OldRequestStatus::Unrequested { deposit, len }, // `None` depositor becomes system-requested. None => - RequestStatus::Requested { deposit: None, count: 1, len: Some(len) }, + OldRequestStatus::Requested { deposit: None, count: 1, len: Some(len) }, }, - v0::RequestStatus::Requested(count) if count == 0 => { + v0::OldRequestStatus::Requested(count) if count == 0 => { log::error!(target: TARGET, "preimage has counter of zero: {:?}", hash); continue }, - v0::RequestStatus::Requested(count) => - RequestStatus::Requested { deposit: None, count, len: Some(len) }, + v0::OldRequestStatus::Requested(count) => + OldRequestStatus::Requested { deposit: None, count, len: Some(len) }, }; log::trace!(target: TARGET, "Moving preimage {:?} with len {}", hash, len); + #[allow(deprecated)] crate::StatusFor::::insert(hash, status); crate::PreimageFor::::insert(&(hash, len), preimage); @@ -176,6 +177,7 @@ pub mod v1 { pub fn image_count() -> Option { // Use iter_values() to ensure that the values are decodable. 
let images = crate::PreimageFor::::iter_values().count() as u32; + #[allow(deprecated)] let status = crate::StatusFor::::iter_values().count() as u32; if images == status { @@ -189,6 +191,7 @@ pub mod v1 { #[cfg(test)] #[cfg(feature = "try-runtime")] mod test { + #![allow(deprecated)] use super::*; use crate::mock::{Test as T, *}; @@ -203,19 +206,19 @@ mod test { // Case 1: Unrequested without deposit let (p, h) = preimage::(128); v0::PreimageFor::::insert(h, p); - v0::StatusFor::::insert(h, v0::RequestStatus::Unrequested(None)); + v0::StatusFor::::insert(h, v0::OldRequestStatus::Unrequested(None)); // Case 2: Unrequested with deposit let (p, h) = preimage::(1024); v0::PreimageFor::::insert(h, p); - v0::StatusFor::::insert(h, v0::RequestStatus::Unrequested(Some((1, 1)))); + v0::StatusFor::::insert(h, v0::OldRequestStatus::Unrequested(Some((1, 1)))); // Case 3: Requested by 0 (invalid) let (p, h) = preimage::(8192); v0::PreimageFor::::insert(h, p); - v0::StatusFor::::insert(h, v0::RequestStatus::Requested(0)); + v0::StatusFor::::insert(h, v0::OldRequestStatus::Requested(0)); // Case 4: Requested by 10 let (p, h) = preimage::(65536); v0::PreimageFor::::insert(h, p); - v0::StatusFor::::insert(h, v0::RequestStatus::Requested(10)); + v0::StatusFor::::insert(h, v0::OldRequestStatus::Requested(10)); assert_eq!(v0::image_count::(), Some(4)); assert_eq!(v1::image_count::(), None, "V1 storage should be corrupted"); @@ -234,14 +237,14 @@ mod test { assert_eq!(crate::PreimageFor::::get(&(h, 128)), Some(p)); assert_eq!( crate::StatusFor::::get(h), - Some(RequestStatus::Requested { deposit: None, count: 1, len: Some(128) }) + Some(OldRequestStatus::Requested { deposit: None, count: 1, len: Some(128) }) ); // Case 2: Unrequested with deposit becomes unrequested let (p, h) = preimage::(1024); assert_eq!(crate::PreimageFor::::get(&(h, 1024)), Some(p)); assert_eq!( crate::StatusFor::::get(h), - Some(RequestStatus::Unrequested { deposit: (1, 1), len: 1024 }) + Some(OldRequestStatus::Unrequested { deposit: (1, 1), len: 1024 }) ); // Case 3: Requested by 0 should be skipped let (_, h) = preimage::(8192); @@ -252,7 +255,7 @@ mod test { assert_eq!(crate::PreimageFor::::get(&(h, 65536)), Some(p)); assert_eq!( crate::StatusFor::::get(h), - Some(RequestStatus::Requested { deposit: None, count: 10, len: Some(65536) }) + Some(OldRequestStatus::Requested { deposit: None, count: 10, len: Some(65536) }) ); }); } diff --git a/substrate/frame/preimage/src/mock.rs b/substrate/frame/preimage/src/mock.rs index 2fb9f36dec45..2e7051a99a49 100644 --- a/substrate/frame/preimage/src/mock.rs +++ b/substrate/frame/preimage/src/mock.rs @@ -22,13 +22,13 @@ use super::*; use crate as pallet_preimage; use frame_support::{ ord_parameter_types, - traits::{ConstU32, ConstU64, Everything}, + traits::{fungible::HoldConsideration, ConstU32, ConstU64, Everything}, weights::constants::RocksDbWeight, }; use frame_system::EnsureSignedBy; use sp_core::H256; use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, + traits::{BlakeTwo256, Convert, IdentityLookup}, BuildStorage, }; @@ -80,22 +80,28 @@ impl pallet_balances::Config for Test { type MaxReserves = ConstU32<50>; type ReserveIdentifier = [u8; 8]; type FreezeIdentifier = (); - type MaxFreezes = (); + type MaxFreezes = ConstU32<1>; type RuntimeHoldReason = (); - type MaxHolds = (); + type MaxHolds = ConstU32<2>; } ord_parameter_types! 
{ pub const One: u64 = 1; } +pub struct ConvertDeposit; +impl Convert for ConvertDeposit { + fn convert(a: Footprint) -> u64 { + a.count * 2 + a.size + } +} + impl Config for Test { type WeightInfo = (); type RuntimeEvent = RuntimeEvent; type Currency = Balances; type ManagerOrigin = EnsureSignedBy; - type BaseDeposit = ConstU64<2>; - type ByteDeposit = ConstU64<1>; + type Consideration = HoldConsideration; } pub fn new_test_ext() -> sp_io::TestExternalities { @@ -110,3 +116,20 @@ pub fn new_test_ext() -> sp_io::TestExternalities { pub fn hashed(data: impl AsRef<[u8]>) -> H256 { BlakeTwo256::hash(data.as_ref()) } + +/// Insert an un-migrated preimage. +pub fn insert_old_unrequested( + s: u32, + acc: T::AccountId, +) -> ::Hash { + // The preimage size does not matter here as it is not touched. + let preimage = s.to_le_bytes(); + let hash = ::Hashing::hash(&preimage[..]); + + #[allow(deprecated)] + StatusFor::::insert( + &hash, + OldRequestStatus::Unrequested { deposit: (acc, 123u32.into()), len: preimage.len() as u32 }, + ); + hash +} diff --git a/substrate/frame/preimage/src/tests.rs b/substrate/frame/preimage/src/tests.rs index fa621c8f5589..a473a0ae8e25 100644 --- a/substrate/frame/preimage/src/tests.rs +++ b/substrate/frame/preimage/src/tests.rs @@ -24,12 +24,11 @@ use crate::mock::*; use frame_support::{ assert_err, assert_noop, assert_ok, assert_storage_noop, - traits::{Bounded, BoundedInline, Hash as PreimageHash}, + traits::{fungible::InspectHold, Bounded, BoundedInline, Hash as PreimageHash}, StorageNoopGuard, }; -use pallet_balances::Error as BalancesError; use sp_core::{blake2_256, H256}; -use sp_runtime::bounded_vec; +use sp_runtime::{bounded_vec, TokenError}; /// Returns one `Inline`, `Lookup` and `Legacy` item each with different data and hash. 
pub fn make_bounded_values() -> (Bounded>, Bounded>, Bounded>) { @@ -52,7 +51,7 @@ pub fn make_bounded_values() -> (Bounded>, Bounded>, Bounded::AlreadyNoted + Error::::AlreadyNoted, ); assert_noop!( Preimage::note_preimage(RuntimeOrigin::signed(0), vec![2]), - BalancesError::::InsufficientBalance + TokenError::FundsUnavailable, ); }); } @@ -171,7 +170,7 @@ fn request_note_order_makes_no_difference() { assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); ( - StatusFor::::iter().collect::>(), + RequestStatusFor::::iter().collect::>(), PreimageFor::::iter().collect::>(), ) }); @@ -180,7 +179,7 @@ fn request_note_order_makes_no_difference() { assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); assert_ok!(Preimage::unnote_preimage(RuntimeOrigin::signed(2), hashed([1]))); let other_way = ( - StatusFor::::iter().collect::>(), + RequestStatusFor::::iter().collect::>(), PreimageFor::::iter().collect::>(), ); assert_eq!(one_way, other_way); @@ -207,7 +206,7 @@ fn request_user_note_order_makes_no_difference() { assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); ( - StatusFor::::iter().collect::>(), + RequestStatusFor::::iter().collect::>(), PreimageFor::::iter().collect::>(), ) }); @@ -216,7 +215,7 @@ fn request_user_note_order_makes_no_difference() { assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); assert_ok!(Preimage::unnote_preimage(RuntimeOrigin::signed(2), hashed([1]))); let other_way = ( - StatusFor::::iter().collect::>(), + RequestStatusFor::::iter().collect::>(), PreimageFor::::iter().collect::>(), ); assert_eq!(one_way, other_way); @@ -249,13 +248,14 @@ fn unrequest_preimage_works() { fn user_noted_then_requested_preimage_is_refunded_once_only() { new_test_ext().execute_with(|| { assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1; 3])); + assert_eq!(Balances::balance_on_hold(&(), &2), 5); assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); + assert_eq!(Balances::balance_on_hold(&(), &2), 8); assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); assert_ok!(Preimage::unrequest_preimage(RuntimeOrigin::signed(1), hashed([1]))); assert_ok!(Preimage::unnote_preimage(RuntimeOrigin::signed(2), hashed([1]))); - // Still have reserve from `vec[1; 3]`. - assert_eq!(Balances::reserved_balance(2), 5); - assert_eq!(Balances::free_balance(2), 95); + // Still have hold from `vec[1; 3]`. + assert_eq!(Balances::balance_on_hold(&(), &2), 5); }); } @@ -270,7 +270,7 @@ fn noted_preimage_use_correct_map() { assert_eq!(PreimageFor::::iter().count(), 8); // All are present - assert_eq!(StatusFor::::iter().count(), 8); + assert_eq!(RequestStatusFor::::iter().count(), 8); // Now start removing them again... for i in 0..7 { @@ -287,7 +287,7 @@ fn noted_preimage_use_correct_map() { assert_eq!(PreimageFor::::iter().count(), 0); // All are gone - assert_eq!(StatusFor::::iter().count(), 0); + assert_eq!(RequestStatusFor::::iter().count(), 0); }); } @@ -327,20 +327,20 @@ fn query_and_store_preimage_workflow() { Preimage::request(&hash); // It is requested thrice. assert!(matches!( - StatusFor::::get(&hash).unwrap(), + RequestStatusFor::::get(&hash).unwrap(), RequestStatus::Requested { count: 3, .. } )); // It can be realized and decoded correctly. 
assert_eq!(Preimage::realize::>(&bound).unwrap(), (data.clone(), Some(len))); assert!(matches!( - StatusFor::::get(&hash).unwrap(), + RequestStatusFor::::get(&hash).unwrap(), RequestStatus::Requested { count: 2, .. } )); // Dropping should unrequest. Preimage::drop(&bound); assert!(matches!( - StatusFor::::get(&hash).unwrap(), + RequestStatusFor::::get(&hash).unwrap(), RequestStatus::Requested { count: 1, .. } )); @@ -381,7 +381,7 @@ fn query_preimage_request_works() { assert!(::len(&hash).is_none()); assert_noop!(::fetch(&hash, None), DispatchError::Unavailable); // But there is only one entry in the map. - assert_eq!(StatusFor::::iter().count(), 1); + assert_eq!(RequestStatusFor::::iter().count(), 1); // Un-request the preimage. ::unrequest(&hash); @@ -392,7 +392,7 @@ fn query_preimage_request_works() { // It is not requested anymore. assert!(!::is_requested(&hash)); // And there is no entry in the map. - assert_eq!(StatusFor::::iter().count(), 0); + assert_eq!(RequestStatusFor::::iter().count(), 0); }); } @@ -413,7 +413,7 @@ fn query_preimage_hold_and_drop_work() { assert!(::is_requested(&legacy.hash())); // There are two values requested in total. - assert_eq!(StatusFor::::iter().count(), 2); + assert_eq!(RequestStatusFor::::iter().count(), 2); // Cleanup by dropping both. ::drop(&lookup); @@ -422,7 +422,7 @@ fn query_preimage_hold_and_drop_work() { assert!(!::is_requested(&legacy.hash())); // There are no values requested anymore. - assert_eq!(StatusFor::::iter().count(), 0); + assert_eq!(RequestStatusFor::::iter().count(), 0); }); } @@ -489,3 +489,27 @@ fn store_preimage_bound_too_large_errors() { assert_ok!(::bound(data.clone())); }); } + +#[test] +fn ensure_updated_works() { + #![allow(deprecated)] + new_test_ext().execute_with(|| { + let alice = 2; + + for i in 0..100 { + let hashes = + (0..100).map(|j| insert_old_unrequested::(j, alice)).collect::>(); + let old = hashes.iter().take(i).cloned().collect::>(); + let bad = vec![hashed([0; 32]); 100 - i]; + + let hashes = [old.as_slice(), bad.as_slice()].concat(); + let res = Preimage::ensure_updated(RuntimeOrigin::signed(alice), hashes).unwrap(); + + // Alice pays a fee when less than 90% of the hashes are new. + assert_eq!(res.pays_fee, (i < 90).into()); + + assert_eq!(RequestStatusFor::::iter().count(), i); + assert_eq!(StatusFor::::iter().count(), 100 - i); + } + }); +} diff --git a/substrate/frame/preimage/src/weights.rs b/substrate/frame/preimage/src/weights.rs index 41e58a102780..c11ab74c1e55 100644 --- a/substrate/frame/preimage/src/weights.rs +++ b/substrate/frame/preimage/src/weights.rs @@ -15,32 +15,29 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_preimage +//! Autogenerated weights for `pallet_preimage` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-mia4uyug-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// target/production/substrate-node // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_preimage -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/preimage/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_preimage +// --chain=dev +// --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/preimage/src/weights.rs +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +47,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_preimage. +/// Weight functions needed for `pallet_preimage`. pub trait WeightInfo { fn note_preimage(s: u32, ) -> Weight; fn note_requested_preimage(s: u32, ) -> Weight; @@ -64,319 +61,410 @@ pub trait WeightInfo { fn unrequest_preimage() -> Weight; fn unrequest_unnoted_preimage() -> Weight; fn unrequest_multi_referenced_preimage() -> Weight; + fn ensure_updated(n: u32, ) -> Weight; } -/// Weights for pallet_preimage using the Substrate node and recommended hardware. +/// Weights for `pallet_preimage` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 4194304]`. fn note_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `143` + // Measured: `42` // Estimated: `3556` - // Minimum execution time: 30_479_000 picoseconds. - Weight::from_parts(23_381_775, 3556) - // Standard Error: 2 - .saturating_add(Weight::from_parts(1_670, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) + // Minimum execution time: 15_936_000 picoseconds. 
+ Weight::from_parts(16_271_000, 3556) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_916, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 4194304]`. fn note_requested_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 16_104_000 picoseconds. - Weight::from_parts(18_393_879, 3556) + // Minimum execution time: 16_468_000 picoseconds. + Weight::from_parts(17_031_000, 3556) // Standard Error: 2 - .saturating_add(Weight::from_parts(1_669, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(Weight::from_parts(1_948, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 4194304]`. fn note_no_deposit_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 15_652_000 picoseconds. - Weight::from_parts(22_031_627, 3556) - // Standard Error: 2 - .saturating_add(Weight::from_parts(1_672, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) + // Minimum execution time: 16_342_000 picoseconds. 
+ Weight::from_parts(16_535_000, 3556) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_906, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unnote_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `289` + // Measured: `172` // Estimated: `3556` - // Minimum execution time: 37_148_000 picoseconds. - Weight::from_parts(40_247_000, 3556) - .saturating_add(T::DbWeight::get().reads(1_u64)) + // Minimum execution time: 31_047_000 picoseconds. + Weight::from_parts(34_099_000, 3556) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unnote_no_deposit_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `144` // Estimated: `3556` - // Minimum execution time: 19_909_000 picoseconds. - Weight::from_parts(21_572_000, 3556) - .saturating_add(T::DbWeight::get().reads(1_u64)) + // Minimum execution time: 32_559_000 picoseconds. + Weight::from_parts(36_677_000, 3556) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) fn request_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `188` + // Measured: `172` // Estimated: `3556` - // Minimum execution time: 17_602_000 picoseconds. 
- Weight::from_parts(18_899_000, 3556) - .saturating_add(T::DbWeight::get().reads(1_u64)) + // Minimum execution time: 27_887_000 picoseconds. + Weight::from_parts(30_303_000, 3556) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) fn request_no_deposit_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `144` // Estimated: `3556` - // Minimum execution time: 11_253_000 picoseconds. - Weight::from_parts(11_667_000, 3556) - .saturating_add(T::DbWeight::get().reads(1_u64)) + // Minimum execution time: 17_256_000 picoseconds. + Weight::from_parts(19_481_000, 3556) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) fn request_unnoted_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `42` // Estimated: `3556` - // Minimum execution time: 14_152_000 picoseconds. - Weight::from_parts(14_652_000, 3556) - .saturating_add(T::DbWeight::get().reads(1_u64)) + // Minimum execution time: 22_344_000 picoseconds. + Weight::from_parts(23_868_000, 3556) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) fn request_requested_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 8_267_000 picoseconds. - Weight::from_parts(8_969_000, 3556) - .saturating_add(T::DbWeight::get().reads(1_u64)) + // Minimum execution time: 10_542_000 picoseconds. 
+ Weight::from_parts(11_571_000, 3556) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unrequest_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `144` // Estimated: `3556` - // Minimum execution time: 18_429_000 picoseconds. - Weight::from_parts(18_946_000, 3556) - .saturating_add(T::DbWeight::get().reads(1_u64)) + // Minimum execution time: 29_054_000 picoseconds. + Weight::from_parts(32_996_000, 3556) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) fn unrequest_unnoted_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 7_910_000 picoseconds. - Weight::from_parts(8_272_000, 3556) - .saturating_add(T::DbWeight::get().reads(1_u64)) + // Minimum execution time: 10_775_000 picoseconds. + Weight::from_parts(11_937_000, 3556) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) fn unrequest_multi_referenced_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 7_936_000 picoseconds. - Weight::from_parts(8_504_000, 3556) + // Minimum execution time: 10_696_000 picoseconds. 
+ Weight::from_parts(11_717_000, 3556) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Preimage::StatusFor` (r:1024 w:1024) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:0 w:1024) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 1024]`. + fn ensure_updated(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `193 + n * (91 ±0)` + // Estimated: `3593 + n * (2566 ±0)` + // Minimum execution time: 2_452_000 picoseconds. + Weight::from_parts(2_641_000, 3593) + // Standard Error: 19_797 + .saturating_add(Weight::from_parts(15_620_946, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2566).saturating_mul(n.into())) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 4194304]`. fn note_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `143` + // Measured: `42` // Estimated: `3556` - // Minimum execution time: 30_479_000 picoseconds. - Weight::from_parts(23_381_775, 3556) - // Standard Error: 2 - .saturating_add(Weight::from_parts(1_670, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(1_u64)) + // Minimum execution time: 15_936_000 picoseconds. 
+ Weight::from_parts(16_271_000, 3556) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_916, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 4194304]`. fn note_requested_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 16_104_000 picoseconds. - Weight::from_parts(18_393_879, 3556) + // Minimum execution time: 16_468_000 picoseconds. + Weight::from_parts(17_031_000, 3556) // Standard Error: 2 - .saturating_add(Weight::from_parts(1_669, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(Weight::from_parts(1_948, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 4194304]`. fn note_no_deposit_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 15_652_000 picoseconds. - Weight::from_parts(22_031_627, 3556) - // Standard Error: 2 - .saturating_add(Weight::from_parts(1_672, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(1_u64)) + // Minimum execution time: 16_342_000 picoseconds. 
+ Weight::from_parts(16_535_000, 3556) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_906, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unnote_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `289` + // Measured: `172` // Estimated: `3556` - // Minimum execution time: 37_148_000 picoseconds. - Weight::from_parts(40_247_000, 3556) - .saturating_add(RocksDbWeight::get().reads(1_u64)) + // Minimum execution time: 31_047_000 picoseconds. + Weight::from_parts(34_099_000, 3556) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unnote_no_deposit_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `144` // Estimated: `3556` - // Minimum execution time: 19_909_000 picoseconds. - Weight::from_parts(21_572_000, 3556) - .saturating_add(RocksDbWeight::get().reads(1_u64)) + // Minimum execution time: 32_559_000 picoseconds. + Weight::from_parts(36_677_000, 3556) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) fn request_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `188` + // Measured: `172` // Estimated: `3556` - // Minimum execution time: 17_602_000 picoseconds. 
- Weight::from_parts(18_899_000, 3556) - .saturating_add(RocksDbWeight::get().reads(1_u64)) + // Minimum execution time: 27_887_000 picoseconds. + Weight::from_parts(30_303_000, 3556) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) fn request_no_deposit_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `144` // Estimated: `3556` - // Minimum execution time: 11_253_000 picoseconds. - Weight::from_parts(11_667_000, 3556) - .saturating_add(RocksDbWeight::get().reads(1_u64)) + // Minimum execution time: 17_256_000 picoseconds. + Weight::from_parts(19_481_000, 3556) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) fn request_unnoted_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `42` // Estimated: `3556` - // Minimum execution time: 14_152_000 picoseconds. - Weight::from_parts(14_652_000, 3556) - .saturating_add(RocksDbWeight::get().reads(1_u64)) + // Minimum execution time: 22_344_000 picoseconds. + Weight::from_parts(23_868_000, 3556) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) fn request_requested_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 8_267_000 picoseconds. - Weight::from_parts(8_969_000, 3556) - .saturating_add(RocksDbWeight::get().reads(1_u64)) + // Minimum execution time: 10_542_000 picoseconds. 
+ Weight::from_parts(11_571_000, 3556) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unrequest_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `144` // Estimated: `3556` - // Minimum execution time: 18_429_000 picoseconds. - Weight::from_parts(18_946_000, 3556) - .saturating_add(RocksDbWeight::get().reads(1_u64)) + // Minimum execution time: 29_054_000 picoseconds. + Weight::from_parts(32_996_000, 3556) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) fn unrequest_unnoted_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 7_910_000 picoseconds. - Weight::from_parts(8_272_000, 3556) - .saturating_add(RocksDbWeight::get().reads(1_u64)) + // Minimum execution time: 10_775_000 picoseconds. + Weight::from_parts(11_937_000, 3556) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) fn unrequest_multi_referenced_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 7_936_000 picoseconds. - Weight::from_parts(8_504_000, 3556) + // Minimum execution time: 10_696_000 picoseconds. 
+ Weight::from_parts(11_717_000, 3556) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Preimage::StatusFor` (r:1024 w:1024) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:0 w:1024) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 1024]`. + fn ensure_updated(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `193 + n * (91 ±0)` + // Estimated: `3593 + n * (2566 ±0)` + // Minimum execution time: 2_452_000 picoseconds. + Weight::from_parts(2_641_000, 3593) + // Standard Error: 19_797 + .saturating_add(Weight::from_parts(15_620_946, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) + .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2566).saturating_mul(n.into())) } } diff --git a/substrate/frame/referenda/src/mock.rs b/substrate/frame/referenda/src/mock.rs index e44167ed561c..c23fa4609d43 100644 --- a/substrate/frame/referenda/src/mock.rs +++ b/substrate/frame/referenda/src/mock.rs @@ -89,8 +89,7 @@ impl pallet_preimage::Config for Test { type WeightInfo = (); type Currency = Balances; type ManagerOrigin = EnsureRoot; - type BaseDeposit = (); - type ByteDeposit = (); + type Consideration = (); } impl pallet_scheduler::Config for Test { type RuntimeEvent = RuntimeEvent; diff --git a/substrate/frame/scheduler/src/mock.rs b/substrate/frame/scheduler/src/mock.rs index 28e334958d92..b6eb1d044fa2 100644 --- a/substrate/frame/scheduler/src/mock.rs +++ b/substrate/frame/scheduler/src/mock.rs @@ -100,7 +100,7 @@ frame_support::construct_runtime!( System: frame_system::{Pallet, Call, Config, Storage, Event}, Logger: logger::{Pallet, Call, Event}, Scheduler: scheduler::{Pallet, Call, Storage, Event}, - Preimage: pallet_preimage::{Pallet, Call, Storage, Event}, + Preimage: pallet_preimage::{Pallet, Call, Storage, Event, HoldReason}, } ); @@ -155,8 +155,7 @@ impl pallet_preimage::Config for Test { type WeightInfo = (); type Currency = (); type ManagerOrigin = EnsureRoot; - type BaseDeposit = (); - type ByteDeposit = (); + type Consideration = (); } pub struct TestWeightInfo; diff --git a/substrate/frame/support/src/dispatch.rs b/substrate/frame/support/src/dispatch.rs index eb1fc524200b..75e94827fea8 100644 --- a/substrate/frame/support/src/dispatch.rs +++ b/substrate/frame/support/src/dispatch.rs @@ -138,6 +138,15 @@ impl From for PostDispatchInfo { } } +impl From for Pays { + fn from(b: bool) -> Self { + match b { + true => Self::Yes, + false => Self::No, + } + } +} + /// A generalized group of dispatch types. 
/// /// NOTE whenever upgrading the enum make sure to also update diff --git a/substrate/frame/support/src/traits.rs b/substrate/frame/support/src/traits.rs index 40e348b5e373..10ae5d83b5ac 100644 --- a/substrate/frame/support/src/traits.rs +++ b/substrate/frame/support/src/traits.rs @@ -90,8 +90,8 @@ pub use hooks::{ pub mod schedule; mod storage; pub use storage::{ - Consideration, Footprint, Incrementable, Instance, PartialStorageInfoTrait, StorageInfo, - StorageInfoTrait, StorageInstance, TrackedStorageKey, WhitelistedStorageKeys, + Consideration, Footprint, Incrementable, Instance, LinearStoragePrice, PartialStorageInfoTrait, + StorageInfo, StorageInfoTrait, StorageInstance, TrackedStorageKey, WhitelistedStorageKeys, }; mod dispatch; diff --git a/substrate/frame/support/src/traits/storage.rs b/substrate/frame/support/src/traits/storage.rs index cfa24d73f94f..e0ce1c0fbd31 100644 --- a/substrate/frame/support/src/traits/storage.rs +++ b/substrate/frame/support/src/traits/storage.rs @@ -18,11 +18,13 @@ //! Traits for encoding data related to pallet's storage items. use codec::{Encode, FullCodec, MaxEncodedLen}; +use core::marker::PhantomData; use impl_trait_for_tuples::impl_for_tuples; use scale_info::TypeInfo; pub use sp_core::storage::TrackedStorageKey; +use sp_core::Get; use sp_runtime::{ - traits::{Member, Saturating}, + traits::{Convert, Member, Saturating}, DispatchError, RuntimeDebug, }; use sp_std::{collections::btree_set::BTreeSet, prelude::*}; @@ -152,6 +154,20 @@ impl Footprint { } } +/// A storage price that increases linearly with the number of elements and their size. +pub struct LinearStoragePrice(PhantomData<(Base, Slope, Balance)>); +impl Convert for LinearStoragePrice +where + Base: Get, + Slope: Get, + Balance: From + sp_runtime::Saturating, +{ + fn convert(a: Footprint) -> Balance { + let s: Balance = (a.count.saturating_mul(a.size)).into(); + s.saturating_mul(Slope::get()).saturating_add(Base::get()) + } +} + /// Some sort of cost taken from account temporarily in order to offset the cost to the chain of /// holding some data [`Footprint`] in state. /// @@ -239,3 +255,25 @@ where } impl_incrementable!(u8, u16, u32, u64, u128, i8, i16, i32, i64, i128); + +#[cfg(test)] +mod tests { + use super::*; + use sp_core::ConstU64; + + #[test] + fn linear_storage_price_works() { + type Linear = LinearStoragePrice, ConstU64<3>, u64>; + let p = |count, size| Linear::convert(Footprint { count, size }); + + assert_eq!(p(0, 0), 7); + assert_eq!(p(0, 1), 7); + assert_eq!(p(1, 0), 7); + + assert_eq!(p(1, 1), 10); + assert_eq!(p(8, 1), 31); + assert_eq!(p(1, 8), 31); + + assert_eq!(p(u64::MAX, u64::MAX), u64::MAX); + } +} diff --git a/substrate/frame/whitelist/src/mock.rs b/substrate/frame/whitelist/src/mock.rs index d91f43b33af9..e6ddbf02bb2b 100644 --- a/substrate/frame/whitelist/src/mock.rs +++ b/substrate/frame/whitelist/src/mock.rs @@ -90,8 +90,7 @@ impl pallet_preimage::Config for Test { type RuntimeEvent = RuntimeEvent; type Currency = Balances; type ManagerOrigin = EnsureRoot; - type BaseDeposit = ConstU64<1>; - type ByteDeposit = ConstU64<1>; + type Consideration = (); type WeightInfo = (); } From c7dbfc21b661c39f5dc2e181109d00945638350c Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Sun, 17 Sep 2023 22:06:19 +0200 Subject: [PATCH 03/69] Babe epoch newtype (#1596) Removal of verbatim duplication of BABE's `Epoch` struct in the client. 
I think is better to have one single definition and wrap the primitive `Epoch` in a newtype (required because we need to implement the `Epoch` trait). --- Cargo.lock | 3 - substrate/client/consensus/babe/Cargo.toml | 3 - .../client/consensus/babe/src/authorship.rs | 21 ++++--- substrate/client/consensus/babe/src/lib.rs | 55 +++++++++---------- .../client/consensus/babe/src/migration.rs | 4 +- substrate/client/consensus/babe/src/tests.rs | 10 ++-- 6 files changed, 45 insertions(+), 51 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a994d14a322d..efefda5c8781 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14754,17 +14754,14 @@ dependencies = [ "num-traits", "parity-scale-codec", "parking_lot 0.12.1", - "rand_chacha 0.2.2", "sc-block-builder", "sc-client-api", "sc-consensus", "sc-consensus-epochs", "sc-consensus-slots", - "sc-network", "sc-network-test", "sc-telemetry", "sc-transaction-pool-api", - "scale-info", "sp-api", "sp-application-crypto", "sp-block-builder", diff --git a/substrate/client/consensus/babe/Cargo.toml b/substrate/client/consensus/babe/Cargo.toml index d95887945358..c8cff0981b36 100644 --- a/substrate/client/consensus/babe/Cargo.toml +++ b/substrate/client/consensus/babe/Cargo.toml @@ -15,7 +15,6 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = "0.1.57" -scale-info = { version = "2.5.0", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } futures = "0.3.21" log = "0.4.17" @@ -45,10 +44,8 @@ sp-keystore = { path = "../../../primitives/keystore" } sp-runtime = { path = "../../../primitives/runtime" } [dev-dependencies] -rand_chacha = "0.2.2" sc-block-builder = { path = "../../block-builder" } sp-keyring = { path = "../../../primitives/keyring" } -sc-network = { path = "../../network" } sc-network-test = { path = "../../network/test" } sp-timestamp = { path = "../../../primitives/timestamp" } sp-tracing = { path = "../../../primitives/tracing" } diff --git a/substrate/client/consensus/babe/src/authorship.rs b/substrate/client/consensus/babe/src/authorship.rs index 758d5321a94c..3580caba7461 100644 --- a/substrate/client/consensus/babe/src/authorship.rs +++ b/substrate/client/consensus/babe/src/authorship.rs @@ -132,23 +132,22 @@ fn claim_secondary_slot( keystore: &KeystorePtr, author_secondary_vrf: bool, ) -> Option<(PreDigest, AuthorityId)> { - let Epoch { authorities, randomness, mut epoch_index, .. } = epoch; - - if authorities.is_empty() { + if epoch.authorities.is_empty() { return None } + let mut epoch_index = epoch.epoch_index; if epoch.end_slot() <= slot { // Slot doesn't strictly belong to the epoch, create a clone with fixed values. epoch_index = epoch.clone_for_slot(slot).epoch_index; } - let expected_author = secondary_slot_author(slot, authorities, *randomness)?; + let expected_author = secondary_slot_author(slot, &epoch.authorities, epoch.randomness)?; for (authority_id, authority_index) in keys { if authority_id == expected_author { let pre_digest = if author_secondary_vrf { - let data = make_vrf_sign_data(randomness, slot, epoch_index); + let data = make_vrf_sign_data(&epoch.randomness, slot, epoch_index); let result = keystore.sr25519_vrf_sign(AuthorityId::ID, authority_id.as_ref(), &data); if let Ok(Some(vrf_signature)) = result { @@ -232,19 +231,18 @@ fn claim_primary_slot( keystore: &KeystorePtr, keys: &[(AuthorityId, usize)], ) -> Option<(PreDigest, AuthorityId)> { - let Epoch { authorities, randomness, mut epoch_index, .. 
} = epoch; - + let mut epoch_index = epoch.epoch_index; if epoch.end_slot() <= slot { // Slot doesn't strictly belong to the epoch, create a clone with fixed values. epoch_index = epoch.clone_for_slot(slot).epoch_index; } - let data = make_vrf_sign_data(randomness, slot, epoch_index); + let data = make_vrf_sign_data(&epoch.randomness, slot, epoch_index); for (authority_id, authority_index) in keys { let result = keystore.sr25519_vrf_sign(AuthorityId::ID, authority_id.as_ref(), &data); if let Ok(Some(vrf_signature)) = result { - let threshold = calculate_primary_threshold(c, authorities, *authority_index); + let threshold = calculate_primary_threshold(c, &epoch.authorities, *authority_index); let can_claim = authority_id .as_inner_ref() @@ -274,7 +272,7 @@ fn claim_primary_slot( #[cfg(test)] mod tests { use super::*; - use sp_consensus_babe::{AllowedSlots, AuthorityId, BabeEpochConfiguration}; + use sp_consensus_babe::{AllowedSlots, AuthorityId, BabeEpochConfiguration, Epoch}; use sp_core::{crypto::Pair as _, sr25519::Pair}; use sp_keystore::testing::MemoryKeystore; @@ -300,7 +298,8 @@ mod tests { c: (3, 10), allowed_slots: AllowedSlots::PrimaryAndSecondaryPlainSlots, }, - }; + } + .into(); assert!(claim_slot(10.into(), &epoch, &keystore).is_none()); diff --git a/substrate/client/consensus/babe/src/lib.rs b/substrate/client/consensus/babe/src/lib.rs index 90b7523ec18b..ccf72939631a 100644 --- a/substrate/client/consensus/babe/src/lib.rs +++ b/substrate/client/consensus/babe/src/lib.rs @@ -69,6 +69,7 @@ use std::{ collections::HashSet, future::Future, + ops::{Deref, DerefMut}, pin::Pin, sync::Arc, task::{Context, Poll}, @@ -156,20 +157,27 @@ const AUTHORING_SCORE_VRF_CONTEXT: &[u8] = b"substrate-babe-vrf"; const AUTHORING_SCORE_LENGTH: usize = 16; /// BABE epoch information -#[derive(Decode, Encode, PartialEq, Eq, Clone, Debug, scale_info::TypeInfo)] -pub struct Epoch { - /// The epoch index. - pub epoch_index: u64, - /// The starting slot of the epoch. - pub start_slot: Slot, - /// The duration of this epoch. - pub duration: u64, - /// The authorities and their weights. - pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, - /// Randomness for this epoch. - pub randomness: Randomness, - /// Configuration of the epoch. - pub config: BabeEpochConfiguration, +#[derive(Clone, Debug, PartialEq, Eq, Encode, Decode)] +pub struct Epoch(sp_consensus_babe::Epoch); + +impl Deref for Epoch { + type Target = sp_consensus_babe::Epoch; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for Epoch { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl From for Epoch { + fn from(epoch: sp_consensus_babe::Epoch) -> Self { + Epoch(epoch) + } } impl EpochT for Epoch { @@ -180,7 +188,7 @@ impl EpochT for Epoch { &self, (descriptor, config): (NextEpochDescriptor, BabeEpochConfiguration), ) -> Epoch { - Epoch { + sp_consensus_babe::Epoch { epoch_index: self.epoch_index + 1, start_slot: self.start_slot + self.duration, duration: self.duration, @@ -188,6 +196,7 @@ impl EpochT for Epoch { randomness: descriptor.randomness, config, } + .into() } fn start_slot(&self) -> Slot { @@ -199,25 +208,12 @@ impl EpochT for Epoch { } } -impl From for Epoch { - fn from(epoch: sp_consensus_babe::Epoch) -> Self { - Epoch { - epoch_index: epoch.epoch_index, - start_slot: epoch.start_slot, - duration: epoch.duration, - authorities: epoch.authorities, - randomness: epoch.randomness, - config: epoch.config, - } - } -} - impl Epoch { /// Create the genesis epoch (epoch #0). 
/// /// This is defined to start at the slot of the first block, so that has to be provided. pub fn genesis(genesis_config: &BabeConfiguration, slot: Slot) -> Epoch { - Epoch { + sp_consensus_babe::Epoch { epoch_index: 0, start_slot: slot, duration: genesis_config.epoch_length, @@ -228,6 +224,7 @@ impl Epoch { allowed_slots: genesis_config.allowed_slots, }, } + .into() } /// Clone and tweak epoch information to refer to the specified slot. diff --git a/substrate/client/consensus/babe/src/migration.rs b/substrate/client/consensus/babe/src/migration.rs index 2b1396c41c26..bec2d0a61f4b 100644 --- a/substrate/client/consensus/babe/src/migration.rs +++ b/substrate/client/consensus/babe/src/migration.rs @@ -62,10 +62,11 @@ impl EpochT for EpochV0 { } } +// Implement From for Epoch impl EpochV0 { /// Migrate the sturct to current epoch version. pub fn migrate(self, config: &BabeConfiguration) -> Epoch { - Epoch { + sp_consensus_babe::Epoch { epoch_index: self.epoch_index, start_slot: self.start_slot, duration: self.duration, @@ -73,5 +74,6 @@ impl EpochV0 { randomness: self.randomness, config: BabeEpochConfiguration { c: config.c, allowed_slots: config.allowed_slots }, } + .into() } } diff --git a/substrate/client/consensus/babe/src/tests.rs b/substrate/client/consensus/babe/src/tests.rs index b3843f8acfa0..02882d8baaed 100644 --- a/substrate/client/consensus/babe/src/tests.rs +++ b/substrate/client/consensus/babe/src/tests.rs @@ -501,7 +501,7 @@ fn claim_epoch_slots() { let authority = Sr25519Keyring::Alice; let keystore = create_keystore(authority); - let mut epoch = Epoch { + let mut epoch: Epoch = sp_consensus_babe::Epoch { start_slot: 0.into(), authorities: vec![(authority.public().into(), 1)], randomness: [0; 32], @@ -511,7 +511,8 @@ fn claim_epoch_slots() { c: (3, 10), allowed_slots: AllowedSlots::PrimaryAndSecondaryPlainSlots, }, - }; + } + .into(); let claim_slot_wrap = |s, e| match claim_slot(Slot::from(s as u64), &e, &keystore) { None => 0, @@ -551,7 +552,7 @@ fn claim_vrf_check() { let public = authority.public(); - let epoch = Epoch { + let epoch: Epoch = sp_consensus_babe::Epoch { start_slot: 0.into(), authorities: vec![(public.into(), 1)], randomness: [0; 32], @@ -561,7 +562,8 @@ fn claim_vrf_check() { c: (3, 10), allowed_slots: AllowedSlots::PrimaryAndSecondaryVRFSlots, }, - }; + } + .into(); // We leverage the predictability of claim types given a constant randomness. From 2ca30ed10ad2f68f4a8769e61f045299ae362162 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Molina=20Colmenero?= Date: Mon, 18 Sep 2023 00:27:38 +0200 Subject: [PATCH 04/69] fix: export-genesis-state command (#1521) # Description Currently the `ExportGenesisState` command in polkadot parachain uses an asynchronous context to run, which seems to display some warnings. See the screenshot below: ![Screenshot 2023-09-12 at 17 12 15](https://github.com/paritytech/polkadot-sdk/assets/2722756/0140b48a-2edb-41fa-b046-579d526f8305) After the changes in this PR, which essentially runs the command in a synchronous context, the command works properly without any warning. ![Screenshot 2023-09-12 at 18 23 46](https://github.com/paritytech/polkadot-sdk/assets/2722756/31506917-ece2-4a5f-8909-f215cc1ac0de) The remaining runtimes were added to `construct_benchmark_partials` macro in order not to fail if the runtime was not included in the non-exhaustive initial list, similarly to the `construct_async_run` one. 
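In short, the handler moves from the asynchronous wrapper to a plain synchronous runner. A before/after sketch, both halves excerpted from the diff below:

```rust
// Before: wrapped in an async context it never needed, which triggered the
// runtime warnings shown in the first screenshot.
Some(Subcommand::ExportGenesisState(cmd)) =>
    construct_async_run!(|components, cli, cmd, config| {
        Ok(async move { cmd.run(&*config.chain_spec, &*components.client) })
    }),

// After: run in a synchronous context; no task executor is spawned for a
// plain state export.
Some(Subcommand::ExportGenesisState(cmd)) => {
    let runner = cli.create_runner(cmd)?;
    runner.sync_run(|config| {
        construct_partials!(config, |partials| cmd.run(&*config.chain_spec, &*partials.client))
    })
},
```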
For completeness: tests were made following this [tutorial](https://docs.substrate.io/tutorials/build-a-parachain/connect-a-local-parachain/). --- cumulus/polkadot-parachain/src/command.rs | 52 +++++++++++++++++++---- 1 file changed, 44 insertions(+), 8 deletions(-) diff --git a/cumulus/polkadot-parachain/src/command.rs b/cumulus/polkadot-parachain/src/command.rs index 596b7baf6710..32e363aff74b 100644 --- a/cumulus/polkadot-parachain/src/command.rs +++ b/cumulus/polkadot-parachain/src/command.rs @@ -381,7 +381,7 @@ impl SubstrateCli for RelayChainCli { } /// Creates partial components for the runtimes that are supported by the benchmarks. -macro_rules! construct_benchmark_partials { +macro_rules! construct_partials { ($config:expr, |$partials:ident| $code:expr) => { match $config.chain_spec.runtime() { Runtime::AssetHubKusama => { @@ -456,7 +456,41 @@ macro_rules! construct_benchmark_partials { )?; $code }, - _ => Err("The chain is not supported".into()), + Runtime::Shell => { + let $partials = new_partial::( + &$config, + crate::service::shell_build_import_queue, + )?; + $code + }, + Runtime::Seedling => { + let $partials = new_partial::( + &$config, + crate::service::shell_build_import_queue, + )?; + $code + }, + Runtime::ContractsRococo => { + let $partials = new_partial::( + &$config, + crate::service::contracts_rococo_build_import_queue, + )?; + $code + }, + Runtime::Penpal(_) | Runtime::Default => { + let $partials = new_partial::( + &$config, + crate::service::rococo_parachain_build_import_queue, + )?; + $code + }, + Runtime::Glutton => { + let $partials = new_partial::( + &$config, + crate::service::shell_build_import_queue, + )?; + $code + }, } }; } @@ -679,10 +713,12 @@ pub fn run() -> Result<()> { cmd.run(config, polkadot_config) }) }, - Some(Subcommand::ExportGenesisState(cmd)) => - construct_async_run!(|components, cli, cmd, config| { - Ok(async move { cmd.run(&*config.chain_spec, &*components.client) }) - }), + Some(Subcommand::ExportGenesisState(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.sync_run(|config| { + construct_partials!(config, |partials| cmd.run(&*config.chain_spec, &*partials.client)) + }) + }, Some(Subcommand::ExportGenesisWasm(cmd)) => { let runner = cli.create_runner(cmd)?; runner.sync_run(|_config| { @@ -704,7 +740,7 @@ pub fn run() -> Result<()> { .into()) }, BenchmarkCmd::Block(cmd) => runner.sync_run(|config| { - construct_benchmark_partials!(config, |partials| cmd.run(partials.client)) + construct_partials!(config, |partials| cmd.run(partials.client)) }), #[cfg(not(feature = "runtime-benchmarks"))] BenchmarkCmd::Storage(_) => @@ -716,7 +752,7 @@ pub fn run() -> Result<()> { .into()), #[cfg(feature = "runtime-benchmarks")] BenchmarkCmd::Storage(cmd) => runner.sync_run(|config| { - construct_benchmark_partials!(config, |partials| { + construct_partials!(config, |partials| { let db = partials.backend.expose_db(); let storage = partials.backend.expose_storage(); From cf5c195237873d3741be11111283698e70f3e752 Mon Sep 17 00:00:00 2001 From: Egor_P Date: Mon, 18 Sep 2023 00:28:35 +0200 Subject: [PATCH 05/69] [Backport] Version bumps from release 1.1.0 (#1580) This PR backports version bumps for `polkadot` and `polkadot-parachain` from the v1.1.0 release branch --------- Co-authored-by: Mara Broda --- Cargo.lock | 6 +++--- cumulus/polkadot-parachain/Cargo.toml | 2 +- polkadot/Cargo.toml | 2 +- polkadot/cli/Cargo.toml | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index efefda5c8781..06d4b34031f8 100644 
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -11395,7 +11395,7 @@ dependencies = [

 [[package]]
 name = "polkadot"
-version = "1.0.0"
+version = "1.1.0"
 dependencies = [
  "assert_cmd",
  "color-eyre",
@@ -11528,7 +11528,7 @@ dependencies = [

 [[package]]
 name = "polkadot-cli"
-version = "1.0.0"
+version = "1.1.0"
 dependencies = [
  "clap 4.4.3",
  "frame-benchmarking-cli",
@@ -12340,7 +12340,7 @@ dependencies = [

 [[package]]
 name = "polkadot-parachain-bin"
-version = "1.0.0"
+version = "1.1.0"
 dependencies = [
  "assert_cmd",
  "asset-hub-kusama-runtime",
diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml
index e6fedb5f1fab..5591e80317ca 100644
--- a/cumulus/polkadot-parachain/Cargo.toml
+++ b/cumulus/polkadot-parachain/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "polkadot-parachain-bin"
-version = "1.0.0"
+version = "1.1.0"
 authors.workspace = true
 build = "build.rs"
 edition.workspace = true
diff --git a/polkadot/Cargo.toml b/polkadot/Cargo.toml
index b9c132bbff92..e4494b1abb20 100644
--- a/polkadot/Cargo.toml
+++ b/polkadot/Cargo.toml
@@ -18,7 +18,7 @@ rust-version = "1.64.0"
 readme = "README.md"
 authors.workspace = true
 edition.workspace = true
-version = "1.0.0"
+version = "1.1.0"
 default-run = "polkadot"

 [dependencies]
diff --git a/polkadot/cli/Cargo.toml b/polkadot/cli/Cargo.toml
index 794d8c4714af..a9631fb9a7da 100644
--- a/polkadot/cli/Cargo.toml
+++ b/polkadot/cli/Cargo.toml
@@ -1,7 +1,7 @@
 [package]
 name = "polkadot-cli"
 description = "Polkadot Relay-chain Client Node"
-version = "1.0.0"
+version = "1.1.0"
 authors.workspace = true
 edition.workspace = true
 license.workspace = true

From e38998801e433ecc569ff6d58d1d0aa80eaff771 Mon Sep 17 00:00:00 2001
From: yjh
Date: Mon, 18 Sep 2023 13:53:06 +0800
Subject: [PATCH 06/69] Executor: Remove `LegacyInstanceReuse` strategy (#1486)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

It seems the old strategy has been deprecated for more than a year, so maybe
it's time to clean up the old strategy in the wasm executor.
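Concretely, the CLI-to-service mapping in `sc-cli` just loses its deprecated arm. A sketch of the remaining match, assuming the other variants are untouched (see the `arg_enums.rs` hunk below):

```rust
// Sketch only: nodes that selected `legacy-instance-reuse` must move to one of
// the remaining strategies; the default is copy-on-write pooling.
match strategy {
    WasmtimeInstantiationStrategy::PoolingCopyOnWrite =>
        sc_service::config::WasmtimeInstantiationStrategy::PoolingCopyOnWrite,
    WasmtimeInstantiationStrategy::RecreateInstanceCopyOnWrite =>
        sc_service::config::WasmtimeInstantiationStrategy::RecreateInstanceCopyOnWrite,
    WasmtimeInstantiationStrategy::Pooling =>
        sc_service::config::WasmtimeInstantiationStrategy::Pooling,
    WasmtimeInstantiationStrategy::RecreateInstance =>
        sc_service::config::WasmtimeInstantiationStrategy::RecreateInstance,
    // `WasmtimeInstantiationStrategy::LegacyInstanceReuse` no longer exists.
}
```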
--- polkadot address: 15ouFh2SHpGbHtDPsJ6cXQfes9Cx1gEFnJJsJVqPGzBSTudr --------- Co-authored-by: Bastian Köcher Co-authored-by: Koute --- .../pvf/prepare-worker/src/memory_stats.rs | 2 +- substrate/client/cli/src/arg_enums.rs | 8 - substrate/client/executor/benches/bench.rs | 7 - .../runtime_blob/data_segments_snapshot.rs | 87 ---------- .../src/runtime_blob/globals_snapshot.rs | 112 ------------- .../executor/common/src/runtime_blob/mod.rs | 4 - .../common/src/runtime_blob/runtime_blob.rs | 19 +-- .../executor/common/src/wasm_runtime.rs | 10 -- .../executor/src/integration_tests/linux.rs | 84 ---------- .../src/integration_tests/linux/smaps.rs | 82 ---------- .../executor/src/integration_tests/mod.rs | 11 -- .../client/executor/wasmtime/src/host.rs | 2 +- .../executor/wasmtime/src/instance_wrapper.rs | 104 +----------- .../client/executor/wasmtime/src/runtime.rs | 149 +----------------- .../client/executor/wasmtime/src/tests.rs | 19 +-- .../client/executor/wasmtime/src/util.rs | 30 +--- 16 files changed, 20 insertions(+), 710 deletions(-) delete mode 100644 substrate/client/executor/common/src/runtime_blob/data_segments_snapshot.rs delete mode 100644 substrate/client/executor/common/src/runtime_blob/globals_snapshot.rs delete mode 100644 substrate/client/executor/src/integration_tests/linux.rs delete mode 100644 substrate/client/executor/src/integration_tests/linux/smaps.rs diff --git a/polkadot/node/core/pvf/prepare-worker/src/memory_stats.rs b/polkadot/node/core/pvf/prepare-worker/src/memory_stats.rs index 7904dfa9cb88..c70ff56fc84d 100644 --- a/polkadot/node/core/pvf/prepare-worker/src/memory_stats.rs +++ b/polkadot/node/core/pvf/prepare-worker/src/memory_stats.rs @@ -151,7 +151,7 @@ pub mod memory_tracker { /// Module for dealing with the `ru_maxrss` (peak resident memory) stat from `getrusage`. /// /// NOTE: `getrusage` with the `RUSAGE_THREAD` parameter is only supported on Linux. `RUSAGE_SELF` -/// works on MacOS, but we need to get the max rss only for the preparation thread. Gettng it for +/// works on MacOS, but we need to get the max rss only for the preparation thread. Getting it for /// the current process would conflate the stats of previous jobs run by the process. #[cfg(target_os = "linux")] pub mod max_rss_stat { diff --git a/substrate/client/cli/src/arg_enums.rs b/substrate/client/cli/src/arg_enums.rs index 40d86fd97988..67acb82c2c30 100644 --- a/substrate/client/cli/src/arg_enums.rs +++ b/substrate/client/cli/src/arg_enums.rs @@ -38,12 +38,6 @@ pub enum WasmtimeInstantiationStrategy { /// Recreate the instance from scratch on every instantiation. Very slow. RecreateInstance, - - /// Legacy instance reuse mechanism. DEPRECATED. Will be removed in the future. - /// - /// Should only be used in case of encountering any issues with the new default - /// instantiation strategy. - LegacyInstanceReuse, } /// The default [`WasmtimeInstantiationStrategy`]. 
@@ -92,8 +86,6 @@ pub fn execution_method_from_cli( sc_service::config::WasmtimeInstantiationStrategy::Pooling, WasmtimeInstantiationStrategy::RecreateInstance => sc_service::config::WasmtimeInstantiationStrategy::RecreateInstance, - WasmtimeInstantiationStrategy::LegacyInstanceReuse => - sc_service::config::WasmtimeInstantiationStrategy::LegacyInstanceReuse, }, } } diff --git a/substrate/client/executor/benches/bench.rs b/substrate/client/executor/benches/bench.rs index 66a82a175221..86c769f88811 100644 --- a/substrate/client/executor/benches/bench.rs +++ b/substrate/client/executor/benches/bench.rs @@ -150,13 +150,6 @@ fn bench_call_instance(c: &mut Criterion) { let _ = env_logger::try_init(); let strategies = [ - ( - "legacy_instance_reuse", - Method::Compiled { - instantiation_strategy: InstantiationStrategy::LegacyInstanceReuse, - precompile: false, - }, - ), ( "recreate_instance_vanilla", Method::Compiled { diff --git a/substrate/client/executor/common/src/runtime_blob/data_segments_snapshot.rs b/substrate/client/executor/common/src/runtime_blob/data_segments_snapshot.rs deleted file mode 100644 index 3fd546ce4457..000000000000 --- a/substrate/client/executor/common/src/runtime_blob/data_segments_snapshot.rs +++ /dev/null @@ -1,87 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use super::RuntimeBlob; -use crate::error::{self, Error}; -use std::mem; -use wasm_instrument::parity_wasm::elements::Instruction; - -/// This is a snapshot of data segments specialzied for a particular instantiation. -/// -/// Note that this assumes that no mutable globals are used. -#[derive(Clone)] -pub struct DataSegmentsSnapshot { - /// The list of data segments represented by (offset, contents). - data_segments: Vec<(u32, Vec)>, -} - -impl DataSegmentsSnapshot { - /// Create a snapshot from the data segments from the module. - pub fn take(module: &RuntimeBlob) -> error::Result { - let data_segments = module - .data_segments() - .into_iter() - .map(|mut segment| { - // Just replace contents of the segment since the segments will be discarded later - // anyway. - let contents = mem::take(segment.value_mut()); - - let init_expr = match segment.offset() { - Some(offset) => offset.code(), - // Return if the segment is passive - None => return Err(Error::SharedMemUnsupported), - }; - - // [op, End] - if init_expr.len() != 2 { - return Err(Error::InitializerHasTooManyExpressions) - } - let offset = match &init_expr[0] { - Instruction::I32Const(v) => *v as u32, - Instruction::GetGlobal(_) => { - // In a valid wasm file, initializer expressions can only refer imported - // globals. - // - // At the moment of writing the Substrate Runtime Interface does not provide - // any globals. There is nothing that prevents us from supporting this - // if/when we gain those. 
- return Err(Error::ImportedGlobalsUnsupported) - }, - insn => return Err(Error::InvalidInitializerExpression(format!("{:?}", insn))), - }; - - Ok((offset, contents)) - }) - .collect::>>()?; - - Ok(Self { data_segments }) - } - - /// Apply the given snapshot to a linear memory. - /// - /// Linear memory interface is represented by a closure `memory_set`. - pub fn apply( - &self, - mut memory_set: impl FnMut(u32, &[u8]) -> Result<(), E>, - ) -> Result<(), E> { - for (offset, contents) in &self.data_segments { - memory_set(*offset, contents)?; - } - Ok(()) - } -} diff --git a/substrate/client/executor/common/src/runtime_blob/globals_snapshot.rs b/substrate/client/executor/common/src/runtime_blob/globals_snapshot.rs deleted file mode 100644 index 9ba6fc55e49c..000000000000 --- a/substrate/client/executor/common/src/runtime_blob/globals_snapshot.rs +++ /dev/null @@ -1,112 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use super::RuntimeBlob; - -/// Saved value of particular exported global. -struct SavedValue { - /// The handle of this global which can be used to refer to this global. - handle: Global, - /// The global value that was observed during the snapshot creation. - value: sp_wasm_interface::Value, -} - -/// An adapter for a wasm module instance that is focused on getting and setting globals. -pub trait InstanceGlobals { - /// A handle to a global which can be used to get or set a global variable. This is supposed to - /// be a lightweight handle, like an index or an Rc-like smart-pointer, which is cheap to clone. - type Global: Clone; - /// Get a handle to a global by it's export name. - /// - /// The requested export is must exist in the exported list, and it should be a mutable global. - fn get_global(&mut self, export_name: &str) -> Self::Global; - /// Get the current value of the global. - fn get_global_value(&mut self, global: &Self::Global) -> sp_wasm_interface::Value; - /// Update the current value of the global. - /// - /// The global behind the handle is guaranteed to be mutable and the value to be the same type - /// as the global. - fn set_global_value(&mut self, global: &Self::Global, value: sp_wasm_interface::Value); -} - -/// A set of exposed mutable globals. -/// -/// This is set of globals required to create a [`GlobalsSnapshot`] and that are collected from -/// a runtime blob that was instrumented by -/// [`RuntimeBlob::expose_mutable_globals`](super::RuntimeBlob::expose_mutable_globals`). - -/// If the code wasn't instrumented then it would be empty and snapshot would do nothing. -pub struct ExposedMutableGlobalsSet(Vec); - -impl ExposedMutableGlobalsSet { - /// Collect the set from the given runtime blob. See the struct documentation for details. 
- pub fn collect(runtime_blob: &RuntimeBlob) -> Self { - let global_names = - runtime_blob.exported_internal_global_names().map(ToOwned::to_owned).collect(); - Self(global_names) - } -} - -/// A snapshot of a global variables values. This snapshot can be later used for restoring the -/// values to the preserved state. -/// -/// Technically, a snapshot stores only values of mutable global variables. This is because -/// immutable global variables always have the same values. -/// -/// We take it from an instance rather from a module because the start function could potentially -/// change any of the mutable global values. -pub struct GlobalsSnapshot(Vec>); - -impl GlobalsSnapshot { - /// Take a snapshot of global variables for a given instance. - /// - /// # Panics - /// - /// This function panics if the instance doesn't correspond to the module from which the - /// [`ExposedMutableGlobalsSet`] was collected. - pub fn take( - mutable_globals: &ExposedMutableGlobalsSet, - instance: &mut Instance, - ) -> Self - where - Instance: InstanceGlobals, - { - let global_names = &mutable_globals.0; - let mut saved_values = Vec::with_capacity(global_names.len()); - - for global_name in global_names { - let handle = instance.get_global(global_name); - let value = instance.get_global_value(&handle); - saved_values.push(SavedValue { handle, value }); - } - - Self(saved_values) - } - - /// Apply the snapshot to the given instance. - /// - /// This instance must be the same that was used for creation of this snapshot. - pub fn apply(&self, instance: &mut Instance) - where - Instance: InstanceGlobals, - { - for saved_value in &self.0 { - instance.set_global_value(&saved_value.handle, saved_value.value); - } - } -} diff --git a/substrate/client/executor/common/src/runtime_blob/mod.rs b/substrate/client/executor/common/src/runtime_blob/mod.rs index 07a0945cc2b6..8261d07eda5e 100644 --- a/substrate/client/executor/common/src/runtime_blob/mod.rs +++ b/substrate/client/executor/common/src/runtime_blob/mod.rs @@ -46,10 +46,6 @@ //! is free of any floating point operations, which is a useful step towards making instances //! produced from such a module deterministic. -mod data_segments_snapshot; -mod globals_snapshot; mod runtime_blob; -pub use data_segments_snapshot::DataSegmentsSnapshot; -pub use globals_snapshot::{ExposedMutableGlobalsSet, GlobalsSnapshot, InstanceGlobals}; pub use runtime_blob::RuntimeBlob; diff --git a/substrate/client/executor/common/src/runtime_blob/runtime_blob.rs b/substrate/client/executor/common/src/runtime_blob/runtime_blob.rs index 24dc7e393a4b..becf9e219b0b 100644 --- a/substrate/client/executor/common/src/runtime_blob/runtime_blob.rs +++ b/substrate/client/executor/common/src/runtime_blob/runtime_blob.rs @@ -20,8 +20,8 @@ use crate::{error::WasmError, wasm_runtime::HeapAllocStrategy}; use wasm_instrument::{ export_mutable_globals, parity_wasm::elements::{ - deserialize_buffer, serialize, DataSegment, ExportEntry, External, Internal, MemorySection, - MemoryType, Module, Section, + deserialize_buffer, serialize, ExportEntry, External, Internal, MemorySection, MemoryType, + Module, Section, }, }; @@ -52,11 +52,6 @@ impl RuntimeBlob { Ok(Self { raw_module }) } - /// Extract the data segments from the given wasm code. - pub(super) fn data_segments(&self) -> Vec { - self.raw_module.data_section().map(|ds| ds.entries()).unwrap_or(&[]).to_vec() - } - /// The number of globals defined in locally in this module. 
pub fn declared_globals_count(&self) -> u32 { self.raw_module @@ -190,16 +185,6 @@ impl RuntimeBlob { Ok(()) } - /// Returns an iterator of all globals which were exported by [`expose_mutable_globals`]. - pub(super) fn exported_internal_global_names(&self) -> impl Iterator { - let exports = self.raw_module.export_section().map(|es| es.entries()).unwrap_or(&[]); - exports.iter().filter_map(|export| match export.internal() { - Internal::Global(_) if export.field().starts_with("exported_internal_global") => - Some(export.field()), - _ => None, - }) - } - /// Scans the wasm blob for the first section with the name that matches the given. Returns the /// contents of the custom section if found or `None` otherwise. pub fn custom_section_contents(&self, section_name: &str) -> Option<&[u8]> { diff --git a/substrate/client/executor/common/src/wasm_runtime.rs b/substrate/client/executor/common/src/wasm_runtime.rs index 5dac77e59fa7..d8e142b9d559 100644 --- a/substrate/client/executor/common/src/wasm_runtime.rs +++ b/substrate/client/executor/common/src/wasm_runtime.rs @@ -115,16 +115,6 @@ pub trait WasmInstance: Send { /// /// This method is only suitable for getting immutable globals. fn get_global_const(&mut self, name: &str) -> Result, Error>; - - /// **Testing Only**. This function returns the base address of the linear memory. - /// - /// This is meant to be the starting address of the memory mapped area for the linear memory. - /// - /// This function is intended only for a specific test that measures physical memory - /// consumption. - fn linear_memory_base_ptr(&self) -> Option<*const u8> { - None - } } /// Defines the heap pages allocation strategy the wasm runtime should use. diff --git a/substrate/client/executor/src/integration_tests/linux.rs b/substrate/client/executor/src/integration_tests/linux.rs deleted file mode 100644 index 68ac37e9011a..000000000000 --- a/substrate/client/executor/src/integration_tests/linux.rs +++ /dev/null @@ -1,84 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Tests that are only relevant for Linux. - -mod smaps; - -use super::mk_test_runtime; -use crate::WasmExecutionMethod; -use codec::Encode as _; -use sc_executor_common::wasm_runtime::DEFAULT_HEAP_ALLOC_STRATEGY; - -use self::smaps::Smaps; - -#[test] -fn memory_consumption_compiled() { - let _ = sp_tracing::try_init_simple(); - - if std::env::var("RUN_TEST").is_ok() { - memory_consumption(WasmExecutionMethod::Compiled { - instantiation_strategy: - sc_executor_wasmtime::InstantiationStrategy::LegacyInstanceReuse, - }); - } else { - // We need to run the test in isolation, to not getting interfered by the other tests. 
- let executable = std::env::current_exe().unwrap(); - let status = std::process::Command::new(executable) - .env("RUN_TEST", "1") - .args(&["--nocapture", "memory_consumption_compiled"]) - .status() - .unwrap(); - - assert!(status.success()); - } -} - -fn memory_consumption(wasm_method: WasmExecutionMethod) { - // This aims to see if linear memory stays backed by the physical memory after a runtime call. - // - // For that we make a series of runtime calls, probing the RSS for the VMA matching the linear - // memory. After the call we expect RSS to be equal to 0. - - let runtime = mk_test_runtime(wasm_method, DEFAULT_HEAP_ALLOC_STRATEGY); - - let mut instance = runtime.new_instance().unwrap(); - let heap_base = instance - .get_global_const("__heap_base") - .expect("`__heap_base` is valid") - .expect("`__heap_base` exists") - .as_i32() - .expect("`__heap_base` is an `i32`"); - - fn probe_rss(instance: &dyn sc_executor_common::wasm_runtime::WasmInstance) -> usize { - let base_addr = instance.linear_memory_base_ptr().unwrap() as usize; - Smaps::new().get_rss(base_addr).expect("failed to get rss") - } - - instance - .call_export("test_dirty_plenty_memory", &(heap_base as u32, 1u32).encode()) - .unwrap(); - let probe_1 = probe_rss(&*instance); - instance - .call_export("test_dirty_plenty_memory", &(heap_base as u32, 1024u32).encode()) - .unwrap(); - let probe_2 = probe_rss(&*instance); - - assert_eq!(probe_1, 0); - assert_eq!(probe_2, 0); -} diff --git a/substrate/client/executor/src/integration_tests/linux/smaps.rs b/substrate/client/executor/src/integration_tests/linux/smaps.rs deleted file mode 100644 index 1ac570dd8d5f..000000000000 --- a/substrate/client/executor/src/integration_tests/linux/smaps.rs +++ /dev/null @@ -1,82 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! A tool for extracting information about the memory consumption of the current process from -//! the procfs. 
- -use std::{collections::BTreeMap, ops::Range}; - -/// An interface to the /proc/self/smaps -/// -/// See docs about [procfs on kernel.org][procfs] -/// -/// [procfs]: https://www.kernel.org/doc/html/latest/filesystems/proc.html -pub struct Smaps(Vec<(Range, BTreeMap)>); - -impl Smaps { - pub fn new() -> Self { - let regex_start = regex::RegexBuilder::new("^([0-9a-f]+)-([0-9a-f]+)") - .multi_line(true) - .build() - .unwrap(); - let regex_kv = regex::RegexBuilder::new(r#"^([^:]+):\s*(\d+) kB"#) - .multi_line(true) - .build() - .unwrap(); - let smaps = std::fs::read_to_string("/proc/self/smaps").unwrap(); - let boundaries: Vec<_> = regex_start - .find_iter(&smaps) - .map(|matched| matched.start()) - .chain(std::iter::once(smaps.len())) - .collect(); - - let mut output = Vec::new(); - for window in boundaries.windows(2) { - let chunk = &smaps[window[0]..window[1]]; - let caps = regex_start.captures(chunk).unwrap(); - let start = usize::from_str_radix(caps.get(1).unwrap().as_str(), 16).unwrap(); - let end = usize::from_str_radix(caps.get(2).unwrap().as_str(), 16).unwrap(); - - let values = regex_kv - .captures_iter(chunk) - .map(|cap| { - let key = cap.get(1).unwrap().as_str().to_owned(); - let value = cap.get(2).unwrap().as_str().parse().unwrap(); - (key, value) - }) - .collect(); - - output.push((start..end, values)); - } - - Self(output) - } - - fn get_map(&self, addr: usize) -> &BTreeMap { - &self - .0 - .iter() - .find(|(range, _)| addr >= range.start && addr < range.end) - .unwrap() - .1 - } - - pub fn get_rss(&self, addr: usize) -> Option { - self.get_map(addr).get("Rss").cloned() - } -} diff --git a/substrate/client/executor/src/integration_tests/mod.rs b/substrate/client/executor/src/integration_tests/mod.rs index 37aed8eef96a..0bd080c24357 100644 --- a/substrate/client/executor/src/integration_tests/mod.rs +++ b/substrate/client/executor/src/integration_tests/mod.rs @@ -16,9 +16,6 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#[cfg(target_os = "linux")] -mod linux; - use assert_matches::assert_matches; use codec::{Decode, Encode}; use sc_executor_common::{ @@ -81,14 +78,6 @@ macro_rules! test_wasm_execution { instantiation_strategy: sc_executor_wasmtime::InstantiationStrategy::Pooling }); } - - #[test] - fn [<$method_name _compiled_legacy_instance_reuse>]() { - let _ = sp_tracing::try_init_simple(); - $method_name(WasmExecutionMethod::Compiled { - instantiation_strategy: sc_executor_wasmtime::InstantiationStrategy::LegacyInstanceReuse - }); - } } }; } diff --git a/substrate/client/executor/wasmtime/src/host.rs b/substrate/client/executor/wasmtime/src/host.rs index 9bd3ca3dade5..f8c78cbb660e 100644 --- a/substrate/client/executor/wasmtime/src/host.rs +++ b/substrate/client/executor/wasmtime/src/host.rs @@ -32,7 +32,7 @@ use crate::{instance_wrapper::MemoryWrapper, runtime::StoreData, util}; pub struct HostState { /// The allocator instance to keep track of allocated memory. /// - /// This is stored as an `Option` as we need to temporarly set this to `None` when we are + /// This is stored as an `Option` as we need to temporarily set this to `None` when we are /// allocating/deallocating memory. The problem being that we can only mutable access `caller` /// once. 
allocator: Option, diff --git a/substrate/client/executor/wasmtime/src/instance_wrapper.rs b/substrate/client/executor/wasmtime/src/instance_wrapper.rs index 6d319cce509e..acc799061c27 100644 --- a/substrate/client/executor/wasmtime/src/instance_wrapper.rs +++ b/substrate/client/executor/wasmtime/src/instance_wrapper.rs @@ -116,14 +116,14 @@ impl EntryPoint { pub(crate) struct MemoryWrapper<'a, C>(pub &'a wasmtime::Memory, pub &'a mut C); impl sc_allocator::Memory for MemoryWrapper<'_, C> { - fn with_access(&self, run: impl FnOnce(&[u8]) -> R) -> R { - run(self.0.data(&self.1)) - } - fn with_access_mut(&mut self, run: impl FnOnce(&mut [u8]) -> R) -> R { run(self.0.data_mut(&mut self.1)) } + fn with_access(&self, run: impl FnOnce(&[u8]) -> R) -> R { + run(self.0.data(&self.1)) + } + fn grow(&mut self, additional: u32) -> std::result::Result<(), ()> { self.0 .grow(&mut self.1, additional as u64) @@ -153,11 +153,6 @@ impl sc_allocator::Memory for MemoryWrapper<'_, C> { /// routines. pub struct InstanceWrapper { instance: Instance, - /// The memory instance of the `instance`. - /// - /// It is important to make sure that we don't make any copies of this to make it easier to - /// proof - memory: Memory, store: Store, } @@ -177,7 +172,7 @@ impl InstanceWrapper { store.data_mut().memory = Some(memory); store.data_mut().table = table; - Ok(InstanceWrapper { instance, memory, store }) + Ok(InstanceWrapper { instance, store }) } /// Resolves a substrate entrypoint by the given name. @@ -280,11 +275,6 @@ impl InstanceWrapper { _ => Err("Unknown value type".into()), } } - - /// Get a global with the given `name`. - pub fn get_global(&mut self, name: &str) -> Option { - self.instance.get_global(&mut self.store, name) - } } /// Extract linear memory instance from the given instance. @@ -311,76 +301,6 @@ fn get_table(instance: &Instance, ctx: &mut Store) -> Option { /// Functions related to memory. impl InstanceWrapper { - /// Returns the pointer to the first byte of the linear memory for this instance. - pub fn base_ptr(&self) -> *const u8 { - self.memory.data_ptr(&self.store) - } - - /// If possible removes physical backing from the allocated linear memory which - /// leads to returning the memory back to the system; this also zeroes the memory - /// as a side-effect. - pub fn decommit(&mut self) { - if self.memory.data_size(&self.store) == 0 { - return - } - - cfg_if::cfg_if! { - if #[cfg(target_os = "linux")] { - use std::sync::Once; - - unsafe { - let ptr = self.memory.data_ptr(&self.store); - let len = self.memory.data_size(&self.store); - - // Linux handles MADV_DONTNEED reliably. The result is that the given area - // is unmapped and will be zeroed on the next pagefault. - if libc::madvise(ptr as _, len, libc::MADV_DONTNEED) != 0 { - static LOGGED: Once = Once::new(); - LOGGED.call_once(|| { - log::warn!( - "madvise(MADV_DONTNEED) failed: {}", - std::io::Error::last_os_error(), - ); - }); - } else { - return; - } - } - } else if #[cfg(target_os = "macos")] { - use std::sync::Once; - - unsafe { - let ptr = self.memory.data_ptr(&self.store); - let len = self.memory.data_size(&self.store); - - // On MacOS we can simply overwrite memory mapping. 
- if libc::mmap( - ptr as _, - len, - libc::PROT_READ | libc::PROT_WRITE, - libc::MAP_FIXED | libc::MAP_PRIVATE | libc::MAP_ANONYMOUS, - -1, - 0, - ) == libc::MAP_FAILED { - static LOGGED: Once = Once::new(); - LOGGED.call_once(|| { - log::warn!( - "Failed to decommit WASM instance memory through mmap: {}", - std::io::Error::last_os_error(), - ); - }); - } else { - return; - } - } - } - } - - // If we're on an unsupported OS or the memory couldn't have been - // decommited for some reason then just manually zero it out. - self.memory.data_mut(self.store.as_context_mut()).fill(0); - } - pub(crate) fn store(&self) -> &Store { &self.store } @@ -389,17 +309,3 @@ impl InstanceWrapper { &mut self.store } } - -#[test] -fn decommit_works() { - let engine = wasmtime::Engine::default(); - let code = wat::parse_str("(module (memory (export \"memory\") 1 4))").unwrap(); - let module = wasmtime::Module::new(&engine, code).unwrap(); - let linker = wasmtime::Linker::new(&engine); - let instance_pre = linker.instantiate_pre(&module).unwrap(); - let mut wrapper = InstanceWrapper::new(&engine, &instance_pre).unwrap(); - unsafe { *wrapper.memory.data_ptr(&wrapper.store) = 42 }; - assert_eq!(unsafe { *wrapper.memory.data_ptr(&wrapper.store) }, 42); - wrapper.decommit(); - assert_eq!(unsafe { *wrapper.memory.data_ptr(&wrapper.store) }, 0); -} diff --git a/substrate/client/executor/wasmtime/src/runtime.rs b/substrate/client/executor/wasmtime/src/runtime.rs index 23b069870aa3..ae78137959be 100644 --- a/substrate/client/executor/wasmtime/src/runtime.rs +++ b/substrate/client/executor/wasmtime/src/runtime.rs @@ -27,9 +27,7 @@ use crate::{ use sc_allocator::{AllocationStats, FreeingBumpHeapAllocator}; use sc_executor_common::{ error::{Error, Result, WasmError}, - runtime_blob::{ - self, DataSegmentsSnapshot, ExposedMutableGlobalsSet, GlobalsSnapshot, RuntimeBlob, - }, + runtime_blob::RuntimeBlob, util::checked_range, wasm_runtime::{HeapAllocStrategy, InvokeMethod, WasmInstance, WasmModule}, }; @@ -69,17 +67,11 @@ impl StoreData { pub(crate) type Store = wasmtime::Store; enum Strategy { - LegacyInstanceReuse { - instance_wrapper: InstanceWrapper, - globals_snapshot: GlobalsSnapshot, - data_segments_snapshot: Arc, - heap_base: u32, - }, RecreateInstance(InstanceCreator), } struct InstanceCreator { - engine: wasmtime::Engine, + engine: Engine, instance_pre: Arc>, } @@ -89,40 +81,10 @@ impl InstanceCreator { } } -struct InstanceGlobals<'a> { - instance: &'a mut InstanceWrapper, -} - -impl<'a> runtime_blob::InstanceGlobals for InstanceGlobals<'a> { - type Global = wasmtime::Global; - - fn get_global(&mut self, export_name: &str) -> Self::Global { - self.instance - .get_global(export_name) - .expect("get_global is guaranteed to be called with an export name of a global; qed") - } - - fn get_global_value(&mut self, global: &Self::Global) -> Value { - util::from_wasmtime_val(global.get(&mut self.instance.store_mut())) - } - - fn set_global_value(&mut self, global: &Self::Global, value: Value) { - global.set(&mut self.instance.store_mut(), util::into_wasmtime_val(value)).expect( - "the value is guaranteed to be of the same value; the global is guaranteed to be mutable; qed", - ); - } -} - -/// Data required for creating instances with the fast instance reuse strategy. -struct InstanceSnapshotData { - mutable_globals: ExposedMutableGlobalsSet, - data_segments_snapshot: Arc, -} - /// A `WasmModule` implementation using wasmtime to compile the runtime module to machine code /// and execute the compiled code. 
pub struct WasmtimeRuntime { - engine: wasmtime::Engine, + engine: Engine, instance_pre: Arc>, instantiation_strategy: InternalInstantiationStrategy, } @@ -130,26 +92,6 @@ pub struct WasmtimeRuntime { impl WasmModule for WasmtimeRuntime { fn new_instance(&self) -> Result> { let strategy = match self.instantiation_strategy { - InternalInstantiationStrategy::LegacyInstanceReuse(ref snapshot_data) => { - let mut instance_wrapper = InstanceWrapper::new(&self.engine, &self.instance_pre)?; - let heap_base = instance_wrapper.extract_heap_base()?; - - // This function panics if the instance was created from a runtime blob different - // from which the mutable globals were collected. Here, it is easy to see that there - // is only a single runtime blob and thus it's the same that was used for both - // creating the instance and collecting the mutable globals. - let globals_snapshot = GlobalsSnapshot::take( - &snapshot_data.mutable_globals, - &mut InstanceGlobals { instance: &mut instance_wrapper }, - ); - - Strategy::LegacyInstanceReuse { - instance_wrapper, - globals_snapshot, - data_segments_snapshot: snapshot_data.data_segments_snapshot.clone(), - heap_base, - } - }, InternalInstantiationStrategy::Builtin => Strategy::RecreateInstance(InstanceCreator { engine: self.engine.clone(), instance_pre: self.instance_pre.clone(), @@ -174,39 +116,12 @@ impl WasmtimeInstance { allocation_stats: &mut Option, ) -> Result> { match &mut self.strategy { - Strategy::LegacyInstanceReuse { - ref mut instance_wrapper, - globals_snapshot, - data_segments_snapshot, - heap_base, - } => { - let entrypoint = instance_wrapper.resolve_entrypoint(method)?; - - data_segments_snapshot.apply(|offset, contents| { - util::write_memory_from( - instance_wrapper.store_mut(), - Pointer::new(offset), - contents, - ) - })?; - globals_snapshot.apply(&mut InstanceGlobals { instance: instance_wrapper }); - let allocator = FreeingBumpHeapAllocator::new(*heap_base); - - let result = - perform_call(data, instance_wrapper, entrypoint, allocator, allocation_stats); - - // Signal to the OS that we are done with the linear memory and that it can be - // reclaimed. - instance_wrapper.decommit(); - - result - }, Strategy::RecreateInstance(ref mut instance_creator) => { let mut instance_wrapper = instance_creator.instantiate()?; let heap_base = instance_wrapper.extract_heap_base()?; let entrypoint = instance_wrapper.resolve_entrypoint(method)?; - let allocator = FreeingBumpHeapAllocator::new(heap_base); + perform_call(data, &mut instance_wrapper, entrypoint, allocator, allocation_stats) }, } @@ -226,24 +141,10 @@ impl WasmInstance for WasmtimeInstance { fn get_global_const(&mut self, name: &str) -> Result> { match &mut self.strategy { - Strategy::LegacyInstanceReuse { instance_wrapper, .. } => - instance_wrapper.get_global_val(name), Strategy::RecreateInstance(ref mut instance_creator) => instance_creator.instantiate()?.get_global_val(name), } } - - fn linear_memory_base_ptr(&self) -> Option<*const u8> { - match &self.strategy { - Strategy::RecreateInstance(_) => { - // We do not keep the wasm instance around, therefore there is no linear memory - // associated with it. - None - }, - Strategy::LegacyInstanceReuse { instance_wrapper, .. } => - Some(instance_wrapper.base_ptr()), - } - } } /// Prepare a directory structure and a config file to enable wasmtime caching. 
@@ -338,7 +239,6 @@ fn common_config(semantics: &Semantics) -> std::result::Result (true, false), InstantiationStrategy::RecreateInstanceCopyOnWrite => (false, true), InstantiationStrategy::RecreateInstance => (false, false), - InstantiationStrategy::LegacyInstanceReuse => (false, false), }; const WASM_PAGE_SIZE: u64 = 65536; @@ -409,7 +309,7 @@ fn common_config(semantics: &Semantics) -> std::result::Result { - let data_segments_snapshot = - DataSegmentsSnapshot::take(&blob).map_err(|e| { - WasmError::Other(format!("cannot take data segments snapshot: {}", e)) - })?; - let data_segments_snapshot = Arc::new(data_segments_snapshot); - let mutable_globals = ExposedMutableGlobalsSet::collect(&blob); - - ( - module, - InternalInstantiationStrategy::LegacyInstanceReuse(InstanceSnapshotData { - data_segments_snapshot, - mutable_globals, - }), - ) - }, InstantiationStrategy::Pooling | InstantiationStrategy::PoolingCopyOnWrite | InstantiationStrategy::RecreateInstance | @@ -679,12 +559,6 @@ where } }, CodeSupplyMode::Precompiled(compiled_artifact_path) => { - if let InstantiationStrategy::LegacyInstanceReuse = - config.semantics.instantiation_strategy - { - return Err(WasmError::Other("the legacy instance reuse instantiation strategy is incompatible with precompiled modules".into())); - } - // SAFETY: The unsafety of `deserialize_file` is covered by this function. The // responsibilities to maintain the invariants are passed to the caller. // @@ -695,12 +569,6 @@ where (module, InternalInstantiationStrategy::Builtin) }, CodeSupplyMode::PrecompiledBytes(compiled_artifact_bytes) => { - if let InstantiationStrategy::LegacyInstanceReuse = - config.semantics.instantiation_strategy - { - return Err(WasmError::Other("the legacy instance reuse instantiation strategy is incompatible with precompiled modules".into())); - } - // SAFETY: The unsafety of `deserialize` is covered by this function. The // responsibilities to maintain the invariants are passed to the caller. // @@ -730,13 +598,6 @@ fn prepare_blob_for_compilation( blob = blob.inject_stack_depth_metering(logical_max)?; } - if let InstantiationStrategy::LegacyInstanceReuse = semantics.instantiation_strategy { - // When this strategy is used this must be called after all other passes which may introduce - // new global variables, otherwise they will not be reset when we call into the runtime - // again. - blob.expose_mutable_globals(); - } - // We don't actually need the memory to be imported so we can just convert any memory // import into an export with impunity. This simplifies our code since `wasmtime` will // now automatically take care of creating the memory for us, and it is also necessary diff --git a/substrate/client/executor/wasmtime/src/tests.rs b/substrate/client/executor/wasmtime/src/tests.rs index 65093687822d..e185754b0769 100644 --- a/substrate/client/executor/wasmtime/src/tests.rs +++ b/substrate/client/executor/wasmtime/src/tests.rs @@ -30,7 +30,7 @@ type HostFunctions = sp_io::SubstrateHostFunctions; #[macro_export] macro_rules! test_wasm_execution { - (@no_legacy_instance_reuse $method_name:ident) => { + ($method_name:ident) => { paste::item! { #[test] fn [<$method_name _recreate_instance_cow>]() { @@ -61,19 +61,6 @@ macro_rules! test_wasm_execution { } } }; - - ($method_name:ident) => { - test_wasm_execution!(@no_legacy_instance_reuse $method_name); - - paste::item! 
{ - #[test] - fn [<$method_name _legacy_instance_reuse>]() { - $method_name( - InstantiationStrategy::LegacyInstanceReuse - ); - } - } - }; } struct RuntimeBuilder { @@ -330,14 +317,14 @@ fn test_max_memory_pages_exported_memory_without_precompilation( test_max_memory_pages(instantiation_strategy, false, false); } -test_wasm_execution!(@no_legacy_instance_reuse test_max_memory_pages_imported_memory_with_precompilation); +test_wasm_execution!(test_max_memory_pages_imported_memory_with_precompilation); fn test_max_memory_pages_imported_memory_with_precompilation( instantiation_strategy: InstantiationStrategy, ) { test_max_memory_pages(instantiation_strategy, true, true); } -test_wasm_execution!(@no_legacy_instance_reuse test_max_memory_pages_exported_memory_with_precompilation); +test_wasm_execution!(test_max_memory_pages_exported_memory_with_precompilation); fn test_max_memory_pages_exported_memory_with_precompilation( instantiation_strategy: InstantiationStrategy, ) { diff --git a/substrate/client/executor/wasmtime/src/util.rs b/substrate/client/executor/wasmtime/src/util.rs index c38d969ce9dc..7af554c35e1b 100644 --- a/substrate/client/executor/wasmtime/src/util.rs +++ b/substrate/client/executor/wasmtime/src/util.rs @@ -21,33 +21,9 @@ use sc_executor_common::{ error::{Error, Result}, util::checked_range, }; -use sp_wasm_interface::{Pointer, Value}; +use sp_wasm_interface::Pointer; use wasmtime::{AsContext, AsContextMut}; -/// Converts a [`wasmtime::Val`] into a substrate runtime interface [`Value`]. -/// -/// Panics if the given value doesn't have a corresponding variant in `Value`. -pub fn from_wasmtime_val(val: wasmtime::Val) -> Value { - match val { - wasmtime::Val::I32(v) => Value::I32(v), - wasmtime::Val::I64(v) => Value::I64(v), - wasmtime::Val::F32(f_bits) => Value::F32(f_bits), - wasmtime::Val::F64(f_bits) => Value::F64(f_bits), - v => panic!("Given value type is unsupported by Substrate: {:?}", v), - } -} - -/// Converts a sp_wasm_interface's [`Value`] into the corresponding variant in wasmtime's -/// [`wasmtime::Val`]. -pub fn into_wasmtime_val(value: Value) -> wasmtime::Val { - match value { - Value::I32(v) => wasmtime::Val::I32(v), - Value::I64(v) => wasmtime::Val::I64(v), - Value::F32(f_bits) => wasmtime::Val::F32(f_bits), - Value::F64(f_bits) => wasmtime::Val::F64(f_bits), - } -} - /// Read data from the instance memory into a slice. /// /// Returns an error if the read would go out of the memory bounds. @@ -140,8 +116,8 @@ pub(crate) fn replace_strategy_if_broken(strategy: &mut InstantiationStrategy) { // These strategies require a working `madvise` to be sound. InstantiationStrategy::PoolingCopyOnWrite => InstantiationStrategy::Pooling, - InstantiationStrategy::RecreateInstanceCopyOnWrite | - InstantiationStrategy::LegacyInstanceReuse => InstantiationStrategy::RecreateInstance, + InstantiationStrategy::RecreateInstanceCopyOnWrite => + InstantiationStrategy::RecreateInstance, }; use std::sync::OnceLock; From a8e82a365e2a5f57a3bd51cb293dd6fbec4bf4f5 Mon Sep 17 00:00:00 2001 From: Muharem Ismailov Date: Mon, 18 Sep 2023 11:04:47 +0200 Subject: [PATCH 07/69] xcm-builder: PayOverXcm supports fallible convertors for asset kind and beneficiary conversion (#1572) `PayOverXcm` type accepts two converters to transform the `AssetKind` and `Beneficiary` parameter types into recognized `xcm` types. In this PR, we've modified the bounds for these converters, transitioning from `Convert` to `TryConvert`. 
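To illustrate the new bounds, here is a minimal sketch of a converter that is allowed to fail. Only `sp_runtime::traits::TryConvert` (which returns the unconverted input on error) is assumed from the real API; `VersionedLocatableAsset` and the stand-in `LocatableAssetId` below are hypothetical illustrations rather than upstream types:

```
use sp_runtime::traits::TryConvert;

/// Hypothetical stand-in for the `LocatableAssetId` struct defined in `pay.rs`.
pub struct LocatableAssetId;

/// Hypothetical versioned wrapper: an asset expressed either in an older
/// XCM encoding or in the latest one.
pub enum VersionedLocatableAsset {
    /// Older encoding with no lossless upgrade path in this sketch.
    V2,
    /// Latest encoding, directly usable.
    V3(LocatableAssetId),
}

pub struct VersionedAssetConverter;

impl TryConvert<VersionedLocatableAsset, LocatableAssetId> for VersionedAssetConverter {
    fn try_convert(
        asset: VersionedLocatableAsset,
    ) -> Result<LocatableAssetId, VersionedLocatableAsset> {
        match asset {
            // Already the latest version: conversion succeeds.
            VersionedLocatableAsset::V3(id) => Ok(id),
            // No upgrade path: hand the value back, as `TryConvert` requires.
            other => Err(other),
        }
    }
}

fn main() {
    assert!(VersionedAssetConverter::try_convert(VersionedLocatableAsset::V3(LocatableAssetId)).is_ok());
    assert!(VersionedAssetConverter::try_convert(VersionedLocatableAsset::V2).is_err());
}
```

With the infallible `Convert` bound, such a converter could only panic or invent a value; with `TryConvert`, `PayOverXcm` can surface the failure as an error instead (mapped to `xcm::latest::Error::InvalidLocation` in the diff below).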
One such use case for this adjustment is when dealing with versioned xcm types for `AssetKind` and `Beneficiary`. These types might not be convertible to the latest xcm version, hence the need for fallible conversion. This change is required for https://github.com/paritytech/polkadot-sdk/pull/1333 --- .../xcm/xcm-builder/src/location_conversion.rs | 8 ++++---- polkadot/xcm/xcm-builder/src/pay.rs | 18 ++++++++++-------- polkadot/xcm/xcm-builder/src/tests/pay/pay.rs | 6 +++--- 3 files changed, 17 insertions(+), 15 deletions(-) diff --git a/polkadot/xcm/xcm-builder/src/location_conversion.rs b/polkadot/xcm/xcm-builder/src/location_conversion.rs index 26b48fc88adc..ec23116e0e82 100644 --- a/polkadot/xcm/xcm-builder/src/location_conversion.rs +++ b/polkadot/xcm/xcm-builder/src/location_conversion.rs @@ -18,7 +18,7 @@ use crate::universal_exports::ensure_is_remote; use frame_support::traits::Get; use parity_scale_codec::{Compact, Decode, Encode}; use sp_io::hashing::blake2_256; -use sp_runtime::traits::{AccountIdConversion, Convert, TrailingZeroInput}; +use sp_runtime::traits::{AccountIdConversion, TrailingZeroInput, TryConvert}; use sp_std::{marker::PhantomData, prelude::*}; use xcm::latest::prelude::*; use xcm_executor::traits::ConvertLocation; @@ -322,10 +322,10 @@ impl>, AccountId: From<[u8; 32]> + Into<[u8; 32]> /// network (provided by `Network`) and the `AccountId`'s `[u8; 32]` datum for the `id`. pub struct AliasesIntoAccountId32(PhantomData<(Network, AccountId)>); impl<'a, Network: Get<Option<NetworkId>>, AccountId: Clone + Into<[u8; 32]> + Clone> - Convert<&'a AccountId, MultiLocation> for AliasesIntoAccountId32 + TryConvert<&'a AccountId, MultiLocation> for AliasesIntoAccountId32 { - fn convert(who: &AccountId) -> MultiLocation { - AccountId32 { network: Network::get(), id: who.clone().into() }.into() + fn try_convert(who: &AccountId) -> Result<MultiLocation, &'a AccountId> { + Ok(AccountId32 { network: Network::get(), id: who.clone().into() }.into()) } } diff --git a/polkadot/xcm/xcm-builder/src/pay.rs b/polkadot/xcm/xcm-builder/src/pay.rs index 39e09e056772..0f3a622f4ece 100644 --- a/polkadot/xcm/xcm-builder/src/pay.rs +++ b/polkadot/xcm/xcm-builder/src/pay.rs @@ -20,7 +20,7 @@ use frame_support::traits::{ tokens::{Pay, PaymentStatus}, Get, }; -use sp_runtime::traits::Convert; +use sp_runtime::traits::TryConvert; use sp_std::{marker::PhantomData, vec}; use xcm::{opaque::lts::Weight, prelude::*}; use xcm_executor::traits::{QueryHandler, QueryResponseStatus}; @@ -71,8 +71,8 @@ impl< Timeout: Get<Querier::BlockNumber>, Beneficiary: Clone, AssetKind, - AssetKindToLocatableAsset: Convert<AssetKind, LocatableAssetId>, - BeneficiaryRefToLocation: for<'a> Convert<&'a Beneficiary, MultiLocation>, + AssetKindToLocatableAsset: TryConvert<AssetKind, LocatableAssetId>, + BeneficiaryRefToLocation: for<'a> TryConvert<&'a Beneficiary, MultiLocation>, > Pay for PayOverXcm< Interior, @@ -96,12 +96,14 @@ impl< asset_kind: Self::AssetKind, amount: Self::Balance, ) -> Result<Self::Id, Self::Error> { - let locatable = AssetKindToLocatableAsset::convert(asset_kind); + let locatable = AssetKindToLocatableAsset::try_convert(asset_kind) + .map_err(|_| xcm::latest::Error::InvalidLocation)?; let LocatableAssetId { asset_id, location: asset_location } = locatable; let destination = Querier::UniversalLocation::get() .invert_target(&asset_location) .map_err(|()| Self::Error::LocationNotInvertible)?; - let beneficiary = BeneficiaryRefToLocation::convert(&who); + let beneficiary = BeneficiaryRefToLocation::try_convert(&who) + .map_err(|_| xcm::latest::Error::InvalidLocation)?; let query_id = Querier::new_query(asset_location, Timeout::get(), Interior::get()); @@
-196,10 +198,10 @@ pub struct LocatableAssetId { /// Adapter `struct` which implements a conversion from any `AssetKind` into a [`LocatableAssetId`] /// value using a fixed `Location` for the `location` field. pub struct FixedLocation(sp_std::marker::PhantomData); -impl, AssetKind: Into> Convert +impl, AssetKind: Into> TryConvert for FixedLocation { - fn convert(value: AssetKind) -> LocatableAssetId { - LocatableAssetId { asset_id: value.into(), location: Location::get() } + fn try_convert(value: AssetKind) -> Result { + Ok(LocatableAssetId { asset_id: value.into(), location: Location::get() }) } } diff --git a/polkadot/xcm/xcm-builder/src/tests/pay/pay.rs b/polkadot/xcm/xcm-builder/src/tests/pay/pay.rs index 28b2feec0c23..491a2bcef7a0 100644 --- a/polkadot/xcm/xcm-builder/src/tests/pay/pay.rs +++ b/polkadot/xcm/xcm-builder/src/tests/pay/pay.rs @@ -29,9 +29,9 @@ pub struct AssetKind { } pub struct LocatableAssetKindConverter; -impl sp_runtime::traits::Convert for LocatableAssetKindConverter { - fn convert(value: AssetKind) -> LocatableAssetId { - LocatableAssetId { asset_id: value.asset_id, location: value.destination } +impl sp_runtime::traits::TryConvert for LocatableAssetKindConverter { + fn try_convert(value: AssetKind) -> Result { + Ok(LocatableAssetId { asset_id: value.asset_id, location: value.destination }) } } From 1d5a9d25e23df2a641b83b839c0a1a88b2e6b5ee Mon Sep 17 00:00:00 2001 From: Sacha Lansky Date: Mon, 18 Sep 2023 11:05:12 +0200 Subject: [PATCH 08/69] [improve docs] Example pallet crate and Basic Example pallet (#1546) This fixes the broken links in the crate level documentation of the Examples crate. It also updates the documentation for the Basic Example pallet by removing the template for documenting a pallet (we now have [this](https://github.com/paritytech/polkadot-sdk/blob/master/docs/DOCUMENTATION_GUIDELINE.md) to refer to instead). Note: I found it unnecessary to provide a link to the doc guidelines as I don't think this would be where someone should discover them. I also want to flag some ideas that came while making these minor improvements in [this issue](https://github.com/paritytech/polkadot-sdk-docs/issues/27) (for a subsequent PR) as part of ongoing docs work. --- substrate/frame/examples/Cargo.toml | 2 +- substrate/frame/examples/basic/src/lib.rs | 267 ++-------------------- substrate/frame/examples/src/lib.rs | 29 +-- 3 files changed, 42 insertions(+), 256 deletions(-) diff --git a/substrate/frame/examples/Cargo.toml b/substrate/frame/examples/Cargo.toml index b072416b6121..9c47d7442111 100644 --- a/substrate/frame/examples/Cargo.toml +++ b/substrate/frame/examples/Cargo.toml @@ -6,7 +6,7 @@ edition.workspace = true license = "Apache-2.0" homepage = "https://substrate.io" repository.workspace = true -description = "The single package with various examples for frame pallets" +description = "The single package with examples of various types of FRAME pallets" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/substrate/frame/examples/basic/src/lib.rs b/substrate/frame/examples/basic/src/lib.rs index 426e9b7ec648..31d20e07f5f7 100644 --- a/substrate/frame/examples/basic/src/lib.rs +++ b/substrate/frame/examples/basic/src/lib.rs @@ -15,258 +15,40 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! //! # Basic Example Pallet //! -//! -//! The Example: A simple example of a FRAME pallet demonstrating -//! concepts, APIs and structures common to most FRAME runtimes. -//! -//! 
Run `cargo doc --package pallet-example-basic --open` to view this pallet's documentation. +//! A pallet demonstrating concepts, APIs and structures common to most FRAME runtimes. //! //! **This pallet serves as an example and is not meant to be used in production.** //! -//! ### Documentation Guidelines: -//! -//! -//!
-//!
-//!   • Documentation comments (i.e. `/// comment`) - should
-//!     accompany pallet functions and be restricted to the pallet interface,
-//!     not the internals of the pallet implementation. Only state inputs,
-//!     outputs, and a brief description that mentions whether calling it
-//!     requires root, but without repeating the source code details.
-//!     Capitalize the first word of each documentation comment and end it with
-//!     a full stop. See
-//!     "Generic example of annotating source code with documentation comments"
-//!
-//!   • Self-documenting code - Try to refactor code to be self-documenting.
-//!
-//!   • Code comments - Supplement complex code with a brief explanation, not every line of
-//!     code.
-//!
-//!   • Identifiers - surround by backticks (i.e. `INHERENT_IDENTIFIER`,
-//!     `InherentType`, `u64`)
-//!
-//!   • Usage scenarios - should be simple doctests. The compiler should ensure they stay
-//!     valid.
-//!
-//!   • Extended tutorials - should be moved to external files and refer to.
-//!
-//!   • Mandatory - include all of the sections/subsections where MUST is specified.
-//!
-//!   • Optional - optionally include sections/subsections where CAN is specified.
-//!
-//! ### Documentation Template:
-//!
-//! Copy and paste this template from frame/examples/basic/src/lib.rs into file
-//! `frame//src/lib.rs` of your own custom pallet and complete it.
-//!

-//! // Add heading with custom pallet name
-//!
-//! \#  Pallet
-//!
-//! // Add simple description
-//!
-//! // Include the following links that shows what trait needs to be implemented to use the pallet
-//! // and the supported dispatchables that are documented in the Call enum.
-//!
-//! - \[`Config`]
-//! - \[`Call`]
-//! - \[`Pallet`]
-//!
-//! \## Overview
-//!
-//! 
-//! // Short description of pallet's purpose.
-//! // Links to Traits that should be implemented.
-//! // What this pallet is for.
-//! // What functionality the pallet provides.
-//! // When to use the pallet (use case examples).
-//! // How it is used.
-//! // Inputs it uses and the source of each input.
-//! // Outputs it produces.
-//!
-//! 
-//! 
-//!
-//! \## Terminology
-//!
-//! // Add terminology used in the custom pallet. Include concepts, storage items, or actions that
-//! you think // deserve to be noted to give context to the rest of the documentation or pallet
-//! usage. The author needs to // use some judgment about what is included. We don't want a list of
-//! every storage item nor types - the user // can go to the code for that. For example, "transfer
-//! fee" is obvious and should not be included, but // "free balance" and "reserved balance" should
-//! be noted to give context to the pallet. // Please do not link to outside resources. The
-//! reference docs should be the ultimate source of truth.
-//!
-//! 
-//!
-//! \## Goals
-//!
-//! // Add goals that the custom pallet is designed to achieve.
-//!
-//! 
-//!
-//! \### Scenarios
-//!
-//! 
-//!
-//! \#### 
-//!
-//! // Describe requirements prior to interacting with the custom pallet.
-//! // Describe the process of interacting with the custom pallet for this scenario and public API
-//! functions used.
-//!
-//! \## Interface
-//!
-//! \### Supported Origins
-//!
-//! // What origins are used and supported in this pallet (root, signed, none)
-//! // i.e. root when \`ensure_root\` used
-//! // i.e. none when \`ensure_none\` used
-//! // i.e. signed when \`ensure_signed\` used
-//!
-//! \`inherent\` 
-//!
-//! 
-//! 
-//!
-//! \### Types
-//!
-//! // Type aliases. Include any associated types and where the user would typically define them.
-//!
-//! \`ExampleType\` 
-//!
-//! 
-//!
-//! // Reference documentation of aspects such as `storageItems` and `dispatchable` functions should
-//! // only be included in the  Rustdocs for Substrate and not repeated in the
-//! // README file.
-//!
-//! \### Dispatchable Functions
-//!
-//! 
-//!
-//! // A brief description of dispatchable functions and a link to the rustdoc with their actual
-//! documentation.
-//!
-//! // MUST have link to Call enum
-//! // MUST have origin information included in function doc
-//! // CAN have more info up to the user
-//!
-//! \### Public Functions
-//!
-//! 
-//!
-//! // A link to the rustdoc and any notes about usage in the pallet, not for specific functions.
-//! // For example, in the Balances Pallet: "Note that when using the publicly exposed functions,
-//! // you (the runtime developer) are responsible for implementing any necessary checks
-//! // (e.g. that the sender is the signer) before calling a function that will affect storage."
-//!
-//! 
-//!
-//! // It is up to the writer of the respective pallet (with respect to how much information to
-//! provide).
-//!
-//! \#### Public Inspection functions - Immutable (getters)
-//!
-//! // Insert a subheading for each getter function signature
-//!
-//! \##### \`example_getter_name()\`
-//!
-//! // What it returns
-//! // Why, when, and how often to call it
-//! // When it could panic or error
-//! // When safety issues to consider
-//!
-//! \#### Public Mutable functions (changing state)
-//!
-//! // Insert a subheading for each setter function signature
-//!
-//! \##### \`example_setter_name(origin, parameter_name: T::ExampleType)\`
-//!
-//! // What state it changes
-//! // Why, when, and how often to call it
-//! // When it could panic or error
-//! // When safety issues to consider
-//! // What parameter values are valid and why
-//!
-//! \### Storage Items
-//!
-//! // Explain any storage items included in this pallet
-//!
-//! \### Digest Items
-//!
-//! // Explain any digest items included in this pallet
-//!
-//! \### Inherent Data
-//!
-//! // Explain what inherent data (if any) is defined in the pallet and any other related types
-//!
-//! \### Events:
-//!
-//! // Insert events for this pallet if any
-//!
-//! \### Errors:
-//!
-//! // Explain what generates errors
-//!
-//! \## Usage
-//!
-//! // Insert 2-3 examples of usage and code snippets that show how to
-//! // use  Pallet in a custom pallet.
-//!
-//! \### Prerequisites
-//!
-//! // Show how to include necessary imports for  and derive
-//! // your pallet configuration trait with the `INSERT_CUSTOM_PALLET_NAME` trait.
-//!
-//! \```rust
-//! use ;
-//!
-//! pub trait Config: ::Config { }
-//! \```
-//!
-//! \### Simple Code Snippet
-//!
-//! // Show a simple example (e.g. how to query a public getter function of
-//! )
-//!
-//! \### Example from FRAME
-//!
-//! // Show a usage example in an actual runtime
-//!
-//! // See:
-//! // - Substrate TCR 
-//! // - Substrate Kitties 
-//!
-//! \## Genesis Config
-//!
-//! 
-//!
-//! \## Dependencies
+//! > Made with *Substrate*, for *Polkadot*.
 //!
-//! // Dependencies on other FRAME pallets and the genesis config should be mentioned,
-//! // but not the Rust Standard Library.
-//! // Genesis configuration modifications that may be made to incorporate this pallet
-//! // Interaction with other pallets
+//! [![github]](https://github.com/paritytech/polkadot-sdk/tree/master/substrate/frame/examples/basic)
+//! [![polkadot]](https://polkadot.network)
 //!
-//! 
+//! [polkadot]: https://img.shields.io/badge/polkadot-E6007A?style=for-the-badge&logo=polkadot&logoColor=white
+//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github
 //!
-//! \## Related Pallets
+//! ## Pallet API
 //!
-//! // Interaction with other pallets in the form of a bullet point list
+//! See the [`pallet`] module for more information about the interfaces this pallet exposes,
+//! including its configuration trait, dispatchables, storage items, events and errors.
 //!
-//! \## References
+//! ## Overview
 //!
-//! 
+//! This pallet provides basic examples of using:
 //!
-//! // Links to reference material, if applicable. For example, Phragmen, W3F research, etc.
-//! // that the implementation is based on.
-//! 

+//! - A custom weight calculator able to classify a call's dispatch class (see: +//! [`frame_support::dispatch::DispatchClass`]) +//! - Pallet hooks to implement some custom logic that's executed before and after a block is +//! imported (see: [`frame_support::traits::Hooks`]) +//! - Inherited weight annotation for pallet calls, used to create less repetition for calls that +//! use the [`Config::WeightInfo`] trait to calculate call weights. This can also be overridden, +//! as demonstrated by [`Call::set_dummy`]. +//! - A private function that performs a storage update. +//! - A simple signed extension implementation (see: [`sp_runtime::traits::SignedExtension`]) which +//! increases the priority of the [`Call::set_dummy`] if it's present and drops any transaction +//! with an encoded length higher than 200 bytes. // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] @@ -381,7 +163,8 @@ pub mod pallet { #[pallet::pallet] pub struct Pallet(_); - // Pallet implements [`Hooks`] trait to define some logic to execute in some context. + // This pallet implements the [`frame_support::traits::Hooks`] trait to define some logic to + // execute in some context. #[pallet::hooks] impl Hooks> for Pallet { // `on_initialize` is executed at the beginning of the block before any extrinsic are @@ -686,7 +469,7 @@ impl Pallet { // sender of the transaction (if signed) are also provided. // // The full list of hooks that can be added to a signed extension can be found -// [here](https://crates.parity.io/sp_runtime/traits/trait.SignedExtension.html). +// [here](https://paritytech.github.io/polkadot-sdk/master/sp_runtime/traits/trait.SignedExtension.html). // // The signed extensions are aggregated in the runtime file of a substrate chain. All extensions // should be aggregated in a tuple and passed to the `CheckedExtrinsic` and `UncheckedExtrinsic` diff --git a/substrate/frame/examples/src/lib.rs b/substrate/frame/examples/src/lib.rs index d1cd32bb50f2..a7084fc6ef9b 100644 --- a/substrate/frame/examples/src/lib.rs +++ b/substrate/frame/examples/src/lib.rs @@ -17,24 +17,27 @@ //! # FRAME Pallet Examples //! -//! This crate contains examples of FRAME pallets. It is not intended to be used in production. +//! This crate contains a collection of simple examples of FRAME pallets, demonstrating useful +//! features in action. It is not intended to be used in production. //! //! ## Pallets //! -//! - [**`pallet_example_basic`**](./basic): A simple example of a FRAME pallet demonstrating -//! concepts, APIs and structures common to most FRAME runtimes. +//! - [`pallet_example_basic`]: This pallet demonstrates concepts, APIs and structures common to +//! most FRAME runtimes. //! -//! - [**`pallet_example_offchain_worker`**](./offchain-worker): A simple example of a FRAME pallet -//! demonstrating concepts, APIs and structures common to most offchain workers. +//! - [`pallet_example_offchain_worker`]: This pallet demonstrates concepts, APIs and structures +//! common to most offchain workers. //! -//! - [**`pallet-default-config-example`**](./default-config): A simple example of a FRAME pallet -//! demonstrating the simpler way to implement `Config` trait of pallets. +//! - [`pallet_default_config_example`]: This pallet demonstrates different ways to implement the +//! `Config` trait of pallets. //! -//! - [**`pallet-dev-mode`**](./dev-mode): A simple example of a FRAME pallet demonstrating the ease -//! of requirements for a pallet in dev mode. +//! 
- [`pallet_dev_mode`]: This pallet demonstrates the ease of requirements for a pallet in "dev +//! mode". //! -//! - [**`pallet-example-kitchensink`**](./kitchensink): A simple example of a FRAME pallet -//! demonstrating a catalog of the the FRAME macros and their various syntax options. +//! - [`pallet_example_kitchensink`]: This pallet demonstrates a catalog of all FRAME macros in use +//! and their various syntax options. //! -//! - [**`pallet-example-split`**](./split): A simple example of a FRAME pallet demonstrating the -//! ability to split sections across multiple files. +//! - [`pallet_example_split`]: A simple example of a FRAME pallet demonstrating the ability to +//! split sections across multiple files. +//! +//! **Tip**: Use `cargo doc --package --open` to view each pallet's documentation. From f6072e8be3adede66f5e56104f77920fe5c3b5db Mon Sep 17 00:00:00 2001 From: Sacha Lansky Date: Mon, 18 Sep 2023 11:09:17 +0200 Subject: [PATCH 09/69] [improve docs]: Timestamp pallet (#1435) This PR improves the docs for the Timestamp pallet by following our [Documentation Guidelines](https://github.com/paritytech/polkadot-sdk/blob/master/docs/DOCUMENTATION_GUIDELINE.md) more closely. --------- Co-authored-by: Juan Co-authored-by: Francisco Aguirre --- Cargo.lock | 1 + docs/DOCUMENTATION_GUIDELINE.md | 33 +++-- substrate/frame/timestamp/Cargo.toml | 2 + substrate/frame/timestamp/src/lib.rs | 162 ++++++++++++++-------- substrate/frame/timestamp/src/tests.rs | 2 + substrate/primitives/timestamp/src/lib.rs | 2 +- 6 files changed, 126 insertions(+), 76 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 06d4b34031f8..f09db7643e08 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10502,6 +10502,7 @@ dependencies = [ name = "pallet-timestamp" version = "4.0.0-dev" dependencies = [ + "docify", "frame-benchmarking", "frame-support", "frame-system", diff --git a/docs/DOCUMENTATION_GUIDELINE.md b/docs/DOCUMENTATION_GUIDELINE.md index f6c8cac7cd2a..dbb4298d50fe 100644 --- a/docs/DOCUMENTATION_GUIDELINE.md +++ b/docs/DOCUMENTATION_GUIDELINE.md @@ -123,9 +123,9 @@ explicitly depend on them, you have likely not designed it properly. #### Proc-Macros -Note that there are special considerations when documenting proc macros. Doc links will appear to function _within_ your +Note that there are special considerations when documenting proc macros. Doc links will appear to function *within* your proc macro crate, but often will no longer function when these proc macros are re-exported elsewhere in your project. -The exception is doc links to _other proc macros_ which will function just fine if they are also being re-exported. It +The exception is doc links to *other proc macros* which will function just fine if they are also being re-exported. It is also often necessary to disambiguate between a proc macro and a function of the same name, which can be done using the `macro@my_macro_name` syntax in your link. Read more about how to correctly use links in your rust-docs [here](https://doc.rust-lang.org/rustdoc/write-documentation/linking-to-items-by-name.html#valid-links) and @@ -189,6 +189,7 @@ fn multiply_by_2(x: u32) -> u32 { .. } // More efficiency can be achieved if we improve this via such and such. fn multiply_by_2(x: u32) -> u32 { .. } ``` + They are both roughly conveying the same set of facts, but one is easier to follow because it was formatted cleanly. Especially for traits and types that you can foresee will be seen and used a lot, try and write a well formatted version. 
@@ -203,7 +204,6 @@ properly do this. --- - ## Pallet Crates The guidelines so far have been general in nature, and are applicable to crates that are pallets and crates that're not @@ -223,6 +223,14 @@ For the top-level pallet docs, consider the following template: //! //! . //! +//! ## Pallet API +//! +//! +//! +//! See the [`pallet`] module for more information about the interfaces this pallet exposes, including its +//! configuration trait, dispatchables, storage items, events and errors. +//! //! ## Overview //! //! @@ -233,20 +241,12 @@ For the top-level pallet docs, consider the following template: //! //! ### Example //! -//! . +//! //! -//! ## Pallet API -//! -//! -//! -//! See the [`pallet`] module for more information about the interfaces this pallet exposes, including its configuration -//! trait, dispatchables, storage items, events and errors. -//! -//! +//! //! //! This section can most often be left as-is. //! @@ -272,7 +272,6 @@ For the top-level pallet docs, consider the following template: //! up> ``` - This template's details (heading 3s and beyond) are left flexible, and at the discretion of the developer to make the best final choice about. For example, you might want to include `### Terminology` or not. Moreover, you might find it more useful to include it in `## Overview`. diff --git a/substrate/frame/timestamp/Cargo.toml b/substrate/frame/timestamp/Cargo.toml index a39c79892d19..6759d90aaf41 100644 --- a/substrate/frame/timestamp/Cargo.toml +++ b/substrate/frame/timestamp/Cargo.toml @@ -27,6 +27,8 @@ sp-std = { path = "../../primitives/std", default-features = false} sp-storage = { path = "../../primitives/storage", default-features = false} sp-timestamp = { path = "../../primitives/timestamp", default-features = false} +docify = "0.2.1" + [dev-dependencies] sp-core = { path = "../../primitives/core" } sp-io = { path = "../../primitives/io" } diff --git a/substrate/frame/timestamp/src/lib.rs b/substrate/frame/timestamp/src/lib.rs index 4eb95941d782..ad055bab004f 100644 --- a/substrate/frame/timestamp/src/lib.rs +++ b/substrate/frame/timestamp/src/lib.rs @@ -15,53 +15,40 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Timestamp Pallet -//! -//! The Timestamp pallet provides functionality to get and set the on-chain time. -//! -//! - [`Config`] -//! - [`Call`] -//! - [`Pallet`] -//! -//! ## Overview -//! -//! The Timestamp pallet allows the validators to set and validate a timestamp with each block. -//! -//! It uses inherents for timestamp data, which is provided by the block author and -//! validated/verified by other validators. The timestamp can be set only once per block and must be -//! set each block. There could be a constraint on how much time must pass before setting the new -//! timestamp. +//! > Made with *Substrate*, for *Polkadot*. //! -//! **NOTE:** The Timestamp pallet is the recommended way to query the on-chain time instead of -//! using an approach based on block numbers. The block number based time measurement can cause -//! issues because of cumulative calculation errors and hence should be avoided. +//! [![github]](https://github.com/paritytech/polkadot-sdk/substrate/frame/timestamp) +//! [![polkadot]](https://polkadot.network) //! -//! ## Interface +//! [polkadot]: https://img.shields.io/badge/polkadot-E6007A?style=for-the-badge&logo=polkadot&logoColor=white +//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github //! -//! 
### Dispatchable Functions -//! -//! * `set` - Sets the current time. -//! -//! ### Public functions +//! # Timestamp Pallet //! -//! * `get` - Gets the current time for the current block. If this function is called prior to -//! setting the timestamp, it will return the timestamp of the previous block. +//! A pallet that provides a way for consensus systems to set and check the onchain time. //! -//! ### Config Getters +//! ## Pallet API //! -//! * `MinimumPeriod` - Gets the minimum (and advised) period between blocks for the chain. +//! See the [`pallet`] module for more information about the interfaces this pallet exposes, +//! including its configuration trait, dispatchables, storage items, events and errors. +//! +//! ## Overview //! -//! ## Usage +//! The Timestamp pallet is designed to create a consensus-based time source. This helps ensure that +//! nodes maintain a synchronized view of time that all network participants can agree on. //! -//! The following example shows how to use the Timestamp pallet in your custom pallet to query the -//! current timestamp. +//! It defines an _acceptable range_ using a configurable constant to specify how much time must +//! pass before setting the new timestamp. Validator nodes in the network must verify that the +//! timestamp falls within this acceptable range and reject blocks that do not. //! -//! ### Prerequisites +//! > **Note:** The timestamp set by this pallet is the recommended way to query the onchain time +//! > instead of using block numbers alone. Measuring time with block numbers can cause cumulative +//! > calculation errors if depended upon in time critical operations and hence should generally be +//! > avoided. //! -//! Import the Timestamp pallet into your custom pallet and derive the pallet configuration -//! trait from the timestamp trait. +//! ## Example //! -//! ### Get current timestamp +//! To get the current time for the current block in another pallet: //! //! ``` //! use pallet_timestamp::{self as timestamp}; @@ -83,7 +70,7 @@ //! #[pallet::weight(0)] //! pub fn get_time(origin: OriginFor) -> DispatchResult { //! let _sender = ensure_signed(origin)?; -//! let _now = >::get(); +//! let _now = timestamp::Pallet::::get(); //! Ok(()) //! } //! } @@ -91,15 +78,52 @@ //! # fn main() {} //! ``` //! -//! ### Example from the FRAME +//! If [`Pallet::get`] is called prior to setting the timestamp, it will return the timestamp of +//! the previous block. //! -//! The [Session pallet](https://github.com/paritytech/substrate/blob/master/frame/session/src/lib.rs) uses -//! the Timestamp pallet for session management. +//! ## Low Level / Implementation Details //! -//! ## Related Pallets +//! A timestamp is added to the chain using an _inherent extrinsic_ that only a block author can +//! submit. Inherents are a special type of extrinsic in Substrate chains that will always be +//! included in a block. //! -//! * [Session](../pallet_session/index.html) - +//! To provide inherent data to the runtime, this pallet implements +//! [`ProvideInherent`](frame_support::inherent::ProvideInherent). It will only create an inherent +//! if the [`Call::set`] dispatchable is called, using the +//! [`inherent`](frame_support::pallet_macros::inherent) macro which enables validator nodes to call +//! into the runtime to check that the timestamp provided is valid. +//! The implementation of [`ProvideInherent`](frame_support::inherent::ProvideInherent) specifies a +//! 
constant called `MAX_TIMESTAMP_DRIFT_MILLIS` which is used to determine the acceptable range for +//! a valid timestamp. If a block author sets a timestamp to anything that is more than this +//! constant, a validator node will reject the block. +//! +//! The pallet also ensures that a timestamp is set at the start of each block by running an +//! assertion in the `on_finalize` runtime hook. See [`frame_support::traits::Hooks`] for more +//! information about how hooks work. +//! +//! Because inherents are applied to a block in the order they appear in the runtime +//! construction, the index of this pallet in +//! [`construct_runtime`](frame_support::construct_runtime) must always be less than any other +//! pallet that depends on it. +//! +//! The [`Config::OnTimestampSet`] configuration trait can be set to another pallet we want to +//! notify that the timestamp has been updated, as long as it implements [`OnTimestampSet`]. +//! Examples are the Babe and Aura pallets. +//! This pallet also implements [`Time`] and [`UnixTime`] so it can be used to configure other +//! pallets that require these types (e.g. in Staking pallet). +//! +//! ## Panics +//! +//! There are 3 cases where this pallet could cause the runtime to panic. +//! +//! 1. If no timestamp is set at the end of a block. +//! +//! 2. If a timestamp is set more than once per block: +#![doc = docify::embed!("src/tests.rs", double_timestamp_should_fail)] +//! +//! 3. If a timestamp is set before the [`Config::MinimumPeriod`] is elapsed: +#![doc = docify::embed!("src/tests.rs", block_period_minimum_enforced)] +#![deny(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] mod benchmarking; @@ -123,10 +147,9 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - /// The pallet configuration trait #[pallet::config] pub trait Config: frame_system::Config { - /// Type used for expressing timestamp. + /// Type used for expressing a timestamp. type Moment: Parameter + Default + AtLeast32Bit @@ -135,14 +158,17 @@ pub mod pallet { + MaxEncodedLen + scale_info::StaticTypeInfo; - /// Something which can be notified when the timestamp is set. Set this to `()` if not - /// needed. + /// Something which can be notified (e.g. another pallet) when the timestamp is set. + /// + /// This can be set to `()` if it is not needed. type OnTimestampSet: OnTimestampSet; - /// The minimum period between blocks. Beware that this is different to the *expected* - /// period that the block production apparatus provides. Your chosen consensus system will - /// generally work with this to determine a sensible block time. e.g. For Aura, it will be - /// double this period on default settings. + /// The minimum period between blocks. + /// + /// Be aware that this is different to the *expected* period that the block production + /// apparatus provides. Your chosen consensus system will generally work with this to + /// determine a sensible block time. For example, in the Aura pallet it will be double this + /// period on default settings. #[pallet::constant] type MinimumPeriod: Get; @@ -153,23 +179,31 @@ pub mod pallet { #[pallet::pallet] pub struct Pallet(_); - /// Current time for the current block. + /// The current time for the current block. #[pallet::storage] #[pallet::getter(fn now)] pub type Now = StorageValue<_, T::Moment, ValueQuery>; - /// Did the timestamp get updated in this block? + /// Whether the timestamp has been updated in this block. 
+ /// + /// This value is updated to `true` upon successful submission of a timestamp by a node. + /// It is then checked at the end of each block execution in the `on_finalize` hook. #[pallet::storage] pub(super) type DidUpdate = StorageValue<_, bool, ValueQuery>; #[pallet::hooks] impl Hooks> for Pallet { - /// dummy `on_initialize` to return the weight used in `on_finalize`. + /// A dummy `on_initialize` to return the amount of weight that `on_finalize` requires to + /// execute. fn on_initialize(_n: BlockNumberFor) -> Weight { // weight of `on_finalize` T::WeightInfo::on_finalize() } + /// At the end of block execution, the `on_finalize` hook checks that the timestamp was + /// updated. Upon success, it removes the boolean value from storage. If the value resolves + /// to `false`, the pallet will panic. + /// /// ## Complexity /// - `O(1)` fn on_finalize(_n: BlockNumberFor) { @@ -185,13 +219,17 @@ pub mod pallet { /// phase, if this call hasn't been invoked by that time. /// /// The timestamp should be greater than the previous one by the amount specified by - /// `MinimumPeriod`. + /// [`Config::MinimumPeriod`]. + /// + /// The dispatch origin for this call must be _None_. /// - /// The dispatch origin for this call must be `Inherent`. + /// This dispatch class is _Mandatory_ to ensure it gets executed in the block. Be aware + /// that changing the complexity of this call could result in exhausting the resources in a + /// block to execute any other calls. /// /// ## Complexity /// - `O(1)` (Note that implementations of `OnTimestampSet` must also be `O(1)`) - /// - 1 storage read and 1 storage mutation (codec `O(1)`). (because of `DidUpdate::take` in + /// - 1 storage read and 1 storage mutation (codec `O(1)` because of `DidUpdate::take` in /// `on_finalize`) /// - 1 event handler `on_timestamp_set`. Must be `O(1)`. #[pallet::call_index(0)] @@ -216,6 +254,14 @@ pub mod pallet { } } + /// To check the inherent is valid, we simply take the max value between the current timestamp + /// and the current timestamp plus the [`Config::MinimumPeriod`]. + /// We also check that the timestamp has not already been set in this block. + /// + /// ## Errors: + /// - [`InherentError::TooFarInFuture`]: If the timestamp is larger than the current timestamp + + /// minimum drift period. + /// - [`InherentError::TooEarly`]: If the timestamp is less than the current + minimum period. #[pallet::inherent] impl ProvideInherent for Pallet { type Call = Call; @@ -285,9 +331,9 @@ impl Pallet { } impl Time for Pallet { + /// A type that represents a unit of time. type Moment = T::Moment; - /// Before the first set of now with inherent the value returned is zero. 
fn now() -> Self::Moment { Self::now() } diff --git a/substrate/frame/timestamp/src/tests.rs b/substrate/frame/timestamp/src/tests.rs index 317631eeb704..cc49d8a3296e 100644 --- a/substrate/frame/timestamp/src/tests.rs +++ b/substrate/frame/timestamp/src/tests.rs @@ -30,6 +30,7 @@ fn timestamp_works() { }); } +#[docify::export] #[test] #[should_panic(expected = "Timestamp must be updated only once in the block")] fn double_timestamp_should_fail() { @@ -39,6 +40,7 @@ fn double_timestamp_should_fail() { }); } +#[docify::export] #[test] #[should_panic( expected = "Timestamp must increment by at least between sequential blocks" diff --git a/substrate/primitives/timestamp/src/lib.rs b/substrate/primitives/timestamp/src/lib.rs index eeec73efbc8b..d1bd2a3446e6 100644 --- a/substrate/primitives/timestamp/src/lib.rs +++ b/substrate/primitives/timestamp/src/lib.rs @@ -140,7 +140,7 @@ pub enum InherentError { error("The time since the last timestamp is lower than the minimum period.") )] TooEarly, - /// The block timestamp is too far in the future + /// The block timestamp is too far in the future. #[cfg_attr(feature = "std", error("The timestamp of the block is too far in the future."))] TooFarInFuture, } From d569e728ca710c64392e524c9ccd4ec10c370269 Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Mon, 18 Sep 2023 11:11:08 +0200 Subject: [PATCH 10/69] "Common good" vs "System" parachain clean up (#1406) ## Summary The term "common good parachain" has been abandoned in favor of "system parachain" - e.g. [Joe's speech at Decoded2023](https://youtu.be/CSO-ERHK2gY?t=456). This pull request fixes and aligns the code with this vision. ## Impact The important change is the implementation of `trait IsSystem` for `Id` [here](https://github.com/paritytech/polkadot-sdk/pull/1406/files#diff-0b7b4f5b962a18ce980354592b55ab2a27b5a2e9f6f8089ec803ca73853e8583R225-R229), where we changed the condition from `< 1000` to `<= 1999`, which means that all parachain IDs up to and including 1999 are considered "system parachain" IDs. This change has a direct impact on the following components: #### [ChildSystemParachainAsSuperuser](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/xcm/xcm-builder/src/origin_conversion.rs#L72-L88) This origin converter is used to allow processing of XCM `Transact` from a "system parachain" on the relay chain - e.g. see [configuration for Kusama](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/runtime/kusama/src/xcm_config.rs#L92-L101). Only configured for the Kusama, Westend and Rococo runtimes. **No need for this feature anymore.** See [comment](https://github.com/paritytech/polkadot-sdk/pull/1406#issuecomment-1708218715). #### [IsChildSystemParachain](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/xcm/xcm-builder/src/barriers.rs#L310-L317) `IsChildSystemParachain` is used with the `AllowExplicitUnpaidExecutionFrom` barrier for checking XCM programs (they have to start with an `UnpaidExecution` instruction). Only configured for the Kusama, Westend and Rococo runtimes. **Overall the impact is low or mostly OK because it only allows unpaid execution for "system parachains" (e.g. AssetHub, BridgeHub...) on the relay chain.** #### [SiblingSystemParachainAsSuperuser](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/xcm/xcm-builder/src/origin_conversion.rs#L94-L114) Not used anywhere in the `polkadot-sdk` repo. 
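To make the boundary change concrete, here is a minimal, hypothetical sketch (not part of this PR's diff; the assertion values are illustrative), assuming `Id` and the `IsSystem` trait from `polkadot/parachain/src/primitives.rs` are in scope:

```rust
use polkadot_parachain_primitives::primitives::{Id, IsSystem};

fn main() {
	// Unchanged: IDs below 1000 were and still are system parachain IDs.
	assert!(Id::from(999).is_system());
	// New with this PR: IDs 1000..=1999 are now also classified as system.
	assert!(Id::from(1000).is_system());
	assert!(Id::from(1999).is_system());
	// Unchanged: publicly registrable parachain IDs start at 2000.
	assert!(!Id::from(2000).is_system());
}
```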
## Unresolved Questions - [ ] The constants `LOWEST_USER_ID` and `LOWEST_PUBLIC_ID` seem to express the same thing now; do we want to keep them both or deprecate one of them? If so, which one? - [x] determine impact for `ChildSystemParachainAsSuperuser` ## TODO - [ ] when merged here, open a PR to the `polkadot-fellows` repo ## Related Material https://youtu.be/CSO-ERHK2gY?t=456 https://forum.polkadot.network/t/polkadot-protocol-and-common-good-parachains/866 https://wiki.polkadot.network/docs/learn-system-chains --- .../collectives/collectives-polkadot/src/lib.rs | 2 +- .../runtimes/testing/penpal/src/xcm_config.rs | 2 +- polkadot/parachain/src/primitives.rs | 10 ++++------ polkadot/primitives/src/v5/mod.rs | 2 +- polkadot/runtime/kusama/src/lib.rs | 2 +- polkadot/runtime/kusama/src/xcm_config.rs | 11 ++++------- polkadot/runtime/polkadot/src/lib.rs | 2 +- polkadot/runtime/rococo/src/xcm_config.rs | 11 ++++------- polkadot/runtime/westend/src/xcm_config.rs | 9 ++++----- 9 files changed, 21 insertions(+), 30 deletions(-) diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs index d9125c2645ef..70e62a4dcdf9 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs @@ -20,7 +20,7 @@ //! //! ### Governance //! -//! As a common good parachain, Collectives defers its governance (namely, its `Root` origin), to +//! As a system parachain, Collectives defers its governance (namely, its `Root` origin), to //! its Relay Chain parent, Polkadot. //! //! ### Collator Selection diff --git a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs index 97d2e63370ea..f2ffc451b10e 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs @@ -163,7 +163,7 @@ pub type Barrier = TrailingSetTopicAsId< // If the message is one that immediately attemps to pay for execution, then // allow it. AllowTopLevelPaidExecutionFrom, - // Common Good Assets parachain, parent and its exec plurality get free + // System Assets parachain, parent and its exec plurality get free // execution AllowExplicitUnpaidExecutionFrom<( CommonGoodAssetsParachain, diff --git a/polkadot/parachain/src/primitives.rs b/polkadot/parachain/src/primitives.rs index 5cea9d3bbf4e..5f77810f5c23 100644 --- a/polkadot/parachain/src/primitives.rs +++ b/polkadot/parachain/src/primitives.rs @@ -199,13 +199,11 @@ impl From for Id { } } -const USER_INDEX_START: u32 = 1000; +// System parachain ID is considered `< 2000`. +const SYSTEM_INDEX_END: u32 = 1999; const PUBLIC_INDEX_START: u32 = 2000; -/// The ID of the first user (non-system) parachain. -pub const LOWEST_USER_ID: Id = Id(USER_INDEX_START); - -/// The ID of the first publicly registerable parachain. +/// The ID of the first publicly registrable parachain. 
pub const LOWEST_PUBLIC_ID: Id = Id(PUBLIC_INDEX_START); impl Id { @@ -223,7 +221,7 @@ pub trait IsSystem { impl IsSystem for Id { fn is_system(&self) -> bool { - self.0 < USER_INDEX_START + self.0 <= SYSTEM_INDEX_END } } diff --git a/polkadot/primitives/src/v5/mod.rs b/polkadot/primitives/src/v5/mod.rs index 30782f95611f..81743225403d 100644 --- a/polkadot/primitives/src/v5/mod.rs +++ b/polkadot/primitives/src/v5/mod.rs @@ -44,7 +44,7 @@ pub use polkadot_core_primitives::v2::{ // Export some polkadot-parachain primitives pub use polkadot_parachain_primitives::primitives::{ HeadData, HorizontalMessages, HrmpChannelId, Id, UpwardMessage, UpwardMessages, ValidationCode, - ValidationCodeHash, LOWEST_PUBLIC_ID, LOWEST_USER_ID, + ValidationCodeHash, LOWEST_PUBLIC_ID, }; use serde::{Deserialize, Serialize}; diff --git a/polkadot/runtime/kusama/src/lib.rs b/polkadot/runtime/kusama/src/lib.rs index fc9fd61790ea..0681db23bc25 100644 --- a/polkadot/runtime/kusama/src/lib.rs +++ b/polkadot/runtime/kusama/src/lib.rs @@ -643,7 +643,7 @@ impl pallet_staking::EraPayout for EraPayout { // all para-ids that are currently active. let auctioned_slots = Paras::parachains() .into_iter() - // all active para-ids that do not belong to a system or common good chain is the number + // all active para-ids that do not belong to a system chain is the number // of parachains that we should take into account for inflation. .filter(|i| *i >= LOWEST_PUBLIC_ID) .count() as u64; diff --git a/polkadot/runtime/kusama/src/xcm_config.rs b/polkadot/runtime/kusama/src/xcm_config.rs index 6a9cea22bbc2..59ea937e11c1 100644 --- a/polkadot/runtime/kusama/src/xcm_config.rs +++ b/polkadot/runtime/kusama/src/xcm_config.rs @@ -37,11 +37,10 @@ use xcm::latest::prelude::*; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, ChildParachainAsNative, - ChildParachainConvertsVia, ChildSystemParachainAsSuperuser, - CurrencyAdapter as XcmCurrencyAdapter, IsChildSystemParachain, IsConcrete, MintLocation, - OriginToPluralityVoice, SignedAccountId32AsNative, SignedToAccountId32, - SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, - WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + ChildParachainConvertsVia, CurrencyAdapter as XcmCurrencyAdapter, IsChildSystemParachain, + IsConcrete, MintLocation, OriginToPluralityVoice, SignedAccountId32AsNative, + SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, + UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, }; parameter_types! { @@ -95,8 +94,6 @@ type LocalOriginConverter = ( ChildParachainAsNative, // The AccountId32 location type can be expressed natively as a `Signed` origin. SignedAccountId32AsNative, - // A system child parachain, expressed as a Superuser, converts to the `Root` origin. - ChildSystemParachainAsSuperuser, ); parameter_types! { diff --git a/polkadot/runtime/polkadot/src/lib.rs b/polkadot/runtime/polkadot/src/lib.rs index 6d06ee469333..441a5132eda8 100644 --- a/polkadot/runtime/polkadot/src/lib.rs +++ b/polkadot/runtime/polkadot/src/lib.rs @@ -552,7 +552,7 @@ impl pallet_staking::EraPayout for EraPayout { // all para-ids that are not active. 
let auctioned_slots = Paras::parachains() .into_iter() - // all active para-ids that do not belong to a system or common good chain is the number + // all active para-ids that do not belong to a system chain is the number // of parachains that we should take into account for inflation. .filter(|i| *i >= LOWEST_PUBLIC_ID) .count() as u64; diff --git a/polkadot/runtime/rococo/src/xcm_config.rs b/polkadot/runtime/rococo/src/xcm_config.rs index d561df14a027..288ee8400dcf 100644 --- a/polkadot/runtime/rococo/src/xcm_config.rs +++ b/polkadot/runtime/rococo/src/xcm_config.rs @@ -36,11 +36,10 @@ use xcm::latest::prelude::*; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, ChildParachainAsNative, - ChildParachainConvertsVia, ChildSystemParachainAsSuperuser, - CurrencyAdapter as XcmCurrencyAdapter, FixedWeightBounds, IsChildSystemParachain, IsConcrete, - MintLocation, SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, - TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, - WithUniqueTopic, + ChildParachainConvertsVia, CurrencyAdapter as XcmCurrencyAdapter, FixedWeightBounds, + IsChildSystemParachain, IsConcrete, MintLocation, SignedAccountId32AsNative, + SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, + UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, }; use xcm_executor::XcmExecutor; @@ -80,8 +79,6 @@ type LocalOriginConverter = ( ChildParachainAsNative, // The AccountId32 location type can be expressed natively as a `Signed` origin. SignedAccountId32AsNative, - // A system child parachain, expressed as a Superuser, converts to the `Root` origin. - ChildSystemParachainAsSuperuser, ); parameter_types! { diff --git a/polkadot/runtime/westend/src/xcm_config.rs b/polkadot/runtime/westend/src/xcm_config.rs index 264830c693ef..afa0733b4c9a 100644 --- a/polkadot/runtime/westend/src/xcm_config.rs +++ b/polkadot/runtime/westend/src/xcm_config.rs @@ -35,10 +35,10 @@ use xcm::latest::prelude::*; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, ChildParachainAsNative, - ChildParachainConvertsVia, ChildSystemParachainAsSuperuser, - CurrencyAdapter as XcmCurrencyAdapter, IsChildSystemParachain, IsConcrete, MintLocation, - SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, - TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + ChildParachainConvertsVia, CurrencyAdapter as XcmCurrencyAdapter, IsChildSystemParachain, + IsConcrete, MintLocation, SignedAccountId32AsNative, SignedToAccountId32, + SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, + WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, }; use xcm_executor::XcmExecutor; @@ -74,7 +74,6 @@ type LocalOriginConverter = ( SovereignSignedViaLocation, ChildParachainAsNative, SignedAccountId32AsNative, - ChildSystemParachainAsSuperuser, ); /// The XCM router. When we want to send an XCM message, we use this type. 
It amalgamates all of our From 614aa31bf7a4b0e04595835a80562ce5eac96c4d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gon=C3=A7alo=20Pestana?= Date: Mon, 18 Sep 2023 11:18:19 +0200 Subject: [PATCH 11/69] Implements a variable deposit base calculation for EPM signed submissions (#1547) **Note**: This is a lift-and-shift PR from the old substrate and polkadot repos, both PRs have been reviewed and audited (https://github.com/paritytech/substrate/pull/13983, https://github.com/paritytech/polkadot/pull/7140) --- This PR implements a generic `BaseDeposit` calculation for signed submissions, based on the size of the submission queue. It adds a new associated type to EPM's config, `type SignedDepositBase`, that implements `Convert>`, which is used to calculate the base deposit for signed submissions based on the size of the signed submissions queue. `struct GeometricDepositBase` implements the convert trait so that the deposit value increases as a geometric progression. The deposit base is calculated by `deposit_base = fixed_deposit_base * (1 + increase_factor)^n`, where `n` is the term of the progression (i.e. the number of signed submissions in the queue). `Fixed` and `Inc` generic params are getters for `Balance` and `IncreaseFactor` to compute the geometric progression. If `IncreaseFactor = 0`, then the signed deposit is constant and equal to `Fixed` regardless of the size of the queue. ### Runtime configs In Kusama, the progression with 10% increase without changing the current signed fixed deposit is: (term == size of the queue) Term 1: `1,333,333,332,000` Term 2: `1,333,333,332,000 * 1.10 = 1,466,666,665,200` Term 3: `1,333,333,332,000 * 1.10^2 = 1,613,333,331,200` Term 4: `1,333,333,332,000 * 1.10^3 = 1,774,666,664,320` Term 5: `1,333,333,332,000 * 1.10^4 = 1,952,133,330,752` Term 6: `1,333,333,332,000 * 1.10^5 = 2,147,346,663,827.20` Term 7: `1,333,333,332,000 * 1.10^6 = 2,362,081,330,210.92` Term 8: `1,333,333,332,000 * 1.10^7 = 2,598,289,463,231.01` Term 9: `1,333,333,332,000 * 1.10^8 = 2,858,118,409,554.11` Term 10: `1,333,333,332,000 * 1.10^9 = 3,143,930,250,509.52` Westend: Term 1: `2,000,000,000,000` Term 2: `2,000,000,000,000 * 1.10 = 2,200,000,000,000` Term 3: `2,000,000,000,000 * 1.10^2 = 2,420,000,000,000` Term 4: `2,000,000,000,000 * 1.10^3 = 2,662,000,000,000` Term 5: `2,000,000,000,000 * 1.10^4 = 2,928,200,000,000` Term 6: `2,000,000,000,000 * 1.10^5 = 3,221,020,000,000` Term 7: `2,000,000,000,000 * 1.10^6 = 3,543,122,000,000` Term 8: `2,000,000,000,000 * 1.10^7 = 3,897,434,200,000` Term 9: `2,000,000,000,000 * 1.10^8 = 4,287,177,620,000` Term 10: `2,000,000,000,000 * 1.10^9 = 4,715,895,382,000` and in Polkadot, the deposit increase is disabled in the current state of the PR, as the increase factor is 0% -- so nothing changes from the current behaviour. 
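As a quick cross-check of the progression tables above, here is a small, self-contained sketch (illustration only; the pallet itself uses saturating `FixedU128` arithmetic over the runtime's `Balance` type rather than floats):

```rust
// Hypothetical helper mirroring `deposit_base = fixed_deposit_base * (1 + increase_factor)^n`,
// where `n` is the number of signed submissions already in the queue.
fn deposit_base(fixed: u128, increase_percent: u32, queue_len: u32) -> u128 {
	let factor = 1.0 + f64::from(increase_percent) / 100.0;
	(fixed as f64 * factor.powi(queue_len as i32)) as u128
}

fn main() {
	// Kusama-style parameters: fixed deposit 1_333_333_332_000 and a 10% increase factor.
	assert_eq!(deposit_base(1_333_333_332_000, 10, 0), 1_333_333_332_000); // term 1
	assert_eq!(deposit_base(1_333_333_332_000, 10, 1), 1_466_666_665_200); // term 2
	// With a 0% increase factor, the deposit base stays constant regardless of queue size.
	assert_eq!(deposit_base(1_333_333_332_000, 0, 9), 1_333_333_332_000);
}
```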
Closes https://github.com/paritytech-secops/srlabs_findings/issues/189 --- polkadot/runtime/kusama/src/lib.rs | 10 ++- polkadot/runtime/polkadot/src/lib.rs | 9 +- polkadot/runtime/westend/src/lib.rs | 10 ++- substrate/bin/node/runtime/src/lib.rs | 8 +- .../election-provider-multi-phase/src/lib.rs | 13 +-- .../election-provider-multi-phase/src/mock.rs | 34 +++++-- .../src/signed.rs | 89 +++++++++++++++++-- .../test-staking-e2e/src/mock.rs | 11 ++- 8 files changed, 150 insertions(+), 34 deletions(-) diff --git a/polkadot/runtime/kusama/src/lib.rs b/polkadot/runtime/kusama/src/lib.rs index 0681db23bc25..5cf2b3b83b1f 100644 --- a/polkadot/runtime/kusama/src/lib.rs +++ b/polkadot/runtime/kusama/src/lib.rs @@ -93,7 +93,7 @@ use xcm::latest::Junction; pub use frame_system::Call as SystemCall; pub use pallet_balances::Call as BalancesCall; -pub use pallet_election_provider_multi_phase::Call as EPMCall; +pub use pallet_election_provider_multi_phase::{Call as EPMCall, GeometricDepositBase}; #[cfg(feature = "std")] pub use pallet_staking::StakerStatus; use pallet_staking::UseValidatorsMap; @@ -511,7 +511,8 @@ parameter_types! { // signed config pub const SignedMaxSubmissions: u32 = 16; pub const SignedMaxRefunds: u32 = 16 / 4; - pub const SignedDepositBase: Balance = deposit(2, 0); + pub const SignedFixedDeposit: Balance = deposit(2, 0); + pub const SignedDepositIncreaseFactor: Percent = Percent::from_percent(10); pub const SignedDepositByte: Balance = deposit(0, 10) / 1024; // Each good submission will get 1/10 KSM as reward pub SignedRewardBase: Balance = UNITS / 10; @@ -584,7 +585,8 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type SignedMaxSubmissions = SignedMaxSubmissions; type SignedMaxRefunds = SignedMaxRefunds; type SignedRewardBase = SignedRewardBase; - type SignedDepositBase = SignedDepositBase; + type SignedDepositBase = + GeometricDepositBase; type SignedDepositByte = SignedDepositByte; type SignedDepositWeight = (); type SignedMaxWeight = @@ -2484,7 +2486,7 @@ mod fees_tests { fn signed_deposit_is_sensible() { // ensure this number does not change, or that it is checked after each change. // a 1 MB solution should need around 0.16 KSM deposit - let deposit = SignedDepositBase::get() + (SignedDepositByte::get() * 1024 * 1024); + let deposit = SignedFixedDeposit::get() + (SignedDepositByte::get() * 1024 * 1024); assert_eq_error_rate!(deposit, UNITS * 167 / 100, UNITS / 100); } } diff --git a/polkadot/runtime/polkadot/src/lib.rs b/polkadot/runtime/polkadot/src/lib.rs index 441a5132eda8..da27771af400 100644 --- a/polkadot/runtime/polkadot/src/lib.rs +++ b/polkadot/runtime/polkadot/src/lib.rs @@ -90,7 +90,7 @@ use xcm::latest::Junction; pub use frame_system::Call as SystemCall; pub use pallet_balances::Call as BalancesCall; -pub use pallet_election_provider_multi_phase::Call as EPMCall; +pub use pallet_election_provider_multi_phase::{Call as EPMCall, GeometricDepositBase}; #[cfg(feature = "std")] pub use pallet_staking::StakerStatus; use pallet_staking::UseValidatorsMap; @@ -382,6 +382,8 @@ parameter_types! { // signed config pub const SignedMaxSubmissions: u32 = 16; pub const SignedMaxRefunds: u32 = 16 / 4; + pub const SignedFixedDeposit: Balance = deposit(2, 0); + pub const SignedDepositIncreaseFactor: Percent = Percent::from_percent(10); // 40 DOTs fixed deposit.. pub const SignedDepositBase: Balance = deposit(2, 0); // 0.01 DOT per KB of solution data. 
@@ -456,7 +458,8 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type SignedMaxSubmissions = SignedMaxSubmissions; type SignedMaxRefunds = SignedMaxRefunds; type SignedRewardBase = SignedRewardBase; - type SignedDepositBase = SignedDepositBase; + type SignedDepositBase = + GeometricDepositBase; type SignedDepositByte = SignedDepositByte; type SignedDepositWeight = (); type SignedMaxWeight = @@ -2352,7 +2355,7 @@ mod test_fees { fn signed_deposit_is_sensible() { // ensure this number does not change, or that it is checked after each change. // a 1 MB solution should take (40 + 10) DOTs of deposit. - let deposit = SignedDepositBase::get() + (SignedDepositByte::get() * 1024 * 1024); + let deposit = SignedFixedDeposit::get() + (SignedDepositByte::get() * 1024 * 1024); assert_eq_error_rate!(deposit, 50 * DOLLARS, DOLLARS); } } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 9a7e7c4548c8..5cb6cbbab20e 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -79,7 +79,7 @@ use sp_runtime::{ Keccak256, OpaqueKeys, SaturatedConversion, Verify, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, + ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, Percent, }; use sp_staking::SessionIndex; use sp_std::{collections::btree_map::BTreeMap, prelude::*}; @@ -90,7 +90,7 @@ use xcm::latest::Junction; pub use frame_system::Call as SystemCall; pub use pallet_balances::Call as BalancesCall; -pub use pallet_election_provider_multi_phase::Call as EPMCall; +pub use pallet_election_provider_multi_phase::{Call as EPMCall, GeometricDepositBase}; #[cfg(feature = "std")] pub use pallet_staking::StakerStatus; use pallet_staking::UseValidatorsMap; @@ -481,7 +481,8 @@ parameter_types! 
{ // signed config pub const SignedMaxSubmissions: u32 = 128; pub const SignedMaxRefunds: u32 = 128 / 4; - pub const SignedDepositBase: Balance = deposit(2, 0); + pub const SignedFixedDeposit: Balance = deposit(2, 0); + pub const SignedDepositIncreaseFactor: Percent = Percent::from_percent(10); pub const SignedDepositByte: Balance = deposit(0, 10) / 1024; // Each good submission will get 1 WND as reward pub SignedRewardBase: Balance = 1 * UNITS; @@ -553,7 +554,8 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type SignedMaxSubmissions = SignedMaxSubmissions; type SignedMaxRefunds = SignedMaxRefunds; type SignedRewardBase = SignedRewardBase; - type SignedDepositBase = SignedDepositBase; + type SignedDepositBase = + GeometricDepositBase; type SignedDepositByte = SignedDepositByte; type SignedDepositWeight = (); type SignedMaxWeight = diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index a97f2b09118c..df8fb06467d9 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -58,7 +58,7 @@ pub use node_primitives::{AccountId, Signature}; use node_primitives::{AccountIndex, Balance, BlockNumber, Hash, Moment, Nonce}; use pallet_asset_conversion::{NativeOrAssetId, NativeOrAssetIdConverter}; use pallet_broker::{CoreAssignment, CoreIndex, CoretimeInterface, PartsOf57600}; -use pallet_election_provider_multi_phase::SolutionAccuracyOf; +use pallet_election_provider_multi_phase::{GeometricDepositBase, SolutionAccuracyOf}; use pallet_im_online::sr25519::AuthorityId as ImOnlineId; use pallet_nfts::PalletFeatures; use pallet_nis::WithMaximumOf; @@ -694,7 +694,8 @@ parameter_types! { // signed config pub const SignedRewardBase: Balance = 1 * DOLLARS; - pub const SignedDepositBase: Balance = 1 * DOLLARS; + pub const SignedFixedDeposit: Balance = 1 * DOLLARS; + pub const SignedDepositIncreaseFactor: Percent = Percent::from_percent(10); pub const SignedDepositByte: Balance = 1 * CENTS; pub BetterUnsignedThreshold: Perbill = Perbill::from_rational(1u32, 10_000); @@ -822,7 +823,8 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type MinerConfig = Self; type SignedMaxSubmissions = ConstU32<10>; type SignedRewardBase = SignedRewardBase; - type SignedDepositBase = SignedDepositBase; + type SignedDepositBase = + GeometricDepositBase; type SignedDepositByte = SignedDepositByte; type SignedMaxRefunds = ConstU32<3>; type SignedDepositWeight = (); diff --git a/substrate/frame/election-provider-multi-phase/src/lib.rs b/substrate/frame/election-provider-multi-phase/src/lib.rs index 0d751e3f9cb0..8b6e0827c715 100644 --- a/substrate/frame/election-provider-multi-phase/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/src/lib.rs @@ -279,8 +279,8 @@ use unsigned::VoterOf; pub use weights::WeightInfo; pub use signed::{ - BalanceOf, NegativeImbalanceOf, PositiveImbalanceOf, SignedSubmission, SignedSubmissionOf, - SignedSubmissions, SubmissionIndicesOf, + BalanceOf, GeometricDepositBase, NegativeImbalanceOf, PositiveImbalanceOf, SignedSubmission, + SignedSubmissionOf, SignedSubmissions, SubmissionIndicesOf, }; pub use unsigned::{Miner, MinerConfig}; @@ -572,6 +572,7 @@ pub mod pallet { use frame_election_provider_support::{InstantElectionProvider, NposSolver}; use frame_support::{pallet_prelude::*, traits::EstimateCallFee}; use frame_system::pallet_prelude::*; + use sp_runtime::traits::Convert; #[pallet::config] pub trait Config: frame_system::Config + SendTransactionTypes> { @@ -649,10 +650,6 @@ 
pub mod pallet { #[pallet::constant] type SignedRewardBase: Get>; - /// Base deposit for a signed solution. - #[pallet::constant] - type SignedDepositBase: Get>; - /// Per-byte deposit for a signed solution. #[pallet::constant] type SignedDepositByte: Get>; @@ -668,6 +665,10 @@ pub mod pallet { #[pallet::constant] type MaxWinners: Get; + /// Something that calculates the signed deposit base based on the signed submissions queue + /// size. + type SignedDepositBase: Convert>; + /// The maximum number of electing voters and electable targets to put in the snapshot. /// At the moment, snapshots are only over a single block, but once multi-block elections /// are introduced they will take place over multiple blocks. diff --git a/substrate/frame/election-provider-multi-phase/src/mock.rs b/substrate/frame/election-provider-multi-phase/src/mock.rs index 05d151e51ecc..d4659eba5661 100644 --- a/substrate/frame/election-provider-multi-phase/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/src/mock.rs @@ -16,7 +16,7 @@ // limitations under the License. use super::*; -use crate::{self as multi_phase, unsigned::MinerConfig}; +use crate::{self as multi_phase, signed::GeometricDepositBase, unsigned::MinerConfig}; use frame_election_provider_support::{ bounds::{DataProviderBounds, ElectionBounds}, data_provider, onchain, ElectionDataProvider, NposSolution, SequentialPhragmen, @@ -44,8 +44,8 @@ use sp_npos_elections::{ use sp_runtime::{ bounded_vec, testing::Header, - traits::{BlakeTwo256, IdentityLookup}, - BuildStorage, PerU16, + traits::{BlakeTwo256, Convert, IdentityLookup}, + BuildStorage, PerU16, Percent, }; use std::sync::Arc; @@ -283,7 +283,11 @@ parameter_types! { pub static UnsignedPhase: BlockNumber = 5; pub static SignedMaxSubmissions: u32 = 5; pub static SignedMaxRefunds: u32 = 1; - pub static SignedDepositBase: Balance = 5; + // for tests only. if `EnableVariableDepositBase` is true, the deposit base will be calculated + // by `Multiphase::DepositBase`. Otherwise the deposit base is `SignedFixedDeposit`. + pub static EnableVariableDepositBase: bool = false; + pub static SignedFixedDeposit: Balance = 5; + pub static SignedDepositIncreaseFactor: Percent = Percent::from_percent(10); pub static SignedDepositByte: Balance = 0; pub static SignedDepositWeight: Balance = 0; pub static SignedRewardBase: Balance = 7; @@ -393,7 +397,7 @@ impl crate::Config for Runtime { type OffchainRepeat = OffchainRepeat; type MinerTxPriority = MinerTxPriority; type SignedRewardBase = SignedRewardBase; - type SignedDepositBase = SignedDepositBase; + type SignedDepositBase = Self; type SignedDepositByte = (); type SignedDepositWeight = (); type SignedMaxWeight = SignedMaxWeight; @@ -414,6 +418,18 @@ impl crate::Config for Runtime { type ElectionBounds = ElectionsBounds; } +impl Convert> for Runtime { + /// returns the geometric increase deposit fee if `EnableVariableDepositBase` is set, otherwise + /// the fee is `SignedFixedDeposit`. 
+ fn convert(queue_len: usize) -> Balance { + if !EnableVariableDepositBase::get() { + SignedFixedDeposit::get() + } else { + GeometricDepositBase::::convert(queue_len) + } + } +} + impl frame_system::offchain::SendTransactionTypes for Runtime where RuntimeCall: From, @@ -553,8 +569,14 @@ impl ExtBuilder { ::set(count); self } + pub fn signed_base_deposit(self, base: u64, variable: bool, increase: Percent) -> Self { + ::set(variable); + ::set(base); + ::set(increase); + self + } pub fn signed_deposit(self, base: u64, byte: u64, weight: u64) -> Self { - ::set(base); + ::set(base); ::set(byte); ::set(weight); self diff --git a/substrate/frame/election-provider-multi-phase/src/signed.rs b/substrate/frame/election-provider-multi-phase/src/signed.rs index 76068ba99d36..a5fe8ce55582 100644 --- a/substrate/frame/election-provider-multi-phase/src/signed.rs +++ b/substrate/frame/election-provider-multi-phase/src/signed.rs @@ -17,6 +17,8 @@ //! The signed phase implementation. +use core::marker::PhantomData; + use crate::{ unsigned::MinerConfig, Config, ElectionCompute, Pallet, QueuedSolution, RawSolution, ReadySolution, SignedSubmissionIndices, SignedSubmissionNextIndex, SignedSubmissionsMap, @@ -32,8 +34,8 @@ use sp_arithmetic::traits::SaturatedConversion; use sp_core::bounded::BoundedVec; use sp_npos_elections::ElectionScore; use sp_runtime::{ - traits::{Saturating, Zero}, - RuntimeDebug, + traits::{Convert, Saturating, Zero}, + FixedPointNumber, FixedPointOperand, FixedU128, Percent, RuntimeDebug, }; use sp_std::{ cmp::Ordering, @@ -348,6 +350,32 @@ impl SignedSubmissions { } } +/// Type that can be used to calculate the deposit base for signed submissions. +/// +/// The deposit base is calculated as a geometric progression based on the number of signed +/// submissions in the queue. The size of the queue represents the progression term. +pub struct GeometricDepositBase { + _marker: (PhantomData, PhantomData, PhantomData), +} + +impl Convert for GeometricDepositBase +where + Balance: FixedPointOperand, + Fixed: Get, + Inc: Get, +{ + // Calculates the base deposit as a geometric progression based on the number of signed + // submissions. + // + // The nth term is obtained by calculating `base * (1 + increase_factor)^nth`. Example: factor + // 5, with initial deposit of 1000 and 10% of increase factor is 1000 * (1 + 0.1)^5. + fn convert(queue_len: usize) -> Balance { + let increase_factor: FixedU128 = FixedU128::from_u32(1) + Inc::get().into(); + + increase_factor.saturating_pow(queue_len).saturating_mul_int(Fixed::get()) + } +} + impl Pallet { /// `Self` accessor for `SignedSubmission`. 
pub fn signed_submissions() -> SignedSubmissions { @@ -520,14 +548,14 @@ impl Pallet { size: SolutionOrSnapshotSize, ) -> BalanceOf { let encoded_len: u32 = raw_solution.encoded_size().saturated_into(); - let encoded_len: BalanceOf = encoded_len.into(); + let encoded_len_balance: BalanceOf = encoded_len.into(); let feasibility_weight = Self::solution_weight_of(raw_solution, size); - let len_deposit = T::SignedDepositByte::get().saturating_mul(encoded_len); + let len_deposit = T::SignedDepositByte::get().saturating_mul(encoded_len_balance); let weight_deposit = T::SignedDepositWeight::get() .saturating_mul(feasibility_weight.ref_time().saturated_into()); - T::SignedDepositBase::get() + T::SignedDepositBase::convert(Self::signed_submissions().len()) .saturating_add(len_deposit) .saturating_add(weight_deposit) } @@ -541,6 +569,7 @@ mod tests { Phase, }; use frame_support::{assert_noop, assert_ok, assert_storage_noop}; + use sp_runtime::Percent; #[test] fn cannot_submit_too_early() { @@ -779,6 +808,56 @@ mod tests { }) } + #[test] + fn geometric_deposit_queue_size_works() { + let constant = vec![1000; 10]; + // geometric progression with 10% increase in each iteration for 10 terms. + let progression_10 = vec![1000, 1100, 1210, 1331, 1464, 1610, 1771, 1948, 2143, 2357]; + let progression_40 = vec![1000, 1400, 1960, 2744, 3841, 5378, 7529, 10541, 14757, 20661]; + + let check_progressive_base_fee = |expected: &Vec| { + for s in 0..SignedMaxSubmissions::get() { + let account = 99 + s as u64; + Balances::make_free_balance_be(&account, 10000000); + let mut solution = raw_solution(); + solution.score.minimal_stake -= s as u128; + + assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(account), Box::new(solution))); + assert_eq!(balances(&account).1, expected[s as usize]) + } + }; + + ExtBuilder::default() + .signed_max_submission(10) + .signed_base_deposit(1000, true, Percent::from_percent(0)) + .build_and_execute(|| { + roll_to_signed(); + assert!(MultiPhase::current_phase().is_signed()); + + check_progressive_base_fee(&constant); + }); + + ExtBuilder::default() + .signed_max_submission(10) + .signed_base_deposit(1000, true, Percent::from_percent(10)) + .build_and_execute(|| { + roll_to_signed(); + assert!(MultiPhase::current_phase().is_signed()); + + check_progressive_base_fee(&progression_10); + }); + + ExtBuilder::default() + .signed_max_submission(10) + .signed_base_deposit(1000, true, Percent::from_percent(40)) + .build_and_execute(|| { + roll_to_signed(); + assert!(MultiPhase::current_phase().is_signed()); + + check_progressive_base_fee(&progression_40); + }); + } + #[test] fn call_fee_refund_is_limited_by_signed_max_refunds() { ExtBuilder::default().build_and_execute(|| { diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs index 501f9f89ab7a..ec646c311978 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs @@ -32,7 +32,7 @@ use sp_runtime::{ }, testing, traits::Zero, - transaction_validity, BuildStorage, PerU16, Perbill, + transaction_validity, BuildStorage, PerU16, Perbill, Percent, }; use sp_staking::{ offence::{DisableStrategy, OffenceDetails, OnOffenceHandler}, @@ -47,7 +47,8 @@ use frame_election_provider_support::{ SequentialPhragmen, Weight, }; use pallet_election_provider_multi_phase::{ - unsigned::MinerConfig, Call, ElectionCompute, QueuedSolution, 
SolutionAccuracyOf, + unsigned::MinerConfig, Call, ElectionCompute, GeometricDepositBase, QueuedSolution, + SolutionAccuracyOf, }; use pallet_staking::StakerStatus; use parking_lot::RwLock; @@ -182,6 +183,9 @@ parameter_types! { pub static TransactionPriority: transaction_validity::TransactionPriority = 1; #[derive(Debug)] pub static MaxWinners: u32 = 100; + pub static MaxVotesPerVoter: u32 = 16; + pub static SignedFixedDeposit: Balance = 1; + pub static SignedDepositIncreaseFactor: Percent = Percent::from_percent(10); pub static ElectionBounds: frame_election_provider_support::bounds::ElectionBounds = ElectionBoundsBuilder::default() .voters_count(1_000.into()).targets_count(1_000.into()).build(); } @@ -199,7 +203,8 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type MinerConfig = Self; type SignedMaxSubmissions = ConstU32<10>; type SignedRewardBase = (); - type SignedDepositBase = (); + type SignedDepositBase = + GeometricDepositBase; type SignedDepositByte = (); type SignedMaxRefunds = ConstU32<3>; type SignedDepositWeight = (); From e05d3690bc6c2c1f5abc05a378753eda9e0fdaec Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 11:51:48 +0200 Subject: [PATCH 12/69] Bump docker/setup-buildx-action from 2.1.0 to 3.0.0 (#1551) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action) from 2.1.0 to 3.0.0.
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/release-50_publish-docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-50_publish-docker.yml b/.github/workflows/release-50_publish-docker.yml index 04b3ebd3e79c..5cfe53e641df 100644 --- a/.github/workflows/release-50_publish-docker.yml +++ b/.github/workflows/release-50_publish-docker.yml @@ -242,7 +242,7 @@ jobs: uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@95cb08cb2672c73d4ffd2f422e6d11953d2a9c70 # v2.1.0 + uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0 - name: Cache Docker layers uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 From 519a0f0688d8740c7944bdb7986a540cbf7e3917 Mon Sep 17 00:00:00 2001 From: Chevdor Date: Mon, 18 Sep 2023 11:56:35 +0200 Subject: [PATCH 13/69] Replace secrets with the new ones (#1564) In the monorepo, secrets used in the various previous repos have been renamed to: - `CUMULUS_DOCKERHUB_USERNAME` - `CUMULUS_DOCKERHUB_TOKEN` - `POLKADOT_DOCKERHUB_USERNAME` - `POLKADOT_DOCKERHUB_TOKEN` This PR makes those changes and removes one of the GitHub workflows that has now been updated for the monorepo. --- .../workflows/release-50_publish-docker.yml | 8 ++-- .../release-50_publish-docker-release.yml | 44 ------------------- 2 files changed, 4 insertions(+), 48 deletions(-) delete mode 100644 polkadot/.github/workflows/release-50_publish-docker-release.yml diff --git a/.github/workflows/release-50_publish-docker.yml b/.github/workflows/release-50_publish-docker.yml index 5cfe53e641df..535f54abc684 100644 --- a/.github/workflows/release-50_publish-docker.yml +++ b/.github/workflows/release-50_publish-docker.yml @@ -198,8 +198,8 @@ jobs: - name: Login to Dockerhub uses: docker/login-action@v2 with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} + username: ${{ secrets.CUMULUS_DOCKERHUB_USERNAME }} + password: ${{ secrets.CUMULUS_DOCKERHUB_TOKEN }} - name: Push Container image for ${{ env.BINARY }} id: docker_push @@ -255,8 +255,8 @@ jobs: - name: Login to Docker Hub uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0 with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} + username: ${{ secrets.POLKADOT_DOCKERHUB_USERNAME }} + password: ${{ secrets.POLKADOT_DOCKERHUB_TOKEN }} - name: Fetch values id: fetch-data diff --git a/polkadot/.github/workflows/release-50_publish-docker-release.yml b/polkadot/.github/workflows/release-50_publish-docker-release.yml deleted file mode 100644 index 81e5caa718f3..000000000000 --- a/polkadot/.github/workflows/release-50_publish-docker-release.yml +++ /dev/null @@ -1,44 +0,0 @@ -name: Release - Publish Docker image for new releases - -on: - release: - types: - - published - -jobs: - main: - runs-on: ubuntu-latest - steps: - - name: Checkout sources - uses: actions/checkout@v3 - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@95cb08cb2672c73d4ffd2f422e6d11953d2a9c70 # v2.1.0 - - name: Cache Docker layers - uses: actions/cache@v3 - with: - path: /tmp/.buildx-cache - key: ${{ runner.os }}-buildx-${{ github.sha }} - restore-keys: | - ${{ runner.os }}-buildx- - - name: Login to Dockerhub - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ 
secrets.DOCKERHUB_TOKEN }} - - name: Build and push - id: docker_build - uses: docker/build-push-action@v4 - with: - push: true - file: scripts/ci/dockerfiles/polkadot/polkadot_injected_debian.Dockerfile - tags: | - parity/polkadot:latest - parity/polkadot:${{ github.event.release.tag_name }} - build-args: | - POLKADOT_VERSION=${{ github.event.release.tag_name }} - VCS_REF=${{ github.ref }} - BUILD_DATE=${{ github.event.release.published_at }} - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache - - name: Image digest - run: echo ${{ steps.docker_build.outputs.digest }} From f14bf347e8c89744ff6b4b8e76548d1137dc9c34 Mon Sep 17 00:00:00 2001 From: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Date: Mon, 18 Sep 2023 12:02:15 +0200 Subject: [PATCH 14/69] Broker pallet: `RegionDropped` event fix & additional tests (#1609) This PR includes the following fix: - [x] The `duration` is always set to zero in the `RegionDropped` event. This is fixed in this PR. Also added some additional tests to cover some cases that aren't covered : - [x] Selling a partitioned region to the instantaneous coretime pool. - [x] Partitioning a region after assigning it to a particular task. - [x] Interlacing a region after assigning it to a particular task. --- .../frame/broker/src/dispatchable_impls.rs | 4 +- substrate/frame/broker/src/tests.rs | 119 +++++++++++++++++- substrate/frame/broker/src/utility_impls.rs | 2 +- 3 files changed, 119 insertions(+), 6 deletions(-) diff --git a/substrate/frame/broker/src/dispatchable_impls.rs b/substrate/frame/broker/src/dispatchable_impls.rs index 54cf5d71dcad..0b08a7b665b7 100644 --- a/substrate/frame/broker/src/dispatchable_impls.rs +++ b/substrate/frame/broker/src/dispatchable_impls.rs @@ -76,7 +76,7 @@ impl Pallet { last_timeslice: Self::current_timeslice(), }; let now = frame_system::Pallet::::block_number(); - let dummy_sale = SaleInfoRecord { + let new_sale = SaleInfoRecord { sale_start: now, leadin_length: Zero::zero(), price, @@ -89,7 +89,7 @@ impl Pallet { cores_sold: 0, }; Self::deposit_event(Event::::SalesStarted { price, core_count }); - Self::rotate_sale(dummy_sale, &config, &status); + Self::rotate_sale(new_sale, &config, &status); Status::::put(&status); Ok(()) } diff --git a/substrate/frame/broker/src/tests.rs b/substrate/frame/broker/src/tests.rs index 3c326010dddf..e1b489bbe6e6 100644 --- a/substrate/frame/broker/src/tests.rs +++ b/substrate/frame/broker/src/tests.rs @@ -77,7 +77,9 @@ fn drop_renewal_works() { let e = Error::::StillValid; assert_noop!(Broker::do_drop_renewal(region.core, region.begin + 3), e); advance_to(12); + assert_eq!(AllowedRenewals::::iter().count(), 1); assert_ok!(Broker::do_drop_renewal(region.core, region.begin + 3)); + assert_eq!(AllowedRenewals::::iter().count(), 0); let e = Error::::UnknownRenewal; assert_noop!(Broker::do_drop_renewal(region.core, region.begin + 3), e); }); @@ -90,7 +92,10 @@ fn drop_contribution_works() { advance_to(2); let region = Broker::do_purchase(1, u64::max_value()).unwrap(); // Place region in pool. Active in pool timeslices 4, 5, 6 = rcblocks 8, 10, 12; we - // expect the contribution record to timeout 3 timeslices following 7 = 10 + // expect the contribution record to timeout 3 timeslices following 7 = 14 + // + // Due to the contribution_timeout being configured for 3 timeslices, the contribution + // can only be discarded at timeslice 10, i.e. rcblock 20. 
assert_ok!(Broker::do_pool(region, Some(1), 1, Final)); assert_eq!(InstaPoolContribution::::iter().count(), 1); advance_to(19); @@ -378,6 +383,41 @@ fn instapool_partial_core_payouts_work() { }); } +#[test] +fn instapool_core_payouts_work_with_partitioned_region() { + TestExt::new().endow(1, 1000).execute_with(|| { + assert_ok!(Broker::do_start_sales(100, 1)); + advance_to(2); + let region = Broker::do_purchase(1, u64::max_value()).unwrap(); + let (region1, region2) = Broker::do_partition(region, None, 2).unwrap(); + // `region1` duration is from rcblock 8 to rcblock 12. This means that the + // coretime purchased during this time period will be purchased from `region1` + // + // `region2` duration is from rcblock 12 to rcblock 14 and during this period + // coretime will be purchased from `region2`. + assert_ok!(Broker::do_pool(region1, None, 2, Final)); + assert_ok!(Broker::do_pool(region2, None, 3, Final)); + assert_ok!(Broker::do_purchase_credit(1, 20, 1)); + advance_to(8); + assert_ok!(TestCoretimeProvider::spend_instantaneous(1, 10)); + advance_to(11); + assert_eq!(pot(), 20); + assert_eq!(revenue(), 100); + assert_ok!(Broker::do_claim_revenue(region1, 100)); + assert_eq!(pot(), 10); + assert_eq!(balance(2), 10); + advance_to(12); + assert_ok!(TestCoretimeProvider::spend_instantaneous(1, 10)); + advance_to(15); + assert_eq!(pot(), 10); + assert_ok!(Broker::do_claim_revenue(region2, 100)); + assert_eq!(pot(), 0); + // The balance of account `2` remains unchanged. + assert_eq!(balance(2), 10); + assert_eq!(balance(3), 10); + }); +} + #[test] fn initialize_with_system_paras_works() { TestExt::new().execute_with(|| { @@ -654,6 +694,79 @@ fn partition_then_interlace_works() { }); } +#[test] +fn partitioning_after_assignment_works() { + TestExt::new().endow(1, 1000).execute_with(|| { + assert_ok!(Broker::do_start_sales(100, 1)); + advance_to(2); + // We will initially allocate a task to a purchased region, and after that + // we will proceed to partition the region. + let region = Broker::do_purchase(1, u64::max_value()).unwrap(); + assert_ok!(Broker::do_assign(region, None, 1001, Provisional)); + let (_region, region1) = Broker::do_partition(region, None, 2).unwrap(); + // After the partitioning if we assign a new task to `region` the other region + // will still be assigned to `Task(1001)`. + assert_ok!(Broker::do_assign(region1, None, 1002, Provisional)); + advance_to(10); + assert_eq!( + CoretimeTrace::get(), + vec![ + ( + 6, + AssignCore { + core: 0, + begin: 8, + assignment: vec![(Task(1001), 57600),], + end_hint: None + } + ), + ( + 10, + AssignCore { + core: 0, + begin: 12, + assignment: vec![(Task(1002), 57600),], + end_hint: None + } + ), + ] + ); + }); +} + +#[test] +fn interlacing_after_assignment_works() { + TestExt::new().endow(1, 1000).execute_with(|| { + assert_ok!(Broker::do_start_sales(100, 1)); + advance_to(2); + // We will initially allocate a task to a purchased region, and after that + // we will proceed to interlace the region. + let region = Broker::do_purchase(1, u64::max_value()).unwrap(); + assert_ok!(Broker::do_assign(region, None, 1001, Provisional)); + let (region1, _region) = + Broker::do_interlace(region, None, CoreMask::from_chunk(0, 40)).unwrap(); + // Interlacing the region won't affect the assignment. The entire region will still + // be assigned to `Task(1001)`. + // + // However, after we assign a task to `region1` the `_region` won't be assigned + // to `Task(1001)` anymore. It will become idle. 
+ assert_ok!(Broker::do_assign(region1, None, 1002, Provisional)); + advance_to(10); + assert_eq!( + CoretimeTrace::get(), + vec![( + 6, + AssignCore { + core: 0, + begin: 8, + assignment: vec![(Idle, 28800), (Task(1002), 28800)], + end_hint: None + } + ),] + ); + }); +} + #[test] fn reservations_are_limited() { TestExt::new().execute_with(|| { @@ -866,7 +979,7 @@ fn assign_should_drop_invalid_region() { advance_to(10); assert_ok!(Broker::do_assign(region, Some(1), 1001, Provisional)); region.begin = 7; - System::assert_last_event(Event::RegionDropped { region_id: region, duration: 0 }.into()); + System::assert_last_event(Event::RegionDropped { region_id: region, duration: 3 }.into()); }); } @@ -879,7 +992,7 @@ fn pool_should_drop_invalid_region() { advance_to(10); assert_ok!(Broker::do_pool(region, Some(1), 1001, Provisional)); region.begin = 7; - System::assert_last_event(Event::RegionDropped { region_id: region, duration: 0 }.into()); + System::assert_last_event(Event::RegionDropped { region_id: region, duration: 3 }.into()); }); } diff --git a/substrate/frame/broker/src/utility_impls.rs b/substrate/frame/broker/src/utility_impls.rs index 99c4de32f776..2450198050b6 100644 --- a/substrate/frame/broker/src/utility_impls.rs +++ b/substrate/frame/broker/src/utility_impls.rs @@ -101,9 +101,9 @@ impl Pallet { let last_committed_timeslice = status.last_committed_timeslice; if region_id.begin <= last_committed_timeslice { + let duration = region.end.saturating_sub(region_id.begin); region_id.begin = last_committed_timeslice + 1; if region_id.begin >= region.end { - let duration = region.end.saturating_sub(region_id.begin); Self::deposit_event(Event::RegionDropped { region_id, duration }); return Ok(None) } From a50e6ba7af50a4be9ae78ebb90e86a61f3dd85e1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 12:18:49 +0200 Subject: [PATCH 15/69] Bump docker/login-action from 2 to 3 (#1531) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [docker/login-action](https://github.com/docker/login-action) from 2 to 3.
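Before the login-action release notes, a short aside on the broker `RegionDropped` fix above. The bug in `utility_impls.rs` was one of ordering: `duration` was computed from `region_id.begin` only after `begin` had been advanced to `last_committed_timeslice + 1`, and since the event only fires once `begin >= region.end`, `saturating_sub` always produced zero. Below is a minimal standalone sketch of the before/after behaviour; the function names and the numbers (a 3-timeslice region, as in the updated tests) are illustrative and not the pallet's actual code:

```
// Standalone model of the ordering bug fixed in utility_impls.rs above.
// Names and numbers are illustrative, not the pallet's real types.

fn dropped_duration_old(mut begin: u32, end: u32, last_committed: u32) -> u32 {
    begin = last_committed + 1; // begin is overwritten first...
    end.saturating_sub(begin)   // ...so once begin >= end this is always 0
}

fn dropped_duration_fixed(begin: u32, end: u32, last_committed: u32) -> u32 {
    // Capture the duration before begin is advanced, as the patch does.
    let duration = end.saturating_sub(begin);
    let _begin = last_committed + 1;
    duration
}

fn main() {
    // A region covering timeslices 4..7, dropped once timeslice 7 is committed.
    assert_eq!(dropped_duration_old(4, 7, 7), 0);   // bug: event carried duration 0
    assert_eq!(dropped_duration_fixed(4, 7, 7), 3); // fix: real duration, 3 timeslices
}
```

For reading the timeslice arithmetic in the test comments above: the broker mock maps one timeslice to two relay-chain blocks, which is why timeslices 4, 5, 6 correspond to rcblocks 8, 10, 12 and timeslice 10 to rcblock 20.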
Release notes

Sourced from docker/login-action's releases.

v3.0.0

Full Changelog: https://github.com/docker/login-action/compare/v2.2.0...v3.0.0

v2.2.0

Full Changelog: https://github.com/docker/login-action/compare/v2.1.0...v2.2.0

v2.1.0

  • Ensure AWS temp credentials are redacted in workflow logs by @​crazy-max (#275)
  • Bump @​actions/core from 1.6.0 to 1.10.0 (#252 #292)
  • Bump @​aws-sdk/client-ecr from 3.53.0 to 3.186.0 (#298)
  • Bump @​aws-sdk/client-ecr-public from 3.53.0 to 3.186.0 (#299)

Full Changelog: https://github.com/docker/login-action/compare/v2.0.0...v2.1.0

Commits
  • 343f7c4 Merge pull request #599 from docker/dependabot/npm_and_yarn/aws-sdk-dependenc...
  • aad0f97 chore: update generated content
  • 2e0cd39 build(deps): bump the aws-sdk-dependencies group with 2 updates
  • 203bc9c Merge pull request #588 from docker/dependabot/npm_and_yarn/proxy-agent-depen...
  • 2199648 chore: update generated content
  • b489376 build(deps): bump the proxy-agent-dependencies group with 1 update
  • 7c309e7 Merge pull request #598 from docker/dependabot/npm_and_yarn/actions/core-1.10.1
  • 0ccf222 chore: update generated content
  • 56d703e Merge pull request #597 from docker/dependabot/github_actions/aws-actions/con...
  • 24d3b35 build(deps): bump @​actions/core from 1.10.0 to 1.10.1
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=docker/login-action&package-manager=github_actions&previous-version=2&new-version=3)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. ---
--------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Sergejs Kostjucenko <85877331+sergejparity@users.noreply.github.com> --- .github/workflows/release-50_publish-docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-50_publish-docker.yml b/.github/workflows/release-50_publish-docker.yml index 535f54abc684..e787d83616f9 100644 --- a/.github/workflows/release-50_publish-docker.yml +++ b/.github/workflows/release-50_publish-docker.yml @@ -196,7 +196,7 @@ jobs: ./docker/scripts/build-injected.sh - name: Login to Dockerhub - uses: docker/login-action@v2 + uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0 with: username: ${{ secrets.CUMULUS_DOCKERHUB_USERNAME }} password: ${{ secrets.CUMULUS_DOCKERHUB_TOKEN }} From 20052e1675f96fb25d5b19c26a4e1d5e7425e724 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 12:37:38 +0200 Subject: [PATCH 16/69] Bump docker/build-push-action from 4 to 5 (#1552) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 4 to 5.
Release notes

Sourced from docker/build-push-action's releases.

v5.0.0

Full Changelog: https://github.com/docker/build-push-action/compare/v4.2.1...v5.0.0

v4.2.1

Note

Buildx v0.10 enables support for a minimal SLSA Provenance attestation, which requires support for OCI-compliant multi-platform images. This may introduce issues with registry and runtime support (e.g. Google Cloud Run and AWS Lambda). You can optionally disable the default provenance attestation functionality using provenance: false.

Full Changelog: https://github.com/docker/build-push-action/compare/v4.2.0...v4.2.1

v4.2.0

Note

Buildx v0.10 enables support for a minimal SLSA Provenance attestation, which requires support for OCI-compliant multi-platform images. This may introduce issues with registry and runtime support (e.g. Google Cloud Run and AWS Lambda). You can optionally disable the default provenance attestation functionality using provenance: false.

Full Changelog: https://github.com/docker/build-push-action/compare/v4.1.1...v4.2.0

v4.1.1

Note

Buildx v0.10 enables support for a minimal SLSA Provenance attestation, which requires support for OCI-compliant multi-platform images. This may introduce issues with registry and runtime support (e.g. Google Cloud Run and AWS Lambda). You can optionally disable the default provenance attestation functionality using provenance: false.

Full Changelog: https://github.com/docker/build-push-action/compare/v4.1.0...v4.1.1

v4.1.0

Note

Buildx v0.10 enables support for a minimal SLSA Provenance attestation, which requires support for OCI-compliant multi-platform images. This may introduce issues with registry and runtime support (e.g. Google Cloud Run and AWS Lambda). You can optionally disable the default provenance attestation functionality using provenance: false.

Full Changelog: https://github.com/docker/build-push-action/compare/v4.0.0...v4.1.0

Commits
  • 0565240 Merge pull request #959 from docker/dependabot/npm_and_yarn/actions/core-1.10.1
  • 3ab07f8 chore: update generated content
  • b9e7e4d chore(deps): Bump @​actions/core from 1.10.0 to 1.10.1
  • 04d1a3b Merge pull request #954 from crazy-max/update-node20
  • 1a4d1a1 chore: node 20 as default runtime
  • 675965c chore: update generated content
  • 58ee34c chore: fix author in package.json
  • c97c406 fix ProxyConfig type when checking length
  • 47d5369 vendor: bump @​docker/actions-toolkit from 0.8.0 to 0.12.0
  • 8895c74 chore: update dev dependencies
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=docker/build-push-action&package-manager=github_actions&previous-version=4&new-version=5)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. ---
--------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Chevdor --- .github/workflows/release-50_publish-docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-50_publish-docker.yml b/.github/workflows/release-50_publish-docker.yml index e787d83616f9..d01d78631e19 100644 --- a/.github/workflows/release-50_publish-docker.yml +++ b/.github/workflows/release-50_publish-docker.yml @@ -266,7 +266,7 @@ jobs: - name: Build and push id: docker_build - uses: docker/build-push-action@v4 + uses: docker/build-push-action@0565240e2d4ab88bba5387d719585280857ece09 # v5.0.0 with: push: true file: docker/dockerfiles/polkadot/polkadot_injected_debian.Dockerfile From 5d346643ca0d1b6d9ddcfb2d01e490858ce4cb0e Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Mon, 18 Sep 2023 13:54:44 +0300 Subject: [PATCH 17/69] chainHead: Add support for storage closest merkle descendant #14818 (#1153) This PR adds support for fetching the closest merkle value of some key. Builds on top of - https://github.com/paritytech/trie/pull/199 Migrates https://github.com/paritytech/substrate/pull/14818 to the monorepo. Closes: https://github.com/paritytech/substrate/issues/14550 Closes: https://github.com/paritytech/polkadot-sdk/issues/1506 // @paritytech/subxt-team --------- Signed-off-by: Alexandru Vasile Co-authored-by: Sebastian Kunert --- Cargo.lock | 1 + substrate/client/api/Cargo.toml | 1 + substrate/client/api/src/backend.rs | 16 ++ substrate/client/db/src/bench.rs | 23 ++- substrate/client/db/src/lib.rs | 17 +- substrate/client/db/src/record_stats_state.rs | 16 ++ .../rpc-spec-v2/src/chain_head/chain_head.rs | 17 +- .../src/chain_head/chain_head_storage.rs | 40 +++- .../rpc-spec-v2/src/chain_head/test_utils.rs | 21 +- .../rpc-spec-v2/src/chain_head/tests.rs | 190 +++++++++++++++++- substrate/client/service/src/client/client.rs | 23 ++- .../primitives/state-machine/src/backend.rs | 14 +- .../state-machine/src/trie_backend.rs | 14 +- .../state-machine/src/trie_backend_essence.rs | 42 +++- substrate/primitives/trie/src/lib.rs | 42 +++- 15 files changed, 449 insertions(+), 28 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f09db7643e08..ea4cf4a1817e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14641,6 +14641,7 @@ dependencies = [ "sp-statement-store", "sp-storage", "sp-test-primitives", + "sp-trie", "substrate-prometheus-endpoint", "substrate-test-runtime", "thiserror", diff --git a/substrate/client/api/Cargo.toml b/substrate/client/api/Cargo.toml index b59149424ed3..2b64c86038dd 100644 --- a/substrate/client/api/Cargo.toml +++ b/substrate/client/api/Cargo.toml @@ -35,6 +35,7 @@ sp-runtime = { path = "../../primitives/runtime", default-features = false} sp-state-machine = { path = "../../primitives/state-machine" } sp-statement-store = { path = "../../primitives/statement-store" } sp-storage = { path = "../../primitives/storage" } +sp-trie = { path = "../../primitives/trie" } [dev-dependencies] thiserror = "1.0.48" diff --git a/substrate/client/api/src/backend.rs b/substrate/client/api/src/backend.rs index 2d8fdef77cdb..31b100433c70 100644 --- a/substrate/client/api/src/backend.rs +++ b/substrate/client/api/src/backend.rs @@ -33,6 +33,7 @@ use sp_state_machine::{ OffchainChangesCollection, StorageCollection, StorageIterator, }; use sp_storage::{ChildInfo, StorageData, StorageKey}; +pub use sp_trie::MerkleValue; use crate::{blockchain::Backend as 
BlockchainBackend, UsageInfo}; @@ -470,6 +471,21 @@ pub trait StorageProvider> { child_info: &ChildInfo, key: &StorageKey, ) -> sp_blockchain::Result>; + + /// Given a block's `Hash` and a key, return the closest merkle value. + fn closest_merkle_value( + &self, + hash: Block::Hash, + key: &StorageKey, + ) -> sp_blockchain::Result>>; + + /// Given a block's `Hash`, a key and a child storage key, return the closest merkle value. + fn child_closest_merkle_value( + &self, + hash: Block::Hash, + child_info: &ChildInfo, + key: &StorageKey, + ) -> sp_blockchain::Result>>; } /// Client backend. diff --git a/substrate/client/db/src/bench.rs b/substrate/client/db/src/bench.rs index 38c37a42ede7..03ad4817b53b 100644 --- a/substrate/client/db/src/bench.rs +++ b/substrate/client/db/src/bench.rs @@ -37,7 +37,7 @@ use sp_state_machine::{ }; use sp_trie::{ cache::{CacheSize, SharedTrieCache}, - prefixed_key, MemoryDB, + prefixed_key, MemoryDB, MerkleValue, }; use std::{ cell::{Cell, RefCell}, @@ -382,6 +382,27 @@ impl StateBackend> for BenchmarkingState { .child_storage_hash(child_info, key) } + fn closest_merkle_value( + &self, + key: &[u8], + ) -> Result>, Self::Error> { + self.add_read_key(None, key); + self.state.borrow().as_ref().ok_or_else(state_err)?.closest_merkle_value(key) + } + + fn child_closest_merkle_value( + &self, + child_info: &ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + self.add_read_key(None, key); + self.state + .borrow() + .as_ref() + .ok_or_else(state_err)? + .child_closest_merkle_value(child_info, key) + } + fn exists_storage(&self, key: &[u8]) -> Result { self.add_read_key(None, key); self.state.borrow().as_ref().ok_or_else(state_err)?.exists_storage(key) diff --git a/substrate/client/db/src/lib.rs b/substrate/client/db/src/lib.rs index 73fb4f8ce6db..194bec8a88eb 100644 --- a/substrate/client/db/src/lib.rs +++ b/substrate/client/db/src/lib.rs @@ -90,7 +90,7 @@ use sp_state_machine::{ OffchainChangesCollection, StateMachineStats, StorageCollection, StorageIterator, StorageKey, StorageValue, UsageInfo as StateUsageInfo, }; -use sp_trie::{cache::SharedTrieCache, prefixed_key, MemoryDB, PrefixedMemoryDB}; +use sp_trie::{cache::SharedTrieCache, prefixed_key, MemoryDB, MerkleValue, PrefixedMemoryDB}; // Re-export the Database trait so that one can pass an implementation of it. pub use sc_state_db::PruningMode; @@ -214,6 +214,21 @@ impl StateBackend> for RefTrackingState { self.state.child_storage_hash(child_info, key) } + fn closest_merkle_value( + &self, + key: &[u8], + ) -> Result>, Self::Error> { + self.state.closest_merkle_value(key) + } + + fn child_closest_merkle_value( + &self, + child_info: &ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + self.state.child_closest_merkle_value(child_info, key) + } + fn exists_storage(&self, key: &[u8]) -> Result { self.state.exists_storage(key) } diff --git a/substrate/client/db/src/record_stats_state.rs b/substrate/client/db/src/record_stats_state.rs index 29ece84f97e5..d9a35c075d79 100644 --- a/substrate/client/db/src/record_stats_state.rs +++ b/substrate/client/db/src/record_stats_state.rs @@ -28,6 +28,7 @@ use sp_state_machine::{ backend::{AsTrieBackend, Backend as StateBackend}, BackendTransaction, IterArgs, StorageIterator, StorageKey, StorageValue, TrieBackend, }; +use sp_trie::MerkleValue; use std::sync::Arc; /// State abstraction for recording stats about state access. 
@@ -144,6 +145,21 @@ impl>, B: BlockT> StateBackend> self.state.child_storage_hash(child_info, key) } + fn closest_merkle_value( + &self, + key: &[u8], + ) -> Result>, Self::Error> { + self.state.closest_merkle_value(key) + } + + fn child_closest_merkle_value( + &self, + child_info: &ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + self.state.child_closest_merkle_value(child_info, key) + } + fn exists_storage(&self, key: &[u8]) -> Result { self.state.exists_storage(key) } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs index 14364c331e6c..a8c1c4f7e083 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs @@ -27,7 +27,7 @@ use crate::{ api::ChainHeadApiServer, chain_head_follow::ChainHeadFollower, error::Error as ChainHeadRpcError, - event::{FollowEvent, MethodResponse, OperationError, StorageQuery, StorageQueryType}, + event::{FollowEvent, MethodResponse, OperationError, StorageQuery}, hex_string, subscription::{SubscriptionManagement, SubscriptionManagementError}, }, @@ -329,19 +329,10 @@ where let items = items .into_iter() .map(|query| { - if query.query_type == StorageQueryType::ClosestDescendantMerkleValue { - // Note: remove this once all types are implemented. - return Err(ChainHeadRpcError::InvalidParam( - "Storage query type not supported".into(), - )) - } - - Ok(StorageQuery { - key: StorageKey(parse_hex_param(query.key)?), - query_type: query.query_type, - }) + let key = StorageKey(parse_hex_param(query.key)?); + Ok(StorageQuery { key, query_type: query.query_type }) }) - .collect::, _>>()?; + .collect::, ChainHeadRpcError>>()?; let child_trie = child_trie .map(|child_trie| parse_hex_param(child_trie)) diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs index 48a673f47e3b..7095548a2b16 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs @@ -145,6 +145,36 @@ where .unwrap_or_else(|error| QueryResult::Err(error.to_string())) } + /// Fetch the closest merkle value. + fn query_storage_merkle_value( + &self, + hash: Block::Hash, + key: &StorageKey, + child_key: Option<&ChildInfo>, + ) -> QueryResult { + let result = if let Some(child_key) = child_key { + self.client.child_closest_merkle_value(hash, child_key, key) + } else { + self.client.closest_merkle_value(hash, key) + }; + + result + .map(|opt| { + QueryResult::Ok(opt.map(|storage_data| { + let result = match &storage_data { + sc_client_api::MerkleValue::Node(data) => hex_string(&data.as_slice()), + sc_client_api::MerkleValue::Hash(hash) => hex_string(&hash.as_ref()), + }; + + StorageResult { + key: hex_string(&key.0), + result: StorageResultType::ClosestDescendantMerkleValue(result), + } + })) + }) + .unwrap_or_else(|error| QueryResult::Err(error.to_string())) + } + /// Iterate over at most `operation_max_storage_items` keys. /// /// Returns the storage result with a potential next key to resume iteration. 
@@ -286,13 +316,21 @@ where return }, }, + StorageQueryType::ClosestDescendantMerkleValue => + match self.query_storage_merkle_value(hash, &item.key, child_key.as_ref()) { + Ok(Some(value)) => storage_results.push(value), + Ok(None) => continue, + Err(error) => { + send_error::(&sender, operation.operation_id(), error); + return + }, + }, StorageQueryType::DescendantsValues => self .iter_operations .push_back(QueryIter { next_key: item.key, ty: IterQueryType::Value }), StorageQueryType::DescendantsHashes => self .iter_operations .push_back(QueryIter { next_key: item.key, ty: IterQueryType::Hash }), - _ => continue, }; } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs b/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs index 6e92e87608b4..a901f3039ffe 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs @@ -20,8 +20,8 @@ use parking_lot::Mutex; use sc_client_api::{ execution_extensions::ExecutionExtensions, BlockBackend, BlockImportNotification, BlockchainEvents, CallExecutor, ChildInfo, ExecutorProvider, FinalityNotification, - FinalityNotifications, FinalizeSummary, ImportNotifications, KeysIter, PairsIter, StorageData, - StorageEventStream, StorageKey, StorageProvider, + FinalityNotifications, FinalizeSummary, ImportNotifications, KeysIter, MerkleValue, PairsIter, + StorageData, StorageEventStream, StorageKey, StorageProvider, }; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; use sp_api::{CallApiAt, CallApiAtParams, NumberFor, RuntimeVersion}; @@ -198,6 +198,23 @@ impl< ) -> sp_blockchain::Result> { self.client.child_storage_hash(hash, child_info, key) } + + fn closest_merkle_value( + &self, + hash: Block::Hash, + key: &StorageKey, + ) -> sp_blockchain::Result>> { + self.client.closest_merkle_value(hash, key) + } + + fn child_closest_merkle_value( + &self, + hash: Block::Hash, + child_info: &ChildInfo, + key: &StorageKey, + ) -> sp_blockchain::Result>> { + self.client.child_closest_merkle_value(hash, child_info, key) + } } impl> CallApiAt for ChainHeadMockClient { diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index 1336cff84b6f..3ab47991c4e5 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -43,7 +43,12 @@ use sp_core::{ Blake2Hasher, Hasher, }; use sp_version::RuntimeVersion; -use std::{collections::HashSet, fmt::Debug, sync::Arc, time::Duration}; +use std::{ + collections::{HashMap, HashSet}, + fmt::Debug, + sync::Arc, + time::Duration, +}; use substrate_test_runtime::Transfer; use substrate_test_runtime_client::{ prelude::*, runtime, runtime::RuntimeApi, Backend, BlockBuilderExt, Client, @@ -2583,3 +2588,186 @@ async fn stop_storage_operation() { ) .await; } + +#[tokio::test] +async fn storage_closest_merkle_value() { + let (mut client, api, mut sub, sub_id, _) = setup_api().await; + + /// The core of this test. + /// + /// Checks keys that are exact match, keys with descedant and keys that should not return + /// values. + /// + /// Returns (key, merkle value) pairs. + async fn expect_merkle_request( + api: &RpcModule>>, + mut sub: &mut RpcSubscription, + sub_id: String, + block_hash: String, + ) -> HashMap { + // Valid call with storage at the keys. 
+ let response: MethodResponse = api + .call( + "chainHead_unstable_storage", + rpc_params![ + &sub_id, + &block_hash, + vec![ + StorageQuery { + key: hex_string(b":AAAA"), + query_type: StorageQueryType::ClosestDescendantMerkleValue + }, + StorageQuery { + key: hex_string(b":AAAB"), + query_type: StorageQueryType::ClosestDescendantMerkleValue + }, + // Key with descedent. + StorageQuery { + key: hex_string(b":A"), + query_type: StorageQueryType::ClosestDescendantMerkleValue + }, + StorageQuery { + key: hex_string(b":AA"), + query_type: StorageQueryType::ClosestDescendantMerkleValue + }, + // Keys below this comment do not produce a result. + // Key that exceed the keyspace of the trie. + StorageQuery { + key: hex_string(b":AAAAX"), + query_type: StorageQueryType::ClosestDescendantMerkleValue + }, + StorageQuery { + key: hex_string(b":AAABX"), + query_type: StorageQueryType::ClosestDescendantMerkleValue + }, + // Key that are not part of the trie. + StorageQuery { + key: hex_string(b":AAX"), + query_type: StorageQueryType::ClosestDescendantMerkleValue + }, + StorageQuery { + key: hex_string(b":AAAX"), + query_type: StorageQueryType::ClosestDescendantMerkleValue + }, + ] + ], + ) + .await + .unwrap(); + let operation_id = match response { + MethodResponse::Started(started) => started.operation_id, + MethodResponse::LimitReached => panic!("Expected started response"), + }; + + let event = get_next_event::>(&mut sub).await; + let merkle_values: HashMap<_, _> = match event { + FollowEvent::OperationStorageItems(res) => { + assert_eq!(res.operation_id, operation_id); + + res.items + .into_iter() + .map(|res| { + let value = match res.result { + StorageResultType::ClosestDescendantMerkleValue(value) => value, + _ => panic!("Unexpected StorageResultType"), + }; + (res.key, value) + }) + .collect() + }, + _ => panic!("Expected OperationStorageItems event"), + }; + + // Finished. + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::OperationStorageDone(done) if done.operation_id == operation_id + ); + + // Response for AAAA, AAAB, A and AA. + assert_eq!(merkle_values.len(), 4); + + // While checking for expected merkle values to align, + // the following will check that the returned keys are + // expected. + + // Values for AAAA and AAAB are different. + assert_ne!( + merkle_values.get(&hex_string(b":AAAA")).unwrap(), + merkle_values.get(&hex_string(b":AAAB")).unwrap() + ); + + // Values for A and AA should be on the same branch node. + assert_eq!( + merkle_values.get(&hex_string(b":A")).unwrap(), + merkle_values.get(&hex_string(b":AA")).unwrap() + ); + // The branch node value must be different than the leaf of either + // AAAA and AAAB. + assert_ne!( + merkle_values.get(&hex_string(b":A")).unwrap(), + merkle_values.get(&hex_string(b":AAAA")).unwrap() + ); + assert_ne!( + merkle_values.get(&hex_string(b":A")).unwrap(), + merkle_values.get(&hex_string(b":AAAB")).unwrap() + ); + + merkle_values + } + + // Import a new block with storage changes. + let mut builder = client.new_block(Default::default()).unwrap(); + builder.push_storage_change(b":AAAA".to_vec(), Some(vec![1; 64])).unwrap(); + builder.push_storage_change(b":AAAB".to_vec(), Some(vec![2; 64])).unwrap(); + let block = builder.build().unwrap().block; + let block_hash = format!("{:?}", block.header.hash()); + client.import(BlockOrigin::Own, block.clone()).await.unwrap(); + + // Ensure the imported block is propagated and pinned for this subscription. 
+ assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::NewBlock(_) + ); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::BestBlockChanged(_) + ); + + let merkle_values_lhs = expect_merkle_request(&api, &mut sub, sub_id.clone(), block_hash).await; + + // Import a new block with and change AAAB value. + let mut builder = client.new_block(Default::default()).unwrap(); + builder.push_storage_change(b":AAAA".to_vec(), Some(vec![1; 64])).unwrap(); + builder.push_storage_change(b":AAAB".to_vec(), Some(vec![3; 64])).unwrap(); + let block = builder.build().unwrap().block; + let block_hash = format!("{:?}", block.header.hash()); + client.import(BlockOrigin::Own, block.clone()).await.unwrap(); + + // Ensure the imported block is propagated and pinned for this subscription. + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::NewBlock(_) + ); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::BestBlockChanged(_) + ); + + let merkle_values_rhs = expect_merkle_request(&api, &mut sub, sub_id.clone(), block_hash).await; + + // Change propagated to the root. + assert_ne!( + merkle_values_lhs.get(&hex_string(b":A")).unwrap(), + merkle_values_rhs.get(&hex_string(b":A")).unwrap() + ); + assert_ne!( + merkle_values_lhs.get(&hex_string(b":AAAB")).unwrap(), + merkle_values_rhs.get(&hex_string(b":AAAB")).unwrap() + ); + // However the AAAA branch leaf remains unchanged. + assert_eq!( + merkle_values_lhs.get(&hex_string(b":AAAA")).unwrap(), + merkle_values_rhs.get(&hex_string(b":AAAA")).unwrap() + ); +} diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index 09c1673884aa..26dcd0f9e21a 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -78,7 +78,7 @@ use sp_state_machine::{ ChildStorageCollection, KeyValueStates, KeyValueStorageLevel, StorageCollection, MAX_NESTED_TRIE_DEPTH, }; -use sp_trie::{CompactProof, StorageProof}; +use sp_trie::{CompactProof, MerkleValue, StorageProof}; use std::{ collections::{HashMap, HashSet}, marker::PhantomData, @@ -1545,6 +1545,27 @@ where .child_storage_hash(child_info, &key.0) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e))) } + + fn closest_merkle_value( + &self, + hash: ::Hash, + key: &StorageKey, + ) -> blockchain::Result::Hash>>> { + self.state_at(hash)? + .closest_merkle_value(&key.0) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e))) + } + + fn child_closest_merkle_value( + &self, + hash: ::Hash, + child_info: &ChildInfo, + key: &StorageKey, + ) -> blockchain::Result::Hash>>> { + self.state_at(hash)? + .child_closest_merkle_value(child_info, &key.0) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e))) + } } impl HeaderMetadata for Client diff --git a/substrate/primitives/state-machine/src/backend.rs b/substrate/primitives/state-machine/src/backend.rs index 2a25bdc54d94..ea9cd442d70b 100644 --- a/substrate/primitives/state-machine/src/backend.rs +++ b/substrate/primitives/state-machine/src/backend.rs @@ -30,7 +30,7 @@ use sp_core::storage::{ChildInfo, StateVersion, TrackedStorageKey}; #[cfg(feature = "std")] use sp_core::traits::RuntimeCode; use sp_std::vec::Vec; -use sp_trie::PrefixedMemoryDB; +use sp_trie::{MerkleValue, PrefixedMemoryDB}; /// A struct containing arguments for iterating over the storage. #[derive(Default)] @@ -195,7 +195,17 @@ pub trait Backend: sp_std::fmt::Debug { /// Get keyed storage value hash or None if there is nothing associated. 
fn storage_hash(&self, key: &[u8]) -> Result, Self::Error>; - /// Get keyed child storage or None if there is nothing associated. + /// Get the merkle value or None if there is nothing associated. + fn closest_merkle_value(&self, key: &[u8]) -> Result>, Self::Error>; + + /// Get the child merkle value or None if there is nothing associated. + fn child_closest_merkle_value( + &self, + child_info: &ChildInfo, + key: &[u8], + ) -> Result>, Self::Error>; + + /// Get child keyed child storage or None if there is nothing associated. fn child_storage( &self, child_info: &ChildInfo, diff --git a/substrate/primitives/state-machine/src/trie_backend.rs b/substrate/primitives/state-machine/src/trie_backend.rs index cc7132181f90..afa80addd2ba 100644 --- a/substrate/primitives/state-machine/src/trie_backend.rs +++ b/substrate/primitives/state-machine/src/trie_backend.rs @@ -30,7 +30,6 @@ use codec::Codec; use hash_db::HashDB; use hash_db::Hasher; use sp_core::storage::{ChildInfo, StateVersion}; -use sp_trie::PrefixedMemoryDB; #[cfg(feature = "std")] use sp_trie::{ cache::{LocalTrieCache, TrieCache}, @@ -39,6 +38,7 @@ use sp_trie::{ }; #[cfg(not(feature = "std"))] use sp_trie::{Error, NodeCodec}; +use sp_trie::{MerkleValue, PrefixedMemoryDB}; use trie_db::TrieCache as TrieCacheT; #[cfg(not(feature = "std"))] use trie_db::{node::NodeOwned, CachedValue}; @@ -405,6 +405,18 @@ where self.essence.child_storage(child_info, key) } + fn closest_merkle_value(&self, key: &[u8]) -> Result>, Self::Error> { + self.essence.closest_merkle_value(key) + } + + fn child_closest_merkle_value( + &self, + child_info: &ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + self.essence.child_closest_merkle_value(child_info, key) + } + fn next_storage_key(&self, key: &[u8]) -> Result, Self::Error> { let (is_cached, mut cache) = access_cache(&self.next_storage_key_cache, Option::take) .map(|cache| (cache.last_key == key, cache)) diff --git a/substrate/primitives/state-machine/src/trie_backend_essence.rs b/substrate/primitives/state-machine/src/trie_backend_essence.rs index 4bb51f4a1343..ad7aeab899c8 100644 --- a/substrate/primitives/state-machine/src/trie_backend_essence.rs +++ b/substrate/primitives/state-machine/src/trie_backend_essence.rs @@ -32,11 +32,12 @@ use sp_std::{boxed::Box, marker::PhantomData, vec::Vec}; #[cfg(feature = "std")] use sp_trie::recorder::Recorder; use sp_trie::{ - child_delta_trie_root, delta_trie_root, empty_child_trie_root, read_child_trie_hash, - read_child_trie_value, read_trie_value, + child_delta_trie_root, delta_trie_root, empty_child_trie_root, + read_child_trie_first_descedant_value, read_child_trie_hash, read_child_trie_value, + read_trie_first_descedant_value, read_trie_value, trie_types::{TrieDBBuilder, TrieError}, - DBValue, KeySpacedDB, NodeCodec, PrefixedMemoryDB, Trie, TrieCache, TrieDBRawIterator, - TrieRecorder, + DBValue, KeySpacedDB, MerkleValue, NodeCodec, PrefixedMemoryDB, Trie, TrieCache, + TrieDBRawIterator, TrieRecorder, }; #[cfg(feature = "std")] use std::{collections::HashMap, sync::Arc}; @@ -574,6 +575,39 @@ where }) } + /// Get the closest merkle value at given key. + pub fn closest_merkle_value(&self, key: &[u8]) -> Result>> { + let map_e = |e| format!("Trie lookup error: {}", e); + + self.with_recorder_and_cache(None, |recorder, cache| { + read_trie_first_descedant_value::, _>(self, &self.root, key, recorder, cache) + .map_err(map_e) + }) + } + + /// Get the child closest merkle value at given key. 
+ pub fn child_closest_merkle_value( + &self, + child_info: &ChildInfo, + key: &[u8], + ) -> Result>> { + let Some(child_root) = self.child_root(child_info)? else { return Ok(None) }; + + let map_e = |e| format!("Trie lookup error: {}", e); + + self.with_recorder_and_cache(Some(child_root), |recorder, cache| { + read_child_trie_first_descedant_value::, _>( + child_info.keyspace(), + self, + &child_root, + key, + recorder, + cache, + ) + .map_err(map_e) + }) + } + /// Create a raw iterator over the storage. pub fn raw_iter(&self, args: IterArgs) -> Result> { let root = if let Some(child_info) = args.child_info.as_ref() { diff --git a/substrate/primitives/trie/src/lib.rs b/substrate/primitives/trie/src/lib.rs index 94155458569b..1a1ed670454d 100644 --- a/substrate/primitives/trie/src/lib.rs +++ b/substrate/primitives/trie/src/lib.rs @@ -44,7 +44,6 @@ pub use storage_proof::{CompactProof, StorageProof}; /// Trie codec reexport, mainly child trie support /// for trie compact proof. pub use trie_codec::{decode_compact, encode_compact, Error as CompactProofError}; -pub use trie_db::proof::VerifyError; use trie_db::proof::{generate_proof, verify_proof}; /// Various re-exports from the `trie-db` crate. pub use trie_db::{ @@ -53,6 +52,7 @@ pub use trie_db::{ CError, DBValue, Query, Recorder, Trie, TrieCache, TrieConfiguration, TrieDBIterator, TrieDBKeyIterator, TrieDBRawIterator, TrieLayout, TrieMut, TrieRecorder, }; +pub use trie_db::{proof::VerifyError, MerkleValue}; /// The Substrate format implementation of `TrieStream`. pub use trie_stream::TrieStream; @@ -295,6 +295,25 @@ pub fn read_trie_value( + db: &DB, + root: &TrieHash, + key: &[u8], + recorder: Option<&mut dyn TrieRecorder>>, + cache: Option<&mut dyn TrieCache>, +) -> Result>>, Box>> +where + DB: hash_db::HashDBRef, +{ + TrieDBBuilder::::new(db, root) + .with_optional_cache(cache) + .with_optional_recorder(recorder) + .build() + .lookup_first_descendant(key) +} + /// Read a value from the trie with given Query. pub fn read_trie_value_with< L: TrieLayout, @@ -397,6 +416,27 @@ where .get_hash(key) } +/// Read the [`trie_db::MerkleValue`] of the node that is the closest descendant for +/// the provided child key. +pub fn read_child_trie_first_descedant_value( + keyspace: &[u8], + db: &DB, + root: &TrieHash, + key: &[u8], + recorder: Option<&mut dyn TrieRecorder>>, + cache: Option<&mut dyn TrieCache>, +) -> Result>>, Box>> +where + DB: hash_db::HashDBRef, +{ + let db = KeySpacedDB::new(db, keyspace); + TrieDBBuilder::::new(&db, &root) + .with_optional_recorder(recorder) + .with_optional_cache(cache) + .build() + .lookup_first_descendant(key) +} + /// Read a value from the child trie with given query. pub fn read_child_trie_value_with( keyspace: &[u8], From 372929fa256f5839047bb5d569ad523cb4b15094 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 14:49:39 +0200 Subject: [PATCH 18/69] Bump the known_good_semver group with 1 update (#1606) Bumps the known_good_semver group with 1 update: [syn](https://github.com/dtolnay/syn).
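Before the syn release notes, a short aside on the chainHead storage addition above. A `ClosestDescendantMerkleValue` query asks whether the trie contains the key itself or any key underneath it, and if so returns the merkle value of the closest such node. The sketch below models only those lookup semantics, mirroring the checks in the `storage_closest_merkle_value` test; the `BTreeMap` stand-in and the single-variant `MerkleValue` are simplifications, not the sp-trie API:

```
use std::collections::BTreeMap;

// Simplified stand-in for sp_trie::MerkleValue, which in the patch above
// carries either the node's inline encoding (Node) or its hash (Hash).
#[derive(Debug)]
enum MerkleValue {
    Node(Vec<u8>),
}

// Model of the lookup_first_descendant semantics: return a value iff `key`
// is a stored key or a prefix of one (i.e. the trie has a descendant of it).
fn closest_descendant(trie: &BTreeMap<Vec<u8>, Vec<u8>>, key: &[u8]) -> Option<MerkleValue> {
    trie.range(key.to_vec()..)
        .next()
        .filter(|(k, _)| k.starts_with(key))
        .map(|(_, v)| MerkleValue::Node(v.clone()))
}

fn main() {
    let mut trie = BTreeMap::new();
    trie.insert(b":AAAA".to_vec(), vec![1; 4]);
    trie.insert(b":AAAB".to_vec(), vec![2; 4]);

    // Exact keys and prefixes with descendants yield a merkle value...
    assert!(closest_descendant(&trie, b":AAAA").is_some());
    assert!(closest_descendant(&trie, b":A").is_some());
    assert!(closest_descendant(&trie, b":AA").is_some());
    // ...keys past or outside the trie's keyspace do not.
    assert!(closest_descendant(&trie, b":AAAAX").is_none());
    assert!(closest_descendant(&trie, b":AAX").is_none());
}
```

The real `MerkleValue` is either a node's inline encoding or its hash, which is why `query_storage_merkle_value` above hex-encodes both the `Node` and `Hash` arms before returning a `StorageResult`.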
Release notes

Sourced from syn's releases.

2.0.36

  • Restore compatibility with --generate-link-to-definition documentation builds (#1514)

2.0.35

  • Make rust-analyzer produce preferred brackets for invocations of Token! macro (#1510, #1512)

2.0.34

  • Documentation improvements
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=syn&package-manager=cargo&previous-version=2.0.33&new-version=2.0.36)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore <dependency name> major version` will close this group update PR and stop Dependabot creating any more for the specific dependency's major version (unless you unignore this specific dependency's major version or upgrade to it yourself) - `@dependabot ignore <dependency name> minor version` will close this group update PR and stop Dependabot creating any more for the specific dependency's minor version (unless you unignore this specific dependency's minor version or upgrade to it yourself) - `@dependabot ignore <dependency name>` will close this group update PR and stop Dependabot creating any more for the specific dependency (unless you unignore this specific dependency or upgrade to it yourself) - `@dependabot unignore <dependency name>` will remove all of the ignore conditions of the specified dependency - `@dependabot unignore <dependency name> <ignore condition>` will remove the ignore condition of the specified dependency and ignore conditions
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 100 +++++++++--------- .../parachain-system/proc-macro/Cargo.toml | 2 +- polkadot/node/gum/proc-macro/Cargo.toml | 2 +- polkadot/xcm/procedural/Cargo.toml | 2 +- substrate/client/chain-spec/derive/Cargo.toml | 2 +- .../client/tracing/proc-macro/Cargo.toml | 2 +- .../frame/contracts/proc-macro/Cargo.toml | 2 +- .../solution-type/Cargo.toml | 2 +- .../frame/staking/reward-curve/Cargo.toml | 2 +- substrate/frame/support/procedural/Cargo.toml | 2 +- .../frame/support/procedural/tools/Cargo.toml | 2 +- .../procedural/tools/derive/Cargo.toml | 2 +- .../primitives/api/proc-macro/Cargo.toml | 2 +- .../core/hashing/proc-macro/Cargo.toml | 2 +- substrate/primitives/debug-derive/Cargo.toml | 2 +- .../runtime-interface/proc-macro/Cargo.toml | 2 +- .../primitives/version/proc-macro/Cargo.toml | 2 +- 17 files changed, 66 insertions(+), 66 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ea4cf4a1817e..a5ea33e1ae49 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1138,7 +1138,7 @@ checksum = "0e97ce7de6cf12de5d7226c73f5ba9811622f4db3a5b91b55c53e987e5f91cba" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -1160,7 +1160,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -1177,7 +1177,7 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -1351,7 +1351,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -2514,7 +2514,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -3574,7 +3574,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -4057,7 +4057,7 @@ checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -4097,7 +4097,7 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -4114,7 +4114,7 @@ checksum = "50c49547d73ba8dcfd4ad7325d64c6d5391ff4224d498fc39a6f3f49825a530d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -4412,7 +4412,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -4474,7 +4474,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.33", + "syn 2.0.36", "termcolor", "toml 0.7.6", "walkdir", @@ -4695,7 +4695,7 @@ checksum = "5e9a1f9f7d83e59740248a6e14ecf93929ade55027844dfcea78beafccc15745" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -4706,7 +4706,7 @@ checksum = "c2ad8cef1d801a4686bfd8919f0b30eac4c8e48968c437a6405ded4fb5272d2b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -4851,7 +4851,7 @@ dependencies = [ "fs-err", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -5207,7 +5207,7 @@ dependencies = [ "quote", "scale-info", "sp-arithmetic", - "syn 2.0.33", + "syn 2.0.36", "trybuild", ] @@ -5359,7 +5359,7 @@ dependencies = [ "proc-macro-warning", "proc-macro2", "quote", - "syn 
2.0.33", + "syn 2.0.36", ] [[package]] @@ -5370,7 +5370,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -5379,7 +5379,7 @@ version = "3.0.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -5602,7 +5602,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -7612,7 +7612,7 @@ dependencies = [ "macro_magic_core", "macro_magic_macros", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -7626,7 +7626,7 @@ dependencies = [ "macro_magic_core_macros", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -7637,7 +7637,7 @@ checksum = "c12469fc165526520dff2807c2975310ab47cf7190a45b99b49a7dc8befab17b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -7648,7 +7648,7 @@ checksum = "b8fb85ec1620619edf2984a7693497d4ec88a9665d8b87e942856884c92dbf2a" dependencies = [ "macro_magic_core", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -9334,7 +9334,7 @@ version = "4.0.0-dev" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -10405,7 +10405,7 @@ dependencies = [ "proc-macro2", "quote", "sp-runtime", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -11272,7 +11272,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -11313,7 +11313,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -13292,7 +13292,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c64d9ba0963cdcea2e1b2230fbae2bab30eb25a174be395c41e764bfb65dd62" dependencies = [ "proc-macro2", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -13374,7 +13374,7 @@ checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -13420,7 +13420,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -13811,7 +13811,7 @@ checksum = "7f7473c2cfcf90008193dd0e3e16599455cb601a9fce322b5bb55de799664925" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -14574,7 +14574,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -15785,7 +15785,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -16139,7 +16139,7 @@ checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -16205,7 +16205,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -16633,7 +16633,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -17033,7 +17033,7 @@ version = "9.0.0" dependencies = [ "quote", "sp-core-hashing", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -17077,7 +17077,7 @@ version = "8.0.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ 
-17308,7 +17308,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -17548,7 +17548,7 @@ dependencies = [ "proc-macro2", "quote", "sp-version", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -18323,9 +18323,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.33" +version = "2.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9caece70c63bfba29ec2fed841a09851b14a235c60010fa4de58089b6c025668" +checksum = "91e02e55d62894af2a08aca894c6577281f76769ba47c94d5756bec8ac6e7373" dependencies = [ "proc-macro2", "quote", @@ -18570,7 +18570,7 @@ checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -18750,7 +18750,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -18931,7 +18931,7 @@ checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -18974,7 +18974,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -19523,7 +19523,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", "wasm-bindgen-shared", ] @@ -19557,7 +19557,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -20693,7 +20693,7 @@ dependencies = [ "Inflector", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -20812,7 +20812,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] diff --git a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml index 77e298363c85..ee0943bb99ee 100644 --- a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml +++ b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml @@ -9,7 +9,7 @@ description = "Proc macros provided by the parachain-system pallet" proc-macro = true [dependencies] -syn = "2.0.33" +syn = "2.0.36" proc-macro2 = "1.0.64" quote = "1.0.33" proc-macro-crate = "1.3.1" diff --git a/polkadot/node/gum/proc-macro/Cargo.toml b/polkadot/node/gum/proc-macro/Cargo.toml index 0d6cee2ccf0a..be302e46ad90 100644 --- a/polkadot/node/gum/proc-macro/Cargo.toml +++ b/polkadot/node/gum/proc-macro/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -syn = { version = "2.0.33", features = ["full", "extra-traits"] } +syn = { version = "2.0.36", features = ["full", "extra-traits"] } quote = "1.0.28" proc-macro2 = "1.0.56" proc-macro-crate = "1.1.3" diff --git a/polkadot/xcm/procedural/Cargo.toml b/polkadot/xcm/procedural/Cargo.toml index 3e137c42843b..6beaa1d667f0 100644 --- a/polkadot/xcm/procedural/Cargo.toml +++ b/polkadot/xcm/procedural/Cargo.toml @@ -11,5 +11,5 @@ proc-macro = true [dependencies] proc-macro2 = "1.0.56" quote = "1.0.28" -syn = "2.0.33" +syn = "2.0.36" Inflector = "0.11.4" diff --git a/substrate/client/chain-spec/derive/Cargo.toml b/substrate/client/chain-spec/derive/Cargo.toml index ff1ce5141e69..f0ad4c68d1f0 100644 --- 
a/substrate/client/chain-spec/derive/Cargo.toml +++ b/substrate/client/chain-spec/derive/Cargo.toml @@ -18,4 +18,4 @@ proc-macro = true proc-macro-crate = "1.1.3" proc-macro2 = "1.0.56" quote = "1.0.28" -syn = "2.0.33" +syn = "2.0.36" diff --git a/substrate/client/tracing/proc-macro/Cargo.toml b/substrate/client/tracing/proc-macro/Cargo.toml index bf45111de88c..270f34b6d04b 100644 --- a/substrate/client/tracing/proc-macro/Cargo.toml +++ b/substrate/client/tracing/proc-macro/Cargo.toml @@ -18,4 +18,4 @@ proc-macro = true proc-macro-crate = "1.1.3" proc-macro2 = "1.0.56" quote = { version = "1.0.28", features = ["proc-macro"] } -syn = { version = "2.0.33", features = ["proc-macro", "full", "extra-traits", "parsing"] } +syn = { version = "2.0.36", features = ["proc-macro", "full", "extra-traits", "parsing"] } diff --git a/substrate/frame/contracts/proc-macro/Cargo.toml b/substrate/frame/contracts/proc-macro/Cargo.toml index 8779f97b9cde..ccc80a2eba4d 100644 --- a/substrate/frame/contracts/proc-macro/Cargo.toml +++ b/substrate/frame/contracts/proc-macro/Cargo.toml @@ -17,7 +17,7 @@ proc-macro = true [dependencies] proc-macro2 = "1.0.56" quote = "1.0.28" -syn = { version = "2.0.33", features = ["full"] } +syn = { version = "2.0.36", features = ["full"] } [dev-dependencies] diff --git a/substrate/frame/election-provider-support/solution-type/Cargo.toml b/substrate/frame/election-provider-support/solution-type/Cargo.toml index 50381d838696..1b432204470a 100644 --- a/substrate/frame/election-provider-support/solution-type/Cargo.toml +++ b/substrate/frame/election-provider-support/solution-type/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -syn = { version = "2.0.33", features = ["full", "visit"] } +syn = { version = "2.0.36", features = ["full", "visit"] } quote = "1.0.28" proc-macro2 = "1.0.56" proc-macro-crate = "1.1.3" diff --git a/substrate/frame/staking/reward-curve/Cargo.toml b/substrate/frame/staking/reward-curve/Cargo.toml index fc1f1b4b3ee8..7646bbc9a55d 100644 --- a/substrate/frame/staking/reward-curve/Cargo.toml +++ b/substrate/frame/staking/reward-curve/Cargo.toml @@ -18,7 +18,7 @@ proc-macro = true proc-macro-crate = "1.1.3" proc-macro2 = "1.0.56" quote = "1.0.28" -syn = { version = "2.0.33", features = ["full", "visit"] } +syn = { version = "2.0.36", features = ["full", "visit"] } [dev-dependencies] sp-runtime = { path = "../../../primitives/runtime" } diff --git a/substrate/frame/support/procedural/Cargo.toml b/substrate/frame/support/procedural/Cargo.toml index b582457e4b8d..e16068546056 100644 --- a/substrate/frame/support/procedural/Cargo.toml +++ b/substrate/frame/support/procedural/Cargo.toml @@ -21,7 +21,7 @@ cfg-expr = "0.15.5" itertools = "0.10.3" proc-macro2 = "1.0.56" quote = "1.0.28" -syn = { version = "2.0.33", features = ["full"] } +syn = { version = "2.0.36", features = ["full"] } frame-support-procedural-tools = { path = "tools" } proc-macro-warning = { version = "0.4.2", default-features = false } macro_magic = { version = "0.4.2", features = ["proc_support"] } diff --git a/substrate/frame/support/procedural/tools/Cargo.toml b/substrate/frame/support/procedural/tools/Cargo.toml index 211dc3bd66a5..fb0a1b51cbcf 100644 --- a/substrate/frame/support/procedural/tools/Cargo.toml +++ b/substrate/frame/support/procedural/tools/Cargo.toml @@ -15,5 +15,5 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro-crate = "1.1.3" proc-macro2 = "1.0.56" quote = "1.0.28" -syn = { version = "2.0.33", features = ["full", "visit", 
"extra-traits"] } +syn = { version = "2.0.36", features = ["full", "visit", "extra-traits"] } frame-support-procedural-tools-derive = { path = "derive" } diff --git a/substrate/frame/support/procedural/tools/derive/Cargo.toml b/substrate/frame/support/procedural/tools/derive/Cargo.toml index 472e288c3df8..747d3bacd425 100644 --- a/substrate/frame/support/procedural/tools/derive/Cargo.toml +++ b/substrate/frame/support/procedural/tools/derive/Cargo.toml @@ -17,4 +17,4 @@ proc-macro = true [dependencies] proc-macro2 = "1.0.56" quote = { version = "1.0.28", features = ["proc-macro"] } -syn = { version = "2.0.33", features = ["proc-macro", "full", "extra-traits", "parsing"] } +syn = { version = "2.0.36", features = ["proc-macro", "full", "extra-traits", "parsing"] } diff --git a/substrate/primitives/api/proc-macro/Cargo.toml b/substrate/primitives/api/proc-macro/Cargo.toml index d53b9b702e7b..71f1ff95d555 100644 --- a/substrate/primitives/api/proc-macro/Cargo.toml +++ b/substrate/primitives/api/proc-macro/Cargo.toml @@ -17,7 +17,7 @@ proc-macro = true [dependencies] quote = "1.0.28" -syn = { version = "2.0.33", features = ["full", "fold", "extra-traits", "visit"] } +syn = { version = "2.0.36", features = ["full", "fold", "extra-traits", "visit"] } proc-macro2 = "1.0.56" blake2 = { version = "0.10.4", default-features = false } proc-macro-crate = "1.1.3" diff --git a/substrate/primitives/core/hashing/proc-macro/Cargo.toml b/substrate/primitives/core/hashing/proc-macro/Cargo.toml index 5e7cd9e3a7a4..fce09b452e5d 100644 --- a/substrate/primitives/core/hashing/proc-macro/Cargo.toml +++ b/substrate/primitives/core/hashing/proc-macro/Cargo.toml @@ -17,5 +17,5 @@ proc-macro = true [dependencies] quote = "1.0.28" -syn = { version = "2.0.33", features = ["full", "parsing"] } +syn = { version = "2.0.36", features = ["full", "parsing"] } sp-core-hashing = { path = "..", default-features = false} diff --git a/substrate/primitives/debug-derive/Cargo.toml b/substrate/primitives/debug-derive/Cargo.toml index 82f4e8cf6048..689a12505694 100644 --- a/substrate/primitives/debug-derive/Cargo.toml +++ b/substrate/primitives/debug-derive/Cargo.toml @@ -18,7 +18,7 @@ proc-macro = true [dependencies] quote = "1.0.28" -syn = "2.0.33" +syn = "2.0.36" proc-macro2 = "1.0.56" [features] diff --git a/substrate/primitives/runtime-interface/proc-macro/Cargo.toml b/substrate/primitives/runtime-interface/proc-macro/Cargo.toml index 0884efc56d4e..fe06c56d5a15 100644 --- a/substrate/primitives/runtime-interface/proc-macro/Cargo.toml +++ b/substrate/primitives/runtime-interface/proc-macro/Cargo.toml @@ -20,4 +20,4 @@ Inflector = "0.11.4" proc-macro-crate = "1.1.3" proc-macro2 = "1.0.56" quote = "1.0.28" -syn = { version = "2.0.33", features = ["full", "visit", "fold", "extra-traits"] } +syn = { version = "2.0.36", features = ["full", "visit", "fold", "extra-traits"] } diff --git a/substrate/primitives/version/proc-macro/Cargo.toml b/substrate/primitives/version/proc-macro/Cargo.toml index a3478e3e5ca9..3cd1b7a3a76d 100644 --- a/substrate/primitives/version/proc-macro/Cargo.toml +++ b/substrate/primitives/version/proc-macro/Cargo.toml @@ -19,7 +19,7 @@ proc-macro = true codec = { package = "parity-scale-codec", version = "3.6.1", features = [ "derive" ] } proc-macro2 = "1.0.56" quote = "1.0.28" -syn = { version = "2.0.33", features = ["full", "fold", "extra-traits", "visit"] } +syn = { version = "2.0.36", features = ["full", "fold", "extra-traits", "visit"] } [dev-dependencies] sp-version = { path = ".." 
} From 8900d5b23a8e0be100552801207e174c47821ad6 Mon Sep 17 00:00:00 2001 From: Chevdor Date: Mon, 18 Sep 2023 16:08:57 +0200 Subject: [PATCH 19/69] Move ISSUE_TEMPLATE (#1567) This PR moves the `ISSUE_TEMPLATE` to the root and removes the old ones. --- .../ISSUE_TEMPLATE/blank.md | 0 .../ISSUE_TEMPLATE/bug_report.yaml | 5 +- .../ISSUE_TEMPLATE/config.yml | 0 .../ISSUE_TEMPLATE/feature.yaml | 2 +- .../.github/ISSUE_TEMPLATE/release-client.md | 20 ------- .../.github/ISSUE_TEMPLATE/release-runtime.md | 54 ------------------- polkadot/.github/ISSUE_TEMPLATE/bug_report.md | 13 ----- polkadot/.github/ISSUE_TEMPLATE/release.md | 52 ------------------ 8 files changed, 4 insertions(+), 142 deletions(-) rename {cumulus/.github => .github}/ISSUE_TEMPLATE/blank.md (100%) rename substrate/.github/ISSUE_TEMPLATE/bug.yaml => .github/ISSUE_TEMPLATE/bug_report.yaml (92%) rename {substrate/.github => .github}/ISSUE_TEMPLATE/config.yml (100%) rename {substrate/.github => .github}/ISSUE_TEMPLATE/feature.yaml (98%) delete mode 100644 cumulus/.github/ISSUE_TEMPLATE/release-client.md delete mode 100644 cumulus/.github/ISSUE_TEMPLATE/release-runtime.md delete mode 100644 polkadot/.github/ISSUE_TEMPLATE/bug_report.md delete mode 100644 polkadot/.github/ISSUE_TEMPLATE/release.md diff --git a/cumulus/.github/ISSUE_TEMPLATE/blank.md b/.github/ISSUE_TEMPLATE/blank.md similarity index 100% rename from cumulus/.github/ISSUE_TEMPLATE/blank.md rename to .github/ISSUE_TEMPLATE/blank.md diff --git a/substrate/.github/ISSUE_TEMPLATE/bug.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml similarity index 92% rename from substrate/.github/ISSUE_TEMPLATE/bug.yaml rename to .github/ISSUE_TEMPLATE/bug_report.yaml index ae40df08eca7..f828a5d9d893 100644 --- a/substrate/.github/ISSUE_TEMPLATE/bug.yaml +++ b/.github/ISSUE_TEMPLATE/bug_report.yaml @@ -1,6 +1,7 @@ name: Bug Report description: Let us know about an issue you experienced with this software -# labels: ["some existing label","another one"] +labels: [ I2-bug, I10-unconfirmed ] + body: - type: checkboxes attributes: @@ -20,7 +21,7 @@ body: id: bug attributes: label: Description of bug - # description: What seems to be the problem? + description: What seems to be the problem? # placeholder: Describe the problem. validations: required: true diff --git a/substrate/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml similarity index 100% rename from substrate/.github/ISSUE_TEMPLATE/config.yml rename to .github/ISSUE_TEMPLATE/config.yml diff --git a/substrate/.github/ISSUE_TEMPLATE/feature.yaml b/.github/ISSUE_TEMPLATE/feature.yaml similarity index 98% rename from substrate/.github/ISSUE_TEMPLATE/feature.yaml rename to .github/ISSUE_TEMPLATE/feature.yaml index 6a59522ab4b4..828e8b461ccc 100644 --- a/substrate/.github/ISSUE_TEMPLATE/feature.yaml +++ b/.github/ISSUE_TEMPLATE/feature.yaml @@ -1,6 +1,6 @@ name: Feature Request description: Submit your requests and suggestions to improve! 
-labels: ["J0-enhancement"] +labels: [ I5-enhancement ] body: - type: checkboxes id: existing diff --git a/cumulus/.github/ISSUE_TEMPLATE/release-client.md b/cumulus/.github/ISSUE_TEMPLATE/release-client.md deleted file mode 100644 index bb7f20615767..000000000000 --- a/cumulus/.github/ISSUE_TEMPLATE/release-client.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -name: Release Checklist for Client -about: Release Checklist for Client -title: Release Checklist for Client {{ env.VERSION }} ---- - -# Release Checklist - Client - -### Client Release - -- [ ] build a new `polkadot-parachain` binary and publish it to S3 -- [ ] new `polkadot-parachain` version has [run on the network](../../docs/release.md#burnin) - without issue for at least 12h -- [ ] a draft release has been created in the [Github Releases page](https://github.com/paritytech/cumulus/releases) with the relevant release-notes -- [ ] the [build artifacts](../../docs/release.md#build-artifacts) have been added to the - draft-release. - ---- - -Read more about the [release documentation](../../docs/release.md). diff --git a/cumulus/.github/ISSUE_TEMPLATE/release-runtime.md b/cumulus/.github/ISSUE_TEMPLATE/release-runtime.md deleted file mode 100644 index 0f3543759afd..000000000000 --- a/cumulus/.github/ISSUE_TEMPLATE/release-runtime.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -name: Release Checklist for Runtime -about: Release Checklist for Runtime -title: Release Checklist for Runtime {{ env.VERSION }} ---- - -# Release Checklist - Runtimes - -**All** following checks must be completed before publishing a new release. -The release process is owned and led by @paritytech/release-engineering team. -The checks marked with :crab: are meant to be checked by [a runtime engineer](https://github.com/paritytech/cumulus/issues/1761). - -## Runtimes Release - -### Codebase -These checks should be performed on the codebase. - -- [ ] the [`spec_version`](https://github.com/paritytech/cumulus/blob/master/docs/release.md#spec-version) has been incremented since the - last release for any native runtimes from any existing use on public (non-private/test) networks -- [ ] :crab: previously [completed migrations](https://github.com/paritytech/cumulus/blob/master/docs/release.md#old-migrations-removed) are removed for any public (non-private/test) networks -- [ ] pallet and [extrinsic ordering](https://github.com/paritytech/cumulus/blob/master/docs/release.md#extrinsic-ordering--storage) as well as `SignedExtension`s have stayed - the same. Bump `transaction_version` otherwise -- [ ] the [benchmarks](https://github.com/paritytech/ci_cd/wiki/Benchmarks:-cumulus) ran -- [ ] the weights have been updated for any modified runtime logic -- [ ] :crab: the new weights are sane, there are no significant (>50%) drops or rises with no reason -- [ ] :crab: XCM config is compatible with the configurations and versions of relevant interlocutors, like the Relay Chain. - -### On the release branch - -The following checks can be performed after we have forked off to the release-candidate branch or started an additional release candidate branch (rc-2, rc-3, etc) - -- [ ] Verify [new migrations](https://github.com/paritytech/cumulus/blob/master/docs/release.md#new-migrations) complete successfully, and the - runtime state is correctly updated for any public (non-private/test) - networks -- [ ] Run [integration tests](https://github.com/paritytech/cumulus/blob/master/docs/release.md#integration-tests), and make sure they pass. 
-- [ ] Push runtime upgrade to Asset Hub Westend and verify network stability -- [ ] Push runtime upgrade to Collectives and verify network stability -- [ ] Push runtime upgrade to Bridge-Hub-Kusama and verify network stability - - -### Github - -- [ ] Check that a draft release has been created at the [Github Releases page](https://github.com/paritytech/cumulus/releases) with relevant [release - notes](https://github.com/paritytech/cumulus/blob/master/docs/release.md#release-notes) -- [ ] Check that [build artifacts](https://github.com/paritytech/cumulus/blob/master/docs/release.md#build-artifacts) have been added to the - draft-release. - -# Post release - -- [ ] :crab: all commits (runtime version bumps, fixes) on this release branch have been merged back to master. - ---- - -Read more about the [release documentation](https://github.com/paritytech/cumulus/blob/master/docs/release.md). diff --git a/polkadot/.github/ISSUE_TEMPLATE/bug_report.md b/polkadot/.github/ISSUE_TEMPLATE/bug_report.md deleted file mode 100644 index c2214ab7d932..000000000000 --- a/polkadot/.github/ISSUE_TEMPLATE/bug_report.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -name: Bug report -about: Create a report to help us improve -title: '' -labels: '' -assignees: '' - ---- - -- It would help if you submit info about the system you are running, e.g.: operating system, kernel version, amount of available memory and swap, etc. -- Logs could be very helpful. If possible, submit the whole log. Please format it as ```code blocks```. -- Describe the role your node plays, e.g. validator, full node or light client. -- Any command-line options were passed? diff --git a/polkadot/.github/ISSUE_TEMPLATE/release.md b/polkadot/.github/ISSUE_TEMPLATE/release.md deleted file mode 100644 index 37b422a9b3ec..000000000000 --- a/polkadot/.github/ISSUE_TEMPLATE/release.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -name: Release issue template -about: Tracking issue for new releases -title: Polkadot {{ env.VERSION }} Release checklist ---- -# Release Checklist - -This is the release checklist for Polkadot {{ env.VERSION }}. **All** following -checks should be completed before publishing a new release of the -Polkadot/Kusama/Westend/Rococo runtime or client. The current release candidate can be -checked out with `git checkout release-{{ env.VERSION }}` - -### Runtime Releases - -These checks should be performed on the codebase prior to forking to a release- -candidate branch. - -- [ ] Verify [`spec_version`](https://github.com/paritytech/polkadot/blob/master/doc/release-checklist.md#spec-version) has been incremented since the - last release for any native runtimes from any existing use on public - (non-private) networks. If the runtime was published (release or pre-release), either - the `spec_version` or `impl` must be bumped. -- [ ] Verify previously [completed migrations](https://github.com/paritytech/polkadot/blob/master/doc/release-checklist.md#old-migrations-removed) are - removed for any public (non-private/test) networks. -- [ ] Verify pallet and [extrinsic ordering](https://github.com/paritytech/polkadot/blob/master/doc/release-checklist.md#extrinsic-ordering) has stayed - the same. Bump `transaction_version` if not. -- [ ] Verify new extrinsics have been correctly whitelisted/blacklisted for - [proxy filters](https://github.com/paritytech/polkadot/blob/master/doc/release-checklist.md#proxy-filtering). 
-- [ ] Verify [benchmarks](https://github.com/paritytech/polkadot/blob/master/doc/release-checklist.md#benchmarks) have been updated for any modified - runtime logic. - -The following checks can be performed after we have forked off to the release- -candidate branch or started an additional release candidate branch (rc-2, rc-3, etc) - -- [ ] Verify [new migrations](https://github.com/paritytech/polkadot/blob/master/doc/release-checklist.md#new-migrations) complete successfully, and the - runtime state is correctly updated for any public (non-private/test) - networks. -- [ ] Verify [Polkadot JS API](https://github.com/paritytech/polkadot/blob/master/doc/release-checklist.md#polkadot-js) are up to date with the latest - runtime changes. -- [ ] Check with the Signer's team to make sure metadata update QR are lined up -- [ ] Push runtime upgrade to Westend and verify network stability. - -### All Releases - -- [ ] Check that the new client versions have [run on the network](https://github.com/paritytech/polkadot/blob/master/doc/release-checklist.md#burn-in) - without issue for 12+ hours on >75% of our validator nodes. -- [ ] Check that a draft release has been created at - https://github.com/paritytech/polkadot/releases with relevant [release - notes](https://github.com/paritytech/polkadot/blob/master/doc/release-checklist.md#release-notes) -- [ ] Check that [build artifacts](https://github.com/paritytech/polkadot/blob/master/doc/release-checklist.md#build-artifacts) have been added to the - draft-release -- [ ] Check that all items listed in the [milestone](https://github.com/paritytech/polkadot/milestones) are included in the release. -- [ ] Ensure that no `freenotes` were added into the release branch after the latest generated RC From e6f5e23b0f894954c0f04ba379966f2466f51de6 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Mon, 18 Sep 2023 16:35:40 +0200 Subject: [PATCH 20/69] [ci] Publish implementers guide (#1615) This PR adds a job that publishes the implementers' guide. cc https://github.com/paritytech/polkadot-sdk/issues/1614 cc https://github.com/paritytech/ci_cd/issues/879 --- .gitlab/pipeline/build.yml | 2 +- .gitlab/pipeline/publish.yml | 12 ++++++++++-- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/.gitlab/pipeline/build.yml b/.gitlab/pipeline/build.yml index 3085d2230063..5b53798c403d 100644 --- a/.gitlab/pipeline/build.yml +++ b/.gitlab/pipeline/build.yml @@ -127,7 +127,7 @@ build-implementers-guide: - .kubernetes-env - .common-refs - .run-immediately - # - .collect-artifacts + - .collect-artifacts # git depth is set on purpose: https://github.com/paritytech/polkadot/issues/6284 variables: GIT_STRATEGY: clone diff --git a/.gitlab/pipeline/publish.yml b/.gitlab/pipeline/publish.yml index 9e24b8606a4d..a03d407c0409 100644 --- a/.gitlab/pipeline/publish.yml +++ b/.gitlab/pipeline/publish.yml @@ -16,6 +16,8 @@ publish-rustdoc: needs: - job: build-rustdoc artifacts: true + - job: build-implementers-guide + artifacts: true script: # If $CI_COMMIT_REF_NAME doesn't match one of $RUSTDOCS_DEPLOY_REFS space-separated values, we # exit immediately.
@@ -34,15 +36,21 @@ publish-rustdoc: - git fetch origin gh-pages # Save README and docs - cp -r ./crate-docs/ /tmp/doc/ + - cp -r ./artifacts/book/ /tmp/ - cp README.md /tmp/doc/ # we don't need to commit changes because we copy docs to /tmp - git checkout gh-pages --force + # Enable if docs needed for other refs # Install `index-tpl-crud` and generate index.html based on RUSTDOCS_DEPLOY_REFS - - which index-tpl-crud &> /dev/null || yarn global add @substrate/index-tpl-crud - - index-tpl-crud upsert ./index.html ${CI_COMMIT_REF_NAME} # Ensure the destination dir doesn't exist. - rm -rf ${CI_COMMIT_REF_NAME} + - rm -rf book/ - mv -f /tmp/doc ${CI_COMMIT_REF_NAME} # dir for implementors guide + - mkdir -p book + - mv /tmp/book/html/* book/ # Upload files - git add --all # `git commit` has an exit code of > 0 if there is nothing to commit. From a181ced46b6924d6179288e5142e81fdd0927d30 Mon Sep 17 00:00:00 2001 From: georgepisaltu <52418509+georgepisaltu@users.noreply.github.com> Date: Mon, 18 Sep 2023 18:17:14 +0300 Subject: [PATCH 21/69] Replace free for all collation in `cumulus` runtimes (#1251) Partially fixes #103. This PR removes instances of "free for all" collation in the `glutton`, `shell`, and `seedling` runtimes and replaces them with Aura instances. Aura is configured without a session manager, so the initial authority set cannot be changed later on. --------- Signed-off-by: georgepisaltu --- Cargo.lock | 14 ++++ .../glutton/glutton-kusama/Cargo.toml | 16 ++++ .../glutton/glutton-kusama/src/lib.rs | 80 ++++++++++++++++--- .../glutton/glutton-kusama/src/weights/mod.rs | 1 + .../src/weights/pallet_timestamp.rs | 75 +++++++++++++++++ .../runtimes/starters/seedling/Cargo.toml | 14 +++- .../runtimes/starters/seedling/src/lib.rs | 76 +++++++++++++++--- .../runtimes/starters/shell/Cargo.toml | 13 +++ .../runtimes/starters/shell/src/lib.rs | 78 +++++++++++++++--- .../src/chain_spec/glutton.rs | 32 +++++++- .../src/chain_spec/seedling.rs | 8 +- .../src/chain_spec/shell.rs | 14 +++- 12 files changed, 378 insertions(+), 43 deletions(-) create mode 100644 cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_timestamp.rs diff --git a/Cargo.lock b/Cargo.lock index a5ea33e1ae49..670d9c9e771c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5788,9 +5788,11 @@ dependencies = [ name = "glutton-runtime" version = "1.0.0" dependencies = [ + "cumulus-pallet-aura-ext",
"scale-info", "sp-api", "sp-block-builder", + "sp-consensus-aura", "sp-core", "sp-inherents", "sp-offchain", @@ -16302,6 +16312,7 @@ dependencies = [ name = "shell-runtime" version = "0.1.0" dependencies = [ + "cumulus-pallet-aura-ext", "cumulus-pallet-parachain-system", "cumulus-pallet-xcm", "cumulus-primitives-core", @@ -16309,12 +16320,15 @@ dependencies = [ "frame-support", "frame-system", "frame-try-runtime", + "pallet-aura", + "pallet-timestamp", "parachain-info", "parachains-common", "parity-scale-codec", "scale-info", "sp-api", "sp-block-builder", + "sp-consensus-aura", "sp-core", "sp-inherents", "sp-offchain", diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml b/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml index 2e2975ab87b8..0ffe59b927f9 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml +++ b/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml @@ -16,10 +16,13 @@ frame-system = { path = "../../../../../substrate/frame/system", default-feature frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true} +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} pallet-glutton = { path = "../../../../../substrate/frame/glutton", default-features = false, optional = true} pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false, optional = true} +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} +sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} @@ -36,9 +39,11 @@ xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} # Cumulus +cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } +cumulus-primitives-timestamp = { path = "../../../../primitives/timestamp", default-features = false } parachain-info = { path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } @@ -55,6 +60,7 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "pallet-glutton/runtime-benchmarks", "pallet-sudo?/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", 
"parachains-common/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", @@ -62,9 +68,11 @@ runtime-benchmarks = [ ] std = [ "codec/std", + "cumulus-pallet-aura-ext/std", "cumulus-pallet-parachain-system/std", "cumulus-pallet-xcm/std", "cumulus-primitives-core/std", + "cumulus-primitives-timestamp/std", "frame-benchmarking?/std", "frame-executive/std", "frame-support/std", @@ -72,13 +80,16 @@ std = [ "frame-system-rpc-runtime-api/std", "frame-system/std", "frame-try-runtime?/std", + "pallet-aura/std", "pallet-glutton/std", "pallet-sudo/std", + "pallet-timestamp/std", "parachain-info/std", "parachains-common/std", "scale-info/std", "sp-api/std", "sp-block-builder/std", + "sp-consensus-aura/std", "sp-core/std", "sp-inherents/std", "sp-offchain/std", @@ -93,14 +104,19 @@ std = [ "xcm/std", ] try-runtime = [ + "cumulus-pallet-aura-ext/try-runtime", "cumulus-pallet-parachain-system/try-runtime", "cumulus-pallet-xcm/try-runtime", "frame-executive/try-runtime", "frame-support/try-runtime", "frame-system/try-runtime", "frame-try-runtime/try-runtime", + "pallet-aura/try-runtime", "pallet-glutton/try-runtime", "pallet-sudo/try-runtime", + "pallet-timestamp/try-runtime", "parachain-info/try-runtime", "sp-runtime/try-runtime", ] + +experimental = [ "pallet-aura/experimental" ] diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs index dde8f747d463..41cb0fceebb5 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs @@ -46,11 +46,12 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); pub mod weights; pub mod xcm_config; -use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; +use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; use sp_api::impl_runtime_apis; -use sp_core::OpaqueMetadata; +pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ - create_runtime_str, generic, + create_runtime_str, generic, impl_opaque_keys, traits::{AccountIdLookup, BlakeTwo256, Block as BlockT}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, @@ -64,24 +65,37 @@ pub use frame_support::{ construct_runtime, dispatch::DispatchClass, parameter_types, - traits::{Everything, IsInVec, Randomness}, + traits::{ + ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything, IsInVec, Randomness, + }, weights::{ constants::{ BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_REF_TIME_PER_SECOND, }, IdentityFee, Weight, }, - StorageValue, + PalletId, StorageValue, }; use frame_system::{ limits::{BlockLength, BlockWeights}, EnsureRoot, }; -use parachains_common::{AccountId, Signature}; +use parachains_common::{ + kusama::consensus::{ + BLOCK_PROCESSING_VELOCITY, RELAY_CHAIN_SLOT_DURATION_MILLIS, UNINCLUDED_SEGMENT_CAPACITY, + }, + AccountId, Signature, SLOT_DURATION, +}; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; pub use sp_runtime::{Perbill, Permill}; +impl_opaque_keys! 
{ + pub struct SessionKeys { + pub aura: Aura, + } +} + #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("glutton"), @@ -178,12 +192,35 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ReservedDmpWeight = ReservedDmpWeight; type XcmpMessageHandler = (); type ReservedXcmpWeight = (); - type CheckAssociatedRelayNumber = RelayNumberStrictlyIncreases; - type ConsensusHook = cumulus_pallet_parachain_system::consensus_hook::ExpectParentIncluded; + type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; + type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< + Runtime, + RELAY_CHAIN_SLOT_DURATION_MILLIS, + BLOCK_PROCESSING_VELOCITY, + UNINCLUDED_SEGMENT_CAPACITY, + >; } impl parachain_info::Config for Runtime {} +impl cumulus_pallet_aura_ext::Config for Runtime {} + +impl pallet_timestamp::Config for Runtime { + type Moment = u64; + type OnTimestampSet = Aura; + type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>; + type WeightInfo = weights::pallet_timestamp::WeightInfo; +} + +impl pallet_aura::Config for Runtime { + type AuthorityId = AuraId; + type DisabledValidators = (); + type MaxAuthorities = ConstU32<100_000>; + type AllowMultipleBlocksPerSlot = ConstBool; + #[cfg(feature = "experimental")] + type SlotDuration = pallet_aura::MinimumPeriodTimesTwo; +} + impl pallet_glutton::Config for Runtime { type RuntimeEvent = RuntimeEvent; type WeightInfo = weights::pallet_glutton::WeightInfo; @@ -204,6 +241,7 @@ construct_runtime! { Pallet, Call, Config, Storage, Inherent, Event, ValidateUnsigned, } = 1, ParachainInfo: parachain_info::{Pallet, Storage, Config} = 2, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent} = 3, // DMP handler. CumulusXcm: cumulus_pallet_xcm::{Pallet, Call, Storage, Event, Origin} = 10, @@ -211,6 +249,10 @@ construct_runtime! { // The main stage. Glutton: pallet_glutton::{Pallet, Call, Storage, Event, Config} = 20, + // Collator support + Aura: pallet_aura::{Pallet, Storage, Config} = 30, + AuraExt: cumulus_pallet_aura_ext::{Pallet, Storage, Config} = 31, + // Sudo. Sudo: pallet_sudo::{Pallet, Call, Storage, Event, Config} = 255, } @@ -295,6 +337,16 @@ impl_runtime_apis! { } } + impl sp_consensus_aura::AuraApi for Runtime { + fn slot_duration() -> sp_consensus_aura::SlotDuration { + sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) + } + + fn authorities() -> Vec { + Aura::authorities().into_inner() + } + } + impl sp_block_builder::BlockBuilder for Runtime { fn apply_extrinsic( extrinsic: ::Extrinsic, @@ -332,12 +384,14 @@ impl_runtime_apis! { } impl sp_session::SessionKeys for Runtime { - fn decode_session_keys(_: Vec) -> Option, sp_core::crypto::KeyTypeId)>> { - Some(Vec::new()) + fn generate_session_keys(seed: Option>) -> Vec { + SessionKeys::generate(seed) } - fn generate_session_keys(_: Option>) -> Vec { - Vec::new() + fn decode_session_keys( + encoded: Vec, + ) -> Option, KeyTypeId)>> { + SessionKeys::decode_into_raw_public_keys(&encoded) } } @@ -402,5 +456,5 @@ impl_runtime_apis! { cumulus_pallet_parachain_system::register_validate_block! 
{ Runtime = Runtime, - BlockExecutor = Executive, + BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, } diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/mod.rs b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/mod.rs index 990558538acf..955010505d31 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/mod.rs @@ -15,3 +15,4 @@ // along with Cumulus. If not, see . pub mod pallet_glutton; +pub mod pallet_timestamp; diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_timestamp.rs b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_timestamp.rs new file mode 100644 index 000000000000..8edae065f1b9 --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_timestamp.rs @@ -0,0 +1,75 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_timestamp` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=asset-hub-kusama-dev +// --wasm-execution=compiled +// --pallet=pallet_timestamp +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_timestamp`. +pub struct WeightInfo(PhantomData); +impl pallet_timestamp::WeightInfo for WeightInfo { + /// Storage: `Timestamp::Now` (r:1 w:1) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Aura::CurrentSlot` (r:1 w:0) + /// Proof: `Aura::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + fn set() -> Weight { + // Proof Size summary in bytes: + // Measured: `86` + // Estimated: `1493` + // Minimum execution time: 9_313_000 picoseconds. 
+ Weight::from_parts(9_775_000, 0) + .saturating_add(Weight::from_parts(0, 1493)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn on_finalize() -> Weight { + // Proof Size summary in bytes: + // Measured: `57` + // Estimated: `0` + // Minimum execution time: 3_322_000 picoseconds. + Weight::from_parts(3_577_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } +} diff --git a/cumulus/parachains/runtimes/starters/seedling/Cargo.toml b/cumulus/parachains/runtimes/starters/seedling/Cargo.toml index 2cd09d3a9eb0..1b68b720d977 100644 --- a/cumulus/parachains/runtimes/starters/seedling/Cargo.toml +++ b/cumulus/parachains/runtimes/starters/seedling/Cargo.toml @@ -12,10 +12,13 @@ scale-info = { version = "2.9.0", default-features = false, features = ["derive" frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} frame-support = { path = "../../../../../substrate/frame/support", default-features = false} frame-system = { path = "../../../../../substrate/frame/system", default-features = false} +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false} +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} +sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} @@ -26,11 +29,13 @@ sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction- sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} # Cumulus +cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } cumulus-pallet-solo-to-para = { path = "../../../../pallets/solo-to-para", default-features = false } +cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } +cumulus-primitives-timestamp = { path = "../../../../primitives/timestamp", default-features = false } parachain-info = { path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } [build-dependencies] substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } @@ -39,19 +44,24 @@ substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", default = [ "std" ] std = [ "codec/std", + "cumulus-pallet-aura-ext/std", "cumulus-pallet-parachain-system/std", "cumulus-pallet-solo-to-para/std", "cumulus-primitives-core/std", + "cumulus-primitives-timestamp/std", "frame-executive/std", 
"frame-support/std", "frame-system/std", + "pallet-aura/std", "pallet-balances/std", "pallet-sudo/std", + "pallet-timestamp/std", "parachain-info/std", "parachains-common/std", "scale-info/std", "sp-api/std", "sp-block-builder/std", + "sp-consensus-aura/std", "sp-core/std", "sp-inherents/std", "sp-offchain/std", @@ -62,3 +72,5 @@ std = [ "sp-version/std", "substrate-wasm-builder", ] + +experimental = [ "pallet-aura/experimental" ] diff --git a/cumulus/parachains/runtimes/starters/seedling/src/lib.rs b/cumulus/parachains/runtimes/starters/seedling/src/lib.rs index 5f6733faf706..34e82737f82b 100644 --- a/cumulus/parachains/runtimes/starters/seedling/src/lib.rs +++ b/cumulus/parachains/runtimes/starters/seedling/src/lib.rs @@ -27,11 +27,12 @@ #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); -use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; +use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; use sp_api::impl_runtime_apis; -use sp_core::OpaqueMetadata; +pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ - create_runtime_str, generic, + create_runtime_str, generic, impl_opaque_keys, traits::{AccountIdLookup, BlakeTwo256, Block as BlockT}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, @@ -46,7 +47,7 @@ pub use frame_support::{ construct_runtime, dispatch::DispatchClass, parameter_types, - traits::{IsInVec, Randomness}, + traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, IsInVec, Randomness}, weights::{ constants::{ BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_REF_TIME_PER_SECOND, @@ -61,6 +62,12 @@ use parachains_common::{AccountId, Signature}; pub use sp_runtime::BuildStorage; pub use sp_runtime::{Perbill, Permill}; +impl_opaque_keys! { + pub struct SessionKeys { + pub aura: Aura, + } +} + /// This runtime version. #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { @@ -80,6 +87,15 @@ pub fn native_version() -> NativeVersion { NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } } +/// Maximum number of blocks simultaneously accepted by the Runtime, not yet included +/// into the relay chain. +const UNINCLUDED_SEGMENT_CAPACITY: u32 = 1; +/// How many parachain blocks are processed by the relay chain per parent. Limits the +/// number of blocks authored per slot. +const BLOCK_PROCESSING_VELOCITY: u32 = 1; +/// Relay chain slot duration, in milliseconds. +const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; + /// We assume that ~10% of the block weight is consumed by `on_initialize` handlers. /// This is used to limit the maximal weight of a single extrinsic. 
const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); @@ -174,23 +190,49 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ReservedDmpWeight = (); type XcmpMessageHandler = (); type ReservedXcmpWeight = (); - type CheckAssociatedRelayNumber = RelayNumberStrictlyIncreases; - type ConsensusHook = cumulus_pallet_parachain_system::consensus_hook::ExpectParentIncluded; + type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; + type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< + Runtime, + RELAY_CHAIN_SLOT_DURATION_MILLIS, + BLOCK_PROCESSING_VELOCITY, + UNINCLUDED_SEGMENT_CAPACITY, + >; } impl parachain_info::Config for Runtime {} +impl cumulus_pallet_aura_ext::Config for Runtime {} + +impl pallet_aura::Config for Runtime { + type AuthorityId = AuraId; + type DisabledValidators = (); + type MaxAuthorities = ConstU32<100_000>; + type AllowMultipleBlocksPerSlot = ConstBool; + #[cfg(feature = "experimental")] + type SlotDuration = pallet_aura::MinimumPeriodTimesTwo; +} + +impl pallet_timestamp::Config for Runtime { + type Moment = u64; + type OnTimestampSet = Aura; + type MinimumPeriod = ConstU64<0>; + type WeightInfo = (); +} + construct_runtime! { pub enum Runtime { System: frame_system::{Pallet, Call, Storage, Config, Event}, Sudo: pallet_sudo::{Pallet, Call, Storage, Config, Event}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, ParachainSystem: cumulus_pallet_parachain_system::{ Pallet, Call, Config, Storage, Inherent, Event, ValidateUnsigned, }, ParachainInfo: parachain_info::{Pallet, Storage, Config}, SoloToPara: cumulus_pallet_solo_to_para::{Pallet, Call, Storage, Event}, + Aura: pallet_aura::{Pallet, Storage, Config}, + AuraExt: cumulus_pallet_aura_ext::{Pallet, Storage, Config}, } } @@ -233,6 +275,16 @@ pub type Executive = frame_executive::Executive< >; impl_runtime_apis! { + impl sp_consensus_aura::AuraApi for Runtime { + fn slot_duration() -> sp_consensus_aura::SlotDuration { + sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) + } + + fn authorities() -> Vec { + Aura::authorities().into_inner() + } + } + impl sp_api::Core for Runtime { fn version() -> RuntimeVersion { VERSION @@ -298,12 +350,14 @@ impl_runtime_apis! { } impl sp_session::SessionKeys for Runtime { - fn decode_session_keys(_: Vec) -> Option, sp_core::crypto::KeyTypeId)>> { - Some(Vec::new()) + fn generate_session_keys(seed: Option>) -> Vec { + SessionKeys::generate(seed) } - fn generate_session_keys(_: Option>) -> Vec { - Vec::new() + fn decode_session_keys( + encoded: Vec, + ) -> Option, KeyTypeId)>> { + SessionKeys::decode_into_raw_public_keys(&encoded) } } @@ -316,5 +370,5 @@ impl_runtime_apis! { cumulus_pallet_parachain_system::register_validate_block! 
{ Runtime = Runtime, - BlockExecutor = Executive, + BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, } diff --git a/cumulus/parachains/runtimes/starters/shell/Cargo.toml b/cumulus/parachains/runtimes/starters/shell/Cargo.toml index 6f9046057102..46cef8d4ae08 100644 --- a/cumulus/parachains/runtimes/starters/shell/Cargo.toml +++ b/cumulus/parachains/runtimes/starters/shell/Cargo.toml @@ -13,8 +13,11 @@ frame-executive = { path = "../../../../../substrate/frame/executive", default-f frame-support = { path = "../../../../../substrate/frame/support", default-features = false} frame-system = { path = "../../../../../substrate/frame/system", default-features = false} frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} +sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} @@ -30,6 +33,7 @@ xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} # Cumulus +cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } @@ -43,6 +47,7 @@ substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", default = [ "std" ] std = [ "codec/std", + "cumulus-pallet-aura-ext/std", "cumulus-pallet-parachain-system/std", "cumulus-pallet-xcm/std", "cumulus-primitives-core/std", @@ -50,11 +55,14 @@ std = [ "frame-support/std", "frame-system/std", "frame-try-runtime?/std", + "pallet-aura/std", + "pallet-timestamp/std", "parachain-info/std", "parachains-common/std", "scale-info/std", "sp-api/std", "sp-block-builder/std", + "sp-consensus-aura/std", "sp-core/std", "sp-inherents/std", "sp-offchain/std", @@ -69,12 +77,17 @@ std = [ "xcm/std", ] try-runtime = [ + "cumulus-pallet-aura-ext/try-runtime", "cumulus-pallet-parachain-system/try-runtime", "cumulus-pallet-xcm/try-runtime", "frame-executive/try-runtime", "frame-support/try-runtime", "frame-system/try-runtime", "frame-try-runtime/try-runtime", + "pallet-aura/try-runtime", + "pallet-timestamp/try-runtime", "parachain-info/try-runtime", "sp-runtime/try-runtime", ] + +experimental = [ "pallet-aura/experimental" ] diff --git a/cumulus/parachains/runtimes/starters/shell/src/lib.rs b/cumulus/parachains/runtimes/starters/shell/src/lib.rs index ef914a246efc..477933b5c8d2 100644 --- a/cumulus/parachains/runtimes/starters/shell/src/lib.rs +++ 
b/cumulus/parachains/runtimes/starters/shell/src/lib.rs @@ -32,13 +32,14 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); pub mod xcm_config; use codec::{Decode, Encode}; -use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; +use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; use frame_support::unsigned::TransactionValidityError; use scale_info::TypeInfo; use sp_api::impl_runtime_apis; -use sp_core::OpaqueMetadata; +pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ - create_runtime_str, generic, + create_runtime_str, generic, impl_opaque_keys, traits::{AccountIdLookup, BlakeTwo256, Block as BlockT, DispatchInfoOf}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, @@ -53,7 +54,7 @@ pub use frame_support::{ construct_runtime, dispatch::DispatchClass, parameter_types, - traits::{Everything, IsInVec, Randomness}, + traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, IsInVec, Randomness}, weights::{ constants::{ BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_REF_TIME_PER_SECOND, @@ -68,6 +69,12 @@ use parachains_common::{AccountId, Signature}; pub use sp_runtime::BuildStorage; pub use sp_runtime::{Perbill, Permill}; +impl_opaque_keys! { + pub struct SessionKeys { + pub aura: Aura, + } +} + /// This runtime version. #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { @@ -87,6 +94,15 @@ pub fn native_version() -> NativeVersion { NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } } +/// Maximum number of blocks simultaneously accepted by the Runtime, not yet included +/// into the relay chain. +const UNINCLUDED_SEGMENT_CAPACITY: u32 = 1; +/// How many parachain blocks are processed by the relay chain per parent. Limits the +/// number of blocks authored per slot. +const BLOCK_PROCESSING_VELOCITY: u32 = 1; +/// Relay chain slot duration, in milliseconds. +const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; + /// We assume that ~10% of the block weight is consumed by `on_initialize` handlers. /// This is used to limit the maximal weight of a single extrinsic. const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); @@ -177,16 +193,41 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ReservedDmpWeight = ReservedDmpWeight; type XcmpMessageHandler = (); type ReservedXcmpWeight = (); - type CheckAssociatedRelayNumber = RelayNumberStrictlyIncreases; - type ConsensusHook = cumulus_pallet_parachain_system::consensus_hook::ExpectParentIncluded; + type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; + type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< + Runtime, + RELAY_CHAIN_SLOT_DURATION_MILLIS, + BLOCK_PROCESSING_VELOCITY, + UNINCLUDED_SEGMENT_CAPACITY, + >; } impl parachain_info::Config for Runtime {} +impl cumulus_pallet_aura_ext::Config for Runtime {} + +impl pallet_aura::Config for Runtime { + type AuthorityId = AuraId; + type DisabledValidators = (); + type MaxAuthorities = ConstU32<100_000>; + type AllowMultipleBlocksPerSlot = ConstBool; + #[cfg(feature = "experimental")] + type SlotDuration = pallet_aura::MinimumPeriodTimesTwo; +} + +impl pallet_timestamp::Config for Runtime { + type Moment = u64; + type OnTimestampSet = Aura; + type MinimumPeriod = ConstU64<0>; + type WeightInfo = (); +} + construct_runtime! 
{ pub enum Runtime { System: frame_system::{Pallet, Call, Storage, Config, Event}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, + ParachainSystem: cumulus_pallet_parachain_system::{ Pallet, Call, Config, Storage, Inherent, Event, ValidateUnsigned, }, @@ -194,6 +235,9 @@ construct_runtime! { // DMP handler. CumulusXcm: cumulus_pallet_xcm::{Pallet, Call, Storage, Event, Origin}, + + Aura: pallet_aura::{Pallet, Storage, Config}, + AuraExt: cumulus_pallet_aura_ext::{Pallet, Storage, Config}, } } @@ -263,6 +307,16 @@ pub type Executive = frame_executive::Executive< >; impl_runtime_apis! { + impl sp_consensus_aura::AuraApi for Runtime { + fn slot_duration() -> sp_consensus_aura::SlotDuration { + sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) + } + + fn authorities() -> Vec { + Aura::authorities().into_inner() + } + } + impl sp_api::Core for Runtime { fn version() -> RuntimeVersion { VERSION @@ -328,12 +382,14 @@ impl_runtime_apis! { } impl sp_session::SessionKeys for Runtime { - fn decode_session_keys(_: Vec) -> Option, sp_core::crypto::KeyTypeId)>> { - Some(Vec::new()) + fn generate_session_keys(seed: Option>) -> Vec { + SessionKeys::generate(seed) } - fn generate_session_keys(_: Option>) -> Vec { - Vec::new() + fn decode_session_keys( + encoded: Vec, + ) -> Option, KeyTypeId)>> { + SessionKeys::decode_into_raw_public_keys(&encoded) } } @@ -346,5 +402,5 @@ impl_runtime_apis! { cumulus_pallet_parachain_system::register_validate_block! { Runtime = Runtime, - BlockExecutor = Executive, + BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, } diff --git a/cumulus/polkadot-parachain/src/chain_spec/glutton.rs b/cumulus/polkadot-parachain/src/chain_spec/glutton.rs index acd5b5bfbe13..881fae398827 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/glutton.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/glutton.rs @@ -16,9 +16,12 @@ use crate::chain_spec::{get_account_id_from_seed, Extensions}; use cumulus_primitives_core::ParaId; +use parachains_common::AuraId; use sc_service::ChainType; use sp_core::sr25519; +use super::get_collator_keys_from_seed; + /// Specialized `ChainSpec` for the Glutton parachain runtime. 
pub type GluttonChainSpec = sc_service::GenericChainSpec; @@ -30,7 +33,7 @@ pub fn glutton_development_config(para_id: ParaId) -> GluttonChainSpec { // ID "glutton_dev", ChainType::Local, - move || glutton_genesis(para_id), + move || glutton_genesis(para_id, vec![get_collator_keys_from_seed::("Alice")]), Vec::new(), None, None, @@ -47,7 +50,15 @@ pub fn glutton_local_config(para_id: ParaId) -> GluttonChainSpec { // ID "glutton_local", ChainType::Local, - move || glutton_genesis(para_id), + move || { + glutton_genesis( + para_id, + vec![ + get_collator_keys_from_seed::("Alice"), + get_collator_keys_from_seed::("Bob"), + ], + ) + }, Vec::new(), None, None, @@ -67,7 +78,15 @@ pub fn glutton_config(para_id: ParaId) -> GluttonChainSpec { // ID format!("glutton-kusama-{}", para_id).as_str(), ChainType::Live, - move || glutton_genesis(para_id), + move || { + glutton_genesis( + para_id, + vec![ + get_collator_keys_from_seed::("Alice"), + get_collator_keys_from_seed::("Bob"), + ], + ) + }, Vec::new(), None, // Protocol ID @@ -78,7 +97,10 @@ pub fn glutton_config(para_id: ParaId) -> GluttonChainSpec { ) } -fn glutton_genesis(parachain_id: ParaId) -> glutton_runtime::RuntimeGenesisConfig { +fn glutton_genesis( + parachain_id: ParaId, + collators: Vec, +) -> glutton_runtime::RuntimeGenesisConfig { glutton_runtime::RuntimeGenesisConfig { system: glutton_runtime::SystemConfig { code: glutton_runtime::WASM_BINARY @@ -94,6 +116,8 @@ fn glutton_genesis(parachain_id: ParaId) -> glutton_runtime::RuntimeGenesisConfi trash_data_count: Default::default(), ..Default::default() }, + aura: glutton_runtime::AuraConfig { authorities: collators }, + aura_ext: Default::default(), sudo: glutton_runtime::SudoConfig { key: Some(get_account_id_from_seed::("Alice")), }, diff --git a/cumulus/polkadot-parachain/src/chain_spec/seedling.rs b/cumulus/polkadot-parachain/src/chain_spec/seedling.rs index 3ebfb80d4685..6a5842320976 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/seedling.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/seedling.rs @@ -16,10 +16,12 @@ use crate::chain_spec::{get_account_id_from_seed, Extensions}; use cumulus_primitives_core::ParaId; -use parachains_common::AccountId; +use parachains_common::{AccountId, AuraId}; use sc_service::ChainType; use sp_core::sr25519; +use super::get_collator_keys_from_seed; + /// Specialized `ChainSpec` for the seedling parachain runtime. 
pub type SeedlingChainSpec =
	sc_service::GenericChainSpec<seedling_runtime::RuntimeGenesisConfig, Extensions>;

@@ -33,6 +35,7 @@ pub fn get_seedling_chain_spec() -> SeedlingChainSpec {
 			seedling_testnet_genesis(
 				get_account_id_from_seed::<sr25519::Public>("Alice"),
 				2000.into(),
+				vec![get_collator_keys_from_seed::<AuraId>("Alice")],
 			)
 		},
 		Vec::new(),
@@ -47,6 +50,7 @@ pub fn get_seedling_chain_spec() -> SeedlingChainSpec {
 fn seedling_testnet_genesis(
 	root_key: AccountId,
 	parachain_id: ParaId,
+	collators: Vec<AuraId>,
 ) -> seedling_runtime::RuntimeGenesisConfig {
 	seedling_runtime::RuntimeGenesisConfig {
 		system: seedling_runtime::SystemConfig {
@@ -61,5 +65,7 @@ fn seedling_testnet_genesis(
 			..Default::default()
 		},
 		parachain_system: Default::default(),
+		aura: seedling_runtime::AuraConfig { authorities: collators },
+		aura_ext: Default::default(),
 	}
 }
diff --git a/cumulus/polkadot-parachain/src/chain_spec/shell.rs b/cumulus/polkadot-parachain/src/chain_spec/shell.rs
index 7eb65591b12f..0899c1af3b4d 100644
--- a/cumulus/polkadot-parachain/src/chain_spec/shell.rs
+++ b/cumulus/polkadot-parachain/src/chain_spec/shell.rs
@@ -16,8 +16,11 @@
 use crate::chain_spec::Extensions;
 use cumulus_primitives_core::ParaId;
+use parachains_common::AuraId;
 use sc_service::ChainType;

+use super::get_collator_keys_from_seed;
+
 /// Specialized `ChainSpec` for the shell parachain runtime.
 pub type ShellChainSpec =
	sc_service::GenericChainSpec<shell_runtime::RuntimeGenesisConfig, Extensions>;
@@ -27,7 +30,9 @@ pub fn get_shell_chain_spec() -> ShellChainSpec {
 		"Shell Local Testnet",
 		"shell_local_testnet",
 		ChainType::Local,
-		move || shell_testnet_genesis(1000.into()),
+		move || {
+			shell_testnet_genesis(1000.into(), vec![get_collator_keys_from_seed::<AuraId>("Alice")])
+		},
 		Vec::new(),
 		None,
 		None,
@@ -37,7 +42,10 @@
 	)
 }

-fn shell_testnet_genesis(parachain_id: ParaId) -> shell_runtime::RuntimeGenesisConfig {
+fn shell_testnet_genesis(
+	parachain_id: ParaId,
+	collators: Vec<AuraId>,
+) -> shell_runtime::RuntimeGenesisConfig {
 	shell_runtime::RuntimeGenesisConfig {
 		system: shell_runtime::SystemConfig {
 			code: shell_runtime::WASM_BINARY
@@ -47,5 +55,7 @@ fn shell_testnet_genesis(parachain_id: ParaId) -> shell_runtime::RuntimeGenesisC
 		},
 		parachain_info: shell_runtime::ParachainInfoConfig { parachain_id, ..Default::default() },
 		parachain_system: Default::default(),
+		aura: shell_runtime::AuraConfig { authorities: collators },
+		aura_ext: Default::default(),
 	}
 }

From ffe5db0f7662f80dcf4805d093c6736880c0fb4d Mon Sep 17 00:00:00 2001
From: Ross Bulat
Date: Mon, 18 Sep 2023 18:32:30 +0100
Subject: [PATCH 22/69] Staking: Add `dest` to `Rewarded` to aid in reward calculations (#1602)

Addresses https://github.com/paritytech/polkadot-sdk/issues/129.

Returns `Self::payee()` from `make_payout` in a tuple alongside an imbalance and adds it to the `Rewarded` event.
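To make the new event shape concrete, here is a small self-contained sketch of how a consumer (e.g. an indexer) might use the added field. The `AccountId`/`Balance` aliases, the values, and the `main` driver are stand-ins for illustration only; in the pallet the event is generic over the runtime config:

```
// Standalone sketch; in pallet-staking these are `T::AccountId` and `BalanceOf<T>`.
type AccountId = u64;
type Balance = u128;

/// Mirrors the relevant variants of pallet-staking's `RewardDestination`.
#[derive(Debug)]
enum RewardDestination {
    Staked,
    Stash,
    Controller,
    Account(AccountId),
    None,
}

/// The `Rewarded` event now carries the destination next to the amount.
struct Rewarded {
    stash: AccountId,
    dest: RewardDestination,
    amount: Balance,
}

fn main() {
    let event = Rewarded { stash: 42, dest: RewardDestination::Staked, amount: 100 };

    // With `dest` in the event, a consumer can tell a re-bonded (compounded)
    // payout apart from a plain transfer without querying the payee from storage.
    match event.dest {
        RewardDestination::Staked =>
            println!("stash {} re-bonded {}", event.stash, event.amount),
        other => println!("stash {} was paid {} to {:?}", event.stash, event.amount, other),
    }
}
```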
--- substrate/frame/staking/src/pallet/impls.rs | 23 ++++++++++++++------- substrate/frame/staking/src/pallet/mod.rs | 8 +++++-- substrate/frame/staking/src/tests.rs | 10 +++++++++ 3 files changed, 31 insertions(+), 10 deletions(-) diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs index e0f5c9558781..3d9b1157ca87 100644 --- a/substrate/frame/staking/src/pallet/impls.rs +++ b/substrate/frame/staking/src/pallet/impls.rs @@ -236,11 +236,12 @@ impl Pallet { let mut total_imbalance = PositiveImbalanceOf::::zero(); // We can now make total validator payout: - if let Some(imbalance) = + if let Some((imbalance, dest)) = Self::make_payout(&ledger.stash, validator_staking_payout + validator_commission_payout) { Self::deposit_event(Event::::Rewarded { stash: ledger.stash, + dest, amount: imbalance.peek(), }); total_imbalance.subsume(imbalance); @@ -259,11 +260,14 @@ impl Pallet { let nominator_reward: BalanceOf = nominator_exposure_part * validator_leftover_payout; // We can now make nominator payout: - if let Some(imbalance) = Self::make_payout(&nominator.who, nominator_reward) { + if let Some((imbalance, dest)) = Self::make_payout(&nominator.who, nominator_reward) { // Note: this logic does not count payouts for `RewardDestination::None`. nominator_payout_count += 1; - let e = - Event::::Rewarded { stash: nominator.who.clone(), amount: imbalance.peek() }; + let e = Event::::Rewarded { + stash: nominator.who.clone(), + dest, + amount: imbalance.peek(), + }; Self::deposit_event(e); total_imbalance.subsume(imbalance); } @@ -293,9 +297,11 @@ impl Pallet { /// Actually make a payment to a staker. This uses the currency's reward function /// to pay the right payee for the given staker account. - fn make_payout(stash: &T::AccountId, amount: BalanceOf) -> Option> { - let dest = Self::payee(stash); - match dest { + fn make_payout( + stash: &T::AccountId, + amount: BalanceOf, + ) -> Option<(PositiveImbalanceOf, RewardDestination)> { + let maybe_imbalance = match Self::payee(stash) { RewardDestination::Controller => Self::bonded(stash) .map(|controller| T::Currency::deposit_creating(&controller, amount)), RewardDestination::Stash => T::Currency::deposit_into_existing(stash, amount).ok(), @@ -311,7 +317,8 @@ impl Pallet { RewardDestination::Account(dest_account) => Some(T::Currency::deposit_creating(&dest_account, amount)), RewardDestination::None => None, - } + }; + maybe_imbalance.map(|imbalance| (imbalance, Self::payee(stash))) } /// Plan a new session potentially trigger a new era. diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs index c6c75326b80c..0bcf932d90b4 100644 --- a/substrate/frame/staking/src/pallet/mod.rs +++ b/substrate/frame/staking/src/pallet/mod.rs @@ -664,8 +664,12 @@ pub mod pallet { /// The era payout has been set; the first balance is the validator-payout; the second is /// the remainder from the maximum amount of reward. EraPaid { era_index: EraIndex, validator_payout: BalanceOf, remainder: BalanceOf }, - /// The nominator has been rewarded by this amount. - Rewarded { stash: T::AccountId, amount: BalanceOf }, + /// The nominator has been rewarded by this amount to this destination. + Rewarded { + stash: T::AccountId, + dest: RewardDestination, + amount: BalanceOf, + }, /// A staker (validator or nominator) has been slashed by the given amount. 
 Slashed { staker: T::AccountId, amount: BalanceOf<T> },
 /// A slash for the given validator, for the given percentage of their stake, at the given
diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs
index fd7dabac74d8..78183cfde929 100644
--- a/substrate/frame/staking/src/tests.rs
+++ b/substrate/frame/staking/src/tests.rs
@@ -3759,6 +3759,16 @@ fn test_payout_stakers() {
 	);
 	assert!(RewardOnUnbalanceWasCalled::get());

+	// `Rewarded` events are being executed.
+	assert!(matches!(
+		staking_events_since_last_call().as_slice(),
+		&[
+			..,
+			Event::Rewarded { stash: 1037, dest: RewardDestination::Controller, amount: 108 },
+			Event::Rewarded { stash: 1036, dest: RewardDestination::Controller, amount: 108 }
+		]
+	));
+
 	// Top 64 nominators of validator 11 automatically paid out, including the validator
 	// Validator payout goes to controller.
 	assert!(Balances::free_balance(&11) > balance);

From 122086d3d582b1e86190507f3a377667e66c729c Mon Sep 17 00:00:00 2001
From: Vsevolod Stakhov
Date: Mon, 18 Sep 2023 21:32:19 +0100
Subject: [PATCH 23/69] Revert #1409 partially (#1603)

The futures channels that are used by default have a side effect in `Sender::clone`: every clone effectively increases the capacity of the bounded channel by one. This PR fixes the undesired removal of backpressure that was introduced by #1409. The issue was discovered by @sandreim during Versi testing; it must be treated as critical, and no release should ship without this reversion. This PR restores the original behaviour.
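To make the capacity effect concrete, here is a self-contained sketch against `futures::channel::mpsc` (the `futures` crate is its only dependency). It is an analogy for the subsystem senders used here, not the bridge's actual code; the buffer size and payloads are arbitrary:

```
use futures::channel::mpsc;

fn main() {
    // A bounded channel with a buffer of 0. On top of the buffer, every
    // `Sender` is granted one guaranteed slot, so the effective capacity
    // is `buffer + number_of_senders` and grows with each clone.
    let (tx, mut rx) = mpsc::channel::<u32>(0);

    let mut tx1 = tx.clone();
    let mut tx2 = tx.clone();
    drop(tx);

    // Both sends succeed despite the zero-sized buffer, because each clone
    // brought its own slot. Cloning a sender per message therefore silently
    // removes backpressure, which is the regression being reverted here.
    assert!(tx1.try_send(1).is_ok());
    assert!(tx2.try_send(2).is_ok());

    // A second send on the same sender does hit backpressure.
    assert!(tx1.try_send(3).is_err());

    assert_eq!(rx.try_next().unwrap(), Some(1));
    assert_eq!(rx.try_next().unwrap(), Some(2));
}
```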
---
 polkadot/node/network/bridge/src/rx/mod.rs | 63 +++------------------
 1 file changed, 8 insertions(+), 55 deletions(-)

diff --git a/polkadot/node/network/bridge/src/rx/mod.rs b/polkadot/node/network/bridge/src/rx/mod.rs
index e1125ebc904d..82c67061d9a5 100644
--- a/polkadot/node/network/bridge/src/rx/mod.rs
+++ b/polkadot/node/network/bridge/src/rx/mod.rs
@@ -20,10 +20,7 @@ use super::*;

 use always_assert::never;
 use bytes::Bytes;
-use futures::{
-	future::BoxFuture,
-	stream::{BoxStream, FuturesUnordered, StreamExt},
-};
+use futures::stream::{BoxStream, StreamExt};
 use parity_scale_codec::{Decode, DecodeAll};

 use sc_network::Event as NetworkEvent;
@@ -1044,65 +1041,21 @@ fn dispatch_collation_event_to_all_unbounded(
 	}
 }

-fn send_or_queue_validation_event<E, Sender>(
-	event: E,
-	sender: &mut Sender,
-	delayed_queue: &FuturesUnordered<BoxFuture<'static, ()>>,
-) where
-	E: Send + 'static,
-	Sender: overseer::NetworkBridgeRxSenderTrait + overseer::SubsystemSender<E>,
-{
-	match sender.try_send_message(event) {
-		Ok(()) => {},
-		Err(overseer::TrySendError::Full(event)) => {
-			let mut sender = sender.clone();
-			delayed_queue.push(Box::pin(async move {
-				sender.send_message(event).await;
-			}));
-		},
-		Err(overseer::TrySendError::Closed(_)) => {
-			panic!(
-				"NetworkBridgeRxSender is closed when trying to send event of type: {}",
-				std::any::type_name::<E>()
-			);
-		},
-	}
-}
-
 async fn dispatch_validation_events_to_all<I>(
 	events: I,
 	sender: &mut impl overseer::NetworkBridgeRxSenderTrait,
-	metrics: &Metrics,
+	_metrics: &Metrics,
 ) where
	I: IntoIterator<Item = NetworkBridgeEvent<net_protocol::VersionedValidationProtocol>>,
 	I::IntoIter: Send,
 {
-	let delayed_messages: FuturesUnordered<BoxFuture<'static, ()>> = FuturesUnordered::new();
-
-	// Fast path for sending events to subsystems, if any subsystem's queue is full, we hold
-	// the slow path future in the `delayed_messages` queue.
 	for event in events {
-		if let Ok(msg) = event.focus().map(StatementDistributionMessage::from) {
-			send_or_queue_validation_event(msg, sender, &delayed_messages);
-		}
-		if let Ok(msg) = event.focus().map(BitfieldDistributionMessage::from) {
-			send_or_queue_validation_event(msg, sender, &delayed_messages);
-		}
-		if let Ok(msg) = event.focus().map(ApprovalDistributionMessage::from) {
-			send_or_queue_validation_event(msg, sender, &delayed_messages);
-		}
-		if let Ok(msg) = event.focus().map(GossipSupportMessage::from) {
-			send_or_queue_validation_event(msg, sender, &delayed_messages);
-		}
-	}
-
-	let delayed_messages_count = delayed_messages.len();
-	metrics.on_delayed_rx_queue(delayed_messages_count);
-
-	if delayed_messages_count > 0 {
-		// Here we wait for all the delayed messages to be sent.
-		let _timer = metrics.time_delayed_rx_events(); // Dropped after `await` is completed
-		let _: Vec<()> = delayed_messages.collect().await;
+		sender
+			.send_messages(event.focus().map(StatementDistributionMessage::from))
+			.await;
+		sender.send_messages(event.focus().map(BitfieldDistributionMessage::from)).await;
+		sender.send_messages(event.focus().map(ApprovalDistributionMessage::from)).await;
+		sender.send_messages(event.focus().map(GossipSupportMessage::from)).await;
 	}
 }

From 6079b6dd3aaba56ef257111fda74a57a800f16d0 Mon Sep 17 00:00:00 2001
From: Bastian Köcher
Date: Tue, 19 Sep 2023 12:07:21 +0100
Subject: [PATCH 24/69] Remove Polkadot & Kusama native runtime (#1304)

This pull request removes the Polkadot and Kusama native runtimes from the polkadot node. This has a few implications:

- There are no more kusama-dev/polkadot-dev chain specs available. We will need to write some tooling in the fellowship repo to provide them easily.
- The try-runtime job for Polkadot & Kusama is no longer available, as it relied on the dev chain specs.
- Certain benchmarking commands will not work until we migrate them to use a runtime API.
- Some crates in utils still depend on the polkadot/kusama native runtimes and will also need to be fixed.
Port of: https://github.com/paritytech/polkadot/pull/7467 --- .gitlab/pipeline/short-benchmarks.yml | 14 +- Cargo.lock | 5 - polkadot/Cargo.toml | 6 +- polkadot/cli/Cargo.toml | 6 +- polkadot/cli/src/command.rs | 47 +- polkadot/cli/src/error.rs | 6 + polkadot/node/malus/Cargo.toml | 2 +- polkadot/node/service/Cargo.toml | 21 +- polkadot/node/service/src/benchmarking.rs | 166 +---- polkadot/node/service/src/chain_spec.rs | 667 +----------------- polkadot/node/service/src/lib.rs | 6 +- polkadot/node/test/service/Cargo.toml | 1 + polkadot/node/test/service/src/chain_spec.rs | 14 +- polkadot/tests/benchmark_block.rs | 31 +- polkadot/tests/benchmark_extrinsic.rs | 2 +- polkadot/tests/benchmark_overhead.rs | 10 +- polkadot/tests/common.rs | 39 +- polkadot/tests/purge_chain_works.rs | 189 +++-- .../tests/running_the_node_and_interrupt.rs | 17 +- 19 files changed, 193 insertions(+), 1056 deletions(-) diff --git a/.gitlab/pipeline/short-benchmarks.yml b/.gitlab/pipeline/short-benchmarks.yml index 7b7704ee66d8..5b0ea276773d 100644 --- a/.gitlab/pipeline/short-benchmarks.yml +++ b/.gitlab/pipeline/short-benchmarks.yml @@ -5,7 +5,7 @@ # run short-benchmarks for relay chain runtimes from polkadot -short-benchmark-polkadot: &short-bench +short-benchmark-westend: &short-bench stage: short-benchmarks extends: - .docker-env @@ -14,22 +14,12 @@ short-benchmark-polkadot: &short-bench - job: build-short-benchmark artifacts: true variables: - RUNTIME: polkadot + RUNTIME: westend tags: - benchmark script: - ./artifacts/polkadot benchmark pallet --chain $RUNTIME-dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 -short-benchmark-kusama: - <<: *short-bench - variables: - RUNTIME: kusama - -short-benchmark-westend: - <<: *short-bench - variables: - RUNTIME: westend - # run short-benchmarks for system parachain runtimes from cumulus .short-benchmark-cumulus: &short-bench-cumulus diff --git a/Cargo.lock b/Cargo.lock index 670d9c9e771c..9dfff599300d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12779,7 +12779,6 @@ dependencies = [ "futures", "hex-literal", "is_executable", - "kusama-runtime-constants", "kvdb", "kvdb-rocksdb", "log", @@ -12825,9 +12824,6 @@ dependencies = [ "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-rpc", - "polkadot-runtime", - "polkadot-runtime-common", - "polkadot-runtime-constants", "polkadot-runtime-parachains", "polkadot-statement-distribution", "polkadot-test-client", @@ -12883,7 +12879,6 @@ dependencies = [ "sp-transaction-pool", "sp-version", "sp-weights", - "staging-kusama-runtime", "substrate-prometheus-endpoint", "tempfile", "thiserror", diff --git a/polkadot/Cargo.toml b/polkadot/Cargo.toml index e4494b1abb20..aacc6ad405cc 100644 --- a/polkadot/Cargo.toml +++ b/polkadot/Cargo.toml @@ -26,11 +26,7 @@ color-eyre = { version = "0.6.1", default-features = false } tikv-jemallocator = { version = "0.5.0", optional = true } # Crates in our workspace, defined as dependencies so we can pass them feature flags. 
-polkadot-cli = { path = "cli", features = [ - "kusama-native", - "westend-native", - "rococo-native", -] } +polkadot-cli = { path = "cli", features = [ "westend-native", "rococo-native" ] } polkadot-node-core-pvf = { path = "node/core/pvf" } polkadot-node-core-pvf-prepare-worker = { path = "node/core/pvf/prepare-worker" } polkadot-overseer = { path = "node/overseer" } diff --git a/polkadot/cli/Cargo.toml b/polkadot/cli/Cargo.toml index a9631fb9a7da..83fd64759b7f 100644 --- a/polkadot/cli/Cargo.toml +++ b/polkadot/cli/Cargo.toml @@ -67,11 +67,7 @@ fast-runtime = [ "service/fast-runtime" ] pyroscope = [ "pyro", "pyroscope_pprofrs" ] hostperfcheck = [ "polkadot-performance-test" ] -# Configure the native runtimes to use. Polkadot is enabled by default. -# -# Validators require the native runtime currently -polkadot-native = [ "service/polkadot-native" ] -kusama-native = [ "service/kusama-native" ] +# Configure the native runtimes to use. westend-native = [ "service/westend-native" ] rococo-native = [ "service/rococo-native" ] diff --git a/polkadot/cli/src/command.rs b/polkadot/cli/src/command.rs index a081e1b5181b..19437ce87530 100644 --- a/polkadot/cli/src/command.rs +++ b/polkadot/cli/src/command.rs @@ -34,12 +34,6 @@ pub use polkadot_performance_test::PerfCheckError; #[cfg(feature = "pyroscope")] use pyroscope_pprofrs::{pprof_backend, PprofConfig}; -impl From for Error { - fn from(s: String) -> Self { - Self::Other(s) - } -} - type Result = std::result::Result; fn get_exec_name() -> Option { @@ -92,29 +86,20 @@ impl SubstrateCli for Cli { }; Ok(match id { "kusama" => Box::new(service::chain_spec::kusama_config()?), - #[cfg(feature = "kusama-native")] - "kusama-dev" => Box::new(service::chain_spec::kusama_development_config()?), - #[cfg(feature = "kusama-native")] - "kusama-local" => Box::new(service::chain_spec::kusama_local_testnet_config()?), - #[cfg(feature = "kusama-native")] - "kusama-staging" => Box::new(service::chain_spec::kusama_staging_testnet_config()?), - #[cfg(not(feature = "kusama-native"))] name if name.starts_with("kusama-") && !name.ends_with(".json") => - Err(format!("`{}` only supported with `kusama-native` feature enabled.", name))?, + Err(format!("`{name}` is not supported anymore as the kusama native runtime no longer part of the node."))?, "polkadot" => Box::new(service::chain_spec::polkadot_config()?), - #[cfg(feature = "polkadot-native")] - "polkadot-dev" | "dev" => Box::new(service::chain_spec::polkadot_development_config()?), - #[cfg(feature = "polkadot-native")] - "polkadot-local" => Box::new(service::chain_spec::polkadot_local_testnet_config()?), + name if name.starts_with("polkadot-") && !name.ends_with(".json") => + Err(format!("`{name}` is not supported anymore as the polkadot native runtime no longer part of the node."))?, "rococo" => Box::new(service::chain_spec::rococo_config()?), #[cfg(feature = "rococo-native")] - "rococo-dev" => Box::new(service::chain_spec::rococo_development_config()?), + "dev" | "rococo-dev" => Box::new(service::chain_spec::rococo_development_config()?), #[cfg(feature = "rococo-native")] "rococo-local" => Box::new(service::chain_spec::rococo_local_testnet_config()?), #[cfg(feature = "rococo-native")] "rococo-staging" => Box::new(service::chain_spec::rococo_staging_testnet_config()?), #[cfg(not(feature = "rococo-native"))] - name if name.starts_with("rococo-") && !name.ends_with(".json") => + name if name.starts_with("rococo-") && !name.ends_with(".json") || name == "dev" => Err(format!("`{}` only supported with `rococo-native` 
feature enabled.", name))?, "westend" => Box::new(service::chain_spec::westend_config()?), #[cfg(feature = "westend-native")] @@ -146,7 +131,7 @@ impl SubstrateCli for Cli { path => { let path = std::path::PathBuf::from(path); - let chain_spec = Box::new(service::PolkadotChainSpec::from_json_file(path.clone())?) + let chain_spec = Box::new(service::GenericChainSpec::from_json_file(path.clone())?) as Box; // When `force_*` is given or the file name starts with the name of one of the known @@ -158,7 +143,7 @@ impl SubstrateCli for Cli { { Box::new(service::RococoChainSpec::from_json_file(path)?) } else if self.run.force_kusama || chain_spec.is_kusama() { - Box::new(service::KusamaChainSpec::from_json_file(path)?) + Box::new(service::GenericChainSpec::from_json_file(path)?) } else if self.run.force_westend || chain_spec.is_westend() { Box::new(service::WestendChainSpec::from_json_file(path)?) } else { @@ -182,17 +167,6 @@ fn set_default_ss58_version(spec: &Box) { sp_core::crypto::set_default_ss58_version(ss58_version); } -const DEV_ONLY_ERROR_PATTERN: &'static str = - "can only use subcommand with --chain [polkadot-dev, kusama-dev, westend-dev, rococo-dev, wococo-dev], got "; - -fn ensure_dev(spec: &Box) -> std::result::Result<(), String> { - if spec.is_dev() { - Ok(()) - } else { - Err(format!("{}{}", DEV_ONLY_ERROR_PATTERN, spec.id())) - } -} - /// Runs performance checks. /// Should only be used in release build since the check would take too much time otherwise. fn host_perf_check() -> Result<()> { @@ -471,8 +445,7 @@ pub fn run() -> Result<()> { cmd.run(client.clone()).map_err(Error::SubstrateCli) }), // These commands are very similar and can be handled in nearly the same way. - BenchmarkCmd::Extrinsic(_) | BenchmarkCmd::Overhead(_) => { - ensure_dev(chain_spec).map_err(Error::Other)?; + BenchmarkCmd::Extrinsic(_) | BenchmarkCmd::Overhead(_) => runner.sync_run(|mut config| { let (client, _, _, _) = service::new_chain_ops(&mut config, None)?; let header = client.header(client.info().genesis_hash).unwrap().unwrap(); @@ -508,11 +481,9 @@ pub fn run() -> Result<()> { .map_err(Error::SubstrateCli), _ => unreachable!("Ensured by the outside match; qed"), } - }) - }, + }), BenchmarkCmd::Pallet(cmd) => { set_default_ss58_version(chain_spec); - ensure_dev(chain_spec).map_err(Error::Other)?; if cfg!(feature = "runtime-benchmarks") { runner.sync_run(|config| { diff --git a/polkadot/cli/src/error.rs b/polkadot/cli/src/error.rs index a4591e2508c9..62ee4d139074 100644 --- a/polkadot/cli/src/error.rs +++ b/polkadot/cli/src/error.rs @@ -58,3 +58,9 @@ pub enum Error { #[error("This subcommand is only available when compiled with `{feature}`")] FeatureNotEnabled { feature: &'static str }, } + +impl From for Error { + fn from(s: String) -> Self { + Self::Other(s) + } +} diff --git a/polkadot/node/malus/Cargo.toml b/polkadot/node/malus/Cargo.toml index 08d203281cff..9fa22aef4dcc 100644 --- a/polkadot/node/malus/Cargo.toml +++ b/polkadot/node/malus/Cargo.toml @@ -26,7 +26,7 @@ path = "../../src/bin/prepare-worker.rs" doc = false [dependencies] -polkadot-cli = { path = "../../cli", features = [ "malus", "rococo-native", "kusama-native", "westend-native", "polkadot-native" ] } +polkadot-cli = { path = "../../cli", features = [ "malus", "rococo-native", "westend-native" ] } polkadot-node-subsystem = { path = "../subsystem" } polkadot-node-subsystem-util = { path = "../subsystem-util" } polkadot-node-subsystem-types = { path = "../subsystem-types" } diff --git a/polkadot/node/service/Cargo.toml 
b/polkadot/node/service/Cargo.toml index 667f9b86d3e4..ee092e277332 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -103,17 +103,12 @@ polkadot-node-subsystem-util = { path = "../subsystem-util" } polkadot-node-subsystem-types = { path = "../subsystem-types" } polkadot-runtime-parachains = { path = "../../runtime/parachains" } polkadot-node-network-protocol = { path = "../network/protocol" } -polkadot-runtime-common = { path = "../../runtime/common" } # Polkadot Runtime Constants -polkadot-runtime-constants = { path = "../../runtime/polkadot/constants", optional = true } -kusama-runtime-constants = { path = "../../runtime/kusama/constants", optional = true } rococo-runtime-constants = { path = "../../runtime/rococo/constants", optional = true } westend-runtime-constants = { path = "../../runtime/westend/constants", optional = true } # Polkadot Runtimes -polkadot-runtime = { path = "../../runtime/polkadot", optional = true } -kusama-runtime = { package = "staging-kusama-runtime", path = "../../runtime/kusama", optional = true } westend-runtime = { path = "../../runtime/westend", optional = true } rococo-runtime = { path = "../../runtime/rococo", optional = true } @@ -183,11 +178,7 @@ full-node = [ "polkadot-statement-distribution", ] -# Configure the native runtimes to use. Polkadot is enabled by default. -# -# Validators require the native runtime currently -polkadot-native = [ "polkadot-runtime", "polkadot-runtime-constants" ] -kusama-native = [ "kusama-runtime", "kusama-runtime-constants" ] +# Configure the native runtimes to use. westend-native = [ "westend-runtime", "westend-runtime-constants" ] rococo-native = [ "rococo-runtime", "rococo-runtime-constants" ] @@ -196,15 +187,12 @@ runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", - "kusama-runtime?/runtime-benchmarks", "pallet-babe/runtime-benchmarks", "pallet-im-online/runtime-benchmarks", "pallet-staking/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", - "polkadot-runtime-common/runtime-benchmarks", "polkadot-runtime-parachains/runtime-benchmarks", - "polkadot-runtime?/runtime-benchmarks", "polkadot-test-client/runtime-benchmarks", "rococo-runtime?/runtime-benchmarks", "sc-client-db/runtime-benchmarks", @@ -215,30 +203,23 @@ runtime-benchmarks = [ try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", - "kusama-runtime?/try-runtime", "pallet-babe/try-runtime", "pallet-im-online/try-runtime", "pallet-staking/try-runtime", "pallet-transaction-payment/try-runtime", - "polkadot-runtime-common/try-runtime", "polkadot-runtime-parachains/try-runtime", - "polkadot-runtime?/try-runtime", "rococo-runtime?/try-runtime", "sp-runtime/try-runtime", "westend-runtime?/try-runtime", ] fast-runtime = [ - "kusama-runtime?/fast-runtime", - "polkadot-runtime?/fast-runtime", "rococo-runtime?/fast-runtime", "westend-runtime?/fast-runtime", ] malus = [ "full-node" ] runtime-metrics = [ - "kusama-runtime?/runtime-metrics", "polkadot-runtime-parachains/runtime-metrics", - "polkadot-runtime?/runtime-metrics", "rococo-runtime?/runtime-metrics", "westend-runtime?/runtime-metrics", ] diff --git a/polkadot/node/service/src/benchmarking.rs b/polkadot/node/service/src/benchmarking.rs index cfe1c873c055..400daf1aee34 100644 --- a/polkadot/node/service/src/benchmarking.rs +++ b/polkadot/node/service/src/benchmarking.rs @@ -34,36 +34,8 @@ macro_rules! 
identify_chain { $generic_code:expr $(,)* ) => { match $chain { - Chain::Polkadot => { - #[cfg(feature = "polkadot-native")] - { - use polkadot_runtime as runtime; - - let call = $generic_code; - - Ok(polkadot_sign_call(call, $nonce, $current_block, $period, $genesis, $signer)) - } - - #[cfg(not(feature = "polkadot-native"))] - { - Err("`polkadot-native` feature not enabled") - } - }, - Chain::Kusama => { - #[cfg(feature = "kusama-native")] - { - use kusama_runtime as runtime; - - let call = $generic_code; - - Ok(kusama_sign_call(call, $nonce, $current_block, $period, $genesis, $signer)) - } - - #[cfg(not(feature = "kusama-native"))] - { - Err("`kusama-native` feature not enabled") - } - }, + Chain::Polkadot => Err("Polkadot runtimes are currently not supported"), + Chain::Kusama => Err("Kusama runtimes are currently not supported"), Chain::Rococo => { #[cfg(feature = "rococo-native")] { @@ -91,16 +63,18 @@ macro_rules! identify_chain { #[cfg(not(feature = "westend-native"))] { - let _ = $nonce; - let _ = $current_block; - let _ = $period; - let _ = $genesis; - let _ = $signer; - Err("`westend-native` feature not enabled") } }, - Chain::Unknown => Err("Unknown chain"), + Chain::Unknown => { + let _ = $nonce; + let _ = $current_block; + let _ = $period; + let _ = $genesis; + let _ = $signer; + + Err("Unknown chain") + }, } }; } @@ -130,10 +104,8 @@ impl frame_benchmarking_cli::ExtrinsicBuilder for RemarkBuilder { } fn build(&self, nonce: u32) -> std::result::Result { - let period = polkadot_runtime_common::BlockHashCount::get() - .checked_next_power_of_two() - .map(|c| c / 2) - .unwrap_or(2) as u64; + // We apply the extrinsic directly, so let's take some random period. + let period = 128; let genesis = self.client.usage_info().chain.best_hash; let signer = Sr25519Keyring::Bob.pair(); let current_block = 0; @@ -181,10 +153,8 @@ impl frame_benchmarking_cli::ExtrinsicBuilder for TransferKeepAliveBuilder { fn build(&self, nonce: u32) -> std::result::Result { let signer = Sr25519Keyring::Bob.pair(); - let period = polkadot_runtime_common::BlockHashCount::get() - .checked_next_power_of_two() - .map(|c| c / 2) - .unwrap_or(2) as u64; + // We apply the extrinsic directly, so let's take some random period. 
+ let period = 128; let genesis = self.client.usage_info().chain.best_hash; let current_block = 0; let _dest = self.dest.clone(); @@ -206,60 +176,6 @@ impl frame_benchmarking_cli::ExtrinsicBuilder for TransferKeepAliveBuilder { } } -#[cfg(feature = "polkadot-native")] -fn polkadot_sign_call( - call: polkadot_runtime::RuntimeCall, - nonce: u32, - current_block: u64, - period: u64, - genesis: sp_core::H256, - acc: sp_core::sr25519::Pair, -) -> OpaqueExtrinsic { - use codec::Encode; - use polkadot_runtime as runtime; - use sp_core::Pair; - - let extra: runtime::SignedExtra = ( - frame_system::CheckNonZeroSender::::new(), - frame_system::CheckSpecVersion::::new(), - frame_system::CheckTxVersion::::new(), - frame_system::CheckGenesis::::new(), - frame_system::CheckMortality::::from(sp_runtime::generic::Era::mortal( - period, - current_block, - )), - frame_system::CheckNonce::::from(nonce), - frame_system::CheckWeight::::new(), - pallet_transaction_payment::ChargeTransactionPayment::::from(0), - polkadot_runtime_common::claims::PrevalidateAttests::::new(), - ); - - let payload = runtime::SignedPayload::from_raw( - call.clone(), - extra.clone(), - ( - (), - runtime::VERSION.spec_version, - runtime::VERSION.transaction_version, - genesis, - genesis, - (), - (), - (), - (), - ), - ); - - let signature = payload.using_encoded(|p| acc.sign(p)); - runtime::UncheckedExtrinsic::new_signed( - call, - sp_runtime::AccountId32::from(acc.public()).into(), - polkadot_core_primitives::Signature::Sr25519(signature.clone()), - extra, - ) - .into() -} - #[cfg(feature = "westend-native")] fn westend_sign_call( call: westend_runtime::RuntimeCall, @@ -312,58 +228,6 @@ fn westend_sign_call( .into() } -#[cfg(feature = "kusama-native")] -fn kusama_sign_call( - call: kusama_runtime::RuntimeCall, - nonce: u32, - current_block: u64, - period: u64, - genesis: sp_core::H256, - acc: sp_core::sr25519::Pair, -) -> OpaqueExtrinsic { - use codec::Encode; - use kusama_runtime as runtime; - use sp_core::Pair; - - let extra: runtime::SignedExtra = ( - frame_system::CheckNonZeroSender::::new(), - frame_system::CheckSpecVersion::::new(), - frame_system::CheckTxVersion::::new(), - frame_system::CheckGenesis::::new(), - frame_system::CheckMortality::::from(sp_runtime::generic::Era::mortal( - period, - current_block, - )), - frame_system::CheckNonce::::from(nonce), - frame_system::CheckWeight::::new(), - pallet_transaction_payment::ChargeTransactionPayment::::from(0), - ); - - let payload = runtime::SignedPayload::from_raw( - call.clone(), - extra.clone(), - ( - (), - runtime::VERSION.spec_version, - runtime::VERSION.transaction_version, - genesis, - genesis, - (), - (), - (), - ), - ); - - let signature = payload.using_encoded(|p| acc.sign(p)); - runtime::UncheckedExtrinsic::new_signed( - call, - sp_runtime::AccountId32::from(acc.public()).into(), - polkadot_core_primitives::Signature::Sr25519(signature.clone()), - extra, - ) - .into() -} - #[cfg(feature = "rococo-native")] fn rococo_sign_call( call: rococo_runtime::RuntimeCall, diff --git a/polkadot/node/service/src/chain_spec.rs b/polkadot/node/service/src/chain_spec.rs index 1e5aaa807b81..7fd9ce61c95e 100644 --- a/polkadot/node/service/src/chain_spec.rs +++ b/polkadot/node/service/src/chain_spec.rs @@ -18,22 +18,10 @@ use beefy_primitives::ecdsa_crypto::AuthorityId as BeefyId; use grandpa::AuthorityId as GrandpaId; -#[cfg(feature = "kusama-native")] -use kusama_runtime as kusama; -#[cfg(feature = "kusama-native")] -use kusama_runtime_constants::currency::UNITS as KSM; use 
pallet_im_online::sr25519::AuthorityId as ImOnlineId; -#[cfg(any( - feature = "polkadot-native", - feature = "kusama-native", - feature = "westend-native", -))] +#[cfg(feature = "westend-native")] use pallet_staking::Forcing; use polkadot_primitives::{AccountId, AccountPublic, AssignmentId, ValidatorId}; -#[cfg(feature = "polkadot-native")] -use polkadot_runtime as polkadot; -#[cfg(feature = "polkadot-native")] -use polkadot_runtime_constants::currency::UNITS as DOT; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_consensus_babe::AuthorityId as BabeId; @@ -42,48 +30,27 @@ use rococo_runtime as rococo; #[cfg(feature = "rococo-native")] use rococo_runtime_constants::currency::UNITS as ROC; use sc_chain_spec::ChainSpecExtension; -#[cfg(any( - feature = "polkadot-native", - feature = "kusama-native", - feature = "westend-native", - feature = "rococo-native" -))] +#[cfg(any(feature = "westend-native", feature = "rococo-native"))] use sc_chain_spec::ChainType; use serde::{Deserialize, Serialize}; use sp_core::{sr25519, Pair, Public}; use sp_runtime::traits::IdentifyAccount; -#[cfg(any( - feature = "polkadot-native", - feature = "kusama-native", - feature = "westend-native", -))] +#[cfg(feature = "westend-native")] use sp_runtime::Perbill; -#[cfg(any( - feature = "polkadot-native", - feature = "kusama-native", - feature = "westend-native", - feature = "rococo-native" -))] +#[cfg(any(feature = "westend-native", feature = "rococo-native"))] use telemetry::TelemetryEndpoints; #[cfg(feature = "westend-native")] use westend_runtime as westend; #[cfg(feature = "westend-native")] use westend_runtime_constants::currency::UNITS as WND; -#[cfg(feature = "kusama-native")] -const KUSAMA_STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/"; #[cfg(feature = "westend-native")] const WESTEND_STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/"; #[cfg(feature = "rococo-native")] const ROCOCO_STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/"; #[cfg(feature = "rococo-native")] const VERSI_STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/"; -#[cfg(any( - feature = "polkadot-native", - feature = "kusama-native", - feature = "westend-native", - feature = "rococo-native" -))] +#[cfg(any(feature = "westend-native", feature = "rococo-native"))] const DEFAULT_PROTOCOL_ID: &str = "dot"; /// Node `ChainSpec` extensions. @@ -103,25 +70,8 @@ pub struct Extensions { pub light_sync_state: sc_sync_state_rpc::LightSyncStateExtension, } -/// The `ChainSpec` parameterized for the polkadot runtime. -#[cfg(feature = "polkadot-native")] -pub type PolkadotChainSpec = service::GenericChainSpec; - -// Dummy chain spec, in case when we don't have the native runtime. -pub type DummyChainSpec = service::GenericChainSpec<(), Extensions>; - -// Dummy chain spec, but that is fine when we don't have the native runtime. -#[cfg(not(feature = "polkadot-native"))] -pub type PolkadotChainSpec = DummyChainSpec; - -/// The `ChainSpec` parameterized for the kusama runtime. -#[cfg(feature = "kusama-native")] -pub type KusamaChainSpec = service::GenericChainSpec; - -/// The `ChainSpec` parameterized for the kusama runtime. -// Dummy chain spec, but that is fine when we don't have the native runtime. -#[cfg(not(feature = "kusama-native"))] -pub type KusamaChainSpec = DummyChainSpec; +// Generic chain spec, in case when we don't have the native runtime. 
+pub type GenericChainSpec = service::GenericChainSpec<(), Extensions>; /// The `ChainSpec` parameterized for the westend runtime. #[cfg(feature = "westend-native")] @@ -130,7 +80,7 @@ pub type WestendChainSpec = service::GenericChainSpec Result { - PolkadotChainSpec::from_json_bytes(&include_bytes!("../chain-specs/polkadot.json")[..]) +pub fn polkadot_config() -> Result { + GenericChainSpec::from_json_bytes(&include_bytes!("../chain-specs/polkadot.json")[..]) } -pub fn kusama_config() -> Result { - KusamaChainSpec::from_json_bytes(&include_bytes!("../chain-specs/kusama.json")[..]) +pub fn kusama_config() -> Result { + GenericChainSpec::from_json_bytes(&include_bytes!("../chain-specs/kusama.json")[..]) } pub fn westend_config() -> Result { @@ -192,12 +142,7 @@ pub fn wococo_config() -> Result { } /// The default parachains host configuration. -#[cfg(any( - feature = "rococo-native", - feature = "kusama-native", - feature = "westend-native", - feature = "polkadot-native" -))] +#[cfg(any(feature = "rococo-native", feature = "westend-native",))] fn default_parachains_host_configuration( ) -> polkadot_runtime_parachains::configuration::HostConfiguration { @@ -236,57 +181,12 @@ fn default_parachains_host_configuration( } } -#[cfg(any( - feature = "rococo-native", - feature = "kusama-native", - feature = "westend-native", - feature = "polkadot-native" -))] +#[cfg(any(feature = "rococo-native", feature = "westend-native",))] #[test] fn default_parachains_host_configuration_is_consistent() { default_parachains_host_configuration().panic_if_not_consistent(); } -#[cfg(feature = "polkadot-native")] -fn polkadot_session_keys( - babe: BabeId, - grandpa: GrandpaId, - im_online: ImOnlineId, - para_validator: ValidatorId, - para_assignment: AssignmentId, - authority_discovery: AuthorityDiscoveryId, -) -> polkadot::SessionKeys { - polkadot::SessionKeys { - babe, - grandpa, - im_online, - para_validator, - para_assignment, - authority_discovery, - } -} - -#[cfg(feature = "kusama-native")] -fn kusama_session_keys( - babe: BabeId, - grandpa: GrandpaId, - im_online: ImOnlineId, - para_validator: ValidatorId, - para_assignment: AssignmentId, - authority_discovery: AuthorityDiscoveryId, - beefy: BeefyId, -) -> kusama::SessionKeys { - kusama::SessionKeys { - babe, - grandpa, - im_online, - para_validator, - para_assignment, - authority_discovery, - beefy, - } -} - #[cfg(feature = "westend-native")] fn westend_session_keys( babe: BabeId, @@ -539,214 +439,6 @@ fn westend_staging_testnet_config_genesis(wasm_binary: &[u8]) -> westend::Runtim } } -#[cfg(feature = "kusama-native")] -fn kusama_staging_testnet_config_genesis(wasm_binary: &[u8]) -> kusama::RuntimeGenesisConfig { - use hex_literal::hex; - use sp_core::crypto::UncheckedInto; - - // Following keys are used in genesis config for development chains. - // DO NOT use them in production chains as the secret seed is public. 
- // - // SECRET_SEED="explain impose opinion genius bar parrot erupt panther surround best expire - // album" subkey inspect -n kusama "$SECRET_SEED" - let endowed_accounts = vec![ - // FLN5cfhF7VCGJYefjPQJR2V6WwbfRmb9ozTwLAzBNeQQG6y - hex!["7a0fe424217ed176da7abf12e08198db0d0949298e1372c80a1930cb6dc21d3e"].into(), - ]; - - // SECRET=$SECRET_SEED ./scripts/prepare-test-net.sh 4 - let initial_authorities: Vec<( - AccountId, - AccountId, - BabeId, - GrandpaId, - ImOnlineId, - ValidatorId, - AssignmentId, - AuthorityDiscoveryId, - BeefyId, - )> = vec![ - ( - //5D5EsvSJf3KR3WHeZNG8rETdW6homig1cGHezspFt1P4o7sL - hex!["2ca4a9582244a3356a0d96e59d71f7e4d12aa88bca6d46f360ef11f6487cab1f"].into(), - //5Ev6RixvmK62UQE2PW19MPdLsYT4Nomwj85HKPdbnRECbDYh - hex!["7e237806f642b7f45f70ec45fbc41034516c8e5561bae2a62cd287129e1d0712"].into(), - //5GbjzK1uYVo6v1SaYhTeK3dbYy2GN9X4K5iwRkHEQ9eLS3We - hex!["c89cb7afc47ec0b5aac5824e5338a62959c92978167d3f841491836746e70b3d"] - .unchecked_into(), - //5GFz3YFW8QzEUsWhRjJzvDP7e5X5tPf5U12vUw32R8oJVgqb - hex!["b98b200021a608148f9817aeb553596b6968a5aa61b6d320c522f520ecc9cf9c"] - .unchecked_into(), - //5GzaFD8YsqnP5FYe5ijA9M4LQvzU9TPJmnBGdpuoqEvR1gQC - hex!["da0690438c0dd7a9aa26e03c9f1deaa58ba2b88d0bec0954b06478632164a401"] - .unchecked_into(), - //5CkZPtNy61PtbJpLqnjNFmbi1qukGkFdqFr5GKduSEthJ1cd - hex!["1e6554d35f6f17a37176c71801426204d6df400a1869114e4f00564b35d31150"] - .unchecked_into(), - //5CodnwweaYA1zB4QhdP4YVYFWnuZHY6W7zkN1NCRqJ9wZhap - hex!["20bddf09b1d0a2d93bafeb87fe19eb5bd59950c174f23a141a6d99736a5e700d"] - .unchecked_into(), - //5E7TSvNAP6QeJNeckdvYvADpHsx7v6aHXtGoQv5R2N1V3hEB - hex!["5a91b2546f1aac1c388eb0739c83e42d9972884d74360200ce32b7595bc65a04"] - .unchecked_into(), - //5GsoKeoM2HmjXPsdCua4oPu3Ms1Jgu4HbSnB81Lisa2tBFZp - hex!["02fd1e7e8455ab888ad054bbec7bc19409e6b1a5bb0300feefc6b58e60efae7e85"] - .unchecked_into(), - ), - ( - //5HMtKQuL2GQ7YvLBTh3vqFJEpkZW19sQh2X2mcUzAwBAe885 - hex!["ea478deab0ebfbeab7342febc236a9f1af5129ca0083fa25e6b0cf6a998d8354"].into(), - //5EFD5pLC3w5NFEcmQ6rGw9dUZ6fTSjWJemsvJZuaj7Qmq2WT - hex!["607b4e88129804eca8cd6fa26cbe2dd36667130e2a061050b08d9015871f4263"].into(), - //5DFztsnvC9hN85j5AP116atcnzFhAxnbzPodEp1AsYq1LYXu - hex!["34d949c39fae5801ba328ac6d0ddc76e469b7d5a4372a4a0d94f6aad6f9c1600"] - .unchecked_into(), - //5EZJNJ4j1eEEwCWusg7nYsZxTYBwoTH2drszxRqgMBTgNxMW - hex!["6e47830dcfc1f2b53a1b5db3f76702fc2760c1cc119119aceb00a57ec6658465"] - .unchecked_into(), - //5Dts3SrgDQMY9XCzKeQrxYSTh5MphPek994qkDCDk5c4neeF - hex!["50f6ef6326cd61ac500f167493e435f1204ce1d66ad18024bc5810d09673785e"] - .unchecked_into(), - //5DMKT99825TvA8F1yCQvE1ZcKTqg8T8Ad1KEjN6EuVpz4E6w - hex!["38e7fb2f6a1dcec73d93b07a0dc7cff1f9a9cc32cde8eb1e6ea1782f5316b431"] - .unchecked_into(), - //5EestuSehdMsWsBZ1hXCVo5YQiYiTPJwtV281x5fjUVtaqtP - hex!["72889a7b6ada28c3bd05a5a7298437f01d6d3270559768d16275efaf11864c0a"] - .unchecked_into(), - //5FNd5EabUbcReXEPwY9aASJMwSqyiic9w1Qt23YxNXj3dzbi - hex!["925f03f6211c68377987b0f78cd02aa882ad1fa9cc00c01fe6ce68e14c23340d"] - .unchecked_into(), - //5DxhuqfovpooTn8yH7WJGFjYw3pQxSEN9y9kvYUiGguHAj9D - hex!["030e77039e470ccdec7fe23dbc41c66f1c187ec8345e8919d3dc1250d975c3ce82"] - .unchecked_into(), - ), - ( - //5DAiYTKQ5KxwLncfNoTAH58dXBk2oDcQxtAXyDwMdKGLpGeY - hex!["30d203d942c1d056245b51e466a50b684f172a37c1cdde678f5346a0b3dbcd52"].into(), - //5Dq778qqNiAsjdF4qLVdkSBR8SftJKU35nyeBnkztRgniVhV - hex!["4e194bbafeec45647b2679e6b615b2a879d2e74fe706921930509ab3c9dbb22d"].into(), - //5E6iENoE1tXJUd7PkopQ8uqejg6xhPpqAnsVjS3hAQHWK1tm - 
hex!["5a0037b6bfc5e879ba5ef480ac29c59a12873854159686899082f41950ffd472"] - .unchecked_into(), - //5F8Dtgoc5dCaLAGYtaDqQUDg91fPQUynd497Fvhor8SYMdXp - hex!["87638aef8ab75db093150a6677c0919292ff66fc17f9f006a71fd0618415e164"] - .unchecked_into(), - //5EKsYx6Wj1Qg7LLc12U2YRjRUFmHa4Q3rNSoGZaP1ofS54km - hex!["6409c85a1125fa456b9dc6e85408a6d931aa8e04f48511c87fc147d1c103e902"] - .unchecked_into(), - //5H3UQy1NhCUUq3getmSEG8R1capY7Uy8JtKJz68UABmD9UxS - hex!["dc3cab0f94fa974cba826984f23dd4dc77ade20f25d935af5f07b85518da8044"] - .unchecked_into(), - //5DstCjokShCt9NppNnAcjg2nS4M5PKY3etn2BoFkZzMhQJ3w - hex!["50379866eb62e5c8aac31133efc4a1723e964a8e30c93c3ce2e7758bd03eb776"] - .unchecked_into(), - //5E4SCbSqUWKC4NVRCkMkJEnXCaVRiNQbSHL4upRB1ffd1Mk1 - hex!["5843c339c39d2c308bfb1841cd10beecfa157580492db05b66db8553e8d6512c"] - .unchecked_into(), - //5HNoMQ1PL3m7eBhp24FZxZUBtz4eh3AiwWq8i8jXLCRpJHsu - hex!["03c81d4e72cbdb96a7e6aad76830ae783b0b4650dc19703dde96866d8894dc921f"] - .unchecked_into(), - ), - ( - //5FNnjg8hXcPVLKASA69bPbooatacxcWNqkQAyXZfFiXi7T8r - hex!["927f8b12a0fa7185077353d9f6b4fe6bc6cd9682bd498642fa3801280909711a"].into(), - //5GipjBdL3rbex9qyxMinZpJYQbobbwk1ctbZp6B2mh3H25c6 - hex!["ce03638cd1e8496793b0540ba23370034511ea5d08837deb17f6c4d905b8d017"].into(), - //5GByn4uRpwmPe4i4MA4PjTQ8HXuycdue8HMWDhZ7vbU4WR9R - hex!["b67d3ed42ab1fcf3fcd7dee99bd6963bc22058ee22bcfddddb776492e85bd76e"] - .unchecked_into(), - //5GnZZ1rs7RE1jwPiyw1kts4JqaxnML5SdsWMuHV9TqCcuPWj - hex!["d0dd492b1a33d2f06a9aa7213e1aaa41d8820a6b56e95cd2462129b446574014"] - .unchecked_into(), - //5GKEKSAa3gbitHhvu5gm4f7q942azCVGDNhrw3hnsGPEMzyg - hex!["bc04e9764e23330b9f4e6922aa6437f87f3dd17b8590825e824724ae89d4ac51"] - .unchecked_into(), - //5H6QLnsfU7sAQ5ZACs9bPivsn9CXrqqwxhq4KKyoquZb5mVW - hex!["de78b26966c08357d66f7f56e7dcac7e4beb16aa0b74939290a42b3f5949bc36"] - .unchecked_into(), - //5FUUeYiAvFfXfB5yZLNkis2ZDy9T3CBLBPC6SwXFriGEjH5f - hex!["96d61fe92a50a79944ea93e3afc0a95a328773878e774cf8c8fbe8eba81cd95c"] - .unchecked_into(), - //5DLkWtgJahWG99cMcQxtftW9W14oduySyQi6hdhav7w3BiKq - hex!["38791c68ee472b94105c66cf150387979c49175062a687d1a1509119cfdc9e0c"] - .unchecked_into(), - //5Cjm1c3Jwt5jp6AaN2XfnncgZcswAmyfJn1buHEUaPauXAKK - hex!["025185a88886008267d27797fc74e34241e3aa8da767fafc9dd3ae5a59546802bb"] - .unchecked_into(), - ), - ]; - - const ENDOWMENT: u128 = 1_000_000 * KSM; - const STASH: u128 = 100 * KSM; - - kusama::RuntimeGenesisConfig { - system: kusama::SystemConfig { code: wasm_binary.to_vec(), ..Default::default() }, - balances: kusama::BalancesConfig { - balances: endowed_accounts - .iter() - .map(|k: &AccountId| (k.clone(), ENDOWMENT)) - .chain(initial_authorities.iter().map(|x| (x.0.clone(), STASH))) - .collect(), - }, - beefy: Default::default(), - indices: kusama::IndicesConfig { indices: vec![] }, - session: kusama::SessionConfig { - keys: initial_authorities - .iter() - .map(|x| { - ( - x.0.clone(), - x.0.clone(), - kusama_session_keys( - x.2.clone(), - x.3.clone(), - x.4.clone(), - x.5.clone(), - x.6.clone(), - x.7.clone(), - x.8.clone(), - ), - ) - }) - .collect::>(), - }, - staking: kusama::StakingConfig { - validator_count: 50, - minimum_validator_count: 4, - stakers: initial_authorities - .iter() - .map(|x| (x.0.clone(), x.0.clone(), STASH, kusama::StakerStatus::Validator)) - .collect(), - invulnerables: initial_authorities.iter().map(|x| x.0.clone()).collect(), - force_era: Forcing::ForceNone, - slash_reward_fraction: Perbill::from_percent(10), - ..Default::default() - }, - babe: kusama::BabeConfig { - authorities: 
Default::default(), - epoch_config: Some(kusama::BABE_GENESIS_EPOCH_CONFIG), - ..Default::default() - }, - grandpa: Default::default(), - im_online: Default::default(), - authority_discovery: kusama::AuthorityDiscoveryConfig { - keys: vec![], - ..Default::default() - }, - claims: kusama::ClaimsConfig { claims: vec![], vesting: vec![] }, - vesting: kusama::VestingConfig { vesting: vec![] }, - treasury: Default::default(), - hrmp: Default::default(), - configuration: kusama::ConfigurationConfig { - config: default_parachains_host_configuration(), - }, - paras: Default::default(), - xcm_pallet: Default::default(), - nomination_pools: Default::default(), - nis_counterpart_balances: Default::default(), - } -} - #[cfg(feature = "rococo-native")] fn rococo_staging_testnet_config_genesis( wasm_binary: &[u8], @@ -1062,39 +754,6 @@ fn rococo_staging_testnet_config_genesis( } } -/// Returns the properties for the [`PolkadotChainSpec`]. -pub fn polkadot_chain_spec_properties() -> serde_json::map::Map { - serde_json::json!({ - "tokenDecimals": 10, - }) - .as_object() - .expect("Map given; qed") - .clone() -} - -/// Staging testnet config. -#[cfg(feature = "kusama-native")] -pub fn kusama_staging_testnet_config() -> Result { - let wasm_binary = kusama::WASM_BINARY.ok_or("Kusama development wasm not available")?; - let boot_nodes = vec![]; - - Ok(KusamaChainSpec::from_genesis( - "Kusama Staging Testnet", - "kusama_staging_testnet", - ChainType::Live, - move || kusama_staging_testnet_config_genesis(wasm_binary), - boot_nodes, - Some( - TelemetryEndpoints::new(vec![(KUSAMA_STAGING_TELEMETRY_URL.to_string(), 0)]) - .expect("Kusama Staging telemetry url is valid; qed"), - ), - Some(DEFAULT_PROTOCOL_ID), - None, - None, - Default::default(), - )) -} - /// Westend staging testnet config. 
#[cfg(feature = "westend-native")] pub fn westend_staging_testnet_config() -> Result { @@ -1239,12 +898,7 @@ pub fn get_authority_keys_from_seed_no_beefy( ) } -#[cfg(any( - feature = "polkadot-native", - feature = "kusama-native", - feature = "westend-native", - feature = "rococo-native" -))] +#[cfg(any(feature = "westend-native", feature = "rococo-native"))] fn testnet_accounts() -> Vec { vec![ get_account_id_from_seed::("Alice"), @@ -1262,176 +916,6 @@ fn testnet_accounts() -> Vec { ] } -/// Helper function to create polkadot `RuntimeGenesisConfig` for testing -#[cfg(feature = "polkadot-native")] -pub fn polkadot_testnet_genesis( - wasm_binary: &[u8], - initial_authorities: Vec<( - AccountId, - AccountId, - BabeId, - GrandpaId, - ImOnlineId, - ValidatorId, - AssignmentId, - AuthorityDiscoveryId, - )>, - _root_key: AccountId, - endowed_accounts: Option>, -) -> polkadot::RuntimeGenesisConfig { - let endowed_accounts: Vec = endowed_accounts.unwrap_or_else(testnet_accounts); - - const ENDOWMENT: u128 = 1_000_000 * DOT; - const STASH: u128 = 100 * DOT; - - polkadot::RuntimeGenesisConfig { - system: polkadot::SystemConfig { code: wasm_binary.to_vec(), ..Default::default() }, - indices: polkadot::IndicesConfig { indices: vec![] }, - balances: polkadot::BalancesConfig { - balances: endowed_accounts.iter().map(|k| (k.clone(), ENDOWMENT)).collect(), - }, - session: polkadot::SessionConfig { - keys: initial_authorities - .iter() - .map(|x| { - ( - x.0.clone(), - x.0.clone(), - polkadot_session_keys( - x.2.clone(), - x.3.clone(), - x.4.clone(), - x.5.clone(), - x.6.clone(), - x.7.clone(), - ), - ) - }) - .collect::>(), - }, - staking: polkadot::StakingConfig { - minimum_validator_count: 1, - validator_count: initial_authorities.len() as u32, - stakers: initial_authorities - .iter() - .map(|x| (x.0.clone(), x.0.clone(), STASH, polkadot::StakerStatus::Validator)) - .collect(), - invulnerables: initial_authorities.iter().map(|x| x.0.clone()).collect(), - force_era: Forcing::NotForcing, - slash_reward_fraction: Perbill::from_percent(10), - ..Default::default() - }, - babe: polkadot::BabeConfig { - authorities: Default::default(), - epoch_config: Some(polkadot::BABE_GENESIS_EPOCH_CONFIG), - ..Default::default() - }, - grandpa: Default::default(), - im_online: Default::default(), - authority_discovery: polkadot::AuthorityDiscoveryConfig { - keys: vec![], - ..Default::default() - }, - claims: polkadot::ClaimsConfig { claims: vec![], vesting: vec![] }, - vesting: polkadot::VestingConfig { vesting: vec![] }, - treasury: Default::default(), - hrmp: Default::default(), - configuration: polkadot::ConfigurationConfig { - config: default_parachains_host_configuration(), - }, - paras: Default::default(), - xcm_pallet: Default::default(), - nomination_pools: Default::default(), - } -} - -/// Helper function to create kusama `RuntimeGenesisConfig` for testing -#[cfg(feature = "kusama-native")] -pub fn kusama_testnet_genesis( - wasm_binary: &[u8], - initial_authorities: Vec<( - AccountId, - AccountId, - BabeId, - GrandpaId, - ImOnlineId, - ValidatorId, - AssignmentId, - AuthorityDiscoveryId, - BeefyId, - )>, - _root_key: AccountId, - endowed_accounts: Option>, -) -> kusama::RuntimeGenesisConfig { - let endowed_accounts: Vec = endowed_accounts.unwrap_or_else(testnet_accounts); - - const ENDOWMENT: u128 = 1_000_000 * KSM; - const STASH: u128 = 100 * KSM; - - kusama::RuntimeGenesisConfig { - system: kusama::SystemConfig { code: wasm_binary.to_vec(), ..Default::default() }, - indices: kusama::IndicesConfig { indices: 
vec![] }, - balances: kusama::BalancesConfig { - balances: endowed_accounts.iter().map(|k| (k.clone(), ENDOWMENT)).collect(), - }, - beefy: Default::default(), - session: kusama::SessionConfig { - keys: initial_authorities - .iter() - .map(|x| { - ( - x.0.clone(), - x.0.clone(), - kusama_session_keys( - x.2.clone(), - x.3.clone(), - x.4.clone(), - x.5.clone(), - x.6.clone(), - x.7.clone(), - x.8.clone(), - ), - ) - }) - .collect::>(), - }, - staking: kusama::StakingConfig { - minimum_validator_count: 1, - validator_count: initial_authorities.len() as u32, - stakers: initial_authorities - .iter() - .map(|x| (x.0.clone(), x.0.clone(), STASH, kusama::StakerStatus::Validator)) - .collect(), - invulnerables: initial_authorities.iter().map(|x| x.0.clone()).collect(), - force_era: Forcing::NotForcing, - slash_reward_fraction: Perbill::from_percent(10), - ..Default::default() - }, - babe: kusama::BabeConfig { - authorities: Default::default(), - epoch_config: Some(kusama::BABE_GENESIS_EPOCH_CONFIG), - ..Default::default() - }, - grandpa: Default::default(), - im_online: Default::default(), - authority_discovery: kusama::AuthorityDiscoveryConfig { - keys: vec![], - ..Default::default() - }, - claims: kusama::ClaimsConfig { claims: vec![], vesting: vec![] }, - vesting: kusama::VestingConfig { vesting: vec![] }, - treasury: Default::default(), - hrmp: Default::default(), - configuration: kusama::ConfigurationConfig { - config: default_parachains_host_configuration(), - }, - paras: Default::default(), - xcm_pallet: Default::default(), - nomination_pools: Default::default(), - nis_counterpart_balances: Default::default(), - } -} - /// Helper function to create westend `RuntimeGenesisConfig` for testing #[cfg(feature = "westend-native")] pub fn westend_testnet_genesis( @@ -1612,26 +1096,6 @@ pub fn rococo_testnet_genesis( } } -#[cfg(feature = "polkadot-native")] -fn polkadot_development_config_genesis(wasm_binary: &[u8]) -> polkadot::RuntimeGenesisConfig { - polkadot_testnet_genesis( - wasm_binary, - vec![get_authority_keys_from_seed_no_beefy("Alice")], - get_account_id_from_seed::("Alice"), - None, - ) -} - -#[cfg(feature = "kusama-native")] -fn kusama_development_config_genesis(wasm_binary: &[u8]) -> kusama::RuntimeGenesisConfig { - kusama_testnet_genesis( - wasm_binary, - vec![get_authority_keys_from_seed("Alice")], - get_account_id_from_seed::("Alice"), - None, - ) -} - #[cfg(feature = "westend-native")] fn westend_development_config_genesis(wasm_binary: &[u8]) -> westend::RuntimeGenesisConfig { westend_testnet_genesis( @@ -1652,44 +1116,6 @@ fn rococo_development_config_genesis(wasm_binary: &[u8]) -> rococo_runtime::Runt ) } -/// Polkadot development config (single validator Alice) -#[cfg(feature = "polkadot-native")] -pub fn polkadot_development_config() -> Result { - let wasm_binary = polkadot::WASM_BINARY.ok_or("Polkadot development wasm not available")?; - - Ok(PolkadotChainSpec::from_genesis( - "Development", - "polkadot_dev", - ChainType::Development, - move || polkadot_development_config_genesis(wasm_binary), - vec![], - None, - Some(DEFAULT_PROTOCOL_ID), - None, - Some(polkadot_chain_spec_properties()), - Default::default(), - )) -} - -/// Kusama development config (single validator Alice) -#[cfg(feature = "kusama-native")] -pub fn kusama_development_config() -> Result { - let wasm_binary = kusama::WASM_BINARY.ok_or("Kusama development wasm not available")?; - - Ok(KusamaChainSpec::from_genesis( - "Development", - "kusama_dev", - ChainType::Development, - move || 
kusama_development_config_genesis(wasm_binary), - vec![], - None, - Some(DEFAULT_PROTOCOL_ID), - None, - None, - Default::default(), - )) -} - /// Westend development config (single validator Alice) #[cfg(feature = "westend-native")] pub fn westend_development_config() -> Result { @@ -1779,67 +1205,6 @@ pub fn wococo_development_config() -> Result { )) } -#[cfg(feature = "polkadot-native")] -fn polkadot_local_testnet_genesis(wasm_binary: &[u8]) -> polkadot::RuntimeGenesisConfig { - polkadot_testnet_genesis( - wasm_binary, - vec![ - get_authority_keys_from_seed_no_beefy("Alice"), - get_authority_keys_from_seed_no_beefy("Bob"), - ], - get_account_id_from_seed::("Alice"), - None, - ) -} - -/// Polkadot local testnet config (multivalidator Alice + Bob) -#[cfg(feature = "polkadot-native")] -pub fn polkadot_local_testnet_config() -> Result { - let wasm_binary = polkadot::WASM_BINARY.ok_or("Polkadot development wasm not available")?; - - Ok(PolkadotChainSpec::from_genesis( - "Local Testnet", - "local_testnet", - ChainType::Local, - move || polkadot_local_testnet_genesis(wasm_binary), - vec![], - None, - Some(DEFAULT_PROTOCOL_ID), - None, - Some(polkadot_chain_spec_properties()), - Default::default(), - )) -} - -#[cfg(feature = "kusama-native")] -fn kusama_local_testnet_genesis(wasm_binary: &[u8]) -> kusama::RuntimeGenesisConfig { - kusama_testnet_genesis( - wasm_binary, - vec![get_authority_keys_from_seed("Alice"), get_authority_keys_from_seed("Bob")], - get_account_id_from_seed::("Alice"), - None, - ) -} - -/// Kusama local testnet config (multivalidator Alice + Bob) -#[cfg(feature = "kusama-native")] -pub fn kusama_local_testnet_config() -> Result { - let wasm_binary = kusama::WASM_BINARY.ok_or("Kusama development wasm not available")?; - - Ok(KusamaChainSpec::from_genesis( - "Kusama Local Testnet", - "kusama_local_testnet", - ChainType::Local, - move || kusama_local_testnet_genesis(wasm_binary), - vec![], - None, - Some(DEFAULT_PROTOCOL_ID), - None, - None, - Default::default(), - )) -} - #[cfg(feature = "westend-native")] fn westend_local_testnet_genesis(wasm_binary: &[u8]) -> westend::RuntimeGenesisConfig { westend_testnet_genesis( diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index 75e853d0ef85..5286631fbbb5 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -84,7 +84,7 @@ use telemetry::TelemetryWorker; #[cfg(feature = "full-node")] use telemetry::{Telemetry, TelemetryWorkerHandle}; -pub use chain_spec::{KusamaChainSpec, PolkadotChainSpec, RococoChainSpec, WestendChainSpec}; +pub use chain_spec::{GenericChainSpec, RococoChainSpec, WestendChainSpec}; pub use consensus_common::{Proposal, SelectChain}; use frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE; use mmr_gadget::MmrGadget; @@ -104,10 +104,6 @@ pub use sp_runtime::{ traits::{self as runtime_traits, BlakeTwo256, Block as BlockT, Header as HeaderT, NumberFor}, }; -#[cfg(feature = "kusama-native")] -pub use {kusama_runtime, kusama_runtime_constants}; -#[cfg(feature = "polkadot-native")] -pub use {polkadot_runtime, polkadot_runtime_constants}; #[cfg(feature = "rococo-native")] pub use {rococo_runtime, rococo_runtime_constants}; #[cfg(feature = "westend-native")] diff --git a/polkadot/node/test/service/Cargo.toml b/polkadot/node/test/service/Cargo.toml index ce4148b459ee..1924418a4856 100644 --- a/polkadot/node/test/service/Cargo.toml +++ b/polkadot/node/test/service/Cargo.toml @@ -11,6 +11,7 @@ futures = "0.3.21" hex = "0.4.3" gum = { package = "tracing-gum", path 
= "../../gum" } rand = "0.8.5" +serde_json = "1.0.106" tempfile = "3.2.0" tokio = "1.24.2" diff --git a/polkadot/node/test/service/src/chain_spec.rs b/polkadot/node/test/service/src/chain_spec.rs index 9aadd7d203c0..bedb1250b2a9 100644 --- a/polkadot/node/test/service/src/chain_spec.rs +++ b/polkadot/node/test/service/src/chain_spec.rs @@ -20,9 +20,7 @@ use babe_primitives::AuthorityId as BabeId; use grandpa::AuthorityId as GrandpaId; use pallet_staking::Forcing; use polkadot_primitives::{AccountId, AssignmentId, ValidatorId, MAX_CODE_SIZE, MAX_POV_SIZE}; -use polkadot_service::chain_spec::{ - get_account_id_from_seed, get_from_seed, polkadot_chain_spec_properties, Extensions, -}; +use polkadot_service::chain_spec::{get_account_id_from_seed, get_from_seed, Extensions}; use polkadot_test_runtime::BABE_GENESIS_EPOCH_CONFIG; use sc_chain_spec::{ChainSpec, ChainType}; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; @@ -36,6 +34,16 @@ const DEFAULT_PROTOCOL_ID: &str = "dot"; pub type PolkadotChainSpec = sc_service::GenericChainSpec; +/// Returns the properties for the [`PolkadotChainSpec`]. +pub fn polkadot_chain_spec_properties() -> serde_json::map::Map { + serde_json::json!({ + "tokenDecimals": 10, + }) + .as_object() + .expect("Map given; qed") + .clone() +} + /// Local testnet config (multivalidator Alice + Bob) pub fn polkadot_local_testnet_config() -> PolkadotChainSpec { PolkadotChainSpec::from_genesis( diff --git a/polkadot/tests/benchmark_block.rs b/polkadot/tests/benchmark_block.rs index bc91b31bc7b6..99f95ef611a4 100644 --- a/polkadot/tests/benchmark_block.rs +++ b/polkadot/tests/benchmark_block.rs @@ -18,6 +18,7 @@ #![cfg(unix)] use assert_cmd::cargo::cargo_bin; +use common::run_with_timeout; use nix::{ sys::signal::{kill, Signal::SIGINT}, unistd::Pid, @@ -32,25 +33,28 @@ use tempfile::tempdir; pub mod common; -static RUNTIMES: [&str; 4] = ["polkadot", "kusama", "westend", "rococo"]; +static RUNTIMES: &[&str] = &["westend", "rococo"]; /// `benchmark block` works for all dev runtimes using the wasm executor. #[tokio::test] async fn benchmark_block_works() { for runtime in RUNTIMES { - let tmp_dir = tempdir().expect("could not create a temp dir"); - let base_path = tmp_dir.path(); - let runtime = format!("{}-dev", runtime); - - // Build a chain with a single block. - build_chain(&runtime, base_path).await.unwrap(); - // Benchmark the one block. - benchmark_block(&runtime, base_path, 1).unwrap(); + run_with_timeout(Duration::from_secs(10 * 60), async move { + let tmp_dir = tempdir().expect("could not create a temp dir"); + let base_path = tmp_dir.path(); + let runtime = format!("{}-dev", runtime); + + // Build a chain with a single block. + build_chain(&runtime, base_path).await; + // Benchmark the one block. + benchmark_block(&runtime, base_path, 1).unwrap(); + }) + .await } } /// Builds a chain with one block for the given runtime and base path. -async fn build_chain(runtime: &str, base_path: &Path) -> Result<(), String> { +async fn build_chain(runtime: &str, base_path: &Path) { let mut cmd = Command::new(cargo_bin("polkadot")) .stdout(process::Stdio::piped()) .stderr(process::Stdio::piped()) @@ -64,13 +68,10 @@ async fn build_chain(runtime: &str, base_path: &Path) -> Result<(), String> { let (ws_url, _) = common::find_ws_url_from_output(cmd.stderr.take().unwrap()); // Wait for the chain to produce one block. 
- let ok = common::wait_n_finalized_blocks(1, Duration::from_secs(60), &ws_url).await; + common::wait_n_finalized_blocks(1, &ws_url).await; // Send SIGINT to node. kill(Pid::from_raw(cmd.id().try_into().unwrap()), SIGINT).unwrap(); - // Wait for the node to handle it and exit. - assert!(common::wait_for(&mut cmd, 30).map(|x| x.success()).unwrap_or_default()); - - ok.map_err(|e| format!("Node did not build the chain: {:?}", e)) + assert!(cmd.wait().unwrap().success()); } /// Benchmarks the given block with the wasm executor. diff --git a/polkadot/tests/benchmark_extrinsic.rs b/polkadot/tests/benchmark_extrinsic.rs index e701fd9c923c..529eb29362d4 100644 --- a/polkadot/tests/benchmark_extrinsic.rs +++ b/polkadot/tests/benchmark_extrinsic.rs @@ -17,7 +17,7 @@ use assert_cmd::cargo::cargo_bin; use std::{process::Command, result::Result}; -static RUNTIMES: [&str; 4] = ["polkadot", "kusama", "westend", "rococo"]; +static RUNTIMES: &[&str] = &["westend", "rococo"]; static EXTRINSICS: [(&str, &str); 2] = [("system", "remark"), ("balances", "transfer_keep_alive")]; diff --git a/polkadot/tests/benchmark_overhead.rs b/polkadot/tests/benchmark_overhead.rs index 295479594eb9..b0912225347d 100644 --- a/polkadot/tests/benchmark_overhead.rs +++ b/polkadot/tests/benchmark_overhead.rs @@ -18,26 +18,26 @@ use assert_cmd::cargo::cargo_bin; use std::{process::Command, result::Result}; use tempfile::tempdir; -static RUNTIMES: [&str; 4] = ["polkadot", "kusama", "westend", "rococo"]; +static RUNTIMES: &[&str] = &["westend", "rococo"]; /// `benchmark overhead` works for all dev runtimes. #[test] fn benchmark_overhead_works() { for runtime in RUNTIMES { let runtime = format!("{}-dev", runtime); - assert!(benchmark_overhead(runtime).is_ok()); + assert!(benchmark_overhead(&runtime).is_ok()); } } /// `benchmark overhead` rejects all non-dev runtimes. #[test] fn benchmark_overhead_rejects_non_dev_runtimes() { - for runtime in RUNTIMES { - assert!(benchmark_overhead(runtime.into()).is_err()); + for runtime in RUNTIMES.into_iter() { + assert!(benchmark_overhead(runtime).is_err()); } } -fn benchmark_overhead(runtime: String) -> Result<(), String> { +fn benchmark_overhead(runtime: &str) -> Result<(), String> { let tmp_dir = tempdir().expect("could not create a temp dir"); let base_path = tmp_dir.path(); diff --git a/polkadot/tests/common.rs b/polkadot/tests/common.rs index 940a0c6f18d0..10859ead5fe8 100644 --- a/polkadot/tests/common.rs +++ b/polkadot/tests/common.rs @@ -16,49 +16,25 @@ use polkadot_core_primitives::{Block, Hash, Header}; use std::{ + future::Future, io::{BufRead, BufReader, Read}, - process::{Child, ExitStatus}, - thread, time::Duration, }; use substrate_rpc_client::{ws_client, ChainApi}; -use tokio::time::timeout; -/// Wait for the given `child` the given amount of `secs`. -/// -/// Returns the `Some(exit status)` or `None` if the process did not finish in the given time. -pub fn wait_for(child: &mut Child, secs: usize) -> Option { - for _ in 0..secs { - match child.try_wait().unwrap() { - Some(status) => return Some(status), - None => thread::sleep(Duration::from_secs(1)), - } - } - eprintln!("Took to long to exit. Killing..."); - let _ = child.kill(); - child.wait().unwrap(); - - None -} - -/// Wait for at least `n` blocks to be finalized within the specified time. 
-pub async fn wait_n_finalized_blocks( - n: usize, - timeout_duration: Duration, - url: &str, -) -> Result<(), tokio::time::error::Elapsed> { - timeout(timeout_duration, wait_n_finalized_blocks_from(n, url)).await +/// Run the given `future` and panic if the `timeout` is hit. +pub async fn run_with_timeout(timeout: Duration, future: impl Future) { + tokio::time::timeout(timeout, future).await.expect("Hit timeout"); } /// Wait for at least `n` blocks to be finalized from a specified node. -async fn wait_n_finalized_blocks_from(n: usize, url: &str) { +pub async fn wait_n_finalized_blocks(n: usize, url: &str) { let mut built_blocks = std::collections::HashSet::new(); let mut interval = tokio::time::interval(Duration::from_secs(6)); loop { - let rpc = match ws_client(url).await { - Ok(rpc_service) => rpc_service, - Err(_) => continue, + let Ok(rpc) = ws_client(url).await else { + continue; }; if let Ok(block) = ChainApi::<(), Hash, Header, Block>::finalized_head(&rpc).await { @@ -67,6 +43,7 @@ async fn wait_n_finalized_blocks_from(n: usize, url: &str) { break } }; + interval.tick().await; } } diff --git a/polkadot/tests/purge_chain_works.rs b/polkadot/tests/purge_chain_works.rs index 3e9a37814781..831155fb4d7e 100644 --- a/polkadot/tests/purge_chain_works.rs +++ b/polkadot/tests/purge_chain_works.rs @@ -14,7 +14,14 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . +#![cfg(unix)] + use assert_cmd::cargo::cargo_bin; +use common::run_with_timeout; +use nix::{ + sys::signal::{kill, Signal::SIGINT}, + unistd::Pid, +}; use std::{ process::{self, Command}, time::Duration, @@ -24,105 +31,95 @@ use tempfile::tempdir; pub mod common; #[tokio::test] -#[cfg(unix)] async fn purge_chain_rocksdb_works() { - use nix::{ - sys::signal::{kill, Signal::SIGINT}, - unistd::Pid, - }; - - let tmpdir = tempdir().expect("could not create temp dir"); - - let mut cmd = Command::new(cargo_bin("polkadot")) - .stdout(process::Stdio::piped()) - .stderr(process::Stdio::piped()) - .args(["--dev", "-d"]) - .arg(tmpdir.path()) - .arg("--port") - .arg("33034") - .arg("--no-hardware-benchmarks") - .spawn() - .unwrap(); - - let (ws_url, _) = common::find_ws_url_from_output(cmd.stderr.take().unwrap()); - - // Let it produce 1 block. - common::wait_n_finalized_blocks(1, Duration::from_secs(60), &ws_url) - .await - .unwrap(); - - // Send SIGINT to node. - kill(Pid::from_raw(cmd.id().try_into().unwrap()), SIGINT).unwrap(); - // Wait for the node to handle it and exit. - assert!(common::wait_for(&mut cmd, 30).map(|x| x.success()).unwrap_or_default()); - assert!(tmpdir.path().join("chains/polkadot_dev").exists()); - assert!(tmpdir.path().join("chains/polkadot_dev/db/full").exists()); - assert!(tmpdir.path().join("chains/polkadot_dev/db/full/parachains").exists()); - - // Purge chain - let status = Command::new(cargo_bin("polkadot")) - .args(["purge-chain", "--dev", "-d"]) - .arg(tmpdir.path()) - .arg("-y") - .status() - .unwrap(); - assert!(status.success()); - - // Make sure that the chain folder exists, but `db/full` is deleted. 
- assert!(tmpdir.path().join("chains/polkadot_dev").exists()); - assert!(!tmpdir.path().join("chains/polkadot_dev/db/full").exists()); + run_with_timeout(Duration::from_secs(10 * 60), async move { + let tmpdir = tempdir().expect("could not create temp dir"); + + let mut cmd = Command::new(cargo_bin("polkadot")) + .stdout(process::Stdio::piped()) + .stderr(process::Stdio::piped()) + .args(["--dev", "-d"]) + .arg(tmpdir.path()) + .arg("--port") + .arg("33034") + .arg("--no-hardware-benchmarks") + .spawn() + .unwrap(); + + let (ws_url, _) = common::find_ws_url_from_output(cmd.stderr.take().unwrap()); + + // Let it produce 1 block. + common::wait_n_finalized_blocks(1, &ws_url).await; + + // Send SIGINT to node. + kill(Pid::from_raw(cmd.id().try_into().unwrap()), SIGINT).unwrap(); + // Wait for the node to handle it and exit. + assert!(cmd.wait().unwrap().success()); + assert!(tmpdir.path().join("chains/rococo_dev").exists()); + assert!(tmpdir.path().join("chains/rococo_dev/db/full").exists()); + assert!(tmpdir.path().join("chains/rococo_dev/db/full/parachains").exists()); + + // Purge chain + let status = Command::new(cargo_bin("polkadot")) + .args(["purge-chain", "--dev", "-d"]) + .arg(tmpdir.path()) + .arg("-y") + .status() + .unwrap(); + assert!(status.success()); + + // Make sure that the chain folder exists, but `db/full` is deleted. + assert!(tmpdir.path().join("chains/rococo_dev").exists()); + assert!(!tmpdir.path().join("chains/rococo_dev/db/full").exists()); + }) + .await; } #[tokio::test] -#[cfg(unix)] async fn purge_chain_paritydb_works() { - use nix::{ - sys::signal::{kill, Signal::SIGINT}, - unistd::Pid, - }; - - let tmpdir = tempdir().expect("could not create temp dir"); - - let mut cmd = Command::new(cargo_bin("polkadot")) - .stdout(process::Stdio::piped()) - .stderr(process::Stdio::piped()) - .args(["--dev", "-d"]) - .arg(tmpdir.path()) - .arg("--database") - .arg("paritydb-experimental") - .arg("--no-hardware-benchmarks") - .spawn() - .unwrap(); - - let (ws_url, _) = common::find_ws_url_from_output(cmd.stderr.take().unwrap()); - - // Let it produce 1 block. - common::wait_n_finalized_blocks(1, Duration::from_secs(60), &ws_url) - .await - .unwrap(); - - // Send SIGINT to node. - kill(Pid::from_raw(cmd.id().try_into().unwrap()), SIGINT).unwrap(); - // Wait for the node to handle it and exit. - assert!(common::wait_for(&mut cmd, 30).map(|x| x.success()).unwrap_or_default()); - assert!(tmpdir.path().join("chains/polkadot_dev").exists()); - assert!(tmpdir.path().join("chains/polkadot_dev/paritydb/full").exists()); - assert!(tmpdir.path().join("chains/polkadot_dev/paritydb/parachains").exists()); - - // Purge chain - let status = Command::new(cargo_bin("polkadot")) - .args(["purge-chain", "--dev", "-d"]) - .arg(tmpdir.path()) - .arg("--database") - .arg("paritydb-experimental") - .arg("-y") - .status() - .unwrap(); - assert!(status.success()); - - // Make sure that the chain folder exists, but `db/full` is deleted. - assert!(tmpdir.path().join("chains/polkadot_dev").exists()); - assert!(!tmpdir.path().join("chains/polkadot_dev/paritydb/full").exists()); - // Parachains removal requires calling "purge-chain --parachains". 
- assert!(tmpdir.path().join("chains/polkadot_dev/paritydb/parachains").exists()); + run_with_timeout(Duration::from_secs(10 * 60), async move { + let tmpdir = tempdir().expect("could not create temp dir"); + + let mut cmd = Command::new(cargo_bin("polkadot")) + .stdout(process::Stdio::piped()) + .stderr(process::Stdio::piped()) + .args(["--dev", "-d"]) + .arg(tmpdir.path()) + .arg("--database") + .arg("paritydb-experimental") + .arg("--no-hardware-benchmarks") + .spawn() + .unwrap(); + + let (ws_url, _) = common::find_ws_url_from_output(cmd.stderr.take().unwrap()); + + // Let it produce 1 block. + common::wait_n_finalized_blocks(1, &ws_url).await; + + // Send SIGINT to node. + kill(Pid::from_raw(cmd.id().try_into().unwrap()), SIGINT).unwrap(); + // Wait for the node to handle it and exit. + assert!(cmd.wait().unwrap().success()); + assert!(tmpdir.path().join("chains/rococo_dev").exists()); + assert!(tmpdir.path().join("chains/rococo_dev/paritydb/full").exists()); + assert!(tmpdir.path().join("chains/rococo_dev/paritydb/parachains").exists()); + + // Purge chain + let status = Command::new(cargo_bin("polkadot")) + .args(["purge-chain", "--dev", "-d"]) + .arg(tmpdir.path()) + .arg("--database") + .arg("paritydb-experimental") + .arg("-y") + .status() + .unwrap(); + assert!(status.success()); + + // Make sure that the chain folder exists, but `db/full` is deleted. + assert!(tmpdir.path().join("chains/rococo_dev").exists()); + assert!(!tmpdir.path().join("chains/rococo_dev/paritydb/full").exists()); + // Parachains removal requires calling "purge-chain --parachains". + assert!(tmpdir.path().join("chains/rococo_dev/paritydb/parachains").exists()); + }) + .await; } diff --git a/polkadot/tests/running_the_node_and_interrupt.rs b/polkadot/tests/running_the_node_and_interrupt.rs index d3935cbb02ad..079c34e0421e 100644 --- a/polkadot/tests/running_the_node_and_interrupt.rs +++ b/polkadot/tests/running_the_node_and_interrupt.rs @@ -15,10 +15,7 @@ // along with Substrate. If not, see . use assert_cmd::cargo::cargo_bin; -use std::{ - process::{self, Command}, - time::Duration, -}; +use std::process::{self, Command}; use tempfile::tempdir; pub mod common; @@ -49,17 +46,13 @@ async fn running_the_node_works_and_can_be_interrupted() { let (ws_url, _) = common::find_ws_url_from_output(cmd.stderr.take().unwrap()); // Let it produce three blocks. - common::wait_n_finalized_blocks(3, Duration::from_secs(60), &ws_url) - .await - .unwrap(); + common::wait_n_finalized_blocks(3, &ws_url).await; assert!(cmd.try_wait().unwrap().is_none(), "the process should still be running"); kill(Pid::from_raw(cmd.id().try_into().unwrap()), signal).unwrap(); - assert_eq!( - common::wait_for(&mut cmd, 30).map(|x| x.success()), - Some(true), - "the process must exit gracefully after signal {}", - signal, + assert!( + cmd.wait().unwrap().success(), + "the process must exit gracefully after signal {signal}", ); } From f3061c14bf51867400e56deb8248094ab37f3f0c Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Tue, 19 Sep 2023 14:29:22 +0300 Subject: [PATCH 25/69] archive: Fetch body, genesisHash and header (#1560) This PR lays the foundation for implementing the archive RPC methods. 
The methods implemented by this PR:
- archive_unstable_body: Fetch the block's body (a vector of hex-encoded
scale-encoded extrinsics) from a given block hash
- archive_unstable_genesisHash: Fetch the genesis hash
- archive_unstable_header: Fetch the header from a given block hash

Added unit tests for the methods.

This PR implements the methods without exposing them to the RPC layer; they
will be exposed by a follow-up PR.

Closes: https://github.com/paritytech/polkadot-sdk/issues/1509
Closes: https://github.com/paritytech/polkadot-sdk/issues/1514

@paritytech/subxt-team

---------

Signed-off-by: Alexandru Vasile
---
 .../client/rpc-spec-v2/src/archive/api.rs     |  56 +++++++++
 .../client/rpc-spec-v2/src/archive/archive.rs |  86 +++++++++++++
 .../client/rpc-spec-v2/src/archive/mod.rs     |  31 +++++
 .../client/rpc-spec-v2/src/archive/tests.rs   | 113 ++++++++++++++++++
 substrate/client/rpc-spec-v2/src/lib.rs       |   1 +
 5 files changed, 287 insertions(+)
 create mode 100644 substrate/client/rpc-spec-v2/src/archive/api.rs
 create mode 100644 substrate/client/rpc-spec-v2/src/archive/archive.rs
 create mode 100644 substrate/client/rpc-spec-v2/src/archive/mod.rs
 create mode 100644 substrate/client/rpc-spec-v2/src/archive/tests.rs

diff --git a/substrate/client/rpc-spec-v2/src/archive/api.rs b/substrate/client/rpc-spec-v2/src/archive/api.rs
new file mode 100644
index 000000000000..ca94779c887c
--- /dev/null
+++ b/substrate/client/rpc-spec-v2/src/archive/api.rs
@@ -0,0 +1,56 @@
+// This file is part of Substrate.

+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0

+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.

+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.

+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.

+//! API trait of the archive methods.

+use jsonrpsee::{core::RpcResult, proc_macros::rpc};

+#[rpc(client, server)]
+pub trait ArchiveApi<Hash> {
+	/// Retrieves the body (list of transactions) of a given block hash.
+	///
+	/// Returns an array of strings containing the hexadecimal-encoded SCALE-codec-encoded
+	/// transactions in that block. If no block with that hash is found, null.
+	///
+	/// # Unstable
+	///
+	/// This method is unstable and subject to change in the future.
+	#[method(name = "archive_unstable_body")]
+	fn archive_unstable_body(&self, hash: Hash) -> RpcResult<Option<Vec<String>>>;

+	/// Get the chain's genesis hash.
+	///
+	/// Returns a string containing the hexadecimal-encoded hash of the genesis block of the chain.
+	///
+	/// # Unstable
+	///
+	/// This method is unstable and subject to change in the future.
+	#[method(name = "archive_unstable_genesisHash")]
+	fn archive_unstable_genesis_hash(&self) -> RpcResult<String>;

+	/// Get the block's header.
+	///
+	/// Returns a string containing the hexadecimal-encoded SCALE-codec-encoded header of the
+	/// block.
+	///
+	/// # Unstable
+	///
+	/// This method is unstable and subject to change in the future.
+ #[method(name = "archive_unstable_header")] + fn archive_unstable_header(&self, hash: Hash) -> RpcResult>; +} diff --git a/substrate/client/rpc-spec-v2/src/archive/archive.rs b/substrate/client/rpc-spec-v2/src/archive/archive.rs new file mode 100644 index 000000000000..4fb2e5671d30 --- /dev/null +++ b/substrate/client/rpc-spec-v2/src/archive/archive.rs @@ -0,0 +1,86 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! API implementation for `archive`. + +use super::ArchiveApiServer; +use crate::chain_head::hex_string; +use codec::Encode; +use jsonrpsee::core::{async_trait, RpcResult}; +use sc_client_api::{Backend, BlockBackend, BlockchainEvents, ExecutorProvider, StorageProvider}; +use sp_api::CallApiAt; +use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; +use sp_runtime::traits::Block as BlockT; +use std::{marker::PhantomData, sync::Arc}; + +/// An API for archive RPC calls. +pub struct Archive, Block: BlockT, Client> { + /// Substrate client. + client: Arc, + /// The hexadecimal encoded hash of the genesis block. + genesis_hash: String, + /// Phantom member to pin the block type. + _phantom: PhantomData<(Block, BE)>, +} + +impl, Block: BlockT, Client> Archive { + /// Create a new [`Archive`]. + pub fn new>(client: Arc, genesis_hash: GenesisHash) -> Self { + let genesis_hash = hex_string(&genesis_hash.as_ref()); + Self { client, genesis_hash, _phantom: PhantomData } + } +} + +#[async_trait] +impl ArchiveApiServer for Archive +where + Block: BlockT + 'static, + Block::Header: Unpin, + BE: Backend + 'static, + Client: BlockBackend + + ExecutorProvider + + HeaderBackend + + HeaderMetadata + + BlockchainEvents + + CallApiAt + + StorageProvider + + 'static, +{ + fn archive_unstable_body(&self, hash: Block::Hash) -> RpcResult>> { + let Ok(Some(signed_block)) = self.client.block(hash) else { return Ok(None) }; + + let extrinsics = signed_block + .block + .extrinsics() + .iter() + .map(|extrinsic| hex_string(&extrinsic.encode())) + .collect(); + + Ok(Some(extrinsics)) + } + + fn archive_unstable_genesis_hash(&self) -> RpcResult { + Ok(self.genesis_hash.clone()) + } + + fn archive_unstable_header(&self, hash: Block::Hash) -> RpcResult> { + let Ok(Some(header)) = self.client.header(hash) else { return Ok(None) }; + + Ok(Some(hex_string(&header.encode()))) + } +} diff --git a/substrate/client/rpc-spec-v2/src/archive/mod.rs b/substrate/client/rpc-spec-v2/src/archive/mod.rs new file mode 100644 index 000000000000..767f658ecd75 --- /dev/null +++ b/substrate/client/rpc-spec-v2/src/archive/mod.rs @@ -0,0 +1,31 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0

+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.

+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.

+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.

+//! Substrate archive API.
+//!
+//! # Note
+//!
+//! Methods are prefixed by `archive`.

+#[cfg(test)]
+mod tests;

+pub mod api;
+pub mod archive;

+pub use api::ArchiveApiServer;
diff --git a/substrate/client/rpc-spec-v2/src/archive/tests.rs b/substrate/client/rpc-spec-v2/src/archive/tests.rs
new file mode 100644
index 000000000000..bc75fc749acc
--- /dev/null
+++ b/substrate/client/rpc-spec-v2/src/archive/tests.rs
@@ -0,0 +1,113 @@
+// This file is part of Substrate.

+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0

+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.

+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.

+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.

+use crate::chain_head::hex_string;

+use super::{archive::Archive, *};

+use codec::{Decode, Encode};
+use jsonrpsee::{types::EmptyServerParams as EmptyParams, RpcModule};
+use sc_block_builder::BlockBuilderProvider;

+use sp_consensus::BlockOrigin;
+use std::sync::Arc;
+use substrate_test_runtime_client::{
+	prelude::*, runtime, Backend, BlockBuilderExt, Client, ClientBlockImportExt,
+};

+const CHAIN_GENESIS: [u8; 32] = [0; 32];
+const INVALID_HASH: [u8; 32] = [1; 32];

+type Header = substrate_test_runtime_client::runtime::Header;
+type Block = substrate_test_runtime_client::runtime::Block;

+fn setup_api() -> (Arc<Client<Backend>>, RpcModule<Archive<Backend, Block, Client<Backend>>>) {
+	let builder = TestClientBuilder::new();
+	let client = Arc::new(builder.build());

+	let api = Archive::new(client.clone(), CHAIN_GENESIS).into_rpc();

+	(client, api)
+}

+#[tokio::test]
+async fn archive_genesis() {
+	let (_client, api) = setup_api();

+	let genesis: String =
+		api.call("archive_unstable_genesisHash", EmptyParams::new()).await.unwrap();
+	assert_eq!(genesis, hex_string(&CHAIN_GENESIS));
+}

+#[tokio::test]
+async fn archive_body() {
+	let (mut client, api) = setup_api();

+	// Invalid block hash.
+	let invalid_hash = hex_string(&INVALID_HASH);
+	let res: Option<Vec<String>> = api.call("archive_unstable_body", [invalid_hash]).await.unwrap();
+	assert!(res.is_none());

+	// Import a new block with an extrinsic.
+	let mut builder = client.new_block(Default::default()).unwrap();
+	builder
+		.push_transfer(runtime::Transfer {
+			from: AccountKeyring::Alice.into(),
+			to: AccountKeyring::Ferdie.into(),
+			amount: 42,
+			nonce: 0,
+		})
+		.unwrap();
+	let block = builder.build().unwrap().block;
+	let block_hash = format!("{:?}", block.header.hash());
+	client.import(BlockOrigin::Own, block.clone()).await.unwrap();

+	let expected_tx = hex_string(&block.extrinsics[0].encode());

+	let body: Vec<String> = api.call("archive_unstable_body", [block_hash]).await.unwrap();
+	assert_eq!(vec![expected_tx], body);
+}

+#[tokio::test]
+async fn archive_header() {
+	let (mut client, api) = setup_api();

+	// Invalid block hash.
+	let invalid_hash = hex_string(&INVALID_HASH);
+	let res: Option<String> = api.call("archive_unstable_header", [invalid_hash]).await.unwrap();
+	assert!(res.is_none());

+	// Import a new block with an extrinsic.
+	let mut builder = client.new_block(Default::default()).unwrap();
+	builder
+		.push_transfer(runtime::Transfer {
+			from: AccountKeyring::Alice.into(),
+			to: AccountKeyring::Ferdie.into(),
+			amount: 42,
+			nonce: 0,
+		})
+		.unwrap();
+	let block = builder.build().unwrap().block;
+	let block_hash = format!("{:?}", block.header.hash());
+	client.import(BlockOrigin::Own, block.clone()).await.unwrap();

+	let header: String = api.call("archive_unstable_header", [block_hash]).await.unwrap();
+	let bytes = array_bytes::hex2bytes(&header).unwrap();
+	let header: Header = Decode::decode(&mut &bytes[..]).unwrap();
+	assert_eq!(header, block.header);
+}
diff --git a/substrate/client/rpc-spec-v2/src/lib.rs b/substrate/client/rpc-spec-v2/src/lib.rs
index 7c22ef5d5231..9a455c5984a5 100644
--- a/substrate/client/rpc-spec-v2/src/lib.rs
+++ b/substrate/client/rpc-spec-v2/src/lib.rs
@@ -23,6 +23,7 @@
 #![warn(missing_docs)]
 #![deny(unused_crate_dependencies)]
 
+pub mod archive;
 pub mod chain_head;
 pub mod chain_spec;
 pub mod transaction;

From 2d96c8d27cbb54c3bba4699b22603e0f7997c146 Mon Sep 17 00:00:00 2001
From: joe petrowski <25483142+joepetrowski@users.noreply.github.com>
Date: Tue, 19 Sep 2023 13:46:28 +0200
Subject: [PATCH 26/69] Add Method to Establish HRMP Channels Among System Parachains (#1473)

Solution to establish HRMP channels between system parachains.
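For a concrete picture of the new call, here is a minimal sketch adapted from `open_system_channel_works` in the pallet tests added by this PR (`new_test_ext`, `register_parachain`, `run_to_block`, and `channel_exists` are helpers from the pallet's mock/test module):

```rust
// A minimal sketch, adapted from this PR's pallet tests: any signed origin may
// open a channel between two system chains (ParaId < 2000), with no deposit.
new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| {
	// Both paras must be system chains; register them as live parachains first.
	register_parachain(1.into());
	register_parachain(3.into());
	run_to_block(5, Some(vec![4, 5]));

	assert_ok!(Hrmp::establish_system_channel(RuntimeOrigin::signed(1), 1.into(), 3.into()));

	// The channel only materializes at the next session change.
	run_to_block(8, Some(vec![8]));
	assert!(channel_exists(1.into(), 3.into()));
});
```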
--------- Co-authored-by: Muharem Ismailov --- .../src/tests/hrmp_channels.rs | 14 -- .../src/tests/hrmp_channels.rs | 14 -- .../src/weights/runtime_parachains_hrmp.rs | 42 ++++ polkadot/runtime/parachains/src/hrmp.rs | 212 ++++++++++++++++-- .../parachains/src/hrmp/benchmarking.rs | 73 +++++- polkadot/runtime/parachains/src/hrmp/tests.rs | 194 +++++++++++++--- .../src/weights/runtime_parachains_hrmp.rs | 42 ++++ .../src/weights/runtime_parachains_hrmp.rs | 42 ++++ .../src/weights/runtime_parachains_hrmp.rs | 42 ++++ 9 files changed, 592 insertions(+), 83 deletions(-) diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/hrmp_channels.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/hrmp_channels.rs index 623b3ff599c8..7bb64333db52 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/hrmp_channels.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/hrmp_channels.rs @@ -159,12 +159,6 @@ fn force_open_hrmp_channel_for_system_para_works() { // Parachain A init values let para_a_id = PenpalKusamaA::para_id(); - let fund_amount = KUSAMA_ED * 1000_000_000; - - // Fund Parachain's Sovereign accounts to be able to reserve the deposit - let para_a_sovereign_account = Kusama::fund_para_sovereign(fund_amount, para_a_id); - let system_para_sovereign_account = Kusama::fund_para_sovereign(fund_amount, system_para_id); - Kusama::execute_with(|| { assert_ok!(::Hrmp::force_open_hrmp_channel( relay_root_origin, @@ -179,14 +173,6 @@ fn force_open_hrmp_channel_for_system_para_works() { assert_expected_events!( Kusama, vec![ - // Sender deposit is reserved for System Parachain's Sovereign account - RuntimeEvent::Balances(pallet_balances::Event::Reserved { who, .. }) =>{ - who: *who == system_para_sovereign_account, - }, - // Recipient deposit is reserved for Parachain's Sovereign account - RuntimeEvent::Balances(pallet_balances::Event::Reserved { who, .. }) =>{ - who: *who == para_a_sovereign_account, - }, // HRMP channel forced opened RuntimeEvent::Hrmp( polkadot_runtime_parachains::hrmp::Event::HrmpChannelForceOpened( diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/hrmp_channels.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/hrmp_channels.rs index a1423f2ea90b..a6286c619f64 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/hrmp_channels.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/hrmp_channels.rs @@ -159,12 +159,6 @@ fn force_open_hrmp_channel_for_system_para_works() { // Parachain A init values let para_a_id = PenpalPolkadotA::para_id(); - let fund_amount = POLKADOT_ED * 1000_000_000; - - // Fund Parachain's Sovereign accounts to be able to reserve the deposit - let system_para_sovereign_account = Polkadot::fund_para_sovereign(fund_amount, system_para_id); - let para_a_sovereign_account = Polkadot::fund_para_sovereign(fund_amount, para_a_id); - Polkadot::execute_with(|| { assert_ok!(::Hrmp::force_open_hrmp_channel( relay_root_origin, @@ -179,14 +173,6 @@ fn force_open_hrmp_channel_for_system_para_works() { assert_expected_events!( Polkadot, vec![ - // Sender deposit is reserved for System Parachain's Sovereign account - RuntimeEvent::Balances(pallet_balances::Event::Reserved { who, .. 
}) =>{ - who: *who == system_para_sovereign_account, - }, - // Recipient deposit is reserved for Parachain's Sovereign account - RuntimeEvent::Balances(pallet_balances::Event::Reserved { who, .. }) =>{ - who: *who == para_a_sovereign_account, - }, // HRMP channel forced opened RuntimeEvent::Hrmp( polkadot_runtime_parachains::hrmp::Event::HrmpChannelForceOpened( diff --git a/polkadot/runtime/kusama/src/weights/runtime_parachains_hrmp.rs b/polkadot/runtime/kusama/src/weights/runtime_parachains_hrmp.rs index c13a8413e410..f2bc2aa2b085 100644 --- a/polkadot/runtime/kusama/src/weights/runtime_parachains_hrmp.rs +++ b/polkadot/runtime/kusama/src/weights/runtime_parachains_hrmp.rs @@ -282,4 +282,46 @@ impl runtime_parachains::hrmp::WeightInfo for WeightInf .saturating_add(T::DbWeight::get().reads(13)) .saturating_add(T::DbWeight::get().writes(8)) } + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannels` (r:1 w:0) + /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:1 w:0) + /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:2 w:2) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:2 w:2) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:1 w:0) + /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:1 w:1) + /// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn establish_system_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `417` + // Estimated: `6357` + // Minimum execution time: 629_674_000 picoseconds. + Weight::from_parts(640_174_000, 0) + .saturating_add(Weight::from_parts(0, 6357)) + .saturating_add(T::DbWeight::get().reads(12)) + .saturating_add(T::DbWeight::get().writes(8)) + } + /// Storage: `Hrmp::HrmpChannels` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn poke_channel_deposits() -> Weight { + // Proof Size summary in bytes: + // Measured: `263` + // Estimated: `3728` + // Minimum execution time: 173_371_000 picoseconds. 
+		Weight::from_parts(175_860_000, 0)
+			.saturating_add(Weight::from_parts(0, 3728))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
 }
diff --git a/polkadot/runtime/parachains/src/hrmp.rs b/polkadot/runtime/parachains/src/hrmp.rs
index cdb38ba6a8ea..3f0a5e0830cb 100644
--- a/polkadot/runtime/parachains/src/hrmp.rs
+++ b/polkadot/runtime/parachains/src/hrmp.rs
@@ -21,13 +21,16 @@ use crate::{
 use frame_support::{pallet_prelude::*, traits::ReservableCurrency, DefaultNoBound};
 use frame_system::pallet_prelude::*;
 use parity_scale_codec::{Decode, Encode};
-use polkadot_parachain_primitives::primitives::HorizontalMessages;
+use polkadot_parachain_primitives::primitives::{HorizontalMessages, IsSystem};
 use primitives::{
 	Balance, Hash, HrmpChannelId, Id as ParaId, InboundHrmpMessage, OutboundHrmpMessage,
 	SessionIndex,
 };
 use scale_info::TypeInfo;
-use sp_runtime::traits::{AccountIdConversion, BlakeTwo256, Hash as HashT, UniqueSaturatedInto};
+use sp_runtime::{
+	traits::{AccountIdConversion, BlakeTwo256, Hash as HashT, UniqueSaturatedInto, Zero},
+	ArithmeticError,
+};
 use sp_std::{
 	collections::{btree_map::BTreeMap, btree_set::BTreeSet},
 	fmt, mem,
@@ -60,6 +63,8 @@ pub trait WeightInfo {
 	fn hrmp_cancel_open_request(c: u32) -> Weight;
 	fn clean_open_channel_requests(c: u32) -> Weight;
 	fn force_open_hrmp_channel(c: u32) -> Weight;
+	fn establish_system_channel() -> Weight;
+	fn poke_channel_deposits() -> Weight;
 }
 
 /// A weight info that is only suitable for testing.
@@ -93,6 +98,12 @@ impl WeightInfo for TestWeightInfo {
 	fn force_open_hrmp_channel(_: u32) -> Weight {
 		Weight::MAX
 	}
+	fn establish_system_channel() -> Weight {
+		Weight::MAX
+	}
+	fn poke_channel_deposits() -> Weight {
+		Weight::MAX
+	}
 }
 
 /// A description of a request to open an HRMP channel.
@@ -279,6 +290,12 @@ pub mod pallet {
 		/// An HRMP channel was opened via Root origin.
 		/// `[sender, recipient, proposed_max_capacity, proposed_max_message_size]`
 		HrmpChannelForceOpened(ParaId, ParaId, u32, u32),
+		/// An HRMP channel was opened between two system chains.
+		/// `[sender, recipient, proposed_max_capacity, proposed_max_message_size]`
+		HrmpSystemChannelOpened(ParaId, ParaId, u32, u32),
+		/// An HRMP channel's deposits were updated.
+		/// `[sender, recipient]`
+		OpenChannelDepositsUpdated(ParaId, ParaId),
 	}
 
 	#[pallet::error]
@@ -321,6 +338,8 @@ pub mod pallet {
 		OpenHrmpChannelAlreadyConfirmed,
 		/// The provided witness data is wrong.
 		WrongWitness,
+		/// The channel between these two chains cannot be authorized.
+		ChannelCreationNotAuthorized,
 	}
 
 	/// The set of pending HRMP open channel requests.
@@ -600,8 +619,8 @@ pub mod pallet {
 		/// the `max_capacity` and `max_message_size` are still subject to the Relay Chain's
 		/// configured limits.
 		///
-		/// Expected use is when one of the `ParaId`s involved in the channel is governed by the
-		/// Relay Chain, e.g. a system parachain.
+		/// Expected use is when one (and only one) of the `ParaId`s involved in the channel is
+		/// governed by the system, e.g. a system parachain.
 		///
 		/// Origin must be the `ChannelManager`.
 		#[pallet::call_index(7)]
@@ -628,7 +647,8 @@ pub mod pallet {
 				0
 			};
 
-			// Now we proceed with normal init/accept.
+			// Now we proceed with normal init/accept. Note that deposits are waived when either
+			// party is a system chain (see `init_open_channel` and `accept_open_channel`).
 			Self::init_open_channel(sender, recipient, max_capacity, max_message_size)?;
 			Self::accept_open_channel(recipient, sender)?;
 			Self::deposit_event(Event::HrmpChannelForceOpened(
@@ -640,6 +660,146 @@ pub mod pallet {
 
 			Ok(Some(<T as Config>::WeightInfo::force_open_hrmp_channel(cancel_request)).into())
 		}
+
+		/// Establish an HRMP channel between two system chains. If the channel does not already
+		/// exist, the transaction fees will be refunded to the caller. The system does not take
+		/// deposits for channels between system chains, and automatically sets the message number
+		/// and size limits to the maximum allowed by the network's configuration.
+		///
+		/// Arguments:
+		///
+		/// - `sender`: A system chain, `ParaId`.
+		/// - `recipient`: A system chain, `ParaId`.
+		///
+		/// Any signed origin can call this function, but _both_ inputs MUST be system chains. If
+		/// the channel does not exist yet, there is no fee.
+		#[pallet::call_index(8)]
+		#[pallet::weight(<T as Config>::WeightInfo::establish_system_channel())]
+		pub fn establish_system_channel(
+			origin: OriginFor<T>,
+			sender: ParaId,
+			recipient: ParaId,
+		) -> DispatchResultWithPostInfo {
+			let _caller = ensure_signed(origin)?;
+
+			// both chains must be system
+			ensure!(
+				sender.is_system() && recipient.is_system(),
+				Error::<T>::ChannelCreationNotAuthorized
+			);
+
+			let config = <configuration::Pallet<T>>::config();
+			let max_message_size = config.hrmp_channel_max_message_size;
+			let max_capacity = config.hrmp_channel_max_capacity;
+
+			Self::init_open_channel(sender, recipient, max_capacity, max_message_size)?;
+			Self::accept_open_channel(recipient, sender)?;
+
+			Self::deposit_event(Event::HrmpSystemChannelOpened(
+				sender,
+				recipient,
+				max_capacity,
+				max_message_size,
+			));
+
+			Ok(Pays::No.into())
+		}
+
+		/// Update the deposits held for an HRMP channel to the latest `Configuration`. Channels
+		/// with system chains do not require a deposit.
+		///
+		/// Arguments:
+		///
+		/// - `sender`: A chain, `ParaId`.
+		/// - `recipient`: A chain, `ParaId`.
+		///
+		/// Any signed origin can call this function.
+		#[pallet::call_index(9)]
+		#[pallet::weight(<T as Config>::WeightInfo::poke_channel_deposits())]
+		pub fn poke_channel_deposits(
+			origin: OriginFor<T>,
+			sender: ParaId,
+			recipient: ParaId,
+		) -> DispatchResult {
+			let _caller = ensure_signed(origin)?;
+			let channel_id = HrmpChannelId { sender, recipient };
+			let is_system = sender.is_system() || recipient.is_system();
+
+			let config = <configuration::Pallet<T>>::config();
+
+			// Channels with and amongst the system do not require a deposit.
+			let (new_sender_deposit, new_recipient_deposit) = if is_system {
+				(0, 0)
+			} else {
+				(config.hrmp_sender_deposit, config.hrmp_recipient_deposit)
+			};
+
+			let _ = HrmpChannels::<T>::mutate(&channel_id, |channel| -> DispatchResult {
+				if let Some(ref mut channel) = channel {
+					let current_sender_deposit = channel.sender_deposit;
+					let current_recipient_deposit = channel.recipient_deposit;
+
+					// nothing to update
+					if current_sender_deposit == new_sender_deposit &&
+						current_recipient_deposit == new_recipient_deposit
+					{
+						return Ok(())
+					}
+
+					// sender
+					if current_sender_deposit > new_sender_deposit {
+						// Can never underflow, but be paranoid.
+						let amount = current_sender_deposit
+							.checked_sub(new_sender_deposit)
+							.ok_or(ArithmeticError::Underflow)?;
+						T::Currency::unreserve(
+							&channel_id.sender.into_account_truncating(),
+							// The difference should always be convertible into `Balance`, but be
+							// paranoid and do nothing in case.
+ amount.try_into().unwrap_or(Zero::zero()), + ); + } else if current_sender_deposit < new_sender_deposit { + let amount = new_sender_deposit + .checked_sub(current_sender_deposit) + .ok_or(ArithmeticError::Underflow)?; + T::Currency::reserve( + &channel_id.sender.into_account_truncating(), + amount.try_into().unwrap_or(Zero::zero()), + )?; + } + + // recipient + if current_recipient_deposit > new_recipient_deposit { + let amount = current_recipient_deposit + .checked_sub(new_recipient_deposit) + .ok_or(ArithmeticError::Underflow)?; + T::Currency::unreserve( + &channel_id.recipient.into_account_truncating(), + amount.try_into().unwrap_or(Zero::zero()), + ); + } else if current_recipient_deposit < new_recipient_deposit { + let amount = new_recipient_deposit + .checked_sub(current_recipient_deposit) + .ok_or(ArithmeticError::Underflow)?; + T::Currency::reserve( + &channel_id.recipient.into_account_truncating(), + amount.try_into().unwrap_or(Zero::zero()), + )?; + } + + // update storage + channel.sender_deposit = new_sender_deposit; + channel.recipient_deposit = new_recipient_deposit; + } else { + return Err(Error::::OpenHrmpChannelDoesntExist.into()) + } + Ok(()) + })?; + + Self::deposit_event(Event::OpenChannelDepositsUpdated(sender, recipient)); + + Ok(()) + } } } @@ -817,6 +977,10 @@ impl Pallet { "can't be `None` due to the invariant that the list contains the same items as the set; qed", ); + let system_channel = channel_id.sender.is_system() || channel_id.recipient.is_system(); + let sender_deposit = request.sender_deposit; + let recipient_deposit = if system_channel { 0 } else { config.hrmp_recipient_deposit }; + if request.confirmed { if >::is_valid_para(channel_id.sender) && >::is_valid_para(channel_id.recipient) @@ -824,8 +988,8 @@ impl Pallet { HrmpChannels::::insert( &channel_id, HrmpChannel { - sender_deposit: request.sender_deposit, - recipient_deposit: config.hrmp_recipient_deposit, + sender_deposit, + recipient_deposit, max_capacity: request.max_capacity, max_total_size: request.max_total_size, max_message_size: request.max_message_size, @@ -1173,7 +1337,9 @@ impl Pallet { } /// Initiate opening a channel from a parachain to a given recipient with given channel - /// parameters. + /// parameters. If neither chain is part of the system, then a deposit from the `Configuration` + /// will be required for `origin` (the sender) upon opening the request and the `recipient` upon + /// accepting it. /// /// Basically the same as [`hrmp_init_open_channel`](Pallet::hrmp_init_open_channel) but /// intended for calling directly from other pallets rather than dispatched. @@ -1219,10 +1385,15 @@ impl Pallet { Error::::OpenHrmpChannelLimitExceeded, ); - T::Currency::reserve( - &origin.into_account_truncating(), - config.hrmp_sender_deposit.unique_saturated_into(), - )?; + // Do not require deposits for channels with or amongst the system. + let is_system = origin.is_system() || recipient.is_system(); + let deposit = if is_system { 0 } else { config.hrmp_sender_deposit }; + if !deposit.is_zero() { + T::Currency::reserve( + &origin.into_account_truncating(), + deposit.unique_saturated_into(), + )?; + } // mutating storage directly now -- shall not bail henceforth. 
@@ -1232,7 +1403,7 @@ impl Pallet { HrmpOpenChannelRequest { confirmed: false, _age: 0, - sender_deposit: config.hrmp_sender_deposit, + sender_deposit: deposit, max_capacity: proposed_max_capacity, max_message_size: proposed_max_message_size, max_total_size: config.hrmp_channel_max_total_size, @@ -1254,7 +1425,7 @@ impl Pallet { if let Err(dmp::QueueDownwardMessageError::ExceedsMaxMessageSize) = >::queue_downward_message(&config, recipient, notification_bytes) { - // this should never happen unless the max downward message size is configured to an + // this should never happen unless the max downward message size is configured to a // jokingly small number. log::error!( target: "runtime::hrmp", @@ -1287,10 +1458,15 @@ impl Pallet { Error::::AcceptHrmpChannelLimitExceeded, ); - T::Currency::reserve( - &origin.into_account_truncating(), - config.hrmp_recipient_deposit.unique_saturated_into(), - )?; + // Do not require deposits for channels with or amongst the system. + let is_system = origin.is_system() || sender.is_system(); + let deposit = if is_system { 0 } else { config.hrmp_recipient_deposit }; + if !deposit.is_zero() { + T::Currency::reserve( + &origin.into_account_truncating(), + deposit.unique_saturated_into(), + )?; + } // persist the updated open channel request and then increment the number of accepted // channels. diff --git a/polkadot/runtime/parachains/src/hrmp/benchmarking.rs b/polkadot/runtime/parachains/src/hrmp/benchmarking.rs index 3fe347a7bcba..9ffa264a7dc8 100644 --- a/polkadot/runtime/parachains/src/hrmp/benchmarking.rs +++ b/polkadot/runtime/parachains/src/hrmp/benchmarking.rs @@ -304,14 +304,13 @@ frame_benchmarking::benchmarks! { let sender_origin: crate::Origin = 1u32.into(); let recipient_id: ParaId = 2u32.into(); - // make sure para is registered, and has enough balance. + // Make sure para is registered. The sender does actually need the normal deposit because it + // is first going the "init" route. let ed = T::Currency::minimum_balance(); let sender_deposit: BalanceOf = Configuration::::config().hrmp_sender_deposit.unique_saturated_into(); - let recipient_deposit: BalanceOf = - Configuration::::config().hrmp_recipient_deposit.unique_saturated_into(); register_parachain_with_balance::(sender_id, sender_deposit + ed); - register_parachain_with_balance::(recipient_id, recipient_deposit + ed); + register_parachain_with_balance::(recipient_id, Zero::zero()); let capacity = Configuration::::config().hrmp_channel_max_capacity; let message_size = Configuration::::config().hrmp_channel_max_message_size; @@ -351,6 +350,72 @@ frame_benchmarking::benchmarks! { Event::::HrmpChannelForceOpened(sender_id, recipient_id, capacity, message_size).into() ); } + + establish_system_channel { + let sender_id: ParaId = 1u32.into(); + let recipient_id: ParaId = 2u32.into(); + + let caller: T::AccountId = frame_benchmarking::whitelisted_caller(); + let config = Configuration::::config(); + + // make sure para is registered, and has zero balance. 
+ register_parachain_with_balance::(sender_id, Zero::zero()); + register_parachain_with_balance::(recipient_id, Zero::zero()); + + let capacity = config.hrmp_channel_max_capacity; + let message_size = config.hrmp_channel_max_message_size; + }: _(frame_system::RawOrigin::Signed(caller), sender_id, recipient_id) + verify { + assert_last_event::( + Event::::HrmpSystemChannelOpened(sender_id, recipient_id, capacity, message_size).into() + ); + } + + poke_channel_deposits { + let sender_id: ParaId = 1u32.into(); + let recipient_id: ParaId = 2u32.into(); + let channel_id = HrmpChannelId {sender: sender_id, recipient: recipient_id }; + + let caller: T::AccountId = frame_benchmarking::whitelisted_caller(); + let config = Configuration::::config(); + + // make sure para is registered, and has balance to reserve. + let ed = T::Currency::minimum_balance(); + let sender_deposit: BalanceOf = config.hrmp_sender_deposit.unique_saturated_into(); + let recipient_deposit: BalanceOf = config.hrmp_recipient_deposit.unique_saturated_into(); + register_parachain_with_balance::(sender_id, ed + sender_deposit); + register_parachain_with_balance::(recipient_id, ed + recipient_deposit); + + // Our normal establishment won't actually reserve deposits, so just insert them directly. + HrmpChannels::::insert( + &channel_id, + HrmpChannel { + sender_deposit: config.hrmp_sender_deposit, + recipient_deposit: config.hrmp_recipient_deposit, + max_capacity: config.hrmp_channel_max_capacity, + max_total_size: config.hrmp_channel_max_total_size, + max_message_size: config.hrmp_channel_max_message_size, + msg_count: 0, + total_size: 0, + mqc_head: None, + }, + ); + // Actually reserve the deposits. + let _ = T::Currency::reserve(&sender_id.into_account_truncating(), sender_deposit); + let _ = T::Currency::reserve(&recipient_id.into_account_truncating(), recipient_deposit); + }: _(frame_system::RawOrigin::Signed(caller), sender_id, recipient_id) + verify { + assert_last_event::( + Event::::OpenChannelDepositsUpdated(sender_id, recipient_id).into() + ); + let channel = HrmpChannels::::get(&channel_id).unwrap(); + // Check that the deposit was updated in the channel state. + assert_eq!(channel.sender_deposit, 0); + assert_eq!(channel.recipient_deposit, 0); + // And that the funds were unreserved. + assert_eq!(T::Currency::reserved_balance(&sender_id.into_account_truncating()), 0u128.unique_saturated_into()); + assert_eq!(T::Currency::reserved_balance(&recipient_id.into_account_truncating()), 0u128.unique_saturated_into()); + } } frame_benchmarking::impl_benchmark_test_suite!( diff --git a/polkadot/runtime/parachains/src/hrmp/tests.rs b/polkadot/runtime/parachains/src/hrmp/tests.rs index 8cfaf48d10ef..236745b7cc35 100644 --- a/polkadot/runtime/parachains/src/hrmp/tests.rs +++ b/polkadot/runtime/parachains/src/hrmp/tests.rs @@ -14,13 +14,17 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . +// NOTE: System chains, identified by ParaId < 2000, are treated as special in HRMP channel +// initialization. Namely, they do not require a deposit if even one ParaId is a system para. If +// both paras are system chains, then they are also configured to the system's max configuration. 
+ use super::*; use crate::mock::{ deregister_parachain, new_test_ext, register_parachain, register_parachain_with_balance, Configuration, Hrmp, MockGenesisConfig, Paras, ParasShared, RuntimeEvent as MockEvent, RuntimeOrigin, System, Test, }; -use frame_support::assert_noop; +use frame_support::{assert_noop, assert_ok}; use primitives::BlockNumber; use std::collections::BTreeMap; @@ -133,10 +137,10 @@ fn empty_state_consistent_state() { #[test] fn open_channel_works() { - let para_a = 1.into(); - let para_a_origin: crate::Origin = 1.into(); - let para_b = 3.into(); - let para_b_origin: crate::Origin = 3.into(); + let para_a = 2001.into(); + let para_a_origin: crate::Origin = 2001.into(); + let para_b = 2003.into(); + let para_b_origin: crate::Origin = 2003.into(); new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { // We need both A & B to be registered and alive parachains. @@ -171,13 +175,16 @@ fn open_channel_works() { #[test] fn force_open_channel_works() { let para_a = 1.into(); - let para_b = 3.into(); + let para_b = 2003.into(); new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { // We need both A & B to be registered and live parachains. register_parachain(para_a); register_parachain(para_b); + let para_b_free_balance = + ::Currency::free_balance(¶_b.into_account_truncating()); + run_to_block(5, Some(vec![4, 5])); Hrmp::force_open_hrmp_channel(RuntimeOrigin::root(), para_a, para_b, 2, 8).unwrap(); Hrmp::assert_storage_consistency_exhaustive(); @@ -193,14 +200,19 @@ fn force_open_channel_works() { // Now let the session change happen and thus open the channel. run_to_block(8, Some(vec![8])); assert!(channel_exists(para_a, para_b)); + // Because para_a is a system chain, para_b's free balance should not have changed. + assert_eq!( + ::Currency::free_balance(¶_b.into_account_truncating()), + para_b_free_balance + ); }); } #[test] fn force_open_channel_works_with_existing_request() { - let para_a = 1.into(); - let para_a_origin: crate::Origin = 1.into(); - let para_b = 3.into(); + let para_a = 2001.into(); + let para_a_origin: crate::Origin = 2001.into(); + let para_b = 2003.into(); new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { // We need both A & B to be registered and live parachains. @@ -240,11 +252,127 @@ fn force_open_channel_works_with_existing_request() { }); } +#[test] +fn open_system_channel_works() { + let para_a = 1.into(); + let para_b = 3.into(); + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + // We need both A & B to be registered and live parachains. + register_parachain(para_a); + register_parachain(para_b); + + run_to_block(5, Some(vec![4, 5])); + Hrmp::establish_system_channel(RuntimeOrigin::signed(1), para_a, para_b).unwrap(); + Hrmp::assert_storage_consistency_exhaustive(); + assert!(System::events().iter().any(|record| record.event == + MockEvent::Hrmp(Event::HrmpSystemChannelOpened(para_a, para_b, 2, 8)))); + + // Advance to a block 6, but without session change. That means that the channel has + // not been created yet. + run_to_block(6, None); + assert!(!channel_exists(para_a, para_b)); + Hrmp::assert_storage_consistency_exhaustive(); + + // Now let the session change happen and thus open the channel. 
+ run_to_block(8, Some(vec![8])); + assert!(channel_exists(para_a, para_b)); + }); +} + +#[test] +fn open_system_channel_does_not_work_for_non_system_chains() { + let para_a = 2001.into(); + let para_b = 2003.into(); + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + // We need both A & B to be registered and live parachains. + register_parachain(para_a); + register_parachain(para_b); + + run_to_block(5, Some(vec![4, 5])); + assert_noop!( + Hrmp::establish_system_channel(RuntimeOrigin::signed(1), para_a, para_b), + Error::::ChannelCreationNotAuthorized + ); + Hrmp::assert_storage_consistency_exhaustive(); + }); +} + +#[test] +fn open_system_channel_does_not_work_with_one_non_system_chain() { + let para_a = 1.into(); + let para_b = 2003.into(); + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + // We need both A & B to be registered and live parachains. + register_parachain(para_a); + register_parachain(para_b); + + run_to_block(5, Some(vec![4, 5])); + assert_noop!( + Hrmp::establish_system_channel(RuntimeOrigin::signed(1), para_a, para_b), + Error::::ChannelCreationNotAuthorized + ); + Hrmp::assert_storage_consistency_exhaustive(); + }); +} + +#[test] +fn poke_deposits_works() { + let para_a = 1.into(); + let para_b = 2001.into(); + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + // We need both A & B to be registered and live parachains. + register_parachain_with_balance(para_a, 200); + register_parachain_with_balance(para_b, 200); + + let config = Configuration::config(); + let channel_id = HrmpChannelId { sender: para_a, recipient: para_b }; + + // Our normal establishment won't actually reserve deposits, so just insert them directly. + HrmpChannels::::insert( + &channel_id, + HrmpChannel { + sender_deposit: config.hrmp_sender_deposit, + recipient_deposit: config.hrmp_recipient_deposit, + max_capacity: config.hrmp_channel_max_capacity, + max_total_size: config.hrmp_channel_max_total_size, + max_message_size: config.hrmp_channel_max_message_size, + msg_count: 0, + total_size: 0, + mqc_head: None, + }, + ); + // reserve funds + assert_ok!(::Currency::reserve( + ¶_a.into_account_truncating(), + config.hrmp_sender_deposit + )); + assert_ok!(::Currency::reserve( + ¶_b.into_account_truncating(), + config.hrmp_recipient_deposit + )); + + assert_ok!(Hrmp::poke_channel_deposits(RuntimeOrigin::signed(1), para_a, para_b)); + + assert_eq!( + ::Currency::reserved_balance(¶_a.into_account_truncating()), + 0 + ); + assert_eq!( + ::Currency::reserved_balance(¶_b.into_account_truncating()), + 0 + ); + }); +} + #[test] fn close_channel_works() { - let para_a = 5.into(); - let para_b = 2.into(); - let para_b_origin: crate::Origin = 2.into(); + let para_a = 2005.into(); + let para_b = 2002.into(); + let para_b_origin: crate::Origin = 2002.into(); new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { register_parachain(para_a); @@ -275,8 +403,8 @@ fn close_channel_works() { #[test] fn send_recv_messages() { - let para_a = 32.into(); - let para_b = 64.into(); + let para_a = 2032.into(); + let para_b = 2064.into(); let mut genesis = GenesisConfigBuilder::default(); genesis.hrmp_channel_max_message_size = 20; @@ -358,8 +486,8 @@ fn hrmp_mqc_head_fixture() { #[test] fn accept_incoming_request_and_offboard() { - let para_a = 32.into(); - let para_b = 64.into(); + let para_a = 2032.into(); + let para_b = 2064.into(); new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { register_parachain(para_a); 
@@ -380,9 +508,9 @@ fn accept_incoming_request_and_offboard() { #[test] fn check_sent_messages() { - let para_a = 32.into(); - let para_b = 64.into(); - let para_c = 97.into(); + let para_a = 2032.into(); + let para_b = 2064.into(); + let para_c = 2097.into(); new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { register_parachain(para_a); @@ -444,8 +572,8 @@ fn check_sent_messages() { fn verify_externally_accessible() { use primitives::{well_known_keys, AbridgedHrmpChannel}; - let para_a = 20.into(); - let para_b = 21.into(); + let para_a = 2020.into(); + let para_b = 2021.into(); new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { // Register two parachains, wait until a session change, then initiate channel open @@ -502,8 +630,8 @@ fn verify_externally_accessible() { #[test] fn charging_deposits() { - let para_a = 32.into(); - let para_b = 64.into(); + let para_a = 2032.into(); + let para_b = 2064.into(); new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { register_parachain_with_balance(para_a, 0); @@ -532,8 +660,8 @@ fn charging_deposits() { #[test] fn refund_deposit_on_normal_closure() { - let para_a = 32.into(); - let para_b = 64.into(); + let para_a = 2032.into(); + let para_b = 2064.into(); let mut genesis = GenesisConfigBuilder::default(); genesis.hrmp_sender_deposit = 20; @@ -565,8 +693,8 @@ fn refund_deposit_on_normal_closure() { #[test] fn refund_deposit_on_offboarding() { - let para_a = 32.into(); - let para_b = 64.into(); + let para_a = 2032.into(); + let para_b = 2064.into(); let mut genesis = GenesisConfigBuilder::default(); genesis.hrmp_sender_deposit = 20; @@ -605,8 +733,8 @@ fn refund_deposit_on_offboarding() { #[test] fn no_dangling_open_requests() { - let para_a = 32.into(); - let para_b = 64.into(); + let para_a = 2032.into(); + let para_b = 2064.into(); let mut genesis = GenesisConfigBuilder::default(); genesis.hrmp_sender_deposit = 20; @@ -643,8 +771,8 @@ fn no_dangling_open_requests() { #[test] fn cancel_pending_open_channel_request() { - let para_a = 32.into(); - let para_b = 64.into(); + let para_a = 2032.into(); + let para_b = 2064.into(); let mut genesis = GenesisConfigBuilder::default(); genesis.hrmp_sender_deposit = 20; @@ -675,8 +803,8 @@ fn cancel_pending_open_channel_request() { #[test] fn watermark_maxed_out_at_relay_parent() { - let para_a = 32.into(); - let para_b = 64.into(); + let para_a = 2032.into(); + let para_b = 2064.into(); let mut genesis = GenesisConfigBuilder::default(); genesis.hrmp_channel_max_message_size = 20; diff --git a/polkadot/runtime/polkadot/src/weights/runtime_parachains_hrmp.rs b/polkadot/runtime/polkadot/src/weights/runtime_parachains_hrmp.rs index 82d8c30bacad..73a08d33eed8 100644 --- a/polkadot/runtime/polkadot/src/weights/runtime_parachains_hrmp.rs +++ b/polkadot/runtime/polkadot/src/weights/runtime_parachains_hrmp.rs @@ -292,4 +292,46 @@ impl runtime_parachains::hrmp::WeightInfo for WeightInf .saturating_add(T::DbWeight::get().reads(14)) .saturating_add(T::DbWeight::get().writes(8)) } + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannels` (r:1 w:0) + /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:1 
w:0) + /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:2 w:2) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:2 w:2) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:1 w:0) + /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:1 w:1) + /// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn establish_system_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `417` + // Estimated: `6357` + // Minimum execution time: 629_674_000 picoseconds. + Weight::from_parts(640_174_000, 0) + .saturating_add(Weight::from_parts(0, 6357)) + .saturating_add(T::DbWeight::get().reads(12)) + .saturating_add(T::DbWeight::get().writes(8)) + } + /// Storage: `Hrmp::HrmpChannels` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn poke_channel_deposits() -> Weight { + // Proof Size summary in bytes: + // Measured: `263` + // Estimated: `3728` + // Minimum execution time: 173_371_000 picoseconds. + Weight::from_parts(175_860_000, 0) + .saturating_add(Weight::from_parts(0, 3728)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_hrmp.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_hrmp.rs index 9f1cf65efa64..417820e6627f 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_hrmp.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_hrmp.rs @@ -289,4 +289,46 @@ impl runtime_parachains::hrmp::WeightInfo for WeightInf .saturating_add(T::DbWeight::get().reads(14)) .saturating_add(T::DbWeight::get().writes(8)) } + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannels` (r:1 w:0) + /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:1 w:0) + /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:2 w:2) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: 
`Dmp::DownwardMessageQueueHeads` (r:2 w:2) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:1 w:0) + /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:1 w:1) + /// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn establish_system_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `417` + // Estimated: `6357` + // Minimum execution time: 629_674_000 picoseconds. + Weight::from_parts(640_174_000, 0) + .saturating_add(Weight::from_parts(0, 6357)) + .saturating_add(T::DbWeight::get().reads(12)) + .saturating_add(T::DbWeight::get().writes(8)) + } + /// Storage: `Hrmp::HrmpChannels` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn poke_channel_deposits() -> Weight { + // Proof Size summary in bytes: + // Measured: `263` + // Estimated: `3728` + // Minimum execution time: 173_371_000 picoseconds. + Weight::from_parts(175_860_000, 0) + .saturating_add(Weight::from_parts(0, 3728)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/polkadot/runtime/westend/src/weights/runtime_parachains_hrmp.rs b/polkadot/runtime/westend/src/weights/runtime_parachains_hrmp.rs index e6ff97fa0878..9beb15303d87 100644 --- a/polkadot/runtime/westend/src/weights/runtime_parachains_hrmp.rs +++ b/polkadot/runtime/westend/src/weights/runtime_parachains_hrmp.rs @@ -282,4 +282,46 @@ impl runtime_parachains::hrmp::WeightInfo for WeightInf .saturating_add(T::DbWeight::get().reads(13)) .saturating_add(T::DbWeight::get().writes(8)) } + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannels` (r:1 w:0) + /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:1 w:0) + /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:2 w:2) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:2 w:2) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:1 w:0) + /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:1 w:1) + /// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn establish_system_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `417` + // Estimated: `6357` + // Minimum 
execution time: 629_674_000 picoseconds. + Weight::from_parts(640_174_000, 0) + .saturating_add(Weight::from_parts(0, 6357)) + .saturating_add(T::DbWeight::get().reads(12)) + .saturating_add(T::DbWeight::get().writes(8)) + } + /// Storage: `Hrmp::HrmpChannels` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn poke_channel_deposits() -> Weight { + // Proof Size summary in bytes: + // Measured: `263` + // Estimated: `3728` + // Minimum execution time: 173_371_000 picoseconds. + Weight::from_parts(175_860_000, 0) + .saturating_add(Weight::from_parts(0, 3728)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } From cdbdbc7546b34907a0c5843699d4287179b692fe Mon Sep 17 00:00:00 2001 From: Xiliang Chen Date: Wed, 20 Sep 2023 01:27:42 +1200 Subject: [PATCH 27/69] allow governance body on parachain to have sovereign account (#1291) The goal is to allow the Fellowship on the Collectives chain to have a sovereign account on Polkadot so that we can add it as an identity registrar. This will allow the Fellows origin to provide judgements for Fellowship members. This currently allows any body on any parachain, including non-system parachains, to have a sovereign account. I cannot think of any reason why that may be an issue, but let me know if I should change it to filter only system parachains. [This](https://gist.github.com/xlc/ec61bfa4e9f6d62da27d30141ad2c72b) is the testing script. Original PR: https://github.com/paritytech/polkadot/pull/7518 --- polkadot/runtime/kusama/src/xcm_config.rs | 11 +++++++---- polkadot/runtime/polkadot/src/xcm_config.rs | 10 ++++++---- polkadot/runtime/rococo/src/xcm_config.rs | 19 +++++++++++++------ polkadot/runtime/westend/src/xcm_config.rs | 18 ++++++++++++------ polkadot/xcm/xcm-builder/src/lib.rs | 4 ++-- .../xcm-builder/src/location_conversion.rs | 11 +++++++++++ 6 files changed, 51 insertions(+), 22 deletions(-) diff --git a/polkadot/runtime/kusama/src/xcm_config.rs b/polkadot/runtime/kusama/src/xcm_config.rs index 59ea937e11c1..40ecc83fe641 100644 --- a/polkadot/runtime/kusama/src/xcm_config.rs +++ b/polkadot/runtime/kusama/src/xcm_config.rs @@ -37,10 +37,11 @@ use xcm::latest::prelude::*; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, ChildParachainAsNative, - ChildParachainConvertsVia, CurrencyAdapter as XcmCurrencyAdapter, IsChildSystemParachain, - IsConcrete, MintLocation, OriginToPluralityVoice, SignedAccountId32AsNative, - SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, - UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + ChildParachainConvertsVia, CurrencyAdapter as XcmCurrencyAdapter, DescribeBodyTerminal, + DescribeFamily, HashedDescription, IsChildSystemParachain, IsConcrete, MintLocation, + OriginToPluralityVoice, SignedAccountId32AsNative, SignedToAccountId32, + SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, + WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, }; parameter_types! { @@ -67,6 +68,8 @@ pub type SovereignAccountOf = ( ChildParachainConvertsVia<ParaId, AccountId>, // We can directly alias an `AccountId32` into a local account. AccountId32Aliases<ThisNetwork, AccountId>, + // Allow governance body to be used as a sovereign account. + HashedDescription<AccountId, DescribeFamily<DescribeBodyTerminal>>, ); /// Our asset transactor.
This is what allows us to interact with the runtime facilities from the diff --git a/polkadot/runtime/polkadot/src/xcm_config.rs b/polkadot/runtime/polkadot/src/xcm_config.rs index c7b15f7256ba..1110c0d8eb95 100644 --- a/polkadot/runtime/polkadot/src/xcm_config.rs +++ b/polkadot/runtime/polkadot/src/xcm_config.rs @@ -40,10 +40,10 @@ use xcm::latest::prelude::*; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, ChildParachainAsNative, - ChildParachainConvertsVia, CurrencyAdapter as XcmCurrencyAdapter, IsConcrete, MintLocation, - OriginToPluralityVoice, SignedAccountId32AsNative, SignedToAccountId32, - SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, - WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + ChildParachainConvertsVia, CurrencyAdapter as XcmCurrencyAdapter, DescribeBodyTerminal, + DescribeFamily, HashedDescription, IsConcrete, MintLocation, OriginToPluralityVoice, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, + TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, }; parameter_types! { @@ -68,6 +68,8 @@ pub type SovereignAccountOf = ( ChildParachainConvertsVia<ParaId, AccountId>, // We can directly alias an `AccountId32` into a local account. AccountId32Aliases<ThisNetwork, AccountId>, + // Allow governance body to be used as a sovereign account. + HashedDescription<AccountId, DescribeFamily<DescribeBodyTerminal>>, ); /// Our asset transactor. This is what allows us to interact with the runtime assets from the point diff --git a/polkadot/runtime/rococo/src/xcm_config.rs b/polkadot/runtime/rococo/src/xcm_config.rs index 288ee8400dcf..445cb8014357 100644 --- a/polkadot/runtime/rococo/src/xcm_config.rs +++ b/polkadot/runtime/rococo/src/xcm_config.rs @@ -36,10 +36,11 @@ use xcm::latest::prelude::*; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, ChildParachainAsNative, - ChildParachainConvertsVia, CurrencyAdapter as XcmCurrencyAdapter, FixedWeightBounds, - IsChildSystemParachain, IsConcrete, MintLocation, SignedAccountId32AsNative, - SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, - UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + ChildParachainConvertsVia, CurrencyAdapter as XcmCurrencyAdapter, DescribeBodyTerminal, + DescribeFamily, FixedWeightBounds, HashedDescription, IsChildSystemParachain, IsConcrete, + MintLocation, SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, + TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, + WithUniqueTopic, }; use xcm_executor::XcmExecutor; @@ -51,8 +52,14 @@ parameter_types! { pub LocalCheckAccount: (AccountId, MintLocation) = (CheckAccount::get(), MintLocation::Local); } -pub type LocationConverter = - (ChildParachainConvertsVia<ParaId, AccountId>, AccountId32Aliases<ThisNetwork, AccountId>); +pub type LocationConverter = ( + // We can convert a child parachain using the standard `AccountId` conversion. + ChildParachainConvertsVia<ParaId, AccountId>, + // We can directly alias an `AccountId32` into a local account. + AccountId32Aliases<ThisNetwork, AccountId>, + // Allow governance body to be used as a sovereign account. + HashedDescription<AccountId, DescribeFamily<DescribeBodyTerminal>>, +); /// Our asset transactor. This is what allows us to interact with the runtime facilities from the /// point of view of XCM-only concepts like `MultiLocation` and `MultiAsset`.
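The derivation performed by `HashedDescription<AccountId, DescribeFamily<DescribeBodyTerminal>>` is purely location-based: `DescribeBodyTerminal` SCALE-encodes the `Plurality` junction, `DescribeFamily` prefixes the family of the originating chain, and `HashedDescription` hashes the result into an account. Below is a minimal sketch of the child-chain case; the `&[u8]`/`u8` stand-ins for the XCM `BodyId`/`BodyPart` enums are simplifications for illustration, so the exact bytes differ from the real converter.

```rust
// Sketch only: mirrors the converters added in this patch, assuming the
// `parity-scale-codec` and `sp-core` crates. `body_id`/`body_part` are
// simplified stand-ins for the XCM `BodyId`/`BodyPart` types.
use parity_scale_codec::{Compact, Encode};
use sp_core::blake2_256;

/// Derive the relay-side sovereign account of a governance body
/// (`Plurality { id, part }`) living on child parachain `para_id`.
fn body_sovereign_account(para_id: u32, body_id: &[u8], body_part: u8) -> [u8; 32] {
	// `DescribeBodyTerminal`: describe the local `Plurality` junction.
	let body = (b"Body", body_id, body_part).encode();
	// `DescribeFamily`: prefix the description with the originating family.
	let description = (b"ChildChain", Compact::<u32>::from(para_id), body).encode();
	// `HashedDescription`: hash the description into an `AccountId32`.
	blake2_256(&description)
}
```

Because the account depends only on the description of the location, any chain can compute it statelessly, which is what allows the Fellowship's sovereign account to be registered as an identity registrar.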
diff --git a/polkadot/runtime/westend/src/xcm_config.rs b/polkadot/runtime/westend/src/xcm_config.rs index afa0733b4c9a..92dcee150aab 100644 --- a/polkadot/runtime/westend/src/xcm_config.rs +++ b/polkadot/runtime/westend/src/xcm_config.rs @@ -35,10 +35,10 @@ use xcm::latest::prelude::*; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, ChildParachainAsNative, - ChildParachainConvertsVia, CurrencyAdapter as XcmCurrencyAdapter, IsChildSystemParachain, - IsConcrete, MintLocation, SignedAccountId32AsNative, SignedToAccountId32, - SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, - WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + ChildParachainConvertsVia, CurrencyAdapter as XcmCurrencyAdapter, DescribeBodyTerminal, + DescribeFamily, HashedDescription, IsChildSystemParachain, IsConcrete, MintLocation, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, + TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, }; use xcm_executor::XcmExecutor; @@ -54,8 +54,14 @@ parameter_types! { pub const BaseDeliveryFee: u128 = CENTS.saturating_mul(3); } -pub type LocationConverter = - (ChildParachainConvertsVia<ParaId, AccountId>, AccountId32Aliases<ThisNetwork, AccountId>); +pub type LocationConverter = ( + // We can convert a child parachain using the standard `AccountId` conversion. + ChildParachainConvertsVia<ParaId, AccountId>, + // We can directly alias an `AccountId32` into a local account. + AccountId32Aliases<ThisNetwork, AccountId>, + // Allow governance body to be used as a sovereign account. + HashedDescription<AccountId, DescribeFamily<DescribeBodyTerminal>>, +); pub type LocalAssetTransactor = XcmCurrencyAdapter< // Use this currency: diff --git a/polkadot/xcm/xcm-builder/src/lib.rs b/polkadot/xcm/xcm-builder/src/lib.rs index e3f910409638..26f2aadadf3d 100644 --- a/polkadot/xcm/xcm-builder/src/lib.rs +++ b/polkadot/xcm/xcm-builder/src/lib.rs @@ -32,8 +32,8 @@ pub use location_conversion::ForeignChainAliasAccount; pub use location_conversion::{ Account32Hash, AccountId32Aliases, AccountKey20Aliases, AliasesIntoAccountId32, ChildParachainConvertsVia, DescribeAccountId32Terminal, DescribeAccountIdTerminal, - DescribeAccountKey20Terminal, DescribeAllTerminal, DescribeFamily, DescribeLocation, - DescribePalletTerminal, DescribeTerminus, GlobalConsensusConvertsFor, + DescribeAccountKey20Terminal, DescribeAllTerminal, DescribeBodyTerminal, DescribeFamily, + DescribeLocation, DescribePalletTerminal, DescribeTerminus, GlobalConsensusConvertsFor, GlobalConsensusParachainConvertsFor, HashedDescription, ParentIsPreset, SiblingParachainConvertsVia, }; diff --git a/polkadot/xcm/xcm-builder/src/location_conversion.rs b/polkadot/xcm/xcm-builder/src/location_conversion.rs index ec23116e0e82..4a5fbbb123be 100644 --- a/polkadot/xcm/xcm-builder/src/location_conversion.rs +++ b/polkadot/xcm/xcm-builder/src/location_conversion.rs @@ -86,11 +86,22 @@ impl DescribeLocation for DescribeAccountKey20Terminal { pub type DescribeAccountIdTerminal = (DescribeAccountId32Terminal, DescribeAccountKey20Terminal); +pub struct DescribeBodyTerminal; +impl DescribeLocation for DescribeBodyTerminal { + fn describe_location(l: &MultiLocation) -> Option<Vec<u8>> { + match (l.parents, &l.interior) { + (0, X1(Plurality { id, part })) => Some((b"Body", id, part).encode()), + _ => return None, + } + } +} + pub type DescribeAllTerminal = ( DescribeTerminus, DescribePalletTerminal, DescribeAccountId32Terminal, DescribeAccountKey20Terminal, +
DescribeBodyTerminal, ); pub struct DescribeFamily<DescribeInterior>(PhantomData<DescribeInterior>); From 771c3fbde7ba3849298d6fc7df390fc6b5a04d2f Mon Sep 17 00:00:00 2001 From: joe petrowski <25483142+joepetrowski@users.noreply.github.com> Date: Wed, 20 Sep 2023 07:28:05 +0200 Subject: [PATCH 28/69] Disable Calls to Identity Pallet (#1476) This PR filters calls from the Identity pallet from all Relay Chain runtimes as preparation to move the identity state and logic to a system parachain within each network. After this change is deployed to a runtime, no more changes such as adding new sub-identities will be possible. The frozen state will be part of the genesis state of the system chain. After the system chain launches, the pallet and all state will be removed from each Relay Chain. Applications and UIs that render display information from this pallet will need to read from the system chain when it launches. --- polkadot/runtime/kusama/src/lib.rs | 21 +++++++++++-------- polkadot/runtime/polkadot/src/lib.rs | 19 +++++++++++++---- polkadot/runtime/rococo/src/lib.rs | 17 ++++++++------- polkadot/runtime/westend/src/lib.rs | 18 +++++++++++++--- .../xcm/xcm-builder/src/universal_exports.rs | 8 +++++-- 5 files changed, 58 insertions(+), 25 deletions(-) diff --git a/polkadot/runtime/kusama/src/lib.rs b/polkadot/runtime/kusama/src/lib.rs index 5cf2b3b83b1f..8d8bd4baacf8 100644 --- a/polkadot/runtime/kusama/src/lib.rs +++ b/polkadot/runtime/kusama/src/lib.rs @@ -63,9 +63,9 @@ use frame_election_provider_support::{ use frame_support::{ construct_runtime, parameter_types, traits::{ - fungible::HoldConsideration, ConstU32, Contains, EitherOf, EitherOfDiverse, InstanceFilter, - KeyOwnerProofSystem, LinearStoragePrice, PrivilegeCmp, ProcessMessage, ProcessMessageError, - StorageMapShim, WithdrawReasons, + fungible::HoldConsideration, ConstU32, Contains, EitherOf, EitherOfDiverse, EverythingBut, + InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, PrivilegeCmp, ProcessMessage, + ProcessMessageError, StorageMapShim, WithdrawReasons, }, weights::{ConstantMultiplier, WeightMeter}, PalletId, @@ -158,11 +158,14 @@ pub fn native_version() -> NativeVersion { NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } } -/// We currently allow all calls. -pub struct BaseFilter; -impl Contains<RuntimeCall> for BaseFilter { - fn contains(_c: &RuntimeCall) -> bool { - true +/// A type to identify calls to the Identity pallet. These will be filtered to prevent invocation, +/// locking the state of the pallet and preventing further updates to identities and sub-identities. +/// The locked state will be the genesis state of a new system chain and then removed from the Relay +/// Chain. +pub struct IdentityCalls; +impl Contains<RuntimeCall> for IdentityCalls { + fn contains(c: &RuntimeCall) -> bool { + matches!(c, RuntimeCall::Identity(_)) } } @@ -172,7 +175,7 @@ parameter_types!
{ } impl frame_system::Config for Runtime { - type BaseCallFilter = frame_support::traits::Everything; + type BaseCallFilter = EverythingBut<IdentityCalls>; type BlockWeights = BlockWeights; type BlockLength = BlockLength; type RuntimeOrigin = RuntimeOrigin; diff --git a/polkadot/runtime/polkadot/src/lib.rs b/polkadot/runtime/polkadot/src/lib.rs index da27771af400..5956b0e155bb 100644 --- a/polkadot/runtime/polkadot/src/lib.rs +++ b/polkadot/runtime/polkadot/src/lib.rs @@ -47,9 +47,9 @@ use frame_election_provider_support::{ use frame_support::{ construct_runtime, parameter_types, traits::{ - fungible::HoldConsideration, ConstU32, Contains, EitherOf, EitherOfDiverse, InstanceFilter, - KeyOwnerProofSystem, LinearStoragePrice, PrivilegeCmp, ProcessMessage, ProcessMessageError, - WithdrawReasons, + fungible::HoldConsideration, ConstU32, Contains, EitherOf, EitherOfDiverse, EverythingBut, + InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, PrivilegeCmp, ProcessMessage, + ProcessMessageError, WithdrawReasons, }, weights::{ConstantMultiplier, WeightMeter}, PalletId, @@ -149,13 +149,24 @@ pub fn native_version() -> NativeVersion { NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } } +/// A type to identify calls to the Identity pallet. These will be filtered to prevent invocation, +/// locking the state of the pallet and preventing further updates to identities and sub-identities. +/// The locked state will be the genesis state of a new system chain and then removed from the Relay +/// Chain. +pub struct IdentityCalls; +impl Contains<RuntimeCall> for IdentityCalls { + fn contains(c: &RuntimeCall) -> bool { + matches!(c, RuntimeCall::Identity(_)) + } +} + parameter_types! { pub const Version: RuntimeVersion = VERSION; pub const SS58Prefix: u8 = 0; } impl frame_system::Config for Runtime { - type BaseCallFilter = frame_support::traits::Everything; + type BaseCallFilter = EverythingBut<IdentityCalls>; type BlockWeights = BlockWeights; type BlockLength = BlockLength; type RuntimeOrigin = RuntimeOrigin; diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 6446ac4f00c1..7046c4640c04 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -60,7 +60,7 @@ use beefy_primitives::{ use frame_support::{ construct_runtime, parameter_types, traits::{ - fungible::HoldConsideration, Contains, EitherOfDiverse, InstanceFilter, + fungible::HoldConsideration, Contains, EitherOfDiverse, EverythingBut, InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, LockIdentifier, PrivilegeCmp, ProcessMessage, ProcessMessageError, StorageMapShim, WithdrawReasons, }, @@ -135,11 +135,14 @@ pub fn native_version() -> NativeVersion { NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } } -/// We currently allow all calls. -pub struct BaseFilter; -impl Contains<RuntimeCall> for BaseFilter { - fn contains(_call: &RuntimeCall) -> bool { - true +/// A type to identify calls to the Identity pallet. These will be filtered to prevent invocation, +/// locking the state of the pallet and preventing further updates to identities and sub-identities. +/// The locked state will be the genesis state of a new system chain and then removed from the Relay +/// Chain. +pub struct IdentityCalls; +impl Contains<RuntimeCall> for IdentityCalls { + fn contains(c: &RuntimeCall) -> bool { + matches!(c, RuntimeCall::Identity(_)) } } @@ -149,7 +152,7 @@ parameter_types!
{ } impl frame_system::Config for Runtime { - type BaseCallFilter = BaseFilter; + type BaseCallFilter = EverythingBut<IdentityCalls>; type BlockWeights = BlockWeights; type BlockLength = BlockLength; type DbWeight = RocksDbWeight; diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 5cb6cbbab20e..9af18b5be2bb 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -29,8 +29,9 @@ use frame_election_provider_support::{bounds::ElectionBoundsBuilder, onchain, Se use frame_support::{ construct_runtime, parameter_types, traits::{ - fungible::HoldConsideration, ConstU32, InstanceFilter, KeyOwnerProofSystem, - LinearStoragePrice, ProcessMessage, ProcessMessageError, WithdrawReasons, + fungible::HoldConsideration, ConstU32, Contains, EverythingBut, InstanceFilter, + KeyOwnerProofSystem, LinearStoragePrice, ProcessMessage, ProcessMessageError, + WithdrawReasons, }, weights::{ConstantMultiplier, WeightMeter}, PalletId, @@ -141,13 +142,24 @@ pub fn native_version() -> NativeVersion { NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } } +/// A type to identify calls to the Identity pallet. These will be filtered to prevent invocation, +/// locking the state of the pallet and preventing further updates to identities and sub-identities. +/// The locked state will be the genesis state of a new system chain and then removed from the Relay +/// Chain. +pub struct IdentityCalls; +impl Contains<RuntimeCall> for IdentityCalls { + fn contains(c: &RuntimeCall) -> bool { + matches!(c, RuntimeCall::Identity(_)) + } +} + parameter_types! { pub const Version: RuntimeVersion = VERSION; pub const SS58Prefix: u8 = 42; } impl frame_system::Config for Runtime { - type BaseCallFilter = frame_support::traits::Everything; + type BaseCallFilter = EverythingBut<IdentityCalls>; type BlockWeights = BlockWeights; type BlockLength = BlockLength; type RuntimeOrigin = RuntimeOrigin; diff --git a/polkadot/xcm/xcm-builder/src/universal_exports.rs b/polkadot/xcm/xcm-builder/src/universal_exports.rs index d1b8c8c721fe..a24631ffa942 100644 --- a/polkadot/xcm/xcm-builder/src/universal_exports.rs +++ b/polkadot/xcm/xcm-builder/src/universal_exports.rs @@ -171,7 +171,9 @@ impl Date: Wed, 20 Sep 2023 15:56:43 +0300 Subject: [PATCH 29/69] Refactor availability-recovery strategies (#1457) Refactors availability-recovery strategies to allow for easily adding new hotpaths and failover mechanisms. The new interface allows chaining multiple `RecoveryStrategy` implementations together, to cleanly express the relationship between them and to share state and code where necessary/possible. This was done in order to aid in implementing new hotpaths like [systematic chunks recovery](https://github.com/paritytech/polkadot-sdk/issues/598) and [fetching from approval checkers](https://github.com/paritytech/polkadot-sdk/issues/575). Thanks to this design, intermediate state can be shared between the strategies. For example, if the systematic chunks recovery retrieved fewer than the needed number of chunks, it can pass them over to the next FetchChunks strategy, which will only need to recover the remaining number of chunks. Draft example of how a systematic chunk recovery strategy would look: https://github.com/paritytech/polkadot-sdk/commit/667d870bdf1470525d66c13929d5eac7249dd995 (notice how easy it was to add and reuse code). Note that this PR doesn't itself add any new strategy; it should fully preserve backwards compatibility in terms of functionality. Follow-up PRs to add new strategies will come. 
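As a rough illustration of the chaining this PR introduces, a recovery task can be pictured as draining a queue of boxed strategies, letting each one run against shared intermediate state and falling through to the next on `Unavailable`. The sketch below is deliberately simplified (synchronous, no subsystem sender or recovery parameters) relative to the real `task.rs` added in this patch:

```rust
// Simplified sketch of the strategy-chaining idea; names follow the new
// `task.rs`, but signatures and types are trimmed for illustration.
use std::collections::VecDeque;

enum RecoveryError { Invalid, Unavailable }

/// Intermediate state shared by all strategies of one task,
/// e.g. the chunks gathered so far.
struct State { received_chunks: usize }

trait RecoveryStrategy {
	/// Try to recover the data, reading and updating the shared state.
	fn run(&mut self, state: &mut State) -> Result<Vec<u8>, RecoveryError>;
	/// Name of the strategy, for logging.
	fn display_name(&self) -> &'static str;
}

struct RecoveryTask {
	strategies: VecDeque<Box<dyn RecoveryStrategy>>,
	state: State,
}

impl RecoveryTask {
	fn run(mut self) -> Result<Vec<u8>, RecoveryError> {
		while let Some(mut strategy) = self.strategies.pop_front() {
			match strategy.run(&mut self.state) {
				// `Unavailable` means this strategy exhausted its sources:
				// fall through to the next one, keeping the gathered state.
				Err(RecoveryError::Unavailable) => continue,
				// Success and hard failures (e.g. `Invalid`) are final.
				res => return res,
			}
		}
		Err(RecoveryError::Unavailable)
	}
}
```

This mirrors how, in the diff below, `handle_recover` builds a `VecDeque` of strategies (optionally `FetchFull` from backers, then `FetchChunks`) and hands it to `RecoveryTask::new`.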
--- Cargo.lock | 1 + .../network/availability-recovery/Cargo.toml | 1 + .../network/availability-recovery/src/lib.rs | 824 +++-------------- .../network/availability-recovery/src/task.rs | 830 ++++++++++++++++++ .../availability-recovery/src/tests.rs | 51 +- 5 files changed, 953 insertions(+), 754 deletions(-) create mode 100644 polkadot/node/network/availability-recovery/src/task.rs diff --git a/Cargo.lock b/Cargo.lock index 9dfff599300d..ce0d70112042 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11508,6 +11508,7 @@ name = "polkadot-availability-recovery" version = "1.0.0" dependencies = [ "assert_matches", + "async-trait", "env_logger 0.9.3", "fatality", "futures", diff --git a/polkadot/node/network/availability-recovery/Cargo.toml b/polkadot/node/network/availability-recovery/Cargo.toml index 07ff09c7e70e..42c3abef547b 100644 --- a/polkadot/node/network/availability-recovery/Cargo.toml +++ b/polkadot/node/network/availability-recovery/Cargo.toml @@ -11,6 +11,7 @@ schnellru = "0.2.1" rand = "0.8.5" fatality = "0.0.6" thiserror = "1.0.48" +async-trait = "0.1.73" gum = { package = "tracing-gum", path = "../../gum" } polkadot-erasure-coding = { path = "../../../erasure-coding" } diff --git a/polkadot/node/network/availability-recovery/src/lib.rs b/polkadot/node/network/availability-recovery/src/lib.rs index 99f42f4bf9fe..e2146981da92 100644 --- a/polkadot/node/network/availability-recovery/src/lib.rs +++ b/polkadot/node/network/availability-recovery/src/lib.rs @@ -23,11 +23,10 @@ use std::{ iter::Iterator, num::NonZeroUsize, pin::Pin, - time::Duration, }; use futures::{ - channel::oneshot::{self, channel}, + channel::oneshot, future::{Future, FutureExt, RemoteHandle}, pin_mut, prelude::*, @@ -35,77 +34,55 @@ use futures::{ stream::{FuturesUnordered, StreamExt}, task::{Context, Poll}, }; -use rand::seq::SliceRandom; use schnellru::{ByLength, LruMap}; +use task::{FetchChunks, FetchChunksParams, FetchFull, FetchFullParams}; use fatality::Nested; use polkadot_erasure_coding::{ branch_hash, branches, obtain_chunks_v1, recovery_threshold, Error as ErasureEncodingError, }; -#[cfg(not(test))] -use polkadot_node_network_protocol::request_response::CHUNK_REQUEST_TIMEOUT; +use task::{RecoveryParams, RecoveryStrategy, RecoveryTask}; + use polkadot_node_network_protocol::{ - request_response::{ - self as req_res, outgoing::RequestError, v1 as request_v1, IncomingRequestReceiver, - OutgoingRequest, Recipient, Requests, - }, - IfDisconnected, UnifiedReputationChange as Rep, + request_response::{v1 as request_v1, IncomingRequestReceiver}, + UnifiedReputationChange as Rep, }; use polkadot_node_primitives::{AvailableData, ErasureChunk}; use polkadot_node_subsystem::{ errors::RecoveryError, jaeger, - messages::{AvailabilityRecoveryMessage, AvailabilityStoreMessage, NetworkBridgeTxMessage}, - overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, - SubsystemResult, + messages::{AvailabilityRecoveryMessage, AvailabilityStoreMessage}, + overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, + SubsystemContext, SubsystemError, SubsystemResult, }; use polkadot_node_subsystem_util::request_session_info; use polkadot_primitives::{ - AuthorityDiscoveryId, BlakeTwo256, BlockNumber, CandidateHash, CandidateReceipt, GroupIndex, - Hash, HashT, IndexedVec, SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, + BlakeTwo256, BlockNumber, CandidateHash, CandidateReceipt, GroupIndex, Hash, HashT, + SessionIndex, SessionInfo, ValidatorIndex, }; mod error; mod 
futures_undead; mod metrics; +mod task; use metrics::Metrics; -use futures_undead::FuturesUndead; -use sc_network::{OutboundFailure, RequestFailure}; - #[cfg(test)] mod tests; const LOG_TARGET: &str = "parachain::availability-recovery"; -// How many parallel recovery tasks should be running at once. -const N_PARALLEL: usize = 50; - // Size of the LRU cache where we keep recovered data. const LRU_SIZE: u32 = 16; const COST_INVALID_REQUEST: Rep = Rep::CostMajor("Peer sent unparsable request"); -/// Time after which we consider a request to have failed -/// -/// and we should try more peers. Note in theory the request times out at the network level, -/// measurements have shown, that in practice requests might actually take longer to fail in -/// certain occasions. (The very least, authority discovery is not part of the timeout.) -/// -/// For the time being this value is the same as the timeout on the networking layer, but as this -/// timeout is more soft than the networking one, it might make sense to pick different values as -/// well. -#[cfg(not(test))] -const TIMEOUT_START_NEW_REQUESTS: Duration = CHUNK_REQUEST_TIMEOUT; -#[cfg(test)] -const TIMEOUT_START_NEW_REQUESTS: Duration = Duration::from_millis(100); - /// PoV size limit in bytes for which we prefer fetching from backers. const SMALL_POV_LIMIT: usize = 128 * 1024; #[derive(Clone, PartialEq)] /// The strategy we use to recover the PoV. -pub enum RecoveryStrategy { +pub enum RecoveryStrategyKind { /// We always try the backing group first, then fallback to validator chunks. BackersFirstAlways, /// We try the backing group first if PoV size is lower than specified, then fallback to @@ -113,101 +90,25 @@ pub enum RecoveryStrategy { BackersFirstIfSizeLower(usize), /// We always recover using validator chunks. ChunksAlways, - /// Do not request data from the availability store. - /// This is the useful for nodes where the - /// availability-store subsystem is not expected to run, - /// such as collators. - BypassAvailabilityStore, } -impl RecoveryStrategy { - /// Returns true if the strategy needs backing group index. - pub fn needs_backing_group(&self) -> bool { - match self { - RecoveryStrategy::BackersFirstAlways | RecoveryStrategy::BackersFirstIfSizeLower(_) => - true, - _ => false, - } - } - - /// Returns the PoV size limit in bytes for `BackersFirstIfSizeLower` strategy, otherwise - /// `None`. - pub fn pov_size_limit(&self) -> Option<usize> { - match *self { - RecoveryStrategy::BackersFirstIfSizeLower(limit) => Some(limit), - _ => None, - } - } -} /// The Availability Recovery Subsystem. pub struct AvailabilityRecoverySubsystem { /// PoV recovery strategy to use. - recovery_strategy: RecoveryStrategy, + recovery_strategy_kind: RecoveryStrategyKind, + /// If this is true, do not request data from the availability store. + /// This is useful for nodes where the + /// availability-store subsystem is not expected to run, + /// such as collators. + bypass_availability_store: bool, /// Receiver for available data requests. req_receiver: IncomingRequestReceiver<request_v1::AvailableDataFetchingRequest>, /// Metrics for this subsystem. metrics: Metrics, } -struct RequestFromBackers { - // a random shuffling of the validators from the backing group which indicates the order - // in which we connect to them and request the chunk. - shuffled_backers: Vec<ValidatorIndex>, - // channel to the erasure task handler. - erasure_task_tx: futures::channel::mpsc::Sender<ErasureTask>, -} - -struct RequestChunksFromValidators { - /// How many request have been unsuccessful so far. 
- error_count: usize, - /// Total number of responses that have been received. - /// - /// including failed ones. - total_received_responses: usize, - /// a random shuffling of the validators which indicates the order in which we connect to the - /// validators and request the chunk from them. - shuffling: VecDeque, - /// Chunks received so far. - received_chunks: HashMap, - /// Pending chunk requests with soft timeout. - requesting_chunks: FuturesUndead, (ValidatorIndex, RequestError)>>, - // channel to the erasure task handler. - erasure_task_tx: futures::channel::mpsc::Sender, -} - -struct RecoveryParams { - /// Discovery ids of `validators`. - validator_authority_keys: Vec, - - /// Validators relevant to this `RecoveryTask`. - validators: IndexedVec, - - /// The number of pieces needed. - threshold: usize, - - /// A hash of the relevant candidate. - candidate_hash: CandidateHash, - - /// The root of the erasure encoding of the para block. - erasure_root: Hash, - - /// Metrics to report - metrics: Metrics, - - /// Do not request data from availability-store - bypass_availability_store: bool, -} - -/// Source the availability data either by means -/// of direct request response protocol to -/// backers (a.k.a. fast-path), or recover from chunks. -enum Source { - RequestFromBackers(RequestFromBackers), - RequestChunks(RequestChunksFromValidators), -} - /// Expensive erasure coding computations that we want to run on a blocking thread. -enum ErasureTask { +pub enum ErasureTask { /// Reconstructs `AvailableData` from chunks given `n_validators`. Reconstruct( usize, @@ -219,486 +120,6 @@ enum ErasureTask { Reencode(usize, Hash, AvailableData, oneshot::Sender>), } -/// A stateful reconstruction of availability data in reference to -/// a candidate hash. -struct RecoveryTask { - sender: Sender, - - /// The parameters of the recovery process. - params: RecoveryParams, - - /// The source to obtain the availability data from. - source: Source, - - // channel to the erasure task handler. - erasure_task_tx: futures::channel::mpsc::Sender, -} - -impl RequestFromBackers { - fn new( - mut backers: Vec, - erasure_task_tx: futures::channel::mpsc::Sender, - ) -> Self { - backers.shuffle(&mut rand::thread_rng()); - - RequestFromBackers { shuffled_backers: backers, erasure_task_tx } - } - - // Run this phase to completion. - async fn run( - &mut self, - params: &RecoveryParams, - sender: &mut impl overseer::AvailabilityRecoverySenderTrait, - ) -> Result { - gum::trace!( - target: LOG_TARGET, - candidate_hash = ?params.candidate_hash, - erasure_root = ?params.erasure_root, - "Requesting from backers", - ); - loop { - // Pop the next backer, and proceed to next phase if we're out. - let validator_index = - self.shuffled_backers.pop().ok_or_else(|| RecoveryError::Unavailable)?; - - // Request data. 
- let (req, response) = OutgoingRequest::new( - Recipient::Authority( - params.validator_authority_keys[validator_index.0 as usize].clone(), - ), - req_res::v1::AvailableDataFetchingRequest { candidate_hash: params.candidate_hash }, - ); - - sender - .send_message(NetworkBridgeTxMessage::SendRequests( - vec![Requests::AvailableDataFetchingV1(req)], - IfDisconnected::ImmediateError, - )) - .await; - - match response.await { - Ok(req_res::v1::AvailableDataFetchingResponse::AvailableData(data)) => { - let (reencode_tx, reencode_rx) = channel(); - self.erasure_task_tx - .send(ErasureTask::Reencode( - params.validators.len(), - params.erasure_root, - data, - reencode_tx, - )) - .await - .map_err(|_| RecoveryError::ChannelClosed)?; - - let reencode_response = - reencode_rx.await.map_err(|_| RecoveryError::ChannelClosed)?; - - if let Some(data) = reencode_response { - gum::trace!( - target: LOG_TARGET, - candidate_hash = ?params.candidate_hash, - "Received full data", - ); - - return Ok(data) - } else { - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?params.candidate_hash, - ?validator_index, - "Invalid data response", - ); - - // it doesn't help to report the peer with req/res. - } - }, - Ok(req_res::v1::AvailableDataFetchingResponse::NoSuchData) => {}, - Err(e) => gum::debug!( - target: LOG_TARGET, - candidate_hash = ?params.candidate_hash, - ?validator_index, - err = ?e, - "Error fetching full available data." - ), - } - } - } -} - -impl RequestChunksFromValidators { - fn new( - n_validators: u32, - erasure_task_tx: futures::channel::mpsc::Sender, - ) -> Self { - let mut shuffling: Vec<_> = (0..n_validators).map(ValidatorIndex).collect(); - shuffling.shuffle(&mut rand::thread_rng()); - - RequestChunksFromValidators { - error_count: 0, - total_received_responses: 0, - shuffling: shuffling.into(), - received_chunks: HashMap::new(), - requesting_chunks: FuturesUndead::new(), - erasure_task_tx, - } - } - - fn is_unavailable(&self, params: &RecoveryParams) -> bool { - is_unavailable( - self.chunk_count(), - self.requesting_chunks.total_len(), - self.shuffling.len(), - params.threshold, - ) - } - - fn chunk_count(&self) -> usize { - self.received_chunks.len() - } - - fn insert_chunk(&mut self, validator_index: ValidatorIndex, chunk: ErasureChunk) { - self.received_chunks.insert(validator_index, chunk); - } - - fn can_conclude(&self, params: &RecoveryParams) -> bool { - self.chunk_count() >= params.threshold || self.is_unavailable(params) - } - - /// Desired number of parallel requests. - /// - /// For the given threshold (total required number of chunks) get the desired number of - /// requests we want to have running in parallel at this time. - fn get_desired_request_count(&self, threshold: usize) -> usize { - // Upper bound for parallel requests. - // We want to limit this, so requests can be processed within the timeout and we limit the - // following feedback loop: - // 1. Requests fail due to timeout - // 2. We request more chunks to make up for it - // 3. Bandwidth is spread out even more, so we get even more timeouts - // 4. We request more chunks to make up for it ... - let max_requests_boundary = std::cmp::min(N_PARALLEL, threshold); - // How many chunks are still needed? - let remaining_chunks = threshold.saturating_sub(self.chunk_count()); - // What is the current error rate, so we can make up for it? 
- let inv_error_rate = - self.total_received_responses.checked_div(self.error_count).unwrap_or(0); - // Actual number of requests we want to have in flight in parallel: - std::cmp::min( - max_requests_boundary, - remaining_chunks + remaining_chunks.checked_div(inv_error_rate).unwrap_or(0), - ) - } - - async fn launch_parallel_requests( - &mut self, - params: &RecoveryParams, - sender: &mut Sender, - ) where - Sender: overseer::AvailabilityRecoverySenderTrait, - { - let num_requests = self.get_desired_request_count(params.threshold); - let candidate_hash = ¶ms.candidate_hash; - let already_requesting_count = self.requesting_chunks.len(); - - gum::debug!( - target: LOG_TARGET, - ?candidate_hash, - ?num_requests, - error_count= ?self.error_count, - total_received = ?self.total_received_responses, - threshold = ?params.threshold, - ?already_requesting_count, - "Requesting availability chunks for a candidate", - ); - let mut requests = Vec::with_capacity(num_requests - already_requesting_count); - - while self.requesting_chunks.len() < num_requests { - if let Some(validator_index) = self.shuffling.pop_back() { - let validator = params.validator_authority_keys[validator_index.0 as usize].clone(); - gum::trace!( - target: LOG_TARGET, - ?validator, - ?validator_index, - ?candidate_hash, - "Requesting chunk", - ); - - // Request data. - let raw_request = req_res::v1::ChunkFetchingRequest { - candidate_hash: params.candidate_hash, - index: validator_index, - }; - - let (req, res) = OutgoingRequest::new(Recipient::Authority(validator), raw_request); - requests.push(Requests::ChunkFetchingV1(req)); - - params.metrics.on_chunk_request_issued(); - let timer = params.metrics.time_chunk_request(); - - self.requesting_chunks.push(Box::pin(async move { - let _timer = timer; - match res.await { - Ok(req_res::v1::ChunkFetchingResponse::Chunk(chunk)) => - Ok(Some(chunk.recombine_into_chunk(&raw_request))), - Ok(req_res::v1::ChunkFetchingResponse::NoSuchChunk) => Ok(None), - Err(e) => Err((validator_index, e)), - } - })); - } else { - break - } - } - - sender - .send_message(NetworkBridgeTxMessage::SendRequests( - requests, - IfDisconnected::TryConnect, - )) - .await; - } - - /// Wait for a sufficient amount of chunks to reconstruct according to the provided `params`. - async fn wait_for_chunks(&mut self, params: &RecoveryParams) { - let metrics = ¶ms.metrics; - - // Wait for all current requests to conclude or time-out, or until we reach enough chunks. - // We also declare requests undead, once `TIMEOUT_START_NEW_REQUESTS` is reached and will - // return in that case for `launch_parallel_requests` to fill up slots again. 
- while let Some(request_result) = - self.requesting_chunks.next_with_timeout(TIMEOUT_START_NEW_REQUESTS).await - { - self.total_received_responses += 1; - - match request_result { - Ok(Some(chunk)) => - if is_chunk_valid(params, &chunk) { - metrics.on_chunk_request_succeeded(); - gum::trace!( - target: LOG_TARGET, - candidate_hash = ?params.candidate_hash, - validator_index = ?chunk.index, - "Received valid chunk", - ); - self.insert_chunk(chunk.index, chunk); - } else { - metrics.on_chunk_request_invalid(); - self.error_count += 1; - }, - Ok(None) => { - metrics.on_chunk_request_no_such_chunk(); - self.error_count += 1; - }, - Err((validator_index, e)) => { - self.error_count += 1; - - gum::trace!( - target: LOG_TARGET, - candidate_hash= ?params.candidate_hash, - err = ?e, - ?validator_index, - "Failure requesting chunk", - ); - - match e { - RequestError::InvalidResponse(_) => { - metrics.on_chunk_request_invalid(); - - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?params.candidate_hash, - err = ?e, - ?validator_index, - "Chunk fetching response was invalid", - ); - }, - RequestError::NetworkError(err) => { - // No debug logs on general network errors - that became very spammy - // occasionally. - if let RequestFailure::Network(OutboundFailure::Timeout) = err { - metrics.on_chunk_request_timeout(); - } else { - metrics.on_chunk_request_error(); - } - - self.shuffling.push_front(validator_index); - }, - RequestError::Canceled(_) => { - metrics.on_chunk_request_error(); - - self.shuffling.push_front(validator_index); - }, - } - }, - } - - // Stop waiting for requests when we either can already recover the data - // or have gotten firm 'No' responses from enough validators. - if self.can_conclude(params) { - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?params.candidate_hash, - received_chunks_count = ?self.chunk_count(), - requested_chunks_count = ?self.requesting_chunks.len(), - threshold = ?params.threshold, - "Can conclude availability for a candidate", - ); - break - } - } - } - - async fn run( - &mut self, - params: &RecoveryParams, - sender: &mut Sender, - ) -> Result - where - Sender: overseer::AvailabilityRecoverySenderTrait, - { - let metrics = ¶ms.metrics; - - // First query the store for any chunks we've got. - if !params.bypass_availability_store { - let (tx, rx) = oneshot::channel(); - sender - .send_message(AvailabilityStoreMessage::QueryAllChunks(params.candidate_hash, tx)) - .await; - - match rx.await { - Ok(chunks) => { - // This should either be length 1 or 0. If we had the whole data, - // we wouldn't have reached this stage. - let chunk_indices: Vec<_> = chunks.iter().map(|c| c.index).collect(); - self.shuffling.retain(|i| !chunk_indices.contains(i)); - - for chunk in chunks { - if is_chunk_valid(params, &chunk) { - gum::trace!( - target: LOG_TARGET, - candidate_hash = ?params.candidate_hash, - validator_index = ?chunk.index, - "Found valid chunk on disk" - ); - self.insert_chunk(chunk.index, chunk); - } else { - gum::error!( - target: LOG_TARGET, - "Loaded invalid chunk from disk! Disk/Db corruption _very_ likely - please fix ASAP!" 
- ); - }; - } - }, - Err(oneshot::Canceled) => { - gum::warn!( - target: LOG_TARGET, - candidate_hash = ?params.candidate_hash, - "Failed to reach the availability store" - ); - }, - } - } - - let _recovery_timer = metrics.time_full_recovery(); - - loop { - if self.is_unavailable(¶ms) { - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?params.candidate_hash, - erasure_root = ?params.erasure_root, - received = %self.chunk_count(), - requesting = %self.requesting_chunks.len(), - total_requesting = %self.requesting_chunks.total_len(), - n_validators = %params.validators.len(), - "Data recovery is not possible", - ); - - metrics.on_recovery_failed(); - - return Err(RecoveryError::Unavailable) - } - - self.launch_parallel_requests(params, sender).await; - self.wait_for_chunks(params).await; - - // If received_chunks has more than threshold entries, attempt to recover the data. - // If that fails, or a re-encoding of it doesn't match the expected erasure root, - // return Err(RecoveryError::Invalid) - if self.chunk_count() >= params.threshold { - let recovery_duration = metrics.time_erasure_recovery(); - - // Send request to reconstruct available data from chunks. - let (avilable_data_tx, available_data_rx) = channel(); - self.erasure_task_tx - .send(ErasureTask::Reconstruct( - params.validators.len(), - std::mem::take(&mut self.received_chunks), - avilable_data_tx, - )) - .await - .map_err(|_| RecoveryError::ChannelClosed)?; - - let available_data_response = - available_data_rx.await.map_err(|_| RecoveryError::ChannelClosed)?; - - return match available_data_response { - Ok(data) => { - // Send request to re-encode the chunks and check merkle root. - let (reencode_tx, reencode_rx) = channel(); - self.erasure_task_tx - .send(ErasureTask::Reencode( - params.validators.len(), - params.erasure_root, - data, - reencode_tx, - )) - .await - .map_err(|_| RecoveryError::ChannelClosed)?; - - let reencode_response = - reencode_rx.await.map_err(|_| RecoveryError::ChannelClosed)?; - - if let Some(data) = reencode_response { - gum::trace!( - target: LOG_TARGET, - candidate_hash = ?params.candidate_hash, - erasure_root = ?params.erasure_root, - "Data recovery complete", - ); - metrics.on_recovery_succeeded(); - - Ok(data) - } else { - recovery_duration.map(|rd| rd.stop_and_discard()); - gum::trace!( - target: LOG_TARGET, - candidate_hash = ?params.candidate_hash, - erasure_root = ?params.erasure_root, - "Data recovery - root mismatch", - ); - metrics.on_recovery_invalid(); - - Err(RecoveryError::Invalid) - } - }, - Err(err) => { - recovery_duration.map(|rd| rd.stop_and_discard()); - gum::trace!( - target: LOG_TARGET, - candidate_hash = ?params.candidate_hash, - erasure_root = ?params.erasure_root, - ?err, - "Data recovery error ", - ); - metrics.on_recovery_invalid(); - - Err(RecoveryError::Invalid) - }, - } - } - } - } -} - const fn is_unavailable( received_chunks: usize, requesting_chunks: usize, @@ -777,60 +198,6 @@ fn reconstructed_data_matches_root( branches.root() == *expected_root } -impl RecoveryTask -where - Sender: overseer::AvailabilityRecoverySenderTrait, -{ - async fn run(mut self) -> Result { - // First just see if we have the data available locally. 
- if !self.params.bypass_availability_store { - let (tx, rx) = oneshot::channel(); - self.sender - .send_message(AvailabilityStoreMessage::QueryAvailableData( - self.params.candidate_hash, - tx, - )) - .await; - - match rx.await { - Ok(Some(data)) => return Ok(data), - Ok(None) => {}, - Err(oneshot::Canceled) => { - gum::warn!( - target: LOG_TARGET, - candidate_hash = ?self.params.candidate_hash, - "Failed to reach the availability store", - ) - }, - } - } - - self.params.metrics.on_recovery_started(); - - loop { - // These only fail if we cannot reach the underlying subsystem, which case there is - // nothing meaningful we can do. - match self.source { - Source::RequestFromBackers(ref mut from_backers) => { - match from_backers.run(&self.params, &mut self.sender).await { - Ok(data) => break Ok(data), - Err(RecoveryError::Invalid) => break Err(RecoveryError::Invalid), - Err(RecoveryError::ChannelClosed) => - break Err(RecoveryError::ChannelClosed), - Err(RecoveryError::Unavailable) => - self.source = Source::RequestChunks(RequestChunksFromValidators::new( - self.params.validators.len() as _, - self.erasure_task_tx.clone(), - )), - } - }, - Source::RequestChunks(ref mut from_all) => - break from_all.run(&self.params, &mut self.sender).await, - } - } - } -} - /// Accumulate all awaiting sides for some particular `AvailableData`. struct RecoveryHandle { candidate_hash: CandidateHash, @@ -973,65 +340,23 @@ async fn launch_recovery_task( ctx: &mut Context, session_info: SessionInfo, receipt: CandidateReceipt, - mut backing_group: Option, response_sender: oneshot::Sender>, metrics: &Metrics, - recovery_strategy: &RecoveryStrategy, - erasure_task_tx: futures::channel::mpsc::Sender, + recovery_strategies: VecDeque::Sender>>>, + bypass_availability_store: bool, ) -> error::Result<()> { let candidate_hash = receipt.hash(); let params = RecoveryParams { validator_authority_keys: session_info.discovery_keys.clone(), - validators: session_info.validators.clone(), + n_validators: session_info.validators.len(), threshold: recovery_threshold(session_info.validators.len())?, candidate_hash, erasure_root: receipt.descriptor.erasure_root, metrics: metrics.clone(), - bypass_availability_store: recovery_strategy == &RecoveryStrategy::BypassAvailabilityStore, + bypass_availability_store, }; - if let Some(small_pov_limit) = recovery_strategy.pov_size_limit() { - // Get our own chunk size to get an estimate of the PoV size. - let chunk_size: Result, error::Error> = - query_chunk_size(ctx, candidate_hash).await; - if let Ok(Some(chunk_size)) = chunk_size { - let pov_size_estimate = chunk_size.saturating_mul(session_info.validators.len()) / 3; - let prefer_backing_group = pov_size_estimate < small_pov_limit; - - gum::trace!( - target: LOG_TARGET, - ?candidate_hash, - pov_size_estimate, - small_pov_limit, - enabled = prefer_backing_group, - "Prefer fetch from backing group", - ); - - backing_group = backing_group.filter(|_| { - // We keep the backing group only if `1/3` of chunks sum up to less than - // `small_pov_limit`. 
- prefer_backing_group - }); - } - } - - let phase = backing_group - .and_then(|g| session_info.validator_groups.get(g)) - .map(|group| { - Source::RequestFromBackers(RequestFromBackers::new( - group.clone(), - erasure_task_tx.clone(), - )) - }) - .unwrap_or_else(|| { - Source::RequestChunks(RequestChunksFromValidators::new( - params.validators.len() as _, - erasure_task_tx.clone(), - )) - }); - - let recovery_task = - RecoveryTask { sender: ctx.sender().clone(), params, source: phase, erasure_task_tx }; + let recovery_task = RecoveryTask::new(ctx.sender().clone(), params, recovery_strategies); let (remote, remote_handle) = recovery_task.run().remote_handle(); @@ -1062,8 +387,9 @@ async fn handle_recover( backing_group: Option, response_sender: oneshot::Sender>, metrics: &Metrics, - recovery_strategy: &RecoveryStrategy, erasure_task_tx: futures::channel::mpsc::Sender, + recovery_strategy_kind: RecoveryStrategyKind, + bypass_availability_store: bool, ) -> error::Result<()> { let candidate_hash = receipt.hash(); @@ -1098,19 +424,71 @@ async fn handle_recover( let _span = span.child("session-info-ctx-received"); match session_info { - Some(session_info) => + Some(session_info) => { + let mut recovery_strategies: VecDeque< + Box::Sender>>, + > = VecDeque::with_capacity(2); + + if let Some(backing_group) = backing_group { + if let Some(backing_validators) = session_info.validator_groups.get(backing_group) { + let mut small_pov_size = true; + + if let RecoveryStrategyKind::BackersFirstIfSizeLower(small_pov_limit) = + recovery_strategy_kind + { + // Get our own chunk size to get an estimate of the PoV size. + let chunk_size: Result, error::Error> = + query_chunk_size(ctx, candidate_hash).await; + if let Ok(Some(chunk_size)) = chunk_size { + let pov_size_estimate = + chunk_size.saturating_mul(session_info.validators.len()) / 3; + small_pov_size = pov_size_estimate < small_pov_limit; + + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + pov_size_estimate, + small_pov_limit, + enabled = small_pov_size, + "Prefer fetch from backing group", + ); + } else { + // we have a POV limit but were not able to query the chunk size, so + // don't use the backing group. 
+ small_pov_size = false; + } + }; + + match (&recovery_strategy_kind, small_pov_size) { + (RecoveryStrategyKind::BackersFirstAlways, _) | + (RecoveryStrategyKind::BackersFirstIfSizeLower(_), true) => recovery_strategies.push_back( + Box::new(FetchFull::new(FetchFullParams { + validators: backing_validators.to_vec(), + erasure_task_tx: erasure_task_tx.clone(), + })), + ), + _ => {}, + }; + } + } + + recovery_strategies.push_back(Box::new(FetchChunks::new(FetchChunksParams { + n_validators: session_info.validators.len(), + erasure_task_tx, + }))); + launch_recovery_task( state, ctx, session_info, receipt, - backing_group, response_sender, metrics, - recovery_strategy, - erasure_task_tx, + recovery_strategies, + bypass_availability_store, ) - .await, + .await + }, None => { gum::warn!(target: LOG_TARGET, "SessionInfo is `None` at {:?}", state.live_block); response_sender @@ -1155,7 +533,12 @@ impl AvailabilityRecoverySubsystem { req_receiver: IncomingRequestReceiver, metrics: Metrics, ) -> Self { - Self { recovery_strategy: RecoveryStrategy::BypassAvailabilityStore, req_receiver, metrics } + Self { + recovery_strategy_kind: RecoveryStrategyKind::BackersFirstIfSizeLower(SMALL_POV_LIMIT), + bypass_availability_store: true, + req_receiver, + metrics, + } } /// Create a new instance of `AvailabilityRecoverySubsystem` which starts with a fast path to @@ -1164,7 +547,12 @@ impl AvailabilityRecoverySubsystem { req_receiver: IncomingRequestReceiver, metrics: Metrics, ) -> Self { - Self { recovery_strategy: RecoveryStrategy::BackersFirstAlways, req_receiver, metrics } + Self { + recovery_strategy_kind: RecoveryStrategyKind::BackersFirstAlways, + bypass_availability_store: false, + req_receiver, + metrics, + } } /// Create a new instance of `AvailabilityRecoverySubsystem` which requests only chunks @@ -1172,7 +560,12 @@ impl AvailabilityRecoverySubsystem { req_receiver: IncomingRequestReceiver, metrics: Metrics, ) -> Self { - Self { recovery_strategy: RecoveryStrategy::ChunksAlways, req_receiver, metrics } + Self { + recovery_strategy_kind: RecoveryStrategyKind::ChunksAlways, + bypass_availability_store: false, + req_receiver, + metrics, + } } /// Create a new instance of `AvailabilityRecoverySubsystem` which requests chunks if PoV is @@ -1182,7 +575,8 @@ impl AvailabilityRecoverySubsystem { metrics: Metrics, ) -> Self { Self { - recovery_strategy: RecoveryStrategy::BackersFirstIfSizeLower(SMALL_POV_LIMIT), + recovery_strategy_kind: RecoveryStrategyKind::BackersFirstIfSizeLower(SMALL_POV_LIMIT), + bypass_availability_store: false, req_receiver, metrics, } @@ -1190,7 +584,8 @@ impl AvailabilityRecoverySubsystem { async fn run(self, mut ctx: Context) -> SubsystemResult<()> { let mut state = State::default(); - let Self { recovery_strategy, mut req_receiver, metrics } = self; + let Self { mut req_receiver, metrics, recovery_strategy_kind, bypass_availability_store } = + self; let (erasure_task_tx, erasure_task_rx) = futures::channel::mpsc::channel(16); let mut erasure_task_rx = erasure_task_rx.fuse(); @@ -1275,11 +670,12 @@ impl AvailabilityRecoverySubsystem { &mut ctx, receipt, session_index, - maybe_backing_group.filter(|_| recovery_strategy.needs_backing_group()), + maybe_backing_group, response_sender, &metrics, - &recovery_strategy, erasure_task_tx.clone(), + recovery_strategy_kind.clone(), + bypass_availability_store ).await { gum::warn!( target: LOG_TARGET, @@ -1295,7 +691,7 @@ impl AvailabilityRecoverySubsystem { in_req = recv_req => { match in_req.into_nested().map_err(|fatal| 
SubsystemError::with_origin("availability-recovery", fatal))? { Ok(req) => { - if recovery_strategy == RecoveryStrategy::BypassAvailabilityStore { + if bypass_availability_store { gum::debug!( target: LOG_TARGET, "Skipping request to availability-store.", diff --git a/polkadot/node/network/availability-recovery/src/task.rs b/polkadot/node/network/availability-recovery/src/task.rs new file mode 100644 index 000000000000..d5bc2da84944 --- /dev/null +++ b/polkadot/node/network/availability-recovery/src/task.rs @@ -0,0 +1,830 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Recovery task and associated strategies. + +#![warn(missing_docs)] + +use crate::{ + futures_undead::FuturesUndead, is_chunk_valid, is_unavailable, metrics::Metrics, ErasureTask, + LOG_TARGET, +}; +use futures::{channel::oneshot, SinkExt}; +#[cfg(not(test))] +use polkadot_node_network_protocol::request_response::CHUNK_REQUEST_TIMEOUT; +use polkadot_node_network_protocol::request_response::{ + self as req_res, outgoing::RequestError, OutgoingRequest, Recipient, Requests, +}; +use polkadot_node_primitives::{AvailableData, ErasureChunk}; +use polkadot_node_subsystem::{ + messages::{AvailabilityStoreMessage, NetworkBridgeTxMessage}, + overseer, RecoveryError, +}; +use polkadot_primitives::{AuthorityDiscoveryId, CandidateHash, Hash, ValidatorIndex}; +use rand::seq::SliceRandom; +use sc_network::{IfDisconnected, OutboundFailure, RequestFailure}; +use std::{ + collections::{HashMap, VecDeque}, + time::Duration, +}; + +// How many parallel recovery tasks should be running at once. +const N_PARALLEL: usize = 50; + +/// Time after which we consider a request to have failed +/// +/// and we should try more peers. Note in theory the request times out at the network level, +/// measurements have shown, that in practice requests might actually take longer to fail in +/// certain occasions. (The very least, authority discovery is not part of the timeout.) +/// +/// For the time being this value is the same as the timeout on the networking layer, but as this +/// timeout is more soft than the networking one, it might make sense to pick different values as +/// well. +#[cfg(not(test))] +const TIMEOUT_START_NEW_REQUESTS: Duration = CHUNK_REQUEST_TIMEOUT; +#[cfg(test)] +const TIMEOUT_START_NEW_REQUESTS: Duration = Duration::from_millis(100); + +#[async_trait::async_trait] +/// Common trait for runnable recovery strategies. +pub trait RecoveryStrategy: Send { + /// Main entry point of the strategy. + async fn run( + &mut self, + state: &mut State, + sender: &mut Sender, + common_params: &RecoveryParams, + ) -> Result; + + /// Return the name of the strategy for logging purposes. + fn display_name(&self) -> &'static str; +} + +/// Recovery parameters common to all strategies in a `RecoveryTask`. +pub struct RecoveryParams { + /// Discovery ids of `validators`. 
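+	/// Indexed by `ValidatorIndex`: entry `i` holds the discovery key used to
+	/// reach validator `i` on the network.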
+ pub validator_authority_keys: Vec, + + /// Number of validators. + pub n_validators: usize, + + /// The number of chunks needed. + pub threshold: usize, + + /// A hash of the relevant candidate. + pub candidate_hash: CandidateHash, + + /// The root of the erasure encoding of the candidate. + pub erasure_root: Hash, + + /// Metrics to report. + pub metrics: Metrics, + + /// Do not request data from availability-store. Useful for collators. + pub bypass_availability_store: bool, +} + +/// Intermediate/common data that must be passed between `RecoveryStrategy`s belonging to the +/// same `RecoveryTask`. +pub struct State { + /// Chunks received so far. + received_chunks: HashMap, +} + +impl State { + fn new() -> Self { + Self { received_chunks: HashMap::new() } + } + + fn insert_chunk(&mut self, validator: ValidatorIndex, chunk: ErasureChunk) { + self.received_chunks.insert(validator, chunk); + } + + fn chunk_count(&self) -> usize { + self.received_chunks.len() + } + + /// Retrieve the local chunks held in the av-store (either 0 or 1). + async fn populate_from_av_store( + &mut self, + params: &RecoveryParams, + sender: &mut Sender, + ) -> Vec { + let (tx, rx) = oneshot::channel(); + sender + .send_message(AvailabilityStoreMessage::QueryAllChunks(params.candidate_hash, tx)) + .await; + + match rx.await { + Ok(chunks) => { + // This should either be length 1 or 0. If we had the whole data, + // we wouldn't have reached this stage. + let chunk_indices: Vec<_> = chunks.iter().map(|c| c.index).collect(); + + for chunk in chunks { + if is_chunk_valid(params, &chunk) { + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?params.candidate_hash, + validator_index = ?chunk.index, + "Found valid chunk on disk" + ); + self.insert_chunk(chunk.index, chunk); + } else { + gum::error!( + target: LOG_TARGET, + "Loaded invalid chunk from disk! Disk/Db corruption _very_ likely - please fix ASAP!" + ); + }; + } + + chunk_indices + }, + Err(oneshot::Canceled) => { + gum::warn!( + target: LOG_TARGET, + candidate_hash = ?params.candidate_hash, + "Failed to reach the availability store" + ); + + vec![] + }, + } + } + + /// Launch chunk requests in parallel, according to the parameters. + async fn launch_parallel_chunk_requests( + &mut self, + params: &RecoveryParams, + sender: &mut Sender, + desired_requests_count: usize, + validators: &mut VecDeque, + requesting_chunks: &mut FuturesUndead< + Result, (ValidatorIndex, RequestError)>, + >, + ) where + Sender: overseer::AvailabilityRecoverySenderTrait, + { + let candidate_hash = ¶ms.candidate_hash; + let already_requesting_count = requesting_chunks.len(); + + let mut requests = Vec::with_capacity(desired_requests_count - already_requesting_count); + + while requesting_chunks.len() < desired_requests_count { + if let Some(validator_index) = validators.pop_back() { + let validator = params.validator_authority_keys[validator_index.0 as usize].clone(); + gum::trace!( + target: LOG_TARGET, + ?validator, + ?validator_index, + ?candidate_hash, + "Requesting chunk", + ); + + // Request data. 
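+				// Build a `ChunkFetchingRequest` for this validator's chunk index; the
+				// response future queued below resolves to a chunk, a firm `NoSuchChunk`,
+				// or an error attributed to this validator index.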
+ let raw_request = req_res::v1::ChunkFetchingRequest { + candidate_hash: params.candidate_hash, + index: validator_index, + }; + + let (req, res) = OutgoingRequest::new(Recipient::Authority(validator), raw_request); + requests.push(Requests::ChunkFetchingV1(req)); + + params.metrics.on_chunk_request_issued(); + let timer = params.metrics.time_chunk_request(); + + requesting_chunks.push(Box::pin(async move { + let _timer = timer; + match res.await { + Ok(req_res::v1::ChunkFetchingResponse::Chunk(chunk)) => + Ok(Some(chunk.recombine_into_chunk(&raw_request))), + Ok(req_res::v1::ChunkFetchingResponse::NoSuchChunk) => Ok(None), + Err(e) => Err((validator_index, e)), + } + })); + } else { + break + } + } + + sender + .send_message(NetworkBridgeTxMessage::SendRequests( + requests, + IfDisconnected::TryConnect, + )) + .await; + } + + /// Wait for a sufficient amount of chunks to reconstruct according to the provided `params`. + async fn wait_for_chunks( + &mut self, + params: &RecoveryParams, + validators: &mut VecDeque, + requesting_chunks: &mut FuturesUndead< + Result, (ValidatorIndex, RequestError)>, + >, + can_conclude: impl Fn(usize, usize, usize, &RecoveryParams, usize) -> bool, + ) -> (usize, usize) { + let metrics = ¶ms.metrics; + + let mut total_received_responses = 0; + let mut error_count = 0; + + // Wait for all current requests to conclude or time-out, or until we reach enough chunks. + // We also declare requests undead, once `TIMEOUT_START_NEW_REQUESTS` is reached and will + // return in that case for `launch_parallel_requests` to fill up slots again. + while let Some(request_result) = + requesting_chunks.next_with_timeout(TIMEOUT_START_NEW_REQUESTS).await + { + total_received_responses += 1; + + match request_result { + Ok(Some(chunk)) => + if is_chunk_valid(params, &chunk) { + metrics.on_chunk_request_succeeded(); + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?params.candidate_hash, + validator_index = ?chunk.index, + "Received valid chunk", + ); + self.insert_chunk(chunk.index, chunk); + } else { + metrics.on_chunk_request_invalid(); + error_count += 1; + }, + Ok(None) => { + metrics.on_chunk_request_no_such_chunk(); + error_count += 1; + }, + Err((validator_index, e)) => { + error_count += 1; + + gum::trace!( + target: LOG_TARGET, + candidate_hash= ?params.candidate_hash, + err = ?e, + ?validator_index, + "Failure requesting chunk", + ); + + match e { + RequestError::InvalidResponse(_) => { + metrics.on_chunk_request_invalid(); + + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?params.candidate_hash, + err = ?e, + ?validator_index, + "Chunk fetching response was invalid", + ); + }, + RequestError::NetworkError(err) => { + // No debug logs on general network errors - that became very spammy + // occasionally. + if let RequestFailure::Network(OutboundFailure::Timeout) = err { + metrics.on_chunk_request_timeout(); + } else { + metrics.on_chunk_request_error(); + } + + validators.push_front(validator_index); + }, + RequestError::Canceled(_) => { + metrics.on_chunk_request_error(); + + validators.push_front(validator_index); + }, + } + }, + } + + // Stop waiting for requests when we either can already recover the data + // or have gotten firm 'No' responses from enough validators. 
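+			// `can_conclude` is supplied by the calling strategy and receives
+			// (unrequested validators, in-flight requests, received chunks, params,
+			// error count); e.g. `FetchChunks` concludes once the threshold is met
+			// or recovery has become impossible.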
+ if can_conclude( + validators.len(), + requesting_chunks.total_len(), + self.chunk_count(), + params, + error_count, + ) { + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?params.candidate_hash, + received_chunks_count = ?self.chunk_count(), + requested_chunks_count = ?requesting_chunks.len(), + threshold = ?params.threshold, + "Can conclude availability for a candidate", + ); + break + } + } + + (total_received_responses, error_count) + } +} + +/// A stateful reconstruction of availability data in reference to +/// a candidate hash. +pub struct RecoveryTask { + sender: Sender, + params: RecoveryParams, + strategies: VecDeque>>, + state: State, +} + +impl RecoveryTask +where + Sender: overseer::AvailabilityRecoverySenderTrait, +{ + /// Instantiate a new recovery task. + pub fn new( + sender: Sender, + params: RecoveryParams, + strategies: VecDeque>>, + ) -> Self { + Self { sender, params, strategies, state: State::new() } + } + + async fn in_availability_store(&mut self) -> Option { + if !self.params.bypass_availability_store { + let (tx, rx) = oneshot::channel(); + self.sender + .send_message(AvailabilityStoreMessage::QueryAvailableData( + self.params.candidate_hash, + tx, + )) + .await; + + match rx.await { + Ok(Some(data)) => return Some(data), + Ok(None) => {}, + Err(oneshot::Canceled) => { + gum::warn!( + target: LOG_TARGET, + candidate_hash = ?self.params.candidate_hash, + "Failed to reach the availability store", + ) + }, + } + } + + None + } + + /// Run this recovery task to completion. It will loop through the configured strategies + /// in-order and return whenever the first one recovers the full `AvailableData`. + pub async fn run(mut self) -> Result { + if let Some(data) = self.in_availability_store().await { + return Ok(data) + } + + self.params.metrics.on_recovery_started(); + + let _timer = self.params.metrics.time_full_recovery(); + + while let Some(mut current_strategy) = self.strategies.pop_front() { + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?self.params.candidate_hash, + "Starting `{}` strategy", + current_strategy.display_name(), + ); + + let res = current_strategy.run(&mut self.state, &mut self.sender, &self.params).await; + + match res { + Err(RecoveryError::Unavailable) => + if self.strategies.front().is_some() { + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?self.params.candidate_hash, + "Recovery strategy `{}` did not conclude. Trying the next one.", + current_strategy.display_name(), + ); + continue + }, + Err(err) => { + match &err { + RecoveryError::Invalid => self.params.metrics.on_recovery_invalid(), + _ => self.params.metrics.on_recovery_failed(), + } + return Err(err) + }, + Ok(data) => { + self.params.metrics.on_recovery_succeeded(); + return Ok(data) + }, + } + } + + // We have no other strategies to try. + gum::warn!( + target: LOG_TARGET, + candidate_hash = ?self.params.candidate_hash, + "Recovery of available data failed.", + ); + self.params.metrics.on_recovery_failed(); + + Err(RecoveryError::Unavailable) + } +} + +/// `RecoveryStrategy` that sequentially tries to fetch the full `AvailableData` from +/// already-connected validators in the configured validator set. +pub struct FetchFull { + params: FetchFullParams, +} + +pub struct FetchFullParams { + /// Validators that will be used for fetching the data. + pub validators: Vec, + /// Channel to the erasure task handler. + pub erasure_task_tx: futures::channel::mpsc::Sender, +} + +impl FetchFull { + /// Create a new `FetchFull` recovery strategy. 
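+	///
+	/// The validator list is shuffled up front so that repeated recoveries do not
+	/// always hit the same backer first.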
+ pub fn new(mut params: FetchFullParams) -> Self { + params.validators.shuffle(&mut rand::thread_rng()); + Self { params } + } +} + +#[async_trait::async_trait] +impl RecoveryStrategy for FetchFull { + fn display_name(&self) -> &'static str { + "Full recovery from backers" + } + + async fn run( + &mut self, + _: &mut State, + sender: &mut Sender, + common_params: &RecoveryParams, + ) -> Result { + loop { + // Pop the next validator, and proceed to next fetch_chunks_task if we're out. + let validator_index = + self.params.validators.pop().ok_or_else(|| RecoveryError::Unavailable)?; + + // Request data. + let (req, response) = OutgoingRequest::new( + Recipient::Authority( + common_params.validator_authority_keys[validator_index.0 as usize].clone(), + ), + req_res::v1::AvailableDataFetchingRequest { + candidate_hash: common_params.candidate_hash, + }, + ); + + sender + .send_message(NetworkBridgeTxMessage::SendRequests( + vec![Requests::AvailableDataFetchingV1(req)], + IfDisconnected::ImmediateError, + )) + .await; + + match response.await { + Ok(req_res::v1::AvailableDataFetchingResponse::AvailableData(data)) => { + let (reencode_tx, reencode_rx) = oneshot::channel(); + self.params + .erasure_task_tx + .send(ErasureTask::Reencode( + common_params.n_validators, + common_params.erasure_root, + data, + reencode_tx, + )) + .await + .map_err(|_| RecoveryError::ChannelClosed)?; + + let reencode_response = + reencode_rx.await.map_err(|_| RecoveryError::ChannelClosed)?; + + if let Some(data) = reencode_response { + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?common_params.candidate_hash, + "Received full data", + ); + + return Ok(data) + } else { + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?common_params.candidate_hash, + ?validator_index, + "Invalid data response", + ); + + // it doesn't help to report the peer with req/res. + } + }, + Ok(req_res::v1::AvailableDataFetchingResponse::NoSuchData) => {}, + Err(e) => gum::debug!( + target: LOG_TARGET, + candidate_hash = ?common_params.candidate_hash, + ?validator_index, + err = ?e, + "Error fetching full available data." + ), + } + } + } +} + +/// `RecoveryStrategy` that requests chunks from validators, in parallel. +pub struct FetchChunks { + /// How many requests have been unsuccessful so far. + error_count: usize, + /// Total number of responses that have been received, including failed ones. + total_received_responses: usize, + /// Collection of in-flight requests. + requesting_chunks: FuturesUndead, (ValidatorIndex, RequestError)>>, + /// A random shuffling of the validators which indicates the order in which we connect to the + /// validators and request the chunk from them. + validators: VecDeque, + /// Channel to the erasure task handler. + erasure_task_tx: futures::channel::mpsc::Sender, +} + +/// Parameters specific to the `FetchChunks` strategy. +pub struct FetchChunksParams { + /// Total number of validators. + pub n_validators: usize, + /// Channel to the erasure task handler. + pub erasure_task_tx: futures::channel::mpsc::Sender, +} + +impl FetchChunks { + /// Instantiate a new strategy. 
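+	///
+	/// Validator indices are put into a random order; chunks are then requested by
+	/// popping from that shuffled queue, spreading load across the validator set.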
+	pub fn new(params: FetchChunksParams) -> Self {
+		let mut shuffling: Vec<_> = (0..params.n_validators)
+			.map(|i| ValidatorIndex(i.try_into().expect("number of validators must fit in a u32")))
+			.collect();
+		shuffling.shuffle(&mut rand::thread_rng());
+
+		Self {
+			error_count: 0,
+			total_received_responses: 0,
+			requesting_chunks: FuturesUndead::new(),
+			validators: shuffling.into(),
+			erasure_task_tx: params.erasure_task_tx,
+		}
+	}
+
+	fn is_unavailable(
+		unrequested_validators: usize,
+		in_flight_requests: usize,
+		chunk_count: usize,
+		threshold: usize,
+	) -> bool {
+		is_unavailable(chunk_count, in_flight_requests, unrequested_validators, threshold)
+	}
+
+	/// Desired number of parallel requests.
+	///
+	/// For the given threshold (total required number of chunks) get the desired number of
+	/// requests we want to have running in parallel at this time.
+	fn get_desired_request_count(&self, chunk_count: usize, threshold: usize) -> usize {
+		// Upper bound for parallel requests.
+		// We want to limit this, so requests can be processed within the timeout and we limit the
+		// following feedback loop:
+		// 1. Requests fail due to timeout
+		// 2. We request more chunks to make up for it
+		// 3. Bandwidth is spread out even more, so we get even more timeouts
+		// 4. We request more chunks to make up for it ...
+		let max_requests_boundary = std::cmp::min(N_PARALLEL, threshold);
+		// How many chunks are still needed?
+		let remaining_chunks = threshold.saturating_sub(chunk_count);
+		// What is the current error rate, so we can make up for it?
+		let inv_error_rate =
+			self.total_received_responses.checked_div(self.error_count).unwrap_or(0);
+		// Actual number of requests we want to have in flight in parallel:
+		std::cmp::min(
+			max_requests_boundary,
+			remaining_chunks + remaining_chunks.checked_div(inv_error_rate).unwrap_or(0),
+		)
+	}
+
+	async fn attempt_recovery(
+		&mut self,
+		state: &mut State,
+		common_params: &RecoveryParams,
+	) -> Result<AvailableData, RecoveryError> {
+		let recovery_duration = common_params.metrics.time_erasure_recovery();
+
+		// Send request to reconstruct available data from chunks.
+		let (available_data_tx, available_data_rx) = oneshot::channel();
+		self.erasure_task_tx
+			.send(ErasureTask::Reconstruct(
+				common_params.n_validators,
+				// Safe to leave an empty vec in place, as we're stopping the recovery process if
+				// this reconstruct fails.
+				std::mem::take(&mut state.received_chunks),
+				available_data_tx,
+			))
+			.await
+			.map_err(|_| RecoveryError::ChannelClosed)?;
+
+		let available_data_response =
+			available_data_rx.await.map_err(|_| RecoveryError::ChannelClosed)?;
+
+		match available_data_response {
+			Ok(data) => {
+				// Send request to re-encode the chunks and check merkle root.
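+				// Reconstruction alone does not prove we recovered the right data:
+				// only a re-encode whose erasure root matches the one in the candidate
+				// receipt rules out a crafted or corrupt chunk set.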
+ let (reencode_tx, reencode_rx) = oneshot::channel(); + self.erasure_task_tx + .send(ErasureTask::Reencode( + common_params.n_validators, + common_params.erasure_root, + data, + reencode_tx, + )) + .await + .map_err(|_| RecoveryError::ChannelClosed)?; + + let reencode_response = + reencode_rx.await.map_err(|_| RecoveryError::ChannelClosed)?; + + if let Some(data) = reencode_response { + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?common_params.candidate_hash, + erasure_root = ?common_params.erasure_root, + "Data recovery from chunks complete", + ); + + Ok(data) + } else { + recovery_duration.map(|rd| rd.stop_and_discard()); + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?common_params.candidate_hash, + erasure_root = ?common_params.erasure_root, + "Data recovery error - root mismatch", + ); + + Err(RecoveryError::Invalid) + } + }, + Err(err) => { + recovery_duration.map(|rd| rd.stop_and_discard()); + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?common_params.candidate_hash, + erasure_root = ?common_params.erasure_root, + ?err, + "Data recovery error ", + ); + + Err(RecoveryError::Invalid) + }, + } + } +} + +#[async_trait::async_trait] +impl RecoveryStrategy for FetchChunks { + fn display_name(&self) -> &'static str { + "Fetch chunks" + } + + async fn run( + &mut self, + state: &mut State, + sender: &mut Sender, + common_params: &RecoveryParams, + ) -> Result { + // First query the store for any chunks we've got. + if !common_params.bypass_availability_store { + let local_chunk_indices = state.populate_from_av_store(common_params, sender).await; + self.validators.retain(|i| !local_chunk_indices.contains(i)); + } + + // No need to query the validators that have the chunks we already received. + self.validators.retain(|i| !state.received_chunks.contains_key(i)); + + loop { + // If received_chunks has more than threshold entries, attempt to recover the data. + // If that fails, or a re-encoding of it doesn't match the expected erasure root, + // return Err(RecoveryError::Invalid). + // Do this before requesting any chunks because we may have enough of them coming from + // past RecoveryStrategies. 
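+			// Each iteration either concludes (threshold reached, or recovery provably
+			// impossible) or launches another wave of parallel requests and waits for
+			// their responses.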
+ if state.chunk_count() >= common_params.threshold { + return self.attempt_recovery(state, common_params).await + } + + if Self::is_unavailable( + self.validators.len(), + self.requesting_chunks.total_len(), + state.chunk_count(), + common_params.threshold, + ) { + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?common_params.candidate_hash, + erasure_root = ?common_params.erasure_root, + received = %state.chunk_count(), + requesting = %self.requesting_chunks.len(), + total_requesting = %self.requesting_chunks.total_len(), + n_validators = %common_params.n_validators, + "Data recovery from chunks is not possible", + ); + + return Err(RecoveryError::Unavailable) + } + + let desired_requests_count = + self.get_desired_request_count(state.chunk_count(), common_params.threshold); + let already_requesting_count = self.requesting_chunks.len(); + gum::debug!( + target: LOG_TARGET, + ?common_params.candidate_hash, + ?desired_requests_count, + error_count= ?self.error_count, + total_received = ?self.total_received_responses, + threshold = ?common_params.threshold, + ?already_requesting_count, + "Requesting availability chunks for a candidate", + ); + state + .launch_parallel_chunk_requests( + common_params, + sender, + desired_requests_count, + &mut self.validators, + &mut self.requesting_chunks, + ) + .await; + + let (total_responses, error_count) = state + .wait_for_chunks( + common_params, + &mut self.validators, + &mut self.requesting_chunks, + |unrequested_validators, reqs, chunk_count, params, _error_count| { + chunk_count >= params.threshold || + Self::is_unavailable( + unrequested_validators, + reqs, + chunk_count, + params.threshold, + ) + }, + ) + .await; + + self.total_received_responses += total_responses; + self.error_count += error_count; + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use polkadot_erasure_coding::recovery_threshold; + + #[test] + fn parallel_request_calculation_works_as_expected() { + let num_validators = 100; + let threshold = recovery_threshold(num_validators).unwrap(); + let (erasure_task_tx, _erasure_task_rx) = futures::channel::mpsc::channel(16); + + let mut fetch_chunks_task = + FetchChunks::new(FetchChunksParams { n_validators: 100, erasure_task_tx }); + assert_eq!(fetch_chunks_task.get_desired_request_count(0, threshold), threshold); + fetch_chunks_task.error_count = 1; + fetch_chunks_task.total_received_responses = 1; + // We saturate at threshold (34): + assert_eq!(fetch_chunks_task.get_desired_request_count(0, threshold), threshold); + + fetch_chunks_task.total_received_responses = 2; + // With given error rate - still saturating: + assert_eq!(fetch_chunks_task.get_desired_request_count(1, threshold), threshold); + fetch_chunks_task.total_received_responses += 8; + // error rate: 1/10 + // remaining chunks needed: threshold (34) - 9 + // expected: 24 * (1+ 1/10) = (next greater integer) = 27 + assert_eq!(fetch_chunks_task.get_desired_request_count(9, threshold), 27); + fetch_chunks_task.error_count = 0; + // With error count zero - we should fetch exactly as needed: + assert_eq!(fetch_chunks_task.get_desired_request_count(10, threshold), threshold - 10); + } +} diff --git a/polkadot/node/network/availability-recovery/src/tests.rs b/polkadot/node/network/availability-recovery/src/tests.rs index 60c2d38ab31b..63ccf0e94f91 100644 --- a/polkadot/node/network/availability-recovery/src/tests.rs +++ b/polkadot/node/network/availability-recovery/src/tests.rs @@ -21,15 +21,19 @@ use futures::{executor, future}; use futures_timer::Delay; use 
parity_scale_codec::Encode; -use polkadot_node_network_protocol::request_response::{IncomingRequest, ReqProtocolNames}; +use polkadot_node_network_protocol::request_response::{ + self as req_res, IncomingRequest, Recipient, ReqProtocolNames, Requests, +}; use super::*; -use sc_network::config::RequestResponseConfig; +use sc_network::{config::RequestResponseConfig, IfDisconnected, OutboundFailure, RequestFailure}; use polkadot_erasure_coding::{branches, obtain_chunks_v1 as obtain_chunks}; use polkadot_node_primitives::{BlockData, PoV, Proof}; -use polkadot_node_subsystem::messages::{AllMessages, RuntimeApiMessage, RuntimeApiRequest}; +use polkadot_node_subsystem::messages::{ + AllMessages, NetworkBridgeTxMessage, RuntimeApiMessage, RuntimeApiRequest, +}; use polkadot_node_subsystem_test_helpers::{ make_subsystem_context, mock::new_leaf, TestSubsystemContextHandle, }; @@ -204,7 +208,7 @@ use sp_keyring::Sr25519Keyring; enum Has { No, Yes, - NetworkError(sc_network::RequestFailure), + NetworkError(RequestFailure), /// Make request not return at all, instead the sender is returned from the function. /// /// Note, if you use `DoesNotReturn` you have to keep the returned senders alive, otherwise the @@ -214,7 +218,7 @@ enum Has { impl Has { fn timeout() -> Self { - Has::NetworkError(sc_network::RequestFailure::Network(sc_network::OutboundFailure::Timeout)) + Has::NetworkError(RequestFailure::Network(OutboundFailure::Timeout)) } } @@ -393,7 +397,7 @@ impl TestState { candidate_hash: CandidateHash, virtual_overseer: &mut VirtualOverseer, who_has: impl Fn(usize) -> Has, - ) -> Vec, sc_network::RequestFailure>>> { + ) -> Vec, RequestFailure>>> { let mut senders = Vec::new(); for _ in 0..self.validators.len() { // Receive a request for a chunk. @@ -1010,7 +1014,7 @@ fn recovers_from_only_chunks_if_pov_large() { AvailabilityRecoveryMessage::RecoverAvailableData( new_candidate.clone(), test_state.session_index, - None, + Some(GroupIndex(0)), tx, ), ) @@ -1546,36 +1550,3 @@ fn invalid_local_chunk_is_ignored() { (virtual_overseer, req_cfg) }); } - -#[test] -fn parallel_request_calculation_works_as_expected() { - let num_validators = 100; - let threshold = recovery_threshold(num_validators).unwrap(); - let (erasure_task_tx, _erasure_task_rx) = futures::channel::mpsc::channel(16); - - let mut phase = RequestChunksFromValidators::new(100, erasure_task_tx); - assert_eq!(phase.get_desired_request_count(threshold), threshold); - phase.error_count = 1; - phase.total_received_responses = 1; - // We saturate at threshold (34): - assert_eq!(phase.get_desired_request_count(threshold), threshold); - - let dummy_chunk = - ErasureChunk { chunk: Vec::new(), index: ValidatorIndex(0), proof: Proof::dummy_proof() }; - phase.insert_chunk(ValidatorIndex(0), dummy_chunk.clone()); - phase.total_received_responses = 2; - // With given error rate - still saturating: - assert_eq!(phase.get_desired_request_count(threshold), threshold); - for i in 1..9 { - phase.insert_chunk(ValidatorIndex(i), dummy_chunk.clone()); - } - phase.total_received_responses += 8; - // error rate: 1/10 - // remaining chunks needed: threshold (34) - 9 - // expected: 24 * (1+ 1/10) = (next greater integer) = 27 - assert_eq!(phase.get_desired_request_count(threshold), 27); - phase.insert_chunk(ValidatorIndex(9), dummy_chunk.clone()); - phase.error_count = 0; - // With error count zero - we should fetch exactly as needed: - assert_eq!(phase.get_desired_request_count(threshold), threshold - phase.chunk_count()); -} From 
e7d29bc349881d6f62bbe7079a2f7429649cbffd Mon Sep 17 00:00:00 2001 From: Gavin Wood Date: Wed, 20 Sep 2023 13:56:57 +0100 Subject: [PATCH 30/69] XCM: Deprecate old functions (#1645) Two old functions should be deprecated and have their code altered in line with capabilities of XCMv2 and above. This means we can use the proper `Unlimited` form of weight limit rather than guessing weight from the local chain. --- polkadot/xcm/pallet-xcm/src/lib.rs | 48 ++++++---------------------- polkadot/xcm/pallet-xcm/src/tests.rs | 4 +-- 2 files changed, 12 insertions(+), 40 deletions(-) diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index 9dfb63c2c9c6..5acd093d3bca 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -787,6 +787,8 @@ pub mod pallet { /// Teleport some assets from the local chain to some destination chain. /// + /// **This function is deprecated: Use `limited_teleport_assets` instead.** + /// /// Fee payment on the destination side is made from the asset in the `assets` vector of /// index `fee_asset_item`. The weight limit for fees is not provided and thus is unlimited, /// with all fees taken as needed from the asset. @@ -830,12 +832,14 @@ pub mod pallet { assets: Box, fee_asset_item: u32, ) -> DispatchResult { - Self::do_teleport_assets(origin, dest, beneficiary, assets, fee_asset_item, None) + Self::do_teleport_assets(origin, dest, beneficiary, assets, fee_asset_item, Unlimited) } /// Transfer some assets from the local chain to the sovereign account of a destination /// chain and forward a notification XCM. /// + /// **This function is deprecated: Use `limited_reserve_transfer_assets` instead.** + /// /// Fee payment on the destination side is made from the asset in the `assets` vector of /// index `fee_asset_item`. The weight limit for fees is not provided and thus is unlimited, /// with all fees taken as needed from the asset. @@ -879,7 +883,7 @@ pub mod pallet { beneficiary, assets, fee_asset_item, - None, + Unlimited, ) } @@ -1055,7 +1059,7 @@ pub mod pallet { beneficiary, assets, fee_asset_item, - Some(weight_limit), + weight_limit, ) } @@ -1109,7 +1113,7 @@ pub mod pallet { beneficiary, assets, fee_asset_item, - Some(weight_limit), + weight_limit, ) } @@ -1197,7 +1201,7 @@ impl Pallet { beneficiary: Box, assets: Box, fee_asset_item: u32, - maybe_weight_limit: Option, + weight_limit: WeightLimit, ) -> DispatchResult { let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; let dest = (*dest).try_into().map_err(|()| Error::::BadVersion)?; @@ -1218,22 +1222,6 @@ impl Pallet { .map_err(|_| Error::::CannotReanchor)?; let max_assets = assets.len() as u32; let assets: MultiAssets = assets.into(); - let weight_limit = match maybe_weight_limit { - Some(weight_limit) => weight_limit, - None => { - let fees = fees.clone(); - let mut remote_message = Xcm(vec![ - ReserveAssetDeposited(assets.clone()), - ClearOrigin, - BuyExecution { fees, weight_limit: Limited(Weight::zero()) }, - DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }, - ]); - // use local weight for remote message and hope for the best. 
- let remote_weight = T::Weigher::weight(&mut remote_message) - .map_err(|()| Error::::UnweighableMessage)?; - Limited(remote_weight) - }, - }; let xcm = Xcm(vec![ BuyExecution { fees, weight_limit }, DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }, @@ -1257,7 +1245,7 @@ impl Pallet { beneficiary: Box, assets: Box, fee_asset_item: u32, - maybe_weight_limit: Option, + weight_limit: WeightLimit, ) -> DispatchResult { let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; let dest = (*dest).try_into().map_err(|()| Error::::BadVersion)?; @@ -1278,22 +1266,6 @@ impl Pallet { .map_err(|_| Error::::CannotReanchor)?; let max_assets = assets.len() as u32; let assets: MultiAssets = assets.into(); - let weight_limit = match maybe_weight_limit { - Some(weight_limit) => weight_limit, - None => { - let fees = fees.clone(); - let mut remote_message = Xcm(vec![ - ReceiveTeleportedAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees, weight_limit: Limited(Weight::zero()) }, - DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }, - ]); - // use local weight for remote message and hope for the best. - let remote_weight = T::Weigher::weight(&mut remote_message) - .map_err(|()| Error::::UnweighableMessage)?; - Limited(remote_weight) - }, - }; let xcm = Xcm(vec![ BuyExecution { fees, weight_limit }, DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }, diff --git a/polkadot/xcm/pallet-xcm/src/tests.rs b/polkadot/xcm/pallet-xcm/src/tests.rs index 486887598c78..5d8aee8d665f 100644 --- a/polkadot/xcm/pallet-xcm/src/tests.rs +++ b/polkadot/xcm/pallet-xcm/src/tests.rs @@ -375,7 +375,7 @@ fn teleport_assets_works() { Xcm(vec![ ReceiveTeleportedAsset((Here, SEND_AMOUNT).into()), ClearOrigin, - buy_limited_execution((Here, SEND_AMOUNT), Weight::from_parts(4000, 4000)), + buy_execution((Here, SEND_AMOUNT)), DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, ]), )] @@ -508,7 +508,7 @@ fn reserve_transfer_assets_works() { Xcm(vec![ ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), ClearOrigin, - buy_limited_execution((Parent, SEND_AMOUNT), Weight::from_parts(4000, 4000)), + buy_execution((Parent, SEND_AMOUNT)), DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, ]), )] From 1517eb8dd47722830c56ab1498806707851f2540 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 20 Sep 2023 15:56:40 +0200 Subject: [PATCH 31/69] Bump the known_good_semver group with 2 updates (#1620) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [//]: # (dependabot-start) ⚠️ **Dependabot is rebasing this PR** ⚠️ Rebasing might not happen immediately, so don't worry if this takes some time. Note: if you make any changes to this PR yourself, they will take precedence over the rebase. --- [//]: # (dependabot-end) Bumps the known_good_semver group with 2 updates: [clap](https://github.com/clap-rs/clap) and [syn](https://github.com/dtolnay/syn). Updates `clap` from 4.4.3 to 4.4.4
Release notes for clap 4.4.4 (from clap's changelog):

[4.4.4] - 2023-09-18

Internal:

- Update `terminal_size` to 0.3

Updates `syn` from 2.0.36 to 2.0.37

Release notes for syn 2.0.37 (from syn's releases):

- Work around incorrect future compatibility warning in rustc 1.74.0-nightly

Commits:

- 9681088 Release 2.0.37
- fbe3bc2 Work around `unknown_lints` warning on rustc older than 1.64
- 75cf912 Ignore more `repr_transparent_external_private_fields`
- 299c782 Ignore false `repr_transparent_external_private_fields` FCW in generated code

Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 162 +++++++++--------- cumulus/client/cli/Cargo.toml | 2 +- .../parachain-system/proc-macro/Cargo.toml | 2 +- cumulus/parachain-template/node/Cargo.toml | 2 +- cumulus/polkadot-parachain/Cargo.toml | 2 +- cumulus/test/service/Cargo.toml | 2 +- polkadot/cli/Cargo.toml | 2 +- polkadot/node/gum/proc-macro/Cargo.toml | 2 +- polkadot/node/malus/Cargo.toml | 2 +- .../test-parachains/adder/collator/Cargo.toml | 2 +- .../undying/collator/Cargo.toml | 2 +- polkadot/utils/generate-bags/Cargo.toml | 2 +- .../remote-ext-tests/bags-list/Cargo.toml | 2 +- polkadot/xcm/procedural/Cargo.toml | 2 +- substrate/bin/node-template/node/Cargo.toml | 2 +- substrate/bin/node/bench/Cargo.toml | 2 +- substrate/bin/node/cli/Cargo.toml | 4 +- substrate/bin/node/inspect/Cargo.toml | 2 +- .../bin/utils/chain-spec-builder/Cargo.toml | 2 +- substrate/bin/utils/subkey/Cargo.toml | 2 +- substrate/client/chain-spec/derive/Cargo.toml | 2 +- substrate/client/cli/Cargo.toml | 2 +- substrate/client/storage-monitor/Cargo.toml | 2 +- .../client/tracing/proc-macro/Cargo.toml | 2 +- .../frame/contracts/proc-macro/Cargo.toml | 2 +- .../solution-type/Cargo.toml | 2 +- .../solution-type/fuzzer/Cargo.toml | 2 +- .../frame/staking/reward-curve/Cargo.toml | 2 +- substrate/frame/support/procedural/Cargo.toml | 2 +- .../frame/support/procedural/tools/Cargo.toml | 2 +- .../procedural/tools/derive/Cargo.toml | 2 +- .../primitives/api/proc-macro/Cargo.toml | 2 +- .../core/hashing/proc-macro/Cargo.toml | 2 +- substrate/primitives/debug-derive/Cargo.toml | 2 +- .../npos-elections/fuzzer/Cargo.toml | 2 +- .../runtime-interface/proc-macro/Cargo.toml | 2 +- .../primitives/version/proc-macro/Cargo.toml | 2 +- .../ci/node-template-release/Cargo.toml | 2 +- .../utils/frame/benchmarking-cli/Cargo.toml | 2 +- .../frame/frame-utilities-cli/Cargo.toml | 2 +- .../generate-bags/node-runtime/Cargo.toml | 2 +- .../utils/frame/try-runtime/cli/Cargo.toml | 2 +- 42 files changed, 123 insertions(+), 123 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ce0d70112042..2ed92d3240bf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1138,7 +1138,7 @@ checksum = "0e97ce7de6cf12de5d7226c73f5ba9811622f4db3a5b91b55c53e987e5f91cba" dependencies = [ "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -1160,7 +1160,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -1177,7 +1177,7 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -1351,7 +1351,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -2333,7 +2333,7 @@ name = "chain-spec-builder" version = "2.0.0" dependencies = [ "ansi_term", - "clap 4.4.3", + "clap 4.4.4", "node-cli", "rand 0.8.5", "sc-chain-spec", @@ -2463,9 +2463,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.3" +version = "4.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84ed82781cea27b43c9b106a979fe450a13a31aab0500595fb3fc06616de08e6" +checksum = "b1d7b8d5ec32af0fadc644bf1fd509a688c2103b185644bb1e29d164e0703136" dependencies = [ "clap_builder", "clap_derive 4.4.2", @@ -2473,9 +2473,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.4.2" +version = 
"4.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bb9faaa7c2ef94b2743a21f5a29e6f0010dff4caa69ac8e9d6cf8b6fa74da08" +checksum = "5179bb514e4d7c2051749d8fcefa2ed6d06a9f4e6d69faf3805f5d80b8cf8d56" dependencies = [ "anstream", "anstyle", @@ -2489,7 +2489,7 @@ version = "4.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "586a385f7ef2f8b4d86bddaa0c094794e7ccbfe5ffef1f434fe928143fc783a5" dependencies = [ - "clap 4.4.3", + "clap 4.4.4", ] [[package]] @@ -2514,7 +2514,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -3085,7 +3085,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.4.3", + "clap 4.4.4", "criterion-plot", "futures", "is-terminal", @@ -3250,7 +3250,7 @@ dependencies = [ name = "cumulus-client-cli" version = "0.1.0" dependencies = [ - "clap 4.4.3", + "clap 4.4.4", "parity-scale-codec", "sc-chain-spec", "sc-cli", @@ -3574,7 +3574,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -3935,7 +3935,7 @@ name = "cumulus-test-service" version = "0.1.0" dependencies = [ "async-trait", - "clap 4.4.3", + "clap 4.4.4", "criterion 0.5.1", "cumulus-client-cli", "cumulus-client-consensus-common", @@ -4057,7 +4057,7 @@ checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -4097,7 +4097,7 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -4114,7 +4114,7 @@ checksum = "50c49547d73ba8dcfd4ad7325d64c6d5391ff4224d498fc39a6f3f49825a530d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -4412,7 +4412,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -4474,7 +4474,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.36", + "syn 2.0.37", "termcolor", "toml 0.7.6", "walkdir", @@ -4695,7 +4695,7 @@ checksum = "5e9a1f9f7d83e59740248a6e14ecf93929ade55027844dfcea78beafccc15745" dependencies = [ "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -4706,7 +4706,7 @@ checksum = "c2ad8cef1d801a4686bfd8919f0b30eac4c8e48968c437a6405ded4fb5272d2b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -4851,7 +4851,7 @@ dependencies = [ "fs-err", "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -5141,7 +5141,7 @@ dependencies = [ "Inflector", "array-bytes", "chrono", - "clap 4.4.3", + "clap 4.4.4", "comfy-table", "frame-benchmarking", "frame-support", @@ -5207,7 +5207,7 @@ dependencies = [ "quote", "scale-info", "sp-arithmetic", - "syn 2.0.36", + "syn 2.0.37", "trybuild", ] @@ -5233,7 +5233,7 @@ dependencies = [ name = "frame-election-solution-type-fuzzer" version = "2.0.0-alpha.5" dependencies = [ - "clap 4.4.3", + "clap 4.4.4", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-support", @@ -5359,7 +5359,7 @@ dependencies = [ "proc-macro-warning", "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -5370,7 +5370,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -5379,7 +5379,7 @@ version = "3.0.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -5602,7 
+5602,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -7617,7 +7617,7 @@ dependencies = [ "macro_magic_core", "macro_magic_macros", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -7631,7 +7631,7 @@ dependencies = [ "macro_magic_core_macros", "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -7642,7 +7642,7 @@ checksum = "c12469fc165526520dff2807c2975310ab47cf7190a45b99b49a7dc8befab17b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -7653,7 +7653,7 @@ checksum = "b8fb85ec1620619edf2984a7693497d4ec88a9665d8b87e942856884c92dbf2a" dependencies = [ "macro_magic_core", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -8140,7 +8140,7 @@ name = "node-bench" version = "0.9.0-dev" dependencies = [ "array-bytes", - "clap 4.4.3", + "clap 4.4.4", "derive_more", "fs_extra", "futures", @@ -8177,7 +8177,7 @@ version = "3.0.0-dev" dependencies = [ "array-bytes", "assert_cmd", - "clap 4.4.3", + "clap 4.4.4", "clap_complete", "criterion 0.4.0", "frame-benchmarking-cli", @@ -8303,7 +8303,7 @@ dependencies = [ name = "node-inspect" version = "0.9.0-dev" dependencies = [ - "clap 4.4.3", + "clap 4.4.4", "parity-scale-codec", "sc-cli", "sc-client-api", @@ -8357,7 +8357,7 @@ dependencies = [ name = "node-runtime-generate-bags" version = "3.0.0" dependencies = [ - "clap 4.4.3", + "clap 4.4.4", "generate-bags", "kitchensink-runtime", ] @@ -8366,7 +8366,7 @@ dependencies = [ name = "node-template" version = "4.0.0-dev" dependencies = [ - "clap 4.4.3", + "clap 4.4.4", "frame-benchmarking", "frame-benchmarking-cli", "frame-system", @@ -8409,7 +8409,7 @@ dependencies = [ name = "node-template-release" version = "3.0.0" dependencies = [ - "clap 4.4.3", + "clap 4.4.4", "flate2", "fs_extra", "glob", @@ -9339,7 +9339,7 @@ version = "4.0.0-dev" dependencies = [ "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -10410,7 +10410,7 @@ dependencies = [ "proc-macro2", "quote", "sp-runtime", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -10800,7 +10800,7 @@ dependencies = [ name = "parachain-template-node" version = "0.1.0" dependencies = [ - "clap 4.4.3", + "clap 4.4.4", "color-print", "cumulus-client-cli", "cumulus-client-collator", @@ -11277,7 +11277,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -11318,7 +11318,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -11537,7 +11537,7 @@ dependencies = [ name = "polkadot-cli" version = "1.1.0" dependencies = [ - "clap 4.4.3", + "clap 4.4.4", "frame-benchmarking-cli", "futures", "log", @@ -12357,7 +12357,7 @@ dependencies = [ "bridge-hub-kusama-runtime", "bridge-hub-polkadot-runtime", "bridge-hub-rococo-runtime", - "clap 4.4.3", + "clap 4.4.4", "collectives-polkadot-runtime", "color-print", "contracts-rococo-runtime", @@ -12967,7 +12967,7 @@ version = "1.0.0" dependencies = [ "assert_matches", "async-trait", - "clap 4.4.3", + "clap 4.4.4", "color-eyre", "futures", "futures-timer", @@ -13113,7 +13113,7 @@ dependencies = [ name = "polkadot-voter-bags" version = "1.0.0" dependencies = [ - "clap 4.4.3", + "clap 4.4.4", "generate-bags", "polkadot-runtime", "sp-io", @@ -13293,7 +13293,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"6c64d9ba0963cdcea2e1b2230fbae2bab30eb25a174be395c41e764bfb65dd62" dependencies = [ "proc-macro2", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -13375,7 +13375,7 @@ checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -13421,7 +13421,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -13812,7 +13812,7 @@ checksum = "7f7473c2cfcf90008193dd0e3e16599455cb601a9fce322b5bb55de799664925" dependencies = [ "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -13875,7 +13875,7 @@ checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" name = "remote-ext-tests-bags-list" version = "1.0.0" dependencies = [ - "clap 4.4.3", + "clap 4.4.4", "frame-system", "kusama-runtime-constants", "log", @@ -14575,7 +14575,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -14584,7 +14584,7 @@ version = "0.10.0-dev" dependencies = [ "array-bytes", "chrono", - "clap 4.4.3", + "clap 4.4.4", "fdlimit", "futures", "futures-timer", @@ -15686,7 +15686,7 @@ dependencies = [ name = "sc-storage-monitor" version = "0.1.0" dependencies = [ - "clap 4.4.3", + "clap 4.4.4", "fs4", "log", "sc-client-db", @@ -15786,7 +15786,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -16145,7 +16145,7 @@ checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -16211,7 +16211,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -16643,7 +16643,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -17043,7 +17043,7 @@ version = "9.0.0" dependencies = [ "quote", "sp-core-hashing", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -17087,7 +17087,7 @@ version = "8.0.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -17225,7 +17225,7 @@ dependencies = [ name = "sp-npos-elections-fuzzer" version = "2.0.0-alpha.5" dependencies = [ - "clap 4.4.3", + "clap 4.4.4", "honggfuzz", "rand 0.8.5", "sp-npos-elections", @@ -17318,7 +17318,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -17558,7 +17558,7 @@ dependencies = [ "proc-macro2", "quote", "sp-version", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -17942,7 +17942,7 @@ dependencies = [ name = "subkey" version = "3.0.0" dependencies = [ - "clap 4.4.3", + "clap 4.4.4", "sc-cli", ] @@ -17984,7 +17984,7 @@ dependencies = [ name = "substrate-frame-cli" version = "4.0.0-dev" dependencies = [ - "clap 4.4.3", + "clap 4.4.4", "frame-support", "frame-system", "sc-cli", @@ -18333,9 +18333,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.36" +version = "2.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e02e55d62894af2a08aca894c6577281f76769ba47c94d5756bec8ac6e7373" +checksum = "7303ef2c05cd654186cb250d29049a24840ca25d2747c25c0381c8d9e2f582e8" dependencies = [ "proc-macro2", "quote", @@ -18443,7 +18443,7 @@ dependencies = [ name = "test-parachain-adder-collator" 
version = "1.0.0" dependencies = [ - "clap 4.4.3", + "clap 4.4.4", "futures", "futures-timer", "log", @@ -18491,7 +18491,7 @@ dependencies = [ name = "test-parachain-undying-collator" version = "1.0.0" dependencies = [ - "clap 4.4.3", + "clap 4.4.4", "futures", "futures-timer", "log", @@ -18580,7 +18580,7 @@ checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" dependencies = [ "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -18760,7 +18760,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -18941,7 +18941,7 @@ checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -18984,7 +18984,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -19137,7 +19137,7 @@ version = "0.10.0-dev" dependencies = [ "assert_cmd", "async-trait", - "clap 4.4.3", + "clap 4.4.4", "frame-remote-externalities", "frame-try-runtime", "hex", @@ -19533,7 +19533,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", "wasm-bindgen-shared", ] @@ -19567,7 +19567,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -20703,7 +20703,7 @@ dependencies = [ "Inflector", "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] @@ -20822,7 +20822,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.36", + "syn 2.0.37", ] [[package]] diff --git a/cumulus/client/cli/Cargo.toml b/cumulus/client/cli/Cargo.toml index 15f28d73b36e..c45b669fc6d1 100644 --- a/cumulus/client/cli/Cargo.toml +++ b/cumulus/client/cli/Cargo.toml @@ -5,7 +5,7 @@ authors.workspace = true edition.workspace = true [dependencies] -clap = { version = "4.4.3", features = ["derive"] } +clap = { version = "4.4.4", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.0.0" } url = "2.4.0" diff --git a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml index ee0943bb99ee..a1510847fc2f 100644 --- a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml +++ b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml @@ -9,7 +9,7 @@ description = "Proc macros provided by the parachain-system pallet" proc-macro = true [dependencies] -syn = "2.0.36" +syn = "2.0.37" proc-macro2 = "1.0.64" quote = "1.0.33" proc-macro-crate = "1.3.1" diff --git a/cumulus/parachain-template/node/Cargo.toml b/cumulus/parachain-template/node/Cargo.toml index 6de57b185e4a..223a78dacc49 100644 --- a/cumulus/parachain-template/node/Cargo.toml +++ b/cumulus/parachain-template/node/Cargo.toml @@ -11,7 +11,7 @@ build = "build.rs" publish = false [dependencies] -clap = { version = "4.4.3", features = ["derive"] } +clap = { version = "4.4.4", features = ["derive"] } log = "0.4.20" codec = { package = "parity-scale-codec", version = "3.0.0" } serde = { version = "1.0.188", features = ["derive"] } diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml index 5591e80317ca..ac8ad53b5243 100644 --- a/cumulus/polkadot-parachain/Cargo.toml +++ 
b/cumulus/polkadot-parachain/Cargo.toml @@ -12,7 +12,7 @@ path = "src/main.rs" [dependencies] async-trait = "0.1.73" -clap = { version = "4.4.3", features = ["derive"] } +clap = { version = "4.4.4", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.0.0" } futures = "0.3.28" hex-literal = "0.4.1" diff --git a/cumulus/test/service/Cargo.toml b/cumulus/test/service/Cargo.toml index b8469cb66a74..5285376f3d59 100644 --- a/cumulus/test/service/Cargo.toml +++ b/cumulus/test/service/Cargo.toml @@ -11,7 +11,7 @@ path = "src/main.rs" [dependencies] async-trait = "0.1.73" -clap = { version = "4.4.3", features = ["derive"] } +clap = { version = "4.4.4", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.0.0" } criterion = { version = "0.5.1", features = [ "async_tokio" ] } jsonrpsee = { version = "0.16.2", features = ["server"] } diff --git a/polkadot/cli/Cargo.toml b/polkadot/cli/Cargo.toml index 83fd64759b7f..53961c90a2a8 100644 --- a/polkadot/cli/Cargo.toml +++ b/polkadot/cli/Cargo.toml @@ -15,7 +15,7 @@ wasm-opt = false crate-type = ["cdylib", "rlib"] [dependencies] -clap = { version = "4.4.3", features = ["derive"], optional = true } +clap = { version = "4.4.4", features = ["derive"], optional = true } log = "0.4.17" thiserror = "1.0.48" futures = "0.3.21" diff --git a/polkadot/node/gum/proc-macro/Cargo.toml b/polkadot/node/gum/proc-macro/Cargo.toml index be302e46ad90..83d064cadbed 100644 --- a/polkadot/node/gum/proc-macro/Cargo.toml +++ b/polkadot/node/gum/proc-macro/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -syn = { version = "2.0.36", features = ["full", "extra-traits"] } +syn = { version = "2.0.37", features = ["full", "extra-traits"] } quote = "1.0.28" proc-macro2 = "1.0.56" proc-macro-crate = "1.1.3" diff --git a/polkadot/node/malus/Cargo.toml b/polkadot/node/malus/Cargo.toml index 9fa22aef4dcc..42dd4af73c13 100644 --- a/polkadot/node/malus/Cargo.toml +++ b/polkadot/node/malus/Cargo.toml @@ -40,7 +40,7 @@ assert_matches = "1.5" async-trait = "0.1.57" sp-keystore = { path = "../../../substrate/primitives/keystore" } sp-core = { path = "../../../substrate/primitives/core" } -clap = { version = "4.4.3", features = ["derive"] } +clap = { version = "4.4.4", features = ["derive"] } futures = "0.3.21" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../gum" } diff --git a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml index a2ce15c21db2..fcbba9bbe212 100644 --- a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml @@ -13,7 +13,7 @@ path = "src/main.rs" [dependencies] parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } -clap = { version = "4.4.3", features = ["derive"] } +clap = { version = "4.4.4", features = ["derive"] } futures = "0.3.21" futures-timer = "3.0.2" log = "0.4.17" diff --git a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml index 9a5e580af1b9..3fbed4046bde 100644 --- a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml @@ -13,7 +13,7 @@ path = "src/main.rs" [dependencies] parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } -clap = { version = "4.4.3", features = 
["derive"] } +clap = { version = "4.4.4", features = ["derive"] } futures = "0.3.21" futures-timer = "3.0.2" log = "0.4.17" diff --git a/polkadot/utils/generate-bags/Cargo.toml b/polkadot/utils/generate-bags/Cargo.toml index 99948cb68b18..d270cabd3f03 100644 --- a/polkadot/utils/generate-bags/Cargo.toml +++ b/polkadot/utils/generate-bags/Cargo.toml @@ -6,7 +6,7 @@ edition.workspace = true license.workspace = true [dependencies] -clap = { version = "4.4.3", features = ["derive"] } +clap = { version = "4.4.4", features = ["derive"] } generate-bags = { path = "../../../substrate/utils/frame/generate-bags" } sp-io = { path = "../../../substrate/primitives/io" } diff --git a/polkadot/utils/remote-ext-tests/bags-list/Cargo.toml b/polkadot/utils/remote-ext-tests/bags-list/Cargo.toml index ed49f77bb81a..cb9547ac7dc6 100644 --- a/polkadot/utils/remote-ext-tests/bags-list/Cargo.toml +++ b/polkadot/utils/remote-ext-tests/bags-list/Cargo.toml @@ -19,6 +19,6 @@ sp-tracing = { path = "../../../../substrate/primitives/tracing" } frame-system = { path = "../../../../substrate/frame/system" } sp-core = { path = "../../../../substrate/primitives/core" } -clap = { version = "4.4.3", features = ["derive"] } +clap = { version = "4.4.4", features = ["derive"] } log = "0.4.17" tokio = { version = "1.24.2", features = ["macros"] } diff --git a/polkadot/xcm/procedural/Cargo.toml b/polkadot/xcm/procedural/Cargo.toml index 6beaa1d667f0..1ff73c64780e 100644 --- a/polkadot/xcm/procedural/Cargo.toml +++ b/polkadot/xcm/procedural/Cargo.toml @@ -11,5 +11,5 @@ proc-macro = true [dependencies] proc-macro2 = "1.0.56" quote = "1.0.28" -syn = "2.0.36" +syn = "2.0.37" Inflector = "0.11.4" diff --git a/substrate/bin/node-template/node/Cargo.toml b/substrate/bin/node-template/node/Cargo.toml index b510ed34c23a..35654e7d5649 100644 --- a/substrate/bin/node-template/node/Cargo.toml +++ b/substrate/bin/node-template/node/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] name = "node-template" [dependencies] -clap = { version = "4.4.3", features = ["derive"] } +clap = { version = "4.4.4", features = ["derive"] } futures = { version = "0.3.21", features = ["thread-pool"]} sc-cli = { path = "../../../client/cli" } diff --git a/substrate/bin/node/bench/Cargo.toml b/substrate/bin/node/bench/Cargo.toml index 05c13e312b21..33560bca4923 100644 --- a/substrate/bin/node/bench/Cargo.toml +++ b/substrate/bin/node/bench/Cargo.toml @@ -13,7 +13,7 @@ publish = false [dependencies] array-bytes = "6.1" -clap = { version = "4.4.3", features = ["derive"] } +clap = { version = "4.4.4", features = ["derive"] } log = "0.4.17" node-primitives = { path = "../primitives" } node-testing = { path = "../testing" } diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml index 84d8de11a35f..c47f8a5c3e52 100644 --- a/substrate/bin/node/cli/Cargo.toml +++ b/substrate/bin/node/cli/Cargo.toml @@ -38,7 +38,7 @@ crate-type = ["cdylib", "rlib"] [dependencies] # third-party dependencies array-bytes = "6.1" -clap = { version = "4.4.3", features = ["derive"], optional = true } +clap = { version = "4.4.4", features = ["derive"], optional = true } codec = { package = "parity-scale-codec", version = "3.6.1" } serde = { version = "1.0.188", features = ["derive"] } jsonrpsee = { version = "0.16.2", features = ["server"] } @@ -135,7 +135,7 @@ pallet-timestamp = { path = "../../../frame/timestamp" } substrate-cli-test-utils = { path = "../../../test-utils/cli" } [build-dependencies] -clap = { version = "4.4.3", optional = true } 
+clap = { version = "4.4.4", optional = true } clap_complete = { version = "4.0.2", optional = true } node-inspect = { path = "../inspect", optional = true} frame-benchmarking-cli = { path = "../../../utils/frame/benchmarking-cli", optional = true} diff --git a/substrate/bin/node/inspect/Cargo.toml b/substrate/bin/node/inspect/Cargo.toml index f28ceb459571..06e9674117e6 100644 --- a/substrate/bin/node/inspect/Cargo.toml +++ b/substrate/bin/node/inspect/Cargo.toml @@ -13,7 +13,7 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -clap = { version = "4.4.3", features = ["derive"] } +clap = { version = "4.4.4", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.6.1" } thiserror = "1.0" sc-cli = { path = "../../../client/cli" } diff --git a/substrate/bin/utils/chain-spec-builder/Cargo.toml b/substrate/bin/utils/chain-spec-builder/Cargo.toml index c626a1612eb1..f564ff19af0f 100644 --- a/substrate/bin/utils/chain-spec-builder/Cargo.toml +++ b/substrate/bin/utils/chain-spec-builder/Cargo.toml @@ -22,7 +22,7 @@ crate-type = ["rlib"] [dependencies] ansi_term = "0.12.1" -clap = { version = "4.4.3", features = ["derive"] } +clap = { version = "4.4.4", features = ["derive"] } rand = "0.8" node-cli = { path = "../../node/cli" } sc-chain-spec = { path = "../../../client/chain-spec" } diff --git a/substrate/bin/utils/subkey/Cargo.toml b/substrate/bin/utils/subkey/Cargo.toml index 8a5f4ec49c54..4e8cb606c94e 100644 --- a/substrate/bin/utils/subkey/Cargo.toml +++ b/substrate/bin/utils/subkey/Cargo.toml @@ -17,5 +17,5 @@ path = "src/main.rs" name = "subkey" [dependencies] -clap = { version = "4.4.3", features = ["derive"] } +clap = { version = "4.4.4", features = ["derive"] } sc-cli = { path = "../../../client/cli" } diff --git a/substrate/client/chain-spec/derive/Cargo.toml b/substrate/client/chain-spec/derive/Cargo.toml index f0ad4c68d1f0..202817438b7d 100644 --- a/substrate/client/chain-spec/derive/Cargo.toml +++ b/substrate/client/chain-spec/derive/Cargo.toml @@ -18,4 +18,4 @@ proc-macro = true proc-macro-crate = "1.1.3" proc-macro2 = "1.0.56" quote = "1.0.28" -syn = "2.0.36" +syn = "2.0.37" diff --git a/substrate/client/cli/Cargo.toml b/substrate/client/cli/Cargo.toml index b781e8782ec7..cfdcb39b1fa7 100644 --- a/substrate/client/cli/Cargo.toml +++ b/substrate/client/cli/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = "6.1" chrono = "0.4.27" -clap = { version = "4.4.3", features = ["derive", "string"] } +clap = { version = "4.4.4", features = ["derive", "string"] } fdlimit = "0.2.1" futures = "0.3.21" libp2p-identity = { version = "0.1.3", features = ["peerid", "ed25519"]} diff --git a/substrate/client/storage-monitor/Cargo.toml b/substrate/client/storage-monitor/Cargo.toml index 6d425749a91b..7538f5ba602c 100644 --- a/substrate/client/storage-monitor/Cargo.toml +++ b/substrate/client/storage-monitor/Cargo.toml @@ -9,7 +9,7 @@ description = "Storage monitor service for substrate" homepage = "https://substrate.io" [dependencies] -clap = { version = "4.4.3", features = ["derive", "string"] } +clap = { version = "4.4.4", features = ["derive", "string"] } log = "0.4.17" fs4 = "0.6.3" sc-client-db = { path = "../db", default-features = false} diff --git a/substrate/client/tracing/proc-macro/Cargo.toml b/substrate/client/tracing/proc-macro/Cargo.toml index 270f34b6d04b..f18e0aacd377 100644 --- a/substrate/client/tracing/proc-macro/Cargo.toml +++ b/substrate/client/tracing/proc-macro/Cargo.toml @@ -18,4 +18,4 @@ 
proc-macro = true proc-macro-crate = "1.1.3" proc-macro2 = "1.0.56" quote = { version = "1.0.28", features = ["proc-macro"] } -syn = { version = "2.0.36", features = ["proc-macro", "full", "extra-traits", "parsing"] } +syn = { version = "2.0.37", features = ["proc-macro", "full", "extra-traits", "parsing"] } diff --git a/substrate/frame/contracts/proc-macro/Cargo.toml b/substrate/frame/contracts/proc-macro/Cargo.toml index ccc80a2eba4d..a04f55440670 100644 --- a/substrate/frame/contracts/proc-macro/Cargo.toml +++ b/substrate/frame/contracts/proc-macro/Cargo.toml @@ -17,7 +17,7 @@ proc-macro = true [dependencies] proc-macro2 = "1.0.56" quote = "1.0.28" -syn = { version = "2.0.36", features = ["full"] } +syn = { version = "2.0.37", features = ["full"] } [dev-dependencies] diff --git a/substrate/frame/election-provider-support/solution-type/Cargo.toml b/substrate/frame/election-provider-support/solution-type/Cargo.toml index 1b432204470a..39e535c6c3ee 100644 --- a/substrate/frame/election-provider-support/solution-type/Cargo.toml +++ b/substrate/frame/election-provider-support/solution-type/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -syn = { version = "2.0.36", features = ["full", "visit"] } +syn = { version = "2.0.37", features = ["full", "visit"] } quote = "1.0.28" proc-macro2 = "1.0.56" proc-macro-crate = "1.1.3" diff --git a/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml b/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml index fe84c4c0ef94..6ac09dd45c60 100644 --- a/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml +++ b/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml @@ -13,7 +13,7 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -clap = { version = "4.4.3", features = ["derive"] } +clap = { version = "4.4.4", features = ["derive"] } honggfuzz = "0.5" rand = { version = "0.8", features = ["std", "small_rng"] } diff --git a/substrate/frame/staking/reward-curve/Cargo.toml b/substrate/frame/staking/reward-curve/Cargo.toml index 7646bbc9a55d..484afb6136bf 100644 --- a/substrate/frame/staking/reward-curve/Cargo.toml +++ b/substrate/frame/staking/reward-curve/Cargo.toml @@ -18,7 +18,7 @@ proc-macro = true proc-macro-crate = "1.1.3" proc-macro2 = "1.0.56" quote = "1.0.28" -syn = { version = "2.0.36", features = ["full", "visit"] } +syn = { version = "2.0.37", features = ["full", "visit"] } [dev-dependencies] sp-runtime = { path = "../../../primitives/runtime" } diff --git a/substrate/frame/support/procedural/Cargo.toml b/substrate/frame/support/procedural/Cargo.toml index e16068546056..6381e430f2ba 100644 --- a/substrate/frame/support/procedural/Cargo.toml +++ b/substrate/frame/support/procedural/Cargo.toml @@ -21,7 +21,7 @@ cfg-expr = "0.15.5" itertools = "0.10.3" proc-macro2 = "1.0.56" quote = "1.0.28" -syn = { version = "2.0.36", features = ["full"] } +syn = { version = "2.0.37", features = ["full"] } frame-support-procedural-tools = { path = "tools" } proc-macro-warning = { version = "0.4.2", default-features = false } macro_magic = { version = "0.4.2", features = ["proc_support"] } diff --git a/substrate/frame/support/procedural/tools/Cargo.toml b/substrate/frame/support/procedural/tools/Cargo.toml index fb0a1b51cbcf..7589fa353d16 100644 --- a/substrate/frame/support/procedural/tools/Cargo.toml +++ b/substrate/frame/support/procedural/tools/Cargo.toml @@ -15,5 +15,5 @@ targets = ["x86_64-unknown-linux-gnu"] 
proc-macro-crate = "1.1.3" proc-macro2 = "1.0.56" quote = "1.0.28" -syn = { version = "2.0.36", features = ["full", "visit", "extra-traits"] } +syn = { version = "2.0.37", features = ["full", "visit", "extra-traits"] } frame-support-procedural-tools-derive = { path = "derive" } diff --git a/substrate/frame/support/procedural/tools/derive/Cargo.toml b/substrate/frame/support/procedural/tools/derive/Cargo.toml index 747d3bacd425..5bf67d43d06e 100644 --- a/substrate/frame/support/procedural/tools/derive/Cargo.toml +++ b/substrate/frame/support/procedural/tools/derive/Cargo.toml @@ -17,4 +17,4 @@ proc-macro = true [dependencies] proc-macro2 = "1.0.56" quote = { version = "1.0.28", features = ["proc-macro"] } -syn = { version = "2.0.36", features = ["proc-macro", "full", "extra-traits", "parsing"] } +syn = { version = "2.0.37", features = ["proc-macro", "full", "extra-traits", "parsing"] } diff --git a/substrate/primitives/api/proc-macro/Cargo.toml b/substrate/primitives/api/proc-macro/Cargo.toml index 71f1ff95d555..de5ddcf9dac0 100644 --- a/substrate/primitives/api/proc-macro/Cargo.toml +++ b/substrate/primitives/api/proc-macro/Cargo.toml @@ -17,7 +17,7 @@ proc-macro = true [dependencies] quote = "1.0.28" -syn = { version = "2.0.36", features = ["full", "fold", "extra-traits", "visit"] } +syn = { version = "2.0.37", features = ["full", "fold", "extra-traits", "visit"] } proc-macro2 = "1.0.56" blake2 = { version = "0.10.4", default-features = false } proc-macro-crate = "1.1.3" diff --git a/substrate/primitives/core/hashing/proc-macro/Cargo.toml b/substrate/primitives/core/hashing/proc-macro/Cargo.toml index fce09b452e5d..64b46ab9c19e 100644 --- a/substrate/primitives/core/hashing/proc-macro/Cargo.toml +++ b/substrate/primitives/core/hashing/proc-macro/Cargo.toml @@ -17,5 +17,5 @@ proc-macro = true [dependencies] quote = "1.0.28" -syn = { version = "2.0.36", features = ["full", "parsing"] } +syn = { version = "2.0.37", features = ["full", "parsing"] } sp-core-hashing = { path = "..", default-features = false} diff --git a/substrate/primitives/debug-derive/Cargo.toml b/substrate/primitives/debug-derive/Cargo.toml index 689a12505694..9d3930ac2572 100644 --- a/substrate/primitives/debug-derive/Cargo.toml +++ b/substrate/primitives/debug-derive/Cargo.toml @@ -18,7 +18,7 @@ proc-macro = true [dependencies] quote = "1.0.28" -syn = "2.0.36" +syn = "2.0.37" proc-macro2 = "1.0.56" [features] diff --git a/substrate/primitives/npos-elections/fuzzer/Cargo.toml b/substrate/primitives/npos-elections/fuzzer/Cargo.toml index 1e9f0517df12..eeb9deebb71e 100644 --- a/substrate/primitives/npos-elections/fuzzer/Cargo.toml +++ b/substrate/primitives/npos-elections/fuzzer/Cargo.toml @@ -14,7 +14,7 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -clap = { version = "4.4.3", features = ["derive"] } +clap = { version = "4.4.4", features = ["derive"] } honggfuzz = "0.5" rand = { version = "0.8", features = ["std", "small_rng"] } sp-npos-elections = { path = ".." 
} diff --git a/substrate/primitives/runtime-interface/proc-macro/Cargo.toml b/substrate/primitives/runtime-interface/proc-macro/Cargo.toml index fe06c56d5a15..5569e31c9360 100644 --- a/substrate/primitives/runtime-interface/proc-macro/Cargo.toml +++ b/substrate/primitives/runtime-interface/proc-macro/Cargo.toml @@ -20,4 +20,4 @@ Inflector = "0.11.4" proc-macro-crate = "1.1.3" proc-macro2 = "1.0.56" quote = "1.0.28" -syn = { version = "2.0.36", features = ["full", "visit", "fold", "extra-traits"] } +syn = { version = "2.0.37", features = ["full", "visit", "fold", "extra-traits"] } diff --git a/substrate/primitives/version/proc-macro/Cargo.toml b/substrate/primitives/version/proc-macro/Cargo.toml index 3cd1b7a3a76d..cc28b8f176b8 100644 --- a/substrate/primitives/version/proc-macro/Cargo.toml +++ b/substrate/primitives/version/proc-macro/Cargo.toml @@ -19,7 +19,7 @@ proc-macro = true codec = { package = "parity-scale-codec", version = "3.6.1", features = [ "derive" ] } proc-macro2 = "1.0.56" quote = "1.0.28" -syn = { version = "2.0.36", features = ["full", "fold", "extra-traits", "visit"] } +syn = { version = "2.0.37", features = ["full", "fold", "extra-traits", "visit"] } [dev-dependencies] sp-version = { path = ".." } diff --git a/substrate/scripts/ci/node-template-release/Cargo.toml b/substrate/scripts/ci/node-template-release/Cargo.toml index 63611ae8da6b..c0e027587246 100644 --- a/substrate/scripts/ci/node-template-release/Cargo.toml +++ b/substrate/scripts/ci/node-template-release/Cargo.toml @@ -11,7 +11,7 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -clap = { version = "4.4.3", features = ["derive"] } +clap = { version = "4.4.4", features = ["derive"] } flate2 = "1.0" fs_extra = "1.3" glob = "0.3" diff --git a/substrate/utils/frame/benchmarking-cli/Cargo.toml b/substrate/utils/frame/benchmarking-cli/Cargo.toml index 017e4b4d5034..9ba22e24faac 100644 --- a/substrate/utils/frame/benchmarking-cli/Cargo.toml +++ b/substrate/utils/frame/benchmarking-cli/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = "6.1" chrono = "0.4" -clap = { version = "4.4.3", features = ["derive"] } +clap = { version = "4.4.4", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.6.1" } comfy-table = { version = "7.0.1", default-features = false } handlebars = "4.2.2" diff --git a/substrate/utils/frame/frame-utilities-cli/Cargo.toml b/substrate/utils/frame/frame-utilities-cli/Cargo.toml index 89aef5338650..5a3365dc9003 100644 --- a/substrate/utils/frame/frame-utilities-cli/Cargo.toml +++ b/substrate/utils/frame/frame-utilities-cli/Cargo.toml @@ -11,7 +11,7 @@ documentation = "https://docs.rs/substrate-frame-cli" readme = "README.md" [dependencies] -clap = { version = "4.4.3", features = ["derive"] } +clap = { version = "4.4.4", features = ["derive"] } frame-support = { path = "../../../frame/support" } frame-system = { path = "../../../frame/system" } sc-cli = { path = "../../../client/cli" } diff --git a/substrate/utils/frame/generate-bags/node-runtime/Cargo.toml b/substrate/utils/frame/generate-bags/node-runtime/Cargo.toml index b06674a62cb3..e1490aa363ca 100644 --- a/substrate/utils/frame/generate-bags/node-runtime/Cargo.toml +++ b/substrate/utils/frame/generate-bags/node-runtime/Cargo.toml @@ -14,4 +14,4 @@ kitchensink-runtime = { path = "../../../../bin/node/runtime" } generate-bags = { path = ".." 
}

# third-party
-clap = { version = "4.4.3", features = ["derive"] }
+clap = { version = "4.4.4", features = ["derive"] }
diff --git a/substrate/utils/frame/try-runtime/cli/Cargo.toml b/substrate/utils/frame/try-runtime/cli/Cargo.toml
index c736c3f6cc56..3f693ca6c82d 100644
--- a/substrate/utils/frame/try-runtime/cli/Cargo.toml
+++ b/substrate/utils/frame/try-runtime/cli/Cargo.toml
@@ -35,7 +35,7 @@ frame-try-runtime = { path = "../../../../frame/try-runtime", optional = true}
 substrate-rpc-client = { path = "../../rpc/client" }
 async-trait = "0.1.57"
-clap = { version = "4.4.3", features = ["derive"] }
+clap = { version = "4.4.4", features = ["derive"] }
 hex = { version = "0.4.3", default-features = false }
 log = "0.4.17"
 parity-scale-codec = "3.6.1"

From bb792af268fe60f8489494ce25acb4dab3cfe7e4 Mon Sep 17 00:00:00 2001
From: Gavin Wood
Date: Wed, 20 Sep 2023 15:28:28 +0100
Subject: [PATCH 32/69] Preimage: Check that at least one is upgraded (#1648)

Sanity check.

---
 substrate/frame/preimage/src/benchmarking.rs | 2 +-
 substrate/frame/preimage/src/lib.rs          | 3 +++
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/substrate/frame/preimage/src/benchmarking.rs b/substrate/frame/preimage/src/benchmarking.rs
index d7c643ff9042..d0c3404f40a9 100644
--- a/substrate/frame/preimage/src/benchmarking.rs
+++ b/substrate/frame/preimage/src/benchmarking.rs
@@ -216,7 +216,7 @@ benchmarks! {
 	}
 
 	ensure_updated {
-		let n in 0..MAX_HASH_UPGRADE_BULK_COUNT;
+		let n in 1..MAX_HASH_UPGRADE_BULK_COUNT;
 		let caller = funded_account::<T>();
 		let hashes = (0..n).map(|i| insert_old_unrequested::<T>(i)).collect::<Vec<_>>();
diff --git a/substrate/frame/preimage/src/lib.rs b/substrate/frame/preimage/src/lib.rs
index e603f1a32e6a..4dda0207e62c 100644
--- a/substrate/frame/preimage/src/lib.rs
+++ b/substrate/frame/preimage/src/lib.rs
@@ -156,6 +156,8 @@ pub mod pallet {
 		NotRequested,
 		/// More than `MAX_HASH_UPGRADE_BULK_COUNT` hashes were requested to be upgraded at once.
 		TooMany,
+		/// Too few hashes were requested to be upgraded (i.e. zero).
+		TooFew,
 	}
 
 	/// A reason for this pallet placing a hold on funds.
@@ -242,6 +244,7 @@ pub mod pallet {
 			hashes: Vec<T::Hash>,
 		) -> DispatchResultWithPostInfo {
 			ensure_signed(origin)?;
+			ensure!(hashes.len() > 0, Error::<T>::TooFew);
 			ensure!(hashes.len() <= MAX_HASH_UPGRADE_BULK_COUNT as usize, Error::<T>::TooMany);
 
 			let updated = hashes.iter().map(Self::do_ensure_updated).filter(|b| *b).count() as u32;

From 41e38b2c093a17ef1faa6fad7eebc703683c010a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?D=C3=B3nal=20Murray?=
Date: Wed, 20 Sep 2023 19:11:42 +0100
Subject: [PATCH 33/69] Fix minor typos and file name in contributing docs
 (#1651)

# Description

*Please include a summary of the changes and the related issue. Please also include relevant motivation and context, including:*

- What does this PR do? This PR fixes two minor typos in the contributing top level docs, and fixes a broken link to another file.
- Why are these changes needed? While reading the guidelines I noticed two small typos and a broken link to the documentation guidelines in the same directory.
- How were these changes implemented and what do they affect? Both typos were fixed. The file has been renamed from `DOCUMENTATION_GUIDELINE.md` to `DOCUMENTATION_GUIDELINES.md` to match the link, as the link seems correct vs the filename.

There are no other references to this file within this repo.
There are no open issues related to this --- docs/CONTRIBUTING.md | 4 ++-- ...DOCUMENTATION_GUIDELINE.md => DOCUMENTATION_GUIDELINES.md} | 0 2 files changed, 2 insertions(+), 2 deletions(-) rename docs/{DOCUMENTATION_GUIDELINE.md => DOCUMENTATION_GUIDELINES.md} (100%) diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md index d134188e25df..245369b70201 100644 --- a/docs/CONTRIBUTING.md +++ b/docs/CONTRIBUTING.md @@ -43,7 +43,7 @@ The set of labels and their description can be found [here](https://paritytech.g 2. Please tag each PR with minimum one `T*` label. The respective `T*` labels should signal the component that was changed, they are also used by downstream users to track changes and to include these changes properly into their own releases. -3. If your’re still working on your PR, please submit as “Draft”. Once a PR is ready for review change +3. If you’re still working on your PR, please submit as “Draft”. Once a PR is ready for review change the status to “Open”, so that the maintainers get to review your PR. Generally PRs should sit for 48 hours in order to garner feedback. It may be merged before if all relevant parties had a look at it. 4. If you’re introducing a major change, that might impact the documentation please add the label @@ -143,4 +143,4 @@ UI tests are used for macros to ensure that the output of a macro doesn’t chan These UI tests are sensible to any changes in the macro generated code or to switching the rust stable version. The tests are only run when the `RUN_UI_TESTS` environment variable is set. So, when the CI is for example complaining about failing UI tests and it is expected that they fail these tests need to be executed locally. -To simplify the updating of the UI test ouput there is the `.maintain/update-rust-stable +To simplify the updating of the UI test output there is the `.maintain/update-rust-stable diff --git a/docs/DOCUMENTATION_GUIDELINE.md b/docs/DOCUMENTATION_GUIDELINES.md similarity index 100% rename from docs/DOCUMENTATION_GUIDELINE.md rename to docs/DOCUMENTATION_GUIDELINES.md From d6b3fc0dbb00a7478558041effae9eeb5b7a1970 Mon Sep 17 00:00:00 2001 From: Sam Johnson Date: Wed, 20 Sep 2023 15:37:52 -0400 Subject: [PATCH 34/69] upgrade to docify v0.2.4 (#1653) Upgrades to docify v0.2.4 which adds support for exporting trait and impl items in addition to regular items. This fixes @liamaharon's https://github.com/sam0x17/docify/issues/9 issue. 
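As a rough sketch of what the new capability enables (illustrative only; the item names and the file path below are made up and are not from this repository or from docify's own docs), an `impl` block can now be exported under a chosen name and embedded into rustdoc elsewhere:

```rust
// Hypothetical usage of docify 0.2.4's new trait/impl item support.
struct Pallet;

#[docify::export(pallet_impl)] // a named export, now usable on impl items
impl Pallet {
    /// A no-op method, exported together with its surrounding impl block.
    fn integrity_check() {}
}

// Elsewhere, the exported snippet could be embedded into documentation:
// #[doc = docify::embed!("src/lib.rs", pallet_impl)]
fn main() {}
```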
See the release notes for more information: https://github.com/sam0x17/docify/releases/tag/v0.2.4 --- Cargo.lock | 8 ++++---- substrate/frame/bags-list/Cargo.toml | 2 +- substrate/frame/fast-unstake/Cargo.toml | 2 +- substrate/frame/paged-list/Cargo.toml | 2 +- substrate/frame/scheduler/Cargo.toml | 2 +- substrate/frame/sudo/Cargo.toml | 2 +- substrate/frame/support/Cargo.toml | 2 +- substrate/frame/timestamp/Cargo.toml | 2 +- 8 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2ed92d3240bf..9d3535bdf8b9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4455,18 +4455,18 @@ checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" [[package]] name = "docify" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff509d6aa8e7ca86b36eb3d593132e64204597de3ccb763ffd8bfd2264d54cf3" +checksum = "76ee528c501ddd15d5181997e9518e59024844eac44fd1e40cb20ddb2a8562fa" dependencies = [ "docify_macros", ] [[package]] name = "docify_macros" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b135598b950330937f3a0ddfbe908106ee2ecd5fa95d8ae7952a3c3863efe8da" +checksum = "0ca01728ab2679c464242eca99f94e2ce0514b52ac9ad950e2ed03fca991231c" dependencies = [ "common-path", "derive-syn-parse", diff --git a/substrate/frame/bags-list/Cargo.toml b/substrate/frame/bags-list/Cargo.toml index 21cdb4f89830..f4644890e2ba 100644 --- a/substrate/frame/bags-list/Cargo.toml +++ b/substrate/frame/bags-list/Cargo.toml @@ -27,7 +27,7 @@ frame-election-provider-support = { path = "../election-provider-support", defau # third party log = { version = "0.4.17", default-features = false } -docify = "0.2.3" +docify = "0.2.4" aquamarine = { version = "0.3.2" } # Optional imports for benchmarking diff --git a/substrate/frame/fast-unstake/Cargo.toml b/substrate/frame/fast-unstake/Cargo.toml index 224067d4fa70..85548c6600db 100644 --- a/substrate/frame/fast-unstake/Cargo.toml +++ b/substrate/frame/fast-unstake/Cargo.toml @@ -27,7 +27,7 @@ frame-election-provider-support = { path = "../election-provider-support", defau frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -docify = "0.2.3" +docify = "0.2.4" [dev-dependencies] pallet-staking-reward-curve = { path = "../staking/reward-curve" } diff --git a/substrate/frame/paged-list/Cargo.toml b/substrate/frame/paged-list/Cargo.toml index 9fbc3fdb2c62..eee099fff5f0 100644 --- a/substrate/frame/paged-list/Cargo.toml +++ b/substrate/frame/paged-list/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive"] } -docify = "0.2.3" +docify = "0.2.4" scale-info = { version = "2.0.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} diff --git a/substrate/frame/scheduler/Cargo.toml b/substrate/frame/scheduler/Cargo.toml index 7fa14e861e8c..e81b4dbeff78 100644 --- a/substrate/frame/scheduler/Cargo.toml +++ b/substrate/frame/scheduler/Cargo.toml @@ -20,7 +20,7 @@ sp-io = { path = "../../primitives/io", default-features = false} sp-runtime = { path = "../../primitives/runtime", default-features = false} sp-std = { path = "../../primitives/std", default-features = false} sp-weights = { path = "../../primitives/weights", default-features = false} -docify = "0.2.3" 
+docify = "0.2.4" [dev-dependencies] pallet-preimage = { path = "../preimage" } diff --git a/substrate/frame/sudo/Cargo.toml b/substrate/frame/sudo/Cargo.toml index bdd4252414a6..a4934346d5de 100644 --- a/substrate/frame/sudo/Cargo.toml +++ b/substrate/frame/sudo/Cargo.toml @@ -22,7 +22,7 @@ sp-io = { path = "../../primitives/io", default-features = false} sp-runtime = { path = "../../primitives/runtime", default-features = false} sp-std = { path = "../../primitives/std", default-features = false} -docify = "0.2.3" +docify = "0.2.4" [dev-dependencies] sp-core = { path = "../../primitives/core" } diff --git a/substrate/frame/support/Cargo.toml b/substrate/frame/support/Cargo.toml index 62a7b5e41fe7..5cb5d6d12ab7 100644 --- a/substrate/frame/support/Cargo.toml +++ b/substrate/frame/support/Cargo.toml @@ -43,7 +43,7 @@ k256 = { version = "0.13.1", default-features = false, features = ["ecdsa"] } environmental = { version = "1.1.4", default-features = false } sp-genesis-builder = { path = "../../primitives/genesis-builder", default-features=false} serde_json = { version = "1.0.107", default-features = false, features = ["alloc"] } -docify = "0.2.3" +docify = "0.2.4" static_assertions = "1.1.0" aquamarine = { version = "0.3.2" } diff --git a/substrate/frame/timestamp/Cargo.toml b/substrate/frame/timestamp/Cargo.toml index 6759d90aaf41..291f6b1cf590 100644 --- a/substrate/frame/timestamp/Cargo.toml +++ b/substrate/frame/timestamp/Cargo.toml @@ -27,7 +27,7 @@ sp-std = { path = "../../primitives/std", default-features = false} sp-storage = { path = "../../primitives/storage", default-features = false} sp-timestamp = { path = "../../primitives/timestamp", default-features = false} -docify = "0.2.1" +docify = "0.2.4" [dev-dependencies] sp-core = { path = "../../primitives/core" } From 9e403629d590abcd57c13f4010eb0714a4ee6642 Mon Sep 17 00:00:00 2001 From: Muharem Date: Wed, 20 Sep 2023 21:55:56 +0200 Subject: [PATCH 35/69] The Ambassador Program (#1308) The Ambassador Program on Polkadot Collectives Parachain The Polkadot Ambassador Program has existed for a while; more information can be found [here](https://wiki.polkadot.network/docs/ambassadors). In this PR, the program is being brought on chain. ### On Chain Structure The on-chain program consists of nine ranks, divided into four categories ([full list](https://github.com/paritytech/cumulus/blob/c238fb26b75569a11abb57437fd14acd26e05f18/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/mod.rs#L52)): - Ambassadors (1-2 tiers) - Senior Ambassadors (3-4 tiers) - Head Ambassadors (5-7 tiers) - Master Ambassadors (8-9 tiers) Each rank has a corresponding `Origin` (e.g., `HeadAmbassadorsTier5` - [full list](https://github.com/paritytech/cumulus/blob/c238fb26b75569a11abb57437fd14acd26e05f18/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/origins.rs#L35)), which represents the collective voice of members of that rank and above. ### Referendum The `AmbassadorReferenda` instance of [referenda pallet](https://docs.rs/pallet-referenda/latest/pallet_referenda/) consists of [nine tracks](https://github.com/paritytech/cumulus/blob/c238fb26b75569a11abb57437fd14acd26e05f18/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/tracks.rs#L51), each corresponding to an `Origin`. A referendum taken on `senior ambassador tier 4` track invites all members from rank 4 or above to vote and commands `SeniorAmbassadors` `Origin`. Every member gets one vote plus an additional vote for every excess rank. 
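The weighting rule can be pinned down in a few lines; the following is an illustrative sketch only (the function, names, and test are not taken from this PR's code):

```rust
/// Voting weight per the rule above: one vote, plus one for every rank in
/// excess of the track's minimum rank. `None` means the member's rank is
/// below the track's minimum and the member cannot vote at all.
fn vote_weight(member_rank: u16, track_min_rank: u16) -> Option<u32> {
    member_rank
        .checked_sub(track_min_rank)
        .map(|excess| 1 + excess as u32)
}

#[test]
fn rank_weighting_matches_the_description() {
    // A rank-4 member on the `senior ambassador tier 4` track: 1 vote.
    assert_eq!(vote_weight(4, 4), Some(1));
    // A rank-6 member on the same track: 1 + 2 excess ranks = 3 votes.
    assert_eq!(vote_weight(6, 4), Some(3));
    // A rank-3 member is below the track's minimum and cannot vote.
    assert_eq!(vote_weight(3, 4), None);
}
```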
The referendum proposal can be submitted by any member of a senior rank or above.

### Membership Management

Initial members will be brought on chain via migration, with subsequent member management handled through the `AmbassadorCollective` instance of the [ranked collective pallet](https://docs.rs/pallet-ranked-collective/latest/pallet_ranked_collective/). Both the `Root` and `FellowshipAdmin` `Origins`, commanded via public Polkadot referendum, can promote or demote members to and from any rank. Members themselves can also promote or demote via the program's own referendum, with the vote taken by members ranked two above the new/current rank - [full configuration](https://github.com/paritytech/cumulus/blob/9ab6aa47063d7e8b67ddc10d9c136037f99c03a3/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/mod.rs#L67).

### Content Management

The program's on-chain content is managed via the collectives content pallet, allowing for setting its charter and making announcements. The collective voice of the head ambassadors has the authority to set the charter, while announcements can be made by any senior rank member or through a referendum among all members.

### Additional Functionality

The `AmbassadorCore` instance of the [core fellowship pallet](https://docs.rs/pallet-core-fellowship/latest/pallet_core_fellowship/) decorates the ranked collective pallet with features like salary determination, activity/passivity registration, and the handling of promotion and demotion periods. While the usage of this pallet is optional in the first version, future updates will make it the exclusive method for induction/promotion.

Periodic salaries in USDt, payable on Asset Hub, are introduced through the [salary pallet](https://docs.rs/pallet-salary/latest/pallet_salary/). This requires induction into the ambassador core pallet.

For more information on the pallets' functionality, please refer to their documentation.

### Next Steps:

- Migrate to seed the program members
- Mint ambassador NFT badges on Asset Hub when promoting
- Treasury pallet instance for the Ambassador Program

---------

Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com>
Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com>
---
 Cargo.lock | 17 +
 cumulus/parachains/common/src/polkadot.rs | 4 +
 .../collectives-polkadot/Cargo.toml | 1 +
 .../src/tests/ambassador.rs | 65 +++
 .../collectives-polkadot/src/tests/mod.rs | 1 +
 .../pallets/collective-content/Cargo.toml | 48 ++
 .../collective-content/src/benchmarking.rs | 88 +++
 .../pallets/collective-content/src/lib.rs | 206 +++++++
 .../pallets/collective-content/src/mock.rs | 107 ++++
 .../pallets/collective-content/src/tests.rs | 204 +++++++
 .../pallets/collective-content/src/weights.rs | 41 ++
 .../asset-hub-polkadot/src/xcm_config.rs | 4 +
 .../collectives-polkadot/Cargo.toml | 4 +
 .../src/ambassador/mod.rs | 255 +++++++++
 .../src/ambassador/origins.rs | 135 +++++
 .../src/ambassador/tracks.rs | 282 +++++++++
 .../src/fellowship/mod.rs | 32 +-
 .../collectives-polkadot/src/lib.rs | 29 +
 .../collectives-polkadot/src/weights/mod.rs | 13 +-
 .../src/weights/pallet_collective_content.rs | 94 +++
 .../pallet_core_fellowship_ambassador_core.rs | 223 ++++++++
 ...pallet_core_fellowship_fellowship_core.rs} | 73 ++-
 ...ranked_collective_ambassador_collective.rs | 177 ++++++
 ...anked_collective_fellowship_collective.rs} | 73 +--
 .../pallet_referenda_ambassador_referenda.rs | 536 ++++++++++++++++++
 ...pallet_referenda_fellowship_referenda.rs} | 195 ++++---
 .../pallet_salary_ambassador_salary.rs | 190 +++++++
 ....rs => pallet_salary_fellowship_salary.rs} | 61 +-
 .../collectives-polkadot/src/xcm_config.rs | 20 +-
 29 files changed, 2945 insertions(+), 233 deletions(-)
 create mode 100644 cumulus/parachains/integration-tests/emulated/collectives/collectives-polkadot/src/tests/ambassador.rs
 create mode 100644 cumulus/parachains/pallets/collective-content/Cargo.toml
 create mode 100644 cumulus/parachains/pallets/collective-content/src/benchmarking.rs
 create mode 100644 cumulus/parachains/pallets/collective-content/src/lib.rs
 create mode 100644 cumulus/parachains/pallets/collective-content/src/mock.rs
 create mode 100644 cumulus/parachains/pallets/collective-content/src/tests.rs
 create mode 100644 cumulus/parachains/pallets/collective-content/src/weights.rs
 create mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/mod.rs
 create mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/origins.rs
 create mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/tracks.rs
 create mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collective_content.rs
 create mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_core_fellowship_ambassador_core.rs
 rename cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/{pallet_core_fellowship.rs => pallet_core_fellowship_fellowship_core.rs} (87%)
 create mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_ranked_collective_ambassador_collective.rs
 rename cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/{pallet_ranked_collective.rs => pallet_ranked_collective_fellowship_collective.rs} (82%)
 create mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_referenda_ambassador_referenda.rs
 rename cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/{pallet_referenda.rs => pallet_referenda_fellowship_referenda.rs} (87%)
 create mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_salary_ambassador_salary.rs
 rename cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/{pallet_salary.rs => pallet_salary_fellowship_salary.rs} (86%)

diff --git a/Cargo.lock b/Cargo.lock
index 9d3535bdf8b9..ddb19c24e1f6 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2564,6 +2564,7 @@ dependencies = [
  "frame-support",
  "integration-tests-common",
  "pallet-assets",
+ "pallet-balances",
  "pallet-core-fellowship",
  "pallet-salary",
  "pallet-xcm",
@@ -2606,6 +2607,7 @@ dependencies = [
  "pallet-balances",
  "pallet-collator-selection",
  "pallet-collective",
+ "pallet-collective-content",
  "pallet-core-fellowship",
  "pallet-multisig",
  "pallet-preimage",
@@ -9282,6 +9284,21 @@ dependencies = [
  "sp-std",
 ]
 
+[[package]]
+name = "pallet-collective-content"
+version = "0.1.0"
+dependencies = [
+ "frame-benchmarking",
+ "frame-support",
+ "frame-system",
+ "parity-scale-codec",
+ "scale-info",
+ "sp-core",
+ "sp-io",
+ "sp-runtime",
+ "sp-std",
+]
+
 [[package]]
 name = "pallet-contracts"
 version = "4.0.0-dev"

diff --git a/cumulus/parachains/common/src/polkadot.rs b/cumulus/parachains/common/src/polkadot.rs
index 52cee939224c..4f459b9bb5a3 100644
--- a/cumulus/parachains/common/src/polkadot.rs
+++ b/cumulus/parachains/common/src/polkadot.rs
@@ -27,6 +27,10 @@ pub mod account {
+27,10 @@ pub mod account { /// It is used as a temporarily place to deposit a slashed imbalance /// before the teleport to the Treasury. pub const REFERENDA_PALLET_ID: PalletId = PalletId(*b"py/refer"); + /// Ambassador Referenda pallet ID. + /// It is used as a temporarily place to deposit a slashed imbalance + /// before the teleport to the Treasury. + pub const AMBASSADOR_REFERENDA_PALLET_ID: PalletId = PalletId(*b"py/amref"); } /// Consensus-related. diff --git a/cumulus/parachains/integration-tests/emulated/collectives/collectives-polkadot/Cargo.toml b/cumulus/parachains/integration-tests/emulated/collectives/collectives-polkadot/Cargo.toml index 5a28b1274151..99caccc81590 100644 --- a/cumulus/parachains/integration-tests/emulated/collectives/collectives-polkadot/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/collectives/collectives-polkadot/Cargo.toml @@ -15,6 +15,7 @@ sp-runtime = { path = "../../../../../../substrate/primitives/runtime", default- frame-support = { path = "../../../../../../substrate/frame/support", default-features = false} sp-core = { path = "../../../../../../substrate/primitives/core", default-features = false} pallet-assets = { path = "../../../../../../substrate/frame/assets", default-features = false} +pallet-balances = { path = "../../../../../../substrate/frame/balances", default-features = false} pallet-core-fellowship = { path = "../../../../../../substrate/frame/core-fellowship", default-features = false} pallet-salary = { path = "../../../../../../substrate/frame/salary", default-features = false} diff --git a/cumulus/parachains/integration-tests/emulated/collectives/collectives-polkadot/src/tests/ambassador.rs b/cumulus/parachains/integration-tests/emulated/collectives/collectives-polkadot/src/tests/ambassador.rs new file mode 100644 index 000000000000..d9fd78fbcbff --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/collectives/collectives-polkadot/src/tests/ambassador.rs @@ -0,0 +1,65 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Integration tests concerning the Ambassador Program. + +use crate::*; +use collectives_polkadot_runtime::ambassador::AmbassadorSalaryPaymaster; +use frame_support::traits::{fungible::Mutate, tokens::Pay}; +use sp_core::crypto::Ss58Codec; +use xcm_emulator::TestExt; + +#[test] +fn pay_salary() { + let pay_from: AccountId = + ::from_string("5DS1Gaf6R9eFAV8QyeZP9P89kTkJMurxv3y3J3TTMu8p8VCX") + .unwrap(); + let pay_to = Polkadot::account_id_of(ALICE); + let pay_amount = 90000000000; + + AssetHubPolkadot::execute_with(|| { + type AssetHubBalances = ::Balances; + + assert_ok!(>::mint_into(&pay_from, pay_amount * 2)); + }); + + Collectives::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + assert_ok!(AmbassadorSalaryPaymaster::pay(&pay_to, (), pay_amount)); + assert_expected_events!( + Collectives, + vec![ + RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. 
}) => {}, + ] + ); + }); + + AssetHubPolkadot::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + AssetHubPolkadot, + vec![ + RuntimeEvent::Balances(pallet_balances::Event::Transfer { from, to, amount }) => { + from: from == &pay_from, + to: to == &pay_to, + amount: amount == &pay_amount, + }, + RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::Success { .. }) => {}, + ] + ); + }); +} diff --git a/cumulus/parachains/integration-tests/emulated/collectives/collectives-polkadot/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/collectives/collectives-polkadot/src/tests/mod.rs index fb3e235a25cd..42d2432d2237 100644 --- a/cumulus/parachains/integration-tests/emulated/collectives/collectives-polkadot/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/collectives/collectives-polkadot/src/tests/mod.rs @@ -13,4 +13,5 @@ // See the License for the specific language governing permissions and // limitations under the License. +mod ambassador; mod fellowship; diff --git a/cumulus/parachains/pallets/collective-content/Cargo.toml b/cumulus/parachains/pallets/collective-content/Cargo.toml new file mode 100644 index 000000000000..1c831ac7268a --- /dev/null +++ b/cumulus/parachains/pallets/collective-content/Cargo.toml @@ -0,0 +1,48 @@ +[package] +name = "pallet-collective-content" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2021" +description = "Managed content" + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } +scale-info = { version = "2.3.0", default-features = false, features = ["derive"] } + +frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", optional = true, default-features = false } +frame-support = { path = "../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../substrate/frame/system", default-features = false } + +sp-core = { path = "../../../../substrate/primitives/core", default-features = false } +sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } +sp-std = { path = "../../../../substrate/primitives/std", default-features = false } + +[dev-dependencies] +sp-io = { path = "../../../../substrate/primitives/io", default-features = false } + +[features] +default = [ "std" ] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] + +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "sp-runtime/try-runtime", +] + +std = [ + "codec/std", + "frame-benchmarking/std", + "frame-support/std", + "frame-system/std", + "scale-info/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", +] diff --git a/cumulus/parachains/pallets/collective-content/src/benchmarking.rs b/cumulus/parachains/pallets/collective-content/src/benchmarking.rs new file mode 100644 index 000000000000..1f145f725b13 --- /dev/null +++ b/cumulus/parachains/pallets/collective-content/src/benchmarking.rs @@ -0,0 +1,88 @@ +// Copyright (C) 2023 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The pallet benchmarks. + +use super::{Pallet as CollectiveContent, *}; +use frame_benchmarking::{impl_benchmark_test_suite, v2::*}; +use frame_support::traits::EnsureOrigin; + +fn assert_last_event, I: 'static>(generic_event: >::RuntimeEvent) { + frame_system::Pallet::::assert_last_event(generic_event.into()); +} + +/// returns CID hash of 68 bytes of given `i`. +fn create_cid(i: u8) -> OpaqueCid { + let cid: OpaqueCid = [i; 68].to_vec().try_into().unwrap(); + cid +} + +#[instance_benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn set_charter() -> Result<(), BenchmarkError> { + let cid: OpaqueCid = create_cid(1); + let origin = + T::CharterOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + + #[extrinsic_call] + _(origin as T::RuntimeOrigin, cid.clone()); + + assert_eq!(Charter::::get(), Some(cid.clone())); + assert_last_event::(Event::NewCharterSet { cid }.into()); + Ok(()) + } + + #[benchmark] + fn announce() -> Result<(), BenchmarkError> { + let expire_at = DispatchTime::<_>::At(10u32.into()); + let now = frame_system::Pallet::::block_number(); + let cid: OpaqueCid = create_cid(1); + let origin = T::AnnouncementOrigin::try_successful_origin() + .map_err(|_| BenchmarkError::Weightless)?; + + #[extrinsic_call] + _(origin as T::RuntimeOrigin, cid.clone(), Some(expire_at.clone())); + + assert_eq!(>::count(), 1); + assert_last_event::( + Event::AnnouncementAnnounced { cid, expire_at: expire_at.evaluate(now) }.into(), + ); + + Ok(()) + } + + #[benchmark] + fn remove_announcement() -> Result<(), BenchmarkError> { + let cid: OpaqueCid = create_cid(1); + let origin = T::AnnouncementOrigin::try_successful_origin() + .map_err(|_| BenchmarkError::Weightless)?; + CollectiveContent::::announce(origin.clone(), cid.clone(), None) + .expect("could not publish an announcement"); + assert_eq!(>::count(), 1); + + #[extrinsic_call] + _(origin as T::RuntimeOrigin, cid.clone()); + + assert_eq!(>::count(), 0); + assert_last_event::(Event::AnnouncementRemoved { cid }.into()); + + Ok(()) + } + + impl_benchmark_test_suite!(CollectiveContent, super::mock::new_bench_ext(), super::mock::Test); +} diff --git a/cumulus/parachains/pallets/collective-content/src/lib.rs b/cumulus/parachains/pallets/collective-content/src/lib.rs new file mode 100644 index 000000000000..7a685858accb --- /dev/null +++ b/cumulus/parachains/pallets/collective-content/src/lib.rs @@ -0,0 +1,206 @@ +// Copyright (C) 2023 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Managed Collective Content Pallet +//! +//! The pallet provides the functionality to store different types of content. This would typically +//! be used by an on-chain collective, such as the Polkadot Alliance or Ambassador Program. +//! +//! The pallet stores content as an [OpaqueCid], which should correspond to some off-chain hosting +//! service, such as IPFS, and contain any type of data. Each type of content has its own origin +//! from which it can be managed. The origins are configurable in the runtime. Storing content does +//! not require a deposit, as it is expected to be managed by a trusted collective. +//! +//! Content types: +//! +//! - Collective [charter](pallet::Charter): A single document (`OpaqueCid`) managed by +//! [CharterOrigin](pallet::Config::CharterOrigin). +//! - Collective [announcements](pallet::Announcements): A list of announcements managed by +//! [AnnouncementOrigin](pallet::Config::AnnouncementOrigin). + +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; +pub mod weights; + +pub use pallet::*; +pub use weights::WeightInfo; + +use frame_support::{traits::schedule::DispatchTime, BoundedVec}; +use sp_core::ConstU32; +use sp_std::prelude::*; + +/// IPFS compatible CID. +// Worst case 2 bytes base and codec, 2 bytes hash type and size, 64 bytes hash digest. +pub type OpaqueCid = BoundedVec>; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::{ensure, pallet_prelude::*}; + use frame_system::pallet_prelude::*; + use sp_runtime::{traits::BadOrigin, Saturating}; + + /// The current storage version. + const STORAGE_VERSION: StorageVersion = StorageVersion::new(0); + + #[pallet::pallet] + #[pallet::storage_version(STORAGE_VERSION)] + pub struct Pallet(PhantomData<(T, I)>); + + /// The module configuration trait. + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; + + /// Default lifetime for an announcement before it expires. + type AnnouncementLifetime: Get>; + + /// The origin to control the collective announcements. + type AnnouncementOrigin: EnsureOrigin; + + /// Maximum number of announcements in the storage. + #[pallet::constant] + type MaxAnnouncements: Get; + + /// The origin to control the collective charter. + type CharterOrigin: EnsureOrigin; + + /// Weight information needed for the pallet. + type WeightInfo: WeightInfo; + } + + #[pallet::error] + pub enum Error { + /// The announcement is not found. + MissingAnnouncement, + /// Number of announcements exceeds `MaxAnnouncementsCount`. + TooManyAnnouncements, + /// Cannot expire in the past. + InvalidExpiration, + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event, I: 'static = ()> { + /// A new charter has been set. + NewCharterSet { cid: OpaqueCid }, + /// A new announcement has been made. + AnnouncementAnnounced { cid: OpaqueCid, expire_at: BlockNumberFor }, + /// An on-chain announcement has been removed. + AnnouncementRemoved { cid: OpaqueCid }, + } + + /// The collective charter. + #[pallet::storage] + pub type Charter, I: 'static = ()> = StorageValue<_, OpaqueCid, OptionQuery>; + + /// The collective announcements. 
+ #[pallet::storage] + pub type Announcements, I: 'static = ()> = + CountedStorageMap<_, Blake2_128Concat, OpaqueCid, BlockNumberFor, OptionQuery>; + + #[pallet::call] + impl, I: 'static> Pallet { + /// Set the collective charter. + /// + /// Parameters: + /// - `origin`: Must be the [Config::CharterOrigin]. + /// - `cid`: [CID](super::OpaqueCid) of the IPFS document of the collective charter. + #[pallet::call_index(0)] + #[pallet::weight(T::WeightInfo::set_charter())] + pub fn set_charter(origin: OriginFor, cid: OpaqueCid) -> DispatchResult { + T::CharterOrigin::ensure_origin(origin)?; + + Charter::::put(&cid); + + Self::deposit_event(Event::::NewCharterSet { cid }); + Ok(()) + } + + /// Publish an announcement. + /// + /// Parameters: + /// - `origin`: Must be the [Config::AnnouncementOrigin]. + /// - `cid`: [CID](super::OpaqueCid) of the IPFS document to announce. + /// - `maybe_expire`: Expiration block of the announcement. If `None` + /// [`Config::AnnouncementLifetime`] + /// used as a default. + #[pallet::call_index(1)] + #[pallet::weight(T::WeightInfo::announce())] + pub fn announce( + origin: OriginFor, + cid: OpaqueCid, + maybe_expire: Option>>, + ) -> DispatchResult { + T::AnnouncementOrigin::ensure_origin(origin)?; + + let now = frame_system::Pallet::::block_number(); + let expire_at = maybe_expire + .map_or(now.saturating_add(T::AnnouncementLifetime::get()), |e| e.evaluate(now)); + ensure!(expire_at > now, Error::::InvalidExpiration); + ensure!( + T::MaxAnnouncements::get() > >::count(), + Error::::TooManyAnnouncements + ); + + >::insert(cid.clone(), expire_at); + + Self::deposit_event(Event::::AnnouncementAnnounced { cid, expire_at }); + Ok(()) + } + + /// Remove an announcement. + /// + /// Transaction fee refunded for expired announcements. + /// + /// Parameters: + /// - `origin`: Must be the [Config::AnnouncementOrigin] or signed for expired + /// announcements. + /// - `cid`: [CID](super::OpaqueCid) of the IPFS document to remove. + #[pallet::call_index(2)] + #[pallet::weight(T::WeightInfo::remove_announcement())] + pub fn remove_announcement( + origin: OriginFor, + cid: OpaqueCid, + ) -> DispatchResultWithPostInfo { + let maybe_who = match T::AnnouncementOrigin::try_origin(origin) { + Ok(_) => None, + Err(origin) => Some(ensure_signed(origin)?), + }; + let expire_at = >::get(cid.clone()) + .ok_or(Error::::MissingAnnouncement)?; + let now = frame_system::Pallet::::block_number(); + ensure!(maybe_who.is_none() || now >= expire_at, BadOrigin); + + >::remove(cid.clone()); + + Self::deposit_event(Event::::AnnouncementRemoved { cid }); + + if now >= expire_at { + return Ok(Pays::No.into()) + } + Ok(Pays::Yes.into()) + } + } +} diff --git a/cumulus/parachains/pallets/collective-content/src/mock.rs b/cumulus/parachains/pallets/collective-content/src/mock.rs new file mode 100644 index 000000000000..2ae5943f332a --- /dev/null +++ b/cumulus/parachains/pallets/collective-content/src/mock.rs @@ -0,0 +1,107 @@ +// Copyright (C) 2023 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//! Test utilities. + +pub use crate as pallet_collective_content; +use crate::WeightInfo; +use frame_support::{ + ord_parameter_types, parameter_types, + traits::{ConstU32, ConstU64}, + weights::Weight, +}; +use frame_system::EnsureSignedBy; +use sp_runtime::{traits::IdentityLookup, BuildStorage}; + +frame_support::construct_runtime!( + pub enum Test { + System: frame_system, + CollectiveContent: pallet_collective_content, + } +); + +type AccountId = u64; +type Block = frame_system::mocking::MockBlock; + +ord_parameter_types! { + pub const CharterManager: u64 = 1; + pub const AnnouncementManager: u64 = 2; + pub const SomeAccount: u64 = 3; +} + +parameter_types! { + pub const AnnouncementLifetime: u64 = 100; + pub const MaxAnnouncements: u32 = 5; +} + +impl pallet_collective_content::Config for Test { + type RuntimeEvent = RuntimeEvent; + type AnnouncementLifetime = AnnouncementLifetime; + type AnnouncementOrigin = EnsureSignedBy; + type MaxAnnouncements = MaxAnnouncements; + type CharterOrigin = EnsureSignedBy; + type WeightInfo = CCWeightInfo; +} + +impl frame_system::Config for Test { + type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Nonce = u64; + type Block = Block; + type Hash = sp_core::H256; + type Hashing = sp_runtime::traits::BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = ConstU64<250>; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = ConstU32<16>; +} +pub struct CCWeightInfo; +impl WeightInfo for CCWeightInfo { + fn set_charter() -> Weight { + Weight::zero() + } + fn announce() -> Weight { + Weight::zero() + } + fn remove_announcement() -> Weight { + Weight::zero() + } +} + +// Build test environment. +pub fn new_test_ext() -> sp_io::TestExternalities { + let t = RuntimeGenesisConfig::default().build_storage().unwrap().into(); + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext +} + +#[cfg(feature = "runtime-benchmarks")] +pub fn new_bench_ext() -> sp_io::TestExternalities { + RuntimeGenesisConfig::default().build_storage().unwrap().into() +} diff --git a/cumulus/parachains/pallets/collective-content/src/tests.rs b/cumulus/parachains/pallets/collective-content/src/tests.rs new file mode 100644 index 000000000000..4910b30b89af --- /dev/null +++ b/cumulus/parachains/pallets/collective-content/src/tests.rs @@ -0,0 +1,204 @@ +// Copyright (C) 2023 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests. 
+
+use super::{mock::*, *};
+use frame_support::{assert_noop, assert_ok, error::BadOrigin, pallet_prelude::Pays};
+
+/// Returns a 68-byte CID built by repeating the given `i`.
+fn create_cid(i: u8) -> OpaqueCid {
+    let cid: OpaqueCid = [i; 68].to_vec().try_into().unwrap();
+    cid
+}
+
+#[test]
+fn set_charter_works() {
+    new_test_ext().execute_with(|| {
+        // wrong origin.
+        let origin = RuntimeOrigin::signed(SomeAccount::get());
+        let cid = create_cid(1);
+        assert_noop!(CollectiveContent::set_charter(origin, cid), BadOrigin);
+
+        // success.
+        let origin = RuntimeOrigin::signed(CharterManager::get());
+        let cid = create_cid(2);
+
+        assert_ok!(CollectiveContent::set_charter(origin, cid.clone()));
+        assert_eq!(Charter::<Test>::get(), Some(cid.clone()));
+        System::assert_last_event(RuntimeEvent::CollectiveContent(Event::NewCharterSet { cid }));
+
+        // reset. success.
+        let origin = RuntimeOrigin::signed(CharterManager::get());
+        let cid = create_cid(3);
+
+        assert_ok!(CollectiveContent::set_charter(origin, cid.clone()));
+        assert_eq!(Charter::<Test>::get(), Some(cid.clone()));
+        System::assert_last_event(RuntimeEvent::CollectiveContent(Event::NewCharterSet { cid }));
+    });
+}
+
+#[test]
+fn announce_works() {
+    new_test_ext().execute_with(|| {
+        let now = frame_system::Pallet::<Test>::block_number();
+        // wrong origin.
+        let origin = RuntimeOrigin::signed(SomeAccount::get());
+        let cid = create_cid(1);
+
+        assert_noop!(CollectiveContent::announce(origin, cid, None), BadOrigin);
+
+        // success.
+        let origin = RuntimeOrigin::signed(AnnouncementManager::get());
+        let cid = create_cid(2);
+        let maybe_expire_at = None;
+
+        assert_ok!(CollectiveContent::announce(origin, cid.clone(), maybe_expire_at));
+        System::assert_last_event(RuntimeEvent::CollectiveContent(Event::AnnouncementAnnounced {
+            cid,
+            expire_at: now.saturating_add(AnnouncementLifetime::get()),
+        }));
+
+        // one more. success.
+        let origin = RuntimeOrigin::signed(AnnouncementManager::get());
+        let cid = create_cid(3);
+        let maybe_expire_at = None;
+
+        assert_ok!(CollectiveContent::announce(origin, cid.clone(), maybe_expire_at));
+        System::assert_last_event(RuntimeEvent::CollectiveContent(Event::AnnouncementAnnounced {
+            cid,
+            expire_at: now.saturating_add(AnnouncementLifetime::get()),
+        }));
+
+        // one more with expire. success.
+        let origin = RuntimeOrigin::signed(AnnouncementManager::get());
+        let cid = create_cid(4);
+        let maybe_expire_at = DispatchTime::<_>::After(10);
+
+        assert_ok!(CollectiveContent::announce(origin, cid.clone(), Some(maybe_expire_at)));
+        System::assert_last_event(RuntimeEvent::CollectiveContent(Event::AnnouncementAnnounced {
+            cid,
+            expire_at: maybe_expire_at.evaluate(now),
+        }));
+
+        // one more with later expire. success.
+        let origin = RuntimeOrigin::signed(AnnouncementManager::get());
+        let cid = create_cid(5);
+        let maybe_expire_at = DispatchTime::<_>::At(now + 20);
+
+        assert_ok!(CollectiveContent::announce(origin, cid.clone(), Some(maybe_expire_at)));
+        System::assert_last_event(RuntimeEvent::CollectiveContent(Event::AnnouncementAnnounced {
+            cid,
+            expire_at: maybe_expire_at.evaluate(now),
+        }));
+
+        // one more with earlier expire. success.
+        let origin = RuntimeOrigin::signed(AnnouncementManager::get());
+        let cid = create_cid(6);
+        let maybe_expire_at = DispatchTime::<_>::At(now + 5);
+
+        assert_ok!(CollectiveContent::announce(origin, cid.clone(), Some(maybe_expire_at)));
+        System::assert_last_event(RuntimeEvent::CollectiveContent(Event::AnnouncementAnnounced {
+            cid,
+            expire_at: maybe_expire_at.evaluate(now),
+        }));
+
+        // one more, but the announcement limit is already reached. failure.
+        let origin = RuntimeOrigin::signed(AnnouncementManager::get());
+        let cid = create_cid(7);
+        let maybe_expire_at = DispatchTime::<_>::At(now + 5);
+
+        assert_eq!(<Announcements<Test>>::count(), MaxAnnouncements::get());
+        assert_noop!(
+            CollectiveContent::announce(origin, cid.clone(), Some(maybe_expire_at)),
+            Error::<Test>::TooManyAnnouncements
+        );
+    });
+}
+
+#[test]
+fn remove_announcement_works() {
+    new_test_ext().execute_with(|| {
+        // wrong origin and no such announcement: the missing announcement is reported first.
+        let origin = RuntimeOrigin::signed(CharterManager::get());
+        let cid = create_cid(8);
+
+        assert_noop!(
+            CollectiveContent::remove_announcement(origin, cid),
+            Error::<Test>::MissingAnnouncement
+        );
+
+        // missing announcement.
+        let origin = RuntimeOrigin::signed(AnnouncementManager::get());
+        let cid = create_cid(9);
+
+        assert_noop!(
+            CollectiveContent::remove_announcement(origin, cid),
+            Error::<Test>::MissingAnnouncement
+        );
+
+        // wrong origin. announcement not yet expired.
+        let origin = RuntimeOrigin::signed(AnnouncementManager::get());
+        let cid = create_cid(10);
+        assert_ok!(CollectiveContent::announce(origin.clone(), cid.clone(), None));
+        assert!(<Announcements<Test>>::contains_key(cid.clone()));
+
+        let origin = RuntimeOrigin::signed(SomeAccount::get());
+        assert_noop!(CollectiveContent::remove_announcement(origin, cid.clone()), BadOrigin);
+        let origin = RuntimeOrigin::signed(AnnouncementManager::get());
+        assert_ok!(CollectiveContent::remove_announcement(origin, cid));
+
+        // success.
+
+        // remove the first announcement and assert.
+        let origin = RuntimeOrigin::signed(AnnouncementManager::get());
+        let cid = create_cid(11);
+        assert_ok!(CollectiveContent::announce(origin.clone(), cid.clone(), None));
+        assert!(<Announcements<Test>>::contains_key(cid.clone()));
+
+        let info = CollectiveContent::remove_announcement(origin.clone(), cid.clone()).unwrap();
+        assert_eq!(info.pays_fee, Pays::Yes);
+        System::assert_last_event(RuntimeEvent::CollectiveContent(Event::AnnouncementRemoved {
+            cid: cid.clone(),
+        }));
+        assert_noop!(
+            CollectiveContent::remove_announcement(origin, cid.clone()),
+            Error::<Test>::MissingAnnouncement
+        );
+        assert!(!<Announcements<Test>>::contains_key(cid));
+
+        // remove an expired announcement and assert.
+        let origin = RuntimeOrigin::signed(AnnouncementManager::get());
+        let cid = create_cid(12);
+        assert_ok!(CollectiveContent::announce(
+            origin.clone(),
+            cid.clone(),
+            Some(DispatchTime::<_>::At(10))
+        ));
+        assert!(<Announcements<Test>>::contains_key(cid.clone()));
+
+        System::set_block_number(11);
+        let origin = RuntimeOrigin::signed(SomeAccount::get());
+        let info = CollectiveContent::remove_announcement(origin.clone(), cid.clone()).unwrap();
+        assert_eq!(info.pays_fee, Pays::No);
+        System::assert_last_event(RuntimeEvent::CollectiveContent(Event::AnnouncementRemoved {
+            cid: cid.clone(),
+        }));
+        assert_noop!(
+            CollectiveContent::remove_announcement(origin, cid),
+            Error::<Test>::MissingAnnouncement
+        );
+    });
+}
diff --git a/cumulus/parachains/pallets/collective-content/src/weights.rs b/cumulus/parachains/pallets/collective-content/src/weights.rs
new file mode 100644
index 000000000000..7ebbaabb5acc
--- /dev/null
+++ b/cumulus/parachains/pallets/collective-content/src/weights.rs
@@ -0,0 +1,41 @@
+// Copyright (C) 2023 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! The pallet weight info trait and its unit implementation.
+
+use frame_support::weights::Weight;
+
+/// Weight information needed for the pallet.
+pub trait WeightInfo {
+    /// Returns the weight of the `set_charter` extrinsic.
+    fn set_charter() -> Weight;
+    /// Returns the weight of the `announce` extrinsic.
+    fn announce() -> Weight;
+    /// Returns the weight of the `remove_announcement` extrinsic.
+    fn remove_announcement() -> Weight;
+}
+
+/// Unit implementation of the [WeightInfo].
+impl WeightInfo for () {
+    fn set_charter() -> Weight {
+        Weight::zero()
+    }
+    fn announce() -> Weight {
+        Weight::zero()
+    }
+    fn remove_announcement() -> Weight {
+        Weight::zero()
+    }
+}
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/xcm_config.rs
index d59507e4bd07..65cf62a610f4 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/xcm_config.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/xcm_config.rs
@@ -186,6 +186,9 @@ match_types! {
 	pub type FellowshipSalaryPallet: impl Contains<MultiLocation> = {
 		MultiLocation { parents: 1, interior: X2(Parachain(1001), PalletInstance(64)) }
 	};
+	pub type AmbassadorSalaryPallet: impl Contains<MultiLocation> = {
+		MultiLocation { parents: 1, interior: X2(Parachain(1001), PalletInstance(74)) }
+	};
 }
 
 /// A call filter for the XCM Transact instruction. This is a temporary measure until we properly
@@ -372,6 +375,7 @@ pub type Barrier = TrailingSetTopicAsId<
 					ParentOrParentsPlurality,
 					FellowsPlurality,
 					FellowshipSalaryPallet,
+					AmbassadorSalaryPallet,
 				)>,
 				// Subscriptions for version tracking are OK.
 				AllowSubscriptionsFrom<ParentOrSiblings>,
diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-polkadot/Cargo.toml
index 167f460610d6..6a5806d3b901 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/Cargo.toml
+++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/Cargo.toml
@@ -72,6 +72,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f
 cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false }
 cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false }
 pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false }
+pallet-collective-content = { path = "../../../pallets/collective-content", default-features = false }
 parachain-info = { path = "../../../pallets/parachain-info", default-features = false }
 parachains-common = { path = "../../../common", default-features = false }
 
@@ -95,6 +96,7 @@ runtime-benchmarks = [
 	"pallet-alliance/runtime-benchmarks",
 	"pallet-balances/runtime-benchmarks",
 	"pallet-collator-selection/runtime-benchmarks",
+	"pallet-collective-content/runtime-benchmarks",
 	"pallet-collective/runtime-benchmarks",
 	"pallet-core-fellowship/runtime-benchmarks",
 	"pallet-multisig/runtime-benchmarks",
@@ -129,6 +131,7 @@ try-runtime = [
 	"pallet-authorship/try-runtime",
 	"pallet-balances/try-runtime",
 	"pallet-collator-selection/try-runtime",
+	"pallet-collective-content/try-runtime",
 	"pallet-collective/try-runtime",
 	"pallet-core-fellowship/try-runtime",
 	"pallet-multisig/try-runtime",
@@ -170,6 +173,7 @@ std = [
 	"pallet-authorship/std",
 	"pallet-balances/std",
 	"pallet-collator-selection/std",
+	"pallet-collective-content/std",
 	"pallet-collective/std",
 	"pallet-core-fellowship/std",
 	"pallet-multisig/std",
diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/mod.rs
new file mode 100644
index 000000000000..b055ffc8abf1
--- /dev/null
+++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/mod.rs
@@ -0,0 +1,255 @@
+// Copyright (C) 2022 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! The Ambassador Program.
+//!
+//! The module defines the following on-chain functionality of the Ambassador Program:
+//!
+//! - Managed set of program members, where every member has a [rank](ranks)
+//! (via [AmbassadorCollective](pallet_ranked_collective)).
+//! - Referendum functionality for the program members to propose, vote on, and execute
+//! proposals on behalf of the members of a certain [rank](Origin)
+//! (via [AmbassadorReferenda](pallet_referenda)).
+//! - Managed content (charter, announcements) (via [pallet_collective_content]).
+//! - Promotion and demotion periods, register of members' activity, and rank-based salaries
+//! (via [AmbassadorCore](pallet_core_fellowship)).
+//! - Members' salaries (via [AmbassadorSalary](pallet_salary), requiring a member to be
+//! imported or inducted into [AmbassadorCore](pallet_core_fellowship)).
+
+pub mod origins;
+mod tracks;
+
+use super::*;
+use crate::xcm_config::{DotAssetHub, FellowshipAdminBodyId};
+use frame_support::traits::{EitherOf, MapSuccess, TryMapSuccess};
+pub use origins::pallet_origins as pallet_ambassador_origins;
+use origins::pallet_origins::{
+    EnsureAmbassadorsVoice, EnsureAmbassadorsVoiceFrom, EnsureHeadAmbassadorsVoice, Origin,
+};
+use parachains_common::polkadot::account;
+use sp_core::ConstU128;
+use sp_runtime::traits::{CheckedReduceBy, ConstU16, ConvertToValue, Replace};
+use xcm::prelude::*;
+use xcm_builder::{AliasesIntoAccountId32, PayOverXcm};
+
+/// The Ambassador Program's member ranks.
+pub mod ranks {
+    use pallet_ranked_collective::Rank;
+
+    #[allow(dead_code)]
+    pub const CANDIDATE: Rank = 0;
+    pub const AMBASSADOR_TIER_1: Rank = 1;
+    pub const AMBASSADOR_TIER_2: Rank = 2;
+    pub const SENIOR_AMBASSADOR_TIER_3: Rank = 3;
+    pub const SENIOR_AMBASSADOR_TIER_4: Rank = 4;
+    pub const HEAD_AMBASSADOR_TIER_5: Rank = 5;
+    pub const HEAD_AMBASSADOR_TIER_6: Rank = 6;
+    pub const HEAD_AMBASSADOR_TIER_7: Rank = 7;
+    pub const MASTER_AMBASSADOR_TIER_8: Rank = 8;
+    pub const MASTER_AMBASSADOR_TIER_9: Rank = 9;
+}
+
+impl pallet_ambassador_origins::Config for Runtime {}
+
+pub type AmbassadorCollectiveInstance = pallet_ranked_collective::Instance2;
+
+/// Demotion is by any of:
+/// - Root can demote arbitrarily;
+/// - the FellowshipAdmin origin (i.e. token holder referendum);
+/// - a senior members' vote by the rank two above the current rank.
+pub type DemoteOrigin = EitherOf<
+    frame_system::EnsureRootWithSuccess<AccountId, ConstU16<65535>>,
+    EitherOf<
+        MapSuccess<
+            EnsureXcm<IsVoiceOfBody<GovernanceLocation, FellowshipAdminBodyId>>,
+            Replace<ConstU16<{ ranks::MASTER_AMBASSADOR_TIER_9 }>>,
+        >,
+        TryMapSuccess<
+            EnsureAmbassadorsVoiceFrom<ConstU16<{ ranks::SENIOR_AMBASSADOR_TIER_3 }>>,
+            CheckedReduceBy<ConstU16<2>>,
+        >,
+    >,
+>;
+
+/// Promotion and approval (rank-retention) is by any of:
+/// - Root can promote arbitrarily;
+/// - the FellowshipAdmin origin (i.e. token holder referendum);
+/// - a senior members' vote by the rank two above the new/current rank;
+/// - a member of rank `5` or above can add a candidate (rank `0`).
+pub type PromoteOrigin = EitherOf<
+    DemoteOrigin,
+    TryMapSuccess<
+        pallet_ranked_collective::EnsureMember<
+            Runtime,
+            AmbassadorCollectiveInstance,
+            { ranks::HEAD_AMBASSADOR_TIER_5 },
+        >,
+        Replace<ConstU16<0>>,
+    >,
+>;
+
+impl pallet_ranked_collective::Config<AmbassadorCollectiveInstance> for Runtime {
+    type WeightInfo = weights::pallet_ranked_collective_ambassador_collective::WeightInfo<Runtime>;
+    type RuntimeEvent = RuntimeEvent;
+    type PromoteOrigin = PromoteOrigin;
+    type DemoteOrigin = DemoteOrigin;
+    type Polls = AmbassadorReferenda;
+    type MinRankOfClass = sp_runtime::traits::Identity;
+    type VoteWeight = pallet_ranked_collective::Linear;
+}
+
+parameter_types! {
+    pub const AlarmInterval: BlockNumber = 1;
+    pub const SubmissionDeposit: Balance = 0;
+    pub const UndecidingTimeout: BlockNumber = 7 * DAYS;
+    // The Ambassador Referenda pallet account, used as a temporary place to deposit a slashed imbalance before the teleport to the treasury.
+    pub AmbassadorPalletAccount: AccountId = account::AMBASSADOR_REFERENDA_PALLET_ID.into_account_truncating();
+}
+
+pub type AmbassadorReferendaInstance = pallet_referenda::Instance2;
+
+impl pallet_referenda::Config<AmbassadorReferendaInstance> for Runtime {
+    type WeightInfo = weights::pallet_referenda_ambassador_referenda::WeightInfo<Runtime>;
+    type RuntimeCall = RuntimeCall;
+    type RuntimeEvent = RuntimeEvent;
+    type Scheduler = Scheduler;
+    type Currency = Balances;
+    // A proposal can be submitted by a member of the Ambassador Program of
+    // [ranks::SENIOR_AMBASSADOR_TIER_3] rank or higher.
+    type SubmitOrigin = pallet_ranked_collective::EnsureMember<
+        Runtime,
+        AmbassadorCollectiveInstance,
+        { ranks::SENIOR_AMBASSADOR_TIER_3 },
+    >;
+    type CancelOrigin = EitherOf<EnsureRoot<AccountId>, EnsureHeadAmbassadorsVoice>;
+    type KillOrigin = EitherOf<EnsureRoot<AccountId>, EnsureHeadAmbassadorsVoice>;
+    type Slash = ToParentTreasury<PolkadotTreasuryAccount, AmbassadorPalletAccount, Runtime>;
+    type Votes = pallet_ranked_collective::Votes;
+    type Tally = pallet_ranked_collective::TallyOf<Runtime, AmbassadorCollectiveInstance>;
+    type SubmissionDeposit = SubmissionDeposit;
+    type MaxQueued = ConstU32<20>;
+    type UndecidingTimeout = UndecidingTimeout;
+    type AlarmInterval = AlarmInterval;
+    type Tracks = tracks::TracksInfo;
+    type Preimages = Preimage;
+}
+
+parameter_types! {
+    pub const AnnouncementLifetime: BlockNumber = 180 * DAYS;
+    pub const MaxAnnouncements: u32 = 50;
+}
+
+pub type AmbassadorContentInstance = pallet_collective_content::Instance1;
+
+impl pallet_collective_content::Config<AmbassadorContentInstance> for Runtime {
+    type RuntimeEvent = RuntimeEvent;
+    type CharterOrigin = EitherOf<EnsureRoot<AccountId>, EnsureHeadAmbassadorsVoice>;
+    type AnnouncementLifetime = AnnouncementLifetime;
+    // An announcement can be submitted by a Senior Ambassador member or an ambassador plurality
+    // voice taken via referendum.
+    type AnnouncementOrigin = EitherOfDiverse<
+        pallet_ranked_collective::EnsureMember<
+            Runtime,
+            AmbassadorCollectiveInstance,
+            { ranks::SENIOR_AMBASSADOR_TIER_3 },
+        >,
+        EnsureAmbassadorsVoice,
+    >;
+    type MaxAnnouncements = MaxAnnouncements;
+    type WeightInfo = weights::pallet_collective_content::WeightInfo<Runtime>;
+}
+
+pub type AmbassadorCoreInstance = pallet_core_fellowship::Instance2;
+
+impl pallet_core_fellowship::Config<AmbassadorCoreInstance> for Runtime {
+    type WeightInfo = weights::pallet_core_fellowship_ambassador_core::WeightInfo<Runtime>;
+    type RuntimeEvent = RuntimeEvent;
+    type Members = pallet_ranked_collective::Pallet<Runtime, AmbassadorCollectiveInstance>;
+    type Balance = Balance;
+    // Parameters are set by any of:
+    // - Root;
+    // - the FellowshipAdmin origin (i.e. token holder referendum);
+    // - a vote among all Head Ambassadors.
+    type ParamsOrigin = EitherOfDiverse<
+        EnsureXcm<IsVoiceOfBody<GovernanceLocation, FellowshipAdminBodyId>>,
+        EnsureHeadAmbassadorsVoice,
+    >;
+    // Induction (creating a candidate) is by any of:
+    // - Root;
+    // - the FellowshipAdmin origin (i.e. token holder referendum);
+    // - a single Head Ambassador;
+    // - a vote among all senior members.
+    type InductOrigin = EitherOfDiverse<
+        EnsureXcm<IsVoiceOfBody<GovernanceLocation, FellowshipAdminBodyId>>,
+        EitherOfDiverse<
+            pallet_ranked_collective::EnsureMember<
+                Runtime,
+                AmbassadorCollectiveInstance,
+                { ranks::HEAD_AMBASSADOR_TIER_5 },
+            >,
+            EnsureAmbassadorsVoiceFrom<ConstU16<{ ranks::SENIOR_AMBASSADOR_TIER_3 }>>,
+        >,
+    >;
+    type ApproveOrigin = PromoteOrigin;
+    type PromoteOrigin = PromoteOrigin;
+    type EvidenceSize = ConstU32<65536>;
+}
+
+pub type AmbassadorSalaryInstance = pallet_salary::Instance2;
+
+parameter_types! {
+    // The interior location on AssetHub for the paying account. This is the Ambassador Salary
+    // pallet instance (which sits at index 74). This sovereign account will need funding.
+    pub AmbassadorSalaryLocation: InteriorMultiLocation = PalletInstance(74).into();
+}
+
+/// [`PayOverXcm`] setup to pay the Ambassador salary on the AssetHub in DOT.
+pub type AmbassadorSalaryPaymaster = PayOverXcm<
+    AmbassadorSalaryLocation,
+    crate::xcm_config::XcmRouter,
+    crate::PolkadotXcm,
+    ConstU32<{ 6 * HOURS }>,
+    AccountId,
+    (),
+    ConvertToValue<DotAssetHub>,
+    AliasesIntoAccountId32<(), AccountId>,
+>;
+
+impl pallet_salary::Config<AmbassadorSalaryInstance> for Runtime {
+    type WeightInfo = weights::pallet_salary_ambassador_salary::WeightInfo<Runtime>;
+    type RuntimeEvent = RuntimeEvent;
+
+    #[cfg(not(feature = "runtime-benchmarks"))]
+    type Paymaster = AmbassadorSalaryPaymaster;
+    #[cfg(feature = "runtime-benchmarks")]
+    type Paymaster = crate::impls::benchmarks::PayWithEnsure<
+        AmbassadorSalaryPaymaster,
+        crate::impls::benchmarks::OpenHrmpChannel<ConstU32<1000>>,
+    >;
+    type Members = pallet_ranked_collective::Pallet<Runtime, AmbassadorCollectiveInstance>;
+
+    #[cfg(not(feature = "runtime-benchmarks"))]
+    type Salary = pallet_core_fellowship::Pallet<Runtime, AmbassadorCoreInstance>;
+    #[cfg(feature = "runtime-benchmarks")]
+    type Salary = frame_support::traits::tokens::ConvertRank<
+        crate::impls::benchmarks::RankToSalary<Balances>,
+    >;
+    // 15 days to register for a salary payment.
+    type RegistrationPeriod = ConstU32<{ 15 * DAYS }>;
+    // 15 days to claim the salary payment.
+    type PayoutPeriod = ConstU32<{ 15 * DAYS }>;
+    // Total monthly salary budget.
+    type Budget = ConstU128<{ 10_000 * DOLLARS }>;
+}
diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/origins.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/origins.rs
new file mode 100644
index 000000000000..3ce8a6b9e5d1
--- /dev/null
+++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/origins.rs
@@ -0,0 +1,135 @@
+// Copyright (C) 2022 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! The Ambassador Program's origins.
+
+#[frame_support::pallet]
+pub mod pallet_origins {
+    use crate::ambassador::ranks;
+    use frame_support::pallet_prelude::*;
+    use pallet_ranked_collective::Rank;
+
+    #[pallet::pallet]
+    pub struct Pallet<T>(PhantomData<T>);
+
+    /// The pallet configuration trait.
+    #[pallet::config]
+    pub trait Config: frame_system::Config {}
+
+    #[derive(PartialEq, Eq, Clone, MaxEncodedLen, Encode, Decode, TypeInfo, RuntimeDebug)]
+    #[pallet::origin]
+    pub enum Origin {
+        /// Plurality voice of the [ranks::AMBASSADOR_TIER_1] members or above given via
+        /// referendum.
+        Ambassadors,
+        /// Plurality voice of the [ranks::AMBASSADOR_TIER_2] members or above given via
+        /// referendum.
+        AmbassadorsTier2,
+        /// Plurality voice of the [ranks::SENIOR_AMBASSADOR_TIER_3] members or above given via
+        /// referendum.
+        SeniorAmbassadors,
+        /// Plurality voice of the [ranks::SENIOR_AMBASSADOR_TIER_4] members or above given via
+        /// referendum.
+        SeniorAmbassadorsTier4,
+        /// Plurality voice of the [ranks::HEAD_AMBASSADOR_TIER_5] members or above given via
+        /// referendum.
+        HeadAmbassadors,
+        /// Plurality voice of the [ranks::HEAD_AMBASSADOR_TIER_6] members or above given via
+        /// referendum.
+        HeadAmbassadorsTier6,
+        /// Plurality voice of the [ranks::HEAD_AMBASSADOR_TIER_7] members or above given via
+        /// referendum.
+        HeadAmbassadorsTier7,
+        /// Plurality voice of the [ranks::MASTER_AMBASSADOR_TIER_8] members or above given via
+        /// referendum.
+        MasterAmbassadors,
+        /// Plurality voice of the [ranks::MASTER_AMBASSADOR_TIER_9] members or above given via
+        /// referendum.
+        MasterAmbassadorsTier9,
+    }
+
+    impl Origin {
+        /// Returns the rank that the origin `self` speaks for, or `None` if it doesn't speak for
+        /// any.
+        pub fn as_voice(&self) -> Option<Rank> {
+            Some(match &self {
+                Origin::Ambassadors => ranks::AMBASSADOR_TIER_1,
+                Origin::AmbassadorsTier2 => ranks::AMBASSADOR_TIER_2,
+                Origin::SeniorAmbassadors => ranks::SENIOR_AMBASSADOR_TIER_3,
+                Origin::SeniorAmbassadorsTier4 => ranks::SENIOR_AMBASSADOR_TIER_4,
+                Origin::HeadAmbassadors => ranks::HEAD_AMBASSADOR_TIER_5,
+                Origin::HeadAmbassadorsTier6 => ranks::HEAD_AMBASSADOR_TIER_6,
+                Origin::HeadAmbassadorsTier7 => ranks::HEAD_AMBASSADOR_TIER_7,
+                Origin::MasterAmbassadors => ranks::MASTER_AMBASSADOR_TIER_8,
+                Origin::MasterAmbassadorsTier9 => ranks::MASTER_AMBASSADOR_TIER_9,
+            })
+        }
+    }
+
+    /// Implementation of the [EnsureOrigin] trait for the [Origin::HeadAmbassadors] origin.
+    pub struct EnsureHeadAmbassadorsVoice;
+    impl<O: Into<Result<Origin, O>> + From<Origin>> EnsureOrigin<O> for EnsureHeadAmbassadorsVoice {
+        type Success = ();
+        fn try_origin(o: O) -> Result<Self::Success, O> {
+            o.into().and_then(|o| match o {
+                Origin::HeadAmbassadors => Ok(()),
+                r => Err(O::from(r)),
+            })
+        }
+
+        #[cfg(feature = "runtime-benchmarks")]
+        fn try_successful_origin() -> Result<O, ()> {
+            Ok(O::from(Origin::HeadAmbassadors))
+        }
+    }
+
+    /// Implementation of the [EnsureOrigin] trait for the plurality voice [Origin]s
+    /// from a given rank `R` with the success result of the corresponding [Rank].
+    pub struct EnsureAmbassadorsVoiceFrom<R>(PhantomData<R>);
+    impl<R: Get<Rank>, O: Into<Result<Origin, O>> + From<Origin>> EnsureOrigin<O>
+        for EnsureAmbassadorsVoiceFrom<R>
+    {
+        type Success = Rank;
+        fn try_origin(o: O) -> Result<Self::Success, O> {
+            o.into().and_then(|o| match Origin::as_voice(&o) {
+                Some(r) if r >= R::get() => Ok(r),
+                _ => Err(O::from(o)),
+            })
+        }
+
+        #[cfg(feature = "runtime-benchmarks")]
+        fn try_successful_origin() -> Result<O, ()> {
+            ranks::MASTER_AMBASSADOR_TIER_9
+                .ge(&R::get())
+                .then(|| O::from(Origin::MasterAmbassadorsTier9))
+                .ok_or(())
+        }
+    }
+
+    /// Implementation of the [EnsureOrigin] trait for the plurality voice [Origin]s with the
+    /// success result of the corresponding [Rank].
+    pub struct EnsureAmbassadorsVoice;
+    impl<O: Into<Result<Origin, O>> + From<Origin>> EnsureOrigin<O> for EnsureAmbassadorsVoice {
+        type Success = Rank;
+        fn try_origin(o: O) -> Result<Self::Success, O> {
+            o.into().and_then(|o| Origin::as_voice(&o).ok_or(O::from(o)))
+        }
+
+        #[cfg(feature = "runtime-benchmarks")]
+        fn try_successful_origin() -> Result<O, ()> {
+            Ok(O::from(Origin::MasterAmbassadorsTier9))
+        }
+    }
+}
diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/tracks.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/tracks.rs
new file mode 100644
index 000000000000..073d8e6ee362
--- /dev/null
+++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/tracks.rs
@@ -0,0 +1,282 @@
+// Copyright (C) 2022 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! The Ambassador Program's referenda voting tracks.
+
+use super::Origin;
+use crate::{Balance, BlockNumber, RuntimeOrigin, DAYS, DOLLARS, HOURS};
+use sp_runtime::Perbill;
+
+/// Referendum `TrackId` type.
+pub type TrackId = u16;
+
+/// Referendum track IDs.
+pub mod constants {
+    use super::TrackId;
+
+    pub const AMBASSADOR_TIER_1: TrackId = 1;
+    pub const AMBASSADOR_TIER_2: TrackId = 2;
+    pub const SENIOR_AMBASSADOR_TIER_3: TrackId = 3;
+    pub const SENIOR_AMBASSADOR_TIER_4: TrackId = 4;
+    pub const HEAD_AMBASSADOR_TIER_5: TrackId = 5;
+    pub const HEAD_AMBASSADOR_TIER_6: TrackId = 6;
+    pub const HEAD_AMBASSADOR_TIER_7: TrackId = 7;
+    pub const MASTER_AMBASSADOR_TIER_8: TrackId = 8;
+    pub const MASTER_AMBASSADOR_TIER_9: TrackId = 9;
+}
+
+/// The type implementing the [`pallet_referenda::TracksInfo`] trait for referenda pallet.
+pub struct TracksInfo;
+
+/// Information on the voting tracks.
+impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
+    type Id = TrackId;
+
+    type RuntimeOrigin = <RuntimeOrigin as frame_support::traits::OriginTrait>::PalletsOrigin;
+
+    /// Return the array of available tracks and their information.
+    fn tracks() -> &'static [(Self::Id, pallet_referenda::TrackInfo<Balance, BlockNumber>)] {
+        static DATA: [(TrackId, pallet_referenda::TrackInfo<Balance, BlockNumber>); 9] = [
+            (
+                constants::AMBASSADOR_TIER_1,
+                pallet_referenda::TrackInfo {
+                    name: "ambassador tier 1",
+                    max_deciding: 10,
+                    decision_deposit: 5 * DOLLARS,
+                    prepare_period: 24 * HOURS,
+                    decision_period: 7 * DAYS,
+                    confirm_period: 24 * HOURS,
+                    min_enactment_period: 1 * HOURS,
+                    min_approval: pallet_referenda::Curve::LinearDecreasing {
+                        length: Perbill::from_percent(100),
+                        floor: Perbill::from_percent(50),
+                        ceil: Perbill::from_percent(100),
+                    },
+                    min_support: pallet_referenda::Curve::LinearDecreasing {
+                        length: Perbill::from_percent(100),
+                        floor: Perbill::from_percent(10),
+                        ceil: Perbill::from_percent(50),
+                    },
+                },
+            ),
+            (
+                constants::AMBASSADOR_TIER_2,
+                pallet_referenda::TrackInfo {
+                    name: "ambassador tier 2",
+                    max_deciding: 10,
+                    decision_deposit: 5 * DOLLARS,
+                    prepare_period: 24 * HOURS,
+                    decision_period: 7 * DAYS,
+                    confirm_period: 24 * HOURS,
+                    min_enactment_period: 1 * HOURS,
+                    min_approval: pallet_referenda::Curve::LinearDecreasing {
+                        length: Perbill::from_percent(100),
+                        floor: Perbill::from_percent(50),
+                        ceil: Perbill::from_percent(100),
+                    },
+                    min_support: pallet_referenda::Curve::LinearDecreasing {
+                        length: Perbill::from_percent(100),
+                        floor: Perbill::from_percent(10),
+                        ceil: Perbill::from_percent(50),
+                    },
+                },
+            ),
+            (
+                constants::SENIOR_AMBASSADOR_TIER_3,
+                pallet_referenda::TrackInfo {
+                    name: "senior ambassador tier 3",
+                    max_deciding: 10,
+                    decision_deposit: 5 * DOLLARS,
+                    prepare_period: 24 * HOURS,
+                    decision_period: 7 * DAYS,
+                    confirm_period: 24 * HOURS,
+                    min_enactment_period: 1 * HOURS,
+                    min_approval: pallet_referenda::Curve::LinearDecreasing {
+                        length: Perbill::from_percent(100),
+                        floor: Perbill::from_percent(50),
+                        ceil: Perbill::from_percent(100),
+                    },
+                    min_support: pallet_referenda::Curve::LinearDecreasing {
+                        length: Perbill::from_percent(100),
+                        floor: Perbill::from_percent(10),
+                        ceil: Perbill::from_percent(50),
+                    },
+                },
+            ),
+            (
+                constants::SENIOR_AMBASSADOR_TIER_4,
+                pallet_referenda::TrackInfo {
+                    name: "senior ambassador tier 4",
+                    max_deciding: 10,
+                    decision_deposit: 5 * DOLLARS,
+                    prepare_period: 24 * HOURS,
+                    decision_period: 7 * DAYS,
+                    confirm_period: 24 * HOURS,
+                    min_enactment_period: 1 * HOURS,
+                    min_approval: pallet_referenda::Curve::LinearDecreasing {
+                        length: Perbill::from_percent(100),
+                        floor: Perbill::from_percent(50),
+                        ceil: Perbill::from_percent(100),
+                    },
+                    min_support: pallet_referenda::Curve::LinearDecreasing {
+                        length: Perbill::from_percent(100),
+                        floor: Perbill::from_percent(10),
+                        ceil: Perbill::from_percent(50),
+                    },
+                },
+            ),
+            (
+                constants::HEAD_AMBASSADOR_TIER_5,
+                pallet_referenda::TrackInfo {
+                    name: "head ambassador tier 5",
+                    max_deciding: 10,
+                    decision_deposit: 5 * DOLLARS,
+                    prepare_period: 24 * HOURS,
+                    decision_period: 7 * DAYS,
+                    confirm_period: 24 * HOURS,
+                    min_enactment_period: 1 * HOURS,
+                    min_approval: pallet_referenda::Curve::LinearDecreasing {
+                        length: Perbill::from_percent(100),
+                        floor: Perbill::from_percent(50),
+                        ceil: Perbill::from_percent(100),
+                    },
+                    min_support: pallet_referenda::Curve::LinearDecreasing {
+                        length: Perbill::from_percent(100),
+                        floor: Perbill::from_percent(10),
+                        ceil: Perbill::from_percent(50),
+                    },
+                },
+            ),
+            (
+                constants::HEAD_AMBASSADOR_TIER_6,
+                pallet_referenda::TrackInfo {
+                    name: "head ambassador tier 6",
+                    max_deciding: 10,
+                    decision_deposit: 5 * DOLLARS,
+                    prepare_period: 24 * HOURS,
+                    decision_period: 7 * DAYS,
+                    confirm_period: 24 * HOURS,
+                    min_enactment_period: 1 * HOURS,
+                    min_approval: pallet_referenda::Curve::LinearDecreasing {
+                        length: Perbill::from_percent(100),
+                        floor: Perbill::from_percent(50),
+                        ceil: Perbill::from_percent(100),
+                    },
+                    min_support: pallet_referenda::Curve::LinearDecreasing {
+                        length: Perbill::from_percent(100),
+                        floor: Perbill::from_percent(10),
+                        ceil: Perbill::from_percent(50),
+                    },
+                },
+            ),
+            (
+                constants::HEAD_AMBASSADOR_TIER_7,
+                pallet_referenda::TrackInfo {
+                    name: "head ambassador tier 7",
+                    max_deciding: 10,
+                    decision_deposit: 5 * DOLLARS,
+                    prepare_period: 24 * HOURS,
+                    decision_period: 7 * DAYS,
+                    confirm_period: 24 * HOURS,
+                    min_enactment_period: 1 * HOURS,
+                    min_approval: pallet_referenda::Curve::LinearDecreasing {
+                        length: Perbill::from_percent(100),
+                        floor: Perbill::from_percent(50),
+                        ceil: Perbill::from_percent(100),
+                    },
+                    min_support: pallet_referenda::Curve::LinearDecreasing {
+                        length: Perbill::from_percent(100),
+                        floor: Perbill::from_percent(10),
+                        ceil: Perbill::from_percent(50),
+                    },
+                },
+            ),
+            (
+                constants::MASTER_AMBASSADOR_TIER_8,
+                pallet_referenda::TrackInfo {
+                    name: "master ambassador tier 8",
+                    max_deciding: 10,
+                    decision_deposit: 5 * DOLLARS,
+                    prepare_period: 24 * HOURS,
+                    decision_period: 7 * DAYS,
+                    confirm_period: 24 * HOURS,
+                    min_enactment_period: 1 * HOURS,
+                    min_approval: pallet_referenda::Curve::LinearDecreasing {
+                        length: Perbill::from_percent(100),
+                        floor: Perbill::from_percent(50),
+                        ceil: Perbill::from_percent(100),
+                    },
+                    min_support: pallet_referenda::Curve::LinearDecreasing {
+                        length: Perbill::from_percent(100),
+                        floor: Perbill::from_percent(10),
+                        ceil: Perbill::from_percent(50),
+                    },
+                },
+            ),
+            (
+                constants::MASTER_AMBASSADOR_TIER_9,
+                pallet_referenda::TrackInfo {
+                    name: "master ambassador tier 9",
+                    max_deciding: 10,
+                    decision_deposit: 5 * DOLLARS,
+                    prepare_period: 24 * HOURS,
+                    decision_period: 7 * DAYS,
+                    confirm_period: 24 * HOURS,
+                    min_enactment_period: 1 * HOURS,
+                    min_approval: pallet_referenda::Curve::LinearDecreasing {
+                        length: Perbill::from_percent(100),
+                        floor: Perbill::from_percent(50),
+                        ceil: Perbill::from_percent(100),
+                    },
+                    min_support: pallet_referenda::Curve::LinearDecreasing {
+                        length: Perbill::from_percent(100),
+                        floor: Perbill::from_percent(10),
+                        ceil: Perbill::from_percent(50),
+                    },
+                },
+            ),
+        ];
+        &DATA[..]
+    }
+
+    /// Determine the voting track for the given `origin`.
+    fn track_for(id: &Self::RuntimeOrigin) -> Result<Self::Id, ()> {
+        #[cfg(feature = "runtime-benchmarks")]
+        {
+            // For benchmarks, we enable a root origin.
+            // It is important that this is not available in production!
+            let root: Self::RuntimeOrigin = frame_system::RawOrigin::Root.into();
+            if &root == id {
+                return Ok(constants::MASTER_AMBASSADOR_TIER_9)
+            }
+        }
+
+        match Origin::try_from(id.clone()) {
+            Ok(Origin::Ambassadors) => Ok(constants::AMBASSADOR_TIER_1),
+            Ok(Origin::AmbassadorsTier2) => Ok(constants::AMBASSADOR_TIER_2),
+            Ok(Origin::SeniorAmbassadors) => Ok(constants::SENIOR_AMBASSADOR_TIER_3),
+            Ok(Origin::SeniorAmbassadorsTier4) => Ok(constants::SENIOR_AMBASSADOR_TIER_4),
+            Ok(Origin::HeadAmbassadors) => Ok(constants::HEAD_AMBASSADOR_TIER_5),
+            Ok(Origin::HeadAmbassadorsTier6) => Ok(constants::HEAD_AMBASSADOR_TIER_6),
+            Ok(Origin::HeadAmbassadorsTier7) => Ok(constants::HEAD_AMBASSADOR_TIER_7),
+            Ok(Origin::MasterAmbassadors) => Ok(constants::MASTER_AMBASSADOR_TIER_8),
+            Ok(Origin::MasterAmbassadorsTier9) => Ok(constants::MASTER_AMBASSADOR_TIER_9),
+            _ => Err(()),
+        }
+    }
+}
+
+// implements [`frame_support::traits::Get`] for [`TracksInfo`]
+pallet_referenda::impl_tracksinfo_get!(TracksInfo, Balance, BlockNumber);
diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/fellowship/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/fellowship/mod.rs
index b97e44dda1be..2a2757ea5ceb 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/fellowship/mod.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/fellowship/mod.rs
@@ -20,11 +20,12 @@ pub(crate) mod migration;
 mod origins;
 mod tracks;
 use crate::{
-	impls::ToParentTreasury, weights, AccountId, Balance, Balances, FellowshipReferenda,
-	GovernanceLocation, PolkadotTreasuryAccount, Preimage, Runtime, RuntimeCall, RuntimeEvent,
-	RuntimeOrigin, Scheduler, DAYS,
+	impls::ToParentTreasury,
+	weights,
+	xcm_config::{FellowshipAdminBodyId, UsdtAssetHub},
+	AccountId, Balance, Balances, FellowshipReferenda, GovernanceLocation, PolkadotTreasuryAccount,
+	Preimage, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, Scheduler, DAYS,
 };
-use cumulus_primitives_core::Junction::GeneralIndex;
 use frame_support::{
 	parameter_types,
 	traits::{EitherOf, EitherOfDiverse, MapSuccess, OriginTrait, TryWithMorphedArg},
@@ -36,12 +37,10 @@ pub use origins::{
 };
 use pallet_ranked_collective::EnsureOfRank;
 use pallet_xcm::{EnsureXcm, IsVoiceOfBody};
-use parachains_common::polkadot::account;
-use polkadot_runtime_constants::{time::HOURS, xcm::body::FELLOWSHIP_ADMIN_INDEX};
+use parachains_common::{polkadot::account, HOURS};
 use sp_core::{ConstU128, ConstU32};
 use sp_runtime::traits::{AccountIdConversion, ConstU16, ConvertToValue, Replace, TakeFirst};
-use xcm::latest::BodyId;
-use xcm_builder::{AliasesIntoAccountId32, LocatableAssetId, PayOverXcm};
+use xcm_builder::{AliasesIntoAccountId32, PayOverXcm};
 
 #[cfg(feature = "runtime-benchmarks")]
 use crate::impls::benchmarks::{OpenHrmpChannel, PayWithEnsure};
@@ -64,7 +63,6 @@ pub mod ranks {
 parameter_types! {
 	// Referenda pallet account, used to temporarily deposit slashed imbalance before teleporting.
 	pub ReferendaPalletAccount: AccountId = account::REFERENDA_PALLET_ID.into_account_truncating();
-	pub const FellowshipAdminBodyId: BodyId = BodyId::Index(FELLOWSHIP_ADMIN_INDEX);
 }
 
 impl pallet_fellowship_origins::Config for Runtime {}
@@ -72,7 +70,7 @@ impl pallet_fellowship_origins::Config for Runtime {}
 pub type FellowshipReferendaInstance = pallet_referenda::Instance1;
 
 impl pallet_referenda::Config<FellowshipReferendaInstance> for Runtime {
-	type WeightInfo = weights::pallet_referenda::WeightInfo<Runtime>;
+	type WeightInfo = weights::pallet_referenda_fellowship_referenda::WeightInfo<Runtime>;
 	type RuntimeCall = RuntimeCall;
 	type RuntimeEvent = RuntimeEvent;
 	type Scheduler = Scheduler;
@@ -107,7 +105,7 @@ impl pallet_referenda::Config<FellowshipReferendaInstance> for Runtime {
 pub type FellowshipCollectiveInstance = pallet_ranked_collective::Instance1;
 
 impl pallet_ranked_collective::Config<FellowshipCollectiveInstance> for Runtime {
-	type WeightInfo = weights::pallet_ranked_collective::WeightInfo<Runtime>;
+	type WeightInfo = weights::pallet_ranked_collective_fellowship_collective::WeightInfo<Runtime>;
 	type RuntimeEvent = RuntimeEvent;
 
 	#[cfg(not(feature = "runtime-benchmarks"))]
@@ -139,7 +137,7 @@ impl pallet_ranked_collective::Config<FellowshipCollectiveInstance> for Runtime
 pub type FellowshipCoreInstance = pallet_core_fellowship::Instance1;
 
 impl pallet_core_fellowship::Config<FellowshipCoreInstance> for Runtime {
-	type WeightInfo = weights::pallet_core_fellowship::WeightInfo<Runtime>;
+	type WeightInfo = weights::pallet_core_fellowship_fellowship_core::WeightInfo<Runtime>;
 	type RuntimeEvent = RuntimeEvent;
 	type Members = pallet_ranked_collective::Pallet<Runtime, FellowshipCollectiveInstance>;
 	type Balance = Balance;
@@ -197,12 +195,6 @@ pub type FellowshipSalaryInstance = pallet_salary::Instance1;
 use xcm::prelude::*;
 
 parameter_types! {
-	pub AssetHub: MultiLocation = (Parent, Parachain(1000)).into();
-	pub AssetHubUsdtId: AssetId = (PalletInstance(50), GeneralIndex(1984)).into();
-	pub UsdtAsset: LocatableAssetId = LocatableAssetId {
-		location: AssetHub::get(),
-		asset_id: AssetHubUsdtId::get(),
-	};
 	// The interior location on AssetHub for the paying account. This is the Fellowship Salary
 	// pallet instance (which sits at index 64). This sovereign account will need funding.
 	pub Interior: InteriorMultiLocation = PalletInstance(64).into();
@@ -218,12 +210,12 @@ pub type FellowshipSalaryPaymaster = PayOverXcm<
 	ConstU32<{ 6 * HOURS }>,
 	AccountId,
 	(),
-	ConvertToValue<UsdtAsset>,
+	ConvertToValue<UsdtAssetHub>,
 	AliasesIntoAccountId32<(), AccountId>,
 >;
 
 impl pallet_salary::Config<FellowshipSalaryInstance> for Runtime {
-	type WeightInfo = weights::pallet_salary::WeightInfo<Runtime>;
+	type WeightInfo = weights::pallet_salary_fellowship_salary::WeightInfo<Runtime>;
 	type RuntimeEvent = RuntimeEvent;
 
 	#[cfg(not(feature = "runtime-benchmarks"))]
diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs
index 70e62a4dcdf9..d833c75d470c 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs
@@ -36,11 +36,13 @@
 #[cfg(feature = "std")]
 include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs"));
 
+pub mod ambassador;
 pub mod impls;
 mod weights;
 pub mod xcm_config;
 
 // Fellowship configurations.
 pub mod fellowship;
+pub use ambassador::pallet_ambassador_origins;
 
 use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases;
 use fellowship::{
@@ -292,6 +294,8 @@ pub enum ProxyType {
 	Alliance,
 	/// Fellowship proxy. Allows calls related to the Fellowship.
 	Fellowship,
+	/// Ambassador proxy. Allows calls related to the Ambassador Program.
+	Ambassador,
 }
 impl Default for ProxyType {
 	fn default() -> Self {
@@ -326,6 +330,18 @@ impl InstanceFilter<RuntimeCall> for ProxyType {
 				c,
 				RuntimeCall::FellowshipCollective { .. } |
 					RuntimeCall::FellowshipReferenda { .. } |
+					RuntimeCall::FellowshipCore { .. } |
+					RuntimeCall::FellowshipSalary { .. } |
+					RuntimeCall::Utility { .. } |
+					RuntimeCall::Multisig { .. }
+			),
+			ProxyType::Ambassador => matches!(
+				c,
+				RuntimeCall::AmbassadorCollective { .. } |
+					RuntimeCall::AmbassadorReferenda { .. } |
+					RuntimeCall::AmbassadorContent { .. } |
+					RuntimeCall::AmbassadorCore { .. } |
+					RuntimeCall::AmbassadorSalary { .. } |
 					RuntimeCall::Utility { .. } |
 					RuntimeCall::Multisig { .. }
 			),
@@ -616,6 +632,14 @@ construct_runtime!(
 		FellowshipCore: pallet_core_fellowship::<Instance1>::{Pallet, Call, Storage, Event<T>} = 63,
 		// pub type FellowshipSalaryInstance = pallet_salary::Instance1;
 		FellowshipSalary: pallet_salary::<Instance1>::{Pallet, Call, Storage, Event<T>} = 64,
+
+		// Ambassador Program.
+		AmbassadorCollective: pallet_ranked_collective::<Instance2>::{Pallet, Call, Storage, Event<T>} = 70,
+		AmbassadorReferenda: pallet_referenda::<Instance2>::{Pallet, Call, Storage, Event<T>} = 71,
+		AmbassadorOrigins: pallet_ambassador_origins::{Origin} = 72,
+		AmbassadorCore: pallet_core_fellowship::<Instance2>::{Pallet, Call, Storage, Event<T>} = 73,
+		AmbassadorSalary: pallet_salary::<Instance2>::{Pallet, Call, Storage, Event<T>} = 74,
+		AmbassadorContent: pallet_collective_content::<Instance1>::{Pallet, Call, Storage, Event<T>} = 75,
 	}
 );
 
@@ -684,6 +708,11 @@ mod benches {
 		[pallet_ranked_collective, FellowshipCollective]
 		[pallet_core_fellowship, FellowshipCore]
 		[pallet_salary, FellowshipSalary]
+		[pallet_referenda, AmbassadorReferenda]
+		[pallet_ranked_collective, AmbassadorCollective]
+		[pallet_collective_content, AmbassadorContent]
+		[pallet_core_fellowship, AmbassadorCore]
+		[pallet_salary, AmbassadorSalary]
 	);
 }
diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/mod.rs
index 9ddf53792ea4..b5a3a892f79a 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/mod.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/mod.rs
@@ -22,13 +22,18 @@ pub mod pallet_alliance;
 pub mod pallet_balances;
 pub mod pallet_collator_selection;
 pub mod pallet_collective;
-pub mod pallet_core_fellowship;
+pub mod pallet_collective_content;
+pub mod pallet_core_fellowship_ambassador_core;
+pub mod pallet_core_fellowship_fellowship_core;
 pub mod pallet_multisig;
 pub mod pallet_preimage;
 pub mod pallet_proxy;
-pub mod pallet_ranked_collective;
-pub mod pallet_referenda;
-pub mod pallet_salary;
+pub mod pallet_ranked_collective_ambassador_collective;
+pub mod pallet_ranked_collective_fellowship_collective;
+pub mod pallet_referenda_ambassador_referenda;
+pub mod pallet_referenda_fellowship_referenda;
+pub mod pallet_salary_ambassador_salary;
+pub mod pallet_salary_fellowship_salary;
 pub mod pallet_scheduler;
 pub mod pallet_session;
 pub mod pallet_timestamp;
diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collective_content.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collective_content.rs
new file mode 100644
index 000000000000..e66907b94530
--- /dev/null
+++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collective_content.rs
@@ -0,0 +1,94 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus. If not, see <https://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `pallet_collective_content`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2023-08-18, STEPS: `10`, REPEAT: `3`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `cob`, CPU: `<unknown>`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// ./target/debug/polkadot-parachain
+// benchmark
+// pallet
+// --chain=collectives-polkadot-dev
+// --steps=10
+// --repeat=3
+// --pallet=pallet_collective_content
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `pallet_collective_content`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_collective_content::WeightInfo for WeightInfo<T> {
+	/// Storage: `AmbassadorContent::Charter` (r:0 w:1)
+	/// Proof: `AmbassadorContent::Charter` (`max_values`: Some(1), `max_size`: Some(70), added: 565, mode: `MaxEncodedLen`)
+	fn set_charter() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 99_000_000 picoseconds.
+		Weight::from_parts(99_000_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	/// Storage: `AmbassadorCollective::Members` (r:1 w:0)
+	/// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorContent::AnnouncementsCount` (r:1 w:1)
+	/// Proof: `AmbassadorContent::AnnouncementsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorContent::NextAnnouncementExpireAt` (r:1 w:1)
+	/// Proof: `AmbassadorContent::NextAnnouncementExpireAt` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorContent::Announcements` (r:0 w:1)
+	/// Proof: `AmbassadorContent::Announcements` (`max_values`: None, `max_size`: Some(90), added: 2565, mode: `MaxEncodedLen`)
+	fn announce() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `285`
+		//  Estimated: `3507`
+		// Minimum execution time: 273_000_000 picoseconds.
+		Weight::from_parts(278_000_000, 0)
+			.saturating_add(Weight::from_parts(0, 3507))
+			.saturating_add(T::DbWeight::get().reads(3))
+			.saturating_add(T::DbWeight::get().writes(3))
+	}
+	/// Storage: `AmbassadorCollective::Members` (r:1 w:0)
+	/// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorContent::Announcements` (r:1 w:1)
+	/// Proof: `AmbassadorContent::Announcements` (`max_values`: None, `max_size`: Some(90), added: 2565, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorContent::AnnouncementsCount` (r:1 w:1)
+	/// Proof: `AmbassadorContent::AnnouncementsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	fn remove_announcement() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `450`
+		//  Estimated: `3555`
+		// Minimum execution time: 326_000_000 picoseconds.
+		Weight::from_parts(338_000_000, 0)
+			.saturating_add(Weight::from_parts(0, 3555))
+			.saturating_add(T::DbWeight::get().reads(3))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+}
diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_core_fellowship_ambassador_core.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_core_fellowship_ambassador_core.rs
new file mode 100644
index 000000000000..f40940a8b25f
--- /dev/null
+++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_core_fellowship_ambassador_core.rs
@@ -0,0 +1,223 @@
+// Copyright Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus. If not, see <https://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `pallet_core_fellowship`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `cob`, CPU: `<unknown>`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// target/release/polkadot-parachain
+// benchmark
+// pallet
+// --chain=collectives-polkadot-dev
+// --wasm-execution=compiled
+// --pallet=pallet_core_fellowship
+// --extrinsic=*
+// --steps=2
+// --repeat=2
+// --json
+// --header=./file_header.txt
+// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `pallet_core_fellowship`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<T> {
+	/// Storage: `AmbassadorCore::Params` (r:0 w:1)
+	/// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`)
+	fn set_params() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 11_000_000 picoseconds.
+		Weight::from_parts(11_000_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	/// Storage: `AmbassadorCore::Member` (r:1 w:1)
+	/// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCollective::Members` (r:1 w:1)
+	/// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCore::Params` (r:1 w:0)
+	/// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1)
+	/// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCollective::IdToIndex` (r:1 w:0)
+	/// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCore::MemberEvidence` (r:1 w:1)
+	/// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`)
+	fn bump_offboard() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `66011`
+		//  Estimated: `69046`
+		// Minimum execution time: 96_000_000 picoseconds.
+		Weight::from_parts(111_000_000, 0)
+			.saturating_add(Weight::from_parts(0, 69046))
+			.saturating_add(T::DbWeight::get().reads(6))
+			.saturating_add(T::DbWeight::get().writes(4))
+	}
+	/// Storage: `AmbassadorCore::Member` (r:1 w:1)
+	/// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCollective::Members` (r:1 w:1)
+	/// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCore::Params` (r:1 w:0)
+	/// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1)
+	/// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCollective::IdToIndex` (r:1 w:0)
+	/// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCore::MemberEvidence` (r:1 w:1)
+	/// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`)
+	fn bump_demote() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `66121`
+		//  Estimated: `69046`
+		// Minimum execution time: 99_000_000 picoseconds.
+		Weight::from_parts(116_000_000, 0)
+			.saturating_add(Weight::from_parts(0, 69046))
+			.saturating_add(T::DbWeight::get().reads(6))
+			.saturating_add(T::DbWeight::get().writes(4))
+	}
+	/// Storage: `AmbassadorCollective::Members` (r:1 w:0)
+	/// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCore::Member` (r:1 w:1)
+	/// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
+	fn set_active() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `360`
+		//  Estimated: `3514`
+		// Minimum execution time: 21_000_000 picoseconds.
+		Weight::from_parts(22_000_000, 0)
+			.saturating_add(Weight::from_parts(0, 3514))
+			.saturating_add(T::DbWeight::get().reads(2))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	/// Storage: `AmbassadorCore::Member` (r:1 w:1)
+	/// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCollective::Members` (r:1 w:1)
+	/// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1)
+	/// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCollective::IndexToId` (r:0 w:1)
+	/// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCollective::IdToIndex` (r:0 w:1)
+	/// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
+	fn induct() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `118`
+		//  Estimated: `3514`
+		// Minimum execution time: 36_000_000 picoseconds.
+		Weight::from_parts(36_000_000, 0)
+			.saturating_add(Weight::from_parts(0, 3514))
+			.saturating_add(T::DbWeight::get().reads(3))
+			.saturating_add(T::DbWeight::get().writes(5))
+	}
+	/// Storage: `AmbassadorCollective::Members` (r:1 w:1)
+	/// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCore::Member` (r:1 w:1)
+	/// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCore::Params` (r:1 w:0)
+	/// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1)
+	/// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCore::MemberEvidence` (r:1 w:1)
+	/// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCollective::IndexToId` (r:0 w:1)
+	/// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCollective::IdToIndex` (r:0 w:1)
+	/// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
+	fn promote() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `65989`
+		//  Estimated: `69046`
+		// Minimum execution time: 95_000_000 picoseconds.
+ Weight::from_parts(110_000_000, 0) + .saturating_add(Weight::from_parts(0, 69046)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::Member` (r:1 w:1) + /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::MemberEvidence` (r:0 w:1) + /// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + fn offboard() -> Weight { + // Proof Size summary in bytes: + // Measured: `331` + // Estimated: `3514` + // Minimum execution time: 21_000_000 picoseconds. + Weight::from_parts(22_000_000, 0) + .saturating_add(Weight::from_parts(0, 3514)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorCore::Member` (r:1 w:1) + /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + fn import() -> Weight { + // Proof Size summary in bytes: + // Measured: `285` + // Estimated: `3514` + // Minimum execution time: 20_000_000 picoseconds. + Weight::from_parts(21_000_000, 0) + .saturating_add(Weight::from_parts(0, 3514)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::Member` (r:1 w:1) + /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::MemberEvidence` (r:1 w:1) + /// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + fn approve() -> Weight { + // Proof Size summary in bytes: + // Measured: `65967` + // Estimated: `69046` + // Minimum execution time: 78_000_000 picoseconds. + Weight::from_parts(104_000_000, 0) + .saturating_add(Weight::from_parts(0, 69046)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorCore::Member` (r:1 w:0) + /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::MemberEvidence` (r:1 w:1) + /// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + fn submit_evidence() -> Weight { + // Proof Size summary in bytes: + // Measured: `151` + // Estimated: `69046` + // Minimum execution time: 43_000_000 picoseconds. 
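+ // Editorial note: the `Estimated` proof size above appears to be a small fixed
+ // overhead (about 990 bytes) plus the largest `added` value among the storage
+ // items touched; `MemberEvidence` dominates with 68_056 + 990 = 69_046, which
+ // is why every evidence-touching call in this file carries the same ~69 KiB
+ // proof-size bound.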
+ Weight::from_parts(44_000_000, 0) + .saturating_add(Weight::from_parts(0, 69046)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_core_fellowship.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_core_fellowship_fellowship_core.rs similarity index 87% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_core_fellowship.rs rename to cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_core_fellowship_fellowship_core.rs index d053513b53ac..434986b03bba 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_core_fellowship.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_core_fellowship_fellowship_core.rs @@ -17,24 +17,21 @@ //! Autogenerated weights for `pallet_core_fellowship` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 +//! HOSTNAME: `cob`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/release/polkadot-parachain // benchmark // pallet // --chain=collectives-polkadot-dev // --wasm-execution=compiled // --pallet=pallet_core_fellowship -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* -// --steps=50 -// --repeat=20 +// --steps=2 +// --repeat=2 // --json // --header=./file_header.txt // --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ @@ -56,8 +53,8 @@ impl pallet_core_fellowship::WeightInfo for WeightInfo< // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_077_000 picoseconds. - Weight::from_parts(9_356_000, 0) + // Minimum execution time: 11_000_000 picoseconds. + Weight::from_parts(12_000_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -75,10 +72,10 @@ impl pallet_core_fellowship::WeightInfo for WeightInfo< /// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) fn bump_offboard() -> Weight { // Proof Size summary in bytes: - // Measured: `66111` + // Measured: `66144` // Estimated: `69046` - // Minimum execution time: 128_419_000 picoseconds. - Weight::from_parts(149_318_000, 0) + // Minimum execution time: 109_000_000 picoseconds. 
+ Weight::from_parts(125_000_000, 0) .saturating_add(Weight::from_parts(0, 69046)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -97,10 +94,10 @@ impl pallet_core_fellowship::WeightInfo for WeightInfo< /// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) fn bump_demote() -> Weight { // Proof Size summary in bytes: - // Measured: `66221` + // Measured: `66254` // Estimated: `69046` - // Minimum execution time: 127_629_000 picoseconds. - Weight::from_parts(130_928_000, 0) + // Minimum execution time: 112_000_000 picoseconds. + Weight::from_parts(114_000_000, 0) .saturating_add(Weight::from_parts(0, 69046)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -111,10 +108,10 @@ impl pallet_core_fellowship::WeightInfo for WeightInfo< /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) fn set_active() -> Weight { // Proof Size summary in bytes: - // Measured: `460` + // Measured: `493` // Estimated: `3514` - // Minimum execution time: 18_655_000 picoseconds. - Weight::from_parts(19_331_000, 0) + // Minimum execution time: 22_000_000 picoseconds. + Weight::from_parts(27_000_000, 0) .saturating_add(Weight::from_parts(0, 3514)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -131,10 +128,10 @@ impl pallet_core_fellowship::WeightInfo for WeightInfo< /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn induct() -> Weight { // Proof Size summary in bytes: - // Measured: `218` + // Measured: `251` // Estimated: `3514` - // Minimum execution time: 28_764_000 picoseconds. - Weight::from_parts(29_385_000, 0) + // Minimum execution time: 35_000_000 picoseconds. + Weight::from_parts(36_000_000, 0) .saturating_add(Weight::from_parts(0, 3514)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(5)) @@ -155,10 +152,10 @@ impl pallet_core_fellowship::WeightInfo for WeightInfo< /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn promote() -> Weight { // Proof Size summary in bytes: - // Measured: `66089` + // Measured: `66122` // Estimated: `69046` - // Minimum execution time: 123_179_000 picoseconds. - Weight::from_parts(125_302_000, 0) + // Minimum execution time: 97_000_000 picoseconds. + Weight::from_parts(129_000_000, 0) .saturating_add(Weight::from_parts(0, 69046)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(6)) @@ -171,10 +168,10 @@ impl pallet_core_fellowship::WeightInfo for WeightInfo< /// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) fn offboard() -> Weight { // Proof Size summary in bytes: - // Measured: `431` + // Measured: `464` // Estimated: `3514` - // Minimum execution time: 19_700_000 picoseconds. - Weight::from_parts(20_319_000, 0) + // Minimum execution time: 22_000_000 picoseconds. 
+ Weight::from_parts(22_000_000, 0)
.saturating_add(Weight::from_parts(0, 3514))
.saturating_add(T::DbWeight::get().reads(2))
.saturating_add(T::DbWeight::get().writes(2))
@@ -185,10 +182,10 @@ impl pallet_core_fellowship::WeightInfo for WeightInfo<
/// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
fn import() -> Weight {
// Proof Size summary in bytes:
- // Measured: `385`
+ // Measured: `418`
// Estimated: `3514`
- // Minimum execution time: 18_048_000 picoseconds.
- Weight::from_parts(18_345_000, 0)
+ // Minimum execution time: 20_000_000 picoseconds.
+ Weight::from_parts(24_000_000, 0)
.saturating_add(Weight::from_parts(0, 3514))
.saturating_add(T::DbWeight::get().reads(2))
.saturating_add(T::DbWeight::get().writes(1))
@@ -201,10 +198,10 @@ impl pallet_core_fellowship::WeightInfo for WeightInfo<
/// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`)
fn approve() -> Weight {
// Proof Size summary in bytes:
- // Measured: `66067`
+ // Measured: `66100`
// Estimated: `69046`
- // Minimum execution time: 108_578_000 picoseconds.
- Weight::from_parts(111_311_000, 0)
+ // Minimum execution time: 89_000_000 picoseconds.
+ Weight::from_parts(119_000_000, 0)
.saturating_add(Weight::from_parts(0, 69046))
.saturating_add(T::DbWeight::get().reads(3))
.saturating_add(T::DbWeight::get().writes(2))
@@ -215,10 +212,10 @@ impl pallet_core_fellowship::WeightInfo for WeightInfo<
/// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`)
fn submit_evidence() -> Weight {
// Proof Size summary in bytes:
- // Measured: `151`
+ // Measured: `184`
// Estimated: `69046`
- // Minimum execution time: 94_484_000 picoseconds.
- Weight::from_parts(97_930_000, 0)
+ // Minimum execution time: 43_000_000 picoseconds.
+ Weight::from_parts(52_000_000, 0)
.saturating_add(Weight::from_parts(0, 69046))
.saturating_add(T::DbWeight::get().reads(2))
.saturating_add(T::DbWeight::get().writes(1))
diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_ranked_collective_ambassador_collective.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_ranked_collective_ambassador_collective.rs
new file mode 100644
index 000000000000..a6372c4b89dc
--- /dev/null
+++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_ranked_collective_ambassador_collective.rs
@@ -0,0 +1,177 @@
+// Copyright Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus. If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `pallet_ranked_collective`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `cob`, CPU: ``
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// target/release/polkadot-parachain
+// benchmark
+// pallet
+// --chain=collectives-polkadot-dev
+// --wasm-execution=compiled
+// --pallet=pallet_ranked_collective
+// --extrinsic=*
+// --steps=2
+// --repeat=2
+// --json
+// --header=./file_header.txt
+// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `pallet_ranked_collective`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_ranked_collective::WeightInfo for WeightInfo<T> {
+ /// Storage: `AmbassadorCollective::Members` (r:1 w:1)
+ /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
+ /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1)
+ /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`)
+ /// Storage: `AmbassadorCollective::IndexToId` (r:0 w:1)
+ /// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
+ /// Storage: `AmbassadorCollective::IdToIndex` (r:0 w:1)
+ /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
+ fn add_member() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `42`
+ // Estimated: `3507`
+ // Minimum execution time: 21_000_000 picoseconds.
+ Weight::from_parts(23_000_000, 0)
+ .saturating_add(Weight::from_parts(0, 3507))
+ .saturating_add(T::DbWeight::get().reads(2))
+ .saturating_add(T::DbWeight::get().writes(4))
+ }
+ /// Storage: `AmbassadorCollective::Members` (r:1 w:1)
+ /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
+ /// Storage: `AmbassadorCollective::MemberCount` (r:11 w:11)
+ /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`)
+ /// Storage: `AmbassadorCollective::IdToIndex` (r:11 w:11)
+ /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
+ /// Storage: `AmbassadorCollective::IndexToId` (r:11 w:11)
+ /// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
+ /// The range of component `r` is `[0, 10]`.
+ /// The range of component `r` is `[0, 10]`.
+ fn remove_member(r: u32, ) -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `508 + r * (281 ±0)`
+ // Estimated: `3519 + r * (2529 ±0)`
+ // Minimum execution time: 34_000_000 picoseconds.
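+ // Editorial sketch of the linear model encoded below, with `r` the number of
+ // ranks to strip:
+ //   ref_time(r)   = 36_500_000 + 16_000_000 * r picoseconds, before db costs
+ //   db ops(r)     = (4 + 3r) reads and (4 + 3r) writes, priced via `T::DbWeight`
+ //   proof_size(r) = 3_519 + 2_529 * r bytes
+ // e.g. at r = 10 this charges 196_500_000 ps of pure execution plus 34 reads,
+ // 34 writes and a 28_809-byte proof bound.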
+ Weight::from_parts(36_500_000, 0) + .saturating_add(Weight::from_parts(0, 3519)) + // Standard Error: 158_113 + .saturating_add(Weight::from_parts(16_000_000, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 2529).saturating_mul(r.into())) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:1) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IndexToId` (r:0 w:1) + /// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IdToIndex` (r:0 w:1) + /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 10]`. + /// The range of component `r` is `[0, 10]`. + fn promote_member(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `210 + r * (17 ±0)` + // Estimated: `3507` + // Minimum execution time: 25_000_000 picoseconds. + Weight::from_parts(26_000_000, 0) + .saturating_add(Weight::from_parts(0, 3507)) + // Standard Error: 180_277 + .saturating_add(Weight::from_parts(650_000, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:1) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IdToIndex` (r:1 w:1) + /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IndexToId` (r:1 w:1) + /// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 10]`. + /// The range of component `r` is `[0, 10]`. + fn demote_member(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `508 + r * (71 ±0)` + // Estimated: `3519` + // Minimum execution time: 34_000_000 picoseconds. 
+ Weight::from_parts(36_500_000, 0) + .saturating_add(Weight::from_parts(0, 3519)) + // Standard Error: 335_410 + .saturating_add(Weight::from_parts(550_000, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::Voting` (r:1 w:1) + /// Proof: `AmbassadorCollective::Voting` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn vote() -> Weight { + // Proof Size summary in bytes: + // Measured: `566` + // Estimated: `317568` + // Minimum execution time: 57_000_000 picoseconds. + Weight::from_parts(60_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::VotingCleanup` (r:1 w:0) + /// Proof: `AmbassadorCollective::VotingCleanup` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::Voting` (r:100 w:100) + /// Proof: `AmbassadorCollective::Voting` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 100]`. + /// The range of component `n` is `[0, 100]`. + fn cleanup_poll(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `209 + n * (52 ±0)` + // Estimated: `4365 + n * (2550 ±0)` + // Minimum execution time: 18_000_000 picoseconds. + Weight::from_parts(18_500_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + // Standard Error: 11_180 + .saturating_add(Weight::from_parts(1_335_000, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2550).saturating_mul(n.into())) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_ranked_collective.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_ranked_collective_fellowship_collective.rs similarity index 82% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_ranked_collective.rs rename to cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_ranked_collective_fellowship_collective.rs index 561edda953b9..7515aecbb525 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_ranked_collective.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_ranked_collective_fellowship_collective.rs @@ -17,24 +17,21 @@ //! 
Autogenerated weights for `pallet_ranked_collective` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 +//! HOSTNAME: `cob`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/release/polkadot-parachain // benchmark // pallet // --chain=collectives-polkadot-dev // --wasm-execution=compiled // --pallet=pallet_ranked_collective -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* -// --steps=50 -// --repeat=20 +// --steps=2 +// --repeat=2 // --json // --header=./file_header.txt // --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ @@ -62,8 +59,8 @@ impl pallet_ranked_collective::WeightInfo for WeightInf // Proof Size summary in bytes: // Measured: `142` // Estimated: `3507` - // Minimum execution time: 16_027_000 picoseconds. - Weight::from_parts(16_501_000, 0) + // Minimum execution time: 21_000_000 picoseconds. + Weight::from_parts(22_000_000, 0) .saturating_add(Weight::from_parts(0, 3507)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) @@ -77,15 +74,16 @@ impl pallet_ranked_collective::WeightInfo for WeightInf /// Storage: `FellowshipCollective::IndexToId` (r:11 w:11) /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 10]`. + /// The range of component `r` is `[0, 10]`. fn remove_member(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `617 + r * (281 ±0)` + // Measured: `608 + r * (281 ±0)` // Estimated: `3519 + r * (2529 ±0)` - // Minimum execution time: 27_829_000 picoseconds. - Weight::from_parts(30_053_705, 0) + // Minimum execution time: 35_000_000 picoseconds. + Weight::from_parts(36_500_000, 0) .saturating_add(Weight::from_parts(0, 3519)) - // Standard Error: 26_813 - .saturating_add(Weight::from_parts(13_088_861, 0).saturating_mul(r.into())) + // Standard Error: 254_950 + .saturating_add(Weight::from_parts(15_900_000, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(4)) @@ -101,15 +99,16 @@ impl pallet_ranked_collective::WeightInfo for WeightInf /// Storage: `FellowshipCollective::IdToIndex` (r:0 w:1) /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 10]`. + /// The range of component `r` is `[0, 10]`. fn promote_member(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `314 + r * (17 ±0)` + // Measured: `310 + r * (17 ±0)` // Estimated: `3507` - // Minimum execution time: 19_762_000 picoseconds. - Weight::from_parts(20_493_905, 0) + // Minimum execution time: 25_000_000 picoseconds. 
+ Weight::from_parts(25_500_000, 0) .saturating_add(Weight::from_parts(0, 3507)) - // Standard Error: 5_519 - .saturating_add(Weight::from_parts(349_033, 0).saturating_mul(r.into())) + // Standard Error: 70_710 + .saturating_add(Weight::from_parts(400_000, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -122,15 +121,16 @@ impl pallet_ranked_collective::WeightInfo for WeightInf /// Storage: `FellowshipCollective::IndexToId` (r:1 w:1) /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 10]`. + /// The range of component `r` is `[0, 10]`. fn demote_member(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `632 + r * (72 ±0)` + // Measured: `608 + r * (71 ±0)` // Estimated: `3519` - // Minimum execution time: 28_092_000 picoseconds. - Weight::from_parts(30_800_398, 0) + // Minimum execution time: 35_000_000 picoseconds. + Weight::from_parts(37_500_000, 0) .saturating_add(Weight::from_parts(0, 3519)) - // Standard Error: 17_223 - .saturating_add(Weight::from_parts(615_330, 0).saturating_mul(r.into())) + // Standard Error: 150_000 + .saturating_add(Weight::from_parts(350_000, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -144,10 +144,10 @@ impl pallet_ranked_collective::WeightInfo for WeightInf /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) fn vote() -> Weight { // Proof Size summary in bytes: - // Measured: `666` + // Measured: `700` // Estimated: `317568` - // Minimum execution time: 46_255_000 picoseconds. - Weight::from_parts(47_590_000, 0) + // Minimum execution time: 57_000_000 picoseconds. + Weight::from_parts(57_000_000, 0) .saturating_add(Weight::from_parts(0, 317568)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) @@ -159,18 +159,19 @@ impl pallet_ranked_collective::WeightInfo for WeightInf /// Storage: `FellowshipCollective::Voting` (r:100 w:100) /// Proof: `FellowshipCollective::Voting` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 100]`. + /// The range of component `n` is `[0, 100]`. fn cleanup_poll(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `500 + n * (50 ±0)` - // Estimated: `4365 + n * (2540 ±0)` - // Minimum execution time: 14_975_000 picoseconds. - Weight::from_parts(17_408_362, 0) + // Measured: `343 + n * (52 ±0)` + // Estimated: `4365 + n * (2550 ±0)` + // Minimum execution time: 18_000_000 picoseconds. 
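+ // Editorial caveat: this file was regenerated with `--steps=2 --repeat=2`
+ // (previously 50/20), so the per-`n` slope below is fitted from only a handful
+ // of samples and the quoted `Standard Error` is a rough indication rather than
+ // a tight bound. The shape of the model is unchanged:
+ //   ref_time(n)   = 19_000_000 + 1_395_000 * n ps, plus (2 + n) reads and n writes
+ //   proof_size(n) = 4_365 + 2_550 * n bytes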
+ Weight::from_parts(19_000_000, 0)
.saturating_add(Weight::from_parts(0, 4365))
- // Standard Error: 3_134
- .saturating_add(Weight::from_parts(1_222_024, 0).saturating_mul(n.into()))
+ // Standard Error: 25_000
+ .saturating_add(Weight::from_parts(1_395_000, 0).saturating_mul(n.into()))
.saturating_add(T::DbWeight::get().reads(2))
.saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into())))
.saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into())))
- .saturating_add(Weight::from_parts(0, 2540).saturating_mul(n.into()))
+ .saturating_add(Weight::from_parts(0, 2550).saturating_mul(n.into()))
}
}
diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_referenda_ambassador_referenda.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_referenda_ambassador_referenda.rs
new file mode 100644
index 000000000000..fdc451c5d31c
--- /dev/null
+++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_referenda_ambassador_referenda.rs
@@ -0,0 +1,536 @@
+// Copyright Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus. If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `pallet_referenda`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `cob`, CPU: ``
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// target/release/polkadot-parachain
+// benchmark
+// pallet
+// --chain=collectives-polkadot-dev
+// --wasm-execution=compiled
+// --pallet=pallet_referenda
+// --extrinsic=*
+// --steps=2
+// --repeat=2
+// --json
+// --header=./file_header.txt
+// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `pallet_referenda`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_referenda::WeightInfo for WeightInfo<T> {
+ /// Storage: `AmbassadorCollective::Members` (r:1 w:0)
+ /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
+ /// Storage: `AmbassadorReferenda::ReferendumCount` (r:1 w:1)
+ /// Proof: `AmbassadorReferenda::ReferendumCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Agenda` (r:1 w:1)
+ /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`)
+ /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:0 w:1)
+ /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`)
+ fn submit() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `255`
+ // Estimated: `159279`
+ // Minimum execution time: 32_000_000 picoseconds.
+ Weight::from_parts(34_000_000, 0)
+ .saturating_add(Weight::from_parts(0, 159279))
+ .saturating_add(T::DbWeight::get().reads(3))
+ .saturating_add(T::DbWeight::get().writes(3))
+ }
+ /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1)
+ /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Agenda` (r:2 w:2)
+ /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`)
+ fn place_decision_deposit_preparing() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `366`
+ // Estimated: `317568`
+ // Minimum execution time: 63_000_000 picoseconds.
+ Weight::from_parts(68_000_000, 0)
+ .saturating_add(Weight::from_parts(0, 317568))
+ .saturating_add(T::DbWeight::get().reads(3))
+ .saturating_add(T::DbWeight::get().writes(3))
+ }
+ /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1)
+ /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`)
+ /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:0)
+ /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`)
+ /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1)
+ /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Agenda` (r:1 w:1)
+ /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`)
+ fn place_decision_deposit_queued() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `1165`
+ // Estimated: `159279`
+ // Minimum execution time: 97_000_000 picoseconds.
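+ // Editorial note: the constant below is the base weight the benchmark CLI
+ // derives from its runs, not the minimum quoted above; with only 2 repeats per
+ // step the two can diverge noticeably (123_000_000 here against a 97_000_000
+ // minimum), which is the expected price of the quick `--steps=2 --repeat=2` run.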
+ Weight::from_parts(123_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:0) + /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) + /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn place_decision_deposit_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `1173` + // Estimated: `159279` + // Minimum execution time: 104_000_000 picoseconds. + Weight::from_parts(111_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:1) + /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn place_decision_deposit_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `702` + // Estimated: `317568` + // Minimum execution time: 140_000_000 picoseconds. + Weight::from_parts(150_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:1) + /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn place_decision_deposit_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `601` + // Estimated: `317568` + // Minimum execution time: 81_000_000 picoseconds. 
+ Weight::from_parts(82_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + fn refund_decision_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `317` + // Estimated: `4365` + // Minimum execution time: 38_000_000 picoseconds. + Weight::from_parts(38_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + fn refund_submission_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `167` + // Estimated: `4365` + // Minimum execution time: 17_000_000 picoseconds. + Weight::from_parts(18_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn cancel() -> Weight { + // Proof Size summary in bytes: + // Measured: `311` + // Estimated: `317568` + // Minimum execution time: 44_000_000 picoseconds. 
+ Weight::from_parts(45_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AmbassadorReferenda::MetadataOf` (r:1 w:0) + /// Proof: `AmbassadorReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn kill() -> Weight { + // Proof Size summary in bytes: + // Measured: `626` + // Estimated: `317568` + // Minimum execution time: 183_000_000 picoseconds. + Weight::from_parts(187_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(11)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:0) + /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:1) + /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + fn one_fewer_deciding_queue_empty() -> Weight { + // Proof Size summary in bytes: + // Measured: `140` + // Estimated: `3636` + // Minimum execution time: 12_000_000 picoseconds. 
+ Weight::from_parts(12_000_000, 0) + .saturating_add(Weight::from_parts(0, 3636)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) + /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn one_fewer_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `1412` + // Estimated: `159279` + // Minimum execution time: 88_000_000 picoseconds. + Weight::from_parts(97_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) + /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn one_fewer_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `1412` + // Estimated: `159279` + // Minimum execution time: 87_000_000 picoseconds. + Weight::from_parts(92_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) + /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) + fn nudge_referendum_requeued_insertion() -> Weight { + // Proof Size summary in bytes: + // Measured: `935` + // Estimated: `4365` + // Minimum execution time: 43_000_000 picoseconds. 
+ Weight::from_parts(46_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) + /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) + fn nudge_referendum_requeued_slide() -> Weight { + // Proof Size summary in bytes: + // Measured: `935` + // Estimated: `4365` + // Minimum execution time: 39_000_000 picoseconds. + Weight::from_parts(43_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:0) + /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) + /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) + fn nudge_referendum_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `951` + // Estimated: `4365` + // Minimum execution time: 48_000_000 picoseconds. + Weight::from_parts(50_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:0) + /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) + /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) + fn nudge_referendum_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `959` + // Estimated: `4365` + // Minimum execution time: 42_000_000 picoseconds. + Weight::from_parts(48_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_no_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `263` + // Estimated: `159279` + // Minimum execution time: 28_000_000 picoseconds. 
+ Weight::from_parts(30_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_preparing() -> Weight { + // Proof Size summary in bytes: + // Measured: `311` + // Estimated: `159279` + // Minimum execution time: 26_000_000 picoseconds. + Weight::from_parts(28_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + fn nudge_referendum_timed_out() -> Weight { + // Proof Size summary in bytes: + // Measured: `208` + // Estimated: `4365` + // Minimum execution time: 19_000_000 picoseconds. + Weight::from_parts(20_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:1) + /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `546` + // Estimated: `159279` + // Minimum execution time: 42_000_000 picoseconds. + Weight::from_parts(46_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:1) + /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `647` + // Estimated: `159279` + // Minimum execution time: 87_000_000 picoseconds. 
+ Weight::from_parts(93_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `700` + // Estimated: `159279` + // Minimum execution time: 100_000_000 picoseconds. + Weight::from_parts(120_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_end_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `683` + // Estimated: `159279` + // Minimum execution time: 90_000_000 picoseconds. + Weight::from_parts(100_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_continue_not_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `700` + // Estimated: `159279` + // Minimum execution time: 77_000_000 picoseconds. 
+ Weight::from_parts(82_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_continue_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `704` + // Estimated: `159279` + // Minimum execution time: 68_000_000 picoseconds. + Weight::from_parts(77_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Lookup` (r:1 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + fn nudge_referendum_approved() -> Weight { + // Proof Size summary in bytes: + // Measured: `704` + // Estimated: `317568` + // Minimum execution time: 99_000_000 picoseconds. + Weight::from_parts(104_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_rejected() -> Weight { + // Proof Size summary in bytes: + // Measured: `700` + // Estimated: `159279` + // Minimum execution time: 87_000_000 picoseconds. 
+ Weight::from_parts(100_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::MetadataOf` (r:0 w:1) + /// Proof: `AmbassadorReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn set_some_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `419` + // Estimated: `4365` + // Minimum execution time: 23_000_000 picoseconds. + Weight::from_parts(25_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::MetadataOf` (r:1 w:1) + /// Proof: `AmbassadorReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn clear_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `285` + // Estimated: `4365` + // Minimum execution time: 20_000_000 picoseconds. + Weight::from_parts(21_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_referenda.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_referenda_fellowship_referenda.rs similarity index 87% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_referenda.rs rename to cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_referenda_fellowship_referenda.rs index 12d92a803cad..5b4aed06899f 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_referenda.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_referenda_fellowship_referenda.rs @@ -17,24 +17,21 @@ //! Autogenerated weights for `pallet_referenda` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 +//! HOSTNAME: `cob`, CPU: `` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/release/polkadot-parachain // benchmark // pallet // --chain=collectives-polkadot-dev // --wasm-execution=compiled // --pallet=pallet_referenda -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* -// --steps=50 -// --repeat=20 +// --steps=2 +// --repeat=2 // --json // --header=./file_header.txt // --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ @@ -60,10 +57,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) fn submit() -> Weight { // Proof Size summary in bytes: - // Measured: `355` + // Measured: `389` // Estimated: `159279` - // Minimum execution time: 29_271_000 picoseconds. - Weight::from_parts(30_285_000, 0) + // Minimum execution time: 34_000_000 picoseconds. + Weight::from_parts(36_000_000, 0) .saturating_add(Weight::from_parts(0, 159279)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) @@ -74,10 +71,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) fn place_decision_deposit_preparing() -> Weight { // Proof Size summary in bytes: - // Measured: `366` + // Measured: `400` // Estimated: `317568` - // Minimum execution time: 52_128_000 picoseconds. - Weight::from_parts(53_504_000, 0) + // Minimum execution time: 64_000_000 picoseconds. + Weight::from_parts(67_000_000, 0) .saturating_add(Weight::from_parts(0, 317568)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) @@ -92,10 +89,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) fn place_decision_deposit_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `2004` + // Measured: `2038` // Estimated: `159279` - // Minimum execution time: 110_018_000 picoseconds. - Weight::from_parts(114_369_000, 0) + // Minimum execution time: 99_000_000 picoseconds. + Weight::from_parts(109_000_000, 0) .saturating_add(Weight::from_parts(0, 159279)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) @@ -110,10 +107,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) fn place_decision_deposit_not_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `2045` + // Measured: `2079` // Estimated: `159279` - // Minimum execution time: 110_231_000 picoseconds. - Weight::from_parts(114_517_000, 0) + // Minimum execution time: 101_000_000 picoseconds. 
+ Weight::from_parts(111_000_000, 0) .saturating_add(Weight::from_parts(0, 159279)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) @@ -128,10 +125,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) fn place_decision_deposit_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `802` + // Measured: `836` // Estimated: `317568` - // Minimum execution time: 195_619_000 picoseconds. - Weight::from_parts(207_157_000, 0) + // Minimum execution time: 135_000_000 picoseconds. + Weight::from_parts(153_000_000, 0) .saturating_add(Weight::from_parts(0, 317568)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) @@ -146,10 +143,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) fn place_decision_deposit_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `701` + // Measured: `735` // Estimated: `317568` - // Minimum execution time: 64_020_000 picoseconds. - Weight::from_parts(65_463_000, 0) + // Minimum execution time: 78_000_000 picoseconds. + Weight::from_parts(82_000_000, 0) .saturating_add(Weight::from_parts(0, 317568)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) @@ -158,10 +155,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) fn refund_decision_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `317` + // Measured: `351` // Estimated: `4365` - // Minimum execution time: 30_701_000 picoseconds. - Weight::from_parts(31_528_000, 0) + // Minimum execution time: 38_000_000 picoseconds. + Weight::from_parts(39_000_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -170,10 +167,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) fn refund_submission_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `167` + // Measured: `201` // Estimated: `4365` - // Minimum execution time: 15_173_000 picoseconds. - Weight::from_parts(15_787_000, 0) + // Minimum execution time: 18_000_000 picoseconds. + Weight::from_parts(19_000_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -184,10 +181,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) fn cancel() -> Weight { // Proof Size summary in bytes: - // Measured: `311` + // Measured: `345` // Estimated: `317568` - // Minimum execution time: 37_886_000 picoseconds. - Weight::from_parts(38_679_000, 0) + // Minimum execution time: 45_000_000 picoseconds. 
+ Weight::from_parts(46_000_000, 0) .saturating_add(Weight::from_parts(0, 317568)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) @@ -214,10 +211,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn kill() -> Weight { // Proof Size summary in bytes: - // Measured: `517` + // Measured: `587` // Estimated: `317568` - // Minimum execution time: 152_111_000 picoseconds. - Weight::from_parts(155_738_000, 0) + // Minimum execution time: 185_000_000 picoseconds. + Weight::from_parts(196_000_000, 0) .saturating_add(Weight::from_parts(0, 317568)) .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(6)) @@ -228,10 +225,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) fn one_fewer_deciding_queue_empty() -> Weight { // Proof Size summary in bytes: - // Measured: `140` + // Measured: `174` // Estimated: `4277` - // Minimum execution time: 10_712_000 picoseconds. - Weight::from_parts(10_976_000, 0) + // Minimum execution time: 12_000_000 picoseconds. + Weight::from_parts(15_000_000, 0) .saturating_add(Weight::from_parts(0, 4277)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -246,10 +243,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) fn one_fewer_deciding_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `2418` + // Measured: `2452` // Estimated: `159279` - // Minimum execution time: 97_671_000 picoseconds. - Weight::from_parts(104_911_000, 0) + // Minimum execution time: 82_000_000 picoseconds. + Weight::from_parts(90_000_000, 0) .saturating_add(Weight::from_parts(0, 159279)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) @@ -264,10 +261,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) fn one_fewer_deciding_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `2418` + // Measured: `2452` // Estimated: `159279` - // Minimum execution time: 104_019_000 picoseconds. - Weight::from_parts(108_208_000, 0) + // Minimum execution time: 91_000_000 picoseconds. + Weight::from_parts(99_000_000, 0) .saturating_add(Weight::from_parts(0, 159279)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) @@ -278,10 +275,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) fn nudge_referendum_requeued_insertion() -> Weight { // Proof Size summary in bytes: - // Measured: `1807` + // Measured: `1841` // Estimated: `4365` - // Minimum execution time: 50_199_000 picoseconds. - Weight::from_parts(54_350_000, 0) + // Minimum execution time: 41_000_000 picoseconds. 
+ Weight::from_parts(44_000_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -292,10 +289,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) fn nudge_referendum_requeued_slide() -> Weight { // Proof Size summary in bytes: - // Measured: `1774` + // Measured: `1808` // Estimated: `4365` - // Minimum execution time: 52_459_000 picoseconds. - Weight::from_parts(54_382_000, 0) + // Minimum execution time: 46_000_000 picoseconds. + Weight::from_parts(55_000_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -308,10 +305,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) fn nudge_referendum_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `1790` + // Measured: `1824` // Estimated: `4365` - // Minimum execution time: 57_810_000 picoseconds. - Weight::from_parts(63_690_000, 0) + // Minimum execution time: 49_000_000 picoseconds. + Weight::from_parts(53_000_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -324,10 +321,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) fn nudge_referendum_not_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `1831` + // Measured: `1865` // Estimated: `4365` - // Minimum execution time: 56_778_000 picoseconds. - Weight::from_parts(59_556_000, 0) + // Minimum execution time: 51_000_000 picoseconds. + Weight::from_parts(54_000_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -338,10 +335,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) fn nudge_referendum_no_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `263` + // Measured: `297` // Estimated: `159279` - // Minimum execution time: 24_377_000 picoseconds. - Weight::from_parts(27_031_000, 0) + // Minimum execution time: 28_000_000 picoseconds. + Weight::from_parts(30_000_000, 0) .saturating_add(Weight::from_parts(0, 159279)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -352,10 +349,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) fn nudge_referendum_preparing() -> Weight { // Proof Size summary in bytes: - // Measured: `311` + // Measured: `345` // Estimated: `159279` - // Minimum execution time: 24_717_000 picoseconds. - Weight::from_parts(25_578_000, 0) + // Minimum execution time: 28_000_000 picoseconds. 
+ Weight::from_parts(29_000_000, 0) .saturating_add(Weight::from_parts(0, 159279)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -364,10 +361,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) fn nudge_referendum_timed_out() -> Weight { // Proof Size summary in bytes: - // Measured: `208` + // Measured: `242` // Estimated: `4365` - // Minimum execution time: 17_280_000 picoseconds. - Weight::from_parts(17_845_000, 0) + // Minimum execution time: 20_000_000 picoseconds. + Weight::from_parts(21_000_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -382,10 +379,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) fn nudge_referendum_begin_deciding_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `646` + // Measured: `680` // Estimated: `159279` - // Minimum execution time: 36_996_000 picoseconds. - Weight::from_parts(37_970_000, 0) + // Minimum execution time: 42_000_000 picoseconds. + Weight::from_parts(47_000_000, 0) .saturating_add(Weight::from_parts(0, 159279)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) @@ -400,10 +397,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) fn nudge_referendum_begin_deciding_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `747` + // Measured: `781` // Estimated: `159279` - // Minimum execution time: 91_681_000 picoseconds. - Weight::from_parts(98_640_000, 0) + // Minimum execution time: 90_000_000 picoseconds. + Weight::from_parts(95_000_000, 0) .saturating_add(Weight::from_parts(0, 159279)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) @@ -416,10 +413,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) fn nudge_referendum_begin_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `800` + // Measured: `834` // Estimated: `159279` - // Minimum execution time: 149_940_000 picoseconds. - Weight::from_parts(167_561_000, 0) + // Minimum execution time: 84_000_000 picoseconds. + Weight::from_parts(93_000_000, 0) .saturating_add(Weight::from_parts(0, 159279)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -432,10 +429,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) fn nudge_referendum_end_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `783` + // Measured: `817` // Estimated: `159279` - // Minimum execution time: 157_443_000 picoseconds. - Weight::from_parts(168_023_000, 0) + // Minimum execution time: 88_000_000 picoseconds. 
+ Weight::from_parts(98_000_000, 0) .saturating_add(Weight::from_parts(0, 159279)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -448,10 +445,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) fn nudge_referendum_continue_not_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `800` + // Measured: `834` // Estimated: `159279` - // Minimum execution time: 155_539_000 picoseconds. - Weight::from_parts(161_877_000, 0) + // Minimum execution time: 81_000_000 picoseconds. + Weight::from_parts(93_000_000, 0) .saturating_add(Weight::from_parts(0, 159279)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -464,10 +461,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) fn nudge_referendum_continue_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `804` + // Measured: `838` // Estimated: `159279` - // Minimum execution time: 82_000_000 picoseconds. - Weight::from_parts(87_101_000, 0) + // Minimum execution time: 74_000_000 picoseconds. + Weight::from_parts(77_000_000, 0) .saturating_add(Weight::from_parts(0, 159279)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -482,10 +479,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) fn nudge_referendum_approved() -> Weight { // Proof Size summary in bytes: - // Measured: `804` + // Measured: `838` // Estimated: `317568` - // Minimum execution time: 154_590_000 picoseconds. - Weight::from_parts(186_418_000, 0) + // Minimum execution time: 105_000_000 picoseconds. + Weight::from_parts(123_000_000, 0) .saturating_add(Weight::from_parts(0, 317568)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) @@ -498,10 +495,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) fn nudge_referendum_rejected() -> Weight { // Proof Size summary in bytes: - // Measured: `800` + // Measured: `834` // Estimated: `159279` - // Minimum execution time: 149_822_000 picoseconds. - Weight::from_parts(164_866_000, 0) + // Minimum execution time: 90_000_000 picoseconds. + Weight::from_parts(100_000_000, 0) .saturating_add(Weight::from_parts(0, 159279)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -514,10 +511,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn set_some_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `386` + // Measured: `453` // Estimated: `4365` - // Minimum execution time: 21_413_000 picoseconds. - Weight::from_parts(21_938_000, 0) + // Minimum execution time: 24_000_000 picoseconds. 
+ Weight::from_parts(24_000_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -528,10 +525,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `285` + // Measured: `319` // Estimated: `4365` - // Minimum execution time: 18_927_000 picoseconds. - Weight::from_parts(19_423_000, 0) + // Minimum execution time: 21_000_000 picoseconds. + Weight::from_parts(23_000_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_salary_ambassador_salary.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_salary_ambassador_salary.rs new file mode 100644 index 000000000000..0522420f2f51 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_salary_ambassador_salary.rs @@ -0,0 +1,190 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_salary` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/release/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_salary +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_salary`. +pub struct WeightInfo(PhantomData); +impl pallet_salary::WeightInfo for WeightInfo { + /// Storage: `AmbassadorSalary::Status` (r:1 w:1) + /// Proof: `AmbassadorSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + fn init() -> Weight { + // Proof Size summary in bytes: + // Measured: `109` + // Estimated: `1541` + // Minimum execution time: 12_000_000 picoseconds. 
+ Weight::from_parts(14_000_000, 0) + .saturating_add(Weight::from_parts(0, 1541)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorSalary::Status` (r:1 w:1) + /// Proof: `AmbassadorSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + fn bump() -> Weight { + // Proof Size summary in bytes: + // Measured: `191` + // Estimated: `1541` + // Minimum execution time: 15_000_000 picoseconds. + Weight::from_parts(16_000_000, 0) + .saturating_add(Weight::from_parts(0, 1541)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorSalary::Status` (r:1 w:0) + /// Proof: `AmbassadorSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorSalary::Claimant` (r:1 w:1) + /// Proof: `AmbassadorSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) + fn induct() -> Weight { + // Proof Size summary in bytes: + // Measured: `400` + // Estimated: `3551` + // Minimum execution time: 23_000_000 picoseconds. + Weight::from_parts(23_000_000, 0) + .saturating_add(Weight::from_parts(0, 3551)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorSalary::Status` (r:1 w:1) + /// Proof: `AmbassadorSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorSalary::Claimant` (r:1 w:1) + /// Proof: `AmbassadorSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) + fn register() -> Weight { + // Proof Size summary in bytes: + // Measured: `467` + // Estimated: `3551` + // Minimum execution time: 27_000_000 picoseconds. 
+ Weight::from_parts(28_000_000, 0) + .saturating_add(Weight::from_parts(0, 3551)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorSalary::Status` (r:1 w:1) + /// Proof: `AmbassadorSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorSalary::Claimant` (r:1 w:1) + /// Proof: `AmbassadorSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) + /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn payout() -> Weight { + // Proof Size summary in bytes: + // Measured: `879` + // Estimated: `4344` + // Minimum execution time: 68_000_000 picoseconds. 
+ Weight::from_parts(72_000_000, 0) + .saturating_add(Weight::from_parts(0, 4344)) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(7)) + } + /// Storage: `AmbassadorSalary::Status` (r:1 w:1) + /// Proof: `AmbassadorSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorSalary::Claimant` (r:1 w:1) + /// Proof: `AmbassadorSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) + /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn payout_other() -> Weight { + // Proof Size summary in bytes: + // Measured: `879` + // Estimated: `4344` + // Minimum execution time: 69_000_000 picoseconds. + Weight::from_parts(70_000_000, 0) + .saturating_add(Weight::from_parts(0, 4344)) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(7)) + } + /// Storage: `AmbassadorSalary::Status` (r:1 w:1) + /// Proof: `AmbassadorSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorSalary::Claimant` (r:1 w:1) + /// Proof: `AmbassadorSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn check_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `479` + // Estimated: `3944` + // Minimum execution time: 27_000_000 picoseconds. 
+ Weight::from_parts(28_000_000, 0) + .saturating_add(Weight::from_parts(0, 3944)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_salary.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_salary_fellowship_salary.rs similarity index 86% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_salary.rs rename to cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_salary_fellowship_salary.rs index 3a6825cf6415..9bb7e68d3145 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_salary.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_salary_fellowship_salary.rs @@ -17,24 +17,21 @@ //! Autogenerated weights for `pallet_salary` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 +//! HOSTNAME: `cob`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/release/polkadot-parachain // benchmark // pallet // --chain=collectives-polkadot-dev // --wasm-execution=compiled // --pallet=pallet_salary -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* -// --steps=50 -// --repeat=20 +// --steps=2 +// --repeat=2 // --json // --header=./file_header.txt // --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ @@ -56,8 +53,8 @@ impl pallet_salary::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1541` - // Minimum execution time: 10_579_000 picoseconds. - Weight::from_parts(10_898_000, 0) + // Minimum execution time: 13_000_000 picoseconds. + Weight::from_parts(17_000_000, 0) .saturating_add(Weight::from_parts(0, 1541)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -68,8 +65,8 @@ impl pallet_salary::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `224` // Estimated: `1541` - // Minimum execution time: 12_723_000 picoseconds. - Weight::from_parts(13_221_000, 0) + // Minimum execution time: 15_000_000 picoseconds. + Weight::from_parts(18_000_000, 0) .saturating_add(Weight::from_parts(0, 1541)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -84,8 +81,8 @@ impl pallet_salary::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `395` // Estimated: `3551` - // Minimum execution time: 18_522_000 picoseconds. - Weight::from_parts(19_120_000, 0) + // Minimum execution time: 22_000_000 picoseconds. 
+ Weight::from_parts(25_000_000, 0) .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -100,8 +97,8 @@ impl pallet_salary::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `462` // Estimated: `3551` - // Minimum execution time: 22_270_000 picoseconds. - Weight::from_parts(23_325_000, 0) + // Minimum execution time: 26_000_000 picoseconds. + Weight::from_parts(29_000_000, 0) .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -132,11 +129,11 @@ impl pallet_salary::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn payout() -> Weight { // Proof Size summary in bytes: - // Measured: `703` - // Estimated: `4168` - // Minimum execution time: 54_436_000 picoseconds. - Weight::from_parts(56_347_000, 0) - .saturating_add(Weight::from_parts(0, 4168)) + // Measured: `774` + // Estimated: `4239` + // Minimum execution time: 67_000_000 picoseconds. + Weight::from_parts(74_000_000, 0) + .saturating_add(Weight::from_parts(0, 4239)) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(7)) } @@ -166,11 +163,11 @@ impl pallet_salary::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn payout_other() -> Weight { // Proof Size summary in bytes: - // Measured: `703` - // Estimated: `4168` - // Minimum execution time: 54_140_000 picoseconds. - Weight::from_parts(56_312_000, 0) - .saturating_add(Weight::from_parts(0, 4168)) + // Measured: `774` + // Estimated: `4239` + // Minimum execution time: 66_000_000 picoseconds. + Weight::from_parts(71_000_000, 0) + .saturating_add(Weight::from_parts(0, 4239)) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(7)) } @@ -182,11 +179,11 @@ impl pallet_salary::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn check_payment() -> Weight { // Proof Size summary in bytes: - // Measured: `478` - // Estimated: `3943` - // Minimum execution time: 24_650_000 picoseconds. - Weight::from_parts(25_242_000, 0) - .saturating_add(Weight::from_parts(0, 3943)) + // Measured: `512` + // Estimated: `3977` + // Minimum execution time: 26_000_000 picoseconds. 
+ Weight::from_parts(27_000_000, 0) + .saturating_add(Weight::from_parts(0, 3977)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/xcm_config.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/xcm_config.rs index b4db73e3ab44..02e4913fcd9c 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/xcm_config.rs @@ -26,15 +26,16 @@ use frame_system::EnsureRoot; use pallet_xcm::XcmPassthrough; use parachains_common::{impls::ToStakingPot, xcm_config::ConcreteNativeAssetFrom}; use polkadot_parachain_primitives::primitives::Sibling; +use polkadot_runtime_constants::xcm::body::FELLOWSHIP_ADMIN_INDEX; use xcm::latest::prelude::*; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, CurrencyAdapter, DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, FixedWeightBounds, IsConcrete, - OriginToPluralityVoice, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, - SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, - SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, - UsingComponents, WithComputedOrigin, WithUniqueTopic, + LocatableAssetId, OriginToPluralityVoice, ParentAsSuperuser, ParentIsPreset, + RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, + TrailingSetTopicAsId, UsingComponents, WithComputedOrigin, WithUniqueTopic, }; use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; @@ -46,6 +47,17 @@ parameter_types! { X2(GlobalConsensus(RelayNetwork::get().unwrap()), Parachain(ParachainInfo::parachain_id().into())); pub CheckingAccount: AccountId = PolkadotXcm::check_account(); pub const GovernanceLocation: MultiLocation = MultiLocation::parent(); + pub const FellowshipAdminBodyId: BodyId = BodyId::Index(FELLOWSHIP_ADMIN_INDEX); + pub AssetHub: MultiLocation = (Parent, Parachain(1000)).into(); + pub AssetHubUsdtId: AssetId = (PalletInstance(50), GeneralIndex(1984)).into(); + pub UsdtAssetHub: LocatableAssetId = LocatableAssetId { + location: AssetHub::get(), + asset_id: AssetHubUsdtId::get(), + }; + pub DotAssetHub: LocatableAssetId = LocatableAssetId { + location: AssetHub::get(), + asset_id: DotLocation::get().into(), + }; } /// Type for specifying how a `MultiLocation` can be converted into an `AccountId`. 
This is used From a56fd32e0affbe9cb61abb5ee3ec0a0bd15f5bf0 Mon Sep 17 00:00:00 2001 From: tmpolaczyk <44604217+tmpolaczyk@users.noreply.github.com> Date: Thu, 21 Sep 2023 12:22:42 +0200 Subject: [PATCH 36/69] Add graceful shutdown to prometheus server (#1637) Fixes prometheus server not stopping if there are open connections --- substrate/utils/prometheus/src/lib.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/substrate/utils/prometheus/src/lib.rs b/substrate/utils/prometheus/src/lib.rs index 581666635ab5..ed1f9137aec4 100644 --- a/substrate/utils/prometheus/src/lib.rs +++ b/substrate/utils/prometheus/src/lib.rs @@ -111,10 +111,16 @@ async fn init_prometheus_with_listener( } }); - let server = Server::builder(listener).serve(service); + let (signal, on_exit) = tokio::sync::oneshot::channel::<()>(); + let server = Server::builder(listener).serve(service).with_graceful_shutdown(async { + let _ = on_exit.await; + }); let result = server.await.map_err(Into::into); + // Gracefully shutdown server, otherwise the server does not stop if it has open connections + let _ = signal.send(()); + result } From 93bb4926b34cf46401f8c5f2b7c01dd07e19309c Mon Sep 17 00:00:00 2001 From: Javier Viola Date: Thu, 21 Sep 2023 12:20:22 -0300 Subject: [PATCH 37/69] bump zombienet version (#1655) Includes: - fix for https://github.com/paritytech/zombienet/issues/1360 (root cause of https://github.com/paritytech/polkadot-sdk/issues/1647) - Improve default concurrency to spawn nodes. Also, fix `var` name in CI file. cc @skunert --- .gitlab-ci.yml | 2 +- .gitlab/pipeline/zombienet/cumulus.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 10dd69f12a77..ee6b9c987333 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -30,7 +30,7 @@ variables: RUSTY_CACHIER_COMPRESSION_METHOD: zstd NEXTEST_FAILURE_OUTPUT: immediate-final NEXTEST_SUCCESS_OUTPUT: final - ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.67" + ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.68" DOCKER_IMAGES_VERSION: "${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHORT_SHA}" default: diff --git a/.gitlab/pipeline/zombienet/cumulus.yml b/.gitlab/pipeline/zombienet/cumulus.yml index 3347eda1baae..3f2c6f64fbfe 100644 --- a/.gitlab/pipeline/zombienet/cumulus.yml +++ b/.gitlab/pipeline/zombienet/cumulus.yml @@ -5,7 +5,7 @@ before_script: - echo "Zombie-net Tests Config" - echo "${ZOMBIENET_IMAGE}" - - echo "${RELAY_IMAGE}" + - echo "${POLKADOT_IMAGE}" - echo "${COL_IMAGE}" - echo "${GH_DIR}" - echo "${LOCAL_DIR}" From cb7559f698ad5059a501fd620d0bacb5867f9b0c Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Thu, 21 Sep 2023 20:25:48 +0200 Subject: [PATCH 38/69] [Trivial] Use 'into_inner' for WeakBoundedVec (#1664) Prevents authority set clone --- substrate/frame/babe/src/lib.rs | 9 ++++----- substrate/frame/babe/src/tests.rs | 2 +- substrate/frame/grandpa/src/lib.rs | 10 ++++------ 3 files changed, 9 insertions(+), 12 deletions(-) diff --git a/substrate/frame/babe/src/lib.rs b/substrate/frame/babe/src/lib.rs index 9549fac9fe2b..4b99cd517968 100644 --- a/substrate/frame/babe/src/lib.rs +++ b/substrate/frame/babe/src/lib.rs @@ -590,7 +590,6 @@ impl Pallet { if authorities.is_empty() { log::warn!(target: LOG_TARGET, "Ignoring empty epoch change."); - return } @@ -664,7 +663,7 @@ impl Pallet { let next_randomness = NextRandomness::::get(); let next_epoch = NextEpochDescriptor { - authorities: next_authorities.to_vec(), + authorities: next_authorities.into_inner(), randomness: 
next_randomness, }; Self::deposit_consensus(ConsensusLog::NextEpochData(next_epoch)); @@ -700,7 +699,7 @@ impl Pallet { epoch_index: EpochIndex::::get(), start_slot: Self::current_epoch_start(), duration: T::EpochDuration::get(), - authorities: Self::authorities().to_vec(), + authorities: Self::authorities().into_inner(), randomness: Self::randomness(), config: EpochConfig::::get() .expect("EpochConfig is initialized in genesis; we never `take` or `kill` it; qed"), @@ -725,7 +724,7 @@ impl Pallet { epoch_index: next_epoch_index, start_slot, duration: T::EpochDuration::get(), - authorities: NextAuthorities::::get().to_vec(), + authorities: NextAuthorities::::get().into_inner(), randomness: NextRandomness::::get(), config: NextEpochConfig::::get().unwrap_or_else(|| { EpochConfig::::get().expect( @@ -778,7 +777,7 @@ impl Pallet { // we use the same values as genesis because we haven't collected any // randomness yet. let next = NextEpochDescriptor { - authorities: Self::authorities().to_vec(), + authorities: Self::authorities().into_inner(), randomness: Self::randomness(), }; diff --git a/substrate/frame/babe/src/tests.rs b/substrate/frame/babe/src/tests.rs index ae0c3e3873c5..ec4e6fd97270 100644 --- a/substrate/frame/babe/src/tests.rs +++ b/substrate/frame/babe/src/tests.rs @@ -95,7 +95,7 @@ fn first_block_epoch_zero_start() { let consensus_log = sp_consensus_babe::ConsensusLog::NextEpochData( sp_consensus_babe::digests::NextEpochDescriptor { - authorities: Babe::authorities().to_vec(), + authorities: Babe::authorities().into_inner(), randomness: Babe::randomness(), }, ); diff --git a/substrate/frame/grandpa/src/lib.rs b/substrate/frame/grandpa/src/lib.rs index 2a0e707ac414..95d1c8aa6094 100644 --- a/substrate/frame/grandpa/src/lib.rs +++ b/substrate/frame/grandpa/src/lib.rs @@ -129,18 +129,16 @@ pub mod pallet { if let Some(pending_change) = >::get() { // emit signal if we're at the block that scheduled the change if block_number == pending_change.scheduled_at { + let next_authorities = pending_change.next_authorities.to_vec(); if let Some(median) = pending_change.forced { Self::deposit_log(ConsensusLog::ForcedChange( median, - ScheduledChange { - delay: pending_change.delay, - next_authorities: pending_change.next_authorities.to_vec(), - }, + ScheduledChange { delay: pending_change.delay, next_authorities }, )) } else { Self::deposit_log(ConsensusLog::ScheduledChange(ScheduledChange { delay: pending_change.delay, - next_authorities: pending_change.next_authorities.to_vec(), + next_authorities, })); } } @@ -149,7 +147,7 @@ pub mod pallet { if block_number == pending_change.scheduled_at + pending_change.delay { Self::set_grandpa_authorities(&pending_change.next_authorities); Self::deposit_event(Event::NewAuthorities { - authority_set: pending_change.next_authorities.to_vec(), + authority_set: pending_change.next_authorities.into_inner(), }); >::kill(); } From cfc6cc65cc133f69f472149b2449a673597e8530 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 21 Sep 2023 23:45:06 +0200 Subject: [PATCH 39/69] Bump quinn-proto from 0.9.4 to 0.9.5 (#1670) Bumps [quinn-proto](https://github.com/quinn-rs/quinn) from 0.9.4 to 0.9.5.
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=quinn-proto&package-manager=cargo&previous-version=0.9.4&new-version=0.9.5)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)

You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/paritytech/polkadot-sdk/network/alerts).
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Liam Aharon --- Cargo.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ddb19c24e1f6..ccf546b16b6b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13596,9 +13596,9 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f31999cfc7927c4e212e60fd50934ab40e8e8bfd2d493d6095d2d306bc0764d9" +checksum = "c956be1b23f4261676aed05a0046e204e8a6836e50203902683a718af0797989" dependencies = [ "bytes", "rand 0.8.5", @@ -19257,7 +19257,7 @@ checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ "cfg-if", "digest 0.10.7", - "rand 0.8.5", + "rand 0.7.3", "static_assertions", ] From fb0fd3e62445eb2dee2b2456a0c8574d1ecdcc73 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Fri, 22 Sep 2023 12:19:09 +0200 Subject: [PATCH 40/69] Use correct target dir in polkadot readme (#1643) Closes #1372 This fixes the target directories in the polkadot readme. I verified manually the Ubuntu based install works on a fresh cloud machine. Build from source also worked as described. Since #1304 landed `--dev` also works again (not on v1.1.0 though), so I would consider the install instructions fixed. --- polkadot/README.md | 87 +++++++-------------- polkadot/scripts/init.sh | 16 ---- substrate/.maintain/init.sh | 12 --- substrate/bin/node-template/scripts/init.sh | 12 --- 4 files changed, 29 insertions(+), 98 deletions(-) delete mode 100755 polkadot/scripts/init.sh delete mode 100755 substrate/.maintain/init.sh delete mode 100755 substrate/bin/node-template/scripts/init.sh diff --git a/polkadot/README.md b/polkadot/README.md index 93e93cfba0ee..3c234bb8e3f4 100644 --- a/polkadot/README.md +++ b/polkadot/README.md @@ -2,29 +2,17 @@ Implementation of a node in Rust based on the Substrate framework. -> **NOTE:** In 2018, we split our implementation of "Polkadot" from its development framework > -"Substrate". See the [Substrate][substrate-repo] repo for git history prior to 2018. - -[substrate-repo]: https://github.com/paritytech/substrate - -This repo contains runtimes for the Polkadot, Kusama, and Westend networks. The README provides -information about installing the `polkadot` binary and developing on the codebase. For more specific -guides, like how to be a validator, see the [Polkadot -Wiki](https://wiki.polkadot.network/docs/getting-started). +The README provides information about installing the `polkadot` binary and developing on the codebase. For more specific +guides, like how to run a validator node, see the [Polkadot Wiki](https://wiki.polkadot.network/docs/getting-started). ## Installation +### Using a pre-compiled binary + If you just wish to run a Polkadot node without compiling it yourself, you may either run the latest binary from our [releases](https://github.com/paritytech/polkadot-sdk/releases) page, or install Polkadot from one of our package repositories. -Installation from the Debian repository will create a `systemd` service that can be used to run a -Polkadot node. This is disabled by default, and can be started by running `systemctl start polkadot` -on demand (use `systemctl enable polkadot` to make it auto-start after reboot). By default, it will -run as the `polkadot` user. Command-line flags passed to the binary can be customized by editing -`/etc/default/polkadot`. 
This file will not be overwritten on updating Polkadot. You may also just -run the node directly from the command-line. ### Debian-based (Debian, Ubuntu) Currently supports Debian 10 (Buster) and Ubuntu 20.04 (Focal), and derivatives. Run the following @@ -45,8 +33,18 @@ apt install polkadot ``` +Installation from the Debian repository will create a `systemd` service that can be used to run a +Polkadot node. This is disabled by default, and can be started by running `systemctl start polkadot` +on demand (use `systemctl enable polkadot` to make it auto-start after reboot). By default, it will +run as the `polkadot` user. Command-line flags passed to the binary can be customized by editing +`/etc/default/polkadot`. This file will not be overwritten on updating Polkadot. You may also just +run the node directly from the command-line. + ## Building +Since the Polkadot node is based on Substrate, first set up your build environment according to the +[Substrate installation instructions](https://docs.substrate.io/install/). + ### Install via Cargo Make sure you have the support software installed from the **Build from Source** section below this @@ -60,25 +58,6 @@ cargo install --git https://github.com/paritytech/polkadot-sdk --tag <version> p ### Build from Source -If you'd like to build from source, first install Rust. You may need to add Cargo's bin directory to -your PATH environment variable. Restarting your computer will do this for you automatically. - -```bash -curl https://sh.rustup.rs -sSf | sh -``` - -If you already have Rust installed, make sure you're using the latest version by running: - -```bash -rustup update -``` - -Once done, finish installing the support software: - -```bash -sudo apt install build-essential git clang libclang-dev pkg-config libssl-dev protobuf-compiler -``` - Build the client by cloning this repository and running the following commands from the root directory of the repo: @@ -88,9 +67,6 @@ git checkout <latest tagged release> cargo build --release ``` -**Note:** compilation is a memory intensive process. We recommend having 4 GiB of physical RAM or -swap available (keep in mind that if a build hits swap it tends to be very slow). - **Note:** if you want to move the built `polkadot` binary somewhere (e.g. into $PATH) you will also need to move `polkadot-execute-worker` and `polkadot-prepare-worker`. You can let cargo do all this for you by running: @@ -123,7 +99,7 @@ This repo supports runtimes for Polkadot, Kusama, and Westend. Connect to the global Polkadot Mainnet network by running: ```bash -./target/release/polkadot --chain=polkadot +../target/release/polkadot --chain=polkadot ``` You can see your node on [telemetry] (set a custom name with `--name "my custom name"`). @@ -135,7 +111,7 @@ You can see your node on [telemetry] (set a custom name with `--name "my custom Connect to the global Kusama canary network by running: ```bash -./target/release/polkadot --chain=kusama +../target/release/polkadot --chain=kusama ``` You can see your node on [telemetry] (set a custom name with `--name "my custom name"`). @@ -147,7 +123,7 @@ You can see your node on [telemetry] (set a custom name with `--name "my custom Connect to the global Westend testnet by running: ```bash -./target/release/polkadot --chain=westend +../target/release/polkadot --chain=westend ``` You can see your node on [telemetry] (set a custom name with `--name "my custom name"`).
@@ -157,20 +133,14 @@ You can see your node on [telemetry] (set a custom name with `--name "my custom ### Obtaining DOTs If you want to do anything on Polkadot, Kusama, or Westend, then you'll need to get an account and -some DOT, KSM, or WND tokens, respectively. See the [claims -instructions](https://claims.polkadot.network/) for Polkadot if you have DOTs to claim. For -Westend's WND tokens, see the faucet -[instructions](https://wiki.polkadot.network/docs/learn-DOT#getting-westies) on the Wiki. +some DOT, KSM, or WND tokens, respectively. Follow the +[instructions](https://wiki.polkadot.network/docs/learn-DOT#obtaining-testnet-tokens) on the Wiki to obtain tokens for +your testnet of choice. ## Hacking on Polkadot If you'd actually like to hack on Polkadot, you can grab the source code and build it. Ensure you -have Rust and the support software installed. This script will install or update Rust and install -the required dependencies (this may take up to 30 minutes on Mac machines): - -```bash -curl https://getsubstrate.io -sSf | bash -s -- --fast -``` +have Rust and the support software installed. Then, grab the Polkadot source code: @@ -183,14 +153,15 @@ Then build the code. You will need to build in release mode (`--release`) to sta use debug mode for development (faster compile times for development and testing). ```bash -./scripts/init.sh # Install WebAssembly. Update Rust -cargo build # Builds all native code +cargo build ``` You can run the tests if you like: ```bash -cargo test --workspace --release +cargo test --workspace --profile testnet +# Or run only the tests for specified crated +cargo test -p --profile testnet ``` You can start a development chain with: @@ -202,7 +173,7 @@ cargo run --bin polkadot -- --dev Detailed logs may be shown by running the node with the following environment variables set: ```bash -RUST_LOG=debug RUST_BACKTRACE=1 cargo run --bin polkadot -- --dev +RUST_LOG=debug RUST_BACKTRACE=1 cargo run --bin polkadot -- --dev ``` ### Development @@ -222,13 +193,13 @@ If you want to see the multi-node consensus algorithm in action locally, then yo testnet. You'll need two terminals open. In one, run: ```bash -polkadot --chain=polkadot-local --alice -d /tmp/alice +polkadot --dev --alice -d /tmp/alice ``` And in the other, run: ```bash -polkadot --chain=polkadot-local --bob -d /tmp/bob --port 30334 --bootnodes '/ip4/127.0.0.1/tcp/30333/p2p/ALICE_BOOTNODE_ID_HERE' +polkadot --dev --bob -d /tmp/bob --bootnodes '/ip4/127.0.0.1/tcp/30333/p2p/ALICE_BOOTNODE_ID_HERE' ``` Ensure you replace `ALICE_BOOTNODE_ID_HERE` with the node ID from the output of the first terminal. @@ -242,7 +213,7 @@ that we currently maintain. ### Using Docker -[Using Docker](doc/docker.md) +[Using Docker](../docs/docker.md) ### Shell Completion diff --git a/polkadot/scripts/init.sh b/polkadot/scripts/init.sh deleted file mode 100755 index cf5ecf97926f..000000000000 --- a/polkadot/scripts/init.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env bash - -set -e - -echo "*** Initializing WASM build environment" - -if [ -z $CI_PROJECT_NAME ] ; then - rustup update nightly - rustup update stable -fi - -rustup target add wasm32-unknown-unknown --toolchain nightly - -# Install wasm-gc. It's useful for stripping slimming down wasm binaries. 
-command -v wasm-gc || \ - cargo +nightly install --git https://github.com/alexcrichton/wasm-gc --force diff --git a/substrate/.maintain/init.sh b/substrate/.maintain/init.sh deleted file mode 100755 index 1405a41ef333..000000000000 --- a/substrate/.maintain/init.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash - -set -e - -echo "*** Initializing WASM build environment" - -if [ -z $CI_PROJECT_NAME ] ; then - rustup update nightly - rustup update stable -fi - -rustup target add wasm32-unknown-unknown --toolchain nightly diff --git a/substrate/bin/node-template/scripts/init.sh b/substrate/bin/node-template/scripts/init.sh deleted file mode 100755 index f976f7235d70..000000000000 --- a/substrate/bin/node-template/scripts/init.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash -# This script is meant to be run on Unix/Linux based systems -set -e - -echo "*** Initializing WASM build environment" - -if [ -z $CI_PROJECT_NAME ] ; then - rustup update nightly - rustup update stable -fi - -rustup target add wasm32-unknown-unknown --toolchain nightly From b09ab37119acf0f3c4c8b9e3662255afd6d0ae53 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Fri, 22 Sep 2023 13:48:48 +0200 Subject: [PATCH 41/69] update `genesis_config` related docs and tests and error messages (#1642) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Follow-up to https://github.com/paritytech/substrate/pull/14306. I hope this also showcases the important message of: **It is really not that hard to make the example code in rust-docs compile, and therefore remain correct. Please embrace this :)** It moves the documentation of proc macros to their re-exports, so that they can link to other items in frame-support. This is a pattern that we should embrace for all of our macro docs, and apply in PRs like https://github.com/paritytech/substrate/pull/13987 as well. --------- Co-authored-by: Gonçalo Pestana Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> Co-authored-by: command-bot <> --- substrate/frame/support/procedural/src/lib.rs | 46 ----------- .../src/pallet/expand/genesis_build.rs | 2 +- .../procedural/src/pallet/parse/helper.rs | 2 +- substrate/frame/support/src/lib.rs | 76 ++++++++++++++++++- .../pallet_ui/genesis_invalid_generic.stderr | 2 +- 5 files changed, 76 insertions(+), 52 deletions(-) diff --git a/substrate/frame/support/procedural/src/lib.rs b/substrate/frame/support/procedural/src/lib.rs index 9957cf1cff85..b7aaf7c7cc1b 100644 --- a/substrate/frame/support/procedural/src/lib.rs +++ b/substrate/frame/support/procedural/src/lib.rs @@ -1417,57 +1417,11 @@ pub fn type_value(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The `#[pallet::genesis_config]` attribute allows you to define the genesis configuration -/// for the pallet. -/// -/// Item is defined as either an enum or a struct. It needs to be public and implement the -/// trait `GenesisBuild` with [`#[pallet::genesis_build]`](`macro@genesis_build`). The type -/// generics are constrained to be either none, or `T` or `T: Config`. -/// -/// E.g: -/// -/// ```ignore
-/// #[pallet::genesis_config]
-/// pub struct GenesisConfig {
-/// _myfield: BalanceOf,
-/// }
-/// ``` #[proc_macro_attribute] pub fn genesis_config(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The `#[pallet::genesis_build]` attribute allows you to define how `genesis_configuration` -/// is built. 
This takes as input the `GenesisConfig` type (as `self`) and constructs the pallet's -/// initial state. -/// -/// The impl must be defined as: -/// -/// ```ignore -/// #[pallet::genesis_build] -/// impl GenesisBuild for GenesisConfig<$maybe_generics> { -/// fn build(&self) { $expr } -/// } -/// ``` -/// -/// I.e. a trait implementation with generic `T: Config`, of trait `GenesisBuild` on -/// type `GenesisConfig` with generics none or `T`. -/// -/// E.g.: -/// -/// ```ignore -/// #[pallet::genesis_build] -/// impl GenesisBuild for GenesisConfig { -/// fn build(&self) {} -/// } -/// ``` -/// -/// ## Macro expansion -/// -/// The macro will add the following attribute: -/// * `#[cfg(feature = "std")]` -/// -/// The macro will implement `sp_runtime::BuildStorage`. #[proc_macro_attribute] pub fn genesis_build(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() diff --git a/substrate/frame/support/procedural/src/pallet/expand/genesis_build.rs b/substrate/frame/support/procedural/src/pallet/expand/genesis_build.rs index 15ddfcf1d49d..248e83469435 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/genesis_build.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/genesis_build.rs @@ -38,7 +38,7 @@ pub fn expand_genesis_build(def: &mut Def) -> proc_macro2::TokenStream { #[cfg(feature = "std")] impl<#type_impl_gen> #frame_support::sp_runtime::BuildStorage for #gen_cfg_ident<#gen_cfg_use_gen> #where_clause { - fn assimilate_storage(&self, storage: &mut sp_runtime::Storage) -> std::result::Result<(), std::string::String> { + fn assimilate_storage(&self, storage: &mut #frame_support::sp_runtime::Storage) -> std::result::Result<(), std::string::String> { #frame_support::__private::BasicExternalities::execute_with_storage(storage, || { self.build(); Ok(()) diff --git a/substrate/frame/support/procedural/src/pallet/parse/helper.rs b/substrate/frame/support/procedural/src/pallet/parse/helper.rs index 1e6e83d7eeba..bfa19d8ddc39 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/helper.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/helper.rs @@ -494,7 +494,7 @@ pub fn check_type_def_gen( /// return the instance if found for `GenesisBuild` /// return None for BuildGenesisConfig pub fn check_genesis_builder_usage(type_: &syn::Path) -> syn::Result> { - let expected = "expected `GenesisBuild` or `GenesisBuild`"; + let expected = "expected `BuildGenesisConfig` (or the deprecated `GenesisBuild` or `GenesisBuild`)"; pub struct Checker(Option); impl syn::parse::Parse for Checker { fn parse(input: syn::parse::ParseStream) -> syn::Result { diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index a7106780e021..8c4b3de49b5d 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -2188,10 +2188,80 @@ pub mod pallet_macros { pub use frame_support_procedural::{ call_index, compact, composite_enum, config, constant, disable_frame_system_supertrait_check, error, event, extra_constants, generate_deposit, - generate_store, genesis_build, genesis_config, getter, hooks, import_section, inherent, - no_default, no_default_bounds, origin, pallet_section, storage, storage_prefix, - storage_version, type_value, unbounded, validate_unsigned, weight, whitelist_storage, + generate_store, getter, hooks, import_section, inherent, no_default, no_default_bounds, + origin, pallet_section, storage, storage_prefix, storage_version, type_value, unbounded, + validate_unsigned, weight, whitelist_storage, }; 
+ + /// Allows you to define the genesis configuration for the pallet. + /// + /// Item is defined as either an enum or a struct. It needs to be public and implement the + /// trait [`frame_support::traits::BuildGenesisConfig`]. + /// + /// See [`genesis_build`] for an example. + pub use frame_support_procedural::genesis_config; + + /// Allows you to define how the state of your pallet at genesis is built. This + /// takes as input the `GenesisConfig` type (as `self`) and constructs the pallet's initial + /// state. + /// + /// The fields of the `GenesisConfig` can in turn be populated by the chain-spec. + /// + /// ## Example: + /// + /// ``` + /// #[frame_support::pallet] + /// pub mod pallet { + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// # #[pallet::pallet] + /// # pub struct Pallet(_); + /// # use frame_support::traits::BuildGenesisConfig; + /// #[pallet::genesis_config] + /// #[derive(frame_support::DefaultNoBound)] + /// pub struct GenesisConfig { + /// foo: Vec + /// } + /// + /// #[pallet::genesis_build] + /// impl BuildGenesisConfig for GenesisConfig { + /// fn build(&self) { + /// // use &self to access fields. + /// let foo = &self.foo; + /// todo!() + /// } + /// } + /// } + /// ``` + /// + /// ## Former Usage + /// + /// Prior to , the following syntax was used. + /// This is deprecated and will soon be removed. + /// + /// ``` + /// #[frame_support::pallet] + /// pub mod pallet { + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// # #[pallet::pallet] + /// # pub struct Pallet(_); + /// # use frame_support::traits::GenesisBuild; + /// #[pallet::genesis_config] + /// #[derive(frame_support::DefaultNoBound)] + /// pub struct GenesisConfig { + /// foo: Vec + /// } + /// + /// #[pallet::genesis_build] + /// impl GenesisBuild for GenesisConfig { + /// fn build(&self) { + /// todo!() + /// } + /// } + /// } + /// ``` + pub use frame_support_procedural::genesis_build; } #[deprecated(note = "Will be removed after July 2023; Use `sp_runtime::traits` directly instead.")] diff --git a/substrate/frame/support/test/tests/pallet_ui/genesis_invalid_generic.stderr b/substrate/frame/support/test/tests/pallet_ui/genesis_invalid_generic.stderr index e3dd0b7aa1d3..b54a23c91b4e 100644 --- a/substrate/frame/support/test/tests/pallet_ui/genesis_invalid_generic.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/genesis_invalid_generic.stderr @@ -1,4 +1,4 @@ -error: Invalid genesis builder: expected `GenesisBuild` or `GenesisBuild` +error: Invalid genesis builder: expected `BuildGenesisConfig` (or the deprecated `GenesisBuild` or `GenesisBuild`) --> tests/pallet_ui/genesis_invalid_generic.rs:36:7 | 36 | impl GenesisBuild for GenesisConfig {} From 22d9e2abc65f06ae68297f390658ab61037669d7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=B3nal=20Murray?= Date: Fri, 22 Sep 2023 17:02:15 +0100 Subject: [PATCH 42/69] Update HRMP pallet benchmarking to use benchmarks v2 (#1676) Updates the HRMP pallet benchmarks to Benchmarks v2 and fixes a small misconfiguration in one benchmark. 
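For readers unfamiliar with the v2 syntax, here is a minimal sketch of the shape these benchmarks are migrated to (the `do_something` extrinsic, the `Values` storage item, and the surrounding setup are hypothetical, illustrative names, not the actual HRMP items):

```rust
// Hypothetical pallet benchmark, shown only to illustrate the v2 macro shape
// used in this patch; `do_something` and `Values` are made-up names.
use frame_benchmarking::v2::*;
use frame_system::RawOrigin;

#[benchmarks]
mod benchmarks {
	use super::*;

	#[benchmark]
	fn do_something(n: Linear<0, 100>) {
		// Setup code runs before the measured call and is not benchmarked.
		let caller: T::AccountId = whitelisted_caller();

		// The single call whose weight is measured.
		#[extrinsic_call]
		_(RawOrigin::Signed(caller), n);

		// Verification code runs after the measured call.
		assert!(Values::<T>::get(n).is_some());
	}
}
```

Compared to v1, explicit setup code, an `#[extrinsic_call]` (or `#[block]`) section, and trailing verification code replace the `benchmarks! { ... }: _(...) verify { ... }` DSL, which is what the diff below does for every HRMP benchmark.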
--------- Co-authored-by: command-bot <> --- .../parachains/src/hrmp/benchmarking.rs | 280 +++++++++++------- 1 file changed, 179 insertions(+), 101 deletions(-) diff --git a/polkadot/runtime/parachains/src/hrmp/benchmarking.rs b/polkadot/runtime/parachains/src/hrmp/benchmarking.rs index 9ffa264a7dc8..7d82bcc99a75 100644 --- a/polkadot/runtime/parachains/src/hrmp/benchmarking.rs +++ b/polkadot/runtime/parachains/src/hrmp/benchmarking.rs @@ -14,12 +14,15 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . +#![cfg(feature = "runtime-benchmarks")] + use crate::{ configuration::Pallet as Configuration, hrmp::{Pallet as Hrmp, *}, paras::{Pallet as Paras, ParaKind, ParachainsCache}, shared::Pallet as Shared, }; +use frame_benchmarking::{impl_benchmark_test_suite, v2::*, whitelisted_caller}; use frame_support::{assert_ok, traits::Currency}; type BalanceOf = @@ -138,10 +141,12 @@ static_assertions::const_assert!(MAX_UNIQUE_CHANNELS < PREFIX_0); static_assertions::const_assert!(HRMP_MAX_INBOUND_CHANNELS_BOUND < PREFIX_0); static_assertions::const_assert!(HRMP_MAX_OUTBOUND_CHANNELS_BOUND < PREFIX_0); -frame_benchmarking::benchmarks! { - where_clause { where ::RuntimeOrigin: From } +#[benchmarks(where ::RuntimeOrigin: From)] +mod benchmarks { + use super::*; - hrmp_init_open_channel { + #[benchmark] + fn hrmp_init_open_channel() { let sender_id: ParaId = 1u32.into(); let sender_origin: crate::Origin = 1u32.into(); @@ -149,51 +154,68 @@ frame_benchmarking::benchmarks! { // make sure para is registered, and has enough balance. let ed = T::Currency::minimum_balance(); - let deposit: BalanceOf = Configuration::::config().hrmp_sender_deposit.unique_saturated_into(); + let deposit: BalanceOf = + Configuration::::config().hrmp_sender_deposit.unique_saturated_into(); register_parachain_with_balance::(sender_id, deposit + ed); register_parachain_with_balance::(recipient_id, deposit + ed); let capacity = Configuration::::config().hrmp_channel_max_capacity; let message_size = Configuration::::config().hrmp_channel_max_message_size; - }: _(sender_origin, recipient_id, capacity, message_size) - verify { + + #[extrinsic_call] + _(sender_origin, recipient_id, capacity, message_size); + assert_last_event::( - Event::::OpenChannelRequested(sender_id, recipient_id, capacity, message_size).into() + Event::::OpenChannelRequested(sender_id, recipient_id, capacity, message_size) + .into(), ); } - hrmp_accept_open_channel { + #[benchmark] + fn hrmp_accept_open_channel() { let [(sender, _), (recipient, recipient_origin)] = establish_para_connection::(1, 2, ParachainSetupStep::Requested); - }: _(recipient_origin, sender) - verify { + + #[extrinsic_call] + _(recipient_origin, sender); + assert_last_event::(Event::::OpenChannelAccepted(sender, recipient).into()); } - hrmp_close_channel { + #[benchmark] + fn hrmp_close_channel() { let [(sender, sender_origin), (recipient, _)] = establish_para_connection::(1, 2, ParachainSetupStep::Established); let channel_id = HrmpChannelId { sender, recipient }; - }: _(sender_origin, channel_id.clone()) - verify { + + #[extrinsic_call] + _(sender_origin, channel_id.clone()); + assert_last_event::(Event::::ChannelClosed(sender, channel_id).into()); } // NOTE: a single parachain should have the maximum number of allowed ingress and egress // channels. - force_clean_hrmp { + #[benchmark] + fn force_clean_hrmp( // ingress channels to a single leaving parachain that need to be closed. - let i in 0 .. 
(HRMP_MAX_INBOUND_CHANNELS_BOUND - 1); + i: Linear<0, { HRMP_MAX_INBOUND_CHANNELS_BOUND - 1 }>, // egress channels to a single leaving parachain that need to be closed. - let e in 0 .. (HRMP_MAX_OUTBOUND_CHANNELS_BOUND - 1); - + e: Linear<0, { HRMP_MAX_OUTBOUND_CHANNELS_BOUND - 1 }>, + ) { // first, update the configs to support this many open channels... - assert_ok!( - Configuration::::set_hrmp_max_parachain_outbound_channels(frame_system::RawOrigin::Root.into(), e + 1) - ); - assert_ok!( - Configuration::::set_hrmp_max_parachain_inbound_channels(frame_system::RawOrigin::Root.into(), i + 1) - ); + assert_ok!(Configuration::::set_hrmp_max_parachain_outbound_channels( + frame_system::RawOrigin::Root.into(), + e + 1 + )); + assert_ok!(Configuration::::set_hrmp_max_parachain_inbound_channels( + frame_system::RawOrigin::Root.into(), + i + 1 + )); + assert_ok!(Configuration::::set_max_downward_message_size( + frame_system::RawOrigin::Root.into(), + 1024 + )); // .. and enact it. Configuration::::initializer_on_new_session(&Shared::::scheduled_session()); @@ -201,14 +223,17 @@ frame_benchmarking::benchmarks! { let deposit: BalanceOf = config.hrmp_sender_deposit.unique_saturated_into(); let para: ParaId = 1u32.into(); - let para_origin: crate::Origin = 1u32.into(); register_parachain_with_balance::(para, deposit); T::Currency::make_free_balance_be(¶.into_account_truncating(), deposit * 256u32.into()); for ingress_para_id in 0..i { // establish ingress channels to `para`. let ingress_para_id = ingress_para_id + PREFIX_0; - let _ = establish_para_connection::(ingress_para_id, para.into(), ParachainSetupStep::Established); + let _ = establish_para_connection::( + ingress_para_id, + para.into(), + ParachainSetupStep::Established, + ); } // nothing should be left unprocessed. @@ -217,7 +242,11 @@ frame_benchmarking::benchmarks! { for egress_para_id in 0..e { // establish egress channels to `para`. let egress_para_id = egress_para_id + PREFIX_1; - let _ = establish_para_connection::(para.into(), egress_para_id, ParachainSetupStep::Established); + let _ = establish_para_connection::( + para.into(), + egress_para_id, + ParachainSetupStep::Established, + ); } // nothing should be left unprocessed. @@ -225,7 +254,10 @@ frame_benchmarking::benchmarks! { // all in all, we have created this many channels. assert_eq!(HrmpChannels::::iter().count() as u32, i + e); - }: _(frame_system::Origin::::Root, para, i, e) verify { + + #[extrinsic_call] + _(frame_system::Origin::::Root, para, i, e); + // all in all, all of them must be gone by now. assert_eq!(HrmpChannels::::iter().count() as u32, 0); // borrow this function from the tests to make sure state is clear, given that we do a lot @@ -233,73 +265,108 @@ frame_benchmarking::benchmarks! { Hrmp::::assert_storage_consistency_exhaustive(); } - force_process_hrmp_open { + #[benchmark] + fn force_process_hrmp_open( // number of channels that need to be processed. Worse case is an N-M relation: unique // sender and recipients for all channels. - let c in 0 .. MAX_UNIQUE_CHANNELS; - - for id in 0 .. 
c { - let _ = establish_para_connection::(PREFIX_0 + id, PREFIX_1 + id, ParachainSetupStep::Accepted); + c: Linear<0, MAX_UNIQUE_CHANNELS>, + ) { + for id in 0..c { + let _ = establish_para_connection::( + PREFIX_0 + id, + PREFIX_1 + id, + ParachainSetupStep::Accepted, + ); } assert_eq!(HrmpOpenChannelRequestsList::::decode_len().unwrap_or_default() as u32, c); - }: _(frame_system::Origin::::Root, c) - verify { + + #[extrinsic_call] + _(frame_system::Origin::::Root, c); + assert_eq!(HrmpOpenChannelRequestsList::::decode_len().unwrap_or_default() as u32, 0); } - force_process_hrmp_close { + #[benchmark] + fn force_process_hrmp_close( // number of channels that need to be processed. Worse case is an N-M relation: unique // sender and recipients for all channels. - let c in 0 .. MAX_UNIQUE_CHANNELS; - - for id in 0 .. c { - let _ = establish_para_connection::(PREFIX_0 + id, PREFIX_1 + id, ParachainSetupStep::CloseRequested); + c: Linear<0, MAX_UNIQUE_CHANNELS>, + ) { + for id in 0..c { + let _ = establish_para_connection::( + PREFIX_0 + id, + PREFIX_1 + id, + ParachainSetupStep::CloseRequested, + ); } assert_eq!(HrmpCloseChannelRequestsList::::decode_len().unwrap_or_default() as u32, c); - }: _(frame_system::Origin::::Root, c) - verify { + + #[extrinsic_call] + _(frame_system::Origin::::Root, c); + assert_eq!(HrmpCloseChannelRequestsList::::decode_len().unwrap_or_default() as u32, 0); } - hrmp_cancel_open_request { + #[benchmark] + fn hrmp_cancel_open_request( // number of items already existing in the `HrmpOpenChannelRequestsList`, other than the // one that we remove. - let c in 0 .. MAX_UNIQUE_CHANNELS; - - for id in 0 .. c { - let _ = establish_para_connection::(PREFIX_0 + id, PREFIX_1 + id, ParachainSetupStep::Requested); + c: Linear<0, MAX_UNIQUE_CHANNELS>, + ) { + for id in 0..c { + let _ = establish_para_connection::( + PREFIX_0 + id, + PREFIX_1 + id, + ParachainSetupStep::Requested, + ); } let [(sender, sender_origin), (recipient, _)] = establish_para_connection::(1, 2, ParachainSetupStep::Requested); - assert_eq!(HrmpOpenChannelRequestsList::::decode_len().unwrap_or_default() as u32, c + 1); + assert_eq!( + HrmpOpenChannelRequestsList::::decode_len().unwrap_or_default() as u32, + c + 1 + ); let channel_id = HrmpChannelId { sender, recipient }; - }: _(sender_origin, channel_id, c + 1) - verify { + + #[extrinsic_call] + _(sender_origin, channel_id, c + 1); + assert_eq!(HrmpOpenChannelRequestsList::::decode_len().unwrap_or_default() as u32, c); } // worse case will be `n` parachain channel requests, where in all of them either the sender or // the recipient need to be cleaned. This enforces the deposit of at least one to be processed. // No code path for triggering two deposit process exists. - clean_open_channel_requests { - let c in 0 .. MAX_UNIQUE_CHANNELS; - - for id in 0 .. 
c { - let _ = establish_para_connection::(PREFIX_0 + id, PREFIX_1 + id, ParachainSetupStep::Requested); + #[benchmark] + fn clean_open_channel_requests(c: Linear<0, MAX_UNIQUE_CHANNELS>) { + for id in 0..c { + let _ = establish_para_connection::( + PREFIX_0 + id, + PREFIX_1 + id, + ParachainSetupStep::Requested, + ); } assert_eq!(HrmpOpenChannelRequestsList::::decode_len().unwrap_or_default() as u32, c); let outgoing = (0..c).map(|id| (id + PREFIX_1).into()).collect::>(); let config = Configuration::::config(); - }: { - Hrmp::::clean_open_channel_requests(&config, &outgoing); - } verify { + + #[block] + { + Hrmp::::clean_open_channel_requests(&config, &outgoing); + } + assert_eq!(HrmpOpenChannelRequestsList::::decode_len().unwrap_or_default() as u32, 0); } - force_open_hrmp_channel { + #[benchmark] + fn force_open_hrmp_channel( + // Weight parameter only accepts `u32`, `0` and `1` used to represent `false` and `true`, + // respectively. + c: Linear<0, 1>, + ) { let sender_id: ParaId = 1u32.into(); let sender_origin: crate::Origin = 1u32.into(); let recipient_id: ParaId = 2u32.into(); @@ -315,47 +382,46 @@ frame_benchmarking::benchmarks! { let capacity = Configuration::::config().hrmp_channel_max_capacity; let message_size = Configuration::::config().hrmp_channel_max_message_size; - // Weight parameter only accepts `u32`, `0` and `1` used to represent `false` and `true`, - // respectively. - let c = [0, 1]; let channel_id = HrmpChannelId { sender: sender_id, recipient: recipient_id }; - for channels_to_close in c { - if channels_to_close == 1 { - // this will consume more weight if a channel _request_ already exists, because it - // will need to clear the request. - assert_ok!(Hrmp::::hrmp_init_open_channel( + if c == 1 { + // this will consume more weight if a channel _request_ already exists, because it + // will need to clear the request. 
+ assert_ok!(Hrmp::::hrmp_init_open_channel( + sender_origin.clone().into(), + recipient_id, + capacity, + message_size + )); + assert!(HrmpOpenChannelRequests::::get(&channel_id).is_some()); + } else { + if HrmpOpenChannelRequests::::get(&channel_id).is_some() { + assert_ok!(Hrmp::::hrmp_cancel_open_request( sender_origin.clone().into(), - recipient_id, - capacity, - message_size + channel_id.clone(), + MAX_UNIQUE_CHANNELS, )); - assert!(HrmpOpenChannelRequests::::get(&channel_id).is_some()); - } else { - if HrmpOpenChannelRequests::::get(&channel_id).is_some() { - assert_ok!(Hrmp::::hrmp_cancel_open_request( - sender_origin.clone().into(), - channel_id.clone(), - MAX_UNIQUE_CHANNELS, - )); - } - assert!(HrmpOpenChannelRequests::::get(&channel_id).is_none()); } + assert!(HrmpOpenChannelRequests::::get(&channel_id).is_none()); } // but the _channel_ should not exist assert!(HrmpChannels::::get(&channel_id).is_none()); - }: _(frame_system::Origin::::Root, sender_id, recipient_id, capacity, message_size) - verify { + + #[extrinsic_call] + _(frame_system::Origin::::Root, sender_id, recipient_id, capacity, message_size); + assert_last_event::( - Event::::HrmpChannelForceOpened(sender_id, recipient_id, capacity, message_size).into() + Event::::HrmpChannelForceOpened(sender_id, recipient_id, capacity, message_size) + .into(), ); } - establish_system_channel { + #[benchmark] + fn establish_system_channel() { let sender_id: ParaId = 1u32.into(); let recipient_id: ParaId = 2u32.into(); - let caller: T::AccountId = frame_benchmarking::whitelisted_caller(); + let caller: T::AccountId = whitelisted_caller(); let config = Configuration::::config(); // make sure para is registered, and has zero balance. @@ -364,19 +430,23 @@ frame_benchmarking::benchmarks! { let capacity = config.hrmp_channel_max_capacity; let message_size = config.hrmp_channel_max_message_size; - }: _(frame_system::RawOrigin::Signed(caller), sender_id, recipient_id) - verify { + + #[extrinsic_call] + _(frame_system::RawOrigin::Signed(caller), sender_id, recipient_id); + assert_last_event::( - Event::::HrmpSystemChannelOpened(sender_id, recipient_id, capacity, message_size).into() + Event::::HrmpSystemChannelOpened(sender_id, recipient_id, capacity, message_size) + .into(), ); } - poke_channel_deposits { + #[benchmark] + fn poke_channel_deposits() { let sender_id: ParaId = 1u32.into(); let recipient_id: ParaId = 2u32.into(); - let channel_id = HrmpChannelId {sender: sender_id, recipient: recipient_id }; + let channel_id = HrmpChannelId { sender: sender_id, recipient: recipient_id }; - let caller: T::AccountId = frame_benchmarking::whitelisted_caller(); + let caller: T::AccountId = whitelisted_caller(); let config = Configuration::::config(); // make sure para is registered, and has balance to reserve. @@ -403,23 +473,31 @@ frame_benchmarking::benchmarks! { // Actually reserve the deposits. let _ = T::Currency::reserve(&sender_id.into_account_truncating(), sender_deposit); let _ = T::Currency::reserve(&recipient_id.into_account_truncating(), recipient_deposit); - }: _(frame_system::RawOrigin::Signed(caller), sender_id, recipient_id) - verify { + + #[extrinsic_call] + _(frame_system::RawOrigin::Signed(caller), sender_id, recipient_id); + assert_last_event::( - Event::::OpenChannelDepositsUpdated(sender_id, recipient_id).into() + Event::::OpenChannelDepositsUpdated(sender_id, recipient_id).into(), ); let channel = HrmpChannels::::get(&channel_id).unwrap(); // Check that the deposit was updated in the channel state. 
assert_eq!(channel.sender_deposit, 0); assert_eq!(channel.recipient_deposit, 0); // And that the funds were unreserved. assert_eq!( T::Currency::reserved_balance(&sender_id.into_account_truncating()), 0u128.unique_saturated_into() ); assert_eq!( T::Currency::reserved_balance(&recipient_id.into_account_truncating()), 0u128.unique_saturated_into() ); } -} -frame_benchmarking::impl_benchmark_test_suite!( - Hrmp, - crate::mock::new_test_ext(crate::hrmp::tests::GenesisConfigBuilder::default().build()), - crate::mock::Test -); + impl_benchmark_test_suite!( + Hrmp, + crate::mock::new_test_ext(crate::hrmp::tests::GenesisConfigBuilder::default().build()), + crate::mock::Test + ); +} From f79fa6c8bd6042d34bcf18bb71b3393c615c26c7 Mon Sep 17 00:00:00 2001 From: Niklas Adolfsson Date: Fri, 22 Sep 2023 19:22:18 +0200 Subject: [PATCH 43/69] pallet epm: add `TrimmingStatus` to the mined solution (#1659) For tools that use the `Miner`, it's useful to know whether a solution was trimmed and, if so, by how much. --------- Co-authored-by: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> --- polkadot/tests/common.rs | 4 +- .../election-provider-multi-phase/src/lib.rs | 4 +- .../src/signed.rs | 2 +- .../src/unsigned.rs | 99 +++++++++++++------ 4 files changed, 73 insertions(+), 36 deletions(-) diff --git a/polkadot/tests/common.rs b/polkadot/tests/common.rs index 10859ead5fe8..15721c990e01 100644 --- a/polkadot/tests/common.rs +++ b/polkadot/tests/common.rs @@ -33,9 +33,7 @@ pub async fn wait_n_finalized_blocks(n: usize, url: &str) { let mut interval = tokio::time::interval(Duration::from_secs(6)); loop { - let Ok(rpc) = ws_client(url).await else { - continue; - }; + let Ok(rpc) = ws_client(url).await else { continue }; if let Ok(block) = ChainApi::<(), Hash, Header, Block>::finalized_head(&rpc).await { built_blocks.insert(block); diff --git a/substrate/frame/election-provider-multi-phase/src/lib.rs b/substrate/frame/election-provider-multi-phase/src/lib.rs index 8b6e0827c715..05f9b24f8f9c 100644 --- a/substrate/frame/election-provider-multi-phase/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/src/lib.rs @@ -2365,7 +2365,7 @@ mod tests { assert_eq!(MultiPhase::desired_targets().unwrap(), 2); // mine seq_phragmen solution with 2 iters. - let (solution, witness) = MultiPhase::mine_solution().unwrap(); + let (solution, witness, _) = MultiPhase::mine_solution().unwrap(); // ensure this solution is valid. assert!(MultiPhase::queued_solution().is_none()); @@ -2647,7 +2647,7 @@ mod tests { // set the solution balancing to get the desired score. crate::mock::Balancing::set(Some(BalancingConfig { iterations: 2, tolerance: 0 })); - let (solution, _) = MultiPhase::mine_solution().unwrap(); + let (solution, _, _) = MultiPhase::mine_solution().unwrap(); // Default solution's score. assert!(matches!(solution.score, ElectionScore { minimal_stake: 50, .. 
})); diff --git a/substrate/frame/election-provider-multi-phase/src/signed.rs b/substrate/frame/election-provider-multi-phase/src/signed.rs index a5fe8ce55582..7e4b029ff8c8 100644 --- a/substrate/frame/election-provider-multi-phase/src/signed.rs +++ b/substrate/frame/election-provider-multi-phase/src/signed.rs @@ -1348,7 +1348,7 @@ mod tests { roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); - let (raw, witness) = MultiPhase::mine_solution().unwrap(); + let (raw, witness, _) = MultiPhase::mine_solution().unwrap(); let solution_weight = ::solution_weight( witness.voters, witness.targets, diff --git a/substrate/frame/election-provider-multi-phase/src/unsigned.rs b/substrate/frame/election-provider-multi-phase/src/unsigned.rs index f1c9e92a704e..e3d0ded97515 100644 --- a/substrate/frame/election-provider-multi-phase/src/unsigned.rs +++ b/substrate/frame/election-provider-multi-phase/src/unsigned.rs @@ -108,6 +108,27 @@ impl From for MinerError { } } +/// Reports the trimming result of a mined solution +#[derive(Debug, Clone)] +pub struct TrimmingStatus { + weight: usize, + length: usize, +} + +impl TrimmingStatus { + pub fn is_trimmed(&self) -> bool { + self.weight > 0 || self.length > 0 + } + + pub fn trimmed_weight(&self) -> usize { + self.weight + } + + pub fn trimmed_length(&self) -> usize { + self.length + } +} + /// Save a given call into OCW storage. fn save_solution(call: &Call) -> Result<(), MinerError> { log!(debug, "saving a call to the offchain storage."); @@ -162,16 +183,21 @@ impl Pallet { /// /// The Npos Solver type, `S`, must have the same AccountId and Error type as the /// [`crate::Config::Solver`] in order to create a unified return type. - pub fn mine_solution( - ) -> Result<(RawSolution>, SolutionOrSnapshotSize), MinerError> { + pub fn mine_solution() -> Result< + (RawSolution>, SolutionOrSnapshotSize, TrimmingStatus), + MinerError, + > { let RoundSnapshot { voters, targets } = Self::snapshot().ok_or(MinerError::SnapshotUnAvailable)?; let desired_targets = Self::desired_targets().ok_or(MinerError::SnapshotUnAvailable)?; - let (solution, score, size) = Miner::::mine_solution_with_snapshot::< - T::Solver, - >(voters, targets, desired_targets)?; + let (solution, score, size, is_trimmed) = + Miner::::mine_solution_with_snapshot::( + voters, + targets, + desired_targets, + )?; let round = Self::round(); - Ok((RawSolution { solution, score, round }, size)) + Ok((RawSolution { solution, score, round }, size, is_trimmed)) } /// Attempt to restore a solution from cache. Otherwise, compute it fresh. Either way, submit @@ -232,7 +258,7 @@ impl Pallet { /// Mine a new solution as a call. Performs all checks. pub fn mine_checked_call() -> Result, MinerError> { // get the solution, with a load of checks to ensure if submitted, IT IS ABSOLUTELY VALID. - let (raw_solution, witness) = Self::mine_and_check()?; + let (raw_solution, witness, _) = Self::mine_and_check()?; let score = raw_solution.score; let call: Call = Call::submit_unsigned { raw_solution: Box::new(raw_solution), witness }; @@ -282,11 +308,13 @@ impl Pallet { /// If you want an unchecked solution, use [`Pallet::mine_solution`]. /// If you want a checked solution and submit it at the same time, use /// [`Pallet::mine_check_save_submit`]. 
- pub fn mine_and_check( - ) -> Result<(RawSolution>, SolutionOrSnapshotSize), MinerError> { - let (raw_solution, witness) = Self::mine_solution()?; + pub fn mine_and_check() -> Result< + (RawSolution>, SolutionOrSnapshotSize, TrimmingStatus), + MinerError, + > { + let (raw_solution, witness, is_trimmed) = Self::mine_solution()?; Self::basic_checks(&raw_solution, "mined")?; - Ok((raw_solution, witness)) + Ok((raw_solution, witness, is_trimmed)) } /// Checks if an execution of the offchain worker is permitted at the given block number, or @@ -408,7 +436,7 @@ impl Miner { voters: Vec<(T::AccountId, VoteWeight, BoundedVec)>, targets: Vec, desired_targets: u32, - ) -> Result<(SolutionOf, ElectionScore, SolutionOrSnapshotSize), MinerError> + ) -> Result<(SolutionOf, ElectionScore, SolutionOrSnapshotSize, TrimmingStatus), MinerError> where S: NposSolver, { @@ -436,7 +464,8 @@ impl Miner { voters: Vec<(T::AccountId, VoteWeight, BoundedVec)>, targets: Vec, desired_targets: u32, - ) -> Result<(SolutionOf, ElectionScore, SolutionOrSnapshotSize), MinerError> { + ) -> Result<(SolutionOf, ElectionScore, SolutionOrSnapshotSize, TrimmingStatus), MinerError> + { // now make some helper closures. let cache = helpers::generate_voter_cache::(&voters); let voter_index = helpers::voter_index_fn::(&cache); @@ -495,13 +524,13 @@ impl Miner { // trim assignments list for weight and length. let size = SolutionOrSnapshotSize { voters: voters.len() as u32, targets: targets.len() as u32 }; - Self::trim_assignments_weight( + let weight_trimmed = Self::trim_assignments_weight( desired_targets, size, T::MaxWeight::get(), &mut index_assignments, ); - Self::trim_assignments_length( + let length_trimmed = Self::trim_assignments_length( T::MaxLength::get(), &mut index_assignments, &encoded_size_of, @@ -513,7 +542,9 @@ impl Miner { // re-calc score. let score = solution.clone().score(stake_of, voter_at, target_at)?; - Ok((solution, score, size)) + let is_trimmed = TrimmingStatus { weight: weight_trimmed, length: length_trimmed }; + + Ok((solution, score, size, is_trimmed)) } /// Greedily reduce the size of the solution to fit into the block w.r.t length. @@ -534,7 +565,7 @@ impl Miner { max_allowed_length: u32, assignments: &mut Vec>, encoded_size_of: impl Fn(&[IndexAssignmentOf]) -> Result, - ) -> Result<(), MinerError> { + ) -> Result { // Perform a binary search for the max subset of which can fit into the allowed // length. Having discovered that, we can truncate efficiently. let max_allowed_length: usize = max_allowed_length.saturated_into(); @@ -543,7 +574,7 @@ impl Miner { // not much we can do if assignments are already empty. if high == low { - return Ok(()) + return Ok(0) } while high - low > 1 { @@ -577,16 +608,18 @@ impl Miner { // after this point, we never error. // check before edit. + let remove = assignments.len().saturating_sub(maximum_allowed_voters); + log_no_system!( debug, "from {} assignments, truncating to {} for length, removing {}", assignments.len(), maximum_allowed_voters, - assignments.len().saturating_sub(maximum_allowed_voters), + remove ); assignments.truncate(maximum_allowed_voters); - Ok(()) + Ok(remove) } /// Greedily reduce the size of the solution to fit into the block w.r.t. weight. 
@@ -609,7 +642,7 @@ impl Miner { size: SolutionOrSnapshotSize, max_weight: Weight, assignments: &mut Vec>, - ) { + ) -> usize { let maximum_allowed_voters = Self::maximum_voter_for_weight(desired_targets, size, max_weight); let removing: usize = @@ -622,6 +655,8 @@ impl Miner { removing, ); assignments.truncate(maximum_allowed_voters as usize); + + removing } /// Find the maximum `len` that a solution can have in order to fit into the block weight. @@ -1230,7 +1265,7 @@ mod tests { assert_eq!(MultiPhase::desired_targets().unwrap(), 2); // mine seq_phragmen solution with 2 iters. - let (solution, witness) = MultiPhase::mine_solution().unwrap(); + let (solution, witness, _) = MultiPhase::mine_solution().unwrap(); // ensure this solution is valid. assert!(MultiPhase::queued_solution().is_none()); @@ -1268,7 +1303,7 @@ mod tests { roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); - let (raw, witness) = MultiPhase::mine_solution().unwrap(); + let (raw, witness, t) = MultiPhase::mine_solution().unwrap(); let solution_weight = ::solution_weight( witness.voters, witness.targets, @@ -1278,11 +1313,12 @@ mod tests { // default solution will have 5 edges (5 * 5 + 10) assert_eq!(solution_weight, Weight::from_parts(35, 0)); assert_eq!(raw.solution.voter_count(), 5); + assert_eq!(t.trimmed_weight(), 0); // now reduce the max weight ::set(Weight::from_parts(25, u64::MAX)); - let (raw, witness) = MultiPhase::mine_solution().unwrap(); + let (raw, witness, t) = MultiPhase::mine_solution().unwrap(); let solution_weight = ::solution_weight( witness.voters, witness.targets, @@ -1292,6 +1328,7 @@ mod tests { // default solution will have 5 edges (5 * 5 + 10) assert_eq!(solution_weight, Weight::from_parts(25, 0)); assert_eq!(raw.solution.voter_count(), 3); + assert_eq!(t.trimmed_weight(), 2); }) } @@ -1303,7 +1340,7 @@ mod tests { assert!(MultiPhase::current_phase().is_unsigned()); // Force the number of winners to be bigger to fail - let (mut solution, _) = MultiPhase::mine_solution().unwrap(); + let (mut solution, _, _) = MultiPhase::mine_solution().unwrap(); solution.solution.votes1[0].1 = 4; assert_eq!( @@ -1342,7 +1379,7 @@ mod tests { let RoundSnapshot { voters, targets } = MultiPhase::snapshot().unwrap(); let desired_targets = MultiPhase::desired_targets().unwrap(); - let (raw, score, witness) = + let (raw, score, witness, _) = Miner::::prepare_election_result_with_snapshot( result, voters.clone(), @@ -1371,7 +1408,7 @@ mod tests { }, ], }; - let (raw, score, _) = Miner::::prepare_election_result_with_snapshot( + let (raw, score, _, _) = Miner::::prepare_election_result_with_snapshot( result, voters.clone(), targets.clone(), @@ -1400,7 +1437,7 @@ mod tests { }, ], }; - let (raw, score, witness) = + let (raw, score, witness, _) = Miner::::prepare_election_result_with_snapshot( result, voters.clone(), @@ -1769,7 +1806,7 @@ mod tests { let solution_clone = solution.clone(); // when - Miner::::trim_assignments_length( + let trimmed_len = Miner::::trim_assignments_length( encoded_len, &mut assignments, encoded_size_of, @@ -1779,6 +1816,7 @@ mod tests { // then let solution = SolutionOf::::try_from(assignments.as_slice()).unwrap(); assert_eq!(solution, solution_clone); + assert_eq!(trimmed_len, 0); }); } @@ -1794,7 +1832,7 @@ mod tests { let solution_clone = solution.clone(); // when - Miner::::trim_assignments_length( + let trimmed_len = Miner::::trim_assignments_length( encoded_len as u32 - 1, &mut assignments, encoded_size_of, @@ -1805,6 +1843,7 @@ mod tests { let solution = 
SolutionOf::::try_from(assignments.as_slice()).unwrap(); assert_ne!(solution, solution_clone); assert!(solution.encoded_size() < encoded_len); + assert_eq!(trimmed_len, 1); }); } From 3d9f5e8bfcce054c3722d52ac3a0f19ffd7252ec Mon Sep 17 00:00:00 2001 From: Maksym H <1177472+mordamax@users.noreply.github.com> Date: Fri, 22 Sep 2023 22:48:38 +0100 Subject: [PATCH 44/69] update contributing guide and ui-tests scripts (#1668) - Updated ./docs/CONTRIBUTING.md to clarify how to use command-bot and how to update UI tests locally or in a PR - Moved update-ui-tests.sh to the root of the project so the UI tests for Substrate and Polkadot can be run together --------- Co-authored-by: Mira Ressel --- .gitlab/pipeline/test.yml | 4 +-- docs/CONTRIBUTING.md | 16 ++++++++- polkadot/scripts/update-rust-stable.sh | 39 --------------------- scripts/update-ui-tests.sh | 40 +++++++++++++++++++++ substrate/.maintain/update-rust-stable.sh | 42 ----------------------- 5 files changed, 57 insertions(+), 84 deletions(-) delete mode 100755 polkadot/scripts/update-rust-stable.sh create mode 100755 scripts/update-ui-tests.sh delete mode 100755 substrate/.maintain/update-rust-stable.sh diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml index 3a4f5e71247c..ad0ef4d9b8e6 100644 --- a/.gitlab/pipeline/test.yml +++ b/.gitlab/pipeline/test.yml @@ -314,8 +314,8 @@ node-bench-regression-guard: --compare-with artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA" after_script: [""] -# if this fails (especially after rust version upgrade) run -# ./substrate/.maintain/update-rust-stable.sh +# if this fails run `bot update-ui` in the Pull Request or "./scripts/update-ui-tests.sh" locally +# see ./docs/CONTRIBUTING.md#ui-tests test-frame-ui: stage: test extends: diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md index 245369b70201..1e05755a9b83 100644 --- a/docs/CONTRIBUTING.md +++ b/docs/CONTRIBUTING.md @@ -143,4 +143,18 @@ UI tests are used for macros to ensure that the output of a macro doesn’t chan These UI tests are sensible to any changes in the macro generated code or to switching the rust stable version. The tests are only run when the `RUN_UI_TESTS` environment variable is set. So, when the CI is for example complaining about failing UI tests and it is expected that they fail these tests need to be executed locally. -To simplify the updating of the UI test output there is the `.maintain/update-rust-stable +To simplify the updating of the UI test output there is a script +- `./scripts/update-ui-tests.sh` to update the tests for a current rust version locally +- `./scripts/update-ui-tests.sh 1.70` # to update the tests for a specific rust version locally + +Or if you have opened PR and you're member of `paritytech` - you can use command-bot to run the tests for you in CI: +- `bot update-ui` - will run the tests for the current rust version +- `bot update-ui latest --rust_version=1.70.0` - will run the tests for the specified rust version +- `bot update-ui latest -v CMD_IMAGE=paritytech/ci-unified:bullseye-1.70.0-2023-05-23 --rust_version=1.70.0` - +will run the tests for the specified rust version and specified image + +## Command Bot + +If you're member of **paritytech** org - you can use command-bot to run various of common commands in CI: + +Start with comment in PR: `bot help` to see the list of available commands. 
diff --git a/polkadot/scripts/update-rust-stable.sh b/polkadot/scripts/update-rust-stable.sh deleted file mode 100755 index 6aae75d8cb2d..000000000000 --- a/polkadot/scripts/update-rust-stable.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env bash -# -# Script for updating the UI tests for a new rust stable version. -# -# It needs to be called like this: -# -# update-rust-stable.sh 1.61 -# -# This will run all UI tests with the rust stable 1.61. The script -# requires that rustup is installed. -set -e - -if [ "$#" -ne 1 ]; then - echo "Please specify the rust version to use. E.g. update-rust-stable.sh 1.61" - exit -fi - -RUST_VERSION=$1 - -if ! command -v rustup &> /dev/null -then - echo "rustup needs to be installed" - exit -fi - -rustup install $RUST_VERSION -rustup component add rust-src --toolchain $RUST_VERSION - -# Ensure we run the ui tests -export RUN_UI_TESTS=1 -# We don't need any wasm files for ui tests -export SKIP_WASM_BUILD=1 -# Let trybuild overwrite the .stderr files -export TRYBUILD=overwrite - -# Run all the relevant UI tests -# -# Any new UI tests in different crates need to be added here as well. -rustup run $RUST_VERSION cargo test -p orchestra ui diff --git a/scripts/update-ui-tests.sh b/scripts/update-ui-tests.sh new file mode 100755 index 000000000000..785cc7bd3291 --- /dev/null +++ b/scripts/update-ui-tests.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash +# Script for updating the UI tests for a new rust stable version. +# Exit on error +set -e + +# by default current rust stable will be used +RUSTUP_RUN="" +# check if we have a parameter +# ./scripts/update-ui-tests.sh 1.70 +if [ ! -z "$1" ]; then + echo "RUST_VERSION: $1" + # This will run all UI tests with the rust stable 1.70. + # The script requires that rustup is installed. + RUST_VERSION=$1 + RUSTUP_RUN="rustup run $RUST_VERSION" + + + echo "installing rustup $RUST_VERSION" + if ! command -v rustup &> /dev/null + then + echo "rustup needs to be installed" + exit + fi + + rustup install $RUST_VERSION + rustup component add rust-src --toolchain $RUST_VERSION +fi + +# Ensure we run the ui tests +export RUN_UI_TESTS=1 +# We don't need any wasm files for ui tests +export SKIP_WASM_BUILD=1 +# Let trybuild overwrite the .stderr files +export TRYBUILD=overwrite + +# ./substrate +$RUSTUP_RUN cargo test -p sp-runtime-interface ui +$RUSTUP_RUN cargo test -p sp-api-test ui +$RUSTUP_RUN cargo test -p frame-election-provider-solution-type ui +$RUSTUP_RUN cargo test -p frame-support-test ui diff --git a/substrate/.maintain/update-rust-stable.sh b/substrate/.maintain/update-rust-stable.sh deleted file mode 100755 index b253bb410531..000000000000 --- a/substrate/.maintain/update-rust-stable.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env bash -# -# Script for updating the UI tests for a new rust stable version. -# -# It needs to be called like this: -# -# update-rust-stable.sh 1.61 -# -# This will run all UI tests with the rust stable 1.61. The script -# requires that rustup is installed. -set -e - -if [ "$#" -ne 1 ]; then - echo "Please specify the rust version to use. E.g. update-rust-stable.sh 1.61" - exit -fi - -RUST_VERSION=$1 - -if ! 
command -v rustup &> /dev/null -then - echo "rustup needs to be installed" - exit -fi - -rustup install $RUST_VERSION -rustup component add rust-src --toolchain $RUST_VERSION - -# Ensure we run the ui tests -export RUN_UI_TESTS=1 -# We don't need any wasm files for ui tests -export SKIP_WASM_BUILD=1 -# Let trybuild overwrite the .stderr files -export TRYBUILD=overwrite - -# Run all the relevant UI tests -# -# Any new UI tests in different crates need to be added here as well. -rustup run $RUST_VERSION cargo test -p sp-runtime-interface ui -rustup run $RUST_VERSION cargo test -p sp-api-test ui -rustup run $RUST_VERSION cargo test -p frame-election-provider-solution-type ui -rustup run $RUST_VERSION cargo test -p frame-support-test ui From 6a897f0694232fa9bc83d3f98fd47a5f69282601 Mon Sep 17 00:00:00 2001 From: Squirrel Date: Sun, 24 Sep 2023 12:27:55 +0100 Subject: [PATCH 45/69] docs / Update PR template to reflect monorepo (#1674) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove: ``` Polkadot companion: (*if applicable*) Cumulus companion: (*if applicable*) ``` --------- Co-authored-by: Chevdor Co-authored-by: Bastian Köcher --- docs/PULL_REQUEST_TEMPLATE.md | 6 ------ 1 file changed, 6 deletions(-) diff --git a/docs/PULL_REQUEST_TEMPLATE.md b/docs/PULL_REQUEST_TEMPLATE.md index 284ad15afc9e..c93ac90c7e32 100644 --- a/docs/PULL_REQUEST_TEMPLATE.md +++ b/docs/PULL_REQUEST_TEMPLATE.md @@ -24,10 +24,6 @@ Fixes # (issue number, *if applicable*) Closes # (issue number, *if applicable*) -Polkadot companion: (*if applicable*) - -Cumulus companion: (*if applicable*) - # Checklist - [ ] My PR includes a detailed description as outlined in the "Description" section above @@ -35,8 +31,6 @@ Cumulus companion: (*if applicable*) required) - [ ] I have made corresponding changes to the documentation (if applicable) - [ ] I have added tests that prove my fix is effective or that my feature works (if applicable) -- [ ] If this PR alters any external APIs or interfaces used by Polkadot, the corresponding Polkadot PR is ready as well - as the corresponding Cumulus PR (optional) You can remove the "Checklist" section once all have been checked. Thank you for your contribution! From e67a879ca07e0efb5aae6b158e2469cba6cf09b3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Sep 2023 00:47:09 +0300 Subject: [PATCH 46/69] Bump aes-gcm from 0.10.2 to 0.10.3 (#1681) Bumps [aes-gcm](https://github.com/RustCrypto/AEADs) from 0.10.2 to 0.10.3.
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ccf546b16b6b..dade37f9a025 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -121,9 +121,9 @@ dependencies = [ [[package]] name = "aes-gcm" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "209b47e8954a928e1d72e86eca7000ebb6655fe1436d33eefc2201cad027e237" +checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" dependencies = [ "aead 0.5.2", "aes 0.8.3", @@ -17431,7 +17431,7 @@ dependencies = [ name = "sp-statement-store" version = "4.0.0-dev" dependencies = [ - "aes-gcm 0.10.2", + "aes-gcm 0.10.3", "curve25519-dalek 4.0.0", "ed25519-dalek", "hkdf", @@ -19257,7 +19257,7 @@ checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ "cfg-if", "digest 0.10.7", - "rand 0.7.3", + "rand 0.8.5", "static_assertions", ] @@ -20060,7 +20060,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4a00f4242f2db33307347bd5be53263c52a0331c96c14292118c9a6bb48d267" dependencies = [ "aes 0.6.0", - "aes-gcm 0.10.2", + "aes-gcm 0.10.3", "async-trait", "bincode", "block-modes", From fa4c672abcb7492e23a51bdfa2129a723bd526ad Mon Sep 17 00:00:00 2001 From: Gabriel Facco de Arruda Date: Mon, 25 Sep 2023 05:14:11 -0300 Subject: [PATCH 47/69] Uncoupling pallet-xcm from frame-system's RuntimeCall (#1684) --- polkadot/xcm/pallet-xcm/src/lib.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index 5acd093d3bca..321bb294b88d 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -203,10 +203,10 @@ pub mod pallet { >; /// Our XCM filter which messages to be executed using `XcmExecutor` must pass. - type XcmExecuteFilter: Contains<(MultiLocation, Xcm<::RuntimeCall>)>; + type XcmExecuteFilter: Contains<(MultiLocation, Xcm<::RuntimeCall>)>; /// Something to execute an XCM message. - type XcmExecutor: ExecuteXcm<::RuntimeCall>; + type XcmExecutor: ExecuteXcm<::RuntimeCall>; /// Our XCM filter which messages to be teleported using the dedicated extrinsic must pass. type XcmTeleportFilter: Contains<(MultiLocation, Vec)>; @@ -216,7 +216,7 @@ pub mod pallet { type XcmReserveTransferFilter: Contains<(MultiLocation, Vec)>; /// Means of measuring the weight consumed by an XCM message locally. - type Weigher: WeightBounds<::RuntimeCall>; + type Weigher: WeightBounds<::RuntimeCall>; /// This chain's Universal Location. type UniversalLocation: Get; @@ -227,7 +227,6 @@ pub mod pallet { /// The runtime `Call` type. 
type RuntimeCall: Parameter + GetDispatchInfo - + IsType<::RuntimeCall> + Dispatchable< RuntimeOrigin = ::RuntimeOrigin, PostInfo = PostDispatchInfo, @@ -902,7 +901,7 @@ pub mod pallet { #[pallet::weight(max_weight.saturating_add(T::WeightInfo::execute()))] pub fn execute( origin: OriginFor, - message: Box::RuntimeCall>>, + message: Box::RuntimeCall>>, max_weight: Weight, ) -> DispatchResultWithPostInfo { let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; From b5fcdff9f564e113efa0aff1e3e53d9bc807c28c Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Mon, 25 Sep 2023 11:38:02 +0200 Subject: [PATCH 48/69] tweak pallet macro (genesis_config etc) to cater for RA users as well. (#1689) Small tweak to https://github.com/paritytech/polkadot-sdk/pull/1642 to incorporate the ideas from https://github.com/paritytech/polkadot-sdk/issues/247. I think this is a good middle ground, where we have good rust-docs, and the RA users will also have some hope. cc @wentelteefje @aaronbassett @sam0x17 --- substrate/frame/support/procedural/src/lib.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/substrate/frame/support/procedural/src/lib.rs b/substrate/frame/support/procedural/src/lib.rs index b7aaf7c7cc1b..466ceca42961 100644 --- a/substrate/frame/support/procedural/src/lib.rs +++ b/substrate/frame/support/procedural/src/lib.rs @@ -1417,11 +1417,15 @@ pub fn type_value(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } +/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// `frame_support::pallet_macros::genesis_config`. #[proc_macro_attribute] pub fn genesis_config(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } +/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// `frame_support::pallet_macros::genesis_build`. #[proc_macro_attribute] pub fn genesis_build(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() From c0a4ce1fc87b7b39bc307072e60db97ade3cd6be Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Mon, 25 Sep 2023 13:56:43 +0300 Subject: [PATCH 49/69] Fix documentation about justification and `finalized == true` requirement (#1607) I was reading the source code and noticed two places where the documentation is no longer true after https://github.com/paritytech/polkadot-sdk/pull/1211. --- substrate/client/service/src/client/client.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index 26dcd0f9e21a..da4a4f66e2af 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -484,8 +484,7 @@ where CallExecutor::runtime_version(&self.executor, hash) } - /// Apply a checked and validated block to an operation. If a justification is provided - /// then `finalized` *must* be true. + /// Apply a checked and validated block to an operation. fn apply_block( &self, operation: &mut ClientImportOperation, @@ -1766,8 +1765,7 @@ where { type Error = ConsensusError; - /// Import a checked and validated block. If a justification is provided in - /// `BlockImportParams` then `finalized` *must* be true. + /// Import a checked and validated block. /// /// NOTE: only use this implementation when there are NO consensus-level BlockImport /// objects. 
Otherwise, importing blocks directly into the client would be bypassing important verification work. From e8da320734ae44803f89dd2b35b3cfea0e1ecca1 Mon Sep 17 00:00:00 2001 From: Kevin Wang Date: Mon, 25 Sep 2023 21:13:11 +0800 Subject: [PATCH 50/69] contracts: Fix incorrect storage alias in migration (#1687) # Description We have recently been trying to upgrade our Substrate version from `polkadot-v0.9.43` to `polkadot-v1.0.0` and we noticed a critical issue: all deployed contracts seem to be experiencing a `CodeNotFound` error. After a thorough investigation, it appears that the root cause of this issue lies in the mismatch between the storage alias of `CodeInfoOf` in the migration and its original definition. This PR corrects the storage alias to align it with its original definition. I am uncertain about the proper approach for adding tests to this change. Would the team consider taking over this PR to bring it to completion? --------- Co-authored-by: pgherveou --- substrate/frame/contracts/src/migration.rs | 26 +++++++++++++++++++ .../frame/contracts/src/migration/v12.rs | 2 +- .../frame/contracts/src/migration/v14.rs | 2 +- 3 files changed, 28 insertions(+), 2 deletions(-) diff --git a/substrate/frame/contracts/src/migration.rs b/substrate/frame/contracts/src/migration.rs index 3e3d6f37884c..271462073120 --- a/substrate/frame/contracts/src/migration.rs +++ b/substrate/frame/contracts/src/migration.rs @@ -328,6 +328,32 @@ impl<T: Config, const TEST_ALL_STEPS: bool> OnRuntimeUpgrade for Migration<T, TEST_ALL_STEPS> + #[cfg(feature = "try-runtime")] + fn post_upgrade(_state: Vec<u8>) -> Result<(), TryRuntimeError> { + if !TEST_ALL_STEPS { + return Ok(()) + } + + log::info!(target: LOG_TARGET, "=== POST UPGRADE CHECKS ==="); + + // Ensure that the hashing algorithm is correct for each storage map. + if let Some(hash) = crate::CodeInfoOf::<T>::iter_keys().next() { + crate::CodeInfoOf::<T>::get(hash).expect("CodeInfo exists for hash; qed"); + } + if let Some(hash) = crate::PristineCode::<T>::iter_keys().next() { + crate::PristineCode::<T>::get(hash).expect("PristineCode exists for hash; qed"); + } + if let Some(account_id) = crate::ContractInfoOf::<T>::iter_keys().next() { + crate::ContractInfoOf::<T>::get(account_id) + .expect("ContractInfo exists for account_id; qed"); + } + if let Some(nonce) = crate::DeletionQueue::<T>::iter_keys().next() { + crate::DeletionQueue::<T>::get(nonce).expect("DeletionQueue exists for nonce; qed"); + } + + Ok(()) + } } /// The result of running the migration.
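Why a one-word hasher change is fatal: FRAME derives each entry's final storage key by running the lookup key through the hasher declared on the map, so an alias declared with a different hasher addresses entirely different keys. A minimal, self-contained sketch of the mismatch (simplified: real keys are twox128(pallet) ++ twox128(item) ++ hasher(key); only `sp_core` is assumed; this is an illustration, not the pallet's actual code):

```rust
use sp_core::hashing::twox_64;

// `Identity` appends the raw key bytes unchanged.
fn key_with_identity(prefix: &[u8], key: &[u8]) -> Vec<u8> {
    [prefix, key].concat()
}

// `Twox64Concat` prepends an 8-byte twox64 hash of the key before the raw key.
fn key_with_twox64_concat(prefix: &[u8], key: &[u8]) -> Vec<u8> {
    [prefix, twox_64(key).as_slice(), key].concat()
}

fn main() {
    let prefix = b"Contracts CodeInfoOf"; // stand-in for the real hashed prefix
    let code_hash = [0x42u8; 32];
    // The two hashers derive different final storage keys, so an alias declared
    // with `Twox64Concat` never sees entries that were written under `Identity`,
    // which is what surfaced as `CodeNotFound` for every deployed contract.
    assert_ne!(
        key_with_identity(prefix, &code_hash),
        key_with_twox64_concat(prefix, &code_hash)
    );
}
```

The v12/v14 hunks below are the actual fix: the alias hasher is flipped from `Twox64Concat` back to `Identity` to match the live maps.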
diff --git a/substrate/frame/contracts/src/migration/v12.rs b/substrate/frame/contracts/src/migration/v12.rs index eb045aa42e9d..4ddc57584b30 100644 --- a/substrate/frame/contracts/src/migration/v12.rs +++ b/substrate/frame/contracts/src/migration/v12.rs @@ -96,7 +96,7 @@ where #[storage_alias] pub type CodeInfoOf = - StorageMap, Twox64Concat, CodeHash, CodeInfo>; + StorageMap, Identity, CodeHash, CodeInfo>; #[storage_alias] pub type PristineCode = StorageMap, Identity, CodeHash, Vec>; diff --git a/substrate/frame/contracts/src/migration/v14.rs b/substrate/frame/contracts/src/migration/v14.rs index efb49dff4f10..94534d05fdf8 100644 --- a/substrate/frame/contracts/src/migration/v14.rs +++ b/substrate/frame/contracts/src/migration/v14.rs @@ -70,7 +70,7 @@ mod old { #[storage_alias] pub type CodeInfoOf = - StorageMap, Twox64Concat, CodeHash, CodeInfo>; + StorageMap, Identity, CodeHash, CodeInfo>; } #[cfg(feature = "runtime-benchmarks")] From b18fb35e0dada9a23761f511d502b9a73fac9593 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Sep 2023 17:53:16 +0300 Subject: [PATCH 51/69] Bump actions/checkout from 4.0.0 to 4.1.0 (#1688) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [actions/checkout](https://github.com/actions/checkout) from 4.0.0 to 4.1.0.
Release notes and changelog sourced from actions/checkout's releases (v3.0.2 through v4.1.0, truncated). Full changelog: https://github.com/actions/checkout/compare/v4.0.0...v4.1.0
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=actions/checkout&package-manager=github_actions&previous-version=4.0.0&new-version=4.1.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/check-licenses.yml | 2 +- .github/workflows/check-markdown.yml | 2 +- .github/workflows/check-prdoc.yml | 2 +- .github/workflows/fmt-check.yml | 2 +- .github/workflows/release-50_publish-docker.yml | 6 +++--- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/check-licenses.yml b/.github/workflows/check-licenses.yml index 4d0afefc47aa..a5c4f2577c00 100644 --- a/.github/workflows/check-licenses.yml +++ b/.github/workflows/check-licenses.yml @@ -14,7 +14,7 @@ jobs: NODE_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - name: Checkout sources - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 + uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 - uses: actions/setup-node@v3.8.1 with: node-version: "18.x" diff --git a/.github/workflows/check-markdown.yml b/.github/workflows/check-markdown.yml index f1e46ca27351..be676d3c1e8c 100644 --- a/.github/workflows/check-markdown.yml +++ b/.github/workflows/check-markdown.yml @@ -13,7 +13,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 + uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 - uses: actions/setup-node@v3.8.1 with: diff --git a/.github/workflows/check-prdoc.yml b/.github/workflows/check-prdoc.yml index d153184941ac..23ae2d1aca9d 100644 --- a/.github/workflows/check-prdoc.yml +++ b/.github/workflows/check-prdoc.yml @@ -40,7 +40,7 @@ jobs: - name: Checkout repo if: ${{ !contains(steps.get-labels.outputs.labels, 'R0') }} - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac #v4.0.0 + uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 #v4.1.0 - name: PRdoc check for PR#${{ github.event.pull_request.number }} if: ${{ !contains(steps.get-labels.outputs.labels, 'R0') }} diff --git a/.github/workflows/fmt-check.yml b/.github/workflows/fmt-check.yml index df785404036e..db93a3d53a76 100644 --- a/.github/workflows/fmt-check.yml +++ b/.github/workflows/fmt-check.yml @@ -16,7 +16,7 @@ jobs: container: image: paritytech/ci-unified:bullseye-1.70.0-2023-05-23-v20230706 steps: - - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 + - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 - name: Cargo fmt run: cargo +nightly fmt --all -- --check diff --git a/.github/workflows/release-50_publish-docker.yml b/.github/workflows/release-50_publish-docker.yml index d01d78631e19..15631c172b9f 100644 --- a/.github/workflows/release-50_publish-docker.yml +++ b/.github/workflows/release-50_publish-docker.yml @@ -79,7 +79,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 + uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 #TODO: this step will be needed when automated triggering will work #this step runs only if the workflow is triggered automatically when new release is published @@ -117,7 +117,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 + uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 - name: Get artifacts from cache uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 @@ -239,7 +239,7 @@ jobs: needs: fetch-latest-debian-package-version steps: - name: Checkout sources - uses: 
actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 + uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 - name: Set up Docker Buildx uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0 From f209b31b48586bca81c0ffafa150e1796ee05845 Mon Sep 17 00:00:00 2001 From: Javier Viola Date: Mon, 25 Sep 2023 15:46:36 -0300 Subject: [PATCH 52/69] Make downloads in parallel and give more time to complete (#1699) Fix flaky test (e.g. https://gitlab.parity.io/parity/mirrors/polkadot-sdk/-/jobs/3787906); sometimes it just times out when trying to download the artifacts from CI. This makes the downloads run in parallel and allows more time for them to complete. Thx! cc: @michalkucharczyk --- .../misc/0002-download-polkadot-from-pr.sh | 8 +++++--- polkadot/zombienet_tests/misc/0002-upgrade-node.zndsl | 4 ++-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/polkadot/zombienet_tests/misc/0002-download-polkadot-from-pr.sh b/polkadot/zombienet_tests/misc/0002-download-polkadot-from-pr.sh index 0d4b28075795..b55534559278 100644 --- a/polkadot/zombienet_tests/misc/0002-download-polkadot-from-pr.sh +++ b/polkadot/zombienet_tests/misc/0002-download-polkadot-from-pr.sh @@ -12,8 +12,10 @@ export PATH=$CFG_DIR:$PATH cd $CFG_DIR # see 0002-upgrade-node.zndsl to view the args. -curl -L -O $1/polkadot -curl -L -O $1/polkadot-prepare-worker -curl -L -O $1/polkadot-execute-worker +curl -L -O $1/polkadot & +curl -L -O $1/polkadot-prepare-worker & +curl -L -O $1/polkadot-execute-worker & +wait + chmod +x $CFG_DIR/polkadot $CFG_DIR/polkadot-prepare-worker $CFG_DIR/polkadot-execute-worker echo $(polkadot --version) diff --git a/polkadot/zombienet_tests/misc/0002-upgrade-node.zndsl b/polkadot/zombienet_tests/misc/0002-upgrade-node.zndsl index fdf16b7286c9..9191fb027de0 100644 --- a/polkadot/zombienet_tests/misc/0002-upgrade-node.zndsl +++ b/polkadot/zombienet_tests/misc/0002-upgrade-node.zndsl @@ -11,8 +11,8 @@ dave: parachain 2001 block height is at least 10 within 200 seconds # with the version of polkadot you want to download. # avg 30s in our infra -alice: run ./0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_ARTIFACTS_URL}}" within 40 seconds -bob: run ./0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_ARTIFACTS_URL}}" within 40 seconds +alice: run ./0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_ARTIFACTS_URL}}" within 60 seconds +bob: run ./0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_ARTIFACTS_URL}}" within 60 seconds alice: restart after 5 seconds bob: restart after 5 seconds From bc5005217a8c2e7c95b9011c96d7e619879b1200 Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Tue, 26 Sep 2023 01:43:36 +0300 Subject: [PATCH 53/69] Implement more useful traits in `Slot` type (#1595) I had quite a few conversions to/from `u64` because these were lacking. Let me know if others are desirable as well for symmetry. --- .../primitives/consensus/slots/src/lib.rs | 22 ++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/substrate/primitives/consensus/slots/src/lib.rs b/substrate/primitives/consensus/slots/src/lib.rs index 30bb42e2c758..a299ce395ea4 100644 --- a/substrate/primitives/consensus/slots/src/lib.rs +++ b/substrate/primitives/consensus/slots/src/lib.rs @@ -24,7 +24,7 @@ use scale_info::TypeInfo; use sp_timestamp::Timestamp; /// Unit type wrapper that represents a slot.
-#[derive(Debug, Encode, MaxEncodedLen, Decode, Eq, Clone, Copy, Default, Ord, TypeInfo)] +#[derive(Debug, Encode, MaxEncodedLen, Decode, Eq, Clone, Copy, Default, Ord, Hash, TypeInfo)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct Slot(u64); @@ -44,6 +44,26 @@ impl core::ops::Add for Slot { } } +impl core::ops::Sub for Slot { + type Output = Self; + + fn sub(self, other: Self) -> Self { + Self(self.0 - other.0) + } +} + +impl core::ops::AddAssign for Slot { + fn add_assign(&mut self, rhs: Self) { + self.0 += rhs.0 + } +} + +impl core::ops::SubAssign for Slot { + fn sub_assign(&mut self, rhs: Self) { + self.0 -= rhs.0 + } +} + impl core::ops::Add for Slot { type Output = Self; From cc50eda0988337a3174f9f86f4435aca0078053d Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Tue, 26 Sep 2023 14:36:24 +0300 Subject: [PATCH 54/69] chainHead/storage: Fix storage iteration using the query key (#1665) This PR ensures that all storage keys under a prefix are returned by the `chainHead_storage` method. Before this PR, the `storage_keys` was used with just the `start_key`. Before the pagination event was generated, the last reported key `last_key` was saved internally. When the pagination is resumed, the `last_key` will serve as the next `start_key` to the `storage_keys` API. However, this behavior does not function properly for non-prefixed storage keys. Entry keys `a`, `ab`, `abc` share a common prefix and therefore the `ab` key leads to `abc`. However, for `a`, `ab`, `aB` and `abc`, the `aB` key does not immediately lead to `abc`. To mitigate this, the PR saves the start key of the query, together with the next pagination key. Improve testing to ensure we have a key entry that doesn't share the prefix with the descendant key. @paritytech/subxt-team --------- Signed-off-by: Alexandru Vasile --- .../src/chain_head/chain_head_storage.rs | 43 +++++++++++++------ .../rpc-spec-v2/src/chain_head/tests.rs | 20 +++++++++ 2 files changed, 50 insertions(+), 13 deletions(-) diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs index 7095548a2b16..6e19f59a5d68 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs @@ -71,8 +71,10 @@ impl ChainHeadStorage { /// Query to iterate over storage. struct QueryIter { - /// The next key from which the iteration should continue. - next_key: StorageKey, + /// The key from which the iteration was started. + query_key: StorageKey, + /// The key after which pagination should resume. + pagination_start_key: Option, /// The type of the query (either value or hash). 
ty: IterQueryType, } @@ -184,20 +186,27 @@ where hash: Block::Hash, child_key: Option<&ChildInfo>, ) -> QueryIterResult { - let QueryIter { next_key, ty } = query; + let QueryIter { ty, query_key, pagination_start_key } = query; let mut keys_iter = if let Some(child_key) = child_key { - self.client - .child_storage_keys(hash, child_key.to_owned(), Some(&next_key), None) + self.client.child_storage_keys( + hash, + child_key.to_owned(), + Some(&query_key), + pagination_start_key.as_ref(), + ) } else { - self.client.storage_keys(hash, Some(&next_key), None) + self.client.storage_keys(hash, Some(&query_key), pagination_start_key.as_ref()) } .map_err(|err| err.to_string())?; let mut ret = Vec::with_capacity(self.operation_max_storage_items); + let mut next_pagination_key = None; for _ in 0..self.operation_max_storage_items { let Some(key) = keys_iter.next() else { break }; + next_pagination_key = Some(key.clone()); + let result = match ty { IterQueryType::Value => self.query_storage_value(hash, &key, child_key), IterQueryType::Hash => self.query_storage_hash(hash, &key, child_key), @@ -209,7 +218,11 @@ where } // Save the next key if any to continue the iteration. - let maybe_next_query = keys_iter.next().map(|next_key| QueryIter { next_key, ty }); + let maybe_next_query = keys_iter.next().map(|_| QueryIter { + ty, + query_key, + pagination_start_key: next_pagination_key, + }); Ok((ret, maybe_next_query)) } @@ -325,12 +338,16 @@ where return }, }, - StorageQueryType::DescendantsValues => self - .iter_operations - .push_back(QueryIter { next_key: item.key, ty: IterQueryType::Value }), - StorageQueryType::DescendantsHashes => self - .iter_operations - .push_back(QueryIter { next_key: item.key, ty: IterQueryType::Hash }), + StorageQueryType::DescendantsValues => self.iter_operations.push_back(QueryIter { + query_key: item.key, + ty: IterQueryType::Value, + pagination_start_key: None, + }), + StorageQueryType::DescendantsHashes => self.iter_operations.push_back(QueryIter { + query_key: item.key, + ty: IterQueryType::Hash, + pagination_start_key: None, + }), }; } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index 3ab47991c4e5..1d5b45260a22 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -2352,6 +2352,7 @@ async fn check_continue_operation() { builder.push_storage_change(b":m".to_vec(), Some(b"a".to_vec())).unwrap(); builder.push_storage_change(b":mo".to_vec(), Some(b"ab".to_vec())).unwrap(); builder.push_storage_change(b":moc".to_vec(), Some(b"abc".to_vec())).unwrap(); + builder.push_storage_change(b":moD".to_vec(), Some(b"abcmoD".to_vec())).unwrap(); builder.push_storage_change(b":mock".to_vec(), Some(b"abcd".to_vec())).unwrap(); let block = builder.build().unwrap().block; let block_hash = format!("{:?}", block.header.hash()); @@ -2430,6 +2431,25 @@ async fn check_continue_operation() { res.items[0].result == StorageResultType::Value(hex_string(b"ab")) ); + // Pagination event. 
+ assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::OperationWaitingForContinue(res) if res.operation_id == operation_id + ); + does_not_produce_event::>( + &mut sub, + std::time::Duration::from_secs(DOES_NOT_PRODUCE_EVENTS_SECONDS), + ) + .await; + let _res: () = api.call("chainHead_unstable_continue", [&sub_id, &operation_id]).await.unwrap(); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::OperationStorageItems(res) if res.operation_id == operation_id && + res.items.len() == 1 && + res.items[0].key == hex_string(b":moD") && + res.items[0].result == StorageResultType::Value(hex_string(b"abcmoD")) + ); + // Pagination event. assert_matches!( get_next_event::>(&mut sub).await, From 7d3ce4df72df7a25e0d71c75aa8c78959b2de3ef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=B3nal=20Murray?= Date: Tue, 26 Sep 2023 14:42:37 +0100 Subject: [PATCH 55/69] Allow debug_assertions in short-benchmarks CI job (#1711) Add debug_assertions to WASM builds in the short-benchmarks CI job. Disallow warnings, show a full backtrace and remove the WASM output colour to improve information from this CI step in the event of a failure. This is motivated by a [case](https://github.com/paritytech/polkadot-sdk/pull/1676/commits/23d64918be3ca68413050383c5d8a73f63d451dd) where a benchmark was misconfigured in master and failing to send notifications, but passing CI. We don't want this to fail if the problem happens in production, but ideally we'd know from the CI if it was misconfigured and the messages weren't getting sent, as that could (would?) affect the weights that the benchmark generates. Enabling debug-assertions in WASM builds for the short-benchmarks would catch "soft" failures like this in future. --- .gitlab/pipeline/short-benchmarks.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/.gitlab/pipeline/short-benchmarks.yml b/.gitlab/pipeline/short-benchmarks.yml index 5b0ea276773d..5bfe4b729e62 100644 --- a/.gitlab/pipeline/short-benchmarks.yml +++ b/.gitlab/pipeline/short-benchmarks.yml @@ -15,6 +15,12 @@ short-benchmark-westend: &short-bench artifacts: true variables: RUNTIME: westend + # Enable debug assertions since we are running optimized builds for testing + # but still want to have debug assertions. + RUSTFLAGS: "-C debug-assertions -D warnings" + RUST_BACKTRACE: "full" + WASM_BUILD_NO_COLOR: 1 + WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings" tags: - benchmark script: @@ -32,6 +38,12 @@ short-benchmark-westend: &short-bench artifacts: true variables: RUNTIME_CHAIN: benchmarked-runtime-chain + # Enable debug assertions since we are running optimized builds for testing + # but still want to have debug assertions. + RUSTFLAGS: "-C debug-assertions -D warnings" + RUST_BACKTRACE: "full" + WASM_BUILD_NO_COLOR: 1 + WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings" tags: - benchmark script: From a846b7460465ddcb70450359b0c92a1be59afb2f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Sep 2023 23:25:46 +0200 Subject: [PATCH 56/69] Bump directories from 4.0.1 to 5.0.1 (#1656) Bumps [directories](https://github.com/soc/directories-rs) from 4.0.1 to 5.0.1.
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=directories&package-manager=cargo&previous-version=4.0.1&new-version=5.0.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 17 ++++++++++++----- substrate/client/service/Cargo.toml | 2 +- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dade37f9a025..683289adb45e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4367,9 +4367,9 @@ dependencies = [ [[package]] name = "directories" -version = "4.0.1" +version = "5.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f51c5d4ddabd36886dd3e1438cb358cdcb0d7c499cb99cb4ac2e38e18b5cb210" +checksum = "9a49173b84e034382284f27f1af4dcbbd231ffa358c0fe316541a7337f376a35" dependencies = [ "dirs-sys", ] @@ -4386,13 +4386,14 @@ dependencies = [ [[package]] name = "dirs-sys" -version = "0.3.7" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6" +checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" dependencies = [ "libc", + "option-ext", "redox_users", - "winapi", + "windows-sys 0.48.0", ] @@ -8694,6 +8695,12 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +[[package]] +name = "option-ext" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + [[package]] name = "orchestra" version = "0.3.3" diff --git a/substrate/client/service/Cargo.toml b/substrate/client/service/Cargo.toml index ccf23bc8994b..2386bebf24d2 100644 --- a/substrate/client/service/Cargo.toml +++ b/substrate/client/service/Cargo.toml @@ -80,7 +80,7 @@ tracing-futures = { version = "0.2.4" } async-trait = "0.1.57" tokio = { version = "1.22.0", features = ["time", "rt-multi-thread", "parking_lot"] } tempfile = "3.1.0" -directories = "4.0.1" +directories = "5.0.1" static_init = "1.0.3" [dev-dependencies] From ab3a3bc2786673bfda47646a20f871b8a2e4d59d Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Wed, 27 Sep 2023 11:58:39 +0200 Subject: [PATCH 57/69] `BlockId` removal: `tx-pool` refactor (#1678) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It changes the following APIs:
- trait `ChainApi`
  - `validate_transaction`
- trait `TransactionPool`
  - `submit_at`
  - `submit_one`
  - `submit_and_watch`

and some implementation details, in particular:
- impl `Pool`
  - `submit_at`
  - `resubmit_at`
  - `submit_one`
  - `submit_and_watch`
  - `prune_known`
  - `prune`
  - `prune_tags`
  - `resolve_block_number`
  - `verify`
  - `verify_one`
- the revalidation queue

All tests are also adjusted.
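As a rough sketch of the resulting call-site change (names and bounds assumed from the trait definitions in the diffs below, not a verbatim excerpt): callers now pass the block hash directly instead of wrapping it in a `BlockId`.

```rust
use sc_transaction_pool_api::{TransactionFor, TransactionPool, TransactionSource, TxHash};
use sp_runtime::traits::Block as BlockT;

// Submit one extrinsic at the given best-block hash. Before this refactor the
// first argument was `&BlockId::hash(best_hash)`; now the hash is passed as-is,
// so the pool no longer needs to resolve a `BlockId` internally.
async fn submit_at_best<P: TransactionPool>(
    pool: &P,
    best_hash: <P::Block as BlockT>::Hash,
    xt: TransactionFor<P>,
) -> Result<TxHash<P>, P::Error> {
    pool.submit_one(best_hash, TransactionSource::External, xt).await
}
```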
--------- Co-authored-by: command-bot <> Co-authored-by: Bastian Köcher --- .../service/benches/transaction_throughput.rs | 4 +- substrate/bin/node/bench/src/construct.rs | 8 +- substrate/bin/node/bench/src/txpool.rs | 6 +- .../bin/node/cli/benches/transaction_pool.rs | 4 +- .../basic-authorship/src/basic_authorship.rs | 47 +- .../client/consensus/manual-seal/src/lib.rs | 23 +- .../src/transaction/transaction.rs | 8 +- substrate/client/rpc/src/author/mod.rs | 30 +- substrate/client/service/src/lib.rs | 14 +- substrate/client/service/test/src/lib.rs | 11 +- .../client/transaction-pool/api/src/lib.rs | 11 +- .../client/transaction-pool/benches/basics.rs | 37 +- substrate/client/transaction-pool/src/api.rs | 27 +- .../client/transaction-pool/src/graph/pool.rs | 168 +++--- substrate/client/transaction-pool/src/lib.rs | 44 +- .../transaction-pool/src/revalidation.rs | 109 +++- .../client/transaction-pool/src/tests.rs | 20 +- .../client/transaction-pool/tests/pool.rs | 480 ++++++++++-------- .../runtime/transaction-pool/src/lib.rs | 12 +- substrate/utils/frame/rpc/system/src/lib.rs | 6 +- 20 files changed, 609 insertions(+), 460 deletions(-) diff --git a/cumulus/test/service/benches/transaction_throughput.rs b/cumulus/test/service/benches/transaction_throughput.rs index 83981a91d46f..81ecc84db7bf 100644 --- a/cumulus/test/service/benches/transaction_throughput.rs +++ b/cumulus/test/service/benches/transaction_throughput.rs @@ -21,7 +21,7 @@ use cumulus_test_runtime::{AccountId, BalancesCall, ExistentialDeposit, SudoCall use futures::{future, StreamExt}; use sc_transaction_pool_api::{TransactionPool as _, TransactionSource, TransactionStatus}; use sp_core::{crypto::Pair, sr25519}; -use sp_runtime::{generic::BlockId, OpaqueExtrinsic}; +use sp_runtime::OpaqueExtrinsic; use cumulus_primitives_core::ParaId; use cumulus_test_service::{ @@ -117,7 +117,7 @@ async fn submit_tx_and_wait_for_inclusion( let best_hash = client.chain_info().best_hash; let mut watch = tx_pool - .submit_and_watch(&BlockId::Hash(best_hash), TransactionSource::External, tx.clone()) + .submit_and_watch(best_hash, TransactionSource::External, tx.clone()) .await .expect("Submits tx to pool") .fuse(); diff --git a/substrate/bin/node/bench/src/construct.rs b/substrate/bin/node/bench/src/construct.rs index f14f89fcd3ab..23d0a0cc1ee5 100644 --- a/substrate/bin/node/bench/src/construct.rs +++ b/substrate/bin/node/bench/src/construct.rs @@ -35,7 +35,7 @@ use sc_transaction_pool_api::{ }; use sp_consensus::{Environment, Proposer}; use sp_inherents::InherentDataProvider; -use sp_runtime::{generic::BlockId, traits::NumberFor, OpaqueExtrinsic}; +use sp_runtime::{traits::NumberFor, OpaqueExtrinsic}; use crate::{ common::SizeType, @@ -233,7 +233,7 @@ impl sc_transaction_pool_api::TransactionPool for Transactions { /// Returns a future that imports a bunch of unverified transactions to the pool. fn submit_at( &self, - _at: &BlockId, + _at: Self::Hash, _source: TransactionSource, _xts: Vec>, ) -> PoolFuture>, Self::Error> { @@ -243,7 +243,7 @@ impl sc_transaction_pool_api::TransactionPool for Transactions { /// Returns a future that imports one unverified transaction to the pool. 
fn submit_one( &self, - _at: &BlockId, + _at: Self::Hash, _source: TransactionSource, _xt: TransactionFor, ) -> PoolFuture, Self::Error> { @@ -252,7 +252,7 @@ impl sc_transaction_pool_api::TransactionPool for Transactions { fn submit_and_watch( &self, - _at: &BlockId, + _at: Self::Hash, _source: TransactionSource, _xt: TransactionFor, ) -> PoolFuture>>, Self::Error> { diff --git a/substrate/bin/node/bench/src/txpool.rs b/substrate/bin/node/bench/src/txpool.rs index a3524ac5bc89..e56bb559a7b5 100644 --- a/substrate/bin/node/bench/src/txpool.rs +++ b/substrate/bin/node/bench/src/txpool.rs @@ -27,7 +27,6 @@ use node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes}; use sc_transaction_pool::BasicPool; use sc_transaction_pool_api::{TransactionPool, TransactionSource}; -use sp_runtime::generic::BlockId; use crate::core::{self, Mode, Path}; @@ -58,10 +57,11 @@ impl core::BenchmarkDescription for PoolBenchmarkDescription { impl core::Benchmark for PoolBenchmark { fn run(&mut self, mode: Mode) -> std::time::Duration { let context = self.database.create_context(); + let genesis_hash = context.client.chain_info().genesis_hash; let _ = context .client - .runtime_version_at(context.client.chain_info().genesis_hash) + .runtime_version_at(genesis_hash) .expect("Failed to get runtime version") .spec_version; @@ -90,7 +90,7 @@ impl core::Benchmark for PoolBenchmark { let start = std::time::Instant::now(); let submissions = generated_transactions .into_iter() - .map(|tx| txpool.submit_one(&BlockId::Number(0), TransactionSource::External, tx)); + .map(|tx| txpool.submit_one(genesis_hash, TransactionSource::External, tx)); futures::executor::block_on(futures::future::join_all(submissions)); let elapsed = start.elapsed(); diff --git a/substrate/bin/node/cli/benches/transaction_pool.rs b/substrate/bin/node/cli/benches/transaction_pool.rs index d3e8c02a958f..d21edc55bbac 100644 --- a/substrate/bin/node/cli/benches/transaction_pool.rs +++ b/substrate/bin/node/cli/benches/transaction_pool.rs @@ -34,7 +34,7 @@ use sc_transaction_pool::PoolLimit; use sc_transaction_pool_api::{TransactionPool as _, TransactionSource, TransactionStatus}; use sp_core::{crypto::Pair, sr25519}; use sp_keyring::Sr25519Keyring; -use sp_runtime::{generic::BlockId, OpaqueExtrinsic}; +use sp_runtime::OpaqueExtrinsic; use tokio::runtime::Handle; fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { @@ -191,7 +191,7 @@ async fn submit_tx_and_wait_for_inclusion( let best_hash = client.chain_info().best_hash; let mut watch = tx_pool - .submit_and_watch(&BlockId::Hash(best_hash), TransactionSource::External, tx.clone()) + .submit_and_watch(best_hash, TransactionSource::External, tx.clone()) .await .expect("Submits tx to pool") .fuse(); diff --git a/substrate/client/basic-authorship/src/basic_authorship.rs b/substrate/client/basic-authorship/src/basic_authorship.rs index b3a8f0d8970b..0fb61b6fab1f 100644 --- a/substrate/client/basic-authorship/src/basic_authorship.rs +++ b/substrate/client/basic-authorship/src/basic_authorship.rs @@ -642,8 +642,8 @@ mod tests { client.clone(), ); - block_on(txpool.submit_at(&BlockId::number(0), SOURCE, vec![extrinsic(0), extrinsic(1)])) - .unwrap(); + let hashof0 = client.info().genesis_hash; + block_on(txpool.submit_at(hashof0, SOURCE, vec![extrinsic(0), extrinsic(1)])).unwrap(); block_on( txpool.maintain(chain_event( @@ -658,7 +658,7 @@ mod tests { let cell = Mutex::new((false, time::Instant::now())); let proposer = proposer_factory.init_with_now( - 
&client.expect_header(client.info().genesis_hash).unwrap(), + &client.expect_header(hashof0).unwrap(), Box::new(move || { let mut value = cell.lock(); if !value.0 { @@ -736,7 +736,7 @@ mod tests { let genesis_hash = client.info().best_hash; - block_on(txpool.submit_at(&BlockId::number(0), SOURCE, vec![extrinsic(0)])).unwrap(); + block_on(txpool.submit_at(genesis_hash, SOURCE, vec![extrinsic(0)])).unwrap(); block_on( txpool.maintain(chain_event( @@ -800,7 +800,7 @@ mod tests { }; block_on(txpool.submit_at( - &BlockId::number(0), + client.info().genesis_hash, SOURCE, vec![medium(0), medium(1), huge(2), medium(3), huge(4), medium(5), medium(6)], )) @@ -897,9 +897,8 @@ mod tests { spawner.clone(), client.clone(), ); - let genesis_header = client - .expect_header(client.info().genesis_hash) - .expect("there should be header"); + let genesis_hash = client.info().genesis_hash; + let genesis_header = client.expect_header(genesis_hash).expect("there should be header"); let extrinsics_num = 5; let extrinsics = std::iter::once( @@ -922,7 +921,7 @@ mod tests { .sum::() + Vec::::new().encoded_size(); - block_on(txpool.submit_at(&BlockId::number(0), SOURCE, extrinsics.clone())).unwrap(); + block_on(txpool.submit_at(genesis_hash, SOURCE, extrinsics.clone())).unwrap(); block_on(txpool.maintain(chain_event(genesis_header.clone()))); @@ -999,6 +998,7 @@ mod tests { spawner.clone(), client.clone(), ); + let genesis_hash = client.info().genesis_hash; let tiny = |nonce| { ExtrinsicBuilder::new_fill_block(Perbill::from_parts(TINY)).nonce(nonce).build() @@ -1011,7 +1011,7 @@ mod tests { block_on( txpool.submit_at( - &BlockId::number(0), + genesis_hash, SOURCE, // add 2 * MAX_SKIPPED_TRANSACTIONS that exhaust resources (0..MAX_SKIPPED_TRANSACTIONS * 2) @@ -1024,13 +1024,9 @@ mod tests { ) .unwrap(); - block_on( - txpool.maintain(chain_event( - client - .expect_header(client.info().genesis_hash) - .expect("there should be header"), - )), - ); + block_on(txpool.maintain(chain_event( + client.expect_header(genesis_hash).expect("there should be header"), + ))); assert_eq!(txpool.ready().count(), MAX_SKIPPED_TRANSACTIONS * 3); let mut proposer_factory = @@ -1038,7 +1034,7 @@ mod tests { let cell = Mutex::new(time::Instant::now()); let proposer = proposer_factory.init_with_now( - &client.expect_header(client.info().genesis_hash).unwrap(), + &client.expect_header(genesis_hash).unwrap(), Box::new(move || { let mut value = cell.lock(); let old = *value; @@ -1071,6 +1067,7 @@ mod tests { spawner.clone(), client.clone(), ); + let genesis_hash = client.info().genesis_hash; let tiny = |who| { ExtrinsicBuilder::new_fill_block(Perbill::from_parts(TINY)) @@ -1086,7 +1083,7 @@ mod tests { block_on( txpool.submit_at( - &BlockId::number(0), + genesis_hash, SOURCE, (0..MAX_SKIPPED_TRANSACTIONS + 2) .into_iter() @@ -1098,13 +1095,9 @@ mod tests { ) .unwrap(); - block_on( - txpool.maintain(chain_event( - client - .expect_header(client.info().genesis_hash) - .expect("there should be header"), - )), - ); + block_on(txpool.maintain(chain_event( + client.expect_header(genesis_hash).expect("there should be header"), + ))); assert_eq!(txpool.ready().count(), MAX_SKIPPED_TRANSACTIONS * 2 + 4); let mut proposer_factory = @@ -1114,7 +1107,7 @@ mod tests { let cell = Arc::new(Mutex::new((0, time::Instant::now()))); let cell2 = cell.clone(); let proposer = proposer_factory.init_with_now( - &client.expect_header(client.info().genesis_hash).unwrap(), + &client.expect_header(genesis_hash).unwrap(), Box::new(move || { let mut value = cell.lock(); 
let (called, old) = *value; diff --git a/substrate/client/consensus/manual-seal/src/lib.rs b/substrate/client/consensus/manual-seal/src/lib.rs index 1e5db966e66d..41cd5f3127e8 100644 --- a/substrate/client/consensus/manual-seal/src/lib.rs +++ b/substrate/client/consensus/manual-seal/src/lib.rs @@ -351,7 +351,7 @@ mod tests { use sc_transaction_pool::{BasicPool, FullChainApi, Options, RevalidationType}; use sc_transaction_pool_api::{MaintainedTransactionPool, TransactionPool, TransactionSource}; use sp_inherents::InherentData; - use sp_runtime::generic::{BlockId, Digest, DigestItem}; + use sp_runtime::generic::{Digest, DigestItem}; use substrate_test_runtime_client::{ AccountKeyring::*, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, }; @@ -400,10 +400,11 @@ mod tests { let client = Arc::new(client); let spawner = sp_core::testing::TaskExecutor::new(); let genesis_hash = client.info().genesis_hash; + let pool_api = Arc::new(FullChainApi::new(client.clone(), None, &spawner.clone())); let pool = Arc::new(BasicPool::with_revalidation_type( Options::default(), true.into(), - api(), + pool_api, None, RevalidationType::Full, spawner.clone(), @@ -444,7 +445,7 @@ mod tests { rt.block_on(future); }); // submit a transaction to pool. - let result = pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Alice, 0)).await; + let result = pool.submit_one(genesis_hash, SOURCE, uxt(Alice, 0)).await; // assert that it was successfully imported assert!(result.is_ok()); // assert that the background task returns ok @@ -475,10 +476,11 @@ mod tests { let client = Arc::new(client); let spawner = sp_core::testing::TaskExecutor::new(); let genesis_hash = client.info().genesis_hash; + let pool_api = Arc::new(FullChainApi::new(client.clone(), None, &spawner.clone())); let pool = Arc::new(BasicPool::with_revalidation_type( Options::default(), true.into(), - api(), + pool_api, None, RevalidationType::Full, spawner.clone(), @@ -535,7 +537,7 @@ mod tests { let mut finality_stream = client.finality_notification_stream(); // submit a transaction to pool. - let result = pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Alice, 0)).await; + let result = pool.submit_one(genesis_hash, SOURCE, uxt(Alice, 0)).await; // assert that it was successfully imported assert!(result.is_ok()); // assert that the background task returns ok @@ -571,10 +573,11 @@ mod tests { let client = Arc::new(client); let spawner = sp_core::testing::TaskExecutor::new(); let genesis_hash = client.info().genesis_hash; + let pool_api = Arc::new(FullChainApi::new(client.clone(), None, &spawner.clone())); let pool = Arc::new(BasicPool::with_revalidation_type( Options::default(), true.into(), - api(), + pool_api, None, RevalidationType::Full, spawner.clone(), @@ -602,7 +605,7 @@ mod tests { rt.block_on(future); }); // submit a transaction to pool. - let result = pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Alice, 0)).await; + let result = pool.submit_one(genesis_hash, SOURCE, uxt(Alice, 0)).await; // assert that it was successfully imported assert!(result.is_ok()); let (tx, rx) = futures::channel::oneshot::channel(); @@ -688,7 +691,7 @@ mod tests { rt.block_on(future); }); // submit a transaction to pool. 
- let result = pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Alice, 0)).await; + let result = pool.submit_one(genesis_hash, SOURCE, uxt(Alice, 0)).await; // assert that it was successfully imported assert!(result.is_ok()); @@ -719,7 +722,7 @@ mod tests { } ); - assert!(pool.submit_one(&BlockId::Number(1), SOURCE, uxt(Alice, 1)).await.is_ok()); + assert!(pool.submit_one(created_block.hash, SOURCE, uxt(Alice, 1)).await.is_ok()); let header = client.header(created_block.hash).expect("db error").expect("imported above"); assert_eq!(header.number, 1); @@ -741,7 +744,7 @@ mod tests { .is_ok()); assert_matches::assert_matches!(rx1.await.expect("should be no error receiving"), Ok(_)); - assert!(pool.submit_one(&BlockId::Number(1), SOURCE, uxt(Bob, 0)).await.is_ok()); + assert!(pool.submit_one(created_block.hash, SOURCE, uxt(Bob, 0)).await.is_ok()); let (tx2, rx2) = futures::channel::oneshot::channel(); assert!(sink .send(EngineCommand::SealNewBlock { diff --git a/substrate/client/rpc-spec-v2/src/transaction/transaction.rs b/substrate/client/rpc-spec-v2/src/transaction/transaction.rs index 44f4bd36c8b8..fe16310aeffa 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/transaction.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/transaction.rs @@ -46,7 +46,7 @@ use std::sync::Arc; use sp_api::ProvideRuntimeApi; use sp_blockchain::HeaderBackend; use sp_core::Bytes; -use sp_runtime::{generic, traits::Block as BlockT}; +use sp_runtime::traits::Block as BlockT; use codec::Decode; use futures::{FutureExt, StreamExt, TryFutureExt}; @@ -110,11 +110,7 @@ where let submit = self .pool - .submit_and_watch( - &generic::BlockId::hash(best_block_hash), - TX_SOURCE, - decoded_extrinsic, - ) + .submit_and_watch(best_block_hash, TX_SOURCE, decoded_extrinsic) .map_err(|e| { e.into_pool_error() .map(Error::from) diff --git a/substrate/client/rpc/src/author/mod.rs b/substrate/client/rpc/src/author/mod.rs index feee22641ef3..55d0a504aa67 100644 --- a/substrate/client/rpc/src/author/mod.rs +++ b/substrate/client/rpc/src/author/mod.rs @@ -41,7 +41,7 @@ use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_blockchain::HeaderBackend; use sp_core::Bytes; use sp_keystore::{KeystoreExt, KeystorePtr}; -use sp_runtime::{generic, traits::Block as BlockT}; +use sp_runtime::traits::Block as BlockT; use sp_session::SessionKeys; use self::error::{Error, Result}; @@ -97,15 +97,12 @@ where Err(err) => return Err(Error::Client(Box::new(err)).into()), }; let best_block_hash = self.client.info().best_hash; - self.pool - .submit_one(&generic::BlockId::hash(best_block_hash), TX_SOURCE, xt) - .await - .map_err(|e| { - e.into_pool_error() - .map(|e| Error::Pool(e)) - .unwrap_or_else(|e| Error::Verification(Box::new(e))) - .into() - }) + self.pool.submit_one(best_block_hash, TX_SOURCE, xt).await.map_err(|e| { + e.into_pool_error() + .map(|e| Error::Pool(e)) + .unwrap_or_else(|e| Error::Verification(Box::new(e))) + .into() + }) } fn insert_key(&self, key_type: String, suri: String, public: Bytes) -> RpcResult<()> { @@ -191,14 +188,11 @@ where }, }; - let submit = self - .pool - .submit_and_watch(&generic::BlockId::hash(best_block_hash), TX_SOURCE, dxt) - .map_err(|e| { - e.into_pool_error() - .map(error::Error::from) - .unwrap_or_else(|e| error::Error::Verification(Box::new(e))) - }); + let submit = self.pool.submit_and_watch(best_block_hash, TX_SOURCE, dxt).map_err(|e| { + e.into_pool_error() + .map(error::Error::from) + .unwrap_or_else(|e| error::Error::Verification(Box::new(e))) + }); let fut = async move { let stream = match 
submit.await { diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs index cd720e1c1e09..ff9eb982b862 100644 --- a/substrate/client/service/src/lib.rs +++ b/substrate/client/service/src/lib.rs @@ -48,10 +48,7 @@ use sc_network_sync::SyncingService; use sc_utils::mpsc::TracingUnboundedReceiver; use sp_blockchain::HeaderMetadata; use sp_consensus::SyncOracle; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, Header as HeaderT}, -}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; pub use self::{ builder::{ @@ -481,10 +478,8 @@ where }, }; - let best_block_id = BlockId::hash(self.client.info().best_hash); - let import_future = self.pool.submit_one( - &best_block_id, + self.client.info().best_hash, sc_transaction_pool_api::TransactionSource::External, uxt, ); @@ -549,10 +544,9 @@ mod tests { to: AccountKeyring::Bob.into(), } .into_unchecked_extrinsic(); - block_on(pool.submit_one(&BlockId::hash(best.hash()), source, transaction.clone())) - .unwrap(); + block_on(pool.submit_one(best.hash(), source, transaction.clone())).unwrap(); block_on(pool.submit_one( - &BlockId::hash(best.hash()), + best.hash(), source, ExtrinsicBuilder::new_call_do_not_propagate().nonce(1).build(), )) diff --git a/substrate/client/service/test/src/lib.rs b/substrate/client/service/test/src/lib.rs index 38a811acc740..9700c7643c48 100644 --- a/substrate/client/service/test/src/lib.rs +++ b/substrate/client/service/test/src/lib.rs @@ -34,7 +34,6 @@ use sc_service::{ RuntimeGenesis, SpawnTaskHandle, TaskManager, }; use sc_transaction_pool_api::TransactionPool; -use sp_api::BlockId; use sp_blockchain::HeaderBackend; use sp_runtime::traits::Block as BlockT; use std::{iter, net::Ipv4Addr, pin::Pin, sync::Arc, task::Context, time::Duration}; @@ -501,15 +500,13 @@ pub fn sync( info!("Checking extrinsic propagation"); let first_service = network.full_nodes[0].1.clone(); let first_user_data = &network.full_nodes[0].2; - let best_block = BlockId::number(first_service.client().info().best_number); + let best_block = first_service.client().info().best_hash; let extrinsic = extrinsic_factory(&first_service, first_user_data); let source = sc_transaction_pool_api::TransactionSource::External; - futures::executor::block_on(first_service.transaction_pool().submit_one( - &best_block, - source, - extrinsic, - )) + futures::executor::block_on( + first_service.transaction_pool().submit_one(best_block, source, extrinsic), + ) .expect("failed to submit extrinsic"); network.run_until_all_full(|_index, service| service.transaction_pool().ready().count() == 1); diff --git a/substrate/client/transaction-pool/api/src/lib.rs b/substrate/client/transaction-pool/api/src/lib.rs index 73cc513708d2..a795917528f9 100644 --- a/substrate/client/transaction-pool/api/src/lib.rs +++ b/substrate/client/transaction-pool/api/src/lib.rs @@ -26,10 +26,7 @@ use codec::Codec; use futures::{Future, Stream}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use sp_core::offchain::TransactionPoolExt; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, Member, NumberFor}, -}; +use sp_runtime::traits::{Block as BlockT, Member, NumberFor}; use std::{collections::HashMap, hash::Hash, marker::PhantomData, pin::Pin, sync::Arc}; const LOG_TARGET: &str = "txpool::api"; @@ -202,7 +199,7 @@ pub trait TransactionPool: Send + Sync { /// Returns a future that imports a bunch of unverified transactions to the pool. 
fn submit_at( &self, - at: &BlockId, + at: ::Hash, source: TransactionSource, xts: Vec>, ) -> PoolFuture, Self::Error>>, Self::Error>; @@ -210,7 +207,7 @@ pub trait TransactionPool: Send + Sync { /// Returns a future that imports one unverified transaction to the pool. fn submit_one( &self, - at: &BlockId, + at: ::Hash, source: TransactionSource, xt: TransactionFor, ) -> PoolFuture, Self::Error>; @@ -219,7 +216,7 @@ pub trait TransactionPool: Send + Sync { /// pool. fn submit_and_watch( &self, - at: &BlockId, + at: ::Hash, source: TransactionSource, xt: TransactionFor, ) -> PoolFuture>>, Self::Error>; diff --git a/substrate/client/transaction-pool/benches/basics.rs b/substrate/client/transaction-pool/benches/basics.rs index d114acc343d5..0caf00bf2955 100644 --- a/substrate/client/transaction-pool/benches/basics.rs +++ b/substrate/client/transaction-pool/benches/basics.rs @@ -33,6 +33,7 @@ use sp_runtime::{ ValidTransaction, }, }; +use std::sync::Arc; use substrate_test_runtime::{AccountId, Block, Extrinsic, ExtrinsicBuilder, TransferData, H256}; #[derive(Clone, Debug, Default)] @@ -61,7 +62,7 @@ impl ChainApi for TestApi { fn validate_transaction( &self, - at: &BlockId, + at: ::Hash, _source: TransactionSource, uxt: ::Extrinsic, ) -> Self::ValidationFuture { @@ -70,7 +71,7 @@ impl ChainApi for TestApi { let nonce = transfer.nonce; let from = transfer.from; - match self.block_id_to_number(at) { + match self.block_id_to_number(&BlockId::Hash(at)) { Ok(Some(num)) if num > 5 => return ready(Ok(Err(InvalidTransaction::Stale.into()))), _ => {}, } @@ -94,6 +95,8 @@ impl ChainApi for TestApi { ) -> Result>, Self::Error> { Ok(match at { BlockId::Number(num) => Some(*num), + BlockId::Hash(hash) if *hash == H256::from_low_u64_be(hash.to_low_u64_be()) => + Some(hash.to_low_u64_be()), BlockId::Hash(_) => None, }) } @@ -104,7 +107,7 @@ impl ChainApi for TestApi { ) -> Result::Hash>, Self::Error> { Ok(match at { BlockId::Number(num) => Some(H256::from_low_u64_be(*num)).into(), - BlockId::Hash(_) => None, + BlockId::Hash(hash) => Some(*hash), }) } @@ -137,7 +140,7 @@ fn uxt(transfer: TransferData) -> Extrinsic { ExtrinsicBuilder::new_bench_call(transfer).build() } -fn bench_configured(pool: Pool, number: u64) { +fn bench_configured(pool: Pool, number: u64, api: Arc) { let source = TransactionSource::External; let mut futures = Vec::new(); let mut tags = Vec::new(); @@ -151,7 +154,12 @@ fn bench_configured(pool: Pool, number: u64) { }); tags.push(to_tag(nonce, AccountId::from_h256(H256::from_low_u64_be(1)))); - futures.push(pool.submit_one(&BlockId::Number(1), source, xt)); + + futures.push(pool.submit_one( + api.block_id_to_hash(&BlockId::Number(1)).unwrap().unwrap(), + source, + xt, + )); } let res = block_on(futures::future::join_all(futures.into_iter())); @@ -162,7 +170,12 @@ fn bench_configured(pool: Pool, number: u64) { // Prune all transactions. 
let block_num = 6; - block_on(pool.prune_tags(&BlockId::Number(block_num), tags, vec![])).expect("Prune failed"); + block_on(pool.prune_tags( + api.block_id_to_hash(&BlockId::Number(block_num)).unwrap().unwrap(), + tags, + vec![], + )) + .expect("Prune failed"); // pool is empty assert_eq!(pool.validated_pool().status().ready, 0); @@ -172,19 +185,15 @@ fn bench_configured(pool: Pool, number: u64) { fn benchmark_main(c: &mut Criterion) { c.bench_function("sequential 50 tx", |b| { b.iter(|| { - bench_configured( - Pool::new(Default::default(), true.into(), TestApi::new_dependant().into()), - 50, - ); + let api = Arc::from(TestApi::new_dependant()); + bench_configured(Pool::new(Default::default(), true.into(), api.clone()), 50, api); }); }); c.bench_function("random 100 tx", |b| { b.iter(|| { - bench_configured( - Pool::new(Default::default(), true.into(), TestApi::default().into()), - 100, - ); + let api = Arc::from(TestApi::default()); + bench_configured(Pool::new(Default::default(), true.into(), api.clone()), 100, api); }); }); } diff --git a/substrate/client/transaction-pool/src/api.rs b/substrate/client/transaction-pool/src/api.rs index 871d8e9c8170..cccaad7c8994 100644 --- a/substrate/client/transaction-pool/src/api.rs +++ b/substrate/client/transaction-pool/src/api.rs @@ -133,13 +133,12 @@ where fn validate_transaction( &self, - at: &BlockId, + at: ::Hash, source: TransactionSource, uxt: graph::ExtrinsicFor, ) -> Self::ValidationFuture { let (tx, rx) = oneshot::channel(); let client = self.client.clone(); - let at = *at; let validation_pool = self.validation_pool.clone(); let metrics = self.metrics.clone(); @@ -151,7 +150,7 @@ where .await .send( async move { - let res = validate_transaction_blocking(&*client, &at, source, uxt); + let res = validate_transaction_blocking(&*client, at, source, uxt); let _ = tx.send(res); metrics.report(|m| m.validations_finished.inc()); } @@ -209,7 +208,7 @@ where /// This method will call into the runtime to perform the validation. fn validate_transaction_blocking( client: &Client, - at: &BlockId, + at: Block::Hash, source: TransactionSource, uxt: graph::ExtrinsicFor>, ) -> error::Result @@ -225,14 +224,10 @@ where { sp_tracing::within_span!(sp_tracing::Level::TRACE, "validate_transaction"; { - let block_hash = client.to_hash(at) - .map_err(|e| Error::RuntimeApi(e.to_string()))? - .ok_or_else(|| Error::RuntimeApi(format!("Could not get hash for block `{:?}`.", at)))?; - let runtime_api = client.runtime_api(); let api_version = sp_tracing::within_span! { sp_tracing::Level::TRACE, "check_version"; runtime_api - .api_version::>(block_hash) + .api_version::>(at) .map_err(|e| Error::RuntimeApi(e.to_string()))? .ok_or_else(|| Error::RuntimeApi( format!("Could not find `TaggedTransactionQueue` api for block `{:?}`.", at) @@ -245,31 +240,31 @@ where sp_tracing::Level::TRACE, "runtime::validate_transaction"; { if api_version >= 3 { - runtime_api.validate_transaction(block_hash, source, uxt, block_hash) + runtime_api.validate_transaction(at, source, uxt, at) .map_err(|e| Error::RuntimeApi(e.to_string())) } else { - let block_number = client.to_number(at) + let block_number = client.to_number(&BlockId::Hash(at)) .map_err(|e| Error::RuntimeApi(e.to_string()))? .ok_or_else(|| Error::RuntimeApi(format!("Could not get number for block `{:?}`.", at)) )?; // The old versions require us to call `initialize_block` before. 
- runtime_api.initialize_block(block_hash, &sp_runtime::traits::Header::new( + runtime_api.initialize_block(at, &sp_runtime::traits::Header::new( block_number + sp_runtime::traits::One::one(), Default::default(), Default::default(), - block_hash, + at, Default::default()), ).map_err(|e| Error::RuntimeApi(e.to_string()))?; if api_version == 2 { #[allow(deprecated)] // old validate_transaction - runtime_api.validate_transaction_before_version_3(block_hash, source, uxt) + runtime_api.validate_transaction_before_version_3(at, source, uxt) .map_err(|e| Error::RuntimeApi(e.to_string())) } else { #[allow(deprecated)] // old validate_transaction - runtime_api.validate_transaction_before_version_2(block_hash, uxt) + runtime_api.validate_transaction_before_version_2(at, uxt) .map_err(|e| Error::RuntimeApi(e.to_string())) } } @@ -294,7 +289,7 @@ where /// the runtime locally. pub fn validate_transaction_blocking( &self, - at: &BlockId, + at: Block::Hash, source: TransactionSource, uxt: graph::ExtrinsicFor, ) -> error::Result { diff --git a/substrate/client/transaction-pool/src/graph/pool.rs b/substrate/client/transaction-pool/src/graph/pool.rs index 4d34737a7ba7..5305b5f1c12e 100644 --- a/substrate/client/transaction-pool/src/graph/pool.rs +++ b/substrate/client/transaction-pool/src/graph/pool.rs @@ -71,7 +71,7 @@ pub trait ChainApi: Send + Sync { /// Verify extrinsic at given block. fn validate_transaction( &self, - at: &BlockId, + at: ::Hash, source: TransactionSource, uxt: ExtrinsicFor, ) -> Self::ValidationFuture; @@ -154,7 +154,7 @@ impl Pool { /// Imports a bunch of unverified extrinsics to the pool pub async fn submit_at( &self, - at: &BlockId, + at: ::Hash, source: TransactionSource, xts: impl IntoIterator>, ) -> Result, B::Error>>, B::Error> { @@ -168,7 +168,7 @@ impl Pool { /// This does not check if a transaction is banned, before we verify it again. pub async fn resubmit_at( &self, - at: &BlockId, + at: ::Hash, source: TransactionSource, xts: impl IntoIterator>, ) -> Result, B::Error>>, B::Error> { @@ -180,7 +180,7 @@ impl Pool { /// Imports one unverified extrinsic to the pool pub async fn submit_one( &self, - at: &BlockId, + at: ::Hash, source: TransactionSource, xt: ExtrinsicFor, ) -> Result, B::Error> { @@ -191,11 +191,11 @@ impl Pool { /// Import a single extrinsic and starts to watch its progress in the pool. pub async fn submit_and_watch( &self, - at: &BlockId, + at: ::Hash, source: TransactionSource, xt: ExtrinsicFor, ) -> Result, ExtrinsicHash>, B::Error> { - let block_number = self.resolve_block_number(at)?; + let block_number = self.resolve_block_number(&BlockId::Hash(at))?; let (_, tx) = self .verify_one(at, block_number, source, xt, CheckBannedBeforeVerify::Yes) .await; @@ -246,8 +246,8 @@ impl Pool { /// their provided tags from there. Otherwise we query the runtime at the `parent` block. pub async fn prune( &self, - at: &BlockId, - parent: &BlockId, + at: ::Hash, + parent: ::Hash, extrinsics: &[ExtrinsicFor], ) -> Result<(), B::Error> { log::debug!( @@ -324,7 +324,7 @@ impl Pool { /// prevent importing them in the (near) future. 
pub async fn prune_tags( &self, - at: &BlockId, + at: ::Hash, tags: impl IntoIterator, known_imported_hashes: impl IntoIterator> + Clone, ) -> Result<(), B::Error> { @@ -351,7 +351,7 @@ impl Pool { // And finally - submit reverified transactions back to the pool self.validated_pool.resubmit_pruned( - at, + &BlockId::Hash(at), known_imported_hashes, pruned_hashes, reverified_transactions.into_values().collect(), @@ -373,12 +373,12 @@ impl Pool { /// Returns future that validates a bunch of transactions at given block. async fn verify( &self, - at: &BlockId, + at: ::Hash, xts: impl IntoIterator)>, check: CheckBannedBeforeVerify, ) -> Result, ValidatedTransactionFor>, B::Error> { // we need a block number to compute tx validity - let block_number = self.resolve_block_number(at)?; + let block_number = self.resolve_block_number(&BlockId::Hash(at))?; let res = futures::future::join_all( xts.into_iter() @@ -394,7 +394,7 @@ impl Pool { /// Returns future that validates single transaction at given block. async fn verify_one( &self, - block_id: &BlockId, + block_hash: ::Hash, block_number: NumberFor, source: TransactionSource, xt: ExtrinsicFor, @@ -410,7 +410,7 @@ impl Pool { let validation_result = self .validated_pool .api() - .validate_transaction(block_id, source, xt.clone()) + .validate_transaction(block_hash, source, xt.clone()) .await; let status = match validation_result { @@ -458,6 +458,7 @@ mod tests { use super::{super::base_pool::Limit, *}; use crate::tests::{pool, uxt, TestApi, INVALID_NONCE}; use assert_matches::assert_matches; + use codec::Encode; use futures::executor::block_on; use parking_lot::Mutex; use sc_transaction_pool_api::TransactionStatus; @@ -471,11 +472,11 @@ mod tests { #[test] fn should_validate_and_import_transaction() { // given - let pool = pool(); + let (pool, api) = pool(); // when let hash = block_on(pool.submit_one( - &BlockId::Number(0), + api.expect_hash_from_number(0), SOURCE, uxt(Transfer { from: Alice.into(), @@ -493,7 +494,7 @@ mod tests { #[test] fn should_reject_if_temporarily_banned() { // given - let pool = pool(); + let (pool, api) = pool(); let uxt = uxt(Transfer { from: Alice.into(), to: AccountId::from_h256(H256::from_low_u64_be(2)), @@ -503,7 +504,7 @@ mod tests { // when pool.validated_pool.ban(&Instant::now(), vec![pool.hash_of(&uxt)]); - let res = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt)); + let res = block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt)); assert_eq!(pool.validated_pool().status().ready, 0); assert_eq!(pool.validated_pool().status().future, 0); @@ -514,18 +515,19 @@ mod tests { #[test] fn should_reject_unactionable_transactions() { // given + let api = Arc::new(TestApi::default()); let pool = Pool::new( Default::default(), // the node does not author blocks false.into(), - TestApi::default().into(), + api.clone(), ); // after validation `IncludeData` will be set to non-propagable (validate_transaction mock) let uxt = ExtrinsicBuilder::new_include_data(vec![42]).build(); // when - let res = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt)); + let res = block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt)); // then assert_matches!(res.unwrap_err(), error::Error::Unactionable); @@ -535,12 +537,13 @@ mod tests { fn should_notify_about_pool_events() { let (stream, hash0, hash1) = { // given - let pool = pool(); + let (pool, api) = pool(); + let hash_of_block0 = api.expect_hash_from_number(0); let stream = pool.validated_pool().import_notification_stream(); // when let hash0 
= block_on(pool.submit_one( - &BlockId::Number(0), + hash_of_block0, SOURCE, uxt(Transfer { from: Alice.into(), @@ -551,7 +554,7 @@ mod tests { )) .unwrap(); let hash1 = block_on(pool.submit_one( - &BlockId::Number(0), + hash_of_block0, SOURCE, uxt(Transfer { from: Alice.into(), @@ -563,7 +566,7 @@ mod tests { .unwrap(); // future doesn't count let _hash = block_on(pool.submit_one( - &BlockId::Number(0), + hash_of_block0, SOURCE, uxt(Transfer { from: Alice.into(), @@ -590,9 +593,10 @@ mod tests { #[test] fn should_clear_stale_transactions() { // given - let pool = pool(); + let (pool, api) = pool(); + let hash_of_block0 = api.expect_hash_from_number(0); let hash1 = block_on(pool.submit_one( - &BlockId::Number(0), + hash_of_block0, SOURCE, uxt(Transfer { from: Alice.into(), @@ -603,7 +607,7 @@ mod tests { )) .unwrap(); let hash2 = block_on(pool.submit_one( - &BlockId::Number(0), + hash_of_block0, SOURCE, uxt(Transfer { from: Alice.into(), @@ -614,7 +618,7 @@ mod tests { )) .unwrap(); let hash3 = block_on(pool.submit_one( - &BlockId::Number(0), + hash_of_block0, SOURCE, uxt(Transfer { from: Alice.into(), @@ -641,9 +645,9 @@ mod tests { #[test] fn should_ban_mined_transactions() { // given - let pool = pool(); + let (pool, api) = pool(); let hash1 = block_on(pool.submit_one( - &BlockId::Number(0), + api.expect_hash_from_number(0), SOURCE, uxt(Transfer { from: Alice.into(), @@ -655,12 +659,12 @@ mod tests { .unwrap(); // when - block_on(pool.prune_tags(&BlockId::Number(1), vec![vec![0]], vec![hash1])).unwrap(); + block_on(pool.prune_tags(api.expect_hash_from_number(1), vec![vec![0]], vec![hash1])) + .unwrap(); // then assert!(pool.validated_pool.is_banned(&hash1)); } - use codec::Encode; #[test] fn should_limit_futures() { @@ -678,14 +682,15 @@ mod tests { let options = Options { ready: limit.clone(), future: limit.clone(), ..Default::default() }; - let pool = Pool::new(options, true.into(), TestApi::default().into()); + let api = Arc::new(TestApi::default()); + let pool = Pool::new(options, true.into(), api.clone()); - let hash1 = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, xt)).unwrap(); + let hash1 = block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, xt)).unwrap(); assert_eq!(pool.validated_pool().status().future, 1); // when let hash2 = block_on(pool.submit_one( - &BlockId::Number(0), + api.expect_hash_from_number(0), SOURCE, uxt(Transfer { from: Bob.into(), @@ -709,11 +714,12 @@ mod tests { let options = Options { ready: limit.clone(), future: limit.clone(), ..Default::default() }; - let pool = Pool::new(options, true.into(), TestApi::default().into()); + let api = Arc::new(TestApi::default()); + let pool = Pool::new(options, true.into(), api.clone()); // when block_on(pool.submit_one( - &BlockId::Number(0), + api.expect_hash_from_number(0), SOURCE, uxt(Transfer { from: Alice.into(), @@ -732,11 +738,11 @@ mod tests { #[test] fn should_reject_transactions_with_no_provides() { // given - let pool = pool(); + let (pool, api) = pool(); // when let err = block_on(pool.submit_one( - &BlockId::Number(0), + api.expect_hash_from_number(0), SOURCE, uxt(Transfer { from: Alice.into(), @@ -759,9 +765,9 @@ mod tests { #[test] fn should_trigger_ready_and_finalized() { // given - let pool = pool(); + let (pool, api) = pool(); let watcher = block_on(pool.submit_and_watch( - &BlockId::Number(0), + api.expect_hash_from_number(0), SOURCE, uxt(Transfer { from: Alice.into(), @@ -774,26 +780,25 @@ mod tests { assert_eq!(pool.validated_pool().status().ready, 1); 
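Nearly every test change in this module follows the same recipe: the `pool()` helper now also returns the `Arc<TestApi>`, and call sites swap `&BlockId::Number(n)` for `api.expect_hash_from_number(n)`. That helper works because the mock api maps block numbers to hashes deterministically (`H256::from_low_u64_be`) and accepts only hashes that round-trip back. A rough sketch of that mapping, with `[u8; 32]` standing in for `H256`:

```
// Rough sketch of the mock's number<->hash mapping (illustrative types;
// the real code uses H256 and BlockId).
type Hash = [u8; 32];

// Mirrors H256::from_low_u64_be: the block number occupies the last 8 bytes.
fn hash_from_number(n: u64) -> Hash {
    let mut h = [0u8; 32];
    h[24..].copy_from_slice(&n.to_be_bytes());
    h
}

// Mirrors the mock's `block_id_to_number`: accept only hashes that
// round-trip, i.e. that were produced by `hash_from_number`.
fn number_from_hash(h: &Hash) -> Option<u64> {
    let mut be = [0u8; 8];
    be.copy_from_slice(&h[24..]);
    let n = u64::from_be_bytes(be);
    (hash_from_number(n) == *h).then_some(n)
}

fn main() {
    let h = hash_from_number(209);
    assert_eq!(number_from_hash(&h), Some(209));
    // An arbitrary hash (like `unknown_block` in the revalidation test
    // later in this patch) does not round-trip and yields no number.
    assert_eq!(number_from_hash(&[0x13; 32]), None);
}
```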
assert_eq!(pool.validated_pool().status().future, 0); + let hash_of_block2 = api.expect_hash_from_number(2); + // when - block_on(pool.prune_tags(&BlockId::Number(2), vec![vec![0u8]], vec![])).unwrap(); + block_on(pool.prune_tags(hash_of_block2, vec![vec![0u8]], vec![])).unwrap(); assert_eq!(pool.validated_pool().status().ready, 0); assert_eq!(pool.validated_pool().status().future, 0); // then let mut stream = futures::executor::block_on_stream(watcher.into_stream()); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!( - stream.next(), - Some(TransactionStatus::InBlock((H256::from_low_u64_be(2).into(), 0))), - ); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((hash_of_block2.into(), 0))),); } #[test] fn should_trigger_ready_and_finalized_when_pruning_via_hash() { // given - let pool = pool(); + let (pool, api) = pool(); let watcher = block_on(pool.submit_and_watch( - &BlockId::Number(0), + api.expect_hash_from_number(0), SOURCE, uxt(Transfer { from: Alice.into(), @@ -806,8 +811,10 @@ mod tests { assert_eq!(pool.validated_pool().status().ready, 1); assert_eq!(pool.validated_pool().status().future, 0); + let hash_of_block2 = api.expect_hash_from_number(2); + // when - block_on(pool.prune_tags(&BlockId::Number(2), vec![vec![0u8]], vec![*watcher.hash()])) + block_on(pool.prune_tags(hash_of_block2, vec![vec![0u8]], vec![*watcher.hash()])) .unwrap(); assert_eq!(pool.validated_pool().status().ready, 0); assert_eq!(pool.validated_pool().status().future, 0); @@ -815,18 +822,17 @@ mod tests { // then let mut stream = futures::executor::block_on_stream(watcher.into_stream()); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!( - stream.next(), - Some(TransactionStatus::InBlock((H256::from_low_u64_be(2).into(), 0))), - ); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((hash_of_block2.into(), 0))),); } #[test] fn should_trigger_future_and_ready_after_promoted() { // given - let pool = pool(); + let (pool, api) = pool(); + let hash_of_block0 = api.expect_hash_from_number(0); + let watcher = block_on(pool.submit_and_watch( - &BlockId::Number(0), + hash_of_block0, SOURCE, uxt(Transfer { from: Alice.into(), @@ -841,7 +847,7 @@ mod tests { // when block_on(pool.submit_one( - &BlockId::Number(0), + hash_of_block0, SOURCE, uxt(Transfer { from: Alice.into(), @@ -862,7 +868,7 @@ mod tests { #[test] fn should_trigger_invalid_and_ban() { // given - let pool = pool(); + let (pool, api) = pool(); let uxt = uxt(Transfer { from: Alice.into(), to: AccountId::from_h256(H256::from_low_u64_be(2)), @@ -870,7 +876,8 @@ mod tests { nonce: 0, }); let watcher = - block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, uxt)).unwrap(); + block_on(pool.submit_and_watch(api.expect_hash_from_number(0), SOURCE, uxt)) + .unwrap(); assert_eq!(pool.validated_pool().status().ready, 1); // when @@ -886,7 +893,7 @@ mod tests { #[test] fn should_trigger_broadcasted() { // given - let pool = pool(); + let (pool, api) = pool(); let uxt = uxt(Transfer { from: Alice.into(), to: AccountId::from_h256(H256::from_low_u64_be(2)), @@ -894,7 +901,8 @@ mod tests { nonce: 0, }); let watcher = - block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, uxt)).unwrap(); + block_on(pool.submit_and_watch(api.expect_hash_from_number(0), SOURCE, uxt)) + .unwrap(); assert_eq!(pool.validated_pool().status().ready, 1); // when @@ -916,7 +924,8 @@ mod tests { let options = Options { ready: limit.clone(), future: limit.clone(), ..Default::default() }; - let pool = Pool::new(options, 
true.into(), TestApi::default().into()); + let api = Arc::new(TestApi::default()); + let pool = Pool::new(options, true.into(), api.clone()); let xt = uxt(Transfer { from: Alice.into(), @@ -924,7 +933,9 @@ mod tests { amount: 5, nonce: 0, }); - let watcher = block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, xt)).unwrap(); + let watcher = + block_on(pool.submit_and_watch(api.expect_hash_from_number(0), SOURCE, xt)) + .unwrap(); assert_eq!(pool.validated_pool().status().ready, 1); // when @@ -934,7 +945,7 @@ mod tests { amount: 4, nonce: 1, }); - block_on(pool.submit_one(&BlockId::Number(1), SOURCE, xt)).unwrap(); + block_on(pool.submit_one(api.expect_hash_from_number(1), SOURCE, xt)).unwrap(); assert_eq!(pool.validated_pool().status().ready, 1); // then @@ -951,12 +962,13 @@ mod tests { let options = Options { ready: limit.clone(), future: limit.clone(), ..Default::default() }; - let pool = Pool::new(options, true.into(), TestApi::default().into()); + let api = Arc::new(TestApi::default()); + let pool = Pool::new(options, true.into(), api.clone()); // after validation `IncludeData` will have priority set to 9001 // (validate_transaction mock) let xt = ExtrinsicBuilder::new_include_data(Vec::new()).build(); - block_on(pool.submit_one(&BlockId::Number(0), SOURCE, xt)).unwrap(); + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, xt)).unwrap(); assert_eq!(pool.validated_pool().status().ready, 1); // then @@ -968,7 +980,7 @@ mod tests { amount: 4, nonce: 1, }); - let result = block_on(pool.submit_one(&BlockId::Number(1), SOURCE, xt)); + let result = block_on(pool.submit_one(api.expect_hash_from_number(1), SOURCE, xt)); assert!(matches!( result, Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped) @@ -980,12 +992,15 @@ mod tests { let options = Options { ready: limit.clone(), future: limit.clone(), ..Default::default() }; - let pool = Pool::new(options, true.into(), TestApi::default().into()); + let api = Arc::new(TestApi::default()); + let pool = Pool::new(options, true.into(), api.clone()); + + let hash_of_block0 = api.expect_hash_from_number(0); // after validation `IncludeData` will have priority set to 9001 // (validate_transaction mock) let xt = ExtrinsicBuilder::new_include_data(Vec::new()).build(); - block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, xt)).unwrap(); + block_on(pool.submit_and_watch(hash_of_block0, SOURCE, xt)).unwrap(); assert_eq!(pool.validated_pool().status().ready, 1); // after validation `Transfer` will have priority set to 4 (validate_transaction @@ -996,15 +1011,14 @@ mod tests { amount: 5, nonce: 0, }); - let watcher = - block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, xt)).unwrap(); + let watcher = block_on(pool.submit_and_watch(hash_of_block0, SOURCE, xt)).unwrap(); assert_eq!(pool.validated_pool().status().ready, 2); // when // after validation `Store` will have priority set to 9001 (validate_transaction // mock) let xt = ExtrinsicBuilder::new_indexed_call(Vec::new()).build(); - block_on(pool.submit_one(&BlockId::Number(1), SOURCE, xt)).unwrap(); + block_on(pool.submit_one(api.expect_hash_from_number(1), SOURCE, xt)).unwrap(); assert_eq!(pool.validated_pool().status().ready, 2); // then @@ -1021,7 +1035,10 @@ mod tests { let (tx, rx) = std::sync::mpsc::sync_channel(1); let mut api = TestApi::default(); api.delay = Arc::new(Mutex::new(rx.into())); - let pool = Arc::new(Pool::new(Default::default(), true.into(), api.into())); + let api = Arc::new(api); + let pool = Arc::new(Pool::new(Default::default(), 
true.into(), api.clone())); + + let hash_of_block0 = api.expect_hash_from_number(0); // when let xt = uxt(Transfer { @@ -1034,7 +1051,7 @@ mod tests { // This transaction should go to future, since we use `nonce: 1` let pool2 = pool.clone(); std::thread::spawn(move || { - block_on(pool2.submit_one(&BlockId::Number(0), SOURCE, xt)).unwrap(); + block_on(pool2.submit_one(hash_of_block0, SOURCE, xt)).unwrap(); ready.send(()).unwrap(); }); @@ -1048,12 +1065,13 @@ mod tests { }); // The tag the above transaction provides (TestApi is using just nonce as u8) let provides = vec![0_u8]; - block_on(pool.submit_one(&BlockId::Number(0), SOURCE, xt)).unwrap(); + block_on(pool.submit_one(hash_of_block0, SOURCE, xt)).unwrap(); assert_eq!(pool.validated_pool().status().ready, 1); // Now block import happens before the second transaction is able to finish // verification. - block_on(pool.prune_tags(&BlockId::Number(1), vec![provides], vec![])).unwrap(); + block_on(pool.prune_tags(api.expect_hash_from_number(1), vec![provides], vec![])) + .unwrap(); assert_eq!(pool.validated_pool().status().ready, 0); // so when we release the verification of the previous one it will have diff --git a/substrate/client/transaction-pool/src/lib.rs b/substrate/client/transaction-pool/src/lib.rs index ffaab89d9823..faa3f455a580 100644 --- a/substrate/client/transaction-pool/src/lib.rs +++ b/substrate/client/transaction-pool/src/lib.rs @@ -166,8 +166,11 @@ where finalized_hash: Block::Hash, ) -> (Self, Pin + Send>>) { let pool = Arc::new(graph::Pool::new(Default::default(), true.into(), pool_api.clone())); - let (revalidation_queue, background_task) = - revalidation::RevalidationQueue::new_background(pool_api.clone(), pool.clone()); + let (revalidation_queue, background_task) = revalidation::RevalidationQueue::new_background( + pool_api.clone(), + pool.clone(), + finalized_hash, + ); ( Self { api: pool_api, @@ -203,8 +206,11 @@ where RevalidationType::Light => (revalidation::RevalidationQueue::new(pool_api.clone(), pool.clone()), None), RevalidationType::Full => { - let (queue, background) = - revalidation::RevalidationQueue::new_background(pool_api.clone(), pool.clone()); + let (queue, background) = revalidation::RevalidationQueue::new_background( + pool_api.clone(), + pool.clone(), + finalized_hash, + ); (queue, Some(background)) }, }; @@ -254,46 +260,43 @@ where fn submit_at( &self, - at: &BlockId, + at: ::Hash, source: TransactionSource, xts: Vec>, ) -> PoolFuture, Self::Error>>, Self::Error> { let pool = self.pool.clone(); - let at = *at; self.metrics .report(|metrics| metrics.submitted_transactions.inc_by(xts.len() as u64)); - async move { pool.submit_at(&at, source, xts).await }.boxed() + async move { pool.submit_at(at, source, xts).await }.boxed() } fn submit_one( &self, - at: &BlockId, + at: ::Hash, source: TransactionSource, xt: TransactionFor, ) -> PoolFuture, Self::Error> { let pool = self.pool.clone(); - let at = *at; self.metrics.report(|metrics| metrics.submitted_transactions.inc()); - async move { pool.submit_one(&at, source, xt).await }.boxed() + async move { pool.submit_one(at, source, xt).await }.boxed() } fn submit_and_watch( &self, - at: &BlockId, + at: ::Hash, source: TransactionSource, xt: TransactionFor, ) -> PoolFuture>>, Self::Error> { - let at = *at; let pool = self.pool.clone(); self.metrics.report(|metrics| metrics.submitted_transactions.inc()); async move { - let watcher = pool.submit_and_watch(&at, source, xt).await?; + let watcher = pool.submit_and_watch(at, source, xt).await?; 
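Worth noting in these `submit_*` hunks: the `let at = *at;` copies disappear. With `at: Block::Hash` taken by value, the (`Copy`) hash moves straight into the boxed future rather than first being dereferenced out of a `&BlockId` parameter. A tiny sketch of the pattern, assuming the `futures` crate these crates already depend on:

```
use std::future::Future;
use std::pin::Pin;

type Hash = [u8; 32];

fn submit_one(at: Hash) -> Pin<Box<dyn Future<Output = Hash> + Send>> {
    // No `let at = *at;` needed: `at` is owned and simply moves into the
    // boxed async block.
    Box::pin(async move { at })
}

fn main() {
    let hash = futures::executor::block_on(submit_one([7u8; 32]));
    assert_eq!(hash, [7u8; 32]);
}
```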
Ok(watcher.into_stream().boxed()) } @@ -433,11 +436,7 @@ where let validity = self .api - .validate_transaction_blocking( - &BlockId::hash(at), - TransactionSource::Local, - xt.clone(), - )? + .validate_transaction_blocking(at, TransactionSource::Local, xt.clone())? .map_err(|e| { Self::Error::Pool(match e { TransactionValidityError::Invalid(i) => TxPoolError::InvalidTransaction(i), @@ -577,10 +576,7 @@ async fn prune_known_txs_for_block { - at: NumberFor, + at: BlockHash, transactions: Vec>, } @@ -54,9 +52,9 @@ struct WorkerPayload { struct RevalidationWorker { api: Arc, pool: Arc>, - best_block: NumberFor, - block_ordered: BTreeMap, HashSet>>, - members: HashMap, NumberFor>, + best_block: BlockHash, + block_ordered: BTreeMap, HashSet>>, + members: HashMap, BlockHash>, } impl Unpin for RevalidationWorker {} @@ -68,15 +66,30 @@ impl Unpin for RevalidationWorker {} async fn batch_revalidate( pool: Arc>, api: Arc, - at: NumberFor, + at: BlockHash, batch: impl IntoIterator>, ) { + // This conversion should succeed. If it does not, revalidation is skipped for the unknown + // block: all transactions are kept in the validated pool and can be scheduled for + // revalidation with the next request. + let block_number = match api.block_id_to_number(&BlockId::Hash(at)) { + Ok(Some(n)) => n, + Ok(None) => { + log::debug!(target: LOG_TARGET, "revalidation skipped at block {at:?}, could not get block number."); + return + }, + Err(e) => { + log::debug!(target: LOG_TARGET, "revalidation skipped at block {at:?}: {e:?}."); + return + }, + }; + let mut invalid_hashes = Vec::new(); let mut revalidated = HashMap::new(); let validation_results = futures::future::join_all(batch.into_iter().filter_map(|ext_hash| { pool.validated_pool().ready_by_hash(&ext_hash).map(|ext| { - api.validate_transaction(&BlockId::Number(at), ext.source, ext.data.clone()) + api.validate_transaction(at, ext.source, ext.data.clone()) .map(move |validation_result| (validation_result, ext_hash, ext)) }) })) @@ -107,7 +120,7 @@ async fn batch_revalidate( revalidated.insert( ext_hash, ValidatedTransaction::valid_at( - at.saturated_into::(), + block_number.saturated_into::(), ext_hash, ext.source, ext.data.clone(), @@ -135,13 +148,13 @@ } impl RevalidationWorker { - fn new(api: Arc, pool: Arc>) -> Self { + fn new(api: Arc, pool: Arc>, best_block: BlockHash) -> Self { Self { api, pool, + best_block, block_ordered: Default::default(), members: Default::default(), - best_block: Zero::zero(), } } @@ -303,10 +316,11 @@ where api: Arc, pool: Arc>, interval: Duration, + best_block: BlockHash, ) -> (Self, Pin + Send>>) { let (to_worker, from_queue) = tracing_unbounded("mpsc_revalidation_queue", 100_000); - let worker = RevalidationWorker::new(api.clone(), pool.clone()); + let worker = RevalidationWorker::new(api.clone(), pool.clone(), best_block); let queue = Self { api, pool, background: Some(to_worker) }; @@ -317,8 +331,9 @@ pub fn new_background( api: Arc, pool: Arc>, + best_block: BlockHash, ) -> (Self, Pin + Send>>) { - Self::new_with_interval(api, pool, BACKGROUND_REVALIDATION_INTERVAL) + Self::new_with_interval(api, pool, BACKGROUND_REVALIDATION_INTERVAL, best_block) } /// Queue some transaction for later revalidation. @@ -328,7 +343,7 @@ where /// revalidation is actually done.
pub async fn revalidate_later( &self, - at: NumberFor, + at: BlockHash, transactions: Vec>, ) { if transactions.len() > 0 { @@ -360,9 +375,8 @@ mod tests { }; use futures::executor::block_on; use sc_transaction_pool_api::TransactionSource; - use sp_runtime::generic::BlockId; use substrate_test_runtime::{AccountId, Transfer, H256}; - use substrate_test_runtime_client::AccountKeyring::Alice; + use substrate_test_runtime_client::AccountKeyring::{Alice, Bob}; #[test] fn revalidation_queue_works() { @@ -376,18 +390,63 @@ amount: 5, nonce: 0, }); - let uxt_hash = block_on(pool.submit_one( - &BlockId::number(0), - TransactionSource::External, - uxt.clone(), - )) - .expect("Should be valid"); - block_on(queue.revalidate_later(0, vec![uxt_hash])); + let hash_of_block0 = api.expect_hash_from_number(0); + + let uxt_hash = + block_on(pool.submit_one(hash_of_block0, TransactionSource::External, uxt.clone())) + .expect("Should be valid"); + + block_on(queue.revalidate_later(hash_of_block0, vec![uxt_hash])); // revalidated in sync offload 2nd time assert_eq!(api.validation_requests().len(), 2); // number of ready assert_eq!(pool.validated_pool().status().ready, 1); } + + #[test] + fn revalidation_queue_skips_revalidation_for_unknown_block_hash() { + let api = Arc::new(TestApi::default()); + let pool = Arc::new(Pool::new(Default::default(), true.into(), api.clone())); + let queue = Arc::new(RevalidationQueue::new(api.clone(), pool.clone())); + + let uxt0 = uxt(Transfer { + from: Alice.into(), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }); + let uxt1 = uxt(Transfer { + from: Bob.into(), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 4, + nonce: 1, + }); + + let hash_of_block0 = api.expect_hash_from_number(0); + let unknown_block = H256::repeat_byte(0x13); + + let uxt_hashes = + block_on(pool.submit_at(hash_of_block0, TransactionSource::External, vec![uxt0, uxt1])) + .expect("Should be valid") + .into_iter() + .map(|r| r.expect("Should be valid")) + .collect::>(); + + assert_eq!(api.validation_requests().len(), 2); + assert_eq!(pool.validated_pool().status().ready, 2); + + // revalidation works fine for block 0: + block_on(queue.revalidate_later(hash_of_block0, uxt_hashes.clone())); + assert_eq!(api.validation_requests().len(), 4); + assert_eq!(pool.validated_pool().status().ready, 2); + + // revalidation should be skipped for the unknown block: + block_on(queue.revalidate_later(unknown_block, uxt_hashes)); + // no revalidation should have been performed + assert_eq!(api.validation_requests().len(), 4); + // the number of ready transactions should not change + assert_eq!(pool.validated_pool().status().ready, 2); + } } diff --git a/substrate/client/transaction-pool/src/tests.rs b/substrate/client/transaction-pool/src/tests.rs index 62911d5cbb47..325add3fb1c5 100644 --- a/substrate/client/transaction-pool/src/tests.rs +++ b/substrate/client/transaction-pool/src/tests.rs @@ -32,7 +32,7 @@ use sp_runtime::{ }; use std::{collections::HashSet, sync::Arc}; use substrate_test_runtime::{ - substrate_test_pallet::pallet::Call as PalletCall, BalancesCall, Block, Extrinsic, + substrate_test_pallet::pallet::Call as PalletCall, BalancesCall, Block, BlockNumber, Extrinsic, ExtrinsicBuilder, Hashing, RuntimeCall, Transfer, TransferData, H256, }; @@ -53,6 +53,11 @@ impl TestApi { pub fn validation_requests(&self) -> Vec { self.validation_requests.lock().clone() } + + /// Helper function for mapping a block number to its hash. Use only when the mapping cannot fail.
+ pub fn expect_hash_from_number(&self, n: BlockNumber) -> H256 { + self.block_id_to_hash(&BlockId::Number(n)).unwrap().unwrap() + } } impl ChainApi for TestApi { @@ -64,13 +69,13 @@ impl ChainApi for TestApi { /// Verify extrinsic at given block. fn validate_transaction( &self, - at: &BlockId, + at: ::Hash, _source: TransactionSource, uxt: ExtrinsicFor, ) -> Self::ValidationFuture { self.validation_requests.lock().push(uxt.clone()); let hash = self.hash_and_length(&uxt).0; - let block_number = self.block_id_to_number(at).unwrap().unwrap(); + let block_number = self.block_id_to_number(&BlockId::Hash(at)).unwrap().unwrap(); let res = match uxt { Extrinsic { @@ -153,6 +158,8 @@ impl ChainApi for TestApi { ) -> Result>, Self::Error> { Ok(match at { BlockId::Number(num) => Some(*num), + BlockId::Hash(hash) if *hash == H256::from_low_u64_be(hash.to_low_u64_be()) => + Some(hash.to_low_u64_be()), BlockId::Hash(_) => None, }) } @@ -164,7 +171,7 @@ impl ChainApi for TestApi { ) -> Result::Hash>, Self::Error> { Ok(match at { BlockId::Number(num) => Some(H256::from_low_u64_be(*num)).into(), - BlockId::Hash(_) => None, + BlockId::Hash(hash) => Some(*hash), }) } @@ -199,6 +206,7 @@ pub(crate) fn uxt(transfer: Transfer) -> Extrinsic { ExtrinsicBuilder::new_transfer(transfer).build() } -pub(crate) fn pool() -> Pool { - Pool::new(Default::default(), true.into(), TestApi::default().into()) +pub(crate) fn pool() -> (Pool, Arc) { + let api = Arc::new(TestApi::default()); + (Pool::new(Default::default(), true.into(), api.clone()), api) } diff --git a/substrate/client/transaction-pool/tests/pool.rs b/substrate/client/transaction-pool/tests/pool.rs index 4adf811b4252..e5ab01ffbf09 100644 --- a/substrate/client/transaction-pool/tests/pool.rs +++ b/substrate/client/transaction-pool/tests/pool.rs @@ -47,8 +47,9 @@ use substrate_test_runtime_transaction_pool::{uxt, TestApi}; const LOG_TARGET: &str = "txpool"; -fn pool() -> Pool { - Pool::new(Default::default(), true.into(), TestApi::with_alice_nonce(209).into()) +fn pool() -> (Pool, Arc) { + let api = Arc::new(TestApi::with_alice_nonce(209)); + (Pool::new(Default::default(), true.into(), api.clone()), api) } fn maintained_pool() -> (BasicPool, Arc, futures::executor::ThreadPool) { @@ -83,8 +84,8 @@ const SOURCE: TransactionSource = TransactionSource::External; #[test] fn submission_should_work() { - let pool = pool(); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 209))).unwrap(); + let (pool, api) = pool(); + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt(Alice, 209))).unwrap(); let pending: Vec<_> = pool .validated_pool() @@ -96,9 +97,9 @@ fn submission_should_work() { #[test] fn multiple_submission_should_work() { - let pool = pool(); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 209))).unwrap(); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 210))).unwrap(); + let (pool, api) = pool(); + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt(Alice, 209))).unwrap(); + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt(Alice, 210))).unwrap(); let pending: Vec<_> = pool .validated_pool() @@ -111,8 +112,8 @@ fn multiple_submission_should_work() { #[test] fn early_nonce_should_be_culled() { sp_tracing::try_init_simple(); - let pool = pool(); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 208))).unwrap(); + let (pool, api) = pool(); + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt(Alice, 208))).unwrap(); let 
pending: Vec<_> = pool .validated_pool() @@ -124,9 +125,9 @@ fn early_nonce_should_be_culled() { #[test] fn late_nonce_should_be_queued() { - let pool = pool(); + let (pool, api) = pool(); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 210))).unwrap(); + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt(Alice, 210))).unwrap(); let pending: Vec<_> = pool .validated_pool() .ready() @@ -134,7 +135,7 @@ fn late_nonce_should_be_queued() { .collect(); assert_eq!(pending, Vec::::new()); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 209))).unwrap(); + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt(Alice, 209))).unwrap(); let pending: Vec<_> = pool .validated_pool() .ready() @@ -145,9 +146,10 @@ fn late_nonce_should_be_queued() { #[test] fn prune_tags_should_work() { - let pool = pool(); - let hash209 = block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 209))).unwrap(); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 210))).unwrap(); + let (pool, api) = pool(); + let hash209 = + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt(Alice, 209))).unwrap(); + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt(Alice, 210))).unwrap(); let pending: Vec<_> = pool .validated_pool() @@ -157,7 +159,7 @@ fn prune_tags_should_work() { assert_eq!(pending, vec![209, 210]); pool.validated_pool().api().push_block(1, Vec::new(), true); - block_on(pool.prune_tags(&BlockId::number(1), vec![vec![209]], vec![hash209])) + block_on(pool.prune_tags(api.expect_hash_from_number(1), vec![vec![209]], vec![hash209])) .expect("Prune tags"); let pending: Vec<_> = pool @@ -170,11 +172,12 @@ fn prune_tags_should_work() { #[test] fn should_ban_invalid_transactions() { - let pool = pool(); + let (pool, api) = pool(); let uxt = uxt(Alice, 209); - let hash = block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt.clone())).unwrap(); + let hash = + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt.clone())).unwrap(); pool.validated_pool().remove_invalid(&[hash]); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt.clone())).unwrap_err(); + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt.clone())).unwrap_err(); // when let pending: Vec<_> = pool @@ -185,7 +188,7 @@ fn should_ban_invalid_transactions() { assert_eq!(pending, Vec::::new()); // then - block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt.clone())).unwrap_err(); + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt.clone())).unwrap_err(); } #[test] @@ -193,9 +196,9 @@ fn only_prune_on_new_best() { let (pool, api, _) = maintained_pool(); let uxt = uxt(Alice, 209); - let _ = block_on(pool.submit_and_watch(&BlockId::number(0), SOURCE, uxt.clone())) + let _ = block_on(pool.submit_and_watch(api.expect_hash_from_number(0), SOURCE, uxt.clone())) .expect("1. Imported"); - pool.api().push_block(1, vec![uxt.clone()], true); + api.push_block(1, vec![uxt.clone()], true); assert_eq!(pool.status().ready, 1); let header = api.push_block(2, vec![uxt], true); @@ -212,13 +215,15 @@ fn should_correctly_prune_transactions_providing_more_than_one_tag() { })); let pool = Pool::new(Default::default(), true.into(), api.clone()); let xt = uxt(Alice, 209); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported"); + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, xt.clone())) + .expect("1. 
Imported"); assert_eq!(pool.validated_pool().status().ready, 1); // remove the transaction that just got imported. api.increment_nonce(Alice.into()); api.push_block(1, Vec::new(), true); - block_on(pool.prune_tags(&BlockId::number(1), vec![vec![209]], vec![])).expect("1. Pruned"); + block_on(pool.prune_tags(api.expect_hash_from_number(1), vec![vec![209]], vec![])) + .expect("1. Pruned"); assert_eq!(pool.validated_pool().status().ready, 0); // it's re-imported to future assert_eq!(pool.validated_pool().status().future, 1); @@ -227,7 +232,8 @@ fn should_correctly_prune_transactions_providing_more_than_one_tag() { api.increment_nonce(Alice.into()); api.push_block(2, Vec::new(), true); let xt = uxt(Alice, 211); - block_on(pool.submit_one(&BlockId::number(2), SOURCE, xt.clone())).expect("2. Imported"); + block_on(pool.submit_one(api.expect_hash_from_number(2), SOURCE, xt.clone())) + .expect("2. Imported"); assert_eq!(pool.validated_pool().status().ready, 1); assert_eq!(pool.validated_pool().status().future, 1); let pending: Vec<_> = pool @@ -240,7 +246,8 @@ fn should_correctly_prune_transactions_providing_more_than_one_tag() { // prune it and make sure the pool is empty api.increment_nonce(Alice.into()); api.push_block(3, Vec::new(), true); - block_on(pool.prune_tags(&BlockId::number(3), vec![vec![155]], vec![])).expect("2. Pruned"); + block_on(pool.prune_tags(api.expect_hash_from_number(3), vec![vec![155]], vec![])) + .expect("2. Pruned"); assert_eq!(pool.validated_pool().status().ready, 0); assert_eq!(pool.validated_pool().status().future, 2); } @@ -270,7 +277,8 @@ fn should_prune_old_during_maintenance() { let (pool, api, _guard) = maintained_pool(); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported"); + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, xt.clone())) + .expect("1. Imported"); assert_eq!(pool.status().ready, 1); let header = api.push_block(1, vec![xt.clone()], true); @@ -285,9 +293,11 @@ fn should_revalidate_during_maintenance() { let xt2 = uxt(Alice, 210); let (pool, api, _guard) = maintained_pool(); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt1.clone())).expect("1. Imported"); - let watcher = block_on(pool.submit_and_watch(&BlockId::number(0), SOURCE, xt2.clone())) - .expect("2. Imported"); + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, xt1.clone())) + .expect("1. Imported"); + let watcher = + block_on(pool.submit_and_watch(api.expect_hash_from_number(0), SOURCE, xt2.clone())) + .expect("2. Imported"); assert_eq!(pool.status().ready, 2); assert_eq!(api.validation_requests().len(), 2); @@ -311,7 +321,8 @@ fn should_resubmit_from_retracted_during_maintenance() { let (pool, api, _guard) = maintained_pool(); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported"); + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, xt.clone())) + .expect("1. Imported"); assert_eq!(pool.status().ready, 1); let header = api.push_block(1, vec![], true); @@ -329,7 +340,8 @@ fn should_not_resubmit_from_retracted_during_maintenance_if_tx_is_also_in_enacte let (pool, api, _guard) = maintained_pool(); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported"); + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, xt.clone())) + .expect("1. 
Imported"); assert_eq!(pool.status().ready, 1); let header = api.push_block(1, vec![xt.clone()], true); @@ -347,8 +359,9 @@ fn should_not_retain_invalid_hashes_from_retracted() { let (pool, api, _guard) = maintained_pool(); - let watcher = block_on(pool.submit_and_watch(&BlockId::number(0), SOURCE, xt.clone())) - .expect("1. Imported"); + let watcher = + block_on(pool.submit_and_watch(api.expect_hash_from_number(0), SOURCE, xt.clone())) + .expect("1. Imported"); assert_eq!(pool.status().ready, 1); let header = api.push_block(1, vec![], true); @@ -374,15 +387,18 @@ fn should_revalidate_across_many_blocks() { let (pool, api, _guard) = maintained_pool(); - let watcher1 = block_on(pool.submit_and_watch(&BlockId::number(0), SOURCE, xt1.clone())) + let watcher1 = + block_on(pool.submit_and_watch(api.expect_hash_from_number(0), SOURCE, xt1.clone())) + .expect("1. Imported"); + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, xt2.clone())) .expect("1. Imported"); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt2.clone())).expect("1. Imported"); assert_eq!(pool.status().ready, 2); let header = api.push_block(1, vec![], true); block_on(pool.maintain(block_event(header))); - block_on(pool.submit_one(&BlockId::number(1), SOURCE, xt3.clone())).expect("1. Imported"); + block_on(pool.submit_one(api.expect_hash_from_number(1), SOURCE, xt3.clone())) + .expect("1. Imported"); assert_eq!(pool.status().ready, 3); let header = api.push_block(2, vec![xt1.clone()], true); @@ -409,19 +425,24 @@ fn should_push_watchers_during_maintenance() { let tx0 = alice_uxt(0); let watcher0 = - block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx0.clone())).unwrap(); + block_on(pool.submit_and_watch(api.expect_hash_from_number(0), SOURCE, tx0.clone())) + .unwrap(); let tx1 = alice_uxt(1); let watcher1 = - block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx1.clone())).unwrap(); + block_on(pool.submit_and_watch(api.expect_hash_from_number(0), SOURCE, tx1.clone())) + .unwrap(); let tx2 = alice_uxt(2); let watcher2 = - block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx2.clone())).unwrap(); + block_on(pool.submit_and_watch(api.expect_hash_from_number(0), SOURCE, tx2.clone())) + .unwrap(); let tx3 = alice_uxt(3); let watcher3 = - block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx3.clone())).unwrap(); + block_on(pool.submit_and_watch(api.expect_hash_from_number(0), SOURCE, tx3.clone())) + .unwrap(); let tx4 = alice_uxt(4); let watcher4 = - block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx4.clone())).unwrap(); + block_on(pool.submit_and_watch(api.expect_hash_from_number(0), SOURCE, tx4.clone())) + .unwrap(); assert_eq!(pool.status().ready, 5); // when @@ -489,11 +510,13 @@ fn finalization() { let api = TestApi::with_alice_nonce(209); api.push_block(1, vec![], true); let pool = create_basic_pool(api); - let watcher = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, xt.clone())) - .expect("1. Imported"); - pool.api().push_block(2, vec![xt.clone()], true); + let api = pool.api(); + let watcher = + block_on(pool.submit_and_watch(api.expect_hash_from_number(1), SOURCE, xt.clone())) + .expect("1. 
Imported"); + api.push_block(2, vec![xt.clone()], true); - let header = pool.api().chain().read().block_by_number.get(&2).unwrap()[0].0.header().clone(); + let header = api.chain().read().block_by_number.get(&2).unwrap()[0].0.header().clone(); let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; block_on(pool.maintain(event)); @@ -515,16 +538,17 @@ fn fork_aware_finalization() { let a_header = api.push_block(1, vec![], true); let pool = create_basic_pool(api); + let api = pool.api(); let mut canon_watchers = vec![]; let from_alice = uxt(Alice, 1); let from_dave = uxt(Dave, 2); let from_bob = uxt(Bob, 1); let from_charlie = uxt(Charlie, 1); - pool.api().increment_nonce(Alice.into()); - pool.api().increment_nonce(Dave.into()); - pool.api().increment_nonce(Charlie.into()); - pool.api().increment_nonce(Bob.into()); + api.increment_nonce(Alice.into()); + api.increment_nonce(Dave.into()); + api.increment_nonce(Charlie.into()); + api.increment_nonce(Bob.into()); let from_dave_watcher; let from_bob_watcher; @@ -538,10 +562,13 @@ fn fork_aware_finalization() { // block B1 { - let watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone())) - .expect("1. Imported"); - let header = pool.api().push_block(2, vec![from_alice.clone()], true); + let watcher = block_on(pool.submit_and_watch( + api.expect_hash_from_number(1), + SOURCE, + from_alice.clone(), + )) + .expect("1. Imported"); + let header = api.push_block(2, vec![from_alice.clone()], true); canon_watchers.push((watcher, header.hash())); assert_eq!(pool.status().ready, 1); @@ -556,10 +583,13 @@ fn fork_aware_finalization() { // block C2 { - let header = pool.api().push_block_with_parent(b1, vec![from_dave.clone()], true); - from_dave_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_dave.clone())) - .expect("1. Imported"); + let header = api.push_block_with_parent(b1, vec![from_dave.clone()], true); + from_dave_watcher = block_on(pool.submit_and_watch( + api.expect_hash_from_number(1), + SOURCE, + from_dave.clone(), + )) + .expect("1. Imported"); assert_eq!(pool.status().ready, 1); log::trace!(target: LOG_TARGET, ">> C2: {:?} {:?}", header.hash(), header); let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; @@ -570,11 +600,14 @@ fn fork_aware_finalization() { // block D2 { - from_bob_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone())) - .expect("1. Imported"); + from_bob_watcher = block_on(pool.submit_and_watch( + api.expect_hash_from_number(1), + SOURCE, + from_bob.clone(), + )) + .expect("1. 
Imported"); assert_eq!(pool.status().ready, 1); - let header = pool.api().push_block_with_parent(c2, vec![from_bob.clone()], true); + let header = api.push_block_with_parent(c2, vec![from_bob.clone()], true); log::trace!(target: LOG_TARGET, ">> D2: {:?} {:?}", header.hash(), header); let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; @@ -585,15 +618,18 @@ fn fork_aware_finalization() { // block C1 { - let watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_charlie.clone())) - .expect("1.Imported"); + let watcher = block_on(pool.submit_and_watch( + api.expect_hash_from_number(1), + SOURCE, + from_charlie.clone(), + )) + .expect("1.Imported"); assert_eq!(pool.status().ready, 1); - let header = pool.api().push_block_with_parent(b1, vec![from_charlie.clone()], true); + let header = api.push_block_with_parent(b1, vec![from_charlie.clone()], true); log::trace!(target: LOG_TARGET, ">> C1: {:?} {:?}", header.hash(), header); c1 = header.hash(); canon_watchers.push((watcher, header.hash())); - let event = block_event_with_retracted(header.clone(), d2, pool.api()); + let event = block_event_with_retracted(header.clone(), d2, api); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 2); @@ -604,10 +640,10 @@ fn fork_aware_finalization() { // block D1 { let xt = uxt(Eve, 0); - let w = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, xt.clone())) + let w = block_on(pool.submit_and_watch(api.expect_hash_from_number(1), SOURCE, xt.clone())) .expect("1. Imported"); assert_eq!(pool.status().ready, 3); - let header = pool.api().push_block_with_parent(c1, vec![xt.clone()], true); + let header = api.push_block_with_parent(c1, vec![xt.clone()], true); log::trace!(target: LOG_TARGET, ">> D1: {:?} {:?}", header.hash(), header); d1 = header.hash(); canon_watchers.push((w, header.hash())); @@ -623,7 +659,7 @@ fn fork_aware_finalization() { // block E1 { - let header = pool.api().push_block_with_parent(d1, vec![from_dave, from_bob], true); + let header = api.push_block_with_parent(d1, vec![from_dave, from_bob], true); log::trace!(target: LOG_TARGET, ">> E1: {:?} {:?}", header.hash(), header); e1 = header.hash(); let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; @@ -673,16 +709,18 @@ fn prune_and_retract_tx_at_same_time() { api.push_block(1, vec![], true); let pool = create_basic_pool(api); + let api = pool.api(); let from_alice = uxt(Alice, 1); - pool.api().increment_nonce(Alice.into()); + api.increment_nonce(Alice.into()); - let watcher = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone())) - .expect("1. Imported"); + let watcher = + block_on(pool.submit_and_watch(api.expect_hash_from_number(1), SOURCE, from_alice.clone())) + .expect("1. 
Imported"); // Block B1 let b1 = { - let header = pool.api().push_block(2, vec![from_alice.clone()], true); + let header = api.push_block(2, vec![from_alice.clone()], true); assert_eq!(pool.status().ready, 1); let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; @@ -693,10 +731,10 @@ fn prune_and_retract_tx_at_same_time() { // Block B2 let b2 = { - let header = pool.api().push_block(2, vec![from_alice.clone()], true); + let header = api.push_block(2, vec![from_alice.clone()], true); assert_eq!(pool.status().ready, 0); - let event = block_event_with_retracted(header.clone(), b1, pool.api()); + let event = block_event_with_retracted(header.clone(), b1, api); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 0); @@ -739,19 +777,21 @@ fn resubmit_tx_of_fork_that_is_not_part_of_retracted() { api.push_block(1, vec![], true); let pool = create_basic_pool(api); + let api = pool.api(); let tx0 = uxt(Alice, 1); let tx1 = uxt(Dave, 2); - pool.api().increment_nonce(Alice.into()); - pool.api().increment_nonce(Dave.into()); + api.increment_nonce(Alice.into()); + api.increment_nonce(Dave.into()); let d0; // Block D0 { - let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx0.clone())) - .expect("1. Imported"); - let header = pool.api().push_block(2, vec![tx0.clone()], true); + let _ = + block_on(pool.submit_and_watch(api.expect_hash_from_number(1), SOURCE, tx0.clone())) + .expect("1. Imported"); + let header = api.push_block(2, vec![tx0.clone()], true); assert_eq!(pool.status().ready, 1); let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; @@ -762,17 +802,18 @@ fn resubmit_tx_of_fork_that_is_not_part_of_retracted() { // Block D1 { - let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx1.clone())) - .expect("1. Imported"); - pool.api().push_block(2, vec![tx1.clone()], false); + let _ = + block_on(pool.submit_and_watch(api.expect_hash_from_number(1), SOURCE, tx1.clone())) + .expect("1. Imported"); + api.push_block(2, vec![tx1.clone()], false); assert_eq!(pool.status().ready, 1); } // Block D2 { //push new best block - let header = pool.api().push_block(2, vec![], true); - let event = block_event_with_retracted(header, d0, pool.api()); + let header = api.push_block(2, vec![], true); + let event = block_event_with_retracted(header, d0, api); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 2); } @@ -786,6 +827,8 @@ fn resubmit_from_retracted_fork() { let pool = create_basic_pool(api); + let api = pool.api(); + let tx0 = uxt(Alice, 1); let tx1 = uxt(Dave, 2); let tx2 = uxt(Bob, 3); @@ -795,18 +838,19 @@ fn resubmit_from_retracted_fork() { let tx4 = uxt(Ferdie, 2); let tx5 = uxt(One, 3); - pool.api().increment_nonce(Alice.into()); - pool.api().increment_nonce(Dave.into()); - pool.api().increment_nonce(Bob.into()); - pool.api().increment_nonce(Eve.into()); - pool.api().increment_nonce(Ferdie.into()); - pool.api().increment_nonce(One.into()); + api.increment_nonce(Alice.into()); + api.increment_nonce(Dave.into()); + api.increment_nonce(Bob.into()); + api.increment_nonce(Eve.into()); + api.increment_nonce(Ferdie.into()); + api.increment_nonce(One.into()); // Block D0 { - let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx0.clone())) - .expect("1. Imported"); - let header = pool.api().push_block(2, vec![tx0.clone()], true); + let _ = + block_on(pool.submit_and_watch(api.expect_hash_from_number(1), SOURCE, tx0.clone())) + .expect("1. 
Imported"); + let header = api.push_block(2, vec![tx0.clone()], true); assert_eq!(pool.status().ready, 1); block_on(pool.maintain(block_event(header))); @@ -815,18 +859,20 @@ fn resubmit_from_retracted_fork() { // Block E0 { - let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx1.clone())) - .expect("1. Imported"); - let header = pool.api().push_block(3, vec![tx1.clone()], true); + let _ = + block_on(pool.submit_and_watch(api.expect_hash_from_number(1), SOURCE, tx1.clone())) + .expect("1. Imported"); + let header = api.push_block(3, vec![tx1.clone()], true); block_on(pool.maintain(block_event(header))); assert_eq!(pool.status().ready, 0); } // Block F0 let f0 = { - let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx2.clone())) - .expect("1. Imported"); - let header = pool.api().push_block(4, vec![tx2.clone()], true); + let _ = + block_on(pool.submit_and_watch(api.expect_hash_from_number(1), SOURCE, tx2.clone())) + .expect("1. Imported"); + let header = api.push_block(4, vec![tx2.clone()], true); block_on(pool.maintain(block_event(header.clone()))); assert_eq!(pool.status().ready, 0); header.hash() @@ -834,27 +880,30 @@ fn resubmit_from_retracted_fork() { // Block D1 let d1 = { - let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx3.clone())) - .expect("1. Imported"); - let header = pool.api().push_block(2, vec![tx3.clone()], true); + let _ = + block_on(pool.submit_and_watch(api.expect_hash_from_number(1), SOURCE, tx3.clone())) + .expect("1. Imported"); + let header = api.push_block(2, vec![tx3.clone()], true); assert_eq!(pool.status().ready, 1); header.hash() }; // Block E1 let e1 = { - let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx4.clone())) - .expect("1. Imported"); - let header = pool.api().push_block_with_parent(d1, vec![tx4.clone()], true); + let _ = + block_on(pool.submit_and_watch(api.expect_hash_from_number(1), SOURCE, tx4.clone())) + .expect("1. Imported"); + let header = api.push_block_with_parent(d1, vec![tx4.clone()], true); assert_eq!(pool.status().ready, 2); header.hash() }; // Block F1 let f1_header = { - let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx5.clone())) - .expect("1. Imported"); - let header = pool.api().push_block_with_parent(e1, vec![tx5.clone()], true); + let _ = + block_on(pool.submit_and_watch(api.expect_hash_from_number(1), SOURCE, tx5.clone())) + .expect("1. Imported"); + let header = api.push_block_with_parent(e1, vec![tx5.clone()], true); // Don't announce the block event to the pool directly, because we will // re-org to this block. assert_eq!(pool.status().ready, 3); @@ -865,7 +914,7 @@ fn resubmit_from_retracted_fork() { let expected_ready = vec![tx3, tx4, tx5].iter().map(Encode::encode).collect::>(); assert_eq!(expected_ready, ready); - let event = block_event_with_retracted(f1_header, f0, pool.api()); + let event = block_event_with_retracted(f1_header, f0, api); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 3); @@ -876,9 +925,10 @@ fn resubmit_from_retracted_fork() { #[test] fn ready_set_should_not_resolve_before_block_update() { - let (pool, _api, _guard) = maintained_pool(); + let (pool, api, _guard) = maintained_pool(); let xt1 = uxt(Alice, 209); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt1.clone())).expect("1. Imported"); + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, xt1.clone())) + .expect("1. 
Imported"); assert!(pool.ready_at(1).now_or_never().is_none()); } @@ -890,7 +940,8 @@ fn ready_set_should_resolve_after_block_update() { let xt1 = uxt(Alice, 209); - block_on(pool.submit_one(&BlockId::number(1), SOURCE, xt1.clone())).expect("1. Imported"); + block_on(pool.submit_one(api.expect_hash_from_number(1), SOURCE, xt1.clone())) + .expect("1. Imported"); block_on(pool.maintain(block_event(header))); assert!(pool.ready_at(1).now_or_never().is_some()); } @@ -903,7 +954,8 @@ fn ready_set_should_eventually_resolve_when_block_update_arrives() { let xt1 = uxt(Alice, 209); - block_on(pool.submit_one(&BlockId::number(1), SOURCE, xt1.clone())).expect("1. Imported"); + block_on(pool.submit_one(api.expect_hash_from_number(1), SOURCE, xt1.clone())) + .expect("1. Imported"); let noop_waker = futures::task::noop_waker(); let mut context = futures::task::Context::from_waker(&noop_waker); @@ -948,7 +1000,12 @@ fn import_notification_to_pool_maintain_works() { // Prepare the extrinsic, push it to the pool and check that it was added. let xt = uxt(Alice, 0); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported"); + block_on(pool.submit_one( + pool.api().block_id_to_hash(&BlockId::Number(0)).unwrap().unwrap(), + SOURCE, + xt.clone(), + )) + .expect("1. Imported"); assert_eq!(pool.status().ready, 1); let mut import_stream = block_on_stream(client.import_notification_stream()); @@ -973,7 +1030,8 @@ fn pruning_a_transaction_should_remove_it_from_best_transaction() { let xt1 = ExtrinsicBuilder::new_include_data(Vec::new()).build(); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt1.clone())).expect("1. Imported"); + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, xt1.clone())) + .expect("1. Imported"); assert_eq!(pool.status().ready, 1); let header = api.push_block(1, vec![xt1.clone()], true); @@ -997,8 +1055,12 @@ fn stale_transactions_are_pruned() { let (pool, api, _guard) = maintained_pool(); xts.into_iter().for_each(|xt| { - block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.into_unchecked_extrinsic())) - .expect("1. Imported"); + block_on(pool.submit_one( + api.expect_hash_from_number(0), + SOURCE, + xt.into_unchecked_extrinsic(), + )) + .expect("1. Imported"); }); assert_eq!(pool.status().ready, 0); assert_eq!(pool.status().future, 3); @@ -1038,8 +1100,9 @@ fn finalized_only_handled_correctly() { let (pool, api, _guard) = maintained_pool(); - let watcher = block_on(pool.submit_and_watch(&BlockId::number(0), SOURCE, xt.clone())) - .expect("1. Imported"); + let watcher = + block_on(pool.submit_and_watch(api.expect_hash_from_number(0), SOURCE, xt.clone())) + .expect("1. Imported"); assert_eq!(pool.status().ready, 1); let header = api.push_block(1, vec![xt], true);
Imported"); assert_eq!(pool.status().ready, 1); let header = api.push_block(1, vec![xt], true); @@ -1096,11 +1160,12 @@ fn switching_fork_with_finalized_works() { let a_header = api.push_block(1, vec![], true); let pool = create_basic_pool(api); + let api = pool.api(); let from_alice = uxt(Alice, 1); let from_bob = uxt(Bob, 2); - pool.api().increment_nonce(Alice.into()); - pool.api().increment_nonce(Bob.into()); + api.increment_nonce(Alice.into()); + api.increment_nonce(Bob.into()); let from_alice_watcher; let from_bob_watcher; @@ -1109,12 +1174,13 @@ fn switching_fork_with_finalized_works() { // block B1 { - from_alice_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone())) - .expect("1. Imported"); - let header = - pool.api() - .push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true); + from_alice_watcher = block_on(pool.submit_and_watch( + api.expect_hash_from_number(1), + SOURCE, + from_alice.clone(), + )) + .expect("1. Imported"); + let header = api.push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true); assert_eq!(pool.status().ready, 1); log::trace!(target: LOG_TARGET, ">> B1: {:?} {:?}", header.hash(), header); b1_header = header; @@ -1122,10 +1188,13 @@ fn switching_fork_with_finalized_works() { // block B2 { - from_bob_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone())) - .expect("1. Imported"); - let header = pool.api().push_block_with_parent( + from_bob_watcher = block_on(pool.submit_and_watch( + api.expect_hash_from_number(1), + SOURCE, + from_bob.clone(), + )) + .expect("1. Imported"); + let header = api.push_block_with_parent( a_header.hash(), vec![from_alice.clone(), from_bob.clone()], true, @@ -1174,11 +1243,12 @@ fn switching_fork_multiple_times_works() { let a_header = api.push_block(1, vec![], true); let pool = create_basic_pool(api); + let api = pool.api(); let from_alice = uxt(Alice, 1); let from_bob = uxt(Bob, 2); - pool.api().increment_nonce(Alice.into()); - pool.api().increment_nonce(Bob.into()); + api.increment_nonce(Alice.into()); + api.increment_nonce(Bob.into()); let from_alice_watcher; let from_bob_watcher; @@ -1187,12 +1257,13 @@ fn switching_fork_multiple_times_works() { // block B1 { - from_alice_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone())) - .expect("1. Imported"); - let header = - pool.api() - .push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true); + from_alice_watcher = block_on(pool.submit_and_watch( + api.expect_hash_from_number(1), + SOURCE, + from_alice.clone(), + )) + .expect("1. Imported"); + let header = api.push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true); assert_eq!(pool.status().ready, 1); log::trace!(target: LOG_TARGET, ">> B1: {:?} {:?}", header.hash(), header); b1_header = header; @@ -1200,10 +1271,13 @@ fn switching_fork_multiple_times_works() { // block B2 { - from_bob_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone())) - .expect("1. Imported"); - let header = pool.api().push_block_with_parent( + from_bob_watcher = block_on(pool.submit_and_watch( + api.expect_hash_from_number(1), + SOURCE, + from_bob.clone(), + )) + .expect("1. 
Imported"); + let header = api.push_block_with_parent( a_header.hash(), vec![from_alice.clone(), from_bob.clone()], true, @@ -1223,14 +1297,14 @@ fn switching_fork_multiple_times_works() { { // phase-1 - let event = block_event_with_retracted(b2_header.clone(), b1_header.hash(), pool.api()); + let event = block_event_with_retracted(b2_header.clone(), b1_header.hash(), api); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 0); } { // phase-2 - let event = block_event_with_retracted(b1_header.clone(), b2_header.hash(), pool.api()); + let event = block_event_with_retracted(b1_header.clone(), b2_header.hash(), api); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 1); } @@ -1282,13 +1356,14 @@ fn two_blocks_delayed_finalization_works() { let a_header = api.push_block(1, vec![], true); let pool = create_basic_pool(api); + let api = pool.api(); let from_alice = uxt(Alice, 1); let from_bob = uxt(Bob, 2); let from_charlie = uxt(Charlie, 3); - pool.api().increment_nonce(Alice.into()); - pool.api().increment_nonce(Bob.into()); - pool.api().increment_nonce(Charlie.into()); + api.increment_nonce(Alice.into()); + api.increment_nonce(Bob.into()); + api.increment_nonce(Charlie.into()); let from_alice_watcher; let from_bob_watcher; @@ -1299,12 +1374,13 @@ fn two_blocks_delayed_finalization_works() { // block B1 { - from_alice_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone())) - .expect("1. Imported"); - let header = - pool.api() - .push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true); + from_alice_watcher = block_on(pool.submit_and_watch( + api.expect_hash_from_number(1), + SOURCE, + from_alice.clone(), + )) + .expect("1. Imported"); + let header = api.push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true); assert_eq!(pool.status().ready, 1); log::trace!(target: LOG_TARGET, ">> B1: {:?} {:?}", header.hash(), header); @@ -1313,12 +1389,13 @@ fn two_blocks_delayed_finalization_works() { // block C1 { - from_bob_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone())) - .expect("1. Imported"); - let header = - pool.api() - .push_block_with_parent(b1_header.hash(), vec![from_bob.clone()], true); + from_bob_watcher = block_on(pool.submit_and_watch( + api.expect_hash_from_number(1), + SOURCE, + from_bob.clone(), + )) + .expect("1. Imported"); + let header = api.push_block_with_parent(b1_header.hash(), vec![from_bob.clone()], true); assert_eq!(pool.status().ready, 2); log::trace!(target: LOG_TARGET, ">> C1: {:?} {:?}", header.hash(), header); @@ -1327,12 +1404,13 @@ fn two_blocks_delayed_finalization_works() { // block D1 { - from_charlie_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_charlie.clone())) - .expect("1. Imported"); - let header = - pool.api() - .push_block_with_parent(c1_header.hash(), vec![from_charlie.clone()], true); + from_charlie_watcher = block_on(pool.submit_and_watch( + api.expect_hash_from_number(1), + SOURCE, + from_charlie.clone(), + )) + .expect("1. 
Imported"); + let header = api.push_block_with_parent(c1_header.hash(), vec![from_charlie.clone()], true); assert_eq!(pool.status().ready, 3); log::trace!(target: LOG_TARGET, ">> D1: {:?} {:?}", header.hash(), header); @@ -1398,11 +1476,12 @@ fn delayed_finalization_does_not_retract() { let a_header = api.push_block(1, vec![], true); let pool = create_basic_pool(api); + let api = pool.api(); let from_alice = uxt(Alice, 1); let from_bob = uxt(Bob, 2); - pool.api().increment_nonce(Alice.into()); - pool.api().increment_nonce(Bob.into()); + api.increment_nonce(Alice.into()); + api.increment_nonce(Bob.into()); let from_alice_watcher; let from_bob_watcher; @@ -1411,12 +1490,13 @@ fn delayed_finalization_does_not_retract() { // block B1 { - from_alice_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone())) - .expect("1. Imported"); - let header = - pool.api() - .push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true); + from_alice_watcher = block_on(pool.submit_and_watch( + api.expect_hash_from_number(1), + SOURCE, + from_alice.clone(), + )) + .expect("1. Imported"); + let header = api.push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true); assert_eq!(pool.status().ready, 1); log::trace!(target: LOG_TARGET, ">> B1: {:?} {:?}", header.hash(), header); @@ -1425,12 +1505,13 @@ fn delayed_finalization_does_not_retract() { // block C1 { - from_bob_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone())) - .expect("1. Imported"); - let header = - pool.api() - .push_block_with_parent(b1_header.hash(), vec![from_bob.clone()], true); + from_bob_watcher = block_on(pool.submit_and_watch( + api.expect_hash_from_number(1), + SOURCE, + from_bob.clone(), + )) + .expect("1. Imported"); + let header = api.push_block_with_parent(b1_header.hash(), vec![from_bob.clone()], true); assert_eq!(pool.status().ready, 2); log::trace!(target: LOG_TARGET, ">> C1: {:?} {:?}", header.hash(), header); @@ -1493,11 +1574,12 @@ fn best_block_after_finalization_does_not_retract() { let a_header = api.push_block(1, vec![], true); let pool = create_basic_pool(api); + let api = pool.api(); let from_alice = uxt(Alice, 1); let from_bob = uxt(Bob, 2); - pool.api().increment_nonce(Alice.into()); - pool.api().increment_nonce(Bob.into()); + api.increment_nonce(Alice.into()); + api.increment_nonce(Bob.into()); let from_alice_watcher; let from_bob_watcher; @@ -1506,12 +1588,13 @@ fn best_block_after_finalization_does_not_retract() { // block B1 { - from_alice_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone())) - .expect("1. Imported"); - let header = - pool.api() - .push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true); + from_alice_watcher = block_on(pool.submit_and_watch( + api.expect_hash_from_number(1), + SOURCE, + from_alice.clone(), + )) + .expect("1. Imported"); + let header = api.push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true); assert_eq!(pool.status().ready, 1); log::trace!(target: LOG_TARGET, ">> B1: {:?} {:?}", header.hash(), header); @@ -1520,12 +1603,13 @@ fn best_block_after_finalization_does_not_retract() { // block C1 { - from_bob_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone())) - .expect("1. 
Imported"); - let header = - pool.api() - .push_block_with_parent(b1_header.hash(), vec![from_bob.clone()], true); + from_bob_watcher = block_on(pool.submit_and_watch( + api.expect_hash_from_number(1), + SOURCE, + from_bob.clone(), + )) + .expect("1. Imported"); + let header = api.push_block_with_parent(b1_header.hash(), vec![from_bob.clone()], true); assert_eq!(pool.status().ready, 2); log::trace!(target: LOG_TARGET, ">> C1: {:?} {:?}", header.hash(), header); diff --git a/substrate/test-utils/runtime/transaction-pool/src/lib.rs b/substrate/test-utils/runtime/transaction-pool/src/lib.rs index 7b5292004402..8c8345b06bd3 100644 --- a/substrate/test-utils/runtime/transaction-pool/src/lib.rs +++ b/substrate/test-utils/runtime/transaction-pool/src/lib.rs @@ -22,6 +22,7 @@ use codec::Encode; use futures::future::ready; use parking_lot::RwLock; +use sc_transaction_pool::ChainApi; use sp_blockchain::{CachedHeaderMetadata, TreeRoute}; use sp_runtime::{ generic::{self, BlockId}, @@ -237,9 +238,14 @@ impl TestApi { ) -> Result<TreeRoute<Block>, Error> { sp_blockchain::tree_route(self, from, to) } + + /// Helper function for mapping a block number to its hash. Use when the mapping must not fail. + pub fn expect_hash_from_number(&self, n: BlockNumber) -> Hash { + self.block_id_to_hash(&BlockId::Number(n)).unwrap().unwrap() + } } -impl sc_transaction_pool::ChainApi for TestApi { +impl ChainApi for TestApi { type Block = Block; type Error = Error; type ValidationFuture = futures::future::Ready<Result<TransactionValidity, Error>>; @@ -247,13 +253,13 @@ impl sc_transaction_pool::ChainApi for TestApi { fn validate_transaction( &self, - at: &BlockId<Self::Block>, + at: <Self::Block as BlockT>::Hash, _source: TransactionSource, uxt: <Self::Block as BlockT>::Extrinsic, ) -> Self::ValidationFuture { self.validation_requests.write().push(uxt.clone()); - match self.block_id_to_number(at) { + match self.block_id_to_number(&BlockId::Hash(at)) { Ok(Some(number)) => { let found_best = self .chain diff --git a/substrate/utils/frame/rpc/system/src/lib.rs b/substrate/utils/frame/rpc/system/src/lib.rs index 1eff71e3390a..f467a8798904 100644 --- a/substrate/utils/frame/rpc/system/src/lib.rs +++ b/substrate/utils/frame/rpc/system/src/lib.rs @@ -219,7 +219,6 @@ mod tests { use jsonrpsee::{core::Error as JsonRpseeError, types::error::CallError}; use sc_transaction_pool::BasicPool; use sp_runtime::{ - generic::BlockId, transaction_validity::{InvalidTransaction, TransactionValidityError}, ApplyExtrinsicResult, }; @@ -245,11 +244,12 @@ mod tests { }; t.into_unchecked_extrinsic() }; + let hash_of_block0 = client.info().genesis_hash; // Populate the pool let ext0 = new_transaction(0); - block_on(pool.submit_one(&BlockId::number(0), source, ext0)).unwrap(); + block_on(pool.submit_one(hash_of_block0, source, ext0)).unwrap(); let ext1 = new_transaction(1); - block_on(pool.submit_one(&BlockId::number(0), source, ext1)).unwrap(); + block_on(pool.submit_one(hash_of_block0, source, ext1)).unwrap(); let accounts = System::new(client, pool, DenyUnsafe::Yes); From 5a2833cceb87cf227fc49b8b7441adb5634cb1f1 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Wed, 27 Sep 2023 11:59:19 +0200 Subject: [PATCH 58/69] genesis-builder: implemented for all runtimes (#1492) This PR implements the [`GenesisBuilder` API](https://github.com/paritytech/polkadot-sdk/blob/a414ea7515c9cdc81f1d12410e646afc148250e8/substrate/primitives/genesis-builder/src/lib.rs#L38) for all the runtimes in the polkadot repo. 
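For reference, every runtime gains the same small addition inside its `impl_runtime_apis!` block, delegating to the helpers in `frame_support::genesis_builder_helper`. A minimal sketch of that pattern with the type parameters written out (assuming the runtime's genesis config type is named `RuntimeGenesisConfig`, as in the runtimes touched here):

```rust
use frame_support::genesis_builder_helper::{build_config, create_default_config};

impl_runtime_apis! {
	// ... the runtime's other API implementations ...

	impl sp_genesis_builder::GenesisBuilder<Block> for Runtime {
		// Serialize the default `RuntimeGenesisConfig` into a JSON blob.
		fn create_default_config() -> Vec<u8> {
			create_default_config::<RuntimeGenesisConfig>()
		}

		// Deserialize a JSON blob into `RuntimeGenesisConfig` and write it
		// into genesis storage; errors if the blob does not match the type.
		fn build_config(config: Vec<u8>) -> sp_genesis_builder::Result {
			build_config::<RuntimeGenesisConfig>(config)
		}
	}
}
```

With this in place, a client can fetch the default config as JSON via the runtime API, patch the fields it cares about (e.g. endowed balances or session keys), and pass the result back through `build_config` to obtain genesis storage without needing a native runtime build.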
Step towards: paritytech/polkadot-sdk#25 --------- Co-authored-by: ordian --- Cargo.lock | 21 +++++++++++++++++++ cumulus/parachain-template/runtime/Cargo.toml | 2 ++ cumulus/parachain-template/runtime/src/lib.rs | 11 ++++++++++ .../assets/asset-hub-kusama/Cargo.toml | 2 ++ .../assets/asset-hub-kusama/src/lib.rs | 11 ++++++++++ .../assets/asset-hub-polkadot/Cargo.toml | 2 ++ .../assets/asset-hub-polkadot/src/lib.rs | 11 ++++++++++ .../assets/asset-hub-westend/Cargo.toml | 2 ++ .../assets/asset-hub-westend/src/lib.rs | 11 ++++++++++ .../bridge-hubs/bridge-hub-kusama/Cargo.toml | 2 ++ .../bridge-hubs/bridge-hub-kusama/src/lib.rs | 11 ++++++++++ .../bridge-hub-polkadot/Cargo.toml | 2 ++ .../bridge-hub-polkadot/src/lib.rs | 11 ++++++++++ .../bridge-hubs/bridge-hub-rococo/Cargo.toml | 2 ++ .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 11 ++++++++++ .../collectives-polkadot/Cargo.toml | 2 ++ .../collectives-polkadot/src/lib.rs | 11 ++++++++++ .../contracts/contracts-rococo/Cargo.toml | 2 ++ .../contracts/contracts-rococo/src/lib.rs | 11 ++++++++++ .../glutton/glutton-kusama/Cargo.toml | 2 ++ .../glutton/glutton-kusama/src/lib.rs | 11 ++++++++++ .../runtimes/starters/seedling/Cargo.toml | 2 ++ .../runtimes/starters/seedling/src/lib.rs | 11 ++++++++++ .../runtimes/starters/shell/Cargo.toml | 2 ++ .../runtimes/starters/shell/src/lib.rs | 11 ++++++++++ .../runtimes/testing/penpal/Cargo.toml | 2 ++ .../runtimes/testing/penpal/src/lib.rs | 11 ++++++++++ .../testing/rococo-parachain/Cargo.toml | 2 ++ .../testing/rococo-parachain/src/lib.rs | 11 ++++++++++ polkadot/runtime/kusama/Cargo.toml | 2 ++ polkadot/runtime/kusama/src/lib.rs | 15 ++++++++++++- polkadot/runtime/polkadot/Cargo.toml | 2 ++ polkadot/runtime/polkadot/src/lib.rs | 15 ++++++++++++- polkadot/runtime/rococo/Cargo.toml | 2 ++ polkadot/runtime/rococo/src/lib.rs | 14 ++++++++++++- polkadot/runtime/test-runtime/Cargo.toml | 2 ++ polkadot/runtime/test-runtime/src/lib.rs | 14 ++++++++++++- polkadot/runtime/westend/Cargo.toml | 2 ++ polkadot/runtime/westend/src/lib.rs | 14 ++++++++++++- .../bin/node-template/runtime/Cargo.toml | 2 ++ .../bin/node-template/runtime/src/lib.rs | 11 ++++++++++ substrate/bin/node/runtime/Cargo.toml | 2 ++ substrate/bin/node/runtime/src/lib.rs | 11 ++++++++++ substrate/test-utils/runtime/Cargo.toml | 3 --- substrate/test-utils/runtime/build.rs | 15 ------------- substrate/test-utils/runtime/src/lib.rs | 5 +---- 46 files changed, 307 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 683289adb45e..dcfd8cd66017 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -774,6 +774,7 @@ dependencies = [ "sp-block-builder", "sp-consensus-aura", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-offchain", "sp-runtime", @@ -866,6 +867,7 @@ dependencies = [ "sp-block-builder", "sp-consensus-aura", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-offchain", "sp-runtime", @@ -961,6 +963,7 @@ dependencies = [ "sp-block-builder", "sp-consensus-aura", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-io", "sp-offchain", @@ -1859,6 +1862,7 @@ dependencies = [ "sp-block-builder", "sp-consensus-aura", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-io", "sp-offchain", @@ -1922,6 +1926,7 @@ dependencies = [ "sp-block-builder", "sp-consensus-aura", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-io", "sp-offchain", @@ -2021,6 +2026,7 @@ dependencies = [ "sp-block-builder", "sp-consensus-aura", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", @@ -2636,6 +2642,7 @@ 
dependencies = [ "sp-block-builder", "sp-consensus-aura", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-io", "sp-offchain", @@ -2846,6 +2853,7 @@ dependencies = [ "sp-block-builder", "sp-consensus-aura", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-offchain", "sp-runtime", @@ -5815,6 +5823,7 @@ dependencies = [ "sp-block-builder", "sp-consensus-aura", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-offchain", "sp-runtime", @@ -6811,6 +6820,7 @@ dependencies = [ "sp-consensus-babe", "sp-consensus-grandpa", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-io", "sp-offchain", @@ -8448,6 +8458,7 @@ dependencies = [ "sp-consensus-aura", "sp-consensus-grandpa", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-offchain", "sp-runtime", @@ -10919,6 +10930,7 @@ dependencies = [ "sp-block-builder", "sp-consensus-aura", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-offchain", "sp-runtime", @@ -11251,6 +11263,7 @@ dependencies = [ "sp-block-builder", "sp-consensus-aura", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-offchain", "sp-runtime", @@ -12633,6 +12646,7 @@ dependencies = [ "sp-consensus-babe", "sp-consensus-beefy", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", @@ -13061,6 +13075,7 @@ dependencies = [ "sp-consensus-babe", "sp-consensus-beefy", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", @@ -14075,6 +14090,7 @@ dependencies = [ "sp-block-builder", "sp-consensus-aura", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-offchain", "sp-runtime", @@ -14160,6 +14176,7 @@ dependencies = [ "sp-consensus-babe", "sp-consensus-beefy", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", @@ -16103,6 +16120,7 @@ dependencies = [ "sp-block-builder", "sp-consensus-aura", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-offchain", "sp-runtime", @@ -16350,6 +16368,7 @@ dependencies = [ "sp-block-builder", "sp-consensus-aura", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-offchain", "sp-runtime", @@ -17764,6 +17783,7 @@ dependencies = [ "sp-consensus-babe", "sp-consensus-beefy", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", @@ -20293,6 +20313,7 @@ dependencies = [ "sp-consensus-babe", "sp-consensus-beefy", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", diff --git a/cumulus/parachain-template/runtime/Cargo.toml b/cumulus/parachain-template/runtime/Cargo.toml index 4e51f16dca16..68db2f041ddb 100644 --- a/cumulus/parachain-template/runtime/Cargo.toml +++ b/cumulus/parachain-template/runtime/Cargo.toml @@ -44,6 +44,7 @@ sp-api = { path = "../../../substrate/primitives/api", default-features = false} sp-block-builder = { path = "../../../substrate/primitives/block-builder", default-features = false} sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura", default-features = false} sp-core = { path = "../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false} sp-offchain = { path = "../../../substrate/primitives/offchain", default-features = false} sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} @@ -111,6 +112,7 @@ std = [ "sp-block-builder/std", "sp-consensus-aura/std", "sp-core/std", + "sp-genesis-builder/std", "sp-inherents/std", "sp-offchain/std", 
"sp-runtime/std", diff --git a/cumulus/parachain-template/runtime/src/lib.rs b/cumulus/parachain-template/runtime/src/lib.rs index 038597096f6a..b9bf97d7786f 100644 --- a/cumulus/parachain-template/runtime/src/lib.rs +++ b/cumulus/parachain-template/runtime/src/lib.rs @@ -28,6 +28,7 @@ use sp_version::RuntimeVersion; use frame_support::{ construct_runtime, dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything}, weights::{ @@ -742,6 +743,16 @@ impl_runtime_apis! { Ok(batches) } } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + create_default_config::() + } + + fn build_config(config: Vec) -> sp_genesis_builder::Result { + build_config::(config) + } + } } cumulus_pallet_parachain_system::register_validate_block! { diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml index 1d0c7bc98563..4853195d329a 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml @@ -42,6 +42,7 @@ sp-api = { path = "../../../../../substrate/primitives/api", default-features = sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} @@ -209,6 +210,7 @@ std = [ "sp-block-builder/std", "sp-consensus-aura/std", "sp-core/std", + "sp-genesis-builder/std", "sp-inherents/std", "sp-offchain/std", "sp-runtime/std", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs index 828d1b4750a3..40ce122112d2 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs @@ -52,6 +52,7 @@ use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ construct_runtime, dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, ord_parameter_types, parameter_types, traits::{ AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU32, ConstU64, ConstU8, EitherOfDiverse, @@ -1340,6 +1341,16 @@ impl_runtime_apis! { Ok(batches) } } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + create_default_config::() + } + + fn build_config(config: Vec) -> sp_genesis_builder::Result { + build_config::(config) + } + } } cumulus_pallet_parachain_system::register_validate_block! 
{ diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/Cargo.toml index 4a6ce4cbf593..f0eae31f53a9 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/Cargo.toml @@ -39,6 +39,7 @@ sp-api = { path = "../../../../../substrate/primitives/api", default-features = sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} @@ -190,6 +191,7 @@ std = [ "sp-block-builder/std", "sp-consensus-aura/std", "sp-core/std", + "sp-genesis-builder/std", "sp-inherents/std", "sp-offchain/std", "sp-runtime/std", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs index 0051af21f9a3..d4f7d6ef3616 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs @@ -84,6 +84,7 @@ use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ construct_runtime, dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ AsEnsureOriginWithArg, ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, @@ -1218,6 +1219,16 @@ impl_runtime_apis! { Ok(batches) } } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + create_default_config::() + } + + fn build_config(config: Vec) -> sp_genesis_builder::Result { + build_config::(config) + } + } } cumulus_pallet_parachain_system::register_validate_block! 
{ diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml index f436ae9537f4..f7f6fdf68e46 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml @@ -41,6 +41,7 @@ sp-api = { path = "../../../../../substrate/primitives/api", default-features = sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} @@ -198,6 +199,7 @@ std = [ "sp-block-builder/std", "sp-consensus-aura/std", "sp-core/std", + "sp-genesis-builder/std", "sp-inherents/std", "sp-offchain/std", "sp-runtime/std", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 4887fce1b0a4..759cd727f1dc 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -39,6 +39,7 @@ use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; use frame_support::{ construct_runtime, dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, ord_parameter_types, parameter_types, traits::{ tokens::nonfungibles_v2::Inspect, AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU32, @@ -1353,6 +1354,16 @@ impl_runtime_apis! { Ok(batches) } } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + create_default_config::() + } + + fn build_config(config: Vec) -> sp_genesis_builder::Result { + build_config::(config) + } + } } cumulus_pallet_parachain_system::register_validate_block! 
{ diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/Cargo.toml index 67c3fa37df04..d54e66c42dc9 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/Cargo.toml @@ -37,6 +37,7 @@ sp-api = { path = "../../../../../substrate/primitives/api", default-features = sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} sp-io = { path = "../../../../../substrate/primitives/io", default-features = false} sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} @@ -118,6 +119,7 @@ std = [ "sp-block-builder/std", "sp-consensus-aura/std", "sp-core/std", + "sp-genesis-builder/std", "sp-inherents/std", "sp-io/std", "sp-offchain/std", diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs index 54b15e6b327b..791751e77368 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs @@ -43,6 +43,7 @@ use sp_version::RuntimeVersion; use frame_support::{ construct_runtime, dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything}, weights::{ConstantMultiplier, Weight}, @@ -779,6 +780,16 @@ impl_runtime_apis! { Ok(batches) } } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + create_default_config::() + } + + fn build_config(config: Vec) -> sp_genesis_builder::Result { + build_config::(config) + } + } } cumulus_pallet_parachain_system::register_validate_block! 
{ diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/Cargo.toml index 77923ee74f89..d9dba5576814 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/Cargo.toml @@ -37,6 +37,7 @@ sp-api = { path = "../../../../../substrate/primitives/api", default-features = sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} sp-io = { path = "../../../../../substrate/primitives/io", default-features = false} sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} @@ -118,6 +119,7 @@ std = [ "sp-block-builder/std", "sp-consensus-aura/std", "sp-core/std", + "sp-genesis-builder/std", "sp-inherents/std", "sp-io/std", "sp-offchain/std", diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs index dbfdc249a3cd..928b9d091ec5 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs @@ -43,6 +43,7 @@ use sp_version::RuntimeVersion; use frame_support::{ construct_runtime, dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything}, weights::{ConstantMultiplier, Weight}, @@ -779,6 +780,16 @@ impl_runtime_apis! { Ok(batches) } } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + create_default_config::() + } + + fn build_config(config: Vec) -> sp_genesis_builder::Result { + build_config::(config) + } + } } cumulus_pallet_parachain_system::register_validate_block! 
{ diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index 70c62d1a34fd..fec00fe0794c 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -37,6 +37,7 @@ sp-api = { path = "../../../../../substrate/primitives/api", default-features = sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} sp-io = { path = "../../../../../substrate/primitives/io", default-features = false} sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} @@ -153,6 +154,7 @@ std = [ "sp-block-builder/std", "sp-consensus-aura/std", "sp-core/std", + "sp-genesis-builder/std", "sp-inherents/std", "sp-io/std", "sp-offchain/std", diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index dbdc2133ec88..4c850f92b8de 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -45,6 +45,7 @@ use sp_version::RuntimeVersion; use frame_support::{ construct_runtime, dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ConstBool, ConstU32, ConstU64, ConstU8, Everything}, weights::{ConstantMultiplier, Weight}, @@ -1233,6 +1234,16 @@ impl_runtime_apis! { Ok(batches) } } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + create_default_config::() + } + + fn build_config(config: Vec) -> sp_genesis_builder::Result { + build_config::(config) + } + } } cumulus_pallet_parachain_system::register_validate_block! 
{ diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-polkadot/Cargo.toml index 6a5806d3b901..8cb5519a24db 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/Cargo.toml +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/Cargo.toml @@ -43,6 +43,7 @@ sp-arithmetic = { path = "../../../../../substrate/primitives/arithmetic", defau sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} @@ -201,6 +202,7 @@ std = [ "sp-block-builder/std", "sp-consensus-aura/std", "sp-core/std", + "sp-genesis-builder/std", "sp-inherents/std", "sp-io/std", "sp-offchain/std", diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs index d833c75d470c..ff16f93d8f54 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs @@ -68,6 +68,7 @@ use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ construct_runtime, dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ fungible::HoldConsideration, ConstBool, ConstU16, ConstU32, ConstU64, ConstU8, @@ -939,6 +940,16 @@ impl_runtime_apis! { Ok(batches) } } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + create_default_config::() + } + + fn build_config(config: Vec) -> sp_genesis_builder::Result { + build_config::(config) + } + } } cumulus_pallet_parachain_system::register_validate_block! 
{ diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml index f6fccc043542..a85aa8dcb174 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml @@ -22,6 +22,7 @@ sp-api = { path = "../../../../../substrate/primitives/api", default-features = sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} @@ -120,6 +121,7 @@ std = [ "sp-block-builder/std", "sp-consensus-aura/std", "sp-core/std", + "sp-genesis-builder/std", "sp-inherents/std", "sp-offchain/std", "sp-runtime/std", diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index 399ada1be2c7..70392c5ecbcc 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -47,6 +47,7 @@ use sp_version::RuntimeVersion; use frame_support::{ construct_runtime, dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ConstBool, ConstU128, ConstU16, ConstU32, ConstU64, ConstU8, Everything}, weights::{ConstantMultiplier, Weight}, @@ -694,6 +695,16 @@ impl_runtime_apis! { Ok(batches) } } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + create_default_config::() + } + + fn build_config(config: Vec) -> sp_genesis_builder::Result { + build_config::(config) + } + } } cumulus_pallet_parachain_system::register_validate_block! 
{ diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml b/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml index 0ffe59b927f9..63b658ca977a 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml +++ b/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml @@ -24,6 +24,7 @@ sp-api = { path = "../../../../../substrate/primitives/api", default-features = sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} @@ -91,6 +92,7 @@ std = [ "sp-block-builder/std", "sp-consensus-aura/std", "sp-core/std", + "sp-genesis-builder/std", "sp-inherents/std", "sp-offchain/std", "sp-runtime/std", diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs index 41cb0fceebb5..d3369202aac4 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs @@ -64,6 +64,7 @@ use sp_version::RuntimeVersion; pub use frame_support::{ construct_runtime, dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything, IsInVec, Randomness, @@ -452,6 +453,16 @@ impl_runtime_apis! { Ok(batches) } } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + create_default_config::() + } + + fn build_config(config: Vec) -> sp_genesis_builder::Result { + build_config::(config) + } + } } cumulus_pallet_parachain_system::register_validate_block! 
{ diff --git a/cumulus/parachains/runtimes/starters/seedling/Cargo.toml b/cumulus/parachains/runtimes/starters/seedling/Cargo.toml index 1b68b720d977..d9711b57b377 100644 --- a/cumulus/parachains/runtimes/starters/seedling/Cargo.toml +++ b/cumulus/parachains/runtimes/starters/seedling/Cargo.toml @@ -20,6 +20,7 @@ sp-api = { path = "../../../../../substrate/primitives/api", default-features = sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} @@ -63,6 +64,7 @@ std = [ "sp-block-builder/std", "sp-consensus-aura/std", "sp-core/std", + "sp-genesis-builder/std", "sp-inherents/std", "sp-offchain/std", "sp-runtime/std", diff --git a/cumulus/parachains/runtimes/starters/seedling/src/lib.rs b/cumulus/parachains/runtimes/starters/seedling/src/lib.rs index 34e82737f82b..c2bcaf8a1266 100644 --- a/cumulus/parachains/runtimes/starters/seedling/src/lib.rs +++ b/cumulus/parachains/runtimes/starters/seedling/src/lib.rs @@ -46,6 +46,7 @@ use sp_version::RuntimeVersion; pub use frame_support::{ construct_runtime, dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, IsInVec, Randomness}, weights::{ @@ -366,6 +367,16 @@ impl_runtime_apis! { ParachainSystem::collect_collation_info(header) } } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + create_default_config::() + } + + fn build_config(config: Vec) -> sp_genesis_builder::Result { + build_config::(config) + } + } } cumulus_pallet_parachain_system::register_validate_block! 
{ diff --git a/cumulus/parachains/runtimes/starters/shell/Cargo.toml b/cumulus/parachains/runtimes/starters/shell/Cargo.toml index 46cef8d4ae08..675abc07b773 100644 --- a/cumulus/parachains/runtimes/starters/shell/Cargo.toml +++ b/cumulus/parachains/runtimes/starters/shell/Cargo.toml @@ -19,6 +19,7 @@ sp-api = { path = "../../../../../substrate/primitives/api", default-features = sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} @@ -64,6 +65,7 @@ std = [ "sp-block-builder/std", "sp-consensus-aura/std", "sp-core/std", + "sp-genesis-builder/std", "sp-inherents/std", "sp-offchain/std", "sp-runtime/std", diff --git a/cumulus/parachains/runtimes/starters/shell/src/lib.rs b/cumulus/parachains/runtimes/starters/shell/src/lib.rs index 477933b5c8d2..4aad553e6a3b 100644 --- a/cumulus/parachains/runtimes/starters/shell/src/lib.rs +++ b/cumulus/parachains/runtimes/starters/shell/src/lib.rs @@ -53,6 +53,7 @@ use sp_version::RuntimeVersion; pub use frame_support::{ construct_runtime, dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, IsInVec, Randomness}, weights::{ @@ -398,6 +399,16 @@ impl_runtime_apis! { ParachainSystem::collect_collation_info(header) } } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + create_default_config::() + } + + fn build_config(config: Vec) -> sp_genesis_builder::Result { + build_config::(config) + } + } } cumulus_pallet_parachain_system::register_validate_block! 
{ diff --git a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml index 547695e76210..6c4f8c3895a8 100644 --- a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml @@ -44,6 +44,7 @@ sp-api = { path = "../../../../../substrate/primitives/api", default-features = sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} @@ -117,6 +118,7 @@ std = [ "sp-block-builder/std", "sp-consensus-aura/std", "sp-core/std", + "sp-genesis-builder/std", "sp-inherents/std", "sp-offchain/std", "sp-runtime/std", diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index 9a758cdd9782..fe0f19c30632 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -36,6 +36,7 @@ use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; use frame_support::{ construct_runtime, dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, pallet_prelude::Weight, parameter_types, traits::{AsEnsureOriginWithArg, ConstBool, ConstU32, ConstU64, ConstU8, Everything}, @@ -832,6 +833,16 @@ impl_runtime_apis! { Ok(batches) } } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + create_default_config::() + } + + fn build_config(config: Vec) -> sp_genesis_builder::Result { + build_config::(config) + } + } } cumulus_pallet_parachain_system::register_validate_block! 
{ diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml index 54055ca732ba..029d5d10f986 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml @@ -27,6 +27,7 @@ sp-api = { path = "../../../../../substrate/primitives/api", default-features = sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} @@ -90,6 +91,7 @@ std = [ "sp-block-builder/std", "sp-consensus-aura/std", "sp-core/std", + "sp-genesis-builder/std", "sp-inherents/std", "sp-offchain/std", "sp-runtime/std", diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index 362ad0383a23..50c5a445c25f 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -40,6 +40,7 @@ use sp_version::RuntimeVersion; pub use frame_support::{ construct_runtime, dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, match_types, parameter_types, traits::{ AsEnsureOriginWithArg, ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything, @@ -796,6 +797,16 @@ impl_runtime_apis! { ParachainSystem::collect_collation_info(header) } } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + create_default_config::() + } + + fn build_config(config: Vec) -> sp_genesis_builder::Result { + build_config::(config) + } + } } cumulus_pallet_parachain_system::register_validate_block! 
{ diff --git a/polkadot/runtime/kusama/Cargo.toml b/polkadot/runtime/kusama/Cargo.toml index 565b34637e29..ebbcfc71e953 100644 --- a/polkadot/runtime/kusama/Cargo.toml +++ b/polkadot/runtime/kusama/Cargo.toml @@ -28,6 +28,7 @@ offchain-primitives = { package = "sp-offchain", path = "../../../substrate/prim sp-std = { package = "sp-std", path = "../../../substrate/primitives/std", default-features = false } sp-application-crypto = { path = "../../../substrate/primitives/application-crypto", default-features = false } sp-arithmetic = { path = "../../../substrate/primitives/arithmetic", default-features = false } +sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } sp-io = { path = "../../../substrate/primitives/io", default-features = false } sp-mmr-primitives = { path = "../../../substrate/primitives/merkle-mountain-range", default-features = false } sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } @@ -211,6 +212,7 @@ std = [ "sp-application-crypto/std", "sp-arithmetic/std", "sp-core/std", + "sp-genesis-builder/std", "sp-io/std", "sp-mmr-primitives/std", "sp-npos-elections/std", diff --git a/polkadot/runtime/kusama/src/lib.rs b/polkadot/runtime/kusama/src/lib.rs index 8d8bd4baacf8..082e1aca375c 100644 --- a/polkadot/runtime/kusama/src/lib.rs +++ b/polkadot/runtime/kusama/src/lib.rs @@ -60,8 +60,11 @@ use frame_election_provider_support::{ bounds::ElectionBoundsBuilder, generate_solution_type, onchain, NposSolution, SequentialPhragmen, }; + use frame_support::{ - construct_runtime, parameter_types, + construct_runtime, + genesis_builder_helper::{build_config, create_default_config}, + parameter_types, traits::{ fungible::HoldConsideration, ConstU32, Contains, EitherOf, EitherOfDiverse, EverythingBut, InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, PrivilegeCmp, ProcessMessage, @@ -2478,6 +2481,16 @@ sp_api::impl_runtime_apis! 
{ Ok(batches) } } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + create_default_config::() + } + + fn build_config(config: Vec) -> sp_genesis_builder::Result { + build_config::(config) + } + } } #[cfg(test)] diff --git a/polkadot/runtime/polkadot/Cargo.toml b/polkadot/runtime/polkadot/Cargo.toml index a659a4553220..5e283b496697 100644 --- a/polkadot/runtime/polkadot/Cargo.toml +++ b/polkadot/runtime/polkadot/Cargo.toml @@ -26,6 +26,7 @@ offchain-primitives = { package = "sp-offchain", path = "../../../substrate/prim tx-pool-api = { package = "sp-transaction-pool", path = "../../../substrate/primitives/transaction-pool", default-features = false } sp-arithmetic = { path = "../../../substrate/primitives/arithmetic", default-features = false } sp-api = { path = "../../../substrate/primitives/api", default-features = false } +sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } sp-std = { path = "../../../substrate/primitives/std", default-features = false } sp-io = { path = "../../../substrate/primitives/io", default-features = false } sp-mmr-primitives = { path = "../../../substrate/primitives/merkle-mountain-range", default-features = false } @@ -193,6 +194,7 @@ std = [ "sp-api/std", "sp-arithmetic/std", "sp-core/std", + "sp-genesis-builder/std", "sp-io/std", "sp-mmr-primitives/std", "sp-npos-elections/std", diff --git a/polkadot/runtime/polkadot/src/lib.rs b/polkadot/runtime/polkadot/src/lib.rs index 5956b0e155bb..c9e3ded6dadd 100644 --- a/polkadot/runtime/polkadot/src/lib.rs +++ b/polkadot/runtime/polkadot/src/lib.rs @@ -45,7 +45,9 @@ use frame_election_provider_support::{ bounds::ElectionBoundsBuilder, generate_solution_type, onchain, SequentialPhragmen, }; use frame_support::{ - construct_runtime, parameter_types, + construct_runtime, + genesis_builder_helper::{build_config, create_default_config}, + parameter_types, traits::{ fungible::HoldConsideration, ConstU32, Contains, EitherOf, EitherOfDiverse, EverythingBut, InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, PrivilegeCmp, ProcessMessage, @@ -2228,6 +2230,17 @@ sp_api::impl_runtime_apis! 
{ Ok(batches) } } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + create_default_config::() + } + + fn build_config(config: Vec) -> sp_genesis_builder::Result { + build_config::(config) + } + } + } #[cfg(test)] diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml index 8f38659b84f5..64cf6207236d 100644 --- a/polkadot/runtime/rococo/Cargo.toml +++ b/polkadot/runtime/rococo/Cargo.toml @@ -21,6 +21,7 @@ beefy-primitives = { package = "sp-consensus-beefy", path = "../../../substrate/ binary-merkle-tree = { path = "../../../substrate/utils/binary-merkle-tree", default-features = false } rococo-runtime-constants = { package = "rococo-runtime-constants", path = "constants", default-features = false } sp-api = { path = "../../../substrate/primitives/api", default-features = false } +sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } inherents = { package = "sp-inherents", path = "../../../substrate/primitives/inherents", default-features = false } offchain-primitives = { package = "sp-offchain", path = "../../../substrate/primitives/offchain", default-features = false } sp-std = { package = "sp-std", path = "../../../substrate/primitives/std", default-features = false } @@ -175,6 +176,7 @@ std = [ "serde_derive", "sp-api/std", "sp-core/std", + "sp-genesis-builder/std", "sp-io/std", "sp-mmr-primitives/std", "sp-runtime/std", diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 7046c4640c04..c7d429bc99af 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -58,7 +58,9 @@ use beefy_primitives::{ }; use frame_support::{ - construct_runtime, parameter_types, + construct_runtime, + genesis_builder_helper::{build_config, create_default_config}, + parameter_types, traits::{ fungible::HoldConsideration, Contains, EitherOfDiverse, EverythingBut, InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, LockIdentifier, PrivilegeCmp, ProcessMessage, @@ -2232,6 +2234,16 @@ sp_api::impl_runtime_apis! 
{ Ok(batches) } } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + create_default_config::() + } + + fn build_config(config: Vec) -> sp_genesis_builder::Result { + build_config::(config) + } + } } #[cfg(test)] diff --git a/polkadot/runtime/test-runtime/Cargo.toml b/polkadot/runtime/test-runtime/Cargo.toml index a8f26eb5b3d9..2e9c773a3f8c 100644 --- a/polkadot/runtime/test-runtime/Cargo.toml +++ b/polkadot/runtime/test-runtime/Cargo.toml @@ -28,6 +28,7 @@ sp-io = { path = "../../../substrate/primitives/io", default-features = false } sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } sp-staking = { path = "../../../substrate/primitives/staking", default-features = false } sp-core = { path = "../../../substrate/primitives/core", default-features = false } +sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } sp-mmr-primitives = { path = "../../../substrate/primitives/merkle-mountain-range", default-features = false } sp-session = { path = "../../../substrate/primitives/session", default-features = false } sp-version = { path = "../../../substrate/primitives/version", default-features = false } @@ -124,6 +125,7 @@ std = [ "serde_derive", "sp-api/std", "sp-core/std", + "sp-genesis-builder/std", "sp-io/std", "sp-mmr-primitives/std", "sp-runtime/std", diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 94852ad39f5a..99d6e58bc403 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -42,7 +42,9 @@ use frame_election_provider_support::{ onchain, SequentialPhragmen, }; use frame_support::{ - construct_runtime, parameter_types, + construct_runtime, + genesis_builder_helper::{build_config, create_default_config}, + parameter_types, traits::{Everything, KeyOwnerProofSystem, WithdrawReasons}, }; use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId}; @@ -1137,4 +1139,14 @@ sp_api::impl_runtime_apis! 
{ Timestamp::now() } } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + create_default_config::() + } + + fn build_config(config: Vec) -> sp_genesis_builder::Result { + build_config::(config) + } + } } diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml index f4bb66f68cdf..e23d322b5a70 100644 --- a/polkadot/runtime/westend/Cargo.toml +++ b/polkadot/runtime/westend/Cargo.toml @@ -25,6 +25,7 @@ offchain-primitives = { package = "sp-offchain", path = "../../../substrate/prim sp-api = { path = "../../../substrate/primitives/api", default-features = false } sp-application-crypto = { path = "../../../substrate/primitives/application-crypto", default-features = false } sp-std = { package = "sp-std", path = "../../../substrate/primitives/std", default-features = false } +sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } sp-io = { path = "../../../substrate/primitives/io", default-features = false } sp-mmr-primitives = { path = "../../../substrate/primitives/merkle-mountain-range", default-features = false } sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } @@ -195,6 +196,7 @@ std = [ "sp-api/std", "sp-application-crypto/std", "sp-core/std", + "sp-genesis-builder/std", "sp-io/std", "sp-mmr-primitives/std", "sp-npos-elections/std", diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 9af18b5be2bb..ad08360f382d 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -27,7 +27,9 @@ use beefy_primitives::{ }; use frame_election_provider_support::{bounds::ElectionBoundsBuilder, onchain, SequentialPhragmen}; use frame_support::{ - construct_runtime, parameter_types, + construct_runtime, + genesis_builder_helper::{build_config, create_default_config}, + parameter_types, traits::{ fungible::HoldConsideration, ConstU32, Contains, EverythingBut, InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, ProcessMessage, ProcessMessageError, @@ -2153,6 +2155,16 @@ sp_api::impl_runtime_apis! 
{ Ok(batches) } } + + impl sp_genesis_builder::GenesisBuilder<Block> for Runtime { + fn create_default_config() -> Vec<u8> { + create_default_config::<RuntimeGenesisConfig>() + } + + fn build_config(config: Vec<u8>) -> sp_genesis_builder::Result { + build_config::<RuntimeGenesisConfig>(config) + } + } } #[cfg(all(test, feature = "try-runtime"))] diff --git a/substrate/bin/node-template/runtime/Cargo.toml b/substrate/bin/node-template/runtime/Cargo.toml index 65d0cfca59c4..caca54ce2ba4 100644 --- a/substrate/bin/node-template/runtime/Cargo.toml +++ b/substrate/bin/node-template/runtime/Cargo.toml @@ -39,6 +39,7 @@ sp-std = { path = "../../../primitives/std", default-features = false} sp-storage = { path = "../../../primitives/storage", default-features = false} sp-transaction-pool = { path = "../../../primitives/transaction-pool", default-features = false} sp-version = { path = "../../../primitives/version", default-features = false} +sp-genesis-builder = { version = "0.1.0-dev", default-features = false, path = "../../../primitives/genesis-builder" } # Used for the node template's RPCs frame-system-rpc-runtime-api = { path = "../../../frame/system/rpc/runtime-api", default-features = false} @@ -79,6 +80,7 @@ std = [ "sp-consensus-aura/std", "sp-consensus-grandpa/std", "sp-core/std", + "sp-genesis-builder/std", "sp-inherents/std", "sp-offchain/std", "sp-runtime/std", diff --git a/substrate/bin/node-template/runtime/src/lib.rs b/substrate/bin/node-template/runtime/src/lib.rs index 216be9588bca..4653b49bf2c3 100644 --- a/substrate/bin/node-template/runtime/src/lib.rs +++ b/substrate/bin/node-template/runtime/src/lib.rs @@ -23,6 +23,7 @@ use sp_std::prelude::*; use sp_version::NativeVersion; use sp_version::RuntimeVersion; +use frame_support::genesis_builder_helper::{build_config, create_default_config}; // A few exports that help ease life for downstream crates. pub use frame_support::{ construct_runtime, parameter_types, @@ -571,4 +572,14 @@ impl_runtime_apis!
{ Executive::try_execute_block(block, state_root_check, signature_check, select).expect("execute-block failed") } } + + impl sp_genesis_builder::GenesisBuilder<Block> for Runtime { + fn create_default_config() -> Vec<u8> { + create_default_config::<RuntimeGenesisConfig>() + } + + fn build_config(config: Vec<u8>) -> sp_genesis_builder::Result { + build_config::<RuntimeGenesisConfig>(config) + } + } } diff --git a/substrate/bin/node/runtime/Cargo.toml b/substrate/bin/node/runtime/Cargo.toml index 09c1fd9c6f31..7771b5f20971 100644 --- a/substrate/bin/node/runtime/Cargo.toml +++ b/substrate/bin/node/runtime/Cargo.toml @@ -32,6 +32,7 @@ sp-authority-discovery = { path = "../../../primitives/authority-discovery", def sp-consensus-babe = { path = "../../../primitives/consensus/babe", default-features = false} sp-consensus-grandpa = { path = "../../../primitives/consensus/grandpa", default-features = false} sp-block-builder = { path = "../../../primitives/block-builder", default-features = false} +sp-genesis-builder = { version = "0.1.0-dev", default-features = false, path = "../../../primitives/genesis-builder" } sp-inherents = { path = "../../../primitives/inherents", default-features = false} node-primitives = { path = "../primitives", default-features = false} sp-offchain = { path = "../../../primitives/offchain", default-features = false} @@ -231,6 +232,7 @@ std = [ "sp-consensus-babe/std", "sp-consensus-grandpa/std", "sp-core/std", + "sp-genesis-builder/std", "sp-inherents/std", "sp-io/std", "sp-offchain/std", diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index df8fb06467d9..c90b9076decc 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -30,6 +30,7 @@ use frame_election_provider_support::{ use frame_support::{ construct_runtime, dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, instances::{Instance1, Instance2}, ord_parameter_types, pallet_prelude::Get, @@ -2750,6 +2751,16 @@ impl_runtime_apis! { Ok(batches) } } + + impl sp_genesis_builder::GenesisBuilder<Block> for Runtime { + fn create_default_config() -> Vec<u8> { + create_default_config::<RuntimeGenesisConfig>() + } + + fn build_config(config: Vec<u8>) -> sp_genesis_builder::Result { + build_config::<RuntimeGenesisConfig>(config) + } + } } #[cfg(test)] diff --git a/substrate/test-utils/runtime/Cargo.toml b/substrate/test-utils/runtime/Cargo.toml index 727d46cb8f53..4d279c7b703d 100644 --- a/substrate/test-utils/runtime/Cargo.toml +++ b/substrate/test-utils/runtime/Cargo.toml @@ -107,6 +107,3 @@ std = [ ] # Special feature to disable logging disable-logging = [ "sp-api/disable-logging" ] - -#Enabling this flag will disable GenesisBuilder API implementation in runtime. -disable-genesis-builder = [] diff --git a/substrate/test-utils/runtime/build.rs b/substrate/test-utils/runtime/build.rs index 230606635f7d..dd79ce2c5ae8 100644 --- a/substrate/test-utils/runtime/build.rs +++ b/substrate/test-utils/runtime/build.rs @@ -15,8 +15,6 @@ // See the License for the specific language governing permissions and // limitations under the License.
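The hunks above add the same `GenesisBuilder` implementation to runtime after runtime, each time delegating to `frame_support::genesis_builder_helper`. As a rough mental model of what that helper pair does, here is a minimal self-contained sketch; the struct, its field, and the plain serde_json round-trip are illustrative assumptions, not the FRAME implementation:

```rust
// Toy model of the genesis_builder_helper pair: serialize the default
// genesis config to JSON bytes, and rebuild a config from such bytes.
use serde::{Deserialize, Serialize};

// Hypothetical stand-in for a runtime's real RuntimeGenesisConfig.
#[derive(Default, Serialize, Deserialize)]
struct RuntimeGenesisConfig {
    balances: Vec<(u64, u128)>,
}

fn create_default_config<T: Default + Serialize>() -> Vec<u8> {
    serde_json::to_vec(&T::default()).expect("default config serializes")
}

fn build_config<T: for<'de> Deserialize<'de>>(json: &[u8]) -> Result<T, String> {
    serde_json::from_slice(json).map_err(|e| e.to_string())
}

fn main() {
    let blob = create_default_config::<RuntimeGenesisConfig>();
    let cfg: RuntimeGenesisConfig = build_config(&blob).expect("round-trips");
    assert!(cfg.balances.is_empty());
}
```

The real `build_config` additionally stores the decoded config as genesis state, which is why in the patch it returns `sp_genesis_builder::Result` rather than the config value itself.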
-const BUILD_NO_GENESIS_BUILDER_SUPPORT_ENV: &str = "BUILD_NO_GENESIS_BUILDER_SUPPORT"; - fn main() { #[cfg(feature = "std")] { @@ -31,19 +29,6 @@ fn main() { .build(); } - #[cfg(feature = "std")] - if std::env::var(BUILD_NO_GENESIS_BUILDER_SUPPORT_ENV).is_ok() { - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() - .export_heap_base() - .append_to_rust_flags("-Clink-arg=-zstack-size=1048576") - .set_file_name("wasm_binary_no_genesis_builder") - .import_memory() - .enable_feature("disable-genesis-builder") - .build(); - } - println!("cargo:rerun-if-env-changed={}", BUILD_NO_GENESIS_BUILDER_SUPPORT_ENV); - #[cfg(feature = "std")] { substrate_wasm_builder::WasmBuilder::new() diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs index b116c8556815..687790f2ffaf 100644 --- a/substrate/test-utils/runtime/src/lib.rs +++ b/substrate/test-utils/runtime/src/lib.rs @@ -26,11 +26,10 @@ pub mod genesismap; pub mod substrate_test_pallet; use codec::{Decode, Encode}; -#[cfg(not(feature = "disable-genesis-builder"))] -use frame_support::genesis_builder_helper::{build_config, create_default_config}; use frame_support::{ construct_runtime, dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ConstU32, ConstU64}, weights::{ @@ -722,7 +721,6 @@ impl_runtime_apis! { } } - #[cfg(not(feature = "disable-genesis-builder"))] impl sp_genesis_builder::GenesisBuilder<Block> for Runtime { fn create_default_config() -> Vec<u8> { create_default_config::<RuntimeGenesisConfig>() @@ -1203,7 +1201,6 @@ mod tests { }) } - #[cfg(not(feature = "disable-genesis-builder"))] mod genesis_builder_tests { use super::*; use crate::genesismap::GenesisStorageBuilder; From 7cbe0c76ef8fd2aabf9f07de0156941ce3ed44b0 Mon Sep 17 00:00:00 2001 From: Chris Sosnin <48099298+slumber@users.noreply.github.com> Date: Wed, 27 Sep 2023 13:32:02 +0300 Subject: [PATCH 59/69] Migrate polkadot-primitives to v6 (#1543) - Async-backing related primitives are stabilized in `primitives::v6` - The async-backing API is now part of `api_version(7)` - It is enabled on the Rococo and Westend runtimes --------- Signed-off-by: Andrei Sandu Co-authored-by: Andrei Sandu <54316454+sandreim@users.noreply.github.com> --- .gitlab/pipeline/zombienet/polkadot.yml | 30 +- .../chain-bridge-hub-cumulus/src/lib.rs | 2 +- .../Cargo.toml | 3 - .../relay-chain-minimal-node/Cargo.toml | 4 - .../src/blockchain_rpc_client.rs | 10 +- .../src/collator_overseer.rs | 11 +- .../relay-chain-minimal-node/src/lib.rs | 12 +- .../src/rpc_client.rs | 25 +- cumulus/client/service/Cargo.toml | 5 - cumulus/pallets/parachain-system/src/lib.rs | 2 +- cumulus/parachain-template/node/Cargo.toml | 5 +- .../emulated/common/src/lib.rs | 6 +- cumulus/test/relay-sproof-builder/src/lib.rs | 2 +- polkadot/Cargo.toml | 1 - polkadot/cli/Cargo.toml | 1 - polkadot/node/collation-generation/src/lib.rs | 7 +- .../node/collation-generation/src/tests.rs | 8 +- polkadot/node/core/backing/src/tests/mod.rs | 2 +- .../src/tests/prospective_parachains.rs | 8 +- .../dispute-coordinator/src/initialized.rs | 12 +- .../src/fragment_tree.rs | 10 +- .../core/prospective-parachains/src/lib.rs | 13 +- .../core/prospective-parachains/src/tests.rs | 12 +- polkadot/node/core/runtime-api/src/cache.rs | 74 +- polkadot/node/core/runtime-api/src/lib.rs | 33 +- polkadot/node/core/runtime-api/src/tests.rs | 22 +- .../network/approval-distribution/src/lib.rs | 48 +- .../approval-distribution/src/tests.rs | 16 +- .../network/bitfield-distribution/src/lib.rs | 19 +-
.../bitfield-distribution/src/tests.rs | 10 +- polkadot/node/network/bridge/src/rx/mod.rs | 44 +- polkadot/node/network/bridge/src/rx/tests.rs | 29 +- polkadot/node/network/bridge/src/tx/mod.rs | 28 +- polkadot/node/network/bridge/src/tx/tests.rs | 19 +- .../src/collator_side/collation.rs | 21 +- .../src/collator_side/mod.rs | 52 +- .../src/collator_side/tests/mod.rs | 49 +- .../tests/prospective_parachains.rs | 39 +- .../node/network/collator-protocol/src/lib.rs | 11 +- .../src/validator_side/collation.rs | 2 +- .../src/validator_side/mod.rs | 44 +- .../src/validator_side/tests/mod.rs | 25 +- .../tests/prospective_parachains.rs | 36 +- .../node/network/gossip-support/src/lib.rs | 2 +- polkadot/node/network/protocol/Cargo.toml | 3 - polkadot/node/network/protocol/src/lib.rs | 62 +- .../node/network/protocol/src/peer_set.rs | 23 +- .../protocol/src/request_response/mod.rs | 24 +- .../protocol/src/request_response/outgoing.rs | 14 +- .../request_response/{vstaging.rs => v2.rs} | 8 +- .../src/legacy_v1/mod.rs | 30 +- .../src/legacy_v1/tests.rs | 12 +- .../network/statement-distribution/src/lib.rs | 55 +- .../src/{vstaging => v2}/candidates.rs | 2 +- .../src/{vstaging => v2}/cluster.rs | 4 +- .../src/{vstaging => v2}/grid.rs | 8 +- .../src/{vstaging => v2}/groups.rs | 3 +- .../src/{vstaging => v2}/mod.rs | 69 +- .../src/{vstaging => v2}/requests.rs | 8 +- .../src/{vstaging => v2}/statement_store.rs | 4 +- .../src/{vstaging => v2}/tests/cluster.rs | 46 +- .../src/{vstaging => v2}/tests/grid.rs | 86 +- .../src/{vstaging => v2}/tests/mod.rs | 12 +- .../src/{vstaging => v2}/tests/requests.rs | 86 +- polkadot/node/service/Cargo.toml | 4 - polkadot/node/service/src/lib.rs | 8 +- polkadot/node/service/src/overseer.rs | 18 +- polkadot/node/subsystem-types/src/messages.rs | 24 +- .../subsystem-types/src/runtime_client.rs | 46 +- .../src/backing_implicit_view.rs | 2 +- .../src/inclusion_emulator/mod.rs | 1435 +++++++++++++++- .../src/inclusion_emulator/staging.rs | 1450 ----------------- polkadot/node/subsystem-util/src/lib.rs | 4 +- .../node/subsystem-util/src/runtime/mod.rs | 29 +- .../test-parachains/adder/collator/Cargo.toml | 3 - polkadot/primitives/src/lib.rs | 31 +- polkadot/primitives/src/runtime_api.rs | 30 +- polkadot/primitives/src/v6/async_backing.rs | 132 ++ .../src/{v5 => v6}/executor_params.rs | 0 polkadot/primitives/src/{v5 => v6}/metrics.rs | 0 polkadot/primitives/src/{v5 => v6}/mod.rs | 12 +- polkadot/primitives/src/{v5 => v6}/signed.rs | 0 .../primitives/src/{v5 => v6}/slashing.rs | 0 polkadot/primitives/src/vstaging/mod.rs | 118 -- .../node/backing/prospective-parachains.md | 2 +- polkadot/runtime/kusama/src/lib.rs | 2 +- .../src/assigner_on_demand/tests.rs | 2 +- .../runtime/parachains/src/configuration.rs | 2 +- .../src/configuration/migration/v6.rs | 2 +- .../src/configuration/migration/v7.rs | 2 +- .../src/configuration/migration/v8.rs | 3 +- .../parachains/src/disputes/slashing.rs | 2 +- .../parachains/src/runtime_api_impl/mod.rs | 3 +- .../src/runtime_api_impl/{v5.rs => v7.rs} | 103 +- .../src/runtime_api_impl/vstaging.rs | 108 -- polkadot/runtime/polkadot/src/lib.rs | 2 +- polkadot/runtime/rococo/src/lib.rs | 15 +- polkadot/runtime/test-runtime/src/lib.rs | 2 +- polkadot/runtime/westend/src/lib.rs | 16 +- .../001-async-backing-compatibility.toml | 34 - .../001-async-backing-compatibility.zndsl | 23 - .../002-async-backing-runtime-upgrade.toml | 54 - .../002-async-backing-runtime-upgrade.zndsl | 34 - .../003-async-backing-collator-mix.toml | 40 - 
.../003-async-backing-collator-mix.zndsl | 19 - .../zombienet_tests/async_backing/README.md | 9 - .../0002-parachains-upgrade-smoke-test.toml | 4 +- 107 files changed, 2410 insertions(+), 2792 deletions(-) rename polkadot/node/network/protocol/src/request_response/{vstaging.rs => v2.rs} (93%) rename polkadot/node/network/statement-distribution/src/{vstaging => v2}/candidates.rs (99%) rename polkadot/node/network/statement-distribution/src/{vstaging => v2}/cluster.rs (99%) rename polkadot/node/network/statement-distribution/src/{vstaging => v2}/grid.rs (99%) rename polkadot/node/network/statement-distribution/src/{vstaging => v2}/groups.rs (96%) rename polkadot/node/network/statement-distribution/src/{vstaging => v2}/mod.rs (97%) rename polkadot/node/network/statement-distribution/src/{vstaging => v2}/requests.rs (99%) rename polkadot/node/network/statement-distribution/src/{vstaging => v2}/statement_store.rs (98%) rename polkadot/node/network/statement-distribution/src/{vstaging => v2}/tests/cluster.rs (95%) rename polkadot/node/network/statement-distribution/src/{vstaging => v2}/tests/grid.rs (95%) rename polkadot/node/network/statement-distribution/src/{vstaging => v2}/tests/mod.rs (98%) rename polkadot/node/network/statement-distribution/src/{vstaging => v2}/tests/requests.rs (95%) delete mode 100644 polkadot/node/subsystem-util/src/inclusion_emulator/staging.rs create mode 100644 polkadot/primitives/src/v6/async_backing.rs rename polkadot/primitives/src/{v5 => v6}/executor_params.rs (100%) rename polkadot/primitives/src/{v5 => v6}/metrics.rs (100%) rename polkadot/primitives/src/{v5 => v6}/mod.rs (99%) rename polkadot/primitives/src/{v5 => v6}/signed.rs (100%) rename polkadot/primitives/src/{v5 => v6}/slashing.rs (100%) rename polkadot/runtime/parachains/src/runtime_api_impl/{v5.rs => v7.rs} (79%) delete mode 100644 polkadot/zombienet_tests/async_backing/001-async-backing-compatibility.toml delete mode 100644 polkadot/zombienet_tests/async_backing/001-async-backing-compatibility.zndsl delete mode 100644 polkadot/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.toml delete mode 100644 polkadot/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.zndsl delete mode 100644 polkadot/zombienet_tests/async_backing/003-async-backing-collator-mix.toml delete mode 100644 polkadot/zombienet_tests/async_backing/003-async-backing-collator-mix.zndsl delete mode 100644 polkadot/zombienet_tests/async_backing/README.md diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml index e420baf486aa..0402c194134b 100644 --- a/.gitlab/pipeline/zombienet/polkadot.yml +++ b/.gitlab/pipeline/zombienet/polkadot.yml @@ -110,7 +110,7 @@ zombienet-polkadot-smoke-0001-parachains-smoke-test: - .zombienet-polkadot-common before_script: - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG} - - export COL_IMAGE="docker.io/paritypr/colander:4519" # The collator image is fixed + - export COL_IMAGE="${COLANDER_IMAGE}":${PIPELINE_IMAGE_TAG} - echo "Zombienet Tests Config" - echo "gh-dir ${GH_DIR}" - echo "local-dir ${LOCAL_DIR}" @@ -127,12 +127,12 @@ zombienet-polkadot-smoke-0002-parachains-parachains-upgrade-smoke: - .zombienet-polkadot-common before_script: - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG} - - export COL_IMAGE="docker.io/parity/polkadot-collator:latest" # Use cumulus lastest image + - export CUMULUS_IMAGE="docker.io/paritypr/polkadot-parachain-debug:${DOCKER_IMAGES_VERSION}" - 
echo "Zombienet Tests Config" - echo "gh-dir ${GH_DIR}" - echo "local-dir ${LOCAL_DIR}" - echo "polkadot image ${ZOMBIENET_INTEGRATION_TEST_IMAGE}" - - echo "colander image ${COL_IMAGE}" + - echo "polkadot-parachain image ${CUMULUS_IMAGE}" - echo "malus image ${MALUS_IMAGE}" script: - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh @@ -193,27 +193,3 @@ zombienet-polkadot-malus-0001-dispute-valid: - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh --local-dir="${LOCAL_DIR}/integrationtests" --test="0001-dispute-valid-block.zndsl" - -zombienet-polkadot-async-backing-compatibility: - extends: - - .zombienet-polkadot-common - script: - - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh - --local-dir="${LOCAL_DIR}/async_backing" - --test="001-async-backing-compatibility.zndsl" - -zombienet-polkadot-async-backing-runtime-upgrade: - extends: - - .zombienet-polkadot-common - script: - - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh - --local-dir="${LOCAL_DIR}/async_backing" - --test="002-async-backing-runtime-upgrade.zndsl" - -zombienet-polkadot-async-backing-collator-mix: - extends: - - .zombienet-polkadot-common - script: - - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh - --local-dir="${LOCAL_DIR}/async_backing" - --test="003-async-backing-collator-mix.zndsl" diff --git a/bridges/primitives/chain-bridge-hub-cumulus/src/lib.rs b/bridges/primitives/chain-bridge-hub-cumulus/src/lib.rs index c1dbc6db36f6..cd281324ee55 100644 --- a/bridges/primitives/chain-bridge-hub-cumulus/src/lib.rs +++ b/bridges/primitives/chain-bridge-hub-cumulus/src/lib.rs @@ -52,7 +52,7 @@ pub const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); /// This is a copy-paste from the cumulus repo's `parachains-common` crate. const MAXIMUM_BLOCK_WEIGHT: Weight = Weight::from_parts(constants::WEIGHT_REF_TIME_PER_SECOND, 0) .saturating_div(2) - .set_proof_size(polkadot_primitives::v5::MAX_POV_SIZE as u64); + .set_proof_size(polkadot_primitives::MAX_POV_SIZE as u64); /// All cumulus bridge hubs assume that about 5 percent of the block weight is consumed by /// `on_initialize` handlers. This is used to limit the maximal weight of a single extrinsic. 
diff --git a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml index 39eda5075e29..bc8d0d430c77 100644 --- a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml +++ b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml @@ -41,6 +41,3 @@ metered = { package = "prioritized-metered-channel", version = "0.5.1", default- # Cumulus cumulus-test-service = { path = "../../test/service" } - -[features] -network-protocol-staging = [ "polkadot-service/network-protocol-staging" ] diff --git a/cumulus/client/relay-chain-minimal-node/Cargo.toml b/cumulus/client/relay-chain-minimal-node/Cargo.toml index 39056d6b6511..226474d3d38c 100644 --- a/cumulus/client/relay-chain-minimal-node/Cargo.toml +++ b/cumulus/client/relay-chain-minimal-node/Cargo.toml @@ -41,7 +41,3 @@ tracing = "0.1.37" async-trait = "0.1.73" futures = "0.3.28" -[features] -network-protocol-staging = [ - "polkadot-node-network-protocol/network-protocol-staging", -] diff --git a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs index 57e16bc4283c..3f4c08ecbb83 100644 --- a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs +++ b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs @@ -22,8 +22,8 @@ use futures::{Stream, StreamExt}; use polkadot_core_primitives::{Block, BlockNumber, Hash, Header}; use polkadot_overseer::RuntimeApiSubsystemClient; use polkadot_primitives::{ + async_backing::{AsyncBackingParams, BackingState}, slashing, - vstaging::{AsyncBackingParams, BackingState}, }; use sc_authority_discovery::{AuthorityDiscovery, Error as AuthorityDiscoveryError}; use sp_api::{ApiError, RuntimeApiInfo}; @@ -346,16 +346,16 @@ impl RuntimeApiSubsystemClient for BlockChainRpcClient { Ok(self.rpc_client.parachain_host_minimum_backing_votes(at, session_index).await?) } - async fn staging_async_backing_params(&self, at: Hash) -> Result { - Ok(self.rpc_client.parachain_host_staging_async_backing_params(at).await?) + async fn async_backing_params(&self, at: Hash) -> Result { + Ok(self.rpc_client.parachain_host_async_backing_params(at).await?) } - async fn staging_para_backing_state( + async fn para_backing_state( &self, at: Hash, para_id: cumulus_primitives_core::ParaId, ) -> Result, ApiError> { - Ok(self.rpc_client.parachain_host_staging_para_backing_state(at, para_id).await?) + Ok(self.rpc_client.parachain_host_para_backing_state(at, para_id).await?) } } diff --git a/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs b/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs index bea2fc330a24..a83a18f7cd96 100644 --- a/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs +++ b/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs @@ -30,7 +30,7 @@ use polkadot_node_network_protocol::{ peer_set::PeerSetProtocolNames, request_response::{ v1::{self, AvailableDataFetchingRequest}, - vstaging, IncomingRequestReceiver, ReqProtocolNames, + v2, IncomingRequestReceiver, ReqProtocolNames, }, }; use polkadot_node_subsystem_util::metrics::{prometheus::Registry, Metrics}; @@ -63,9 +63,8 @@ pub(crate) struct CollatorOverseerGenArgs<'a> { pub authority_discovery_service: AuthorityDiscoveryService, /// Receiver for collation request protocol v1. pub collation_req_receiver_v1: IncomingRequestReceiver, - /// Receiver for collation request protocol vstaging. 
- pub collation_req_receiver_vstaging: - IncomingRequestReceiver, + /// Receiver for collation request protocol v2. + pub collation_req_receiver_v2: IncomingRequestReceiver, /// Receiver for availability request protocol pub available_data_req_receiver: IncomingRequestReceiver, /// Prometheus registry, commonly used for production systems, less so for test. @@ -88,7 +87,7 @@ fn build_overseer( sync_oracle, authority_discovery_service, collation_req_receiver_v1, - collation_req_receiver_vstaging, + collation_req_receiver_v2, available_data_req_receiver, registry, spawner, @@ -121,7 +120,7 @@ fn build_overseer( peer_id: network_service.local_peer_id(), collator_pair, request_receiver_v1: collation_req_receiver_v1, - request_receiver_vstaging: collation_req_receiver_vstaging, + request_receiver_v2: collation_req_receiver_v2, metrics: Metrics::register(registry)?, }; CollatorProtocolSubsystem::new(side) diff --git a/cumulus/client/relay-chain-minimal-node/src/lib.rs b/cumulus/client/relay-chain-minimal-node/src/lib.rs index 366d428eda70..08e4e8e34aba 100644 --- a/cumulus/client/relay-chain-minimal-node/src/lib.rs +++ b/cumulus/client/relay-chain-minimal-node/src/lib.rs @@ -23,7 +23,7 @@ use polkadot_network_bridge::{peer_sets_info, IsAuthority}; use polkadot_node_network_protocol::{ peer_set::PeerSetProtocolNames, request_response::{ - v1, vstaging, IncomingRequest, IncomingRequestReceiver, Protocol, ReqProtocolNames, + v1, v2, IncomingRequest, IncomingRequestReceiver, Protocol, ReqProtocolNames, }, }; @@ -182,7 +182,7 @@ async fn new_minimal_relay_chain( } let request_protocol_names = ReqProtocolNames::new(genesis_hash, config.chain_spec.fork_id()); - let (collation_req_receiver_v1, collation_req_receiver_vstaging, available_data_req_receiver) = + let (collation_req_receiver_v1, collation_req_receiver_v2, available_data_req_receiver) = build_request_response_protocol_receivers(&request_protocol_names, &mut net_config); let best_header = relay_chain_rpc_client @@ -212,7 +212,7 @@ async fn new_minimal_relay_chain( sync_oracle, authority_discovery_service, collation_req_receiver_v1, - collation_req_receiver_vstaging, + collation_req_receiver_v2, available_data_req_receiver, registry: prometheus_registry.as_ref(), spawner: task_manager.spawn_handle(), @@ -234,13 +234,13 @@ fn build_request_response_protocol_receivers( config: &mut FullNetworkConfiguration, ) -> ( IncomingRequestReceiver, - IncomingRequestReceiver, + IncomingRequestReceiver, IncomingRequestReceiver, ) { let (collation_req_receiver_v1, cfg) = IncomingRequest::get_config_receiver(request_protocol_names); config.add_request_response_protocol(cfg); - let (collation_req_receiver_vstaging, cfg) = + let (collation_req_receiver_v2, cfg) = IncomingRequest::get_config_receiver(request_protocol_names); config.add_request_response_protocol(cfg); let (available_data_req_receiver, cfg) = @@ -248,5 +248,5 @@ fn build_request_response_protocol_receivers( config.add_request_response_protocol(cfg); let cfg = Protocol::ChunkFetchingV1.get_outbound_only_config(request_protocol_names); config.add_request_response_protocol(cfg); - (collation_req_receiver_v1, collation_req_receiver_vstaging, available_data_req_receiver) + (collation_req_receiver_v1, collation_req_receiver_v2, available_data_req_receiver) } diff --git a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs index c1e92b249d77..b1fd7d1ab7d9 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs +++ 
b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -30,9 +30,8 @@ use parity_scale_codec::{Decode, Encode}; use cumulus_primitives_core::{ relay_chain::{ - slashing, - vstaging::{AsyncBackingParams, BackingState}, - BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, + async_backing::{AsyncBackingParams, BackingState}, + slashing, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash as RelayHash, Header as RelayHeader, InboundHrmpMessage, OccupiedCoreAssumption, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, @@ -599,30 +598,22 @@ impl RelayChainRpcClient { } #[allow(missing_docs)] - pub async fn parachain_host_staging_async_backing_params( + pub async fn parachain_host_async_backing_params( &self, at: RelayHash, ) -> Result { - self.call_remote_runtime_function( - "ParachainHost_staging_async_backing_params", - at, - None::<()>, - ) - .await + self.call_remote_runtime_function("ParachainHost_async_backing_params", at, None::<()>) + .await } #[allow(missing_docs)] - pub async fn parachain_host_staging_para_backing_state( + pub async fn parachain_host_para_backing_state( &self, at: RelayHash, para_id: ParaId, ) -> Result, RelayChainError> { - self.call_remote_runtime_function( - "ParachainHost_staging_para_backing_state", - at, - Some(para_id), - ) - .await + self.call_remote_runtime_function("ParachainHost_para_backing_state", at, Some(para_id)) + .await } fn send_register_message_to_worker( diff --git a/cumulus/client/service/Cargo.toml b/cumulus/client/service/Cargo.toml index b53bdbdfc815..b7c274ceecdc 100644 --- a/cumulus/client/service/Cargo.toml +++ b/cumulus/client/service/Cargo.toml @@ -40,8 +40,3 @@ cumulus-relay-chain-interface = { path = "../relay-chain-interface" } cumulus-relay-chain-inprocess-interface = { path = "../relay-chain-inprocess-interface" } cumulus-relay-chain-minimal-node = { path = "../relay-chain-minimal-node" } -[features] -network-protocol-staging = [ - "cumulus-relay-chain-inprocess-interface/network-protocol-staging", - "cumulus-relay-chain-minimal-node/network-protocol-staging", -] diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index a7e59a61c9be..a8f0a49223f5 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -1447,7 +1447,7 @@ impl Pallet { hrmp_max_message_num_per_candidate: 2, validation_upgrade_cooldown: 2, validation_upgrade_delay: 2, - async_backing_params: relay_chain::vstaging::AsyncBackingParams { + async_backing_params: relay_chain::AsyncBackingParams { allowed_ancestry_len: 0, max_candidate_depth: 0, }, diff --git a/cumulus/parachain-template/node/Cargo.toml b/cumulus/parachain-template/node/Cargo.toml index 223a78dacc49..114b25d12611 100644 --- a/cumulus/parachain-template/node/Cargo.toml +++ b/cumulus/parachain-template/node/Cargo.toml @@ -89,7 +89,4 @@ try-runtime = [ "polkadot-cli/try-runtime", "sp-runtime/try-runtime", ] -network-protocol-staging = [ - "cumulus-client-service/network-protocol-staging", - "polkadot-cli/network-protocol-staging", -] + diff --git a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs index 7461165f2a19..2804128ec014 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs +++ 
b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs @@ -64,7 +64,7 @@ decl_test_relay_chains! { Hrmp: kusama_runtime::Hrmp, } }, - #[api_version(6)] + #[api_version(7)] pub struct Westend { genesis = westend::genesis(), on_init = (), @@ -79,7 +79,7 @@ decl_test_relay_chains! { Balances: westend_runtime::Balances, } }, - #[api_version(5)] + #[api_version(7)] pub struct Rococo { genesis = rococo::genesis(), on_init = (), @@ -94,7 +94,7 @@ decl_test_relay_chains! { Balances: rococo_runtime::Balances, } }, - #[api_version(5)] + #[api_version(7)] pub struct Wococo { genesis = rococo::genesis(), on_init = (), diff --git a/cumulus/test/relay-sproof-builder/src/lib.rs b/cumulus/test/relay-sproof-builder/src/lib.rs index 69a82d05d816..fbd2692a36b4 100644 --- a/cumulus/test/relay-sproof-builder/src/lib.rs +++ b/cumulus/test/relay-sproof-builder/src/lib.rs @@ -63,7 +63,7 @@ impl Default for RelayStateSproofBuilder { hrmp_max_message_num_per_candidate: 5, validation_upgrade_cooldown: 6, validation_upgrade_delay: 6, - async_backing_params: relay_chain::vstaging::AsyncBackingParams { + async_backing_params: relay_chain::AsyncBackingParams { allowed_ancestry_len: 0, max_candidate_depth: 0, }, diff --git a/polkadot/Cargo.toml b/polkadot/Cargo.toml index aacc6ad405cc..6e82cb69f6ec 100644 --- a/polkadot/Cargo.toml +++ b/polkadot/Cargo.toml @@ -68,7 +68,6 @@ jemalloc-allocator = [ # Enables timeout-based tests supposed to be run only in CI environment as they may be flaky # when run locally depending on system load ci-only-tests = [ "polkadot-node-core-pvf/ci-only-tests" ] -network-protocol-staging = [ "polkadot-cli/network-protocol-staging" ] # Configuration for building a .deb package - for use with `cargo-deb` [package.metadata.deb] diff --git a/polkadot/cli/Cargo.toml b/polkadot/cli/Cargo.toml index 53961c90a2a8..799a229b6ad1 100644 --- a/polkadot/cli/Cargo.toml +++ b/polkadot/cli/Cargo.toml @@ -76,4 +76,3 @@ runtime-metrics = [ "polkadot-node-metrics/runtime-metrics", "service/runtime-metrics", ] -network-protocol-staging = [ "service/network-protocol-staging" ] diff --git a/polkadot/node/collation-generation/src/lib.rs b/polkadot/node/collation-generation/src/lib.rs index 27779f3d1acb..4e13755deedf 100644 --- a/polkadot/node/collation-generation/src/lib.rs +++ b/polkadot/node/collation-generation/src/lib.rs @@ -43,9 +43,8 @@ use polkadot_node_subsystem::{ SubsystemContext, SubsystemError, SubsystemResult, }; use polkadot_node_subsystem_util::{ - request_availability_cores, request_persisted_validation_data, - request_staging_async_backing_params, request_validation_code, request_validation_code_hash, - request_validators, + request_async_backing_params, request_availability_cores, request_persisted_validation_data, + request_validation_code, request_validation_code_hash, request_validators, }; use polkadot_primitives::{ collator_signature_payload, CandidateCommitments, CandidateDescriptor, CandidateReceipt, @@ -208,7 +207,7 @@ async fn handle_new_activations( let (availability_cores, validators, async_backing_params) = join!( request_availability_cores(relay_parent, ctx.sender()).await, request_validators(relay_parent, ctx.sender()).await, - request_staging_async_backing_params(relay_parent, ctx.sender()).await, + request_async_backing_params(relay_parent, ctx.sender()).await, ); let availability_cores = availability_cores??; diff --git a/polkadot/node/collation-generation/src/tests.rs b/polkadot/node/collation-generation/src/tests.rs index da6b343e6aee..9094f40cca84 100644 --- 
a/polkadot/node/collation-generation/src/tests.rs +++ b/polkadot/node/collation-generation/src/tests.rs @@ -153,7 +153,7 @@ fn requests_availability_per_relay_parent() { } Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( _hash, - RuntimeApiRequest::StagingAsyncBackingParams( + RuntimeApiRequest::AsyncBackingParams( tx, ), ))) => { @@ -235,7 +235,7 @@ fn requests_validation_data_for_scheduled_matches() { }, Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( _hash, - RuntimeApiRequest::StagingAsyncBackingParams(tx), + RuntimeApiRequest::AsyncBackingParams(tx), ))) => { tx.send(Err(RuntimeApiError::NotSupported { runtime_api_name: "doesnt_matter", @@ -332,7 +332,7 @@ fn sends_distribute_collation_message() { }, Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( _hash, - RuntimeApiRequest::StagingAsyncBackingParams(tx), + RuntimeApiRequest::AsyncBackingParams(tx), ))) => { tx.send(Err(RuntimeApiError::NotSupported { runtime_api_name: "doesnt_matter", @@ -494,7 +494,7 @@ fn fallback_when_no_validation_code_hash_api() { }, Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( _hash, - RuntimeApiRequest::StagingAsyncBackingParams(tx), + RuntimeApiRequest::AsyncBackingParams(tx), ))) => { tx.send(Err(RuntimeApiError::NotSupported { runtime_api_name: "doesnt_matter", diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index 4c2fd6becb42..bdc8b3fa1af8 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -237,7 +237,7 @@ async fn test_startup(virtual_overseer: &mut VirtualOverseer, test_state: &TestS assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx)) ) if parent == test_state.relay_parent => { tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap(); } diff --git a/polkadot/node/core/backing/src/tests/prospective_parachains.rs b/polkadot/node/core/backing/src/tests/prospective_parachains.rs index 14f720b721f2..b79515ed37a6 100644 --- a/polkadot/node/core/backing/src/tests/prospective_parachains.rs +++ b/polkadot/node/core/backing/src/tests/prospective_parachains.rs @@ -20,12 +20,12 @@ use polkadot_node_subsystem::{ messages::{ChainApiMessage, FragmentTreeMembership}, ActivatedLeaf, TimeoutExt, }; -use polkadot_primitives::{vstaging as vstaging_primitives, BlockNumber, Header, OccupiedCore}; +use polkadot_primitives::{AsyncBackingParams, BlockNumber, Header, OccupiedCore}; use super::*; -const ASYNC_BACKING_PARAMETERS: vstaging_primitives::AsyncBackingParams = - vstaging_primitives::AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 }; +const ASYNC_BACKING_PARAMETERS: AsyncBackingParams = + AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 }; struct TestLeaf { activated: ActivatedLeaf, @@ -56,7 +56,7 @@ async fn activate_leaf( assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx)) ) if parent == leaf_hash => { tx.send(Ok(ASYNC_BACKING_PARAMETERS)).unwrap(); } diff --git a/polkadot/node/core/dispute-coordinator/src/initialized.rs b/polkadot/node/core/dispute-coordinator/src/initialized.rs index 9cd544a8c536..e44530b3f1bb 100644 --- 
a/polkadot/node/core/dispute-coordinator/src/initialized.rs +++ b/polkadot/node/core/dispute-coordinator/src/initialized.rs @@ -43,7 +43,7 @@ use polkadot_node_subsystem_util::runtime::{ self, key_ownership_proof, submit_report_dispute_lost, RuntimeInfo, }; use polkadot_primitives::{ - vstaging, BlockNumber, CandidateHash, CandidateReceipt, CompactStatement, DisputeStatement, + slashing, BlockNumber, CandidateHash, CandidateReceipt, CompactStatement, DisputeStatement, DisputeStatementSet, Hash, ScrapedOnChainVotes, SessionIndex, ValidDisputeStatementKind, ValidatorId, ValidatorIndex, }; @@ -385,7 +385,7 @@ impl Initialized { &mut self, ctx: &mut Context, relay_parent: Hash, - unapplied_slashes: Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)>, + unapplied_slashes: Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>, ) { for (session_index, candidate_hash, pending) in unapplied_slashes { gum::info!( @@ -422,11 +422,9 @@ impl Initialized { match res { Ok(Some(key_ownership_proof)) => { key_ownership_proofs.push(key_ownership_proof); - let time_slot = vstaging::slashing::DisputesTimeSlot::new( - session_index, - candidate_hash, - ); - let dispute_proof = vstaging::slashing::DisputeProof { + let time_slot = + slashing::DisputesTimeSlot::new(session_index, candidate_hash); + let dispute_proof = slashing::DisputeProof { time_slot, kind: pending.kind, validator_index: *validator_index, diff --git a/polkadot/node/core/prospective-parachains/src/fragment_tree.rs b/polkadot/node/core/prospective-parachains/src/fragment_tree.rs index ed2988fcb39f..292e4ebe5282 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_tree.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_tree.rs @@ -96,10 +96,10 @@ use std::{ use super::LOG_TARGET; use bitvec::prelude::*; -use polkadot_node_subsystem_util::inclusion_emulator::staging::{ +use polkadot_node_subsystem_util::inclusion_emulator::{ ConstraintModifications, Constraints, Fragment, ProspectiveCandidate, RelayChainBlockInfo, }; -use polkadot_primitives::vstaging::{ +use polkadot_primitives::{ BlockNumber, CandidateHash, CommittedCandidateReceipt, Hash, HeadData, Id as ParaId, PersistedValidationData, }; @@ -981,10 +981,8 @@ impl FragmentNode { mod tests { use super::*; use assert_matches::assert_matches; - use polkadot_node_subsystem_util::inclusion_emulator::staging::InboundHrmpLimitations; - use polkadot_primitives::vstaging::{ - BlockNumber, CandidateCommitments, CandidateDescriptor, HeadData, - }; + use polkadot_node_subsystem_util::inclusion_emulator::InboundHrmpLimitations; + use polkadot_primitives::{BlockNumber, CandidateCommitments, CandidateDescriptor, HeadData}; use polkadot_primitives_test_helpers as test_helpers; fn make_constraints( diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index 6e5844a62a16..fcca0dd0b536 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -22,7 +22,7 @@ //! backing phases of parachain consensus. //! //! This is primarily an implementation of "Fragment Trees", as described in -//! [`polkadot_node_subsystem_util::inclusion_emulator::staging`]. +//! [`polkadot_node_subsystem_util::inclusion_emulator`]. //! //! This subsystem also handles concerns such as the relay-chain being forkful and session changes. 
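The doc-comment change above reflects the module move from `inclusion_emulator::staging` to `inclusion_emulator`; the fragment-tree logic itself is untouched by this patch. For orientation, the admission rule enforced by `max_candidate_depth` (from `AsyncBackingParams`, exercised in the tests that follow) can be modeled with a small self-contained sketch; the type and function here are toys, not the real subsystem:

```rust
// Toy model of the depth check a fragment tree applies to prospective
// candidates. Real trees also validate constraints and relay parents.
struct AsyncBackingParams {
    max_candidate_depth: u32,
}

/// A candidate sits one level below its parent; the root sits at depth 0.
fn can_add(parent_depth: Option<u32>, params: &AsyncBackingParams) -> bool {
    let depth = parent_depth.map_or(0, |d| d + 1);
    depth <= params.max_candidate_depth
}

fn main() {
    let params = AsyncBackingParams { max_candidate_depth: 4 };
    assert!(can_add(None, &params)); // first unbacked candidate, depth 0
    assert!(can_add(Some(3), &params)); // depth 4 is still within bounds
    assert!(!can_add(Some(4), &params)); // depth 5 exceeds the limit
}
```

Real fragment trees additionally check each candidate against the constraints from the para's backing state; the depth bound is just the part most relevant to the renames in this patch.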
@@ -42,13 +42,14 @@ use polkadot_node_subsystem::{ overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, }; use polkadot_node_subsystem_util::{ - inclusion_emulator::staging::{Constraints, RelayChainBlockInfo}, + inclusion_emulator::{Constraints, RelayChainBlockInfo}, request_session_index_for_child, runtime::{prospective_parachains_mode, ProspectiveParachainsMode}, }; -use polkadot_primitives::vstaging::{ - BlockNumber, CandidateHash, CandidatePendingAvailability, CommittedCandidateReceipt, CoreState, - Hash, HeadData, Header, Id as ParaId, PersistedValidationData, +use polkadot_primitives::{ + async_backing::CandidatePendingAvailability, BlockNumber, CandidateHash, + CommittedCandidateReceipt, CoreState, Hash, HeadData, Header, Id as ParaId, + PersistedValidationData, }; use crate::{ @@ -792,7 +793,7 @@ async fn fetch_backing_state( let (tx, rx) = oneshot::channel(); ctx.send_message(RuntimeApiMessage::Request( relay_parent, - RuntimeApiRequest::StagingParaBackingState(para_id, tx), + RuntimeApiRequest::ParaBackingState(para_id, tx), )) .await; diff --git a/polkadot/node/core/prospective-parachains/src/tests.rs b/polkadot/node/core/prospective-parachains/src/tests.rs index eb12ea4537f7..d2cd23fe95fc 100644 --- a/polkadot/node/core/prospective-parachains/src/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/tests.rs @@ -25,7 +25,7 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_primitives::{ - vstaging::{AsyncBackingParams, BackingState, Constraints, InboundHrmpLimitations}, + async_backing::{AsyncBackingParams, BackingState, Constraints, InboundHrmpLimitations}, CommittedCandidateReceipt, HeadData, Header, PersistedValidationData, ScheduledCore, ValidationCodeHash, }; @@ -219,7 +219,7 @@ async fn handle_leaf_activation( assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx)) ) if parent == *hash => { tx.send(Ok(async_backing_params)).unwrap(); } @@ -284,7 +284,7 @@ async fn handle_leaf_activation( let para_id = match message { AllMessages::RuntimeApi(RuntimeApiMessage::Request( _, - RuntimeApiRequest::StagingParaBackingState(p_id, _), + RuntimeApiRequest::ParaBackingState(p_id, _), )) => p_id, _ => panic!("received unexpected message {:?}", message), }; @@ -303,7 +303,7 @@ async fn handle_leaf_activation( assert_matches!( message, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingParaBackingState(p_id, tx)) + RuntimeApiMessage::Request(parent, RuntimeApiRequest::ParaBackingState(p_id, tx)) ) if parent == *hash && p_id == para_id => { tx.send(Ok(Some(backing_state))).unwrap(); } @@ -499,7 +499,7 @@ fn should_do_no_work_if_async_backing_disabled_for_leaf() { assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx)) ) if parent == hash => { tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap(); } @@ -1569,7 +1569,7 @@ fn uses_ancestry_only_within_session() { assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + RuntimeApiMessage::Request(parent, 
RuntimeApiRequest::AsyncBackingParams(tx)) ) if parent == hash => { tx.send(Ok(AsyncBackingParams { max_candidate_depth: 0, allowed_ancestry_len: ancestry_len })).unwrap(); } diff --git a/polkadot/node/core/runtime-api/src/cache.rs b/polkadot/node/core/runtime-api/src/cache.rs index 7f41d74e616c..e05e5823a282 100644 --- a/polkadot/node/core/runtime-api/src/cache.rs +++ b/polkadot/node/core/runtime-api/src/cache.rs @@ -20,12 +20,12 @@ use schnellru::{ByLength, LruMap}; use sp_consensus_babe::Epoch; use polkadot_primitives::{ - vstaging, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateEvent, - CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, - GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, - OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, - SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, - ValidatorSignature, + async_backing, slashing, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, + CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, + ExecutorParams, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, + InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, + ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, + ValidatorId, ValidatorIndex, ValidatorSignature, }; /// For consistency we have the same capacity for all caches. We use 128 as we'll only need that @@ -61,14 +61,11 @@ pub(crate) struct RequestResultCache { LruMap<(Hash, ParaId, OccupiedCoreAssumption), Option>, version: LruMap, disputes: LruMap)>>, - unapplied_slashes: - LruMap>, - key_ownership_proof: - LruMap<(Hash, ValidatorId), Option>, + unapplied_slashes: LruMap>, + key_ownership_proof: LruMap<(Hash, ValidatorId), Option>, minimum_backing_votes: LruMap, - - staging_para_backing_state: LruMap<(Hash, ParaId), Option>, - staging_async_backing_params: LruMap, + para_backing_state: LruMap<(Hash, ParaId), Option>, + async_backing_params: LruMap, } impl Default for RequestResultCache { @@ -100,8 +97,8 @@ impl Default for RequestResultCache { key_ownership_proof: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), minimum_backing_votes: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), - staging_para_backing_state: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), - staging_async_backing_params: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), + para_backing_state: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), + async_backing_params: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), } } } @@ -401,14 +398,14 @@ impl RequestResultCache { pub(crate) fn unapplied_slashes( &mut self, relay_parent: &Hash, - ) -> Option<&Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)>> { + ) -> Option<&Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>> { self.unapplied_slashes.get(relay_parent).map(|v| &*v) } pub(crate) fn cache_unapplied_slashes( &mut self, relay_parent: Hash, - value: Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)>, + value: Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>, ) { self.unapplied_slashes.insert(relay_parent, value); } @@ -416,14 +413,14 @@ impl RequestResultCache { pub(crate) fn key_ownership_proof( &mut self, key: (Hash, ValidatorId), - ) -> Option<&Option> { + ) -> Option<&Option> { self.key_ownership_proof.get(&key).map(|v| &*v) } pub(crate) fn cache_key_ownership_proof( &mut self, 
key: (Hash, ValidatorId), - value: Option, + value: Option, ) { self.key_ownership_proof.insert(key, value); } @@ -431,7 +428,7 @@ impl RequestResultCache { // This request is never cached, hence always returns `None`. pub(crate) fn submit_report_dispute_lost( &mut self, - _key: (Hash, vstaging::slashing::DisputeProof, vstaging::slashing::OpaqueKeyOwnershipProof), + _key: (Hash, slashing::DisputeProof, slashing::OpaqueKeyOwnershipProof), ) -> Option<&Option<()>> { None } @@ -448,34 +445,34 @@ impl RequestResultCache { self.minimum_backing_votes.insert(session_index, minimum_backing_votes); } - pub(crate) fn staging_para_backing_state( + pub(crate) fn para_backing_state( &mut self, key: (Hash, ParaId), - ) -> Option<&Option> { - self.staging_para_backing_state.get(&key).map(|v| &*v) + ) -> Option<&Option> { + self.para_backing_state.get(&key).map(|v| &*v) } - pub(crate) fn cache_staging_para_backing_state( + pub(crate) fn cache_para_backing_state( &mut self, key: (Hash, ParaId), - value: Option, + value: Option, ) { - self.staging_para_backing_state.insert(key, value); + self.para_backing_state.insert(key, value); } - pub(crate) fn staging_async_backing_params( + pub(crate) fn async_backing_params( &mut self, key: &Hash, - ) -> Option<&vstaging::AsyncBackingParams> { - self.staging_async_backing_params.get(key).map(|v| &*v) + ) -> Option<&async_backing::AsyncBackingParams> { + self.async_backing_params.get(key).map(|v| &*v) } - pub(crate) fn cache_staging_async_backing_params( + pub(crate) fn cache_async_backing_params( &mut self, key: Hash, - value: vstaging::AsyncBackingParams, + value: async_backing::AsyncBackingParams, ) { - self.staging_async_backing_params.insert(key, value); + self.async_backing_params.insert(key, value); } } @@ -515,16 +512,15 @@ pub(crate) enum RequestResult { ValidationCodeHash(Hash, ParaId, OccupiedCoreAssumption, Option), Version(Hash, u32), Disputes(Hash, Vec<(SessionIndex, CandidateHash, DisputeState)>), - UnappliedSlashes(Hash, Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)>), - KeyOwnershipProof(Hash, ValidatorId, Option), + UnappliedSlashes(Hash, Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>), + KeyOwnershipProof(Hash, ValidatorId, Option), // This is a request with side-effects. 
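The `RequestResultCache` changes above are renames that drop the `staging_` prefixes, but they also show this subsystem's caching pattern: one bounded LRU per runtime-API request kind, keyed by relay parent. A condensed, runnable version using the same `schnellru` crate follows; the key and value types are simplified stand-ins:

```rust
// Minimal version of the per-request LRU caching in cache.rs. `u32` stands
// in for AsyncBackingParams; a byte array stands in for the relay-parent Hash.
use schnellru::{ByLength, LruMap};

type Hash = [u8; 32];

struct RequestResultCache {
    async_backing_params: LruMap<Hash, u32>,
}

impl RequestResultCache {
    fn new(capacity: u32) -> Self {
        Self { async_backing_params: LruMap::new(ByLength::new(capacity)) }
    }

    fn async_backing_params(&mut self, key: &Hash) -> Option<&u32> {
        // `get` bumps the entry's recency; reborrow to hand out a shared ref.
        self.async_backing_params.get(key).map(|v| &*v)
    }

    fn cache_async_backing_params(&mut self, key: Hash, value: u32) {
        self.async_backing_params.insert(key, value);
    }
}

fn main() {
    let mut cache = RequestResultCache::new(128);
    let relay_parent = [0u8; 32];
    assert!(cache.async_backing_params(&relay_parent).is_none());
    cache.cache_async_backing_params(relay_parent, 3);
    assert_eq!(cache.async_backing_params(&relay_parent), Some(&3));
}
```

Capping every map with `ByLength` keeps memory bounded no matter how many distinct relay parents are queried.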
SubmitReportDisputeLost( Hash, - vstaging::slashing::DisputeProof, - vstaging::slashing::OpaqueKeyOwnershipProof, + slashing::DisputeProof, + slashing::OpaqueKeyOwnershipProof, Option<()>, ), - - StagingParaBackingState(Hash, ParaId, Option), - StagingAsyncBackingParams(Hash, vstaging::AsyncBackingParams), + ParaBackingState(Hash, ParaId, Option), + AsyncBackingParams(Hash, async_backing::AsyncBackingParams), } diff --git a/polkadot/node/core/runtime-api/src/lib.rs b/polkadot/node/core/runtime-api/src/lib.rs index ec9bf10fa6e3..19b2f5565a22 100644 --- a/polkadot/node/core/runtime-api/src/lib.rs +++ b/polkadot/node/core/runtime-api/src/lib.rs @@ -166,12 +166,11 @@ where .requests_cache .cache_key_ownership_proof((relay_parent, validator_id), key_ownership_proof), SubmitReportDisputeLost(_, _, _, _) => {}, - - StagingParaBackingState(relay_parent, para_id, constraints) => self + ParaBackingState(relay_parent, para_id, constraints) => self .requests_cache - .cache_staging_para_backing_state((relay_parent, para_id), constraints), - StagingAsyncBackingParams(relay_parent, params) => - self.requests_cache.cache_staging_async_backing_params(relay_parent, params), + .cache_para_backing_state((relay_parent, para_id), constraints), + AsyncBackingParams(relay_parent, params) => + self.requests_cache.cache_async_backing_params(relay_parent, params), } } @@ -297,13 +296,10 @@ where Request::SubmitReportDisputeLost(dispute_proof, key_ownership_proof, sender) }, ), - - Request::StagingParaBackingState(para, sender) => - query!(staging_para_backing_state(para), sender) - .map(|sender| Request::StagingParaBackingState(para, sender)), - Request::StagingAsyncBackingParams(sender) => - query!(staging_async_backing_params(), sender) - .map(|sender| Request::StagingAsyncBackingParams(sender)), + Request::ParaBackingState(para, sender) => query!(para_backing_state(para), sender) + .map(|sender| Request::ParaBackingState(para, sender)), + Request::AsyncBackingParams(sender) => query!(async_backing_params(), sender) + .map(|sender| Request::AsyncBackingParams(sender)), Request::MinimumBackingVotes(index, sender) => { if let Some(value) = self.requests_cache.minimum_backing_votes(index) { self.metrics.on_cached_request(); @@ -569,19 +565,18 @@ where ver = Request::MINIMUM_BACKING_VOTES_RUNTIME_REQUIREMENT, sender ), - - Request::StagingParaBackingState(para, sender) => { + Request::ParaBackingState(para, sender) => { query!( - StagingParaBackingState, - staging_para_backing_state(para), + ParaBackingState, + para_backing_state(para), ver = Request::STAGING_BACKING_STATE, sender ) }, - Request::StagingAsyncBackingParams(sender) => { + Request::AsyncBackingParams(sender) => { query!( - StagingAsyncBackingParams, - staging_async_backing_params(), + AsyncBackingParams, + async_backing_params(), ver = Request::STAGING_BACKING_STATE, sender ) diff --git a/polkadot/node/core/runtime-api/src/tests.rs b/polkadot/node/core/runtime-api/src/tests.rs index bb7c29689611..fb97139a8028 100644 --- a/polkadot/node/core/runtime-api/src/tests.rs +++ b/polkadot/node/core/runtime-api/src/tests.rs @@ -20,9 +20,9 @@ use polkadot_node_primitives::{BabeAllowedSlots, BabeEpoch, BabeEpochConfigurati use polkadot_node_subsystem::SpawnGlue; use polkadot_node_subsystem_test_helpers::make_subsystem_context; use polkadot_primitives::{ - vstaging, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateEvent, - CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, - GroupRotationInfo, Id as ParaId, 
InboundDownwardMessage, InboundHrmpMessage, + async_backing, slashing, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, + CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, + ExecutorParams, GroupRotationInfo, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, Slot, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, @@ -213,7 +213,7 @@ impl RuntimeApiSubsystemClient for MockSubsystemClient { async fn unapplied_slashes( &self, _: Hash, - ) -> Result, ApiError> { + ) -> Result, ApiError> { todo!("Not required for tests") } @@ -221,15 +221,15 @@ impl RuntimeApiSubsystemClient for MockSubsystemClient { &self, _: Hash, _: ValidatorId, - ) -> Result, ApiError> { + ) -> Result, ApiError> { todo!("Not required for tests") } async fn submit_report_dispute_lost( &self, _: Hash, - _: vstaging::slashing::DisputeProof, - _: vstaging::slashing::OpaqueKeyOwnershipProof, + _: slashing::DisputeProof, + _: slashing::OpaqueKeyOwnershipProof, ) -> Result, ApiError> { todo!("Not required for tests") } @@ -250,18 +250,18 @@ impl RuntimeApiSubsystemClient for MockSubsystemClient { Ok(self.authorities.clone()) } - async fn staging_async_backing_params( + async fn async_backing_params( &self, _: Hash, - ) -> Result { + ) -> Result { todo!("Not required for tests") } - async fn staging_para_backing_state( + async fn para_backing_state( &self, _: Hash, _: ParaId, - ) -> Result, ApiError> { + ) -> Result, ApiError> { todo!("Not required for tests") } diff --git a/polkadot/node/network/approval-distribution/src/lib.rs b/polkadot/node/network/approval-distribution/src/lib.rs index 70c20437d125..f76826d7fdf4 100644 --- a/polkadot/node/network/approval-distribution/src/lib.rs +++ b/polkadot/node/network/approval-distribution/src/lib.rs @@ -26,8 +26,8 @@ use polkadot_node_network_protocol::{ self as net_protocol, grid_topology::{RandomRouting, RequiredRouting, SessionGridTopologies, SessionGridTopology}, peer_set::{ValidationVersion, MAX_NOTIFICATION_SIZE}, - v1 as protocol_v1, vstaging as protocol_vstaging, PeerId, UnifiedReputationChange as Rep, - Versioned, VersionedValidationProtocol, View, + v1 as protocol_v1, v2 as protocol_v2, PeerId, UnifiedReputationChange as Rep, Versioned, + VersionedValidationProtocol, View, }; use polkadot_node_primitives::approval::{ AssignmentCert, BlockApprovalMeta, IndirectAssignmentCert, IndirectSignedApprovalVote, @@ -602,9 +602,7 @@ impl State { { match msg { Versioned::V1(protocol_v1::ApprovalDistributionMessage::Assignments(assignments)) | - Versioned::VStaging(protocol_vstaging::ApprovalDistributionMessage::Assignments( - assignments, - )) => { + Versioned::V2(protocol_v2::ApprovalDistributionMessage::Assignments(assignments)) => { gum::trace!( target: LOG_TARGET, peer_id = %peer_id, @@ -644,9 +642,7 @@ impl State { } }, Versioned::V1(protocol_v1::ApprovalDistributionMessage::Approvals(approvals)) | - Versioned::VStaging(protocol_vstaging::ApprovalDistributionMessage::Approvals( - approvals, - )) => { + Versioned::V2(protocol_v2::ApprovalDistributionMessage::Approvals(approvals)) => { gum::trace!( target: LOG_TARGET, peer_id = %peer_id, @@ -1060,7 +1056,7 @@ impl State { route_random }; - let (v1_peers, vstaging_peers) = { + let (v1_peers, v2_peers) = { let peer_data = &self.peer_data; let peers = entry .known_by @@ -1090,9 +1086,9 @@ impl State { } let v1_peers = 
filter_peers_by_version(&peers, ValidationVersion::V1); - let vstaging_peers = filter_peers_by_version(&peers, ValidationVersion::VStaging); + let v2_peers = filter_peers_by_version(&peers, ValidationVersion::V2); - (v1_peers, vstaging_peers) + (v1_peers, v2_peers) }; if !v1_peers.is_empty() { @@ -1103,10 +1099,10 @@ impl State { .await; } - if !vstaging_peers.is_empty() { + if !v2_peers.is_empty() { ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - vstaging_peers, - versioned_assignments_packet(ValidationVersion::VStaging, assignments.clone()), + v2_peers, + versioned_assignments_packet(ValidationVersion::V2, assignments.clone()), )) .await; } @@ -1395,7 +1391,7 @@ impl State { in_topology || knowledge.sent.contains(message_subject, MessageKind::Assignment) }; - let (v1_peers, vstaging_peers) = { + let (v1_peers, v2_peers) = { let peer_data = &self.peer_data; let peers = entry .known_by @@ -1425,9 +1421,9 @@ impl State { } let v1_peers = filter_peers_by_version(&peers, ValidationVersion::V1); - let vstaging_peers = filter_peers_by_version(&peers, ValidationVersion::VStaging); + let v2_peers = filter_peers_by_version(&peers, ValidationVersion::V2); - (v1_peers, vstaging_peers) + (v1_peers, v2_peers) }; let approvals = vec![vote]; @@ -1440,10 +1436,10 @@ impl State { .await; } - if !vstaging_peers.is_empty() { + if !v2_peers.is_empty() { ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - vstaging_peers, - versioned_approvals_packet(ValidationVersion::VStaging, approvals), + v2_peers, + versioned_approvals_packet(ValidationVersion::V2, approvals), )) .await; } @@ -2017,9 +2013,9 @@ fn versioned_approvals_packet( Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( protocol_v1::ApprovalDistributionMessage::Approvals(approvals), )), - ValidationVersion::VStaging => - Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( - protocol_vstaging::ApprovalDistributionMessage::Approvals(approvals), + ValidationVersion::V2 => + Versioned::V2(protocol_v2::ValidationProtocol::ApprovalDistribution( + protocol_v2::ApprovalDistributionMessage::Approvals(approvals), )), } } @@ -2033,9 +2029,9 @@ fn versioned_assignments_packet( Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( protocol_v1::ApprovalDistributionMessage::Assignments(assignments), )), - ValidationVersion::VStaging => - Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( - protocol_vstaging::ApprovalDistributionMessage::Assignments(assignments), + ValidationVersion::V2 => + Versioned::V2(protocol_v2::ValidationProtocol::ApprovalDistribution( + protocol_v2::ApprovalDistributionMessage::Assignments(assignments), )), } } diff --git a/polkadot/node/network/approval-distribution/src/tests.rs b/polkadot/node/network/approval-distribution/src/tests.rs index 1e9ae7b62007..29c7d8aa45da 100644 --- a/polkadot/node/network/approval-distribution/src/tests.rs +++ b/polkadot/node/network/approval-distribution/src/tests.rs @@ -2388,9 +2388,9 @@ fn import_versioned_approval() { let _ = test_harness(state, |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; // All peers are aware of relay parent. 
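This test drives the renamed `V2` wire protocol side by side with `V1`. The send path seen earlier in `approval-distribution` partitions peers by their negotiated protocol version and encodes one packet per partition; here is a self-contained analogue with illustrative names, not the real polkadot types:

```rust
// Minimal analogue of filter_peers_by_version plus per-version packets.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum ValidationVersion {
    V1,
    V2,
}

fn filter_peers_by_version<'a>(
    peers: &'a [(&'a str, ValidationVersion)],
    version: ValidationVersion,
) -> Vec<&'a str> {
    peers.iter().filter(|(_, v)| *v == version).map(|(p, _)| *p).collect()
}

fn main() {
    let peers = [
        ("peer_a", ValidationVersion::V2),
        ("peer_b", ValidationVersion::V1),
        ("peer_c", ValidationVersion::V2),
    ];
    // Each partition receives one message encoded for its own wire version.
    assert_eq!(filter_peers_by_version(&peers, ValidationVersion::V1), vec!["peer_b"]);
    assert_eq!(filter_peers_by_version(&peers, ValidationVersion::V2), vec!["peer_a", "peer_c"]);
}
```

Encoding once per protocol version rather than once per peer is the point of helpers like `versioned_approvals_packet`.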
- setup_peer_with_view(overseer, &peer_a, ValidationVersion::VStaging, view![hash]).await; + setup_peer_with_view(overseer, &peer_a, ValidationVersion::V2, view![hash]).await; setup_peer_with_view(overseer, &peer_b, ValidationVersion::V1, view![hash]).await; - setup_peer_with_view(overseer, &peer_c, ValidationVersion::VStaging, view![hash]).await; + setup_peer_with_view(overseer, &peer_c, ValidationVersion::V2, view![hash]).await; // new block `hash_a` with 1 candidates let meta = BlockApprovalMeta { @@ -2431,8 +2431,8 @@ fn import_versioned_approval() { overseer_recv(overseer).await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( - protocol_vstaging::ApprovalDistributionMessage::Assignments(assignments) + Versioned::V2(protocol_v2::ValidationProtocol::ApprovalDistribution( + protocol_v2::ApprovalDistributionMessage::Assignments(assignments) )) )) => { assert_eq!(peers.len(), 2); @@ -2450,8 +2450,8 @@ fn import_versioned_approval() { validator: validator_index, signature: dummy_signature(), }; - let msg = protocol_vstaging::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); - send_message_from_peer(overseer, &peer_a, Versioned::VStaging(msg)).await; + let msg = protocol_v2::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); + send_message_from_peer(overseer, &peer_a, Versioned::V2(msg)).await; assert_matches!( overseer_recv(overseer).await, @@ -2483,8 +2483,8 @@ fn import_versioned_approval() { overseer_recv(overseer).await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( - protocol_vstaging::ApprovalDistributionMessage::Approvals(approvals) + Versioned::V2(protocol_v2::ValidationProtocol::ApprovalDistribution( + protocol_v2::ApprovalDistributionMessage::Approvals(approvals) )) )) => { assert_eq!(peers, vec![peer_c]); diff --git a/polkadot/node/network/bitfield-distribution/src/lib.rs b/polkadot/node/network/bitfield-distribution/src/lib.rs index c85d874bc4db..68e381ab6be5 100644 --- a/polkadot/node/network/bitfield-distribution/src/lib.rs +++ b/polkadot/node/network/bitfield-distribution/src/lib.rs @@ -31,8 +31,8 @@ use polkadot_node_network_protocol::{ GridNeighbors, RandomRouting, RequiredRouting, SessionBoundGridTopologyStorage, }, peer_set::{ProtocolVersion, ValidationVersion}, - v1 as protocol_v1, vstaging as protocol_vstaging, OurView, PeerId, - UnifiedReputationChange as Rep, Versioned, View, + v1 as protocol_v1, v2 as protocol_v2, OurView, PeerId, UnifiedReputationChange as Rep, + Versioned, View, }; use polkadot_node_subsystem::{ jaeger, messages::*, overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, PerLeafSpan, @@ -96,8 +96,8 @@ impl BitfieldGossipMessage { self.relay_parent, self.signed_availability.into(), )), - Some(ValidationVersion::VStaging) => - Versioned::VStaging(protocol_vstaging::BitfieldDistributionMessage::Bitfield( + Some(ValidationVersion::V2) => + Versioned::V2(protocol_v2::BitfieldDistributionMessage::Bitfield( self.relay_parent, self.signed_availability.into(), )), @@ -502,8 +502,7 @@ async fn relay_message( }; let v1_interested_peers = filter_by_version(&interested_peers, ValidationVersion::V1); - let vstaging_interested_peers = - filter_by_version(&interested_peers, ValidationVersion::VStaging); + let v2_interested_peers = filter_by_version(&interested_peers, ValidationVersion::V2); if 
!v1_interested_peers.is_empty() { ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( @@ -513,10 +512,10 @@ async fn relay_message( .await; } - if !vstaging_interested_peers.is_empty() { + if !v2_interested_peers.is_empty() { ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - vstaging_interested_peers, - message.into_validation_protocol(ValidationVersion::VStaging.into()), + v2_interested_peers, + message.into_validation_protocol(ValidationVersion::V2.into()), )) .await } @@ -538,7 +537,7 @@ async fn process_incoming_peer_message( relay_parent, bitfield, )) => (relay_parent, bitfield), - Versioned::VStaging(protocol_vstaging::BitfieldDistributionMessage::Bitfield( + Versioned::V2(protocol_v2::BitfieldDistributionMessage::Bitfield( relay_parent, bitfield, )) => (relay_parent, bitfield), diff --git a/polkadot/node/network/bitfield-distribution/src/tests.rs b/polkadot/node/network/bitfield-distribution/src/tests.rs index d6795247e786..ba2434ea47d6 100644 --- a/polkadot/node/network/bitfield-distribution/src/tests.rs +++ b/polkadot/node/network/bitfield-distribution/src/tests.rs @@ -1111,9 +1111,9 @@ fn network_protocol_versioning() { let peer_c = PeerId::random(); let peers = [ - (peer_a, ValidationVersion::VStaging), + (peer_a, ValidationVersion::V2), (peer_b, ValidationVersion::V1), - (peer_c, ValidationVersion::VStaging), + (peer_c, ValidationVersion::V2), ]; // validator 0 key pair @@ -1173,7 +1173,7 @@ fn network_protocol_versioning() { &Default::default(), NetworkBridgeEvent::PeerMessage( peer_a, - msg.clone().into_network_message(ValidationVersion::VStaging.into()), + msg.clone().into_network_message(ValidationVersion::V2.into()), ), &mut rng, )); @@ -1201,14 +1201,14 @@ fn network_protocol_versioning() { } ); - // vstaging gossip + // v2 gossip assert_matches!( handle.recv().await, AllMessages::NetworkBridgeTx( NetworkBridgeTxMessage::SendValidationMessage(peers, send_msg), ) => { assert_eq!(peers, vec![peer_c]); - assert_eq!(send_msg, msg.clone().into_validation_protocol(ValidationVersion::VStaging.into())); + assert_eq!(send_msg, msg.clone().into_validation_protocol(ValidationVersion::V2.into())); } ); diff --git a/polkadot/node/network/bridge/src/rx/mod.rs b/polkadot/node/network/bridge/src/rx/mod.rs index 82c67061d9a5..7e86b46a7e03 100644 --- a/polkadot/node/network/bridge/src/rx/mod.rs +++ b/polkadot/node/network/bridge/src/rx/mod.rs @@ -33,7 +33,7 @@ use polkadot_node_network_protocol::{ CollationVersion, PeerSet, PeerSetProtocolNames, PerPeerSet, ProtocolVersion, ValidationVersion, }, - v1 as protocol_v1, vstaging as protocol_vstaging, ObservedRole, OurView, PeerId, + v1 as protocol_v1, v2 as protocol_v2, ObservedRole, OurView, PeerId, UnifiedReputationChange as Rep, View, }; @@ -262,13 +262,13 @@ where ), &metrics, ), - ValidationVersion::VStaging => send_message( + ValidationVersion::V2 => send_message( &mut network_service, vec![peer], PeerSet::Validation, version, &peerset_protocol_names, - WireMessage::::ViewUpdate( + WireMessage::::ViewUpdate( local_view, ), &metrics, @@ -304,13 +304,13 @@ where ), &metrics, ), - CollationVersion::VStaging => send_message( + CollationVersion::V2 => send_message( &mut network_service, vec![peer], PeerSet::Collation, version, &peerset_protocol_names, - WireMessage::::ViewUpdate( + WireMessage::::ViewUpdate( local_view, ), &metrics, @@ -465,9 +465,9 @@ where &metrics, ) } else if expected_versions[PeerSet::Validation] == - Some(ValidationVersion::VStaging.into()) + Some(ValidationVersion::V2.into()) { - 
handle_peer_messages::( + handle_peer_messages::( remote, PeerSet::Validation, &mut shared.0.lock().validation_peers, @@ -507,9 +507,9 @@ where &metrics, ) } else if expected_versions[PeerSet::Collation] == - Some(CollationVersion::VStaging.into()) + Some(CollationVersion::V2.into()) { - handle_peer_messages::( + handle_peer_messages::( remote, PeerSet::Collation, &mut shared.0.lock().collation_peers, @@ -813,10 +813,8 @@ fn update_our_view( let v1_validation_peers = filter_by_version(&validation_peers, ValidationVersion::V1.into()); let v1_collation_peers = filter_by_version(&collation_peers, CollationVersion::V1.into()); - let vstaging_validation_peers = - filter_by_version(&validation_peers, ValidationVersion::VStaging.into()); - let vstaging_collation_peers = - filter_by_version(&collation_peers, ValidationVersion::VStaging.into()); + let v2_validation_peers = filter_by_version(&validation_peers, ValidationVersion::V2.into()); + let v2_collation_peers = filter_by_version(&collation_peers, ValidationVersion::V2.into()); send_validation_message_v1( net, @@ -834,17 +832,17 @@ fn update_our_view( metrics, ); - send_validation_message_vstaging( + send_validation_message_v2( net, - vstaging_validation_peers, + v2_validation_peers, peerset_protocol_names, WireMessage::ViewUpdate(new_view.clone()), metrics, ); - send_collation_message_vstaging( + send_collation_message_v2( net, - vstaging_collation_peers, + v2_collation_peers, peerset_protocol_names, WireMessage::ViewUpdate(new_view), metrics, @@ -955,36 +953,36 @@ fn send_collation_message_v1( ); } -fn send_validation_message_vstaging( +fn send_validation_message_v2( net: &mut impl Network, peers: Vec, protocol_names: &PeerSetProtocolNames, - message: WireMessage, + message: WireMessage, metrics: &Metrics, ) { send_message( net, peers, PeerSet::Validation, - ValidationVersion::VStaging.into(), + ValidationVersion::V2.into(), protocol_names, message, metrics, ); } -fn send_collation_message_vstaging( +fn send_collation_message_v2( net: &mut impl Network, peers: Vec, protocol_names: &PeerSetProtocolNames, - message: WireMessage, + message: WireMessage, metrics: &Metrics, ) { send_message( net, peers, PeerSet::Collation, - CollationVersion::VStaging.into(), + CollationVersion::V2.into(), protocol_names, message, metrics, diff --git a/polkadot/node/network/bridge/src/rx/tests.rs b/polkadot/node/network/bridge/src/rx/tests.rs index 127f46e0fa37..7c69cce48391 100644 --- a/polkadot/node/network/bridge/src/rx/tests.rs +++ b/polkadot/node/network/bridge/src/rx/tests.rs @@ -1216,10 +1216,10 @@ fn network_protocol_versioning_view_update() { let peer_ids: Vec<_> = (0..4).map(|_| PeerId::random()).collect(); let peers = [ - (peer_ids[0], PeerSet::Validation, ValidationVersion::VStaging), + (peer_ids[0], PeerSet::Validation, ValidationVersion::V2), (peer_ids[1], PeerSet::Collation, ValidationVersion::V1), (peer_ids[2], PeerSet::Validation, ValidationVersion::V1), - (peer_ids[3], PeerSet::Collation, ValidationVersion::VStaging), + (peer_ids[3], PeerSet::Collation, ValidationVersion::V2), ]; let head = Hash::repeat_byte(1); @@ -1245,8 +1245,8 @@ fn network_protocol_versioning_view_update() { ValidationVersion::V1 => WireMessage::::ViewUpdate(view.clone()) .encode(), - ValidationVersion::VStaging => - WireMessage::::ViewUpdate(view.clone()) + ValidationVersion::V2 => + WireMessage::::ViewUpdate(view.clone()) .encode(), }; assert_network_actions_contains( @@ -1268,12 +1268,7 @@ fn network_protocol_versioning_subsystem_msg() { let peer = PeerId::random(); 
network_handle - .connect_peer( - peer, - ValidationVersion::VStaging, - PeerSet::Validation, - ObservedRole::Full, - ) + .connect_peer(peer, ValidationVersion::V2, PeerSet::Validation, ObservedRole::Full) .await; // bridge will inform about all connected peers. @@ -1282,7 +1277,7 @@ fn network_protocol_versioning_subsystem_msg() { NetworkBridgeEvent::PeerConnected( peer, ObservedRole::Full, - ValidationVersion::VStaging.into(), + ValidationVersion::V2.into(), None, ), &mut virtual_overseer, @@ -1297,9 +1292,9 @@ fn network_protocol_versioning_subsystem_msg() { } let approval_distribution_message = - protocol_vstaging::ApprovalDistributionMessage::Approvals(Vec::new()); + protocol_v2::ApprovalDistributionMessage::Approvals(Vec::new()); - let msg = protocol_vstaging::ValidationProtocol::ApprovalDistribution( + let msg = protocol_v2::ValidationProtocol::ApprovalDistribution( approval_distribution_message.clone(), ); @@ -1315,7 +1310,7 @@ fn network_protocol_versioning_subsystem_msg() { virtual_overseer.recv().await, AllMessages::ApprovalDistribution( ApprovalDistributionMessage::NetworkBridgeUpdate( - NetworkBridgeEvent::PeerMessage(p, Versioned::VStaging(m)) + NetworkBridgeEvent::PeerMessage(p, Versioned::V2(m)) ) ) => { assert_eq!(p, peer); @@ -1330,10 +1325,10 @@ fn network_protocol_versioning_subsystem_msg() { signature: sp_core::crypto::UncheckedFrom::unchecked_from([1u8; 64]), }; let statement_distribution_message = - protocol_vstaging::StatementDistributionMessage::V1Compatibility( + protocol_v2::StatementDistributionMessage::V1Compatibility( protocol_v1::StatementDistributionMessage::LargeStatement(metadata), ); - let msg = protocol_vstaging::ValidationProtocol::StatementDistribution( + let msg = protocol_v2::ValidationProtocol::StatementDistribution( statement_distribution_message.clone(), ); @@ -1349,7 +1344,7 @@ fn network_protocol_versioning_subsystem_msg() { virtual_overseer.recv().await, AllMessages::StatementDistribution( StatementDistributionMessage::NetworkBridgeUpdate( - NetworkBridgeEvent::PeerMessage(p, Versioned::VStaging(m)) + NetworkBridgeEvent::PeerMessage(p, Versioned::V2(m)) ) ) => { assert_eq!(p, peer); diff --git a/polkadot/node/network/bridge/src/tx/mod.rs b/polkadot/node/network/bridge/src/tx/mod.rs index 7fa1149593ca..f15635f1f41c 100644 --- a/polkadot/node/network/bridge/src/tx/mod.rs +++ b/polkadot/node/network/bridge/src/tx/mod.rs @@ -20,7 +20,7 @@ use super::*; use polkadot_node_network_protocol::{ peer_set::{CollationVersion, PeerSet, PeerSetProtocolNames, ValidationVersion}, request_response::ReqProtocolNames, - v1 as protocol_v1, vstaging as protocol_vstaging, PeerId, Versioned, + v1 as protocol_v1, v2 as protocol_v2, PeerId, Versioned, }; use polkadot_node_subsystem::{ @@ -198,7 +198,7 @@ where WireMessage::ProtocolMessage(msg), &metrics, ), - Versioned::VStaging(msg) => send_validation_message_vstaging( + Versioned::V2(msg) => send_validation_message_v2( &mut network_service, peers, peerset_protocol_names, @@ -223,7 +223,7 @@ where WireMessage::ProtocolMessage(msg), &metrics, ), - Versioned::VStaging(msg) => send_validation_message_vstaging( + Versioned::V2(msg) => send_validation_message_v2( &mut network_service, peers, peerset_protocol_names, @@ -248,7 +248,7 @@ where WireMessage::ProtocolMessage(msg), &metrics, ), - Versioned::VStaging(msg) => send_collation_message_vstaging( + Versioned::V2(msg) => send_collation_message_v2( &mut network_service, peers, peerset_protocol_names, @@ -273,7 +273,7 @@ where WireMessage::ProtocolMessage(msg), &metrics, 
), - Versioned::VStaging(msg) => send_collation_message_vstaging( + Versioned::V2(msg) => send_collation_message_v2( &mut network_service, peers, peerset_protocol_names, @@ -296,13 +296,11 @@ where Requests::AvailableDataFetchingV1(_) => metrics.on_message("available_data_fetching_v1"), Requests::CollationFetchingV1(_) => metrics.on_message("collation_fetching_v1"), - Requests::CollationFetchingVStaging(_) => - metrics.on_message("collation_fetching_vstaging"), + Requests::CollationFetchingV2(_) => metrics.on_message("collation_fetching_v2"), Requests::PoVFetchingV1(_) => metrics.on_message("pov_fetching_v1"), Requests::DisputeSendingV1(_) => metrics.on_message("dispute_sending_v1"), Requests::StatementFetchingV1(_) => metrics.on_message("statement_fetching_v1"), - Requests::AttestedCandidateVStaging(_) => - metrics.on_message("attested_candidate_vstaging"), + Requests::AttestedCandidateV2(_) => metrics.on_message("attested_candidate_v2"), } network_service @@ -425,36 +423,36 @@ fn send_collation_message_v1( ); } -fn send_validation_message_vstaging( +fn send_validation_message_v2( net: &mut impl Network, peers: Vec, protocol_names: &PeerSetProtocolNames, - message: WireMessage, + message: WireMessage, metrics: &Metrics, ) { send_message( net, peers, PeerSet::Validation, - ValidationVersion::VStaging.into(), + ValidationVersion::V2.into(), protocol_names, message, metrics, ); } -fn send_collation_message_vstaging( +fn send_collation_message_v2( net: &mut impl Network, peers: Vec, protocol_names: &PeerSetProtocolNames, - message: WireMessage, + message: WireMessage, metrics: &Metrics, ) { send_message( net, peers, PeerSet::Collation, - CollationVersion::VStaging.into(), + CollationVersion::V2.into(), protocol_names, message, metrics, diff --git a/polkadot/node/network/bridge/src/tx/tests.rs b/polkadot/node/network/bridge/src/tx/tests.rs index 21cd134c54f2..48287f8b74c9 100644 --- a/polkadot/node/network/bridge/src/tx/tests.rs +++ b/polkadot/node/network/bridge/src/tx/tests.rs @@ -341,10 +341,10 @@ fn network_protocol_versioning_send() { let peer_ids: Vec<_> = (0..4).map(|_| PeerId::random()).collect(); let peers = [ - (peer_ids[0], PeerSet::Validation, ValidationVersion::VStaging), + (peer_ids[0], PeerSet::Validation, ValidationVersion::V2), (peer_ids[1], PeerSet::Collation, ValidationVersion::V1), (peer_ids[2], PeerSet::Validation, ValidationVersion::V1), - (peer_ids[3], PeerSet::Collation, ValidationVersion::VStaging), + (peer_ids[3], PeerSet::Collation, ValidationVersion::V2), ]; for &(peer_id, peer_set, version) in &peers { @@ -359,9 +359,9 @@ fn network_protocol_versioning_send() { { let approval_distribution_message = - protocol_vstaging::ApprovalDistributionMessage::Approvals(Vec::new()); + protocol_v2::ApprovalDistributionMessage::Approvals(Vec::new()); - let msg = protocol_vstaging::ValidationProtocol::ApprovalDistribution( + let msg = protocol_v2::ValidationProtocol::ApprovalDistribution( approval_distribution_message.clone(), ); @@ -372,7 +372,7 @@ fn network_protocol_versioning_send() { .send(FromOrchestra::Communication { msg: NetworkBridgeTxMessage::SendValidationMessage( receivers.clone(), - Versioned::VStaging(msg.clone()), + Versioned::V2(msg.clone()), ), }) .timeout(TIMEOUT) @@ -398,15 +398,14 @@ fn network_protocol_versioning_send() { // send a collation protocol message. 
{ - let collator_protocol_message = protocol_vstaging::CollatorProtocolMessage::Declare( + let collator_protocol_message = protocol_v2::CollatorProtocolMessage::Declare( Sr25519Keyring::Alice.public().into(), 0_u32.into(), dummy_collator_signature(), ); - let msg = protocol_vstaging::CollationProtocol::CollatorProtocol( - collator_protocol_message.clone(), - ); + let msg = + protocol_v2::CollationProtocol::CollatorProtocol(collator_protocol_message.clone()); let receivers = vec![peer_ids[1], peer_ids[2]]; @@ -414,7 +413,7 @@ fn network_protocol_versioning_send() { .send(FromOrchestra::Communication { msg: NetworkBridgeTxMessage::SendCollationMessages(vec![( receivers.clone(), - Versioned::VStaging(msg.clone()), + Versioned::V2(msg.clone()), )]), }) .await; diff --git a/polkadot/node/network/collator-protocol/src/collator_side/collation.rs b/polkadot/node/network/collator-protocol/src/collator_side/collation.rs index 627c38b776f7..53f947142d10 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/collation.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/collation.rs @@ -22,8 +22,7 @@ use futures::{future::BoxFuture, stream::FuturesUnordered}; use polkadot_node_network_protocol::{ request_response::{ - incoming::OutgoingResponse, v1 as protocol_v1, vstaging as protocol_vstaging, - IncomingRequest, + incoming::OutgoingResponse, v1 as protocol_v1, v2 as protocol_v2, IncomingRequest, }, PeerId, }; @@ -89,7 +88,7 @@ pub struct WaitingCollationFetches { /// Backwards-compatible wrapper for incoming collations requests. pub enum VersionedCollationRequest { V1(IncomingRequest), - VStaging(IncomingRequest), + V2(IncomingRequest), } impl From> for VersionedCollationRequest { @@ -98,11 +97,9 @@ impl From> for VersionedC } } -impl From> - for VersionedCollationRequest -{ - fn from(req: IncomingRequest) -> Self { - Self::VStaging(req) +impl From> for VersionedCollationRequest { + fn from(req: IncomingRequest) -> Self { + Self::V2(req) } } @@ -111,7 +108,7 @@ impl VersionedCollationRequest { pub fn para_id(&self) -> ParaId { match self { VersionedCollationRequest::V1(req) => req.payload.para_id, - VersionedCollationRequest::VStaging(req) => req.payload.para_id, + VersionedCollationRequest::V2(req) => req.payload.para_id, } } @@ -119,7 +116,7 @@ impl VersionedCollationRequest { pub fn relay_parent(&self) -> Hash { match self { VersionedCollationRequest::V1(req) => req.payload.relay_parent, - VersionedCollationRequest::VStaging(req) => req.payload.relay_parent, + VersionedCollationRequest::V2(req) => req.payload.relay_parent, } } @@ -127,7 +124,7 @@ impl VersionedCollationRequest { pub fn peer_id(&self) -> PeerId { match self { VersionedCollationRequest::V1(req) => req.peer, - VersionedCollationRequest::VStaging(req) => req.peer, + VersionedCollationRequest::V2(req) => req.peer, } } @@ -138,7 +135,7 @@ impl VersionedCollationRequest { ) -> Result<(), ()> { match self { VersionedCollationRequest::V1(req) => req.send_outgoing_response(response), - VersionedCollationRequest::VStaging(req) => req.send_outgoing_response(response), + VersionedCollationRequest::V2(req) => req.send_outgoing_response(response), } } } diff --git a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs index ad2ab99568c8..304cabbaac80 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs @@ -31,10 +31,10 @@ use 
polkadot_node_network_protocol::{ peer_set::{CollationVersion, PeerSet}, request_response::{ incoming::{self, OutgoingResponse}, - v1 as request_v1, vstaging as request_vstaging, IncomingRequestReceiver, + v1 as request_v1, v2 as request_v2, IncomingRequestReceiver, }, - v1 as protocol_v1, vstaging as protocol_vstaging, OurView, PeerId, - UnifiedReputationChange as Rep, Versioned, View, + v1 as protocol_v1, v2 as protocol_v2, OurView, PeerId, UnifiedReputationChange as Rep, + Versioned, View, }; use polkadot_node_primitives::{CollationSecondedSignal, PoV, Statement}; use polkadot_node_subsystem::{ @@ -577,7 +577,7 @@ async fn determine_our_validators( fn declare_message( state: &mut State, version: CollationVersion, -) -> Option> { +) -> Option> { let para_id = state.collating_on?; Some(match version { CollationVersion::V1 => { @@ -590,17 +590,15 @@ fn declare_message( ); Versioned::V1(protocol_v1::CollationProtocol::CollatorProtocol(wire_message)) }, - CollationVersion::VStaging => { + CollationVersion::V2 => { let declare_signature_payload = - protocol_vstaging::declare_signature_payload(&state.local_peer_id); - let wire_message = protocol_vstaging::CollatorProtocolMessage::Declare( + protocol_v2::declare_signature_payload(&state.local_peer_id); + let wire_message = protocol_v2::CollatorProtocolMessage::Declare( state.collator_pair.public(), para_id, state.collator_pair.sign(&declare_signature_payload), ); - Versioned::VStaging(protocol_vstaging::CollationProtocol::CollatorProtocol( - wire_message, - )) + Versioned::V2(protocol_v2::CollationProtocol::CollatorProtocol(wire_message)) }, }) } @@ -706,15 +704,13 @@ async fn advertise_collation( collation.status.advance_to_advertised(); let collation_message = match protocol_version { - CollationVersion::VStaging => { - let wire_message = protocol_vstaging::CollatorProtocolMessage::AdvertiseCollation { + CollationVersion::V2 => { + let wire_message = protocol_v2::CollatorProtocolMessage::AdvertiseCollation { relay_parent, candidate_hash: *candidate_hash, parent_head_data_hash: collation.parent_head_data_hash, }; - Versioned::VStaging(protocol_vstaging::CollationProtocol::CollatorProtocol( - wire_message, - )) + Versioned::V2(protocol_v2::CollationProtocol::CollatorProtocol(wire_message)) }, CollationVersion::V1 => { let wire_message = @@ -837,7 +833,7 @@ async fn send_collation( let candidate_hash = receipt.hash(); // The response payload is the same for both versions of protocol - // and doesn't have vstaging alias for simplicity. + // and doesn't have v2 alias for simplicity. 
let response = OutgoingResponse { result: Ok(request_v1::CollationFetchingResponse::Collation(receipt, pov)), reputation_changes: Vec::new(), @@ -868,16 +864,13 @@ async fn handle_incoming_peer_message( runtime: &mut RuntimeInfo, state: &mut State, origin: PeerId, - msg: Versioned< - protocol_v1::CollatorProtocolMessage, - protocol_vstaging::CollatorProtocolMessage, - >, + msg: Versioned, ) -> Result<()> { use protocol_v1::CollatorProtocolMessage as V1; - use protocol_vstaging::CollatorProtocolMessage as VStaging; + use protocol_v2::CollatorProtocolMessage as V2; match msg { - Versioned::V1(V1::Declare(..)) | Versioned::VStaging(VStaging::Declare(..)) => { + Versioned::V1(V1::Declare(..)) | Versioned::V2(V2::Declare(..)) => { gum::trace!( target: LOG_TARGET, ?origin, @@ -888,8 +881,7 @@ async fn handle_incoming_peer_message( ctx.send_message(NetworkBridgeTxMessage::DisconnectPeer(origin, PeerSet::Collation)) .await; }, - Versioned::V1(V1::AdvertiseCollation(_)) | - Versioned::VStaging(VStaging::AdvertiseCollation { .. }) => { + Versioned::V1(V1::AdvertiseCollation(_)) | Versioned::V2(V2::AdvertiseCollation { .. }) => { gum::trace!( target: LOG_TARGET, ?origin, @@ -904,7 +896,7 @@ async fn handle_incoming_peer_message( .await; }, Versioned::V1(V1::CollationSeconded(relay_parent, statement)) | - Versioned::VStaging(VStaging::CollationSeconded(relay_parent, statement)) => { + Versioned::V2(V2::CollationSeconded(relay_parent, statement)) => { if !matches!(statement.unchecked_payload(), Statement::Seconded(_)) { gum::warn!( target: LOG_TARGET, @@ -1006,7 +998,7 @@ async fn handle_incoming_request( let collation = match &req { VersionedCollationRequest::V1(_) if !mode.is_enabled() => per_relay_parent.collations.values_mut().next(), - VersionedCollationRequest::VStaging(req) => + VersionedCollationRequest::V2(req) => per_relay_parent.collations.get_mut(&req.payload.candidate_hash), _ => { gum::warn!( @@ -1322,7 +1314,7 @@ pub(crate) async fn run( local_peer_id: PeerId, collator_pair: CollatorPair, req_v1_receiver: IncomingRequestReceiver, - req_v2_receiver: IncomingRequestReceiver, + req_v2_receiver: IncomingRequestReceiver, metrics: Metrics, ) -> std::result::Result<(), FatalError> { run_inner( @@ -1344,7 +1336,7 @@ async fn run_inner( local_peer_id: PeerId, collator_pair: CollatorPair, mut req_v1_receiver: IncomingRequestReceiver, - mut req_v2_receiver: IncomingRequestReceiver, + mut req_v2_receiver: IncomingRequestReceiver, metrics: Metrics, reputation: ReputationAggregator, reputation_interval: Duration, @@ -1425,7 +1417,7 @@ async fn run_inner( (ProspectiveParachainsMode::Disabled, VersionedCollationRequest::V1(_)) => { per_relay_parent.collations.values().next() }, - (ProspectiveParachainsMode::Enabled { .. }, VersionedCollationRequest::VStaging(req)) => { + (ProspectiveParachainsMode::Enabled { .. 
}, VersionedCollationRequest::V2(req)) => { per_relay_parent.collations.get(&req.payload.candidate_hash) }, _ => { @@ -1476,7 +1468,7 @@ async fn run_inner( log_error( handle_incoming_request(&mut ctx, &mut state, request).await, - "Handling incoming collation fetch request VStaging" + "Handling incoming collation fetch request V2" )?; } } diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs index b452c84c2cd8..7dd2287dab68 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs @@ -198,7 +198,7 @@ impl TestState { overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( relay_parent, - RuntimeApiRequest::StagingAsyncBackingParams(tx) + RuntimeApiRequest::AsyncBackingParams(tx) )) => { assert_eq!(relay_parent, self.relay_parent); tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap(); @@ -212,7 +212,7 @@ type VirtualOverseer = test_helpers::TestSubsystemContextHandle>( @@ -236,7 +236,7 @@ fn test_harness>( let (collation_req_receiver, req_v1_cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); - let (collation_req_vstaging_receiver, req_vstaging_cfg) = + let (collation_req_v2_receiver, req_v2_cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); let subsystem = async { run_inner( @@ -244,7 +244,7 @@ fn test_harness>( local_peer_id, collator_pair, collation_req_receiver, - collation_req_vstaging_receiver, + collation_req_v2_receiver, Default::default(), reputation, REPUTATION_CHANGE_TEST_INTERVAL, @@ -253,7 +253,7 @@ fn test_harness>( .unwrap(); }; - let test_fut = test(TestHarness { virtual_overseer, req_v1_cfg, req_vstaging_cfg }); + let test_fut = test(TestHarness { virtual_overseer, req_v1_cfg, req_v2_cfg }); futures::pin_mut!(test_fut); futures::pin_mut!(subsystem); @@ -330,7 +330,7 @@ async fn setup_system(virtual_overseer: &mut VirtualOverseer, test_state: &TestS overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( relay_parent, - RuntimeApiRequest::StagingAsyncBackingParams(tx) + RuntimeApiRequest::AsyncBackingParams(tx) )) => { assert_eq!(relay_parent, test_state.relay_parent); tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap(); @@ -545,7 +545,7 @@ async fn expect_declare_msg( /// Check that the next received message is a collation advertisement message. /// -/// Expects vstaging message if `expected_candidate_hashes` is `Some`, v1 otherwise. +/// Expects v2 message if `expected_candidate_hashes` is `Some`, v1 otherwise. async fn expect_advertise_collation_msg( virtual_overseer: &mut VirtualOverseer, peer: &PeerId, @@ -579,13 +579,13 @@ async fn expect_advertise_collation_msg( }, ( Some(candidate_hashes), - Versioned::VStaging(protocol_vstaging::CollationProtocol::CollatorProtocol( + Versioned::V2(protocol_v2::CollationProtocol::CollatorProtocol( wire_message, )), ) => { assert_matches!( wire_message, - protocol_vstaging::CollatorProtocolMessage::AdvertiseCollation { + protocol_v2::CollatorProtocolMessage::AdvertiseCollation { relay_parent, candidate_hash, .. 
@@ -634,7 +634,7 @@ fn advertise_and_send_collation() { |test_harness| async move { let mut virtual_overseer = test_harness.virtual_overseer; let mut req_v1_cfg = test_harness.req_v1_cfg; - let req_vstaging_cfg = test_harness.req_vstaging_cfg; + let req_v2_cfg = test_harness.req_v2_cfg; setup_system(&mut virtual_overseer, &test_state).await; @@ -789,7 +789,7 @@ fn advertise_and_send_collation() { None, ) .await; - TestHarness { virtual_overseer, req_v1_cfg, req_vstaging_cfg } + TestHarness { virtual_overseer, req_v1_cfg, req_v2_cfg } }, ); } @@ -807,7 +807,7 @@ fn delay_reputation_change() { |test_harness| async move { let mut virtual_overseer = test_harness.virtual_overseer; let mut req_v1_cfg = test_harness.req_v1_cfg; - let req_vstaging_cfg = test_harness.req_vstaging_cfg; + let req_v2_cfg = test_harness.req_v2_cfg; setup_system(&mut virtual_overseer, &test_state).await; @@ -903,15 +903,15 @@ fn delay_reputation_change() { ); } - TestHarness { virtual_overseer, req_v1_cfg, req_vstaging_cfg } + TestHarness { virtual_overseer, req_v1_cfg, req_v2_cfg } }, ); } -/// Tests that collator side works with vstaging network protocol +/// Tests that collator side works with v2 network protocol /// before async backing is enabled. #[test] -fn advertise_collation_vstaging_protocol() { +fn advertise_collation_v2_protocol() { let test_state = TestState::default(); let local_peer_id = test_state.local_peer_id; let collator_pair = test_state.collator_pair.clone(); @@ -941,21 +941,16 @@ fn advertise_collation_vstaging_protocol() { Some(validators[0].clone()), ) .await; - // The rest with vstaging. + // The rest with v2. for (val, peer) in validators.iter().zip(peer_ids.iter()).skip(1) { - connect_peer( - virtual_overseer, - *peer, - CollationVersion::VStaging, - Some(val.clone()), - ) - .await; + connect_peer(virtual_overseer, *peer, CollationVersion::V2, Some(val.clone())) + .await; } // Declare messages. expect_declare_msg(virtual_overseer, &test_state, &peer_ids[0]).await; for peer_id in peer_ids.iter().skip(1) { - prospective_parachains::expect_declare_msg_vstaging( + prospective_parachains::expect_declare_msg_v2( virtual_overseer, &test_state, &peer_id, @@ -981,7 +976,7 @@ fn advertise_collation_vstaging_protocol() { virtual_overseer, peer_id, test_state.relay_parent, - Some(vec![candidate.hash()]), // This is `Some`, advertisement is vstaging. + Some(vec![candidate.hash()]), // This is `Some`, advertisement is v2. 
) .await; } @@ -1405,7 +1400,7 @@ fn connect_to_buffered_groups() { |test_harness| async move { let mut virtual_overseer = test_harness.virtual_overseer; let mut req_cfg = test_harness.req_v1_cfg; - let req_vstaging_cfg = test_harness.req_vstaging_cfg; + let req_v2_cfg = test_harness.req_v2_cfg; setup_system(&mut virtual_overseer, &test_state).await; @@ -1510,7 +1505,7 @@ fn connect_to_buffered_groups() { } ); - TestHarness { virtual_overseer, req_v1_cfg: req_cfg, req_vstaging_cfg } + TestHarness { virtual_overseer, req_v1_cfg: req_cfg, req_v2_cfg } }, ); } diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs index bd55c35852fa..fd9d7a746ebe 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs @@ -19,10 +19,10 @@ use super::*; use polkadot_node_subsystem::messages::{ChainApiMessage, ProspectiveParachainsMessage}; -use polkadot_primitives::{vstaging as vstaging_primitives, Header, OccupiedCore}; +use polkadot_primitives::{AsyncBackingParams, Header, OccupiedCore}; -const ASYNC_BACKING_PARAMETERS: vstaging_primitives::AsyncBackingParams = - vstaging_primitives::AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 }; +const ASYNC_BACKING_PARAMETERS: AsyncBackingParams = + AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 }; fn get_parent_hash(hash: Hash) -> Hash { Hash::from_low_u64_be(hash.to_low_u64_be() + 1) @@ -52,7 +52,7 @@ async fn update_view( overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( parent, - RuntimeApiRequest::StagingAsyncBackingParams(tx), + RuntimeApiRequest::AsyncBackingParams(tx), )) => { tx.send(Ok(ASYNC_BACKING_PARAMETERS)).unwrap(); (parent, new_view.get(&parent).copied().expect("Unknown parent requested")) @@ -124,7 +124,7 @@ async fn update_view( } /// Check that the next received message is a `Declare` message. -pub(super) async fn expect_declare_msg_vstaging( +pub(super) async fn expect_declare_msg_v2( virtual_overseer: &mut VirtualOverseer, test_state: &TestState, peer: &PeerId, @@ -133,20 +133,20 @@ pub(super) async fn expect_declare_msg_vstaging( overseer_recv(virtual_overseer).await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendCollationMessage( to, - Versioned::VStaging(protocol_vstaging::CollationProtocol::CollatorProtocol( + Versioned::V2(protocol_v2::CollationProtocol::CollatorProtocol( wire_message, )), )) => { assert_eq!(to[0], *peer); assert_matches!( wire_message, - protocol_vstaging::CollatorProtocolMessage::Declare( + protocol_v2::CollatorProtocolMessage::Declare( collator_id, para_id, signature, ) => { assert!(signature.verify( - &*protocol_vstaging::declare_signature_payload(&test_state.local_peer_id), + &*protocol_v2::declare_signature_payload(&test_state.local_peer_id), &collator_id), ); assert_eq!(collator_id, test_state.collator_pair.public()); @@ -203,13 +203,12 @@ fn distribute_collation_from_implicit_view() { .into_iter() .zip(validator_peer_ids.clone()) { - connect_peer(virtual_overseer, peer, CollationVersion::VStaging, Some(val.clone())) - .await; + connect_peer(virtual_overseer, peer, CollationVersion::V2, Some(val.clone())).await; } // Collator declared itself to each peer. 
for peer_id in &validator_peer_ids { - expect_declare_msg_vstaging(virtual_overseer, &test_state, peer_id).await; + expect_declare_msg_v2(virtual_overseer, &test_state, peer_id).await; } let pov = PoV { block_data: BlockData(vec![1, 2, 3]) }; @@ -386,7 +385,7 @@ fn advertise_and_send_collation_by_hash() { |test_harness| async move { let mut virtual_overseer = test_harness.virtual_overseer; let req_v1_cfg = test_harness.req_v1_cfg; - let mut req_vstaging_cfg = test_harness.req_vstaging_cfg; + let mut req_v2_cfg = test_harness.req_v2_cfg; let head_a = Hash::from_low_u64_be(128); let head_a_num: u32 = 64; @@ -435,11 +434,11 @@ fn advertise_and_send_collation_by_hash() { connect_peer( &mut virtual_overseer, peer, - CollationVersion::VStaging, + CollationVersion::V2, Some(validator_id.clone()), ) .await; - expect_declare_msg_vstaging(&mut virtual_overseer, &test_state, &peer).await; + expect_declare_msg_v2(&mut virtual_overseer, &test_state, &peer).await; // Head `b` is not a leaf, but both advertisements are still relevant. send_peer_view_change(&mut virtual_overseer, &peer, vec![head_b]).await; @@ -449,13 +448,13 @@ fn advertise_and_send_collation_by_hash() { for (candidate, pov_block) in candidates { let (pending_response, rx) = oneshot::channel(); - req_vstaging_cfg + req_v2_cfg .inbound_queue .as_mut() .unwrap() .send(RawIncomingRequest { peer, - payload: request_vstaging::CollationFetchingRequest { + payload: request_v2::CollationFetchingRequest { relay_parent: head_b, para_id: test_state.para_id, candidate_hash: candidate.hash(), @@ -469,7 +468,7 @@ fn advertise_and_send_collation_by_hash() { assert_matches!( rx.await, Ok(full_response) => { - // Response is the same for vstaging. + // Response is the same for v2. let request_v1::CollationFetchingResponse::Collation(receipt, pov): request_v1::CollationFetchingResponse = request_v1::CollationFetchingResponse::decode( &mut full_response.result @@ -482,7 +481,7 @@ fn advertise_and_send_collation_by_hash() { ); } - TestHarness { virtual_overseer, req_v1_cfg, req_vstaging_cfg } + TestHarness { virtual_overseer, req_v1_cfg, req_v2_cfg } }, ) } @@ -552,11 +551,11 @@ fn advertise_core_occupied() { connect_peer( virtual_overseer, peer_ids[0], - CollationVersion::VStaging, + CollationVersion::V2, Some(validators[0].clone()), ) .await; - expect_declare_msg_vstaging(virtual_overseer, &test_state, &peer_ids[0]).await; + expect_declare_msg_v2(virtual_overseer, &test_state, &peer_ids[0]).await; // Peer is aware of the leaf. send_peer_view_change(virtual_overseer, &peer_ids[0], vec![head_a]).await; diff --git a/polkadot/node/network/collator-protocol/src/lib.rs b/polkadot/node/network/collator-protocol/src/lib.rs index 62c033954f75..1edc67664172 100644 --- a/polkadot/node/network/collator-protocol/src/lib.rs +++ b/polkadot/node/network/collator-protocol/src/lib.rs @@ -32,7 +32,7 @@ use polkadot_node_subsystem_util::reputation::ReputationAggregator; use sp_keystore::KeystorePtr; use polkadot_node_network_protocol::{ - request_response::{v1 as request_v1, vstaging as protocol_vstaging, IncomingRequestReceiver}, + request_response::{v1 as request_v1, v2 as protocol_v2, IncomingRequestReceiver}, PeerId, UnifiedReputationChange as Rep, }; use polkadot_primitives::CollatorPair; @@ -83,9 +83,8 @@ pub enum ProtocolSide { collator_pair: CollatorPair, /// Receiver for v1 collation fetching requests. request_receiver_v1: IncomingRequestReceiver, - /// Receiver for vstaging collation fetching requests. 
- request_receiver_vstaging: - IncomingRequestReceiver, + /// Receiver for v2 collation fetching requests. + request_receiver_v2: IncomingRequestReceiver, /// Metrics. metrics: collator_side::Metrics, }, @@ -121,14 +120,14 @@ impl CollatorProtocolSubsystem { peer_id, collator_pair, request_receiver_v1, - request_receiver_vstaging, + request_receiver_v2, metrics, } => collator_side::run( ctx, peer_id, collator_pair, request_receiver_v1, - request_receiver_vstaging, + request_receiver_v2, metrics, ) .map_err(|e| SubsystemError::with_origin("collator-protocol", e)) diff --git a/polkadot/node/network/collator-protocol/src/validator_side/collation.rs b/polkadot/node/network/collator-protocol/src/validator_side/collation.rs index 4c92780f2da9..a53e0028b9e7 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/collation.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/collation.rs @@ -119,7 +119,7 @@ impl PendingCollation { } } -/// vstaging advertisement that was rejected by the backing +/// v2 advertisement that was rejected by the backing /// subsystem. Validator may fetch it later if its fragment /// membership gets recognized before relay parent goes out of view. #[derive(Debug, Clone)] diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs index e8cf769d2e5f..fcb408d54b1b 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs @@ -34,10 +34,10 @@ use polkadot_node_network_protocol::{ peer_set::{CollationVersion, PeerSet}, request_response::{ outgoing::{Recipient, RequestError}, - v1 as request_v1, vstaging as request_vstaging, OutgoingRequest, Requests, + v1 as request_v1, v2 as request_v2, OutgoingRequest, Requests, }, - v1 as protocol_v1, vstaging as protocol_vstaging, OurView, PeerId, - UnifiedReputationChange as Rep, Versioned, View, + v1 as protocol_v1, v2 as protocol_v2, OurView, PeerId, UnifiedReputationChange as Rep, + Versioned, View, }; use polkadot_node_primitives::{SignedFullStatement, Statement}; use polkadot_node_subsystem::{ @@ -624,13 +624,9 @@ async fn notify_collation_seconded( CollationVersion::V1 => Versioned::V1(protocol_v1::CollationProtocol::CollatorProtocol( protocol_v1::CollatorProtocolMessage::CollationSeconded(relay_parent, statement), )), - CollationVersion::VStaging => - Versioned::VStaging(protocol_vstaging::CollationProtocol::CollatorProtocol( - protocol_vstaging::CollatorProtocolMessage::CollationSeconded( - relay_parent, - statement, - ), - )), + CollationVersion::V2 => Versioned::V2(protocol_v2::CollationProtocol::CollatorProtocol( + protocol_v2::CollatorProtocolMessage::CollationSeconded(relay_parent, statement), + )), }; sender .send_message(NetworkBridgeTxMessage::SendCollationMessage(vec![peer_id], wire_message)) @@ -694,16 +690,12 @@ async fn request_collation( let requests = Requests::CollationFetchingV1(req); (requests, response_recv.boxed()) }, - (CollationVersion::VStaging, Some(ProspectiveCandidate { candidate_hash, .. })) => { + (CollationVersion::V2, Some(ProspectiveCandidate { candidate_hash, .. 
})) => { let (req, response_recv) = OutgoingRequest::new( Recipient::Peer(peer_id), - request_vstaging::CollationFetchingRequest { - relay_parent, - para_id, - candidate_hash, - }, + request_v2::CollationFetchingRequest { relay_parent, para_id, candidate_hash }, ); - let requests = Requests::CollationFetchingVStaging(req); + let requests = Requests::CollationFetchingV2(req); (requests, response_recv.boxed()) }, _ => return Err(FetchError::ProtocolMismatch), @@ -758,18 +750,15 @@ async fn process_incoming_peer_message( ctx: &mut Context, state: &mut State, origin: PeerId, - msg: Versioned< - protocol_v1::CollatorProtocolMessage, - protocol_vstaging::CollatorProtocolMessage, - >, + msg: Versioned, ) { use protocol_v1::CollatorProtocolMessage as V1; - use protocol_vstaging::CollatorProtocolMessage as VStaging; + use protocol_v2::CollatorProtocolMessage as V2; use sp_runtime::traits::AppVerify; match msg { Versioned::V1(V1::Declare(collator_id, para_id, signature)) | - Versioned::VStaging(VStaging::Declare(collator_id, para_id, signature)) => { + Versioned::V2(V2::Declare(collator_id, para_id, signature)) => { if collator_peer_id(&state.peer_data, &collator_id).is_some() { modify_reputation( &mut state.reputation, @@ -881,7 +870,7 @@ async fn process_incoming_peer_message( modify_reputation(&mut state.reputation, ctx.sender(), origin, rep).await; } }, - Versioned::VStaging(VStaging::AdvertiseCollation { + Versioned::V2(V2::AdvertiseCollation { relay_parent, candidate_hash, parent_head_data_hash, @@ -901,15 +890,14 @@ async fn process_incoming_peer_message( ?relay_parent, ?candidate_hash, error = ?err, - "Rejected vstaging advertisement", + "Rejected v2 advertisement", ); if let Some(rep) = err.reputation_changes() { modify_reputation(&mut state.reputation, ctx.sender(), origin, rep).await; } }, - Versioned::V1(V1::CollationSeconded(..)) | - Versioned::VStaging(VStaging::CollationSeconded(..)) => { + Versioned::V1(V1::CollationSeconded(..)) | Versioned::V2(V2::CollationSeconded(..)) => { gum::warn!( target: LOG_TARGET, peer_id = ?origin, @@ -1074,7 +1062,7 @@ where }; if relay_parent_mode.is_enabled() && prospective_candidate.is_none() { - // Expected vstaging advertisement. + // Expected v2 advertisement. 
return Err(AdvertisementError::ProtocolMismatch) } diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs index 1cb656e325d3..9812998aab76 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs @@ -357,7 +357,7 @@ async fn assert_fetch_collation_request( ), Some(candidate_hash) => assert_matches!( req, - Requests::CollationFetchingVStaging(req) => { + Requests::CollationFetchingV2(req) => { let payload = req.payload; assert_eq!(payload.relay_parent, relay_parent); assert_eq!(payload.para_id, para_id); @@ -394,12 +394,11 @@ async fn connect_and_declare_collator( para_id, collator.sign(&protocol_v1::declare_signature_payload(&peer)), )), - CollationVersion::VStaging => - Versioned::VStaging(protocol_vstaging::CollatorProtocolMessage::Declare( - collator.public(), - para_id, - collator.sign(&protocol_v1::declare_signature_payload(&peer)), - )), + CollationVersion::V2 => Versioned::V2(protocol_v2::CollatorProtocolMessage::Declare( + collator.public(), + para_id, + collator.sign(&protocol_v1::declare_signature_payload(&peer)), + )), }; overseer_send( @@ -421,7 +420,7 @@ async fn advertise_collation( ) { let wire_message = match candidate { Some((candidate_hash, parent_head_data_hash)) => - Versioned::VStaging(protocol_vstaging::CollatorProtocolMessage::AdvertiseCollation { + Versioned::V2(protocol_v2::CollatorProtocolMessage::AdvertiseCollation { relay_parent, candidate_hash, parent_head_data_hash, @@ -444,7 +443,7 @@ async fn assert_async_backing_params_request(virtual_overseer: &mut VirtualOvers overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( relay_parent, - RuntimeApiRequest::StagingAsyncBackingParams(tx) + RuntimeApiRequest::AsyncBackingParams(tx) )) => { assert_eq!(relay_parent, hash); tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap(); @@ -499,10 +498,10 @@ fn act_on_advertisement() { }); } -/// Tests that validator side works with vstaging network protocol +/// Tests that validator side works with v2 network protocol /// before async backing is enabled. #[test] -fn act_on_advertisement_vstaging() { +fn act_on_advertisement_v2() { let test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { @@ -529,13 +528,13 @@ fn act_on_advertisement_vstaging() { peer_b, pair.clone(), test_state.chain_ids[0], - CollationVersion::VStaging, + CollationVersion::V2, ) .await; let candidate_hash = CandidateHash::default(); let parent_head_data_hash = Hash::zero(); - // vstaging advertisement. + // v2 advertisement. 
advertise_collation( &mut virtual_overseer, peer_b, diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs index e2a007b308e5..4da0f11da390 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs @@ -20,12 +20,12 @@ use super::*; use polkadot_node_subsystem::messages::ChainApiMessage; use polkadot_primitives::{ - vstaging as vstaging_primitives, BlockNumber, CandidateCommitments, CommittedCandidateReceipt, - Header, SigningContext, ValidatorId, + AsyncBackingParams, BlockNumber, CandidateCommitments, CommittedCandidateReceipt, Header, + SigningContext, ValidatorId, }; -const ASYNC_BACKING_PARAMETERS: vstaging_primitives::AsyncBackingParams = - vstaging_primitives::AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 }; +const ASYNC_BACKING_PARAMETERS: AsyncBackingParams = + AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 }; fn get_parent_hash(hash: Hash) -> Hash { Hash::from_low_u64_be(hash.to_low_u64_be() + 1) @@ -97,7 +97,7 @@ async fn update_view( overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( parent, - RuntimeApiRequest::StagingAsyncBackingParams(tx), + RuntimeApiRequest::AsyncBackingParams(tx), )) => { tx.send(Ok(ASYNC_BACKING_PARAMETERS)).unwrap(); (parent, new_view.get(&parent).copied().expect("Unknown parent requested")) @@ -226,8 +226,8 @@ async fn assert_collation_seconded( overseer_recv(virtual_overseer).await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendCollationMessage( peers, - Versioned::VStaging(protocol_vstaging::CollationProtocol::CollatorProtocol( - protocol_vstaging::CollatorProtocolMessage::CollationSeconded( + Versioned::V2(protocol_v2::CollationProtocol::CollatorProtocol( + protocol_v2::CollatorProtocolMessage::CollationSeconded( _relay_parent, .., ), @@ -306,7 +306,7 @@ fn accept_advertisements_from_implicit_view() { peer_a, pair_a.clone(), test_state.chain_ids[0], - CollationVersion::VStaging, + CollationVersion::V2, ) .await; connect_and_declare_collator( @@ -314,7 +314,7 @@ fn accept_advertisements_from_implicit_view() { peer_b, pair_b.clone(), test_state.chain_ids[1], - CollationVersion::VStaging, + CollationVersion::V2, ) .await; @@ -406,7 +406,7 @@ fn second_multiple_candidates_per_relay_parent() { peer_a, pair.clone(), test_state.chain_ids[0], - CollationVersion::VStaging, + CollationVersion::V2, ) .await; @@ -457,7 +457,7 @@ fn second_multiple_candidates_per_relay_parent() { let pov = PoV { block_data: BlockData(vec![1]) }; response_channel - .send(Ok(request_vstaging::CollationFetchingResponse::Collation( + .send(Ok(request_v2::CollationFetchingResponse::Collation( candidate.clone(), pov.clone(), ) @@ -514,7 +514,7 @@ fn second_multiple_candidates_per_relay_parent() { peer_b, pair_b.clone(), test_state.chain_ids[0], - CollationVersion::VStaging, + CollationVersion::V2, ) .await; @@ -562,7 +562,7 @@ fn fetched_collation_sanity_check() { peer_a, pair.clone(), test_state.chain_ids[0], - CollationVersion::VStaging, + CollationVersion::V2, ) .await; @@ -611,7 +611,7 @@ fn fetched_collation_sanity_check() { let pov = PoV { block_data: BlockData(vec![1]) }; response_channel - .send(Ok(request_vstaging::CollationFetchingResponse::Collation( + 
.send(Ok(request_v2::CollationFetchingResponse::Collation( candidate.clone(), pov.clone(), ) @@ -668,7 +668,7 @@ fn advertisement_spam_protection() { peer_a, pair_a.clone(), test_state.chain_ids[1], - CollationVersion::VStaging, + CollationVersion::V2, ) .await; @@ -748,7 +748,7 @@ fn backed_candidate_unblocks_advertisements() { peer_a, pair_a.clone(), test_state.chain_ids[0], - CollationVersion::VStaging, + CollationVersion::V2, ) .await; connect_and_declare_collator( @@ -756,7 +756,7 @@ fn backed_candidate_unblocks_advertisements() { peer_b, pair_b.clone(), test_state.chain_ids[1], - CollationVersion::VStaging, + CollationVersion::V2, ) .await; @@ -856,7 +856,7 @@ fn active_leave_unblocks_advertisements() { *peer_id, peer.clone(), test_state.chain_ids[0], - CollationVersion::VStaging, + CollationVersion::V2, ) .await; } diff --git a/polkadot/node/network/gossip-support/src/lib.rs b/polkadot/node/network/gossip-support/src/lib.rs index c5dc1ba14bd3..4fa23507e86b 100644 --- a/polkadot/node/network/gossip-support/src/lib.rs +++ b/polkadot/node/network/gossip-support/src/lib.rs @@ -452,7 +452,7 @@ where // match void -> LLVM unreachable match message { Versioned::V1(m) => match m {}, - Versioned::VStaging(m) => match m {}, + Versioned::V2(m) => match m {}, } }, } diff --git a/polkadot/node/network/protocol/Cargo.toml b/polkadot/node/network/protocol/Cargo.toml index c33b9eae3252..379334ded24a 100644 --- a/polkadot/node/network/protocol/Cargo.toml +++ b/polkadot/node/network/protocol/Cargo.toml @@ -27,6 +27,3 @@ bitvec = "1" [dev-dependencies] rand_chacha = "0.3.1" - -[features] -network-protocol-staging = [] diff --git a/polkadot/node/network/protocol/src/lib.rs b/polkadot/node/network/protocol/src/lib.rs index 1bed2c12fe20..901ac99b6693 100644 --- a/polkadot/node/network/protocol/src/lib.rs +++ b/polkadot/node/network/protocol/src/lib.rs @@ -253,26 +253,25 @@ impl View { /// A protocol-versioned type. #[derive(Debug, Clone, PartialEq, Eq)] -pub enum Versioned { +pub enum Versioned { /// V1 type. V1(V1), - /// VStaging type. - VStaging(VStaging), + /// V2 type. + V2(V2), } -impl Versioned<&'_ V1, &'_ VStaging> { +impl Versioned<&'_ V1, &'_ V2> { /// Convert to a fully-owned version of the message. - pub fn clone_inner(&self) -> Versioned { + pub fn clone_inner(&self) -> Versioned { match *self { Versioned::V1(inner) => Versioned::V1(inner.clone()), - Versioned::VStaging(inner) => Versioned::VStaging(inner.clone()), + Versioned::V2(inner) => Versioned::V2(inner.clone()), } } } /// All supported versions of the validation protocol message. -pub type VersionedValidationProtocol = - Versioned; +pub type VersionedValidationProtocol = Versioned; impl From for VersionedValidationProtocol { fn from(v1: v1::ValidationProtocol) -> Self { @@ -280,14 +279,14 @@ impl From for VersionedValidationProtocol { } } -impl From for VersionedValidationProtocol { - fn from(vstaging: vstaging::ValidationProtocol) -> Self { - VersionedValidationProtocol::VStaging(vstaging) +impl From for VersionedValidationProtocol { + fn from(v2: v2::ValidationProtocol) -> Self { + VersionedValidationProtocol::V2(v2) } } /// All supported versions of the collation protocol message. 
-pub type VersionedCollationProtocol = Versioned; +pub type VersionedCollationProtocol = Versioned; impl From for VersionedCollationProtocol { fn from(v1: v1::CollationProtocol) -> Self { @@ -295,9 +294,9 @@ impl From for VersionedCollationProtocol { } } -impl From for VersionedCollationProtocol { - fn from(vstaging: vstaging::CollationProtocol) -> Self { - VersionedCollationProtocol::VStaging(vstaging) +impl From for VersionedCollationProtocol { + fn from(v2: v2::CollationProtocol) -> Self { + VersionedCollationProtocol::V2(v2) } } @@ -307,7 +306,7 @@ macro_rules! impl_versioned_full_protocol_from { fn from(versioned_from: $from) -> $out { match versioned_from { Versioned::V1(x) => Versioned::V1(x.into()), - Versioned::VStaging(x) => Versioned::VStaging(x.into()), + Versioned::V2(x) => Versioned::V2(x.into()), } } } @@ -321,7 +320,7 @@ macro_rules! impl_versioned_try_from { $from:ty, $out:ty, $v1_pat:pat => $v1_out:expr, - $vstaging_pat:pat => $vstaging_out:expr + $v2_pat:pat => $v2_out:expr ) => { impl TryFrom<$from> for $out { type Error = crate::WrongVariant; @@ -330,7 +329,7 @@ macro_rules! impl_versioned_try_from { #[allow(unreachable_patterns)] // when there is only one variant match x { Versioned::V1($v1_pat) => Ok(Versioned::V1($v1_out)), - Versioned::VStaging($vstaging_pat) => Ok(Versioned::VStaging($vstaging_out)), + Versioned::V2($v2_pat) => Ok(Versioned::V2($v2_out)), _ => Err(crate::WrongVariant), } } @@ -343,8 +342,7 @@ macro_rules! impl_versioned_try_from { #[allow(unreachable_patterns)] // when there is only one variant match x { Versioned::V1($v1_pat) => Ok(Versioned::V1($v1_out.clone())), - Versioned::VStaging($vstaging_pat) => - Ok(Versioned::VStaging($vstaging_out.clone())), + Versioned::V2($v2_pat) => Ok(Versioned::V2($v2_out.clone())), _ => Err(crate::WrongVariant), } } @@ -354,7 +352,7 @@ macro_rules! impl_versioned_try_from { /// Version-annotated messages used by the bitfield distribution subsystem. pub type BitfieldDistributionMessage = - Versioned; + Versioned; impl_versioned_full_protocol_from!( BitfieldDistributionMessage, VersionedValidationProtocol, @@ -364,12 +362,12 @@ impl_versioned_try_from!( VersionedValidationProtocol, BitfieldDistributionMessage, v1::ValidationProtocol::BitfieldDistribution(x) => x, - vstaging::ValidationProtocol::BitfieldDistribution(x) => x + v2::ValidationProtocol::BitfieldDistribution(x) => x ); /// Version-annotated messages used by the statement distribution subsystem. pub type StatementDistributionMessage = - Versioned; + Versioned; impl_versioned_full_protocol_from!( StatementDistributionMessage, VersionedValidationProtocol, @@ -379,12 +377,12 @@ impl_versioned_try_from!( VersionedValidationProtocol, StatementDistributionMessage, v1::ValidationProtocol::StatementDistribution(x) => x, - vstaging::ValidationProtocol::StatementDistribution(x) => x + v2::ValidationProtocol::StatementDistribution(x) => x ); /// Version-annotated messages used by the approval distribution subsystem. pub type ApprovalDistributionMessage = - Versioned; + Versioned; impl_versioned_full_protocol_from!( ApprovalDistributionMessage, VersionedValidationProtocol, @@ -394,13 +392,13 @@ impl_versioned_try_from!( VersionedValidationProtocol, ApprovalDistributionMessage, v1::ValidationProtocol::ApprovalDistribution(x) => x, - vstaging::ValidationProtocol::ApprovalDistribution(x) => x + v2::ValidationProtocol::ApprovalDistribution(x) => x ); /// Version-annotated messages used by the gossip-support subsystem (this is void). 
pub type GossipSupportNetworkMessage = - Versioned<v1::GossipSupportNetworkMessage, vstaging::GossipSupportNetworkMessage>; + Versioned<v1::GossipSupportNetworkMessage, v2::GossipSupportNetworkMessage>; // This is a void enum placeholder, so never gets sent over the wire. impl TryFrom<VersionedValidationProtocol> for GossipSupportNetworkMessage { type Error = WrongVariant; @@ -418,7 +416,7 @@ impl<'a> TryFrom<&'a VersionedValidationProtocol> for GossipSupportNetworkMessag /// Version-annotated messages used by the bitfield distribution subsystem. pub type CollatorProtocolMessage = - Versioned<v1::CollatorProtocolMessage, vstaging::CollatorProtocolMessage>; + Versioned<v1::CollatorProtocolMessage, v2::CollatorProtocolMessage>; impl_versioned_full_protocol_from!( CollatorProtocolMessage, VersionedCollationProtocol, @@ -428,7 +426,7 @@ impl_versioned_try_from!( VersionedCollationProtocol, CollatorProtocolMessage, v1::CollationProtocol::CollatorProtocol(x) => x, - vstaging::CollationProtocol::CollatorProtocol(x) => x + v2::CollationProtocol::CollatorProtocol(x) => x ); /// v1 notification protocol types. @@ -589,12 +587,12 @@ pub mod v1 { } } -/// vstaging network protocol types. -pub mod vstaging { +/// v2 network protocol types. +pub mod v2 { use bitvec::{order::Lsb0, slice::BitSlice, vec::BitVec}; use parity_scale_codec::{Decode, Encode}; - use polkadot_primitives::vstaging::{ + use polkadot_primitives::{ CandidateHash, CandidateIndex, CollatorId, CollatorSignature, GroupIndex, Hash, Id as ParaId, UncheckedSignedAvailabilityBitfield, UncheckedSignedStatement, }; diff --git a/polkadot/node/network/protocol/src/peer_set.rs b/polkadot/node/network/protocol/src/peer_set.rs index c2163783c2ce..8dd68b297e30 100644 --- a/polkadot/node/network/protocol/src/peer_set.rs +++ b/polkadot/node/network/protocol/src/peer_set.rs @@ -118,16 +118,9 @@ impl PeerSet { /// Networking layer relies on `get_main_version()` being the version /// of the main protocol name reported by [`PeerSetProtocolNames::get_main_name()`]. pub fn get_main_version(self) -> ProtocolVersion { - #[cfg(not(feature = "network-protocol-staging"))] match self { - PeerSet::Validation => ValidationVersion::V1.into(), - PeerSet::Collation => CollationVersion::V1.into(), - } - - #[cfg(feature = "network-protocol-staging")] - match self { - PeerSet::Validation => ValidationVersion::VStaging.into(), - PeerSet::Collation => CollationVersion::VStaging.into(), + PeerSet::Validation => ValidationVersion::V2.into(), + PeerSet::Collation => CollationVersion::V2.into(), } } @@ -152,7 +145,7 @@ impl PeerSet { PeerSet::Validation => if version == ValidationVersion::V1.into() { Some("validation/1") - } else if version == ValidationVersion::VStaging.into() { + } else if version == ValidationVersion::V2.into() { Some("validation/2") } else { None @@ -160,7 +153,7 @@ impl PeerSet { PeerSet::Collation => if version == CollationVersion::V1.into() { Some("collation/1") - } else if version == CollationVersion::VStaging.into() { + } else if version == CollationVersion::V2.into() { Some("collation/2") } else { None @@ -223,8 +216,8 @@ impl From<ProtocolVersion> for u32 { pub enum ValidationVersion { /// The first version. V1 = 1, - /// The staging version. - VStaging = 2, + /// The second version. + V2 = 2, } /// Supported collation protocol versions. Only versions defined here must be used in the codebase. @@ -232,8 +225,8 @@ pub enum ValidationVersion { pub enum CollationVersion { /// The first version. V1 = 1, - /// The staging version. - VStaging = 2, + /// The second version. + V2 = 2, } /// Marker indicating the version is unknown. 
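The hunks above capture the whole shape of the rename: the two-parameter `Versioned` enum carries one variant per protocol version, the peer-set enums pin the wire discriminants (`V1 = 1`, `V2 = 2`), and with the `network-protocol-staging` feature gone the newest version is advertised unconditionally. A minimal, self-contained sketch of that dispatch pattern follows; the type names mirror the real ones in `polkadot-node-network-protocol`, but the payloads and the `protocol_label` helper are simplified stand-ins for illustration, not the actual API.

```rust
/// A protocol-versioned type, shaped like the two-parameter `Versioned<V1, V2>`
/// enum in the diff above (the payload types here are simplified stand-ins).
#[derive(Debug, Clone, PartialEq, Eq)]
enum Versioned<V1, V2> {
    V1(V1),
    V2(V2),
}

/// Validation protocol versions with the same wire discriminants as
/// `peer_set.rs`: the rename keeps `2` on the wire; only the name changes.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ValidationVersion {
    V1 = 1,
    V2 = 2,
}

/// With the `network-protocol-staging` feature removed, the main advertised
/// version is unconditionally the newest one, as `get_main_version` now does.
fn main_version() -> ValidationVersion {
    ValidationVersion::V2
}

/// Hypothetical helper mapping a version to its protocol label; the real code
/// derives "validation/1" / "validation/2" inside `PeerSet` (see diff above).
fn protocol_label(version: ValidationVersion) -> &'static str {
    match version {
        ValidationVersion::V1 => "validation/1",
        ValidationVersion::V2 => "validation/2",
    }
}

fn main() {
    let messages: Vec<Versioned<&str, &str>> =
        vec![Versioned::V1("bitfield"), Versioned::V2("bitfield")];
    for msg in messages {
        // Exhaustive dispatch on the version, exactly like the renamed match
        // arms (`Versioned::V1(m) => ...`, `Versioned::V2(m) => ...`).
        match msg {
            Versioned::V1(m) => println!("{}: {m}", protocol_label(ValidationVersion::V1)),
            Versioned::V2(m) => println!("{}: {m}", protocol_label(ValidationVersion::V2)),
        }
    }
    assert!(matches!(main_version(), ValidationVersion::V2));
}
```

Because every consumer matches exhaustively on `Versioned`, adding a later version is a compile-checked change: a new variant plus one more pattern argument to the `impl_versioned_try_from!`-style macros, which is why those macros take one pattern per version.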
diff --git a/polkadot/node/network/protocol/src/request_response/mod.rs b/polkadot/node/network/protocol/src/request_response/mod.rs index baed4b846316..96f7adeb29ba 100644 --- a/polkadot/node/network/protocol/src/request_response/mod.rs +++ b/polkadot/node/network/protocol/src/request_response/mod.rs @@ -55,7 +55,7 @@ pub use outgoing::{OutgoingRequest, OutgoingResult, Recipient, Requests, Respons pub mod v1; /// Actual versioned requests and responses that are sent over the wire. -pub mod vstaging; +pub mod v2; /// A protocol per subsystem seems to make the most sense, this way we don't need any dispatching /// within protocols. @@ -66,7 +66,7 @@ pub enum Protocol { /// Protocol for fetching collations from collators. CollationFetchingV1, /// Protocol for fetching collations from collators when async backing is enabled. - CollationFetchingVStaging, + CollationFetchingV2, /// Protocol for fetching seconded PoVs from validators of the same group. PoVFetchingV1, /// Protocol for fetching available data. @@ -78,7 +78,7 @@ pub enum Protocol { /// Protocol for requesting candidates with attestations in statement distribution /// when async backing is enabled. - AttestedCandidateVStaging, + AttestedCandidateV2, } /// Minimum bandwidth we expect for validators - 500Mbit/s is the recommendation, so approximately @@ -147,7 +147,7 @@ const POV_RESPONSE_SIZE: u64 = MAX_POV_SIZE as u64 + 10_000; /// This is `MAX_CODE_SIZE` plus some additional space for protocol overhead. const STATEMENT_RESPONSE_SIZE: u64 = MAX_CODE_SIZE as u64 + 10_000; -/// Maximum response sizes for `AttestedCandidateVStaging`. +/// Maximum response sizes for `AttestedCandidateV2`. /// /// This is `MAX_CODE_SIZE` plus some additional space for protocol overhead and /// additional backing statements. @@ -199,7 +199,7 @@ impl Protocol { request_timeout: CHUNK_REQUEST_TIMEOUT, inbound_queue: tx, }, - Protocol::CollationFetchingV1 | Protocol::CollationFetchingVStaging => + Protocol::CollationFetchingV1 | Protocol::CollationFetchingV2 => RequestResponseConfig { name, fallback_names, @@ -254,7 +254,7 @@ impl Protocol { request_timeout: DISPUTE_REQUEST_TIMEOUT, inbound_queue: tx, }, - Protocol::AttestedCandidateVStaging => RequestResponseConfig { + Protocol::AttestedCandidateV2 => RequestResponseConfig { name, fallback_names, max_request_size: 1_000, @@ -275,7 +275,7 @@ impl Protocol { // as well. Protocol::ChunkFetchingV1 => 100, // 10 seems reasonable, considering group sizes of max 10 validators. - Protocol::CollationFetchingV1 | Protocol::CollationFetchingVStaging => 10, + Protocol::CollationFetchingV1 | Protocol::CollationFetchingV2 => 10, // 10 seems reasonable, considering group sizes of max 10 validators. Protocol::PoVFetchingV1 => 10, // Validators are constantly self-selecting to request available data which may lead @@ -307,7 +307,7 @@ impl Protocol { // failure, so having a good value here is mostly about performance tuning. Protocol::DisputeSendingV1 => 100, - Protocol::AttestedCandidateVStaging => { + Protocol::AttestedCandidateV2 => { // We assume we can utilize up to 70% of the available bandwidth for statements. // This is just a guess/estimate, with the following considerations: If we are // faster than that, queue size will stay low anyway, even if not - requesters will @@ -344,8 +344,8 @@ impl Protocol { Protocol::DisputeSendingV1 => Some("/polkadot/send_dispute/1"), // Introduced after legacy names became legacy. 
- Protocol::AttestedCandidateVStaging => None, - Protocol::CollationFetchingVStaging => None, + Protocol::AttestedCandidateV2 => None, + Protocol::CollationFetchingV2 => None, } } } @@ -402,8 +402,8 @@ impl ReqProtocolNames { Protocol::StatementFetchingV1 => "/req_statement/1", Protocol::DisputeSendingV1 => "/send_dispute/1", - Protocol::CollationFetchingVStaging => "/req_collation/2", - Protocol::AttestedCandidateVStaging => "/req_attested_candidate/2", + Protocol::CollationFetchingV2 => "/req_collation/2", + Protocol::AttestedCandidateV2 => "/req_attested_candidate/2", }; format!("{}{}", prefix, short_name).into() diff --git a/polkadot/node/network/protocol/src/request_response/outgoing.rs b/polkadot/node/network/protocol/src/request_response/outgoing.rs index ddc6b85645bb..c613d5778f5e 100644 --- a/polkadot/node/network/protocol/src/request_response/outgoing.rs +++ b/polkadot/node/network/protocol/src/request_response/outgoing.rs @@ -23,7 +23,7 @@ use sc_network::PeerId; use polkadot_primitives::AuthorityDiscoveryId; -use super::{v1, vstaging, IsRequest, Protocol}; +use super::{v1, v2, IsRequest, Protocol}; /// All requests that can be sent to the network bridge via `NetworkBridgeTxMessage::SendRequest`. #[derive(Debug)] @@ -42,10 +42,10 @@ pub enum Requests { DisputeSendingV1(OutgoingRequest), /// Request a candidate and attestations. - AttestedCandidateVStaging(OutgoingRequest), + AttestedCandidateV2(OutgoingRequest), /// Fetch a collation from a collator which previously announced it. /// Compared to V1 it requires specifying which candidate is requested by its hash. - CollationFetchingVStaging(OutgoingRequest), + CollationFetchingV2(OutgoingRequest), } impl Requests { @@ -54,12 +54,12 @@ impl Requests { match self { Self::ChunkFetchingV1(_) => Protocol::ChunkFetchingV1, Self::CollationFetchingV1(_) => Protocol::CollationFetchingV1, - Self::CollationFetchingVStaging(_) => Protocol::CollationFetchingVStaging, + Self::CollationFetchingV2(_) => Protocol::CollationFetchingV2, Self::PoVFetchingV1(_) => Protocol::PoVFetchingV1, Self::AvailableDataFetchingV1(_) => Protocol::AvailableDataFetchingV1, Self::StatementFetchingV1(_) => Protocol::StatementFetchingV1, Self::DisputeSendingV1(_) => Protocol::DisputeSendingV1, - Self::AttestedCandidateVStaging(_) => Protocol::AttestedCandidateVStaging, + Self::AttestedCandidateV2(_) => Protocol::AttestedCandidateV2, } } @@ -74,12 +74,12 @@ impl Requests { match self { Self::ChunkFetchingV1(r) => r.encode_request(), Self::CollationFetchingV1(r) => r.encode_request(), - Self::CollationFetchingVStaging(r) => r.encode_request(), + Self::CollationFetchingV2(r) => r.encode_request(), Self::PoVFetchingV1(r) => r.encode_request(), Self::AvailableDataFetchingV1(r) => r.encode_request(), Self::StatementFetchingV1(r) => r.encode_request(), Self::DisputeSendingV1(r) => r.encode_request(), - Self::AttestedCandidateVStaging(r) => r.encode_request(), + Self::AttestedCandidateV2(r) => r.encode_request(), } } } diff --git a/polkadot/node/network/protocol/src/request_response/vstaging.rs b/polkadot/node/network/protocol/src/request_response/v2.rs similarity index 93% rename from polkadot/node/network/protocol/src/request_response/vstaging.rs rename to polkadot/node/network/protocol/src/request_response/v2.rs index 34a17b4baaa6..6b90c579237f 100644 --- a/polkadot/node/network/protocol/src/request_response/vstaging.rs +++ b/polkadot/node/network/protocol/src/request_response/v2.rs @@ -18,13 +18,13 @@ use parity_scale_codec::{Decode, Encode}; -use 
polkadot_primitives::vstaging::{ +use polkadot_primitives::{ CandidateHash, CommittedCandidateReceipt, Hash, Id as ParaId, PersistedValidationData, UncheckedSignedStatement, }; use super::{IsRequest, Protocol}; -use crate::vstaging::StatementFilter; +use crate::v2::StatementFilter; /// Request a candidate with statements. #[derive(Debug, Clone, Encode, Decode)] @@ -56,7 +56,7 @@ pub struct AttestedCandidateResponse { impl IsRequest for AttestedCandidateRequest { type Response = AttestedCandidateResponse; - const PROTOCOL: Protocol = Protocol::AttestedCandidateVStaging; + const PROTOCOL: Protocol = Protocol::AttestedCandidateV2; } /// Responses as sent by collators. @@ -76,5 +76,5 @@ pub struct CollationFetchingRequest { impl IsRequest for CollationFetchingRequest { // The response is the same as for V1. type Response = CollationFetchingResponse; - const PROTOCOL: Protocol = Protocol::CollationFetchingVStaging; + const PROTOCOL: Protocol = Protocol::CollationFetchingV2; } diff --git a/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs b/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs index 9ae76047383c..fc2aff0da305 100644 --- a/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs +++ b/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs @@ -21,8 +21,7 @@ use polkadot_node_network_protocol::{ grid_topology::{GridNeighbors, RequiredRouting, SessionBoundGridTopologyStorage}, peer_set::{IsAuthority, PeerSet, ValidationVersion}, v1::{self as protocol_v1, StatementMetadata}, - vstaging as protocol_vstaging, IfDisconnected, PeerId, UnifiedReputationChange as Rep, - Versioned, View, + v2 as protocol_v2, IfDisconnected, PeerId, UnifiedReputationChange as Rep, Versioned, View, }; use polkadot_node_primitives::{ SignedFullStatement, Statement, StatementWithPVD, UncheckedSignedFullStatement, @@ -1062,7 +1061,7 @@ async fn circulate_statement<'a, Context>( "We filter out duplicates above. 
qed.", ); - let (v1_peers_to_send, vstaging_peers_to_send) = peers_to_send + let (v1_peers_to_send, v2_peers_to_send) = peers_to_send .into_iter() .map(|peer_id| { let peer_data = @@ -1074,7 +1073,7 @@ async fn circulate_statement<'a, Context>( }) .partition::, _>(|(_, _, version)| match version { ValidationVersion::V1 => true, - ValidationVersion::VStaging => false, + ValidationVersion::V2 => false, }); // partition is handy here but not if we add more protocol versions let payload = v1_statement_message(relay_parent, stored.statement.clone(), metrics); @@ -1094,24 +1093,24 @@ async fn circulate_statement<'a, Context>( )) .await; } - if !vstaging_peers_to_send.is_empty() { + if !v2_peers_to_send.is_empty() { gum::trace!( target: LOG_TARGET, - ?vstaging_peers_to_send, + ?v2_peers_to_send, ?relay_parent, statement = ?stored.statement, - "Sending statement to vstaging peers", + "Sending statement to v2 peers", ); ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - vstaging_peers_to_send.iter().map(|(p, _, _)| *p).collect(), - compatible_v1_message(ValidationVersion::VStaging, payload.clone()).into(), + v2_peers_to_send.iter().map(|(p, _, _)| *p).collect(), + compatible_v1_message(ValidationVersion::V2, payload.clone()).into(), )) .await; } v1_peers_to_send .into_iter() - .chain(vstaging_peers_to_send) + .chain(v2_peers_to_send) .filter_map(|(peer, needs_dependent, _)| if needs_dependent { Some(peer) } else { None }) .collect() } @@ -1443,10 +1442,8 @@ async fn handle_incoming_message<'a, Context>( let message = match message { Versioned::V1(m) => m, - Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::V1Compatibility( - m, - )) => m, - Versioned::VStaging(_) => { + Versioned::V2(protocol_v2::StatementDistributionMessage::V1Compatibility(m)) => m, + Versioned::V2(_) => { // The higher-level subsystem code is supposed to filter out // all non v1 messages. 
gum::debug!( @@ -2170,8 +2167,7 @@ fn compatible_v1_message( ) -> net_protocol::StatementDistributionMessage { match version { ValidationVersion::V1 => Versioned::V1(message), - ValidationVersion::VStaging => Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::V1Compatibility(message), - ), + ValidationVersion::V2 => + Versioned::V2(protocol_v2::StatementDistributionMessage::V1Compatibility(message)), } } diff --git a/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs b/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs index 17a66a9ff792..ca3038f9b3f3 100644 --- a/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs +++ b/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs @@ -793,7 +793,7 @@ fn receiving_from_one_sends_to_another_and_to_candidate_backing() { assert_matches!( handle.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + RuntimeApiMessage::Request(r, RuntimeApiRequest::AsyncBackingParams(tx)) ) if r == hash_a => { @@ -1033,7 +1033,7 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing( assert_matches!( handle.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + RuntimeApiMessage::Request(r, RuntimeApiRequest::AsyncBackingParams(tx)) ) if r == hash_a => { @@ -1563,7 +1563,7 @@ fn delay_reputation_changes() { assert_matches!( handle.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + RuntimeApiMessage::Request(r, RuntimeApiRequest::AsyncBackingParams(tx)) ) if r == hash_a => { @@ -2043,7 +2043,7 @@ fn share_prioritizes_backing_group() { assert_matches!( handle.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + RuntimeApiMessage::Request(r, RuntimeApiRequest::AsyncBackingParams(tx)) ) if r == hash_a => { @@ -2365,7 +2365,7 @@ fn peer_cant_flood_with_large_statements() { assert_matches!( handle.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + RuntimeApiMessage::Request(r, RuntimeApiRequest::AsyncBackingParams(tx)) ) if r == hash_a => { @@ -2590,7 +2590,7 @@ fn handle_multiple_seconded_statements() { assert_matches!( handle.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + RuntimeApiMessage::Request(r, RuntimeApiRequest::AsyncBackingParams(tx)) ) if r == relay_parent_hash => { diff --git a/polkadot/node/network/statement-distribution/src/lib.rs b/polkadot/node/network/statement-distribution/src/lib.rs index b2eb9cccced4..eead7df5224d 100644 --- a/polkadot/node/network/statement-distribution/src/lib.rs +++ b/polkadot/node/network/statement-distribution/src/lib.rs @@ -26,10 +26,8 @@ use error::{log_error, FatalResult}; use std::time::Duration; use polkadot_node_network_protocol::{ - request_response::{ - v1 as request_v1, vstaging::AttestedCandidateRequest, IncomingRequestReceiver, - }, - vstaging as protocol_vstaging, Versioned, + request_response::{v1 as request_v1, v2::AttestedCandidateRequest, IncomingRequestReceiver}, + v2 as protocol_v2, Versioned, }; use polkadot_node_primitives::StatementWithPVD; use polkadot_node_subsystem::{ @@ -60,7 +58,7 @@ use legacy_v1::{ ResponderMessage as V1ResponderMessage, }; -mod vstaging; +mod v2; const 
LOG_TARGET: &str = "parachain::statement-distribution"; @@ -104,9 +102,9 @@ enum MuxedMessage { /// Messages from spawned v1 (legacy) responder background task. V1Responder(Option), /// Messages from candidate responder background task. - Responder(Option), + Responder(Option), /// Messages from answered requests. - Response(vstaging::UnhandledResponse), + Response(v2::UnhandledResponse), /// Message that a request is ready to be retried. This just acts as a signal that we should /// dispatch all pending requests again. RetryRequest(()), @@ -116,10 +114,10 @@ enum MuxedMessage { impl MuxedMessage { async fn receive( ctx: &mut Context, - state: &mut vstaging::State, + state: &mut v2::State, from_v1_requester: &mut mpsc::Receiver, from_v1_responder: &mut mpsc::Receiver, - from_responder: &mut mpsc::Receiver, + from_responder: &mut mpsc::Receiver, ) -> MuxedMessage { let (request_manager, response_manager) = state.request_and_response_managers(); // We are only fusing here to make `select` happy, in reality we will quit if one of those @@ -128,8 +126,8 @@ impl MuxedMessage { let from_v1_requester = from_v1_requester.next(); let from_v1_responder = from_v1_responder.next(); let from_responder = from_responder.next(); - let receive_response = vstaging::receive_response(response_manager).fuse(); - let retry_request = vstaging::next_retry(request_manager).fuse(); + let receive_response = v2::receive_response(response_manager).fuse(); + let retry_request = v2::next_retry(request_manager).fuse(); futures::pin_mut!( from_orchestra, from_v1_requester, @@ -182,7 +180,7 @@ impl StatementDistributionSubsystem { let mut reputation_delay = new_reputation_delay(); let mut legacy_v1_state = crate::legacy_v1::State::new(self.keystore.clone()); - let mut state = crate::vstaging::State::new(self.keystore.clone()); + let mut state = crate::v2::State::new(self.keystore.clone()); // Sender/Receiver for getting news from our statement fetching tasks. let (v1_req_sender, mut v1_req_receiver) = mpsc::channel(1); @@ -206,7 +204,7 @@ impl StatementDistributionSubsystem { ctx.spawn( "candidate-responder", - vstaging::respond_task( + v2::respond_task( self.req_receiver.take().expect("Mandatory argument to new. qed"), res_sender.clone(), ) @@ -280,14 +278,13 @@ impl StatementDistributionSubsystem { )?; }, MuxedMessage::Responder(result) => { - vstaging::answer_request( + v2::answer_request( &mut state, result.ok_or(FatalError::RequesterReceiverFinished)?, ); }, MuxedMessage::Response(result) => { - vstaging::handle_response(&mut ctx, &mut state, result, &mut self.reputation) - .await; + v2::handle_response(&mut ctx, &mut state, result, &mut self.reputation).await; }, MuxedMessage::RetryRequest(()) => { // A pending request is ready to retry. This is only a signal to call @@ -296,7 +293,7 @@ impl StatementDistributionSubsystem { }, }; - vstaging::dispatch_requests(&mut ctx, &mut state).await; + v2::dispatch_requests(&mut ctx, &mut state).await; } Ok(()) } @@ -304,7 +301,7 @@ impl StatementDistributionSubsystem { async fn handle_subsystem_message( &mut self, ctx: &mut Context, - state: &mut vstaging::State, + state: &mut v2::State, legacy_v1_state: &mut legacy_v1::State, v1_req_sender: &mpsc::Sender, message: FromOrchestra, @@ -318,11 +315,11 @@ impl StatementDistributionSubsystem { })) => { let _timer = metrics.time_active_leaves_update(); - // vstaging should handle activated first because of implicit view. + // v2 should handle activated first because of implicit view. 
if let Some(ref activated) = activated { let mode = prospective_parachains_mode(ctx.sender(), activated.hash).await?; if let ProspectiveParachainsMode::Enabled { .. } = mode { - vstaging::handle_active_leaves_update(ctx, state, activated, mode).await?; + v2::handle_active_leaves_update(ctx, state, activated, mode).await?; } else if let ProspectiveParachainsMode::Disabled = mode { for deactivated in &deactivated { crate::legacy_v1::handle_deactivate_leaf(legacy_v1_state, *deactivated); @@ -339,7 +336,7 @@ impl StatementDistributionSubsystem { for deactivated in &deactivated { crate::legacy_v1::handle_deactivate_leaf(legacy_v1_state, *deactivated); } - vstaging::handle_deactivate_leaves(state, &deactivated); + v2::handle_deactivate_leaves(state, &deactivated); } }, FromOrchestra::Signal(OverseerSignal::BlockFinalized(..)) => { @@ -362,7 +359,7 @@ impl StatementDistributionSubsystem { ) .await?; } else { - vstaging::share_local_statement( + v2::share_local_statement( ctx, state, relay_parent, @@ -399,11 +396,11 @@ impl StatementDistributionSubsystem { let target = match &event { NetworkBridgeEvent::PeerMessage(_, message) => match message { - Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::V1Compatibility(_), + Versioned::V2( + protocol_v2::StatementDistributionMessage::V1Compatibility(_), ) => VersionTarget::Legacy, Versioned::V1(_) => VersionTarget::Legacy, - Versioned::VStaging(_) => VersionTarget::Current, + Versioned::V2(_) => VersionTarget::Current, }, _ => VersionTarget::Both, }; @@ -422,14 +419,12 @@ impl StatementDistributionSubsystem { } if target.targets_current() { - // pass to vstaging. - vstaging::handle_network_update(ctx, state, event, &mut self.reputation) - .await; + // pass to v2. + v2::handle_network_update(ctx, state, event, &mut self.reputation).await; } }, StatementDistributionMessage::Backed(candidate_hash) => { - crate::vstaging::handle_backed_candidate_message(ctx, state, candidate_hash) - .await; + crate::v2::handle_backed_candidate_message(ctx, state, candidate_hash).await; }, }, } diff --git a/polkadot/node/network/statement-distribution/src/vstaging/candidates.rs b/polkadot/node/network/statement-distribution/src/v2/candidates.rs similarity index 99% rename from polkadot/node/network/statement-distribution/src/vstaging/candidates.rs rename to polkadot/node/network/statement-distribution/src/v2/candidates.rs index d6b68510f1c1..e660df5da173 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/candidates.rs +++ b/polkadot/node/network/statement-distribution/src/v2/candidates.rs @@ -27,7 +27,7 @@ use polkadot_node_network_protocol::PeerId; use polkadot_node_subsystem::messages::HypotheticalCandidate; -use polkadot_primitives::vstaging::{ +use polkadot_primitives::{ CandidateHash, CommittedCandidateReceipt, GroupIndex, Hash, Id as ParaId, PersistedValidationData, }; diff --git a/polkadot/node/network/statement-distribution/src/vstaging/cluster.rs b/polkadot/node/network/statement-distribution/src/v2/cluster.rs similarity index 99% rename from polkadot/node/network/statement-distribution/src/vstaging/cluster.rs rename to polkadot/node/network/statement-distribution/src/v2/cluster.rs index 55d847f83157..8adb8353ca92 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/cluster.rs +++ b/polkadot/node/network/statement-distribution/src/v2/cluster.rs @@ -55,7 +55,7 @@ //! and to keep track of what we have sent to other validators in the group and what we may //! continue to send them. 
-use polkadot_primitives::vstaging::{CandidateHash, CompactStatement, ValidatorIndex}; +use polkadot_primitives::{CandidateHash, CompactStatement, ValidatorIndex}; use std::collections::{HashMap, HashSet}; @@ -459,7 +459,7 @@ pub enum RejectOutgoing { #[cfg(test)] mod tests { use super::*; - use polkadot_primitives::vstaging::Hash; + use polkadot_primitives::Hash; #[test] fn rejects_incoming_outside_of_group() { diff --git a/polkadot/node/network/statement-distribution/src/vstaging/grid.rs b/polkadot/node/network/statement-distribution/src/v2/grid.rs similarity index 99% rename from polkadot/node/network/statement-distribution/src/vstaging/grid.rs rename to polkadot/node/network/statement-distribution/src/v2/grid.rs index 4fd77d0ced1c..3d53ff6d321e 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/grid.rs +++ b/polkadot/node/network/statement-distribution/src/v2/grid.rs @@ -60,12 +60,8 @@ //! - which has sent a `BackedCandidateAcknowledgement` //! - 1st-hop nodes do the same thing -use polkadot_node_network_protocol::{ - grid_topology::SessionGridTopology, vstaging::StatementFilter, -}; -use polkadot_primitives::vstaging::{ - CandidateHash, CompactStatement, GroupIndex, Hash, ValidatorIndex, -}; +use polkadot_node_network_protocol::{grid_topology::SessionGridTopology, v2::StatementFilter}; +use polkadot_primitives::{CandidateHash, CompactStatement, GroupIndex, Hash, ValidatorIndex}; use std::collections::{ hash_map::{Entry, HashMap}, diff --git a/polkadot/node/network/statement-distribution/src/vstaging/groups.rs b/polkadot/node/network/statement-distribution/src/v2/groups.rs similarity index 96% rename from polkadot/node/network/statement-distribution/src/vstaging/groups.rs rename to polkadot/node/network/statement-distribution/src/v2/groups.rs index b2daa1c0ac7c..d917b2090529 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/groups.rs +++ b/polkadot/node/network/statement-distribution/src/v2/groups.rs @@ -17,8 +17,7 @@ //! A utility for tracking groups and their members within a session. 
use polkadot_primitives::{ - effective_minimum_backing_votes, - vstaging::{GroupIndex, IndexedVec, ValidatorIndex}, + effective_minimum_backing_votes, GroupIndex, IndexedVec, ValidatorIndex, }; use std::collections::HashMap; diff --git a/polkadot/node/network/statement-distribution/src/vstaging/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs similarity index 97% rename from polkadot/node/network/statement-distribution/src/vstaging/mod.rs rename to polkadot/node/network/statement-distribution/src/v2/mod.rs index 4639720b3221..e11d66c41a04 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs @@ -23,11 +23,11 @@ use polkadot_node_network_protocol::{ peer_set::ValidationVersion, request_response::{ incoming::OutgoingResponse, - vstaging::{AttestedCandidateRequest, AttestedCandidateResponse}, + v2::{AttestedCandidateRequest, AttestedCandidateResponse}, IncomingRequest, IncomingRequestReceiver, Requests, MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS, }, - vstaging::{self as protocol_vstaging, StatementFilter}, + v2::{self as protocol_v2, StatementFilter}, IfDisconnected, PeerId, UnifiedReputationChange as Rep, Versioned, View, }; use polkadot_node_primitives::{ @@ -45,7 +45,7 @@ use polkadot_node_subsystem_util::{ reputation::ReputationAggregator, runtime::{request_min_backing_votes, ProspectiveParachainsMode}, }; -use polkadot_primitives::vstaging::{ +use polkadot_primitives::{ AuthorityDiscoveryId, CandidateHash, CompactStatement, CoreIndex, CoreState, GroupIndex, GroupRotationInfo, Hash, Id as ParaId, IndexedVec, SessionIndex, SessionInfo, SignedStatement, SigningContext, UncheckedSignedStatement, ValidatorId, ValidatorIndex, @@ -323,7 +323,7 @@ pub(crate) async fn handle_network_update( NetworkBridgeEvent::PeerConnected(peer_id, role, protocol_version, mut authority_ids) => { gum::trace!(target: LOG_TARGET, ?peer_id, ?role, ?protocol_version, "Peer connected"); - if protocol_version != ValidationVersion::VStaging.into() { + if protocol_version != ValidationVersion::V2.into() { return } @@ -381,19 +381,19 @@ pub(crate) async fn handle_network_update( }, NetworkBridgeEvent::PeerMessage(peer_id, message) => match message { net_protocol::StatementDistributionMessage::V1(_) => return, - net_protocol::StatementDistributionMessage::VStaging( - protocol_vstaging::StatementDistributionMessage::V1Compatibility(_), + net_protocol::StatementDistributionMessage::V2( + protocol_v2::StatementDistributionMessage::V1Compatibility(_), ) => return, - net_protocol::StatementDistributionMessage::VStaging( - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + net_protocol::StatementDistributionMessage::V2( + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) => handle_incoming_statement(ctx, state, peer_id, relay_parent, statement, reputation) .await, - net_protocol::StatementDistributionMessage::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(inner), + net_protocol::StatementDistributionMessage::V2( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest(inner), ) => handle_incoming_manifest(ctx, state, peer_id, inner, reputation).await, - net_protocol::StatementDistributionMessage::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(inner), + net_protocol::StatementDistributionMessage::V2( + protocol_v2::StatementDistributionMessage::BackedCandidateKnown(inner), ) 
=> handle_incoming_acknowledgement(ctx, state, peer_id, inner, reputation).await, }, NetworkBridgeEvent::PeerViewChange(peer_id, view) => @@ -727,10 +727,8 @@ fn pending_statement_network_message( statement_store .validator_statement(originator, compact) .map(|s| s.as_unchecked().clone()) - .map(|signed| { - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, signed) - }) - .map(|msg| (vec![*peer], Versioned::VStaging(msg).into())) + .map(|signed| protocol_v2::StatementDistributionMessage::Statement(relay_parent, signed)) + .map(|msg| (vec![*peer], Versioned::V2(msg).into())) } /// Send a peer all pending cluster statements for a relay parent. @@ -823,7 +821,7 @@ async fn send_pending_grid_messages( match kind { grid::ManifestKind::Full => { - let manifest = protocol_vstaging::BackedCandidateManifest { + let manifest = protocol_v2::BackedCandidateManifest { relay_parent, candidate_hash, group_index, @@ -847,8 +845,8 @@ async fn send_pending_grid_messages( messages.push(( vec![*peer_id], - Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + Versioned::V2( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest, ), ) @@ -1192,7 +1190,7 @@ async fn circulate_statement( ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( statement_to, - Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::Statement( + Versioned::V2(protocol_v2::StatementDistributionMessage::Statement( relay_parent, statement.as_unchecked().clone(), )) @@ -1672,7 +1670,7 @@ async fn provide_candidate_to_grid( filter.clone(), ); - let manifest = protocol_vstaging::BackedCandidateManifest { + let manifest = protocol_v2::BackedCandidateManifest { relay_parent, candidate_hash, group_index, @@ -1680,16 +1678,15 @@ async fn provide_candidate_to_grid( parent_head_data_hash: confirmed_candidate.parent_head_data_hash(), statement_knowledge: filter.clone(), }; - let acknowledgement = protocol_vstaging::BackedCandidateAcknowledgement { + let acknowledgement = protocol_v2::BackedCandidateAcknowledgement { candidate_hash, statement_knowledge: filter.clone(), }; - let manifest_message = Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), - ); - let ack_message = Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(acknowledgement), + let manifest_message = + Versioned::V2(protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest)); + let ack_message = Versioned::V2( + protocol_v2::StatementDistributionMessage::BackedCandidateKnown(acknowledgement), ); let mut manifest_peers = Vec::new(); @@ -2062,8 +2059,8 @@ fn post_acknowledgement_statement_messages( statement.payload(), ); - messages.push(Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::Statement( + messages.push(Versioned::V2( + protocol_v2::StatementDistributionMessage::Statement( relay_parent, statement.as_unchecked().clone(), ) @@ -2079,7 +2076,7 @@ async fn handle_incoming_manifest( ctx: &mut Context, state: &mut State, peer: PeerId, - manifest: net_protocol::vstaging::BackedCandidateManifest, + manifest: net_protocol::v2::BackedCandidateManifest, reputation: &mut ReputationAggregator, ) { gum::debug!( @@ -2183,14 +2180,14 @@ fn acknowledgement_and_statement_messages( Some(l) => l, }; - let acknowledgement = protocol_vstaging::BackedCandidateAcknowledgement { + let acknowledgement = protocol_v2::BackedCandidateAcknowledgement { 
candidate_hash, statement_knowledge: local_knowledge.clone(), }; - let msg = Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(acknowledgement), - ); + let msg = Versioned::V2(protocol_v2::StatementDistributionMessage::BackedCandidateKnown( + acknowledgement, + )); let mut messages = vec![(vec![peer], msg.into())]; @@ -2221,7 +2218,7 @@ async fn handle_incoming_acknowledgement( ctx: &mut Context, state: &mut State, peer: PeerId, - acknowledgement: net_protocol::vstaging::BackedCandidateAcknowledgement, + acknowledgement: net_protocol::v2::BackedCandidateAcknowledgement, reputation: &mut ReputationAggregator, ) { // The key difference between acknowledgments and full manifests is that only @@ -2521,7 +2518,7 @@ pub(crate) async fn dispatch_requests(ctx: &mut Context, state: &mut St ) { // Peer is supposedly connected. ctx.send_message(NetworkBridgeTxMessage::SendRequests( - vec![Requests::AttestedCandidateVStaging(request)], + vec![Requests::AttestedCandidateV2(request)], IfDisconnected::ImmediateError, )) .await; diff --git a/polkadot/node/network/statement-distribution/src/vstaging/requests.rs b/polkadot/node/network/statement-distribution/src/v2/requests.rs similarity index 99% rename from polkadot/node/network/statement-distribution/src/vstaging/requests.rs rename to polkadot/node/network/statement-distribution/src/v2/requests.rs index 79925f2115d4..f13496024fcf 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/requests.rs +++ b/polkadot/node/network/statement-distribution/src/v2/requests.rs @@ -39,14 +39,14 @@ use crate::LOG_TARGET; use polkadot_node_network_protocol::{ request_response::{ outgoing::{Recipient as RequestRecipient, RequestError}, - vstaging::{AttestedCandidateRequest, AttestedCandidateResponse}, + v2::{AttestedCandidateRequest, AttestedCandidateResponse}, OutgoingRequest, OutgoingResult, MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS, }, - vstaging::StatementFilter, + v2::StatementFilter, PeerId, UnifiedReputationChange as Rep, }; -use polkadot_primitives::vstaging::{ - CandidateHash, CommittedCandidateReceipt, CompactStatement, GroupIndex, Hash, ParaId, +use polkadot_primitives::{ + CandidateHash, CommittedCandidateReceipt, CompactStatement, GroupIndex, Hash, Id as ParaId, PersistedValidationData, SessionIndex, SignedStatement, SigningContext, ValidatorId, ValidatorIndex, }; diff --git a/polkadot/node/network/statement-distribution/src/vstaging/statement_store.rs b/polkadot/node/network/statement-distribution/src/v2/statement_store.rs similarity index 98% rename from polkadot/node/network/statement-distribution/src/vstaging/statement_store.rs rename to polkadot/node/network/statement-distribution/src/v2/statement_store.rs index 9ea926f24aa8..74db431eda1d 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/statement_store.rs +++ b/polkadot/node/network/statement-distribution/src/v2/statement_store.rs @@ -24,8 +24,8 @@ //! groups, and views based on the validators themselves. 
use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; -use polkadot_node_network_protocol::vstaging::StatementFilter; -use polkadot_primitives::vstaging::{ +use polkadot_node_network_protocol::v2::StatementFilter; +use polkadot_primitives::{ CandidateHash, CompactStatement, GroupIndex, SignedStatement, ValidatorIndex, }; use std::collections::hash_map::{Entry as HEntry, HashMap}; diff --git a/polkadot/node/network/statement-distribution/src/vstaging/tests/cluster.rs b/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs similarity index 95% rename from polkadot/node/network/statement-distribution/src/vstaging/tests/cluster.rs rename to polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs index 50d0477eb516..80dec1d75ab9 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/tests/cluster.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs @@ -103,8 +103,8 @@ fn share_seconded_circulated_to_cluster() { overseer.recv().await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::Statement( + Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::Statement( r, s, ) @@ -173,7 +173,7 @@ fn cluster_valid_statement_before_seconded_ignored() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement( + protocol_v2::StatementDistributionMessage::Statement( relay_parent, signed_valid.as_unchecked().clone(), ), @@ -252,7 +252,7 @@ fn cluster_statement_bad_signature() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement( + protocol_v2::StatementDistributionMessage::Statement( relay_parent, statement.clone(), ), @@ -327,7 +327,7 @@ fn useful_cluster_statement_from_non_cluster_peer_rejected() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -388,7 +388,7 @@ fn statement_from_non_cluster_originator_unexpected() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -465,7 +465,7 @@ fn seconded_statement_leads_to_request() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -593,8 +593,8 @@ fn cluster_statements_shared_seconded_first() { assert_matches!( &messages[0].1, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::Statement( + Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::Statement( r, s, ) @@ -604,8 +604,8 @@ fn cluster_statements_shared_seconded_first() { assert_matches!( &messages[1].1, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::Statement( + Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution( + 
protocol_v2::StatementDistributionMessage::Statement( r, s, ) @@ -699,8 +699,8 @@ fn cluster_accounts_for_implicit_view() { overseer.recv().await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::Statement( + Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::Statement( r, s, ) @@ -749,8 +749,8 @@ fn cluster_accounts_for_implicit_view() { &messages[0], ( peers, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::Statement( + Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::Statement( r, s, ) @@ -836,10 +836,7 @@ fn cluster_messages_imported_after_confirmed_candidate_importable_check() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement( - relay_parent, - a_seconded, - ), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, a_seconded), ) .await; @@ -971,10 +968,7 @@ fn cluster_messages_imported_after_new_leaf_importable_check() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement( - relay_parent, - a_seconded, - ), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, a_seconded), ) .await; @@ -1191,7 +1185,7 @@ fn ensure_seconding_limit_is_respected() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -1216,7 +1210,7 @@ fn ensure_seconding_limit_is_respected() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -1241,7 +1235,7 @@ fn ensure_seconding_limit_is_respected() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; diff --git a/polkadot/node/network/statement-distribution/src/vstaging/tests/grid.rs b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs similarity index 95% rename from polkadot/node/network/statement-distribution/src/vstaging/tests/grid.rs rename to polkadot/node/network/statement-distribution/src/v2/tests/grid.rs index 0739f3019437..a0af95798235 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/tests/grid.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs @@ -17,9 +17,7 @@ use super::*; use bitvec::order::Lsb0; -use polkadot_node_network_protocol::vstaging::{ - BackedCandidateAcknowledgement, BackedCandidateManifest, -}; +use polkadot_node_network_protocol::v2::{BackedCandidateAcknowledgement, BackedCandidateManifest}; use polkadot_node_subsystem::messages::CandidateBackingMessage; use polkadot_primitives_test_helpers::make_candidate; @@ -156,7 +154,7 @@ fn backed_candidate_leads_to_advertisement() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + 
protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -181,7 +179,7 @@ fn backed_candidate_leads_to_advertisement() { send_peer_message( &mut overseer, peer_b.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -210,9 +208,9 @@ fn backed_candidate_leads_to_advertisement() { AllMessages:: NetworkBridgeTx( NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging( - protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), + Versioned::V2( + protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest), ), ), ) @@ -349,7 +347,7 @@ fn received_advertisement_before_confirmation_leads_to_request() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), + protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest), ) .await; @@ -534,7 +532,7 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -603,9 +601,9 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() { AllMessages:: NetworkBridgeTx( NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging( - protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(ack), + Versioned::V2( + protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::BackedCandidateKnown(ack), ), ), ) @@ -629,7 +627,7 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() { send_peer_message( &mut overseer, peer_d.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -654,8 +652,8 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() { assert_matches!( &messages[0].1, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(ack) + Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::BackedCandidateKnown(ack) )) if *ack == expected_ack ); } @@ -782,7 +780,7 @@ fn received_advertisement_after_confirmation_before_backing() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -842,7 +840,7 @@ fn received_advertisement_after_confirmation_before_backing() { send_peer_message( &mut overseer, peer_d.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -951,7 +949,7 @@ fn additional_statements_are_shared_after_manifest_exchange() { send_peer_message( &mut overseer, peer_c.clone(), - 
protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -1066,9 +1064,9 @@ fn additional_statements_are_shared_after_manifest_exchange() { AllMessages:: NetworkBridgeTx( NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging( - protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(ack), + Versioned::V2( + protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::BackedCandidateKnown(ack), ), ), ) @@ -1104,7 +1102,7 @@ fn additional_statements_are_shared_after_manifest_exchange() { send_peer_message( &mut overseer, peer_d.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -1130,15 +1128,15 @@ fn additional_statements_are_shared_after_manifest_exchange() { assert_matches!( &messages[0].1, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(ack) + Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::BackedCandidateKnown(ack) )) if *ack == expected_ack ); assert_matches!( &messages[1].1, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::Statement(r, s) + Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::Statement(r, s) )) if *r == relay_parent && s.unchecked_payload() == &CompactStatement::Seconded(candidate_hash) && s.unchecked_validator_index() == v_e ); } @@ -1281,7 +1279,7 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -1306,7 +1304,7 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() { send_peer_message( &mut overseer, peer_b.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -1357,8 +1355,8 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() { assert_matches!( &messages[0].1, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest) + Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest) )) => { assert_eq!(*manifest, expected_manifest); } @@ -1504,7 +1502,7 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -1529,7 +1527,7 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() { send_peer_message( &mut overseer, peer_b.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + 
protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -1558,9 +1556,9 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() { AllMessages:: NetworkBridgeTx( NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging( - protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), + Versioned::V2( + protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest), ), ), ) @@ -1692,7 +1690,7 @@ fn grid_statements_imported_to_backing() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -1907,7 +1905,7 @@ fn advertisements_rejected_from_incorrect_peers() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -1925,7 +1923,7 @@ fn advertisements_rejected_from_incorrect_peers() { send_peer_message( &mut overseer, peer_b.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), + protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest), ) .await; @@ -2029,7 +2027,7 @@ fn manifest_rejected_with_unknown_relay_parent() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -2131,7 +2129,7 @@ fn manifest_rejected_when_not_a_validator() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -2238,7 +2236,7 @@ fn manifest_rejected_when_group_does_not_match_para() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -2370,7 +2368,7 @@ fn peer_reported_for_advertisement_conflicting_with_confirmed_candidate() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -2439,7 +2437,7 @@ fn peer_reported_for_advertisement_conflicting_with_confirmed_candidate() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), + protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest), ) .await; diff --git a/polkadot/node/network/statement-distribution/src/vstaging/tests/mod.rs b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs similarity index 98% rename from polkadot/node/network/statement-distribution/src/vstaging/tests/mod.rs rename to polkadot/node/network/statement-distribution/src/v2/tests/mod.rs index 48ceebb1949b..4150377a0c6c 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/tests/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs @@ -31,7 +31,7 @@ use 
polkadot_node_subsystem::messages::{ }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::TimeoutExt; -use polkadot_primitives::vstaging::{ +use polkadot_primitives::{ AssignmentPair, AsyncBackingParams, BlockNumber, CommittedCandidateReceipt, CoreState, GroupRotationInfo, HeadData, Header, IndexedVec, PersistedValidationData, ScheduledCore, SessionIndex, SessionInfo, ValidatorPair, @@ -380,7 +380,7 @@ async fn handle_leaf_activation( assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx)) ) if parent == *hash => { tx.send(Ok(test_state.config.async_backing_params.unwrap_or(DEFAULT_ASYNC_BACKING_PARAMETERS))).unwrap(); } @@ -479,7 +479,7 @@ async fn handle_sent_request( assert_eq!(requests.len(), 1); assert_matches!( requests.pop().unwrap(), - Requests::AttestedCandidateVStaging(outgoing) => { + Requests::AttestedCandidateV2(outgoing) => { assert_eq!(outgoing.peer, Recipient::Peer(peer)); assert_eq!(outgoing.payload.candidate_hash, candidate_hash); assert_eq!(outgoing.payload.mask, mask); @@ -537,7 +537,7 @@ async fn connect_peer( NetworkBridgeEvent::PeerConnected( peer, ObservedRole::Authority, - ValidationVersion::VStaging.into(), + ValidationVersion::V2.into(), authority_ids, ), ), @@ -570,12 +570,12 @@ async fn send_peer_view_change(virtual_overseer: &mut VirtualOverseer, peer: Pee async fn send_peer_message( virtual_overseer: &mut VirtualOverseer, peer: PeerId, - message: protocol_vstaging::StatementDistributionMessage, + message: protocol_v2::StatementDistributionMessage, ) { virtual_overseer .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( - NetworkBridgeEvent::PeerMessage(peer, Versioned::VStaging(message)), + NetworkBridgeEvent::PeerMessage(peer, Versioned::V2(message)), ), }) .await; diff --git a/polkadot/node/network/statement-distribution/src/vstaging/tests/requests.rs b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs similarity index 95% rename from polkadot/node/network/statement-distribution/src/vstaging/tests/requests.rs rename to polkadot/node/network/statement-distribution/src/v2/tests/requests.rs index 5eef5809b4d4..0734b75c9712 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/tests/requests.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs @@ -19,7 +19,7 @@ use super::*; use bitvec::order::Lsb0; use parity_scale_codec::{Decode, Encode}; use polkadot_node_network_protocol::{ - request_response::vstaging as request_vstaging, vstaging::BackedCandidateManifest, + request_response::v2 as request_v2, v2::BackedCandidateManifest, }; use polkadot_primitives_test_helpers::make_candidate; use sc_network::config::{ @@ -109,10 +109,7 @@ fn cluster_peer_allowed_to_send_incomplete_statements() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement( - relay_parent, - a_seconded, - ), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, a_seconded), ) .await; @@ -164,9 +161,9 @@ fn cluster_peer_allowed_to_send_incomplete_statements() { AllMessages:: NetworkBridgeTx( NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging( - protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::Statement(hash, 
statement), + Versioned::V2( + protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::Statement(hash, statement), ), ), ) @@ -304,7 +301,7 @@ fn peer_reported_for_providing_statements_meant_to_be_masked_out() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -376,7 +373,7 @@ fn peer_reported_for_providing_statements_meant_to_be_masked_out() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -453,7 +450,7 @@ fn peer_reported_for_providing_statements_meant_to_be_masked_out() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -568,9 +565,7 @@ fn peer_reported_for_not_enough_statements() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( - manifest.clone(), - ), + protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest.clone()), ) .await; @@ -752,10 +747,7 @@ fn peer_reported_for_duplicate_statements() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement( - relay_parent, - a_seconded, - ), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, a_seconded), ) .await; @@ -812,9 +804,9 @@ fn peer_reported_for_duplicate_statements() { AllMessages:: NetworkBridgeTx( NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging( - protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::Statement(hash, statement), + Versioned::V2( + protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::Statement(hash, statement), ), ), ) @@ -916,10 +908,7 @@ fn peer_reported_for_providing_statements_with_invalid_signatures() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement( - relay_parent, - a_seconded, - ), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, a_seconded), ) .await; @@ -1058,10 +1047,7 @@ fn peer_reported_for_providing_statements_with_wrong_validator_id() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement( - relay_parent, - a_seconded, - ), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, a_seconded), ) .await; @@ -1191,7 +1177,7 @@ fn local_node_sanity_checks_incoming_requests() { .send(RawIncomingRequest { // Request from peer that received manifest. 
peer: peer_c, - payload: request_vstaging::AttestedCandidateRequest { + payload: request_v2::AttestedCandidateRequest { candidate_hash: candidate.hash(), mask: mask.clone(), } @@ -1225,8 +1211,8 @@ fn local_node_sanity_checks_incoming_requests() { overseer.recv().await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::Statement( + Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::Statement( r, s, ) @@ -1250,7 +1236,7 @@ fn local_node_sanity_checks_incoming_requests() { .send(RawIncomingRequest { // Request from peer that received manifest. peer: peer_d, - payload: request_vstaging::AttestedCandidateRequest { + payload: request_v2::AttestedCandidateRequest { candidate_hash: candidate.hash(), mask: mask.clone(), } @@ -1269,10 +1255,7 @@ fn local_node_sanity_checks_incoming_requests() { let response = state .send_request( peer_c, - request_vstaging::AttestedCandidateRequest { - candidate_hash: candidate.hash(), - mask, - }, + request_v2::AttestedCandidateRequest { candidate_hash: candidate.hash(), mask }, ) .await .await; @@ -1296,7 +1279,7 @@ fn local_node_sanity_checks_incoming_requests() { let response = state .send_request( peer_c, - request_vstaging::AttestedCandidateRequest { + request_v2::AttestedCandidateRequest { candidate_hash: candidate.hash(), mask: mask.clone(), }, @@ -1455,7 +1438,7 @@ fn local_node_respects_statement_mask() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -1479,7 +1462,7 @@ fn local_node_respects_statement_mask() { send_peer_message( &mut overseer, peer_b.clone(), - protocol_vstaging::StatementDistributionMessage::Statement( + protocol_v2::StatementDistributionMessage::Statement( relay_parent, statement_b.clone(), ), @@ -1511,9 +1494,9 @@ fn local_node_respects_statement_mask() { AllMessages:: NetworkBridgeTx( NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging( - protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), + Versioned::V2( + protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest), ), ), ) @@ -1547,19 +1530,16 @@ fn local_node_respects_statement_mask() { let response = state .send_request( peer_c, - request_vstaging::AttestedCandidateRequest { - candidate_hash: candidate.hash(), - mask, - }, + request_v2::AttestedCandidateRequest { candidate_hash: candidate.hash(), mask }, ) .await .await; let expected_statements = vec![statement_b]; assert_matches!(response, full_response => { - // Response is the same for vstaging. - let request_vstaging::AttestedCandidateResponse { candidate_receipt, persisted_validation_data, statements } = - request_vstaging::AttestedCandidateResponse::decode( + // Response is the same for v2. 
+ let request_v2::AttestedCandidateResponse { candidate_receipt, persisted_validation_data, statements } = + request_v2::AttestedCandidateResponse::decode( &mut full_response.result.expect("We should have a proper answer").as_ref(), ).expect("Decoding should work"); assert_eq!(candidate_receipt, candidate); @@ -1683,7 +1663,7 @@ fn should_delay_before_retrying_dropped_requests() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -1696,7 +1676,7 @@ fn should_delay_before_retrying_dropped_requests() { assert_eq!(requests.len(), 1); assert_matches!( requests.pop().unwrap(), - Requests::AttestedCandidateVStaging(outgoing) => { + Requests::AttestedCandidateV2(outgoing) => { assert_eq!(outgoing.peer, Recipient::Peer(peer_c)); assert_eq!(outgoing.payload.candidate_hash, candidate_hash_1); assert_eq!(outgoing.payload.mask, mask); @@ -1729,7 +1709,7 @@ fn should_delay_before_retrying_dropped_requests() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index ee092e277332..ba5976fdceef 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -223,7 +223,3 @@ runtime-metrics = [ "rococo-runtime?/runtime-metrics", "westend-runtime?/runtime-metrics", ] - -network-protocol-staging = [ - "polkadot-node-network-protocol/network-protocol-staging", -] diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index 5286631fbbb5..5991744dc3af 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -854,7 +854,7 @@ pub fn new_full( let (collation_req_v1_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); net_config.add_request_response_protocol(cfg); - let (collation_req_vstaging_receiver, cfg) = + let (collation_req_v2_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); net_config.add_request_response_protocol(cfg); let (available_data_req_receiver, cfg) = @@ -862,7 +862,7 @@ pub fn new_full( net_config.add_request_response_protocol(cfg); let (statement_req_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); net_config.add_request_response_protocol(cfg); - let (candidate_req_vstaging_receiver, cfg) = + let (candidate_req_v2_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); net_config.add_request_response_protocol(cfg); let (dispute_req_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); @@ -1051,10 +1051,10 @@ pub fn new_full( pov_req_receiver, chunk_req_receiver, collation_req_v1_receiver, - collation_req_vstaging_receiver, + collation_req_v2_receiver, available_data_req_receiver, statement_req_receiver, - candidate_req_vstaging_receiver, + candidate_req_v2_receiver, dispute_req_receiver, registry: prometheus_registry.as_ref(), spawner, diff --git a/polkadot/node/service/src/overseer.rs b/polkadot/node/service/src/overseer.rs index 33127b638e5a..7d1add118241 100644 --- a/polkadot/node/service/src/overseer.rs +++ b/polkadot/node/service/src/overseer.rs @@ -28,7 +28,7 @@ use polkadot_node_core_dispute_coordinator::Config as DisputeCoordinatorConfig; use polkadot_node_network_protocol::{ 
peer_set::PeerSetProtocolNames,
 request_response::{
- v1 as request_v1, vstaging as request_vstaging, IncomingRequestReceiver, ReqProtocolNames,
+ v1 as request_v1, v2 as request_v2, IncomingRequestReceiver, ReqProtocolNames,
 },
 };
 #[cfg(any(feature = "malus", test))]
@@ -104,17 +104,15 @@ where
 pub chunk_req_receiver: IncomingRequestReceiver<request_v1::ChunkFetchingRequest>,
 /// Collations request receiver for network protocol v1.
 pub collation_req_v1_receiver: IncomingRequestReceiver<request_v1::CollationFetchingRequest>,
- /// Collations request receiver for network protocol vstaging.
- pub collation_req_vstaging_receiver:
- IncomingRequestReceiver<request_vstaging::CollationFetchingRequest>,
+ /// Collations request receiver for network protocol v2.
+ pub collation_req_v2_receiver: IncomingRequestReceiver<request_v2::CollationFetchingRequest>,
 /// Receiver for available data requests.
 pub available_data_req_receiver: IncomingRequestReceiver<request_v1::AvailableDataFetchingRequest>,
 /// Receiver for incoming large statement requests.
 pub statement_req_receiver: IncomingRequestReceiver<request_v1::StatementFetchingRequest>,
 /// Receiver for incoming candidate requests.
- pub candidate_req_vstaging_receiver:
- IncomingRequestReceiver<request_vstaging::AttestedCandidateRequest>,
+ pub candidate_req_v2_receiver: IncomingRequestReceiver<request_v2::AttestedCandidateRequest>,
 /// Receiver for incoming disputes.
 pub dispute_req_receiver: IncomingRequestReceiver<DisputeRequest>,
 /// Prometheus registry, commonly used for production systems, less so for test.
@@ -158,10 +156,10 @@ pub fn prepared_overseer_builder(
 pov_req_receiver,
 chunk_req_receiver,
 collation_req_v1_receiver,
- collation_req_vstaging_receiver,
+ collation_req_v2_receiver,
 available_data_req_receiver,
 statement_req_receiver,
- candidate_req_vstaging_receiver,
+ candidate_req_v2_receiver,
 dispute_req_receiver,
 registry,
 spawner,
@@ -288,7 +286,7 @@ where
 peer_id: network_service.local_peer_id(),
 collator_pair,
 request_receiver_v1: collation_req_v1_receiver,
- request_receiver_vstaging: collation_req_vstaging_receiver,
+ request_receiver_v2: collation_req_v2_receiver,
 metrics: Metrics::register(registry)?,
 },
 IsParachainNode::FullNode => ProtocolSide::None,
@@ -309,7 +307,7 @@ where
 .statement_distribution(StatementDistributionSubsystem::new(
 keystore.clone(),
 statement_req_receiver,
- candidate_req_vstaging_receiver,
+ candidate_req_v2_receiver,
 Metrics::register(registry)?,
 rand::rngs::StdRng::from_entropy(),
 ))
diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs
index a53908d3c2cb..eb94f1696c9d 100644
--- a/polkadot/node/subsystem-types/src/messages.rs
+++ b/polkadot/node/subsystem-types/src/messages.rs
@@ -39,12 +39,12 @@ use polkadot_node_primitives::{
 ValidationResult,
 };
 use polkadot_primitives::{
- slashing, vstaging as vstaging_primitives, AuthorityDiscoveryId, BackedCandidate, BlockNumber,
- CandidateEvent, CandidateHash, CandidateIndex, CandidateReceipt, CollatorId,
- CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupIndex,
- GroupRotationInfo, Hash, Header as BlockHeader, Id as ParaId, InboundDownwardMessage,
- InboundHrmpMessage, MultiDisputeStatementSet, OccupiedCoreAssumption, PersistedValidationData,
- PvfCheckStatement, PvfExecTimeoutKind, SessionIndex, SessionInfo, SignedAvailabilityBitfield,
+ async_backing, slashing, AuthorityDiscoveryId, BackedCandidate, BlockNumber, CandidateEvent,
+ CandidateHash, CandidateIndex, CandidateReceipt, CollatorId, CommittedCandidateReceipt,
+ CoreState, DisputeState, ExecutorParams, GroupIndex, GroupRotationInfo, Hash,
+ Header as BlockHeader, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage,
+ MultiDisputeStatementSet, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement,
+ PvfExecTimeoutKind,
SessionIndex, SessionInfo, SignedAvailabilityBitfield,
 SignedAvailabilityBitfields, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex,
 ValidatorSignature,
 };
@@ -695,14 +695,12 @@ pub enum RuntimeApiRequest {
 ),
 /// Get the minimum required backing votes.
 MinimumBackingVotes(SessionIndex, RuntimeApiSender<u32>),
- /// Get the backing state of the given para.
- /// This is a staging API that will not be available on production runtimes.
- StagingParaBackingState(ParaId, RuntimeApiSender<Option<vstaging_primitives::BackingState>>),
+ ParaBackingState(ParaId, RuntimeApiSender<Option<async_backing::BackingState>>),
 /// Get candidate's acceptance limitations for asynchronous backing for a relay parent.
 ///
 /// If it's not supported by the Runtime, the async backing is said to be disabled.
- StagingAsyncBackingParams(RuntimeApiSender<vstaging_primitives::AsyncBackingParams>),
+ AsyncBackingParams(RuntimeApiSender<async_backing::AsyncBackingParams>),
 }
 
 impl RuntimeApiRequest {
@@ -726,10 +724,8 @@ impl RuntimeApiRequest {
 /// `MinimumBackingVotes`
 pub const MINIMUM_BACKING_VOTES_RUNTIME_REQUIREMENT: u32 = 6;
 
- /// Minimum version for backing state, required for async backing.
- ///
- /// 99 for now, should be adjusted to VSTAGING/actual runtime version once released.
- pub const STAGING_BACKING_STATE: u32 = 99;
+ /// Minimum version to enable asynchronous backing: `AsyncBackingParams` and `ParaBackingState`.
+ pub const STAGING_BACKING_STATE: u32 = 7;
 }
 
 /// A message to the Runtime API subsystem.
diff --git a/polkadot/node/subsystem-types/src/runtime_client.rs b/polkadot/node/subsystem-types/src/runtime_client.rs
index 06aa351efb4b..3007e985b4f7 100644
--- a/polkadot/node/subsystem-types/src/runtime_client.rs
+++ b/polkadot/node/subsystem-types/src/runtime_client.rs
@@ -16,9 +16,9 @@
 use async_trait::async_trait;
 use polkadot_primitives::{
- runtime_api::ParachainHost, vstaging, Block, BlockNumber, CandidateCommitments, CandidateEvent,
- CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams,
- GroupRotationInfo, Hash, Id, InboundDownwardMessage, InboundHrmpMessage,
+ async_backing, runtime_api::ParachainHost, slashing, Block, BlockNumber, CandidateCommitments,
+ CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState,
+ ExecutorParams, GroupRotationInfo, Hash, Id, InboundDownwardMessage, InboundHrmpMessage,
 OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes,
 SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex,
 ValidatorSignature,
@@ -190,7 +190,7 @@ pub trait RuntimeApiSubsystemClient {
 async fn unapplied_slashes(
 &self,
 at: Hash,
- ) -> Result<Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)>, ApiError>;
+ ) -> Result<Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>, ApiError>;
 
 /// Returns a merkle proof of a validator session key in a past session.
 ///
@@ -199,7 +199,7 @@ pub trait RuntimeApiSubsystemClient {
 &self,
 at: Hash,
 validator_id: ValidatorId,
- ) -> Result<Option<vstaging::slashing::OpaqueKeyOwnershipProof>, ApiError>;
+ ) -> Result<Option<slashing::OpaqueKeyOwnershipProof>, ApiError>;
 
 /// Submits an unsigned extrinsic to slash validators who lost a dispute about
 /// a candidate of a past session.
@@ -208,8 +208,8 @@ pub trait RuntimeApiSubsystemClient {
 async fn submit_report_dispute_lost(
 &self,
 at: Hash,
- dispute_proof: vstaging::slashing::DisputeProof,
- key_ownership_proof: vstaging::slashing::OpaqueKeyOwnershipProof,
+ dispute_proof: slashing::DisputeProof,
+ key_ownership_proof: slashing::OpaqueKeyOwnershipProof,
 ) -> Result<Option<()>, ApiError>;
 
 // === BABE API ===
@@ -232,7 +232,7 @@ pub trait RuntimeApiSubsystemClient {
 session_index: SessionIndex,
 ) -> Result<Option<ExecutorParams>, ApiError>;
 
- // === STAGING v6 ===
+ // === v6 ===
 
 /// Get the minimum number of backing votes.
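 ///
 /// For illustration, subsystems typically reach this via the message bus
 /// rather than calling the client directly; a rough sketch (the `sender`
 /// and surrounding error handling are assumed context, not defined here):
 ///
 /// ```ignore
 /// let (tx, rx) = oneshot::channel();
 /// sender
 ///     .send_message(RuntimeApiMessage::Request(
 ///         relay_parent,
 ///         RuntimeApiRequest::MinimumBackingVotes(session_index, tx),
 ///     ))
 ///     .await;
 /// let min_backing_votes = rx.await??;
 /// ```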
async fn minimum_backing_votes(
 &self,
 at: Hash,
 session_index: SessionIndex,
 ) -> Result<u32, ApiError>;
 
- // === Asynchronous backing API ===
+ // === v7: Asynchronous backing API ===
 
 /// Returns candidate's acceptance limitations for asynchronous backing for a relay parent.
- async fn staging_async_backing_params(
+ async fn async_backing_params(
 &self,
 at: Hash,
- ) -> Result<vstaging::AsyncBackingParams, ApiError>;
+ ) -> Result<async_backing::AsyncBackingParams, ApiError>;
 
 /// Returns the state of parachain backing for a given para.
- /// This is a staging method! Do not use on production runtimes!
- async fn staging_para_backing_state(
+ async fn para_backing_state(
 &self,
 at: Hash,
 para_id: Id,
- ) -> Result<Option<vstaging::BackingState>, ApiError>;
+ ) -> Result<Option<async_backing::BackingState>, ApiError>;
 }
 
 /// Default implementation of [`RuntimeApiSubsystemClient`] using the client.
@@ -454,7 +454,7 @@ where
 async fn unapplied_slashes(
 &self,
 at: Hash,
- ) -> Result<Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)>, ApiError> {
+ ) -> Result<Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>, ApiError> {
 self.client.runtime_api().unapplied_slashes(at)
 }
 
@@ -462,15 +462,15 @@ where
 &self,
 at: Hash,
 validator_id: ValidatorId,
- ) -> Result<Option<vstaging::slashing::OpaqueKeyOwnershipProof>, ApiError> {
+ ) -> Result<Option<slashing::OpaqueKeyOwnershipProof>, ApiError> {
 self.client.runtime_api().key_ownership_proof(at, validator_id)
 }
 
 async fn submit_report_dispute_lost(
 &self,
 at: Hash,
- dispute_proof: vstaging::slashing::DisputeProof,
- key_ownership_proof: vstaging::slashing::OpaqueKeyOwnershipProof,
+ dispute_proof: slashing::DisputeProof,
+ key_ownership_proof: slashing::OpaqueKeyOwnershipProof,
 ) -> Result<Option<()>, ApiError> {
 let mut runtime_api = self.client.runtime_api();
 
@@ -489,19 +489,19 @@ where
 self.client.runtime_api().minimum_backing_votes(at)
 }
 
- async fn staging_para_backing_state(
+ async fn para_backing_state(
 &self,
 at: Hash,
 para_id: Id,
- ) -> Result<Option<vstaging::BackingState>, ApiError> {
- self.client.runtime_api().staging_para_backing_state(at, para_id)
+ ) -> Result<Option<async_backing::BackingState>, ApiError> {
+ self.client.runtime_api().para_backing_state(at, para_id)
 }
 
 /// Returns candidate's acceptance limitations for asynchronous backing for a relay parent.
- async fn staging_async_backing_params(
+ async fn async_backing_params(
 &self,
 at: Hash,
- ) -> Result<vstaging::AsyncBackingParams, ApiError> {
- self.client.runtime_api().staging_async_backing_params(at)
+ ) -> Result<async_backing::AsyncBackingParams, ApiError> {
+ self.client.runtime_api().async_backing_params(at)
 }
 }
diff --git a/polkadot/node/subsystem-util/src/backing_implicit_view.rs b/polkadot/node/subsystem-util/src/backing_implicit_view.rs
index 83c15fdef959..a14536a17666 100644
--- a/polkadot/node/subsystem-util/src/backing_implicit_view.rs
+++ b/polkadot/node/subsystem-util/src/backing_implicit_view.rs
@@ -20,7 +20,7 @@ use polkadot_node_subsystem::{
 messages::{ChainApiMessage, ProspectiveParachainsMessage},
 SubsystemSender,
 };
-use polkadot_primitives::vstaging::{BlockNumber, Hash, Id as ParaId};
+use polkadot_primitives::{BlockNumber, Hash, Id as ParaId};
 
 use std::collections::HashMap;
 
diff --git a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs
index 1487077d9eda..c7b91bffb3d7 100644
--- a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs
+++ b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs
@@ -11,4 +11,1437 @@
 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 // GNU General Public License for more details.
 
-pub mod staging;
+/// # Overview
+///
+/// A set of utilities for node-side code to emulate the logic the runtime uses for checking
+/// parachain blocks in order to build prospective parachains that are produced ahead of the
+/// relay chain.
These utilities allow the node-side to predict, with high accuracy, what
+/// the relay-chain will accept in the near future.
+///
+/// This module has 2 key data types: [`Constraints`] and [`Fragment`]s. [`Constraints`]
+/// exhaustively define the set of valid inputs and outputs to parachain execution. A
+/// [`Fragment`] indicates a parachain block, anchored to the relay-chain at a particular
+/// relay-chain block, known as the relay-parent.
+///
+/// ## Fragment Validity
+///
+/// Every relay-parent is implicitly associated with a unique set of [`Constraints`] that
+/// describe the properties that must be true for a block to be included in a direct child of
+/// that block, assuming there is no intermediate parachain block pending availability.
+///
+/// However, the key factor that makes asynchronously-grown prospective chains
+/// possible is the fact that the relay-chain accepts candidate blocks based on whether they
+/// are valid under the constraints of the present moment, not based on whether they were
+/// valid at the time of construction.
+///
+/// As such, [`Fragment`]s are often, but not always, constructed in such a way that they are
+/// invalid at first and become valid later on, as the relay chain grows.
+///
+/// # Usage
+///
+/// It's expected that the users of this module will be building up trees of
+/// [`Fragment`]s and consistently pruning and adding to the tree.
+///
+/// ## Operating Constraints
+///
+/// The *operating constraints* of a `Fragment` are the constraints with which that fragment
+/// was intended to comply. The operating constraints are defined as the base constraints
+/// of the relay-parent of the fragment modified by the cumulative modifications of all
+/// fragments between the relay-parent and the current fragment.
+///
+/// What the operating constraints are, in practice, is a prediction about the state of the
+/// relay-chain in the future. The relay-chain is aware of some current state, and we want to
+/// make an intelligent prediction about what might be accepted in the future based on
+/// prior fragments that also exist off-chain.
+///
+/// ## Fragment Trees
+///
+/// As the relay-chain grows, some predictions come true and others come false.
+/// And new predictions get made. These three changes correspond distinctly to the
+/// 3 primary operations on fragment trees.
+///
+/// A fragment tree is a mental model for thinking about a forking series of predictions
+/// about a single parachain. There may be one or more fragment trees per parachain.
+///
+/// In expectation, most parachains will have a plausibly-unique authorship method which means
+/// that they should really be much closer to fragment-chains, maybe with an occasional fork.
+///
+/// Avoiding fragment-tree blowup is beyond the scope of this module.
+///
+/// ### Pruning Fragment Trees
+///
+/// When the relay-chain advances, we want to compare the new constraints of that relay-parent
+/// to the roots of the fragment trees we have. There are 3 cases:
+///
+/// 1. The root fragment is still valid under the new constraints. In this case, we do nothing.
+/// This is the "prediction still uncertain" case.
+///
+/// 2. The root fragment is invalid under the new constraints because it has been subsumed by
+/// the relay-chain. In this case, we can discard the root and split & re-root the fragment
+/// tree under its descendants and compare to the new constraints again. This is the
+/// "prediction came true" case.
+///
+/// 3.
The root fragment is invalid under the new constraints because a competing parachain
+/// block has been included or it would never be accepted for some other reason. In this
+/// case we can discard the entire fragment tree. This is the "prediction came false" case.
+///
+/// This is all a bit of a simplification because it assumes that the relay-chain advances
+/// without forks and is finalized instantly. In practice, the set of fragment-trees needs to
+/// be observable from the perspective of a few different possible forks of the relay-chain and
+/// not pruned too eagerly.
+///
+/// Note that the fragments themselves don't need to change and the only thing we care about
+/// is whether the predictions they represent are still valid.
+///
+/// ### Extending Fragment Trees
+///
+/// As predictions fade into the past, new ones should be stacked on top.
+///
+/// Every new relay-chain block is an opportunity to make a new prediction about the future.
+/// Higher-level logic should select the leaves of the fragment-trees to build upon or whether
+/// to create a new fragment-tree.
+///
+/// ### Code Upgrades
+///
+/// Code upgrades are the main place where this emulation fails. The on-chain PVF upgrade
+/// scheduling logic is very path-dependent and intricate, so we just assume that code upgrades
+/// can't be initiated and applied within a single fragment-tree. Fragment-trees aren't deep
+/// in practice, and code upgrades are fairly rare. So what's likely to happen around code
+/// upgrades is that the entire fragment-tree has to get discarded at some point.
+///
+/// That means a few blocks of execution time lost, which is not a big deal given that, in
+/// practice, code upgrades happen at most once every few weeks.
+use polkadot_primitives::{
+ async_backing::Constraints as PrimitiveConstraints, BlockNumber, CandidateCommitments,
+ CollatorId, CollatorSignature, Hash, HeadData, Id as ParaId, PersistedValidationData,
+ UpgradeRestriction, ValidationCodeHash,
+};
+use std::{
+ borrow::{Borrow, Cow},
+ collections::HashMap,
+};
+
+/// Constraints on inbound HRMP channels.
+#[derive(Debug, Clone, PartialEq)]
+pub struct InboundHrmpLimitations {
+ /// An exhaustive set of all valid watermarks, sorted ascending.
+ pub valid_watermarks: Vec<BlockNumber>,
+}
+
+/// Constraints on outbound HRMP channels.
+#[derive(Debug, Clone, PartialEq)]
+pub struct OutboundHrmpChannelLimitations {
+ /// The maximum bytes that can be written to the channel.
+ pub bytes_remaining: usize,
+ /// The maximum messages that can be written to the channel.
+ pub messages_remaining: usize,
+}
+
+/// Constraints on the actions that can be taken by a new parachain
+/// block. These limitations are implicitly associated with some particular
+/// parachain, which should be apparent from usage.
+#[derive(Debug, Clone, PartialEq)]
+pub struct Constraints {
+ /// The minimum relay-parent number accepted under these constraints.
+ pub min_relay_parent_number: BlockNumber,
+ /// The maximum Proof-of-Validity size allowed, in bytes.
+ pub max_pov_size: usize,
+ /// The maximum new validation code size allowed, in bytes.
+ pub max_code_size: usize,
+ /// The amount of UMP messages remaining.
+ pub ump_remaining: usize,
+ /// The amount of UMP bytes remaining.
+ pub ump_remaining_bytes: usize,
+ /// The maximum number of UMP messages allowed per candidate.
+ pub max_ump_num_per_candidate: usize,
+ /// Remaining DMP queue. Only includes sent-at block numbers.
+ pub dmp_remaining_messages: Vec<BlockNumber>,
+ /// The limitations of all registered inbound HRMP channels.
+ pub hrmp_inbound: InboundHrmpLimitations,
+ /// The limitations of all registered outbound HRMP channels.
+ pub hrmp_channels_out: HashMap<ParaId, OutboundHrmpChannelLimitations>,
+ /// The maximum number of HRMP messages allowed per candidate.
+ pub max_hrmp_num_per_candidate: usize,
+ /// The required parent head-data of the parachain.
+ pub required_parent: HeadData,
+ /// The expected validation-code-hash of this parachain.
+ pub validation_code_hash: ValidationCodeHash,
+ /// The code upgrade restriction signal as-of this parachain.
+ pub upgrade_restriction: Option<UpgradeRestriction>,
+ /// The future validation code hash, if any, and at what relay-parent
+ /// number the upgrade would be minimally applied.
+ pub future_validation_code: Option<(BlockNumber, ValidationCodeHash)>,
+}
+
+impl From<PrimitiveConstraints> for Constraints {
+ fn from(c: PrimitiveConstraints) -> Self {
+ Constraints {
+ min_relay_parent_number: c.min_relay_parent_number,
+ max_pov_size: c.max_pov_size as _,
+ max_code_size: c.max_code_size as _,
+ ump_remaining: c.ump_remaining as _,
+ ump_remaining_bytes: c.ump_remaining_bytes as _,
+ max_ump_num_per_candidate: c.max_ump_num_per_candidate as _,
+ dmp_remaining_messages: c.dmp_remaining_messages,
+ hrmp_inbound: InboundHrmpLimitations {
+ valid_watermarks: c.hrmp_inbound.valid_watermarks,
+ },
+ hrmp_channels_out: c
+ .hrmp_channels_out
+ .into_iter()
+ .map(|(para_id, limits)| {
+ (
+ para_id,
+ OutboundHrmpChannelLimitations {
+ bytes_remaining: limits.bytes_remaining as _,
+ messages_remaining: limits.messages_remaining as _,
+ },
+ )
+ })
+ .collect(),
+ max_hrmp_num_per_candidate: c.max_hrmp_num_per_candidate as _,
+ required_parent: c.required_parent,
+ validation_code_hash: c.validation_code_hash,
+ upgrade_restriction: c.upgrade_restriction,
+ future_validation_code: c.future_validation_code,
+ }
+ }
+}
+
+/// Kinds of errors that can occur when modifying constraints.
+#[derive(Debug, Clone, PartialEq)]
+pub enum ModificationError {
+ /// The HRMP watermark is not allowed.
+ DisallowedHrmpWatermark(BlockNumber),
+ /// No such HRMP outbound channel.
+ NoSuchHrmpChannel(ParaId),
+ /// Too many messages submitted to HRMP channel.
+ HrmpMessagesOverflow {
+ /// The ID of the recipient.
+ para_id: ParaId,
+ /// The amount of remaining messages in the capacity of the channel.
+ messages_remaining: usize,
+ /// The amount of messages submitted to the channel.
+ messages_submitted: usize,
+ },
+ /// Too many bytes submitted to HRMP channel.
+ HrmpBytesOverflow {
+ /// The ID of the recipient.
+ para_id: ParaId,
+ /// The amount of remaining bytes in the capacity of the channel.
+ bytes_remaining: usize,
+ /// The amount of bytes submitted to the channel.
+ bytes_submitted: usize,
+ },
+ /// Too many messages submitted to UMP.
+ UmpMessagesOverflow {
+ /// The amount of remaining messages in the capacity of UMP.
+ messages_remaining: usize,
+ /// The amount of messages submitted to UMP.
+ messages_submitted: usize,
+ },
+ /// Too many bytes submitted to UMP.
+ UmpBytesOverflow {
+ /// The amount of remaining bytes in the capacity of UMP.
+ bytes_remaining: usize,
+ /// The amount of bytes submitted to UMP.
+ bytes_submitted: usize,
+ },
+ /// Too many messages processed from DMP.
+ DmpMessagesUnderflow {
+ /// The amount of messages waiting to be processed from DMP.
+ messages_remaining: usize,
+ /// The amount of messages processed.
+ messages_processed: usize,
+ },
+ /// No validation code upgrade to apply.
+ AppliedNonexistentCodeUpgrade,
+}
+
+impl Constraints {
+ /// Check modifications against constraints.
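+ ///
+ /// As a rough illustrative sketch (assuming some `constraints: Constraints`
+ /// built elsewhere, e.g. via `From<PrimitiveConstraints>`): the identity
+ /// modifications always pass, while overshooting a limit yields the
+ /// corresponding `ModificationError`:
+ ///
+ /// ```ignore
+ /// let mut modifications = ConstraintModifications::identity();
+ /// assert!(constraints.check_modifications(&modifications).is_ok());
+ ///
+ /// modifications.ump_messages_sent = constraints.ump_remaining + 1;
+ /// assert!(matches!(
+ ///     constraints.check_modifications(&modifications),
+ ///     Err(ModificationError::UmpMessagesOverflow { .. })
+ /// ));
+ /// ```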
+ pub fn check_modifications(
+ &self,
+ modifications: &ConstraintModifications,
+ ) -> Result<(), ModificationError> {
+ if let Some(HrmpWatermarkUpdate::Trunk(hrmp_watermark)) = modifications.hrmp_watermark {
+ // head updates are always valid.
+ if self.hrmp_inbound.valid_watermarks.iter().all(|w| w != &hrmp_watermark) {
+ return Err(ModificationError::DisallowedHrmpWatermark(hrmp_watermark))
+ }
+ }
+
+ for (id, outbound_hrmp_mod) in &modifications.outbound_hrmp {
+ if let Some(outbound) = self.hrmp_channels_out.get(&id) {
+ outbound.bytes_remaining.checked_sub(outbound_hrmp_mod.bytes_submitted).ok_or(
+ ModificationError::HrmpBytesOverflow {
+ para_id: *id,
+ bytes_remaining: outbound.bytes_remaining,
+ bytes_submitted: outbound_hrmp_mod.bytes_submitted,
+ },
+ )?;
+
+ outbound
+ .messages_remaining
+ .checked_sub(outbound_hrmp_mod.messages_submitted)
+ .ok_or(ModificationError::HrmpMessagesOverflow {
+ para_id: *id,
+ messages_remaining: outbound.messages_remaining,
+ messages_submitted: outbound_hrmp_mod.messages_submitted,
+ })?;
+ } else {
+ return Err(ModificationError::NoSuchHrmpChannel(*id))
+ }
+ }
+
+ self.ump_remaining.checked_sub(modifications.ump_messages_sent).ok_or(
+ ModificationError::UmpMessagesOverflow {
+ messages_remaining: self.ump_remaining,
+ messages_submitted: modifications.ump_messages_sent,
+ },
+ )?;
+
+ self.ump_remaining_bytes.checked_sub(modifications.ump_bytes_sent).ok_or(
+ ModificationError::UmpBytesOverflow {
+ bytes_remaining: self.ump_remaining_bytes,
+ bytes_submitted: modifications.ump_bytes_sent,
+ },
+ )?;
+
+ self.dmp_remaining_messages
+ .len()
+ .checked_sub(modifications.dmp_messages_processed)
+ .ok_or(ModificationError::DmpMessagesUnderflow {
+ messages_remaining: self.dmp_remaining_messages.len(),
+ messages_processed: modifications.dmp_messages_processed,
+ })?;
+
+ if self.future_validation_code.is_none() && modifications.code_upgrade_applied {
+ return Err(ModificationError::AppliedNonexistentCodeUpgrade)
+ }
+
+ Ok(())
+ }
+
+ /// Apply modifications to these constraints. If this succeeds, it passes
+ /// all sanity-checks.
+ pub fn apply_modifications(
+ &self,
+ modifications: &ConstraintModifications,
+ ) -> Result<Self, ModificationError> {
+ let mut new = self.clone();
+
+ if let Some(required_parent) = modifications.required_parent.as_ref() {
+ new.required_parent = required_parent.clone();
+ }
+
+ if let Some(ref hrmp_watermark) = modifications.hrmp_watermark {
+ match new.hrmp_inbound.valid_watermarks.binary_search(&hrmp_watermark.watermark()) {
+ Ok(pos) => {
+ // Exact match, so this is OK in all cases.
+ let _ = new.hrmp_inbound.valid_watermarks.drain(..pos + 1);
+ },
+ Err(pos) => match hrmp_watermark {
+ HrmpWatermarkUpdate::Head(_) => {
+ // Updates to Head are always OK.
+ let _ = new.hrmp_inbound.valid_watermarks.drain(..pos);
+ },
+ HrmpWatermarkUpdate::Trunk(n) => {
+ // Trunk update landing on disallowed watermark is not OK.
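+ // (`binary_search` returned `Err(pos)`, i.e. the watermark is
+ // not in `valid_watermarks` at all; that is only acceptable for
+ // `Head` updates, so a `Trunk` update must be rejected.)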
+ return Err(ModificationError::DisallowedHrmpWatermark(*n))
+ },
+ },
+ }
+ }
+
+ for (id, outbound_hrmp_mod) in &modifications.outbound_hrmp {
+ if let Some(outbound) = new.hrmp_channels_out.get_mut(&id) {
+ outbound.bytes_remaining = outbound
+ .bytes_remaining
+ .checked_sub(outbound_hrmp_mod.bytes_submitted)
+ .ok_or(ModificationError::HrmpBytesOverflow {
+ para_id: *id,
+ bytes_remaining: outbound.bytes_remaining,
+ bytes_submitted: outbound_hrmp_mod.bytes_submitted,
+ })?;
+
+ outbound.messages_remaining = outbound
+ .messages_remaining
+ .checked_sub(outbound_hrmp_mod.messages_submitted)
+ .ok_or(ModificationError::HrmpMessagesOverflow {
+ para_id: *id,
+ messages_remaining: outbound.messages_remaining,
+ messages_submitted: outbound_hrmp_mod.messages_submitted,
+ })?;
+ } else {
+ return Err(ModificationError::NoSuchHrmpChannel(*id))
+ }
+ }
+
+ new.ump_remaining = new.ump_remaining.checked_sub(modifications.ump_messages_sent).ok_or(
+ ModificationError::UmpMessagesOverflow {
+ messages_remaining: new.ump_remaining,
+ messages_submitted: modifications.ump_messages_sent,
+ },
+ )?;
+
+ new.ump_remaining_bytes = new
+ .ump_remaining_bytes
+ .checked_sub(modifications.ump_bytes_sent)
+ .ok_or(ModificationError::UmpBytesOverflow {
+ bytes_remaining: new.ump_remaining_bytes,
+ bytes_submitted: modifications.ump_bytes_sent,
+ })?;
+
+ if modifications.dmp_messages_processed > new.dmp_remaining_messages.len() {
+ return Err(ModificationError::DmpMessagesUnderflow {
+ messages_remaining: new.dmp_remaining_messages.len(),
+ messages_processed: modifications.dmp_messages_processed,
+ })
+ } else {
+ new.dmp_remaining_messages =
+ new.dmp_remaining_messages[modifications.dmp_messages_processed..].to_vec();
+ }
+
+ if modifications.code_upgrade_applied {
+ new.validation_code_hash = new
+ .future_validation_code
+ .take()
+ .ok_or(ModificationError::AppliedNonexistentCodeUpgrade)?
+ .1;
+ }
+
+ Ok(new)
+ }
+}
+
+/// Information about a relay-chain block.
+#[derive(Debug, Clone, PartialEq)]
+pub struct RelayChainBlockInfo {
+ /// The hash of the relay-chain block.
+ pub hash: Hash,
+ /// The number of the relay-chain block.
+ pub number: BlockNumber,
+ /// The storage-root of the relay-chain block.
+ pub storage_root: Hash,
+}
+
+/// An update to outbound HRMP channels.
+#[derive(Debug, Clone, PartialEq, Default)]
+pub struct OutboundHrmpChannelModification {
+ /// The number of bytes submitted to the channel.
+ pub bytes_submitted: usize,
+ /// The number of messages submitted to the channel.
+ pub messages_submitted: usize,
+}
+
+/// An update to the HRMP Watermark.
+#[derive(Debug, Clone, PartialEq)]
+pub enum HrmpWatermarkUpdate {
+ /// This is an update placing the watermark at the head of the chain,
+ /// which is always legal.
+ Head(BlockNumber),
+ /// This is an update placing the watermark behind the head of the
+ /// chain, which is only legal if it lands on a block where messages
+ /// were queued.
+ Trunk(BlockNumber),
+}
+
+impl HrmpWatermarkUpdate {
+ fn watermark(&self) -> BlockNumber {
+ match *self {
+ HrmpWatermarkUpdate::Head(n) | HrmpWatermarkUpdate::Trunk(n) => n,
+ }
+ }
+}
+
+/// Modifications to constraints as a result of prospective candidates.
+#[derive(Debug, Clone, PartialEq)]
+pub struct ConstraintModifications {
+ /// The required parent head to build upon.
+ pub required_parent: Option<HeadData>,
+ /// The new HRMP watermark.
+ pub hrmp_watermark: Option<HrmpWatermarkUpdate>,
+ /// Outbound HRMP channel modifications.
+ pub outbound_hrmp: HashMap<ParaId, OutboundHrmpChannelModification>,
+ /// The amount of UMP messages sent.
+ pub ump_messages_sent: usize,
+ /// The amount of UMP bytes sent.
+ pub ump_bytes_sent: usize,
+ /// The amount of DMP messages processed.
+ pub dmp_messages_processed: usize,
+ /// Whether a pending code upgrade has been applied.
+ pub code_upgrade_applied: bool,
+}
+
+impl ConstraintModifications {
+ /// The 'identity' modifications: these can be applied to
+ /// any constraints and yield the exact same result.
+ pub fn identity() -> Self {
+ ConstraintModifications {
+ required_parent: None,
+ hrmp_watermark: None,
+ outbound_hrmp: HashMap::new(),
+ ump_messages_sent: 0,
+ ump_bytes_sent: 0,
+ dmp_messages_processed: 0,
+ code_upgrade_applied: false,
+ }
+ }
+
+ /// Stack other modifications on top of these.
+ ///
+ /// This does no sanity-checking, so if `other` is garbage relative
+ /// to `self`, then the new value will be garbage as well.
+ ///
+ /// This is an addition which is not commutative.
+ pub fn stack(&mut self, other: &Self) {
+ if let Some(ref new_parent) = other.required_parent {
+ self.required_parent = Some(new_parent.clone());
+ }
+ if let Some(ref new_hrmp_watermark) = other.hrmp_watermark {
+ self.hrmp_watermark = Some(new_hrmp_watermark.clone());
+ }
+
+ for (id, mods) in &other.outbound_hrmp {
+ let record = self.outbound_hrmp.entry(*id).or_default();
+ record.messages_submitted += mods.messages_submitted;
+ record.bytes_submitted += mods.bytes_submitted;
+ }
+
+ self.ump_messages_sent += other.ump_messages_sent;
+ self.ump_bytes_sent += other.ump_bytes_sent;
+ self.dmp_messages_processed += other.dmp_messages_processed;
+ self.code_upgrade_applied |= other.code_upgrade_applied;
+ }
+}
+
+/// The prospective candidate.
+///
+/// This comprises the key information that represents a candidate
+/// without pinning it to a particular session. For example, everything
+/// to do with the collator's signature and commitments is represented
+/// here. But the erasure-root is not. This means that prospective candidates
+/// are not correlated to any session in particular.
+#[derive(Debug, Clone, PartialEq)]
+pub struct ProspectiveCandidate<'a> {
+ /// The commitments to the output of the execution.
+ pub commitments: Cow<'a, CandidateCommitments>,
+ /// The collator that created the candidate.
+ pub collator: CollatorId,
+ /// The signature of the collator on the payload.
+ pub collator_signature: CollatorSignature,
+ /// The persisted validation data used to create the candidate.
+ pub persisted_validation_data: PersistedValidationData,
+ /// The hash of the PoV.
+ pub pov_hash: Hash,
+ /// The validation code hash used by the candidate.
+ pub validation_code_hash: ValidationCodeHash,
+}
+
+impl<'a> ProspectiveCandidate<'a> {
+ fn into_owned(self) -> ProspectiveCandidate<'static> {
+ ProspectiveCandidate { commitments: Cow::Owned(self.commitments.into_owned()), ..self }
+ }
+
+ /// Partially clone the prospective candidate, but borrow the
+ /// parts which are potentially heavy.
+ pub fn partial_clone(&self) -> ProspectiveCandidate {
+ ProspectiveCandidate {
+ commitments: Cow::Borrowed(self.commitments.borrow()),
+ collator: self.collator.clone(),
+ collator_signature: self.collator_signature.clone(),
+ persisted_validation_data: self.persisted_validation_data.clone(),
+ pov_hash: self.pov_hash,
+ validation_code_hash: self.validation_code_hash,
+ }
+ }
+}
+
+#[cfg(test)]
+impl ProspectiveCandidate<'static> {
+ fn commitments_mut(&mut self) -> &mut CandidateCommitments {
+ self.commitments.to_mut()
+ }
+}
+
+/// Kinds of errors with the validity of a fragment.
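+/// These errors are produced by [`Fragment::new`] and by
+/// [`Fragment::validate_against_constraints`] whenever a candidate does not
+/// fit the constraints it is checked against.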
+#[derive(Debug, Clone, PartialEq)]
+pub enum FragmentValidityError {
+ /// The validation code of the candidate doesn't match the
+ /// operating constraints.
+ ///
+ /// Expected, Got
+ ValidationCodeMismatch(ValidationCodeHash, ValidationCodeHash),
+ /// The persisted-validation-data doesn't match.
+ ///
+ /// Expected, Got
+ PersistedValidationDataMismatch(PersistedValidationData, PersistedValidationData),
+ /// The outputs of the candidate are invalid under the operating
+ /// constraints.
+ OutputsInvalid(ModificationError),
+ /// New validation code size too big.
+ ///
+ /// Max allowed, new.
+ CodeSizeTooLarge(usize, usize),
+ /// Relay parent too old.
+ ///
+ /// Min allowed, current.
+ RelayParentTooOld(BlockNumber, BlockNumber),
+ /// Para is required to process at least one DMP message from the queue.
+ DmpAdvancementRule,
+ /// Too many upward messages submitted.
+ UmpMessagesPerCandidateOverflow {
+ /// The amount of messages a single candidate can submit.
+ messages_allowed: usize,
+ /// The amount of upward messages submitted.
+ messages_submitted: usize,
+ },
+ /// Too many messages submitted to all HRMP channels.
+ HrmpMessagesPerCandidateOverflow {
+ /// The amount of messages a single candidate can submit.
+ messages_allowed: usize,
+ /// The amount of messages sent to all HRMP channels.
+ messages_submitted: usize,
+ },
+ /// Code upgrade not allowed.
+ CodeUpgradeRestricted,
+ /// HRMP messages are not ascending or are duplicate.
+ ///
+ /// The `usize` is the index into the outbound HRMP messages of
+ /// the candidate.
+ HrmpMessagesDescendingOrDuplicate(usize),
+}
+
+/// A parachain fragment, representing another prospective parachain block.
+///
+/// This is a type which guarantees that the candidate is valid under the
+/// operating constraints.
+#[derive(Debug, Clone, PartialEq)]
+pub struct Fragment<'a> {
+ /// The new relay-parent.
+ relay_parent: RelayChainBlockInfo,
+ /// The constraints this fragment is operating under.
+ operating_constraints: Constraints,
+ /// The core information about the prospective candidate.
+ candidate: ProspectiveCandidate<'a>,
+ /// Modifications to the constraints based on the outputs of
+ /// the candidate.
+ modifications: ConstraintModifications,
+}
+
+impl<'a> Fragment<'a> {
+ /// Create a new fragment.
+ ///
+ /// This fails if the fragment isn't in line with the operating
+ /// constraints. That is, either its inputs or its outputs fail
+ /// checks against the constraints.
+ ///
+ /// This doesn't check that the collator signature is valid or
+ /// whether the PoV is small enough.
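+ ///
+ /// A sketch of the typical flow (illustrative only; `constraints` and
+ /// `candidate` are assumed to be built elsewhere, e.g. as in this
+ /// module's tests):
+ ///
+ /// ```ignore
+ /// let relay_parent = RelayChainBlockInfo {
+ ///     number: 6,
+ ///     hash: Hash::repeat_byte(0x0a),
+ ///     storage_root: Hash::repeat_byte(0xff),
+ /// };
+ /// let fragment = Fragment::new(relay_parent, constraints, candidate)?;
+ /// // The cumulative effect of this candidate, to be stacked onto the
+ /// // base constraints when validating a child fragment.
+ /// let modifications = fragment.constraint_modifications();
+ /// ```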
+ pub fn new(
+ relay_parent: RelayChainBlockInfo,
+ operating_constraints: Constraints,
+ candidate: ProspectiveCandidate<'a>,
+ ) -> Result<Self, FragmentValidityError> {
+ let modifications = {
+ let commitments = &candidate.commitments;
+ ConstraintModifications {
+ required_parent: Some(commitments.head_data.clone()),
+ hrmp_watermark: Some({
+ if commitments.hrmp_watermark == relay_parent.number {
+ HrmpWatermarkUpdate::Head(commitments.hrmp_watermark)
+ } else {
+ HrmpWatermarkUpdate::Trunk(commitments.hrmp_watermark)
+ }
+ }),
+ outbound_hrmp: {
+ let mut outbound_hrmp = HashMap::<_, OutboundHrmpChannelModification>::new();
+
+ let mut last_recipient = None::<ParaId>;
+ for (i, message) in commitments.horizontal_messages.iter().enumerate() {
+ if let Some(last) = last_recipient {
+ if last >= message.recipient {
+ return Err(
+ FragmentValidityError::HrmpMessagesDescendingOrDuplicate(i),
+ )
+ }
+ }
+
+ last_recipient = Some(message.recipient);
+ let record = outbound_hrmp.entry(message.recipient).or_default();
+
+ record.bytes_submitted += message.data.len();
+ record.messages_submitted += 1;
+ }
+
+ outbound_hrmp
+ },
+ ump_messages_sent: commitments.upward_messages.len(),
+ ump_bytes_sent: commitments.upward_messages.iter().map(|msg| msg.len()).sum(),
+ dmp_messages_processed: commitments.processed_downward_messages as _,
+ code_upgrade_applied: operating_constraints
+ .future_validation_code
+ .map_or(false, |(at, _)| relay_parent.number >= at),
+ }
+ };
+
+ validate_against_constraints(
+ &operating_constraints,
+ &relay_parent,
+ &candidate,
+ &modifications,
+ )?;
+
+ Ok(Fragment { relay_parent, operating_constraints, candidate, modifications })
+ }
+
+ /// Access the relay parent information.
+ pub fn relay_parent(&self) -> &RelayChainBlockInfo {
+ &self.relay_parent
+ }
+
+ /// Access the operating constraints.
+ pub fn operating_constraints(&self) -> &Constraints {
+ &self.operating_constraints
+ }
+
+ /// Access the underlying prospective candidate.
+ pub fn candidate(&self) -> &ProspectiveCandidate<'a> {
+ &self.candidate
+ }
+
+ /// Modifications to constraints based on the outputs of the candidate.
+ pub fn constraint_modifications(&self) -> &ConstraintModifications {
+ &self.modifications
+ }
+
+ /// Convert the fragment into an owned variant.
+ pub fn into_owned(self) -> Fragment<'static> {
+ Fragment { candidate: self.candidate.into_owned(), ..self }
+ }
+
+ /// Validate this fragment against some set of constraints
+ /// instead of the operating constraints.
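+ ///
+ /// This is the primitive behind the "pruning" step described in the
+ /// module-level docs: when the relay chain advances, existing fragments
+ /// can be re-checked against the fresh constraints of the new relay-parent
+ /// (illustrative sketch):
+ ///
+ /// ```ignore
+ /// match fragment.validate_against_constraints(&new_constraints) {
+ ///     Ok(()) => { /* prediction still plausible; keep the fragment */ },
+ ///     Err(_) => { /* subsumed or contradicted; prune the fragment */ },
+ /// }
+ /// ```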
+ pub fn validate_against_constraints( + &self, + constraints: &Constraints, + ) -> Result<(), FragmentValidityError> { + validate_against_constraints( + constraints, + &self.relay_parent, + &self.candidate, + &self.modifications, + ) + } +} + +fn validate_against_constraints( + constraints: &Constraints, + relay_parent: &RelayChainBlockInfo, + candidate: &ProspectiveCandidate, + modifications: &ConstraintModifications, +) -> Result<(), FragmentValidityError> { + let expected_pvd = PersistedValidationData { + parent_head: constraints.required_parent.clone(), + relay_parent_number: relay_parent.number, + relay_parent_storage_root: relay_parent.storage_root, + max_pov_size: constraints.max_pov_size as u32, + }; + + if expected_pvd != candidate.persisted_validation_data { + return Err(FragmentValidityError::PersistedValidationDataMismatch( + expected_pvd, + candidate.persisted_validation_data.clone(), + )) + } + + if constraints.validation_code_hash != candidate.validation_code_hash { + return Err(FragmentValidityError::ValidationCodeMismatch( + constraints.validation_code_hash, + candidate.validation_code_hash, + )) + } + + if relay_parent.number < constraints.min_relay_parent_number { + return Err(FragmentValidityError::RelayParentTooOld( + constraints.min_relay_parent_number, + relay_parent.number, + )) + } + + if candidate.commitments.new_validation_code.is_some() { + match constraints.upgrade_restriction { + None => {}, + Some(UpgradeRestriction::Present) => + return Err(FragmentValidityError::CodeUpgradeRestricted), + } + } + + let announced_code_size = candidate + .commitments + .new_validation_code + .as_ref() + .map_or(0, |code| code.0.len()); + + if announced_code_size > constraints.max_code_size { + return Err(FragmentValidityError::CodeSizeTooLarge( + constraints.max_code_size, + announced_code_size, + )) + } + + if modifications.dmp_messages_processed == 0 { + if constraints + .dmp_remaining_messages + .get(0) + .map_or(false, |&msg_sent_at| msg_sent_at <= relay_parent.number) + { + return Err(FragmentValidityError::DmpAdvancementRule) + } + } + + if candidate.commitments.horizontal_messages.len() > constraints.max_hrmp_num_per_candidate { + return Err(FragmentValidityError::HrmpMessagesPerCandidateOverflow { + messages_allowed: constraints.max_hrmp_num_per_candidate, + messages_submitted: candidate.commitments.horizontal_messages.len(), + }) + } + + if candidate.commitments.upward_messages.len() > constraints.max_ump_num_per_candidate { + return Err(FragmentValidityError::UmpMessagesPerCandidateOverflow { + messages_allowed: constraints.max_ump_num_per_candidate, + messages_submitted: candidate.commitments.upward_messages.len(), + }) + } + + constraints + .check_modifications(&modifications) + .map_err(FragmentValidityError::OutputsInvalid) +} + +#[cfg(test)] +mod tests { + use super::*; + use polkadot_primitives::{ + CollatorPair, HorizontalMessages, OutboundHrmpMessage, ValidationCode, + }; + use sp_application_crypto::Pair; + + #[test] + fn stack_modifications() { + let para_a = ParaId::from(1u32); + let para_b = ParaId::from(2u32); + let para_c = ParaId::from(3u32); + + let a = ConstraintModifications { + required_parent: None, + hrmp_watermark: None, + outbound_hrmp: { + let mut map = HashMap::new(); + map.insert( + para_a, + OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 }, + ); + + map.insert( + para_b, + OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 }, + ); + + map + }, + ump_messages_sent: 6, + 
ump_bytes_sent: 1000, + dmp_messages_processed: 5, + code_upgrade_applied: true, + }; + + let b = ConstraintModifications { + required_parent: None, + hrmp_watermark: None, + outbound_hrmp: { + let mut map = HashMap::new(); + map.insert( + para_b, + OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 }, + ); + + map.insert( + para_c, + OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 }, + ); + + map + }, + ump_messages_sent: 6, + ump_bytes_sent: 1000, + dmp_messages_processed: 5, + code_upgrade_applied: true, + }; + + let mut c = a.clone(); + c.stack(&b); + + assert_eq!( + c, + ConstraintModifications { + required_parent: None, + hrmp_watermark: None, + outbound_hrmp: { + let mut map = HashMap::new(); + map.insert( + para_a, + OutboundHrmpChannelModification { + bytes_submitted: 100, + messages_submitted: 5, + }, + ); + + map.insert( + para_b, + OutboundHrmpChannelModification { + bytes_submitted: 200, + messages_submitted: 10, + }, + ); + + map.insert( + para_c, + OutboundHrmpChannelModification { + bytes_submitted: 100, + messages_submitted: 5, + }, + ); + + map + }, + ump_messages_sent: 12, + ump_bytes_sent: 2000, + dmp_messages_processed: 10, + code_upgrade_applied: true, + }, + ); + + let mut d = ConstraintModifications::identity(); + d.stack(&a); + d.stack(&b); + + assert_eq!(c, d); + } + + fn make_constraints() -> Constraints { + let para_a = ParaId::from(1u32); + let para_b = ParaId::from(2u32); + let para_c = ParaId::from(3u32); + + Constraints { + min_relay_parent_number: 5, + max_pov_size: 1000, + max_code_size: 1000, + ump_remaining: 10, + ump_remaining_bytes: 1024, + max_ump_num_per_candidate: 5, + dmp_remaining_messages: Vec::new(), + hrmp_inbound: InboundHrmpLimitations { valid_watermarks: vec![6, 8] }, + hrmp_channels_out: { + let mut map = HashMap::new(); + + map.insert( + para_a, + OutboundHrmpChannelLimitations { messages_remaining: 5, bytes_remaining: 512 }, + ); + + map.insert( + para_b, + OutboundHrmpChannelLimitations { + messages_remaining: 10, + bytes_remaining: 1024, + }, + ); + + map.insert( + para_c, + OutboundHrmpChannelLimitations { messages_remaining: 1, bytes_remaining: 128 }, + ); + + map + }, + max_hrmp_num_per_candidate: 5, + required_parent: HeadData::from(vec![1, 2, 3]), + validation_code_hash: ValidationCode(vec![4, 5, 6]).hash(), + upgrade_restriction: None, + future_validation_code: None, + } + } + + #[test] + fn constraints_disallowed_trunk_watermark() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + modifications.hrmp_watermark = Some(HrmpWatermarkUpdate::Trunk(7)); + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::DisallowedHrmpWatermark(7)), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::DisallowedHrmpWatermark(7)), + ); + } + + #[test] + fn constraints_always_allow_head_watermark() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + modifications.hrmp_watermark = Some(HrmpWatermarkUpdate::Head(7)); + + assert!(constraints.check_modifications(&modifications).is_ok()); + + let new_constraints = constraints.apply_modifications(&modifications).unwrap(); + assert_eq!(new_constraints.hrmp_inbound.valid_watermarks, vec![8]); + } + + #[test] + fn constraints_no_such_hrmp_channel() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + let bad_para 
= ParaId::from(100u32); + modifications.outbound_hrmp.insert( + bad_para, + OutboundHrmpChannelModification { bytes_submitted: 0, messages_submitted: 0 }, + ); + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::NoSuchHrmpChannel(bad_para)), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::NoSuchHrmpChannel(bad_para)), + ); + } + + #[test] + fn constraints_hrmp_messages_overflow() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + let para_a = ParaId::from(1u32); + modifications.outbound_hrmp.insert( + para_a, + OutboundHrmpChannelModification { bytes_submitted: 0, messages_submitted: 6 }, + ); + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::HrmpMessagesOverflow { + para_id: para_a, + messages_remaining: 5, + messages_submitted: 6, + }), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::HrmpMessagesOverflow { + para_id: para_a, + messages_remaining: 5, + messages_submitted: 6, + }), + ); + } + + #[test] + fn constraints_hrmp_bytes_overflow() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + let para_a = ParaId::from(1u32); + modifications.outbound_hrmp.insert( + para_a, + OutboundHrmpChannelModification { bytes_submitted: 513, messages_submitted: 1 }, + ); + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::HrmpBytesOverflow { + para_id: para_a, + bytes_remaining: 512, + bytes_submitted: 513, + }), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::HrmpBytesOverflow { + para_id: para_a, + bytes_remaining: 512, + bytes_submitted: 513, + }), + ); + } + + #[test] + fn constraints_ump_messages_overflow() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + modifications.ump_messages_sent = 11; + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::UmpMessagesOverflow { + messages_remaining: 10, + messages_submitted: 11, + }), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::UmpMessagesOverflow { + messages_remaining: 10, + messages_submitted: 11, + }), + ); + } + + #[test] + fn constraints_ump_bytes_overflow() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + modifications.ump_bytes_sent = 1025; + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::UmpBytesOverflow { + bytes_remaining: 1024, + bytes_submitted: 1025, + }), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::UmpBytesOverflow { + bytes_remaining: 1024, + bytes_submitted: 1025, + }), + ); + } + + #[test] + fn constraints_dmp_messages() { + let mut constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + assert!(constraints.check_modifications(&modifications).is_ok()); + assert!(constraints.apply_modifications(&modifications).is_ok()); + + modifications.dmp_messages_processed = 6; + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::DmpMessagesUnderflow { + messages_remaining: 0, + messages_processed: 6, + }), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + 
Err(ModificationError::DmpMessagesUnderflow { + messages_remaining: 0, + messages_processed: 6, + }), + ); + + constraints.dmp_remaining_messages = vec![1, 4, 8, 10]; + modifications.dmp_messages_processed = 2; + assert!(constraints.check_modifications(&modifications).is_ok()); + let constraints = constraints + .apply_modifications(&modifications) + .expect("modifications are valid"); + + assert_eq!(&constraints.dmp_remaining_messages, &[8, 10]); + } + + #[test] + fn constraints_nonexistent_code_upgrade() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + modifications.code_upgrade_applied = true; + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::AppliedNonexistentCodeUpgrade), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::AppliedNonexistentCodeUpgrade), + ); + } + + fn make_candidate( + constraints: &Constraints, + relay_parent: &RelayChainBlockInfo, + ) -> ProspectiveCandidate<'static> { + let collator_pair = CollatorPair::generate().0; + let collator = collator_pair.public(); + + let sig = collator_pair.sign(b"blabla".as_slice()); + + ProspectiveCandidate { + commitments: Cow::Owned(CandidateCommitments { + upward_messages: Default::default(), + horizontal_messages: Default::default(), + new_validation_code: None, + head_data: HeadData::from(vec![1, 2, 3, 4, 5]), + processed_downward_messages: 0, + hrmp_watermark: relay_parent.number, + }), + collator, + collator_signature: sig, + persisted_validation_data: PersistedValidationData { + parent_head: constraints.required_parent.clone(), + relay_parent_number: relay_parent.number, + relay_parent_storage_root: relay_parent.storage_root, + max_pov_size: constraints.max_pov_size as u32, + }, + pov_hash: Hash::repeat_byte(1), + validation_code_hash: constraints.validation_code_hash, + } + } + + #[test] + fn fragment_validation_code_mismatch() { + let relay_parent = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let constraints = make_constraints(); + let mut candidate = make_candidate(&constraints, &relay_parent); + + let expected_code = constraints.validation_code_hash; + let got_code = ValidationCode(vec![9, 9, 9]).hash(); + + candidate.validation_code_hash = got_code; + + assert_eq!( + Fragment::new(relay_parent, constraints, candidate), + Err(FragmentValidityError::ValidationCodeMismatch(expected_code, got_code,)), + ) + } + + #[test] + fn fragment_pvd_mismatch() { + let relay_parent = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let relay_parent_b = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0b), + storage_root: Hash::repeat_byte(0xee), + }; + + let constraints = make_constraints(); + let candidate = make_candidate(&constraints, &relay_parent); + + let expected_pvd = PersistedValidationData { + parent_head: constraints.required_parent.clone(), + relay_parent_number: relay_parent_b.number, + relay_parent_storage_root: relay_parent_b.storage_root, + max_pov_size: constraints.max_pov_size as u32, + }; + + let got_pvd = candidate.persisted_validation_data.clone(); + + assert_eq!( + Fragment::new(relay_parent_b, constraints, candidate), + Err(FragmentValidityError::PersistedValidationDataMismatch(expected_pvd, got_pvd,)), + ); + } + + #[test] + fn fragment_code_size_too_large() { + let relay_parent = RelayChainBlockInfo { + number: 6, + 
hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let constraints = make_constraints(); + let mut candidate = make_candidate(&constraints, &relay_parent); + + let max_code_size = constraints.max_code_size; + candidate.commitments_mut().new_validation_code = Some(vec![0; max_code_size + 1].into()); + + assert_eq!( + Fragment::new(relay_parent, constraints, candidate), + Err(FragmentValidityError::CodeSizeTooLarge(max_code_size, max_code_size + 1,)), + ); + } + + #[test] + fn fragment_relay_parent_too_old() { + let relay_parent = RelayChainBlockInfo { + number: 3, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let constraints = make_constraints(); + let candidate = make_candidate(&constraints, &relay_parent); + + assert_eq!( + Fragment::new(relay_parent, constraints, candidate), + Err(FragmentValidityError::RelayParentTooOld(5, 3,)), + ); + } + + #[test] + fn fragment_hrmp_messages_overflow() { + let relay_parent = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let constraints = make_constraints(); + let mut candidate = make_candidate(&constraints, &relay_parent); + + let max_hrmp = constraints.max_hrmp_num_per_candidate; + + candidate + .commitments_mut() + .horizontal_messages + .try_extend((0..max_hrmp + 1).map(|i| OutboundHrmpMessage { + recipient: ParaId::from(i as u32), + data: vec![1, 2, 3], + })) + .unwrap(); + + assert_eq!( + Fragment::new(relay_parent, constraints, candidate), + Err(FragmentValidityError::HrmpMessagesPerCandidateOverflow { + messages_allowed: max_hrmp, + messages_submitted: max_hrmp + 1, + }), + ); + } + + #[test] + fn fragment_dmp_advancement_rule() { + let relay_parent = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let mut constraints = make_constraints(); + let mut candidate = make_candidate(&constraints, &relay_parent); + + // Empty dmp queue is ok. + assert!(Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()).is_ok()); + // Unprocessed message that was sent later is ok. 
+ constraints.dmp_remaining_messages = vec![relay_parent.number + 1]; + assert!(Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()).is_ok()); + + for block_number in 0..=relay_parent.number { + constraints.dmp_remaining_messages = vec![block_number]; + + assert_eq!( + Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()), + Err(FragmentValidityError::DmpAdvancementRule), + ); + } + + candidate.commitments.to_mut().processed_downward_messages = 1; + assert!(Fragment::new(relay_parent, constraints, candidate).is_ok()); + } + + #[test] + fn fragment_ump_messages_overflow() { + let relay_parent = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let constraints = make_constraints(); + let mut candidate = make_candidate(&constraints, &relay_parent); + + let max_ump = constraints.max_ump_num_per_candidate; + + candidate + .commitments + .to_mut() + .upward_messages + .try_extend((0..max_ump + 1).map(|i| vec![i as u8])) + .unwrap(); + + assert_eq!( + Fragment::new(relay_parent, constraints, candidate), + Err(FragmentValidityError::UmpMessagesPerCandidateOverflow { + messages_allowed: max_ump, + messages_submitted: max_ump + 1, + }), + ); + } + + #[test] + fn fragment_code_upgrade_restricted() { + let relay_parent = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let mut constraints = make_constraints(); + let mut candidate = make_candidate(&constraints, &relay_parent); + + constraints.upgrade_restriction = Some(UpgradeRestriction::Present); + candidate.commitments_mut().new_validation_code = Some(ValidationCode(vec![1, 2, 3])); + + assert_eq!( + Fragment::new(relay_parent, constraints, candidate), + Err(FragmentValidityError::CodeUpgradeRestricted), + ); + } + + #[test] + fn fragment_hrmp_messages_descending_or_duplicate() { + let relay_parent = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let constraints = make_constraints(); + let mut candidate = make_candidate(&constraints, &relay_parent); + + candidate.commitments_mut().horizontal_messages = HorizontalMessages::truncate_from(vec![ + OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![1, 2, 3] }, + OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![4, 5, 6] }, + ]); + + assert_eq!( + Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()), + Err(FragmentValidityError::HrmpMessagesDescendingOrDuplicate(1)), + ); + + candidate.commitments_mut().horizontal_messages = HorizontalMessages::truncate_from(vec![ + OutboundHrmpMessage { recipient: ParaId::from(1 as u32), data: vec![1, 2, 3] }, + OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![4, 5, 6] }, + ]); + + assert_eq!( + Fragment::new(relay_parent, constraints, candidate), + Err(FragmentValidityError::HrmpMessagesDescendingOrDuplicate(1)), + ); + } +} diff --git a/polkadot/node/subsystem-util/src/inclusion_emulator/staging.rs b/polkadot/node/subsystem-util/src/inclusion_emulator/staging.rs deleted file mode 100644 index eb0632297528..000000000000 --- a/polkadot/node/subsystem-util/src/inclusion_emulator/staging.rs +++ /dev/null @@ -1,1450 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. 
- -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -//! The implementation of the inclusion emulator for the 'staging' runtime version. -//! -//! # Overview -//! -//! A set of utilities for node-side code to emulate the logic the runtime uses for checking -//! parachain blocks in order to build prospective parachains that are produced ahead of the -//! relay chain. These utilities allow the node-side to predict, with high accuracy, what -//! the relay-chain will accept in the near future. -//! -//! This module has 2 key data types: [`Constraints`] and [`Fragment`]s. [`Constraints`] -//! exhaustively define the set of valid inputs and outputs to parachain execution. A [`Fragment`] -//! indicates a parachain block, anchored to the relay-chain at a particular relay-chain block, -//! known as the relay-parent. -//! -//! ## Fragment Validity -//! -//! Every relay-parent is implicitly associated with a unique set of [`Constraints`] that describe -//! the properties that must be true for a block to be included in a direct child of that block, -//! assuming there is no intermediate parachain block pending availability. -//! -//! However, the key factor that makes asynchronously-grown prospective chains -//! possible is the fact that the relay-chain accepts candidate blocks based on whether they -//! are valid under the constraints of the present moment, not based on whether they were -//! valid at the time of construction. -//! -//! As such, [`Fragment`]s are often, but not always constructed in such a way that they are -//! invalid at first and become valid later on, as the relay chain grows. -//! -//! # Usage -//! -//! It's expected that the users of this module will be building up trees of -//! [`Fragment`]s and consistently pruning and adding to the tree. -//! -//! ## Operating Constraints -//! -//! The *operating constraints* of a `Fragment` are the constraints with which that fragment -//! was intended to comply. The operating constraints are defined as the base constraints -//! of the relay-parent of the fragment modified by the cumulative modifications of all -//! fragments between the relay-parent and the current fragment. -//! -//! What the operating constraints are, in practice, is a prediction about the state of the -//! relay-chain in the future. The relay-chain is aware of some current state, and we want to -//! make an intelligent prediction about what might be accepted in the future based on -//! prior fragments that also exist off-chain. -//! -//! ## Fragment Trees -//! -//! As the relay-chain grows, some predictions come true and others come false. -//! And new predictions get made. These three changes correspond distinctly to the -//! 3 primary operations on fragment trees. -//! -//! A fragment tree is a mental model for thinking about a forking series of predictions -//! about a single parachain. There may be one or more fragment trees per parachain. -//! -//! In expectation, most parachains will have a plausibly-unique authorship method which means that -//! 
they should really be much closer to fragment-chains, maybe with an occasional fork. -//! -//! Avoiding fragment-tree blowup is beyond the scope of this module. -//! -//! ### Pruning Fragment Trees -//! -//! When the relay-chain advances, we want to compare the new constraints of that relay-parent to -//! the roots of the fragment trees we have. There are 3 cases: -//! -//! 1. The root fragment is still valid under the new constraints. In this case, we do nothing. This -//! is the "prediction still uncertain" case. -//! -//! 2. The root fragment is invalid under the new constraints because it has been subsumed by the -//! relay-chain. In this case, we can discard the root and split & re-root the fragment tree under -//! its descendants and compare to the new constraints again. This is the "prediction came true" -//! case. -//! -//! 3. The root fragment is invalid under the new constraints because a competing parachain block -//! has been included or it would never be accepted for some other reason. In this case we can -//! discard the entire fragment tree. This is the "prediction came false" case. -//! -//! This is all a bit of a simplification because it assumes that the relay-chain advances without -//! forks and is finalized instantly. In practice, the set of fragment-trees needs to be observable -//! from the perspective of a few different possible forks of the relay-chain and not pruned -//! too eagerly. -//! -//! Note that the fragments themselves don't need to change and the only thing we care about -//! is whether the predictions they represent are still valid. -//! -//! ### Extending Fragment Trees -//! -//! As predictions fade into the past, new ones should be stacked on top. -//! -//! Every new relay-chain block is an opportunity to make a new prediction about the future. -//! Higher-level logic should select the leaves of the fragment-trees to build upon, or decide -//! whether to create a new fragment-tree. -//! -//! ### Code Upgrades -//! -//! Code upgrades are the main place where this emulation fails. The on-chain PVF upgrade scheduling -//! logic is very path-dependent and intricate, so we just assume that code upgrades -//! can't be initiated and applied within a single fragment-tree. Fragment-trees aren't deep -//! in practice, and code upgrades are fairly rare. So what's likely to happen around code -//! upgrades is that the entire fragment-tree has to get discarded at some point. -//! -//! That means a few blocks of execution time lost, which is not a big deal given that, -//! in practice, code upgrades happen at most once every few weeks. - -use polkadot_primitives::vstaging::{ - BlockNumber, CandidateCommitments, CollatorId, CollatorSignature, - Constraints as PrimitiveConstraints, Hash, HeadData, Id as ParaId, PersistedValidationData, - UpgradeRestriction, ValidationCodeHash, -}; -use std::{ - borrow::{Borrow, Cow}, - collections::HashMap, -}; - -/// Constraints on inbound HRMP channels. -#[derive(Debug, Clone, PartialEq)] -pub struct InboundHrmpLimitations { - /// An exhaustive set of all valid watermarks, sorted ascending. - pub valid_watermarks: Vec<BlockNumber>, -} - -/// Constraints on outbound HRMP channels. -#[derive(Debug, Clone, PartialEq)] -pub struct OutboundHrmpChannelLimitations { - /// The maximum bytes that can be written to the channel. - pub bytes_remaining: usize, - /// The maximum messages that can be written to the channel. - pub messages_remaining: usize, -} - -/// Constraints on the actions that can be taken by a new parachain -/// block. 
These limitations are implicitly associated with some particular -/// parachain, which should be apparent from usage. -#[derive(Debug, Clone, PartialEq)] -pub struct Constraints { - /// The minimum relay-parent number accepted under these constraints. - pub min_relay_parent_number: BlockNumber, - /// The maximum Proof-of-Validity size allowed, in bytes. - pub max_pov_size: usize, - /// The maximum new validation code size allowed, in bytes. - pub max_code_size: usize, - /// The amount of UMP messages remaining. - pub ump_remaining: usize, - /// The amount of UMP bytes remaining. - pub ump_remaining_bytes: usize, - /// The maximum number of UMP messages allowed per candidate. - pub max_ump_num_per_candidate: usize, - /// Remaining DMP queue. Only includes sent-at block numbers. - pub dmp_remaining_messages: Vec<BlockNumber>, - /// The limitations of all registered inbound HRMP channels. - pub hrmp_inbound: InboundHrmpLimitations, - /// The limitations of all registered outbound HRMP channels. - pub hrmp_channels_out: HashMap<ParaId, OutboundHrmpChannelLimitations>, - /// The maximum number of HRMP messages allowed per candidate. - pub max_hrmp_num_per_candidate: usize, - /// The required parent head-data of the parachain. - pub required_parent: HeadData, - /// The expected validation-code-hash of this parachain. - pub validation_code_hash: ValidationCodeHash, - /// The code upgrade restriction signal as-of this parachain. - pub upgrade_restriction: Option<UpgradeRestriction>, - /// The future validation code hash, if any, and at what relay-parent - /// number the upgrade would be minimally applied. - pub future_validation_code: Option<(BlockNumber, ValidationCodeHash)>, -} - -impl From<PrimitiveConstraints> for Constraints { - fn from(c: PrimitiveConstraints) -> Self { - Constraints { - min_relay_parent_number: c.min_relay_parent_number, - max_pov_size: c.max_pov_size as _, - max_code_size: c.max_code_size as _, - ump_remaining: c.ump_remaining as _, - ump_remaining_bytes: c.ump_remaining_bytes as _, - max_ump_num_per_candidate: c.max_ump_num_per_candidate as _, - dmp_remaining_messages: c.dmp_remaining_messages, - hrmp_inbound: InboundHrmpLimitations { - valid_watermarks: c.hrmp_inbound.valid_watermarks, - }, - hrmp_channels_out: c - .hrmp_channels_out - .into_iter() - .map(|(para_id, limits)| { - ( - para_id, - OutboundHrmpChannelLimitations { - bytes_remaining: limits.bytes_remaining as _, - messages_remaining: limits.messages_remaining as _, - }, - ) - }) - .collect(), - max_hrmp_num_per_candidate: c.max_hrmp_num_per_candidate as _, - required_parent: c.required_parent, - validation_code_hash: c.validation_code_hash, - upgrade_restriction: c.upgrade_restriction, - future_validation_code: c.future_validation_code, - } - } -} - -/// Kinds of errors that can occur when modifying constraints. -#[derive(Debug, Clone, PartialEq)] -pub enum ModificationError { - /// The HRMP watermark is not allowed. - DisallowedHrmpWatermark(BlockNumber), - /// No such HRMP outbound channel. - NoSuchHrmpChannel(ParaId), - /// Too many messages submitted to HRMP channel. - HrmpMessagesOverflow { - /// The ID of the recipient. - para_id: ParaId, - /// The amount of remaining messages in the capacity of the channel. - messages_remaining: usize, - /// The amount of messages submitted to the channel. - messages_submitted: usize, - }, - /// Too many bytes submitted to HRMP channel. - HrmpBytesOverflow { - /// The ID of the recipient. - para_id: ParaId, - /// The amount of remaining bytes in the capacity of the channel. - bytes_remaining: usize, - /// The amount of bytes submitted to the channel. 
- bytes_submitted: usize, - }, - /// Too many messages submitted to UMP. - UmpMessagesOverflow { - /// The amount of remaining messages in the capacity of UMP. - messages_remaining: usize, - /// The amount of messages submitted to UMP. - messages_submitted: usize, - }, - /// Too many bytes submitted to UMP. - UmpBytesOverflow { - /// The amount of remaining bytes in the capacity of UMP. - bytes_remaining: usize, - /// The amount of bytes submitted to UMP. - bytes_submitted: usize, - }, - /// Too many messages processed from DMP. - DmpMessagesUnderflow { - /// The amount of messages waiting to be processed from DMP. - messages_remaining: usize, - /// The amount of messages processed. - messages_processed: usize, - }, - /// No validation code upgrade to apply. - AppliedNonexistentCodeUpgrade, -} - -impl Constraints { - /// Check modifications against constraints. - pub fn check_modifications( - &self, - modifications: &ConstraintModifications, - ) -> Result<(), ModificationError> { - if let Some(HrmpWatermarkUpdate::Trunk(hrmp_watermark)) = modifications.hrmp_watermark { - // head updates are always valid. - if self.hrmp_inbound.valid_watermarks.iter().all(|w| w != &hrmp_watermark) { - return Err(ModificationError::DisallowedHrmpWatermark(hrmp_watermark)) - } - } - - for (id, outbound_hrmp_mod) in &modifications.outbound_hrmp { - if let Some(outbound) = self.hrmp_channels_out.get(&id) { - outbound.bytes_remaining.checked_sub(outbound_hrmp_mod.bytes_submitted).ok_or( - ModificationError::HrmpBytesOverflow { - para_id: *id, - bytes_remaining: outbound.bytes_remaining, - bytes_submitted: outbound_hrmp_mod.bytes_submitted, - }, - )?; - - outbound - .messages_remaining - .checked_sub(outbound_hrmp_mod.messages_submitted) - .ok_or(ModificationError::HrmpMessagesOverflow { - para_id: *id, - messages_remaining: outbound.messages_remaining, - messages_submitted: outbound_hrmp_mod.messages_submitted, - })?; - } else { - return Err(ModificationError::NoSuchHrmpChannel(*id)) - } - } - - self.ump_remaining.checked_sub(modifications.ump_messages_sent).ok_or( - ModificationError::UmpMessagesOverflow { - messages_remaining: self.ump_remaining, - messages_submitted: modifications.ump_messages_sent, - }, - )?; - - self.ump_remaining_bytes.checked_sub(modifications.ump_bytes_sent).ok_or( - ModificationError::UmpBytesOverflow { - bytes_remaining: self.ump_remaining_bytes, - bytes_submitted: modifications.ump_bytes_sent, - }, - )?; - - self.dmp_remaining_messages - .len() - .checked_sub(modifications.dmp_messages_processed) - .ok_or(ModificationError::DmpMessagesUnderflow { - messages_remaining: self.dmp_remaining_messages.len(), - messages_processed: modifications.dmp_messages_processed, - })?; - - if self.future_validation_code.is_none() && modifications.code_upgrade_applied { - return Err(ModificationError::AppliedNonexistentCodeUpgrade) - } - - Ok(()) - } - - /// Apply modifications to these constraints. If this succeeds, it passes - /// all sanity-checks. - pub fn apply_modifications( - &self, - modifications: &ConstraintModifications, - ) -> Result<Self, ModificationError> { - let mut new = self.clone(); - - if let Some(required_parent) = modifications.required_parent.as_ref() { - new.required_parent = required_parent.clone(); - } - - if let Some(ref hrmp_watermark) = modifications.hrmp_watermark { - match new.hrmp_inbound.valid_watermarks.binary_search(&hrmp_watermark.watermark()) { - Ok(pos) => { - // Exact match, so this is OK in all cases. 
- let _ = new.hrmp_inbound.valid_watermarks.drain(..pos + 1); - }, - Err(pos) => match hrmp_watermark { - HrmpWatermarkUpdate::Head(_) => { - // Updates to Head are always OK. - let _ = new.hrmp_inbound.valid_watermarks.drain(..pos); - }, - HrmpWatermarkUpdate::Trunk(n) => { - // Trunk update landing on disallowed watermark is not OK. - return Err(ModificationError::DisallowedHrmpWatermark(*n)) - }, - }, - } - } - - for (id, outbound_hrmp_mod) in &modifications.outbound_hrmp { - if let Some(outbound) = new.hrmp_channels_out.get_mut(&id) { - outbound.bytes_remaining = outbound - .bytes_remaining - .checked_sub(outbound_hrmp_mod.bytes_submitted) - .ok_or(ModificationError::HrmpBytesOverflow { - para_id: *id, - bytes_remaining: outbound.bytes_remaining, - bytes_submitted: outbound_hrmp_mod.bytes_submitted, - })?; - - outbound.messages_remaining = outbound - .messages_remaining - .checked_sub(outbound_hrmp_mod.messages_submitted) - .ok_or(ModificationError::HrmpMessagesOverflow { - para_id: *id, - messages_remaining: outbound.messages_remaining, - messages_submitted: outbound_hrmp_mod.messages_submitted, - })?; - } else { - return Err(ModificationError::NoSuchHrmpChannel(*id)) - } - } - - new.ump_remaining = new.ump_remaining.checked_sub(modifications.ump_messages_sent).ok_or( - ModificationError::UmpMessagesOverflow { - messages_remaining: new.ump_remaining, - messages_submitted: modifications.ump_messages_sent, - }, - )?; - - new.ump_remaining_bytes = new - .ump_remaining_bytes - .checked_sub(modifications.ump_bytes_sent) - .ok_or(ModificationError::UmpBytesOverflow { - bytes_remaining: new.ump_remaining_bytes, - bytes_submitted: modifications.ump_bytes_sent, - })?; - - if modifications.dmp_messages_processed > new.dmp_remaining_messages.len() { - return Err(ModificationError::DmpMessagesUnderflow { - messages_remaining: new.dmp_remaining_messages.len(), - messages_processed: modifications.dmp_messages_processed, - }) - } else { - new.dmp_remaining_messages = - new.dmp_remaining_messages[modifications.dmp_messages_processed..].to_vec(); - } - - if modifications.code_upgrade_applied { - new.validation_code_hash = new - .future_validation_code - .take() - .ok_or(ModificationError::AppliedNonexistentCodeUpgrade)? - .1; - } - - Ok(new) - } -} - -/// Information about a relay-chain block. -#[derive(Debug, Clone, PartialEq)] -pub struct RelayChainBlockInfo { - /// The hash of the relay-chain block. - pub hash: Hash, - /// The number of the relay-chain block. - pub number: BlockNumber, - /// The storage-root of the relay-chain block. - pub storage_root: Hash, -} - -/// An update to outbound HRMP channels. -#[derive(Debug, Clone, PartialEq, Default)] -pub struct OutboundHrmpChannelModification { - /// The number of bytes submitted to the channel. - pub bytes_submitted: usize, - /// The number of messages submitted to the channel. - pub messages_submitted: usize, -} - -/// An update to the HRMP Watermark. -#[derive(Debug, Clone, PartialEq)] -pub enum HrmpWatermarkUpdate { - /// This is an update placing the watermark at the head of the chain, - /// which is always legal. - Head(BlockNumber), - /// This is an update placing the watermark behind the head of the - /// chain, which is only legal if it lands on a block where messages - /// were queued. 
- Trunk(BlockNumber), -} - -impl HrmpWatermarkUpdate { - fn watermark(&self) -> BlockNumber { - match *self { - HrmpWatermarkUpdate::Head(n) | HrmpWatermarkUpdate::Trunk(n) => n, - } - } -} - -/// Modifications to constraints as a result of prospective candidates. -#[derive(Debug, Clone, PartialEq)] -pub struct ConstraintModifications { - /// The required parent head to build upon. - pub required_parent: Option<HeadData>, - /// The new HRMP watermark. - pub hrmp_watermark: Option<HrmpWatermarkUpdate>, - /// Outbound HRMP channel modifications. - pub outbound_hrmp: HashMap<ParaId, OutboundHrmpChannelModification>, - /// The amount of UMP messages sent. - pub ump_messages_sent: usize, - /// The amount of UMP bytes sent. - pub ump_bytes_sent: usize, - /// The amount of DMP messages processed. - pub dmp_messages_processed: usize, - /// Whether a pending code upgrade has been applied. - pub code_upgrade_applied: bool, -} - -impl ConstraintModifications { - /// The 'identity' modifications: these can be applied to - /// any constraints and yield the exact same result. - pub fn identity() -> Self { - ConstraintModifications { - required_parent: None, - hrmp_watermark: None, - outbound_hrmp: HashMap::new(), - ump_messages_sent: 0, - ump_bytes_sent: 0, - dmp_messages_processed: 0, - code_upgrade_applied: false, - } - } - - /// Stack other modifications on top of these. - /// - /// This does no sanity-checking, so if `other` is garbage relative - /// to `self`, then the new value will be garbage as well. - /// - /// This is an addition which is not commutative. - pub fn stack(&mut self, other: &Self) { - if let Some(ref new_parent) = other.required_parent { - self.required_parent = Some(new_parent.clone()); - } - if let Some(ref new_hrmp_watermark) = other.hrmp_watermark { - self.hrmp_watermark = Some(new_hrmp_watermark.clone()); - } - - for (id, mods) in &other.outbound_hrmp { - let record = self.outbound_hrmp.entry(*id).or_default(); - record.messages_submitted += mods.messages_submitted; - record.bytes_submitted += mods.bytes_submitted; - } - - self.ump_messages_sent += other.ump_messages_sent; - self.ump_bytes_sent += other.ump_bytes_sent; - self.dmp_messages_processed += other.dmp_messages_processed; - self.code_upgrade_applied |= other.code_upgrade_applied; - } -} - -/// The prospective candidate. -/// -/// This comprises the key information that represents a candidate -/// without pinning it to a particular session. For example, everything -/// to do with the collator's signature and commitments is represented -/// here. But the erasure-root is not. This means that prospective candidates -/// are not correlated to any session in particular. -#[derive(Debug, Clone, PartialEq)] -pub struct ProspectiveCandidate<'a> { - /// The commitments to the output of the execution. - pub commitments: Cow<'a, CandidateCommitments>, - /// The collator that created the candidate. - pub collator: CollatorId, - /// The signature of the collator on the payload. - pub collator_signature: CollatorSignature, - /// The persisted validation data used to create the candidate. - pub persisted_validation_data: PersistedValidationData, - /// The hash of the PoV. - pub pov_hash: Hash, - /// The validation code hash used by the candidate. - pub validation_code_hash: ValidationCodeHash, -} - -impl<'a> ProspectiveCandidate<'a> { - fn into_owned(self) -> ProspectiveCandidate<'static> { - ProspectiveCandidate { commitments: Cow::Owned(self.commitments.into_owned()), ..self } - } - - /// Partially clone the prospective candidate, but borrow the - /// parts which are potentially heavy. 
- pub fn partial_clone(&self) -> ProspectiveCandidate { - ProspectiveCandidate { - commitments: Cow::Borrowed(self.commitments.borrow()), - collator: self.collator.clone(), - collator_signature: self.collator_signature.clone(), - persisted_validation_data: self.persisted_validation_data.clone(), - pov_hash: self.pov_hash, - validation_code_hash: self.validation_code_hash, - } - } -} - -#[cfg(test)] -impl ProspectiveCandidate<'static> { - fn commitments_mut(&mut self) -> &mut CandidateCommitments { - self.commitments.to_mut() - } -} - -/// Kinds of errors with the validity of a fragment. -#[derive(Debug, Clone, PartialEq)] -pub enum FragmentValidityError { - /// The validation code of the candidate doesn't match the - /// operating constraints. - /// - /// Expected, Got - ValidationCodeMismatch(ValidationCodeHash, ValidationCodeHash), - /// The persisted-validation-data doesn't match. - /// - /// Expected, Got - PersistedValidationDataMismatch(PersistedValidationData, PersistedValidationData), - /// The outputs of the candidate are invalid under the operating - /// constraints. - OutputsInvalid(ModificationError), - /// New validation code size too big. - /// - /// Max allowed, new. - CodeSizeTooLarge(usize, usize), - /// Relay parent too old. - /// - /// Min allowed, current. - RelayParentTooOld(BlockNumber, BlockNumber), - /// Para is required to process at least one DMP message from the queue. - DmpAdvancementRule, - /// Too many upward messages submitted. - UmpMessagesPerCandidateOverflow { - /// The amount of messages a single candidate can submit. - messages_allowed: usize, - /// The amount of upward messages sent. - messages_submitted: usize, - }, - /// Too many messages submitted to all HRMP channels. - HrmpMessagesPerCandidateOverflow { - /// The amount of messages a single candidate can submit. - messages_allowed: usize, - /// The amount of messages sent to all HRMP channels. - messages_submitted: usize, - }, - /// Code upgrade not allowed. - CodeUpgradeRestricted, - /// HRMP messages are not ascending or are duplicate. - /// - /// The `usize` is the index into the outbound HRMP messages of - /// the candidate. - HrmpMessagesDescendingOrDuplicate(usize), -} - -/// A parachain fragment, representing another prospective parachain block. -/// -/// This is a type which guarantees that the candidate is valid under the -/// operating constraints. -#[derive(Debug, Clone, PartialEq)] -pub struct Fragment<'a> { - /// The new relay-parent. - relay_parent: RelayChainBlockInfo, - /// The constraints this fragment is operating under. - operating_constraints: Constraints, - /// The core information about the prospective candidate. - candidate: ProspectiveCandidate<'a>, - /// Modifications to the constraints based on the outputs of - /// the candidate. - modifications: ConstraintModifications, -} - -impl<'a> Fragment<'a> { - /// Create a new fragment. - /// - /// This fails if the fragment isn't in line with the operating - /// constraints. That is, either its inputs or its outputs fail - /// checks against the constraints. - /// - /// This doesn't check that the collator signature is valid or - /// whether the PoV is small enough. 
- pub fn new( - relay_parent: RelayChainBlockInfo, - operating_constraints: Constraints, - candidate: ProspectiveCandidate<'a>, - ) -> Result<Self, FragmentValidityError> { - let modifications = { - let commitments = &candidate.commitments; - ConstraintModifications { - required_parent: Some(commitments.head_data.clone()), - hrmp_watermark: Some({ - if commitments.hrmp_watermark == relay_parent.number { - HrmpWatermarkUpdate::Head(commitments.hrmp_watermark) - } else { - HrmpWatermarkUpdate::Trunk(commitments.hrmp_watermark) - } - }), - outbound_hrmp: { - let mut outbound_hrmp = HashMap::<_, OutboundHrmpChannelModification>::new(); - - let mut last_recipient = None::<ParaId>; - for (i, message) in commitments.horizontal_messages.iter().enumerate() { - if let Some(last) = last_recipient { - if last >= message.recipient { - return Err( - FragmentValidityError::HrmpMessagesDescendingOrDuplicate(i), - ) - } - } - - last_recipient = Some(message.recipient); - let record = outbound_hrmp.entry(message.recipient).or_default(); - - record.bytes_submitted += message.data.len(); - record.messages_submitted += 1; - } - - outbound_hrmp - }, - ump_messages_sent: commitments.upward_messages.len(), - ump_bytes_sent: commitments.upward_messages.iter().map(|msg| msg.len()).sum(), - dmp_messages_processed: commitments.processed_downward_messages as _, - code_upgrade_applied: operating_constraints - .future_validation_code - .map_or(false, |(at, _)| relay_parent.number >= at), - } - }; - - validate_against_constraints( - &operating_constraints, - &relay_parent, - &candidate, - &modifications, - )?; - - Ok(Fragment { relay_parent, operating_constraints, candidate, modifications }) - } - - /// Access the relay parent information. - pub fn relay_parent(&self) -> &RelayChainBlockInfo { - &self.relay_parent - } - - /// Access the operating constraints - pub fn operating_constraints(&self) -> &Constraints { - &self.operating_constraints - } - - /// Access the underlying prospective candidate. - pub fn candidate(&self) -> &ProspectiveCandidate<'a> { - &self.candidate - } - - /// Modifications to constraints based on the outputs of the candidate. - pub fn constraint_modifications(&self) -> &ConstraintModifications { - &self.modifications - } - - /// Convert the fragment into an owned variant. - pub fn into_owned(self) -> Fragment<'static> { - Fragment { candidate: self.candidate.into_owned(), ..self } - } - - /// Validate this fragment against some set of constraints - /// instead of the operating constraints. 
- pub fn validate_against_constraints( - &self, - constraints: &Constraints, - ) -> Result<(), FragmentValidityError> { - validate_against_constraints( - constraints, - &self.relay_parent, - &self.candidate, - &self.modifications, - ) - } -} - -fn validate_against_constraints( - constraints: &Constraints, - relay_parent: &RelayChainBlockInfo, - candidate: &ProspectiveCandidate, - modifications: &ConstraintModifications, -) -> Result<(), FragmentValidityError> { - let expected_pvd = PersistedValidationData { - parent_head: constraints.required_parent.clone(), - relay_parent_number: relay_parent.number, - relay_parent_storage_root: relay_parent.storage_root, - max_pov_size: constraints.max_pov_size as u32, - }; - - if expected_pvd != candidate.persisted_validation_data { - return Err(FragmentValidityError::PersistedValidationDataMismatch( - expected_pvd, - candidate.persisted_validation_data.clone(), - )) - } - - if constraints.validation_code_hash != candidate.validation_code_hash { - return Err(FragmentValidityError::ValidationCodeMismatch( - constraints.validation_code_hash, - candidate.validation_code_hash, - )) - } - - if relay_parent.number < constraints.min_relay_parent_number { - return Err(FragmentValidityError::RelayParentTooOld( - constraints.min_relay_parent_number, - relay_parent.number, - )) - } - - if candidate.commitments.new_validation_code.is_some() { - match constraints.upgrade_restriction { - None => {}, - Some(UpgradeRestriction::Present) => - return Err(FragmentValidityError::CodeUpgradeRestricted), - } - } - - let announced_code_size = candidate - .commitments - .new_validation_code - .as_ref() - .map_or(0, |code| code.0.len()); - - if announced_code_size > constraints.max_code_size { - return Err(FragmentValidityError::CodeSizeTooLarge( - constraints.max_code_size, - announced_code_size, - )) - } - - if modifications.dmp_messages_processed == 0 { - if constraints - .dmp_remaining_messages - .get(0) - .map_or(false, |&msg_sent_at| msg_sent_at <= relay_parent.number) - { - return Err(FragmentValidityError::DmpAdvancementRule) - } - } - - if candidate.commitments.horizontal_messages.len() > constraints.max_hrmp_num_per_candidate { - return Err(FragmentValidityError::HrmpMessagesPerCandidateOverflow { - messages_allowed: constraints.max_hrmp_num_per_candidate, - messages_submitted: candidate.commitments.horizontal_messages.len(), - }) - } - - if candidate.commitments.upward_messages.len() > constraints.max_ump_num_per_candidate { - return Err(FragmentValidityError::UmpMessagesPerCandidateOverflow { - messages_allowed: constraints.max_ump_num_per_candidate, - messages_submitted: candidate.commitments.upward_messages.len(), - }) - } - - constraints - .check_modifications(&modifications) - .map_err(FragmentValidityError::OutputsInvalid) -} - -#[cfg(test)] -mod tests { - use super::*; - use polkadot_primitives::vstaging::{ - CollatorPair, HorizontalMessages, OutboundHrmpMessage, ValidationCode, - }; - use sp_application_crypto::Pair; - - #[test] - fn stack_modifications() { - let para_a = ParaId::from(1u32); - let para_b = ParaId::from(2u32); - let para_c = ParaId::from(3u32); - - let a = ConstraintModifications { - required_parent: None, - hrmp_watermark: None, - outbound_hrmp: { - let mut map = HashMap::new(); - map.insert( - para_a, - OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 }, - ); - - map.insert( - para_b, - OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 }, - ); - - map - }, - ump_messages_sent: 6, - 
ump_bytes_sent: 1000, - dmp_messages_processed: 5, - code_upgrade_applied: true, - }; - - let b = ConstraintModifications { - required_parent: None, - hrmp_watermark: None, - outbound_hrmp: { - let mut map = HashMap::new(); - map.insert( - para_b, - OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 }, - ); - - map.insert( - para_c, - OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 }, - ); - - map - }, - ump_messages_sent: 6, - ump_bytes_sent: 1000, - dmp_messages_processed: 5, - code_upgrade_applied: true, - }; - - let mut c = a.clone(); - c.stack(&b); - - assert_eq!( - c, - ConstraintModifications { - required_parent: None, - hrmp_watermark: None, - outbound_hrmp: { - let mut map = HashMap::new(); - map.insert( - para_a, - OutboundHrmpChannelModification { - bytes_submitted: 100, - messages_submitted: 5, - }, - ); - - map.insert( - para_b, - OutboundHrmpChannelModification { - bytes_submitted: 200, - messages_submitted: 10, - }, - ); - - map.insert( - para_c, - OutboundHrmpChannelModification { - bytes_submitted: 100, - messages_submitted: 5, - }, - ); - - map - }, - ump_messages_sent: 12, - ump_bytes_sent: 2000, - dmp_messages_processed: 10, - code_upgrade_applied: true, - }, - ); - - let mut d = ConstraintModifications::identity(); - d.stack(&a); - d.stack(&b); - - assert_eq!(c, d); - } - - fn make_constraints() -> Constraints { - let para_a = ParaId::from(1u32); - let para_b = ParaId::from(2u32); - let para_c = ParaId::from(3u32); - - Constraints { - min_relay_parent_number: 5, - max_pov_size: 1000, - max_code_size: 1000, - ump_remaining: 10, - ump_remaining_bytes: 1024, - max_ump_num_per_candidate: 5, - dmp_remaining_messages: Vec::new(), - hrmp_inbound: InboundHrmpLimitations { valid_watermarks: vec![6, 8] }, - hrmp_channels_out: { - let mut map = HashMap::new(); - - map.insert( - para_a, - OutboundHrmpChannelLimitations { messages_remaining: 5, bytes_remaining: 512 }, - ); - - map.insert( - para_b, - OutboundHrmpChannelLimitations { - messages_remaining: 10, - bytes_remaining: 1024, - }, - ); - - map.insert( - para_c, - OutboundHrmpChannelLimitations { messages_remaining: 1, bytes_remaining: 128 }, - ); - - map - }, - max_hrmp_num_per_candidate: 5, - required_parent: HeadData::from(vec![1, 2, 3]), - validation_code_hash: ValidationCode(vec![4, 5, 6]).hash(), - upgrade_restriction: None, - future_validation_code: None, - } - } - - #[test] - fn constraints_disallowed_trunk_watermark() { - let constraints = make_constraints(); - let mut modifications = ConstraintModifications::identity(); - modifications.hrmp_watermark = Some(HrmpWatermarkUpdate::Trunk(7)); - - assert_eq!( - constraints.check_modifications(&modifications), - Err(ModificationError::DisallowedHrmpWatermark(7)), - ); - - assert_eq!( - constraints.apply_modifications(&modifications), - Err(ModificationError::DisallowedHrmpWatermark(7)), - ); - } - - #[test] - fn constraints_always_allow_head_watermark() { - let constraints = make_constraints(); - let mut modifications = ConstraintModifications::identity(); - modifications.hrmp_watermark = Some(HrmpWatermarkUpdate::Head(7)); - - assert!(constraints.check_modifications(&modifications).is_ok()); - - let new_constraints = constraints.apply_modifications(&modifications).unwrap(); - assert_eq!(new_constraints.hrmp_inbound.valid_watermarks, vec![8]); - } - - #[test] - fn constraints_no_such_hrmp_channel() { - let constraints = make_constraints(); - let mut modifications = ConstraintModifications::identity(); - let bad_para 
= ParaId::from(100u32); - modifications.outbound_hrmp.insert( - bad_para, - OutboundHrmpChannelModification { bytes_submitted: 0, messages_submitted: 0 }, - ); - - assert_eq!( - constraints.check_modifications(&modifications), - Err(ModificationError::NoSuchHrmpChannel(bad_para)), - ); - - assert_eq!( - constraints.apply_modifications(&modifications), - Err(ModificationError::NoSuchHrmpChannel(bad_para)), - ); - } - - #[test] - fn constraints_hrmp_messages_overflow() { - let constraints = make_constraints(); - let mut modifications = ConstraintModifications::identity(); - let para_a = ParaId::from(1u32); - modifications.outbound_hrmp.insert( - para_a, - OutboundHrmpChannelModification { bytes_submitted: 0, messages_submitted: 6 }, - ); - - assert_eq!( - constraints.check_modifications(&modifications), - Err(ModificationError::HrmpMessagesOverflow { - para_id: para_a, - messages_remaining: 5, - messages_submitted: 6, - }), - ); - - assert_eq!( - constraints.apply_modifications(&modifications), - Err(ModificationError::HrmpMessagesOverflow { - para_id: para_a, - messages_remaining: 5, - messages_submitted: 6, - }), - ); - } - - #[test] - fn constraints_hrmp_bytes_overflow() { - let constraints = make_constraints(); - let mut modifications = ConstraintModifications::identity(); - let para_a = ParaId::from(1u32); - modifications.outbound_hrmp.insert( - para_a, - OutboundHrmpChannelModification { bytes_submitted: 513, messages_submitted: 1 }, - ); - - assert_eq!( - constraints.check_modifications(&modifications), - Err(ModificationError::HrmpBytesOverflow { - para_id: para_a, - bytes_remaining: 512, - bytes_submitted: 513, - }), - ); - - assert_eq!( - constraints.apply_modifications(&modifications), - Err(ModificationError::HrmpBytesOverflow { - para_id: para_a, - bytes_remaining: 512, - bytes_submitted: 513, - }), - ); - } - - #[test] - fn constraints_ump_messages_overflow() { - let constraints = make_constraints(); - let mut modifications = ConstraintModifications::identity(); - modifications.ump_messages_sent = 11; - - assert_eq!( - constraints.check_modifications(&modifications), - Err(ModificationError::UmpMessagesOverflow { - messages_remaining: 10, - messages_submitted: 11, - }), - ); - - assert_eq!( - constraints.apply_modifications(&modifications), - Err(ModificationError::UmpMessagesOverflow { - messages_remaining: 10, - messages_submitted: 11, - }), - ); - } - - #[test] - fn constraints_ump_bytes_overflow() { - let constraints = make_constraints(); - let mut modifications = ConstraintModifications::identity(); - modifications.ump_bytes_sent = 1025; - - assert_eq!( - constraints.check_modifications(&modifications), - Err(ModificationError::UmpBytesOverflow { - bytes_remaining: 1024, - bytes_submitted: 1025, - }), - ); - - assert_eq!( - constraints.apply_modifications(&modifications), - Err(ModificationError::UmpBytesOverflow { - bytes_remaining: 1024, - bytes_submitted: 1025, - }), - ); - } - - #[test] - fn constraints_dmp_messages() { - let mut constraints = make_constraints(); - let mut modifications = ConstraintModifications::identity(); - assert!(constraints.check_modifications(&modifications).is_ok()); - assert!(constraints.apply_modifications(&modifications).is_ok()); - - modifications.dmp_messages_processed = 6; - - assert_eq!( - constraints.check_modifications(&modifications), - Err(ModificationError::DmpMessagesUnderflow { - messages_remaining: 0, - messages_processed: 6, - }), - ); - - assert_eq!( - constraints.apply_modifications(&modifications), - 
Err(ModificationError::DmpMessagesUnderflow { - messages_remaining: 0, - messages_processed: 6, - }), - ); - - constraints.dmp_remaining_messages = vec![1, 4, 8, 10]; - modifications.dmp_messages_processed = 2; - assert!(constraints.check_modifications(&modifications).is_ok()); - let constraints = constraints - .apply_modifications(&modifications) - .expect("modifications are valid"); - - assert_eq!(&constraints.dmp_remaining_messages, &[8, 10]); - } - - #[test] - fn constraints_nonexistent_code_upgrade() { - let constraints = make_constraints(); - let mut modifications = ConstraintModifications::identity(); - modifications.code_upgrade_applied = true; - - assert_eq!( - constraints.check_modifications(&modifications), - Err(ModificationError::AppliedNonexistentCodeUpgrade), - ); - - assert_eq!( - constraints.apply_modifications(&modifications), - Err(ModificationError::AppliedNonexistentCodeUpgrade), - ); - } - - fn make_candidate( - constraints: &Constraints, - relay_parent: &RelayChainBlockInfo, - ) -> ProspectiveCandidate<'static> { - let collator_pair = CollatorPair::generate().0; - let collator = collator_pair.public(); - - let sig = collator_pair.sign(b"blabla".as_slice()); - - ProspectiveCandidate { - commitments: Cow::Owned(CandidateCommitments { - upward_messages: Default::default(), - horizontal_messages: Default::default(), - new_validation_code: None, - head_data: HeadData::from(vec![1, 2, 3, 4, 5]), - processed_downward_messages: 0, - hrmp_watermark: relay_parent.number, - }), - collator, - collator_signature: sig, - persisted_validation_data: PersistedValidationData { - parent_head: constraints.required_parent.clone(), - relay_parent_number: relay_parent.number, - relay_parent_storage_root: relay_parent.storage_root, - max_pov_size: constraints.max_pov_size as u32, - }, - pov_hash: Hash::repeat_byte(1), - validation_code_hash: constraints.validation_code_hash, - } - } - - #[test] - fn fragment_validation_code_mismatch() { - let relay_parent = RelayChainBlockInfo { - number: 6, - hash: Hash::repeat_byte(0x0a), - storage_root: Hash::repeat_byte(0xff), - }; - - let constraints = make_constraints(); - let mut candidate = make_candidate(&constraints, &relay_parent); - - let expected_code = constraints.validation_code_hash; - let got_code = ValidationCode(vec![9, 9, 9]).hash(); - - candidate.validation_code_hash = got_code; - - assert_eq!( - Fragment::new(relay_parent, constraints, candidate), - Err(FragmentValidityError::ValidationCodeMismatch(expected_code, got_code,)), - ) - } - - #[test] - fn fragment_pvd_mismatch() { - let relay_parent = RelayChainBlockInfo { - number: 6, - hash: Hash::repeat_byte(0x0a), - storage_root: Hash::repeat_byte(0xff), - }; - - let relay_parent_b = RelayChainBlockInfo { - number: 6, - hash: Hash::repeat_byte(0x0b), - storage_root: Hash::repeat_byte(0xee), - }; - - let constraints = make_constraints(); - let candidate = make_candidate(&constraints, &relay_parent); - - let expected_pvd = PersistedValidationData { - parent_head: constraints.required_parent.clone(), - relay_parent_number: relay_parent_b.number, - relay_parent_storage_root: relay_parent_b.storage_root, - max_pov_size: constraints.max_pov_size as u32, - }; - - let got_pvd = candidate.persisted_validation_data.clone(); - - assert_eq!( - Fragment::new(relay_parent_b, constraints, candidate), - Err(FragmentValidityError::PersistedValidationDataMismatch(expected_pvd, got_pvd,)), - ); - } - - #[test] - fn fragment_code_size_too_large() { - let relay_parent = RelayChainBlockInfo { - number: 6, - 
hash: Hash::repeat_byte(0x0a), - storage_root: Hash::repeat_byte(0xff), - }; - - let constraints = make_constraints(); - let mut candidate = make_candidate(&constraints, &relay_parent); - - let max_code_size = constraints.max_code_size; - candidate.commitments_mut().new_validation_code = Some(vec![0; max_code_size + 1].into()); - - assert_eq!( - Fragment::new(relay_parent, constraints, candidate), - Err(FragmentValidityError::CodeSizeTooLarge(max_code_size, max_code_size + 1,)), - ); - } - - #[test] - fn fragment_relay_parent_too_old() { - let relay_parent = RelayChainBlockInfo { - number: 3, - hash: Hash::repeat_byte(0x0a), - storage_root: Hash::repeat_byte(0xff), - }; - - let constraints = make_constraints(); - let candidate = make_candidate(&constraints, &relay_parent); - - assert_eq!( - Fragment::new(relay_parent, constraints, candidate), - Err(FragmentValidityError::RelayParentTooOld(5, 3,)), - ); - } - - #[test] - fn fragment_hrmp_messages_overflow() { - let relay_parent = RelayChainBlockInfo { - number: 6, - hash: Hash::repeat_byte(0x0a), - storage_root: Hash::repeat_byte(0xff), - }; - - let constraints = make_constraints(); - let mut candidate = make_candidate(&constraints, &relay_parent); - - let max_hrmp = constraints.max_hrmp_num_per_candidate; - - candidate - .commitments_mut() - .horizontal_messages - .try_extend((0..max_hrmp + 1).map(|i| OutboundHrmpMessage { - recipient: ParaId::from(i as u32), - data: vec![1, 2, 3], - })) - .unwrap(); - - assert_eq!( - Fragment::new(relay_parent, constraints, candidate), - Err(FragmentValidityError::HrmpMessagesPerCandidateOverflow { - messages_allowed: max_hrmp, - messages_submitted: max_hrmp + 1, - }), - ); - } - - #[test] - fn fragment_dmp_advancement_rule() { - let relay_parent = RelayChainBlockInfo { - number: 6, - hash: Hash::repeat_byte(0x0a), - storage_root: Hash::repeat_byte(0xff), - }; - - let mut constraints = make_constraints(); - let mut candidate = make_candidate(&constraints, &relay_parent); - - // Empty dmp queue is ok. - assert!(Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()).is_ok()); - // Unprocessed message that was sent later is ok. 
- constraints.dmp_remaining_messages = vec![relay_parent.number + 1]; - assert!(Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()).is_ok()); - - for block_number in 0..=relay_parent.number { - constraints.dmp_remaining_messages = vec![block_number]; - - assert_eq!( - Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()), - Err(FragmentValidityError::DmpAdvancementRule), - ); - } - - candidate.commitments.to_mut().processed_downward_messages = 1; - assert!(Fragment::new(relay_parent, constraints, candidate).is_ok()); - } - - #[test] - fn fragment_ump_messages_overflow() { - let relay_parent = RelayChainBlockInfo { - number: 6, - hash: Hash::repeat_byte(0x0a), - storage_root: Hash::repeat_byte(0xff), - }; - - let constraints = make_constraints(); - let mut candidate = make_candidate(&constraints, &relay_parent); - - let max_ump = constraints.max_ump_num_per_candidate; - - candidate - .commitments - .to_mut() - .upward_messages - .try_extend((0..max_ump + 1).map(|i| vec![i as u8])) - .unwrap(); - - assert_eq!( - Fragment::new(relay_parent, constraints, candidate), - Err(FragmentValidityError::UmpMessagesPerCandidateOverflow { - messages_allowed: max_ump, - messages_submitted: max_ump + 1, - }), - ); - } - - #[test] - fn fragment_code_upgrade_restricted() { - let relay_parent = RelayChainBlockInfo { - number: 6, - hash: Hash::repeat_byte(0x0a), - storage_root: Hash::repeat_byte(0xff), - }; - - let mut constraints = make_constraints(); - let mut candidate = make_candidate(&constraints, &relay_parent); - - constraints.upgrade_restriction = Some(UpgradeRestriction::Present); - candidate.commitments_mut().new_validation_code = Some(ValidationCode(vec![1, 2, 3])); - - assert_eq!( - Fragment::new(relay_parent, constraints, candidate), - Err(FragmentValidityError::CodeUpgradeRestricted), - ); - } - - #[test] - fn fragment_hrmp_messages_descending_or_duplicate() { - let relay_parent = RelayChainBlockInfo { - number: 6, - hash: Hash::repeat_byte(0x0a), - storage_root: Hash::repeat_byte(0xff), - }; - - let constraints = make_constraints(); - let mut candidate = make_candidate(&constraints, &relay_parent); - - candidate.commitments_mut().horizontal_messages = HorizontalMessages::truncate_from(vec![ - OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![1, 2, 3] }, - OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![4, 5, 6] }, - ]); - - assert_eq!( - Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()), - Err(FragmentValidityError::HrmpMessagesDescendingOrDuplicate(1)), - ); - - candidate.commitments_mut().horizontal_messages = HorizontalMessages::truncate_from(vec![ - OutboundHrmpMessage { recipient: ParaId::from(1 as u32), data: vec![1, 2, 3] }, - OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![4, 5, 6] }, - ]); - - assert_eq!( - Fragment::new(relay_parent, constraints, candidate), - Err(FragmentValidityError::HrmpMessagesDescendingOrDuplicate(1)), - ); - } -} diff --git a/polkadot/node/subsystem-util/src/lib.rs b/polkadot/node/subsystem-util/src/lib.rs index daee4a8350e5..e60a9ff82eeb 100644 --- a/polkadot/node/subsystem-util/src/lib.rs +++ b/polkadot/node/subsystem-util/src/lib.rs @@ -43,7 +43,7 @@ use futures::channel::{mpsc, oneshot}; use parity_scale_codec::Encode; use polkadot_primitives::{ - vstaging as vstaging_primitives, AuthorityDiscoveryId, CandidateEvent, CandidateHash, + AsyncBackingParams, AuthorityDiscoveryId, CandidateEvent, CandidateHash, 
CommittedCandidateReceipt, CoreState, EncodeAs, GroupIndex, GroupRotationInfo, Hash, Id as ParaId, OccupiedCoreAssumption, PersistedValidationData, ScrapedOnChainVotes, SessionIndex, SessionInfo, Signed, SigningContext, ValidationCode, ValidationCodeHash, @@ -227,7 +227,7 @@ specialize_requests! { fn request_key_ownership_proof(validator_id: ValidatorId) -> Option<slashing::OpaqueKeyOwnershipProof>; KeyOwnershipProof; fn request_submit_report_dispute_lost(dp: slashing::DisputeProof, okop: slashing::OpaqueKeyOwnershipProof) -> Option<()>; SubmitReportDisputeLost; - fn request_staging_async_backing_params() -> vstaging_primitives::AsyncBackingParams; StagingAsyncBackingParams; + fn request_async_backing_params() -> AsyncBackingParams; AsyncBackingParams; } /// Requests executor parameters from the runtime effective at a given relay-parent. First obtains diff --git a/polkadot/node/subsystem-util/src/runtime/mod.rs b/polkadot/node/subsystem-util/src/runtime/mod.rs index c078b17d2175..8d7cef88a70e 100644 --- a/polkadot/node/subsystem-util/src/runtime/mod.rs +++ b/polkadot/node/subsystem-util/src/runtime/mod.rs @@ -30,16 +30,16 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_types::UnpinHandle; use polkadot_primitives::{ - vstaging, CandidateEvent, CandidateHash, CoreState, EncodeAs, ExecutorParams, GroupIndex, - GroupRotationInfo, Hash, IndexedVec, OccupiedCore, ScrapedOnChainVotes, SessionIndex, - SessionInfo, Signed, SigningContext, UncheckedSigned, ValidationCode, ValidationCodeHash, - ValidatorId, ValidatorIndex, LEGACY_MIN_BACKING_VOTES, + slashing, AsyncBackingParams, CandidateEvent, CandidateHash, CoreState, EncodeAs, + ExecutorParams, GroupIndex, GroupRotationInfo, Hash, IndexedVec, OccupiedCore, + ScrapedOnChainVotes, SessionIndex, SessionInfo, Signed, SigningContext, UncheckedSigned, + ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, LEGACY_MIN_BACKING_VOTES, }; use crate::{ - request_availability_cores, request_candidate_events, request_from_runtime, - request_key_ownership_proof, request_on_chain_votes, request_session_executor_params, - request_session_index_for_child, request_session_info, request_staging_async_backing_params, + request_async_backing_params, request_availability_cores, request_candidate_events, + request_from_runtime, request_key_ownership_proof, request_on_chain_votes, + request_session_executor_params, request_session_index_for_child, request_session_info, request_submit_report_dispute_lost, request_unapplied_slashes, request_validation_code_by_hash, request_validator_groups, }; @@ -377,7 +377,7 @@ where pub async fn get_unapplied_slashes( sender: &mut Sender, relay_parent: Hash, -) -> Result<Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)>> +) -> Result<Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>> where Sender: SubsystemSender<RuntimeApiMessage>, { @@ -392,7 +392,7 @@ pub async fn key_ownership_proof( sender: &mut Sender, relay_parent: Hash, validator_id: ValidatorId, -) -> Result<Option<vstaging::slashing::OpaqueKeyOwnershipProof>> +) -> Result<Option<slashing::OpaqueKeyOwnershipProof>> where Sender: SubsystemSender<RuntimeApiMessage>, { @@ -403,8 +403,8 @@ where pub async fn submit_report_dispute_lost( sender: &mut Sender, relay_parent: Hash, - dispute_proof: vstaging::slashing::DisputeProof, - key_ownership_proof: vstaging::slashing::OpaqueKeyOwnershipProof, + dispute_proof: slashing::DisputeProof, + key_ownership_proof: slashing::OpaqueKeyOwnershipProof, ) -> Result<Option<()>> where Sender: SubsystemSender<RuntimeApiMessage>, @@ -429,7 +429,7 @@ where pub enum ProspectiveParachainsMode { /// Runtime API without support of `async_backing_params`: no prospective parachains. Disabled, - /// vstaging runtime API: prospective parachains. + /// v6 runtime API: prospective parachains. 
Enabled { + /// The maximum number of para blocks between the para head in a relay parent + /// and a new candidate. Restricts nodes from building arbitrarily long chains + @@ -457,8 +457,7 @@ pub async fn prospective_parachains_mode( where Sender: SubsystemSender<RuntimeApiMessage>, { - let result = - recv_runtime(request_staging_async_backing_params(relay_parent, sender).await).await; + let result = recv_runtime(request_async_backing_params(relay_parent, sender).await).await; if let Err(error::Error::RuntimeRequest(RuntimeApiError::NotSupported { runtime_api_name })) = &result @@ -472,7 +471,7 @@ where Ok(ProspectiveParachainsMode::Disabled) } else { - let vstaging::AsyncBackingParams { max_candidate_depth, allowed_ancestry_len } = result?; + let AsyncBackingParams { max_candidate_depth, allowed_ancestry_len } = result?; Ok(ProspectiveParachainsMode::Enabled { max_candidate_depth: max_candidate_depth as _, allowed_ancestry_len: allowed_ancestry_len as _, diff --git a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml index fcbba9bbe212..73b1fab529ef 100644 --- a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml @@ -39,6 +39,3 @@ sc-service = { path = "../../../../../substrate/client/service" } sp-keyring = { path = "../../../../../substrate/primitives/keyring" } tokio = { version = "1.24.2", features = ["macros"] } - -[features] -network-protocol-staging = [ "polkadot-cli/network-protocol-staging" ] diff --git a/polkadot/primitives/src/lib.rs b/polkadot/primitives/src/lib.rs index 9121b3790858..5adb6d253134 100644 --- a/polkadot/primitives/src/lib.rs +++ b/polkadot/primitives/src/lib.rs @@ -19,8 +19,8 @@ #![warn(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] -// `v5` is currently the latest stable version of the runtime API. -pub mod v5; +// `v6` is currently the latest stable version of the runtime API. +pub mod v6; // The 'staging' version is special - it contains primitives which are // still in development. Once they are considered stable, they will be @@ -33,20 +33,21 @@ pub mod runtime_api; // Current primitives not requiring versioning are exported here. // Primitives requiring versioning must not be exported and must be referred by an exact version. 
-pub use v5::{ - byzantine_threshold, check_candidate_backing, collator_signature_payload, +pub use v6::{ + async_backing, byzantine_threshold, check_candidate_backing, collator_signature_payload, effective_minimum_backing_votes, metric_definitions, slashing, supermajority_threshold, well_known_keys, AbridgedHostConfiguration, AbridgedHrmpChannel, AccountId, AccountIndex, - AccountPublic, ApprovalVote, AssignmentId, AuthorityDiscoveryId, AvailabilityBitfield, - BackedCandidate, Balance, BlakeTwo256, Block, BlockId, BlockNumber, CandidateCommitments, - CandidateDescriptor, CandidateEvent, CandidateHash, CandidateIndex, CandidateReceipt, - CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, CollatorId, CollatorSignature, - CommittedCandidateReceipt, CompactStatement, ConsensusLog, CoreIndex, CoreState, DisputeState, - DisputeStatement, DisputeStatementSet, DownwardMessage, EncodeAs, ExecutorParam, - ExecutorParams, ExecutorParamsHash, ExplicitDisputeStatement, GroupIndex, GroupRotationInfo, - Hash, HashT, HeadData, Header, HrmpChannelId, Id, InboundDownwardMessage, InboundHrmpMessage, - IndexedVec, InherentData, InvalidDisputeStatementKind, Moment, MultiDisputeStatementSet, Nonce, - OccupiedCore, OccupiedCoreAssumption, OutboundHrmpMessage, ParathreadClaim, ParathreadEntry, + AccountPublic, ApprovalVote, AssignmentId, AsyncBackingParams, AuthorityDiscoveryId, + AvailabilityBitfield, BackedCandidate, Balance, BlakeTwo256, Block, BlockId, BlockNumber, + CandidateCommitments, CandidateDescriptor, CandidateEvent, CandidateHash, CandidateIndex, + CandidateReceipt, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, CollatorId, + CollatorSignature, CommittedCandidateReceipt, CompactStatement, ConsensusLog, CoreIndex, + CoreState, DisputeState, DisputeStatement, DisputeStatementSet, DownwardMessage, EncodeAs, + ExecutorParam, ExecutorParams, ExecutorParamsHash, ExplicitDisputeStatement, GroupIndex, + GroupRotationInfo, Hash, HashT, HeadData, Header, HorizontalMessages, HrmpChannelId, Id, + InboundDownwardMessage, InboundHrmpMessage, IndexedVec, InherentData, + InvalidDisputeStatementKind, Moment, MultiDisputeStatementSet, Nonce, OccupiedCore, + OccupiedCoreAssumption, OutboundHrmpMessage, ParathreadClaim, ParathreadEntry, PersistedValidationData, PvfCheckStatement, PvfExecTimeoutKind, PvfPrepTimeoutKind, RuntimeMetricLabel, RuntimeMetricLabelValue, RuntimeMetricLabelValues, RuntimeMetricLabels, RuntimeMetricOp, RuntimeMetricUpdate, ScheduledCore, ScrapedOnChainVotes, SessionIndex, @@ -61,4 +62,4 @@ pub use v5::{ }; #[cfg(feature = "std")] -pub use v5::{AssignmentPair, CollatorPair, ValidatorPair}; +pub use v6::{AssignmentPair, CollatorPair, ValidatorPair}; diff --git a/polkadot/primitives/src/runtime_api.rs b/polkadot/primitives/src/runtime_api.rs index e5f1aa4276ef..6cb66d40204d 100644 --- a/polkadot/primitives/src/runtime_api.rs +++ b/polkadot/primitives/src/runtime_api.rs @@ -114,10 +114,11 @@ //! separated from the stable primitives. 
use crate::{ - vstaging, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, - OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, - SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, ValidatorSignature, + async_backing, slashing, AsyncBackingParams, BlockNumber, CandidateCommitments, CandidateEvent, + CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, + GroupRotationInfo, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, + ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, + ValidatorSignature, }; use parity_scale_codec::{Decode, Encode}; use polkadot_core_primitives as pcp; @@ -224,38 +225,37 @@ sp_api::decl_runtime_apis! { /// Returns a list of validators that lost a past session dispute and need to be slashed. /// NOTE: This function is only available since parachain host version 5. - fn unapplied_slashes() -> Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)>; + fn unapplied_slashes() -> Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>; /// Returns a merkle proof of a validator session key. /// NOTE: This function is only available since parachain host version 5. fn key_ownership_proof( validator_id: ValidatorId, - ) -> Option; + ) -> Option; /// Submit an unsigned extrinsic to slash validators who lost a dispute about /// a candidate of a past session. /// NOTE: This function is only available since parachain host version 5. fn submit_report_dispute_lost( - dispute_proof: vstaging::slashing::DisputeProof, - key_ownership_proof: vstaging::slashing::OpaqueKeyOwnershipProof, + dispute_proof: slashing::DisputeProof, + key_ownership_proof: slashing::OpaqueKeyOwnershipProof, ) -> Option<()>; - /***** Staging *****/ + /***** Added in v6 *****/ /// Get the minimum number of backing votes for a parachain candidate. /// This is a staging method! Do not use on production runtimes! #[api_version(6)] fn minimum_backing_votes() -> u32; - /***** Asynchronous backing *****/ + /***** Added in v7: Asynchronous backing *****/ /// Returns the state of parachain backing for a given para. - /// This is a staging method! Do not use on production runtimes! - #[api_version(99)] - fn staging_para_backing_state(_: ppp::Id) -> Option>; + #[api_version(7)] + fn para_backing_state(_: ppp::Id) -> Option>; /// Returns candidate's acceptance limitations for asynchronous backing for a relay parent. - #[api_version(99)] - fn staging_async_backing_params() -> vstaging::AsyncBackingParams; + #[api_version(7)] + fn async_backing_params() -> AsyncBackingParams; } } diff --git a/polkadot/primitives/src/v6/async_backing.rs b/polkadot/primitives/src/v6/async_backing.rs new file mode 100644 index 000000000000..1abe87b6dec4 --- /dev/null +++ b/polkadot/primitives/src/v6/async_backing.rs @@ -0,0 +1,132 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Asynchronous backing primitives. + +use super::*; + +use parity_scale_codec::{Decode, Encode}; +use primitives::RuntimeDebug; +use scale_info::TypeInfo; + +/// Candidate's acceptance limitations for asynchronous backing per relay parent. +#[derive( + RuntimeDebug, + Copy, + Clone, + PartialEq, + Encode, + Decode, + TypeInfo, + serde::Serialize, + serde::Deserialize, +)] + +pub struct AsyncBackingParams { + /// The maximum number of para blocks between the para head in a relay parent + /// and a new candidate. Restricts nodes from building arbitrary long chains + /// and spamming other validators. + /// + /// When async backing is disabled, the only valid value is 0. + pub max_candidate_depth: u32, + /// How many ancestors of a relay parent are allowed to build candidates on top + /// of. + /// + /// When async backing is disabled, the only valid value is 0. + pub allowed_ancestry_len: u32, +} + +/// Constraints on inbound HRMP channels. +#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)] +pub struct InboundHrmpLimitations { + /// An exhaustive set of all valid watermarks, sorted ascending. + /// + /// It's only expected to contain block numbers at which messages were + /// previously sent to a para, excluding most recent head. + pub valid_watermarks: Vec, +} + +/// Constraints on outbound HRMP channels. +#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)] +pub struct OutboundHrmpChannelLimitations { + /// The maximum bytes that can be written to the channel. + pub bytes_remaining: u32, + /// The maximum messages that can be written to the channel. + pub messages_remaining: u32, +} + +/// Constraints on the actions that can be taken by a new parachain +/// block. These limitations are implicitly associated with some particular +/// parachain, which should be apparent from usage. +#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)] +pub struct Constraints { + /// The minimum relay-parent number accepted under these constraints. + pub min_relay_parent_number: N, + /// The maximum Proof-of-Validity size allowed, in bytes. + pub max_pov_size: u32, + /// The maximum new validation code size allowed, in bytes. + pub max_code_size: u32, + /// The amount of UMP messages remaining. + pub ump_remaining: u32, + /// The amount of UMP bytes remaining. + pub ump_remaining_bytes: u32, + /// The maximum number of UMP messages allowed per candidate. + pub max_ump_num_per_candidate: u32, + /// Remaining DMP queue. Only includes sent-at block numbers. + pub dmp_remaining_messages: Vec, + /// The limitations of all registered inbound HRMP channels. + pub hrmp_inbound: InboundHrmpLimitations, + /// The limitations of all registered outbound HRMP channels. + pub hrmp_channels_out: Vec<(Id, OutboundHrmpChannelLimitations)>, + /// The maximum number of HRMP messages allowed per candidate. + pub max_hrmp_num_per_candidate: u32, + /// The required parent head-data of the parachain. + pub required_parent: HeadData, + /// The expected validation-code-hash of this parachain. + pub validation_code_hash: ValidationCodeHash, + /// The code upgrade restriction signal as-of this parachain. + pub upgrade_restriction: Option, + /// The future validation code hash, if any, and at what relay-parent + /// number the upgrade would be minimally applied. + pub future_validation_code: Option<(N, ValidationCodeHash)>, +} + +/// A candidate pending availability. 
+#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)] +pub struct CandidatePendingAvailability { + /// The hash of the candidate. + pub candidate_hash: CandidateHash, + /// The candidate's descriptor. + pub descriptor: CandidateDescriptor, + /// The commitments of the candidate. + pub commitments: CandidateCommitments, + /// The candidate's relay parent's number. + pub relay_parent_number: N, + /// The maximum Proof-of-Validity size allowed, in bytes. + pub max_pov_size: u32, +} + +/// The per-parachain state of the backing system, including +/// state-machine constraints and candidates pending availability. +#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)] +pub struct BackingState { + /// The state-machine constraints of the parachain. + pub constraints: Constraints, + /// The candidates pending availability. These should be ordered, i.e. they should form + /// a sub-chain, where the first candidate builds on top of the required parent of the + /// constraints and each subsequent builds on top of the previous head-data. + pub pending_availability: Vec>, +} diff --git a/polkadot/primitives/src/v5/executor_params.rs b/polkadot/primitives/src/v6/executor_params.rs similarity index 100% rename from polkadot/primitives/src/v5/executor_params.rs rename to polkadot/primitives/src/v6/executor_params.rs diff --git a/polkadot/primitives/src/v5/metrics.rs b/polkadot/primitives/src/v6/metrics.rs similarity index 100% rename from polkadot/primitives/src/v5/metrics.rs rename to polkadot/primitives/src/v6/metrics.rs diff --git a/polkadot/primitives/src/v5/mod.rs b/polkadot/primitives/src/v6/mod.rs similarity index 99% rename from polkadot/primitives/src/v5/mod.rs rename to polkadot/primitives/src/v6/mod.rs index 81743225403d..cf9008355178 100644 --- a/polkadot/primitives/src/v5/mod.rs +++ b/polkadot/primitives/src/v6/mod.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! `V2` Primitives. +//! `V6` Primitives. use bitvec::vec::BitVec; use parity_scale_codec::{Decode, Encode}; @@ -57,8 +57,13 @@ pub use sp_staking::SessionIndex; mod signed; pub use signed::{EncodeAs, Signed, UncheckedSigned}; +pub mod async_backing; +pub mod executor_params; pub mod slashing; +pub use async_backing::AsyncBackingParams; +pub use executor_params::{ExecutorParam, ExecutorParams, ExecutorParamsHash}; + mod metrics; pub use metrics::{ metric_definitions, RuntimeMetricLabel, RuntimeMetricLabelValue, RuntimeMetricLabelValues, @@ -1116,7 +1121,7 @@ pub struct AbridgedHostConfiguration { /// The delay, in blocks, before a validation upgrade is applied. pub validation_upgrade_delay: BlockNumber, /// Asynchronous backing parameters. 
- pub async_backing_params: super::vstaging::AsyncBackingParams, + pub async_backing_params: AsyncBackingParams, } /// Abridged version of `HrmpChannel` (from the `Hrmp` parachains host runtime module) meant to be @@ -1803,9 +1808,6 @@ pub enum PvfExecTimeoutKind { Approval, } -pub mod executor_params; -pub use executor_params::{ExecutorParam, ExecutorParams, ExecutorParamsHash}; - #[cfg(test)] mod tests { use super::*; diff --git a/polkadot/primitives/src/v5/signed.rs b/polkadot/primitives/src/v6/signed.rs similarity index 100% rename from polkadot/primitives/src/v5/signed.rs rename to polkadot/primitives/src/v6/signed.rs diff --git a/polkadot/primitives/src/v5/slashing.rs b/polkadot/primitives/src/v6/slashing.rs similarity index 100% rename from polkadot/primitives/src/v5/slashing.rs rename to polkadot/primitives/src/v6/slashing.rs diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs index ea341ee5b4fc..1429b0c326ac 100644 --- a/polkadot/primitives/src/vstaging/mod.rs +++ b/polkadot/primitives/src/vstaging/mod.rs @@ -17,121 +17,3 @@ //! Staging Primitives. // Put any primitives used by staging APIs functions here -pub use crate::v5::*; -use sp_std::prelude::*; - -use parity_scale_codec::{Decode, Encode}; -use primitives::RuntimeDebug; -use scale_info::TypeInfo; - -/// Useful type alias for Para IDs. -pub type ParaId = Id; - -/// Candidate's acceptance limitations for asynchronous backing per relay parent. -#[derive( - RuntimeDebug, - Copy, - Clone, - PartialEq, - Encode, - Decode, - TypeInfo, - serde::Serialize, - serde::Deserialize, -)] - -pub struct AsyncBackingParams { - /// The maximum number of para blocks between the para head in a relay parent - /// and a new candidate. Restricts nodes from building arbitrary long chains - /// and spamming other validators. - /// - /// When async backing is disabled, the only valid value is 0. - pub max_candidate_depth: u32, - /// How many ancestors of a relay parent are allowed to build candidates on top - /// of. - /// - /// When async backing is disabled, the only valid value is 0. - pub allowed_ancestry_len: u32, -} - -/// Constraints on inbound HRMP channels. -#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)] -pub struct InboundHrmpLimitations { - /// An exhaustive set of all valid watermarks, sorted ascending. - /// - /// It's only expected to contain block numbers at which messages were - /// previously sent to a para, excluding most recent head. - pub valid_watermarks: Vec, -} - -/// Constraints on outbound HRMP channels. -#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)] -pub struct OutboundHrmpChannelLimitations { - /// The maximum bytes that can be written to the channel. - pub bytes_remaining: u32, - /// The maximum messages that can be written to the channel. - pub messages_remaining: u32, -} - -/// Constraints on the actions that can be taken by a new parachain -/// block. These limitations are implicitly associated with some particular -/// parachain, which should be apparent from usage. -#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)] -pub struct Constraints { - /// The minimum relay-parent number accepted under these constraints. - pub min_relay_parent_number: N, - /// The maximum Proof-of-Validity size allowed, in bytes. - pub max_pov_size: u32, - /// The maximum new validation code size allowed, in bytes. - pub max_code_size: u32, - /// The amount of UMP messages remaining. 
- pub ump_remaining: u32, - /// The amount of UMP bytes remaining. - pub ump_remaining_bytes: u32, - /// The maximum number of UMP messages allowed per candidate. - pub max_ump_num_per_candidate: u32, - /// Remaining DMP queue. Only includes sent-at block numbers. - pub dmp_remaining_messages: Vec, - /// The limitations of all registered inbound HRMP channels. - pub hrmp_inbound: InboundHrmpLimitations, - /// The limitations of all registered outbound HRMP channels. - pub hrmp_channels_out: Vec<(ParaId, OutboundHrmpChannelLimitations)>, - /// The maximum number of HRMP messages allowed per candidate. - pub max_hrmp_num_per_candidate: u32, - /// The required parent head-data of the parachain. - pub required_parent: HeadData, - /// The expected validation-code-hash of this parachain. - pub validation_code_hash: ValidationCodeHash, - /// The code upgrade restriction signal as-of this parachain. - pub upgrade_restriction: Option, - /// The future validation code hash, if any, and at what relay-parent - /// number the upgrade would be minimally applied. - pub future_validation_code: Option<(N, ValidationCodeHash)>, -} - -/// A candidate pending availability. -#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)] -pub struct CandidatePendingAvailability { - /// The hash of the candidate. - pub candidate_hash: CandidateHash, - /// The candidate's descriptor. - pub descriptor: CandidateDescriptor, - /// The commitments of the candidate. - pub commitments: CandidateCommitments, - /// The candidate's relay parent's number. - pub relay_parent_number: N, - /// The maximum Proof-of-Validity size allowed, in bytes. - pub max_pov_size: u32, -} - -/// The per-parachain state of the backing system, including -/// state-machine constraints and candidates pending availability. -#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)] -pub struct BackingState { - /// The state-machine constraints of the parachain. - pub constraints: Constraints, - /// The candidates pending availability. These should be ordered, i.e. they should form - /// a sub-chain, where the first candidate builds on top of the required parent of the - /// constraints and each subsequent builds on top of the previous head-data. - pub pending_availability: Vec>, -} diff --git a/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md b/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md index a48444a46e40..286aeddb986d 100644 --- a/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md +++ b/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md @@ -122,7 +122,7 @@ prospective validation data. This is unlikely to change. ### Outgoing -- `RuntimeApiRequest::StagingParaBackingState` +- `RuntimeApiRequest::ParaBackingState` - Gets the backing state of the given para (the constraints of the para and candidates pending availability). 
- `RuntimeApiRequest::AvailabilityCores` diff --git a/polkadot/runtime/kusama/src/lib.rs b/polkadot/runtime/kusama/src/lib.rs index 082e1aca375c..1709c1bf8b1c 100644 --- a/polkadot/runtime/kusama/src/lib.rs +++ b/polkadot/runtime/kusama/src/lib.rs @@ -46,7 +46,7 @@ use runtime_parachains::{ inclusion::{AggregateMessageOrigin, UmpQueueId}, initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras, paras_inherent as parachains_paras_inherent, reward_points as parachains_reward_points, - runtime_api_impl::v5 as parachains_runtime_api_impl, + runtime_api_impl::v7 as parachains_runtime_api_impl, scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs b/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs index fe9a4e52bd07..d07964b69165 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs @@ -28,7 +28,7 @@ use crate::{ }; use frame_support::{assert_noop, assert_ok, error::BadOrigin}; use pallet_balances::Error as BalancesError; -use primitives::{v5::ValidationCode, BlockNumber, SessionIndex}; +use primitives::{BlockNumber, SessionIndex, ValidationCode}; use sp_std::collections::btree_map::BTreeMap; fn schedule_blank_para(id: ParaId, parakind: ParaKind) { diff --git a/polkadot/runtime/parachains/src/configuration.rs b/polkadot/runtime/parachains/src/configuration.rs index 33039cd08ca4..f53f986a553f 100644 --- a/polkadot/runtime/parachains/src/configuration.rs +++ b/polkadot/runtime/parachains/src/configuration.rs @@ -26,7 +26,7 @@ use polkadot_parachain_primitives::primitives::{ MAX_HORIZONTAL_MESSAGE_NUM, MAX_UPWARD_MESSAGE_NUM, }; use primitives::{ - vstaging::AsyncBackingParams, Balance, ExecutorParams, SessionIndex, LEGACY_MIN_BACKING_VOTES, + AsyncBackingParams, Balance, ExecutorParams, SessionIndex, LEGACY_MIN_BACKING_VOTES, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, }; use sp_runtime::{traits::Zero, Perbill}; diff --git a/polkadot/runtime/parachains/src/configuration/migration/v6.rs b/polkadot/runtime/parachains/src/configuration/migration/v6.rs index beed54deaffa..19031a90bab4 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v6.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v6.rs @@ -21,7 +21,7 @@ use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::BlockNumberFor; use sp_std::vec::Vec; -use primitives::{vstaging::AsyncBackingParams, Balance, ExecutorParams, SessionIndex}; +use primitives::{AsyncBackingParams, Balance, ExecutorParams, SessionIndex}; #[cfg(feature = "try-runtime")] use sp_std::prelude::*; diff --git a/polkadot/runtime/parachains/src/configuration/migration/v7.rs b/polkadot/runtime/parachains/src/configuration/migration/v7.rs index 113651381207..1754b78e0a1d 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v7.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v7.rs @@ -23,7 +23,7 @@ use frame_support::{ weights::Weight, }; use frame_system::pallet_prelude::BlockNumberFor; -use primitives::{vstaging::AsyncBackingParams, Balance, ExecutorParams, SessionIndex}; +use primitives::{AsyncBackingParams, Balance, ExecutorParams, SessionIndex}; use sp_std::vec::Vec; use frame_support::traits::OnRuntimeUpgrade; diff --git a/polkadot/runtime/parachains/src/configuration/migration/v8.rs 
b/polkadot/runtime/parachains/src/configuration/migration/v8.rs index 5c5b34821835..d1bc90051125 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v8.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v8.rs @@ -24,8 +24,7 @@ use frame_support::{ }; use frame_system::pallet_prelude::BlockNumberFor; use primitives::{ - vstaging::AsyncBackingParams, Balance, ExecutorParams, SessionIndex, - ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, + AsyncBackingParams, Balance, ExecutorParams, SessionIndex, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, }; use sp_runtime::Perbill; use sp_std::vec::Vec; diff --git a/polkadot/runtime/parachains/src/disputes/slashing.rs b/polkadot/runtime/parachains/src/disputes/slashing.rs index b27a7ab1ad73..9b2b7a48dc8b 100644 --- a/polkadot/runtime/parachains/src/disputes/slashing.rs +++ b/polkadot/runtime/parachains/src/disputes/slashing.rs @@ -51,7 +51,7 @@ use frame_support::{ use frame_system::pallet_prelude::BlockNumberFor; use primitives::{ - vstaging::slashing::{DisputeProof, DisputesTimeSlot, PendingSlashes, SlashingOffenceKind}, + slashing::{DisputeProof, DisputesTimeSlot, PendingSlashes, SlashingOffenceKind}, CandidateHash, SessionIndex, ValidatorId, ValidatorIndex, }; use scale_info::TypeInfo; diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs b/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs index e066ad825a33..ba74e488cd3b 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs @@ -25,5 +25,6 @@ //! 1. Bump the version of the stable module (e.g. `v2` becomes `v3`) //! 2. Move methods from `vstaging` to `v3`. The new stable version should include all methods from //! `vstaging` tagged with the new version number (e.g. all `v3` methods). -pub mod v5; + +pub mod v7; pub mod vstaging; diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/v5.rs b/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs similarity index 79% rename from polkadot/runtime/parachains/src/runtime_api_impl/v5.rs rename to polkadot/runtime/parachains/src/runtime_api_impl/v7.rs index 46a609e0368d..35d92f71084f 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/v5.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs @@ -18,12 +18,16 @@ //! functions. use crate::{ - disputes, dmp, hrmp, inclusion, initializer, paras, paras_inherent, + configuration, disputes, dmp, hrmp, inclusion, initializer, paras, paras_inherent, scheduler::{self, CoreOccupied}, session_info, shared, }; use frame_system::pallet_prelude::*; use primitives::{ + async_backing::{ + AsyncBackingParams, BackingState, CandidatePendingAvailability, Constraints, + InboundHrmpLimitations, OutboundHrmpChannelLimitations, + }, slashing, AuthorityDiscoveryId, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, OccupiedCore, OccupiedCoreAssumption, @@ -395,3 +399,100 @@ pub fn submit_unsigned_slashing_report( key_ownership_proof, ) } + +/// Return the min backing votes threshold from the configuration. +pub fn minimum_backing_votes() -> u32 { + >::config().minimum_backing_votes +} + +/// Implementation for `ParaBackingState` function from the runtime API +pub fn backing_state( + para_id: ParaId, +) -> Option>> { + let config = >::config(); + // Async backing is only expected to be enabled with a tracker capacity of 1. 
+ // Subsequent configuration update gets applied on new session, which always + // clears the buffer. + // + // Thus, minimum relay parent is ensured to have asynchronous backing enabled. + let now = >::block_number(); + let min_relay_parent_number = >::allowed_relay_parents() + .hypothetical_earliest_block_number(now, config.async_backing_params.allowed_ancestry_len); + + let required_parent = >::para_head(para_id)?; + let validation_code_hash = >::current_code_hash(para_id)?; + + let upgrade_restriction = >::upgrade_restriction_signal(para_id); + let future_validation_code = + >::future_code_upgrade_at(para_id).and_then(|block_num| { + // Only read the storage if there's a pending upgrade. + Some(block_num).zip(>::future_code_hash(para_id)) + }); + + let (ump_msg_count, ump_total_bytes) = + >::relay_dispatch_queue_size(para_id); + let ump_remaining = config.max_upward_queue_count - ump_msg_count; + let ump_remaining_bytes = config.max_upward_queue_size - ump_total_bytes; + + let dmp_remaining_messages = >::dmq_contents(para_id) + .into_iter() + .map(|msg| msg.sent_at) + .collect(); + + let valid_watermarks = >::valid_watermarks(para_id); + let hrmp_inbound = InboundHrmpLimitations { valid_watermarks }; + let hrmp_channels_out = >::outbound_remaining_capacity(para_id) + .into_iter() + .map(|(para, (messages_remaining, bytes_remaining))| { + (para, OutboundHrmpChannelLimitations { messages_remaining, bytes_remaining }) + }) + .collect(); + + let constraints = Constraints { + min_relay_parent_number, + max_pov_size: config.max_pov_size, + max_code_size: config.max_code_size, + ump_remaining, + ump_remaining_bytes, + max_ump_num_per_candidate: config.max_upward_message_num_per_candidate, + dmp_remaining_messages, + hrmp_inbound, + hrmp_channels_out, + max_hrmp_num_per_candidate: config.hrmp_max_message_num_per_candidate, + required_parent, + validation_code_hash, + upgrade_restriction, + future_validation_code, + }; + + let pending_availability = { + // Note: the API deals with a `Vec` as it is future-proof for cases + // where there may be multiple candidates pending availability at a time. + // But at the moment only one candidate can be pending availability per + // parachain. + crate::inclusion::PendingAvailability::::get(¶_id) + .and_then(|pending| { + let commitments = + crate::inclusion::PendingAvailabilityCommitments::::get(¶_id); + commitments.map(move |c| (pending, c)) + }) + .map(|(pending, commitments)| { + CandidatePendingAvailability { + candidate_hash: pending.candidate_hash(), + descriptor: pending.candidate_descriptor().clone(), + commitments, + relay_parent_number: pending.relay_parent_number(), + max_pov_size: constraints.max_pov_size, // assume always same in session. + } + }) + .into_iter() + .collect() + }; + + Some(BackingState { constraints, pending_availability }) +} + +/// Implementation for `AsyncBackingParams` function from the runtime API +pub fn async_backing_params() -> AsyncBackingParams { + >::config().async_backing_params +} diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs index deef19d90710..d01b543630c3 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs @@ -15,111 +15,3 @@ // along with Polkadot. If not, see . //! Put implementations of functions from staging APIs here. 
- -use crate::{configuration, dmp, hrmp, inclusion, initializer, paras, shared}; -use frame_system::pallet_prelude::BlockNumberFor; -use primitives::{ - vstaging::{ - AsyncBackingParams, BackingState, CandidatePendingAvailability, Constraints, - InboundHrmpLimitations, OutboundHrmpChannelLimitations, - }, - Id as ParaId, -}; -use sp_std::prelude::*; - -/// Implementation for `StagingParaBackingState` function from the runtime API -pub fn backing_state( - para_id: ParaId, -) -> Option>> { - let config = >::config(); - // Async backing is only expected to be enabled with a tracker capacity of 1. - // Subsequent configuration update gets applied on new session, which always - // clears the buffer. - // - // Thus, minimum relay parent is ensured to have asynchronous backing enabled. - let now = >::block_number(); - let min_relay_parent_number = >::allowed_relay_parents() - .hypothetical_earliest_block_number(now, config.async_backing_params.allowed_ancestry_len); - - let required_parent = >::para_head(para_id)?; - let validation_code_hash = >::current_code_hash(para_id)?; - - let upgrade_restriction = >::upgrade_restriction_signal(para_id); - let future_validation_code = - >::future_code_upgrade_at(para_id).and_then(|block_num| { - // Only read the storage if there's a pending upgrade. - Some(block_num).zip(>::future_code_hash(para_id)) - }); - - let (ump_msg_count, ump_total_bytes) = - >::relay_dispatch_queue_size(para_id); - let ump_remaining = config.max_upward_queue_count - ump_msg_count; - let ump_remaining_bytes = config.max_upward_queue_size - ump_total_bytes; - - let dmp_remaining_messages = >::dmq_contents(para_id) - .into_iter() - .map(|msg| msg.sent_at) - .collect(); - - let valid_watermarks = >::valid_watermarks(para_id); - let hrmp_inbound = InboundHrmpLimitations { valid_watermarks }; - let hrmp_channels_out = >::outbound_remaining_capacity(para_id) - .into_iter() - .map(|(para, (messages_remaining, bytes_remaining))| { - (para, OutboundHrmpChannelLimitations { messages_remaining, bytes_remaining }) - }) - .collect(); - - let constraints = Constraints { - min_relay_parent_number, - max_pov_size: config.max_pov_size, - max_code_size: config.max_code_size, - ump_remaining, - ump_remaining_bytes, - max_ump_num_per_candidate: config.max_upward_message_num_per_candidate, - dmp_remaining_messages, - hrmp_inbound, - hrmp_channels_out, - max_hrmp_num_per_candidate: config.hrmp_max_message_num_per_candidate, - required_parent, - validation_code_hash, - upgrade_restriction, - future_validation_code, - }; - - let pending_availability = { - // Note: the API deals with a `Vec` as it is future-proof for cases - // where there may be multiple candidates pending availability at a time. - // But at the moment only one candidate can be pending availability per - // parachain. - crate::inclusion::PendingAvailability::::get(¶_id) - .and_then(|pending| { - let commitments = - crate::inclusion::PendingAvailabilityCommitments::::get(¶_id); - commitments.map(move |c| (pending, c)) - }) - .map(|(pending, commitments)| { - CandidatePendingAvailability { - candidate_hash: pending.candidate_hash(), - descriptor: pending.candidate_descriptor().clone(), - commitments, - relay_parent_number: pending.relay_parent_number(), - max_pov_size: constraints.max_pov_size, // assume always same in session. 
- } - }) - .into_iter() - .collect() - }; - - Some(BackingState { constraints, pending_availability }) -} - -/// Implementation for `StagingAsyncBackingParams` function from the runtime API -pub fn async_backing_params() -> AsyncBackingParams { - >::config().async_backing_params -} - -/// Return the min backing votes threshold from the configuration. -pub fn minimum_backing_votes() -> u32 { - >::config().minimum_backing_votes -} diff --git a/polkadot/runtime/polkadot/src/lib.rs b/polkadot/runtime/polkadot/src/lib.rs index c9e3ded6dadd..0b2dd12b1542 100644 --- a/polkadot/runtime/polkadot/src/lib.rs +++ b/polkadot/runtime/polkadot/src/lib.rs @@ -34,7 +34,7 @@ use runtime_parachains::{ inclusion::{AggregateMessageOrigin, UmpQueueId}, initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras, paras_inherent as parachains_paras_inherent, reward_points as parachains_reward_points, - runtime_api_impl::v5 as parachains_runtime_api_impl, + runtime_api_impl::v7 as parachains_runtime_api_impl, scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index c7d429bc99af..dd05082d2918 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -46,7 +46,7 @@ use runtime_parachains::{ inclusion::{AggregateMessageOrigin, UmpQueueId}, initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras, paras_inherent as parachains_paras_inherent, - runtime_api_impl::v5 as parachains_runtime_api_impl, + runtime_api_impl::v7 as parachains_runtime_api_impl, scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; @@ -1724,6 +1724,7 @@ sp_api::impl_runtime_apis! { } } + #[api_version(7)] impl primitives::runtime_api::ParachainHost for Runtime { fn validators() -> Vec { parachains_runtime_api_impl::validators::() @@ -1854,6 +1855,18 @@ sp_api::impl_runtime_apis! 
{ key_ownership_proof, ) } + + fn minimum_backing_votes() -> u32 { + parachains_runtime_api_impl::minimum_backing_votes::() + } + + fn para_backing_state(para_id: ParaId) -> Option { + parachains_runtime_api_impl::backing_state::(para_id) + } + + fn async_backing_params() -> primitives::AsyncBackingParams { + parachains_runtime_api_impl::async_backing_params::() + } } #[api_version(3)] diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 99d6e58bc403..99fd2198400b 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -30,7 +30,7 @@ use polkadot_runtime_parachains::{ disputes::slashing as parachains_slashing, dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion, initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras, - paras_inherent as parachains_paras_inherent, runtime_api_impl::v5 as runtime_impl, + paras_inherent as parachains_paras_inherent, runtime_api_impl::v7 as runtime_impl, scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index ad08360f382d..e7cae7248bda 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -65,9 +65,7 @@ use runtime_parachains::{ inclusion::{AggregateMessageOrigin, UmpQueueId}, initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras, paras_inherent as parachains_paras_inherent, reward_points as parachains_reward_points, - runtime_api_impl::{ - v5 as parachains_runtime_api_impl, vstaging as parachains_staging_runtime_api_impl, - }, + runtime_api_impl::v7 as parachains_runtime_api_impl, scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; @@ -1582,7 +1580,7 @@ sp_api::impl_runtime_apis! { } } - #[api_version(6)] + #[api_version(7)] impl primitives::runtime_api::ParachainHost for Runtime { fn validators() -> Vec { parachains_runtime_api_impl::validators::() @@ -1715,7 +1713,15 @@ sp_api::impl_runtime_apis! 
{ } fn minimum_backing_votes() -> u32 { - parachains_staging_runtime_api_impl::minimum_backing_votes::() + parachains_runtime_api_impl::minimum_backing_votes::() + } + + fn para_backing_state(para_id: ParaId) -> Option { + parachains_runtime_api_impl::backing_state::(para_id) + } + + fn async_backing_params() -> primitives::AsyncBackingParams { + parachains_runtime_api_impl::async_backing_params::() } } diff --git a/polkadot/zombienet_tests/async_backing/001-async-backing-compatibility.toml b/polkadot/zombienet_tests/async_backing/001-async-backing-compatibility.toml deleted file mode 100644 index 918fb5bf4f62..000000000000 --- a/polkadot/zombienet_tests/async_backing/001-async-backing-compatibility.toml +++ /dev/null @@ -1,34 +0,0 @@ -[settings] -timeout = 1000 - -[relaychain] -default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" -chain = "rococo-local" -default_command = "polkadot" - - [relaychain.default_resources] - limits = { memory = "4G", cpu = "2" } - requests = { memory = "2G", cpu = "1" } - - [[relaychain.nodes]] - name = "alice" - args = [ "-lparachain=debug,runtime=debug"] - - [[relaychain.nodes]] - name = "bob" - image = "{{ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE}}" - args = [ "-lparachain=debug,runtime=debug"] - -[[parachains]] -id = 100 - - [parachains.collator] - name = "collator01" - image = "{{COL_IMAGE}}" - command = "undying-collator" - args = ["-lparachain=debug"] - -[types.Header] -number = "u64" -parent_hash = "Hash" -post_state = "Hash" diff --git a/polkadot/zombienet_tests/async_backing/001-async-backing-compatibility.zndsl b/polkadot/zombienet_tests/async_backing/001-async-backing-compatibility.zndsl deleted file mode 100644 index 46c1d77acf46..000000000000 --- a/polkadot/zombienet_tests/async_backing/001-async-backing-compatibility.zndsl +++ /dev/null @@ -1,23 +0,0 @@ -Description: Async Backing Compatibility Test -Network: ./001-async-backing-compatibility.toml -Creds: config - -# General -alice: is up -bob: is up - -# Check authority status -alice: reports node_roles is 4 -bob: reports node_roles is 4 - -# Check peers -alice: reports peers count is at least 2 within 20 seconds -bob: reports peers count is at least 2 within 20 seconds - -# Parachain registration -alice: parachain 100 is registered within 225 seconds -bob: parachain 100 is registered within 225 seconds - -# Ensure parachain progress -alice: parachain 100 block height is at least 10 within 250 seconds -bob: parachain 100 block height is at least 10 within 250 seconds diff --git a/polkadot/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.toml b/polkadot/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.toml deleted file mode 100644 index e61f7dd47ef6..000000000000 --- a/polkadot/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.toml +++ /dev/null @@ -1,54 +0,0 @@ -[settings] -timeout = 1000 - -[relaychain] -default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" -chain = "rococo-local" -default_command = "polkadot" - - [relaychain.default_resources] - limits = { memory = "4G", cpu = "2" } - requests = { memory = "2G", cpu = "1" } - - [[relaychain.nodes]] - name = "alice" - args = [ "-lparachain=debug,runtime=debug"] - - [[relaychain.nodes]] - name = "bob" - args = [ "-lparachain=debug,runtime=debug"] - - [[relaychain.nodes]] - name = "charlie" - image = "{{ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE}}" - args = [ "-lparachain=debug,runtime=debug"] - - [[relaychain.nodes]] - name = "dave" - image = "{{ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE}}" - args 
= [ "-lparachain=debug,runtime=debug"] - -[[parachains]] -id = 100 -addToGenesis = true - - [parachains.collator] - name = "collator02" - image = "{{COL_IMAGE}}" - command = "undying-collator" - args = ["-lparachain=debug"] - -[[parachains]] -id = 101 -addToGenesis = true - - [parachains.collator] - name = "collator02" - image = "{{COL_IMAGE}}" - command = "undying-collator" - args = ["-lparachain=debug"] - -[types.Header] -number = "u64" -parent_hash = "Hash" -post_state = "Hash" diff --git a/polkadot/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.zndsl b/polkadot/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.zndsl deleted file mode 100644 index 6213d1afb81e..000000000000 --- a/polkadot/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.zndsl +++ /dev/null @@ -1,34 +0,0 @@ -Description: Async Backing Runtime Upgrade Test -Network: ./002-async-backing-runtime-upgrade.toml -Creds: config - -# General -alice: is up -bob: is up -charlie: is up -dave: is up - -# Check peers -alice: reports peers count is at least 3 within 20 seconds -bob: reports peers count is at least 3 within 20 seconds - -# Parachain registration -alice: parachain 100 is registered within 225 seconds -bob: parachain 100 is registered within 225 seconds -charlie: parachain 100 is registered within 225 seconds -dave: parachain 100 is registered within 225 seconds -alice: parachain 101 is registered within 225 seconds -bob: parachain 101 is registered within 225 seconds -charlie: parachain 101 is registered within 225 seconds -dave: parachain 101 is registered within 225 seconds - -# Ensure parachain progress -alice: parachain 100 block height is at least 10 within 250 seconds -bob: parachain 100 block height is at least 10 within 250 seconds - -# Runtime upgrade (according to previous runtime tests, avg. 
is 30s) -alice: run ../misc/0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_BIN_URL}}" within 40 seconds -bob: run ../misc/0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_BIN_URL}}" within 40 seconds - -# Bootstrap the runtime upgrade -sleep 30 seconds diff --git a/polkadot/zombienet_tests/async_backing/003-async-backing-collator-mix.toml b/polkadot/zombienet_tests/async_backing/003-async-backing-collator-mix.toml deleted file mode 100644 index 4dca4d3d5312..000000000000 --- a/polkadot/zombienet_tests/async_backing/003-async-backing-collator-mix.toml +++ /dev/null @@ -1,40 +0,0 @@ -[settings] -timeout = 1000 - -[relaychain] -default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" -chain = "rococo-local" -default_command = "polkadot" - - [relaychain.default_resources] - limits = { memory = "4G", cpu = "2" } - requests = { memory = "2G", cpu = "1" } - - [[relaychain.nodes]] - name = "alice" - args = [ "-lparachain=debug"] - - [[relaychain.nodes]] - name = "bob" - image = "{{ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE}}" - args = [ "-lparachain=debug"] - -[[parachains]] -id = 100 - - [[parachains.collators]] - name = "collator01" - image = "docker.io/paritypr/colander:master" - command = "undying-collator" - args = ["-lparachain=debug"] - - [[parachains.collators]] - name = "collator02" - image = "{{COL_IMAGE}}" - command = "undying-collator" - args = ["-lparachain=debug"] - -[types.Header] -number = "u64" -parent_hash = "Hash" -post_state = "Hash" diff --git a/polkadot/zombienet_tests/async_backing/003-async-backing-collator-mix.zndsl b/polkadot/zombienet_tests/async_backing/003-async-backing-collator-mix.zndsl deleted file mode 100644 index 98436b0459cf..000000000000 --- a/polkadot/zombienet_tests/async_backing/003-async-backing-collator-mix.zndsl +++ /dev/null @@ -1,19 +0,0 @@ -Description: Async Backing Collator Mix Test -Network: ./003-async-backing-collator-mix.toml -Creds: config - -# General -alice: is up -bob: is up - -# Check peers -alice: reports peers count is at least 3 within 20 seconds -bob: reports peers count is at least 3 within 20 seconds - -# Parachain registration -alice: parachain 100 is registered within 225 seconds -bob: parachain 100 is registered within 225 seconds - -# Ensure parachain progress -alice: parachain 100 block height is at least 10 within 250 seconds -bob: parachain 100 block height is at least 10 within 250 seconds diff --git a/polkadot/zombienet_tests/async_backing/README.md b/polkadot/zombienet_tests/async_backing/README.md deleted file mode 100644 index 9774ea3c25c9..000000000000 --- a/polkadot/zombienet_tests/async_backing/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# async-backing zombienet tests - -This directory contains zombienet tests made explicitly for the async-backing feature branch. - -## coverage - -- Network protocol upgrade deploying both master and async branch (compatibility). -- Runtime ugprade while running both master and async backing branch nodes. -- Async backing test with a mix of collators collating via async backing and sync backing. 
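(The dedicated async-backing zombienet suite above can be removed because the API is no longer feature-gated: the node probes the runtime, which now exposes `async_backing_params` at `#[api_version(7)]`, and falls back to synchronous backing when the call is `NotSupported`. Below is a minimal standalone sketch of that fallback, using simplified stand-in types rather than the real subsystem plumbing — the `ApiError` enum, its `Other` variant, and the sample parameter values are illustrative assumptions; only the `AsyncBackingParams` fields mirror the patch.)

```
/// Stand-in for `primitives::AsyncBackingParams` (same two fields as in the patch).
#[derive(Clone, Copy, Debug, PartialEq)]
struct AsyncBackingParams {
    max_candidate_depth: u32,
    allowed_ancestry_len: u32,
}

/// Stand-in runtime-API error; pre-v7 runtimes answer `NotSupported`.
#[derive(Debug, PartialEq)]
enum ApiError {
    NotSupported { runtime_api_name: &'static str },
    Other(&'static str),
}

/// Mirrors the shape of `ProspectiveParachainsMode` from `subsystem-util`.
#[derive(Debug, PartialEq)]
enum ProspectiveParachainsMode {
    /// Runtime API without support of `async_backing_params`: no prospective parachains.
    Disabled,
    /// v7 runtime API: prospective parachains enabled.
    Enabled { max_candidate_depth: usize, allowed_ancestry_len: usize },
}

/// Derive the mode from an `async_backing_params` request, treating
/// `NotSupported` (an older runtime) as a clean fallback to sync backing.
fn prospective_parachains_mode(
    result: Result<AsyncBackingParams, ApiError>,
) -> Result<ProspectiveParachainsMode, ApiError> {
    match result {
        Err(ApiError::NotSupported { .. }) => Ok(ProspectiveParachainsMode::Disabled),
        Err(other) => Err(other),
        Ok(AsyncBackingParams { max_candidate_depth, allowed_ancestry_len }) =>
            Ok(ProspectiveParachainsMode::Enabled {
                max_candidate_depth: max_candidate_depth as usize,
                allowed_ancestry_len: allowed_ancestry_len as usize,
            }),
    }
}

fn main() {
    // A v7 runtime answers with concrete (here: made-up) parameters.
    let v7 = Ok(AsyncBackingParams { max_candidate_depth: 3, allowed_ancestry_len: 2 });
    assert_eq!(
        prospective_parachains_mode(v7),
        Ok(ProspectiveParachainsMode::Enabled { max_candidate_depth: 3, allowed_ancestry_len: 2 }),
    );

    // A pre-v7 runtime reports `NotSupported`; the node degrades gracefully.
    let v5 = Err(ApiError::NotSupported { runtime_api_name: "async_backing_params" });
    assert_eq!(prospective_parachains_mode(v5), Ok(ProspectiveParachainsMode::Disabled));
}
```

(The same version-probe idea is what allows one node binary to serve both pre- and post-v7 runtimes, so no separate staging build of the node is needed for compatibility testing.)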
diff --git a/polkadot/zombienet_tests/smoke/0002-parachains-upgrade-smoke-test.toml b/polkadot/zombienet_tests/smoke/0002-parachains-upgrade-smoke-test.toml
index 0becb408550a..d72e3ebdb335 100644
--- a/polkadot/zombienet_tests/smoke/0002-parachains-upgrade-smoke-test.toml
+++ b/polkadot/zombienet_tests/smoke/0002-parachains-upgrade-smoke-test.toml
@@ -30,8 +30,8 @@ cumulus_based = true

   [parachains.collator]
   name = "collator01"
-  image = "{{COL_IMAGE}}"
-  command = "polkadot-collator"
+  image = "{{CUMULUS_IMAGE}}"
+  command = "polkadot-parachain"

     [[parachains.collator.env]]
     name = "RUST_LOG"

From 8b061a5c5d29a4ef8efa0f2919fdc5c3cfc36361 Mon Sep 17 00:00:00 2001
From: Matteo Muraca <56828990+muraca@users.noreply.github.com>
Date: Wed, 27 Sep 2023 14:46:14 +0200
Subject: [PATCH 60/69] Associated type Hasher for `QueryPreimage`, `StorePreimage` and `Bounded` (#1720)

I hope it's enough to fix #1701. The only solution I found to make it happen is to put an associated type on the `Bounded` enum as well.

@liamaharon @kianenigma @bkchr

Polkadot address: 12poSUQPtcF1HUPQGY3zZu2P8emuW9YnsPduA4XG3oCEfJVp

---------

Signed-off-by: muraca
Co-authored-by: Liam Aharon
Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com>
---
 substrate/frame/democracy/src/benchmarking.rs |   7 +-
 substrate/frame/democracy/src/lib.rs          |  54 +++++----
 substrate/frame/democracy/src/tests.rs        |   2 +-
 .../frame/democracy/src/tests/metadata.rs     |   6 +-
 substrate/frame/preimage/src/lib.rs           |  10 +-
 substrate/frame/preimage/src/tests.rs         |  29 +++--
 substrate/frame/referenda/src/benchmarking.rs |   4 +-
 substrate/frame/referenda/src/lib.rs          |  29 +++--
 substrate/frame/referenda/src/mock.rs         |   2 +-
 substrate/frame/referenda/src/tests.rs        |   3 +-
 substrate/frame/referenda/src/types.rs        |   3 +-
 substrate/frame/scheduler/src/benchmarking.rs |   4 +-
 substrate/frame/scheduler/src/lib.rs          |  35 +++---
 substrate/frame/scheduler/src/migration.rs    |   2 +-
 substrate/frame/scheduler/src/tests.rs        |   2 +-
 substrate/frame/support/src/traits.rs         |   2 +-
 .../frame/support/src/traits/preimages.rs     | 110 ++++++++++--------
 .../frame/support/src/traits/schedule.rs      |   8 +-
 substrate/frame/whitelist/src/benchmarking.rs |   4 +-
 substrate/frame/whitelist/src/lib.rs          |  32 ++---
 20 files changed, 189 insertions(+), 159 deletions(-)

diff --git a/substrate/frame/democracy/src/benchmarking.rs b/substrate/frame/democracy/src/benchmarking.rs
index e4a21a4e1d9b..b4aa17726b8d 100644
--- a/substrate/frame/democracy/src/benchmarking.rs
+++ b/substrate/frame/democracy/src/benchmarking.rs
@@ -25,7 +25,6 @@ use frame_support::{
 	traits::{Currency, EnsureOrigin, Get, OnInitialize, UnfilteredDispatchable},
 };
 use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin};
-use sp_core::H256;
 use sp_runtime::{traits::Bounded, BoundedVec};

 use crate::Pallet as Democracy;
@@ -46,7 +45,7 @@ fn make_proposal(n: u32) -> BoundedCallOf {
 	::Preimages::bound(call).unwrap()
 }

-fn add_proposal(n: u32) -> Result {
+fn add_proposal(n: u32) -> Result {
 	let other = funded_account::("proposer", n);
 	let value = T::MinimumDeposit::get();
 	let proposal = make_proposal::(n);
@@ -55,7 +54,7 @@ fn add_proposal(n: u32) -> Result {
 }

 // add a referendum with a metadata.
-fn add_referendum(n: u32) -> (ReferendumIndex, H256, PreimageHash) { +fn add_referendum(n: u32) -> (ReferendumIndex, T::Hash, T::Hash) { let vote_threshold = VoteThreshold::SimpleMajority; let proposal = make_proposal::(n); let hash = proposal.hash(); @@ -85,7 +84,7 @@ fn assert_has_event(generic_event: ::RuntimeEvent) { } // note a new preimage. -fn note_preimage() -> PreimageHash { +fn note_preimage() -> T::Hash { use core::sync::atomic::{AtomicU8, Ordering}; use sp_std::borrow::Cow; // note a new preimage on every function invoke. diff --git a/substrate/frame/democracy/src/lib.rs b/substrate/frame/democracy/src/lib.rs index e538d31c6ad0..089556191cd1 100644 --- a/substrate/frame/democracy/src/lib.rs +++ b/substrate/frame/democracy/src/lib.rs @@ -159,9 +159,8 @@ use frame_support::{ traits::{ defensive_prelude::*, schedule::{v3::Named as ScheduleNamed, DispatchTime}, - Bounded, Currency, EnsureOrigin, Get, Hash as PreimageHash, LockIdentifier, - LockableCurrency, OnUnbalanced, QueryPreimage, ReservableCurrency, StorePreimage, - WithdrawReasons, + Bounded, Currency, EnsureOrigin, Get, LockIdentifier, LockableCurrency, OnUnbalanced, + QueryPreimage, ReservableCurrency, StorePreimage, WithdrawReasons, }, weights::Weight, }; @@ -203,7 +202,7 @@ type NegativeImbalanceOf = <::Currency as Currency< ::AccountId, >>::NegativeImbalance; pub type CallOf = ::RuntimeCall; -pub type BoundedCallOf = Bounded>; +pub type BoundedCallOf = Bounded, ::Hashing>; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; #[frame_support::pallet] @@ -211,7 +210,6 @@ pub mod pallet { use super::{DispatchResult, *}; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - use sp_core::H256; /// The current storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); @@ -226,10 +224,15 @@ pub mod pallet { type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// The Scheduler. - type Scheduler: ScheduleNamed, CallOf, Self::PalletsOrigin>; + type Scheduler: ScheduleNamed< + BlockNumberFor, + CallOf, + Self::PalletsOrigin, + Hasher = Self::Hashing, + >; /// The Preimage provider. - type Preimages: QueryPreimage + StorePreimage; + type Preimages: QueryPreimage + StorePreimage; /// Currency type for this pallet. type Currency: ReservableCurrency @@ -421,22 +424,22 @@ pub mod pallet { pub type Blacklist = StorageMap< _, Identity, - H256, + T::Hash, (BlockNumberFor, BoundedVec), >; /// Record of all proposals that have been subject to emergency cancellation. #[pallet::storage] - pub type Cancellations = StorageMap<_, Identity, H256, bool, ValueQuery>; + pub type Cancellations = StorageMap<_, Identity, T::Hash, bool, ValueQuery>; /// General information concerning any proposal or referendum. - /// The `PreimageHash` refers to the preimage of the `Preimages` provider which can be a JSON + /// The `Hash` refers to the preimage of the `Preimages` provider which can be a JSON /// dump or IPFS hash of a JSON file. /// /// Consider a garbage collection for a metadata of finished referendums to `unrequest` (remove) /// large preimages. #[pallet::storage] - pub type MetadataOf = StorageMap<_, Blake2_128Concat, MetadataOwner, PreimageHash>; + pub type MetadataOf = StorageMap<_, Blake2_128Concat, MetadataOwner, T::Hash>; #[pallet::genesis_config] #[derive(frame_support::DefaultNoBound)] @@ -476,9 +479,9 @@ pub mod pallet { /// An account has cancelled a previous delegation operation. Undelegated { account: T::AccountId }, /// An external proposal has been vetoed. 
- Vetoed { who: T::AccountId, proposal_hash: H256, until: BlockNumberFor }, + Vetoed { who: T::AccountId, proposal_hash: T::Hash, until: BlockNumberFor }, /// A proposal_hash has been blacklisted permanently. - Blacklisted { proposal_hash: H256 }, + Blacklisted { proposal_hash: T::Hash }, /// An account has voted in a referendum Voted { voter: T::AccountId, ref_index: ReferendumIndex, vote: AccountVote> }, /// An account has secconded a proposal @@ -490,14 +493,14 @@ pub mod pallet { /// Metadata owner. owner: MetadataOwner, /// Preimage hash. - hash: PreimageHash, + hash: T::Hash, }, /// Metadata for a proposal or a referendum has been cleared. MetadataCleared { /// Metadata owner. owner: MetadataOwner, /// Preimage hash. - hash: PreimageHash, + hash: T::Hash, }, /// Metadata has been transferred to new owner. MetadataTransferred { @@ -506,7 +509,7 @@ pub mod pallet { /// New metadata owner. owner: MetadataOwner, /// Preimage hash. - hash: PreimageHash, + hash: T::Hash, }, } @@ -775,7 +778,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::fast_track())] pub fn fast_track( origin: OriginFor, - proposal_hash: H256, + proposal_hash: T::Hash, voting_period: BlockNumberFor, delay: BlockNumberFor, ) -> DispatchResult { @@ -827,7 +830,7 @@ pub mod pallet { /// Weight: `O(V + log(V))` where V is number of `existing vetoers` #[pallet::call_index(8)] #[pallet::weight(T::WeightInfo::veto_external())] - pub fn veto_external(origin: OriginFor, proposal_hash: H256) -> DispatchResult { + pub fn veto_external(origin: OriginFor, proposal_hash: T::Hash) -> DispatchResult { let who = T::VetoOrigin::ensure_origin(origin)?; if let Some((ext_proposal, _)) = NextExternal::::get() { @@ -1042,7 +1045,7 @@ pub mod pallet { #[pallet::weight((T::WeightInfo::blacklist(), DispatchClass::Operational))] pub fn blacklist( origin: OriginFor, - proposal_hash: H256, + proposal_hash: T::Hash, maybe_ref_index: Option, ) -> DispatchResult { T::BlacklistOrigin::ensure_origin(origin)?; @@ -1139,7 +1142,7 @@ pub mod pallet { pub fn set_metadata( origin: OriginFor, owner: MetadataOwner, - maybe_hash: Option, + maybe_hash: Option, ) -> DispatchResult { match owner { MetadataOwner::External => { @@ -1173,15 +1176,16 @@ pub mod pallet { } pub trait EncodeInto: Encode { - fn encode_into + Default>(&self) -> T { + fn encode_into + Default, H: sp_core::Hasher>(&self) -> T { let mut t = T::default(); self.using_encoded(|data| { if data.len() <= t.as_mut().len() { t.as_mut()[0..data.len()].copy_from_slice(data); } else { - // encoded self is too big to fit into a T. hash it and use the first bytes of that - // instead. - let hash = sp_io::hashing::blake2_256(data); + // encoded self is too big to fit into a T. + // hash it and use the first bytes of that instead. + let hash = H::hash(&data); + let hash = hash.as_ref(); let l = t.as_mut().len().min(hash.len()); t.as_mut()[0..l].copy_from_slice(&hash[0..l]); } @@ -1610,7 +1614,7 @@ impl Pallet { // Earliest it can be scheduled for is next block. 
let when = now.saturating_add(status.delay.max(One::one())); if T::Scheduler::schedule_named( - (DEMOCRACY_ID, index).encode_into(), + (DEMOCRACY_ID, index).encode_into::<_, T::Hashing>(), DispatchTime::At(when), None, 63, diff --git a/substrate/frame/democracy/src/tests.rs b/substrate/frame/democracy/src/tests.rs index 9b885fb5915b..ebccf32e3423 100644 --- a/substrate/frame/democracy/src/tests.rs +++ b/substrate/frame/democracy/src/tests.rs @@ -277,7 +277,7 @@ fn tally(r: ReferendumIndex) -> Tally { } /// note a new preimage without registering. -fn note_preimage(who: u64) -> PreimageHash { +fn note_preimage(who: u64) -> ::Hash { use std::sync::atomic::{AtomicU8, Ordering}; // note a new preimage on every function invoke. static COUNTER: AtomicU8 = AtomicU8::new(0); diff --git a/substrate/frame/democracy/src/tests/metadata.rs b/substrate/frame/democracy/src/tests/metadata.rs index 5a36d80b7263..1b6d66a8bc44 100644 --- a/substrate/frame/democracy/src/tests/metadata.rs +++ b/substrate/frame/democracy/src/tests/metadata.rs @@ -22,9 +22,8 @@ use super::*; #[test] fn set_external_metadata_works() { new_test_ext().execute_with(|| { - use frame_support::traits::Hash as PreimageHash; // invalid preimage hash. - let invalid_hash: PreimageHash = [1u8; 32].into(); + let invalid_hash: ::Hash = [1u8; 32].into(); // metadata owner is an external proposal. let owner = MetadataOwner::External; // fails to set metadata if an external proposal does not exist. @@ -83,9 +82,8 @@ fn clear_metadata_works() { #[test] fn set_proposal_metadata_works() { new_test_ext().execute_with(|| { - use frame_support::traits::Hash as PreimageHash; // invalid preimage hash. - let invalid_hash: PreimageHash = [1u8; 32].into(); + let invalid_hash: ::Hash = [1u8; 32].into(); // create an external proposal. assert_ok!(propose_set_balance(1, 2, 5)); // metadata owner is a public proposal. 
diff --git a/substrate/frame/preimage/src/lib.rs b/substrate/frame/preimage/src/lib.rs index 4dda0207e62c..e344bdfe2d8f 100644 --- a/substrate/frame/preimage/src/lib.rs +++ b/substrate/frame/preimage/src/lib.rs @@ -49,8 +49,8 @@ use frame_support::{ ensure, pallet_prelude::Get, traits::{ - Consideration, Currency, Defensive, FetchResult, Footprint, Hash as PreimageHash, - PreimageProvider, PreimageRecipient, QueryPreimage, ReservableCurrency, StorePreimage, + Consideration, Currency, Defensive, FetchResult, Footprint, PreimageProvider, + PreimageRecipient, QueryPreimage, ReservableCurrency, StorePreimage, }, BoundedSlice, BoundedVec, }; @@ -531,7 +531,9 @@ impl PreimageRecipient for Pallet { } } -impl> QueryPreimage for Pallet { +impl QueryPreimage for Pallet { + type H = T::Hashing; + fn len(hash: &T::Hash) -> Option { Pallet::::len(hash) } @@ -555,7 +557,7 @@ impl> QueryPreimage for Pallet { } } -impl> StorePreimage for Pallet { +impl StorePreimage for Pallet { const MAX_LENGTH: usize = MAX_SIZE as usize; fn note(bytes: Cow<[u8]>) -> Result { diff --git a/substrate/frame/preimage/src/tests.rs b/substrate/frame/preimage/src/tests.rs index a473a0ae8e25..7609ec83e903 100644 --- a/substrate/frame/preimage/src/tests.rs +++ b/substrate/frame/preimage/src/tests.rs @@ -24,25 +24,32 @@ use crate::mock::*; use frame_support::{ assert_err, assert_noop, assert_ok, assert_storage_noop, - traits::{fungible::InspectHold, Bounded, BoundedInline, Hash as PreimageHash}, + traits::{fungible::InspectHold, Bounded, BoundedInline}, StorageNoopGuard, }; -use sp_core::{blake2_256, H256}; use sp_runtime::{bounded_vec, TokenError}; /// Returns one `Inline`, `Lookup` and `Legacy` item each with different data and hash. -pub fn make_bounded_values() -> (Bounded>, Bounded>, Bounded>) { +pub fn make_bounded_values() -> ( + Bounded, ::Hashing>, + Bounded, ::Hashing>, + Bounded, ::Hashing>, +) { let data: BoundedInline = bounded_vec![1]; - let inline = Bounded::>::Inline(data); + let inline = Bounded::, ::Hashing>::Inline(data); let data = vec![1, 2]; - let hash: H256 = blake2_256(&data[..]).into(); + let hash = ::Hashing::hash(&data[..]).into(); let len = data.len() as u32; - let lookup = Bounded::>::unrequested(hash, len); + let lookup = + Bounded::, ::Hashing>::unrequested(hash, len); let data = vec![1, 2, 3]; - let hash: H256 = blake2_256(&data[..]).into(); - let legacy = Bounded::>::Legacy { hash, dummy: Default::default() }; + let hash = ::Hashing::hash(&data[..]).into(); + let legacy = Bounded::, ::Hashing>::Legacy { + hash, + dummy: Default::default(), + }; (inline, lookup, legacy) } @@ -303,7 +310,7 @@ fn query_and_store_preimage_workflow() { let bound = Preimage::bound(data.clone()).unwrap(); let (len, hash) = (bound.len().unwrap(), bound.hash()); - assert_eq!(hash, blake2_256(&encoded).into()); + assert_eq!(hash, ::Hashing::hash(&encoded).into()); assert_eq!(bound.len(), Some(len)); assert!(bound.lookup_needed(), "Should not be Inlined"); assert_eq!(bound.lookup_len(), Some(len)); @@ -364,7 +371,7 @@ fn query_preimage_request_works() { new_test_ext().execute_with(|| { let _guard = StorageNoopGuard::default(); let data: Vec = vec![1; 10]; - let hash: PreimageHash = blake2_256(&data[..]).into(); + let hash = ::Hashing::hash(&data[..]).into(); // Request the preimage. ::request(&hash); @@ -454,7 +461,7 @@ fn store_preimage_basic_works() { // Cleanup. ::unnote(&bound.hash()); - let data_hash = blake2_256(&data); + let data_hash = ::Hashing::hash(&data); ::unnote(&data_hash.into()); // No storage changes remain. 
Checked by `StorageNoopGuard`. diff --git a/substrate/frame/referenda/src/benchmarking.rs b/substrate/frame/referenda/src/benchmarking.rs index e884a0bb6ec9..47d43cc0600c 100644 --- a/substrate/frame/referenda/src/benchmarking.rs +++ b/substrate/frame/referenda/src/benchmarking.rs @@ -25,7 +25,7 @@ use frame_benchmarking::v1::{ }; use frame_support::{ assert_ok, - traits::{Bounded, Currency, EnsureOrigin, EnsureOriginWithArg, UnfilteredDispatchable}, + traits::{Currency, EnsureOrigin, EnsureOriginWithArg, UnfilteredDispatchable}, }; use frame_system::RawOrigin; use sp_runtime::traits::Bounded as ArithBounded; @@ -42,7 +42,7 @@ fn funded_account, I: 'static>(name: &'static str, index: u32) -> T caller } -fn dummy_call, I: 'static>() -> Bounded<>::RuntimeCall> { +fn dummy_call, I: 'static>() -> BoundedCallOf { let inner = frame_system::Call::remark { remark: vec![] }; let call = >::RuntimeCall::from(inner); T::Preimages::bound(call).unwrap() diff --git a/substrate/frame/referenda/src/lib.rs b/substrate/frame/referenda/src/lib.rs index d4dbbf8a3c99..be21375f526f 100644 --- a/substrate/frame/referenda/src/lib.rs +++ b/substrate/frame/referenda/src/lib.rs @@ -73,8 +73,8 @@ use frame_support::{ v3::{Anon as ScheduleAnon, Named as ScheduleNamed}, DispatchTime, }, - Currency, Hash as PreimageHash, LockIdentifier, OnUnbalanced, OriginTrait, PollStatus, - Polling, QueryPreimage, ReservableCurrency, StorePreimage, VoteTally, + Currency, LockIdentifier, OnUnbalanced, OriginTrait, PollStatus, Polling, QueryPreimage, + ReservableCurrency, StorePreimage, VoteTally, }, BoundedVec, }; @@ -163,8 +163,17 @@ pub mod pallet { /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; /// The Scheduler. - type Scheduler: ScheduleAnon, CallOf, PalletsOriginOf> - + ScheduleNamed, CallOf, PalletsOriginOf>; + type Scheduler: ScheduleAnon< + BlockNumberFor, + CallOf, + PalletsOriginOf, + Hasher = Self::Hashing, + > + ScheduleNamed< + BlockNumberFor, + CallOf, + PalletsOriginOf, + Hasher = Self::Hashing, + >; /// Currency type for this pallet. type Currency: ReservableCurrency; // Origins and unbalances. @@ -226,7 +235,7 @@ pub mod pallet { >; /// The preimage provider. - type Preimages: QueryPreimage + StorePreimage; + type Preimages: QueryPreimage + StorePreimage; } /// The next free referendum index, aka the number of referenda started so far. @@ -257,14 +266,14 @@ pub mod pallet { StorageMap<_, Twox64Concat, TrackIdOf, u32, ValueQuery>; /// The metadata is a general information concerning the referendum. - /// The `PreimageHash` refers to the preimage of the `Preimages` provider which can be a JSON + /// The `Hash` refers to the preimage of the `Preimages` provider which can be a JSON /// dump or IPFS hash of a JSON file. /// /// Consider a garbage collection for a metadata of finished referendums to `unrequest` (remove) /// large preimages. #[pallet::storage] pub type MetadataOf, I: 'static = ()> = - StorageMap<_, Blake2_128Concat, ReferendumIndex, PreimageHash>; + StorageMap<_, Blake2_128Concat, ReferendumIndex, T::Hash>; #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] @@ -376,14 +385,14 @@ pub mod pallet { /// Index of the referendum. index: ReferendumIndex, /// Preimage hash. - hash: PreimageHash, + hash: T::Hash, }, /// Metadata for a referendum has been cleared. MetadataCleared { /// Index of the referendum. index: ReferendumIndex, /// Preimage hash. 
- hash: PreimageHash, + hash: T::Hash, }, } @@ -691,7 +700,7 @@ pub mod pallet { pub fn set_metadata( origin: OriginFor, index: ReferendumIndex, - maybe_hash: Option, + maybe_hash: Option, ) -> DispatchResult { let who = ensure_signed(origin)?; if let Some(hash) = maybe_hash { diff --git a/substrate/frame/referenda/src/mock.rs b/substrate/frame/referenda/src/mock.rs index c23fa4609d43..dce91f7dbfd7 100644 --- a/substrate/frame/referenda/src/mock.rs +++ b/substrate/frame/referenda/src/mock.rs @@ -473,7 +473,7 @@ impl RefState { } /// note a new preimage without registering. -pub fn note_preimage(who: u64) -> PreimageHash { +pub fn note_preimage(who: u64) -> ::Hash { use std::sync::atomic::{AtomicU8, Ordering}; // note a new preimage on every function invoke. static COUNTER: AtomicU8 = AtomicU8::new(0); diff --git a/substrate/frame/referenda/src/tests.rs b/substrate/frame/referenda/src/tests.rs index d748c524605a..8f51136de0bf 100644 --- a/substrate/frame/referenda/src/tests.rs +++ b/substrate/frame/referenda/src/tests.rs @@ -599,9 +599,8 @@ fn curve_handles_all_inputs() { #[test] fn set_metadata_works() { ExtBuilder::default().build_and_execute(|| { - use frame_support::traits::Hash as PreimageHash; // invalid preimage hash. - let invalid_hash: PreimageHash = [1u8; 32].into(); + let invalid_hash: ::Hash = [1u8; 32].into(); // fails to set metadata for a finished referendum. assert_ok!(Referenda::submit( RuntimeOrigin::signed(1), diff --git a/substrate/frame/referenda/src/types.rs b/substrate/frame/referenda/src/types.rs index ba89383888a7..8d6a13ef27c9 100644 --- a/substrate/frame/referenda/src/types.rs +++ b/substrate/frame/referenda/src/types.rs @@ -34,7 +34,8 @@ pub type NegativeImbalanceOf = <>::Currency as Currency< ::AccountId, >>::NegativeImbalance; pub type CallOf = >::RuntimeCall; -pub type BoundedCallOf = Bounded<>::RuntimeCall>; +pub type BoundedCallOf = + Bounded<>::RuntimeCall, ::Hashing>; pub type VotesOf = >::Votes; pub type TallyOf = >::Tally; pub type PalletsOriginOf = diff --git a/substrate/frame/scheduler/src/benchmarking.rs b/substrate/frame/scheduler/src/benchmarking.rs index 341a19cdb230..cc86a1797378 100644 --- a/substrate/frame/scheduler/src/benchmarking.rs +++ b/substrate/frame/scheduler/src/benchmarking.rs @@ -82,13 +82,13 @@ fn make_task( Scheduled { maybe_id, priority, call, maybe_periodic, origin, _phantom: PhantomData } } -fn bounded(len: u32) -> Option::RuntimeCall>> { +fn bounded(len: u32) -> Option> { let call = <::RuntimeCall>::from(SystemCall::remark { remark: vec![0; len as usize] }); T::Preimages::bound(call).ok() } -fn make_call(maybe_lookup_len: Option) -> Bounded<::RuntimeCall> { +fn make_call(maybe_lookup_len: Option) -> BoundedCallOf { let bound = BoundedInline::bound() as u32; let mut len = match maybe_lookup_len { Some(len) => len.min(T::Preimages::MAX_LENGTH as u32 - 2).max(bound) - 3, diff --git a/substrate/frame/scheduler/src/lib.rs b/substrate/frame/scheduler/src/lib.rs index 3da4aa03caeb..532ffe7bd71b 100644 --- a/substrate/frame/scheduler/src/lib.rs +++ b/substrate/frame/scheduler/src/lib.rs @@ -91,8 +91,8 @@ use frame_support::{ ensure, traits::{ schedule::{self, DispatchTime, MaybeHashed}, - Bounded, CallerTrait, EnsureOrigin, Get, Hash as PreimageHash, IsType, OriginTrait, - PalletInfoAccess, PrivilegeCmp, QueryPreimage, StorageVersion, StorePreimage, + Bounded, CallerTrait, EnsureOrigin, Get, IsType, OriginTrait, PalletInfoAccess, + PrivilegeCmp, QueryPreimage, StorageVersion, StorePreimage, }, weights::{Weight, WeightMeter}, }; @@ 
-119,6 +119,9 @@ pub type TaskAddress = (BlockNumber, u32); pub type CallOrHashOf = MaybeHashed<::RuntimeCall, ::Hash>; +pub type BoundedCallOf = + Bounded<::RuntimeCall, ::Hashing>; + #[cfg_attr(any(feature = "std", test), derive(PartialEq, Eq))] #[derive(Clone, RuntimeDebug, Encode, Decode)] struct ScheduledV1 { @@ -165,7 +168,7 @@ pub type ScheduledV3Of = ScheduledV3< pub type ScheduledOf = Scheduled< TaskName, - Bounded<::RuntimeCall>, + BoundedCallOf, BlockNumberFor, ::PalletsOrigin, ::AccountId, @@ -254,7 +257,7 @@ pub mod pallet { type WeightInfo: WeightInfo; /// The preimage provider with which we look up call hashes to get the call. - type Preimages: QueryPreimage + StorePreimage; + type Preimages: QueryPreimage + StorePreimage; } #[pallet::storage] @@ -440,7 +443,7 @@ pub mod pallet { } } -impl> Pallet { +impl Pallet { /// Migrate storage format from V1 to V4. /// /// Returns the weight consumed by this migration. @@ -627,7 +630,7 @@ impl> Pallet { >(&bounded) { log::error!( - "Dropping undecodable call {}: {:?}", + "Dropping undecodable call {:?}: {:?}", &h, &err ); @@ -695,7 +698,7 @@ impl Pallet { Option< Scheduled< TaskName, - Bounded<::RuntimeCall>, + BoundedCallOf, BlockNumberFor, OldOrigin, T::AccountId, @@ -797,7 +800,7 @@ impl Pallet { maybe_periodic: Option>>, priority: schedule::Priority, origin: T::PalletsOrigin, - call: Bounded<::RuntimeCall>, + call: BoundedCallOf, ) -> Result>, DispatchError> { let when = Self::resolve_time(when)?; @@ -886,7 +889,7 @@ impl Pallet { maybe_periodic: Option>>, priority: schedule::Priority, origin: T::PalletsOrigin, - call: Bounded<::RuntimeCall>, + call: BoundedCallOf, ) -> Result>, DispatchError> { // ensure id it is unique if Lookup::::contains_key(&id) { @@ -1191,8 +1194,8 @@ impl Pallet { } } -impl> - schedule::v2::Anon, ::RuntimeCall, T::PalletsOrigin> for Pallet +impl schedule::v2::Anon, ::RuntimeCall, T::PalletsOrigin> + for Pallet { type Address = TaskAddress>; type Hash = T::Hash; @@ -1225,8 +1228,8 @@ impl> } } -impl> - schedule::v2::Named, ::RuntimeCall, T::PalletsOrigin> for Pallet +impl schedule::v2::Named, ::RuntimeCall, T::PalletsOrigin> + for Pallet { type Address = TaskAddress>; type Hash = T::Hash; @@ -1270,13 +1273,14 @@ impl schedule::v3::Anon, ::RuntimeCall for Pallet { type Address = TaskAddress>; + type Hasher = T::Hashing; fn schedule( when: DispatchTime>, maybe_periodic: Option>>, priority: schedule::Priority, origin: T::PalletsOrigin, - call: Bounded<::RuntimeCall>, + call: BoundedCallOf, ) -> Result { Self::do_schedule(when, maybe_periodic, priority, origin, call) } @@ -1308,6 +1312,7 @@ impl schedule::v3::Named, ::RuntimeCal for Pallet { type Address = TaskAddress>; + type Hasher = T::Hashing; fn schedule_named( id: TaskName, @@ -1315,7 +1320,7 @@ impl schedule::v3::Named, ::RuntimeCal maybe_periodic: Option>>, priority: schedule::Priority, origin: T::PalletsOrigin, - call: Bounded<::RuntimeCall>, + call: BoundedCallOf, ) -> Result { Self::do_schedule_named(id, when, maybe_periodic, priority, origin, call) } diff --git a/substrate/frame/scheduler/src/migration.rs b/substrate/frame/scheduler/src/migration.rs index 06259768f0aa..9c8b0da03fc0 100644 --- a/substrate/frame/scheduler/src/migration.rs +++ b/substrate/frame/scheduler/src/migration.rs @@ -83,7 +83,7 @@ pub mod v3 { /// Migrate the scheduler pallet from V3 to V4. 
pub struct MigrateToV4(sp_std::marker::PhantomData);
- impl> OnRuntimeUpgrade for MigrateToV4 {
+ impl OnRuntimeUpgrade for MigrateToV4 {
 #[cfg(feature = "try-runtime")]
 fn pre_upgrade() -> Result, TryRuntimeError> {
 ensure!(StorageVersion::get::>() == 3, "Can only upgrade from version 3");
diff --git a/substrate/frame/scheduler/src/tests.rs b/substrate/frame/scheduler/src/tests.rs
index 25a60732e060..1bf8b3e5f3a0 100644
--- a/substrate/frame/scheduler/src/tests.rs
+++ b/substrate/frame/scheduler/src/tests.rs
@@ -1019,7 +1019,7 @@ fn test_migrate_origin() {
 new_test_ext().execute_with(|| {
 for i in 0..3u64 {
 let k = i.twox_64_concat();
- let old: Vec, u64, u32, u64>>> = vec![
+ let old: Vec, u64, u32, u64>>> = vec![
 Some(Scheduled {
 maybe_id: None,
 priority: i as u8 + 10,
diff --git a/substrate/frame/support/src/traits.rs b/substrate/frame/support/src/traits.rs
index 10ae5d83b5ac..2179ee38f848 100644
--- a/substrate/frame/support/src/traits.rs
+++ b/substrate/frame/support/src/traits.rs
@@ -107,7 +107,7 @@ mod voting;
 pub use voting::{ClassCountOf, PollStatus, Polling, VoteTally};

 mod preimages;
-pub use preimages::{Bounded, BoundedInline, FetchResult, Hash, QueryPreimage, StorePreimage};
+pub use preimages::{Bounded, BoundedInline, FetchResult, QueryPreimage, StorePreimage};

 mod messages;
 pub use messages::{
diff --git a/substrate/frame/support/src/traits/preimages.rs b/substrate/frame/support/src/traits/preimages.rs
index bf08a286dd7c..647af029c16d 100644
--- a/substrate/frame/support/src/traits/preimages.rs
+++ b/substrate/frame/support/src/traits/preimages.rs
@@ -15,16 +15,17 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-//! Stuff for dealing with 32-byte hashed preimages.
+//! Stuff for dealing with hashed preimages.

 use codec::{Decode, Encode, EncodeLike, MaxEncodedLen};
 use scale_info::TypeInfo;
-use sp_core::{RuntimeDebug, H256};
-use sp_io::hashing::blake2_256;
-use sp_runtime::{traits::ConstU32, DispatchError};
+use sp_core::RuntimeDebug;
+use sp_runtime::{
+ traits::{ConstU32, Hash},
+ DispatchError,
+};
 use sp_std::borrow::Cow;

-pub type Hash = H256;
 pub type BoundedInline = crate::BoundedVec>;

 /// The maximum we expect a single legacy hash lookup to be.
@@ -32,29 +33,29 @@ const MAX_LEGACY_LEN: u32 = 1_000_000;

 #[derive(Encode, Decode, MaxEncodedLen, Clone, Eq, PartialEq, TypeInfo, RuntimeDebug)]
 #[codec(mel_bound())]
-pub enum Bounded {
- /// A Blake2 256 hash with no preimage length. We
- /// do not support creation of this except for transitioning from legacy state.
- /// In the future we will make this a pure `Dummy` item storing only the final `dummy` field.
- Legacy { hash: Hash, dummy: sp_std::marker::PhantomData },
+pub enum Bounded {
+ /// A hash with no preimage length. We do not support creation of this except
+ /// for transitioning from legacy state. In the future we will make this a pure
+ /// `Dummy` item storing only the final `dummy` field.
+ Legacy { hash: H::Output, dummy: sp_std::marker::PhantomData },
 /// A bounded `Call`. Its encoding must be at most 128 bytes.
 Inline(BoundedInline),
- /// A Blake2-256 hash of the call together with an upper limit for its size.
- Lookup { hash: Hash, len: u32 },
+ /// A hash of the call together with an upper limit for its size.
+ Lookup { hash: H::Output, len: u32 },
 }

-impl Bounded {
+impl Bounded {
 /// Casts the wrapped type into something that encodes alike.
 ///
 /// # Examples
 /// ```
- /// use frame_support::traits::Bounded;
+ /// use frame_support::{traits::Bounded, sp_runtime::traits::BlakeTwo256};
 ///
 /// // Transmute from `String` to `&str`.
- /// let x: Bounded = Bounded::Inline(Default::default());
- /// let _: Bounded<&str> = x.transmute();
+ /// let x: Bounded = Bounded::Inline(Default::default());
+ /// let _: Bounded<&str, BlakeTwo256> = x.transmute();
 /// ```
- pub fn transmute(self) -> Bounded
+ pub fn transmute(self) -> Bounded
 where
 T: Encode + EncodeLike,
 {
@@ -69,18 +70,18 @@ impl Bounded {
 /// Returns the hash of the preimage.
 ///
 /// The hash is re-calculated every time if the preimage is inlined.
- pub fn hash(&self) -> Hash {
+ pub fn hash(&self) -> H::Output {
 use Bounded::*;
 match self {
 Lookup { hash, .. } | Legacy { hash, .. } => *hash,
- Inline(x) => blake2_256(x.as_ref()).into(),
+ Inline(x) => ::hash(x.as_ref()),
 }
 }

 /// Returns the hash to lookup the preimage.
 ///
 /// If this is a `Bounded::Inline`, `None` is returned as no lookup is required.
- pub fn lookup_hash(&self) -> Option {
+ pub fn lookup_hash(&self) -> Option {
 use Bounded::*;
 match self {
 Lookup { hash, .. } | Legacy { hash, .. } => Some(*hash),
@@ -115,13 +116,13 @@ impl Bounded {
 }

 /// Constructs a `Lookup` bounded item.
- pub fn unrequested(hash: Hash, len: u32) -> Self {
+ pub fn unrequested(hash: H::Output, len: u32) -> Self {
 Self::Lookup { hash, len }
 }

 /// Constructs a `Legacy` bounded item.
 #[deprecated = "This API is only for transitioning to Scheduler v3 API"]
- pub fn from_legacy_hash(hash: impl Into) -> Self {
+ pub fn from_legacy_hash(hash: impl Into) -> Self {
 Self::Legacy { hash: hash.into(), dummy: sp_std::marker::PhantomData }
 }
 }
@@ -130,24 +131,27 @@ pub type FetchResult = Result, DispatchError>;

 /// An interface for looking up preimages from their hash on chain.
 pub trait QueryPreimage {
+ /// The hasher used in the runtime.
+ type H: Hash;
+
 /// Returns whether a preimage exists for a given hash and if so its length.
- fn len(hash: &Hash) -> Option;
+ fn len(hash: &::Out) -> Option;

 /// Returns the preimage for a given hash. If given, `len` must be the size of the preimage.
- fn fetch(hash: &Hash, len: Option) -> FetchResult;
+ fn fetch(hash: &::Out, len: Option) -> FetchResult;

 /// Returns whether a preimage request exists for a given hash.
- fn is_requested(hash: &Hash) -> bool;
+ fn is_requested(hash: &::Out) -> bool;

 /// Request that someone report a preimage. Providers use this to optimise the economics for
 /// preimage reporting.
- fn request(hash: &Hash);
+ fn request(hash: &::Out);

 /// Cancel a previous preimage request.
- fn unrequest(hash: &Hash);
+ fn unrequest(hash: &::Out);

 /// Request that the data required for decoding the given `bounded` value is made available.
- fn hold(bounded: &Bounded) {
+ fn hold(bounded: &Bounded) {
 use Bounded::*;
 match bounded {
 Inline(..) => {},
@@ -157,7 +161,7 @@ pub trait QueryPreimage {
 /// No longer request that the data required for decoding the given `bounded` value is made
 /// available.
- fn drop(bounded: &Bounded) {
+ fn drop(bounded: &Bounded) {
 use Bounded::*;
 match bounded {
 Inline(..) => {},
@@ -167,7 +171,7 @@ pub trait QueryPreimage {
 /// Check to see if all data required for the given `bounded` value is available for its
 /// decoding.
- fn have(bounded: &Bounded) -> bool {
+ fn have(bounded: &Bounded) -> bool {
 use Bounded::*;
 match bounded {
 Inline(..)
=> true, @@ -180,7 +184,7 @@ pub trait QueryPreimage { /// It also directly requests the given `hash` using [`Self::request`]. /// /// This may not be `peek`-able or `realize`-able. - fn pick(hash: Hash, len: u32) -> Bounded { + fn pick(hash: ::Out, len: u32) -> Bounded { Self::request(&hash); Bounded::Lookup { hash, len } } @@ -190,7 +194,7 @@ pub trait QueryPreimage { /// /// NOTE: This does not remove any data needed for realization. If you will no longer use the /// `bounded`, call `realize` instead or call `drop` afterwards. - fn peek(bounded: &Bounded) -> Result<(T, Option), DispatchError> { + fn peek(bounded: &Bounded) -> Result<(T, Option), DispatchError> { use Bounded::*; match bounded { Inline(data) => T::decode(&mut &data[..]).ok().map(|x| (x, None)), @@ -209,7 +213,9 @@ pub trait QueryPreimage { /// Convert the given `bounded` value back into its original instance. If successful, /// `drop` any data backing it. This will not break the realisability of independently /// created instances of `Bounded` which happen to have identical data. - fn realize(bounded: &Bounded) -> Result<(T, Option), DispatchError> { + fn realize( + bounded: &Bounded, + ) -> Result<(T, Option), DispatchError> { let r = Self::peek(bounded)?; Self::drop(bounded); Ok(r) @@ -230,11 +236,11 @@ pub trait StorePreimage: QueryPreimage { /// Request and attempt to store the bytes of a preimage on chain. /// /// May return `DispatchError::Exhausted` if the preimage is just too big. - fn note(bytes: Cow<[u8]>) -> Result; + fn note(bytes: Cow<[u8]>) -> Result<::Out, DispatchError>; /// Attempt to clear a previously noted preimage. Exactly the same as `unrequest` but is /// provided for symmetry. - fn unnote(hash: &Hash) { + fn unnote(hash: &::Out) { Self::unrequest(hash) } @@ -244,7 +250,7 @@ pub trait StorePreimage: QueryPreimage { /// /// NOTE: Once this API is used, you should use either `drop` or `realize`. /// The value is also noted using [`Self::note`]. 
- fn bound(t: T) -> Result, DispatchError> { + fn bound(t: T) -> Result, DispatchError> { let data = t.encode(); let len = data.len() as u32; Ok(match BoundedInline::try_from(data) { @@ -255,22 +261,24 @@ pub trait StorePreimage: QueryPreimage { } impl QueryPreimage for () { - fn len(_: &Hash) -> Option { + type H = sp_runtime::traits::BlakeTwo256; + + fn len(_: &sp_core::H256) -> Option { None } - fn fetch(_: &Hash, _: Option) -> FetchResult { + fn fetch(_: &sp_core::H256, _: Option) -> FetchResult { Err(DispatchError::Unavailable) } - fn is_requested(_: &Hash) -> bool { + fn is_requested(_: &sp_core::H256) -> bool { false } - fn request(_: &Hash) {} - fn unrequest(_: &Hash) {} + fn request(_: &sp_core::H256) {} + fn unrequest(_: &sp_core::H256) {} } impl StorePreimage for () { const MAX_LENGTH: usize = 0; - fn note(_: Cow<[u8]>) -> Result { + fn note(_: Cow<[u8]>) -> Result { Err(DispatchError::Exhausted) } } @@ -279,22 +287,22 @@ impl StorePreimage for () { mod tests { use super::*; use crate::BoundedVec; - use sp_runtime::bounded_vec; + use sp_runtime::{bounded_vec, traits::BlakeTwo256}; #[test] fn bounded_size_is_correct() { - assert_eq!(> as MaxEncodedLen>::max_encoded_len(), 131); + assert_eq!(, BlakeTwo256> as MaxEncodedLen>::max_encoded_len(), 131); } #[test] fn bounded_basic_works() { let data: BoundedVec = bounded_vec![b'a', b'b', b'c']; let len = data.len() as u32; - let hash = blake2_256(&data).into(); + let hash = BlakeTwo256::hash(&data).into(); // Inline works { - let bound: Bounded> = Bounded::Inline(data.clone()); + let bound: Bounded, BlakeTwo256> = Bounded::Inline(data.clone()); assert_eq!(bound.hash(), hash); assert_eq!(bound.len(), Some(len)); assert!(!bound.lookup_needed()); @@ -302,7 +310,8 @@ mod tests { } // Legacy works { - let bound: Bounded> = Bounded::Legacy { hash, dummy: Default::default() }; + let bound: Bounded, BlakeTwo256> = + Bounded::Legacy { hash, dummy: Default::default() }; assert_eq!(bound.hash(), hash); assert_eq!(bound.len(), None); assert!(bound.lookup_needed()); @@ -310,7 +319,8 @@ mod tests { } // Lookup works { - let bound: Bounded> = Bounded::Lookup { hash, len: data.len() as u32 }; + let bound: Bounded, BlakeTwo256> = + Bounded::Lookup { hash, len: data.len() as u32 }; assert_eq!(bound.hash(), hash); assert_eq!(bound.len(), Some(len)); assert!(bound.lookup_needed()); @@ -323,8 +333,8 @@ mod tests { let data: BoundedVec = bounded_vec![b'a', b'b', b'c']; // Transmute a `String` into a `&str`. - let x: Bounded = Bounded::Inline(data.clone()); - let y: Bounded<&str> = x.transmute(); + let x: Bounded = Bounded::Inline(data.clone()); + let y: Bounded<&str, BlakeTwo256> = x.transmute(); assert_eq!(y, Bounded::Inline(data)); } } diff --git a/substrate/frame/support/src/traits/schedule.rs b/substrate/frame/support/src/traits/schedule.rs index 74a5951142d5..7a7d1357da1e 100644 --- a/substrate/frame/support/src/traits/schedule.rs +++ b/substrate/frame/support/src/traits/schedule.rs @@ -387,6 +387,8 @@ pub mod v3 { pub trait Anon { /// An address which can be used for removing a scheduled task. type Address: Codec + MaxEncodedLen + Clone + Eq + EncodeLike + Debug + TypeInfo; + /// The hasher used in the runtime. + type Hasher: sp_runtime::traits::Hash; /// Schedule a dispatch to happen at the beginning of some block in the future. /// @@ -396,7 +398,7 @@ pub mod v3 { maybe_periodic: Option>, priority: Priority, origin: Origin, - call: Bounded, + call: Bounded, ) -> Result; /// Cancel a scheduled task. 
If periodic, then it will cancel all further instances of that, @@ -434,6 +436,8 @@ pub mod v3 { pub trait Named { /// An address which can be used for removing a scheduled task. type Address: Codec + MaxEncodedLen + Clone + Eq + EncodeLike + sp_std::fmt::Debug; + /// The hasher used in the runtime. + type Hasher: sp_runtime::traits::Hash; /// Schedule a dispatch to happen at the beginning of some block in the future. /// @@ -446,7 +450,7 @@ pub mod v3 { maybe_periodic: Option>, priority: Priority, origin: Origin, - call: Bounded, + call: Bounded, ) -> Result; /// Cancel a scheduled, named task. If periodic, then it will cancel all further instances diff --git a/substrate/frame/whitelist/src/benchmarking.rs b/substrate/frame/whitelist/src/benchmarking.rs index 1982f5eb8738..9d356f09a9d2 100644 --- a/substrate/frame/whitelist/src/benchmarking.rs +++ b/substrate/frame/whitelist/src/benchmarking.rs @@ -79,7 +79,7 @@ benchmarks! { let call_weight = call.get_dispatch_info().weight; let encoded_call = call.encode(); let call_encoded_len = encoded_call.len() as u32; - let call_hash = call.blake2_256().into(); + let call_hash = T::Hashing::hash_of(&call); Pallet::::whitelist_call(origin.clone(), call_hash) .expect("whitelisting call must be successful"); @@ -106,7 +106,7 @@ benchmarks! { let remark = sp_std::vec![1u8; n as usize]; let call: ::RuntimeCall = frame_system::Call::remark { remark }.into(); - let call_hash = call.blake2_256().into(); + let call_hash = T::Hashing::hash_of(&call); Pallet::::whitelist_call(origin.clone(), call_hash) .expect("whitelisting call must be successful"); diff --git a/substrate/frame/whitelist/src/lib.rs b/substrate/frame/whitelist/src/lib.rs index decf010b0675..44551abd1071 100644 --- a/substrate/frame/whitelist/src/lib.rs +++ b/substrate/frame/whitelist/src/lib.rs @@ -44,12 +44,11 @@ use codec::{DecodeLimit, Encode, FullCodec}; use frame_support::{ dispatch::{GetDispatchInfo, PostDispatchInfo}, ensure, - traits::{Hash as PreimageHash, QueryPreimage, StorePreimage}, + traits::{QueryPreimage, StorePreimage}, weights::Weight, - Hashable, }; use scale_info::TypeInfo; -use sp_runtime::traits::Dispatchable; +use sp_runtime::traits::{Dispatchable, Hash}; use sp_std::prelude::*; pub use pallet::*; @@ -81,7 +80,7 @@ pub mod pallet { type DispatchWhitelistedOrigin: EnsureOrigin; /// The handler of pre-images. - type Preimages: QueryPreimage + StorePreimage; + type Preimages: QueryPreimage + StorePreimage; /// The weight information for this pallet. 
type WeightInfo: WeightInfo;
@@ -93,9 +92,9 @@ pub mod pallet {
 #[pallet::event]
 #[pallet::generate_deposit(pub(super) fn deposit_event)]
 pub enum Event {
- CallWhitelisted { call_hash: PreimageHash },
- WhitelistedCallRemoved { call_hash: PreimageHash },
- WhitelistedCallDispatched { call_hash: PreimageHash, result: DispatchResultWithPostInfo },
+ CallWhitelisted { call_hash: T::Hash },
+ WhitelistedCallRemoved { call_hash: T::Hash },
+ WhitelistedCallDispatched { call_hash: T::Hash, result: DispatchResultWithPostInfo },
 }

 #[pallet::error]
@@ -113,14 +112,13 @@ pub mod pallet {
 }

 #[pallet::storage]
- pub type WhitelistedCall =
- StorageMap<_, Twox64Concat, PreimageHash, (), OptionQuery>;
+ pub type WhitelistedCall = StorageMap<_, Twox64Concat, T::Hash, (), OptionQuery>;

 #[pallet::call]
 impl Pallet {
 #[pallet::call_index(0)]
 #[pallet::weight(T::WeightInfo::whitelist_call())]
- pub fn whitelist_call(origin: OriginFor, call_hash: PreimageHash) -> DispatchResult {
+ pub fn whitelist_call(origin: OriginFor, call_hash: T::Hash) -> DispatchResult {
 T::WhitelistOrigin::ensure_origin(origin)?;

 ensure!(
@@ -138,10 +136,7 @@ pub mod pallet {
 #[pallet::call_index(1)]
 #[pallet::weight(T::WeightInfo::remove_whitelisted_call())]
- pub fn remove_whitelisted_call(
- origin: OriginFor,
- call_hash: PreimageHash,
- ) -> DispatchResult {
+ pub fn remove_whitelisted_call(origin: OriginFor, call_hash: T::Hash) -> DispatchResult {
 T::WhitelistOrigin::ensure_origin(origin)?;

 WhitelistedCall::::take(call_hash).ok_or(Error::::CallIsNotWhitelisted)?;
@@ -160,7 +155,7 @@ pub mod pallet {
 )]
 pub fn dispatch_whitelisted_call(
 origin: OriginFor,
- call_hash: PreimageHash,
+ call_hash: T::Hash,
 call_encoded_len: u32,
 call_weight_witness: Weight,
 ) -> DispatchResultWithPostInfo {
@@ -206,7 +201,7 @@ pub mod pallet {
 ) -> DispatchResultWithPostInfo {
 T::DispatchWhitelistedOrigin::ensure_origin(origin)?;

- let call_hash = call.blake2_256().into();
+ let call_hash = T::Hashing::hash_of(&call).into();

 ensure!(
 WhitelistedCall::::contains_key(call_hash),
@@ -227,10 +222,7 @@ impl Pallet {
 /// Clean whitelisting/preimage and dispatch call.
 ///
 /// Return the call actual weight of the dispatched call if there is some.
- fn clean_and_dispatch(
- call_hash: PreimageHash,
- call: ::RuntimeCall,
- ) -> Option {
+ fn clean_and_dispatch(call_hash: T::Hash, call: ::RuntimeCall) -> Option {
 WhitelistedCall::::remove(call_hash);

 T::Preimages::unrequest(&call_hash);

From 69ed3087e182d1f08209233eda20cb7531a69f44 Mon Sep 17 00:00:00 2001
From: Alejandro Martinez Andres <11448715+al3mart@users.noreply.github.com>
Date: Wed, 27 Sep 2023 16:37:00 +0200
Subject: [PATCH 61/69] OpenGov in Westend and Rococo (#1177)

Migrating [PR from the archived polkadot repo](https://github.com/paritytech/polkadot/pull/7272)

As per https://github.com/paritytech/polkadot/pull/7272#issuecomment-1559240466, the changes in this MR include the following pallets into

- [x] Rococo and
- [x] Westend

runtimes:

- pallet_conviction_voting
- pallet_referenda
- pallet_ranked_collective
- pallet_custom_origins
- pallet_whitelist

And only for westend-runtime:

- pallet_treasury

Following [Kusama runtime config](https://github.com/paritytech/polkadot/tree/dbae30efe080a1d41fe54ef4da8af47614c9ca93/runtime/kusama/src) as a baseline.
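The referenda tracks added later in this patch configure their approval and support thresholds with `pallet_referenda::Curve::LinearDecreasing` (for example: floor 50%, ceil 100%, over the whole decision period). The helper below is an illustrative sketch of the interpolation such a curve performs, not the pallet's actual `Curve` implementation:

```rust
use sp_arithmetic::{traits::Saturating, Perbill};

/// Illustrative only: the threshold of a `LinearDecreasing` curve once `now`
/// of the decision period has elapsed (all quantities expressed as `Perbill`).
fn linear_decreasing_threshold(
    length: Perbill,
    floor: Perbill,
    ceil: Perbill,
    now: Perbill,
) -> Perbill {
    // Progress through the curve, saturating once its `length` is exceeded
    // (the `.max(1)` guards against a zero-length curve).
    let progress = Perbill::from_rational(
        now.deconstruct().min(length.deconstruct()),
        length.deconstruct().max(1),
    );
    // Interpolate linearly from `ceil` down to `floor`.
    ceil.saturating_sub(progress * ceil.saturating_sub(floor))
}
```

With the parameters used by the tracks below (floor 50%, ceil 100%, length 100%), the minimum approval halfway through the decision period works out to 75%.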
Benchmarking of the following pallets was done for both Rococo and Westend:

- pallet_conviction_voting
- pallet_referenda
- pallet_ranked_collective (only on Rococo)
- pallet_whitelist

And only for Westend:

- pallet_treasury

Removed Gov1 from Rococo as in https://github.com/paritytech/polkadot/pull/6701
Rococo Gov1 storage will be cleaned in a different PR - [issue](https://github.com/paritytech/polkadot-sdk/issues/1618)

---------

Co-authored-by: Giles Cope <gilescope@gmail.com>
---
 Cargo.lock | 39 +-
 polkadot/node/service/src/chain_spec.rs | 18 +-
 polkadot/runtime/rococo/Cargo.toml | 18 +
 .../rococo/src/governance/fellowship.rs | 343 ++++++++++++
 polkadot/runtime/rococo/src/governance/mod.rs | 93 ++++
 .../runtime/rococo/src/governance/origins.rs | 194 +++++++
 .../runtime/rococo/src/governance/tracks.rs | 320 +++++++++++
 polkadot/runtime/rococo/src/lib.rs | 311 +++--------
 polkadot/runtime/rococo/src/weights/mod.rs | 11 +-
 .../rococo/src/weights/pallet_collective.rs | 196 -------
 .../src/weights/pallet_collective_council.rs | 322 -----------
 .../pallet_collective_technical_committee.rs | 324 -----------
 .../src/weights/pallet_conviction_voting.rs | 195 +++++++
 .../rococo/src/weights/pallet_democracy.rs | 525 ------------------
 .../src/weights/pallet_elections_phragmen.rs | 315 -----------
 .../rococo/src/weights/pallet_membership.rs | 204 -------
 .../src/weights/pallet_ranked_collective.rs | 175 ++++++
 .../pallet_referenda_fellowship_referenda.rs | 524 +++++++++++++++++
 .../src/weights/pallet_referenda_referenda.rs | 522 +++++++++++++++++
 .../runtime/rococo/src/weights/pallet_tips.rs | 161 ------
 .../rococo/src/weights/pallet_whitelist.rs | 116 ++++
 polkadot/runtime/rococo/src/xcm_config.rs | 50 +-
 polkadot/runtime/westend/Cargo.toml | 14 +
 polkadot/runtime/westend/constants/src/lib.rs | 20 +
 .../runtime/westend/src/governance/mod.rs | 97 ++++
 .../runtime/westend/src/governance/origins.rs | 194 +++++++
 .../runtime/westend/src/governance/tracks.rs | 320 +++++++++++
 polkadot/runtime/westend/src/lib.rs | 107 +++-
 polkadot/runtime/westend/src/weights/mod.rs | 5 +
 .../src/weights/pallet_conviction_voting.rs | 194 +++++++
 .../pallet_referenda_fellowship_referenda.rs | 525 ++++++++++++++++++
 .../src/weights/pallet_referenda_referenda.rs | 523 +++++++++++++++++
 .../westend/src/weights/pallet_treasury.rs | 150 +++++
 .../westend/src/weights/pallet_whitelist.rs | 116 ++++
 polkadot/runtime/westend/src/xcm_config.rs | 108 +++-
 35 files changed, 4964 insertions(+), 2385 deletions(-)
 create mode 100644 polkadot/runtime/rococo/src/governance/fellowship.rs
 create mode 100644 polkadot/runtime/rococo/src/governance/mod.rs
 create mode 100644 polkadot/runtime/rococo/src/governance/origins.rs
 create mode 100644 polkadot/runtime/rococo/src/governance/tracks.rs
 delete mode 100644 polkadot/runtime/rococo/src/weights/pallet_collective.rs
 delete mode 100644 polkadot/runtime/rococo/src/weights/pallet_collective_council.rs
 delete mode 100644 polkadot/runtime/rococo/src/weights/pallet_collective_technical_committee.rs
 create mode 100644 polkadot/runtime/rococo/src/weights/pallet_conviction_voting.rs
 delete mode 100644 polkadot/runtime/rococo/src/weights/pallet_democracy.rs
 delete mode 100644 polkadot/runtime/rococo/src/weights/pallet_elections_phragmen.rs
 delete mode 100644 polkadot/runtime/rococo/src/weights/pallet_membership.rs
 create mode 100644 polkadot/runtime/rococo/src/weights/pallet_ranked_collective.rs
 create mode 100644 polkadot/runtime/rococo/src/weights/pallet_referenda_fellowship_referenda.rs
 create mode 100644
polkadot/runtime/rococo/src/weights/pallet_referenda_referenda.rs delete mode 100644 polkadot/runtime/rococo/src/weights/pallet_tips.rs create mode 100644 polkadot/runtime/rococo/src/weights/pallet_whitelist.rs create mode 100644 polkadot/runtime/westend/src/governance/mod.rs create mode 100644 polkadot/runtime/westend/src/governance/origins.rs create mode 100644 polkadot/runtime/westend/src/governance/tracks.rs create mode 100644 polkadot/runtime/westend/src/weights/pallet_conviction_voting.rs create mode 100644 polkadot/runtime/westend/src/weights/pallet_referenda_fellowship_referenda.rs create mode 100644 polkadot/runtime/westend/src/weights/pallet_referenda_referenda.rs create mode 100644 polkadot/runtime/westend/src/weights/pallet_treasury.rs create mode 100644 polkadot/runtime/westend/src/weights/pallet_whitelist.rs diff --git a/Cargo.lock b/Cargo.lock index dcfd8cd66017..7a11c5b7f196 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1152,7 +1152,7 @@ checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" dependencies = [ "async-stream-impl", "futures-core", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.13", ] [[package]] @@ -1193,7 +1193,7 @@ dependencies = [ "futures-sink", "futures-util", "memchr", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.13", ] [[package]] @@ -5601,7 +5601,7 @@ dependencies = [ "futures-io", "memchr", "parking", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.13", "waker-fn", ] @@ -5658,7 +5658,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.13", "pin-utils", "slab", ] @@ -6067,7 +6067,7 @@ checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", "http", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.13", ] [[package]] @@ -6110,7 +6110,7 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.13", "socket2 0.4.9", "tokio", "tower-service", @@ -11366,9 +11366,9 @@ checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" [[package]] name = "pin-project-lite" -version = "0.2.12" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12cc1b0bf1727a77a54b6654e7b5f1af8604923edc8b81885f8ec92f9e3f0a05" +checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" [[package]] name = "pin-utils" @@ -13172,7 +13172,7 @@ dependencies = [ "concurrent-queue", "libc", "log", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.13", "windows-sys 0.48.0", ] @@ -13951,7 +13951,7 @@ dependencies = [ "mime", "once_cell", "percent-encoding", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.13", "rustls 0.21.6", "rustls-pemfile", "serde", @@ -14128,6 +14128,7 @@ dependencies = [ "pallet-bounties", "pallet-child-bounties", "pallet-collective", + "pallet-conviction-voting", "pallet-democracy", "pallet-elections-phragmen", "pallet-grandpa", @@ -14142,7 +14143,9 @@ dependencies = [ "pallet-offences", "pallet-preimage", "pallet-proxy", + "pallet-ranked-collective", "pallet-recovery", + "pallet-referenda", "pallet-scheduler", "pallet-session", "pallet-society", @@ -14156,6 +14159,7 @@ dependencies = [ "pallet-treasury", "pallet-utility", "pallet-vesting", + "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "parity-scale-codec", @@ -14171,6 +14175,7 @@ dependencies = [ "serde_json", "smallvec", "sp-api", + "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-babe", 
@@ -18789,7 +18794,7 @@ dependencies = [ "mio", "num_cpus", "parking_lot 0.12.1", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.13", "signal-hook-registry", "socket2 0.5.3", "tokio-macros", @@ -18835,7 +18840,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ "futures-core", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.13", "tokio", "tokio-util", ] @@ -18875,7 +18880,7 @@ dependencies = [ "futures-core", "futures-io", "futures-sink", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.13", "tokio", "tracing", ] @@ -18947,7 +18952,7 @@ dependencies = [ "http", "http-body", "http-range-header", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.13", "tower-layer", "tower-service", ] @@ -18972,7 +18977,7 @@ checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ "cfg-if", "log", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.13", "tracing-attributes", "tracing-core", ] @@ -20257,6 +20262,7 @@ dependencies = [ "pallet-beefy", "pallet-beefy-mmr", "pallet-collective", + "pallet-conviction-voting", "pallet-democracy", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", @@ -20278,6 +20284,7 @@ dependencies = [ "pallet-preimage", "pallet-proxy", "pallet-recovery", + "pallet-referenda", "pallet-scheduler", "pallet-session", "pallet-session-benchmarking", @@ -20293,6 +20300,7 @@ dependencies = [ "pallet-treasury", "pallet-utility", "pallet-vesting", + "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "parity-scale-codec", @@ -20308,6 +20316,7 @@ dependencies = [ "smallvec", "sp-api", "sp-application-crypto", + "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-babe", diff --git a/polkadot/node/service/src/chain_spec.rs b/polkadot/node/service/src/chain_spec.rs index 7fd9ce61c95e..97b3fab89e32 100644 --- a/polkadot/node/service/src/chain_spec.rs +++ b/polkadot/node/service/src/chain_spec.rs @@ -425,6 +425,7 @@ fn westend_staging_testnet_config_genesis(wasm_binary: &[u8]) -> westend::Runtim vesting: westend::VestingConfig { vesting: vec![] }, sudo: westend::SudoConfig { key: Some(endowed_accounts[0].clone()) }, hrmp: Default::default(), + treasury: Default::default(), configuration: westend::ConfigurationConfig { config: default_parachains_host_configuration(), }, @@ -716,7 +717,6 @@ fn rococo_staging_testnet_config_genesis( }) .collect::>(), }, - phragmen_election: Default::default(), babe: rococo_runtime::BabeConfig { authorities: Default::default(), epoch_config: Some(rococo_runtime::BABE_GENESIS_EPOCH_CONFIG), @@ -724,13 +724,6 @@ fn rococo_staging_testnet_config_genesis( }, grandpa: Default::default(), im_online: Default::default(), - democracy: rococo_runtime::DemocracyConfig::default(), - council: rococo::CouncilConfig { members: vec![], phantom: Default::default() }, - technical_committee: rococo::TechnicalCommitteeConfig { - members: vec![], - phantom: Default::default(), - }, - technical_membership: Default::default(), treasury: Default::default(), authority_discovery: rococo_runtime::AuthorityDiscoveryConfig { keys: vec![], @@ -992,6 +985,7 @@ pub fn westend_testnet_genesis( vesting: westend::VestingConfig { vesting: vec![] }, sudo: westend::SudoConfig { key: Some(root_key) }, hrmp: Default::default(), + treasury: Default::default(), configuration: westend::ConfigurationConfig { config: default_parachains_host_configuration(), }, @@ -1062,14 
+1056,6 @@ pub fn rococo_testnet_genesis( }, grandpa: Default::default(), im_online: Default::default(), - phragmen_election: Default::default(), - democracy: rococo::DemocracyConfig::default(), - council: rococo::CouncilConfig { members: vec![], phantom: Default::default() }, - technical_committee: rococo::TechnicalCommitteeConfig { - members: vec![], - phantom: Default::default(), - }, - technical_membership: Default::default(), treasury: Default::default(), claims: rococo::ClaimsConfig { claims: vec![], vesting: vec![] }, vesting: rococo::VestingConfig { vesting: vec![] }, diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml index 64cf6207236d..106d8aafa764 100644 --- a/polkadot/runtime/rococo/Cargo.toml +++ b/polkadot/runtime/rococo/Cargo.toml @@ -24,6 +24,7 @@ sp-api = { path = "../../../substrate/primitives/api", default-features = false sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } inherents = { package = "sp-inherents", path = "../../../substrate/primitives/inherents", default-features = false } offchain-primitives = { package = "sp-offchain", path = "../../../substrate/primitives/offchain", default-features = false } +sp-arithmetic = { path = "../../../substrate/primitives/arithmetic", default-features = false } sp-std = { package = "sp-std", path = "../../../substrate/primitives/std", default-features = false } sp-io = { path = "../../../substrate/primitives/io", default-features = false } sp-mmr-primitives = { path = "../../../substrate/primitives/merkle-mountain-range", default-features = false } @@ -48,6 +49,7 @@ pallet-state-trie-migration = { path = "../../../substrate/frame/state-trie-migr pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } pallet-collective = { path = "../../../substrate/frame/collective", default-features = false } +pallet-conviction-voting = { path = "../../../substrate/frame/conviction-voting", default-features = false } pallet-democracy = { path = "../../../substrate/frame/democracy", default-features = false } pallet-elections-phragmen = { path = "../../../substrate/frame/elections-phragmen", default-features = false } frame-executive = { path = "../../../substrate/frame/executive", default-features = false } @@ -63,7 +65,9 @@ pallet-nis = { path = "../../../substrate/frame/nis", default-features = false } pallet-offences = { path = "../../../substrate/frame/offences", default-features = false } pallet-preimage = { path = "../../../substrate/frame/preimage", default-features = false } pallet-proxy = { path = "../../../substrate/frame/proxy", default-features = false } +pallet-ranked-collective = { path = "../../../substrate/frame/ranked-collective", default-features = false } pallet-recovery = { path = "../../../substrate/frame/recovery", default-features = false } +pallet-referenda = { path = "../../../substrate/frame/referenda", default-features = false } pallet-scheduler = { path = "../../../substrate/frame/scheduler", default-features = false } pallet-session = { path = "../../../substrate/frame/session", default-features = false } pallet-society = { path = "../../../substrate/frame/society", default-features = false } @@ -77,6 +81,7 @@ pallet-tips = { path = "../../../substrate/frame/tips", default-features = false pallet-treasury = { path = 
"../../../substrate/frame/treasury", default-features = false } pallet-utility = { path = "../../../substrate/frame/utility", default-features = false } pallet-vesting = { path = "../../../substrate/frame/vesting", default-features = false } +pallet-whitelist = { path = "../../../substrate/frame/whitelist", default-features = false } pallet-xcm = { path = "../../xcm/pallet-xcm", default-features = false } pallet-xcm-benchmarks = { path = "../../xcm/pallet-xcm-benchmarks", default-features = false, optional = true } @@ -135,6 +140,7 @@ std = [ "pallet-bounties/std", "pallet-child-bounties/std", "pallet-collective/std", + "pallet-conviction-voting/std", "pallet-democracy/std", "pallet-elections-phragmen/std", "pallet-grandpa/std", @@ -149,7 +155,9 @@ std = [ "pallet-offences/std", "pallet-preimage/std", "pallet-proxy/std", + "pallet-ranked-collective/std", "pallet-recovery/std", + "pallet-referenda/std", "pallet-scheduler/std", "pallet-session/std", "pallet-society/std", @@ -163,6 +171,7 @@ std = [ "pallet-treasury/std", "pallet-utility/std", "pallet-vesting/std", + "pallet-whitelist/std", "pallet-xcm-benchmarks?/std", "pallet-xcm/std", "parity-scale-codec/std", @@ -175,6 +184,7 @@ std = [ "serde/std", "serde_derive", "sp-api/std", + "sp-arithmetic/std", "sp-core/std", "sp-genesis-builder/std", "sp-io/std", @@ -201,6 +211,7 @@ runtime-benchmarks = [ "pallet-bounties/runtime-benchmarks", "pallet-child-bounties/runtime-benchmarks", "pallet-collective/runtime-benchmarks", + "pallet-conviction-voting/runtime-benchmarks", "pallet-democracy/runtime-benchmarks", "pallet-elections-phragmen/runtime-benchmarks", "pallet-grandpa/runtime-benchmarks", @@ -215,7 +226,9 @@ runtime-benchmarks = [ "pallet-offences/runtime-benchmarks", "pallet-preimage/runtime-benchmarks", "pallet-proxy/runtime-benchmarks", + "pallet-ranked-collective/runtime-benchmarks", "pallet-recovery/runtime-benchmarks", + "pallet-referenda/runtime-benchmarks", "pallet-scheduler/runtime-benchmarks", "pallet-society/runtime-benchmarks", "pallet-staking/runtime-benchmarks", @@ -226,6 +239,7 @@ runtime-benchmarks = [ "pallet-treasury/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", + "pallet-whitelist/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", @@ -252,6 +266,7 @@ try-runtime = [ "pallet-bounties/try-runtime", "pallet-child-bounties/try-runtime", "pallet-collective/try-runtime", + "pallet-conviction-voting/try-runtime", "pallet-democracy/try-runtime", "pallet-elections-phragmen/try-runtime", "pallet-grandpa/try-runtime", @@ -266,7 +281,9 @@ try-runtime = [ "pallet-offences/try-runtime", "pallet-preimage/try-runtime", "pallet-proxy/try-runtime", + "pallet-ranked-collective/try-runtime", "pallet-recovery/try-runtime", + "pallet-referenda/try-runtime", "pallet-scheduler/try-runtime", "pallet-session/try-runtime", "pallet-society/try-runtime", @@ -279,6 +296,7 @@ try-runtime = [ "pallet-treasury/try-runtime", "pallet-utility/try-runtime", "pallet-vesting/try-runtime", + "pallet-whitelist/try-runtime", "pallet-xcm/try-runtime", "runtime-common/try-runtime", "runtime-parachains/try-runtime", diff --git a/polkadot/runtime/rococo/src/governance/fellowship.rs b/polkadot/runtime/rococo/src/governance/fellowship.rs new file mode 100644 index 000000000000..b5df6cf2df34 --- /dev/null +++ b/polkadot/runtime/rococo/src/governance/fellowship.rs @@ -0,0 +1,343 @@ +// Copyright (C) Parity Technologies (UK) 
Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Elements of governance concerning the Rococo Fellowship. + +use frame_support::traits::{MapSuccess, TryMapSuccess}; +use sp_runtime::traits::{CheckedReduceBy, ConstU16, Replace}; + +use super::*; +use crate::{CENTS, DAYS}; + +parameter_types! { + pub const AlarmInterval: BlockNumber = 1; + pub const SubmissionDeposit: Balance = 0; + pub const UndecidingTimeout: BlockNumber = 7 * DAYS; +} + +pub struct TracksInfo; +impl pallet_referenda::TracksInfo for TracksInfo { + type Id = u16; + type RuntimeOrigin = ::PalletsOrigin; + fn tracks() -> &'static [(Self::Id, pallet_referenda::TrackInfo)] { + static DATA: [(u16, pallet_referenda::TrackInfo); 10] = [ + ( + 0u16, + pallet_referenda::TrackInfo { + name: "candidates", + max_deciding: 10, + decision_deposit: 100 * 3 * CENTS, + prepare_period: 30 * MINUTES, + decision_period: 7 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 1 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + 1u16, + pallet_referenda::TrackInfo { + name: "members", + max_deciding: 10, + decision_deposit: 10 * 3 * CENTS, + prepare_period: 30 * MINUTES, + decision_period: 7 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 1 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + 2u16, + pallet_referenda::TrackInfo { + name: "proficients", + max_deciding: 10, + decision_deposit: 10 * 3 * CENTS, + prepare_period: 30 * MINUTES, + decision_period: 7 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 1 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + 3u16, + pallet_referenda::TrackInfo { + name: "fellows", + max_deciding: 10, + decision_deposit: 10 * 3 * CENTS, + prepare_period: 30 * MINUTES, + decision_period: 7 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 1 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: 
Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + 4u16, + pallet_referenda::TrackInfo { + name: "senior fellows", + max_deciding: 10, + decision_deposit: 10 * 3 * CENTS, + prepare_period: 30 * MINUTES, + decision_period: 7 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 1 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + 5u16, + pallet_referenda::TrackInfo { + name: "experts", + max_deciding: 10, + decision_deposit: 1 * 3 * CENTS, + prepare_period: 30 * MINUTES, + decision_period: 7 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 1 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + 6u16, + pallet_referenda::TrackInfo { + name: "senior experts", + max_deciding: 10, + decision_deposit: 1 * 3 * CENTS, + prepare_period: 30 * MINUTES, + decision_period: 7 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 1 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + 7u16, + pallet_referenda::TrackInfo { + name: "masters", + max_deciding: 10, + decision_deposit: 1 * 3 * CENTS, + prepare_period: 30 * MINUTES, + decision_period: 7 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 1 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + 8u16, + pallet_referenda::TrackInfo { + name: "senior masters", + max_deciding: 10, + decision_deposit: 1 * 3 * CENTS, + prepare_period: 30 * MINUTES, + decision_period: 7 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 1 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + 9u16, + pallet_referenda::TrackInfo { + name: "grand masters", + max_deciding: 10, + decision_deposit: 1 * 3 * CENTS, + prepare_period: 30 * MINUTES, + decision_period: 7 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 1 * MINUTES, + min_approval: 
pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(50), + }, + }, + ), + ]; + &DATA[..] + } + fn track_for(id: &Self::RuntimeOrigin) -> Result { + use super::origins::Origin; + + #[cfg(feature = "runtime-benchmarks")] + { + // For benchmarks, we enable a root origin. + // It is important that this is not available in production! + let root: Self::RuntimeOrigin = frame_system::RawOrigin::Root.into(); + if &root == id { + return Ok(9) + } + } + + match Origin::try_from(id.clone()) { + Ok(Origin::FellowshipInitiates) => Ok(0), + Ok(Origin::Fellowship1Dan) => Ok(1), + Ok(Origin::Fellowship2Dan) => Ok(2), + Ok(Origin::Fellowship3Dan) | Ok(Origin::Fellows) => Ok(3), + Ok(Origin::Fellowship4Dan) => Ok(4), + Ok(Origin::Fellowship5Dan) | Ok(Origin::FellowshipExperts) => Ok(5), + Ok(Origin::Fellowship6Dan) => Ok(6), + Ok(Origin::Fellowship7Dan | Origin::FellowshipMasters) => Ok(7), + Ok(Origin::Fellowship8Dan) => Ok(8), + Ok(Origin::Fellowship9Dan) => Ok(9), + _ => Err(()), + } + } +} +pallet_referenda::impl_tracksinfo_get!(TracksInfo, Balance, BlockNumber); + +pub type FellowshipReferendaInstance = pallet_referenda::Instance2; + +impl pallet_referenda::Config for Runtime { + type WeightInfo = weights::pallet_referenda_fellowship_referenda::WeightInfo; + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type Scheduler = Scheduler; + type Currency = Balances; + type SubmitOrigin = + pallet_ranked_collective::EnsureMember; + type CancelOrigin = FellowshipExperts; + type KillOrigin = FellowshipMasters; + type Slash = Treasury; + type Votes = pallet_ranked_collective::Votes; + type Tally = pallet_ranked_collective::TallyOf; + type SubmissionDeposit = SubmissionDeposit; + type MaxQueued = ConstU32<100>; + type UndecidingTimeout = UndecidingTimeout; + type AlarmInterval = AlarmInterval; + type Tracks = TracksInfo; + type Preimages = Preimage; +} + +pub type FellowshipCollectiveInstance = pallet_ranked_collective::Instance1; + +impl pallet_ranked_collective::Config for Runtime { + type WeightInfo = weights::pallet_ranked_collective::WeightInfo; + type RuntimeEvent = RuntimeEvent; + // Promotion is by any of: + // - Root can demote arbitrarily. + // - the FellowshipAdmin origin (i.e. token holder referendum); + // - a vote by the rank *above* the new rank. + type PromoteOrigin = EitherOf< + frame_system::EnsureRootWithSuccess>, + EitherOf< + MapSuccess>>, + TryMapSuccess>>, + >, + >; + // Demotion is by any of: + // - Root can demote arbitrarily. + // - the FellowshipAdmin origin (i.e. token holder referendum); + // - a vote by the rank two above the current rank. + type DemoteOrigin = EitherOf< + frame_system::EnsureRootWithSuccess>, + EitherOf< + MapSuccess>>, + TryMapSuccess>>, + >, + >; + type Polls = FellowshipReferenda; + type MinRankOfClass = sp_runtime::traits::Identity; + type VoteWeight = pallet_ranked_collective::Geometric; +} diff --git a/polkadot/runtime/rococo/src/governance/mod.rs b/polkadot/runtime/rococo/src/governance/mod.rs new file mode 100644 index 000000000000..ef2adf60753d --- /dev/null +++ b/polkadot/runtime/rococo/src/governance/mod.rs @@ -0,0 +1,93 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
diff --git a/polkadot/runtime/rococo/src/governance/mod.rs b/polkadot/runtime/rococo/src/governance/mod.rs
new file mode 100644
index 000000000000..ef2adf60753d
--- /dev/null
+++ b/polkadot/runtime/rococo/src/governance/mod.rs
@@ -0,0 +1,93 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <https://www.gnu.org/licenses/>.
+
+//! New governance configurations for the Rococo runtime.
+
+use super::*;
+use frame_support::{
+	parameter_types,
+	traits::{ConstU16, EitherOf},
+};
+use frame_system::EnsureRootWithSuccess;
+
+mod origins;
+pub use origins::{
+	pallet_custom_origins, AuctionAdmin, Fellows, FellowshipAdmin, FellowshipExperts,
+	FellowshipInitiates, FellowshipMasters, GeneralAdmin, LeaseAdmin, ReferendumCanceller,
+	ReferendumKiller, Spender, StakingAdmin, Treasurer, WhitelistedCaller,
+};
+mod tracks;
+pub use tracks::TracksInfo;
+mod fellowship;
+pub use fellowship::{FellowshipCollectiveInstance, FellowshipReferendaInstance};
+
+parameter_types! {
+	pub const VoteLockingPeriod: BlockNumber = 7 * DAYS;
+}
+
+impl pallet_conviction_voting::Config for Runtime {
+	type WeightInfo = weights::pallet_conviction_voting::WeightInfo<Runtime>;
+	type RuntimeEvent = RuntimeEvent;
+	type Currency = Balances;
+	type VoteLockingPeriod = VoteLockingPeriod;
+	type MaxVotes = ConstU32<512>;
+	type MaxTurnout =
+		frame_support::traits::tokens::currency::ActiveIssuanceOf<Balances, Self::AccountId>;
+	type Polls = Referenda;
+}
+
+parameter_types! {
+	pub const AlarmInterval: BlockNumber = 1;
+	pub const SubmissionDeposit: Balance = 1 * 3 * CENTS;
+	pub const UndecidingTimeout: BlockNumber = 14 * DAYS;
+}
+
+parameter_types! {
+	pub const MaxBalance: Balance = Balance::max_value();
+}
+pub type TreasurySpender = EitherOf<EnsureRootWithSuccess<AccountId, MaxBalance>, Spender>;
+
+impl origins::pallet_custom_origins::Config for Runtime {}
+
+impl pallet_whitelist::Config for Runtime {
+	type WeightInfo = weights::pallet_whitelist::WeightInfo<Runtime>;
+	type RuntimeCall = RuntimeCall;
+	type RuntimeEvent = RuntimeEvent;
+	type WhitelistOrigin =
+		EitherOf<EnsureRootWithSuccess<Self::AccountId, ConstU16<65535>>, Fellows>;
+	type DispatchWhitelistedOrigin = EitherOf<EnsureRoot<Self::AccountId>, WhitelistedCaller>;
+	type Preimages = Preimage;
+}
+
+impl pallet_referenda::Config for Runtime {
+	type WeightInfo = weights::pallet_referenda_referenda::WeightInfo<Runtime>;
+	type RuntimeCall = RuntimeCall;
+	type RuntimeEvent = RuntimeEvent;
+	type Scheduler = Scheduler;
+	type Currency = Balances;
+	type SubmitOrigin = frame_system::EnsureSigned<AccountId>;
+	type CancelOrigin = EitherOf<EnsureRoot<AccountId>, ReferendumCanceller>;
+	type KillOrigin = EitherOf<EnsureRoot<AccountId>, ReferendumKiller>;
+	type Slash = Treasury;
+	type Votes = pallet_conviction_voting::VotesOf<Runtime>;
+	type Tally = pallet_conviction_voting::TallyOf<Runtime>;
+	type SubmissionDeposit = SubmissionDeposit;
+	type MaxQueued = ConstU32<100>;
+	type UndecidingTimeout = UndecidingTimeout;
+	type AlarmInterval = AlarmInterval;
+	type Tracks = TracksInfo;
+	type Preimages = Preimage;
+}
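In the `pallet_whitelist` configuration above, `WhitelistOrigin` (root or `Fellows`) approves a call hash, and `DispatchWhitelistedOrigin` (root or `WhitelistedCaller`) may later dispatch only a call whose hash was approved. A minimal sketch of that two-step flow; `Whitelist` and its methods here are assumed stand-ins, not the pallet's actual storage or API:

```rust
use std::collections::HashSet;

struct Whitelist {
    approved: HashSet<[u8; 32]>,
}

impl Whitelist {
    fn new() -> Self {
        Self { approved: HashSet::new() }
    }

    /// `WhitelistOrigin` (root or `Fellows`): record a call hash.
    fn whitelist_call(&mut self, call_hash: [u8; 32]) {
        self.approved.insert(call_hash);
    }

    /// `DispatchWhitelistedOrigin` (root or `WhitelistedCaller`): only a
    /// previously whitelisted hash may be dispatched, and it is consumed
    /// so the same approval cannot be replayed.
    fn dispatch_whitelisted(&mut self, call_hash: [u8; 32]) -> Result<(), &'static str> {
        if self.approved.remove(&call_hash) {
            Ok(()) // the real pallet decodes and dispatches the call here
        } else {
            Err("call hash was not whitelisted")
        }
    }
}

fn main() {
    let mut wl = Whitelist::new();
    let hash = [0u8; 32]; // stand-in for the hash of an encoded call
    assert!(wl.dispatch_whitelisted(hash).is_err());
    wl.whitelist_call(hash);
    assert!(wl.dispatch_whitelisted(hash).is_ok());
    assert!(wl.dispatch_whitelisted(hash).is_err()); // already consumed
}
```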
diff --git a/polkadot/runtime/rococo/src/governance/origins.rs b/polkadot/runtime/rococo/src/governance/origins.rs
new file mode 100644
index 000000000000..e4639f40dc43
--- /dev/null
+++ b/polkadot/runtime/rococo/src/governance/origins.rs
@@ -0,0 +1,194 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <https://www.gnu.org/licenses/>.
+
+//! Custom origins for governance interventions.
+
+pub use pallet_custom_origins::*;
+
+#[frame_support::pallet]
+pub mod pallet_custom_origins {
+	use crate::{Balance, CENTS, GRAND};
+	use frame_support::pallet_prelude::*;
+
+	#[pallet::config]
+	pub trait Config: frame_system::Config {}
+
+	#[pallet::pallet]
+	pub struct Pallet(_);
+
+	#[derive(PartialEq, Eq, Clone, MaxEncodedLen, Encode, Decode, TypeInfo, RuntimeDebug)]
+	#[pallet::origin]
+	pub enum Origin {
+		/// Origin for cancelling slashes.
+		StakingAdmin,
+		/// Origin for spending (any amount of) funds.
+		Treasurer,
+		/// Origin for managing the composition of the fellowship.
+		FellowshipAdmin,
+		/// Origin for managing the registrar.
+		GeneralAdmin,
+		/// Origin for starting auctions.
+		AuctionAdmin,
+		/// Origin able to force slot leases.
+		LeaseAdmin,
+		/// Origin able to cancel referenda.
+		ReferendumCanceller,
+		/// Origin able to kill referenda.
+		ReferendumKiller,
+		/// Origin able to spend up to 1 KSM from the treasury at once.
+		SmallTipper,
+		/// Origin able to spend up to 5 KSM from the treasury at once.
+		BigTipper,
+		/// Origin able to spend up to 50 KSM from the treasury at once.
+		SmallSpender,
+		/// Origin able to spend up to 500 KSM from the treasury at once.
+		MediumSpender,
+		/// Origin able to spend up to 5,000 KSM from the treasury at once.
+		BigSpender,
+		/// Origin able to dispatch a whitelisted call.
+		WhitelistedCaller,
+		/// Origin commanded by any members of the Polkadot Fellowship (no Dan grade needed).
+		FellowshipInitiates,
+		/// Origin commanded by Polkadot Fellows (3rd Dan fellows or greater).
+		Fellows,
+		/// Origin commanded by Polkadot Experts (5th Dan fellows or greater).
+		FellowshipExperts,
+		/// Origin commanded by Polkadot Masters (7th Dan fellows or greater).
+		FellowshipMasters,
+		/// Origin commanded by rank 1 of the Polkadot Fellowship and with a success of 1.
+		Fellowship1Dan,
+		/// Origin commanded by rank 2 of the Polkadot Fellowship and with a success of 2.
+		Fellowship2Dan,
+		/// Origin commanded by rank 3 of the Polkadot Fellowship and with a success of 3.
+		Fellowship3Dan,
+		/// Origin commanded by rank 4 of the Polkadot Fellowship and with a success of 4.
+		Fellowship4Dan,
+		/// Origin commanded by rank 5 of the Polkadot Fellowship and with a success of 5.
+		Fellowship5Dan,
+		/// Origin commanded by rank 6 of the Polkadot Fellowship and with a success of 6.
+		Fellowship6Dan,
+		/// Origin commanded by rank 7 of the Polkadot Fellowship and with a success of 7.
+		Fellowship7Dan,
+		/// Origin commanded by rank 8 of the Polkadot Fellowship and with a success of 8.
+		Fellowship8Dan,
+		/// Origin commanded by rank 9 of the Polkadot Fellowship and with a success of 9.
+		Fellowship9Dan,
+	}
+
+	macro_rules! decl_unit_ensures {
+		( $name:ident: $success_type:ty = $success:expr ) => {
+			pub struct $name;
+			impl<O: Into<Result<Origin, O>> + From<Origin>>
+				EnsureOrigin<O> for $name
+			{
+				type Success = $success_type;
+				fn try_origin(o: O) -> Result<Self::Success, O> {
+					o.into().and_then(|o| match o {
+						Origin::$name => Ok($success),
+						r => Err(O::from(r)),
+					})
+				}
+				#[cfg(feature = "runtime-benchmarks")]
+				fn try_successful_origin() -> Result<O, ()> {
+					Ok(O::from(Origin::$name))
+				}
+			}
+		};
+		( $name:ident ) => { decl_unit_ensures! { $name : () = () } };
+		( $name:ident: $success_type:ty = $success:expr, $( $rest:tt )* ) => {
+			decl_unit_ensures! { $name: $success_type = $success }
+			decl_unit_ensures! { $( $rest )* }
+		};
+		( $name:ident, $( $rest:tt )* ) => {
+			decl_unit_ensures! { $name }
+			decl_unit_ensures! { $( $rest )* }
+		};
+		() => {}
+	}
+	decl_unit_ensures!(
+		StakingAdmin,
+		Treasurer,
+		FellowshipAdmin,
+		GeneralAdmin,
+		AuctionAdmin,
+		LeaseAdmin,
+		ReferendumCanceller,
+		ReferendumKiller,
+		WhitelistedCaller,
+		FellowshipInitiates: u16 = 0,
+		Fellows: u16 = 3,
+		FellowshipExperts: u16 = 5,
+		FellowshipMasters: u16 = 7,
+	);
+
+	macro_rules! decl_ensure {
+		(
+			$vis:vis type $name:ident: EnsureOrigin<Success = $success_type:ty> {
+				$( $item:ident = $success:expr, )*
+			}
+		) => {
+			$vis struct $name;
+			impl<O: Into<Result<Origin, O>> + From<Origin>>
+				EnsureOrigin<O> for $name
+			{
+				type Success = $success_type;
+				fn try_origin(o: O) -> Result<Self::Success, O> {
+					o.into().and_then(|o| match o {
+						$(
+							Origin::$item => Ok($success),
+						)*
+						r => Err(O::from(r)),
+					})
+				}
+				#[cfg(feature = "runtime-benchmarks")]
+				fn try_successful_origin() -> Result<O, ()> {
+					// By convention the more privileged origins go later, so for greatest chance
+					// of success, we want the last one.
+					let _result: Result<O, ()> = Err(());
+					$(
+						let _result: Result<O, ()> = Ok(O::from(Origin::$item));
+					)*
+					_result
+				}
+			}
+		}
+	}
+
+	decl_ensure! {
+		pub type Spender: EnsureOrigin<Success = Balance> {
+			SmallTipper = 250 * 3 * CENTS,
+			BigTipper = 1 * GRAND,
+			SmallSpender = 10 * GRAND,
+			MediumSpender = 100 * GRAND,
+			BigSpender = 1_000 * GRAND,
+			Treasurer = 10_000 * GRAND,
+		}
+	}
+
+	decl_ensure! {
+		pub type EnsureFellowship: EnsureOrigin<Success = u16> {
+			Fellowship1Dan = 1,
+			Fellowship2Dan = 2,
+			Fellowship3Dan = 3,
+			Fellowship4Dan = 4,
+			Fellowship5Dan = 5,
+			Fellowship6Dan = 6,
+			Fellowship7Dan = 7,
+			Fellowship8Dan = 8,
+			Fellowship9Dan = 9,
+		}
+	}
+}
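Each `decl_ensure!` invocation above generates an `EnsureOrigin` implementation whose `try_origin` is a single `match` mapping the listed origin variants to their success value. Roughly what the `Spender` invocation expands to, hand-written with simplified stand-in types and illustrative constants (the real expansion implements `EnsureOrigin<O>` generically):

```rust
type Balance = u128;
const CENTS: Balance = 100;       // stand-in value
const GRAND: Balance = 1_000_000; // stand-in value

#[derive(Debug, PartialEq)]
enum Origin {
    SmallTipper,
    BigTipper,
    Treasurer,
    GeneralAdmin, // not a spender; falls through to the error arm
}

pub struct Spender;

impl Spender {
    /// Mirrors the generated `try_origin`: each listed variant succeeds
    /// with its spend limit, anything else is handed back unchanged.
    fn try_origin(o: Origin) -> Result<Balance, Origin> {
        match o {
            Origin::SmallTipper => Ok(250 * 3 * CENTS),
            Origin::BigTipper => Ok(1 * GRAND),
            Origin::Treasurer => Ok(10_000 * GRAND),
            r => Err(r),
        }
    }
}

fn main() {
    assert_eq!(Spender::try_origin(Origin::BigTipper), Ok(1 * GRAND));
    assert_eq!(Spender::try_origin(Origin::GeneralAdmin), Err(Origin::GeneralAdmin));
}
```

The success value doubles as the spend limit, which is how `TreasurySpender` above can feed it straight into `pallet_treasury`'s `SpendOrigin`.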
diff --git a/polkadot/runtime/rococo/src/governance/tracks.rs b/polkadot/runtime/rococo/src/governance/tracks.rs
new file mode 100644
index 000000000000..3765569f183e
--- /dev/null
+++ b/polkadot/runtime/rococo/src/governance/tracks.rs
@@ -0,0 +1,320 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <https://www.gnu.org/licenses/>.
+
+//! Track configurations for governance.
+
+use super::*;
+
+const fn percent(x: i32) -> sp_arithmetic::FixedI64 {
+	sp_arithmetic::FixedI64::from_rational(x as u128, 100)
+}
+use pallet_referenda::Curve;
+const APP_ROOT: Curve = Curve::make_reciprocal(4, 28, percent(80), percent(50), percent(100));
+const SUP_ROOT: Curve = Curve::make_linear(28, 28, percent(0), percent(50));
+const APP_STAKING_ADMIN: Curve = Curve::make_linear(17, 28, percent(50), percent(100));
+const SUP_STAKING_ADMIN: Curve =
+	Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50));
+const APP_TREASURER: Curve = Curve::make_reciprocal(4, 28, percent(80), percent(50), percent(100));
+const SUP_TREASURER: Curve = Curve::make_linear(28, 28, percent(0), percent(50));
+const APP_FELLOWSHIP_ADMIN: Curve = Curve::make_linear(17, 28, percent(50), percent(100));
+const SUP_FELLOWSHIP_ADMIN: Curve =
+	Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50));
+const APP_GENERAL_ADMIN: Curve =
+	Curve::make_reciprocal(4, 28, percent(80), percent(50), percent(100));
+const SUP_GENERAL_ADMIN: Curve =
+	Curve::make_reciprocal(7, 28, percent(10), percent(0), percent(50));
+const APP_AUCTION_ADMIN: Curve =
+	Curve::make_reciprocal(4, 28, percent(80), percent(50), percent(100));
+const SUP_AUCTION_ADMIN: Curve =
+	Curve::make_reciprocal(7, 28, percent(10), percent(0), percent(50));
+const APP_LEASE_ADMIN: Curve = Curve::make_linear(17, 28, percent(50), percent(100));
+const SUP_LEASE_ADMIN: Curve = Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50));
+const APP_REFERENDUM_CANCELLER: Curve = Curve::make_linear(17, 28, percent(50), percent(100));
+const SUP_REFERENDUM_CANCELLER: Curve =
+	Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50));
+const APP_REFERENDUM_KILLER: Curve = Curve::make_linear(17, 28, percent(50), percent(100));
+const SUP_REFERENDUM_KILLER: Curve =
+	Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50));
+const APP_SMALL_TIPPER: Curve = Curve::make_linear(10, 28, percent(50), percent(100));
+const SUP_SMALL_TIPPER: Curve = Curve::make_reciprocal(1, 28, percent(4), percent(0), percent(50));
+const APP_BIG_TIPPER: Curve = Curve::make_linear(10, 28, percent(50), percent(100));
+const SUP_BIG_TIPPER: Curve = Curve::make_reciprocal(8, 28, percent(1), percent(0), percent(50));
+const APP_SMALL_SPENDER: Curve = Curve::make_linear(17, 28, percent(50), percent(100));
+const SUP_SMALL_SPENDER: Curve =
+	Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50));
+const APP_MEDIUM_SPENDER: Curve = Curve::make_linear(23, 28, percent(50), percent(100));
+const SUP_MEDIUM_SPENDER: Curve =
+	Curve::make_reciprocal(16, 28, percent(1), percent(0), percent(50));
+const APP_BIG_SPENDER: Curve = Curve::make_linear(28, 28, percent(50), percent(100));
+const SUP_BIG_SPENDER: Curve = Curve::make_reciprocal(20, 28, percent(1), percent(0), percent(50));
+const APP_WHITELISTED_CALLER: Curve =
+	Curve::make_reciprocal(16, 28 * 24, percent(96), percent(50), percent(100));
+const SUP_WHITELISTED_CALLER: Curve =
+	Curve::make_reciprocal(1, 28, percent(20), percent(5), percent(50));
+
+const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 15] = [
+	(
+		0,
+		pallet_referenda::TrackInfo {
+			name: "root",
+			max_deciding: 1,
+			decision_deposit: 100 * GRAND,
+			prepare_period: 8 * MINUTES,
+			decision_period: 20 * MINUTES,
+			confirm_period: 12 * MINUTES,
+			min_enactment_period: 5 * MINUTES,
+			min_approval: APP_ROOT,
+			min_support: SUP_ROOT,
+		},
+	),
+	(
+		1,
+		pallet_referenda::TrackInfo {
+			name:
"whitelisted_caller", + max_deciding: 100, + decision_deposit: 10 * GRAND, + prepare_period: 6 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 4 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_WHITELISTED_CALLER, + min_support: SUP_WHITELISTED_CALLER, + }, + ), + ( + 10, + pallet_referenda::TrackInfo { + name: "staking_admin", + max_deciding: 10, + decision_deposit: 5 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_STAKING_ADMIN, + min_support: SUP_STAKING_ADMIN, + }, + ), + ( + 11, + pallet_referenda::TrackInfo { + name: "treasurer", + max_deciding: 10, + decision_deposit: 1 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: APP_TREASURER, + min_support: SUP_TREASURER, + }, + ), + ( + 12, + pallet_referenda::TrackInfo { + name: "lease_admin", + max_deciding: 10, + decision_deposit: 5 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_LEASE_ADMIN, + min_support: SUP_LEASE_ADMIN, + }, + ), + ( + 13, + pallet_referenda::TrackInfo { + name: "fellowship_admin", + max_deciding: 10, + decision_deposit: 5 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_FELLOWSHIP_ADMIN, + min_support: SUP_FELLOWSHIP_ADMIN, + }, + ), + ( + 14, + pallet_referenda::TrackInfo { + name: "general_admin", + max_deciding: 10, + decision_deposit: 5 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_GENERAL_ADMIN, + min_support: SUP_GENERAL_ADMIN, + }, + ), + ( + 15, + pallet_referenda::TrackInfo { + name: "auction_admin", + max_deciding: 10, + decision_deposit: 5 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_AUCTION_ADMIN, + min_support: SUP_AUCTION_ADMIN, + }, + ), + ( + 20, + pallet_referenda::TrackInfo { + name: "referendum_canceller", + max_deciding: 1_000, + decision_deposit: 10 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 14 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_REFERENDUM_CANCELLER, + min_support: SUP_REFERENDUM_CANCELLER, + }, + ), + ( + 21, + pallet_referenda::TrackInfo { + name: "referendum_killer", + max_deciding: 1_000, + decision_deposit: 50 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_REFERENDUM_KILLER, + min_support: SUP_REFERENDUM_KILLER, + }, + ), + ( + 30, + pallet_referenda::TrackInfo { + name: "small_tipper", + max_deciding: 200, + decision_deposit: 1 * 3 * CENTS, + prepare_period: 1 * MINUTES, + decision_period: 14 * MINUTES, + confirm_period: 4 * MINUTES, + min_enactment_period: 1 * MINUTES, + min_approval: APP_SMALL_TIPPER, + min_support: SUP_SMALL_TIPPER, + }, + ), + ( + 31, + pallet_referenda::TrackInfo { + name: "big_tipper", + max_deciding: 100, + decision_deposit: 10 * 3 * CENTS, + prepare_period: 4 * MINUTES, + decision_period: 14 * MINUTES, + confirm_period: 12 * MINUTES, + min_enactment_period: 3 * MINUTES, + 
min_approval: APP_BIG_TIPPER,
+			min_support: SUP_BIG_TIPPER,
+		},
+	),
+	(
+		32,
+		pallet_referenda::TrackInfo {
+			name: "small_spender",
+			max_deciding: 50,
+			decision_deposit: 100 * 3 * CENTS,
+			prepare_period: 10 * MINUTES,
+			decision_period: 20 * MINUTES,
+			confirm_period: 10 * MINUTES,
+			min_enactment_period: 5 * MINUTES,
+			min_approval: APP_SMALL_SPENDER,
+			min_support: SUP_SMALL_SPENDER,
+		},
+	),
+	(
+		33,
+		pallet_referenda::TrackInfo {
+			name: "medium_spender",
+			max_deciding: 50,
+			decision_deposit: 200 * 3 * CENTS,
+			prepare_period: 10 * MINUTES,
+			decision_period: 20 * MINUTES,
+			confirm_period: 12 * MINUTES,
+			min_enactment_period: 5 * MINUTES,
+			min_approval: APP_MEDIUM_SPENDER,
+			min_support: SUP_MEDIUM_SPENDER,
+		},
+	),
+	(
+		34,
+		pallet_referenda::TrackInfo {
+			name: "big_spender",
+			max_deciding: 50,
+			decision_deposit: 400 * 3 * CENTS,
+			prepare_period: 10 * MINUTES,
+			decision_period: 20 * MINUTES,
+			confirm_period: 14 * MINUTES,
+			min_enactment_period: 5 * MINUTES,
+			min_approval: APP_BIG_SPENDER,
+			min_support: SUP_BIG_SPENDER,
+		},
+	),
+];
+
+pub struct TracksInfo;
+impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
+	type Id = u16;
+	type RuntimeOrigin = <RuntimeOrigin as frame_support::traits::OriginTrait>::PalletsOrigin;
+	fn tracks() -> &'static [(Self::Id, pallet_referenda::TrackInfo<Balance, BlockNumber>)] {
+		&TRACKS_DATA[..]
+	}
+	fn track_for(id: &Self::RuntimeOrigin) -> Result<Self::Id, ()> {
+		if let Ok(system_origin) = frame_system::RawOrigin::try_from(id.clone()) {
+			match system_origin {
+				frame_system::RawOrigin::Root => Ok(0),
+				_ => Err(()),
+			}
+		} else if let Ok(custom_origin) = origins::Origin::try_from(id.clone()) {
+			match custom_origin {
+				origins::Origin::WhitelistedCaller => Ok(1),
+				// General admin
+				origins::Origin::StakingAdmin => Ok(10),
+				origins::Origin::Treasurer => Ok(11),
+				origins::Origin::LeaseAdmin => Ok(12),
+				origins::Origin::FellowshipAdmin => Ok(13),
+				origins::Origin::GeneralAdmin => Ok(14),
+				origins::Origin::AuctionAdmin => Ok(15),
+				// Referendum admins
+				origins::Origin::ReferendumCanceller => Ok(20),
+				origins::Origin::ReferendumKiller => Ok(21),
+				// Limited treasury spenders
+				origins::Origin::SmallTipper => Ok(30),
+				origins::Origin::BigTipper => Ok(31),
+				origins::Origin::SmallSpender => Ok(32),
+				origins::Origin::MediumSpender => Ok(33),
+				origins::Origin::BigSpender => Ok(34),
+				_ => Err(()),
+			}
+		} else {
+			Err(())
+		}
+	}
+}
+pallet_referenda::impl_tracksinfo_get!(TracksInfo, Balance, BlockNumber);
diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs
index dd05082d2918..4bdcc1237394 100644
--- a/polkadot/runtime/rococo/src/lib.rs
+++ b/polkadot/runtime/rococo/src/lib.rs
@@ -62,8 +62,8 @@ use frame_support::{
 	genesis_builder_helper::{build_config, create_default_config},
 	parameter_types,
 	traits::{
-		fungible::HoldConsideration, Contains, EitherOfDiverse, EverythingBut, InstanceFilter,
-		KeyOwnerProofSystem, LinearStoragePrice, LockIdentifier, PrivilegeCmp, ProcessMessage,
+		fungible::HoldConsideration, Contains, EitherOf, EitherOfDiverse, EverythingBut,
+		InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, PrivilegeCmp, ProcessMessage,
 		ProcessMessageError, StorageMapShim, WithdrawReasons,
 	},
 	weights::{ConstantMultiplier, WeightMeter},
@@ -88,7 +88,6 @@ use sp_staking::SessionIndex;
 #[cfg(any(feature = "std", test))]
 use sp_version::NativeVersion;
 use sp_version::RuntimeVersion;
-use static_assertions::const_assert;
 use xcm::latest::Junction;
 
 pub use frame_system::Call as SystemCall;
@@ -103,6 +102,13 @@ mod weights;
 // XCM configurations.
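The `make_linear(length, period, floor, ceil)` curves used in tracks.rs above decrease the required threshold from `ceil` to `floor` over the first `length` units of a `period`-long decision window, then hold at `floor`. A plain-float sketch of that shape (the real `Curve` uses `FixedI64`/`Perbill` fixed-point arithmetic, so this is illustrative only):

```rust
/// `x` and `length` are fractions of the decision period.
fn linear_decreasing(x: f64, length: f64, floor: f64, ceil: f64) -> f64 {
    // The threshold falls linearly from `ceil` to `floor` over `length`,
    // then stays at `floor` for the rest of the period.
    let progress = (x / length).min(1.0);
    ceil - (ceil - floor) * progress
}

fn main() {
    // SUP_ROOT = make_linear(28, 28, 0%, 50%): required support falls from
    // 50% to 0% across the whole decision period.
    assert_eq!(linear_decreasing(0.0, 1.0, 0.0, 0.50), 0.50);
    assert_eq!(linear_decreasing(0.5, 1.0, 0.0, 0.50), 0.25);
    assert_eq!(linear_decreasing(1.0, 1.0, 0.0, 0.50), 0.0);
    // APP_STAKING_ADMIN = make_linear(17, 28, 50%, 100%): approval reaches
    // its 50% floor once 17/28 of the decision period has elapsed.
    let len = 17.0 / 28.0;
    assert_eq!(linear_decreasing(len, len, 0.50, 1.00), 0.50);
}
```

The reciprocal (`make_reciprocal`) variants drop steeply at first and flatten toward the floor, which is why they are used for the high-trust tracks such as `whitelisted_caller`.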
pub mod xcm_config; +// Governance and configurations. +pub mod governance; +use governance::{ + pallet_custom_origins, AuctionAdmin, Fellows, GeneralAdmin, LeaseAdmin, Treasurer, + TreasurySpender, +}; + mod validator_manager; impl_runtime_weights!(rococo_runtime_constants); @@ -117,7 +123,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("rococo"), impl_name: create_runtime_str!("parity-rococo-v2.0"), authoring_version: 0, - spec_version: 9430, + spec_version: 10020, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 22, @@ -186,11 +192,6 @@ parameter_types! { pub const NoPreimagePostponement: Option = Some(10); } -type ScheduleOrigin = EitherOfDiverse< - EnsureRoot, - pallet_collective::EnsureProportionAtLeast, ->; - /// Used the compare the privilege of an origin inside the scheduler. pub struct OriginPrivilegeCmp; @@ -203,11 +204,6 @@ impl PrivilegeCmp for OriginPrivilegeCmp { match (left, right) { // Root is greater than anything. (OriginCaller::system(frame_system::RawOrigin::Root), _) => Some(Ordering::Greater), - // Check which one has more yes votes. - ( - OriginCaller::Council(pallet_collective::RawOrigin::Members(l_yes_votes, l_count)), - OriginCaller::Council(pallet_collective::RawOrigin::Members(r_yes_votes, r_count)), - ) => Some((l_yes_votes * r_count).cmp(&(r_yes_votes * l_count))), // For every other origin we don't care, as they are not used for `ScheduleOrigin`. _ => None, } @@ -220,7 +216,9 @@ impl pallet_scheduler::Config for Runtime { type PalletsOrigin = OriginCaller; type RuntimeCall = RuntimeCall; type MaximumWeight = MaximumSchedulerWeight; - type ScheduleOrigin = ScheduleOrigin; + // The goal of having ScheduleOrigin include AuctionAdmin is to allow the auctions track of + // OpenGov to schedule periodic auctions. + type ScheduleOrigin = EitherOf, AuctionAdmin>; type MaxScheduledPerBlock = MaxScheduledPerBlock; type WeightInfo = weights::pallet_scheduler::WeightInfo; type OriginPrivilegeCmp = OriginPrivilegeCmp; @@ -380,171 +378,6 @@ parameter_types! { pub const BondingDuration: sp_staking::EraIndex = 28; } -parameter_types! { - pub LaunchPeriod: BlockNumber = prod_or_fast!(7 * DAYS, 1, "ROC_LAUNCH_PERIOD"); - pub VotingPeriod: BlockNumber = prod_or_fast!(7 * DAYS, 1 * MINUTES, "ROC_VOTING_PERIOD"); - pub FastTrackVotingPeriod: BlockNumber = prod_or_fast!(3 * HOURS, 1 * MINUTES, "ROC_FAST_TRACK_VOTING_PERIOD"); - pub const MinimumDeposit: Balance = 100 * CENTS; - pub EnactmentPeriod: BlockNumber = prod_or_fast!(8 * DAYS, 1, "ROC_ENACTMENT_PERIOD"); - pub CooloffPeriod: BlockNumber = prod_or_fast!(7 * DAYS, 1 * MINUTES, "ROC_COOLOFF_PERIOD"); - pub const InstantAllowed: bool = true; - pub const MaxVotes: u32 = 100; - pub const MaxProposals: u32 = 100; -} - -impl pallet_democracy::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type EnactmentPeriod = EnactmentPeriod; - type VoteLockingPeriod = EnactmentPeriod; - type LaunchPeriod = LaunchPeriod; - type VotingPeriod = VotingPeriod; - type MinimumDeposit = MinimumDeposit; - type SubmitOrigin = frame_system::EnsureSigned; - /// A straight majority of the council can decide what their next motion is. - type ExternalOrigin = - pallet_collective::EnsureProportionAtLeast; - /// A majority can have the next scheduled referendum be a straight majority-carries vote. 
- type ExternalMajorityOrigin = - pallet_collective::EnsureProportionAtLeast; - /// A unanimous council can have the next scheduled referendum be a straight default-carries - /// (NTB) vote. - type ExternalDefaultOrigin = - pallet_collective::EnsureProportionAtLeast; - /// Two thirds of the technical committee can have an `ExternalMajority/ExternalDefault` vote - /// be tabled immediately and with a shorter voting/enactment period. - type FastTrackOrigin = - pallet_collective::EnsureProportionAtLeast; - type InstantOrigin = - pallet_collective::EnsureProportionAtLeast; - type InstantAllowed = InstantAllowed; - type FastTrackVotingPeriod = FastTrackVotingPeriod; - // To cancel a proposal which has been passed, 2/3 of the council must agree to it. - type CancellationOrigin = EitherOfDiverse< - EnsureRoot, - pallet_collective::EnsureProportionAtLeast, - >; - type BlacklistOrigin = EnsureRoot; - // To cancel a proposal before it has been passed, the technical committee must be unanimous or - // Root must agree. - type CancelProposalOrigin = EitherOfDiverse< - EnsureRoot, - pallet_collective::EnsureProportionAtLeast, - >; - // Any single technical committee member may veto a coming council proposal, however they can - // only do it once and it lasts only for the cooloff period. - type VetoOrigin = pallet_collective::EnsureMember; - type CooloffPeriod = CooloffPeriod; - type Slash = Treasury; - type Scheduler = Scheduler; - type PalletsOrigin = OriginCaller; - type MaxVotes = MaxVotes; - type WeightInfo = weights::pallet_democracy::WeightInfo; - type MaxProposals = MaxProposals; - type Preimages = Preimage; - type MaxDeposits = ConstU32<100>; - type MaxBlacklisted = ConstU32<100>; -} - -parameter_types! { - pub CouncilMotionDuration: BlockNumber = prod_or_fast!(3 * DAYS, 2 * MINUTES, "ROC_MOTION_DURATION"); - pub const CouncilMaxProposals: u32 = 100; - pub const CouncilMaxMembers: u32 = 100; - pub MaxProposalWeight: Weight = Perbill::from_percent(50) * BlockWeights::get().max_block; -} - -type CouncilCollective = pallet_collective::Instance1; -impl pallet_collective::Config for Runtime { - type RuntimeOrigin = RuntimeOrigin; - type Proposal = RuntimeCall; - type RuntimeEvent = RuntimeEvent; - type MotionDuration = CouncilMotionDuration; - type MaxProposals = CouncilMaxProposals; - type MaxMembers = CouncilMaxMembers; - type DefaultVote = pallet_collective::PrimeDefaultVote; - type SetMembersOrigin = EnsureRoot; - type WeightInfo = weights::pallet_collective_council::WeightInfo; - type MaxProposalWeight = MaxProposalWeight; -} - -parameter_types! { - pub const CandidacyBond: Balance = 100 * CENTS; - // 1 storage item created, key size is 32 bytes, value size is 16+16. - pub const VotingBondBase: Balance = deposit(1, 64); - // additional data per vote is 32 bytes (account id). - pub const VotingBondFactor: Balance = deposit(0, 32); - /// Daily council elections - pub TermDuration: BlockNumber = prod_or_fast!(24 * HOURS, 2 * MINUTES, "ROC_TERM_DURATION"); - pub const DesiredMembers: u32 = 19; - pub const DesiredRunnersUp: u32 = 19; - pub const MaxVoters: u32 = 10 * 1000; - pub const MaxVotesPerVoter: u32 = 16; - pub const MaxCandidates: u32 = 1000; - pub const PhragmenElectionPalletId: LockIdentifier = *b"phrelect"; -} - -// Make sure that there are no more than MaxMembers members elected via phragmen. 
-const_assert!(DesiredMembers::get() <= CouncilMaxMembers::get()); - -impl pallet_elections_phragmen::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type ChangeMembers = Council; - type InitializeMembers = Council; - type CurrencyToVote = runtime_common::CurrencyToVote; - type CandidacyBond = CandidacyBond; - type VotingBondBase = VotingBondBase; - type VotingBondFactor = VotingBondFactor; - type LoserCandidate = Treasury; - type KickedMember = Treasury; - type DesiredMembers = DesiredMembers; - type DesiredRunnersUp = DesiredRunnersUp; - type TermDuration = TermDuration; - type MaxVoters = MaxVoters; - type MaxVotesPerVoter = MaxVotesPerVoter; - type MaxCandidates = MaxCandidates; - type PalletId = PhragmenElectionPalletId; - type WeightInfo = weights::pallet_elections_phragmen::WeightInfo; -} - -parameter_types! { - pub TechnicalMotionDuration: BlockNumber = prod_or_fast!(3 * DAYS, 2 * MINUTES, "ROC_MOTION_DURATION"); - pub const TechnicalMaxProposals: u32 = 100; - pub const TechnicalMaxMembers: u32 = 100; -} - -type TechnicalCollective = pallet_collective::Instance2; -impl pallet_collective::Config for Runtime { - type RuntimeOrigin = RuntimeOrigin; - type Proposal = RuntimeCall; - type RuntimeEvent = RuntimeEvent; - type MotionDuration = TechnicalMotionDuration; - type MaxProposals = TechnicalMaxProposals; - type MaxMembers = TechnicalMaxMembers; - type DefaultVote = pallet_collective::PrimeDefaultVote; - type SetMembersOrigin = EnsureRoot; - type WeightInfo = weights::pallet_collective_technical_committee::WeightInfo; - type MaxProposalWeight = MaxProposalWeight; -} - -type MoreThanHalfCouncil = EitherOfDiverse< - EnsureRoot, - pallet_collective::EnsureProportionMoreThan, ->; - -impl pallet_membership::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type AddOrigin = MoreThanHalfCouncil; - type RemoveOrigin = MoreThanHalfCouncil; - type SwapOrigin = MoreThanHalfCouncil; - type ResetOrigin = MoreThanHalfCouncil; - type PrimeOrigin = MoreThanHalfCouncil; - type MembershipInitialized = TechnicalCommittee; - type MembershipChanged = TechnicalCommittee; - type MaxMembers = TechnicalMaxMembers; - type WeightInfo = weights::pallet_membership::WeightInfo; -} - parameter_types! { pub const ProposalBond: Permill = Permill::from_percent(5); pub const ProposalBondMinimum: Balance = 2000 * CENTS; @@ -563,16 +396,11 @@ parameter_types! { pub const MaxPeerInHeartbeats: u32 = 10_000; } -type ApproveOrigin = EitherOfDiverse< - EnsureRoot, - pallet_collective::EnsureProportionAtLeast, ->; - impl pallet_treasury::Config for Runtime { type PalletId = TreasuryPalletId; type Currency = Balances; - type ApproveOrigin = ApproveOrigin; - type RejectOrigin = MoreThanHalfCouncil; + type ApproveOrigin = EitherOfDiverse, Treasurer>; + type RejectOrigin = EitherOfDiverse, Treasurer>; type RuntimeEvent = RuntimeEvent; type OnSlash = Treasury; type ProposalBond = ProposalBond; @@ -584,7 +412,7 @@ impl pallet_treasury::Config for Runtime { type MaxApprovals = MaxApprovals; type WeightInfo = weights::pallet_treasury::WeightInfo; type SpendFunds = Bounties; - type SpendOrigin = frame_support::traits::NeverEnsureOrigin; + type SpendOrigin = TreasurySpender; } parameter_types! 
{ @@ -625,17 +453,6 @@ impl pallet_child_bounties::Config for Runtime { type WeightInfo = weights::pallet_child_bounties::WeightInfo; } -impl pallet_tips::Config for Runtime { - type MaximumReasonLength = MaximumReasonLength; - type DataDepositPerByte = DataDepositPerByte; - type Tippers = PhragmenElection; - type TipCountdown = TipCountdown; - type TipFindersFee = TipFindersFee; - type TipReportDepositBase = TipReportDepositBase; - type RuntimeEvent = RuntimeEvent; - type WeightInfo = weights::pallet_tips::WeightInfo; -} - impl pallet_offences::Config for Runtime { type RuntimeEvent = RuntimeEvent; type IdentificationTuple = pallet_session::historical::IdentificationTuple; @@ -746,8 +563,7 @@ impl claims::Config for Runtime { type RuntimeEvent = RuntimeEvent; type VestingSchedule = Vesting; type Prefix = Prefix; - type MoveClaimOrigin = - pallet_collective::EnsureProportionMoreThan; + type MoveClaimOrigin = EnsureRoot; type WeightInfo = weights::runtime_common_claims::WeightInfo; } @@ -771,8 +587,8 @@ impl pallet_identity::Config for Runtime { type MaxAdditionalFields = MaxAdditionalFields; type MaxRegistrars = MaxRegistrars; type Slashed = Treasury; - type ForceOrigin = MoreThanHalfCouncil; - type RegistrarOrigin = MoreThanHalfCouncil; + type ForceOrigin = EitherOf, GeneralAdmin>; + type RegistrarOrigin = EitherOf, GeneralAdmin>; type WeightInfo = weights::pallet_identity::WeightInfo; } @@ -913,15 +729,14 @@ impl InstanceFilter for ProxyType { RuntimeCall::Session(..) | RuntimeCall::Grandpa(..) | RuntimeCall::ImOnline(..) | - RuntimeCall::Democracy(..) | - RuntimeCall::Council(..) | - RuntimeCall::TechnicalCommittee(..) | - RuntimeCall::PhragmenElection(..) | - RuntimeCall::TechnicalMembership(..) | RuntimeCall::Treasury(..) | RuntimeCall::Bounties(..) | RuntimeCall::ChildBounties(..) | - RuntimeCall::Tips(..) | + RuntimeCall::ConvictionVoting(..) | + RuntimeCall::Referenda(..) | + RuntimeCall::FellowshipCollective(..) | + RuntimeCall::FellowshipReferenda(..) | + RuntimeCall::Whitelist(..) | RuntimeCall::Claims(..) | RuntimeCall::Utility(..) | RuntimeCall::Identity(..) | @@ -948,17 +763,18 @@ impl InstanceFilter for ProxyType { RuntimeCall::Slots(..) | RuntimeCall::Auctions(..) // Specifically omitting the entire XCM Pallet ), - ProxyType::Governance => - matches!( - c, - RuntimeCall::Democracy(..) | - RuntimeCall::Council(..) | RuntimeCall::TechnicalCommittee(..) | - RuntimeCall::PhragmenElection(..) | - RuntimeCall::Treasury(..) | - RuntimeCall::Bounties(..) | - RuntimeCall::Tips(..) | RuntimeCall::Utility(..) | - RuntimeCall::ChildBounties(..) - ), + ProxyType::Governance => matches!( + c, + RuntimeCall::Bounties(..) | + RuntimeCall::Utility(..) | + RuntimeCall::ChildBounties(..) | + // OpenGov calls + RuntimeCall::ConvictionVoting(..) | + RuntimeCall::Referenda(..) | + RuntimeCall::FellowshipCollective(..) | + RuntimeCall::FellowshipReferenda(..) | + RuntimeCall::Whitelist(..) + ), ProxyType::IdentityJudgement => matches!( c, RuntimeCall::Identity(pallet_identity::Call::provide_judgement { .. }) | @@ -1184,7 +1000,7 @@ impl slots::Config for Runtime { type Registrar = Registrar; type LeasePeriod = LeasePeriod; type LeaseOffset = (); - type ForceOrigin = MoreThanHalfCouncil; + type ForceOrigin = EitherOf, LeaseAdmin>; type WeightInfo = weights::runtime_common_slots::WeightInfo; } @@ -1217,11 +1033,6 @@ parameter_types! 
{ pub const SampleLength: BlockNumber = 2 * MINUTES; } -type AuctionInitiate = EitherOfDiverse< - EnsureRoot, - pallet_collective::EnsureProportionAtLeast, ->; - impl auctions::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Leaser = Slots; @@ -1229,7 +1040,7 @@ impl auctions::Config for Runtime { type EndingPeriod = EndingPeriod; type SampleLength = SampleLength; type Randomness = pallet_babe::RandomnessFromOneEpochAgo; - type InitiateOrigin = AuctionInitiate; + type InitiateOrigin = EitherOf, AuctionAdmin>; type WeightInfo = weights::runtime_common_auctions::WeightInfo; } @@ -1425,13 +1236,19 @@ construct_runtime! { AuthorityDiscovery: pallet_authority_discovery::{Pallet, Config} = 12, // Governance stuff; uncallable initially. - Democracy: pallet_democracy::{Pallet, Call, Storage, Config, Event} = 13, - Council: pallet_collective::::{Pallet, Call, Storage, Origin, Event, Config} = 14, - TechnicalCommittee: pallet_collective::::{Pallet, Call, Storage, Origin, Event, Config} = 15, - PhragmenElection: pallet_elections_phragmen::{Pallet, Call, Storage, Event, Config} = 16, - TechnicalMembership: pallet_membership::::{Pallet, Call, Storage, Event, Config} = 17, Treasury: pallet_treasury::{Pallet, Call, Storage, Config, Event} = 18, - + ConvictionVoting: pallet_conviction_voting::{Pallet, Call, Storage, Event} = 20, + Referenda: pallet_referenda::{Pallet, Call, Storage, Event} = 21, + // pub type FellowshipCollectiveInstance = pallet_ranked_collective::Instance1; + FellowshipCollective: pallet_ranked_collective::::{ + Pallet, Call, Storage, Event + } = 22, + // pub type FellowshipReferendaInstance = pallet_referenda::Instance2; + FellowshipReferenda: pallet_referenda::::{ + Pallet, Call, Storage, Event + } = 23, + Origins: pallet_custom_origins::{Origin} = 43, + Whitelist: pallet_whitelist::{Pallet, Call, Storage, Event} = 44, // Claims. Usable initially. Claims: claims::{Pallet, Call, Storage, Event, Config, ValidateUnsigned} = 19, @@ -1466,9 +1283,6 @@ construct_runtime! { Bounties: pallet_bounties::{Pallet, Call, Storage, Event} = 35, ChildBounties: pallet_child_bounties = 40, - // Tips module. - Tips: pallet_tips::{Pallet, Call, Storage, Event} = 36, - // NIS pallet. 
Nis: pallet_nis::{Pallet, Call, Storage, Event, HoldReason} = 38, // pub type NisCounterpartInstance = pallet_balances::Instance2; @@ -1563,6 +1377,8 @@ pub mod migrations { parachains_configuration::migration::v8::MigrateToV8, parachains_configuration::migration::v9::MigrateToV9, paras_registrar::migration::VersionCheckedMigrateToV1, + pallet_referenda::migration::v1::MigrateV0ToV1, + pallet_referenda::migration::v1::MigrateV0ToV1, ); } @@ -1629,28 +1445,27 @@ mod benches { [frame_benchmarking::baseline, Baseline::] [pallet_bounties, Bounties] [pallet_child_bounties, ChildBounties] - [pallet_collective, Council] - [pallet_collective, TechnicalCommittee] - [pallet_democracy, Democracy] - [pallet_elections_phragmen, PhragmenElection] + [pallet_conviction_voting, ConvictionVoting] [pallet_nis, Nis] [pallet_identity, Identity] [pallet_im_online, ImOnline] [pallet_indices, Indices] - [pallet_membership, TechnicalMembership] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] [pallet_preimage, Preimage] [pallet_proxy, Proxy] + [pallet_ranked_collective, FellowshipCollective] [pallet_recovery, Recovery] + [pallet_referenda, Referenda] + [pallet_referenda, FellowshipReferenda] [pallet_scheduler, Scheduler] [pallet_sudo, Sudo] [frame_system, SystemBench::] [pallet_timestamp, Timestamp] - [pallet_tips, Tips] [pallet_treasury, Treasury] [pallet_utility, Utility] [pallet_vesting, Vesting] + [pallet_whitelist, Whitelist] // XCM [pallet_xcm, XcmPallet] [pallet_xcm_benchmarks::fungible, pallet_xcm_benchmarks::fungible::Pallet::] @@ -2143,7 +1958,7 @@ sp_api::impl_runtime_apis! { use sp_storage::TrackedStorageKey; use xcm::latest::prelude::*; use xcm_config::{ - LocalCheckAccount, LocationConverter, Rockmine, TokenLocation, XcmConfig, + LocalCheckAccount, LocationConverter, AssetHub, TokenLocation, XcmConfig, }; impl frame_system_benchmarking::Config for Runtime {} @@ -2152,7 +1967,7 @@ sp_api::impl_runtime_apis! { type XcmConfig = XcmConfig; type AccountIdConverter = LocationConverter; fn valid_destination() -> Result { - Ok(Rockmine::get()) + Ok(AssetHub::get()) } fn worst_case_holding(_depositable_count: u32) -> MultiAssets { // Rococo only knows about ROC @@ -2165,7 +1980,7 @@ sp_api::impl_runtime_apis! { parameter_types! { pub const TrustedTeleporter: Option<(MultiLocation, MultiAsset)> = Some(( - Rockmine::get(), + AssetHub::get(), MultiAsset { fun: Fungible(1 * UNITS), id: Concrete(TokenLocation::get()) }, )); pub const TrustedReserve: Option<(MultiLocation, MultiAsset)> = None; @@ -2204,15 +2019,15 @@ sp_api::impl_runtime_apis! 
{ } fn transact_origin_and_runtime_call() -> Result<(MultiLocation, RuntimeCall), BenchmarkError> { - Ok((Rockmine::get(), frame_system::Call::remark_with_event { remark: vec![] }.into())) + Ok((AssetHub::get(), frame_system::Call::remark_with_event { remark: vec![] }.into())) } fn subscribe_origin() -> Result { - Ok(Rockmine::get()) + Ok(AssetHub::get()) } fn claimable_asset() -> Result<(MultiLocation, MultiLocation, MultiAssets), BenchmarkError> { - let origin = Rockmine::get(); + let origin = AssetHub::get(); let assets: MultiAssets = (Concrete(TokenLocation::get()), 1_000 * UNITS).into(); let ticket = MultiLocation { parents: 0, interior: Here }; Ok((origin, ticket, assets)) diff --git a/polkadot/runtime/rococo/src/weights/mod.rs b/polkadot/runtime/rococo/src/weights/mod.rs index 21558ca3fb90..e0c1c4f41351 100644 --- a/polkadot/runtime/rococo/src/weights/mod.rs +++ b/polkadot/runtime/rococo/src/weights/mod.rs @@ -20,27 +20,26 @@ pub mod pallet_balances; pub mod pallet_balances_nis_counterpart_balances; pub mod pallet_bounties; pub mod pallet_child_bounties; -pub mod pallet_collective_council; -pub mod pallet_collective_technical_committee; -pub mod pallet_democracy; -pub mod pallet_elections_phragmen; +pub mod pallet_conviction_voting; pub mod pallet_identity; pub mod pallet_im_online; pub mod pallet_indices; -pub mod pallet_membership; pub mod pallet_message_queue; pub mod pallet_multisig; pub mod pallet_nis; pub mod pallet_preimage; pub mod pallet_proxy; +pub mod pallet_ranked_collective; +pub mod pallet_referenda_fellowship_referenda; +pub mod pallet_referenda_referenda; pub mod pallet_scheduler; pub mod pallet_session; pub mod pallet_sudo; pub mod pallet_timestamp; -pub mod pallet_tips; pub mod pallet_treasury; pub mod pallet_utility; pub mod pallet_vesting; +pub mod pallet_whitelist; pub mod pallet_xcm; pub mod runtime_common_assigned_slots; pub mod runtime_common_auctions; diff --git a/polkadot/runtime/rococo/src/weights/pallet_collective.rs b/polkadot/runtime/rococo/src/weights/pallet_collective.rs deleted file mode 100644 index 9bf6671e2297..000000000000 --- a/polkadot/runtime/rococo/src/weights/pallet_collective.rs +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . -//! Autogenerated weights for `pallet_collective` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-09-08, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot -// benchmark -// pallet -// --chain=rococo-dev -// --steps=50 -// --repeat=20 -// --pallet=pallet_collective -// --extrinsic=* -// --execution=wasm -// --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::{Weight}}; -use sp_std::marker::PhantomData; - -/// Weight functions for `pallet_collective`. -pub struct WeightInfo(PhantomData); -impl pallet_collective::WeightInfo for WeightInfo { - // Storage: Collective Members (r:1 w:1) - // Storage: Collective Proposals (r:1 w:0) - // Storage: Collective Voting (r:100 w:100) - // Storage: Collective Prime (r:0 w:1) - /// The range of component `m` is `[1, 100]`. - /// The range of component `n` is `[1, 100]`. - /// The range of component `p` is `[1, 100]`. - fn set_members(m: u32, _n: u32, p: u32, ) -> Weight { - Weight::from_parts(0 as u64, 0) - // Standard Error: 15_000 - .saturating_add(Weight::from_parts(10_832_000 as u64, 0).saturating_mul(m as u64)) - // Standard Error: 15_000 - .saturating_add(Weight::from_parts(12_894_000 as u64, 0).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(p as u64))) - .saturating_add(T::DbWeight::get().writes(2 as u64)) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(p as u64))) - } - // Storage: Collective Members (r:1 w:0) - /// The range of component `b` is `[1, 1024]`. - /// The range of component `m` is `[1, 100]`. - fn execute(b: u32, m: u32, ) -> Weight { - Weight::from_parts(19_069_000 as u64, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(2_000 as u64, 0).saturating_mul(b as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_parts(13_000 as u64, 0).saturating_mul(m as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - } - // Storage: Collective Members (r:1 w:0) - // Storage: Collective ProposalOf (r:1 w:0) - /// The range of component `b` is `[1, 1024]`. - /// The range of component `m` is `[1, 100]`. - fn propose_execute(b: u32, m: u32, ) -> Weight { - Weight::from_parts(20_794_000 as u64, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(2_000 as u64, 0).saturating_mul(b as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_parts(22_000 as u64, 0).saturating_mul(m as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - } - // Storage: Collective Members (r:1 w:0) - // Storage: Collective ProposalOf (r:1 w:1) - // Storage: Collective Proposals (r:1 w:1) - // Storage: Collective ProposalCount (r:1 w:1) - // Storage: Collective Voting (r:0 w:1) - /// The range of component `b` is `[1, 1024]`. - /// The range of component `m` is `[2, 100]`. - /// The range of component `p` is `[1, 100]`. 
- fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { - Weight::from_parts(27_870_000 as u64, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(3_000 as u64, 0).saturating_mul(b as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_parts(22_000 as u64, 0).saturating_mul(m as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_parts(94_000 as u64, 0).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) - } - // Storage: Collective Members (r:1 w:0) - // Storage: Collective Voting (r:1 w:1) - /// The range of component `m` is `[5, 100]`. - fn vote(m: u32, ) -> Weight { - Weight::from_parts(27_249_000 as u64, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(35_000 as u64, 0).saturating_mul(m as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) - } - // Storage: Collective Voting (r:1 w:1) - // Storage: Collective Members (r:1 w:0) - // Storage: Collective Proposals (r:1 w:1) - // Storage: Collective ProposalOf (r:0 w:1) - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - fn close_early_disapproved(m: u32, p: u32, ) -> Weight { - Weight::from_parts(30_754_000 as u64, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(28_000 as u64, 0).saturating_mul(m as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_parts(81_000 as u64, 0).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) - } - // Storage: Collective Voting (r:1 w:1) - // Storage: Collective Members (r:1 w:0) - // Storage: Collective ProposalOf (r:1 w:1) - // Storage: Collective Proposals (r:1 w:1) - /// The range of component `b` is `[1, 1024]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { - Weight::from_parts(39_508_000 as u64, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_000 as u64, 0).saturating_mul(b as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_parts(29_000 as u64, 0).saturating_mul(m as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_parts(90_000 as u64, 0).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) - } - // Storage: Collective Voting (r:1 w:1) - // Storage: Collective Members (r:1 w:0) - // Storage: Collective Prime (r:1 w:0) - // Storage: Collective Proposals (r:1 w:1) - // Storage: Collective ProposalOf (r:0 w:1) - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - fn close_disapproved(m: u32, p: u32, ) -> Weight { - Weight::from_parts(32_769_000 as u64, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(31_000 as u64, 0).saturating_mul(m as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_parts(83_000 as u64, 0).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) - } - // Storage: Collective Voting (r:1 w:1) - // Storage: Collective Members (r:1 w:0) - // Storage: Collective Prime (r:1 w:0) - // Storage: Collective ProposalOf (r:1 w:1) - // Storage: Collective Proposals (r:1 w:1) - /// The range of component `b` is `[1, 1024]`. - /// The range of component `m` is `[4, 100]`. 
- /// The range of component `p` is `[1, 100]`. - fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { - Weight::from_parts(41_704_000 as u64, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_000 as u64, 0).saturating_mul(b as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_parts(28_000 as u64, 0).saturating_mul(m as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_parts(92_000 as u64, 0).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(5 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) - } - // Storage: Collective Proposals (r:1 w:1) - // Storage: Collective Voting (r:0 w:1) - // Storage: Collective ProposalOf (r:0 w:1) - /// The range of component `p` is `[1, 100]`. - fn disapprove_proposal(p: u32, ) -> Weight { - Weight::from_parts(22_720_000 as u64, 0) - // Standard Error: 2_000 - .saturating_add(Weight::from_parts(74_000 as u64, 0).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) - } -} diff --git a/polkadot/runtime/rococo/src/weights/pallet_collective_council.rs b/polkadot/runtime/rococo/src/weights/pallet_collective_council.rs deleted file mode 100644 index 835bdef7e673..000000000000 --- a/polkadot/runtime/rococo/src/weights/pallet_collective_council.rs +++ /dev/null @@ -1,322 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Autogenerated weights for `pallet_collective` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot -// benchmark -// pallet -// --chain=rococo-dev -// --steps=50 -// --repeat=20 -// --pallet=pallet_collective -// --extrinsic=* -// --execution=wasm -// --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_collective`. 
-pub struct WeightInfo(PhantomData); -impl pallet_collective::WeightInfo for WeightInfo { - /// Storage: Council Members (r:1 w:1) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:0) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Voting (r:100 w:100) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Prime (r:0 w:1) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `m` is `[0, 100]`. - /// The range of component `n` is `[0, 100]`. - /// The range of component `p` is `[0, 100]`. - /// The range of component `m` is `[0, 100]`. - /// The range of component `n` is `[0, 100]`. - /// The range of component `p` is `[0, 100]`. - fn set_members(m: u32, _n: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0 + m * (3232 ±0) + p * (3190 ±0)` - // Estimated: `15795 + m * (1967 ±16) + p * (4332 ±16)` - // Minimum execution time: 17_182_000 picoseconds. - Weight::from_parts(17_462_000, 0) - .saturating_add(Weight::from_parts(0, 15795)) - // Standard Error: 42_032 - .saturating_add(Weight::from_parts(4_868_618, 0).saturating_mul(m.into())) - // Standard Error: 42_032 - .saturating_add(Weight::from_parts(7_289_594, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) - .saturating_add(T::DbWeight::get().writes(2)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) - .saturating_add(Weight::from_parts(0, 1967).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 4332).saturating_mul(p.into())) - } - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[1, 100]`. - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[1, 100]`. - fn execute(b: u32, m: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `136 + m * (32 ±0)` - // Estimated: `1622 + m * (32 ±0)` - // Minimum execution time: 16_507_000 picoseconds. - Weight::from_parts(16_066_632, 0) - .saturating_add(Weight::from_parts(0, 1622)) - // Standard Error: 21 - .saturating_add(Weight::from_parts(982, 0).saturating_mul(b.into())) - // Standard Error: 220 - .saturating_add(Weight::from_parts(14_026, 0).saturating_mul(m.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) - } - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:1 w:0) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[1, 100]`. - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[1, 100]`. - fn propose_execute(b: u32, m: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `136 + m * (32 ±0)` - // Estimated: `3602 + m * (32 ±0)` - // Minimum execution time: 18_990_000 picoseconds. 
- Weight::from_parts(18_411_713, 0) - .saturating_add(Weight::from_parts(0, 3602)) - // Standard Error: 15 - .saturating_add(Weight::from_parts(1_166, 0).saturating_mul(b.into())) - // Standard Error: 164 - .saturating_add(Weight::from_parts(23_067, 0).saturating_mul(m.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) - } - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:1 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalCount (r:1 w:1) - /// Proof Skipped: Council ProposalCount (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Voting (r:0 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[2, 100]`. - /// The range of component `p` is `[1, 100]`. - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[2, 100]`. - /// The range of component `p` is `[1, 100]`. - fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `426 + m * (32 ±0) + p * (36 ±0)` - // Estimated: `3818 + m * (33 ±0) + p * (36 ±0)` - // Minimum execution time: 25_500_000 picoseconds. - Weight::from_parts(26_304_307, 0) - .saturating_add(Weight::from_parts(0, 3818)) - // Standard Error: 49 - .saturating_add(Weight::from_parts(2_243, 0).saturating_mul(b.into())) - // Standard Error: 515 - .saturating_add(Weight::from_parts(18_905, 0).saturating_mul(m.into())) - // Standard Error: 508 - .saturating_add(Weight::from_parts(120_761, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - .saturating_add(Weight::from_parts(0, 33).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) - } - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// The range of component `m` is `[5, 100]`. - /// The range of component `m` is `[5, 100]`. - fn vote(m: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `875 + m * (64 ±0)` - // Estimated: `4339 + m * (64 ±0)` - // Minimum execution time: 22_166_000 picoseconds. 
- Weight::from_parts(22_901_859, 0) - .saturating_add(Weight::from_parts(0, 4339)) - // Standard Error: 238 - .saturating_add(Weight::from_parts(40_475, 0).saturating_mul(m.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) - } - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:0 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - fn close_early_disapproved(m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `464 + m * (64 ±0) + p * (36 ±0)` - // Estimated: `3909 + m * (65 ±0) + p * (36 ±0)` - // Minimum execution time: 27_064_000 picoseconds. - Weight::from_parts(27_961_599, 0) - .saturating_add(Weight::from_parts(0, 3909)) - // Standard Error: 401 - .saturating_add(Weight::from_parts(22_196, 0).saturating_mul(m.into())) - // Standard Error: 391 - .saturating_add(Weight::from_parts(115_698, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) - } - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:1 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `766 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` - // Estimated: `4083 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` - // Minimum execution time: 38_302_000 picoseconds. 
- Weight::from_parts(40_639_640, 0) - .saturating_add(Weight::from_parts(0, 4083)) - // Standard Error: 123 - .saturating_add(Weight::from_parts(1_914, 0).saturating_mul(b.into())) - // Standard Error: 1_272 - .saturating_add(Weight::from_parts(150_067, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) - .saturating_add(Weight::from_parts(0, 66).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) - } - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Prime (r:1 w:0) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:0 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - fn close_disapproved(m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `484 + m * (64 ±0) + p * (36 ±0)` - // Estimated: `3929 + m * (65 ±0) + p * (36 ±0)` - // Minimum execution time: 30_017_000 picoseconds. - Weight::from_parts(30_565_580, 0) - .saturating_add(Weight::from_parts(0, 3929)) - // Standard Error: 378 - .saturating_add(Weight::from_parts(24_396, 0).saturating_mul(m.into())) - // Standard Error: 369 - .saturating_add(Weight::from_parts(114_807, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) - } - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Prime (r:1 w:0) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:1 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `786 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` - // Estimated: `4103 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` - // Minimum execution time: 40_911_000 picoseconds. 
- Weight::from_parts(42_312_485, 0)
- .saturating_add(Weight::from_parts(0, 4103))
- // Standard Error: 83
- .saturating_add(Weight::from_parts(2_208, 0).saturating_mul(b.into()))
- // Standard Error: 879
- .saturating_add(Weight::from_parts(20_173, 0).saturating_mul(m.into()))
- // Standard Error: 857
- .saturating_add(Weight::from_parts(146_302, 0).saturating_mul(p.into()))
- .saturating_add(T::DbWeight::get().reads(5))
- .saturating_add(T::DbWeight::get().writes(3))
- .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into()))
- .saturating_add(Weight::from_parts(0, 66).saturating_mul(m.into()))
- .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into()))
- }
- /// Storage: Council Proposals (r:1 w:1)
- /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: Council Voting (r:0 w:1)
- /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured)
- /// Storage: Council ProposalOf (r:0 w:1)
- /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured)
- /// The range of component `p` is `[1, 100]`.
- /// The range of component `p` is `[1, 100]`.
- fn disapprove_proposal(p: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `293 + p * (32 ±0)`
- // Estimated: `1778 + p * (32 ±0)`
- // Minimum execution time: 15_465_000 picoseconds.
- Weight::from_parts(17_387_663, 0)
- .saturating_add(Weight::from_parts(0, 1778))
- // Standard Error: 450
- .saturating_add(Weight::from_parts(110_406, 0).saturating_mul(p.into()))
- .saturating_add(T::DbWeight::get().reads(1))
- .saturating_add(T::DbWeight::get().writes(3))
- .saturating_add(Weight::from_parts(0, 32).saturating_mul(p.into()))
- }
-}
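Every function in these generated files evaluates the same shape of formula: a constant base term from the benchmark's linear fit, plus one term per component, plus `T::DbWeight` charges for the storage reads and writes listed in the doc comments. The standalone sketch below mirrors that arithmetic in plain Rust, using the `set_members` figures above; `READ` and `WRITE` are assumed stand-ins for the runtime's real `DbWeight`, not constants from this patch.

```
// Sketch only: how a generated weight formula composes, with the
// ref-time figures from `set_members` above. READ/WRITE are assumed
// stand-in DB costs, not real runtime constants.
const READ: u64 = 25_000_000; // ps per storage read (assumed)
const WRITE: u64 = 100_000_000; // ps per storage write (assumed)

fn set_members_ref_time(m: u64, p: u64) -> u64 {
    17_462_000u64 // base: intercept of the linear regression
        .saturating_add(4_868_618u64.saturating_mul(m)) // per outgoing member
        .saturating_add(7_289_594u64.saturating_mul(p)) // per open proposal
        .saturating_add(READ.saturating_mul(p.saturating_add(2))) // 2 fixed reads + 1 per proposal
        .saturating_add(WRITE.saturating_mul(p.saturating_add(2))) // 2 fixed writes + 1 per proposal
}

fn main() {
    // Worst case over the declared component ranges: m = 100, p = 100.
    println!("{} ps", set_members_ref_time(100, 100));
}
```

The proof-size dimension composes the same way, which is why most terms appear twice in the generated code: once with a ref-time part and once as `Weight::from_parts(0, …)`.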
diff --git a/polkadot/runtime/rococo/src/weights/pallet_collective_technical_committee.rs b/polkadot/runtime/rococo/src/weights/pallet_collective_technical_committee.rs
deleted file mode 100644
index 6d66dc871cd5..000000000000
--- a/polkadot/runtime/rococo/src/weights/pallet_collective_technical_committee.rs
+++ /dev/null
@@ -1,324 +0,0 @@
-// Copyright (C) Parity Technologies (UK) Ltd.
-// This file is part of Polkadot.
-
-// Polkadot is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Polkadot is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
-
-//! Autogenerated weights for `pallet_collective`
-//!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
-//! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz`
-//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024
-
-// Executed Command:
-// ./target/production/polkadot
-// benchmark
-// pallet
-// --chain=rococo-dev
-// --steps=50
-// --repeat=20
-// --pallet=pallet_collective
-// --extrinsic=*
-// --execution=wasm
-// --wasm-execution=compiled
-// --header=./file_header.txt
-// --output=./runtime/rococo/src/weights/
-
-#![cfg_attr(rustfmt, rustfmt_skip)]
-#![allow(unused_parens)]
-#![allow(unused_imports)]
-#![allow(missing_docs)]
-
-use frame_support::{traits::Get, weights::Weight};
-use core::marker::PhantomData;
-
-/// Weight functions for `pallet_collective`.
-pub struct WeightInfo<T>(PhantomData<T>);
-impl<T: frame_system::Config> pallet_collective::WeightInfo for WeightInfo<T> {
- /// Storage: TechnicalCommittee Members (r:1 w:1)
- /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: TechnicalCommittee Proposals (r:1 w:0)
- /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: TechnicalCommittee Voting (r:100 w:100)
- /// Proof Skipped: TechnicalCommittee Voting (max_values: None, max_size: None, mode: Measured)
- /// Storage: TechnicalCommittee Prime (r:0 w:1)
- /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured)
- /// The range of component `m` is `[0, 100]`.
- /// The range of component `n` is `[0, 100]`.
- /// The range of component `p` is `[0, 100]`.
- /// The range of component `m` is `[0, 100]`.
- /// The range of component `n` is `[0, 100]`.
- /// The range of component `p` is `[0, 100]`.
- fn set_members(m: u32, _n: u32, p: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `0 + m * (3232 ±0) + p * (3190 ±0)`
- // Estimated: `15766 + m * (1967 ±16) + p * (4332 ±16)`
- // Minimum execution time: 17_826_000 picoseconds.
- Weight::from_parts(18_046_000, 0)
- .saturating_add(Weight::from_parts(0, 15766))
- // Standard Error: 42_164
- .saturating_add(Weight::from_parts(4_858_188, 0).saturating_mul(m.into()))
- // Standard Error: 42_164
- .saturating_add(Weight::from_parts(7_379_354, 0).saturating_mul(p.into()))
- .saturating_add(T::DbWeight::get().reads(2))
- .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into())))
- .saturating_add(T::DbWeight::get().writes(2))
- .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into())))
- .saturating_add(Weight::from_parts(0, 1967).saturating_mul(m.into()))
- .saturating_add(Weight::from_parts(0, 4332).saturating_mul(p.into()))
- }
- /// Storage: TechnicalCommittee Members (r:1 w:0)
- /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured)
- /// The range of component `b` is `[2, 1024]`.
- /// The range of component `m` is `[1, 100]`.
- /// The range of component `b` is `[2, 1024]`.
- /// The range of component `m` is `[1, 100]`.
- fn execute(b: u32, m: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `107 + m * (32 ±0)`
- // Estimated: `1593 + m * (32 ±0)`
- // Minimum execution time: 16_992_000 picoseconds.
- Weight::from_parts(16_555_669, 0) - .saturating_add(Weight::from_parts(0, 1593)) - // Standard Error: 18 - .saturating_add(Weight::from_parts(976, 0).saturating_mul(b.into())) - // Standard Error: 189 - .saturating_add(Weight::from_parts(12_101, 0).saturating_mul(m.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) - } - /// Storage: TechnicalCommittee Members (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee ProposalOf (r:1 w:0) - /// Proof Skipped: TechnicalCommittee ProposalOf (max_values: None, max_size: None, mode: Measured) - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[1, 100]`. - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[1, 100]`. - fn propose_execute(b: u32, m: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `107 + m * (32 ±0)` - // Estimated: `3573 + m * (32 ±0)` - // Minimum execution time: 19_900_000 picoseconds. - Weight::from_parts(19_068_072, 0) - .saturating_add(Weight::from_parts(0, 3573)) - // Standard Error: 12 - .saturating_add(Weight::from_parts(1_161, 0).saturating_mul(b.into())) - // Standard Error: 129 - .saturating_add(Weight::from_parts(22_376, 0).saturating_mul(m.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) - } - /// Storage: TechnicalCommittee Members (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee ProposalOf (r:1 w:1) - /// Proof Skipped: TechnicalCommittee ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Proposals (r:1 w:1) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee ProposalCount (r:1 w:1) - /// Proof Skipped: TechnicalCommittee ProposalCount (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Voting (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Voting (max_values: None, max_size: None, mode: Measured) - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[2, 100]`. - /// The range of component `p` is `[1, 100]`. - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[2, 100]`. - /// The range of component `p` is `[1, 100]`. - fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `397 + m * (32 ±0) + p * (36 ±0)` - // Estimated: `3789 + m * (33 ±0) + p * (36 ±0)` - // Minimum execution time: 26_264_000 picoseconds. 
- Weight::from_parts(27_099_606, 0) - .saturating_add(Weight::from_parts(0, 3789)) - // Standard Error: 50 - .saturating_add(Weight::from_parts(2_278, 0).saturating_mul(b.into())) - // Standard Error: 525 - .saturating_add(Weight::from_parts(19_424, 0).saturating_mul(m.into())) - // Standard Error: 519 - .saturating_add(Weight::from_parts(120_852, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - .saturating_add(Weight::from_parts(0, 33).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) - } - /// Storage: TechnicalCommittee Members (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Voting (r:1 w:1) - /// Proof Skipped: TechnicalCommittee Voting (max_values: None, max_size: None, mode: Measured) - /// The range of component `m` is `[5, 100]`. - /// The range of component `m` is `[5, 100]`. - fn vote(m: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `846 + m * (64 ±0)` - // Estimated: `4310 + m * (64 ±0)` - // Minimum execution time: 22_954_000 picoseconds. - Weight::from_parts(23_675_214, 0) - .saturating_add(Weight::from_parts(0, 4310)) - // Standard Error: 256 - .saturating_add(Weight::from_parts(40_562, 0).saturating_mul(m.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) - } - /// Storage: TechnicalCommittee Voting (r:1 w:1) - /// Proof Skipped: TechnicalCommittee Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Members (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Proposals (r:1 w:1) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee ProposalOf (r:0 w:1) - /// Proof Skipped: TechnicalCommittee ProposalOf (max_values: None, max_size: None, mode: Measured) - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - fn close_early_disapproved(m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `435 + m * (64 ±0) + p * (36 ±0)` - // Estimated: `3880 + m * (65 ±0) + p * (36 ±0)` - // Minimum execution time: 27_797_000 picoseconds. 
- Weight::from_parts(28_934_600, 0) - .saturating_add(Weight::from_parts(0, 3880)) - // Standard Error: 374 - .saturating_add(Weight::from_parts(20_716, 0).saturating_mul(m.into())) - // Standard Error: 364 - .saturating_add(Weight::from_parts(115_491, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) - } - /// Storage: TechnicalCommittee Voting (r:1 w:1) - /// Proof Skipped: TechnicalCommittee Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Members (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee ProposalOf (r:1 w:1) - /// Proof Skipped: TechnicalCommittee ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Proposals (r:1 w:1) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `737 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` - // Estimated: `4054 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` - // Minimum execution time: 39_160_000 picoseconds. - Weight::from_parts(40_470_419, 0) - .saturating_add(Weight::from_parts(0, 4054)) - // Standard Error: 82 - .saturating_add(Weight::from_parts(2_146, 0).saturating_mul(b.into())) - // Standard Error: 869 - .saturating_add(Weight::from_parts(21_442, 0).saturating_mul(m.into())) - // Standard Error: 847 - .saturating_add(Weight::from_parts(144_479, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) - .saturating_add(Weight::from_parts(0, 66).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) - } - /// Storage: TechnicalCommittee Voting (r:1 w:1) - /// Proof Skipped: TechnicalCommittee Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Members (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Prime (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Proposals (r:1 w:1) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee ProposalOf (r:0 w:1) - /// Proof Skipped: TechnicalCommittee ProposalOf (max_values: None, max_size: None, mode: Measured) - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. 
- fn close_disapproved(m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `455 + m * (64 ±0) + p * (36 ±0)` - // Estimated: `3900 + m * (65 ±0) + p * (36 ±0)` - // Minimum execution time: 30_953_000 picoseconds. - Weight::from_parts(31_427_489, 0) - .saturating_add(Weight::from_parts(0, 3900)) - // Standard Error: 397 - .saturating_add(Weight::from_parts(24_280, 0).saturating_mul(m.into())) - // Standard Error: 387 - .saturating_add(Weight::from_parts(116_864, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) - } - /// Storage: TechnicalCommittee Voting (r:1 w:1) - /// Proof Skipped: TechnicalCommittee Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Members (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Prime (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee ProposalOf (r:1 w:1) - /// Proof Skipped: TechnicalCommittee ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Proposals (r:1 w:1) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `757 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` - // Estimated: `4074 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` - // Minimum execution time: 41_468_000 picoseconds. - Weight::from_parts(43_538_242, 0) - .saturating_add(Weight::from_parts(0, 4074)) - // Standard Error: 80 - .saturating_add(Weight::from_parts(1_994, 0).saturating_mul(b.into())) - // Standard Error: 853 - .saturating_add(Weight::from_parts(19_637, 0).saturating_mul(m.into())) - // Standard Error: 831 - .saturating_add(Weight::from_parts(144_674, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) - .saturating_add(Weight::from_parts(0, 66).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) - } - /// Storage: TechnicalCommittee Proposals (r:1 w:1) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Voting (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: TechnicalCommittee ProposalOf (r:0 w:1) - /// Proof Skipped: TechnicalCommittee ProposalOf (max_values: None, max_size: None, mode: Measured) - /// The range of component `p` is `[1, 100]`. - /// The range of component `p` is `[1, 100]`. - fn disapprove_proposal(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `264 + p * (32 ±0)` - // Estimated: `1749 + p * (32 ±0)` - // Minimum execution time: 15_998_000 picoseconds. 
- Weight::from_parts(17_837_641, 0)
- .saturating_add(Weight::from_parts(0, 1749))
- // Standard Error: 422
- .saturating_add(Weight::from_parts(111_526, 0).saturating_mul(p.into()))
- .saturating_add(T::DbWeight::get().reads(1))
- .saturating_add(T::DbWeight::get().writes(3))
- .saturating_add(Weight::from_parts(0, 32).saturating_mul(p.into()))
- }
-}
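The replacement files below fill in both `Weight` dimensions for every call: `ref_time` in picoseconds and `proof_size` in bytes (the `Estimated` figure, which feeds into PoV accounting). A minimal plain-Rust model of that pair follows, assuming nothing beyond the standard library; the real type is `frame_support::weights::Weight`, and the names here are illustrative only.

```
// Minimal model of the two-dimensional weight used in these files:
// both parts saturate independently, mirroring Weight's semantics.
#[derive(Clone, Copy, Debug)]
struct W { ref_time: u64, proof_size: u64 }

impl W {
    const fn from_parts(ref_time: u64, proof_size: u64) -> Self {
        Self { ref_time, proof_size }
    }
    fn saturating_add(self, o: Self) -> Self {
        Self {
            ref_time: self.ref_time.saturating_add(o.ref_time),
            proof_size: self.proof_size.saturating_add(o.proof_size),
        }
    }
}

fn main() {
    // vote_new() below: 165_283_000 ps of execution plus a 42_428-byte
    // proof estimate, composed the same way the generated code does it.
    let w = W::from_parts(165_283_000, 0).saturating_add(W::from_parts(0, 42_428));
    println!("{w:?}");
}
```

Keeping the two parts separate lets the runtime saturate each dimension against its own per-block limit.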
diff --git a/polkadot/runtime/rococo/src/weights/pallet_conviction_voting.rs b/polkadot/runtime/rococo/src/weights/pallet_conviction_voting.rs
new file mode 100644
index 000000000000..ba505737f1b0
--- /dev/null
+++ b/polkadot/runtime/rococo/src/weights/pallet_conviction_voting.rs
@@ -0,0 +1,195 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `pallet_conviction_voting`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2023-06-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-e8ezs4ez-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 1024
+
+// Executed Command:
+// ./target/production/polkadot
+// benchmark
+// pallet
+// --chain=kusama-dev
+// --steps=50
+// --repeat=20
+// --no-storage-info
+// --no-median-slopes
+// --no-min-squares
+// --pallet=pallet_conviction_voting
+// --extrinsic=*
+// --execution=wasm
+// --wasm-execution=compiled
+// --header=./file_header.txt
+// --output=./runtime/kusama/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `pallet_conviction_voting`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_conviction_voting::WeightInfo for WeightInfo<T> {
+ /// Storage: Referenda ReferendumInfoFor (r:1 w:1)
+ /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen)
+ /// Storage: ConvictionVoting VotingFor (r:1 w:1)
+ /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen)
+ /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1)
+ /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(311), added: 2786, mode: MaxEncodedLen)
+ /// Storage: Balances Locks (r:1 w:1)
+ /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen)
+ /// Storage: Balances Freezes (r:1 w:0)
+ /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen)
+ /// Storage: Scheduler Agenda (r:1 w:1)
+ /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen)
+ fn vote_new() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `13445`
+ // Estimated: `42428`
+ // Minimum execution time: 151_077_000 picoseconds.
+ Weight::from_parts(165_283_000, 0)
+ .saturating_add(Weight::from_parts(0, 42428))
+ .saturating_add(T::DbWeight::get().reads(6))
+ .saturating_add(T::DbWeight::get().writes(5))
+ }
+ /// Storage: Referenda ReferendumInfoFor (r:1 w:1)
+ /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen)
+ /// Storage: ConvictionVoting VotingFor (r:1 w:1)
+ /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen)
+ /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1)
+ /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(311), added: 2786, mode: MaxEncodedLen)
+ /// Storage: Balances Locks (r:1 w:1)
+ /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen)
+ /// Storage: Balances Freezes (r:1 w:0)
+ /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen)
+ /// Storage: Scheduler Agenda (r:2 w:2)
+ /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen)
+ fn vote_existing() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `14166`
+ // Estimated: `83866`
+ // Minimum execution time: 232_420_000 picoseconds.
+ Weight::from_parts(244_439_000, 0)
+ .saturating_add(Weight::from_parts(0, 83866))
+ .saturating_add(T::DbWeight::get().reads(7))
+ .saturating_add(T::DbWeight::get().writes(6))
+ }
+ /// Storage: ConvictionVoting VotingFor (r:1 w:1)
+ /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen)
+ /// Storage: Referenda ReferendumInfoFor (r:1 w:1)
+ /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen)
+ /// Storage: Scheduler Agenda (r:2 w:2)
+ /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen)
+ fn remove_vote() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `13918`
+ // Estimated: `83866`
+ // Minimum execution time: 205_017_000 picoseconds.
+ Weight::from_parts(216_594_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: ConvictionVoting VotingFor (r:1 w:1) + /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) + /// Storage: Referenda ReferendumInfoFor (r:1 w:0) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + fn remove_other_vote() -> Weight { + // Proof Size summary in bytes: + // Measured: `13004` + // Estimated: `30706` + // Minimum execution time: 84_226_000 picoseconds. + Weight::from_parts(91_255_000, 0) + .saturating_add(Weight::from_parts(0, 30706)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: ConvictionVoting VotingFor (r:2 w:2) + /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) + /// Storage: Referenda ReferendumInfoFor (r:512 w:512) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) + /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(311), added: 2786, mode: MaxEncodedLen) + /// Storage: Balances Locks (r:1 w:1) + /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// The range of component `r` is `[0, 512]`. + fn delegate(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `29640 + r * (365 ±0)` + // Estimated: `83866 + r * (3411 ±0)` + // Minimum execution time: 78_708_000 picoseconds. + Weight::from_parts(2_053_488_615, 0) + .saturating_add(Weight::from_parts(0, 83866)) + // Standard Error: 179_271 + .saturating_add(Weight::from_parts(47_806_482, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes(6)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 3411).saturating_mul(r.into())) + } + /// Storage: ConvictionVoting VotingFor (r:2 w:2) + /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) + /// Storage: Referenda ReferendumInfoFor (r:512 w:512) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// The range of component `r` is `[0, 512]`. + fn undelegate(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `29555 + r * (365 ±0)` + // Estimated: `83866 + r * (3411 ±0)` + // Minimum execution time: 45_232_000 picoseconds. 
+ Weight::from_parts(2_045_021_014, 0)
+ .saturating_add(Weight::from_parts(0, 83866))
+ // Standard Error: 185_130
+ .saturating_add(Weight::from_parts(47_896_011, 0).saturating_mul(r.into()))
+ .saturating_add(T::DbWeight::get().reads(4))
+ .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into())))
+ .saturating_add(T::DbWeight::get().writes(4))
+ .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into())))
+ .saturating_add(Weight::from_parts(0, 3411).saturating_mul(r.into()))
+ }
+ /// Storage: ConvictionVoting VotingFor (r:1 w:1)
+ /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen)
+ /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1)
+ /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(311), added: 2786, mode: MaxEncodedLen)
+ /// Storage: Balances Locks (r:1 w:1)
+ /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen)
+ /// Storage: Balances Freezes (r:1 w:0)
+ /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen)
+ fn unlock() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `12218`
+ // Estimated: `30706`
+ // Minimum execution time: 116_446_000 picoseconds.
+ Weight::from_parts(124_043_000, 0)
+ .saturating_add(Weight::from_parts(0, 30706))
+ .saturating_add(T::DbWeight::get().reads(4))
+ .saturating_add(T::DbWeight::get().writes(3))
+ }
+}
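`delegate` and `undelegate` above are the only conviction-voting calls with a component: each referendum the account has an active vote on (`r`, up to 512) adds one `ReferendumInfoFor` read and write plus 3411 bytes of proof size. A quick plain-Rust check of how the worst case adds up, with the same assumed stand-in DB costs as in the earlier sketch (`READ`/`WRITE` are illustrative, not from this patch):

```
// Worst-case delegate(r = 512) from the figures above: regression
// intercept, per-referendum term, and one assumed DB read and write
// per referendum on top of the fixed reads(7)/writes(6).
const READ: u64 = 25_000_000; // ps per storage read (assumed)
const WRITE: u64 = 100_000_000; // ps per storage write (assumed)

fn delegate_ref_time(r: u64) -> u64 {
    2_053_488_615u64
        .saturating_add(47_806_482u64.saturating_mul(r))
        .saturating_add(READ.saturating_mul(r.saturating_add(7)))
        .saturating_add(WRITE.saturating_mul(r.saturating_add(6)))
}

fn main() {
    // Proof size grows too: 83_866 + 3_411 * r bytes at the worst case.
    let proof_size: u64 = 83_866 + 3_411 * 512;
    println!("{} ps, {} bytes", delegate_ref_time(512), proof_size);
}
```

Under these assumptions that is roughly 90 ms of ref-time at `r = 512`, which is why the component range is capped there.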
diff --git a/polkadot/runtime/rococo/src/weights/pallet_democracy.rs b/polkadot/runtime/rococo/src/weights/pallet_democracy.rs
deleted file mode 100644
index 00629a5c1103..000000000000
--- a/polkadot/runtime/rococo/src/weights/pallet_democracy.rs
+++ /dev/null
@@ -1,525 +0,0 @@
-// Copyright (C) Parity Technologies (UK) Ltd.
-// This file is part of Polkadot.
-
-// Polkadot is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Polkadot is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
-
-//! Autogenerated weights for `pallet_democracy`
-//!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
-//! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz`
-//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024
-
-// Executed Command:
-// ./target/production/polkadot
-// benchmark
-// pallet
-// --chain=rococo-dev
-// --steps=50
-// --repeat=20
-// --pallet=pallet_democracy
-// --extrinsic=*
-// --execution=wasm
-// --wasm-execution=compiled
-// --header=./file_header.txt
-// --output=./runtime/rococo/src/weights/
-
-#![cfg_attr(rustfmt, rustfmt_skip)]
-#![allow(unused_parens)]
-#![allow(unused_imports)]
-#![allow(missing_docs)]
-
-use frame_support::{traits::Get, weights::Weight};
-use core::marker::PhantomData;
-
-/// Weight functions for `pallet_democracy`.
-pub struct WeightInfo<T>(PhantomData<T>);
-impl<T: frame_system::Config> pallet_democracy::WeightInfo for WeightInfo<T> {
- /// Storage: Democracy PublicPropCount (r:1 w:1)
- /// Proof: Democracy PublicPropCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
- /// Storage: Democracy PublicProps (r:1 w:1)
- /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen)
- /// Storage: Democracy Blacklist (r:1 w:0)
- /// Proof: Democracy Blacklist (max_values: None, max_size: Some(3238), added: 5713, mode: MaxEncodedLen)
- /// Storage: Democracy DepositOf (r:0 w:1)
- /// Proof: Democracy DepositOf (max_values: None, max_size: Some(3230), added: 5705, mode: MaxEncodedLen)
- fn propose() -> Weight {
- // Proof Size summary in bytes:
- // Measured: `4734`
- // Estimated: `18187`
- // Minimum execution time: 39_492_000 picoseconds.
- Weight::from_parts(39_853_000, 0)
- .saturating_add(Weight::from_parts(0, 18187))
- .saturating_add(T::DbWeight::get().reads(3))
- .saturating_add(T::DbWeight::get().writes(3))
- }
- /// Storage: Democracy DepositOf (r:1 w:1)
- /// Proof: Democracy DepositOf (max_values: None, max_size: Some(3230), added: 5705, mode: MaxEncodedLen)
- fn second() -> Weight {
- // Proof Size summary in bytes:
- // Measured: `3489`
- // Estimated: `6695`
- // Minimum execution time: 36_683_000 picoseconds.
- Weight::from_parts(37_121_000, 0)
- .saturating_add(Weight::from_parts(0, 6695))
- .saturating_add(T::DbWeight::get().reads(1))
- .saturating_add(T::DbWeight::get().writes(1))
- }
- /// Storage: Democracy ReferendumInfoOf (r:1 w:1)
- /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen)
- /// Storage: Democracy VotingOf (r:1 w:1)
- /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen)
- /// Storage: Balances Locks (r:1 w:1)
- /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen)
- /// Storage: Balances Freezes (r:1 w:0)
- /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen)
- fn vote_new() -> Weight {
- // Proof Size summary in bytes:
- // Measured: `3365`
- // Estimated: `7260`
- // Minimum execution time: 48_191_000 picoseconds.
- Weight::from_parts(48_936_000, 0)
- .saturating_add(Weight::from_parts(0, 7260))
- .saturating_add(T::DbWeight::get().reads(4))
- .saturating_add(T::DbWeight::get().writes(3))
- }
- /// Storage: Democracy ReferendumInfoOf (r:1 w:1)
- /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen)
- /// Storage: Democracy VotingOf (r:1 w:1)
- /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen)
- /// Storage: Balances Locks (r:1 w:1)
- /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen)
- /// Storage: Balances Freezes (r:1 w:0)
- /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen)
- fn vote_existing() -> Weight {
- // Proof Size summary in bytes:
- // Measured: `3387`
- // Estimated: `7260`
- // Minimum execution time: 52_175_000 picoseconds.
- Weight::from_parts(53_011_000, 0) - .saturating_add(Weight::from_parts(0, 7260)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: Democracy ReferendumInfoOf (r:1 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy Cancellations (r:1 w:1) - /// Proof: Democracy Cancellations (max_values: None, max_size: Some(33), added: 2508, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) - fn emergency_cancel() -> Weight { - // Proof Size summary in bytes: - // Measured: `299` - // Estimated: `3666` - // Minimum execution time: 26_255_000 picoseconds. - Weight::from_parts(26_768_000, 0) - .saturating_add(Weight::from_parts(0, 3666)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: Democracy PublicProps (r:1 w:1) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - /// Storage: Democracy DepositOf (r:1 w:1) - /// Proof: Democracy DepositOf (max_values: None, max_size: Some(3230), added: 5705, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:3 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) - /// Storage: Democracy NextExternal (r:1 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:1 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy Blacklist (r:0 w:1) - /// Proof: Democracy Blacklist (max_values: None, max_size: Some(3238), added: 5713, mode: MaxEncodedLen) - fn blacklist() -> Weight { - // Proof Size summary in bytes: - // Measured: `5843` - // Estimated: `18187` - // Minimum execution time: 96_376_000 picoseconds. - Weight::from_parts(97_222_000, 0) - .saturating_add(Weight::from_parts(0, 18187)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(7)) - } - /// Storage: Democracy NextExternal (r:1 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy Blacklist (r:1 w:0) - /// Proof: Democracy Blacklist (max_values: None, max_size: Some(3238), added: 5713, mode: MaxEncodedLen) - fn external_propose() -> Weight { - // Proof Size summary in bytes: - // Measured: `3349` - // Estimated: `6703` - // Minimum execution time: 13_815_000 picoseconds. - Weight::from_parts(14_071_000, 0) - .saturating_add(Weight::from_parts(0, 6703)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: Democracy NextExternal (r:0 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - fn external_propose_majority() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 3_456_000 picoseconds. 
- Weight::from_parts(3_716_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: Democracy NextExternal (r:0 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - fn external_propose_default() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 3_610_000 picoseconds. - Weight::from_parts(3_768_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: Democracy NextExternal (r:1 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumCount (r:1 w:1) - /// Proof: Democracy ReferendumCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:2) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:0 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - fn fast_track() -> Weight { - // Proof Size summary in bytes: - // Measured: `219` - // Estimated: `3518` - // Minimum execution time: 27_514_000 picoseconds. - Weight::from_parts(27_905_000, 0) - .saturating_add(Weight::from_parts(0, 3518)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: Democracy NextExternal (r:1 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy Blacklist (r:1 w:1) - /// Proof: Democracy Blacklist (max_values: None, max_size: Some(3238), added: 5713, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) - fn veto_external() -> Weight { - // Proof Size summary in bytes: - // Measured: `3452` - // Estimated: `6703` - // Minimum execution time: 31_250_000 picoseconds. - Weight::from_parts(31_604_000, 0) - .saturating_add(Weight::from_parts(0, 6703)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: Democracy PublicProps (r:1 w:1) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - /// Storage: Democracy DepositOf (r:1 w:1) - /// Proof: Democracy DepositOf (max_values: None, max_size: Some(3230), added: 5705, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) - fn cancel_proposal() -> Weight { - // Proof Size summary in bytes: - // Measured: `5754` - // Estimated: `18187` - // Minimum execution time: 79_757_000 picoseconds. 
- Weight::from_parts(83_603_000, 0) - .saturating_add(Weight::from_parts(0, 18187)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:0 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - fn cancel_referendum() -> Weight { - // Proof Size summary in bytes: - // Measured: `204` - // Estimated: `3518` - // Minimum execution time: 20_034_000 picoseconds. - Weight::from_parts(20_674_000, 0) - .saturating_add(Weight::from_parts(0, 3518)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: Democracy LowestUnbaked (r:1 w:1) - /// Proof: Democracy LowestUnbaked (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumCount (r:1 w:0) - /// Proof: Democracy ReferendumCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:99 w:0) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// The range of component `r` is `[0, 99]`. - fn on_initialize_base(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `177 + r * (86 ±0)` - // Estimated: `1489 + r * (2676 ±0)` - // Minimum execution time: 7_053_000 picoseconds. - Weight::from_parts(10_157_848, 0) - .saturating_add(Weight::from_parts(0, 1489)) - // Standard Error: 5_462 - .saturating_add(Weight::from_parts(2_710_889, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 2676).saturating_mul(r.into())) - } - /// Storage: Democracy LowestUnbaked (r:1 w:1) - /// Proof: Democracy LowestUnbaked (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumCount (r:1 w:0) - /// Proof: Democracy ReferendumCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Democracy LastTabledWasExternal (r:1 w:0) - /// Proof: Democracy LastTabledWasExternal (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: Democracy NextExternal (r:1 w:0) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy PublicProps (r:1 w:0) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:99 w:0) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// The range of component `r` is `[0, 99]`. - fn on_initialize_base_with_launch_period(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `177 + r * (86 ±0)` - // Estimated: `18187 + r * (2676 ±0)` - // Minimum execution time: 9_585_000 picoseconds. 
- Weight::from_parts(13_021_372, 0) - .saturating_add(Weight::from_parts(0, 18187)) - // Standard Error: 6_031 - .saturating_add(Weight::from_parts(2_707_449, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 2676).saturating_mul(r.into())) - } - /// Storage: Democracy VotingOf (r:3 w:3) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:99 w:99) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// The range of component `r` is `[0, 99]`. - fn delegate(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `729 + r * (108 ±0)` - // Estimated: `19800 + r * (2676 ±0)` - // Minimum execution time: 41_109_000 picoseconds. - Weight::from_parts(46_477_334, 0) - .saturating_add(Weight::from_parts(0, 19800)) - // Standard Error: 9_372 - .saturating_add(Weight::from_parts(3_815_232, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(4)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2676).saturating_mul(r.into())) - } - /// Storage: Democracy VotingOf (r:2 w:2) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:99 w:99) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// The range of component `r` is `[0, 99]`. - fn undelegate(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `426 + r * (108 ±0)` - // Estimated: `13530 + r * (2676 ±0)` - // Minimum execution time: 21_283_000 picoseconds. - Weight::from_parts(23_372_139, 0) - .saturating_add(Weight::from_parts(0, 13530)) - // Standard Error: 6_191 - .saturating_add(Weight::from_parts(3_768_585, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(2)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2676).saturating_mul(r.into())) - } - /// Storage: Democracy PublicProps (r:0 w:1) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - fn clear_public_proposals() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 3_510_000 picoseconds. 
- Weight::from_parts(3_642_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: Democracy VotingOf (r:1 w:1) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// The range of component `r` is `[0, 99]`. - fn unlock_remove(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `458` - // Estimated: `7260` - // Minimum execution time: 23_647_000 picoseconds. - Weight::from_parts(36_627_552, 0) - .saturating_add(Weight::from_parts(0, 7260)) - // Standard Error: 2_937 - .saturating_add(Weight::from_parts(34_132, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: Democracy VotingOf (r:1 w:1) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// The range of component `r` is `[0, 99]`. - fn unlock_set(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `459 + r * (22 ±0)` - // Estimated: `7260` - // Minimum execution time: 33_932_000 picoseconds. - Weight::from_parts(35_331_660, 0) - .saturating_add(Weight::from_parts(0, 7260)) - // Standard Error: 615 - .saturating_add(Weight::from_parts(60_730, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: Democracy ReferendumInfoOf (r:1 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy VotingOf (r:1 w:1) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// The range of component `r` is `[1, 100]`. - fn remove_vote(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `661 + r * (26 ±0)` - // Estimated: `7260` - // Minimum execution time: 16_605_000 picoseconds. - Weight::from_parts(19_057_092, 0) - .saturating_add(Weight::from_parts(0, 7260)) - // Standard Error: 873 - .saturating_add(Weight::from_parts(68_964, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: Democracy ReferendumInfoOf (r:1 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy VotingOf (r:1 w:1) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// The range of component `r` is `[1, 100]`. 
- fn remove_other_vote(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `661 + r * (26 ±0)` - // Estimated: `7260` - // Minimum execution time: 16_801_000 picoseconds. - Weight::from_parts(19_166_788, 0) - .saturating_add(Weight::from_parts(0, 7260)) - // Standard Error: 1_008 - .saturating_add(Weight::from_parts(69_851, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: Democracy NextExternal (r:1 w:0) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Preimage StatusFor (r:1 w:0) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:0 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) - fn set_external_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `361` - // Estimated: `3556` - // Minimum execution time: 19_207_000 picoseconds. - Weight::from_parts(19_693_000, 0) - .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: Democracy NextExternal (r:1 w:0) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) - fn clear_external_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `219` - // Estimated: `3518` - // Minimum execution time: 17_333_000 picoseconds. - Weight::from_parts(17_555_000, 0) - .saturating_add(Weight::from_parts(0, 3518)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: Democracy PublicProps (r:1 w:0) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - /// Storage: Preimage StatusFor (r:1 w:0) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:0 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) - fn set_proposal_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `4893` - // Estimated: `18187` - // Minimum execution time: 33_859_000 picoseconds. - Weight::from_parts(34_538_000, 0) - .saturating_add(Weight::from_parts(0, 18187)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: Democracy PublicProps (r:1 w:0) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) - fn clear_proposal_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `4755` - // Estimated: `18187` - // Minimum execution time: 31_155_000 picoseconds. 
- Weight::from_parts(31_520_000, 0)
- .saturating_add(Weight::from_parts(0, 18187))
- .saturating_add(T::DbWeight::get().reads(2))
- .saturating_add(T::DbWeight::get().writes(1))
- }
- /// Storage: Preimage StatusFor (r:1 w:0)
- /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen)
- /// Storage: Democracy MetadataOf (r:0 w:1)
- /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen)
- fn set_referendum_metadata() -> Weight {
- // Proof Size summary in bytes:
- // Measured: `216`
- // Estimated: `3556`
- // Minimum execution time: 15_924_000 picoseconds.
- Weight::from_parts(16_151_000, 0)
- .saturating_add(Weight::from_parts(0, 3556))
- .saturating_add(T::DbWeight::get().reads(1))
- .saturating_add(T::DbWeight::get().writes(1))
- }
- /// Storage: Democracy ReferendumInfoOf (r:1 w:0)
- /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen)
- /// Storage: Democracy MetadataOf (r:1 w:1)
- /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen)
- fn clear_referendum_metadata() -> Weight {
- // Proof Size summary in bytes:
- // Measured: `235`
- // Estimated: `3666`
- // Minimum execution time: 18_983_000 picoseconds.
- Weight::from_parts(19_280_000, 0)
- .saturating_add(Weight::from_parts(0, 3666))
- .saturating_add(T::DbWeight::get().reads(2))
- .saturating_add(T::DbWeight::get().writes(1))
- }
-}
diff --git a/polkadot/runtime/rococo/src/weights/pallet_elections_phragmen.rs b/polkadot/runtime/rococo/src/weights/pallet_elections_phragmen.rs
deleted file mode 100644
index fe6aca5ab881..000000000000
--- a/polkadot/runtime/rococo/src/weights/pallet_elections_phragmen.rs
+++ /dev/null
@@ -1,315 +0,0 @@
-// Copyright (C) Parity Technologies (UK) Ltd.
-// This file is part of Polkadot.
-
-// Polkadot is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Polkadot is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
-
-//! Autogenerated weights for `pallet_elections_phragmen`
-//!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
-//! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz`
-//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024
-
-// Executed Command:
-// ./target/production/polkadot
-// benchmark
-// pallet
-// --chain=rococo-dev
-// --steps=50
-// --repeat=20
-// --pallet=pallet_elections_phragmen
-// --extrinsic=*
-// --execution=wasm
-// --wasm-execution=compiled
-// --header=./file_header.txt
-// --output=./runtime/rococo/src/weights/
-
-#![cfg_attr(rustfmt, rustfmt_skip)]
-#![allow(unused_parens)]
-#![allow(unused_imports)]
-#![allow(missing_docs)]
-
-use frame_support::{traits::Get, weights::Weight};
-use core::marker::PhantomData;
-
-/// Weight functions for `pallet_elections_phragmen`.
-pub struct WeightInfo<T>(PhantomData<T>);
-impl<T: frame_system::Config> pallet_elections_phragmen::WeightInfo for WeightInfo<T> {
- /// Storage: PhragmenElection Candidates (r:1 w:0)
- /// Proof Skipped: PhragmenElection Candidates (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: PhragmenElection Members (r:1 w:0)
- /// Proof Skipped: PhragmenElection Members (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: PhragmenElection RunnersUp (r:1 w:0)
- /// Proof Skipped: PhragmenElection RunnersUp (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: PhragmenElection Voting (r:1 w:1)
- /// Proof Skipped: PhragmenElection Voting (max_values: None, max_size: None, mode: Measured)
- /// Storage: Balances Locks (r:1 w:1)
- /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen)
- /// Storage: Balances Freezes (r:1 w:0)
- /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen)
- /// The range of component `v` is `[1, 16]`.
- fn vote_equal(v: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `331 + v * (80 ±0)`
- // Estimated: `4764 + v * (80 ±0)`
- // Minimum execution time: 30_910_000 picoseconds.
- Weight::from_parts(31_851_802, 0)
- .saturating_add(Weight::from_parts(0, 4764))
- // Standard Error: 4_099
- .saturating_add(Weight::from_parts(137_675, 0).saturating_mul(v.into()))
- .saturating_add(T::DbWeight::get().reads(6))
- .saturating_add(T::DbWeight::get().writes(2))
- .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into()))
- }
- /// Storage: PhragmenElection Candidates (r:1 w:0)
- /// Proof Skipped: PhragmenElection Candidates (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: PhragmenElection Members (r:1 w:0)
- /// Proof Skipped: PhragmenElection Members (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: PhragmenElection RunnersUp (r:1 w:0)
- /// Proof Skipped: PhragmenElection RunnersUp (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: PhragmenElection Voting (r:1 w:1)
- /// Proof Skipped: PhragmenElection Voting (max_values: None, max_size: None, mode: Measured)
- /// Storage: Balances Locks (r:1 w:1)
- /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen)
- /// Storage: Balances Freezes (r:1 w:0)
- /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen)
- /// The range of component `v` is `[2, 16]`.
- fn vote_more(v: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `299 + v * (80 ±0)`
- // Estimated: `4764 + v * (80 ±0)`
- // Minimum execution time: 42_670_000 picoseconds.
- Weight::from_parts(43_351_345, 0) - .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 2_986 - .saturating_add(Weight::from_parts(142_231, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) - } - /// Storage: PhragmenElection Candidates (r:1 w:0) - /// Proof Skipped: PhragmenElection Candidates (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PhragmenElection Members (r:1 w:0) - /// Proof Skipped: PhragmenElection Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PhragmenElection RunnersUp (r:1 w:0) - /// Proof Skipped: PhragmenElection RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PhragmenElection Voting (r:1 w:1) - /// Proof Skipped: PhragmenElection Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// The range of component `v` is `[2, 16]`. - fn vote_less(v: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `331 + v * (80 ±0)` - // Estimated: `4764 + v * (80 ±0)` - // Minimum execution time: 42_782_000 picoseconds. - Weight::from_parts(43_611_866, 0) - .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 2_968 - .saturating_add(Weight::from_parts(125_939, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) - } - /// Storage: PhragmenElection Voting (r:1 w:1) - /// Proof Skipped: PhragmenElection Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - fn remove_voter() -> Weight { - // Proof Size summary in bytes: - // Measured: `853` - // Estimated: `4764` - // Minimum execution time: 44_301_000 picoseconds. - Weight::from_parts(44_843_000, 0) - .saturating_add(Weight::from_parts(0, 4764)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: PhragmenElection Candidates (r:1 w:1) - /// Proof Skipped: PhragmenElection Candidates (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PhragmenElection Members (r:1 w:0) - /// Proof Skipped: PhragmenElection Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PhragmenElection RunnersUp (r:1 w:0) - /// Proof Skipped: PhragmenElection RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `c` is `[1, 1000]`. - fn submit_candidacy(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `2678 + c * (48 ±0)` - // Estimated: `4161 + c * (48 ±0)` - // Minimum execution time: 33_576_000 picoseconds. 
- Weight::from_parts(26_859_487, 0) - .saturating_add(Weight::from_parts(0, 4161)) - // Standard Error: 854 - .saturating_add(Weight::from_parts(81_887, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 48).saturating_mul(c.into())) - } - /// Storage: PhragmenElection Candidates (r:1 w:1) - /// Proof Skipped: PhragmenElection Candidates (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `c` is `[1, 1000]`. - fn renounce_candidacy_candidate(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `250 + c * (48 ±0)` - // Estimated: `1722 + c * (48 ±0)` - // Minimum execution time: 29_671_000 picoseconds. - Weight::from_parts(22_509_800, 0) - .saturating_add(Weight::from_parts(0, 1722)) - // Standard Error: 908 - .saturating_add(Weight::from_parts(58_320, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 48).saturating_mul(c.into())) - } - /// Storage: PhragmenElection Members (r:1 w:1) - /// Proof Skipped: PhragmenElection Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PhragmenElection RunnersUp (r:1 w:1) - /// Proof Skipped: PhragmenElection RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Prime (r:1 w:1) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:0) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Members (r:0 w:1) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - fn renounce_candidacy_members() -> Weight { - // Proof Size summary in bytes: - // Measured: `2952` - // Estimated: `4437` - // Minimum execution time: 45_934_000 picoseconds. - Weight::from_parts(46_279_000, 0) - .saturating_add(Weight::from_parts(0, 4437)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: PhragmenElection RunnersUp (r:1 w:1) - /// Proof Skipped: PhragmenElection RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - fn renounce_candidacy_runners_up() -> Weight { - // Proof Size summary in bytes: - // Measured: `1647` - // Estimated: `3132` - // Minimum execution time: 30_291_000 picoseconds. - Weight::from_parts(30_611_000, 0) - .saturating_add(Weight::from_parts(0, 3132)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: Benchmark Override (r:0 w:0) - /// Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured) - fn remove_member_without_replacement() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_000_000_000_000 picoseconds. 
- Weight::from_parts(2_000_000_000_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// Storage: PhragmenElection Members (r:1 w:1) - /// Proof Skipped: PhragmenElection Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: PhragmenElection RunnersUp (r:1 w:1) - /// Proof Skipped: PhragmenElection RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Prime (r:1 w:1) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:0) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Members (r:0 w:1) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - fn remove_member_with_replacement() -> Weight { - // Proof Size summary in bytes: - // Measured: `2952` - // Estimated: `4437` - // Minimum execution time: 63_178_000 picoseconds. - Weight::from_parts(63_850_000, 0) - .saturating_add(Weight::from_parts(0, 4437)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: PhragmenElection Voting (r:10001 w:10000) - /// Proof Skipped: PhragmenElection Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: PhragmenElection Members (r:1 w:0) - /// Proof Skipped: PhragmenElection Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PhragmenElection RunnersUp (r:1 w:0) - /// Proof Skipped: PhragmenElection RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PhragmenElection Candidates (r:1 w:0) - /// Proof Skipped: PhragmenElection Candidates (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Balances Locks (r:10000 w:10000) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:10000 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: System Account (r:10000 w:10000) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// The range of component `v` is `[5000, 10000]`. - /// The range of component `d` is `[0, 5000]`. - fn clean_defunct_voters(v: u32, _d: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `35961 + v * (808 ±0)` - // Estimated: `39702 + v * (3774 ±0)` - // Minimum execution time: 379_638_846_000 picoseconds. 
- Weight::from_parts(380_443_068_000, 0) - .saturating_add(Weight::from_parts(0, 39702)) - // Standard Error: 318_371 - .saturating_add(Weight::from_parts(46_236_987, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(v.into()))) - .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(v.into()))) - .saturating_add(Weight::from_parts(0, 3774).saturating_mul(v.into())) - } - /// Storage: PhragmenElection Candidates (r:1 w:1) - /// Proof Skipped: PhragmenElection Candidates (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PhragmenElection Members (r:1 w:1) - /// Proof Skipped: PhragmenElection Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PhragmenElection RunnersUp (r:1 w:1) - /// Proof Skipped: PhragmenElection RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PhragmenElection Voting (r:10001 w:0) - /// Proof Skipped: PhragmenElection Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:0) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System Account (r:962 w:962) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: PhragmenElection ElectionRounds (r:1 w:1) - /// Proof Skipped: PhragmenElection ElectionRounds (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Members (r:0 w:1) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Prime (r:0 w:1) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `c` is `[1, 1000]`. - /// The range of component `v` is `[1, 10000]`. - /// The range of component `e` is `[10000, 160000]`. - fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0 + e * (28 ±0) + v * (607 ±0)` - // Estimated: `2771509 + c * (2560 ±0) + e * (16 ±0) + v * (2744 ±4)` - // Minimum execution time: 35_941_980_000 picoseconds. - Weight::from_parts(36_032_688_000, 0) - .saturating_add(Weight::from_parts(0, 2771509)) - // Standard Error: 554_972 - .saturating_add(Weight::from_parts(43_733_923, 0).saturating_mul(v.into())) - // Standard Error: 35_614 - .saturating_add(Weight::from_parts(2_430_249, 0).saturating_mul(e.into())) - .saturating_add(T::DbWeight::get().reads(265)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into()))) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) - .saturating_add(T::DbWeight::get().writes(6)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(c.into()))) - .saturating_add(Weight::from_parts(0, 2560).saturating_mul(c.into())) - .saturating_add(Weight::from_parts(0, 16).saturating_mul(e.into())) - .saturating_add(Weight::from_parts(0, 2744).saturating_mul(v.into())) - } -} diff --git a/polkadot/runtime/rococo/src/weights/pallet_membership.rs b/polkadot/runtime/rococo/src/weights/pallet_membership.rs deleted file mode 100644 index 4486c7a270c4..000000000000 --- a/polkadot/runtime/rococo/src/weights/pallet_membership.rs +++ /dev/null @@ -1,204 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. 
-
-// Polkadot is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Polkadot is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
-
-//! Autogenerated weights for `pallet_membership`
-//!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
-//! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz`
-//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024
-
-// Executed Command:
-// ./target/production/polkadot
-// benchmark
-// pallet
-// --chain=rococo-dev
-// --steps=50
-// --repeat=20
-// --pallet=pallet_membership
-// --extrinsic=*
-// --execution=wasm
-// --wasm-execution=compiled
-// --header=./file_header.txt
-// --output=./runtime/rococo/src/weights/
-
-#![cfg_attr(rustfmt, rustfmt_skip)]
-#![allow(unused_parens)]
-#![allow(unused_imports)]
-#![allow(missing_docs)]
-
-use frame_support::{traits::Get, weights::Weight};
-use core::marker::PhantomData;
-
-/// Weight functions for `pallet_membership`.
-pub struct WeightInfo<T>(PhantomData<T>);
-impl<T: frame_system::Config> pallet_membership::WeightInfo for WeightInfo<T> {
- /// Storage: TechnicalMembership Members (r:1 w:1)
- /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen)
- /// Storage: TechnicalCommittee Proposals (r:1 w:0)
- /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: TechnicalCommittee Members (r:0 w:1)
- /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: TechnicalCommittee Prime (r:0 w:1)
- /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured)
- /// The range of component `m` is `[1, 99]`.
- fn add_member(m: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `140 + m * (64 ±0)`
- // Estimated: `4687 + m * (64 ±0)`
- // Minimum execution time: 17_084_000 picoseconds.
- Weight::from_parts(17_897_754, 0) - .saturating_add(Weight::from_parts(0, 4687)) - // Standard Error: 295 - .saturating_add(Weight::from_parts(30_882, 0).saturating_mul(m.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) - } - /// Storage: TechnicalMembership Members (r:1 w:1) - /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Proposals (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalMembership Prime (r:1 w:0) - /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Members (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `m` is `[2, 100]`. - fn remove_member(m: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `244 + m * (64 ±0)` - // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 19_550_000 picoseconds. - Weight::from_parts(20_467_978, 0) - .saturating_add(Weight::from_parts(0, 4687)) - // Standard Error: 330 - .saturating_add(Weight::from_parts(31_881, 0).saturating_mul(m.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) - } - /// Storage: TechnicalMembership Members (r:1 w:1) - /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Proposals (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalMembership Prime (r:1 w:0) - /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Members (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `m` is `[2, 100]`. - fn swap_member(m: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `244 + m * (64 ±0)` - // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 19_994_000 picoseconds. 
- Weight::from_parts(20_663_824, 0) - .saturating_add(Weight::from_parts(0, 4687)) - // Standard Error: 337 - .saturating_add(Weight::from_parts(44_806, 0).saturating_mul(m.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) - } - /// Storage: TechnicalMembership Members (r:1 w:1) - /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Proposals (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalMembership Prime (r:1 w:0) - /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Members (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `m` is `[1, 100]`. - fn reset_member(m: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `244 + m * (64 ±0)` - // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 18_978_000 picoseconds. - Weight::from_parts(21_273_577, 0) - .saturating_add(Weight::from_parts(0, 4687)) - // Standard Error: 2_765 - .saturating_add(Weight::from_parts(152_082, 0).saturating_mul(m.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) - } - /// Storage: TechnicalMembership Members (r:1 w:1) - /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Proposals (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalMembership Prime (r:1 w:1) - /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Members (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `m` is `[1, 100]`. - fn change_key(m: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `244 + m * (64 ±0)` - // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 20_005_000 picoseconds. 
- Weight::from_parts(21_280_089, 0)
- .saturating_add(Weight::from_parts(0, 4687))
- // Standard Error: 672
- .saturating_add(Weight::from_parts(41_961, 0).saturating_mul(m.into()))
- .saturating_add(T::DbWeight::get().reads(3))
- .saturating_add(T::DbWeight::get().writes(4))
- .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into()))
- }
- /// Storage: TechnicalMembership Members (r:1 w:0)
- /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen)
- /// Storage: TechnicalMembership Prime (r:0 w:1)
- /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen)
- /// Storage: TechnicalCommittee Prime (r:0 w:1)
- /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured)
- /// The range of component `m` is `[1, 100]`.
- fn set_prime(m: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `32 + m * (32 ±0)`
- // Estimated: `4687 + m * (32 ±0)`
- // Minimum execution time: 8_168_000 picoseconds.
- Weight::from_parts(8_579_141, 0)
- .saturating_add(Weight::from_parts(0, 4687))
- // Standard Error: 215
- .saturating_add(Weight::from_parts(9_557, 0).saturating_mul(m.into()))
- .saturating_add(T::DbWeight::get().reads(1))
- .saturating_add(T::DbWeight::get().writes(2))
- .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into()))
- }
- /// Storage: TechnicalMembership Prime (r:0 w:1)
- /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen)
- /// Storage: TechnicalCommittee Prime (r:0 w:1)
- /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured)
- /// The range of component `m` is `[1, 100]`.
- fn clear_prime(m: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `0`
- // Estimated: `0`
- // Minimum execution time: 3_344_000 picoseconds.
- Weight::from_parts(3_551_700, 0)
- .saturating_add(Weight::from_parts(0, 0))
- // Standard Error: 86
- .saturating_add(Weight::from_parts(832, 0).saturating_mul(m.into()))
- .saturating_add(T::DbWeight::get().writes(2))
- }
-}
diff --git a/polkadot/runtime/rococo/src/weights/pallet_ranked_collective.rs b/polkadot/runtime/rococo/src/weights/pallet_ranked_collective.rs
new file mode 100644
index 000000000000..8a556c3a248e
--- /dev/null
+++ b/polkadot/runtime/rococo/src/weights/pallet_ranked_collective.rs
@@ -0,0 +1,175 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `pallet_ranked_collective`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2023-07-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-xerhrdyb-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! EXECUTION: `Some(Wasm)`, WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// target/production/polkadot
+// benchmark
+// pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
+// --execution=wasm
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json
+// --pallet=pallet_ranked_collective
+// --chain=rococo-dev
+// --header=./file_header.txt
+// --output=./runtime/rococo/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `pallet_ranked_collective`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_ranked_collective::WeightInfo for WeightInfo<T> {
+ /// Storage: `FellowshipCollective::Members` (r:1 w:1)
+ /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
+ /// Storage: `FellowshipCollective::MemberCount` (r:1 w:1)
+ /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`)
+ /// Storage: `FellowshipCollective::IndexToId` (r:0 w:1)
+ /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
+ /// Storage: `FellowshipCollective::IdToIndex` (r:0 w:1)
+ /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
+ fn add_member() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `42`
+ // Estimated: `3507`
+ // Minimum execution time: 17_632_000 picoseconds.
+ Weight::from_parts(18_252_000, 0)
+ .saturating_add(Weight::from_parts(0, 3507))
+ .saturating_add(T::DbWeight::get().reads(2))
+ .saturating_add(T::DbWeight::get().writes(4))
+ }
+ /// Storage: `FellowshipCollective::Members` (r:1 w:1)
+ /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
+ /// Storage: `FellowshipCollective::MemberCount` (r:11 w:11)
+ /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`)
+ /// Storage: `FellowshipCollective::IdToIndex` (r:11 w:11)
+ /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
+ /// Storage: `FellowshipCollective::IndexToId` (r:11 w:11)
+ /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
+ /// The range of component `r` is `[0, 10]`.
+ fn remove_member(r: u32, ) -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `517 + r * (281 ±0)`
+ // Estimated: `3519 + r * (2529 ±0)`
+ // Minimum execution time: 27_960_000 picoseconds.
+ Weight::from_parts(30_632_408, 0) + .saturating_add(Weight::from_parts(0, 3519)) + // Standard Error: 22_806 + .saturating_add(Weight::from_parts(13_000_901, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 2529).saturating_mul(r.into())) + } + /// Storage: `FellowshipCollective::Members` (r:1 w:1) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:1) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IndexToId` (r:0 w:1) + /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IdToIndex` (r:0 w:1) + /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 10]`. + fn promote_member(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `214 + r * (17 ±0)` + // Estimated: `3507` + // Minimum execution time: 19_900_000 picoseconds. + Weight::from_parts(20_908_316, 0) + .saturating_add(Weight::from_parts(0, 3507)) + // Standard Error: 4_878 + .saturating_add(Weight::from_parts(330_385, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipCollective::Members` (r:1 w:1) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:1) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IdToIndex` (r:1 w:1) + /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IndexToId` (r:1 w:1) + /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 10]`. + fn demote_member(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `532 + r * (72 ±0)` + // Estimated: `3519` + // Minimum execution time: 27_697_000 picoseconds. 
+ Weight::from_parts(30_341_815, 0) + .saturating_add(Weight::from_parts(0, 3519)) + // Standard Error: 17_010 + .saturating_add(Weight::from_parts(642_213, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipCollective::Members` (r:1 w:0) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::Voting` (r:1 w:1) + /// Proof: `FellowshipCollective::Voting` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn vote() -> Weight { + // Proof Size summary in bytes: + // Measured: `638` + // Estimated: `83866` + // Minimum execution time: 48_275_000 picoseconds. + Weight::from_parts(49_326_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::VotingCleanup` (r:1 w:0) + /// Proof: `FellowshipCollective::VotingCleanup` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::Voting` (r:100 w:100) + /// Proof: `FellowshipCollective::Voting` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 100]`. + fn cleanup_poll(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `434 + n * (50 ±0)` + // Estimated: `4365 + n * (2540 ±0)` + // Minimum execution time: 15_506_000 picoseconds. + Weight::from_parts(17_634_029, 0) + .saturating_add(Weight::from_parts(0, 4365)) + // Standard Error: 2_117 + .saturating_add(Weight::from_parts(1_126_879, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2540).saturating_mul(n.into())) + } +} diff --git a/polkadot/runtime/rococo/src/weights/pallet_referenda_fellowship_referenda.rs b/polkadot/runtime/rococo/src/weights/pallet_referenda_fellowship_referenda.rs new file mode 100644 index 000000000000..96f172230e13 --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/pallet_referenda_fellowship_referenda.rs @@ -0,0 +1,524 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `pallet_referenda`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2023-07-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-xerhrdyb-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! EXECUTION: `Some(Wasm)`, WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// target/production/polkadot
+// benchmark
+// pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
+// --execution=wasm
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json
+// --pallet=pallet_referenda
+// --chain=rococo-dev
+// --header=./file_header.txt
+// --output=./runtime/rococo/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `pallet_referenda`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_referenda::WeightInfo for WeightInfo<T> {
+ /// Storage: `FellowshipCollective::Members` (r:1 w:0)
+ /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
+ /// Storage: `FellowshipReferenda::ReferendumCount` (r:1 w:1)
+ /// Proof: `FellowshipReferenda::ReferendumCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Agenda` (r:1 w:1)
+ /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`)
+ /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:0 w:1)
+ /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`)
+ fn submit() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `327`
+ // Estimated: `42428`
+ // Minimum execution time: 29_909_000 picoseconds.
+ Weight::from_parts(30_645_000, 0)
+ .saturating_add(Weight::from_parts(0, 42428))
+ .saturating_add(T::DbWeight::get().reads(3))
+ .saturating_add(T::DbWeight::get().writes(3))
+ }
+ /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1)
+ /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Agenda` (r:2 w:2)
+ /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`)
+ fn place_decision_deposit_preparing() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `438`
+ // Estimated: `83866`
+ // Minimum execution time: 54_405_000 picoseconds.
+ Weight::from_parts(55_583_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:0) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn place_decision_deposit_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `2076` + // Estimated: `42428` + // Minimum execution time: 110_477_000 picoseconds. + Weight::from_parts(119_187_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:0) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn place_decision_deposit_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `2117` + // Estimated: `42428` + // Minimum execution time: 111_467_000 picoseconds. + Weight::from_parts(117_758_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:1) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn place_decision_deposit_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `774` + // Estimated: `83866` + // Minimum execution time: 191_135_000 picoseconds. 
+ Weight::from_parts(210_535_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:1) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn place_decision_deposit_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `639` + // Estimated: `83866` + // Minimum execution time: 67_168_000 picoseconds. + Weight::from_parts(68_895_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + fn refund_decision_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `351` + // Estimated: `4365` + // Minimum execution time: 31_298_000 picoseconds. + Weight::from_parts(32_570_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + fn refund_submission_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `201` + // Estimated: `4365` + // Minimum execution time: 15_674_000 picoseconds. + Weight::from_parts(16_190_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn cancel() -> Weight { + // Proof Size summary in bytes: + // Measured: `383` + // Estimated: `83866` + // Minimum execution time: 38_927_000 picoseconds. 
+ Weight::from_parts(40_545_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::MetadataOf` (r:1 w:0) + /// Proof: `FellowshipReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn kill() -> Weight { + // Proof Size summary in bytes: + // Measured: `484` + // Estimated: `83866` + // Minimum execution time: 80_209_000 picoseconds. + Weight::from_parts(82_084_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:0) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:1) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + fn one_fewer_deciding_queue_empty() -> Weight { + // Proof Size summary in bytes: + // Measured: `174` + // Estimated: `4277` + // Minimum execution time: 9_520_000 picoseconds. + Weight::from_parts(10_088_000, 0) + .saturating_add(Weight::from_parts(0, 4277)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn one_fewer_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `2376` + // Estimated: `42428` + // Minimum execution time: 93_893_000 picoseconds. 
+ Weight::from_parts(101_065_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn one_fewer_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `2362` + // Estimated: `42428` + // Minimum execution time: 98_811_000 picoseconds. + Weight::from_parts(103_590_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + fn nudge_referendum_requeued_insertion() -> Weight { + // Proof Size summary in bytes: + // Measured: `1841` + // Estimated: `4365` + // Minimum execution time: 43_230_000 picoseconds. + Weight::from_parts(46_120_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + fn nudge_referendum_requeued_slide() -> Weight { + // Proof Size summary in bytes: + // Measured: `1808` + // Estimated: `4365` + // Minimum execution time: 43_092_000 picoseconds. + Weight::from_parts(46_018_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:0) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + fn nudge_referendum_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `1824` + // Estimated: `4365` + // Minimum execution time: 49_697_000 picoseconds. 
+ Weight::from_parts(53_795_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:0) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + fn nudge_referendum_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `1865` + // Estimated: `4365` + // Minimum execution time: 50_417_000 picoseconds. + Weight::from_parts(53_214_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_no_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `335` + // Estimated: `42428` + // Minimum execution time: 25_688_000 picoseconds. + Weight::from_parts(26_575_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_preparing() -> Weight { + // Proof Size summary in bytes: + // Measured: `383` + // Estimated: `42428` + // Minimum execution time: 26_230_000 picoseconds. + Weight::from_parts(27_235_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + fn nudge_referendum_timed_out() -> Weight { + // Proof Size summary in bytes: + // Measured: `242` + // Estimated: `4365` + // Minimum execution time: 17_585_000 picoseconds. 
+ Weight::from_parts(18_225_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:1) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `584` + // Estimated: `42428` + // Minimum execution time: 38_243_000 picoseconds. + Weight::from_parts(39_959_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:1) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `719` + // Estimated: `42428` + // Minimum execution time: 88_424_000 picoseconds. + Weight::from_parts(92_969_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `770` + // Estimated: `42428` + // Minimum execution time: 138_207_000 picoseconds. 
+ Weight::from_parts(151_726_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_end_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `755` + // Estimated: `42428` + // Minimum execution time: 131_001_000 picoseconds. + Weight::from_parts(148_651_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_continue_not_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `770` + // Estimated: `42428` + // Minimum execution time: 109_612_000 picoseconds. + Weight::from_parts(143_626_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_continue_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `776` + // Estimated: `42428` + // Minimum execution time: 71_754_000 picoseconds. 
+ Weight::from_parts(77_329_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Lookup` (r:1 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + fn nudge_referendum_approved() -> Weight { + // Proof Size summary in bytes: + // Measured: `776` + // Estimated: `83866` + // Minimum execution time: 153_244_000 picoseconds. + Weight::from_parts(169_961_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_rejected() -> Weight { + // Proof Size summary in bytes: + // Measured: `772` + // Estimated: `42428` + // Minimum execution time: 137_997_000 picoseconds. + Weight::from_parts(157_862_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::MetadataOf` (r:0 w:1) + /// Proof: `FellowshipReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn set_some_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `458` + // Estimated: `4365` + // Minimum execution time: 21_794_000 picoseconds. 
+ Weight::from_parts(22_341_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::MetadataOf` (r:1 w:1) + /// Proof: `FellowshipReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn clear_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `319` + // Estimated: `4365` + // Minimum execution time: 18_458_000 picoseconds. + Weight::from_parts(19_097_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/polkadot/runtime/rococo/src/weights/pallet_referenda_referenda.rs b/polkadot/runtime/rococo/src/weights/pallet_referenda_referenda.rs new file mode 100644 index 000000000000..b7cc5df28b91 --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/pallet_referenda_referenda.rs @@ -0,0 +1,522 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. + +//! Autogenerated weights for `pallet_referenda` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-xerhrdyb-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: `Some(Wasm)`, WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --pallet=pallet_referenda +// --chain=rococo-dev +// --header=./file_header.txt +// --output=./runtime/rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_referenda`.
+pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_referenda::WeightInfo for WeightInfo<T> { + /// Storage: `Referenda::ReferendumCount` (r:1 w:1) + /// Proof: `Referenda::ReferendumCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:0 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + fn submit() -> Weight { + // Proof Size summary in bytes: + // Measured: `324` + // Estimated: `42428` + // Minimum execution time: 39_852_000 picoseconds. + Weight::from_parts(41_610_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn place_decision_deposit_preparing() -> Weight { + // Proof Size summary in bytes: + // Measured: `577` + // Estimated: `83866` + // Minimum execution time: 52_588_000 picoseconds. + Weight::from_parts(54_154_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:0) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn place_decision_deposit_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `3334` + // Estimated: `42428` + // Minimum execution time: 70_483_000 picoseconds.
+ Weight::from_parts(72_731_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:0) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn place_decision_deposit_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `3354` + // Estimated: `42428` + // Minimum execution time: 68_099_000 picoseconds. + Weight::from_parts(71_560_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:1) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Balances::InactiveIssuance` (r:1 w:0) + /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn place_decision_deposit_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `577` + // Estimated: `83866` + // Minimum execution time: 64_357_000 picoseconds. + Weight::from_parts(66_081_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:1) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Balances::InactiveIssuance` (r:1 w:0) + /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn place_decision_deposit_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `577` + // Estimated: `83866` + // Minimum execution time: 62_709_000 picoseconds. 
+ Weight::from_parts(64_534_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + fn refund_decision_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `417` + // Estimated: `4401` + // Minimum execution time: 31_296_000 picoseconds. + Weight::from_parts(32_221_000, 0) + .saturating_add(Weight::from_parts(0, 4401)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + fn refund_submission_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `407` + // Estimated: `4401` + // Minimum execution time: 31_209_000 picoseconds. + Weight::from_parts(32_168_000, 0) + .saturating_add(Weight::from_parts(0, 4401)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn cancel() -> Weight { + // Proof Size summary in bytes: + // Measured: `485` + // Estimated: `83866` + // Minimum execution time: 38_887_000 picoseconds. + Weight::from_parts(40_193_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Referenda::MetadataOf` (r:1 w:0) + /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn kill() -> Weight { + // Proof Size summary in bytes: + // Measured: `726` + // Estimated: `83866` + // Minimum execution time: 106_054_000 picoseconds. + Weight::from_parts(108_318_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Referenda::TrackQueue` (r:1 w:0) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:1) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + fn one_fewer_deciding_queue_empty() -> Weight { + // Proof Size summary in bytes: + // Measured: `240` + // Estimated: `5477` + // Minimum execution time: 9_263_000 picoseconds. 
+ Weight::from_parts(9_763_000, 0) + .saturating_add(Weight::from_parts(0, 5477)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Balances::InactiveIssuance` (r:1 w:0) + /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn one_fewer_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `3254` + // Estimated: `42428` + // Minimum execution time: 50_080_000 picoseconds. + Weight::from_parts(51_858_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Balances::InactiveIssuance` (r:1 w:0) + /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn one_fewer_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `3254` + // Estimated: `42428` + // Minimum execution time: 53_889_000 picoseconds. + Weight::from_parts(55_959_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + fn nudge_referendum_requeued_insertion() -> Weight { + // Proof Size summary in bytes: + // Measured: `3077` + // Estimated: `5477` + // Minimum execution time: 23_266_000 picoseconds. + Weight::from_parts(24_624_000, 0) + .saturating_add(Weight::from_parts(0, 5477)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + fn nudge_referendum_requeued_slide() -> Weight { + // Proof Size summary in bytes: + // Measured: `3077` + // Estimated: `5477` + // Minimum execution time: 22_846_000 picoseconds. 
+ Weight::from_parts(24_793_000, 0) + .saturating_add(Weight::from_parts(0, 5477)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:0) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + fn nudge_referendum_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `3081` + // Estimated: `5477` + // Minimum execution time: 28_284_000 picoseconds. + Weight::from_parts(29_940_000, 0) + .saturating_add(Weight::from_parts(0, 5477)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:0) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + fn nudge_referendum_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `3101` + // Estimated: `5477` + // Minimum execution time: 28_133_000 picoseconds. + Weight::from_parts(29_638_000, 0) + .saturating_add(Weight::from_parts(0, 5477)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_no_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `437` + // Estimated: `42428` + // Minimum execution time: 25_710_000 picoseconds. + Weight::from_parts(26_500_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_preparing() -> Weight { + // Proof Size summary in bytes: + // Measured: `485` + // Estimated: `42428` + // Minimum execution time: 25_935_000 picoseconds. 
+ Weight::from_parts(26_803_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + fn nudge_referendum_timed_out() -> Weight { + // Proof Size summary in bytes: + // Measured: `344` + // Estimated: `4401` + // Minimum execution time: 17_390_000 picoseconds. + Weight::from_parts(18_042_000, 0) + .saturating_add(Weight::from_parts(0, 4401)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:1) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Balances::InactiveIssuance` (r:1 w:0) + /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `485` + // Estimated: `42428` + // Minimum execution time: 35_141_000 picoseconds. + Weight::from_parts(36_318_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:1) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Balances::InactiveIssuance` (r:1 w:0) + /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `485` + // Estimated: `42428` + // Minimum execution time: 37_815_000 picoseconds. + Weight::from_parts(39_243_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Balances::InactiveIssuance` (r:1 w:0) + /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `538` + // Estimated: `42428` + // Minimum execution time: 30_779_000 picoseconds. 
+ Weight::from_parts(31_845_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Balances::InactiveIssuance` (r:1 w:0) + /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_end_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `521` + // Estimated: `42428` + // Minimum execution time: 31_908_000 picoseconds. + Weight::from_parts(33_253_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Balances::InactiveIssuance` (r:1 w:0) + /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_continue_not_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `538` + // Estimated: `42428` + // Minimum execution time: 28_951_000 picoseconds. + Weight::from_parts(30_004_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Balances::InactiveIssuance` (r:1 w:0) + /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_continue_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `542` + // Estimated: `42428` + // Minimum execution time: 27_750_000 picoseconds. 
+ Weight::from_parts(28_588_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Balances::InactiveIssuance` (r:1 w:0) + /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Lookup` (r:1 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + fn nudge_referendum_approved() -> Weight { + // Proof Size summary in bytes: + // Measured: `542` + // Estimated: `83866` + // Minimum execution time: 43_950_000 picoseconds. + Weight::from_parts(46_164_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Balances::InactiveIssuance` (r:1 w:0) + /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_rejected() -> Weight { + // Proof Size summary in bytes: + // Measured: `538` + // Estimated: `42428` + // Minimum execution time: 31_050_000 picoseconds. + Weight::from_parts(32_169_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Referenda::MetadataOf` (r:0 w:1) + /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn set_some_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `560` + // Estimated: `4401` + // Minimum execution time: 21_193_000 picoseconds. + Weight::from_parts(22_116_000, 0) + .saturating_add(Weight::from_parts(0, 4401)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Referenda::MetadataOf` (r:1 w:1) + /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn clear_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `421` + // Estimated: `4401` + // Minimum execution time: 18_065_000 picoseconds. 
+ Weight::from_parts(18_781_000, 0) + .saturating_add(Weight::from_parts(0, 4401)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/polkadot/runtime/rococo/src/weights/pallet_tips.rs b/polkadot/runtime/rococo/src/weights/pallet_tips.rs deleted file mode 100644 index c4710afd78e2..000000000000 --- a/polkadot/runtime/rococo/src/weights/pallet_tips.rs +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. - -//! Autogenerated weights for `pallet_tips` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot -// benchmark -// pallet -// --chain=rococo-dev -// --steps=50 -// --repeat=20 -// --pallet=pallet_tips -// --extrinsic=* -// --execution=wasm -// --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_tips`. -pub struct WeightInfo<T>(PhantomData<T>); -impl<T: frame_system::Config> pallet_tips::WeightInfo for WeightInfo<T> { - /// Storage: Tips Reasons (r:1 w:1) - /// Proof Skipped: Tips Reasons (max_values: None, max_size: None, mode: Measured) - /// Storage: Tips Tips (r:1 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 16384]`. - fn report_awesome(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `4` - // Estimated: `3469` - // Minimum execution time: 27_741_000 picoseconds. - Weight::from_parts(28_495_173, 0) - .saturating_add(Weight::from_parts(0, 3469)) - // Standard Error: 4 - .saturating_add(Weight::from_parts(1_433, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: Tips Tips (r:1 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) - /// Storage: Tips Reasons (r:0 w:1) - /// Proof Skipped: Tips Reasons (max_values: None, max_size: None, mode: Measured) - fn retract_tip() -> Weight { - // Proof Size summary in bytes: - // Measured: `221` - // Estimated: `3686` - // Minimum execution time: 27_275_000 picoseconds.
- Weight::from_parts(27_649_000, 0) - .saturating_add(Weight::from_parts(0, 3686)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: PhragmenElection Members (r:1 w:0) - /// Proof Skipped: PhragmenElection Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Tips Reasons (r:1 w:1) - /// Proof Skipped: Tips Reasons (max_values: None, max_size: None, mode: Measured) - /// Storage: Tips Tips (r:0 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 16384]`. - /// The range of component `t` is `[1, 19]`. - fn tip_new(r: u32, t: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `74 + t * (64 ±0)` - // Estimated: `3539 + t * (64 ±0)` - // Minimum execution time: 19_809_000 picoseconds. - Weight::from_parts(18_182_607, 0) - .saturating_add(Weight::from_parts(0, 3539)) - // Standard Error: 5 - .saturating_add(Weight::from_parts(1_303, 0).saturating_mul(r.into())) - // Standard Error: 5_156 - .saturating_add(Weight::from_parts(151_789, 0).saturating_mul(t.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - .saturating_add(Weight::from_parts(0, 64).saturating_mul(t.into())) - } - /// Storage: PhragmenElection Members (r:1 w:0) - /// Proof Skipped: PhragmenElection Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Tips Tips (r:1 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) - /// The range of component `t` is `[1, 19]`. - fn tip(t: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `295 + t * (112 ±0)` - // Estimated: `3760 + t * (112 ±0)` - // Minimum execution time: 15_528_000 picoseconds. - Weight::from_parts(15_717_755, 0) - .saturating_add(Weight::from_parts(0, 3760)) - // Standard Error: 6_569 - .saturating_add(Weight::from_parts(146_426, 0).saturating_mul(t.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 112).saturating_mul(t.into())) - } - /// Storage: Tips Tips (r:1 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) - /// Storage: PhragmenElection Members (r:1 w:0) - /// Proof Skipped: PhragmenElection Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Tips Reasons (r:0 w:1) - /// Proof Skipped: Tips Reasons (max_values: None, max_size: None, mode: Measured) - /// The range of component `t` is `[1, 19]`. - fn close_tip(t: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `334 + t * (112 ±0)` - // Estimated: `3790 + t * (112 ±0)` - // Minimum execution time: 58_304_000 picoseconds. 
- Weight::from_parts(60_138_785, 0) - .saturating_add(Weight::from_parts(0, 3790)) - // Standard Error: 7_636 - .saturating_add(Weight::from_parts(86_665, 0).saturating_mul(t.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 112).saturating_mul(t.into())) - } - /// Storage: Tips Tips (r:1 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) - /// Storage: Tips Reasons (r:0 w:1) - /// Proof Skipped: Tips Reasons (max_values: None, max_size: None, mode: Measured) - /// The range of component `t` is `[1, 19]`. - fn slash_tip(t: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `269` - // Estimated: `3734` - // Minimum execution time: 15_097_000 picoseconds. - Weight::from_parts(15_497_872, 0) - .saturating_add(Weight::from_parts(0, 3734)) - // Standard Error: 785 - .saturating_add(Weight::from_parts(18_377, 0).saturating_mul(t.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } -} diff --git a/polkadot/runtime/rococo/src/weights/pallet_whitelist.rs b/polkadot/runtime/rococo/src/weights/pallet_whitelist.rs new file mode 100644 index 000000000000..7c307deec4c6 --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/pallet_whitelist.rs @@ -0,0 +1,116 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. + +//! Autogenerated weights for `pallet_whitelist` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-08-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-aahe6cbd-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --pallet=pallet_whitelist +// --chain=rococo-dev +// --header=./file_header.txt +// --output=./runtime/rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_whitelist`.
+pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_whitelist::WeightInfo for WeightInfo<T> { + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + fn whitelist_call() -> Weight { + // Proof Size summary in bytes: + // Measured: `223` + // Estimated: `3556` + // Minimum execution time: 20_035_000 picoseconds. + Weight::from_parts(20_452_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + fn remove_whitelisted_call() -> Weight { + // Proof Size summary in bytes: + // Measured: `352` + // Estimated: `3556` + // Minimum execution time: 20_247_000 picoseconds. + Weight::from_parts(20_808_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:1 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`) + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 4194294]`. + fn dispatch_whitelisted_call(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `428 + n * (1 ±0)` + // Estimated: `3892 + n * (1 ±0)` + // Minimum execution time: 32_633_000 picoseconds. + Weight::from_parts(32_855_000, 0) + .saturating_add(Weight::from_parts(0, 3892)) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_223, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) + } + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 10000]`. + fn dispatch_whitelisted_call_with_preimage(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `352` + // Estimated: `3556` + // Minimum execution time: 23_833_000 picoseconds.
+ Weight::from_parts(24_698_994, 0) + .saturating_add(Weight::from_parts(0, 3556)) + // Standard Error: 4 + .saturating_add(Weight::from_parts(1_454, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/polkadot/runtime/rococo/src/xcm_config.rs b/polkadot/runtime/rococo/src/xcm_config.rs index 445cb8014357..b84d2335a699 100644 --- a/polkadot/runtime/rococo/src/xcm_config.rs +++ b/polkadot/runtime/rococo/src/xcm_config.rs @@ -17,9 +17,12 @@ //! XCM configuration for Rococo. use super::{ - parachains_origin, AccountId, AllPalletsWithSystem, Balances, Dmp, ParaId, Runtime, + parachains_origin, AccountId, AllPalletsWithSystem, Balances, Dmp, Fellows, ParaId, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, TransactionByteFee, WeightToFee, XcmPallet, }; + +use crate::governance::StakingAdmin; + use frame_support::{ match_types, parameter_types, traits::{Everything, Nothing}, @@ -38,9 +41,9 @@ use xcm_builder::{ AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, ChildParachainAsNative, ChildParachainConvertsVia, CurrencyAdapter as XcmCurrencyAdapter, DescribeBodyTerminal, DescribeFamily, FixedWeightBounds, HashedDescription, IsChildSystemParachain, IsConcrete, - MintLocation, SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, - TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, - WithUniqueTopic, + MintLocation, OriginToPluralityVoice, SignedAccountId32AsNative, SignedToAccountId32, + SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, + WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, }; use xcm_executor::XcmExecutor; @@ -110,7 +113,7 @@ pub type XcmRouter = WithUniqueTopic<( parameter_types! { pub const Roc: MultiAssetFilter = Wild(AllOf { fun: WildFungible, id: Concrete(TokenLocation::get()) }); - pub const Rockmine: MultiLocation = Parachain(1000).into_location(); + pub const AssetHub: MultiLocation = Parachain(1000).into_location(); pub const Contracts: MultiLocation = Parachain(1002).into_location(); pub const Encointer: MultiLocation = Parachain(1003).into_location(); pub const Tick: MultiLocation = Parachain(100).into_location(); @@ -119,7 +122,7 @@ parameter_types! { pub const RocForTick: (MultiAssetFilter, MultiLocation) = (Roc::get(), Tick::get()); pub const RocForTrick: (MultiAssetFilter, MultiLocation) = (Roc::get(), Trick::get()); pub const RocForTrack: (MultiAssetFilter, MultiLocation) = (Roc::get(), Track::get()); - pub const RocForRockmine: (MultiAssetFilter, MultiLocation) = (Roc::get(), Rockmine::get()); + pub const RocForAssetHub: (MultiAssetFilter, MultiLocation) = (Roc::get(), AssetHub::get()); pub const RocForContracts: (MultiAssetFilter, MultiLocation) = (Roc::get(), Contracts::get()); pub const RocForEncointer: (MultiAssetFilter, MultiLocation) = (Roc::get(), Encointer::get()); pub const MaxInstructions: u32 = 100; @@ -129,7 +132,7 @@ pub type TrustedTeleporters = ( xcm_builder::Case<RocForTick>, xcm_builder::Case<RocForTrick>, xcm_builder::Case<RocForTrack>, - xcm_builder::Case<RocForRockmine>, + xcm_builder::Case<RocForAssetHub>, xcm_builder::Case<RocForContracts>, xcm_builder::Case<RocForEncointer>, ); @@ -193,6 +196,14 @@ impl xcm_executor::Config for XcmConfig { type Aliasers = Nothing; } +parameter_types! { + pub const CollectiveBodyId: BodyId = BodyId::Unit; + // StakingAdmin pluralistic body. + pub const StakingAdminBodyId: BodyId = BodyId::Defense; + // Fellows pluralistic body.
+ pub const FellowsBodyId: BodyId = BodyId::Technical; +} + #[cfg(feature = "runtime-benchmarks")] parameter_types! { pub ReachableDest: Option<MultiLocation> = Some(Parachain(1000).into()); @@ -201,12 +212,33 @@ parameter_types! { /// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior /// location of this chain. pub type LocalOriginToLocation = ( - // A usual Signed origin to be used in XCM as a corresponding AccountId32 + // And a usual Signed origin to be used in XCM as a corresponding AccountId32 SignedToAccountId32<RuntimeOrigin, AccountId, ThisNetwork>, ); + +/// Type to convert the `StakingAdmin` origin to a Plurality `MultiLocation` value. +pub type StakingAdminToPlurality = + OriginToPluralityVoice<RuntimeOrigin, StakingAdmin, StakingAdminBodyId>; + +/// Type to convert the Fellows origin to a Plurality `MultiLocation` value. +pub type FellowsToPlurality = OriginToPluralityVoice<RuntimeOrigin, Fellows, FellowsBodyId>; + +/// Type to convert a pallet `Origin` type value into a `MultiLocation` value which represents an +/// interior location of this chain for a destination chain. +pub type LocalPalletOriginToLocation = ( + // StakingAdmin origin to be used in XCM as a corresponding Plurality `MultiLocation` value. + StakingAdminToPlurality, + // Fellows origin to be used in XCM as a corresponding Plurality `MultiLocation` value. + FellowsToPlurality, +); + impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type SendXcmOrigin = xcm_builder::EnsureXcmOrigin<RuntimeOrigin, LocalOriginToLocation>; + // We only allow the root, fellows and the staking admin to send messages. + // This is basically safe to enable for everyone (save the possibility of someone spamming the + // parachain if they're willing to pay the KSM to send from the Relay-chain), but it's useless + // until we bring in XCM v3 which will make `DescendOrigin` a bit more useful. + type SendXcmOrigin = xcm_builder::EnsureXcmOrigin<RuntimeOrigin, LocalPalletOriginToLocation>; type XcmRouter = XcmRouter; // Anyone can execute XCM messages locally.
type ExecuteXcmOrigin = xcm_builder::EnsureXcmOrigin; diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml index e23d322b5a70..de9d66660591 100644 --- a/polkadot/runtime/westend/Cargo.toml +++ b/polkadot/runtime/westend/Cargo.toml @@ -24,6 +24,7 @@ inherents = { package = "sp-inherents", path = "../../../substrate/primitives/in offchain-primitives = { package = "sp-offchain", path = "../../../substrate/primitives/offchain", default-features = false } sp-api = { path = "../../../substrate/primitives/api", default-features = false } sp-application-crypto = { path = "../../../substrate/primitives/application-crypto", default-features = false } +sp-arithmetic = { path = "../../../substrate/primitives/arithmetic", default-features = false } sp-std = { package = "sp-std", path = "../../../substrate/primitives/std", default-features = false } sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } sp-io = { path = "../../../substrate/primitives/io", default-features = false } @@ -65,10 +66,12 @@ pallet-message-queue = { path = "../../../substrate/frame/message-queue", defaul pallet-mmr = { path = "../../../substrate/frame/merkle-mountain-range", default-features = false } pallet-multisig = { path = "../../../substrate/frame/multisig", default-features = false } pallet-nomination-pools = { path = "../../../substrate/frame/nomination-pools", default-features = false } +pallet-conviction-voting = { path = "../../../substrate/frame/conviction-voting", default-features = false } pallet-offences = { path = "../../../substrate/frame/offences", default-features = false } pallet-preimage = { path = "../../../substrate/frame/preimage", default-features = false } pallet-proxy = { path = "../../../substrate/frame/proxy", default-features = false } pallet-recovery = { path = "../../../substrate/frame/recovery", default-features = false } +pallet-referenda = { path = "../../../substrate/frame/referenda", default-features = false } pallet-scheduler = { path = "../../../substrate/frame/scheduler", default-features = false } pallet-session = { path = "../../../substrate/frame/session", default-features = false } pallet-society = { path = "../../../substrate/frame/society", default-features = false } @@ -84,6 +87,7 @@ pallet-nomination-pools-runtime-api = { path = "../../../substrate/frame/nominat pallet-treasury = { path = "../../../substrate/frame/treasury", default-features = false } pallet-utility = { path = "../../../substrate/frame/utility", default-features = false } pallet-vesting = { path = "../../../substrate/frame/vesting", default-features = false } +pallet-whitelist = { path = "../../../substrate/frame/whitelist", default-features = false } pallet-xcm = { path = "../../xcm/pallet-xcm", default-features = false } pallet-xcm-benchmarks = { path = "../../xcm/pallet-xcm-benchmarks", default-features = false, optional = true } @@ -147,6 +151,7 @@ std = [ "pallet-beefy-mmr/std", "pallet-beefy/std", "pallet-collective/std", + "pallet-conviction-voting/std", "pallet-democracy/std", "pallet-election-provider-multi-phase/std", "pallet-election-provider-support-benchmarking?/std", @@ -168,6 +173,7 @@ std = [ "pallet-preimage/std", "pallet-proxy/std", "pallet-recovery/std", + "pallet-referenda/std", "pallet-scheduler/std", "pallet-session-benchmarking?/std", "pallet-session/std", @@ -182,6 +188,7 @@ std = [ "pallet-treasury/std", "pallet-utility/std", "pallet-vesting/std", + "pallet-whitelist/std", "pallet-xcm-benchmarks?/std", 
"pallet-xcm/std", "parity-scale-codec/std", @@ -195,6 +202,7 @@ std = [ "serde_derive", "sp-api/std", "sp-application-crypto/std", + "sp-arithmetic/std", "sp-core/std", "sp-genesis-builder/std", "sp-io/std", @@ -224,6 +232,7 @@ runtime-benchmarks = [ "pallet-bags-list/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-collective/runtime-benchmarks", + "pallet-conviction-voting/runtime-benchmarks", "pallet-democracy/runtime-benchmarks", "pallet-election-provider-multi-phase/runtime-benchmarks", "pallet-election-provider-support-benchmarking/runtime-benchmarks", @@ -244,6 +253,7 @@ runtime-benchmarks = [ "pallet-preimage/runtime-benchmarks", "pallet-proxy/runtime-benchmarks", "pallet-recovery/runtime-benchmarks", + "pallet-referenda/runtime-benchmarks", "pallet-scheduler/runtime-benchmarks", "pallet-session-benchmarking/runtime-benchmarks", "pallet-society/runtime-benchmarks", @@ -254,6 +264,7 @@ runtime-benchmarks = [ "pallet-treasury/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", + "pallet-whitelist/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", @@ -280,6 +291,7 @@ try-runtime = [ "pallet-beefy-mmr/try-runtime", "pallet-beefy/try-runtime", "pallet-collective/try-runtime", + "pallet-conviction-voting/try-runtime", "pallet-democracy/try-runtime", "pallet-election-provider-multi-phase/try-runtime", "pallet-elections-phragmen/try-runtime", @@ -297,6 +309,7 @@ try-runtime = [ "pallet-preimage/try-runtime", "pallet-proxy/try-runtime", "pallet-recovery/try-runtime", + "pallet-referenda/try-runtime", "pallet-scheduler/try-runtime", "pallet-session/try-runtime", "pallet-society/try-runtime", @@ -308,6 +321,7 @@ try-runtime = [ "pallet-treasury/try-runtime", "pallet-utility/try-runtime", "pallet-vesting/try-runtime", + "pallet-whitelist/try-runtime", "pallet-xcm/try-runtime", "runtime-common/try-runtime", "runtime-parachains/try-runtime", diff --git a/polkadot/runtime/westend/constants/src/lib.rs b/polkadot/runtime/westend/constants/src/lib.rs index f9830dab3325..0dd64d092c34 100644 --- a/polkadot/runtime/westend/constants/src/lib.rs +++ b/polkadot/runtime/westend/constants/src/lib.rs @@ -96,6 +96,26 @@ pub mod fee { } } +/// XCM protocol related constants. +pub mod xcm { + /// Pluralistic bodies existing within the consensus. + pub mod body { + // Preallocated for the Root body. + #[allow(dead_code)] + const ROOT_INDEX: u32 = 0; + // The bodies corresponding to the Polkadot OpenGov Origins. + pub const FELLOWSHIP_ADMIN_INDEX: u32 = 1; + } +} + +/// System Parachains. +pub mod system_parachain { + /// Statemint parachain ID. + pub const ASSET_HUB_ID: u32 = 1000; + /// Collectives parachain ID. + pub const COLLECTIVES_ID: u32 = 1001; +} + #[cfg(test)] mod tests { use super::{ diff --git a/polkadot/runtime/westend/src/governance/mod.rs b/polkadot/runtime/westend/src/governance/mod.rs new file mode 100644 index 000000000000..d027f788d71f --- /dev/null +++ b/polkadot/runtime/westend/src/governance/mod.rs @@ -0,0 +1,97 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! New governance configurations for the Westend runtime.
+
+use super::*;
+use crate::xcm_config::Collectives;
+use frame_support::{parameter_types, traits::EitherOf};
+use frame_system::EnsureRootWithSuccess;
+use pallet_xcm::{EnsureXcm, IsVoiceOfBody};
+use xcm::latest::BodyId;
+
+mod origins;
+pub use origins::{
+	pallet_custom_origins, AuctionAdmin, FellowshipAdmin, GeneralAdmin, LeaseAdmin,
+	ReferendumCanceller, ReferendumKiller, Spender, StakingAdmin, Treasurer, WhitelistedCaller,
+};
+mod tracks;
+pub use tracks::TracksInfo;
+
+parameter_types! {
+	pub const VoteLockingPeriod: BlockNumber = 7 * DAYS;
+}
+
+impl pallet_conviction_voting::Config for Runtime {
+	type WeightInfo = weights::pallet_conviction_voting::WeightInfo<Self>;
+	type RuntimeEvent = RuntimeEvent;
+	type Currency = Balances;
+	type VoteLockingPeriod = VoteLockingPeriod;
+	type MaxVotes = ConstU32<512>;
+	type MaxTurnout =
+		frame_support::traits::tokens::currency::ActiveIssuanceOf<Balances, Self::AccountId>;
+	type Polls = Referenda;
+}
+
+parameter_types! {
+	pub const AlarmInterval: BlockNumber = 1;
+	pub const SubmissionDeposit: Balance = 1 * 3 * CENTS;
+	pub const UndecidingTimeout: BlockNumber = 14 * DAYS;
+}
+
+parameter_types! {
+	pub const MaxBalance: Balance = Balance::max_value();
+}
+pub type TreasurySpender = EitherOf<EnsureRootWithSuccess<AccountId, MaxBalance>, Spender>;
+
+impl origins::pallet_custom_origins::Config for Runtime {}
+
+parameter_types! {
+	// Fellows pluralistic body.
+	pub const FellowsBodyId: BodyId = BodyId::Technical;
+}
+
+impl pallet_whitelist::Config for Runtime {
+	type WeightInfo = weights::pallet_whitelist::WeightInfo<Runtime>;
+	type RuntimeCall = RuntimeCall;
+	type RuntimeEvent = RuntimeEvent;
+	type WhitelistOrigin = EitherOfDiverse<
+		EnsureRoot<Self::AccountId>,
+		EnsureXcm<IsVoiceOfBody<Collectives, FellowsBodyId>>,
+	>;
+	type DispatchWhitelistedOrigin = EitherOf<EnsureRoot<Self::AccountId>, WhitelistedCaller>;
+	type Preimages = Preimage;
+}
+
+impl pallet_referenda::Config for Runtime {
+	type WeightInfo = weights::pallet_referenda_referenda::WeightInfo<Self>;
+	type RuntimeCall = RuntimeCall;
+	type RuntimeEvent = RuntimeEvent;
+	type Scheduler = Scheduler;
+	type Currency = Balances;
+	type SubmitOrigin = frame_system::EnsureSigned<AccountId>;
+	type CancelOrigin = EitherOf<EnsureRoot<AccountId>, ReferendumCanceller>;
+	type KillOrigin = EitherOf<EnsureRoot<AccountId>, ReferendumKiller>;
+	type Slash = Treasury;
+	type Votes = pallet_conviction_voting::VotesOf<Runtime>;
+	type Tally = pallet_conviction_voting::TallyOf<Runtime>;
+	type SubmissionDeposit = SubmissionDeposit;
+	type MaxQueued = ConstU32<100>;
+	type UndecidingTimeout = UndecidingTimeout;
+	type AlarmInterval = AlarmInterval;
+	type Tracks = TracksInfo;
+	type Preimages = Preimage;
+}
diff --git a/polkadot/runtime/westend/src/governance/origins.rs b/polkadot/runtime/westend/src/governance/origins.rs
new file mode 100644
index 000000000000..e4639f40dc43
--- /dev/null
+++ b/polkadot/runtime/westend/src/governance/origins.rs
@@ -0,0 +1,194 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! Custom origins for governance interventions.
+
+pub use pallet_custom_origins::*;
+
+#[frame_support::pallet]
+pub mod pallet_custom_origins {
+	use crate::{Balance, CENTS, GRAND};
+	use frame_support::pallet_prelude::*;
+
+	#[pallet::config]
+	pub trait Config: frame_system::Config {}
+
+	#[pallet::pallet]
+	pub struct Pallet<T>(_);
+
+	#[derive(PartialEq, Eq, Clone, MaxEncodedLen, Encode, Decode, TypeInfo, RuntimeDebug)]
+	#[pallet::origin]
+	pub enum Origin {
+		/// Origin for cancelling slashes.
+		StakingAdmin,
+		/// Origin for spending (any amount of) funds.
+		Treasurer,
+		/// Origin for managing the composition of the fellowship.
+		FellowshipAdmin,
+		/// Origin for managing the registrar.
+		GeneralAdmin,
+		/// Origin for starting auctions.
+		AuctionAdmin,
+		/// Origin able to force slot leases.
+		LeaseAdmin,
+		/// Origin able to cancel referenda.
+		ReferendumCanceller,
+		/// Origin able to kill referenda.
+		ReferendumKiller,
+		/// Origin able to spend up to 1 KSM from the treasury at once.
+		SmallTipper,
+		/// Origin able to spend up to 5 KSM from the treasury at once.
+		BigTipper,
+		/// Origin able to spend up to 50 KSM from the treasury at once.
+		SmallSpender,
+		/// Origin able to spend up to 500 KSM from the treasury at once.
+		MediumSpender,
+		/// Origin able to spend up to 5,000 KSM from the treasury at once.
+		BigSpender,
+		/// Origin able to dispatch a whitelisted call.
+		WhitelistedCaller,
+		/// Origin commanded by any members of the Polkadot Fellowship (no Dan grade needed).
+		FellowshipInitiates,
+		/// Origin commanded by Polkadot Fellows (3rd Dan fellows or greater).
+		Fellows,
+		/// Origin commanded by Polkadot Experts (5th Dan fellows or greater).
+		FellowshipExperts,
+		/// Origin commanded by Polkadot Masters (7th Dan fellows or greater).
+		FellowshipMasters,
+		/// Origin commanded by rank 1 of the Polkadot Fellowship and with a success of 1.
+		Fellowship1Dan,
+		/// Origin commanded by rank 2 of the Polkadot Fellowship and with a success of 2.
+		Fellowship2Dan,
+		/// Origin commanded by rank 3 of the Polkadot Fellowship and with a success of 3.
+		Fellowship3Dan,
+		/// Origin commanded by rank 4 of the Polkadot Fellowship and with a success of 4.
+		Fellowship4Dan,
+		/// Origin commanded by rank 5 of the Polkadot Fellowship and with a success of 5.
+		Fellowship5Dan,
+		/// Origin commanded by rank 6 of the Polkadot Fellowship and with a success of 6.
+		Fellowship6Dan,
+		/// Origin commanded by rank 7 of the Polkadot Fellowship and with a success of 7.
+		Fellowship7Dan,
+		/// Origin commanded by rank 8 of the Polkadot Fellowship and with a success of 8.
+		Fellowship8Dan,
+		/// Origin commanded by rank 9 of the Polkadot Fellowship and with a success of 9.
+		Fellowship9Dan,
+	}
+
decl_unit_ensures { + ( $name:ident: $success_type:ty = $success:expr ) => { + pub struct $name; + impl> + From> + EnsureOrigin for $name + { + type Success = $success_type; + fn try_origin(o: O) -> Result { + o.into().and_then(|o| match o { + Origin::$name => Ok($success), + r => Err(O::from(r)), + }) + } + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result { + Ok(O::from(Origin::$name)) + } + } + }; + ( $name:ident ) => { decl_unit_ensures! { $name : () = () } }; + ( $name:ident: $success_type:ty = $success:expr, $( $rest:tt )* ) => { + decl_unit_ensures! { $name: $success_type = $success } + decl_unit_ensures! { $( $rest )* } + }; + ( $name:ident, $( $rest:tt )* ) => { + decl_unit_ensures! { $name } + decl_unit_ensures! { $( $rest )* } + }; + () => {} + } + decl_unit_ensures!( + StakingAdmin, + Treasurer, + FellowshipAdmin, + GeneralAdmin, + AuctionAdmin, + LeaseAdmin, + ReferendumCanceller, + ReferendumKiller, + WhitelistedCaller, + FellowshipInitiates: u16 = 0, + Fellows: u16 = 3, + FellowshipExperts: u16 = 5, + FellowshipMasters: u16 = 7, + ); + + macro_rules! decl_ensure { + ( + $vis:vis type $name:ident: EnsureOrigin { + $( $item:ident = $success:expr, )* + } + ) => { + $vis struct $name; + impl> + From> + EnsureOrigin for $name + { + type Success = $success_type; + fn try_origin(o: O) -> Result { + o.into().and_then(|o| match o { + $( + Origin::$item => Ok($success), + )* + r => Err(O::from(r)), + }) + } + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result { + // By convention the more privileged origins go later, so for greatest chance + // of success, we want the last one. + let _result: Result = Err(()); + $( + let _result: Result = Ok(O::from(Origin::$item)); + )* + _result + } + } + } + } + + decl_ensure! { + pub type Spender: EnsureOrigin { + SmallTipper = 250 * 3 * CENTS, + BigTipper = 1 * GRAND, + SmallSpender = 10 * GRAND, + MediumSpender = 100 * GRAND, + BigSpender = 1_000 * GRAND, + Treasurer = 10_000 * GRAND, + } + } + + decl_ensure! { + pub type EnsureFellowship: EnsureOrigin { + Fellowship1Dan = 1, + Fellowship2Dan = 2, + Fellowship3Dan = 3, + Fellowship4Dan = 4, + Fellowship5Dan = 5, + Fellowship6Dan = 6, + Fellowship7Dan = 7, + Fellowship8Dan = 8, + Fellowship9Dan = 9, + } + } +} diff --git a/polkadot/runtime/westend/src/governance/tracks.rs b/polkadot/runtime/westend/src/governance/tracks.rs new file mode 100644 index 000000000000..3765569f183e --- /dev/null +++ b/polkadot/runtime/westend/src/governance/tracks.rs @@ -0,0 +1,320 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Track configurations for governance. 
+ +use super::*; + +const fn percent(x: i32) -> sp_arithmetic::FixedI64 { + sp_arithmetic::FixedI64::from_rational(x as u128, 100) +} +use pallet_referenda::Curve; +const APP_ROOT: Curve = Curve::make_reciprocal(4, 28, percent(80), percent(50), percent(100)); +const SUP_ROOT: Curve = Curve::make_linear(28, 28, percent(0), percent(50)); +const APP_STAKING_ADMIN: Curve = Curve::make_linear(17, 28, percent(50), percent(100)); +const SUP_STAKING_ADMIN: Curve = + Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50)); +const APP_TREASURER: Curve = Curve::make_reciprocal(4, 28, percent(80), percent(50), percent(100)); +const SUP_TREASURER: Curve = Curve::make_linear(28, 28, percent(0), percent(50)); +const APP_FELLOWSHIP_ADMIN: Curve = Curve::make_linear(17, 28, percent(50), percent(100)); +const SUP_FELLOWSHIP_ADMIN: Curve = + Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50)); +const APP_GENERAL_ADMIN: Curve = + Curve::make_reciprocal(4, 28, percent(80), percent(50), percent(100)); +const SUP_GENERAL_ADMIN: Curve = + Curve::make_reciprocal(7, 28, percent(10), percent(0), percent(50)); +const APP_AUCTION_ADMIN: Curve = + Curve::make_reciprocal(4, 28, percent(80), percent(50), percent(100)); +const SUP_AUCTION_ADMIN: Curve = + Curve::make_reciprocal(7, 28, percent(10), percent(0), percent(50)); +const APP_LEASE_ADMIN: Curve = Curve::make_linear(17, 28, percent(50), percent(100)); +const SUP_LEASE_ADMIN: Curve = Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50)); +const APP_REFERENDUM_CANCELLER: Curve = Curve::make_linear(17, 28, percent(50), percent(100)); +const SUP_REFERENDUM_CANCELLER: Curve = + Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50)); +const APP_REFERENDUM_KILLER: Curve = Curve::make_linear(17, 28, percent(50), percent(100)); +const SUP_REFERENDUM_KILLER: Curve = + Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50)); +const APP_SMALL_TIPPER: Curve = Curve::make_linear(10, 28, percent(50), percent(100)); +const SUP_SMALL_TIPPER: Curve = Curve::make_reciprocal(1, 28, percent(4), percent(0), percent(50)); +const APP_BIG_TIPPER: Curve = Curve::make_linear(10, 28, percent(50), percent(100)); +const SUP_BIG_TIPPER: Curve = Curve::make_reciprocal(8, 28, percent(1), percent(0), percent(50)); +const APP_SMALL_SPENDER: Curve = Curve::make_linear(17, 28, percent(50), percent(100)); +const SUP_SMALL_SPENDER: Curve = + Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50)); +const APP_MEDIUM_SPENDER: Curve = Curve::make_linear(23, 28, percent(50), percent(100)); +const SUP_MEDIUM_SPENDER: Curve = + Curve::make_reciprocal(16, 28, percent(1), percent(0), percent(50)); +const APP_BIG_SPENDER: Curve = Curve::make_linear(28, 28, percent(50), percent(100)); +const SUP_BIG_SPENDER: Curve = Curve::make_reciprocal(20, 28, percent(1), percent(0), percent(50)); +const APP_WHITELISTED_CALLER: Curve = + Curve::make_reciprocal(16, 28 * 24, percent(96), percent(50), percent(100)); +const SUP_WHITELISTED_CALLER: Curve = + Curve::make_reciprocal(1, 28, percent(20), percent(5), percent(50)); + +const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo); 15] = [ + ( + 0, + pallet_referenda::TrackInfo { + name: "root", + max_deciding: 1, + decision_deposit: 100 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 12 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: APP_ROOT, + min_support: SUP_ROOT, + }, + ), + ( + 1, + pallet_referenda::TrackInfo { + name: 
"whitelisted_caller", + max_deciding: 100, + decision_deposit: 10 * GRAND, + prepare_period: 6 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 4 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_WHITELISTED_CALLER, + min_support: SUP_WHITELISTED_CALLER, + }, + ), + ( + 10, + pallet_referenda::TrackInfo { + name: "staking_admin", + max_deciding: 10, + decision_deposit: 5 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_STAKING_ADMIN, + min_support: SUP_STAKING_ADMIN, + }, + ), + ( + 11, + pallet_referenda::TrackInfo { + name: "treasurer", + max_deciding: 10, + decision_deposit: 1 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: APP_TREASURER, + min_support: SUP_TREASURER, + }, + ), + ( + 12, + pallet_referenda::TrackInfo { + name: "lease_admin", + max_deciding: 10, + decision_deposit: 5 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_LEASE_ADMIN, + min_support: SUP_LEASE_ADMIN, + }, + ), + ( + 13, + pallet_referenda::TrackInfo { + name: "fellowship_admin", + max_deciding: 10, + decision_deposit: 5 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_FELLOWSHIP_ADMIN, + min_support: SUP_FELLOWSHIP_ADMIN, + }, + ), + ( + 14, + pallet_referenda::TrackInfo { + name: "general_admin", + max_deciding: 10, + decision_deposit: 5 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_GENERAL_ADMIN, + min_support: SUP_GENERAL_ADMIN, + }, + ), + ( + 15, + pallet_referenda::TrackInfo { + name: "auction_admin", + max_deciding: 10, + decision_deposit: 5 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_AUCTION_ADMIN, + min_support: SUP_AUCTION_ADMIN, + }, + ), + ( + 20, + pallet_referenda::TrackInfo { + name: "referendum_canceller", + max_deciding: 1_000, + decision_deposit: 10 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 14 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_REFERENDUM_CANCELLER, + min_support: SUP_REFERENDUM_CANCELLER, + }, + ), + ( + 21, + pallet_referenda::TrackInfo { + name: "referendum_killer", + max_deciding: 1_000, + decision_deposit: 50 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_REFERENDUM_KILLER, + min_support: SUP_REFERENDUM_KILLER, + }, + ), + ( + 30, + pallet_referenda::TrackInfo { + name: "small_tipper", + max_deciding: 200, + decision_deposit: 1 * 3 * CENTS, + prepare_period: 1 * MINUTES, + decision_period: 14 * MINUTES, + confirm_period: 4 * MINUTES, + min_enactment_period: 1 * MINUTES, + min_approval: APP_SMALL_TIPPER, + min_support: SUP_SMALL_TIPPER, + }, + ), + ( + 31, + pallet_referenda::TrackInfo { + name: "big_tipper", + max_deciding: 100, + decision_deposit: 10 * 3 * CENTS, + prepare_period: 4 * MINUTES, + decision_period: 14 * MINUTES, + confirm_period: 12 * MINUTES, + min_enactment_period: 3 * MINUTES, + 
min_approval: APP_BIG_TIPPER, + min_support: SUP_BIG_TIPPER, + }, + ), + ( + 32, + pallet_referenda::TrackInfo { + name: "small_spender", + max_deciding: 50, + decision_deposit: 100 * 3 * CENTS, + prepare_period: 10 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 10 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: APP_SMALL_SPENDER, + min_support: SUP_SMALL_SPENDER, + }, + ), + ( + 33, + pallet_referenda::TrackInfo { + name: "medium_spender", + max_deciding: 50, + decision_deposit: 200 * 3 * CENTS, + prepare_period: 10 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 12 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: APP_MEDIUM_SPENDER, + min_support: SUP_MEDIUM_SPENDER, + }, + ), + ( + 34, + pallet_referenda::TrackInfo { + name: "big_spender", + max_deciding: 50, + decision_deposit: 400 * 3 * CENTS, + prepare_period: 10 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 14 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: APP_BIG_SPENDER, + min_support: SUP_BIG_SPENDER, + }, + ), +]; + +pub struct TracksInfo; +impl pallet_referenda::TracksInfo for TracksInfo { + type Id = u16; + type RuntimeOrigin = ::PalletsOrigin; + fn tracks() -> &'static [(Self::Id, pallet_referenda::TrackInfo)] { + &TRACKS_DATA[..] + } + fn track_for(id: &Self::RuntimeOrigin) -> Result { + if let Ok(system_origin) = frame_system::RawOrigin::try_from(id.clone()) { + match system_origin { + frame_system::RawOrigin::Root => Ok(0), + _ => Err(()), + } + } else if let Ok(custom_origin) = origins::Origin::try_from(id.clone()) { + match custom_origin { + origins::Origin::WhitelistedCaller => Ok(1), + // General admin + origins::Origin::StakingAdmin => Ok(10), + origins::Origin::Treasurer => Ok(11), + origins::Origin::LeaseAdmin => Ok(12), + origins::Origin::FellowshipAdmin => Ok(13), + origins::Origin::GeneralAdmin => Ok(14), + origins::Origin::AuctionAdmin => Ok(15), + // Referendum admins + origins::Origin::ReferendumCanceller => Ok(20), + origins::Origin::ReferendumKiller => Ok(21), + // Limited treasury spenders + origins::Origin::SmallTipper => Ok(30), + origins::Origin::BigTipper => Ok(31), + origins::Origin::SmallSpender => Ok(32), + origins::Origin::MediumSpender => Ok(33), + origins::Origin::BigSpender => Ok(34), + _ => Err(()), + } + } else { + Err(()) + } + } +} +pallet_referenda::impl_tracksinfo_get!(TracksInfo, Balance, BlockNumber); diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index e7cae7248bda..b1231a5d95fd 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -17,7 +17,7 @@ //! The Westend runtime. This can be compiled with `#[no_std]`, ready for Wasm. #![cfg_attr(not(feature = "std"), no_std)] -// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 512. +// `construct_runtime!` does a lot of recursion and requires us to increase the limit. 
#![recursion_limit = "512"] use authority_discovery_primitives::AuthorityId as AuthorityDiscoveryId; @@ -31,9 +31,9 @@ use frame_support::{ genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ - fungible::HoldConsideration, ConstU32, Contains, EverythingBut, InstanceFilter, - KeyOwnerProofSystem, LinearStoragePrice, ProcessMessage, ProcessMessageError, - WithdrawReasons, + fungible::HoldConsideration, ConstU32, Contains, EitherOf, EitherOfDiverse, EverythingBut, + InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, ProcessMessage, + ProcessMessageError, WithdrawReasons, }, weights::{ConstantMultiplier, WeightMeter}, PalletId, @@ -80,7 +80,7 @@ use sp_runtime::{ Keccak256, OpaqueKeys, SaturatedConversion, Verify, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, Percent, + ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, Percent, Permill, }; use sp_staking::SessionIndex; use sp_std::{collections::btree_map::BTreeMap, prelude::*}; @@ -107,6 +107,13 @@ mod bag_thresholds; mod weights; pub mod xcm_config; +// Governance and configurations. +pub mod governance; +use governance::{ + pallet_custom_origins, AuctionAdmin, FellowshipAdmin, GeneralAdmin, LeaseAdmin, StakingAdmin, + Treasurer, TreasurySpender, +}; + #[cfg(test)] mod tests; @@ -122,7 +129,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westend"), impl_name: create_runtime_str!("parity-westend"), authoring_version: 2, - spec_version: 9430, + spec_version: 10020, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 22, @@ -197,7 +204,9 @@ impl pallet_scheduler::Config for Runtime { type PalletsOrigin = OriginCaller; type RuntimeCall = RuntimeCall; type MaximumWeight = MaximumSchedulerWeight; - type ScheduleOrigin = EnsureRoot; + // The goal of having ScheduleOrigin include AuctionAdmin is to allow the auctions track of + // OpenGov to schedule periodic auctions. + type ScheduleOrigin = EitherOf, AuctionAdmin>; type MaxScheduledPerBlock = MaxScheduledPerBlock; type WeightInfo = weights::pallet_scheduler::WeightInfo; type OriginPrivilegeCmp = frame_support::traits::EqualPrivilegeOnly; @@ -683,7 +692,40 @@ impl pallet_fast_unstake::Config for Runtime { } parameter_types! 
{ + pub const ProposalBond: Permill = Permill::from_percent(5); + pub const ProposalBondMinimum: Balance = 2000 * CENTS; + pub const ProposalBondMaximum: Balance = 1 * GRAND; + pub const SpendPeriod: BlockNumber = 6 * DAYS; + pub const Burn: Permill = Permill::from_perthousand(2); + pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry"); + + pub const TipCountdown: BlockNumber = 1 * DAYS; + pub const TipFindersFee: Percent = Percent::from_percent(20); + pub const TipReportDepositBase: Balance = 100 * CENTS; + pub const DataDepositPerByte: Balance = 1 * CENTS; + pub const MaxApprovals: u32 = 100; pub const MaxAuthorities: u32 = 100_000; + pub const MaxKeys: u32 = 10_000; + pub const MaxPeerInHeartbeats: u32 = 10_000; +} + +impl pallet_treasury::Config for Runtime { + type PalletId = TreasuryPalletId; + type Currency = Balances; + type ApproveOrigin = EitherOfDiverse, Treasurer>; + type RejectOrigin = EitherOfDiverse, Treasurer>; + type RuntimeEvent = RuntimeEvent; + type OnSlash = Treasury; + type ProposalBond = ProposalBond; + type ProposalBondMinimum = ProposalBondMinimum; + type ProposalBondMaximum = ProposalBondMaximum; + type SpendPeriod = SpendPeriod; + type Burn = Burn; + type BurnDestination = (); + type MaxApprovals = MaxApprovals; + type WeightInfo = weights::pallet_treasury::WeightInfo; + type SpendFunds = (); + type SpendOrigin = TreasurySpender; } impl pallet_offences::Config for Runtime { @@ -699,8 +741,6 @@ impl pallet_authority_discovery::Config for Runtime { parameter_types! { pub const NposSolutionPriority: TransactionPriority = TransactionPriority::max_value() / 2; pub const ImOnlineUnsignedPriority: TransactionPriority = TransactionPriority::max_value(); - pub const MaxKeys: u32 = 10_000; - pub const MaxPeerInHeartbeats: u32 = 10_000; } impl pallet_im_online::Config for Runtime { @@ -814,8 +854,8 @@ impl pallet_identity::Config for Runtime { type MaxSubAccounts = MaxSubAccounts; type MaxAdditionalFields = MaxAdditionalFields; type MaxRegistrars = MaxRegistrars; - type RegistrarOrigin = frame_system::EnsureRoot; - type ForceOrigin = frame_system::EnsureRoot; + type ForceOrigin = EitherOf, GeneralAdmin>; + type RegistrarOrigin = EitherOf, GeneralAdmin>; type WeightInfo = weights::pallet_identity::WeightInfo; } @@ -912,6 +952,7 @@ parameter_types! { pub enum ProxyType { Any, NonTransfer, + Governance, Staking, SudoBalances, IdentityJudgement, @@ -944,6 +985,9 @@ impl InstanceFilter for ProxyType { RuntimeCall::ImOnline(..) | RuntimeCall::Utility(..) | RuntimeCall::Identity(..) | + RuntimeCall::ConvictionVoting(..) | + RuntimeCall::Referenda(..) | + RuntimeCall::Whitelist(..) | RuntimeCall::Recovery(pallet_recovery::Call::as_recovered{..}) | RuntimeCall::Recovery(pallet_recovery::Call::vouch_recovery{..}) | RuntimeCall::Recovery(pallet_recovery::Call::claim_recovery{..}) | @@ -989,6 +1033,13 @@ impl InstanceFilter for ProxyType { RuntimeCall::Utility(..) => true, _ => false, }, + ProxyType::Governance => matches!( + c, + // OpenGov calls + RuntimeCall::ConvictionVoting(..) | + RuntimeCall::Referenda(..) | + RuntimeCall::Whitelist(..) + ), ProxyType::IdentityJudgement => matches!( c, RuntimeCall::Identity(pallet_identity::Call::provide_judgement { .. }) | @@ -1184,7 +1235,7 @@ impl parachains_slashing::Config for Runtime { parameter_types! 
{ pub const ParaDeposit: Balance = 2000 * CENTS; - pub const DataDepositPerByte: Balance = deposit(0, 1); + pub const RegistrarDataDepositPerByte: Balance = deposit(0, 1); } impl paras_registrar::Config for Runtime { @@ -1193,7 +1244,7 @@ impl paras_registrar::Config for Runtime { type Currency = Balances; type OnSwap = (Crowdloan, Slots); type ParaDeposit = ParaDeposit; - type DataDepositPerByte = DataDepositPerByte; + type DataDepositPerByte = RegistrarDataDepositPerByte; type WeightInfo = weights::runtime_common_paras_registrar::WeightInfo; } @@ -1207,7 +1258,7 @@ impl slots::Config for Runtime { type Registrar = Registrar; type LeasePeriod = LeasePeriod; type LeaseOffset = (); - type ForceOrigin = EnsureRoot; + type ForceOrigin = EitherOf, LeaseAdmin>; type WeightInfo = weights::runtime_common_slots::WeightInfo; } @@ -1247,7 +1298,7 @@ impl auctions::Config for Runtime { type EndingPeriod = EndingPeriod; type SampleLength = SampleLength; type Randomness = pallet_babe::RandomnessFromOneEpochAgo; - type InitiateOrigin = EnsureRoot; + type InitiateOrigin = EitherOf, AuctionAdmin>; type WeightInfo = weights::runtime_common_auctions::WeightInfo; } @@ -1352,6 +1403,15 @@ construct_runtime! { // Fast unstake pallet: extension to staking. FastUnstake: pallet_fast_unstake = 30, + // OpenGov + ConvictionVoting: pallet_conviction_voting::{Pallet, Call, Storage, Event} = 31, + Referenda: pallet_referenda::{Pallet, Call, Storage, Event} = 32, + Origins: pallet_custom_origins::{Origin} = 35, + Whitelist: pallet_whitelist::{Pallet, Call, Storage, Event} = 36, + + // Treasury + Treasury: pallet_treasury::{Pallet, Call, Storage, Config, Event} = 37, + // Parachains pallets. Start indices at 40 to leave room. ParachainsOrigin: parachains_origin::{Pallet, Origin} = 41, Configuration: parachains_configuration::{Pallet, Call, Storage, Config} = 42, @@ -1445,6 +1505,7 @@ pub mod migrations { UpgradeSessionKeys, parachains_configuration::migration::v9::MigrateToV9, paras_registrar::migration::VersionCheckedMigrateToV1, + pallet_referenda::migration::v1::MigrateV0ToV1, ); } @@ -1485,6 +1546,7 @@ mod benches { // Substrate [pallet_bags_list, VoterList] [pallet_balances, Balances] + [pallet_conviction_voting, ConvictionVoting] [pallet_election_provider_multi_phase, ElectionProviderMultiPhase] [frame_election_provider_support, ElectionProviderBench::] [pallet_fast_unstake, FastUnstake] @@ -1498,14 +1560,17 @@ mod benches { [pallet_preimage, Preimage] [pallet_proxy, Proxy] [pallet_recovery, Recovery] + [pallet_referenda, Referenda] [pallet_scheduler, Scheduler] [pallet_session, SessionBench::] [pallet_staking, Staking] [pallet_sudo, Sudo] [frame_system, SystemBench::] [pallet_timestamp, Timestamp] + [pallet_treasury, Treasury] [pallet_utility, Utility] [pallet_vesting, Vesting] + [pallet_whitelist, Whitelist] // XCM [pallet_xcm, XcmPallet] // NOTE: Make sure you point to the individual modules below. @@ -2059,13 +2124,13 @@ sp_api::impl_runtime_apis! { AssetId::*, Fungibility::*, InteriorMultiLocation, Junction, Junctions::*, MultiAsset, MultiAssets, MultiLocation, NetworkId, Response, }; - use xcm_config::{Westmint, TokenLocation}; + use xcm_config::{AssetHub, TokenLocation}; impl pallet_xcm_benchmarks::Config for Runtime { type XcmConfig = xcm_config::XcmConfig; type AccountIdConverter = xcm_config::LocationConverter; fn valid_destination() -> Result { - Ok(Westmint::get()) + Ok(AssetHub::get()) } fn worst_case_holding(_depositable_count: u32) -> MultiAssets { // Westend only knows about WND. 
@@ -2078,7 +2143,7 @@ sp_api::impl_runtime_apis! { parameter_types! { pub const TrustedTeleporter: Option<(MultiLocation, MultiAsset)> = Some(( - Westmint::get(), + AssetHub::get(), MultiAsset { fun: Fungible(1 * UNITS), id: Concrete(TokenLocation::get()) }, )); pub const TrustedReserve: Option<(MultiLocation, MultiAsset)> = None; @@ -2117,15 +2182,15 @@ sp_api::impl_runtime_apis! { } fn transact_origin_and_runtime_call() -> Result<(MultiLocation, RuntimeCall), BenchmarkError> { - Ok((Westmint::get(), frame_system::Call::remark_with_event { remark: vec![] }.into())) + Ok((AssetHub::get(), frame_system::Call::remark_with_event { remark: vec![] }.into())) } fn subscribe_origin() -> Result { - Ok(Westmint::get()) + Ok(AssetHub::get()) } fn claimable_asset() -> Result<(MultiLocation, MultiLocation, MultiAssets), BenchmarkError> { - let origin = Westmint::get(); + let origin = AssetHub::get(); let assets: MultiAssets = (Concrete(TokenLocation::get()), 1_000 * UNITS).into(); let ticket = MultiLocation { parents: 0, interior: Here }; Ok((origin, ticket, assets)) diff --git a/polkadot/runtime/westend/src/weights/mod.rs b/polkadot/runtime/westend/src/weights/mod.rs index 531de5527de5..faa94bcac586 100644 --- a/polkadot/runtime/westend/src/weights/mod.rs +++ b/polkadot/runtime/westend/src/weights/mod.rs @@ -19,6 +19,7 @@ pub mod frame_election_provider_support; pub mod frame_system; pub mod pallet_bags_list; pub mod pallet_balances; +pub mod pallet_conviction_voting; pub mod pallet_election_provider_multi_phase; pub mod pallet_fast_unstake; pub mod pallet_identity; @@ -29,13 +30,17 @@ pub mod pallet_multisig; pub mod pallet_nomination_pools; pub mod pallet_preimage; pub mod pallet_proxy; +pub mod pallet_referenda_fellowship_referenda; +pub mod pallet_referenda_referenda; pub mod pallet_scheduler; pub mod pallet_session; pub mod pallet_staking; pub mod pallet_sudo; pub mod pallet_timestamp; +pub mod pallet_treasury; pub mod pallet_utility; pub mod pallet_vesting; +pub mod pallet_whitelist; pub mod pallet_xcm; pub mod runtime_common_assigned_slots; pub mod runtime_common_auctions; diff --git a/polkadot/runtime/westend/src/weights/pallet_conviction_voting.rs b/polkadot/runtime/westend/src/weights/pallet_conviction_voting.rs new file mode 100644 index 000000000000..8965a7392ed2 --- /dev/null +++ b/polkadot/runtime/westend/src/weights/pallet_conviction_voting.rs @@ -0,0 +1,194 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `pallet_conviction_voting` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-xerhrdyb-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
EXECUTION: `Some(Wasm)`, WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --pallet=pallet_conviction_voting +// --chain=westend-dev +// --header=./file_header.txt +// --output=./runtime/westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_conviction_voting`. +pub struct WeightInfo(PhantomData); +impl pallet_conviction_voting::WeightInfo for WeightInfo { + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(311), added: 2786, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn vote_new() -> Weight { + // Proof Size summary in bytes: + // Measured: `13445` + // Estimated: `42428` + // Minimum execution time: 152_223_000 picoseconds. + Weight::from_parts(162_148_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(311), added: 2786, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn vote_existing() -> Weight { + // Proof Size summary in bytes: + // Measured: `14166` + // Estimated: `83866` + // Minimum execution time: 220_361_000 picoseconds. 
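+		// Costlier than `vote_new` above: per the storage summary, two
+		// `Scheduler::Agenda` entries are read and written here, consistent with
+		// the referendum's alarm being moved from one block to another.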
+ Weight::from_parts(236_478_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn remove_vote() -> Weight { + // Proof Size summary in bytes: + // Measured: `13918` + // Estimated: `83866` + // Minimum execution time: 198_787_000 picoseconds. + Weight::from_parts(204_983_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + fn remove_other_vote() -> Weight { + // Proof Size summary in bytes: + // Measured: `13004` + // Estimated: `30706` + // Minimum execution time: 88_469_000 picoseconds. + Weight::from_parts(95_942_000, 0) + .saturating_add(Weight::from_parts(0, 30706)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ConvictionVoting::VotingFor` (r:2 w:2) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:512 w:512) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(311), added: 2786, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 512]`. + fn delegate(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `29640 + r * (365 ±0)` + // Estimated: `83866 + r * (3411 ±0)` + // Minimum execution time: 79_951_000 picoseconds. 
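+		// The constant below is the regression intercept; each of the up to 512
+		// delegated referenda then adds ~43_973_863 ps of ref_time plus one DB
+		// read and one write (the `saturating_mul(r.into())` terms below).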
+ Weight::from_parts(1_844_983_097, 0) + .saturating_add(Weight::from_parts(0, 83866)) + // Standard Error: 160_158 + .saturating_add(Weight::from_parts(43_973_863, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes(6)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 3411).saturating_mul(r.into())) + } + /// Storage: `ConvictionVoting::VotingFor` (r:2 w:2) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:512 w:512) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 512]`. + fn undelegate(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `29555 + r * (365 ±0)` + // Estimated: `83866 + r * (3411 ±0)` + // Minimum execution time: 47_976_000 picoseconds. + Weight::from_parts(1_877_857_335, 0) + .saturating_add(Weight::from_parts(0, 83866)) + // Standard Error: 168_477 + .saturating_add(Weight::from_parts(43_303_902, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 3411).saturating_mul(r.into())) + } + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(311), added: 2786, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + fn unlock() -> Weight { + // Proof Size summary in bytes: + // Measured: `12218` + // Estimated: `30706` + // Minimum execution time: 102_868_000 picoseconds. + Weight::from_parts(110_438_000, 0) + .saturating_add(Weight::from_parts(0, 30706)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/polkadot/runtime/westend/src/weights/pallet_referenda_fellowship_referenda.rs b/polkadot/runtime/westend/src/weights/pallet_referenda_fellowship_referenda.rs new file mode 100644 index 000000000000..a4ac06679116 --- /dev/null +++ b/polkadot/runtime/westend/src/weights/pallet_referenda_fellowship_referenda.rs @@ -0,0 +1,525 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `pallet_referenda` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-06-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-e8ezs4ez-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot +// benchmark +// pallet +// --chain=kusama-dev +// --steps=50 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_referenda +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --header=./file_header.txt +// --output=./runtime/kusama/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_referenda`. +pub struct WeightInfo(PhantomData); +impl pallet_referenda::WeightInfo for WeightInfo { + /// Storage: FellowshipCollective Members (r:1 w:0) + /// Proof: FellowshipCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda ReferendumCount (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda ReferendumInfoFor (r:0 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + fn submit() -> Weight { + // Proof Size summary in bytes: + // Measured: `327` + // Estimated: `42428` + // Minimum execution time: 28_969_000 picoseconds. + Weight::from_parts(30_902_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn place_decision_deposit_preparing() -> Weight { + // Proof Size summary in bytes: + // Measured: `404` + // Estimated: `83866` + // Minimum execution time: 53_500_000 picoseconds. 
+ Weight::from_parts(54_447_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda DecidingCount (r:1 w:0) + /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda TrackQueue (r:1 w:1) + /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn place_decision_deposit_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `2042` + // Estimated: `42428` + // Minimum execution time: 114_321_000 picoseconds. + Weight::from_parts(122_607_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda DecidingCount (r:1 w:0) + /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda TrackQueue (r:1 w:1) + /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn place_decision_deposit_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `2083` + // Estimated: `42428` + // Minimum execution time: 113_476_000 picoseconds. + Weight::from_parts(120_078_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda DecidingCount (r:1 w:1) + /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: FellowshipCollective MemberCount (r:1 w:0) + /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn place_decision_deposit_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `774` + // Estimated: `83866` + // Minimum execution time: 194_798_000 picoseconds. 
+ Weight::from_parts(208_378_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda DecidingCount (r:1 w:1) + /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: FellowshipCollective MemberCount (r:1 w:0) + /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn place_decision_deposit_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `639` + // Estimated: `83866` + // Minimum execution time: 69_502_000 picoseconds. + Weight::from_parts(71_500_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + fn refund_decision_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `317` + // Estimated: `4365` + // Minimum execution time: 30_561_000 picoseconds. + Weight::from_parts(31_427_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + fn refund_submission_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `167` + // Estimated: `4365` + // Minimum execution time: 14_535_000 picoseconds. + Weight::from_parts(14_999_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn cancel() -> Weight { + // Proof Size summary in bytes: + // Measured: `349` + // Estimated: `83866` + // Minimum execution time: 38_532_000 picoseconds. 
+ Weight::from_parts(39_361_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda MetadataOf (r:1 w:0) + /// Proof: FellowshipReferenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn kill() -> Weight { + // Proof Size summary in bytes: + // Measured: `450` + // Estimated: `83866` + // Minimum execution time: 78_956_000 picoseconds. + Weight::from_parts(80_594_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: FellowshipReferenda TrackQueue (r:1 w:0) + /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda DecidingCount (r:1 w:1) + /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + fn one_fewer_deciding_queue_empty() -> Weight { + // Proof Size summary in bytes: + // Measured: `140` + // Estimated: `4277` + // Minimum execution time: 9_450_000 picoseconds. + Weight::from_parts(9_881_000, 0) + .saturating_add(Weight::from_parts(0, 4277)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: FellowshipReferenda TrackQueue (r:1 w:1) + /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipCollective MemberCount (r:1 w:0) + /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn one_fewer_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `2376` + // Estimated: `42428` + // Minimum execution time: 98_126_000 picoseconds. 
+ Weight::from_parts(102_511_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: FellowshipReferenda TrackQueue (r:1 w:1) + /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipCollective MemberCount (r:1 w:0) + /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn one_fewer_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `2362` + // Estimated: `42428` + // Minimum execution time: 99_398_000 picoseconds. + Weight::from_parts(104_045_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:0) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda TrackQueue (r:1 w:1) + /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) + fn nudge_referendum_requeued_insertion() -> Weight { + // Proof Size summary in bytes: + // Measured: `1807` + // Estimated: `4365` + // Minimum execution time: 43_734_000 picoseconds. + Weight::from_parts(46_962_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:0) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda TrackQueue (r:1 w:1) + /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) + fn nudge_referendum_requeued_slide() -> Weight { + // Proof Size summary in bytes: + // Measured: `1774` + // Estimated: `4365` + // Minimum execution time: 42_863_000 picoseconds. + Weight::from_parts(46_241_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda DecidingCount (r:1 w:0) + /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda TrackQueue (r:1 w:1) + /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) + fn nudge_referendum_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `1790` + // Estimated: `4365` + // Minimum execution time: 57_511_000 picoseconds. 
+ Weight::from_parts(64_027_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda DecidingCount (r:1 w:0) + /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda TrackQueue (r:1 w:1) + /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) + fn nudge_referendum_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `1831` + // Estimated: `4365` + // Minimum execution time: 56_726_000 picoseconds. + Weight::from_parts(61_962_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_no_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `301` + // Estimated: `42428` + // Minimum execution time: 24_870_000 picoseconds. + Weight::from_parts(25_837_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_preparing() -> Weight { + // Proof Size summary in bytes: + // Measured: `349` + // Estimated: `42428` + // Minimum execution time: 25_297_000 picoseconds. + Weight::from_parts(26_086_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + fn nudge_referendum_timed_out() -> Weight { + // Proof Size summary in bytes: + // Measured: `208` + // Estimated: `4365` + // Minimum execution time: 16_776_000 picoseconds. 
+ Weight::from_parts(17_396_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda DecidingCount (r:1 w:1) + /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: FellowshipCollective MemberCount (r:1 w:0) + /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_begin_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `584` + // Estimated: `42428` + // Minimum execution time: 37_780_000 picoseconds. + Weight::from_parts(38_626_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda DecidingCount (r:1 w:1) + /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: FellowshipCollective MemberCount (r:1 w:0) + /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_begin_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `719` + // Estimated: `42428` + // Minimum execution time: 85_265_000 picoseconds. + Weight::from_parts(89_986_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipCollective MemberCount (r:1 w:0) + /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_begin_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `770` + // Estimated: `42428` + // Minimum execution time: 143_283_000 picoseconds. 
+ Weight::from_parts(158_540_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipCollective MemberCount (r:1 w:0) + /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_end_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `755` + // Estimated: `42428` + // Minimum execution time: 143_736_000 picoseconds. + Weight::from_parts(162_755_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipCollective MemberCount (r:1 w:0) + /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_continue_not_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `770` + // Estimated: `42428` + // Minimum execution time: 139_021_000 picoseconds. + Weight::from_parts(157_398_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipCollective MemberCount (r:1 w:0) + /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_continue_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `776` + // Estimated: `42428` + // Minimum execution time: 78_530_000 picoseconds. 
+ Weight::from_parts(83_556_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipCollective MemberCount (r:1 w:0) + /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: Scheduler Lookup (r:1 w:1) + /// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) + fn nudge_referendum_approved() -> Weight { + // Proof Size summary in bytes: + // Measured: `776` + // Estimated: `83866` + // Minimum execution time: 174_165_000 picoseconds. + Weight::from_parts(188_496_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipCollective MemberCount (r:1 w:0) + /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_rejected() -> Weight { + // Proof Size summary in bytes: + // Measured: `772` + // Estimated: `42428` + // Minimum execution time: 142_964_000 picoseconds. + Weight::from_parts(157_257_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:0) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: Preimage StatusFor (r:1 w:0) + /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda MetadataOf (r:0 w:1) + /// Proof: FellowshipReferenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn set_some_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `352` + // Estimated: `4365` + // Minimum execution time: 20_126_000 picoseconds. + Weight::from_parts(20_635_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:0) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda MetadataOf (r:1 w:1) + /// Proof: FellowshipReferenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn clear_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `285` + // Estimated: `4365` + // Minimum execution time: 17_716_000 picoseconds. 
+ Weight::from_parts(18_324_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/polkadot/runtime/westend/src/weights/pallet_referenda_referenda.rs b/polkadot/runtime/westend/src/weights/pallet_referenda_referenda.rs new file mode 100644 index 000000000000..accaa0ef10d9 --- /dev/null +++ b/polkadot/runtime/westend/src/weights/pallet_referenda_referenda.rs @@ -0,0 +1,523 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <https://www.gnu.org/licenses/>. + +//! Autogenerated weights for `pallet_referenda` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-06-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-e8ezs4ez-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot +// benchmark +// pallet +// --chain=kusama-dev +// --steps=50 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_referenda +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --header=./file_header.txt +// --output=./runtime/kusama/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_referenda`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_referenda::WeightInfo for WeightInfo<T> { + /// Storage: Referenda ReferendumCount (r:1 w:1) + /// Proof: Referenda ReferendumCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: Referenda ReferendumInfoFor (r:0 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + fn submit() -> Weight { + // Proof Size summary in bytes: + // Measured: `186` + // Estimated: `42428` + // Minimum execution time: 39_146_000 picoseconds.
+ Weight::from_parts(40_383_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn place_decision_deposit_preparing() -> Weight { + // Proof Size summary in bytes: + // Measured: `439` + // Estimated: `83866` + // Minimum execution time: 51_385_000 picoseconds. + Weight::from_parts(52_701_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:0) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Referenda TrackQueue (r:1 w:1) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn place_decision_deposit_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `3196` + // Estimated: `42428` + // Minimum execution time: 70_018_000 picoseconds. + Weight::from_parts(75_868_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:0) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Referenda TrackQueue (r:1 w:1) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn place_decision_deposit_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `3216` + // Estimated: `42428` + // Minimum execution time: 69_311_000 picoseconds. 
+ Weight::from_parts(72_425_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:1) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn place_decision_deposit_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `439` + // Estimated: `83866` + // Minimum execution time: 64_385_000 picoseconds. + Weight::from_parts(66_178_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:1) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn place_decision_deposit_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `439` + // Estimated: `83866` + // Minimum execution time: 62_200_000 picoseconds. + Weight::from_parts(63_782_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + fn refund_decision_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `279` + // Estimated: `4401` + // Minimum execution time: 29_677_000 picoseconds. + Weight::from_parts(30_603_000, 0) + .saturating_add(Weight::from_parts(0, 4401)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + fn refund_submission_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `269` + // Estimated: `4401` + // Minimum execution time: 29_897_000 picoseconds. 
+ Weight::from_parts(30_618_000, 0) + .saturating_add(Weight::from_parts(0, 4401)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn cancel() -> Weight { + // Proof Size summary in bytes: + // Measured: `347` + // Estimated: `83866` + // Minimum execution time: 37_697_000 picoseconds. + Weight::from_parts(38_953_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: Referenda MetadataOf (r:1 w:0) + /// Proof: Referenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn kill() -> Weight { + // Proof Size summary in bytes: + // Measured: `588` + // Estimated: `83866` + // Minimum execution time: 106_001_000 picoseconds. + Weight::from_parts(107_102_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda TrackQueue (r:1 w:0) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:1) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + fn one_fewer_deciding_queue_empty() -> Weight { + // Proof Size summary in bytes: + // Measured: `102` + // Estimated: `5477` + // Minimum execution time: 8_987_000 picoseconds. + Weight::from_parts(9_431_000, 0) + .saturating_add(Weight::from_parts(0, 5477)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Referenda TrackQueue (r:1 w:1) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn one_fewer_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `3116` + // Estimated: `42428` + // Minimum execution time: 55_344_000 picoseconds. 
+ Weight::from_parts(58_026_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda TrackQueue (r:1 w:1) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn one_fewer_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `3116` + // Estimated: `42428` + // Minimum execution time: 57_003_000 picoseconds. + Weight::from_parts(60_347_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:0) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda TrackQueue (r:1 w:1) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + fn nudge_referendum_requeued_insertion() -> Weight { + // Proof Size summary in bytes: + // Measured: `2939` + // Estimated: `5477` + // Minimum execution time: 23_001_000 picoseconds. + Weight::from_parts(24_812_000, 0) + .saturating_add(Weight::from_parts(0, 5477)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:0) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda TrackQueue (r:1 w:1) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + fn nudge_referendum_requeued_slide() -> Weight { + // Proof Size summary in bytes: + // Measured: `2939` + // Estimated: `5477` + // Minimum execution time: 23_299_000 picoseconds. + Weight::from_parts(24_465_000, 0) + .saturating_add(Weight::from_parts(0, 5477)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:0) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Referenda TrackQueue (r:1 w:1) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + fn nudge_referendum_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `2943` + // Estimated: `5477` + // Minimum execution time: 28_223_000 picoseconds. 
+ Weight::from_parts(29_664_000, 0) + .saturating_add(Weight::from_parts(0, 5477)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:0) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Referenda TrackQueue (r:1 w:1) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + fn nudge_referendum_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `2963` + // Estimated: `5477` + // Minimum execution time: 27_474_000 picoseconds. + Weight::from_parts(29_072_000, 0) + .saturating_add(Weight::from_parts(0, 5477)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_no_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `299` + // Estimated: `42428` + // Minimum execution time: 24_405_000 picoseconds. + Weight::from_parts(25_184_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_preparing() -> Weight { + // Proof Size summary in bytes: + // Measured: `347` + // Estimated: `42428` + // Minimum execution time: 24_572_000 picoseconds. + Weight::from_parts(25_287_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + fn nudge_referendum_timed_out() -> Weight { + // Proof Size summary in bytes: + // Measured: `206` + // Estimated: `4401` + // Minimum execution time: 16_042_000 picoseconds. 
+ Weight::from_parts(16_610_000, 0) + .saturating_add(Weight::from_parts(0, 4401)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:1) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_begin_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `347` + // Estimated: `42428` + // Minimum execution time: 33_639_000 picoseconds. + Weight::from_parts(34_749_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:1) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_begin_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `347` + // Estimated: `42428` + // Minimum execution time: 36_467_000 picoseconds. + Weight::from_parts(37_693_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_begin_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `400` + // Estimated: `42428` + // Minimum execution time: 29_857_000 picoseconds. 
+ Weight::from_parts(30_840_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_end_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `383` + // Estimated: `42428` + // Minimum execution time: 31_028_000 picoseconds. + Weight::from_parts(32_154_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_continue_not_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `400` + // Estimated: `42428` + // Minimum execution time: 28_594_000 picoseconds. + Weight::from_parts(29_092_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_continue_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `404` + // Estimated: `42428` + // Minimum execution time: 27_246_000 picoseconds. + Weight::from_parts(28_003_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: Scheduler Lookup (r:1 w:1) + /// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) + fn nudge_referendum_approved() -> Weight { + // Proof Size summary in bytes: + // Measured: `404` + // Estimated: `83866` + // Minimum execution time: 43_426_000 picoseconds. 
+ Weight::from_parts(44_917_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_rejected() -> Weight { + // Proof Size summary in bytes: + // Measured: `400` + // Estimated: `42428` + // Minimum execution time: 30_285_000 picoseconds. + Weight::from_parts(31_575_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:0) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Preimage StatusFor (r:1 w:0) + /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: Referenda MetadataOf (r:0 w:1) + /// Proof: Referenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn set_some_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `350` + // Estimated: `4401` + // Minimum execution time: 19_254_000 picoseconds. + Weight::from_parts(19_855_000, 0) + .saturating_add(Weight::from_parts(0, 4401)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:0) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda MetadataOf (r:1 w:1) + /// Proof: Referenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn clear_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `283` + // Estimated: `4401` + // Minimum execution time: 16_957_000 picoseconds. + Weight::from_parts(17_556_000, 0) + .saturating_add(Weight::from_parts(0, 4401)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/polkadot/runtime/westend/src/weights/pallet_treasury.rs b/polkadot/runtime/westend/src/weights/pallet_treasury.rs new file mode 100644 index 000000000000..e2eb6abfc7bb --- /dev/null +++ b/polkadot/runtime/westend/src/weights/pallet_treasury.rs @@ -0,0 +1,150 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <https://www.gnu.org/licenses/>. + +//! Autogenerated weights for `pallet_treasury` +//!
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-13, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-o7yfgx5n-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --pallet=pallet_treasury +// --chain=westend-dev +// --header=./file_header.txt +// --output=./runtime/westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_treasury`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_treasury::WeightInfo for WeightInfo<T> { + /// Storage: `Treasury::ProposalCount` (r:1 w:1) + /// Proof: `Treasury::ProposalCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Proposals` (r:0 w:1) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + fn spend() -> Weight { + // Proof Size summary in bytes: + // Measured: `6` + // Estimated: `1887` + // Minimum execution time: 13_644_000 picoseconds. + Weight::from_parts(13_988_000, 0) + .saturating_add(Weight::from_parts(0, 1887)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Treasury::ProposalCount` (r:1 w:1) + /// Proof: `Treasury::ProposalCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Proposals` (r:0 w:1) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + fn propose_spend() -> Weight { + // Proof Size summary in bytes: + // Measured: `107` + // Estimated: `1489` + // Minimum execution time: 26_304_000 picoseconds. + Weight::from_parts(26_850_000, 0) + .saturating_add(Weight::from_parts(0, 1489)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Treasury::Proposals` (r:1 w:1) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn reject_proposal() -> Weight { + // Proof Size summary in bytes: + // Measured: `265` + // Estimated: `3593` + // Minimum execution time: 40_318_000 picoseconds.
+ Weight::from_parts(41_598_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Treasury::Proposals` (r:1 w:0) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// The range of component `p` is `[0, 99]`. + fn approve_proposal(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `433 + p * (8 ±0)` + // Estimated: `3573` + // Minimum execution time: 8_250_000 picoseconds. + Weight::from_parts(10_937_873, 0) + .saturating_add(Weight::from_parts(0, 3573)) + // Standard Error: 1_239 + .saturating_add(Weight::from_parts(82_426, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + fn remove_approval() -> Weight { + // Proof Size summary in bytes: + // Measured: `90` + // Estimated: `1887` + // Minimum execution time: 6_170_000 picoseconds. + Weight::from_parts(6_366_000, 0) + .saturating_add(Weight::from_parts(0, 1887)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Treasury::Deactivated` (r:1 w:1) + /// Proof: `Treasury::Deactivated` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Balances::InactiveIssuance` (r:1 w:1) + /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Proposals` (r:100 w:100) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:200 w:200) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `p` is `[0, 100]`. + fn on_initialize_proposals(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `175 + p * (251 ±0)` + // Estimated: `1887 + p * (5206 ±0)` + // Minimum execution time: 39_691_000 picoseconds. + Weight::from_parts(29_703_313, 0) + .saturating_add(Weight::from_parts(0, 1887)) + // Standard Error: 18_540 + .saturating_add(Weight::from_parts(42_601_290, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(p.into()))) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(p.into()))) + .saturating_add(Weight::from_parts(0, 5206).saturating_mul(p.into())) + } +} diff --git a/polkadot/runtime/westend/src/weights/pallet_whitelist.rs b/polkadot/runtime/westend/src/weights/pallet_whitelist.rs new file mode 100644 index 000000000000..6177ac799e6a --- /dev/null +++ b/polkadot/runtime/westend/src/weights/pallet_whitelist.rs @@ -0,0 +1,116 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <https://www.gnu.org/licenses/>. + +//! Autogenerated weights for `pallet_whitelist` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-o7yfgx5n-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --pallet=pallet_whitelist +// --chain=westend-dev +// --header=./file_header.txt +// --output=./runtime/westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_whitelist`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_whitelist::WeightInfo for WeightInfo<T> { + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + fn whitelist_call() -> Weight { + // Proof Size summary in bytes: + // Measured: `122` + // Estimated: `3556` + // Minimum execution time: 21_188_000 picoseconds. + Weight::from_parts(21_804_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + fn remove_whitelisted_call() -> Weight { + // Proof Size summary in bytes: + // Measured: `251` + // Estimated: `3556` + // Minimum execution time: 17_655_000 picoseconds.
+ Weight::from_parts(19_443_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:1 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`) + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 4194294]`. + fn dispatch_whitelisted_call(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `327 + n * (1 ±0)` + // Estimated: `3791 + n * (1 ±0)` + // Minimum execution time: 30_540_000 picoseconds. + Weight::from_parts(30_886_000, 0) + .saturating_add(Weight::from_parts(0, 3791)) + // Standard Error: 9 + .saturating_add(Weight::from_parts(1_779, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) + } + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 10000]`. + fn dispatch_whitelisted_call_with_preimage(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `251` + // Estimated: `3556` + // Minimum execution time: 21_082_000 picoseconds. + Weight::from_parts(21_922_294, 0) + .saturating_add(Weight::from_parts(0, 3556)) + // Standard Error: 4 + .saturating_add(Weight::from_parts(1_412, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/polkadot/runtime/westend/src/xcm_config.rs b/polkadot/runtime/westend/src/xcm_config.rs index 92dcee150aab..66a2e2230ccd 100644 --- a/polkadot/runtime/westend/src/xcm_config.rs +++ b/polkadot/runtime/westend/src/xcm_config.rs @@ -17,26 +17,31 @@ //! XCM configurations for Westend. 
use super::{ - parachains_origin, weights, AccountId, AllPalletsWithSystem, Balances, Dmp, ParaId, Runtime, - RuntimeCall, RuntimeEvent, RuntimeOrigin, TransactionByteFee, WeightToFee, XcmPallet, + parachains_origin, AccountId, AllPalletsWithSystem, Balances, Dmp, FellowshipAdmin, + GeneralAdmin, ParaId, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, StakingAdmin, + TransactionByteFee, WeightToFee, XcmPallet, }; + use frame_support::{ - parameter_types, + match_types, parameter_types, traits::{Everything, Nothing}, }; use frame_system::EnsureRoot; +use pallet_xcm::XcmPassthrough; use runtime_common::{ xcm_sender::{ChildParachainRouter, ExponentialPrice}, ToAuthor, }; use sp_core::ConstU32; -use westend_runtime_constants::currency::CENTS; +use westend_runtime_constants::{ + currency::CENTS, system_parachain::*, xcm::body::FELLOWSHIP_ADMIN_INDEX, +}; use xcm::latest::prelude::*; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, ChildParachainAsNative, ChildParachainConvertsVia, CurrencyAdapter as XcmCurrencyAdapter, DescribeBodyTerminal, - DescribeFamily, HashedDescription, IsChildSystemParachain, IsConcrete, MintLocation, + DescribeFamily, HashedDescription, IsConcrete, MintLocation, OriginToPluralityVoice, SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, }; @@ -45,7 +50,7 @@ use xcm_executor::XcmExecutor; parameter_types! { pub const TokenLocation: MultiLocation = Here.into_location(); pub const ThisNetwork: NetworkId = Westend; - pub UniversalLocation: InteriorMultiLocation = ThisNetwork::get().into(); + pub const UniversalLocation: InteriorMultiLocation = X1(GlobalConsensus(ThisNetwork::get())); pub CheckAccount: AccountId = XcmPallet::check_account(); pub LocalCheckAccount: (AccountId, MintLocation) = (CheckAccount::get(), MintLocation::Local); /// The asset ID for the asset that we use to pay for message delivery fees. @@ -77,9 +82,17 @@ pub type LocalAssetTransactor = XcmCurrencyAdapter< >; type LocalOriginConverter = ( + // If the origin kind is `Sovereign`, then return a `Signed` origin with the account determined + // by the `LocationConverter` converter. SovereignSignedViaLocation, + // If the origin kind is `Native` and the XCM origin is a child parachain, then we can express + // it with the special `parachains_origin::Origin` origin variant. ChildParachainAsNative, + // If the origin kind is `Native` and the XCM origin is the `AccountId32` location, then it can + // be expressed using the `Signed` origin variant. SignedAccountId32AsNative, + // Xcm origins can be represented natively under the Xcm pallet's Xcm origin. + XcmPassthrough, ); /// The XCM router. When we want to send an XCM message, we use this type. It amalgamates all of our @@ -94,22 +107,27 @@ pub type XcmRouter = WithUniqueTopic<( )>; parameter_types! 
{
-	pub const Westmint: MultiLocation = Parachain(1000).into_location();
-	pub const Collectives: MultiLocation = Parachain(1001).into_location();
 	pub const Wnd: MultiAssetFilter = Wild(AllOf { fun: WildFungible, id: Concrete(TokenLocation::get()) });
-	pub const WndForWestmint: (MultiAssetFilter, MultiLocation) = (Wnd::get(), Westmint::get());
+	pub const AssetHub: MultiLocation = Parachain(ASSET_HUB_ID).into_location();
+	pub const WndForAssetHub: (MultiAssetFilter, MultiLocation) = (Wnd::get(), AssetHub::get());
+	pub const Collectives: MultiLocation = Parachain(COLLECTIVES_ID).into_location();
 	pub const WndForCollectives: (MultiAssetFilter, MultiLocation) = (Wnd::get(), Collectives::get());
 	pub const MaxInstructions: u32 = 100;
 	pub const MaxAssetsIntoHolding: u32 = 64;
 }
 
-#[cfg(feature = "runtime-benchmarks")]
-parameter_types! {
-	pub ReachableDest: Option<MultiLocation> = Some(Parachain(1000).into());
-}
-
 pub type TrustedTeleporters =
-	(xcm_builder::Case<WndForWestmint>, xcm_builder::Case<WndForCollectives>);
+	(xcm_builder::Case<WndForAssetHub>, xcm_builder::Case<WndForCollectives>);
+
+match_types! {
+	pub type OnlyParachains: impl Contains<MultiLocation> = {
+		MultiLocation { parents: 0, interior: X1(Parachain(_)) }
+	};
+	pub type CollectivesOrFellows: impl Contains<MultiLocation> = {
+		MultiLocation { parents: 0, interior: X1(Parachain(COLLECTIVES_ID)) } |
+		MultiLocation { parents: 0, interior: X2(Parachain(COLLECTIVES_ID), Plurality { id: BodyId::Technical, .. }) }
+	};
+}
 
 /// The barriers one of which must be passed for an XCM message to be executed.
 pub type Barrier = TrailingSetTopicAsId<(
@@ -121,10 +139,10 @@ pub type Barrier = TrailingSetTopicAsId<(
 	(
 		// If the message is one that immediately attempts to pay for execution, then allow it.
 		AllowTopLevelPaidExecutionFrom<Everything>,
-		// Messages coming from system parachains need not pay for execution.
-		AllowExplicitUnpaidExecutionFrom<IsChildSystemParachain<ParaId>>,
 		// Subscriptions for version tracking are OK.
-		AllowSubscriptionsFrom<Everything>,
+		AllowSubscriptionsFrom<OnlyParachains>,
+		// Collectives and Fellows plurality get free execution.
+		AllowExplicitUnpaidExecutionFrom<CollectivesOrFellows>,
 	),
 	UniversalLocation,
 	ConstU32<8>,
@@ -141,8 +159,11 @@ impl xcm_executor::Config for XcmConfig {
 	type IsTeleporter = TrustedTeleporters;
 	type UniversalLocation = UniversalLocation;
 	type Barrier = Barrier;
-	type Weigher =
-		WeightInfoBounds<crate::weights::xcm::WestendXcmWeight<RuntimeCall>, RuntimeCall, MaxInstructions>;
+	type Weigher = WeightInfoBounds<
+		crate::weights::xcm::WestendXcmWeight<RuntimeCall>,
+		RuntimeCall,
+		MaxInstructions,
+	>;
 	type Trader =
 		UsingComponents<WeightToFee, TokenLocation, AccountId, Balances, ToAuthor<Runtime>>;
 	type ResponseHandler = XcmPallet;
@@ -161,16 +182,54 @@ impl xcm_executor::Config for XcmConfig {
 	type Aliasers = Nothing;
 }
 
+parameter_types! {
+	// `GeneralAdmin` pluralistic body.
+	pub const GeneralAdminBodyId: BodyId = BodyId::Administration;
+	// StakingAdmin pluralistic body.
+	pub const StakingAdminBodyId: BodyId = BodyId::Defense;
+	// FellowshipAdmin pluralistic body.
+	pub const FellowshipAdminBodyId: BodyId = BodyId::Index(FELLOWSHIP_ADMIN_INDEX);
+}
+
+#[cfg(feature = "runtime-benchmarks")]
+parameter_types! {
+	pub ReachableDest: Option<MultiLocation> = Some(Parachain(1000).into());
+}
+
+/// Type to convert the `GeneralAdmin` origin to a Plurality `MultiLocation` value.
+pub type GeneralAdminToPlurality =
+	OriginToPluralityVoice<RuntimeOrigin, GeneralAdmin, GeneralAdminBodyId>;
+
 /// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior
 /// location of this chain.
 pub type LocalOriginToLocation = (
+	GeneralAdminToPlurality,
 	// And a usual Signed origin to be used in XCM as a corresponding AccountId32
 	SignedToAccountId32<RuntimeOrigin, AccountId, ThisNetwork>,
 );
 
+/// Type to convert the `StakingAdmin` origin to a Plurality `MultiLocation` value.
+pub type StakingAdminToPlurality = + OriginToPluralityVoice; + +/// Type to convert the `FellowshipAdmin` origin to a Plurality `MultiLocation` value. +pub type FellowshipAdminToPlurality = + OriginToPluralityVoice; + +/// Type to convert a pallet `Origin` type value into a `MultiLocation` value which represents an +/// interior location of this chain for a destination chain. +pub type LocalPalletOriginToLocation = ( + // GeneralAdmin origin to be used in XCM as a corresponding Plurality `MultiLocation` value. + GeneralAdminToPlurality, + // StakingAdmin origin to be used in XCM as a corresponding Plurality `MultiLocation` value. + StakingAdminToPlurality, + // FellowshipAdmin origin to be used in XCM as a corresponding Plurality `MultiLocation` value. + FellowshipAdminToPlurality, +); + impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; + type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; type XcmRouter = XcmRouter; // Anyone can execute XCM messages locally... type ExecuteXcmOrigin = xcm_builder::EnsureXcmOrigin; @@ -179,8 +238,11 @@ impl pallet_xcm::Config for Runtime { type XcmExecutor = XcmExecutor; type XcmTeleportFilter = Everything; type XcmReserveTransferFilter = Everything; - type Weigher = - WeightInfoBounds, RuntimeCall, MaxInstructions>; + type Weigher = WeightInfoBounds< + crate::weights::xcm::WestendXcmWeight, + RuntimeCall, + MaxInstructions, + >; type UniversalLocation = UniversalLocation; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; From ec9274eea6d0755fad165b3fe15e02738b003941 Mon Sep 17 00:00:00 2001 From: Joshy Orndorff Date: Wed, 27 Sep 2023 11:48:24 -0400 Subject: [PATCH 62/69] remove unnecessary hash string (#1722) This PR removes some unnecessary `r#`...`#` around a string and the corresponding comment that it was done because rustfmt wasn't working for "some reason". It seems to work fine now and clippy prefers it this way. 
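
To illustrate why the `r#`...`#` wrapper was redundant here: a raw string only changes how the literal is parsed, not its contents, so for text with no quotes or backslashes the two forms are byte-for-byte identical. A minimal, hypothetical snippet (not part of this patch):

```
fn main() {
    // Raw string: escape processing is disabled; useful when the text
    // contains quotes or backslashes, which this message does not.
    let raw = r#"no default declared, make a variant default by placing `#[default]` above it"#;
    // Plain string literal with identical contents.
    let plain = "no default declared, make a variant default by placing `#[default]` above it";
    assert_eq!(raw, plain);
}
```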
--------- Co-authored-by: Joshy Orndorff --- .../support/procedural/src/no_bound/default.rs | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/substrate/frame/support/procedural/src/no_bound/default.rs b/substrate/frame/support/procedural/src/no_bound/default.rs index da05f19e0f81..35d0eaeecf56 100644 --- a/substrate/frame/support/procedural/src/no_bound/default.rs +++ b/substrate/frame/support/procedural/src/no_bound/default.rs @@ -66,15 +66,12 @@ pub fn derive_default_no_bound(input: proc_macro::TokenStream) -> proc_macro::To .collect::>(); match &*default_variants { - [] => { - return syn::Error::new( - name.clone().span(), - // writing this as a regular string breaks rustfmt for some reason - r#"no default declared, make a variant default by placing `#[default]` above it"#, - ) - .into_compile_error() - .into() - }, + [] => return syn::Error::new( + name.clone().span(), + "no default declared, make a variant default by placing `#[default]` above it", + ) + .into_compile_error() + .into(), // only one variant with the #[default] attribute set [default_variant] => { let variant_attrs = default_variant From 70dc6b62d8c98691ea5055702ebb334d510cc4d0 Mon Sep 17 00:00:00 2001 From: ordian Date: Wed, 27 Sep 2023 18:07:56 +0200 Subject: [PATCH 63/69] implement disabled_validators correctly --- .../subsystem-types/src/runtime_client.rs | 1 - .../src/runtime_api_impl/vstaging.rs | 34 ++++++++++++------- 2 files changed, 22 insertions(+), 13 deletions(-) diff --git a/polkadot/node/subsystem-types/src/runtime_client.rs b/polkadot/node/subsystem-types/src/runtime_client.rs index 1c56bebb7060..f7adcf9862b5 100644 --- a/polkadot/node/subsystem-types/src/runtime_client.rs +++ b/polkadot/node/subsystem-types/src/runtime_client.rs @@ -240,7 +240,6 @@ pub trait RuntimeApiSubsystemClient { session_index: SessionIndex, ) -> Result; - // === v7: Asynchronous backing API === /// Returns candidate's acceptance limitations for asynchronous backing for a relay parent. diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs index 5c9f2a8d6400..dba4fc7b3cc6 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs @@ -16,18 +16,28 @@ //! Put implementations of functions from staging APIs here. 
-use primitives::{
-	ValidatorIndex,
-};
-use sp_std::prelude::*;
+use crate::shared;
+use primitives::ValidatorIndex;
+use sp_std::prelude::Vec;
+use sp_std::collections::btree_map::BTreeMap;
 
 /// Implementation for `DisabledValidators`
-pub fn disabled_validators<T>() -> Vec<ValidatorIndex> {
-	// <pallet_session::Pallet<T>>::disabled_validators()
-	// .iter()
-	// .cloned()
-	// .map(|v| ValidatorIndex(v))
-	// .collect()
-	// TODO
-	Vec::new()
+pub fn disabled_validators<T>() -> Vec<ValidatorIndex>
+where
+	T: pallet_session::Config + shared::Config,
+{
+	let shuffled_indices = <shared::Pallet<T>>::active_validator_indices();
+	// mapping from raw validator index to `ValidatorIndex`
+	// this computation is the same within a session, but should be cheap
+	let reverse_index = shuffled_indices
+		.iter()
+		.enumerate()
+		.map(|(i, v)| (v.0, ValidatorIndex(i as u32)))
+		.collect::<BTreeMap<_, _>>();
+
+	// we might have disabled validators who are not parachain validators
+	<pallet_session::Pallet<T>>::disabled_validators()
+		.iter()
+		.filter_map(|v| reverse_index.get(v).cloned())
+		.collect()
 }

From bca3c83b34d88b403de471b878ebc8c1077de2df Mon Sep 17 00:00:00 2001
From: ordian
Date: Wed, 27 Sep 2023 18:09:38 +0200
Subject: [PATCH 64/69] add a CAVEAT comment

---
 polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs
index dba4fc7b3cc6..4214f25cee54 100644
--- a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs
+++ b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs
@@ -22,6 +22,8 @@ use sp_std::prelude::Vec;
 use sp_std::collections::btree_map::BTreeMap;
 
 /// Implementation for `DisabledValidators`
+// CAVEAT: this should only be called on the node side
+// as it might produce incorrect results on session boundaries
 pub fn disabled_validators<T>() -> Vec<ValidatorIndex>
 where
 	T: pallet_session::Config + shared::Config,

From f717d0b74ac362a68cc251655ce6bdf39f76d51d Mon Sep 17 00:00:00 2001
From: ordian
Date: Wed, 27 Sep 2023 18:13:25 +0200
Subject: [PATCH 65/69] cargo fmt

---
 polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs
index 4214f25cee54..24a076f3a443 100644
--- a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs
+++ b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs
@@ -18,8 +18,7 @@
 
 use crate::shared;
 use primitives::ValidatorIndex;
-use sp_std::prelude::Vec;
-use sp_std::collections::btree_map::BTreeMap;
+use sp_std::{collections::btree_map::BTreeMap, prelude::Vec};
 
 /// Implementation for `DisabledValidators`
 // CAVEAT: this should only be called on the node side

From 2a5af8963d6f8a142644e98158de96ceac4822f9 Mon Sep 17 00:00:00 2001
From: ordian
Date: Wed, 27 Sep 2023 18:32:39 +0200
Subject: [PATCH 66/69] Clarify docs

---
 polkadot/primitives/src/runtime_api.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/polkadot/primitives/src/runtime_api.rs b/polkadot/primitives/src/runtime_api.rs
index ac5a3b054de2..5ec897c8cbb4 100644
--- a/polkadot/primitives/src/runtime_api.rs
+++ b/polkadot/primitives/src/runtime_api.rs
@@ -261,7 +261,7 @@ sp_api::decl_runtime_apis! {
 
 		/***** Added in v8 *****/
 
-		/// Returns a sorted Vec with the `ValidatorIndex` of all disabled validators.
+		/// Returns a list of all disabled validators at the given block.
 		#[api_version(8)]
 		fn disabled_validators() -> Vec<ValidatorIndex>;
 	}

From 02284a3e82879f3a6f3ea2d44af1d7e69ccad60e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?D=C3=B3nal=20Murray?=
Date: Wed, 27 Sep 2023 17:39:45 +0100
Subject: [PATCH 67/69] Add custom error message for `StorageNoopGuard` (#1727)

Expand `StorageNoopGuard` to be able to add extra context through a custom
error message. When the guard is triggered, it panics with an error message
which can be defaulted, set on construction, or set after it has been
constructed.

Turned `StorageNoopGuard` into a struct with `storage_root` and
`error_message` fields, and added a `from_error_message` constructor and a
`set_error_message` setter. Also added `new()`, aliased to `default()`.

Closes #375

---------

Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com>
Co-authored-by: Liam Aharon
---
 .../support/src/storage/storage_noop_guard.rs | 70 ++++++++++++++++---
 1 file changed, 62 insertions(+), 8 deletions(-)

diff --git a/substrate/frame/support/src/storage/storage_noop_guard.rs b/substrate/frame/support/src/storage/storage_noop_guard.rs
index d00e6e18ecc4..c4d40fa99a35 100644
--- a/substrate/frame/support/src/storage/storage_noop_guard.rs
+++ b/substrate/frame/support/src/storage/storage_noop_guard.rs
@@ -37,15 +37,38 @@
 /// });
 /// ```
 #[must_use]
-pub struct StorageNoopGuard(sp_std::vec::Vec<u8>);
+pub struct StorageNoopGuard<'a> {
+	storage_root: sp_std::vec::Vec<u8>,
+	error_message: &'a str,
+}
 
-impl Default for StorageNoopGuard {
+impl<'a> Default for StorageNoopGuard<'a> {
 	fn default() -> Self {
-		Self(sp_io::storage::root(sp_runtime::StateVersion::V1))
+		Self {
+			storage_root: sp_io::storage::root(sp_runtime::StateVersion::V1),
+			error_message: "`StorageNoopGuard` detected an attempted storage change.",
+		}
+	}
+}
+
+impl<'a> StorageNoopGuard<'a> {
+	/// Alias to `default()`.
+	pub fn new() -> Self {
+		Self::default()
+	}
+
+	/// Creates a new [`StorageNoopGuard`] with a custom error message.
+	pub fn from_error_message(error_message: &'a str) -> Self {
+		Self { storage_root: sp_io::storage::root(sp_runtime::StateVersion::V1), error_message }
+	}
+
+	/// Sets a custom error message for a [`StorageNoopGuard`].
+	pub fn set_error_message(&mut self, error_message: &'a str) {
+		self.error_message = error_message;
+	}
 }
 
-impl Drop for StorageNoopGuard {
+impl<'a> Drop for StorageNoopGuard<'a> {
 	fn drop(&mut self) {
 		// No need to double panic, eg. inside a test assertion failure.
if sp_std::thread::panicking() { @@ -53,8 +76,9 @@ impl Drop for StorageNoopGuard { } assert_eq!( sp_io::storage::root(sp_runtime::StateVersion::V1), - self.0, - "StorageNoopGuard detected wrongful storage changes.", + self.storage_root, + "{}", + self.error_message, ); } } @@ -65,7 +89,7 @@ mod tests { use sp_io::TestExternalities; #[test] - #[should_panic(expected = "StorageNoopGuard detected wrongful storage changes.")] + #[should_panic(expected = "`StorageNoopGuard` detected an attempted storage change.")] fn storage_noop_guard_panics_on_changed() { TestExternalities::default().execute_with(|| { let _guard = StorageNoopGuard::default(); @@ -83,7 +107,7 @@ mod tests { } #[test] - #[should_panic(expected = "StorageNoopGuard detected wrongful storage changes.")] + #[should_panic(expected = "`StorageNoopGuard` detected an attempted storage change.")] fn storage_noop_guard_panics_on_early_drop() { TestExternalities::default().execute_with(|| { let guard = StorageNoopGuard::default(); @@ -111,4 +135,34 @@ mod tests { panic!("Something else"); }); } + + #[test] + #[should_panic(expected = "`StorageNoopGuard` found unexpected storage changes.")] + fn storage_noop_guard_panics_created_from_error_message() { + TestExternalities::default().execute_with(|| { + let _guard = StorageNoopGuard::from_error_message( + "`StorageNoopGuard` found unexpected storage changes.", + ); + frame_support::storage::unhashed::put(b"key", b"value"); + }); + } + + #[test] + #[should_panic(expected = "`StorageNoopGuard` found unexpected storage changes.")] + fn storage_noop_guard_panics_with_set_error_message() { + TestExternalities::default().execute_with(|| { + let mut guard = StorageNoopGuard::default(); + guard.set_error_message("`StorageNoopGuard` found unexpected storage changes."); + frame_support::storage::unhashed::put(b"key", b"value"); + }); + } + + #[test] + #[should_panic(expected = "`StorageNoopGuard` detected an attempted storage change.")] + fn storage_noop_guard_panics_new_alias() { + TestExternalities::default().execute_with(|| { + let _guard = StorageNoopGuard::new(); + frame_support::storage::unhashed::put(b"key", b"value"); + }); + } } From 14e5d233480ded48cc9801454924c402fbba7cd6 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Wed, 27 Sep 2023 19:44:37 +0300 Subject: [PATCH 68/69] Move requests-responses and polling from `ChainSync` to `SyncingEngine` (#1650) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move request-response handling from `ChainSync` to `SyncingEngine` as part of [Sync 2.0](https://github.com/paritytech/polkadot-sdk/issues/534) refactoring aimed at making `ChainSync` a pure state machine. Resolves https://github.com/paritytech/polkadot-sdk/issues/502. 
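
To make the new division of labour concrete, here is a minimal, hypothetical sketch of the target shape (all names are invented for illustration; the real types are `ChainSync` and `SyncingEngine` in `sc-network-sync`): the state machine only decides which requests should be made and folds responses back into its state, while the engine owns the networking side, sends the requests, and routes responses back.

```
// Illustrative sketch only, under invented names; not the crate's API.
struct SyncStateMachine;

enum Request {
    Block { peer: u64, from_number: u64 },
}

impl SyncStateMachine {
    /// Pure decision: which requests should be in flight right now.
    fn desired_requests(&mut self) -> Vec<Request> {
        vec![Request::Block { peer: 1, from_number: 100 }]
    }
    /// Pure state transition: fold a completed response back in.
    fn on_block_response(&mut self, peer: u64, blocks: Vec<u64>) {
        let _ = (peer, blocks);
    }
}

struct Engine {
    sync: SyncStateMachine,
}

impl Engine {
    /// The engine's poll loop performs all I/O on the state machine's behalf.
    fn tick(&mut self) {
        for request in self.sync.desired_requests() {
            match request {
                Request::Block { peer, from_number } => {
                    // Here the real engine would start a network request and
                    // track the pending response; we fake an instant reply.
                    self.sync.on_block_response(peer, vec![from_number]);
                },
            }
        }
    }
}

fn main() {
    Engine { sync: SyncStateMachine }.tick();
}
```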
--------- Co-authored-by: Aaro Altonen <48052676+altonen@users.noreply.github.com> Co-authored-by: Bastian Köcher --- Cargo.lock | 1 + substrate/client/network/common/src/sync.rs | 28 +- substrate/client/network/sync/Cargo.toml | 1 + substrate/client/network/sync/src/engine.rs | 382 ++++++++++- substrate/client/network/sync/src/lib.rs | 612 ++++-------------- substrate/client/network/sync/src/mock.rs | 12 +- .../network/sync/src/pending_responses.rs | 114 ++++ substrate/client/rpc-spec-v2/Cargo.toml | 2 +- 8 files changed, 609 insertions(+), 543 deletions(-) create mode 100644 substrate/client/network/sync/src/pending_responses.rs diff --git a/Cargo.lock b/Cargo.lock index 7a11c5b7f196..55a8e0c748f5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -15380,6 +15380,7 @@ dependencies = [ "substrate-test-runtime-client", "thiserror", "tokio", + "tokio-stream", ] [[package]] diff --git a/substrate/client/network/common/src/sync.rs b/substrate/client/network/common/src/sync.rs index 5a6f90b290d2..8ef0fafa1c79 100644 --- a/substrate/client/network/common/src/sync.rs +++ b/substrate/client/network/common/src/sync.rs @@ -36,7 +36,7 @@ use sp_runtime::{ }; use warp::WarpSyncProgress; -use std::{any::Any, fmt, fmt::Formatter, pin::Pin, sync::Arc, task::Poll}; +use std::{any::Any, fmt, fmt::Formatter, pin::Pin, sync::Arc}; /// The sync status of a peer we are trying to sync with #[derive(Debug)] @@ -204,6 +204,23 @@ pub enum PeerRequest { WarpProof, } +#[derive(Debug)] +pub enum PeerRequestType { + Block, + State, + WarpProof, +} + +impl PeerRequest { + pub fn get_type(&self) -> PeerRequestType { + match self { + PeerRequest::Block(_) => PeerRequestType::Block, + PeerRequest::State => PeerRequestType::State, + PeerRequest::WarpProof => PeerRequestType::WarpProof, + } + } +} + /// Wrapper for implementation-specific state request. /// /// NOTE: Implementation must be able to encode and decode it for network purposes. @@ -289,9 +306,6 @@ pub trait ChainSync: Send { /// Returns the current number of peers stored within this state machine. fn num_peers(&self) -> usize; - /// Returns the number of peers we're connected to and that are being queried. - fn num_active_peers(&self) -> usize; - /// Handle a new connected peer. /// /// Call this method whenever we connect to a new peer. @@ -369,10 +383,4 @@ pub trait ChainSync: Send { /// Return some key metrics. 
fn metrics(&self) -> Metrics; - - /// Advance the state of `ChainSync` - fn poll(&mut self, cx: &mut std::task::Context) -> Poll<()>; - - /// Send block request to peer - fn send_block_request(&mut self, who: PeerId, request: BlockRequest); } diff --git a/substrate/client/network/sync/Cargo.toml b/substrate/client/network/sync/Cargo.toml index f10dd7869bbb..39312cc4b327 100644 --- a/substrate/client/network/sync/Cargo.toml +++ b/substrate/client/network/sync/Cargo.toml @@ -29,6 +29,7 @@ prost = "0.11" schnellru = "0.2.1" smallvec = "1.11.0" thiserror = "1.0" +tokio-stream = "0.1.14" fork-tree = { path = "../../../utils/fork-tree" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } sc-client-api = { path = "../../api" } diff --git a/substrate/client/network/sync/src/engine.rs b/substrate/client/network/sync/src/engine.rs index c93ba89c6220..8b8874ad4ebd 100644 --- a/substrate/client/network/sync/src/engine.rs +++ b/substrate/client/network/sync/src/engine.rs @@ -23,10 +23,12 @@ use crate::{ block_announce_validator::{ BlockAnnounceValidationResult, BlockAnnounceValidator as BlockAnnounceValidatorStream, }, - block_relay_protocol::BlockDownloader, + block_relay_protocol::{BlockDownloader, BlockResponseError}, + pending_responses::{PendingResponses, ResponseEvent}, + schema::v1::{StateRequest, StateResponse}, service::{self, chain_sync::ToServiceCommand}, warp::WarpSyncParams, - ChainSync, ClientError, SyncingService, + BlockRequestEvent, ChainSync, ClientError, SyncingService, }; use codec::{Decode, Encode}; @@ -36,24 +38,32 @@ use futures::{ FutureExt, StreamExt, }; use futures_timer::Delay; -use libp2p::PeerId; +use libp2p::{request_response::OutboundFailure, PeerId}; +use log::{debug, trace}; use prometheus_endpoint::{ register, Gauge, GaugeVec, MetricSource, Opts, PrometheusError, Registry, SourcedGauge, U64, }; +use prost::Message; use schnellru::{ByLength, LruMap}; use sc_client_api::{BlockBackend, HeaderBackend, ProofProvider}; use sc_consensus::import_queue::ImportQueueService; use sc_network::{ - config::{FullNetworkConfiguration, NonDefaultSetConfig, ProtocolId}, + config::{ + FullNetworkConfiguration, NonDefaultSetConfig, NonReservedPeerMode, NotificationHandshake, + ProtocolId, SetConfig, + }, + request_responses::{IfDisconnected, RequestFailure}, utils::LruHashSet, NotificationsSink, ProtocolName, ReputationChange, }; use sc_network_common::{ role::Roles, sync::{ - message::{BlockAnnounce, BlockAnnouncesHandshake, BlockState}, - BadPeer, ChainSync as ChainSyncT, ExtendedPeerInfo, SyncEvent, + message::{BlockAnnounce, BlockAnnouncesHandshake, BlockRequest, BlockState}, + warp::{EncodedProof, WarpProofRequest}, + BadPeer, ChainSync as ChainSyncT, ExtendedPeerInfo, OpaqueStateRequest, + OpaqueStateResponse, PeerRequest, SyncEvent, }, }; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; @@ -63,6 +73,7 @@ use sp_runtime::traits::{Block as BlockT, Header, NumberFor, Zero}; use std::{ collections::{HashMap, HashSet}, + iter, num::NonZeroUsize, sync::{ atomic::{AtomicBool, AtomicUsize, Ordering}, @@ -98,6 +109,9 @@ const INACTIVITY_EVICT_THRESHOLD: Duration = Duration::from_secs(30); /// before it starts evicting peers. const INITIAL_EVICTION_WAIT_PERIOD: Duration = Duration::from_secs(2 * 60); +/// Maximum allowed size for a block announce. +const MAX_BLOCK_ANNOUNCE_SIZE: u64 = 1024 * 1024; + mod rep { use sc_network::ReputationChange as Rep; /// Peer has different genesis. 
@@ -106,6 +120,14 @@ mod rep { pub const BAD_BLOCK_ANNOUNCEMENT: Rep = Rep::new(-(1 << 12), "Bad block announcement"); /// Block announce substream with the peer has been inactive too long pub const INACTIVE_SUBSTREAM: Rep = Rep::new(-(1 << 10), "Inactive block announce substream"); + /// We received a message that failed to decode. + pub const BAD_MESSAGE: Rep = Rep::new(-(1 << 12), "Bad message"); + /// Peer is on unsupported protocol version. + pub const BAD_PROTOCOL: Rep = Rep::new_fatal("Unsupported protocol"); + /// Reputation change when a peer refuses a request. + pub const REFUSED: Rep = Rep::new(-(1 << 10), "Request refused"); + /// Reputation change when a peer doesn't respond in time to our messages. + pub const TIMEOUT: Rep = Rep::new(-(1 << 10), "Request timeout"); } struct Metrics { @@ -277,6 +299,18 @@ pub struct SyncingEngine { /// Instant when the last notification was sent or received. last_notification_io: Instant, + + /// Pending responses + pending_responses: PendingResponses, + + /// Block downloader + block_downloader: Arc>, + + /// Protocol name used to send out state requests + state_request_protocol_name: ProtocolName, + + /// Protocol name used to send out warp sync requests + warp_sync_protocol_name: Option, } impl SyncingEngine @@ -381,24 +415,32 @@ where let warp_sync_target_block_header_rx = warp_sync_target_block_header_rx .map_or(futures::future::pending().boxed().fuse(), |rx| rx.boxed().fuse()); - let (chain_sync, block_announce_config) = ChainSync::new( - mode, - client.clone(), + let block_announce_config = Self::get_block_announce_proto_config( protocol_id, fork_id, roles, + client.info().best_number, + client.info().best_hash, + client + .block_hash(Zero::zero()) + .ok() + .flatten() + .expect("Genesis block exists; qed"), + ); + let block_announce_protocol_name = block_announce_config.notifications_protocol.clone(); + + let chain_sync = ChainSync::new( + mode, + client.clone(), + block_announce_protocol_name.clone(), max_parallel_downloads, max_blocks_per_request, warp_sync_config, metrics_registry, network_service.clone(), import_queue, - block_downloader, - state_request_protocol_name, - warp_sync_protocol_name, )?; - let block_announce_protocol_name = block_announce_config.notifications_protocol.clone(); let (tx, service_rx) = tracing_unbounded("mpsc_chain_sync", 100_000); let num_connected = Arc::new(AtomicUsize::new(0)); let is_major_syncing = Arc::new(AtomicBool::new(false)); @@ -455,6 +497,10 @@ where } else { None }, + pending_responses: PendingResponses::new(), + block_downloader, + state_request_protocol_name, + warp_sync_protocol_name, }, SyncingService::new(tx, num_connected, is_major_syncing), block_announce_config, @@ -682,11 +728,23 @@ where ToServiceCommand::BlocksProcessed(imported, count, results) => { for result in self.chain_sync.on_blocks_processed(imported, count, results) { match result { - Ok((id, req)) => self.chain_sync.send_block_request(id, req), - Err(BadPeer(id, repu)) => { - self.network_service - .disconnect_peer(id, self.block_announce_protocol_name.clone()); - self.network_service.report_peer(id, repu) + Ok(event) => match event { + BlockRequestEvent::SendRequest { peer_id, request } => { + // drop obsolete pending response first + self.pending_responses.remove(&peer_id); + self.send_block_request(peer_id, request); + }, + BlockRequestEvent::RemoveStale { peer_id } => { + self.pending_responses.remove(&peer_id); + }, + }, + Err(BadPeer(peer_id, repu)) => { + self.pending_responses.remove(&peer_id); + 
self.network_service.disconnect_peer( + peer_id, + self.block_announce_protocol_name.clone(), + ); + self.network_service.report_peer(peer_id, repu) }, } } @@ -715,7 +773,7 @@ where let _ = tx.send(status); }, ToServiceCommand::NumActivePeers(tx) => { - let _ = tx.send(self.chain_sync.num_active_peers()); + let _ = tx.send(self.num_active_peers()); }, ToServiceCommand::SyncState(tx) => { let _ = tx.send(self.chain_sync.status()); @@ -817,8 +875,13 @@ where Poll::Pending => {}, } - // Drive `ChainSync`. - while let Poll::Ready(()) = self.chain_sync.poll(cx) {} + // Send outbound requests on `ChanSync`'s behalf. + self.send_chain_sync_requests(); + + // Poll & process pending responses. + while let Poll::Ready(Some(event)) = self.pending_responses.poll_next_unpin(cx) { + self.process_response_event(event); + } // Poll block announce validations last, because if a block announcement was received // through the event stream between `SyncingEngine` and `Protocol` and the validation @@ -860,6 +923,7 @@ where } self.chain_sync.peer_disconnected(&peer_id); + self.pending_responses.remove(&peer_id); self.event_streams.retain(|stream| { stream.unbounded_send(SyncEvent::PeerDisconnected(peer_id)).is_ok() }); @@ -991,7 +1055,7 @@ where } if let Some(req) = req { - self.chain_sync.send_block_request(peer_id, req); + self.send_block_request(peer_id, req); } self.event_streams @@ -999,4 +1063,278 @@ where Ok(()) } + + fn send_chain_sync_requests(&mut self) { + for (peer_id, request) in self.chain_sync.block_requests() { + self.send_block_request(peer_id, request); + } + + if let Some((peer_id, request)) = self.chain_sync.state_request() { + self.send_state_request(peer_id, request); + } + + for (peer_id, request) in self.chain_sync.justification_requests() { + self.send_block_request(peer_id, request); + } + + if let Some((peer_id, request)) = self.chain_sync.warp_sync_request() { + self.send_warp_sync_request(peer_id, request); + } + } + + fn send_block_request(&mut self, peer_id: PeerId, request: BlockRequest) { + if !self.chain_sync.is_peer_known(&peer_id) { + trace!(target: LOG_TARGET, "Cannot send block request to unknown peer {peer_id}"); + debug_assert!(false); + return + } + + let downloader = self.block_downloader.clone(); + + self.pending_responses.insert( + peer_id, + PeerRequest::Block(request.clone()), + async move { downloader.download_blocks(peer_id, request).await }.boxed(), + ); + } + + fn send_state_request(&mut self, peer_id: PeerId, request: OpaqueStateRequest) { + if !self.chain_sync.is_peer_known(&peer_id) { + trace!(target: LOG_TARGET, "Cannot send state request to unknown peer {peer_id}"); + debug_assert!(false); + return + } + + let (tx, rx) = oneshot::channel(); + + self.pending_responses.insert(peer_id, PeerRequest::State, rx.boxed()); + + match Self::encode_state_request(&request) { + Ok(data) => { + self.network_service.start_request( + peer_id, + self.state_request_protocol_name.clone(), + data, + tx, + IfDisconnected::ImmediateError, + ); + }, + Err(err) => { + log::warn!( + target: LOG_TARGET, + "Failed to encode state request {request:?}: {err:?}", + ); + }, + } + } + + fn send_warp_sync_request(&mut self, peer_id: PeerId, request: WarpProofRequest) { + if !self.chain_sync.is_peer_known(&peer_id) { + trace!(target: LOG_TARGET, "Cannot send warp proof request to unknown peer {peer_id}"); + debug_assert!(false); + return + } + + let (tx, rx) = oneshot::channel(); + + self.pending_responses.insert(peer_id, PeerRequest::WarpProof, rx.boxed()); + + match 
&self.warp_sync_protocol_name { + Some(name) => self.network_service.start_request( + peer_id, + name.clone(), + request.encode(), + tx, + IfDisconnected::ImmediateError, + ), + None => { + log::warn!( + target: LOG_TARGET, + "Trying to send warp sync request when no protocol is configured {request:?}", + ); + }, + } + } + + fn encode_state_request(request: &OpaqueStateRequest) -> Result, String> { + let request: &StateRequest = request.0.downcast_ref().ok_or_else(|| { + "Failed to downcast opaque state response during encoding, this is an \ + implementation bug." + .to_string() + })?; + + Ok(request.encode_to_vec()) + } + + fn decode_state_response(response: &[u8]) -> Result { + let response = StateResponse::decode(response) + .map_err(|error| format!("Failed to decode state response: {error}"))?; + + Ok(OpaqueStateResponse(Box::new(response))) + } + + fn process_response_event(&mut self, response_event: ResponseEvent) { + let ResponseEvent { peer_id, request, response } = response_event; + + match response { + Ok(Ok(resp)) => match request { + PeerRequest::Block(req) => { + match self.block_downloader.block_response_into_blocks(&req, resp) { + Ok(blocks) => { + if let Some((peer_id, new_req)) = + self.chain_sync.on_block_response(peer_id, req, blocks) + { + self.send_block_request(peer_id, new_req); + } + }, + Err(BlockResponseError::DecodeFailed(e)) => { + debug!( + target: LOG_TARGET, + "Failed to decode block response from peer {:?}: {:?}.", + peer_id, + e + ); + self.network_service.report_peer(peer_id, rep::BAD_MESSAGE); + self.network_service.disconnect_peer( + peer_id, + self.block_announce_protocol_name.clone(), + ); + return + }, + Err(BlockResponseError::ExtractionFailed(e)) => { + debug!( + target: LOG_TARGET, + "Failed to extract blocks from peer response {:?}: {:?}.", + peer_id, + e + ); + self.network_service.report_peer(peer_id, rep::BAD_MESSAGE); + return + }, + } + }, + PeerRequest::State => { + let response = match Self::decode_state_response(&resp[..]) { + Ok(proto) => proto, + Err(e) => { + debug!( + target: LOG_TARGET, + "Failed to decode state response from peer {peer_id:?}: {e:?}.", + ); + self.network_service.report_peer(peer_id, rep::BAD_MESSAGE); + self.network_service.disconnect_peer( + peer_id, + self.block_announce_protocol_name.clone(), + ); + return + }, + }; + + self.chain_sync.on_state_response(peer_id, response); + }, + PeerRequest::WarpProof => { + self.chain_sync.on_warp_sync_response(peer_id, EncodedProof(resp)); + }, + }, + Ok(Err(e)) => { + debug!(target: LOG_TARGET, "Request to peer {peer_id:?} failed: {e:?}."); + + match e { + RequestFailure::Network(OutboundFailure::Timeout) => { + self.network_service.report_peer(peer_id, rep::TIMEOUT); + self.network_service + .disconnect_peer(peer_id, self.block_announce_protocol_name.clone()); + }, + RequestFailure::Network(OutboundFailure::UnsupportedProtocols) => { + self.network_service.report_peer(peer_id, rep::BAD_PROTOCOL); + self.network_service + .disconnect_peer(peer_id, self.block_announce_protocol_name.clone()); + }, + RequestFailure::Network(OutboundFailure::DialFailure) => { + self.network_service + .disconnect_peer(peer_id, self.block_announce_protocol_name.clone()); + }, + RequestFailure::Refused => { + self.network_service.report_peer(peer_id, rep::REFUSED); + self.network_service + .disconnect_peer(peer_id, self.block_announce_protocol_name.clone()); + }, + RequestFailure::Network(OutboundFailure::ConnectionClosed) | + RequestFailure::NotConnected => { + self.network_service + 
.disconnect_peer(peer_id, self.block_announce_protocol_name.clone()); + }, + RequestFailure::UnknownProtocol => { + debug_assert!(false, "Block request protocol should always be known."); + }, + RequestFailure::Obsolete => { + debug_assert!( + false, + "Can not receive `RequestFailure::Obsolete` after dropping the \ + response receiver.", + ); + }, + } + }, + Err(oneshot::Canceled) => { + trace!( + target: LOG_TARGET, + "Request to peer {peer_id:?} failed due to oneshot being canceled.", + ); + self.network_service + .disconnect_peer(peer_id, self.block_announce_protocol_name.clone()); + }, + } + } + + /// Returns the number of peers we're connected to and that are being queried. + fn num_active_peers(&self) -> usize { + self.pending_responses.len() + } + + /// Get config for the block announcement protocol + fn get_block_announce_proto_config( + protocol_id: ProtocolId, + fork_id: &Option, + roles: Roles, + best_number: NumberFor, + best_hash: B::Hash, + genesis_hash: B::Hash, + ) -> NonDefaultSetConfig { + let block_announces_protocol = { + let genesis_hash = genesis_hash.as_ref(); + if let Some(ref fork_id) = fork_id { + format!( + "/{}/{}/block-announces/1", + array_bytes::bytes2hex("", genesis_hash), + fork_id + ) + } else { + format!("/{}/block-announces/1", array_bytes::bytes2hex("", genesis_hash)) + } + }; + + NonDefaultSetConfig { + notifications_protocol: block_announces_protocol.into(), + fallback_names: iter::once( + format!("/{}/block-announces/1", protocol_id.as_ref()).into(), + ) + .collect(), + max_notification_size: MAX_BLOCK_ANNOUNCE_SIZE, + handshake: Some(NotificationHandshake::new(BlockAnnouncesHandshake::::build( + roles, + best_number, + best_hash, + genesis_hash, + ))), + // NOTE: `set_config` will be ignored by `protocol.rs` as the block announcement + // protocol is still hardcoded into the peerset. + set_config: SetConfig { + in_peers: 0, + out_peers: 0, + reserved_nodes: Vec::new(), + non_reserved_mode: NonReservedPeerMode::Deny, + }, + } + } } diff --git a/substrate/client/network/sync/src/lib.rs b/substrate/client/network/sync/src/lib.rs index 20c0034966d5..ff00e8f90f82 100644 --- a/substrate/client/network/sync/src/lib.rs +++ b/substrate/client/network/sync/src/lib.rs @@ -29,44 +29,31 @@ //! order to update it. 
use crate::{ - block_relay_protocol::{BlockDownloader, BlockResponseError}, blocks::BlockCollection, - schema::v1::{StateRequest, StateResponse}, + schema::v1::StateResponse, state::StateSync, warp::{WarpProofImportResult, WarpSync, WarpSyncConfig}, }; use codec::Encode; use extra_requests::ExtraRequests; -use futures::{channel::oneshot, task::Poll, Future, FutureExt}; -use libp2p::{request_response::OutboundFailure, PeerId}; +use libp2p::PeerId; use log::{debug, error, info, trace, warn}; -use prost::Message; use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; use sc_client_api::{BlockBackend, ProofProvider}; use sc_consensus::{ import_queue::ImportQueueService, BlockImportError, BlockImportStatus, IncomingBlock, }; -use sc_network::{ - config::{ - NonDefaultSetConfig, NonReservedPeerMode, NotificationHandshake, ProtocolId, SetConfig, - }, - request_responses::{IfDisconnected, RequestFailure}, - types::ProtocolName, -}; -use sc_network_common::{ - role::Roles, - sync::{ - message::{ - BlockAnnounce, BlockAnnouncesHandshake, BlockAttributes, BlockData, BlockRequest, - BlockResponse, Direction, FromBlock, - }, - warp::{EncodedProof, WarpProofRequest, WarpSyncPhase, WarpSyncProgress}, - BadPeer, ChainSync as ChainSyncT, ImportResult, Metrics, OnBlockData, OnBlockJustification, - OnStateData, OpaqueStateRequest, OpaqueStateResponse, PeerInfo, PeerRequest, SyncMode, - SyncState, SyncStatus, +use sc_network::types::ProtocolName; +use sc_network_common::sync::{ + message::{ + BlockAnnounce, BlockAttributes, BlockData, BlockRequest, BlockResponse, Direction, + FromBlock, }, + warp::{EncodedProof, WarpProofRequest, WarpSyncPhase, WarpSyncProgress}, + BadPeer, ChainSync as ChainSyncT, Metrics, OnBlockData, OnBlockJustification, OnStateData, + OpaqueStateRequest, OpaqueStateResponse, PeerInfo, SyncMode, SyncState, SyncStatus, }; use sp_arithmetic::traits::Saturating; use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata}; @@ -81,9 +68,7 @@ use sp_runtime::{ use std::{ collections::{HashMap, HashSet}, - iter, ops::Range, - pin::Pin, sync::Arc, }; @@ -92,6 +77,7 @@ pub use service::chain_sync::SyncingService; mod block_announce_validator; mod extra_requests; mod futures_stream; +mod pending_responses; mod schema; pub mod block_relay_protocol; @@ -131,9 +117,6 @@ const MAJOR_SYNC_BLOCKS: u8 = 5; /// Number of peers that need to be connected before warp sync is started. const MIN_PEERS_TO_START_WARP_SYNC: usize = 3; -/// Maximum allowed size for a block announce. -const MAX_BLOCK_ANNOUNCE_SIZE: u64 = 1024 * 1024; - /// Maximum blocks per response. pub(crate) const MAX_BLOCKS_IN_RESPONSE: usize = 128; @@ -170,18 +153,6 @@ mod rep { /// Peer response data does not have requested bits. pub const BAD_RESPONSE: Rep = Rep::new(-(1 << 12), "Incomplete response"); - - /// Reputation change when a peer doesn't respond in time to our messages. - pub const TIMEOUT: Rep = Rep::new(-(1 << 10), "Request timeout"); - - /// Peer is on unsupported protocol version. - pub const BAD_PROTOCOL: Rep = Rep::new_fatal("Unsupported protocol"); - - /// Reputation change when a peer refuses a request. - pub const REFUSED: Rep = Rep::new(-(1 << 10), "Request refused"); - - /// We received a message that failed to decode. 
- pub const BAD_MESSAGE: Rep = Rep::new(-(1 << 12), "Bad message"); } enum AllowedRequests { @@ -261,17 +232,12 @@ struct GapSync { target: NumberFor, } -type PendingResponse = Pin< - Box< - dyn Future< - Output = ( - PeerId, - PeerRequest, - Result, RequestFailure>, oneshot::Canceled>, - ), - > + Send, - >, ->; +/// An event used to notify [`engine::SyncingEngine`] if we want to perform a block request +/// or drop an obsolete pending response. +enum BlockRequestEvent { + SendRequest { peer_id: PeerId, request: BlockRequest }, + RemoveStale { peer_id: PeerId }, +} /// The main data structure which contains all the state for a chains /// active syncing strategy. @@ -322,14 +288,6 @@ pub struct ChainSync { network_service: service::network::NetworkServiceHandle, /// Protocol name used for block announcements block_announce_protocol_name: ProtocolName, - /// Block downloader stub - block_downloader: Arc>, - /// Protocol name used to send out state requests - state_request_protocol_name: ProtocolName, - /// Protocol name used to send out warp sync requests - warp_sync_protocol_name: Option, - /// Pending responses - pending_responses: HashMap>, /// Handle to import queue. import_queue: Box>, /// Metrics. @@ -491,10 +449,6 @@ where self.peers.len() } - fn num_active_peers(&self) -> usize { - self.pending_responses.len() - } - fn new_peer( &mut self, who: PeerId, @@ -1145,7 +1099,6 @@ where gap_sync.blocks.clear_peer_download(who) } self.peers.remove(who); - self.pending_responses.remove(who); self.extra_justifications.peer_disconnected(who); self.allowed_requests.set_all(); self.fork_targets.retain(|_, target| { @@ -1168,36 +1121,6 @@ where justifications: self.extra_justifications.metrics(), } } - - fn poll(&mut self, cx: &mut std::task::Context) -> Poll<()> { - self.process_outbound_requests(); - - while let Poll::Ready(result) = self.poll_pending_responses(cx) { - match result { - ImportResult::BlockImport(origin, blocks) => self.import_blocks(origin, blocks), - ImportResult::JustificationImport(who, hash, number, justifications) => - self.import_justifications(who, hash, number, justifications), - } - } - - Poll::Pending - } - - fn send_block_request(&mut self, who: PeerId, request: BlockRequest) { - if self.peers.contains_key(&who) { - let downloader = self.block_downloader.clone(); - self.pending_responses.insert( - who, - Box::pin(async move { - ( - who, - PeerRequest::Block(request.clone()), - downloader.download_blocks(who, request).await, - ) - }), - ); - } - } } impl ChainSync @@ -1216,32 +1139,14 @@ where pub fn new( mode: SyncMode, client: Arc, - protocol_id: ProtocolId, - fork_id: &Option, - roles: Roles, + block_announce_protocol_name: ProtocolName, max_parallel_downloads: u32, max_blocks_per_request: u32, warp_sync_config: Option>, metrics_registry: Option<&Registry>, network_service: service::network::NetworkServiceHandle, import_queue: Box>, - block_downloader: Arc>, - state_request_protocol_name: ProtocolName, - warp_sync_protocol_name: Option, - ) -> Result<(Self, NonDefaultSetConfig), ClientError> { - let block_announce_config = Self::get_block_announce_proto_config( - protocol_id, - fork_id, - roles, - client.info().best_number, - client.info().best_hash, - client - .block_hash(Zero::zero()) - .ok() - .flatten() - .expect("Genesis block exists; qed"), - ); - + ) -> Result { let mut sync = Self { client, peers: HashMap::new(), @@ -1261,16 +1166,9 @@ where import_existing: false, gap_sync: None, network_service, - block_downloader, - state_request_protocol_name, 
 			warp_sync_config,
 			warp_sync_target_block_header: None,
-			warp_sync_protocol_name,
-			block_announce_protocol_name: block_announce_config
-				.notifications_protocol
-				.clone()
-				.into(),
-			pending_responses: HashMap::new(),
+			block_announce_protocol_name,
 			import_queue,
 			metrics: if let Some(r) = &metrics_registry {
 				match SyncingMetrics::register(r) {
@@ -1289,7 +1187,7 @@ where
 		};
 
 		sync.reset_sync_start_point()?;
-		Ok((sync, block_announce_config))
+		Ok(sync)
 	}
 
 	/// Returns the median seen block number.
@@ -1413,7 +1311,7 @@ where
 	/// Restart the sync process. This will reset all pending block requests and return an iterator
 	/// of new block requests to make to peers. Peers that were downloading finality data (i.e.
 	/// their state was `DownloadingJustification`) are unaffected and will stay in the same state.
-	fn restart(&mut self) -> impl Iterator<Item = Result<(PeerId, BlockRequest<B>), BadPeer>> + '_ {
+	fn restart(&mut self) -> impl Iterator<Item = Result<BlockRequestEvent<B>, BadPeer>> + '_ {
 		self.blocks.clear();
 		if let Err(e) = self.reset_sync_start_point() {
 			warn!(target: LOG_TARGET, "💔 Unable to restart sync: {e}");
@@ -1427,23 +1325,23 @@
 		);
 
 		let old_peers = std::mem::take(&mut self.peers);
-		old_peers.into_iter().filter_map(move |(id, mut p)| {
+		old_peers.into_iter().filter_map(move |(peer_id, mut p)| {
 			// peers that were downloading justifications
 			// should be kept in that state.
 			if let PeerSyncState::DownloadingJustification(_) = p.state {
 				// We make sure our common number is at least something we have.
 				p.common_number = self.best_queued_number;
-				self.peers.insert(id, p);
+				self.peers.insert(peer_id, p);
 				return None
 			}
 
-			// since the request is not a justification, remove it from pending responses
-			self.pending_responses.remove(&id);
-
 			// handle peers that were in other states.
-			match self.new_peer(id, p.best_hash, p.best_number) {
-				Ok(None) => None,
-				Ok(Some(x)) => Some(Ok((id, x))),
+			match self.new_peer(peer_id, p.best_hash, p.best_number) {
+				// since the request is not a justification, remove it from pending responses
+				Ok(None) => Some(Ok(BlockRequestEvent::RemoveStale { peer_id })),
+				// update the request if the new one is available
+				Ok(Some(request)) => Some(Ok(BlockRequestEvent::SendRequest { peer_id, request })),
+				// this implies that we need to drop the pending response from the peer
 				Err(e) => Some(Err(e)),
 			}
 		})
@@ -1524,6 +1422,11 @@ where
 			.any(|(_, p)| p.state == PeerSyncState::DownloadingStale(*hash))
 	}
 
+	/// Is the peer known to the sync state machine?
+	pub fn is_peer_known(&self, peer_id: &PeerId) -> bool {
+		self.peers.contains_key(peer_id)
+	}
+
 	/// Get the set of downloaded blocks that are ready to be queued for import.
fn ready_blocks(&mut self) -> Vec> { self.blocks @@ -1588,117 +1491,12 @@ where None } - /// Get config for the block announcement protocol - pub fn get_block_announce_proto_config( - protocol_id: ProtocolId, - fork_id: &Option, - roles: Roles, - best_number: NumberFor, - best_hash: B::Hash, - genesis_hash: B::Hash, - ) -> NonDefaultSetConfig { - let block_announces_protocol = { - let genesis_hash = genesis_hash.as_ref(); - if let Some(ref fork_id) = fork_id { - format!( - "/{}/{}/block-announces/1", - array_bytes::bytes2hex("", genesis_hash), - fork_id - ) - } else { - format!("/{}/block-announces/1", array_bytes::bytes2hex("", genesis_hash)) - } - }; - - NonDefaultSetConfig { - notifications_protocol: block_announces_protocol.into(), - fallback_names: iter::once( - format!("/{}/block-announces/1", protocol_id.as_ref()).into(), - ) - .collect(), - max_notification_size: MAX_BLOCK_ANNOUNCE_SIZE, - handshake: Some(NotificationHandshake::new(BlockAnnouncesHandshake::::build( - roles, - best_number, - best_hash, - genesis_hash, - ))), - // NOTE: `set_config` will be ignored by `protocol.rs` as the block announcement - // protocol is still hardcoded into the peerset. - set_config: SetConfig { - in_peers: 0, - out_peers: 0, - reserved_nodes: Vec::new(), - non_reserved_mode: NonReservedPeerMode::Deny, - }, - } - } - - fn decode_state_response(response: &[u8]) -> Result { - let response = StateResponse::decode(response) - .map_err(|error| format!("Failed to decode state response: {error}"))?; - - Ok(OpaqueStateResponse(Box::new(response))) - } - - fn send_state_request(&mut self, who: PeerId, request: OpaqueStateRequest) { - let (tx, rx) = oneshot::channel(); - - if self.peers.contains_key(&who) { - self.pending_responses - .insert(who, Box::pin(async move { (who, PeerRequest::State, rx.await) })); - } - - match self.encode_state_request(&request) { - Ok(data) => { - self.network_service.start_request( - who, - self.state_request_protocol_name.clone(), - data, - tx, - IfDisconnected::ImmediateError, - ); - }, - Err(err) => { - log::warn!( - target: LOG_TARGET, - "Failed to encode state request {request:?}: {err:?}", - ); - }, - } - } - - fn send_warp_sync_request(&mut self, who: PeerId, request: WarpProofRequest) { - let (tx, rx) = oneshot::channel(); - - if self.peers.contains_key(&who) { - self.pending_responses - .insert(who, Box::pin(async move { (who, PeerRequest::WarpProof, rx.await) })); - } - - match &self.warp_sync_protocol_name { - Some(name) => self.network_service.start_request( - who, - name.clone(), - request.encode(), - tx, - IfDisconnected::ImmediateError, - ), - None => { - log::warn!( - target: LOG_TARGET, - "Trying to send warp sync request when no protocol is configured {request:?}", - ); - }, - } - } - - fn on_block_response( + pub(crate) fn on_block_response( &mut self, peer_id: PeerId, request: BlockRequest, blocks: Vec>, - ) -> Option> { + ) -> Option<(PeerId, BlockRequest)> { let block_response = BlockResponse:: { id: request.id, blocks }; let blocks_range = || match ( @@ -1741,10 +1539,7 @@ where self.import_blocks(origin, blocks); None }, - Ok(OnBlockData::Request(peer, req)) => { - self.send_block_request(peer, req); - None - }, + Ok(OnBlockData::Request(peer, req)) => Some((peer, req)), Ok(OnBlockData::Continue) => None, Err(BadPeer(id, repu)) => { self.network_service @@ -1756,20 +1551,14 @@ where } } - pub fn on_state_response( - &mut self, - peer_id: PeerId, - response: OpaqueStateResponse, - ) -> Option> { + pub fn on_state_response(&mut self, peer_id: PeerId, 
response: OpaqueStateResponse) { match self.on_state_data(&peer_id, response) { - Ok(OnStateData::Import(origin, block)) => - Some(ImportResult::BlockImport(origin, vec![block])), - Ok(OnStateData::Continue) => None, + Ok(OnStateData::Import(origin, block)) => self.import_blocks(origin, vec![block]), + Ok(OnStateData::Continue) => {}, Err(BadPeer(id, repu)) => { self.network_service .disconnect_peer(id, self.block_announce_protocol_name.clone()); self.network_service.report_peer(id, repu); - None }, } } @@ -1782,165 +1571,10 @@ where } } - fn process_outbound_requests(&mut self) { - for (id, request) in self.block_requests() { - self.send_block_request(id, request); - } - - if let Some((id, request)) = self.state_request() { - self.send_state_request(id, request); - } - - for (id, request) in self.justification_requests().collect::>() { - self.send_block_request(id, request); - } - - if let Some((id, request)) = self.warp_sync_request() { - self.send_warp_sync_request(id, request); - } - } - - fn poll_pending_responses(&mut self, cx: &mut std::task::Context) -> Poll> { - let ready_responses = self - .pending_responses - .values_mut() - .filter_map(|future| match future.poll_unpin(cx) { - Poll::Pending => None, - Poll::Ready(result) => Some(result), - }) - .collect::>(); - - for (id, request, response) in ready_responses { - self.pending_responses - .remove(&id) - .expect("Logic error: peer id from pending response is missing in the map."); - - match response { - Ok(Ok(resp)) => match request { - PeerRequest::Block(req) => { - match self.block_downloader.block_response_into_blocks(&req, resp) { - Ok(blocks) => { - if let Some(import) = self.on_block_response(id, req, blocks) { - return Poll::Ready(import) - } - }, - Err(BlockResponseError::DecodeFailed(e)) => { - debug!( - target: LOG_TARGET, - "Failed to decode block response from peer {:?}: {:?}.", - id, - e - ); - self.network_service.report_peer(id, rep::BAD_MESSAGE); - self.network_service - .disconnect_peer(id, self.block_announce_protocol_name.clone()); - continue - }, - Err(BlockResponseError::ExtractionFailed(e)) => { - debug!( - target: LOG_TARGET, - "Failed to extract blocks from peer response {:?}: {:?}.", - id, - e - ); - self.network_service.report_peer(id, rep::BAD_MESSAGE); - continue - }, - } - }, - PeerRequest::State => { - let response = match Self::decode_state_response(&resp[..]) { - Ok(proto) => proto, - Err(e) => { - debug!( - target: LOG_TARGET, - "Failed to decode state response from peer {id:?}: {e:?}.", - ); - self.network_service.report_peer(id, rep::BAD_MESSAGE); - self.network_service - .disconnect_peer(id, self.block_announce_protocol_name.clone()); - continue - }, - }; - - if let Some(import) = self.on_state_response(id, response) { - return Poll::Ready(import) - } - }, - PeerRequest::WarpProof => { - self.on_warp_sync_response(id, EncodedProof(resp)); - }, - }, - Ok(Err(e)) => { - debug!(target: LOG_TARGET, "Request to peer {id:?} failed: {e:?}."); - - match e { - RequestFailure::Network(OutboundFailure::Timeout) => { - self.network_service.report_peer(id, rep::TIMEOUT); - self.network_service - .disconnect_peer(id, self.block_announce_protocol_name.clone()); - }, - RequestFailure::Network(OutboundFailure::UnsupportedProtocols) => { - self.network_service.report_peer(id, rep::BAD_PROTOCOL); - self.network_service - .disconnect_peer(id, self.block_announce_protocol_name.clone()); - }, - RequestFailure::Network(OutboundFailure::DialFailure) => { - self.network_service - .disconnect_peer(id, 
self.block_announce_protocol_name.clone()); - }, - RequestFailure::Refused => { - self.network_service.report_peer(id, rep::REFUSED); - self.network_service - .disconnect_peer(id, self.block_announce_protocol_name.clone()); - }, - RequestFailure::Network(OutboundFailure::ConnectionClosed) | - RequestFailure::NotConnected => { - self.network_service - .disconnect_peer(id, self.block_announce_protocol_name.clone()); - }, - RequestFailure::UnknownProtocol => { - debug_assert!(false, "Block request protocol should always be known."); - }, - RequestFailure::Obsolete => { - debug_assert!( - false, - "Can not receive `RequestFailure::Obsolete` after dropping the \ - response receiver.", - ); - }, - } - }, - Err(oneshot::Canceled) => { - trace!( - target: LOG_TARGET, - "Request to peer {id:?} failed due to oneshot being canceled.", - ); - self.network_service - .disconnect_peer(id, self.block_announce_protocol_name.clone()); - }, - } - } - - Poll::Pending - } - - fn encode_state_request(&self, request: &OpaqueStateRequest) -> Result, String> { - let request: &StateRequest = request.0.downcast_ref().ok_or_else(|| { - "Failed to downcast opaque state response during encoding, this is an \ - implementation bug." - .to_string() - })?; - - Ok(request.encode_to_vec()) - } - - fn justification_requests<'a>( - &'a mut self, - ) -> Box)> + 'a> { + fn justification_requests(&mut self) -> Vec<(PeerId, BlockRequest)> { let peers = &mut self.peers; let mut matcher = self.extra_justifications.matcher(); - Box::new(std::iter::from_fn(move || { + std::iter::from_fn(move || { if let Some((peer, request)) = matcher.next(peers) { peers .get_mut(&peer) @@ -1959,7 +1593,8 @@ where } else { None } - })) + }) + .collect() } fn block_requests(&mut self) -> Vec<(PeerId, BlockRequest)> { @@ -2086,7 +1721,6 @@ where } }) .collect() - // Box::new(iter) } fn state_request(&mut self) -> Option<(PeerId, OpaqueStateRequest)> { @@ -2288,13 +1922,14 @@ where /// A batch of blocks have been processed, with or without errors. /// /// Call this when a batch of blocks have been processed by the import - /// queue, with or without errors. + /// queue, with or without errors. If an error is returned, the pending response + /// from the peer must be dropped. 
@@ -2288,13 +1922,14 @@ where
 	/// A batch of blocks have been processed, with or without errors.
 	///
 	/// Call this when a batch of blocks have been processed by the import
-	/// queue, with or without errors.
+	/// queue, with or without errors. If an error is returned, the pending response
+	/// from the peer must be dropped.
 	fn on_blocks_processed(
 		&mut self,
 		imported: usize,
 		count: usize,
 		results: Vec<(Result<BlockImportStatus<NumberFor<B>>, BlockImportError>, B::Hash)>,
-	) -> Box<dyn Iterator<Item = Result<(PeerId, BlockRequest<B>), BadPeer>>> {
+	) -> Box<dyn Iterator<Item = Result<BlockRequestEvent<B>, BadPeer>>> {
 		trace!(target: LOG_TARGET, "Imported {imported} of {count}");
 
 		let mut output = Vec::new();
@@ -2799,13 +2434,10 @@ fn validate_blocks(
 #[cfg(test)]
 mod test {
 	use super::*;
-	use crate::{mock::MockBlockDownloader, service::network::NetworkServiceProvider};
+	use crate::service::network::NetworkServiceProvider;
 	use futures::executor::block_on;
 	use sc_block_builder::BlockBuilderProvider;
-	use sc_network_common::{
-		role::Role,
-		sync::message::{BlockAnnounce, BlockData, BlockState, FromBlock},
-	};
+	use sc_network_common::sync::message::{BlockAnnounce, BlockData, BlockState, FromBlock};
 	use sp_blockchain::HeaderBackend;
 	use substrate_test_runtime_client::{
 		runtime::{Block, Hash, Header},
@@ -2825,21 +2457,16 @@ mod test {
 		let import_queue = Box::new(sc_consensus::import_queue::mock::MockImportQueueHandle::new());
 		let (_chain_sync_network_provider, chain_sync_network_handle) =
 			NetworkServiceProvider::new();
 
-		let (mut sync, _) = ChainSync::new(
+		let mut sync = ChainSync::new(
 			SyncMode::Full,
 			client.clone(),
-			ProtocolId::from("test-protocol-name"),
-			&Some(String::from("test-fork-id")),
-			Roles::from(&Role::Full),
+			ProtocolName::from("test-block-announce-protocol"),
 			1,
 			64,
 			None,
 			None,
 			chain_sync_network_handle,
 			import_queue,
-			Arc::new(MockBlockDownloader::new()),
-			ProtocolName::from("state-request"),
-			None,
 		)
 		.unwrap();
@@ -2857,7 +2484,8 @@ mod test {
 		// the justification request should be scheduled to that peer
 		assert!(sync
 			.justification_requests()
-			.any(|(who, request)| { who == peer_id && request.from == FromBlock::Hash(a1_hash) }));
+			.iter()
+			.any(|(who, request)| { *who == peer_id && request.from == FromBlock::Hash(a1_hash) }));
 
 		// there are no extra pending requests
 		assert_eq!(sync.extra_justifications.pending_requests().count(), 0);
@@ -2891,21 +2519,16 @@
 		let (_chain_sync_network_provider, chain_sync_network_handle) =
 			NetworkServiceProvider::new();
 
-		let (mut sync, _) = ChainSync::new(
+		let mut sync = ChainSync::new(
 			SyncMode::Full,
 			client.clone(),
-			ProtocolId::from("test-protocol-name"),
-			&Some(String::from("test-fork-id")),
-			Roles::from(&Role::Full),
+			ProtocolName::from("test-block-announce-protocol"),
 			1,
 			64,
 			None,
 			None,
 			chain_sync_network_handle,
 			import_queue,
-			Arc::new(MockBlockDownloader::new()),
-			ProtocolName::from("state-request"),
-			None,
 		)
 		.unwrap();
@@ -2944,8 +2567,8 @@
 		// the justification request should be scheduled to the
 		// new peer which is at the given block
-		assert!(sync.justification_requests().any(|(p, r)| {
-			p == peer_id3 &&
+		assert!(sync.justification_requests().iter().any(|(p, r)| {
+			*p == peer_id3 &&
 				r.fields == BlockAttributes::JUSTIFICATION &&
 				r.from == FromBlock::Hash(b1_hash)
 		}));
@@ -2959,9 +2582,11 @@
 		let block_requests = sync.restart();
 
 		// which should make us send out block requests to the first two peers
-		assert!(block_requests
-			.map(|r| r.unwrap())
-			.all(|(p, _)| { p == peer_id1 || p == peer_id2 }));
+		assert!(block_requests.map(|r| r.unwrap()).all(|event| match event {
+			BlockRequestEvent::SendRequest { peer_id, .. } =>
+				peer_id == peer_id1 || peer_id == peer_id2,
+			BlockRequestEvent::RemoveStale { .. } => false,
+		}));
 
 		// peer 3 should be unaffected it was downloading finality data
 		assert_eq!(
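The restart and block-processing paths now report `BlockRequestEvent`s instead of raw `(PeerId, BlockRequest)` pairs, so obsolete in-flight requests can be cancelled explicitly by the driver. A sketch of caller-side handling, assuming only the two variants the tests match on (any other fields are elided; the surrounding plumbing is assumed):

```rust
// Hypothetical driver-side handling of restart events.
fn handle_restart_events(
	events: Vec<Result<BlockRequestEvent<Block>, BadPeer>>,
	pending: &mut PendingResponses<Block>,
) {
	for event in events {
		match event {
			Ok(BlockRequestEvent::SendRequest { peer_id, .. }) => {
				// a fresh request should be dispatched to `peer_id`
				// (request payload elided in this sketch)
				let _ = peer_id;
			},
			Ok(BlockRequestEvent::RemoveStale { peer_id }) => {
				// the response still in flight from `peer_id` is obsolete: drop it
				pending.remove(&peer_id);
			},
			Err(BadPeer(peer_id, _repu)) => {
				// report/disconnect the peer and drop its pending response
				pending.remove(&peer_id);
			},
		}
	}
}
```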
@@ -3065,21 +2690,16 @@
 		let (_chain_sync_network_provider, chain_sync_network_handle) =
 			NetworkServiceProvider::new();
 
-		let (mut sync, _) = ChainSync::new(
+		let mut sync = ChainSync::new(
 			SyncMode::Full,
 			client.clone(),
-			ProtocolId::from("test-protocol-name"),
-			&Some(String::from("test-fork-id")),
-			Roles::from(&Role::Full),
+			ProtocolName::from("test-block-announce-protocol"),
 			5,
 			64,
 			None,
 			None,
 			chain_sync_network_handle,
 			import_queue,
-			Arc::new(MockBlockDownloader::new()),
-			ProtocolName::from("state-request"),
-			None,
 		)
 		.unwrap();
@@ -3191,21 +2811,16 @@
 			NetworkServiceProvider::new();
 		let info = client.info();
 
-		let (mut sync, _) = ChainSync::new(
+		let mut sync = ChainSync::new(
 			SyncMode::Full,
 			client.clone(),
-			ProtocolId::from("test-protocol-name"),
-			&Some(String::from("test-fork-id")),
-			Roles::from(&Role::Full),
+			ProtocolName::from("test-block-announce-protocol"),
 			5,
 			64,
 			None,
 			None,
 			chain_sync_network_handle,
 			import_queue,
-			Arc::new(MockBlockDownloader::new()),
-			ProtocolName::from("state-request"),
-			None,
 		)
 		.unwrap();
@@ -3348,21 +2963,16 @@
 		let info = client.info();
 
-		let (mut sync, _) = ChainSync::new(
+		let mut sync = ChainSync::new(
 			SyncMode::Full,
 			client.clone(),
-			ProtocolId::from("test-protocol-name"),
-			&Some(String::from("test-fork-id")),
-			Roles::from(&Role::Full),
+			ProtocolName::from("test-block-announce-protocol"),
 			5,
 			64,
 			None,
 			None,
 			chain_sync_network_handle,
 			import_queue,
-			Arc::new(MockBlockDownloader::new()),
-			ProtocolName::from("state-request"),
-			None,
 		)
 		.unwrap();
@@ -3490,21 +3100,16 @@
 		let info = client.info();
 
-		let (mut sync, _) = ChainSync::new(
+		let mut sync = ChainSync::new(
 			SyncMode::Full,
 			client.clone(),
-			ProtocolId::from("test-protocol-name"),
-			&Some(String::from("test-fork-id")),
-			Roles::from(&Role::Full),
+			ProtocolName::from("test-block-announce-protocol"),
 			5,
 			64,
 			None,
 			None,
 			chain_sync_network_handle,
 			import_queue,
-			Arc::new(MockBlockDownloader::new()),
-			ProtocolName::from("state-request"),
-			None,
 		)
 		.unwrap();
@@ -3634,21 +3239,16 @@
 		let mut client = Arc::new(TestClientBuilder::new().build());
 		let blocks = (0..3).map(|_| build_block(&mut client, None, false)).collect::<Vec<_>>();
 
-		let (mut sync, _) = ChainSync::new(
+		let mut sync = ChainSync::new(
 			SyncMode::Full,
 			client.clone(),
-			ProtocolId::from("test-protocol-name"),
-			&Some(String::from("test-fork-id")),
-			Roles::from(&Role::Full),
+			ProtocolName::from("test-block-announce-protocol"),
 			1,
 			64,
 			None,
 			None,
 			chain_sync_network_handle,
 			import_queue,
-			Arc::new(MockBlockDownloader::new()),
-			ProtocolName::from("state-request"),
-			None,
 		)
 		.unwrap();
@@ -3679,21 +3279,16 @@
 		let empty_client = Arc::new(TestClientBuilder::new().build());
 
-		let (mut sync, _) = ChainSync::new(
+		let mut sync = ChainSync::new(
 			SyncMode::Full,
 			empty_client.clone(),
-			ProtocolId::from("test-protocol-name"),
-			&Some(String::from("test-fork-id")),
-			Roles::from(&Role::Full),
+			ProtocolName::from("test-block-announce-protocol"),
 			1,
 			64,
 			None,
 			None,
 			chain_sync_network_handle,
 			import_queue,
-			Arc::new(MockBlockDownloader::new()),
-			ProtocolName::from("state-request"),
-			None,
 		)
 		.unwrap();
@@ -3731,21 +3326,16 @@
 		let import_queue = Box::new(sc_consensus::import_queue::mock::MockImportQueueHandle::new());
 		let (_chain_sync_network_provider, chain_sync_network_handle) =
 			NetworkServiceProvider::new();
 
-		let (mut sync, _) = ChainSync::new(
+		let mut sync = ChainSync::new(
 			SyncMode::Full,
 			client.clone(),
-			ProtocolId::from("test-protocol-name"),
-			&Some(String::from("test-fork-id")),
-			Roles::from(&Role::Full),
+			ProtocolName::from("test-block-announce-protocol"),
 			1,
 			64,
 			None,
 			None,
 			chain_sync_network_handle,
 			import_queue,
-			Arc::new(MockBlockDownloader::new()),
-			ProtocolName::from("state-request"),
-			None,
 		)
 		.unwrap();
@@ -3766,10 +3356,14 @@
 		// add new peer and request blocks from them
 		sync.new_peer(peers[0], Hash::random(), 42).unwrap();
 
+		// we don't actually perform any requests, just keep track of peers waiting for a response
+		let mut pending_responses = HashSet::new();
+
 		// we wil send block requests to these peers
 		// for these blocks we don't know about
-		for (peer, request) in sync.block_requests() {
-			sync.send_block_request(peer, request);
+		for (peer, _request) in sync.block_requests() {
+			// "send" request
+			pending_responses.insert(peer);
 		}
 
 		// add a new peer at a known block
 		sync.new_peer(peers[1], b1_hash, b1_number).unwrap();
@@ -3780,10 +3374,11 @@
 		// the justification request should be scheduled to the
 		// new peer which is at the given block
-		let mut requests = sync.justification_requests().collect::<Vec<_>>();
+		let mut requests = sync.justification_requests();
 		assert_eq!(requests.len(), 1);
-		let (peer, request) = requests.remove(0);
-		sync.send_block_request(peer, request);
+		let (peer, _request) = requests.remove(0);
+		// "send" request
+		assert!(pending_responses.insert(peer));
 
 		assert!(!std::matches!(
 			sync.peers.get(&peers[0]).unwrap().state,
@@ -3793,18 +3388,37 @@
 			sync.peers.get(&peers[1]).unwrap().state,
 			PeerSyncState::DownloadingJustification(b1_hash),
 		);
-		assert_eq!(sync.pending_responses.len(), 2);
-
-		let requests = sync.restart().collect::<Vec<_>>();
-		assert!(requests.iter().any(|res| res.as_ref().unwrap().0 == peers[0]));
+		assert_eq!(pending_responses.len(), 2);
+
+		// restart sync
+		let request_events = sync.restart().collect::<Vec<_>>();
+		for event in request_events.iter() {
+			match event.as_ref().unwrap() {
+				BlockRequestEvent::RemoveStale { peer_id } => {
+					pending_responses.remove(&peer_id);
+				},
+				BlockRequestEvent::SendRequest { peer_id, .. } => {
+					// we drop obsolete response, but don't register a new request, it's checked in
+					// the `assert!` below
+					pending_responses.remove(&peer_id);
+				},
+			}
+		}
+		assert!(request_events.iter().any(|event| {
+			match event.as_ref().unwrap() {
+				BlockRequestEvent::RemoveStale { .. } => false,
+				BlockRequestEvent::SendRequest { peer_id, .. } => peer_id == &peers[0],
+			}
+		}));
 
-		assert_eq!(sync.pending_responses.len(), 1);
-		assert!(sync.pending_responses.get(&peers[1]).is_some());
+		assert_eq!(pending_responses.len(), 1);
+		assert!(pending_responses.contains(&peers[1]));
 		assert_eq!(
 			sync.peers.get(&peers[1]).unwrap().state,
 			PeerSyncState::DownloadingJustification(b1_hash),
 		);
 
 		sync.peer_disconnected(&peers[1]);
-		assert_eq!(sync.pending_responses.len(), 0);
+		pending_responses.remove(&peers[1]);
+		assert_eq!(pending_responses.len(), 0);
 	}
 }
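Since `ChainSync` no longer owns the response futures, disconnects have to be propagated to both halves, which is exactly what the test above mirrors with its local `HashSet`. A minimal sketch of the real driver-side obligation (assuming the driver owns both objects; type parameters assumed):

```rust
// On disconnect, forget the peer in the sync state machine *and* drop any
// response we are still waiting for from it.
fn on_peer_disconnected(
	sync: &mut ChainSync<Block, Client>,
	pending: &mut PendingResponses<Block>,
	peer_id: &PeerId,
) {
	sync.peer_disconnected(peer_id);
	pending.remove(peer_id);
}
```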
diff --git a/substrate/client/network/sync/src/mock.rs b/substrate/client/network/sync/src/mock.rs
index 859f9fb9c54b..b51eec8d1499 100644
--- a/substrate/client/network/sync/src/mock.rs
+++ b/substrate/client/network/sync/src/mock.rs
@@ -20,7 +20,7 @@
 use crate::block_relay_protocol::{BlockDownloader as BlockDownloaderT, BlockResponseError};
-use futures::{channel::oneshot, task::Poll};
+use futures::channel::oneshot;
 use libp2p::PeerId;
 use sc_network::RequestFailure;
 use sc_network_common::sync::{
@@ -39,7 +39,6 @@ mockall::mock! {
 	fn num_sync_requests(&self) -> usize;
 	fn num_downloaded_blocks(&self) -> usize;
 	fn num_peers(&self) -> usize;
-	fn num_active_peers(&self) -> usize;
 	fn new_peer(
 		&mut self,
 		who: PeerId,
@@ -81,15 +80,6 @@ mockall::mock! {
 	);
 	fn peer_disconnected(&mut self, who: &PeerId);
 	fn metrics(&self) -> Metrics;
-	fn poll<'a>(
-		&mut self,
-		cx: &mut std::task::Context<'a>,
-	) -> Poll<()>;
-	fn send_block_request(
-		&mut self,
-		who: PeerId,
-		request: BlockRequest<B>,
-	);
 }
diff --git a/substrate/client/network/sync/src/pending_responses.rs b/substrate/client/network/sync/src/pending_responses.rs
new file mode 100644
index 000000000000..c863267e7808
--- /dev/null
+++ b/substrate/client/network/sync/src/pending_responses.rs
@@ -0,0 +1,114 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! [`PendingResponses`] is responsible for keeping track of pending responses and
+//! polling them.
+
+use futures::{
+	channel::oneshot,
+	future::BoxFuture,
+	stream::{BoxStream, Stream},
+	FutureExt, StreamExt,
+};
+use libp2p::PeerId;
+use log::error;
+use sc_network::request_responses::RequestFailure;
+use sc_network_common::sync::PeerRequest;
+use sp_runtime::traits::Block as BlockT;
+use std::task::{Context, Poll};
+use tokio_stream::StreamMap;
+
+/// Response result.
+type ResponseResult = Result<Result<Vec<u8>, RequestFailure>, oneshot::Canceled>;
+
+/// A future yielding [`ResponseResult`].
+type ResponseFuture = BoxFuture<'static, ResponseResult>;
+
+/// An event we receive once a pending response future resolves.
+pub(crate) struct ResponseEvent<B: BlockT> {
+	pub peer_id: PeerId,
+	pub request: PeerRequest<B>,
+	pub response: ResponseResult,
+}
+
+/// Stream taking care of polling pending responses.
+pub(crate) struct PendingResponses<B: BlockT> {
+	/// Pending responses
+	pending_responses: StreamMap<PeerId, BoxStream<'static, (PeerRequest<B>, ResponseResult)>>,
+}
+
+impl<B: BlockT> PendingResponses<B> {
+	pub fn new() -> Self {
+		Self { pending_responses: StreamMap::new() }
+	}
+
+	pub fn insert(
+		&mut self,
+		peer_id: PeerId,
+		request: PeerRequest<B>,
+		response_future: ResponseFuture,
+	) {
+		let request_type = request.get_type();
+
+		if self
+			.pending_responses
+			.insert(
+				peer_id,
+				Box::pin(async move { (request, response_future.await) }.into_stream()),
+			)
+			.is_some()
+		{
+			error!(
+				target: crate::LOG_TARGET,
+				"Discarded pending response from peer {peer_id}, request type: {request_type:?}.",
+			);
+			debug_assert!(false);
+		}
+	}
+
+	pub fn remove(&mut self, peer_id: &PeerId) -> bool {
+		self.pending_responses.remove(peer_id).is_some()
+	}
+
+	pub fn len(&self) -> usize {
+		self.pending_responses.len()
+	}
+}
+
+impl<B: BlockT> Unpin for PendingResponses<B> {}
+
+impl<B: BlockT> Stream for PendingResponses<B> {
+	type Item = ResponseEvent<B>;
+
+	fn poll_next(
+		mut self: std::pin::Pin<&mut Self>,
+		cx: &mut Context<'_>,
+	) -> Poll<Option<Self::Item>> {
+		match futures::ready!(self.pending_responses.poll_next_unpin(cx)) {
+			Some((peer_id, (request, response))) => {
+				// We need to manually remove the stream, because `StreamMap` doesn't know yet that
+				// it's going to yield `None`, so may not remove it before the next request is made
+				// to the same peer.
+				self.pending_responses.remove(&peer_id);
+
+				Poll::Ready(Some(ResponseEvent { peer_id, request, response }))
+			},
+			None => Poll::Ready(None),
+		}
+	}
+}
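Because `PendingResponses` implements `Stream` (and `Unpin`), its owner can poll it as one branch of a larger event loop; the manual `remove` in `poll_next` keeps a finished per-peer sub-stream from shadowing a later request to the same peer. A minimal consumption sketch, with the actual event handling elided (note that the stream yields `None` whenever the map is empty, so in practice it would be selected over alongside other event sources rather than drained in isolation):

```rust
use futures::StreamExt;

// Poll resolved responses and route them by the original request type.
async fn drain_responses(pending: &mut PendingResponses<Block>) {
	while let Some(ResponseEvent { peer_id, request, response }) = pending.next().await {
		match response {
			Ok(Ok(bytes)) => { /* decode `bytes` according to `request`, feed ChainSync */ },
			Ok(Err(_failure)) => { /* report and/or disconnect `peer_id` */ },
			Err(_canceled) => { /* response sender dropped; disconnect `peer_id` */ },
		}
		let _ = (peer_id, request);
	}
}
```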
diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml
index c93006753afb..f4068d1bc594 100644
--- a/substrate/client/rpc-spec-v2/Cargo.toml
+++ b/substrate/client/rpc-spec-v2/Cargo.toml
@@ -31,7 +31,7 @@ serde = "1.0"
 hex = "0.4"
 futures = "0.3.21"
 parking_lot = "0.12.1"
-tokio-stream = { version = "0.1", features = ["sync"] }
+tokio-stream = { version = "0.1.14", features = ["sync"] }
 tokio = { version = "1.22.0", features = ["sync"] }
 array-bytes = "6.1"
 log = "0.4.17"

From 769bdd3ff33a291cbc70a800a3830638467e42a2 Mon Sep 17 00:00:00 2001
From: ordian
Date: Thu, 28 Sep 2023 01:07:32 +0200
Subject: [PATCH 69/69] runtime-api: cleanup after v7 stabilization (#1729)

Follow-up to #1543.

---
 polkadot/node/core/runtime-api/src/cache.rs   | 1 -
 polkadot/node/core/runtime-api/src/lib.rs     | 4 ++--
 polkadot/node/subsystem-types/src/messages.rs | 2 +-
 polkadot/node/subsystem-util/src/lib.rs       | 1 -
 4 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/polkadot/node/core/runtime-api/src/cache.rs b/polkadot/node/core/runtime-api/src/cache.rs
index e05e5823a282..6cf7fa744d3f 100644
--- a/polkadot/node/core/runtime-api/src/cache.rs
+++ b/polkadot/node/core/runtime-api/src/cache.rs
@@ -96,7 +96,6 @@ impl Default for RequestResultCache {
 			unapplied_slashes: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)),
 			key_ownership_proof: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)),
 			minimum_backing_votes: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)),
-			para_backing_state: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)),
 			async_backing_params: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)),
 		}
diff --git a/polkadot/node/core/runtime-api/src/lib.rs b/polkadot/node/core/runtime-api/src/lib.rs
index 19b2f5565a22..1b18941e5464 100644
--- a/polkadot/node/core/runtime-api/src/lib.rs
+++ b/polkadot/node/core/runtime-api/src/lib.rs
@@ -569,7 +569,7 @@ where
 			query!(
 				ParaBackingState,
 				para_backing_state(para),
-				ver = Request::STAGING_BACKING_STATE,
+				ver = Request::ASYNC_BACKING_STATE_RUNTIME_REQUIREMENT,
 				sender
 			)
 		},
@@ -577,7 +577,7 @@ where
 			query!(
 				AsyncBackingParams,
 				async_backing_params(),
-				ver = Request::STAGING_BACKING_STATE,
+				ver = Request::ASYNC_BACKING_STATE_RUNTIME_REQUIREMENT,
 				sender
 			)
 		},
diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs
index eb94f1696c9d..01ccee3add90 100644
--- a/polkadot/node/subsystem-types/src/messages.rs
+++ b/polkadot/node/subsystem-types/src/messages.rs
@@ -725,7 +725,7 @@ impl RuntimeApiRequest {
 	pub const MINIMUM_BACKING_VOTES_RUNTIME_REQUIREMENT: u32 = 6;
 
 	/// Minimum version to enable asynchronous backing: `AsyncBackingParams` and `ParaBackingState`.
-	pub const STAGING_BACKING_STATE: u32 = 7;
+	pub const ASYNC_BACKING_STATE_RUNTIME_REQUIREMENT: u32 = 7;
 }
 
 /// A message to the Runtime API subsystem.
diff --git a/polkadot/node/subsystem-util/src/lib.rs b/polkadot/node/subsystem-util/src/lib.rs
index e60a9ff82eeb..57e4f9cde09a 100644
--- a/polkadot/node/subsystem-util/src/lib.rs
+++ b/polkadot/node/subsystem-util/src/lib.rs
@@ -226,7 +226,6 @@ specialize_requests! {
 	fn request_unapplied_slashes() -> Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>; UnappliedSlashes;
 	fn request_key_ownership_proof(validator_id: ValidatorId) -> Option<slashing::OpaqueKeyOwnershipProof>; KeyOwnershipProof;
 	fn request_submit_report_dispute_lost(dp: slashing::DisputeProof, okop: slashing::OpaqueKeyOwnershipProof) -> Option<()>; SubmitReportDisputeLost;
-	fn request_async_backing_params() -> AsyncBackingParams; AsyncBackingParams;
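The rename only changes the constant's name; the v7 gating logic itself is unchanged. For reference, a caller checking whether the connected runtime can serve these requests would do something along these lines (simplified; the surrounding subsystem plumbing is elided):

```rust
// Skip `ParaBackingState`/`AsyncBackingParams` requests on runtimes older
// than the async-backing API version.
fn supports_async_backing(runtime_api_version: u32) -> bool {
	runtime_api_version >= RuntimeApiRequest::ASYNC_BACKING_STATE_RUNTIME_REQUIREMENT
}
```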