diff --git a/Cargo.lock b/Cargo.lock index e36892ff13c2..ca21d2f3bdc8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2558,9 +2558,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hex-literal" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21e4590e13640f19f249fe3e4eca5113bc4289f2497710378190e7f4bd96f45b" +checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0" [[package]] name = "hex_fmt" @@ -6683,7 +6683,7 @@ dependencies = [ [[package]] name = "polkadot-primitives" -version = "0.9.12" +version = "0.9.13" dependencies = [ "bitvec 0.20.1", "frame-system", @@ -6875,7 +6875,7 @@ dependencies = [ [[package]] name = "polkadot-runtime-parachains" -version = "0.9.12" +version = "0.9.13" dependencies = [ "bitflags", "bitvec 0.20.1", @@ -6889,6 +6889,7 @@ dependencies = [ "log", "pallet-authority-discovery", "pallet-authorship", + "pallet-babe", "pallet-balances", "pallet-session", "pallet-staking", diff --git a/node/network/availability-distribution/src/requester/mod.rs b/node/network/availability-distribution/src/requester/mod.rs index f678a768d61f..53804aae8723 100644 --- a/node/network/availability-distribution/src/requester/mod.rs +++ b/node/network/availability-distribution/src/requester/mod.rs @@ -159,7 +159,7 @@ impl Requester { // Just book keeping - we are already requesting that chunk: { e.get_mut().add_leaf(leaf); - } + }, Entry::Vacant(e) => { let tx = self.tx.clone(); let metrics = self.metrics.clone(); diff --git a/node/network/collator-protocol/src/validator_side/mod.rs b/node/network/collator-protocol/src/validator_side/mod.rs index f7672d932dcd..6eb92f5f81e7 100644 --- a/node/network/collator-protocol/src/validator_side/mod.rs +++ b/node/network/collator-protocol/src/validator_side/mod.rs @@ -1462,7 +1462,7 @@ async fn poll_collation_response( ); CollationFetchResult::Error(COST_WRONG_PARA) - } + }, Ok(CollationFetchingResponse::Collation(receipt, pov)) => { tracing::debug!( target: LOG_TARGET, diff --git a/node/network/statement-distribution/src/tests.rs b/node/network/statement-distribution/src/tests.rs index 2c41d5e7ddf0..80fdb50a4f26 100644 --- a/node/network/statement-distribution/src/tests.rs +++ b/node/network/statement-distribution/src/tests.rs @@ -1814,7 +1814,7 @@ fn peer_cant_flood_with_large_statements() { if p == peer_a && r == COST_APPARENT_FLOOD => { punished = true; - } + }, m => panic!("Unexpected message: {:?}", m), } diff --git a/parachain/Cargo.toml b/parachain/Cargo.toml index 77dcd13b5ce5..196e20e529ab 100644 --- a/parachain/Cargo.toml +++ b/parachain/Cargo.toml @@ -36,3 +36,4 @@ std = [ "polkadot-core-primitives/std", "frame-support/std", ] +runtime-benchmarks = [] diff --git a/parachain/src/primitives.rs b/parachain/src/primitives.rs index bda56bf59e8c..39d59dcfe7a4 100644 --- a/parachain/src/primitives.rs +++ b/parachain/src/primitives.rs @@ -41,9 +41,19 @@ pub use polkadot_core_primitives::BlockNumber as RelayChainBlockNumber; /// Parachain head data included in the chain. 
#[derive( - PartialEq, Eq, Clone, PartialOrd, Ord, Encode, Decode, RuntimeDebug, derive_more::From, TypeInfo, + PartialEq, + Eq, + Clone, + PartialOrd, + Ord, + Encode, + Decode, + RuntimeDebug, + derive_more::From, + TypeInfo, + Default, )] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Default, Hash, MallocSizeOf))] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash, MallocSizeOf))] pub struct HeadData(#[cfg_attr(feature = "std", serde(with = "bytes"))] pub Vec); impl HeadData { diff --git a/primitives/Cargo.toml b/primitives/Cargo.toml index c84c0f8f77a2..86cf62fffa14 100644 --- a/primitives/Cargo.toml +++ b/primitives/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-primitives" -version = "0.9.12" +version = "0.9.13" authors = ["Parity Technologies "] edition = "2018" @@ -26,7 +26,7 @@ polkadot-core-primitives = { path = "../core-primitives", default-features = fal trie = { package = "sp-trie", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } bitvec = { version = "0.20.1", default-features = false, features = ["alloc"] } frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -hex-literal = "0.3.3" +hex-literal = "0.3.4" parity-util-mem = { version = "0.10.0", default-features = false, optional = true } @@ -55,4 +55,4 @@ std = [ "polkadot-core-primitives/std", "bitvec/std", "frame-system/std", -] +] \ No newline at end of file diff --git a/primitives/src/v0.rs b/primitives/src/v0.rs index 2000c173b879..64163d04bdd7 100644 --- a/primitives/src/v0.rs +++ b/primitives/src/v0.rs @@ -107,8 +107,8 @@ impl MallocSizeOf for ValidatorId { } /// Index of the validator is used as a lightweight replacement of the `ValidatorId` when appropriate. -#[derive(Eq, Ord, PartialEq, PartialOrd, Copy, Clone, Encode, Decode, TypeInfo)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug, Hash, MallocSizeOf))] +#[derive(Eq, Ord, PartialEq, PartialOrd, Copy, Clone, Encode, Decode, TypeInfo, Debug)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash, MallocSizeOf))] pub struct ValidatorIndex(pub u32); // We should really get https://github.com/paritytech/polkadot/issues/2403 going .. diff --git a/primitives/src/v1/mod.rs b/primitives/src/v1/mod.rs index a4b598dd6c6a..b1acf7c11750 100644 --- a/primitives/src/v1/mod.rs +++ b/primitives/src/v1/mod.rs @@ -327,8 +327,8 @@ fn check_collator_signature>( } /// A unique descriptor of the candidate receipt. -#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo)] -#[cfg_attr(feature = "std", derive(Debug, Default, Hash, MallocSizeOf))] +#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, Default)] +#[cfg_attr(feature = "std", derive(Debug, Hash, MallocSizeOf))] pub struct CandidateDescriptor { /// The ID of the para this is a candidate for. pub para_id: Id, @@ -407,8 +407,8 @@ pub struct FullCandidateReceipt { } /// A candidate-receipt with commitments directly included. -#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo)] -#[cfg_attr(feature = "std", derive(Debug, Default, Hash, MallocSizeOf))] +#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, Default)] +#[cfg_attr(feature = "std", derive(Debug, Hash, MallocSizeOf))] pub struct CommittedCandidateReceipt { /// The descriptor of the candidate. pub descriptor: CandidateDescriptor, @@ -509,8 +509,8 @@ impl PersistedValidationData { } /// Commitments made in a `CandidateReceipt`. Many of these are outputs of validation. 
-#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo)] -#[cfg_attr(feature = "std", derive(Debug, Default, Hash, MallocSizeOf))] +#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, Default)] +#[cfg_attr(feature = "std", derive(Debug, Hash, MallocSizeOf))] pub struct CandidateCommitments { /// Messages destined to be interpreted by the Relay chain itself. pub upward_messages: Vec, @@ -534,6 +534,8 @@ impl CandidateCommitments { } /// A bitfield concerning availability of backed candidates. +/// +/// Every bit refers to an availability core index. #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo)] pub struct AvailabilityBitfield(pub BitVec); diff --git a/primitives/src/v1/signed.rs b/primitives/src/v1/signed.rs index 612cc1516895..d2515b4873aa 100644 --- a/primitives/src/v1/signed.rs +++ b/primitives/src/v1/signed.rs @@ -41,6 +41,13 @@ use crate::v0::{SigningContext, ValidatorId, ValidatorIndex, ValidatorSignature} #[derive(Clone, PartialEq, Eq, RuntimeDebug)] pub struct Signed(UncheckedSigned); +impl Signed { + /// Convert back to an unchecked type. + pub fn into_unchecked(self) -> UncheckedSigned { + self.0 + } +} + /// Unchecked signed data, can be converted to `Signed` by checking the signature. #[derive(Clone, PartialEq, Eq, RuntimeDebug, Encode, Decode, TypeInfo)] pub struct UncheckedSigned { @@ -253,6 +260,37 @@ impl, RealPayload: Encode> UncheckedSigned( + public: &crate::v0::ValidatorId, + payload: Payload, + context: &SigningContext, + validator_index: ValidatorIndex, + ) -> Self { + use application_crypto::RuntimeAppPublic; + let data = Self::payload_data(&payload, context); + let signature = public.sign(&data).unwrap(); + + Self { payload, validator_index, signature, real_payload: sp_std::marker::PhantomData } + } + + /// Immutably access the signature. + /// + /// # WARNING + /// Only meant for usage in tests and and benchmarks. + pub fn benchmark_signature(&self) -> ValidatorSignature { + self.signature.clone() + } + + /// Set the signature. Only should be used for creating testing mocks. + #[cfg(feature = "std")] + pub fn set_signature(&mut self, signature: ValidatorSignature) { + self.signature = signature + } } impl From> diff --git a/roadmap/implementers-guide/src/runtime/inclusion.md b/roadmap/implementers-guide/src/runtime/inclusion.md index c800abd8f7a7..84b513cb985c 100644 --- a/roadmap/implementers-guide/src/runtime/inclusion.md +++ b/roadmap/implementers-guide/src/runtime/inclusion.md @@ -45,13 +45,38 @@ PendingAvailabilityCommitments: map ParaId => CandidateCommitments; All failed checks should lead to an unrecoverable error making the block invalid. * `process_bitfields(expected_bits, Bitfields, core_lookup: Fn(CoreIndex) -> Option)`: - 1. check that there is at most 1 bitfield per validator and that the number of bits in each bitfield is equal to `expected_bits`. - 1. check that there are no duplicates - 1. check all validator signatures. + 1. call `sanitize_bitfields` and use the sanitized `signed_bitfields` from now on. + 1. call `sanitize_backed_candidates` and use the sanitized `backed_candidates` from now on. 1. apply each bit of bitfield to the corresponding pending candidate. looking up parathread cores using the `core_lookup`. Disregard bitfields that have a `1` bit for any free cores. 1. For each applied bit of each availability-bitfield, set the bit for the validator in the `CandidatePendingAvailability`'s `availability_votes` bitfield. Track all candidates that now have >2/3 of bits set in their `availability_votes`. 
These candidates are now available and can be enacted. 1. For all now-available candidates, invoke the `enact_candidate` routine with the candidate and relay-parent number. 1. Return a list of `(CoreIndex, CandidateHash)` from freed cores consisting of the cores where candidates have become available. +* `sanitize_bitfields( + unchecked_bitfields: UncheckedSignedAvailabilityBitfields, + disputed_bitfield: DisputedBitfield, + expected_bits: usize, + parent_hash: T::Hash, + session_index: SessionIndex, + validators: &[ValidatorId], + )`: + 1. check that `disputed_bitfield` has the same number of bits as `expected_bits`; if not, return early with an empty vec. + 1. each of the below checks is applied to each bitfield. If a check does not pass, the bitfield is skipped. + 1. check that there are no bits set that reference a disputed candidate. + 1. check that the number of bits is equal to `expected_bits`. + 1. check that the validator index is strictly increasing (and thus also unique). + 1. check that the validator bit index is not out of bounds. + 1. check the validator's signature, iff `CHECK_SIGS=true`. + +* `sanitize_backed_candidates<F: Fn(CandidateHash) -> bool>( + relay_parent: T::Hash, + mut backed_candidates: Vec<BackedCandidate<T::Hash>>, + candidate_has_concluded_invalid_dispute: F, + scheduled: &[CoreAssignment], + )`: + 1. filter out any backed candidates that have concluded invalid. + 1. filter out backed candidates that don't have a matching `relay_parent`. + 1. keep only backed candidates whose para id was scheduled, as indicated by the provided `scheduled` parameter. + * `process_candidates(parent_storage_root, BackedCandidates, scheduled: Vec<CoreAssignment>, group_validators: Fn(GroupIndex) -> Option<Vec<ValidatorIndex>>)`: 1. check that each candidate corresponds to a scheduled core and that they are ordered in the same order the cores appear in assignments in `scheduled`. 1. check that `scheduled` is sorted ascending by `CoreIndex`, without duplicates. @@ -78,6 +103,7 @@ All failed checks should lead to an unrecoverable error making the block invalid 1. call `Hrmp::prune_hrmp` with the para id of the candidate and the candidate's `hrmp_watermark`. 1. call `Hrmp::queue_outbound_hrmp` with the para id of the candidate and the list of horizontal messages taken from the commitment, 1. Call `Paras::note_new_head` using the `HeadData` from the receipt and `relay_parent_number`. + * `collect_pending`: ```rust diff --git a/roadmap/implementers-guide/src/runtime/parainherent.md b/roadmap/implementers-guide/src/runtime/parainherent.md index cc5e209362e9..dd67f9f108f8 100644 --- a/roadmap/implementers-guide/src/runtime/parainherent.md +++ b/roadmap/implementers-guide/src/runtime/parainherent.md @@ -29,10 +29,19 @@ OnChainVotes: Option, ## Entry Points -* `enter`: This entry-point accepts three parameters: The relay-chain parent block header, [`Bitfields`](../types/availability.md#signed-availability-bitfield) and [`BackedCandidates`](../types/backing.md#backed-candidate). - 1. Hash the parent header and make sure that it corresponds to the block hash of the parent (tracked by the `frame_system` FRAME module), +* `enter`: This entry-point accepts one parameter: [`ParaInherentData`](../types/runtime.md#ParaInherentData). + 1. Ensure the origin is none. + 1. Ensure `Included` is set as `None`. + 1. Set `Included` as `Some`. + 1. Unpack `ParachainsInherentData` into `signed_bitfields`, `backed_candidates`, `parent_header`, and `disputes`. + 1. Hash the parent header and make sure that it corresponds to the block hash of the parent (tracked by the `frame_system` FRAME module).
+ 1. Calculate the `candidate_weight`, `bitfields_weight`, and `disputes_weight`. + 1. If the sum of `candidate_weight`, `bitfields_weight`, and `disputes_weight` is greater than the max block weight, we do the following with the goal of prioritizing the inclusion of disputes without making it game-able by block authors: + 1. clear `bitfields` and set `bitfields_weight` equal to 0. + 1. clear `backed_candidates` and set `candidate_weight` equal to 0. + 1. invoke `limit_disputes` on the `disputes` with the max block weight iff the disputes weight is greater than the max block weight. 1. Invoke `Disputes::provide_multi_dispute_data`. - 1. If `Disputes::is_frozen`, return and set `Included` to `Some(())`. + 1. If `Disputes::is_frozen`, return. 1. If there are any concluded disputes from the current session, invoke `Inclusion::collect_disputed` with the disputed candidates. Annotate each returned core with `FreedReason::Concluded`, sort them, and invoke `Scheduler::free_cores` with them. 1. The `Bitfields` are first forwarded to the `Inclusion::process_bitfields` routine, returning a set of included candidates and the respective freed cores. Provide the number of availability cores (`Scheduler::availability_cores().len()`) as the expected number of bits and a `Scheduler::core_para` as a core-lookup to the `process_bitfields` routine. Annotate each of these freed cores with `FreedReason::Concluded`. 1. For each freed candidate from the `Inclusion::process_bitfields` call, invoke `Disputes::note_included(current_session, candidate)`. @@ -48,3 +57,32 @@ OnChainVotes: Option, 1. Call `Scheduler::occupied` using the `occupied` core indices of the returned above, first sorting the list of assigned core indices. 1. Call the `Ump::process_pending_upward_messages` routine to execute all messages in upward dispatch queues. 1. If all of the above succeeds, set `Included` to `Some(())`. + + +* `create_inherent`: This entry-point accepts one parameter: `InherentData`. + 1. Invoke [`create_inherent_inner(InherentData)`](#routines), the unit-testable logic for filtering and sanitizing the inherent data used when invoking `enter`. Save the result as `inherent_data`. + 1. If the `inherent_data` is an `Err` variant, return the `enter` call signature with all inherent data cleared, else return the `enter` call signature with `inherent_data` passed in as the `data` param. + +# Routines + +* `create_inherent_inner(data: &InherentData) -> Option<ParachainsInherentData<T::Header>>` + 1. Unpack `InherentData` into its parts: `bitfields`, `backed_candidates`, `disputes`, and the `parent_header`. If the data cannot be unpacked, return `None`. + 1. Hash the `parent_header` and make sure that it corresponds to the block hash of the parent (tracked by the `frame_system` FRAME module). + 1. Invoke `Disputes::filter_multi_dispute_data` to remove duplicates et al from `disputes`. + 1. Run the following within a `with_transaction` closure to avoid side effects (we are essentially replicating the logic that would otherwise happen within `enter` so we can get the filtered bitfields and the `concluded_invalid_disputes` + `scheduled` to use in filtering the `backed_candidates`): + 1. Invoke `Disputes::provide_multi_dispute_data`. + 1. Collect `current_concluded_invalid_disputes`, the disputed candidate hashes from the current session that have concluded invalid. + 1. Collect `concluded_invalid_disputes`, the disputed candidate hashes from the given `backed_candidates`. + 1. Invoke `Inclusion::collect_disputed` with the newly disputed candidates.
Annotate each returned core with `FreedReason::Concluded`, sort them, and invoke `Scheduler::free_cores` with them. + 1. Collect filtered `bitfields` by invoking [`sanitize_bitfields`](inclusion.md#Routines). + 1. Collect `freed_concluded` by invoking `update_pending_availability_and_get_freed_cores` on the filtered bitfields. + 1. Collect all `freed` cores by invoking `collect_all_freed_cores` on `freed_concluded`. + 1. Invoke `scheduler::Pallet::<T>::clear()`. + 1. Invoke `scheduler::Pallet::<T>::schedule` with `freed` and the current block number to create the same schedule of the cores that `enter` will create. + 1. Read the new `scheduler::Pallet::<T>::scheduled()` into `schedule`. + 1. From the `with_transaction` closure return `concluded_invalid_disputes`, `bitfields`, and `scheduled`. + 1. Invoke `sanitize_backed_candidates` using the `scheduled` value returned from the `with_transaction` and pass the closure `|candidate_hash: CandidateHash| -> bool { DisputesHandler::concluded_invalid(current_session, candidate_hash) }` for the param `candidate_has_concluded_invalid_dispute`. + 1. Create an `rng` from `rand_chacha::ChaChaRng::from_seed(compute_entropy::<T>(parent_hash))`. + 1. Invoke `limit_disputes` with the max block weight and `rng`, storing the returned weight in `remaining_weight`. + 1. Fill up the remainder of the block weight with backed candidates and bitfields by invoking `apply_weight_limit` with `remaining_weight` and `rng`. + 1. Return `Some(ParachainsInherentData { bitfields, backed_candidates, disputes, parent_header })`. diff --git a/roadmap/implementers-guide/src/types/runtime.md b/roadmap/implementers-guide/src/types/runtime.md index 5749aeca866a..b8eef8f11204 100644 --- a/roadmap/implementers-guide/src/types/runtime.md +++ b/roadmap/implementers-guide/src/types/runtime.md @@ -116,15 +116,17 @@ struct HostConfiguration { Inherent data passed to a runtime entry-point for the advancement of parachain consensus. -This contains 3 pieces of data: +This contains 4 pieces of data: 1. [`Bitfields`](availability.md#signed-availability-bitfield) 2. [`BackedCandidates`](backing.md#backed-candidate) 3. [`MultiDisputeStatementSet`](disputes.md#multidisputestatementset) +4. `Header` ```rust struct ParaInherentData { bitfields: Bitfields, backed_candidates: BackedCandidates, dispute_statements: MultiDisputeStatementSet, + parent_header: Header } ``` diff --git a/runtime/kusama/Cargo.toml b/runtime/kusama/Cargo.toml index cf1936303d1d..c123548ec40f 100644 --- a/runtime/kusama/Cargo.toml +++ b/runtime/kusama/Cargo.toml @@ -214,6 +214,7 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "frame-election-provider-support/runtime-benchmarks", "pallet-bags-list/runtime-benchmarks", + "runtime-parachains/runtime-benchmarks", ] try-runtime = [ "frame-executive/try-runtime", diff --git a/runtime/kusama/src/lib.rs b/runtime/kusama/src/lib.rs index d924bd603064..29ab69fa185d 100644 --- a/runtime/kusama/src/lib.rs +++ b/runtime/kusama/src/lib.rs @@ -1201,7 +1201,9 @@ impl parachains_hrmp::Config for Runtime { type Currency = Balances; } -impl parachains_paras_inherent::Config for Runtime {} +impl parachains_paras_inherent::Config for Runtime { + type WeightInfo = weights::runtime_parachains_paras_inherent::WeightInfo<Runtime>; +} impl parachains_scheduler::Config for Runtime {} @@ -1933,6 +1935,7 @@ sp_api::impl_runtime_apis!
{ list_benchmark!(list, extra, runtime_common::paras_registrar, Registrar); list_benchmark!(list, extra, runtime_parachains::configuration, Configuration); list_benchmark!(list, extra, runtime_parachains::initializer, Initializer); + list_benchmark!(list, extra, runtime_parachains::paras_inherent, ParaInherent); list_benchmark!(list, extra, runtime_parachains::paras, Paras); // Substrate list_benchmark!(list, extra, pallet_bags_list, BagsList); @@ -2010,6 +2013,7 @@ sp_api::impl_runtime_apis! { add_benchmark!(params, batches, runtime_common::paras_registrar, Registrar); add_benchmark!(params, batches, runtime_parachains::configuration, Configuration); add_benchmark!(params, batches, runtime_parachains::initializer, Initializer); + add_benchmark!(params, batches, runtime_parachains::paras_inherent, ParaInherent); add_benchmark!(params, batches, runtime_parachains::paras, Paras); // Substrate add_benchmark!(params, batches, pallet_balances, Balances); diff --git a/runtime/kusama/src/weights/mod.rs b/runtime/kusama/src/weights/mod.rs index ea8c2fc7f58d..dcfd8ac5031b 100644 --- a/runtime/kusama/src/weights/mod.rs +++ b/runtime/kusama/src/weights/mod.rs @@ -47,3 +47,4 @@ pub mod runtime_common_slots; pub mod runtime_parachains_configuration; pub mod runtime_parachains_initializer; pub mod runtime_parachains_paras; +pub mod runtime_parachains_paras_inherent; diff --git a/runtime/kusama/src/weights/runtime_parachains_paras_inherent.rs b/runtime/kusama/src/weights/runtime_parachains_paras_inherent.rs new file mode 100644 index 000000000000..e43221b98995 --- /dev/null +++ b/runtime/kusama/src/weights/runtime_parachains_paras_inherent.rs @@ -0,0 +1,178 @@ +// Copyright 2017-2021 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . +//! Autogenerated weights for `runtime_parachains::paras_inherent` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2021-11-14, STEPS: `50`, REPEAT: 3, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=kusama-dev +// --steps=50 +// --repeat=3 +// --pallet=runtime_parachains::paras_inherent +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/kusama/src/weights/runtime_parachains_paras_inherent.rs + + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use sp_std::marker::PhantomData; + +/// Weight functions for `runtime_parachains::paras_inherent`. 
+pub struct WeightInfo(PhantomData); +impl runtime_parachains::paras_inherent::WeightInfo for WeightInfo { + // Storage: ParaInherent Included (r:1 w:1) + // Storage: System ParentHash (r:1 w:0) + // Storage: ParaScheduler AvailabilityCores (r:1 w:1) + // Storage: ParasShared CurrentSessionIndex (r:1 w:0) + // Storage: ParaInclusion PendingAvailability (r:2 w:1) + // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) + // Storage: Paras Parachains (r:1 w:0) + // Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) + // Storage: Configuration ActiveConfig (r:1 w:0) + // Storage: Session Validators (r:1 w:0) + // Storage: ParasShared ActiveValidatorIndices (r:1 w:0) + // Storage: Staking ActiveEra (r:1 w:0) + // Storage: Staking ErasRewardPoints (r:1 w:1) + // Storage: Dmp DownwardMessageQueues (r:1 w:1) + // Storage: Hrmp HrmpChannelDigests (r:1 w:1) + // Storage: Paras FutureCodeUpgrades (r:1 w:0) + // Storage: ParaScheduler SessionStartBlock (r:1 w:0) + // Storage: ParaScheduler ParathreadQueue (r:1 w:1) + // Storage: ParaScheduler Scheduled (r:1 w:1) + // Storage: ParaScheduler ValidatorGroups (r:1 w:0) + // Storage: Ump NeedsDispatch (r:1 w:1) + // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) + // Storage: ParaInherent OnChainVotes (r:0 w:1) + // Storage: Hrmp HrmpWatermarks (r:0 w:1) + // Storage: Paras Heads (r:0 w:1) + fn enter_variable_disputes(v: u32, ) -> Weight { + (316_331_000 as Weight) + // Standard Error: 112_000 + .saturating_add((325_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(T::DbWeight::get().reads(23 as Weight)) + .saturating_add(T::DbWeight::get().writes(14 as Weight)) + } + // Storage: ParaInherent Included (r:1 w:1) + // Storage: System ParentHash (r:1 w:0) + // Storage: ParaScheduler AvailabilityCores (r:1 w:1) + // Storage: ParasShared CurrentSessionIndex (r:1 w:0) + // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) + // Storage: Paras Parachains (r:1 w:0) + // Storage: ParaInclusion PendingAvailability (r:2 w:1) + // Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) + // Storage: Configuration ActiveConfig (r:1 w:0) + // Storage: Session Validators (r:1 w:0) + // Storage: ParasShared ActiveValidatorIndices (r:1 w:0) + // Storage: Staking ActiveEra (r:1 w:0) + // Storage: Staking ErasRewardPoints (r:1 w:1) + // Storage: Dmp DownwardMessageQueues (r:1 w:1) + // Storage: Hrmp HrmpChannelDigests (r:1 w:1) + // Storage: Paras FutureCodeUpgrades (r:1 w:0) + // Storage: ParaScheduler SessionStartBlock (r:1 w:0) + // Storage: ParaScheduler ParathreadQueue (r:1 w:1) + // Storage: ParaScheduler Scheduled (r:1 w:1) + // Storage: ParaScheduler ValidatorGroups (r:1 w:0) + // Storage: Ump NeedsDispatch (r:1 w:1) + // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) + // Storage: ParaInclusion AvailabilityBitfields (r:0 w:1) + // Storage: ParaInherent OnChainVotes (r:0 w:1) + // Storage: Hrmp HrmpWatermarks (r:0 w:1) + // Storage: Paras Heads (r:0 w:1) + fn enter_bitfields() -> Weight { + (352_749_000 as Weight) + .saturating_add(T::DbWeight::get().reads(23 as Weight)) + .saturating_add(T::DbWeight::get().writes(15 as Weight)) + } + // Storage: ParaInherent Included (r:1 w:1) + // Storage: System ParentHash (r:1 w:0) + // Storage: ParaScheduler AvailabilityCores (r:1 w:1) + // Storage: ParasShared CurrentSessionIndex (r:1 w:0) + // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) + // Storage: Paras Parachains (r:1 w:0) + // Storage: ParaInclusion PendingAvailability (r:2 w:1) + // Storage: ParaInclusion 
PendingAvailabilityCommitments (r:1 w:1) + // Storage: Configuration ActiveConfig (r:1 w:0) + // Storage: Session Validators (r:1 w:0) + // Storage: ParasShared ActiveValidatorIndices (r:1 w:0) + // Storage: Staking ActiveEra (r:1 w:0) + // Storage: Staking ErasRewardPoints (r:1 w:1) + // Storage: Dmp DownwardMessageQueues (r:1 w:1) + // Storage: Hrmp HrmpChannelDigests (r:1 w:1) + // Storage: Paras FutureCodeUpgrades (r:1 w:0) + // Storage: ParaScheduler SessionStartBlock (r:1 w:0) + // Storage: ParaScheduler ParathreadQueue (r:1 w:1) + // Storage: ParaScheduler Scheduled (r:1 w:1) + // Storage: ParaScheduler ValidatorGroups (r:1 w:0) + // Storage: Paras PastCodeMeta (r:1 w:0) + // Storage: Paras CurrentCodeHash (r:1 w:0) + // Storage: Ump RelayDispatchQueueSize (r:1 w:0) + // Storage: Ump NeedsDispatch (r:1 w:1) + // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) + // Storage: ParaInherent OnChainVotes (r:0 w:1) + // Storage: Hrmp HrmpWatermarks (r:0 w:1) + // Storage: Paras Heads (r:0 w:1) + fn enter_backed_candidates_variable(v: u32, ) -> Weight { + (88_047_000 as Weight) + // Standard Error: 3_275_000 + .saturating_add((68_499_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(T::DbWeight::get().reads(26 as Weight)) + .saturating_add(T::DbWeight::get().writes(14 as Weight)) + } + // Storage: ParaInherent Included (r:1 w:1) + // Storage: System ParentHash (r:1 w:0) + // Storage: ParaScheduler AvailabilityCores (r:1 w:1) + // Storage: ParasShared CurrentSessionIndex (r:1 w:0) + // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) + // Storage: Paras Parachains (r:1 w:0) + // Storage: ParaInclusion PendingAvailability (r:2 w:1) + // Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) + // Storage: Configuration ActiveConfig (r:1 w:0) + // Storage: Session Validators (r:1 w:0) + // Storage: ParasShared ActiveValidatorIndices (r:1 w:0) + // Storage: Staking ActiveEra (r:1 w:0) + // Storage: Staking ErasRewardPoints (r:1 w:1) + // Storage: Dmp DownwardMessageQueues (r:1 w:1) + // Storage: Hrmp HrmpChannelDigests (r:1 w:1) + // Storage: Paras FutureCodeUpgrades (r:1 w:0) + // Storage: ParaScheduler SessionStartBlock (r:1 w:0) + // Storage: ParaScheduler ParathreadQueue (r:1 w:1) + // Storage: ParaScheduler Scheduled (r:1 w:1) + // Storage: ParaScheduler ValidatorGroups (r:1 w:0) + // Storage: Paras PastCodeMeta (r:1 w:0) + // Storage: Paras CurrentCodeHash (r:1 w:0) + // Storage: Ump RelayDispatchQueueSize (r:1 w:0) + // Storage: Ump NeedsDispatch (r:1 w:1) + // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) + // Storage: ParaInherent OnChainVotes (r:0 w:1) + // Storage: Hrmp HrmpWatermarks (r:0 w:1) + // Storage: Paras Heads (r:0 w:1) + fn enter_backed_candidate_code_upgrade() -> Weight { + (53_728_168_000 as Weight) + .saturating_add(T::DbWeight::get().reads(26 as Weight)) + .saturating_add(T::DbWeight::get().writes(14 as Weight)) + } +} diff --git a/runtime/parachains/Cargo.toml b/runtime/parachains/Cargo.toml index da5e695b71fc..705141192093 100644 --- a/runtime/parachains/Cargo.toml +++ b/runtime/parachains/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-runtime-parachains" -version = "0.9.12" +version = "0.9.13" authors = ["Parity Technologies "] edition = "2018" @@ -27,6 +27,7 @@ sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "maste pallet-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-authorship = { git = 
"https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } @@ -44,7 +45,7 @@ rand_chacha = { version = "0.3.1", default-features = false } [dev-dependencies] futures = "0.3.17" -hex-literal = "0.3.3" +hex-literal = "0.3.4" keyring = { package = "sp-keyring", git = "https://github.com/paritytech/substrate", branch = "master" } frame-support-test = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } @@ -94,4 +95,4 @@ try-runtime = [ "pallet-staking/try-runtime", "pallet-timestamp/try-runtime", "pallet-vesting/try-runtime", -] +] \ No newline at end of file diff --git a/runtime/parachains/src/builder.rs b/runtime/parachains/src/builder.rs new file mode 100644 index 000000000000..362d97ecc259 --- /dev/null +++ b/runtime/parachains/src/builder.rs @@ -0,0 +1,590 @@ +use crate::{ + configuration, inclusion, initializer, paras, + paras_inherent::{self}, + scheduler, session_info, shared, +}; +use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; +use frame_support::pallet_prelude::*; +use primitives::v1::{ + collator_signature_payload, AvailabilityBitfield, BackedCandidate, CandidateCommitments, + CandidateDescriptor, CandidateHash, CollatorId, CommittedCandidateReceipt, CompactStatement, + CoreIndex, CoreOccupied, DisputeStatement, DisputeStatementSet, GroupIndex, HeadData, + Id as ParaId, InherentData as ParachainsInherentData, InvalidDisputeStatementKind, + PersistedValidationData, SessionIndex, SigningContext, UncheckedSigned, + ValidDisputeStatementKind, ValidationCode, ValidatorId, ValidatorIndex, ValidityAttestation, +}; +use sp_core::H256; +use sp_runtime::{ + generic::Digest, + traits::{Header as HeaderT, One, Zero}, + RuntimeAppPublic, +}; +use sp_std::{collections::btree_map::BTreeMap, convert::TryInto, prelude::Vec, vec}; + +/// Grab an account, seeded by a name and index. +/// +/// This is directly from frame-benchmarking. Copy/pasted so we can use it when not compiling with +/// "features = runtime-benchmarks". +fn account(name: &'static str, index: u32, seed: u32) -> AccountId { + let entropy = (name, index, seed).using_encoded(sp_io::hashing::blake2_256); + AccountId::decode(&mut &entropy[..]).unwrap_or_default() +} + +/// Create a 32 byte slice based on the given number. +fn byte32_slice_from(n: u32) -> [u8; 32] { + let mut slice = [0u8; 32]; + slice[31] = (n % (1 << 8)) as u8; + slice[30] = ((n >> 8) % (1 << 8)) as u8; + slice[29] = ((n >> 16) % (1 << 8)) as u8; + slice[28] = ((n >> 24) % (1 << 8)) as u8; + + slice +} + +/// Paras inherent `enter` benchmark scenario builder. 
+pub(crate) struct BenchBuilder { + validators: Option>, + block_number: T::BlockNumber, + session: SessionIndex, + target_session: u32, + max_validators_per_core: Option, + max_validators: Option, + dispute_statements: BTreeMap, + _phantom: sp_std::marker::PhantomData, +} + +/// Paras inherent `enter` benchmark scenario. +#[allow(dead_code)] +pub(crate) struct Bench { + pub(crate) data: ParachainsInherentData, + pub(crate) _session: u32, + pub(crate) _block_number: T::BlockNumber, +} + +impl BenchBuilder { + /// Create a new `BenchBuilder` with some opinionated values that should work with the rest + /// of the functions in this implementation. + pub(crate) fn new() -> Self { + BenchBuilder { + // Validators should be declared prior to all other setup. + validators: None, + // Starting block number; we expect it to get incremented on session setup. + block_number: Zero::zero(), + // Starting session; we expect it to get incremented on session setup. + session: SessionIndex::from(0u32), + // Session we want the scenario to take place in. We roll to to this session. + target_session: 2u32, + // Optionally set the max validators per core; otherwise uses the configuration value. + max_validators_per_core: None, + // Optionally set the max validators; otherwise uses the configuration value. + max_validators: None, + // Optionally set the number of dispute statements for each candidate, + dispute_statements: BTreeMap::new(), + _phantom: sp_std::marker::PhantomData::, + } + } + + /// Mock header. + pub(crate) fn header(block_number: T::BlockNumber) -> T::Header { + T::Header::new( + block_number, // block_number, + Default::default(), // extrinsics_root, + Default::default(), // storage_root, + Default::default(), // parent_hash, + Default::default(), // digest, + ) + } + + /// Number of the relay parent block. + fn relay_parent_number(&self) -> u32 { + (self.block_number - One::one()) + .try_into() + .map_err(|_| ()) + .expect("self.block_number is u32") + } + + /// Maximum number of validators that may be part of a validator group. + pub(crate) fn fallback_max_validators() -> u32 { + configuration::Pallet::::config().max_validators.unwrap_or(200) + } + + /// Maximum number of validators participating in parachains consensus (a.k.a. active validators). + fn max_validators(&self) -> u32 { + self.max_validators.unwrap_or(Self::fallback_max_validators()) + } + + #[allow(dead_code)] + pub(crate) fn set_max_validators(mut self, n: u32) -> Self { + self.max_validators = Some(n); + self + } + + /// Maximum number of validators per core (a.k.a. max validators per group). This value is used if none is + /// explicitly set on the builder. + pub(crate) fn fallback_max_validators_per_core() -> u32 { + configuration::Pallet::::config().max_validators_per_core.unwrap_or(5) + } + + /// Specify a mapping of core index, para id, group index seed to the number of dispute statements for the + /// corresponding dispute statement set. Note that if the number of disputes is not specified it fallbacks + /// to having a dispute per every validator. Additionally, an entry is not guaranteed to have a dispute - it + /// must line up with the cores marked as disputed as defined in `Self::Build`. + #[allow(dead_code)] + pub(crate) fn set_dispute_statements(mut self, m: BTreeMap) -> Self { + self.dispute_statements = m; + self + } + + fn max_validators_per_core(&self) -> u32 { + self.max_validators_per_core.unwrap_or(Self::fallback_max_validators_per_core()) + } + + /// Set maximum number of validators per core. 
+ #[allow(dead_code)] + pub(crate) fn set_max_validators_per_core(mut self, n: u32) -> Self { + self.max_validators_per_core = Some(n); + self + } + + /// Maximum number of cores we expect from this configuration. + pub(crate) fn max_cores(&self) -> u32 { + self.max_validators() / self.max_validators_per_core() + } + + /// Minimum number of validity votes in order for a backed candidate to be included. + #[allow(dead_code)] + pub(crate) fn fallback_min_validity_votes() -> u32 { + (Self::fallback_max_validators() / 2) + 1 + } + + fn create_indexes(&self, seed: u32) -> (ParaId, CoreIndex, GroupIndex) { + let para_id = ParaId::from(seed); + let core_idx = CoreIndex(seed); + let group_idx = + scheduler::Pallet::::group_assigned_to_core(core_idx, self.block_number).unwrap(); + + (para_id, core_idx, group_idx) + } + + fn candidate_availability_mock( + group_idx: GroupIndex, + core_idx: CoreIndex, + candidate_hash: CandidateHash, + availability_votes: BitVec, + ) -> inclusion::CandidatePendingAvailability { + inclusion::CandidatePendingAvailability::::new( + core_idx, // core + candidate_hash, // hash + Default::default(), // candidate descriptor + availability_votes, // availability votes + Default::default(), // backers + Zero::zero(), // relay parent + One::one(), // relay chain block this was backed in + group_idx, // backing group + ) + } + + fn add_availability( + para_id: ParaId, + core_idx: CoreIndex, + group_idx: GroupIndex, + availability_votes: BitVec, + candidate_hash: CandidateHash, + ) { + let candidate_availability = Self::candidate_availability_mock( + group_idx, + core_idx, + candidate_hash, + availability_votes, + ); + // NOTE: commitments does not include any data that would lead to heavy code + // paths in `enact_candidate`. But enact_candidates does return a weight which will get + // taken into account. + let commitments = CandidateCommitments::::default(); + inclusion::PendingAvailability::::insert(para_id, candidate_availability); + inclusion::PendingAvailabilityCommitments::::insert(¶_id, commitments); + } + + fn availability_bitvec(concluding: &BTreeMap, cores: u32) -> AvailabilityBitfield { + let mut bitfields = bitvec::bitvec![bitvec::order::Lsb0, u8; 0; 0]; + for i in 0..cores { + if concluding.get(&(i as u32)).is_some() { + bitfields.push(true); + } else { + bitfields.push(false) + } + } + + bitfields.into() + } + + fn run_to_block(to: u32) { + let to = to.into(); + while frame_system::Pallet::::block_number() < to { + let b = frame_system::Pallet::::block_number(); + initializer::Pallet::::on_finalize(b); + + let b = b + One::one(); + frame_system::Pallet::::set_block_number(b); + initializer::Pallet::::on_initialize(b); + } + } + + fn setup_para_ids(cores: u32) { + // make sure parachains exist prior to session change. + for i in 0..cores { + let para_id = ParaId::from(i as u32); + + paras::Pallet::::schedule_para_initialize( + para_id, + paras::ParaGenesisArgs { + genesis_head: Default::default(), + validation_code: Default::default(), + parachain: true, + }, + ) + .unwrap(); + } + } + + /// Generate validator key pairs and account ids. + fn generate_validator_pairs(validator_count: u32) -> Vec<(T::AccountId, ValidatorId)> { + (0..validator_count) + .map(|i| { + let public = ValidatorId::generate_pair(None); + + // The account Id is not actually used anywhere, just necessary to fulfill the + // expected type of the `validators` param of `test_trigger_on_new_session`. 
+ let account: T::AccountId = account("validator", i, i); + (account, public) + }) + .collect() + } + + fn signing_context(&self) -> SigningContext { + SigningContext { + parent_hash: Self::header(self.block_number.clone()).hash(), + session_index: self.session.clone(), + } + } + + fn validator_availability_votes_yes(validators: usize) -> BitVec { + // every validator confirms availability. + bitvec::bitvec![bitvec::order::Lsb0, u8; 1; validators as usize] + } + + /// Setup session 1 and create `self.validators_map` and `self.validators`. + fn setup_session( + mut self, + target_session: SessionIndex, + validators: Vec<(T::AccountId, ValidatorId)>, + total_cores: u32, + ) -> Self { + let mut block = 1; + for session in 0..=target_session { + initializer::Pallet::::test_trigger_on_new_session( + false, + session, + validators.iter().map(|(a, v)| (a, v.clone())), + None, + ); + block += 1; + Self::run_to_block(block); + } + + let block_number = ::BlockNumber::from(block); + let header = Self::header(block_number.clone()); + + frame_system::Pallet::::initialize( + &header.number(), + &header.hash(), + &Digest { logs: Vec::new() }, + Default::default(), + ); + + assert_eq!(scheduler::ValidatorGroups::::get().len(), total_cores as usize); + assert_eq!(>::session_index(), target_session); + + // We need to refetch validators since they have been shuffled. + let validators_shuffled: Vec<_> = session_info::Pallet::::session_info(target_session) + .unwrap() + .validators + .clone(); + + self.validators = Some(validators_shuffled); + self.block_number = block_number; + self.session = target_session; + assert_eq!(paras::Pallet::::parachains().len(), total_cores as usize); + + self + } + + fn create_availability_bitfields( + &self, + concluding_cores: &BTreeMap, + total_cores: u32, + ) -> Vec> { + let validators = + self.validators.as_ref().expect("must have some validators prior to calling"); + + let availability_bitvec = Self::availability_bitvec(concluding_cores, total_cores); + + let bitfields: Vec> = validators + .iter() + .enumerate() + .map(|(i, public)| { + let unchecked_signed = UncheckedSigned::::benchmark_sign( + public, + availability_bitvec.clone(), + &self.signing_context(), + ValidatorIndex(i as u32), + ); + + unchecked_signed + }) + .collect(); + + for (seed, _) in concluding_cores.iter() { + // make sure the candidates that are concluding by becoming available are marked as + // pending availability. + let (para_id, core_idx, group_idx) = self.create_indexes(seed.clone()); + Self::add_availability( + para_id, + core_idx, + group_idx, + Self::validator_availability_votes_yes(validators.len()), + CandidateHash(H256::from(byte32_slice_from(seed.clone()))), + ); + } + + bitfields + } + + /// Create backed candidates for `cores_with_backed_candidates`. You need these cores to be + /// scheduled _within_ paras inherent, which requires marking the available bitfields as fully + /// available. + /// - `cores_with_backed_candidates` Mapping of `para_id`/`core_idx`/`group_idx` seed to number of + /// validity votes. 
+ fn create_backed_candidates( + &self, + cores_with_backed_candidates: &BTreeMap, + includes_code_upgrade: Option, + ) -> Vec> { + let validators = + self.validators.as_ref().expect("must have some validators prior to calling"); + let config = configuration::Pallet::::config(); + + cores_with_backed_candidates + .iter() + .map(|(seed, num_votes)| { + assert!(*num_votes <= validators.len() as u32); + let (para_id, _core_idx, group_idx) = self.create_indexes(seed.clone()); + + // This generates a pair and adds it to the keystore, returning just the public. + let collator_public = CollatorId::generate_pair(None); + let header = Self::header(self.block_number.clone()); + let relay_parent = header.hash(); + let head_data: HeadData = Default::default(); + let persisted_validation_data_hash = PersistedValidationData:: { + parent_head: head_data.clone(), + relay_parent_number: self.relay_parent_number(), + relay_parent_storage_root: Default::default(), + max_pov_size: config.max_pov_size, + } + .hash(); + + let pov_hash = Default::default(); + // NOTE: we use the default `ValidationCode` when setting it in `setup_para_ids`, + // so using the default again here makes sure things line up. + let validation_code_hash = ValidationCode::default().hash(); + let payload = collator_signature_payload( + &relay_parent, + ¶_id, + &persisted_validation_data_hash, + &pov_hash, + &validation_code_hash, + ); + let signature = collator_public.sign(&payload).unwrap(); + + // Set the head data so it can be used while validating the signatures on the + // candidate receipt. + paras::Pallet::::heads_insert(¶_id, head_data.clone()); + + let mut past_code_meta = paras::ParaPastCodeMeta::::default(); + past_code_meta.note_replacement(0u32.into(), 0u32.into()); + + let group_validators = scheduler::Pallet::::group_validators(group_idx).unwrap(); + + let candidate = CommittedCandidateReceipt:: { + descriptor: CandidateDescriptor:: { + para_id, + relay_parent, + collator: collator_public, + persisted_validation_data_hash, + pov_hash, + erasure_root: Default::default(), + signature, + para_head: head_data.hash(), + validation_code_hash, + }, + commitments: CandidateCommitments:: { + upward_messages: Vec::new(), + horizontal_messages: Vec::new(), + new_validation_code: includes_code_upgrade + .map(|v| ValidationCode(vec![0u8; v as usize])), + head_data, + processed_downward_messages: 0, + hrmp_watermark: self.relay_parent_number(), + }, + }; + + let candidate_hash = candidate.hash(); + + let validity_votes: Vec<_> = group_validators + .iter() + .take(*num_votes as usize) + .map(|val_idx| { + let public = validators.get(val_idx.0 as usize).unwrap(); + let sig = UncheckedSigned::::benchmark_sign( + public, + CompactStatement::Valid(candidate_hash.clone()), + &self.signing_context(), + *val_idx, + ) + .benchmark_signature(); + + ValidityAttestation::Explicit(sig.clone()) + }) + .collect(); + + BackedCandidate:: { + candidate, + validity_votes, + validator_indices: bitvec::bitvec![bitvec::order::Lsb0, u8; 1; group_validators.len()], + } + }) + .collect() + } + + fn create_disputes_with_no_spam( + &self, + start: u32, + last: u32, + dispute_sessions: &[u32], + ) -> Vec { + let validators = + self.validators.as_ref().expect("must have some validators prior to calling"); + + (start..last) + .map(|seed| { + let session = + dispute_sessions.get(seed as usize).cloned().unwrap_or(self.target_session); + + let (para_id, core_idx, group_idx) = self.create_indexes(seed); + let candidate_hash = 
CandidateHash(H256::from(byte32_slice_from(seed))); + + Self::add_availability( + para_id, + core_idx, + group_idx, + Self::validator_availability_votes_yes(validators.len()), + candidate_hash, + ); + + let statements_len = + self.dispute_statements.get(&seed).cloned().unwrap_or(validators.len() as u32); + let statements = (0..statements_len) + .map(|validator_index| { + let validator_public = &validators.get(validator_index as usize).unwrap(); + + // We need dispute statements on each side. And we don't want a revert log + // so we make sure that we have a super majority with valid statements. + let dispute_statement = if validator_index % 4 == 0 { + DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit) + } else { + // Note that in the future we could use some availability votes as an + // implicit valid kind. + DisputeStatement::Valid(ValidDisputeStatementKind::Explicit) + }; + let data = dispute_statement.payload_data(candidate_hash.clone(), session); + let statement_sig = validator_public.sign(&data).unwrap(); + + (dispute_statement, ValidatorIndex(validator_index), statement_sig) + }) + .collect(); + + DisputeStatementSet { candidate_hash: candidate_hash.clone(), session, statements } + }) + .collect() + } + + /// Build a scenario for testing or benchmarks. + /// + /// - `backed_and_concluding_cores`: Map from core/para id/group index seed to number of + /// validity votes. + /// - `dispute_sessions`: Session index of for each dispute. Index of slice corresponds to core. + /// The length of this must equal total cores used. Seed index for disputes starts at + /// `backed_and_concluding_cores.len()`, so `dispute_sessions` needs to be left padded by + /// `backed_and_concluding_cores.len()` values which effectively get ignored. + /// TODO we should fix this. + pub(crate) fn build( + self, + backed_and_concluding_cores: BTreeMap, + dispute_sessions: &[u32], + includes_code_upgrade: Option, + ) -> Bench { + // Make sure relevant storage is cleared. This is just to get the asserts to work when + // running tests because it seems the storage is not cleared in between. + inclusion::PendingAvailabilityCommitments::::remove_all(None); + inclusion::PendingAvailability::::remove_all(None); + + // We don't allow a core to have both disputes and be marked fully available at this block. + let cores = self.max_cores(); + let used_cores = dispute_sessions.len() as u32; + assert!(used_cores <= cores); + + // NOTE: there is an n+2 session delay for these actions to take effect + // We are currently in Session 0, so these changes will take effect in Session 2 + Self::setup_para_ids(used_cores); + + let validator_ids = Self::generate_validator_pairs(self.max_validators()); + let target_session = SessionIndex::from(self.target_session); + let builder = self.setup_session(target_session, validator_ids, used_cores); + + let bitfields = + builder.create_availability_bitfields(&backed_and_concluding_cores, used_cores); + let backed_candidates = + builder.create_backed_candidates(&backed_and_concluding_cores, includes_code_upgrade); + + let disputes = builder.create_disputes_with_no_spam( + backed_and_concluding_cores.len() as u32, + used_cores, + dispute_sessions, + ); + + assert_eq!( + inclusion::PendingAvailabilityCommitments::::iter().count(), + used_cores as usize, + ); + assert_eq!(inclusion::PendingAvailability::::iter().count(), used_cores as usize,); + + // Mark all the use cores as occupied. 
We expect that their are `backed_and_concluding_cores` + // that are pending availability and that there are `used_cores - backed_and_concluding_cores ` + // which are about to be disputed. + scheduler::AvailabilityCores::::set(vec![ + Some(CoreOccupied::Parachain); + used_cores as usize + ]); + + Bench:: { + data: ParachainsInherentData { + bitfields, + backed_candidates, + disputes, + parent_header: Self::header(builder.block_number.clone()), + }, + _session: target_session, + _block_number: builder.block_number, + } + } +} diff --git a/runtime/parachains/src/disputes.rs b/runtime/parachains/src/disputes.rs index 7dde5f2b515a..1f49f0f4d25d 100644 --- a/runtime/parachains/src/disputes.rs +++ b/runtime/parachains/src/disputes.rs @@ -129,6 +129,10 @@ pub trait DisputesHandler { included_in: BlockNumber, ); + /// Retrieve the included state of a given candidate in a particular session. If it + /// returns `Some`, then we have a local dispute for the given `candidate_hash`. + fn included_state(session: SessionIndex, candidate_hash: CandidateHash) -> Option; + /// Whether the given candidate concluded invalid in a dispute with supermajority. fn concluded_invalid(session: SessionIndex, candidate_hash: CandidateHash) -> bool; @@ -164,6 +168,13 @@ impl DisputesHandler for () { ) { } + fn included_state( + _session: SessionIndex, + _candidate_hash: CandidateHash, + ) -> Option { + None + } + fn concluded_invalid(_session: SessionIndex, _candidate_hash: CandidateHash) -> bool { false } @@ -200,6 +211,13 @@ impl DisputesHandler for pallet::Pallet { pallet::Pallet::::note_included(session, candidate_hash, included_in) } + fn included_state( + session: SessionIndex, + candidate_hash: CandidateHash, + ) -> Option { + pallet::Pallet::::included_state(session, candidate_hash) + } + fn concluded_invalid(session: SessionIndex, candidate_hash: CandidateHash) -> bool { pallet::Pallet::::concluded_invalid(session, candidate_hash) } @@ -738,6 +756,7 @@ impl Pallet { Ok(fresh) } + /// Removes all duplicate disputes. fn filter_multi_dispute_data(statement_sets: &mut MultiDisputeStatementSet) { frame_support::storage::with_transaction(|| { let config = >::config(); @@ -1116,6 +1135,13 @@ impl Pallet { } } + pub(crate) fn included_state( + session: SessionIndex, + candidate_hash: CandidateHash, + ) -> Option { + >::get(session, candidate_hash) + } + pub(crate) fn concluded_invalid(session: SessionIndex, candidate_hash: CandidateHash) -> bool { >::get(&session, &candidate_hash).map_or(false, |dispute| { // A dispute that has concluded with supermajority-against. diff --git a/runtime/parachains/src/inclusion.rs b/runtime/parachains/src/inclusion.rs index dd865bc8572b..e1a5e0efc258 100644 --- a/runtime/parachains/src/inclusion.rs +++ b/runtime/parachains/src/inclusion.rs @@ -20,23 +20,27 @@ //! It is responsible for carrying candidates from being backable to being backed, and then from backed //! to included. 
+use crate::{ + configuration, disputes, dmp, hrmp, paras, + paras_inherent::{sanitize_bitfields, DisputedBitfield, VERIFY_SIGS}, + scheduler::CoreAssignment, + shared, ump, +}; use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; use frame_support::pallet_prelude::*; use parity_scale_codec::{Decode, Encode}; use primitives::v1::{ AvailabilityBitfield, BackedCandidate, CandidateCommitments, CandidateDescriptor, CandidateHash, CandidateReceipt, CommittedCandidateReceipt, CoreIndex, GroupIndex, Hash, - HeadData, Id as ParaId, SigningContext, UncheckedSignedAvailabilityBitfields, ValidatorIndex, - ValidityAttestation, + HeadData, Id as ParaId, SigningContext, UncheckedSignedAvailabilityBitfields, ValidatorId, + ValidatorIndex, ValidityAttestation, }; use scale_info::TypeInfo; use sp_runtime::{ traits::{One, Saturating}, DispatchError, }; -use sp_std::prelude::*; - -use crate::{configuration, disputes, dmp, hrmp, paras, scheduler::CoreAssignment, shared, ump}; +use sp_std::{collections::btree_set::BTreeSet, prelude::*}; pub use pallet::*; @@ -54,7 +58,7 @@ pub struct AvailabilityBitfieldRecord { /// A backed candidate pending availability. #[derive(Encode, Decode, PartialEq, TypeInfo)] -#[cfg_attr(test, derive(Debug))] +#[cfg_attr(test, derive(Debug, Default))] pub struct CandidatePendingAvailability { /// The availability core this is assigned to. core: CoreIndex, @@ -99,6 +103,29 @@ impl CandidatePendingAvailability { pub(crate) fn candidate_descriptor(&self) -> &CandidateDescriptor { &self.descriptor } + + #[cfg(any(feature = "runtime-benchmarks", test))] + pub(crate) fn new( + core: CoreIndex, + hash: CandidateHash, + descriptor: CandidateDescriptor, + availability_votes: BitVec, + backers: BitVec, + relay_parent_number: N, + backed_in_number: N, + backing_group: GroupIndex, + ) -> Self { + Self { + core, + hash, + descriptor, + availability_votes, + backers, + relay_parent_number, + backed_in_number, + backing_group, + } + } } /// A hook for applying validator rewards @@ -212,6 +239,10 @@ pub mod pallet { /// The `para_head` hash in the candidate descriptor doesn't match the hash of the actual para head in the /// commitments. ParaHeadMismatch, + /// A bitfield that references a freed core, + /// either intentionally or as part of a concluded + /// invalid dispute. + BitfieldReferencesFreedCore, } /// The latest bitfield for each validator, referred to by their index in the validator set. @@ -255,18 +286,18 @@ impl Pallet { for _ in >::drain() {} } - /// Process a set of incoming bitfields. + /// Extract the freed cores based on cores that became available. /// - /// Returns a `Vec` of `CandidateHash`es and their respective `AvailabilityCore`s that became available, - /// and cores free. - pub(crate) fn process_bitfields( + /// Updates storage items `PendingAvailability` and `AvailabilityBitfields`. 
+ pub(crate) fn update_pending_availability_and_get_freed_cores( expected_bits: usize, - unchecked_bitfields: UncheckedSignedAvailabilityBitfields, - core_lookup: impl Fn(CoreIndex) -> Option, - ) -> Result, DispatchError> { - let validators = shared::Pallet::::active_validator_keys(); - let session_index = shared::Pallet::::session_index(); - + validators: &[ValidatorId], + signed_bitfields: UncheckedSignedAvailabilityBitfields, + core_lookup: F, + ) -> Vec<(CoreIndex, CandidateHash)> + where + F: Fn(CoreIndex) -> Option, + { let mut assigned_paras_record = (0..expected_bits) .map(|bit_index| core_lookup(CoreIndex::from(bit_index as u32))) .map(|opt_para_id| { @@ -274,57 +305,15 @@ impl Pallet { }) .collect::>(); - // do sanity checks on the bitfields: - // 1. no more than one bitfield per validator - // 2. bitfields are ascending by validator index. - // 3. each bitfield has exactly `expected_bits` - // 4. signature is valid. - let signed_bitfields = { - let mut last_index = None; - - let signing_context = SigningContext { - parent_hash: >::parent_hash(), - session_index, - }; - - let mut signed_bitfields = Vec::with_capacity(unchecked_bitfields.len()); - - for unchecked_bitfield in unchecked_bitfields { - ensure!( - unchecked_bitfield.unchecked_payload().0.len() == expected_bits, - Error::::WrongBitfieldSize, - ); - - ensure!( - last_index - .map_or(true, |last| last < unchecked_bitfield.unchecked_validator_index()), - Error::::BitfieldDuplicateOrUnordered, - ); - - ensure!( - (unchecked_bitfield.unchecked_validator_index().0 as usize) < validators.len(), - Error::::ValidatorIndexOutOfBounds, - ); - - let validator_public = - &validators[unchecked_bitfield.unchecked_validator_index().0 as usize]; - - last_index = Some(unchecked_bitfield.unchecked_validator_index()); - - signed_bitfields.push( - unchecked_bitfield - .try_into_checked(&signing_context, validator_public) - .map_err(|_| Error::::InvalidBitfieldSignature)?, - ); - } - signed_bitfields - }; - let now = >::block_number(); - for signed_bitfield in signed_bitfields { - for (bit_idx, _) in - signed_bitfield.payload().0.iter().enumerate().filter(|(_, is_av)| **is_av) - { + for (checked_bitfield, validator_index) in + signed_bitfields.into_iter().map(|signed_bitfield| { + // extracting unchecked data, since it's checked in `fn sanitize_bitfields` already. + let validator_idx = signed_bitfield.unchecked_validator_index(); + let checked_bitfield = signed_bitfield.unchecked_into_payload(); + (checked_bitfield, validator_idx) + }) { + for (bit_idx, _) in checked_bitfield.0.iter().enumerate().filter(|(_, is_av)| **is_av) { let pending_availability = if let Some((_, pending_availability)) = assigned_paras_record[bit_idx].as_mut() { @@ -339,20 +328,17 @@ impl Pallet { // defensive check - this is constructed by loading the availability bitfield record, // which is always `Some` if the core is occupied - that's why we're here. 
- let val_idx = signed_bitfield.validator_index().0 as usize; + let validator_index = validator_index.0 as usize; if let Some(mut bit) = pending_availability.as_mut().and_then(|candidate_pending_availability| { - candidate_pending_availability.availability_votes.get_mut(val_idx) + candidate_pending_availability.availability_votes.get_mut(validator_index) }) { *bit = true; } } - let validator_index = signed_bitfield.validator_index(); - let record = AvailabilityBitfieldRecord { - bitfield: signed_bitfield.into_payload(), - submitted_at: now, - }; + let record = + AvailabilityBitfieldRecord { bitfield: checked_bitfield, submitted_at: now }; >::insert(&validator_index, record); } @@ -379,18 +365,20 @@ impl Pallet { }, }; - let receipt = CommittedCandidateReceipt { - descriptor: pending_availability.descriptor, - commitments, - }; - Self::enact_candidate( - pending_availability.relay_parent_number, - receipt, - pending_availability.backers, - pending_availability.availability_votes, - pending_availability.core, - pending_availability.backing_group, - ); + if ON_CHAIN_USE { + let receipt = CommittedCandidateReceipt { + descriptor: pending_availability.descriptor, + commitments, + }; + let _weight = Self::enact_candidate( + pending_availability.relay_parent_number, + receipt, + pending_availability.backers, + pending_availability.availability_votes, + pending_availability.core, + pending_availability.backing_group, + ); + } freed_cores.push((pending_availability.core, pending_availability.hash)); } else { @@ -398,7 +386,40 @@ impl Pallet { } } - Ok(freed_cores) + freed_cores + } + + /// Process a set of incoming bitfields. + /// + /// Returns a `Vec` of `CandidateHash`es and their respective `AvailabilityCore`s that became available, + /// and cores free. + pub(crate) fn process_bitfields( + expected_bits: usize, + signed_bitfields: UncheckedSignedAvailabilityBitfields, + disputed_bitfield: DisputedBitfield, + core_lookup: impl Fn(CoreIndex) -> Option, + ) -> Vec<(CoreIndex, CandidateHash)> { + let validators = shared::Pallet::::active_validator_keys(); + let session_index = shared::Pallet::::session_index(); + let parent_hash = frame_system::Pallet::::parent_hash(); + + let checked_bitfields = sanitize_bitfields::( + signed_bitfields, + disputed_bitfield, + expected_bits, + parent_hash, + session_index, + &validators[..], + ); + + let freed_cores = Self::update_pending_availability_and_get_freed_cores::<_, true>( + expected_bits, + &validators[..], + checked_bitfields, + core_lookup, + ); + + freed_cores } /// Process candidates that have been backed. Provide the relay storage root, a set of candidates @@ -563,10 +584,10 @@ impl Pallet { &backed_candidate, &signing_context, group_vals.len(), - |idx| { + |intra_group_vi| { group_vals - .get(idx) - .and_then(|i| validators.get(i.0 as usize)) + .get(intra_group_vi) + .and_then(|vi| validators.get(vi.0 as usize)) .map(|v| v.clone()) }, ); @@ -822,7 +843,7 @@ impl Pallet { /// Cleans up all paras pending availability that are in the given list of disputed candidates. /// /// Returns a vector of cleaned-up core IDs. 
- pub(crate) fn collect_disputed(disputed: Vec) -> Vec { + pub(crate) fn collect_disputed(disputed: &BTreeSet) -> Vec { let mut cleaned_up_ids = Vec::new(); let mut cleaned_up_cores = Vec::new(); @@ -974,9 +995,8 @@ impl CandidateCheckContext { } #[cfg(test)] -mod tests { +pub(crate) mod tests { use super::*; - use crate::{ configuration::HostConfiguration, initializer::SessionChangeNotification, @@ -985,8 +1005,10 @@ mod tests { System, Test, }, paras::ParaGenesisArgs, + paras_inherent::DisputedBitfield, scheduler::AssignmentKind, }; + use frame_support::assert_noop; use futures::executor::block_on; use keyring::Sr25519Keyring; use primitives::{ @@ -1008,7 +1030,7 @@ mod tests { config } - fn genesis_config(paras: Vec<(ParaId, bool)>) -> MockGenesisConfig { + pub(crate) fn genesis_config(paras: Vec<(ParaId, bool)>) -> MockGenesisConfig { MockGenesisConfig { paras: paras::GenesisConfig { paras: paras @@ -1035,14 +1057,14 @@ mod tests { } #[derive(Debug, Clone, Copy, PartialEq)] - enum BackingKind { + pub(crate) enum BackingKind { #[allow(unused)] Unanimous, Threshold, Lacking, } - fn collator_sign_candidate( + pub(crate) fn collator_sign_candidate( collator: Sr25519Keyring, candidate: &mut CommittedCandidateReceipt, ) { @@ -1060,7 +1082,7 @@ mod tests { assert!(candidate.descriptor().check_collator_signature().is_ok()); } - async fn back_candidate( + pub(crate) async fn back_candidate( candidate: CommittedCandidateReceipt, validators: &[Sr25519Keyring], group: &[ValidatorIndex], @@ -1102,11 +1124,6 @@ mod tests { let backed = BackedCandidate { candidate, validity_votes, validator_indices }; - let should_pass = match kind { - BackingKind::Unanimous | BackingKind::Threshold => true, - BackingKind::Lacking => false, - }; - let successfully_backed = primitives::v1::check_candidate_backing(&backed, signing_context, group.len(), |i| { Some(validators[group[i].0 as usize].public().into()) @@ -1115,16 +1132,15 @@ mod tests { .unwrap_or(0) * 2 > group.len(); - if should_pass { - assert!(successfully_backed); - } else { - assert!(!successfully_backed); - } + match kind { + BackingKind::Unanimous | BackingKind::Threshold => assert!(successfully_backed), + BackingKind::Lacking => assert!(!successfully_backed), + }; backed } - fn run_to_block( + pub(crate) fn run_to_block( to: BlockNumber, new_session: impl Fn(BlockNumber) -> Option>, ) { @@ -1157,7 +1173,7 @@ mod tests { } } - fn expected_bits() -> usize { + pub(crate) fn expected_bits() -> usize { Paras::parachains().len() + Configuration::config().parathread_cores as usize } @@ -1181,11 +1197,11 @@ mod tests { b } - fn validator_pubkeys(val_ids: &[Sr25519Keyring]) -> Vec { + pub(crate) fn validator_pubkeys(val_ids: &[Sr25519Keyring]) -> Vec { val_ids.iter().map(|v| v.public().into()).collect() } - async fn sign_bitfield( + pub(crate) async fn sign_bitfield( keystore: &SyncCryptoStorePtr, key: &Sr25519Keyring, validator_index: ValidatorIndex, @@ -1205,20 +1221,20 @@ mod tests { } #[derive(Default)] - struct TestCandidateBuilder { - para_id: ParaId, - head_data: HeadData, - para_head_hash: Option, - pov_hash: Hash, - relay_parent: Hash, - persisted_validation_data_hash: Hash, - new_validation_code: Option, - validation_code: ValidationCode, - hrmp_watermark: BlockNumber, + pub(crate) struct TestCandidateBuilder { + pub(crate) para_id: ParaId, + pub(crate) head_data: HeadData, + pub(crate) para_head_hash: Option, + pub(crate) pov_hash: Hash, + pub(crate) relay_parent: Hash, + pub(crate) persisted_validation_data_hash: Hash, + pub(crate) 
new_validation_code: Option, + pub(crate) validation_code: ValidationCode, + pub(crate) hrmp_watermark: BlockNumber, } impl TestCandidateBuilder { - fn build(self) -> CommittedCandidateReceipt { + pub(crate) fn build(self) -> CommittedCandidateReceipt { CommittedCandidateReceipt { descriptor: CandidateDescriptor { para_id: self.para_id, @@ -1239,7 +1255,7 @@ mod tests { } } - fn make_vdata_hash(para_id: ParaId) -> Option { + pub(crate) fn make_vdata_hash(para_id: ParaId) -> Option { let relay_parent_number = >::block_number() - 1; let persisted_validation_data = crate::util::make_persisted_validation_data::( para_id, @@ -1332,7 +1348,7 @@ mod tests { } let validator_public = validator_pubkeys(&validators); - new_test_ext(genesis_config(paras)).execute_with(|| { + new_test_ext(genesis_config(paras.clone())).execute_with(|| { shared::Pallet::::set_active_validators_ascending(validator_public.clone()); shared::Pallet::::set_session_index(5); @@ -1347,7 +1363,20 @@ mod tests { _ => panic!("out of bounds for testing"), }; - // wrong number of bits. + // mark all candidates as pending availability + let set_pending_av = || { + for (p_id, _) in paras { + PendingAvailability::::insert( + p_id, + CandidatePendingAvailability { + availability_votes: default_availability_votes(), + ..Default::default() + }, + ) + } + }; + + // too many bits in bitfield { let mut bare_bitfield = default_bitfield(); bare_bitfield.0.push(false); @@ -1359,15 +1388,18 @@ mod tests { &signing_context, )); - assert!(ParaInclusion::process_bitfields( - expected_bits(), - vec![signed.into()], - &core_lookup, - ) - .is_err()); + assert_eq!( + ParaInclusion::process_bitfields( + expected_bits(), + vec![signed.into()], + DisputedBitfield::zeros(expected_bits()), + &core_lookup, + ), + vec![] + ); } - // wrong number of bits: other way around. + // not enough bits { let bare_bitfield = default_bitfield(); let signed = block_on(sign_bitfield( @@ -1378,42 +1410,77 @@ mod tests { &signing_context, )); - assert!(ParaInclusion::process_bitfields( - expected_bits() + 1, - vec![signed.into()], - &core_lookup, - ) - .is_err()); + assert_eq!( + ParaInclusion::process_bitfields( + expected_bits() + 1, + vec![signed.into()], + DisputedBitfield::zeros(expected_bits()), + &core_lookup, + ), + vec![] + ); } // duplicate. { - let bare_bitfield = default_bitfield(); + set_pending_av.clone()(); + let back_core_0_bitfield = { + let mut b = default_bitfield(); + b.0.set(0, true); + b + }; let signed: UncheckedSignedAvailabilityBitfield = block_on(sign_bitfield( &keystore, &validators[0], ValidatorIndex(0), - bare_bitfield, + back_core_0_bitfield, &signing_context, )) .into(); + assert_eq!( + >::get(chain_a) + .unwrap() + .availability_votes + .count_ones(), + 0 + ); + + // the threshold to free a core is 4 availability votes, but we only expect 1 valid + // valid bitfield. assert!(ParaInclusion::process_bitfields( expected_bits(), vec![signed.clone(), signed], + DisputedBitfield::zeros(expected_bits()), &core_lookup, ) - .is_err()); + .is_empty()); + + assert_eq!( + >::get(chain_a) + .unwrap() + .availability_votes + .count_ones(), + 1 + ); + + // clean up + PendingAvailability::::remove_all(None); } // out of order. 
{ - let bare_bitfield = default_bitfield(); + set_pending_av.clone()(); + let back_core_0_bitfield = { + let mut b = default_bitfield(); + b.0.set(0, true); + b + }; let signed_0 = block_on(sign_bitfield( &keystore, &validators[0], ValidatorIndex(0), - bare_bitfield.clone(), + back_core_0_bitfield.clone(), &signing_context, )) .into(); @@ -1422,17 +1489,38 @@ mod tests { &keystore, &validators[1], ValidatorIndex(1), - bare_bitfield, + back_core_0_bitfield, &signing_context, )) .into(); + assert_eq!( + >::get(chain_a) + .unwrap() + .availability_votes + .count_ones(), + 0 + ); + + // the threshold to free a core is 4 availability votes, but we only expect 1 valid + // valid bitfield because `signed_0` will get skipped for being out of order. assert!(ParaInclusion::process_bitfields( expected_bits(), vec![signed_1, signed_0], + DisputedBitfield::zeros(expected_bits()), &core_lookup, ) - .is_err()); + .is_empty()); + + assert_eq!( + >::get(chain_a) + .unwrap() + .availability_votes + .count_ones(), + 1 + ); + + PendingAvailability::::remove_all(None); } // non-pending bit set. @@ -1446,17 +1534,17 @@ mod tests { bare_bitfield, &signing_context, )); - assert_eq!( - ParaInclusion::process_bitfields( - expected_bits(), - vec![signed.into()], - &core_lookup, - ), - Ok(vec![]) - ); + + assert!(ParaInclusion::process_bitfields( + expected_bits(), + vec![signed.into()], + DisputedBitfield::zeros(expected_bits()), + &core_lookup, + ) + .is_empty()); } - // empty bitfield signed: always OK, but kind of useless. + // empty bitfield signed: always ok, but kind of useless. { let bare_bitfield = default_bitfield(); let signed = block_on(sign_bitfield( @@ -1470,9 +1558,10 @@ mod tests { assert!(ParaInclusion::process_bitfields( expected_bits(), vec![signed.into()], + DisputedBitfield::zeros(expected_bits()), &core_lookup, ) - .is_ok()); + .is_empty()); } // bitfield signed with pending bit signed. @@ -1512,9 +1601,10 @@ mod tests { assert!(ParaInclusion::process_bitfields( expected_bits(), vec![signed.into()], + DisputedBitfield::zeros(expected_bits()), &core_lookup, ) - .is_ok()); + .is_empty()); >::remove(chain_a); PendingAvailabilityCommitments::::remove(chain_a); @@ -1551,14 +1641,13 @@ mod tests { )); // no core is freed - assert_eq!( - ParaInclusion::process_bitfields( - expected_bits(), - vec![signed.into()], - &core_lookup, - ), - Ok(vec![]), - ); + assert!(ParaInclusion::process_bitfields( + expected_bits(), + vec![signed.into()], + DisputedBitfield::zeros(expected_bits()), + &core_lookup, + ) + .is_empty()); } }); } @@ -1614,7 +1703,7 @@ mod tests { CandidatePendingAvailability { core: CoreIndex::from(0), hash: candidate_a.hash(), - descriptor: candidate_a.descriptor, + descriptor: candidate_a.clone().descriptor, availability_votes: default_availability_votes(), relay_parent_number: 0, backed_in_number: 0, @@ -1622,7 +1711,10 @@ mod tests { backing_group: GroupIndex::from(0), }, ); - PendingAvailabilityCommitments::::insert(chain_a, candidate_a.commitments); + PendingAvailabilityCommitments::::insert( + chain_a, + candidate_a.clone().commitments, + ); let candidate_b = TestCandidateBuilder { para_id: chain_b, @@ -1694,12 +1786,16 @@ mod tests { }) .collect(); - assert!(ParaInclusion::process_bitfields( - expected_bits(), - signed_bitfields, - &core_lookup, - ) - .is_ok()); + // only chain A's core is freed. 
+ assert_eq!( + ParaInclusion::process_bitfields( + expected_bits(), + signed_bitfields, + DisputedBitfield::zeros(expected_bits()), + &core_lookup, + ), + vec![(CoreIndex(0), candidate_a.hash())] + ); // chain A had 4 signing off, which is >= threshold. // chain B has 3 signing off, which is < threshold. @@ -1833,14 +1929,14 @@ mod tests { BackingKind::Threshold, )); - assert_eq!( + assert_noop!( ParaInclusion::process_candidates( Default::default(), vec![backed], vec![chain_b_assignment.clone()], &group_validators, ), - Err(Error::::UnscheduledCandidate.into()), + Error::::UnscheduledCandidate ); } @@ -1888,14 +1984,14 @@ mod tests { )); // out-of-order manifests as unscheduled. - assert_eq!( + assert_noop!( ParaInclusion::process_candidates( Default::default(), vec![backed_b, backed_a], vec![chain_a_assignment.clone(), chain_b_assignment.clone()], &group_validators, ), - Err(Error::::UnscheduledCandidate.into()), + Error::::UnscheduledCandidate ); } @@ -1921,14 +2017,14 @@ mod tests { BackingKind::Lacking, )); - assert_eq!( + assert_noop!( ParaInclusion::process_candidates( Default::default(), vec![backed], vec![chain_a_assignment.clone()], &group_validators, ), - Err(Error::::InsufficientBacking.into()), + Error::::InsufficientBacking ); } @@ -1956,14 +2052,14 @@ mod tests { BackingKind::Threshold, )); - assert_eq!( + assert_noop!( ParaInclusion::process_candidates( Default::default(), vec![backed], vec![chain_a_assignment.clone()], &group_validators, ), - Err(Error::::CandidateNotInParentContext.into()), + Error::::CandidateNotInParentContext ); } @@ -1991,7 +2087,7 @@ mod tests { BackingKind::Threshold, )); - assert_eq!( + assert_noop!( ParaInclusion::process_candidates( Default::default(), vec![backed], @@ -2002,7 +2098,7 @@ mod tests { ], &group_validators, ), - Err(Error::::WrongCollator.into()), + Error::::WrongCollator, ); } @@ -2033,14 +2129,14 @@ mod tests { BackingKind::Threshold, )); - assert_eq!( + assert_noop!( ParaInclusion::process_candidates( Default::default(), vec![backed], vec![thread_a_assignment.clone()], &group_validators, ), - Err(Error::::NotCollatorSigned.into()), + Error::::NotCollatorSigned ); } @@ -2083,14 +2179,14 @@ mod tests { ); >::insert(&chain_a, candidate.commitments); - assert_eq!( + assert_noop!( ParaInclusion::process_candidates( Default::default(), vec![backed], vec![chain_a_assignment.clone()], &group_validators, ), - Err(Error::::CandidateScheduledBeforeParaFree.into()), + Error::::CandidateScheduledBeforeParaFree ); >::remove(&chain_a); @@ -2126,14 +2222,14 @@ mod tests { BackingKind::Threshold, )); - assert_eq!( + assert_noop!( ParaInclusion::process_candidates( Default::default(), vec![backed], vec![chain_a_assignment.clone()], &group_validators, ), - Err(Error::::CandidateScheduledBeforeParaFree.into()), + Error::::CandidateScheduledBeforeParaFree ); >::remove(&chain_a); @@ -2177,14 +2273,14 @@ mod tests { assert_eq!(Paras::last_code_upgrade(chain_a, true), Some(expected_at)); } - assert_eq!( + assert_noop!( ParaInclusion::process_candidates( Default::default(), vec![backed], vec![chain_a_assignment.clone()], &group_validators, ), - Err(Error::::PrematureCodeUpgrade.into()), + Error::::PrematureCodeUpgrade ); } @@ -2246,14 +2342,14 @@ mod tests { BackingKind::Threshold, )); - assert_eq!( + assert_noop!( ParaInclusion::process_candidates( Default::default(), vec![backed], vec![chain_a_assignment.clone()], &group_validators, ), - Err(Error::::InvalidValidationCodeHash.into()), + Error::::InvalidValidationCodeHash ); } @@ -2281,14 +2377,14 @@ 
mod tests { BackingKind::Threshold, )); - assert_eq!( + assert_noop!( ParaInclusion::process_candidates( Default::default(), vec![backed], vec![chain_a_assignment.clone()], &group_validators, ), - Err(Error::::ParaHeadMismatch.into()), + Error::::ParaHeadMismatch ); } }); diff --git a/runtime/parachains/src/initializer.rs b/runtime/parachains/src/initializer.rs index 95e3310e37fe..0467e10f1a4b 100644 --- a/runtime/parachains/src/initializer.rs +++ b/runtime/parachains/src/initializer.rs @@ -294,7 +294,7 @@ impl Pallet { // Allow to trigger on_new_session in tests, this is needed as long as pallet_session is not // implemented in mock. - #[cfg(test)] + #[cfg(any(test, feature = "runtime-benchmarks"))] pub(crate) fn test_trigger_on_new_session<'a, I: 'a>( changed: bool, session_index: SessionIndex, diff --git a/runtime/parachains/src/lib.rs b/runtime/parachains/src/lib.rs index ab48d693d601..f1d8473f8894 100644 --- a/runtime/parachains/src/lib.rs +++ b/runtime/parachains/src/lib.rs @@ -42,6 +42,8 @@ pub mod runtime_api_impl; mod util; +#[cfg(any(feature = "runtime-benchmarks", test))] +mod builder; #[cfg(test)] mod mock; diff --git a/runtime/parachains/src/mock.rs b/runtime/parachains/src/mock.rs index a5b58cc54b00..4d046e6eab0b 100644 --- a/runtime/parachains/src/mock.rs +++ b/runtime/parachains/src/mock.rs @@ -22,15 +22,24 @@ use crate::{ ump::{self, MessageId, UmpSink}, ParaId, }; -use frame_support::{parameter_types, traits::GenesisBuild, weights::Weight}; + +use frame_support::{ + parameter_types, + traits::{GenesisBuild, KeyOwnerProofSystem}, + weights::Weight, +}; use frame_support_test::TestRandomness; use parity_scale_codec::Decode; use primitives::v1::{ - AuthorityDiscoveryId, Balance, BlockNumber, Header, SessionIndex, UpwardMessage, ValidatorIndex, + AuthorityDiscoveryId, Balance, BlockNumber, Header, Moment, SessionIndex, UpwardMessage, + ValidatorIndex, }; use sp_core::H256; use sp_io::TestExternalities; -use sp_runtime::traits::{BlakeTwo256, IdentityLookup}; +use sp_runtime::{ + traits::{BlakeTwo256, IdentityLookup}, + KeyTypeId, +}; use std::{cell::RefCell, collections::HashMap}; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; @@ -56,6 +65,7 @@ frame_support::construct_runtime!( Hrmp: hrmp::{Pallet, Call, Storage, Event}, SessionInfo: session_info::{Pallet, Storage}, Disputes: disputes::{Pallet, Storage, Event}, + Babe: pallet_babe::{Pallet, Call, Storage, Config, ValidateUnsigned}, } ); @@ -109,6 +119,52 @@ impl pallet_balances::Config for Test { type WeightInfo = (); } +parameter_types! { + pub const EpochDuration: u64 = 10; + pub const ExpectedBlockTime: Moment = 6_000; + pub const ReportLongevity: u64 = 10; + pub const MaxAuthorities: u32 = 100_000; +} + +impl pallet_babe::Config for Test { + type EpochDuration = EpochDuration; + type ExpectedBlockTime = ExpectedBlockTime; + + // session module is the trigger + type EpochChangeTrigger = pallet_babe::ExternalTrigger; + + type DisabledValidators = (); + + type KeyOwnerProof = >::Proof; + + type KeyOwnerIdentification = >::IdentificationTuple; + + type KeyOwnerProofSystem = (); + + type HandleEquivocation = (); + + type WeightInfo = (); + + type MaxAuthorities = MaxAuthorities; +} + +parameter_types! 
{ + pub const MinimumPeriod: Moment = 6_000 / 2; +} + +impl pallet_timestamp::Config for Test { + type Moment = Moment; + type OnTimestampSet = (); + type MinimumPeriod = MinimumPeriod; + type WeightInfo = (); +} + impl crate::initializer::Config for Test { type Randomness = TestRandomness; type ForceOrigin = frame_system::EnsureRoot; @@ -203,7 +259,9 @@ impl crate::inclusion::Config for Test { type RewardValidators = TestRewardValidators; } -impl crate::paras_inherent::Config for Test {} +impl crate::paras_inherent::Config for Test { + type WeightInfo = crate::paras_inherent::TestWeightInfo; +} impl crate::session_info::Config for Test {} @@ -299,6 +357,8 @@ impl inclusion::RewardValidators for TestRewardValidators { /// Create a new set of test externalities. pub fn new_test_ext(state: MockGenesisConfig) -> TestExternalities { + use sp_keystore::{testing::KeyStore, KeystoreExt, SyncCryptoStorePtr}; + use sp_std::sync::Arc; BACKING_REWARDS.with(|r| r.borrow_mut().clear()); AVAILABILITY_REWARDS.with(|r| r.borrow_mut().clear()); @@ -306,7 +366,10 @@ pub fn new_test_ext(state: MockGenesisConfig) -> TestExternalities { state.configuration.assimilate_storage(&mut t).unwrap(); GenesisBuild::::assimilate_storage(&state.paras, &mut t).unwrap(); - t.into() + let mut ext: TestExternalities = t.into(); + ext.register_extension(KeystoreExt(Arc::new(KeyStore::new()) as SyncCryptoStorePtr)); + + ext } #[derive(Default)] diff --git a/runtime/parachains/src/paras.rs b/runtime/parachains/src/paras.rs index d7bc9e7a7619..7c786360e57c 100644 --- a/runtime/parachains/src/paras.rs +++ b/runtime/parachains/src/paras.rs @@ -42,7 +42,7 @@ use serde::{Deserialize, Serialize}; pub use crate::Origin as ParachainOrigin; #[cfg(feature = "runtime-benchmarks")] -mod benchmarking; +pub(crate) mod benchmarking; pub use pallet::*; @@ -160,7 +160,7 @@ impl ParaLifecycle { impl ParaPastCodeMeta { // note a replacement has occurred at a given block number. - fn note_replacement(&mut self, expected_at: N, activated_at: N) { + pub(crate) fn note_replacement(&mut self, expected_at: N, activated_at: N) { self.upgrade_times.push(ReplacementTimes { expected_at, activated_at }) } @@ -350,7 +350,7 @@ pub mod pallet { /// All parachains. Ordered ascending by `ParaId`. Parathreads are not included. #[pallet::storage] #[pallet::getter(fn parachains)] - pub(super) type Parachains = StorageValue<_, Vec, ValueQuery>; + pub(crate) type Parachains = StorageValue<_, Vec, ValueQuery>; /// The current lifecycle of a all known Para IDs. 
#[pallet::storage] @@ -1168,6 +1168,11 @@ impl Pallet { ..Default::default() }); } + + #[cfg(any(feature = "runtime-benchmarks", test))] + pub fn heads_insert(para_id: &ParaId, head_data: HeadData) { + Heads::::insert(para_id, head_data); + } } #[cfg(test)] diff --git a/runtime/parachains/src/paras/benchmarking.rs b/runtime/parachains/src/paras/benchmarking.rs index b37ee5b83618..9c3de5cf9b2c 100644 --- a/runtime/parachains/src/paras/benchmarking.rs +++ b/runtime/parachains/src/paras/benchmarking.rs @@ -47,7 +47,7 @@ fn generate_disordered_pruning() { as Store>::PastCodePruning::put(needs_pruning); } -fn generate_disordered_upgrades() { +pub(crate) fn generate_disordered_upgrades() { let mut upgrades = Vec::new(); let mut cooldowns = Vec::new(); diff --git a/runtime/parachains/src/paras_inherent.rs b/runtime/parachains/src/paras_inherent.rs index c866a077ccb2..4c683d037cf0 100644 --- a/runtime/parachains/src/paras_inherent.rs +++ b/runtime/parachains/src/paras_inherent.rs @@ -23,30 +23,161 @@ use crate::{ disputes::DisputesHandler, - inclusion, - scheduler::{self, FreedReason}, + inclusion, initializer, + scheduler::{self, CoreAssignment, FreedReason}, shared, ump, }; +use bitvec::prelude::BitVec; use frame_support::{ inherent::{InherentData, InherentIdentifier, MakeFatalError, ProvideInherent}, pallet_prelude::*, + traits::Randomness, }; use frame_system::pallet_prelude::*; +use pallet_babe::{self, CurrentBlockRandomness}; use primitives::v1::{ - BackedCandidate, InherentData as ParachainsInherentData, ScrapedOnChainVotes, + BackedCandidate, CandidateHash, CoreIndex, DisputeStatementSet, + InherentData as ParachainsInherentData, MultiDisputeStatementSet, ScrapedOnChainVotes, + SessionIndex, SigningContext, UncheckedSignedAvailabilityBitfield, + UncheckedSignedAvailabilityBitfields, ValidatorId, ValidatorIndex, PARACHAINS_INHERENT_IDENTIFIER, }; +use rand::{Rng, SeedableRng}; +use scale_info::TypeInfo; use sp_runtime::traits::Header as HeaderT; -use sp_std::prelude::*; - -pub use pallet::*; +use sp_std::{ + cmp::Ordering, + collections::{btree_map::BTreeMap, btree_set::BTreeSet}, + prelude::*, + vec::Vec, +}; +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; const LOG_TARGET: &str = "runtime::inclusion-inherent"; -// In the future, we should benchmark these consts; these are all untested assumptions for now. -const BACKED_CANDIDATE_WEIGHT: Weight = 100_000; -const INCLUSION_INHERENT_CLAIMED_WEIGHT: Weight = 1_000_000_000; -// we assume that 75% of an paras inherent's weight is used processing backed candidates -const MINIMAL_INCLUSION_INHERENT_WEIGHT: Weight = INCLUSION_INHERENT_CLAIMED_WEIGHT / 4; +const SKIP_SIG_VERIFY: bool = false; +pub(crate) const VERIFY_SIGS: bool = true; + +pub trait WeightInfo { + /// Variant over `v`, the count of dispute statements in a dispute statement set. This gives the + /// weight of a single dispute statement set. + fn enter_variable_disputes(v: u32) -> Weight; + /// The weight of one bitfield. + fn enter_bitfields() -> Weight; + /// Variant over `v`, the count of validity votes for a backed candidate. This gives the weight + /// of a single backed candidate. + fn enter_backed_candidates_variable(v: u32) -> Weight; + /// The weight of a single backed candidate with a code upgrade. + fn enter_backed_candidate_code_upgrade() -> Weight; +} + +pub struct TestWeightInfo; +// `WeightInfo` impl for unit and integration tests. Based off of the `max_block` weight for the +// mock. 
+#[cfg(not(feature = "runtime-benchmarks"))] +impl WeightInfo for TestWeightInfo { + fn enter_variable_disputes(v: u32) -> Weight { + // MAX Block Weight should fit 4 disputes + 80_000 * v as Weight + 80_000 + } + fn enter_bitfields() -> Weight { + // MAX Block Weight should fit 4 backed candidates + 40_000 as Weight + } + fn enter_backed_candidates_variable(v: u32) -> Weight { + // MAX Block Weight should fit 4 backed candidates + 40_000 * v as Weight + 40_000 + } + fn enter_backed_candidate_code_upgrade() -> Weight { + 0 + } +} +// To simplify benchmarks running as tests, we set all the weights to 0. `enter` will exit early +// when if the data causes it to be over weight, but we don't want that to block a benchmark from +// running as a test. +#[cfg(feature = "runtime-benchmarks")] +impl WeightInfo for TestWeightInfo { + fn enter_variable_disputes(_v: u32) -> Weight { + 0 + } + fn enter_bitfields() -> Weight { + 0 + } + fn enter_backed_candidates_variable(_v: u32) -> Weight { + 0 + } + fn enter_backed_candidate_code_upgrade() -> Weight { + 0 + } +} + +fn paras_inherent_total_weight( + backed_candidates: &[BackedCandidate<::Hash>], + bitfields: &[UncheckedSignedAvailabilityBitfield], + disputes: &[DisputeStatementSet], +) -> Weight { + backed_candidates_weight::(backed_candidates) + .saturating_add(signed_bitfields_weight::(bitfields.len())) + .saturating_add(dispute_statements_weight::(disputes)) +} + +fn dispute_statements_weight(disputes: &[DisputeStatementSet]) -> Weight { + disputes + .iter() + .map(|d| { + <::WeightInfo as WeightInfo>::enter_variable_disputes( + d.statements.len() as u32 + ) + }) + .fold(0, |acc, x| acc.saturating_add(x)) +} + +fn signed_bitfields_weight(bitfields_len: usize) -> Weight { + <::WeightInfo as WeightInfo>::enter_bitfields() + .saturating_mul(bitfields_len as Weight) +} + +fn backed_candidate_weight( + candidate: &BackedCandidate, +) -> Weight { + if candidate.candidate.commitments.new_validation_code.is_some() { + <::WeightInfo as WeightInfo>::enter_backed_candidate_code_upgrade() + } else { + <::WeightInfo as WeightInfo>::enter_backed_candidates_variable( + candidate.validity_votes.len() as u32, + ) + } +} + +fn backed_candidates_weight( + candidates: &[BackedCandidate], +) -> Weight { + candidates + .iter() + .map(|c| backed_candidate_weight::(c)) + .fold(0, |acc, x| acc.saturating_add(x)) +} + +/// A bitfield concerning concluded disputes for candidates +/// associated to the core index equivalent to the bit position. +#[derive(Default, PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo)] +pub(crate) struct DisputedBitfield(pub(crate) BitVec); + +impl From> for DisputedBitfield { + fn from(inner: BitVec) -> Self { + Self(inner) + } +} + +#[cfg(test)] +impl DisputedBitfield { + /// Create a new bitfield, where each bit is set to `false`. + pub fn zeros(n: usize) -> Self { + Self::from(BitVec::::repeat(false, n)) + } +} + +pub use pallet::*; #[frame_support::pallet] pub mod pallet { @@ -58,7 +189,12 @@ pub mod pallet { #[pallet::config] #[pallet::disable_frame_system_supertrait_check] - pub trait Config: inclusion::Config + scheduler::Config {} + pub trait Config: + inclusion::Config + scheduler::Config + initializer::Config + pallet_babe::Config + { + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + } #[pallet::error] pub enum Error { @@ -69,6 +205,8 @@ pub mod pallet { InvalidParentHeader, /// Disputed candidate that was concluded invalid. 
CandidateConcludedInvalid, + /// The data given to the inherent will result in an overweight block. + InherentOverweight, } /// Whether the paras inherent was included within this block. @@ -105,32 +243,25 @@ pub mod pallet { const INHERENT_IDENTIFIER: InherentIdentifier = PARACHAINS_INHERENT_IDENTIFIER; fn create_inherent(data: &InherentData) -> Option { - let mut inherent_data: ParachainsInherentData = - match data.get_data(&Self::INHERENT_IDENTIFIER) { - Ok(Some(d)) => d, - Ok(None) => return None, - Err(_) => { - log::warn!(target: LOG_TARGET, "ParachainsInherentData failed to decode"); - - return None - }, - }; + let inherent_data = Self::create_inherent_inner(data)?; + // Sanity check: session changes can invalidate an inherent, + // and we _really_ don't want that to happen. + // See - // filter out any unneeded dispute statements - T::DisputesHandler::filter_multi_dispute_data(&mut inherent_data.disputes); + // Calling `Self::enter` here is a safe-guard, to avoid any discrepancy between on-chain checks + // (`enter`) and the off-chain checks by the block author (this function). Once we are confident + // in all the logic in this module this check should be removed to optimize performance. - // Sanity check: session changes can invalidate an inherent, and we _really_ don't want that to happen. - // See github.com/paritytech/polkadot/issues/1327 let inherent_data = match Self::enter(frame_system::RawOrigin::None.into(), inherent_data.clone()) { Ok(_) => inherent_data, Err(err) => { - log::warn!( - target: LOG_TARGET, - "dropping signed_bitfields and backed_candidates because they produced \ - an invalid paras inherent: {:?}", - err.error, - ); + log::error!( + target: LOG_TARGET, + "dropping paras inherent data because they produced \ + an invalid paras inherent: {:?}", + err.error, + ); ParachainsInherentData { bitfields: Vec::new(), @@ -149,27 +280,62 @@ pub mod pallet { } } + /// Collect all freed cores based on storage data. (i.e. append cores freed from timeouts to + /// the given `freed_concluded`). + /// + /// The parameter `freed_concluded` contains all core indicies that became + /// free due to candidate that became available. + pub(crate) fn collect_all_freed_cores( + freed_concluded: I, + ) -> BTreeMap + where + I: core::iter::IntoIterator, + T: Config, + { + // Handle timeouts for any availability core work. + let availability_pred = >::availability_timeout_predicate(); + let freed_timeout = if let Some(pred) = availability_pred { + >::collect_pending(pred) + } else { + Vec::new() + }; + + // Schedule paras again, given freed cores, and reasons for freeing. + let freed = freed_concluded + .into_iter() + .map(|(c, _hash)| (c, FreedReason::Concluded)) + .chain(freed_timeout.into_iter().map(|c| (c, FreedReason::TimedOut))) + .collect::>(); + freed + } + #[pallet::call] impl Pallet { /// Enter the paras inherent. This will process bitfields and backed candidates. 
#[pallet::weight(( - MINIMAL_INCLUSION_INHERENT_WEIGHT + data.backed_candidates.len() as Weight * BACKED_CANDIDATE_WEIGHT, + paras_inherent_total_weight::( + data.backed_candidates.as_slice(), + data.bitfields.as_slice(), + data.disputes.as_slice(), + ), DispatchClass::Mandatory, ))] pub fn enter( origin: OriginFor, data: ParachainsInherentData, ) -> DispatchResultWithPostInfo { + ensure_none(origin)?; + + ensure!(!Included::::exists(), Error::::TooManyInclusionInherents); + Included::::set(Some(())); + let ParachainsInherentData { - bitfields: signed_bitfields, - backed_candidates, + bitfields: mut signed_bitfields, + mut backed_candidates, parent_header, - disputes, + mut disputes, } = data; - ensure_none(origin)?; - ensure!(!Included::::exists(), Error::::TooManyInclusionInherents); - // Check that the submitted parent header indeed corresponds to the previous block hash. let parent_hash = >::parent_hash(); ensure!( @@ -177,53 +343,107 @@ pub mod pallet { Error::::InvalidParentHeader, ); + let mut candidate_weight = backed_candidates_weight::(&backed_candidates); + let mut bitfields_weight = signed_bitfields_weight::(signed_bitfields.len()); + let disputes_weight = dispute_statements_weight::(&disputes); + + let max_block_weight = ::BlockWeights::get().max_block; + + // Potentially trim inherent data to ensure processing will be within weight limits + let total_weight = { + if candidate_weight + .saturating_add(bitfields_weight) + .saturating_add(disputes_weight) > + max_block_weight + { + // if the total weight is over the max block weight, first try clearing backed + // candidates and bitfields. + backed_candidates.clear(); + candidate_weight = 0; + signed_bitfields.clear(); + bitfields_weight = 0; + } + + if disputes_weight > max_block_weight { + // if disputes are by themselves overweight already, trim the disputes. + debug_assert!(candidate_weight == 0 && bitfields_weight == 0); + + let entropy = compute_entropy::(parent_hash); + let mut rng = rand_chacha::ChaChaRng::from_seed(entropy.into()); + + let remaining_weight = + limit_disputes::(&mut disputes, max_block_weight, &mut rng); + max_block_weight.saturating_sub(remaining_weight) + } else { + candidate_weight + .saturating_add(bitfields_weight) + .saturating_add(disputes_weight) + } + }; + + let expected_bits = >::availability_cores().len(); + // Handle disputes logic. let current_session = >::session_index(); - { + let disputed_bitfield = { let new_current_dispute_sets: Vec<_> = disputes .iter() .filter(|s| s.session == current_session) .map(|s| (s.session, s.candidate_hash)) .collect(); + // Note that `provide_multi_dispute_data` will iterate, verify, and import each + // dispute; so the input here must be reasonably bounded. let _ = T::DisputesHandler::provide_multi_dispute_data(disputes.clone())?; if T::DisputesHandler::is_frozen() { // The relay chain we are currently on is invalid. Proceed no further on parachains. 
- Included::::set(Some(())); - return Ok(Some(MINIMAL_INCLUSION_INHERENT_WEIGHT).into()) + return Ok(Some(dispute_statements_weight::(&disputes)).into()) } let mut freed_disputed = if !new_current_dispute_sets.is_empty() { - let concluded_invalid_disputes: Vec<_> = new_current_dispute_sets + let concluded_invalid_disputes = new_current_dispute_sets .iter() - .filter(|(s, c)| T::DisputesHandler::concluded_invalid(*s, *c)) - .map(|(_, c)| *c) - .collect(); - - >::collect_disputed(concluded_invalid_disputes) - .into_iter() - .map(|core| (core, FreedReason::Concluded)) - .collect() + .filter(|(session, candidate)| { + T::DisputesHandler::concluded_invalid(*session, *candidate) + }) + .map(|(_, candidate)| *candidate) + .collect::>(); + + let freed_disputed = + >::collect_disputed(&concluded_invalid_disputes) + .into_iter() + .map(|core| (core, FreedReason::Concluded)) + .collect(); + freed_disputed } else { Vec::new() }; + // Create a bit index from the set of core indices where each index corresponds to + // a core index that was freed due to a dispute. + let disputed_bitfield = create_disputed_bitfield( + expected_bits, + freed_disputed.iter().map(|(core_index, _)| core_index), + ); + if !freed_disputed.is_empty() { // unstable sort is fine, because core indices are unique // i.e. the same candidate can't occupy 2 cores at once. freed_disputed.sort_unstable_by_key(|pair| pair.0); // sort by core index >::free_cores(freed_disputed); } + + disputed_bitfield }; // Process new availability bitfields, yielding any availability cores whose // work has now concluded. - let expected_bits = >::availability_cores().len(); let freed_concluded = >::process_bitfields( expected_bits, signed_bitfields, + disputed_bitfield, >::core_para, - )?; + ); // Inform the disputes module of all included candidates. let now = >::block_number(); @@ -231,40 +451,20 @@ pub mod pallet { T::DisputesHandler::note_included(current_session, *candidate_hash, now); } - // Handle timeouts for any availability core work. - let availability_pred = >::availability_timeout_predicate(); - let freed_timeout = if let Some(pred) = availability_pred { - >::collect_pending(pred) - } else { - Vec::new() - }; - - // Schedule paras again, given freed cores, and reasons for freeing. - let mut freed = freed_concluded - .into_iter() - .map(|(c, _hash)| (c, FreedReason::Concluded)) - .chain(freed_timeout.into_iter().map(|c| (c, FreedReason::TimedOut))) - .collect::>(); - - // unstable sort is fine, because core indices are unique. - freed.sort_unstable_by_key(|pair| pair.0); // sort by core index + let freed = collect_all_freed_cores::(freed_concluded.iter().cloned()); >::clear(); - >::schedule(freed, >::block_number()); - - let backed_candidates = limit_backed_candidates::(backed_candidates); - let backed_candidates_len = backed_candidates.len() as Weight; + >::schedule(freed, now); - // Refuse to back any candidates that were disputed and are concluded invalid. - for candidate in &backed_candidates { - ensure!( - !T::DisputesHandler::concluded_invalid( - current_session, - candidate.candidate.hash(), - ), - Error::::CandidateConcludedInvalid, - ); - } + let scheduled = >::scheduled(); + let backed_candidates = sanitize_backed_candidates::( + parent_hash, + backed_candidates, + move |candidate_hash: CandidateHash| -> bool { + ::DisputesHandler::concluded_invalid(current_session, candidate_hash) + }, + &scheduled[..], + ); // Process backed candidates according to scheduled cores. 
let parent_storage_root = parent_header.state_root().clone(); @@ -274,7 +474,7 @@ pub mod pallet { } = >::process_candidates( parent_storage_root, backed_candidates, - >::scheduled(), + scheduled, >::group_validators, )?; @@ -290,239 +490,1639 @@ pub mod pallet { >::occupied(&occupied); // Give some time slice to dispatch pending upward messages. - >::process_pending_upward_messages(); + // this is max config.ump_service_total_weight + let _ump_weight = >::process_pending_upward_messages(); - // And track that we've finished processing the inherent for this block. - Included::::set(Some(())); + Ok(Some(total_weight).into()) + } + } +} - Ok(Some( - MINIMAL_INCLUSION_INHERENT_WEIGHT + - (backed_candidates_len * BACKED_CANDIDATE_WEIGHT), - ) - .into()) +impl Pallet { + /// Create the `ParachainsInherentData` that gets passed to [`Self::enter`] in [`Self::create_inherent`]. + /// This code is pulled out of [`Self::create_inherent`] so it can be unit tested. + fn create_inherent_inner(data: &InherentData) -> Option> { + let ParachainsInherentData:: { + bitfields, + backed_candidates, + mut disputes, + parent_header, + } = match data.get_data(&Self::INHERENT_IDENTIFIER) { + Ok(Some(d)) => d, + Ok(None) => return None, + Err(_) => { + log::warn!(target: LOG_TARGET, "ParachainsInherentData failed to decode"); + return None + }, + }; + + let parent_hash = >::parent_hash(); + + if parent_hash != parent_header.hash() { + log::warn!( + target: LOG_TARGET, + "ParachainsInherentData references a different parent header hash than frame" + ); + return None + } + + let current_session = >::session_index(); + let expected_bits = >::availability_cores().len(); + let validator_public = shared::Pallet::::active_validator_keys(); + + T::DisputesHandler::filter_multi_dispute_data(&mut disputes); + + let (concluded_invalid_disputes, mut bitfields, scheduled) = + frame_support::storage::with_transaction(|| { + // we don't care about fresh or not disputes + // this writes them to storage, so let's query it via those means + // if this fails for whatever reason, that's ok + let _ = + T::DisputesHandler::provide_multi_dispute_data(disputes.clone()).map_err(|e| { + log::warn!( + target: LOG_TARGET, + "MultiDisputesData failed to update: {:?}", + e + ); + e + }); + + // current concluded invalid disputes, only including the current block's votes + // TODO why does this say "only including the current block's votes"? This can include + // remote disputes, right? + let current_concluded_invalid_disputes = disputes + .iter() + .filter(|dss| dss.session == current_session) + .map(|dss| (dss.session, dss.candidate_hash)) + .filter(|(session, candidate)| { + ::DisputesHandler::concluded_invalid(*session, *candidate) + }) + .map(|(_session, candidate)| candidate) + .collect::>(); + + // all concluded invalid disputes, that are relevant for the set of candidates + // the inherent provided + let concluded_invalid_disputes = backed_candidates + .iter() + .map(|backed_candidate| backed_candidate.hash()) + .filter(|candidate| { + ::DisputesHandler::concluded_invalid(current_session, *candidate) + }) + .collect::>(); + + let mut freed_disputed: Vec<_> = + >::collect_disputed(¤t_concluded_invalid_disputes) + .into_iter() + .map(|core| (core, FreedReason::Concluded)) + .collect(); + + let disputed_bitfield = + create_disputed_bitfield(expected_bits, freed_disputed.iter().map(|(x, _)| x)); + + if !freed_disputed.is_empty() { + // unstable sort is fine, because core indices are unique + // i.e. 
the same candidate can't occupy 2 cores at once. + freed_disputed.sort_unstable_by_key(|pair| pair.0); // sort by core index + >::free_cores(freed_disputed.clone()); + } + + // The following 3 calls are equiv to a call to `process_bitfields` + // but we can retain access to `bitfields`. + let bitfields = sanitize_bitfields::( + bitfields, + disputed_bitfield, + expected_bits, + parent_hash, + current_session, + &validator_public[..], + ); + + let freed_concluded = + >::update_pending_availability_and_get_freed_cores::< + _, + false, + >( + expected_bits, + &validator_public[..], + bitfields.clone(), + >::core_para, + ); + + let freed = collect_all_freed_cores::(freed_concluded.iter().cloned()); + + >::clear(); + >::schedule(freed, >::block_number()); + + let scheduled = >::scheduled(); + + frame_support::storage::TransactionOutcome::Rollback(( + // concluded disputes for backed candidates in this block + concluded_invalid_disputes, + // filtered bitfields, + bitfields, + // updated schedule + scheduled, + )) + }); + + let mut backed_candidates = sanitize_backed_candidates::( + parent_hash, + backed_candidates, + move |candidate_hash: CandidateHash| -> bool { + concluded_invalid_disputes.contains(&candidate_hash) + }, + &scheduled[..], + ); + + let entropy = compute_entropy::(parent_hash); + let mut rng = rand_chacha::ChaChaRng::from_seed(entropy.into()); + let max_block_weight = ::BlockWeights::get().max_block; + let _consumed_weight = apply_weight_limit::( + &mut backed_candidates, + &mut bitfields, + &mut disputes, + max_block_weight, + &mut rng, + ); + + Some(ParachainsInherentData:: { + bitfields, + backed_candidates, + disputes, + parent_header, + }) + } +} + +/// Derive a bitfield from dispute +pub(super) fn create_disputed_bitfield<'a, I>( + expected_bits: usize, + freed_cores: I, +) -> DisputedBitfield +where + I: 'a + IntoIterator, +{ + let mut bitvec = BitVec::repeat(false, expected_bits); + for core_idx in freed_cores { + let core_idx = core_idx.0 as usize; + if core_idx < expected_bits { + bitvec.set(core_idx, true); } } + DisputedBitfield::from(bitvec) } -/// Limit the number of backed candidates processed in order to stay within block weight limits. +/// Select a random subset /// -/// Use a configured assumption about the weight required to process a backed candidate and the -/// current block weight as of the execution of this function to ensure that we don't overload -/// the block with candidate processing. +/// Adds random items to the set until all candidates +/// are tried or the remaining weight is depleted. /// -/// If the backed candidates exceed the available block weight remaining, then skips all of them. -/// This is somewhat less desirable than attempting to fit some of them, but is more fair in the -/// even that we can't trust the provisioner to provide a fair / random ordering of candidates. -fn limit_backed_candidates( +/// Returns the weight of all selected items from `selectables` +/// as well as their indices in ascending order. 
+fn random_sel Weight>( + rng: &mut rand_chacha::ChaChaRng, + selectables: Vec, + weight_fn: F, + weight_limit: Weight, +) -> (Weight, Vec) { + if selectables.is_empty() { + return (0 as Weight, Vec::new()) + } + let mut indices = (0..selectables.len()).into_iter().collect::>(); + let mut picked_indices = Vec::with_capacity(selectables.len().saturating_sub(1)); + + let mut weight_acc = 0 as Weight; + while !indices.is_empty() { + // randomly pick an index + let pick = rng.gen_range(0..indices.len()); + // remove the index from the available set of indices + let idx = indices.swap_remove(pick); + + let item = &selectables[idx]; + weight_acc += weight_fn(item); + + if weight_acc > weight_limit { + break + } + + picked_indices.push(idx); + } + + // sorting indices, so the ordering is retained + // unstable sorting is fine, since there are no duplicates + picked_indices.sort_unstable(); + (weight_acc, picked_indices) +} + +/// Considers an upper threshold that the candidates must not exceed. +/// +/// If there is sufficient space, all bitfields and candidates will be included. +/// +/// Otherwise tries to include all bitfields, and fills in the remaining weight with candidates. +/// +/// If even the bitfields are too large to fit into the `max_weight` limit, bitfields are randomly +/// picked and _no_ candidates will be included. +fn apply_weight_limit( + candidates: &mut Vec::Hash>>, + bitfields: &mut UncheckedSignedAvailabilityBitfields, + disputes: &mut MultiDisputeStatementSet, + max_block_weight: Weight, + rng: &mut rand_chacha::ChaChaRng, +) -> Weight { + // include as many disputes as possible, always + let remaining_weight = limit_disputes::(disputes, max_block_weight, rng); + + let total_candidates_weight = backed_candidates_weight::(candidates.as_slice()); + + let total_bitfields_weight = signed_bitfields_weight::(bitfields.len()); + + let total = total_bitfields_weight.saturating_add(total_candidates_weight); + + // candidates + bitfields fit into the block + if remaining_weight >= total { + return total + } + + // There is weight remaining to be consumed by a subset of candidates + // which are going to be picked now. + if let Some(remaining_weight) = remaining_weight.checked_sub(total_bitfields_weight) { + let (acc_candidate_weight, indices) = + random_sel::::Hash>, _>( + rng, + candidates.clone(), + |c| backed_candidate_weight::(c), + remaining_weight, + ); + let mut idx = 0_usize; + candidates.retain(|_backed_candidate| { + let exists = indices.binary_search(&idx).is_ok(); + idx += 1; + exists + }); + // pick all bitfields, and + // fill the remaining space with candidates + let total = acc_candidate_weight.saturating_add(total_bitfields_weight); + return total + } + + candidates.clear(); + + // insufficient space for even the bitfields alone, so only try to fit as many of those + // into the block and skip the candidates entirely + let (total, indices) = random_sel::( + rng, + bitfields.clone(), + |_| <::WeightInfo as WeightInfo>::enter_bitfields(), + remaining_weight, + ); + + let mut idx = 0_usize; + bitfields.retain(|_bitfield| { + let exists = indices.binary_search(&idx).is_ok(); + idx += 1; + exists + }); + + total +} + +/// Filter bitfields based on freed core indices, validity, and other sanity checks. +/// +/// Do sanity checks on the bitfields: +/// +/// 1. no more than one bitfield per validator +/// 2. bitfields are ascending by validator index. +/// 3. each bitfield has exactly `expected_bits` +/// 4. signature is valid +/// 5. 
remove any disputed core indices +/// +/// If any of those is not passed, the bitfield is dropped. +/// +/// While this function technically returns a set of unchecked bitfields, +/// they were actually checked and filtered to allow using it in both +/// cases, as `filtering` and `checking` stage. +/// +/// `CHECK_SIGS` determines if validator signatures are checked. If true, bitfields that have an +/// invalid signature will be filtered out. +pub(crate) fn sanitize_bitfields( + unchecked_bitfields: UncheckedSignedAvailabilityBitfields, + disputed_bitfield: DisputedBitfield, + expected_bits: usize, + parent_hash: T::Hash, + session_index: SessionIndex, + validators: &[ValidatorId], +) -> UncheckedSignedAvailabilityBitfields { + let mut bitfields = Vec::with_capacity(unchecked_bitfields.len()); + + let mut last_index: Option = None; + + if disputed_bitfield.0.len() != expected_bits { + // This is a system logic error that should never occur, but we want to handle it gracefully + // so we just drop all bitfields + log::error!(target: LOG_TARGET, "BUG: disputed_bitfield != expected_bits"); + return vec![] + } + + let all_zeros = BitVec::::repeat(false, expected_bits); + let signing_context = SigningContext { parent_hash, session_index }; + for unchecked_bitfield in unchecked_bitfields { + // Find and skip invalid bitfields. + if unchecked_bitfield.unchecked_payload().0.len() != expected_bits { + log::trace!( + target: LOG_TARGET, + "[CHECK_SIGS: {}] bad bitfield length: {} != {:?}", + CHECK_SIGS, + unchecked_bitfield.unchecked_payload().0.len(), + expected_bits, + ); + continue + } + + if unchecked_bitfield.unchecked_payload().0.clone() & disputed_bitfield.0.clone() != + all_zeros + { + log::trace!( + target: LOG_TARGET, + "[CHECK_SIGS: {}] bitfield contains disputed cores: {:?}", + CHECK_SIGS, + unchecked_bitfield.unchecked_payload().0.clone() & disputed_bitfield.0.clone() + ); + continue + } + + let validator_index = unchecked_bitfield.unchecked_validator_index(); + + if !last_index.map_or(true, |last_index: ValidatorIndex| last_index < validator_index) { + log::trace!( + target: LOG_TARGET, + "[CHECK_SIGS: {}] bitfield validator index is not greater than last: !({:?} < {})", + CHECK_SIGS, + last_index.as_ref().map(|x| x.0), + validator_index.0 + ); + continue + } + + if unchecked_bitfield.unchecked_validator_index().0 as usize >= validators.len() { + log::trace!( + target: LOG_TARGET, + "[CHECK_SIGS: {}] bitfield validator index is out of bounds: {} >= {}", + CHECK_SIGS, + validator_index.0, + validators.len(), + ); + continue + } + + let validator_public = &validators[validator_index.0 as usize]; + + if CHECK_SIGS { + if let Ok(signed_bitfield) = + unchecked_bitfield.try_into_checked(&signing_context, validator_public) + { + bitfields.push(signed_bitfield.into_unchecked()); + } else { + log::warn!(target: LOG_TARGET, "Invalid bitfield signature"); + }; + } else { + bitfields.push(unchecked_bitfield); + } + + last_index = Some(validator_index); + } + bitfields +} + +/// Filter out any candidates that have a concluded invalid dispute. +/// +/// `scheduled` follows the same naming scheme as provided in the +/// guide: Currently `free` but might become `occupied`. +/// For the filtering here the relevant part is only the current `free` +/// state. 
+/// +/// `candidate_has_concluded_invalid_dispute` must return `true` if the candidate +/// is disputed, false otherwise +fn sanitize_backed_candidates bool>( + relay_parent: T::Hash, mut backed_candidates: Vec>, + candidate_has_concluded_invalid_dispute: F, + scheduled: &[CoreAssignment], ) -> Vec> { - const MAX_CODE_UPGRADES: usize = 1; + // Remove any candidates that were concluded invalid. + backed_candidates.retain(|backed_candidate| { + !candidate_has_concluded_invalid_dispute(backed_candidate.candidate.hash()) + }); + + // Assure the backed candidate's `ParaId`'s core is free. + // This holds under the assumption that `Scheduler::schedule` is called _before_. + // Also checks the candidate references the correct relay parent. + let scheduled_paras_set = scheduled + .into_iter() + .map(|core_assignment| core_assignment.para_id) + .collect::>(); + backed_candidates.retain(|backed_candidate| { + let desc = backed_candidate.descriptor(); + desc.relay_parent == relay_parent && scheduled_paras_set.contains(&desc.para_id) + }); + + backed_candidates +} - // Ignore any candidates beyond one that contain code upgrades. - // - // This is an artificial limitation that does not appear in the guide as it is a practical - // concern around execution. - { - let mut code_upgrades = 0; - backed_candidates.retain(|c| { - if c.candidate.commitments.new_validation_code.is_some() { - if code_upgrades >= MAX_CODE_UPGRADES { - return false - } +fn compute_entropy(parent_hash: T::Hash) -> [u8; 32] { + const CANDIDATE_SEED_SUBJECT: [u8; 32] = *b"candidate-seed-selection-subject"; + let vrf_random = CurrentBlockRandomness::::random(&CANDIDATE_SEED_SUBJECT[..]).0; + let mut entropy: [u8; 32] = CANDIDATE_SEED_SUBJECT.clone(); + if let Some(vrf_random) = vrf_random { + entropy.as_mut().copy_from_slice(vrf_random.as_ref()); + } else { + // in case there is no vrf randomness present, we utilize the relay parent + // as seed, it's better than a static value. + log::warn!(target: LOG_TARGET, "CurrentBlockRandomness did not provide entropy"); + entropy.as_mut().copy_from_slice(parent_hash.as_ref()); + } + entropy +} - code_upgrades += 1; +/// Limit disputes in place. +/// +/// Returns the unused weight of `remaining_weight`. +fn limit_disputes( + disputes: &mut MultiDisputeStatementSet, + remaining_weight: Weight, + rng: &mut rand_chacha::ChaChaRng, +) -> Weight { + let mut remaining_weight = remaining_weight; + let disputes_weight = dispute_statements_weight::(&disputes); + if disputes_weight > remaining_weight { + // Sort the dispute statements according to the following prioritization: + // 1. Prioritize local disputes over remote disputes. + // 2. Prioritize older disputes over newer disputes. + disputes.sort_unstable_by(|a, b| { + let a_local_block = T::DisputesHandler::included_state(a.session, a.candidate_hash); + let b_local_block = T::DisputesHandler::included_state(b.session, b.candidate_hash); + match (a_local_block, b_local_block) { + // Prioritize local disputes over remote disputes. + (None, Some(_)) => Ordering::Greater, + (Some(_), None) => Ordering::Less, + // For local disputes, prioritize those that occur at an earlier height. + (Some(a_height), Some(b_height)) => a_height.cmp(&b_height), + // Prioritize earlier remote disputes using session as rough proxy. 
+ (None, None) => a.session.cmp(&b.session), } + }); - true + // Since the disputes array is sorted, we may use binary search to find the beginning of + // remote disputes + let idx = disputes + .binary_search_by(|probe| { + if T::DisputesHandler::included_state(probe.session, probe.candidate_hash).is_some() + { + Ordering::Greater + } else { + Ordering::Less + } + }) + // The above predicate will never find an item and therefore we are guaranteed to obtain + // an error, which we can safely unwrap. QED. + .unwrap_err(); + + // Due to the binary search predicate above, the index computed will constitute the beginning + // of the remote disputes sub-array + let remote_disputes = disputes.split_off(idx); + + // Select disputes in-order until the remaining weight is attained + disputes.retain(|d| { + let dispute_weight = <::WeightInfo as WeightInfo>::enter_variable_disputes( + d.statements.len() as u32, + ); + if remaining_weight >= dispute_weight { + remaining_weight -= dispute_weight; + true + } else { + false + } }); - } - // the weight of the paras inherent is already included in the current block weight, - // so our operation is simple: if the block is currently overloaded, make this intrinsic smaller - if frame_system::Pallet::::block_weight().total() > - ::BlockWeights::get().max_block - { - Vec::new() - } else { - backed_candidates + // Compute the statements length of all remote disputes + let d = remote_disputes.iter().map(|d| d.statements.len() as u32).collect::>(); + + // Select remote disputes at random until the block is full + let (acc_remote_disputes_weight, indices) = random_sel::( + rng, + d, + |v| <::WeightInfo as WeightInfo>::enter_variable_disputes(*v), + remaining_weight, + ); + + // Collect all remote disputes + let mut remote_disputes = + indices.into_iter().map(|idx| disputes[idx].clone()).collect::>(); + + // Construct the full list of selected disputes + disputes.append(&mut remote_disputes); + + // Update the remaining weight + remaining_weight = remaining_weight.saturating_sub(acc_remote_disputes_weight); } + + remaining_weight } #[cfg(test)] mod tests { use super::*; - use crate::mock::{new_test_ext, MockGenesisConfig, System, Test}; - - mod limit_backed_candidates { + // In order to facilitate benchmarks as tests we have a benchmark feature gated `WeightInfo` impl + // that uses 0 for all the weights. Because all the weights are 0, the tests that rely on + // weights for limiting data will fail, so we don't run them when using the benchmark feature. 
+ #[cfg(not(feature = "runtime-benchmarks"))] + mod enter { use super::*; + use crate::{ + builder::{Bench, BenchBuilder}, + mock::{new_test_ext, MockGenesisConfig, Test}, + }; + use frame_support::assert_ok; + use sp_std::collections::btree_map::BTreeMap; + + struct TestConfig { + dispute_statements: BTreeMap, + dispute_sessions: Vec, + backed_and_concluding: BTreeMap, + num_validators_per_core: u32, + includes_code_upgrade: Option, + } + + fn make_inherent_data( + TestConfig { + dispute_statements, + dispute_sessions, + backed_and_concluding, + num_validators_per_core, + includes_code_upgrade, + }: TestConfig, + ) -> Bench { + BenchBuilder::::new() + .set_max_validators((dispute_sessions.len() as u32) * num_validators_per_core) + .set_max_validators_per_core(num_validators_per_core) + .set_dispute_statements(dispute_statements) + .build(backed_and_concluding, dispute_sessions.as_slice(), includes_code_upgrade) + } #[test] - fn does_not_truncate_on_empty_block() { + // Validate that if we create 2 backed candidates which are assigned to 2 cores that will be freed via + // becoming fully available, the backed candidates will not be filtered out in `create_inherent` and + // will not cause `enter` to exit early. + fn include_backed_candidates() { new_test_ext(MockGenesisConfig::default()).execute_with(|| { - let backed_candidates = vec![BackedCandidate::default()]; - System::set_block_consumed_resources(0, 0); - assert_eq!(limit_backed_candidates::(backed_candidates).len(), 1); + let dispute_statements = BTreeMap::new(); + + let mut backed_and_concluding = BTreeMap::new(); + backed_and_concluding.insert(0, 1); + backed_and_concluding.insert(1, 1); + + let scenario = make_inherent_data(TestConfig { + dispute_statements, + dispute_sessions: vec![0, 0], + backed_and_concluding, + num_validators_per_core: 1, + includes_code_upgrade: None, + }); + + // We expect the scenario to have cores 0 & 1 with pending availability. The backed + // candidates are also created for cores 0 & 1, so once the pending availability candidates + // become fully available those cores are marked as free and scheduled for the backed + // candidates. + let expected_para_inherent_data = scenario.data.clone(); + + // Check the para inherent data is as expected: + // * 1 bitfield per validator (2 validators) + assert_eq!(expected_para_inherent_data.bitfields.len(), 2); + // * 1 backed candidate per core (2 cores) + assert_eq!(expected_para_inherent_data.backed_candidates.len(), 2); + // * 0 disputes. + assert_eq!(expected_para_inherent_data.disputes.len(), 0); + let mut inherent_data = InherentData::new(); + inherent_data + .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data) + .unwrap(); + + // The current schedule is empty prior to calling `create_inherent_enter`. + assert_eq!(>::scheduled(), vec![]); + + // Nothing is filtered out (including the backed candidates.) + assert_eq!( + Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(), + expected_para_inherent_data + ); + + // The schedule is still empty prior to calling `enter`. (`create_inherent_inner` should not + // alter storage, but just double checking for sanity).
+ assert_eq!(>::scheduled(), vec![]); + + assert_eq!(Pallet::::on_chain_votes(), None); + // Call enter with our 2 backed candidates + assert_ok!(Pallet::::enter( + frame_system::RawOrigin::None.into(), + expected_para_inherent_data + )); + assert_eq!( + // The length of this vec is equal to the number of candidates, so we know our 2 + // backed candidates did not get filtered out + Pallet::::on_chain_votes() + .unwrap() + .backing_validators_per_candidate + .len(), + 2 + ); }); } #[test] - fn does_not_truncate_on_exactly_full_block() { + // Ensure that disputes are filtered out if the session is in the future. + fn filter_multi_dispute_data() { new_test_ext(MockGenesisConfig::default()).execute_with(|| { - let backed_candidates = vec![BackedCandidate::default()]; - let max_block_weight = - ::BlockWeights::get().max_block; - // if the consumed resources are precisely equal to the max block weight, we do not truncate. - System::set_block_consumed_resources(max_block_weight, 0); - assert_eq!(limit_backed_candidates::(backed_candidates).len(), 1); + // Create the inherent data for this block + let dispute_statements = BTreeMap::new(); + + let backed_and_concluding = BTreeMap::new(); + + let scenario = make_inherent_data(TestConfig { + dispute_statements, + dispute_sessions: vec![ + 1, 2, 3, /* Session 3 too new, will get filtered out */ + ], + backed_and_concluding, + num_validators_per_core: 5, + includes_code_upgrade: None, + }); + + let expected_para_inherent_data = scenario.data.clone(); + + // Check the para inherent data is as expected: + // * 1 bitfield per validator (5 validators per core, 3 disputes => 3 cores, 15 validators) + assert_eq!(expected_para_inherent_data.bitfields.len(), 15); + // * 0 backed candidate per core + assert_eq!(expected_para_inherent_data.backed_candidates.len(), 0); + // * 3 disputes. + assert_eq!(expected_para_inherent_data.disputes.len(), 3); + let mut inherent_data = InherentData::new(); + inherent_data + .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data) + .unwrap(); + + // The current schedule is empty prior to calling `create_inherent_enter`. + assert_eq!(>::scheduled(), vec![]); + + let multi_dispute_inherent_data = + Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(); + // Dispute for session that lies too far in the future should be filtered out + assert!(multi_dispute_inherent_data != expected_para_inherent_data); + + assert_eq!(multi_dispute_inherent_data.disputes.len(), 2); + + // Assert that the first 2 disputes are included + assert_eq!( + &multi_dispute_inherent_data.disputes[..2], + &expected_para_inherent_data.disputes[..2], + ); + + // The schedule is still empty prior to calling `enter`. (`create_inherent_inner` should not + // alter storage, but just double checking for sanity). 
+ assert_eq!(>::scheduled(), vec![]); + + assert_eq!(Pallet::::on_chain_votes(), None); + // Call enter with our 2 disputes + assert_ok!(Pallet::::enter( + frame_system::RawOrigin::None.into(), + multi_dispute_inherent_data, + )); + + assert_eq!( + // The length of this vec is equal to the number of candidates, so we know there + // were no backed candidates included + Pallet::::on_chain_votes() + .unwrap() + .backing_validators_per_candidate + .len(), + 0 + ); }); } #[test] - fn truncates_on_over_full_block() { + // Ensure that when dispute data establishes an over weight block, we adequately + // filter out disputes according to our prioritization rule + fn limit_dispute_data() { new_test_ext(MockGenesisConfig::default()).execute_with(|| { - let backed_candidates = vec![BackedCandidate::default()]; - let max_block_weight = - ::BlockWeights::get().max_block; - // if the consumed resources are precisely equal to the max block weight, we do not truncate. - System::set_block_consumed_resources(max_block_weight + 1, 0); - assert_eq!(limit_backed_candidates::(backed_candidates).len(), 0); + // Create the inherent data for this block + let dispute_statements = BTreeMap::new(); + // No backed and concluding cores, so all cores will be filled with disputes + let backed_and_concluding = BTreeMap::new(); + + let scenario = make_inherent_data(TestConfig { + dispute_statements, + dispute_sessions: vec![2, 2, 1], // 3 cores, all disputes + backed_and_concluding, + num_validators_per_core: 6, + includes_code_upgrade: None, + }); + + let expected_para_inherent_data = scenario.data.clone(); + + // Check the para inherent data is as expected: + // * 1 bitfield per validator (6 validators per core, 3 disputes => 18 validators) + assert_eq!(expected_para_inherent_data.bitfields.len(), 18); + // * 0 backed candidate per core + assert_eq!(expected_para_inherent_data.backed_candidates.len(), 0); + // * 3 disputes. + assert_eq!(expected_para_inherent_data.disputes.len(), 3); + let mut inherent_data = InherentData::new(); + inherent_data + .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data) + .unwrap(); + + // The current schedule is empty prior to calling `create_inherent_enter`. + assert_eq!(>::scheduled(), vec![]); + + let limit_inherent_data = + Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(); + // Expect that inherent data is filtered to include only 2 disputes + assert!(limit_inherent_data != expected_para_inherent_data); + + // Ensure that the included disputes are sorted by session + assert_eq!(limit_inherent_data.disputes.len(), 2); + assert_eq!(limit_inherent_data.disputes[0].session, 1); + assert_eq!(limit_inherent_data.disputes[1].session, 2); + + // The schedule is still empty prior to calling `enter`. (`create_inherent_inner` should not + // alter storage, but just double checking for sanity).
+ assert_eq!(>::scheduled(), vec![]); + + assert_eq!(Pallet::::on_chain_votes(), None); + // Call enter with our 2 disputes + assert_ok!(Pallet::::enter( + frame_system::RawOrigin::None.into(), + limit_inherent_data, + )); + + assert_eq!( + // Ensure that our inherent data did not include backed candidates as expected + Pallet::::on_chain_votes() + .unwrap() + .backing_validators_per_candidate + .len(), + 0 + ); }); } #[test] - fn all_backed_candidates_get_truncated() { + // Ensure that when dispute data establishes an over weight block, we abort + // due to an over weight block + fn limit_dispute_data_overweight() { new_test_ext(MockGenesisConfig::default()).execute_with(|| { - let backed_candidates = vec![BackedCandidate::default(); 10]; - let max_block_weight = - ::BlockWeights::get().max_block; - // if the consumed resources are precisely equal to the max block weight, we do not truncate. - System::set_block_consumed_resources(max_block_weight + 1, 0); - assert_eq!(limit_backed_candidates::(backed_candidates).len(), 0); + // Create the inherent data for this block + let dispute_statements = BTreeMap::new(); + // No backed and concluding cores, so all cores will be filled with disputes + let backed_and_concluding = BTreeMap::new(); + + let scenario = make_inherent_data(TestConfig { + dispute_statements, + dispute_sessions: vec![2, 2, 1], // 3 cores, all disputes + backed_and_concluding, + num_validators_per_core: 6, + includes_code_upgrade: None, + }); + + let expected_para_inherent_data = scenario.data.clone(); + + // Check the para inherent data is as expected: + // * 1 bitfield per validator (6 validators per core, 3 disputes => 18 validators) + assert_eq!(expected_para_inherent_data.bitfields.len(), 18); + // * 0 backed candidate per core + assert_eq!(expected_para_inherent_data.backed_candidates.len(), 0); + // * 3 disputes. + assert_eq!(expected_para_inherent_data.disputes.len(), 3); + let mut inherent_data = InherentData::new(); + inherent_data + .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data) + .unwrap(); + + // The current schedule is empty prior to calling `create_inherent_enter`.
+ assert_eq!(>::scheduled(), vec![]); + + assert_ok!(Pallet::::enter( + frame_system::RawOrigin::None.into(), + expected_para_inherent_data, + )); }); } #[test] - fn ignores_subsequent_code_upgrades() { + // Ensure that when a block is over weight due to disputes, but there is still sufficient + // block weight to include a number of signed bitfields, the inherent data is filtered + // as expected + fn limit_dispute_data_ignore_backed_candidates() { new_test_ext(MockGenesisConfig::default()).execute_with(|| { - let mut backed = BackedCandidate::default(); - backed.candidate.commitments.new_validation_code = Some(Vec::new().into()); - let backed_candidates = (0..3).map(|_| backed.clone()).collect(); - assert_eq!(limit_backed_candidates::(backed_candidates).len(), 1); + // Create the inherent data for this block + let dispute_statements = BTreeMap::new(); + + let mut backed_and_concluding = BTreeMap::new(); + // 2 backed candidates shall be scheduled + backed_and_concluding.insert(0, 2); + backed_and_concluding.insert(1, 2); + + let scenario = make_inherent_data(TestConfig { + dispute_statements, + // 2 backed candidates + 3 disputes (at sessions 2, 1 and 1) + dispute_sessions: vec![0, 0, 2, 2, 1], + backed_and_concluding, + num_validators_per_core: 4, + includes_code_upgrade: None, + }); + + let expected_para_inherent_data = scenario.data.clone(); + + // Check the para inherent data is as expected: + // * 1 bitfield per validator (4 validators per core, 2 backed candidates, 3 disputes => 4*5 = 20) + assert_eq!(expected_para_inherent_data.bitfields.len(), 20); + // * 2 backed candidates + assert_eq!(expected_para_inherent_data.backed_candidates.len(), 2); + // * 3 disputes. + assert_eq!(expected_para_inherent_data.disputes.len(), 3); + let mut inherent_data = InherentData::new(); + inherent_data + .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data) + .unwrap(); + + // The current schedule is empty prior to calling `create_inherent_enter`. + assert_eq!(>::scheduled(), vec![]); + + // Nothing is filtered out (including the backed candidates.) + let limit_inherent_data = + Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(); + assert!(limit_inherent_data != expected_para_inherent_data); + + // Three disputes is over weight (see previous test), so we expect to only see 2 disputes + assert_eq!(limit_inherent_data.disputes.len(), 2); + // Ensure disputes are filtered as expected + assert_eq!(limit_inherent_data.disputes[0].session, 1); + assert_eq!(limit_inherent_data.disputes[1].session, 2); + // Ensure all bitfields are included as these are still not over weight + assert_eq!( + limit_inherent_data.bitfields.len(), + expected_para_inherent_data.bitfields.len() + ); + // Ensure that all backed candidates are filtered out as either would make the block over weight + assert_eq!(limit_inherent_data.backed_candidates.len(), 0); + + // The schedule is still empty prior to calling `enter`. (`create_inherent_inner` should not + // alter storage, but just double checking for sanity). 
+ assert_eq!(>::scheduled(), vec![]); + + assert_eq!(Pallet::::on_chain_votes(), None); + // Call enter with our 2 disputes + assert_ok!(Pallet::::enter( + frame_system::RawOrigin::None.into(), + limit_inherent_data, + )); + + assert_eq!( + // The length of this vec is equal to the number of candidates, so we know + // all of our candidates got filtered out + Pallet::::on_chain_votes() + .unwrap() + .backing_validators_per_candidate + .len(), + 0, + ); }); } - } - mod paras_inherent_weight { - use super::*; - - use crate::mock::{new_test_ext, MockGenesisConfig, System, Test}; - use primitives::v1::Header; + #[test] + // Ensure that we abort if we encounter an over weight block for disputes + bitfields + fn limit_dispute_data_ignore_backed_candidates_overweight() { + new_test_ext(MockGenesisConfig::default()).execute_with(|| { + // Create the inherent data for this block + let dispute_statements = BTreeMap::new(); + + let mut backed_and_concluding = BTreeMap::new(); + // 2 backed candidates shall be scheduled + backed_and_concluding.insert(0, 2); + backed_and_concluding.insert(1, 2); + + let scenario = make_inherent_data(TestConfig { + dispute_statements, + // 2 backed candidates + 3 disputes (at sessions 2, 1 and 1) + dispute_sessions: vec![0, 0, 2, 2, 1], + backed_and_concluding, + num_validators_per_core: 4, + includes_code_upgrade: None, + }); + + let expected_para_inherent_data = scenario.data.clone(); + + // Check the para inherent data is as expected: + // * 1 bitfield per validator (4 validators per core, 2 backed candidates, 3 disputes => 4*5 = 20) + assert_eq!(expected_para_inherent_data.bitfields.len(), 20); + // * 2 backed candidates + assert_eq!(expected_para_inherent_data.backed_candidates.len(), 2); + // * 3 disputes. + assert_eq!(expected_para_inherent_data.disputes.len(), 3); + let mut inherent_data = InherentData::new(); + inherent_data + .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data) + .unwrap(); + + // The current schedule is empty prior to calling `create_inherent_enter`. + assert_eq!(>::scheduled(), vec![]); + + // Ensure that calling enter with 3 disputes and 2 candidates is over weight + assert_ok!(Pallet::::enter( + frame_system::RawOrigin::None.into(), + expected_para_inherent_data, + )); + + assert_eq!( + // The length of this vec is equal to the number of candidates, so we know + // all of our candidates got filtered out + Pallet::::on_chain_votes() + .unwrap() + .backing_validators_per_candidate + .len(), + 0, + ); + }); + } - use frame_support::traits::UnfilteredDispatchable; + #[test] + // Ensure that when a block is over weight due to disputes and bitfields, the bitfields are + // filtered to accommodate the block size and no backed candidates are included. 
+ fn limit_bitfields() { + new_test_ext(MockGenesisConfig::default()).execute_with(|| { + // Create the inherent data for this block + let mut dispute_statements = BTreeMap::new(); + // Cap the number of statements per dispute to 20 in order to ensure we have enough + // space in the block for some (but not all) bitfields + dispute_statements.insert(2, 20); + dispute_statements.insert(3, 20); + dispute_statements.insert(4, 20); + + let mut backed_and_concluding = BTreeMap::new(); + // Schedule 2 backed candidates + backed_and_concluding.insert(0, 2); + backed_and_concluding.insert(1, 2); + + let scenario = make_inherent_data(TestConfig { + dispute_statements, + // 2 backed candidates + 3 disputes (at sessions 2, 1 and 1) + dispute_sessions: vec![0, 0, 2, 2, 1], + backed_and_concluding, + num_validators_per_core: 5, + includes_code_upgrade: None, + }); + + let expected_para_inherent_data = scenario.data.clone(); + + // Check the para inherent data is as expected: + // * 1 bitfield per validator (5 validators per core, 2 backed candidates, 3 disputes => 5*5 = 25), + assert_eq!(expected_para_inherent_data.bitfields.len(), 25); + // * 2 backed candidates, + assert_eq!(expected_para_inherent_data.backed_candidates.len(), 2); + // * 3 disputes. + assert_eq!(expected_para_inherent_data.disputes.len(), 3); + let mut inherent_data = InherentData::new(); + inherent_data + .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data) + .unwrap(); + + // The current schedule is empty prior to calling `create_inherent_enter`. + assert_eq!(>::scheduled(), vec![]); + + // Some of the inherent data is filtered out so the block stays within its weight limit. + let limit_inherent_data = + Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(); + assert!(limit_inherent_data != expected_para_inherent_data); + + // Three disputes are over weight (see previous test), so we expect to only see 2 disputes + assert_eq!(limit_inherent_data.disputes.len(), 2); + // Ensure disputes are filtered as expected + assert_eq!(limit_inherent_data.disputes[0].session, 1); + assert_eq!(limit_inherent_data.disputes[1].session, 2); + // Ensure that only 20 of the 25 bitfields are included, since including all of them would make the block over weight + assert_eq!(limit_inherent_data.bitfields.len(), 20); + // Ensure that all backed candidates are filtered out as either would make the block over weight + assert_eq!(limit_inherent_data.backed_candidates.len(), 0); + + // The schedule is still empty prior to calling `enter`. (`create_inherent_inner` should not + // alter storage, but just double checking for sanity).
+ assert_eq!(>::scheduled(), vec![]); + + assert_eq!(Pallet::::on_chain_votes(), None); + // Call enter with our 2 disputes + assert_ok!(Pallet::::enter( + frame_system::RawOrigin::None.into(), + limit_inherent_data, + )); + + assert_eq!( + // The length of this vec is equal to the number of candidates, so we know + // all of our candidates got filtered out + Pallet::::on_chain_votes() + .unwrap() + .backing_validators_per_candidate + .len(), + 0, + ); + }); + } - fn default_header() -> Header { - Header { - parent_hash: Default::default(), - number: 0, - state_root: Default::default(), - extrinsics_root: Default::default(), - digest: Default::default(), - } + #[test] + // Ensure that when a block is over weight due to disputes and bitfields, we abort + fn limit_bitfields_overweight() { + new_test_ext(MockGenesisConfig::default()).execute_with(|| { + // Create the inherent data for this block + let mut dispute_statements = BTreeMap::new(); + // Control the number of statements per dispute to ensure we have enough space + // in the block for some (but not all) bitfields + dispute_statements.insert(2, 20); + dispute_statements.insert(3, 20); + dispute_statements.insert(4, 20); + + let mut backed_and_concluding = BTreeMap::new(); + // 2 backed candidates shall be scheduled + backed_and_concluding.insert(0, 2); + backed_and_concluding.insert(1, 2); + + let scenario = make_inherent_data(TestConfig { + dispute_statements, + // 2 backed candidates + 3 disputes (at sessions 2, 1 and 1) + dispute_sessions: vec![0, 0, 2, 2, 1], + backed_and_concluding, + num_validators_per_core: 5, + includes_code_upgrade: None, + }); + + let expected_para_inherent_data = scenario.data.clone(); + + // Check the para inherent data is as expected: + // * 1 bitfield per validator (5 validators per core, 2 backed candidates, 3 disputes => 5*5 = 25) + assert_eq!(expected_para_inherent_data.bitfields.len(), 25); + // * 2 backed candidates + assert_eq!(expected_para_inherent_data.backed_candidates.len(), 2); + // * 3 disputes. + assert_eq!(expected_para_inherent_data.disputes.len(), 3); + let mut inherent_data = InherentData::new(); + inherent_data + .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data) + .unwrap(); + + // The current schedule is empty prior to calling `create_inherent_enter`. + assert_eq!(>::scheduled(), vec![]); + + assert_ok!(Pallet::::enter( + frame_system::RawOrigin::None.into(), + expected_para_inherent_data, + )); + + assert_eq!( + // The length of this vec is equal to the number of candidates, so we know + // all of our candidates got filtered out + Pallet::::on_chain_votes() + .unwrap() + .backing_validators_per_candidate + .len(), + 0, + ); + }); } - /// We expect the weight of the paras inherent not to change when no truncation occurs: - /// its weight is dynamically computed from the size of the backed candidates list, and is - /// already incorporated into the current block weight when it is selected by the provisioner. 
#[test] - fn weight_does_not_change_on_happy_path() { + // Ensure that when a block is over weight due to disputes and bitfields, we abort + fn limit_candidates_over_weight() { new_test_ext(MockGenesisConfig::default()).execute_with(|| { - let header = default_header(); - System::set_block_number(1); - System::set_parent_hash(header.hash()); - - // number of bitfields doesn't affect the paras inherent weight, so we can mock it with an empty one - let signed_bitfields = Vec::new(); - // backed candidates must not be empty, so we can demonstrate that the weight has not changed - let backed_candidates = vec![BackedCandidate::default(); 10]; - - // the expected weight can always be computed by this formula - let expected_weight = MINIMAL_INCLUSION_INHERENT_WEIGHT + - (backed_candidates.len() as Weight * BACKED_CANDIDATE_WEIGHT); - - // we've used half the block weight; there's plenty of margin - let max_block_weight = - ::BlockWeights::get().max_block; - let used_block_weight = max_block_weight / 2; - System::set_block_consumed_resources(used_block_weight, 0); - - // execute the paras inherent - let post_info = Call::::enter { - data: ParachainsInherentData { - bitfields: signed_bitfields, - backed_candidates, - disputes: Vec::new(), - parent_header: default_header(), - }, - } - .dispatch_bypass_filter(None.into()) - .unwrap_err() - .post_info; - - // we don't directly check the block's weight post-call. Instead, we check that the - // call has returned the appropriate post-dispatch weight for refund, and trust - // Substrate to do the right thing with that information. - // - // In this case, the weight system can update the actual weight with the same amount, - // or return `None` to indicate that the pre-computed weight should not change. - // Either option is acceptable for our purposes. - if let Some(actual_weight) = post_info.actual_weight { - assert_eq!(actual_weight, expected_weight); - } + // Create the inherent data for this block + let mut dispute_statements = BTreeMap::new(); + // Control the number of statements per dispute to ensure we have enough space + // in the block for some (but not all) bitfields + dispute_statements.insert(2, 17); + dispute_statements.insert(3, 17); + dispute_statements.insert(4, 17); + + let mut backed_and_concluding = BTreeMap::new(); + // 2 backed candidates shall be scheduled + backed_and_concluding.insert(0, 16); + backed_and_concluding.insert(1, 25); + + let scenario = make_inherent_data(TestConfig { + dispute_statements, + dispute_sessions: vec![0, 0, 2, 2, 1], // 2 backed candidates, 3 disputes at sessions 2, 1 and 1 respectively + backed_and_concluding, + num_validators_per_core: 5, + includes_code_upgrade: None, + }); + + let expected_para_inherent_data = scenario.data.clone(); + + // Check the para inherent data is as expected: + // * 1 bitfield per validator (5 validators per core, 2 backed candidates, 3 disputes => 5*5 = 25) + assert_eq!(expected_para_inherent_data.bitfields.len(), 25); + // * 2 backed candidates + assert_eq!(expected_para_inherent_data.backed_candidates.len(), 2); + // * 3 disputes. 
+ assert_eq!(expected_para_inherent_data.disputes.len(), 3); + let mut inherent_data = InherentData::new(); + inherent_data + .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data) + .unwrap(); + + let limit_inherent_data = + Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(); + // Expect that inherent data is filtered to include only 1 backed candidate and 2 disputes + assert!(limit_inherent_data != expected_para_inherent_data); + + // * 25 bitfields + assert_eq!(limit_inherent_data.bitfields.len(), 25); + // * 1 backed candidate + assert_eq!(limit_inherent_data.backed_candidates.len(), 1); + // * 2 disputes. + assert_eq!(limit_inherent_data.disputes.len(), 2); + + // The current schedule is empty prior to calling `create_inherent_enter`. + assert_eq!(>::scheduled(), vec![]); + + assert_ok!(Pallet::::enter( + frame_system::RawOrigin::None.into(), + limit_inherent_data, + )); + + assert_eq!( + // The length of this vec is equal to the number of candidates, so we know 1 of our 2 + // backed candidates did not get filtered out + Pallet::::on_chain_votes() + .unwrap() + .backing_validators_per_candidate + .len(), + 1 + ); }); } - /// We expect the weight of the paras inherent to change when truncation occurs: its - /// weight was initially dynamically computed from the size of the backed candidates list, - /// but was reduced by truncation. #[test] - fn weight_changes_when_backed_candidates_are_truncated() { + // Ensure that when a block is over weight due to disputes and bitfields, we abort + fn limit_candidates_over_weight_overweight() { new_test_ext(MockGenesisConfig::default()).execute_with(|| { - let header = default_header(); - System::set_block_number(1); - System::set_parent_hash(header.hash()); - - // number of bitfields doesn't affect the paras inherent weight, so we can mock it with an empty one - let signed_bitfields = Vec::new(); - // backed candidates must not be empty, so we can demonstrate that the weight has not changed - let backed_candidates = vec![BackedCandidate::default(); 10]; - - // the expected weight with no blocks is just the minimum weight - let expected_weight = MINIMAL_INCLUSION_INHERENT_WEIGHT; - - // oops, looks like this mandatory call pushed the block weight over the limit - let max_block_weight = - ::BlockWeights::get().max_block; - let used_block_weight = max_block_weight + 1; - System::set_block_consumed_resources(used_block_weight, 0); - - // execute the paras inherent - let post_info = Call::::enter { - data: ParachainsInherentData { - bitfields: signed_bitfields, - backed_candidates, - disputes: Vec::new(), - parent_header: header, - }, - } - .dispatch_bypass_filter(None.into()) + // Create the inherent data for this block + let mut dispute_statements = BTreeMap::new(); + // Control the number of statements per dispute to ensure we have enough space + // in the block for some (but not all) bitfields + dispute_statements.insert(2, 17); + dispute_statements.insert(3, 17); + dispute_statements.insert(4, 17); + + let mut backed_and_concluding = BTreeMap::new(); + // 2 backed candidates shall be scheduled + backed_and_concluding.insert(0, 16); + backed_and_concluding.insert(1, 25); + + let scenario = make_inherent_data(TestConfig { + dispute_statements, + dispute_sessions: vec![0, 0, 2, 2, 1], // 2 backed candidates, 3 disputes at sessions 2, 1 and 1 respectively + backed_and_concluding, + num_validators_per_core: 5, + includes_code_upgrade: None, + }); + + let expected_para_inherent_data = scenario.data.clone(); + + // Check the 
para inherent data is as expected: + // * 1 bitfield per validator (5 validators per core, 2 backed candidates, 3 disputes => 5*5 = 25) + assert_eq!(expected_para_inherent_data.bitfields.len(), 25); + // * 2 backed candidates + assert_eq!(expected_para_inherent_data.backed_candidates.len(), 2); + // * 3 disputes. + assert_eq!(expected_para_inherent_data.disputes.len(), 3); + + assert_ok!(Pallet::::enter( + frame_system::RawOrigin::None.into(), + expected_para_inherent_data, + )); + + assert_eq!( + // The length of this vec is equal to the number of candidates, so we know our 2 + // backed candidates did not get filtered out + Pallet::::on_chain_votes() + .unwrap() + .backing_validators_per_candidate + .len(), + 0 + ); + }); + } + } + + fn default_header() -> primitives::v1::Header { + primitives::v1::Header { + parent_hash: Default::default(), + number: 0, + state_root: Default::default(), + extrinsics_root: Default::default(), + digest: Default::default(), + } + } + + mod sanitizers { + use super::*; + + use crate::inclusion::tests::{ + back_candidate, collator_sign_candidate, BackingKind, TestCandidateBuilder, + }; + use bitvec::order::Lsb0; + use primitives::v1::{ + AvailabilityBitfield, GroupIndex, Hash, Id as ParaId, SignedAvailabilityBitfield, + ValidatorIndex, + }; + + use crate::mock::Test; + use futures::executor::block_on; + use keyring::Sr25519Keyring; + use primitives::v0::PARACHAIN_KEY_TYPE_ID; + use sc_keystore::LocalKeystore; + use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; + use std::sync::Arc; + + fn validator_pubkeys(val_ids: &[keyring::Sr25519Keyring]) -> Vec { + val_ids.iter().map(|v| v.public().into()).collect() + } + + #[test] + fn bitfields() { + let header = default_header(); + let parent_hash = header.hash(); + // 2 cores means two bits + let expected_bits = 2; + let session_index = SessionIndex::from(0_u32); + + let crypto_store = LocalKeystore::in_memory(); + let crypto_store = Arc::new(crypto_store) as SyncCryptoStorePtr; + let signing_context = SigningContext { parent_hash, session_index }; + + let validators = vec![ + keyring::Sr25519Keyring::Alice, + keyring::Sr25519Keyring::Bob, + keyring::Sr25519Keyring::Charlie, + keyring::Sr25519Keyring::Dave, + ]; + for validator in validators.iter() { + SyncCryptoStore::sr25519_generate_new( + &*crypto_store, + PARACHAIN_KEY_TYPE_ID, + Some(&validator.to_seed()), + ) .unwrap(); + } + let validator_public = validator_pubkeys(&validators); + + let unchecked_bitfields = [ + BitVec::::repeat(true, expected_bits), + BitVec::::repeat(true, expected_bits), + { + let mut bv = BitVec::::repeat(false, expected_bits); + bv.set(expected_bits - 1, true); + bv + }, + ] + .iter() + .enumerate() + .map(|(vi, ab)| { + let validator_index = ValidatorIndex::from(vi as u32); + block_on(SignedAvailabilityBitfield::sign( + &crypto_store, + AvailabilityBitfield::from(ab.clone()), + &signing_context, + validator_index, + &validator_public[vi], + )) + .unwrap() + .unwrap() + .into_unchecked() + }) + .collect::>(); + + let disputed_bitfield = DisputedBitfield::zeros(expected_bits); - // we don't directly check the block's weight post-call. Instead, we check that the - // call has returned the appropriate post-dispatch weight for refund, and trust - // Substrate to do the right thing with that information. 
- assert_eq!(post_info.actual_weight.unwrap(), expected_weight); - }); + { + assert_eq!( + sanitize_bitfields::( + unchecked_bitfields.clone(), + disputed_bitfield.clone(), + expected_bits, + parent_hash, + session_index, + &validator_public[..] + ), + unchecked_bitfields.clone() + ); + assert_eq!( + sanitize_bitfields::( + unchecked_bitfields.clone(), + disputed_bitfield.clone(), + expected_bits, + parent_hash, + session_index, + &validator_public[..] + ), + unchecked_bitfields.clone() + ); + } + + // disputed bitfield is non-zero + { + let mut disputed_bitfield = DisputedBitfield::zeros(expected_bits); + // pretend the first core was freed by either a malicious validator + // or by resolved dispute + disputed_bitfield.0.set(0, true); + + assert_eq!( + sanitize_bitfields::( + unchecked_bitfields.clone(), + disputed_bitfield.clone(), + expected_bits, + parent_hash, + session_index, + &validator_public[..] + ) + .len(), + 1 + ); + assert_eq!( + sanitize_bitfields::( + unchecked_bitfields.clone(), + disputed_bitfield.clone(), + expected_bits, + parent_hash, + session_index, + &validator_public[..] + ) + .len(), + 1 + ); + } + + // bitfield size mismatch + { + assert!(sanitize_bitfields::( + unchecked_bitfields.clone(), + disputed_bitfield.clone(), + expected_bits + 1, + parent_hash, + session_index, + &validator_public[..] + ) + .is_empty()); + assert!(sanitize_bitfields::( + unchecked_bitfields.clone(), + disputed_bitfield.clone(), + expected_bits + 1, + parent_hash, + session_index, + &validator_public[..] + ) + .is_empty()); + } + + // remove the last validator + { + let shortened = validator_public.len() - 2; + assert_eq!( + &sanitize_bitfields::( + unchecked_bitfields.clone(), + disputed_bitfield.clone(), + expected_bits, + parent_hash, + session_index, + &validator_public[..shortened] + )[..], + &unchecked_bitfields[..shortened] + ); + assert_eq!( + &sanitize_bitfields::( + unchecked_bitfields.clone(), + disputed_bitfield.clone(), + expected_bits, + parent_hash, + session_index, + &validator_public[..shortened] + )[..], + &unchecked_bitfields[..shortened] + ); + } + + // switch ordering of bitfields + { + let mut unchecked_bitfields = unchecked_bitfields.clone(); + let x = unchecked_bitfields.swap_remove(0); + unchecked_bitfields.push(x); + assert_eq!( + &sanitize_bitfields::( + unchecked_bitfields.clone(), + disputed_bitfield.clone(), + expected_bits, + parent_hash, + session_index, + &validator_public[..] + )[..], + &unchecked_bitfields[..(unchecked_bitfields.len() - 2)] + ); + assert_eq!( + &sanitize_bitfields::( + unchecked_bitfields.clone(), + disputed_bitfield.clone(), + expected_bits, + parent_hash, + session_index, + &validator_public[..] + )[..], + &unchecked_bitfields[..(unchecked_bitfields.len() - 2)] + ); + } + + // check the validators signature + { + use primitives::v1::ValidatorSignature; + let mut unchecked_bitfields = unchecked_bitfields.clone(); + + // insert a bad signature for the last bitfield + let last_bit_idx = unchecked_bitfields.len() - 1; + unchecked_bitfields + .get_mut(last_bit_idx) + .and_then(|u| Some(u.set_signature(ValidatorSignature::default()))) + .expect("we are accessing a valid index"); + assert_eq!( + &sanitize_bitfields::( + unchecked_bitfields.clone(), + disputed_bitfield.clone(), + expected_bits, + parent_hash, + session_index, + &validator_public[..] 
+ )[..], + &unchecked_bitfields[..last_bit_idx] + ); + assert_eq!( + &sanitize_bitfields::( + unchecked_bitfields.clone(), + disputed_bitfield.clone(), + expected_bits, + parent_hash, + session_index, + &validator_public[..] + )[..], + &unchecked_bitfields[..] + ); + } + } + + #[test] + fn candidates() { + const RELAY_PARENT_NUM: u32 = 3; + + let header = default_header(); + let relay_parent = header.hash(); + let session_index = SessionIndex::from(0_u32); + + let keystore = LocalKeystore::in_memory(); + let keystore = Arc::new(keystore) as SyncCryptoStorePtr; + let signing_context = SigningContext { parent_hash: relay_parent, session_index }; + + let validators = vec![ + keyring::Sr25519Keyring::Alice, + keyring::Sr25519Keyring::Bob, + keyring::Sr25519Keyring::Charlie, + keyring::Sr25519Keyring::Dave, + ]; + for validator in validators.iter() { + SyncCryptoStore::sr25519_generate_new( + &*keystore, + PARACHAIN_KEY_TYPE_ID, + Some(&validator.to_seed()), + ) + .unwrap(); + } + + let has_concluded_invalid = |_candidate: CandidateHash| -> bool { false }; + + let scheduled = (0_usize..2) + .into_iter() + .map(|idx| { + let ca = CoreAssignment { + kind: scheduler::AssignmentKind::Parachain, + group_idx: GroupIndex::from(idx as u32), + para_id: ParaId::from(1_u32 + idx as u32), + core: CoreIndex::from(idx as u32), + }; + ca + }) + .collect::>(); + let scheduled = &scheduled[..]; + + let group_validators = |group_index: GroupIndex| { + match group_index { + group_index if group_index == GroupIndex::from(0) => Some(vec![0, 1]), + group_index if group_index == GroupIndex::from(1) => Some(vec![2, 3]), + _ => panic!("Group index out of bounds for 2 parachains and 1 parathread core"), + } + .map(|m| m.into_iter().map(ValidatorIndex).collect::>()) + }; + + let backed_candidates = (0_usize..2) + .into_iter() + .map(|idx0| { + let idx1 = idx0 + 1; + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(idx1), + relay_parent, + pov_hash: Hash::repeat_byte(idx1 as u8), + persisted_validation_data_hash: [42u8; 32].into(), + hrmp_watermark: RELAY_PARENT_NUM, + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = block_on(back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(idx0 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + )); + backed + }) + .collect::>(); + + // happy path + assert_eq!( + sanitize_backed_candidates::( + relay_parent, + backed_candidates.clone(), + has_concluded_invalid, + scheduled + ), + backed_candidates + ); + + // nothing is scheduled, so no paraids match, thus all backed candidates are skipped + { + let scheduled = &[][..]; + assert!(sanitize_backed_candidates::( + relay_parent, + backed_candidates.clone(), + has_concluded_invalid, + scheduled + ) + .is_empty()); + } + + // relay parent mismatch + { + let relay_parent = Hash::repeat_byte(0xFA); + assert!(sanitize_backed_candidates::( + relay_parent, + backed_candidates.clone(), + has_concluded_invalid, + scheduled + ) + .is_empty()); + } + + // candidates that have concluded as invalid are filtered out + { + // mark every second one as concluded invalid + let set = { + let mut set = std::collections::HashSet::new(); + for (idx, backed_candidate) in backed_candidates.iter().enumerate() { + if idx & 0x01 == 0 { + set.insert(backed_candidate.hash().clone()); + } + } + set + }; + let has_concluded_invalid = |candidate: CandidateHash| set.contains(&candidate); + assert_eq!( + 
sanitize_backed_candidates::( + relay_parent, + backed_candidates.clone(), + has_concluded_invalid, + scheduled + ) + .len(), + backed_candidates.len() / 2 + ); + } } } } diff --git a/runtime/parachains/src/paras_inherent/benchmarking.rs b/runtime/parachains/src/paras_inherent/benchmarking.rs new file mode 100644 index 000000000000..d5aea6dde331 --- /dev/null +++ b/runtime/parachains/src/paras_inherent/benchmarking.rs @@ -0,0 +1,206 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use super::*; +use crate::{inclusion, ParaId}; +use frame_benchmarking::{benchmarks, impl_benchmark_test_suite}; +use frame_system::RawOrigin; +use sp_std::collections::btree_map::BTreeMap; + +use crate::builder::BenchBuilder; + +benchmarks! { + // Variant over `v`, the number of dispute statements in a dispute statement set. This gives the + // weight of a single dispute statement set. + enter_variable_disputes { + let v in 10..BenchBuilder::::fallback_max_validators(); + + let scenario = BenchBuilder::::new() + .build(Default::default(), &[2], None); + + let mut benchmark = scenario.data.clone(); + let dispute = benchmark.disputes.pop().unwrap(); + + benchmark.bitfields.clear(); + benchmark.backed_candidates.clear(); + benchmark.disputes.clear(); + + benchmark.disputes.push(dispute); + benchmark.disputes.get_mut(0).unwrap().statements.drain(v as usize..); + }: enter(RawOrigin::None, benchmark) + verify { + // Assert that the block was not discarded + assert!(Included::::get().is_some()); + // Assert that there are on-chain votes that got scraped + let onchain_votes = OnChainVotes::::get(); + assert!(onchain_votes.is_some()); + let vote = onchain_votes.unwrap(); + // Ensure that the votes are for the correct session + assert_eq!(vote.session, scenario._session); + } + + // The weight of one bitfield. + enter_bitfields { + let cores_with_backed: BTreeMap<_, _> + = vec![(0, BenchBuilder::::fallback_max_validators())] + .into_iter() + .collect(); + + let scenario = BenchBuilder::::new() + .build(cores_with_backed, &[1], None); + + let mut benchmark = scenario.data.clone(); + let bitfield = benchmark.bitfields.pop().unwrap(); + + benchmark.bitfields.clear(); + benchmark.backed_candidates.clear(); + benchmark.disputes.clear(); + + benchmark.bitfields.push(bitfield); + }: enter(RawOrigin::None, benchmark) + verify { + // Assert that the block was not discarded + assert!(Included::::get().is_some()); + // Assert that there are on-chain votes that got scraped + let onchain_votes = OnChainVotes::::get(); + assert!(onchain_votes.is_some()); + let vote = onchain_votes.unwrap(); + // Ensure that the votes are for the correct session + assert_eq!(vote.session, scenario._session); + } + + // Variant over `v`, the amount of validity votes for a backed candidate. This gives the weight + // of a single backed candidate. 
+ enter_backed_candidates_variable { + // NOTE: the starting value must be over half of `max_validators` so the backed candidate is + // not rejected. + let v + in (BenchBuilder::::fallback_min_validity_votes()) + ..BenchBuilder::::fallback_max_validators(); + + let cores_with_backed: BTreeMap<_, _> + = vec![(0, v)] // The backed candidate will have `v` validity votes. + .into_iter() + .collect(); + + let scenario = BenchBuilder::::new() + .build(cores_with_backed.clone(), &[1], None); + + let mut benchmark = scenario.data.clone(); + + // There is 1 backed, + assert_eq!(benchmark.backed_candidates.len(), 1); + // with `v` validity votes. + assert_eq!(benchmark.backed_candidates.get(0).unwrap().validity_votes.len(), v as usize); + + benchmark.bitfields.clear(); + benchmark.disputes.clear(); + }: enter(RawOrigin::None, benchmark) + verify { + let max_validators_per_core = BenchBuilder::::fallback_max_validators_per_core(); + // Assert that the block was not discarded + assert!(Included::::get().is_some()); + // Assert that there are on-chain votes that got scraped + let onchain_votes = OnChainVotes::::get(); + assert!(onchain_votes.is_some()); + let vote = onchain_votes.unwrap(); + // Ensure that the votes are for the correct session + assert_eq!(vote.session, scenario._session); + // Ensure that there are an expected number of candidates + let header = BenchBuilder::::header(scenario._block_number.clone()); + // Traverse candidates and assert descriptors are as expected + for (para_id, backing_validators) in vote.backing_validators_per_candidate.iter().enumerate() { + let descriptor = backing_validators.0.descriptor(); + assert_eq!(ParaId::from(para_id), descriptor.para_id); + assert_eq!(header.hash(), descriptor.relay_parent); + assert_eq!(backing_validators.1.len(), v as usize); + } + + assert_eq!( + inclusion::PendingAvailabilityCommitments::::iter().count(), + cores_with_backed.len() + ); + assert_eq!( + inclusion::PendingAvailability::::iter().count(), + cores_with_backed.len() + ); + } + + enter_backed_candidate_code_upgrade { + // For now we always assume worst case code size. In the future we could vary over this. 
+ let v = crate::configuration::Pallet::::config().max_code_size; + + let cores_with_backed: BTreeMap<_, _> + = vec![(0, BenchBuilder::::fallback_min_validity_votes())] + .into_iter() + .collect(); + + let scenario = BenchBuilder::::new() + .build(cores_with_backed.clone(), &[1], Some(v)); + + let mut benchmark = scenario.data.clone(); + + // There is 1 backed + assert_eq!(benchmark.backed_candidates.len(), 1); + assert_eq!( + benchmark.backed_candidates.get(0).unwrap().validity_votes.len() as u32, + BenchBuilder::::fallback_min_validity_votes() + ); + + benchmark.bitfields.clear(); + benchmark.disputes.clear(); + crate::paras::benchmarking::generate_disordered_upgrades::(); + }: enter(RawOrigin::None, benchmark) + verify { + let max_validators_per_core = BenchBuilder::::fallback_max_validators_per_core(); + // Assert that the block was not discarded + assert!(Included::::get().is_some()); + // Assert that there are on-chain votes that got scraped + let onchain_votes = OnChainVotes::::get(); + assert!(onchain_votes.is_some()); + let vote = onchain_votes.unwrap(); + // Ensure that the votes are for the correct session + assert_eq!(vote.session, scenario._session); + // Ensure that there are an expected number of candidates + let header = BenchBuilder::::header(scenario._block_number.clone()); + // Traverse candidates and assert descriptors are as expected + for (para_id, backing_validators) + in vote.backing_validators_per_candidate.iter().enumerate() { + let descriptor = backing_validators.0.descriptor(); + assert_eq!(ParaId::from(para_id), descriptor.para_id); + assert_eq!(header.hash(), descriptor.relay_parent); + assert_eq!( + backing_validators.1.len() as u32, + BenchBuilder::::fallback_min_validity_votes() + ); + } + + assert_eq!( + inclusion::PendingAvailabilityCommitments::::iter().count(), + cores_with_backed.len() + ); + assert_eq!( + inclusion::PendingAvailability::::iter().count(), + cores_with_backed.len() + ); + } +} + +impl_benchmark_test_suite!( + Pallet, + crate::mock::new_test_ext(Default::default()), + crate::mock::Test +); diff --git a/runtime/parachains/src/scheduler.rs b/runtime/parachains/src/scheduler.rs index 8e948e3b5529..ca658be2cc28 100644 --- a/runtime/parachains/src/scheduler.rs +++ b/runtime/parachains/src/scheduler.rs @@ -90,6 +90,7 @@ impl ParathreadClaimQueue { } /// Reasons a core might be freed +#[derive(Clone, Copy)] pub enum FreedReason { /// The core's work concluded and the parablock assigned to it is considered available. Concluded, diff --git a/runtime/polkadot/Cargo.toml b/runtime/polkadot/Cargo.toml index e263e4f4e668..a9812fe997ea 100644 --- a/runtime/polkadot/Cargo.toml +++ b/runtime/polkadot/Cargo.toml @@ -192,6 +192,7 @@ runtime-benchmarks = [ "frame-system-benchmarking", "hex-literal", "frame-election-provider-support/runtime-benchmarks", + "runtime-parachains/runtime-benchmarks", ] try-runtime = [ "frame-executive/try-runtime", diff --git a/runtime/polkadot/src/lib.rs b/runtime/polkadot/src/lib.rs index 2b1c53c9a771..b235c3463633 100644 --- a/runtime/polkadot/src/lib.rs +++ b/runtime/polkadot/src/lib.rs @@ -1182,7 +1182,9 @@ impl parachains_hrmp::Config for Runtime { type Currency = Balances; } -impl parachains_paras_inherent::Config for Runtime {} +impl parachains_paras_inherent::Config for Runtime { + type WeightInfo = weights::runtime_parachains_paras_inherent::WeightInfo; +} impl parachains_scheduler::Config for Runtime {} @@ -1755,6 +1757,7 @@ sp_api::impl_runtime_apis! 
{ list_benchmark!(list, extra, runtime_parachains::configuration, Configuration); list_benchmark!(list, extra, runtime_parachains::initializer, Initializer); list_benchmark!(list, extra, runtime_parachains::paras, Paras); + list_benchmark!(list, extra, runtime_parachains::paras_inherent, ParaInherent); // Substrate list_benchmark!(list, extra, pallet_bags_list, BagsList); list_benchmark!(list, extra, pallet_balances, Balances); @@ -1831,6 +1834,7 @@ sp_api::impl_runtime_apis! { add_benchmark!(params, batches, runtime_parachains::configuration, Configuration); add_benchmark!(params, batches, runtime_parachains::initializer, Initializer); add_benchmark!(params, batches, runtime_parachains::paras, Paras); + add_benchmark!(params, batches, runtime_parachains::paras_inherent, ParaInherent); // Substrate add_benchmark!(params, batches, pallet_bags_list, BagsList); add_benchmark!(params, batches, pallet_balances, Balances); @@ -1880,7 +1884,6 @@ mod test_fees { MaxNominatorRewardedPerValidator::get(), ) as f64; let block_weight = BlockWeights::get().max_block as f64; - println!( "a full payout takes {:.2} of the block weight [{} / {}]", payout_weight / block_weight, diff --git a/runtime/polkadot/src/weights/mod.rs b/runtime/polkadot/src/weights/mod.rs index c913094df553..bf4d83844fd2 100644 --- a/runtime/polkadot/src/weights/mod.rs +++ b/runtime/polkadot/src/weights/mod.rs @@ -46,3 +46,4 @@ pub mod runtime_common_slots; pub mod runtime_parachains_configuration; pub mod runtime_parachains_initializer; pub mod runtime_parachains_paras; +pub mod runtime_parachains_paras_inherent; diff --git a/runtime/polkadot/src/weights/runtime_parachains_paras_inherent.rs b/runtime/polkadot/src/weights/runtime_parachains_paras_inherent.rs new file mode 100644 index 000000000000..e43221b98995 --- /dev/null +++ b/runtime/polkadot/src/weights/runtime_parachains_paras_inherent.rs @@ -0,0 +1,178 @@ +// Copyright 2017-2021 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . +//! Autogenerated weights for `runtime_parachains::paras_inherent` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2021-11-14, STEPS: `50`, REPEAT: 3, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=kusama-dev +// --steps=50 +// --repeat=3 +// --pallet=runtime_parachains::paras_inherent +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/kusama/src/weights/runtime_parachains_paras_inherent.rs + + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use sp_std::marker::PhantomData; + +/// Weight functions for `runtime_parachains::paras_inherent`. 
+pub struct WeightInfo(PhantomData); +impl runtime_parachains::paras_inherent::WeightInfo for WeightInfo { + // Storage: ParaInherent Included (r:1 w:1) + // Storage: System ParentHash (r:1 w:0) + // Storage: ParaScheduler AvailabilityCores (r:1 w:1) + // Storage: ParasShared CurrentSessionIndex (r:1 w:0) + // Storage: ParaInclusion PendingAvailability (r:2 w:1) + // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) + // Storage: Paras Parachains (r:1 w:0) + // Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) + // Storage: Configuration ActiveConfig (r:1 w:0) + // Storage: Session Validators (r:1 w:0) + // Storage: ParasShared ActiveValidatorIndices (r:1 w:0) + // Storage: Staking ActiveEra (r:1 w:0) + // Storage: Staking ErasRewardPoints (r:1 w:1) + // Storage: Dmp DownwardMessageQueues (r:1 w:1) + // Storage: Hrmp HrmpChannelDigests (r:1 w:1) + // Storage: Paras FutureCodeUpgrades (r:1 w:0) + // Storage: ParaScheduler SessionStartBlock (r:1 w:0) + // Storage: ParaScheduler ParathreadQueue (r:1 w:1) + // Storage: ParaScheduler Scheduled (r:1 w:1) + // Storage: ParaScheduler ValidatorGroups (r:1 w:0) + // Storage: Ump NeedsDispatch (r:1 w:1) + // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) + // Storage: ParaInherent OnChainVotes (r:0 w:1) + // Storage: Hrmp HrmpWatermarks (r:0 w:1) + // Storage: Paras Heads (r:0 w:1) + fn enter_variable_disputes(v: u32, ) -> Weight { + (316_331_000 as Weight) + // Standard Error: 112_000 + .saturating_add((325_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(T::DbWeight::get().reads(23 as Weight)) + .saturating_add(T::DbWeight::get().writes(14 as Weight)) + } + // Storage: ParaInherent Included (r:1 w:1) + // Storage: System ParentHash (r:1 w:0) + // Storage: ParaScheduler AvailabilityCores (r:1 w:1) + // Storage: ParasShared CurrentSessionIndex (r:1 w:0) + // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) + // Storage: Paras Parachains (r:1 w:0) + // Storage: ParaInclusion PendingAvailability (r:2 w:1) + // Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) + // Storage: Configuration ActiveConfig (r:1 w:0) + // Storage: Session Validators (r:1 w:0) + // Storage: ParasShared ActiveValidatorIndices (r:1 w:0) + // Storage: Staking ActiveEra (r:1 w:0) + // Storage: Staking ErasRewardPoints (r:1 w:1) + // Storage: Dmp DownwardMessageQueues (r:1 w:1) + // Storage: Hrmp HrmpChannelDigests (r:1 w:1) + // Storage: Paras FutureCodeUpgrades (r:1 w:0) + // Storage: ParaScheduler SessionStartBlock (r:1 w:0) + // Storage: ParaScheduler ParathreadQueue (r:1 w:1) + // Storage: ParaScheduler Scheduled (r:1 w:1) + // Storage: ParaScheduler ValidatorGroups (r:1 w:0) + // Storage: Ump NeedsDispatch (r:1 w:1) + // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) + // Storage: ParaInclusion AvailabilityBitfields (r:0 w:1) + // Storage: ParaInherent OnChainVotes (r:0 w:1) + // Storage: Hrmp HrmpWatermarks (r:0 w:1) + // Storage: Paras Heads (r:0 w:1) + fn enter_bitfields() -> Weight { + (352_749_000 as Weight) + .saturating_add(T::DbWeight::get().reads(23 as Weight)) + .saturating_add(T::DbWeight::get().writes(15 as Weight)) + } + // Storage: ParaInherent Included (r:1 w:1) + // Storage: System ParentHash (r:1 w:0) + // Storage: ParaScheduler AvailabilityCores (r:1 w:1) + // Storage: ParasShared CurrentSessionIndex (r:1 w:0) + // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) + // Storage: Paras Parachains (r:1 w:0) + // Storage: ParaInclusion PendingAvailability (r:2 w:1) + // Storage: ParaInclusion 
PendingAvailabilityCommitments (r:1 w:1) + // Storage: Configuration ActiveConfig (r:1 w:0) + // Storage: Session Validators (r:1 w:0) + // Storage: ParasShared ActiveValidatorIndices (r:1 w:0) + // Storage: Staking ActiveEra (r:1 w:0) + // Storage: Staking ErasRewardPoints (r:1 w:1) + // Storage: Dmp DownwardMessageQueues (r:1 w:1) + // Storage: Hrmp HrmpChannelDigests (r:1 w:1) + // Storage: Paras FutureCodeUpgrades (r:1 w:0) + // Storage: ParaScheduler SessionStartBlock (r:1 w:0) + // Storage: ParaScheduler ParathreadQueue (r:1 w:1) + // Storage: ParaScheduler Scheduled (r:1 w:1) + // Storage: ParaScheduler ValidatorGroups (r:1 w:0) + // Storage: Paras PastCodeMeta (r:1 w:0) + // Storage: Paras CurrentCodeHash (r:1 w:0) + // Storage: Ump RelayDispatchQueueSize (r:1 w:0) + // Storage: Ump NeedsDispatch (r:1 w:1) + // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) + // Storage: ParaInherent OnChainVotes (r:0 w:1) + // Storage: Hrmp HrmpWatermarks (r:0 w:1) + // Storage: Paras Heads (r:0 w:1) + fn enter_backed_candidates_variable(v: u32, ) -> Weight { + (88_047_000 as Weight) + // Standard Error: 3_275_000 + .saturating_add((68_499_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(T::DbWeight::get().reads(26 as Weight)) + .saturating_add(T::DbWeight::get().writes(14 as Weight)) + } + // Storage: ParaInherent Included (r:1 w:1) + // Storage: System ParentHash (r:1 w:0) + // Storage: ParaScheduler AvailabilityCores (r:1 w:1) + // Storage: ParasShared CurrentSessionIndex (r:1 w:0) + // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) + // Storage: Paras Parachains (r:1 w:0) + // Storage: ParaInclusion PendingAvailability (r:2 w:1) + // Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) + // Storage: Configuration ActiveConfig (r:1 w:0) + // Storage: Session Validators (r:1 w:0) + // Storage: ParasShared ActiveValidatorIndices (r:1 w:0) + // Storage: Staking ActiveEra (r:1 w:0) + // Storage: Staking ErasRewardPoints (r:1 w:1) + // Storage: Dmp DownwardMessageQueues (r:1 w:1) + // Storage: Hrmp HrmpChannelDigests (r:1 w:1) + // Storage: Paras FutureCodeUpgrades (r:1 w:0) + // Storage: ParaScheduler SessionStartBlock (r:1 w:0) + // Storage: ParaScheduler ParathreadQueue (r:1 w:1) + // Storage: ParaScheduler Scheduled (r:1 w:1) + // Storage: ParaScheduler ValidatorGroups (r:1 w:0) + // Storage: Paras PastCodeMeta (r:1 w:0) + // Storage: Paras CurrentCodeHash (r:1 w:0) + // Storage: Ump RelayDispatchQueueSize (r:1 w:0) + // Storage: Ump NeedsDispatch (r:1 w:1) + // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) + // Storage: ParaInherent OnChainVotes (r:0 w:1) + // Storage: Hrmp HrmpWatermarks (r:0 w:1) + // Storage: Paras Heads (r:0 w:1) + fn enter_backed_candidate_code_upgrade() -> Weight { + (53_728_168_000 as Weight) + .saturating_add(T::DbWeight::get().reads(26 as Weight)) + .saturating_add(T::DbWeight::get().writes(14 as Weight)) + } +} diff --git a/runtime/rococo/Cargo.toml b/runtime/rococo/Cargo.toml index 388d49101c16..61878123125c 100644 --- a/runtime/rococo/Cargo.toml +++ b/runtime/rococo/Cargo.toml @@ -177,7 +177,8 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", "frame-benchmarking", - "hex-literal" + "hex-literal", + "runtime-parachains/runtime-benchmarks", ] try-runtime = [ "frame-executive/try-runtime", diff --git a/runtime/rococo/src/lib.rs b/runtime/rococo/src/lib.rs index 750850ba5f3f..5d88333cc464 100644 --- a/runtime/rococo/src/lib.rs +++ b/runtime/rococo/src/lib.rs @@ -750,7 +750,9 @@ impl 
parachains_hrmp::Config for Runtime { type Currency = Balances; } -impl parachains_paras_inherent::Config for Runtime {} +impl parachains_paras_inherent::Config for Runtime { + type WeightInfo = weights::runtime_parachains_paras_inherent::WeightInfo<Runtime>; +} impl parachains_scheduler::Config for Runtime {} @@ -1616,6 +1618,7 @@ sp_api::impl_runtime_apis! { list_benchmark!(list, extra, runtime_parachains::configuration, Configuration); list_benchmark!(list, extra, runtime_parachains::disputes, ParasDisputes); + list_benchmark!(list, extra, runtime_parachains::paras_inherent, ParaInherent); list_benchmark!(list, extra, runtime_parachains::paras, Paras); let storage_info = AllPalletsWithSystem::storage_info(); @@ -1648,6 +1651,7 @@ sp_api::impl_runtime_apis! { add_benchmark!(params, batches, runtime_parachains::configuration, Configuration); add_benchmark!(params, batches, runtime_parachains::disputes, ParasDisputes); + add_benchmark!(params, batches, runtime_parachains::paras_inherent, ParaInherent); add_benchmark!(params, batches, runtime_parachains::paras, Paras); if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) } diff --git a/runtime/rococo/src/weights/mod.rs b/runtime/rococo/src/weights/mod.rs index 0c18d1ff9201..ccd1e3b0cda4 100644 --- a/runtime/rococo/src/weights/mod.rs +++ b/runtime/rococo/src/weights/mod.rs @@ -18,3 +18,4 @@ pub mod runtime_parachains_configuration; pub mod runtime_parachains_disputes; pub mod runtime_parachains_paras; +pub mod runtime_parachains_paras_inherent; diff --git a/runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs b/runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs new file mode 100644 index 000000000000..e43221b98995 --- /dev/null +++ b/runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs @@ -0,0 +1,178 @@ +// Copyright 2017-2021 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. +//! Autogenerated weights for `runtime_parachains::paras_inherent` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2021-11-14, STEPS: `50`, REPEAT: 3, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=kusama-dev +// --steps=50 +// --repeat=3 +// --pallet=runtime_parachains::paras_inherent +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/kusama/src/weights/runtime_parachains_paras_inherent.rs + + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use sp_std::marker::PhantomData; + +/// Weight functions for `runtime_parachains::paras_inherent`. 
+pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> runtime_parachains::paras_inherent::WeightInfo for WeightInfo<T> { + // Storage: ParaInherent Included (r:1 w:1) + // Storage: System ParentHash (r:1 w:0) + // Storage: ParaScheduler AvailabilityCores (r:1 w:1) + // Storage: ParasShared CurrentSessionIndex (r:1 w:0) + // Storage: ParaInclusion PendingAvailability (r:2 w:1) + // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) + // Storage: Paras Parachains (r:1 w:0) + // Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) + // Storage: Configuration ActiveConfig (r:1 w:0) + // Storage: Session Validators (r:1 w:0) + // Storage: ParasShared ActiveValidatorIndices (r:1 w:0) + // Storage: Staking ActiveEra (r:1 w:0) + // Storage: Staking ErasRewardPoints (r:1 w:1) + // Storage: Dmp DownwardMessageQueues (r:1 w:1) + // Storage: Hrmp HrmpChannelDigests (r:1 w:1) + // Storage: Paras FutureCodeUpgrades (r:1 w:0) + // Storage: ParaScheduler SessionStartBlock (r:1 w:0) + // Storage: ParaScheduler ParathreadQueue (r:1 w:1) + // Storage: ParaScheduler Scheduled (r:1 w:1) + // Storage: ParaScheduler ValidatorGroups (r:1 w:0) + // Storage: Ump NeedsDispatch (r:1 w:1) + // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) + // Storage: ParaInherent OnChainVotes (r:0 w:1) + // Storage: Hrmp HrmpWatermarks (r:0 w:1) + // Storage: Paras Heads (r:0 w:1) + fn enter_variable_disputes(v: u32, ) -> Weight { + (316_331_000 as Weight) + // Standard Error: 112_000 + .saturating_add((325_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(T::DbWeight::get().reads(23 as Weight)) + .saturating_add(T::DbWeight::get().writes(14 as Weight)) + } + // Storage: ParaInherent Included (r:1 w:1) + // Storage: System ParentHash (r:1 w:0) + // Storage: ParaScheduler AvailabilityCores (r:1 w:1) + // Storage: ParasShared CurrentSessionIndex (r:1 w:0) + // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) + // Storage: Paras Parachains (r:1 w:0) + // Storage: ParaInclusion PendingAvailability (r:2 w:1) + // Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) + // Storage: Configuration ActiveConfig (r:1 w:0) + // Storage: Session Validators (r:1 w:0) + // Storage: ParasShared ActiveValidatorIndices (r:1 w:0) + // Storage: Staking ActiveEra (r:1 w:0) + // Storage: Staking ErasRewardPoints (r:1 w:1) + // Storage: Dmp DownwardMessageQueues (r:1 w:1) + // Storage: Hrmp HrmpChannelDigests (r:1 w:1) + // Storage: Paras FutureCodeUpgrades (r:1 w:0) + // Storage: ParaScheduler SessionStartBlock (r:1 w:0) + // Storage: ParaScheduler ParathreadQueue (r:1 w:1) + // Storage: ParaScheduler Scheduled (r:1 w:1) + // Storage: ParaScheduler ValidatorGroups (r:1 w:0) + // Storage: Ump NeedsDispatch (r:1 w:1) + // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) + // Storage: ParaInclusion AvailabilityBitfields (r:0 w:1) + // Storage: ParaInherent OnChainVotes (r:0 w:1) + // Storage: Hrmp HrmpWatermarks (r:0 w:1) + // Storage: Paras Heads (r:0 w:1) + fn enter_bitfields() -> Weight { + (352_749_000 as Weight) + .saturating_add(T::DbWeight::get().reads(23 as Weight)) + .saturating_add(T::DbWeight::get().writes(15 as Weight)) + } + // Storage: ParaInherent Included (r:1 w:1) + // Storage: System ParentHash (r:1 w:0) + // Storage: ParaScheduler AvailabilityCores (r:1 w:1) + // Storage: ParasShared CurrentSessionIndex (r:1 w:0) + // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) + // Storage: Paras Parachains (r:1 w:0) + // Storage: ParaInclusion PendingAvailability (r:2 w:1) + // Storage: ParaInclusion 
PendingAvailabilityCommitments (r:1 w:1) + // Storage: Configuration ActiveConfig (r:1 w:0) + // Storage: Session Validators (r:1 w:0) + // Storage: ParasShared ActiveValidatorIndices (r:1 w:0) + // Storage: Staking ActiveEra (r:1 w:0) + // Storage: Staking ErasRewardPoints (r:1 w:1) + // Storage: Dmp DownwardMessageQueues (r:1 w:1) + // Storage: Hrmp HrmpChannelDigests (r:1 w:1) + // Storage: Paras FutureCodeUpgrades (r:1 w:0) + // Storage: ParaScheduler SessionStartBlock (r:1 w:0) + // Storage: ParaScheduler ParathreadQueue (r:1 w:1) + // Storage: ParaScheduler Scheduled (r:1 w:1) + // Storage: ParaScheduler ValidatorGroups (r:1 w:0) + // Storage: Paras PastCodeMeta (r:1 w:0) + // Storage: Paras CurrentCodeHash (r:1 w:0) + // Storage: Ump RelayDispatchQueueSize (r:1 w:0) + // Storage: Ump NeedsDispatch (r:1 w:1) + // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) + // Storage: ParaInherent OnChainVotes (r:0 w:1) + // Storage: Hrmp HrmpWatermarks (r:0 w:1) + // Storage: Paras Heads (r:0 w:1) + fn enter_backed_candidates_variable(v: u32, ) -> Weight { + (88_047_000 as Weight) + // Standard Error: 3_275_000 + .saturating_add((68_499_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(T::DbWeight::get().reads(26 as Weight)) + .saturating_add(T::DbWeight::get().writes(14 as Weight)) + } + // Storage: ParaInherent Included (r:1 w:1) + // Storage: System ParentHash (r:1 w:0) + // Storage: ParaScheduler AvailabilityCores (r:1 w:1) + // Storage: ParasShared CurrentSessionIndex (r:1 w:0) + // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) + // Storage: Paras Parachains (r:1 w:0) + // Storage: ParaInclusion PendingAvailability (r:2 w:1) + // Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) + // Storage: Configuration ActiveConfig (r:1 w:0) + // Storage: Session Validators (r:1 w:0) + // Storage: ParasShared ActiveValidatorIndices (r:1 w:0) + // Storage: Staking ActiveEra (r:1 w:0) + // Storage: Staking ErasRewardPoints (r:1 w:1) + // Storage: Dmp DownwardMessageQueues (r:1 w:1) + // Storage: Hrmp HrmpChannelDigests (r:1 w:1) + // Storage: Paras FutureCodeUpgrades (r:1 w:0) + // Storage: ParaScheduler SessionStartBlock (r:1 w:0) + // Storage: ParaScheduler ParathreadQueue (r:1 w:1) + // Storage: ParaScheduler Scheduled (r:1 w:1) + // Storage: ParaScheduler ValidatorGroups (r:1 w:0) + // Storage: Paras PastCodeMeta (r:1 w:0) + // Storage: Paras CurrentCodeHash (r:1 w:0) + // Storage: Ump RelayDispatchQueueSize (r:1 w:0) + // Storage: Ump NeedsDispatch (r:1 w:1) + // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) + // Storage: ParaInherent OnChainVotes (r:0 w:1) + // Storage: Hrmp HrmpWatermarks (r:0 w:1) + // Storage: Paras Heads (r:0 w:1) + fn enter_backed_candidate_code_upgrade() -> Weight { + (53_728_168_000 as Weight) + .saturating_add(T::DbWeight::get().reads(26 as Weight)) + .saturating_add(T::DbWeight::get().writes(14 as Weight)) + } +} diff --git a/runtime/test-runtime/src/lib.rs b/runtime/test-runtime/src/lib.rs index 773c50920c52..f92f2d476e33 100644 --- a/runtime/test-runtime/src/lib.rs +++ b/runtime/test-runtime/src/lib.rs @@ -476,7 +476,9 @@ impl parachains_disputes::Config for Runtime { type WeightInfo = parachains_disputes::TestWeightInfo; } -impl parachains_paras_inherent::Config for Runtime {} +impl parachains_paras_inherent::Config for Runtime { + type WeightInfo = parachains_paras_inherent::TestWeightInfo; +} impl parachains_initializer::Config for Runtime { type Randomness = pallet_babe::RandomnessFromOneEpochAgo; diff --git 
a/runtime/westend/Cargo.toml b/runtime/westend/Cargo.toml index f7e29f5cb70e..1073f0d169c6 100644 --- a/runtime/westend/Cargo.toml +++ b/runtime/westend/Cargo.toml @@ -206,6 +206,7 @@ runtime-benchmarks = [ "pallet-xcm-benchmarks", "frame-election-provider-support/runtime-benchmarks", "pallet-bags-list/runtime-benchmarks", + "runtime-parachains/runtime-benchmarks", ] try-runtime = [ "frame-executive/try-runtime", diff --git a/runtime/westend/src/lib.rs b/runtime/westend/src/lib.rs index 8ff636abf51e..40ac1e8ad1db 100644 --- a/runtime/westend/src/lib.rs +++ b/runtime/westend/src/lib.rs @@ -846,7 +846,9 @@ impl parachains_hrmp::Config for Runtime { type Currency = Balances; } -impl parachains_paras_inherent::Config for Runtime {} +impl parachains_paras_inherent::Config for Runtime { + type WeightInfo = weights::runtime_parachains_paras_inherent::WeightInfo<Runtime>; +} impl parachains_scheduler::Config for Runtime {} @@ -1482,6 +1484,7 @@ sp_api::impl_runtime_apis! { list_benchmark!(list, extra, runtime_common::slots, Slots); list_benchmark!(list, extra, runtime_parachains::configuration, Configuration); list_benchmark!(list, extra, runtime_parachains::initializer, Initializer); + list_benchmark!(list, extra, runtime_parachains::paras_inherent, ParaInherent); list_benchmark!(list, extra, runtime_parachains::paras, Paras); // Substrate @@ -1595,6 +1598,7 @@ sp_api::impl_runtime_apis! { add_benchmark!(params, batches, runtime_parachains::configuration, Configuration); add_benchmark!(params, batches, runtime_parachains::initializer, Initializer); add_benchmark!(params, batches, runtime_parachains::paras, Paras); + add_benchmark!(params, batches, runtime_parachains::paras_inherent, ParaInherent); // Substrate add_benchmark!(params, batches, pallet_bags_list, BagsList); diff --git a/runtime/westend/src/weights/mod.rs b/runtime/westend/src/weights/mod.rs index 923245b2fc35..8e7c4c4e0d55 100644 --- a/runtime/westend/src/weights/mod.rs +++ b/runtime/westend/src/weights/mod.rs @@ -37,4 +37,5 @@ pub mod runtime_common_slots; pub mod runtime_parachains_configuration; pub mod runtime_parachains_initializer; pub mod runtime_parachains_paras; +pub mod runtime_parachains_paras_inherent; pub mod xcm; diff --git a/runtime/westend/src/weights/runtime_parachains_paras_inherent.rs b/runtime/westend/src/weights/runtime_parachains_paras_inherent.rs new file mode 100644 index 000000000000..e43221b98995 --- /dev/null +++ b/runtime/westend/src/weights/runtime_parachains_paras_inherent.rs @@ -0,0 +1,178 @@ +// Copyright 2017-2021 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. +//! Autogenerated weights for `runtime_parachains::paras_inherent` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2021-11-14, STEPS: `50`, REPEAT: 3, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=kusama-dev +// --steps=50 +// --repeat=3 +// --pallet=runtime_parachains::paras_inherent +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/kusama/src/weights/runtime_parachains_paras_inherent.rs + + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use sp_std::marker::PhantomData; + +/// Weight functions for `runtime_parachains::paras_inherent`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> runtime_parachains::paras_inherent::WeightInfo for WeightInfo<T> { + // Storage: ParaInherent Included (r:1 w:1) + // Storage: System ParentHash (r:1 w:0) + // Storage: ParaScheduler AvailabilityCores (r:1 w:1) + // Storage: ParasShared CurrentSessionIndex (r:1 w:0) + // Storage: ParaInclusion PendingAvailability (r:2 w:1) + // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) + // Storage: Paras Parachains (r:1 w:0) + // Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) + // Storage: Configuration ActiveConfig (r:1 w:0) + // Storage: Session Validators (r:1 w:0) + // Storage: ParasShared ActiveValidatorIndices (r:1 w:0) + // Storage: Staking ActiveEra (r:1 w:0) + // Storage: Staking ErasRewardPoints (r:1 w:1) + // Storage: Dmp DownwardMessageQueues (r:1 w:1) + // Storage: Hrmp HrmpChannelDigests (r:1 w:1) + // Storage: Paras FutureCodeUpgrades (r:1 w:0) + // Storage: ParaScheduler SessionStartBlock (r:1 w:0) + // Storage: ParaScheduler ParathreadQueue (r:1 w:1) + // Storage: ParaScheduler Scheduled (r:1 w:1) + // Storage: ParaScheduler ValidatorGroups (r:1 w:0) + // Storage: Ump NeedsDispatch (r:1 w:1) + // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) + // Storage: ParaInherent OnChainVotes (r:0 w:1) + // Storage: Hrmp HrmpWatermarks (r:0 w:1) + // Storage: Paras Heads (r:0 w:1) + fn enter_variable_disputes(v: u32, ) -> Weight { + (316_331_000 as Weight) + // Standard Error: 112_000 + .saturating_add((325_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(T::DbWeight::get().reads(23 as Weight)) + .saturating_add(T::DbWeight::get().writes(14 as Weight)) + } + // Storage: ParaInherent Included (r:1 w:1) + // Storage: System ParentHash (r:1 w:0) + // Storage: ParaScheduler AvailabilityCores (r:1 w:1) + // Storage: ParasShared CurrentSessionIndex (r:1 w:0) + // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) + // Storage: Paras Parachains (r:1 w:0) + // Storage: ParaInclusion PendingAvailability (r:2 w:1) + // Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) + // Storage: Configuration ActiveConfig (r:1 w:0) + // Storage: Session Validators (r:1 w:0) + // Storage: ParasShared ActiveValidatorIndices (r:1 w:0) + // Storage: Staking ActiveEra (r:1 w:0) + // Storage: Staking ErasRewardPoints (r:1 w:1) + // Storage: Dmp DownwardMessageQueues (r:1 w:1) + // Storage: Hrmp HrmpChannelDigests (r:1 w:1) + // Storage: Paras FutureCodeUpgrades (r:1 w:0) + // Storage: ParaScheduler SessionStartBlock (r:1 w:0) + // Storage: ParaScheduler ParathreadQueue (r:1 w:1) + // Storage: ParaScheduler Scheduled (r:1 w:1) + // Storage: ParaScheduler ValidatorGroups (r:1 w:0) + // Storage: Ump NeedsDispatch (r:1 w:1) + // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) + // Storage: ParaInclusion AvailabilityBitfields (r:0 w:1) + // 
Storage: ParaInherent OnChainVotes (r:0 w:1) + // Storage: Hrmp HrmpWatermarks (r:0 w:1) + // Storage: Paras Heads (r:0 w:1) + fn enter_bitfields() -> Weight { + (352_749_000 as Weight) + .saturating_add(T::DbWeight::get().reads(23 as Weight)) + .saturating_add(T::DbWeight::get().writes(15 as Weight)) + } + // Storage: ParaInherent Included (r:1 w:1) + // Storage: System ParentHash (r:1 w:0) + // Storage: ParaScheduler AvailabilityCores (r:1 w:1) + // Storage: ParasShared CurrentSessionIndex (r:1 w:0) + // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) + // Storage: Paras Parachains (r:1 w:0) + // Storage: ParaInclusion PendingAvailability (r:2 w:1) + // Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) + // Storage: Configuration ActiveConfig (r:1 w:0) + // Storage: Session Validators (r:1 w:0) + // Storage: ParasShared ActiveValidatorIndices (r:1 w:0) + // Storage: Staking ActiveEra (r:1 w:0) + // Storage: Staking ErasRewardPoints (r:1 w:1) + // Storage: Dmp DownwardMessageQueues (r:1 w:1) + // Storage: Hrmp HrmpChannelDigests (r:1 w:1) + // Storage: Paras FutureCodeUpgrades (r:1 w:0) + // Storage: ParaScheduler SessionStartBlock (r:1 w:0) + // Storage: ParaScheduler ParathreadQueue (r:1 w:1) + // Storage: ParaScheduler Scheduled (r:1 w:1) + // Storage: ParaScheduler ValidatorGroups (r:1 w:0) + // Storage: Paras PastCodeMeta (r:1 w:0) + // Storage: Paras CurrentCodeHash (r:1 w:0) + // Storage: Ump RelayDispatchQueueSize (r:1 w:0) + // Storage: Ump NeedsDispatch (r:1 w:1) + // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) + // Storage: ParaInherent OnChainVotes (r:0 w:1) + // Storage: Hrmp HrmpWatermarks (r:0 w:1) + // Storage: Paras Heads (r:0 w:1) + fn enter_backed_candidates_variable(v: u32, ) -> Weight { + (88_047_000 as Weight) + // Standard Error: 3_275_000 + .saturating_add((68_499_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(T::DbWeight::get().reads(26 as Weight)) + .saturating_add(T::DbWeight::get().writes(14 as Weight)) + } + // Storage: ParaInherent Included (r:1 w:1) + // Storage: System ParentHash (r:1 w:0) + // Storage: ParaScheduler AvailabilityCores (r:1 w:1) + // Storage: ParasShared CurrentSessionIndex (r:1 w:0) + // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) + // Storage: Paras Parachains (r:1 w:0) + // Storage: ParaInclusion PendingAvailability (r:2 w:1) + // Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) + // Storage: Configuration ActiveConfig (r:1 w:0) + // Storage: Session Validators (r:1 w:0) + // Storage: ParasShared ActiveValidatorIndices (r:1 w:0) + // Storage: Staking ActiveEra (r:1 w:0) + // Storage: Staking ErasRewardPoints (r:1 w:1) + // Storage: Dmp DownwardMessageQueues (r:1 w:1) + // Storage: Hrmp HrmpChannelDigests (r:1 w:1) + // Storage: Paras FutureCodeUpgrades (r:1 w:0) + // Storage: ParaScheduler SessionStartBlock (r:1 w:0) + // Storage: ParaScheduler ParathreadQueue (r:1 w:1) + // Storage: ParaScheduler Scheduled (r:1 w:1) + // Storage: ParaScheduler ValidatorGroups (r:1 w:0) + // Storage: Paras PastCodeMeta (r:1 w:0) + // Storage: Paras CurrentCodeHash (r:1 w:0) + // Storage: Ump RelayDispatchQueueSize (r:1 w:0) + // Storage: Ump NeedsDispatch (r:1 w:1) + // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) + // Storage: ParaInherent OnChainVotes (r:0 w:1) + // Storage: Hrmp HrmpWatermarks (r:0 w:1) + // Storage: Paras Heads (r:0 w:1) + fn enter_backed_candidate_code_upgrade() -> Weight { + (53_728_168_000 as Weight) + .saturating_add(T::DbWeight::get().reads(26 as Weight)) + 
.saturating_add(T::DbWeight::get().writes(14 as Weight)) + } +}