From 442185ecd1f467994eec3936b879b7e715484e90 Mon Sep 17 00:00:00 2001 From: alindima Date: Fri, 16 Feb 2024 10:00:42 +0200 Subject: [PATCH 01/44] Initial draft changes --- .../runtime/parachains/src/inclusion/mod.rs | 731 ++++++++++-------- .../parachains/src/paras_inherent/mod.rs | 40 +- .../parachains/src/runtime_api_impl/v7.rs | 30 +- polkadot/runtime/parachains/src/util.rs | 21 +- 4 files changed, 447 insertions(+), 375 deletions(-) diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 16e2e93b5617..54255f9758e5 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -25,12 +25,13 @@ use crate::{ paras::{self, SetGoAhead}, scheduler::{self, AvailabilityTimeoutStatus}, shared::{self, AllowedRelayParentsTracker}, + util::make_persisted_validation_data_with_parent, }; use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; use frame_support::{ defensive, pallet_prelude::*, - traits::{Defensive, EnqueueMessage, Footprint, QueueFootprint}, + traits::{EnqueueMessage, Footprint, QueueFootprint}, BoundedSlice, }; use frame_system::pallet_prelude::*; @@ -47,7 +48,10 @@ use scale_info::TypeInfo; use sp_runtime::{traits::One, DispatchError, SaturatedConversion, Saturating}; #[cfg(feature = "std")] use sp_std::fmt; -use sp_std::{collections::btree_set::BTreeSet, prelude::*}; +use sp_std::{ + collections::{btree_map::BTreeMap, btree_set::BTreeSet, vec_deque::VecDeque}, + prelude::*, +}; pub use pallet::*; @@ -93,7 +97,7 @@ pub struct AvailabilityBitfieldRecord { } /// A backed candidate pending availability. -#[derive(Encode, Decode, PartialEq, TypeInfo)] +#[derive(Encode, Decode, PartialEq, TypeInfo, Clone)] #[cfg_attr(test, derive(Debug))] pub struct CandidatePendingAvailability { /// The availability core this is assigned to. @@ -102,6 +106,8 @@ pub struct CandidatePendingAvailability { hash: CandidateHash, /// The candidate descriptor. 
descriptor: CandidateDescriptor, + /// The candidate commitments. + commitments: CandidateCommitments, /// The received availability votes. One bit per validator. availability_votes: BitVec, /// The backers of the candidate pending availability. @@ -140,6 +146,11 @@ impl CandidatePendingAvailability { &self.descriptor } + /// Get the candidate commitments. + pub(crate) fn candidate_commitments(&self) -> &CandidateCommitments { + &self.commitments + } + /// Get the candidate's relay parent's number. pub(crate) fn relay_parent_number(&self) -> N where @@ -153,6 +164,7 @@ impl CandidatePendingAvailability { core: CoreIndex, hash: CandidateHash, descriptor: CandidateDescriptor, + commitments: CandidateCommitments, availability_votes: BitVec, backers: BitVec, relay_parent_number: N, @@ -163,6 +175,7 @@ impl CandidatePendingAvailability { core, hash, descriptor, + commitments, availability_votes, backers, relay_parent_number, @@ -367,20 +380,16 @@ pub mod pallet { pub(crate) type AvailabilityBitfields = StorageMap<_, Twox64Concat, ValidatorIndex, AvailabilityBitfieldRecord>>; - /// Candidates pending availability by `ParaId`. + /// Candidates pending availability by `ParaId`. They form a chain starting from the latest + /// included head of the para. #[pallet::storage] pub(crate) type PendingAvailability = StorageMap< _, Twox64Concat, ParaId, - CandidatePendingAvailability>, + VecDeque>>, >; - /// The commitments of candidates pending availability, by `ParaId`. - #[pallet::storage] - pub(crate) type PendingAvailabilityCommitments = - StorageMap<_, Twox64Concat, ParaId, CandidateCommitments>; - #[pallet::call] impl Pallet {} } @@ -469,7 +478,6 @@ impl Pallet { ) { // unlike most drain methods, drained elements are not cleared on `Drop` of the iterator // and require consumption. 
- for _ in >::drain() {} for _ in >::drain() {} for _ in >::drain() {} @@ -495,7 +503,7 @@ impl Pallet { /// Returns a `Vec` of `CandidateHash`es and their respective `AvailabilityCore`s that became /// available, and cores free. pub(crate) fn update_pending_availability_and_get_freed_cores( - expected_bits: usize, + allowed_relay_parents: &AllowedRelayParentsTracker>, validators: &[ValidatorId], signed_bitfields: SignedAvailabilityBitfields, core_lookup: F, @@ -503,14 +511,12 @@ impl Pallet { where F: Fn(CoreIndex) -> Option, { - let mut assigned_paras_record = (0..expected_bits) - .map(|bit_index| core_lookup(CoreIndex::from(bit_index as u32))) - .map(|opt_para_id| { - opt_para_id.map(|para_id| (para_id, PendingAvailability::::get(¶_id))) - }) - .collect::>(); - let now = >::block_number(); + let threshold = availability_threshold(validators.len()); + + // Track the paraids that had one/more of their candidates made available. + let mut paras_made_available = BTreeSet::new(); + for (checked_bitfield, validator_index) in signed_bitfields.into_iter().map(|signed_bitfield| { let validator_idx = signed_bitfield.validator_index(); @@ -518,26 +524,28 @@ impl Pallet { (checked_bitfield, validator_idx) }) { for (bit_idx, _) in checked_bitfield.0.iter().enumerate().filter(|(_, is_av)| **is_av) { - let pending_availability = if let Some((_, pending_availability)) = - assigned_paras_record[bit_idx].as_mut() - { - pending_availability + let core_index = CoreIndex(bit_idx as u32); + if let Some(para_id) = core_lookup(core_index) { + >::mutate(¶_id, |candidates| { + if let Some(candidates) = candidates { + for candidate in candidates { + if candidate.core == core_index { + // defensive check - this is constructed by loading the + // availability bitfield record, which is always `Some` if + // the core is occupied - that's why we're here. 
+ if let Some(mut bit) = candidate + .availability_votes + .get_mut(validator_index.0 as usize) + { + paras_made_available.insert(para_id); + *bit = true; + } + } + } + } + }); } else { - // For honest validators, this happens in case of unoccupied cores, - // which in turn happens in case of a disputed candidate. - // A malicious one might include arbitrary indices, but they are represented - // by `None` values and will be sorted out in the next if case. - continue - }; - - // defensive check - this is constructed by loading the availability bitfield - // record, which is always `Some` if the core is occupied - that's why we're here. - let validator_index = validator_index.0 as usize; - if let Some(mut bit) = - pending_availability.as_mut().and_then(|candidate_pending_availability| { - candidate_pending_availability.availability_votes.get_mut(validator_index) - }) { - *bit = true; + // No parachain is occupying that core yet. } } @@ -547,45 +555,77 @@ impl Pallet { >::insert(&validator_index, record); } - let threshold = availability_threshold(validators.len()); - - let mut freed_cores = Vec::with_capacity(expected_bits); - for (para_id, pending_availability) in assigned_paras_record + let mut freed_cores = Vec::with_capacity(paras_made_available.len()); + // Iterate through the paraids that had one of their candidates made available and see if we + // can free any of its occupied cores. + // We can only free cores whose candidates form a chain starting from the included para + // head. + // We assume dependency order is preserved in `PendingAvailability`. 
+ 'para_loop: for (para_id, candidates_pending_availability) in paras_made_available .into_iter() - .flatten() - .filter_map(|(id, p)| p.map(|p| (id, p))) + .filter_map(|para_id| >::get(para_id).map(|c| (para_id, c))) { - if pending_availability.availability_votes.count_ones() >= threshold { - >::remove(¶_id); - let commitments = match PendingAvailabilityCommitments::::take(¶_id) { - Some(commitments) => commitments, - None => { - log::warn!( - target: LOG_TARGET, - "Inclusion::process_bitfields: PendingAvailability and PendingAvailabilityCommitments - are out of sync, did someone mess with the storage?", - ); - continue - }, - }; - - let receipt = CommittedCandidateReceipt { - descriptor: pending_availability.descriptor, - commitments, - }; - let _weight = Self::enact_candidate( - pending_availability.relay_parent_number, - receipt, - pending_availability.backers, - pending_availability.availability_votes, - pending_availability.core, - pending_availability.backing_group, - ); - - freed_cores.push((pending_availability.core, pending_availability.hash)); - } else { - >::insert(¶_id, &pending_availability); + let mut stopped_at_index = 0; + let mut latest_parent_head = match >::para_head(¶_id) { + Some(head) => head, + None => continue, + }; + + // We have to check all candidates, because some of them may have already been made + // available in the past but their ancestors were not. 
+ for (index, pending_availability) in candidates_pending_availability.iter().enumerate() + { + stopped_at_index = index; + + if pending_availability.availability_votes.count_ones() >= threshold { + let (relay_parent_storage_root, _) = { + match allowed_relay_parents + .acquire_info(pending_availability.descriptor.relay_parent, None) + { + None => continue 'para_loop, // TODO: fix this + Some(info) => info, + } + }; + + let pvd = make_persisted_validation_data_with_parent::( + pending_availability.relay_parent_number, + relay_parent_storage_root, + latest_parent_head, + ); + if pvd.hash() != pending_availability.descriptor.persisted_validation_data_hash + { + // TODO: fix this. + // This means that we've backed a parachain fork in the past. Should have + // never happened. Should we evict all cores of this para? + continue 'para_loop; + } + + latest_parent_head = pending_availability.commitments.head_data.clone(); + + freed_cores.push((pending_availability.core, pending_availability.hash)); + } } + + // Trim the pending availability candidates storage and enact candidates now. + >::mutate(¶_id, |candidates| { + if let Some(candidates) = candidates { + let candidates_made_available = candidates.drain(0..stopped_at_index); + for candidate in candidates_made_available { + let receipt = CommittedCandidateReceipt { + descriptor: candidate.descriptor, + commitments: candidate.commitments, + }; + let _weight = Self::enact_candidate( + candidate.relay_parent_number, + receipt, + candidate.backers, + candidate.availability_votes, + candidate.core, + candidate.backing_group, + ); + } + } + }); } freed_cores @@ -611,217 +651,212 @@ impl Pallet { return Ok(ProcessedCandidates::default()) } - let minimum_backing_votes = configuration::Pallet::::config().minimum_backing_votes; let validators = shared::Pallet::::active_validator_keys(); // Collect candidate receipts with backers. 
let mut candidate_receipt_with_backing_validator_indices = Vec::with_capacity(candidates.len()); - - // Do all checks before writing storage. - let core_indices_and_backers = { - let mut core_indices_and_backers = Vec::with_capacity(candidates.len()); - let mut last_core = None; - - let mut check_assignment_in_order = |core_idx| -> DispatchResult { - ensure!( - last_core.map_or(true, |core| core_idx > core), - Error::::ScheduledOutOfOrder, - ); - - last_core = Some(core_idx); - Ok(()) + let mut core_indices = Vec::with_capacity(candidates.len()); + + // Map candidates by para id. Use a BTreeSet for each candidate as we'll be removing them + // from the set. + let candidates: BTreeMap, CoreIndex)>> = + candidates.into_iter().fold(BTreeMap::new(), |mut acc, (candidate, core)| { + acc.entry(candidate.candidate().descriptor.para_id) + .or_insert_with(|| vec![]) + .push((candidate, core)); + acc + }); + + for (para_id, mut candidates) in candidates { + let maybe_latest_head_data = match >::get(¶_id) + .map(|pending_candidates| { + pending_candidates.back().map(|x| x.commitments.head_data.clone()) + }) + .flatten() + { + Some(head_data) => Some(head_data), + None => >::para_head(¶_id), + }; + // this cannot be None + let mut latest_head_data = match maybe_latest_head_data { + None => continue, + Some(latest_head_data) => latest_head_data, }; - // We combine an outer loop over candidates with an inner loop over the scheduled, - // where each iteration of the outer loop picks up at the position - // in scheduled just after the past iteration left off. - // - // If the candidates appear in the same order as they appear in `scheduled`, - // then they should always be found. If the end of `scheduled` is reached, - // then the candidate was either not scheduled or out-of-order. - // - // In the meantime, we do certain sanity checks on the candidates and on the scheduled - // list. 
- for (candidate_idx, (backed_candidate, core_index)) in candidates.iter().enumerate() { - let relay_parent_hash = backed_candidate.descriptor().relay_parent; - let para_id = backed_candidate.descriptor().para_id; - - let prev_context = >::para_most_recent_context(para_id); - - let check_ctx = CandidateCheckContext::::new(prev_context); - let signing_context = SigningContext { - parent_hash: relay_parent_hash, - session_index: shared::Pallet::::session_index(), - }; - - let relay_parent_number = match check_ctx.verify_backed_candidate( - &allowed_relay_parents, - candidate_idx, - backed_candidate.candidate(), - )? { - Err(FailedToCreatePVD) => { - log::debug!( - target: LOG_TARGET, - "Failed to create PVD for candidate {}", - candidate_idx, - ); - // We don't want to error out here because it will - // brick the relay-chain. So we return early without - // doing anything. - return Ok(ProcessedCandidates::default()) - }, - Ok(rpn) => rpn, - }; - - let (validator_indices, _) = - backed_candidate.validator_indices_and_core_index(core_index_enabled); - - log::debug!( - target: LOG_TARGET, - "Candidate {:?} on {:?}, - core_index_enabled = {}", - backed_candidate.hash(), - core_index, - core_index_enabled - ); - - check_assignment_in_order(core_index)?; - - let mut backers = bitvec::bitvec![u8, BitOrderLsb0; 0; validators.len()]; - - ensure!( - >::get(¶_id).is_none() && - >::get(¶_id).is_none(), - Error::::CandidateScheduledBeforeParaFree, - ); - - // The candidate based upon relay parent `N` should be backed by a group - // assigned to core at block `N + 1`. Thus, `relay_parent_number + 1` - // will always land in the current session. 
- let group_idx = >::group_assigned_to_core( - *core_index, - relay_parent_number + One::one(), - ) - .ok_or_else(|| { - log::warn!( - target: LOG_TARGET, - "Failed to compute group index for candidate {}", - candidate_idx - ); - Error::::InvalidAssignment - })?; - let group_vals = - group_validators(group_idx).ok_or_else(|| Error::::InvalidGroupIndex)?; - - // check the signatures in the backing and that it is a majority. - { - let maybe_amount_validated = primitives::check_candidate_backing( - backed_candidate.candidate().hash(), - backed_candidate.validity_votes(), - validator_indices, - &signing_context, - group_vals.len(), - |intra_group_vi| { - group_vals - .get(intra_group_vi) - .and_then(|vi| validators.get(vi.0 as usize)) - .map(|v| v.clone()) - }, - ); - - match maybe_amount_validated { - Ok(amount_validated) => ensure!( - amount_validated >= - effective_minimum_backing_votes( - group_vals.len(), - minimum_backing_votes - ), - Error::::InsufficientBacking, - ), - Err(()) => { - Err(Error::::InvalidBacking)?; + // Now we need to arrange the candidates into a dependency chain based on the latest + // head. + // Since we don't know the parent_head_hash, we need to build the pvd for all candidates + // and check its hash against the one in the descriptor. + // TODO: we can make this more performant in we embed the parent_head_hash in the + // BackedCandidate. + loop { + let mut found_candidate = false; + + let mut used_candidates = BTreeSet::::new(); + + for (idx, (candidate, core)) in candidates.iter().enumerate() { + let candidate_hash = candidate.candidate().hash(); + + // TODO: find out if we're correctly building the context here. + let check_ctx = CandidateCheckContext::::new(None); + let relay_parent_number = match check_ctx.verify_backed_candidate( + &allowed_relay_parents, + candidate.candidate(), + latest_head_data.clone(), + )? { + // TODO: can a PVD mismatch hide some other issue? 
+ Err(PVDMismatch) => { + // This means that this candidate is not a child of + // latest_head_data. + continue }, - } - - let mut backer_idx_and_attestation = - Vec::<(ValidatorIndex, ValidityAttestation)>::with_capacity( - validator_indices.count_ones(), + Ok(relay_parent_number) => relay_parent_number, + }; + + // The candidate based upon relay parent `N` should be backed by a + // group assigned to core at block `N + 1`. Thus, + // `relay_parent_number + 1` will always land in the current + // session. + let group_idx = >::group_assigned_to_core( + *core, + relay_parent_number + One::one(), + ) + .ok_or_else(|| { + log::warn!( + target: LOG_TARGET, + "Failed to compute group index for candidate {:?}", + candidate_hash ); - let candidate_receipt = backed_candidate.receipt(); - - for ((bit_idx, _), attestation) in validator_indices - .iter() - .enumerate() - .filter(|(_, signed)| **signed) - .zip(backed_candidate.validity_votes().iter().cloned()) - { - let val_idx = - group_vals.get(bit_idx).expect("this query succeeded above; qed"); - backer_idx_and_attestation.push((*val_idx, attestation)); - - backers.set(val_idx.0 as _, true); - } + Error::::InvalidAssignment + })?; + let group_vals = + group_validators(group_idx).ok_or_else(|| Error::::InvalidGroupIndex)?; + + // Check backing vote count and validity. + let (backers, backer_idx_and_attestation) = Self::check_backing_votes( + candidate, + &validators, + group_vals, + core_index_enabled, + )?; + + // Found a valid candidate. + latest_head_data = candidate.candidate().commitments.head_data.clone(); + used_candidates.insert(idx); candidate_receipt_with_backing_validator_indices - .push((candidate_receipt, backer_idx_and_attestation)); + .push((candidate.receipt(), backer_idx_and_attestation)); + found_candidate = true; + core_indices.push((*core, para_id)); + + // Update storage now. The next candidate may be a successor of this one. 
+ >::mutate(¶_id, |pending_availability| { + if let Some(pending_availability) = pending_availability { + pending_availability.push_back(CandidatePendingAvailability { + core: *core, + hash: candidate_hash, + descriptor: candidate.candidate().descriptor.clone(), + commitments: candidate.candidate().commitments.clone(), + // initialize all availability votes to 0. + availability_votes: bitvec::bitvec![u8, BitOrderLsb0; 0; validators.len()], + relay_parent_number, + backers: backers.to_bitvec(), + backed_in_number: now, + backing_group: group_idx, + }); + } + }); + + // Deposit backed event. + Self::deposit_event(Event::::CandidateBacked( + candidate.candidate().to_plain(), + candidate.candidate().commitments.head_data.clone(), + *core, + group_idx, + )); } - core_indices_and_backers.push(( - (*core_index, para_id), - backers, - group_idx, - relay_parent_number, - )); + if !found_candidate { + break + } else { + // Remove used candidates + let mut i = 0; + candidates.retain(|_| { + let keep = !used_candidates.contains(&i); + i += 1; + keep + }); + } } + } - core_indices_and_backers - }; + Ok(ProcessedCandidates:: { + core_indices, // TODO: these may need to be sorted. + candidate_receipt_with_backing_validator_indices, + }) + } - // one more sweep for actually writing to storage. - let core_indices = core_indices_and_backers.iter().map(|(c, ..)| *c).collect(); - for ((candidate, _), (core, backers, group, relay_parent_number)) in - candidates.into_iter().zip(core_indices_and_backers) - { - let para_id = candidate.descriptor().para_id; + fn check_backing_votes( + backed_candidate: &BackedCandidate, + validators: &[ValidatorId], + group_vals: Vec, + core_index_enabled: bool, + ) -> Result<(BitVec, Vec<(ValidatorIndex, ValidityAttestation)>), Error> { + let minimum_backing_votes = configuration::Pallet::::config().minimum_backing_votes; - // initialize all availability votes to 0. 
- let availability_votes: BitVec = - bitvec::bitvec![u8, BitOrderLsb0; 0; validators.len()]; + let mut backers = bitvec::bitvec![u8, BitOrderLsb0; 0; validators.len()]; + let signing_context = SigningContext { + parent_hash: backed_candidate.descriptor().relay_parent, + session_index: shared::Pallet::::session_index(), + }; - Self::deposit_event(Event::::CandidateBacked( - candidate.candidate().to_plain(), - candidate.candidate().commitments.head_data.clone(), - core.0, - group, - )); + let (validator_indices, _) = + backed_candidate.validator_indices_and_core_index(core_index_enabled); + + // check the signatures in the backing and that it is a majority. + let maybe_amount_validated = primitives::check_candidate_backing( + backed_candidate.candidate().hash(), + backed_candidate.validity_votes(), + validator_indices, + &signing_context, + group_vals.len(), + |intra_group_vi| { + group_vals + .get(intra_group_vi) + .and_then(|vi| validators.get(vi.0 as usize)) + .map(|v| v.clone()) + }, + ); - let candidate_hash = candidate.candidate().hash(); + match maybe_amount_validated { + Ok(amount_validated) => ensure!( + amount_validated >= + effective_minimum_backing_votes(group_vals.len(), minimum_backing_votes), + Error::::InsufficientBacking, + ), + Err(()) => { + Err(Error::::InvalidBacking)?; + }, + } - let (descriptor, commitments) = ( - candidate.candidate().descriptor.clone(), - candidate.candidate().commitments.clone(), + let mut backer_idx_and_attestation = + Vec::<(ValidatorIndex, ValidityAttestation)>::with_capacity( + validator_indices.count_ones(), ); - >::insert( - ¶_id, - CandidatePendingAvailability { - core: core.0, - hash: candidate_hash, - descriptor, - availability_votes, - relay_parent_number, - backers: backers.to_bitvec(), - backed_in_number: now, - backing_group: group, - }, - ); - >::insert(¶_id, commitments); + for ((bit_idx, _), attestation) in validator_indices + .iter() + .enumerate() + .filter(|(_, signed)| **signed) + 
.zip(backed_candidate.validity_votes().iter().cloned()) + { + let val_idx = group_vals.get(bit_idx).expect("this query succeeded above; qed"); + backer_idx_and_attestation.push((*val_idx, attestation)); + + backers.set(val_idx.0 as _, true); } - Ok(ProcessedCandidates:: { - core_indices, - candidate_receipt_with_backing_validator_indices, - }) + Ok((backers, backer_idx_and_attestation)) } /// Run the acceptance criteria checks on the given candidate commitments. @@ -1028,41 +1063,55 @@ impl Pallet { weight } - /// Cleans up all paras pending availability that the predicate returns true for. + /// Cleans up all timed out candidates that the predicate returns true for. + /// Also cleans up their descendant candidates. /// - /// The predicate accepts the index of the core and the block number the core has been occupied + /// The predicate accepts the block number the core has been occupied /// since (i.e. the block number the candidate was backed at in this fork of the relay chain). /// /// Returns a vector of cleaned-up core IDs. - pub(crate) fn collect_pending( + pub(crate) fn collect_timedout( pred: impl Fn(BlockNumberFor) -> AvailabilityTimeoutStatus>, ) -> Vec { - let mut cleaned_up_ids = Vec::new(); + let mut timed_out_paras = BTreeMap::new(); let mut cleaned_up_cores = Vec::new(); - for (para_id, pending_record) in >::iter() { - if pred(pending_record.backed_in_number).timed_out { - cleaned_up_ids.push(para_id); - cleaned_up_cores.push(pending_record.core); + for (para_id, candidates_pending_availability) in >::iter() { + for (idx, candidate) in candidates_pending_availability.iter().enumerate() { + if pred(candidate.backed_in_number).timed_out { + timed_out_paras.insert(para_id, idx); + // Found the first timed out candidate of this para. All other successors will + // be timed out as well. 
Break and go to the next para + break + } } } - for para_id in cleaned_up_ids { - let pending = >::take(¶_id); - let commitments = >::take(¶_id); - - if let (Some(pending), Some(commitments)) = (pending, commitments) { - // defensive: this should always be true. - let candidate = CandidateReceipt { - descriptor: pending.descriptor, - commitments_hash: commitments.hash(), - }; - - Self::deposit_event(Event::::CandidateTimedOut( - candidate, - commitments.head_data, - pending.core, - )); + for (para_id, idx) in timed_out_paras.iter() { + let timed_out_candidates: Option>> = + >::mutate(¶_id, |candidates| { + if let Some(candidates) = candidates { + Some(candidates.drain(idx..).collect()) + } else { + None + } + }); + + if let Some(candidates) = timed_out_candidates { + for candidate in candidates { + cleaned_up_cores.push(candidate.core); + + let receipt = CandidateReceipt { + descriptor: candidate.descriptor, + commitments_hash: candidate.commitments.hash(), + }; + + Self::deposit_event(Event::::CandidateTimedOut( + receipt, + candidate.commitments.head_data, + candidate.core, + )); + } } } @@ -1073,19 +1122,32 @@ impl Pallet { /// /// Returns a vector of cleaned-up core IDs. pub(crate) fn collect_disputed(disputed: &BTreeSet) -> Vec { - let mut cleaned_up_ids = Vec::new(); - let mut cleaned_up_cores = Vec::new(); - - for (para_id, pending_record) in >::iter() { - if disputed.contains(&pending_record.hash) { - cleaned_up_ids.push(para_id); - cleaned_up_cores.push(pending_record.core); + let mut cleaned_up_cores = Vec::with_capacity(disputed.len()); + + for (para_id, pending_candidates) in >::iter() { + // We assume that pending candidates are stored in dependency order. So we need to store + // the earliest disputed candidate. All others that follow will get freed as well. 
+ let mut earliest_disputed_idx = None; + for (index, candidate) in pending_candidates.iter().enumerate() { + if disputed.contains(&candidate.hash) { + if let Some(prev_disputed_idx) = earliest_disputed_idx { + // Find the earliest disputed index. + earliest_disputed_idx = Some(sp_std::cmp::min(prev_disputed_idx, index)); + } + } } - } - for para_id in cleaned_up_ids { - let _ = >::take(¶_id); - let _ = >::take(¶_id); + if let Some(earliest_disputed_idx) = earliest_disputed_idx { + // Do cleanups and record the cleaned up cores + >::mutate(¶_id, |record| { + if let Some(record) = record { + let cleaned_up = record.drain(earliest_disputed_idx..); + for candidate in cleaned_up { + cleaned_up_cores.push(candidate.core); + } + } + }); + } } cleaned_up_cores @@ -1098,40 +1160,51 @@ impl Pallet { /// This should generally not be used but it is useful during execution of Runtime APIs, /// where the changes to the state are expected to be discarded directly after. pub(crate) fn force_enact(para: ParaId) { - let pending = >::take(¶); - let commitments = >::take(¶); - - if let (Some(pending), Some(commitments)) = (pending, commitments) { - let candidate = - CommittedCandidateReceipt { descriptor: pending.descriptor, commitments }; + // TODO: this does not take elastic-scaling into account, it enacts the first candidate. 
+ let enacted_candidate = + >::mutate(¶, |candidates| match candidates { + Some(candidates) => candidates.pop_front(), + _ => None, + }); + + if let Some(candidate) = enacted_candidate { + let receipt = CommittedCandidateReceipt { + descriptor: candidate.descriptor, + commitments: candidate.commitments, + }; Self::enact_candidate( - pending.relay_parent_number, - candidate, - pending.backers, - pending.availability_votes, - pending.core, - pending.backing_group, + candidate.relay_parent_number, + receipt, + candidate.backers, + candidate.availability_votes, + candidate.core, + candidate.backing_group, ); } } - /// Returns the `CommittedCandidateReceipt` pending availability for the para provided, if any. + /// Returns the first `CommittedCandidateReceipt` pending availability for the para provided, if + /// any. pub(crate) fn candidate_pending_availability( para: ParaId, ) -> Option> { >::get(¶) - .map(|p| p.descriptor) - .and_then(|d| >::get(¶).map(move |c| (d, c))) - .map(|(d, c)| CommittedCandidateReceipt { descriptor: d, commitments: c }) + .map(|p| { + p.get(0).map(|p| CommittedCandidateReceipt { + descriptor: p.descriptor.clone(), + commitments: p.commitments.clone(), + }) + }) + .flatten() } - /// Returns the metadata around the candidate pending availability for the + /// Returns the metadata around the first candidate pending availability for the /// para provided, if any. pub(crate) fn pending_availability( para: ParaId, ) -> Option>> { - >::get(¶) + >::get(¶).map(|p| p.get(0).cloned()).flatten() } } @@ -1184,7 +1257,7 @@ pub(crate) struct CandidateCheckContext { /// An error indicating that creating Persisted Validation Data failed /// while checking a candidate's validity. 
-pub(crate) struct FailedToCreatePVD; +pub(crate) struct PVDMismatch; impl CandidateCheckContext { pub(crate) fn new(prev_context: Option>) -> Self { @@ -1203,9 +1276,9 @@ impl CandidateCheckContext { pub(crate) fn verify_backed_candidate( &self, allowed_relay_parents: &AllowedRelayParentsTracker>, - candidate_idx: usize, backed_candidate_receipt: &CommittedCandidateReceipt<::Hash>, - ) -> Result, FailedToCreatePVD>, Error> { + parent_head_data: HeadData, + ) -> Result, PVDMismatch>, Error> { let para_id = backed_candidate_receipt.descriptor().para_id; let relay_parent = backed_candidate_receipt.descriptor().relay_parent; @@ -1218,23 +1291,17 @@ impl CandidateCheckContext { }; { - let persisted_validation_data = match crate::util::make_persisted_validation_data::( - para_id, + let persisted_validation_data = make_persisted_validation_data_with_parent::( relay_parent_number, relay_parent_storage_root, - ) - .defensive_proof("the para is registered") - { - Some(l) => l, - None => return Ok(Err(FailedToCreatePVD)), - }; + parent_head_data, + ); let expected = persisted_validation_data.hash(); - ensure!( - expected == backed_candidate_receipt.descriptor().persisted_validation_data_hash, - Error::::ValidationDataHashMismatch, - ); + if backed_candidate_receipt.descriptor().persisted_validation_data_hash != expected { + return Ok(Err(PVDMismatch)) + } } ensure!( @@ -1268,8 +1335,8 @@ impl CandidateCheckContext { ) { log::debug!( target: LOG_TARGET, - "Validation outputs checking during inclusion of a candidate {} for parachain `{}` failed", - candidate_idx, + "Validation outputs checking during inclusion of a candidate {:?} for parachain `{}` failed", + backed_candidate_receipt.hash(), u32::from(para_id), ); Err(err.strip_into_dispatch_err::())?; diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index cebf858c24ab..e5b7167d7ce4 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ 
b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -24,11 +24,8 @@ use crate::{ configuration, disputes::DisputesHandler, - inclusion, - inclusion::CandidateCheckContext, - initializer, + inclusion, initializer, metrics::METRICS, - paras, scheduler::{self, FreedReason}, shared::{self, AllowedRelayParentsTracker}, ParaId, @@ -250,7 +247,7 @@ pub mod pallet { // Handle timeouts for any availability core work. let freed_timeout = if >::availability_timeout_check_required() { let pred = >::availability_timeout_predicate(); - >::collect_pending(pred) + >::collect_timedout(pred) } else { Vec::new() }; @@ -572,7 +569,7 @@ impl Pallet { // work has now concluded. let freed_concluded = >::update_pending_availability_and_get_freed_cores::<_>( - expected_bits, + &allowed_relay_parents, &validator_public[..], bitfields.clone(), >::core_para, @@ -612,24 +609,13 @@ impl Pallet { } = sanitize_backed_candidates::( backed_candidates, &allowed_relay_parents, - |candidate_idx: usize, - backed_candidate: &BackedCandidate<::Hash>| - -> bool { - let para_id = backed_candidate.descriptor().para_id; - let prev_context = >::para_most_recent_context(para_id); - let check_ctx = CandidateCheckContext::::new(prev_context); - - // never include a concluded-invalid candidate - current_concluded_invalid_disputes.contains(&backed_candidate.hash()) || - // Instead of checking the candidates with code upgrades twice - // move the checking up here and skip it in the training wheels fallback. - // That way we avoid possible duplicate checks while assuring all - // backed candidates fine to pass on. - // - // NOTE: this is the only place where we check the relay-parent. - check_ctx - .verify_backed_candidate(&allowed_relay_parents, candidate_idx, backed_candidate.candidate()) - .is_err() + |backed_candidate: &BackedCandidate<::Hash>| -> bool { + // TODO: see this old comment // NOTE: this is the only place where we check the + // relay-parent. never include a concluded-invalid candidate. 
we don't need to + // check for descendants of concluded-invalid candidates as those descendants + // have already been evicted from the cores and the included head data won't + // match. + current_concluded_invalid_disputes.contains(&backed_candidate.hash()) }, scheduled, core_index_enabled, @@ -982,7 +968,7 @@ struct SanitizedBackedCandidates { /// occupied core index. fn sanitize_backed_candidates< T: crate::inclusion::Config, - F: FnMut(usize, &BackedCandidate) -> bool, + F: FnMut(&BackedCandidate) -> bool, >( mut backed_candidates: Vec>, allowed_relay_parents: &AllowedRelayParentsTracker>, @@ -992,8 +978,8 @@ fn sanitize_backed_candidates< ) -> SanitizedBackedCandidates { // Remove any candidates that were concluded invalid. // This does not assume sorting. - backed_candidates.indexed_retain(move |candidate_idx, backed_candidate| { - !candidate_has_concluded_invalid_dispute_or_is_invalid(candidate_idx, backed_candidate) + backed_candidates.retain(move |backed_candidate| { + !candidate_has_concluded_invalid_dispute_or_is_invalid(backed_candidate) }); let initial_candidate_count = backed_candidates.len(); diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs b/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs index 1bbd4dfb716f..9c9d94eeddaf 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs @@ -470,22 +470,22 @@ pub fn backing_state( // But at the moment only one candidate can be pending availability per // parachain. 
crate::inclusion::PendingAvailability::::get(¶_id) - .and_then(|pending| { - let commitments = - crate::inclusion::PendingAvailabilityCommitments::::get(¶_id); - commitments.map(move |c| (pending, c)) + .map(|pending_candidates| { + pending_candidates + .into_iter() + .map(|candidate| { + CandidatePendingAvailability { + candidate_hash: candidate.candidate_hash(), + descriptor: candidate.candidate_descriptor().clone(), + commitments: candidate.candidate_commitments().clone(), + relay_parent_number: candidate.relay_parent_number(), + max_pov_size: constraints.max_pov_size, /* assume always same in + * session. */ + } + }) + .collect() }) - .map(|(pending, commitments)| { - CandidatePendingAvailability { - candidate_hash: pending.candidate_hash(), - descriptor: pending.candidate_descriptor().clone(), - commitments, - relay_parent_number: pending.relay_parent_number(), - max_pov_size: constraints.max_pov_size, // assume always same in session. - } - }) - .into_iter() - .collect() + .unwrap_or_else(|| vec![]) }; Some(BackingState { constraints, pending_availability }) diff --git a/polkadot/runtime/parachains/src/util.rs b/polkadot/runtime/parachains/src/util.rs index aa07ef080055..f95e29191384 100644 --- a/polkadot/runtime/parachains/src/util.rs +++ b/polkadot/runtime/parachains/src/util.rs @@ -18,7 +18,7 @@ //! on all modules. use frame_system::pallet_prelude::BlockNumberFor; -use primitives::{Id as ParaId, PersistedValidationData, ValidatorIndex}; +use primitives::{HeadData, Id as ParaId, PersistedValidationData, ValidatorIndex}; use sp_std::{collections::btree_set::BTreeSet, vec::Vec}; use crate::{configuration, hrmp, paras}; @@ -42,6 +42,25 @@ pub fn make_persisted_validation_data( }) } +/// Make the persisted validation data for a particular parachain, a specified relay-parent, its +/// storage root and parent head data. +/// +/// This ties together the storage of several modules. 
+pub fn make_persisted_validation_data_with_parent( + relay_parent_number: BlockNumberFor, + relay_parent_storage_root: T::Hash, + parent_head: HeadData, +) -> PersistedValidationData> { + let config = >::config(); + + PersistedValidationData { + parent_head, + relay_parent_number, + relay_parent_storage_root, + max_pov_size: config.max_pov_size, + } +} + /// Take an active subset of a set containing all validators. /// /// First item in pair will be all items in set have indices found in the `active` indices set (in From 799fabe97306b63331002d433684b8b419039bb3 Mon Sep 17 00:00:00 2001 From: alindima Date: Mon, 26 Feb 2024 16:45:25 +0200 Subject: [PATCH 02/44] bugfixes Signed-off-by: alindima --- .../runtime/parachains/src/inclusion/mod.rs | 126 ++++++++---------- .../parachains/src/paras_inherent/mod.rs | 1 - 2 files changed, 58 insertions(+), 69 deletions(-) diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 54255f9758e5..40cf61425d58 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -503,7 +503,6 @@ impl Pallet { /// Returns a `Vec` of `CandidateHash`es and their respective `AvailabilityCore`s that became /// available, and cores free. pub(crate) fn update_pending_availability_and_get_freed_cores( - allowed_relay_parents: &AllowedRelayParentsTracker>, validators: &[ValidatorId], signed_bitfields: SignedAvailabilityBitfields, core_lookup: F, @@ -514,7 +513,6 @@ impl Pallet { let now = >::block_number(); let threshold = availability_threshold(validators.len()); - // Track the paraids that had one/more of their candidates made available. 
let mut paras_made_available = BTreeSet::new(); for (checked_bitfield, validator_index) in @@ -528,7 +526,7 @@ impl Pallet { if let Some(para_id) = core_lookup(core_index) { >::mutate(¶_id, |candidates| { if let Some(candidates) = candidates { - for candidate in candidates { + for (candidate_idx, candidate) in candidates.iter_mut().enumerate() { if candidate.core == core_index { // defensive check - this is constructed by loading the // availability bitfield record, which is always `Some` if @@ -537,9 +535,17 @@ impl Pallet { .availability_votes .get_mut(validator_index.0 as usize) { - paras_made_available.insert(para_id); *bit = true; } + + // We only care if the first candidate of this para was made + // available. We don't enact candidates until their predecessors + // have been enacted. + if candidate_idx == 0 && + candidate.availability_votes.count_ones() >= threshold + { + paras_made_available.insert(para_id); + } } } } @@ -561,71 +567,47 @@ impl Pallet { // We can only free cores whose candidates form a chain starting from the included para // head. // We assume dependency order is preserved in `PendingAvailability`. - 'para_loop: for (para_id, candidates_pending_availability) in paras_made_available + for (para_id, candidates_pending_availability) in paras_made_available .into_iter() .filter_map(|para_id| >::get(para_id).map(|c| (para_id, c))) { - let mut stopped_at_index = 0; - let mut latest_parent_head = match >::para_head(¶_id) { - Some(head) => head, - None => continue, - }; + let mut stopped_at_index = None; - // We have to check all candidates, because some of them may have already been made - // available in the past but their ancestors were not. + // We try to check all candidates, because some of them may have already been made + // available in the past but their ancestors were not. However, we can stop when we find + // the first one which is not available yet. 
for (index, pending_availability) in candidates_pending_availability.iter().enumerate() { - stopped_at_index = index; - if pending_availability.availability_votes.count_ones() >= threshold { - let (relay_parent_storage_root, _) = { - match allowed_relay_parents - .acquire_info(pending_availability.descriptor.relay_parent, None) - { - None => continue 'para_loop, // TODO: fix this - Some(info) => info, - } - }; - - let pvd = make_persisted_validation_data_with_parent::( - pending_availability.relay_parent_number, - relay_parent_storage_root, - latest_parent_head, - ); - if pvd.hash() != pending_availability.descriptor.persisted_validation_data_hash - { - // TODO: fix this. - // This means that we've backed a parachain fork in the past. Should have - // never happened. Should we evict all cores of this para? - continue 'para_loop; - } - - latest_parent_head = pending_availability.commitments.head_data.clone(); - freed_cores.push((pending_availability.core, pending_availability.hash)); + stopped_at_index = Some(index); + } else { + break } } // Trim the pending availability candidates storage and enact candidates now. 
- >::mutate(¶_id, |candidates| { - if let Some(candidates) = candidates { - let candidates_made_available = candidates.drain(0..stopped_at_index); - for candidate in candidates_made_available { - let receipt = CommittedCandidateReceipt { - descriptor: candidate.descriptor, - commitments: candidate.commitments, - }; - let _weight = Self::enact_candidate( - candidate.relay_parent_number, - receipt, - candidate.backers, - candidate.availability_votes, - candidate.core, - candidate.backing_group, - ); + if let Some(stopped_at_index) = stopped_at_index { + >::mutate(¶_id, |candidates| { + if let Some(candidates) = candidates { + let candidates_made_available = candidates.drain(0..=stopped_at_index); + for candidate in candidates_made_available { + let receipt = CommittedCandidateReceipt { + descriptor: candidate.descriptor, + commitments: candidate.commitments, + }; + let _weight = Self::enact_candidate( + candidate.relay_parent_number, + receipt, + candidate.backers, + candidate.availability_votes, + candidate.core, + candidate.backing_group, + ); + } } - } - }); + }); + } } freed_cores @@ -751,19 +733,24 @@ impl Pallet { // Update storage now. The next candidate may be a successor of this one. >::mutate(¶_id, |pending_availability| { + let new_candidate = CandidatePendingAvailability { + core: *core, + hash: candidate_hash, + descriptor: candidate.candidate().descriptor.clone(), + commitments: candidate.candidate().commitments.clone(), + // initialize all availability votes to 0. 
+ availability_votes: bitvec::bitvec![u8, BitOrderLsb0; 0; validators.len()], + relay_parent_number, + backers: backers.to_bitvec(), + backed_in_number: now, + backing_group: group_idx, + }; + if let Some(pending_availability) = pending_availability { - pending_availability.push_back(CandidatePendingAvailability { - core: *core, - hash: candidate_hash, - descriptor: candidate.candidate().descriptor.clone(), - commitments: candidate.candidate().commitments.clone(), - // initialize all availability votes to 0. - availability_votes: bitvec::bitvec![u8, BitOrderLsb0; 0; validators.len()], - relay_parent_number, - backers: backers.to_bitvec(), - backed_in_number: now, - backing_group: group_idx, - }); + pending_availability.push_back(new_candidate); + } else { + *pending_availability = + Some([new_candidate].into_iter().collect::>()) } }); @@ -774,6 +761,9 @@ impl Pallet { *core, group_idx, )); + + // break, we've found the candidate. + break } if !found_candidate { diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index e5b7167d7ce4..76f5a7fe3368 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -569,7 +569,6 @@ impl Pallet { // work has now concluded. 
let freed_concluded = >::update_pending_availability_and_get_freed_cores::<_>( - &allowed_relay_parents, &validator_public[..], bitfields.clone(), >::core_para, From 9f3ba62e4e819e299970aa9d28b903ac14e6a7db Mon Sep 17 00:00:00 2001 From: alindima Date: Mon, 26 Feb 2024 17:32:49 +0200 Subject: [PATCH 03/44] filter descendants of disputed candidates also no need to sort by core index any more --- .../runtime/parachains/src/inclusion/mod.rs | 11 ++-- .../parachains/src/paras_inherent/mod.rs | 51 +++++++++---------- 2 files changed, 32 insertions(+), 30 deletions(-) diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 40cf61425d58..c71dc6bf89cf 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -1108,10 +1108,13 @@ impl Pallet { cleaned_up_cores } - /// Cleans up all paras pending availability that are in the given list of disputed candidates. + /// Cleans up all cores pending availability occupied by one of the disputed candidates or which + /// are descendants of a disputed candidate. /// - /// Returns a vector of cleaned-up core IDs. - pub(crate) fn collect_disputed(disputed: &BTreeSet) -> Vec { + /// Returns a vector of cleaned-up core IDs, along with the evicted candidate hashes. 
+ pub(crate) fn collect_disputed( + disputed: &BTreeSet, + ) -> Vec<(CoreIndex, CandidateHash)> { let mut cleaned_up_cores = Vec::with_capacity(disputed.len()); for (para_id, pending_candidates) in >::iter() { @@ -1133,7 +1136,7 @@ impl Pallet { if let Some(record) = record { let cleaned_up = record.drain(earliest_disputed_idx..); for candidate in cleaned_up { - cleaned_up_cores.push(candidate.core); + cleaned_up_cores.push((candidate.core, candidate.hash)); } } }); diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 76f5a7fe3368..0533c47328f3 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -539,20 +539,26 @@ impl Pallet { .map(|(_session, candidate)| candidate) .collect::>(); - let freed_disputed: BTreeMap = - >::collect_disputed(¤t_concluded_invalid_disputes) - .into_iter() - .map(|core| (core, FreedReason::Concluded)) - .collect(); + let (freed_disputed_cores, freed_disputed_candidates): ( + BTreeMap, + BTreeSet, + ) = >::collect_disputed(¤t_concluded_invalid_disputes) + .into_iter() + .map(|(core, candidate)| ((core, FreedReason::Concluded), candidate)) + .unzip(); // Create a bit index from the set of core indices where each index corresponds to // a core index that was freed due to a dispute. // // I.e. 010100 would indicate, the candidates on Core 1 and 3 would be disputed. 
- let disputed_bitfield = create_disputed_bitfield(expected_bits, freed_disputed.keys()); + let disputed_bitfield = + create_disputed_bitfield(expected_bits, freed_disputed_cores.keys()); - if !freed_disputed.is_empty() { - >::free_cores_and_fill_claimqueue(freed_disputed.clone(), now); + if !freed_disputed_cores.is_empty() { + >::free_cores_and_fill_claimqueue( + freed_disputed_cores.clone(), + now, + ); } let bitfields = sanitize_bitfields::( @@ -610,11 +616,10 @@ impl Pallet { &allowed_relay_parents, |backed_candidate: &BackedCandidate<::Hash>| -> bool { // TODO: see this old comment // NOTE: this is the only place where we check the - // relay-parent. never include a concluded-invalid candidate. we don't need to - // check for descendants of concluded-invalid candidates as those descendants - // have already been evicted from the cores and the included head data won't - // match. - current_concluded_invalid_disputes.contains(&backed_candidate.hash()) + // relay-parent. + // Never include a concluded-invalid candidate. We need to also check for + // descendants of the concluded-invalid candidates. + freed_disputed_candidates.contains(&backed_candidate.hash()) }, scheduled, core_index_enabled, @@ -961,24 +966,25 @@ struct SanitizedBackedCandidates { /// state. /// /// `candidate_has_concluded_invalid_dispute` must return `true` if the candidate -/// is disputed, false otherwise. The passed `usize` is the candidate index. +/// is disputed or is a descendant of a disputed candidate, false otherwise. /// /// Returns struct `SanitizedBackedCandidates` where `backed_candidates` are sorted according to the /// occupied core index. +/// This function must preserve the dependency order between candidates of the same para. 
fn sanitize_backed_candidates< T: crate::inclusion::Config, F: FnMut(&BackedCandidate) -> bool, >( mut backed_candidates: Vec>, allowed_relay_parents: &AllowedRelayParentsTracker>, - mut candidate_has_concluded_invalid_dispute_or_is_invalid: F, + mut candidate_has_concluded_invalid_or_is_descendant_of_invalid: F, scheduled: BTreeMap>, core_index_enabled: bool, ) -> SanitizedBackedCandidates { - // Remove any candidates that were concluded invalid. - // This does not assume sorting. - backed_candidates.retain(move |backed_candidate| { - !candidate_has_concluded_invalid_dispute_or_is_invalid(backed_candidate) + // Remove any candidates that were concluded invalid or who are descendants of concluded invalid + // candidates. + backed_candidates.retain(|backed_candidate| { + !candidate_has_concluded_invalid_or_is_descendant_of_invalid(backed_candidate) }); let initial_candidate_count = backed_candidates.len(); @@ -1000,13 +1006,6 @@ fn sanitize_backed_candidates< core_index_enabled, ); - // Sort the `Vec` last, once there is a guarantee that these - // `BackedCandidates` references the expected relay chain parent, - // but more importantly are scheduled for a free core. - // This both avoids extra work for obviously invalid candidates, - // but also allows this to be done in place. 
- backed_candidates_with_core.sort_by(|(_x, core_x), (_y, core_y)| core_x.cmp(&core_y)); - SanitizedBackedCandidates { dropped_unscheduled_candidates, votes_from_disabled_were_dropped, From 5b13eb7486d622e905ba6695f78be5bfdc1f1bcf Mon Sep 17 00:00:00 2001 From: alindima Date: Tue, 27 Feb 2024 12:00:55 +0200 Subject: [PATCH 04/44] some simplifications --- .../runtime/parachains/src/inclusion/mod.rs | 59 ++++++++----------- 1 file changed, 26 insertions(+), 33 deletions(-) diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index c71dc6bf89cf..8830fc7197c0 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -1063,45 +1063,39 @@ impl Pallet { pub(crate) fn collect_timedout( pred: impl Fn(BlockNumberFor) -> AvailabilityTimeoutStatus>, ) -> Vec { - let mut timed_out_paras = BTreeMap::new(); let mut cleaned_up_cores = Vec::new(); for (para_id, candidates_pending_availability) in >::iter() { + let mut timed_out = None; for (idx, candidate) in candidates_pending_availability.iter().enumerate() { if pred(candidate.backed_in_number).timed_out { - timed_out_paras.insert(para_id, idx); + timed_out = Some(idx); // Found the first timed out candidate of this para. All other successors will - // be timed out as well. Break and go to the next para + // be timed out as well. 
break } } - } - for (para_id, idx) in timed_out_paras.iter() { - let timed_out_candidates: Option>> = + if let Some(idx) = timed_out { >::mutate(¶_id, |candidates| { if let Some(candidates) = candidates { - Some(candidates.drain(idx..).collect()) - } else { - None - } - }); - - if let Some(candidates) = timed_out_candidates { - for candidate in candidates { - cleaned_up_cores.push(candidate.core); + let cleaned_up = candidates.drain(idx..); + for candidate in cleaned_up { + cleaned_up_cores.push(candidate.core); - let receipt = CandidateReceipt { - descriptor: candidate.descriptor, - commitments_hash: candidate.commitments.hash(), - }; + let receipt = CandidateReceipt { + descriptor: candidate.descriptor, + commitments_hash: candidate.commitments.hash(), + }; - Self::deposit_event(Event::::CandidateTimedOut( - receipt, - candidate.commitments.head_data, - candidate.core, - )); - } + Self::deposit_event(Event::::CandidateTimedOut( + receipt, + candidate.commitments.head_data, + candidate.core, + )); + } + } + }); } } @@ -1123,10 +1117,10 @@ impl Pallet { let mut earliest_disputed_idx = None; for (index, candidate) in pending_candidates.iter().enumerate() { if disputed.contains(&candidate.hash) { - if let Some(prev_disputed_idx) = earliest_disputed_idx { - // Find the earliest disputed index. - earliest_disputed_idx = Some(sp_std::cmp::min(prev_disputed_idx, index)); - } + earliest_disputed_idx = Some(index); + // Since we're looping the candidates in dependency order, we've found the + // earliest disputed index for this paraid. 
+ break; } } @@ -1135,9 +1129,8 @@ impl Pallet { >::mutate(¶_id, |record| { if let Some(record) = record { let cleaned_up = record.drain(earliest_disputed_idx..); - for candidate in cleaned_up { - cleaned_up_cores.push((candidate.core, candidate.hash)); - } + cleaned_up_cores + .extend(cleaned_up.map(|candidate| (candidate.core, candidate.hash))); } }); } @@ -1153,7 +1146,7 @@ impl Pallet { /// This should generally not be used but it is useful during execution of Runtime APIs, /// where the changes to the state are expected to be discarded directly after. pub(crate) fn force_enact(para: ParaId) { - // TODO: this does not take elastic-scaling into account, it enacts the first candidate. + // This does not take elastic-scaling into account, it enacts the first candidate. let enacted_candidate = >::mutate(¶, |candidates| match candidates { Some(candidates) => candidates.pop_front(), From ecc508807b0bdca0932000437a56da4f2011e18b Mon Sep 17 00:00:00 2001 From: alindima Date: Wed, 28 Feb 2024 11:18:57 +0200 Subject: [PATCH 05/44] assert that candidates of a para are sorted in chain dependency order. optimise process_candidates to be O(N) --- .../runtime/parachains/src/inclusion/mod.rs | 204 +++++------ .../parachains/src/paras_inherent/mod.rs | 345 +++++++++++++----- 2 files changed, 347 insertions(+), 202 deletions(-) diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 8830fc7197c0..e64d239c9306 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -620,7 +620,7 @@ impl Pallet { /// scheduled cores. If these conditions are not met, the execution of the function fails. 
pub(crate) fn process_candidates( allowed_relay_parents: &AllowedRelayParentsTracker>, - candidates: Vec<(BackedCandidate, CoreIndex)>, + candidates: &BTreeMap, CoreIndex)>>, group_validators: GV, core_index_enabled: bool, ) -> Result, DispatchError> @@ -640,17 +640,7 @@ impl Pallet { Vec::with_capacity(candidates.len()); let mut core_indices = Vec::with_capacity(candidates.len()); - // Map candidates by para id. Use a BTreeSet for each candidate as we'll be removing them - // from the set. - let candidates: BTreeMap, CoreIndex)>> = - candidates.into_iter().fold(BTreeMap::new(), |mut acc, (candidate, core)| { - acc.entry(candidate.candidate().descriptor.para_id) - .or_insert_with(|| vec![]) - .push((candidate, core)); - acc - }); - - for (para_id, mut candidates) in candidates { + for (para_id, candidates) in candidates { let maybe_latest_head_data = match >::get(¶_id) .map(|pending_candidates| { pending_candidates.back().map(|x| x.commitments.head_data.clone()) @@ -666,117 +656,88 @@ impl Pallet { Some(latest_head_data) => latest_head_data, }; - // Now we need to arrange the candidates into a dependency chain based on the latest - // head. - // Since we don't know the parent_head_hash, we need to build the pvd for all candidates - // and check its hash against the one in the descriptor. - // TODO: we can make this more performant in we embed the parent_head_hash in the - // BackedCandidate. - loop { - let mut found_candidate = false; - - let mut used_candidates = BTreeSet::::new(); - - for (idx, (candidate, core)) in candidates.iter().enumerate() { - let candidate_hash = candidate.candidate().hash(); - - // TODO: find out if we're correctly building the context here. - let check_ctx = CandidateCheckContext::::new(None); - let relay_parent_number = match check_ctx.verify_backed_candidate( - &allowed_relay_parents, - candidate.candidate(), - latest_head_data.clone(), - )? { - // TODO: can a PVD mismatch hide some other issue? 
- Err(PVDMismatch) => { - // This means that this candidate is not a child of - // latest_head_data. - continue - }, - Ok(relay_parent_number) => relay_parent_number, + for (candidate, core) in candidates.iter() { + let candidate_hash = candidate.candidate().hash(); + + // TODO: find out if we're correctly building the context here. + let check_ctx = CandidateCheckContext::::new(None); + let relay_parent_number = match check_ctx.verify_backed_candidate( + &allowed_relay_parents, + candidate.candidate(), + latest_head_data.clone(), + )? { + // TODO: can a PVD mismatch hide some other issue? + Err(PVDMismatch) => { + // This means that this candidate is not a child of + // latest_head_data. + break + }, + Ok(relay_parent_number) => relay_parent_number, + }; + + // The candidate based upon relay parent `N` should be backed by a + // group assigned to core at block `N + 1`. Thus, + // `relay_parent_number + 1` will always land in the current + // session. + let group_idx = >::group_assigned_to_core( + *core, + relay_parent_number + One::one(), + ) + .ok_or_else(|| { + log::warn!( + target: LOG_TARGET, + "Failed to compute group index for candidate {:?}", + candidate_hash + ); + Error::::InvalidAssignment + })?; + let group_vals = + group_validators(group_idx).ok_or_else(|| Error::::InvalidGroupIndex)?; + + // Check backing vote count and validity. + let (backers, backer_idx_and_attestation) = Self::check_backing_votes( + candidate, + &validators, + group_vals, + core_index_enabled, + )?; + + // Found a valid candidate. 
+ latest_head_data = candidate.candidate().commitments.head_data.clone(); + candidate_receipt_with_backing_validator_indices + .push((candidate.receipt(), backer_idx_and_attestation)); + core_indices.push((*core, *para_id)); + + // Update storage now + >::mutate(¶_id, |pending_availability| { + let new_candidate = CandidatePendingAvailability { + core: *core, + hash: candidate_hash, + descriptor: candidate.candidate().descriptor.clone(), + commitments: candidate.candidate().commitments.clone(), + // initialize all availability votes to 0. + availability_votes: bitvec::bitvec![u8, BitOrderLsb0; 0; validators.len()], + relay_parent_number, + backers: backers.to_bitvec(), + backed_in_number: now, + backing_group: group_idx, }; - // The candidate based upon relay parent `N` should be backed by a - // group assigned to core at block `N + 1`. Thus, - // `relay_parent_number + 1` will always land in the current - // session. - let group_idx = >::group_assigned_to_core( - *core, - relay_parent_number + One::one(), - ) - .ok_or_else(|| { - log::warn!( - target: LOG_TARGET, - "Failed to compute group index for candidate {:?}", - candidate_hash - ); - Error::::InvalidAssignment - })?; - let group_vals = - group_validators(group_idx).ok_or_else(|| Error::::InvalidGroupIndex)?; - - // Check backing vote count and validity. - let (backers, backer_idx_and_attestation) = Self::check_backing_votes( - candidate, - &validators, - group_vals, - core_index_enabled, - )?; - - // Found a valid candidate. - latest_head_data = candidate.candidate().commitments.head_data.clone(); - used_candidates.insert(idx); - candidate_receipt_with_backing_validator_indices - .push((candidate.receipt(), backer_idx_and_attestation)); - found_candidate = true; - core_indices.push((*core, para_id)); - - // Update storage now. The next candidate may be a successor of this one. 
- >::mutate(¶_id, |pending_availability| { - let new_candidate = CandidatePendingAvailability { - core: *core, - hash: candidate_hash, - descriptor: candidate.candidate().descriptor.clone(), - commitments: candidate.candidate().commitments.clone(), - // initialize all availability votes to 0. - availability_votes: bitvec::bitvec![u8, BitOrderLsb0; 0; validators.len()], - relay_parent_number, - backers: backers.to_bitvec(), - backed_in_number: now, - backing_group: group_idx, - }; - - if let Some(pending_availability) = pending_availability { - pending_availability.push_back(new_candidate); - } else { - *pending_availability = - Some([new_candidate].into_iter().collect::>()) - } - }); - - // Deposit backed event. - Self::deposit_event(Event::::CandidateBacked( - candidate.candidate().to_plain(), - candidate.candidate().commitments.head_data.clone(), - *core, - group_idx, - )); - - // break, we've found the candidate. - break - } + if let Some(pending_availability) = pending_availability { + pending_availability.push_back(new_candidate); + } else { + *pending_availability = + Some([new_candidate].into_iter().collect::>()) + } + }); - if !found_candidate { - break - } else { - // Remove used candidates - let mut i = 0; - candidates.retain(|_| { - let keep = !used_candidates.contains(&i); - i += 1; - keep - }); - } + // Deposit backed event. + Self::deposit_event(Event::::CandidateBacked( + candidate.candidate().to_plain(), + candidate.candidate().commitments.head_data.clone(), + *core, + group_idx, + )); } } @@ -1150,6 +1111,9 @@ impl Pallet { let enacted_candidate = >::mutate(¶, |candidates| match candidates { Some(candidates) => candidates.pop_front(), + // TODO: this should also check the descendants, as they have been made available + // before their parent. Or just change the semantic of force_enact to enact all + // candidates of a para. 
_ => None, }); diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 0533c47328f3..d0866d9b221a 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -26,8 +26,10 @@ use crate::{ disputes::DisputesHandler, inclusion, initializer, metrics::METRICS, + paras, scheduler::{self, FreedReason}, shared::{self, AllowedRelayParentsTracker}, + util::make_persisted_validation_data_with_parent, ParaId, }; use bitvec::prelude::BitVec; @@ -42,7 +44,7 @@ use pallet_babe::{self, ParentBlockRandomness}; use primitives::{ effective_minimum_backing_votes, vstaging::node_features::FeatureIndex, BackedCandidate, CandidateHash, CandidateReceipt, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, - CoreIndex, DisputeStatementSet, InherentData as ParachainsInherentData, + CoreIndex, DisputeStatementSet, HeadData, InherentData as ParachainsInherentData, MultiDisputeStatementSet, ScrapedOnChainVotes, SessionIndex, SignedAvailabilityBitfields, SigningContext, UncheckedSignedAvailabilityBitfield, UncheckedSignedAvailabilityBitfields, ValidatorId, ValidatorIndex, ValidityAttestation, PARACHAINS_INHERENT_IDENTIFIER, @@ -611,26 +613,20 @@ impl Pallet { backed_candidates_with_core, votes_from_disabled_were_dropped, dropped_unscheduled_candidates, - } = sanitize_backed_candidates::( + count, + } = sanitize_backed_candidates::( backed_candidates, &allowed_relay_parents, - |backed_candidate: &BackedCandidate<::Hash>| -> bool { - // TODO: see this old comment // NOTE: this is the only place where we check the - // relay-parent. - // Never include a concluded-invalid candidate. We need to also check for - // descendants of the concluded-invalid candidates. - freed_disputed_candidates.contains(&backed_candidate.hash()) - }, + // TODO: see this old comment // NOTE: this is the only place where we check the + // relay-parent. 
+ freed_disputed_candidates, scheduled, core_index_enabled, ); - ensure!( - backed_candidates_with_core.len() <= total_scheduled_cores, - Error::::UnscheduledCandidate - ); + ensure!(count <= total_scheduled_cores, Error::::UnscheduledCandidate); - METRICS.on_candidates_sanitized(backed_candidates_with_core.len() as u64); + METRICS.on_candidates_sanitized(count as u64); // In `Enter` context (invoked during execution) there should be no backing votes from // disabled validators because they should have been filtered out during inherent data @@ -652,7 +648,7 @@ impl Pallet { candidate_receipt_with_backing_validator_indices, } = >::process_candidates( &allowed_relay_parents, - backed_candidates_with_core.clone(), + &backed_candidates_with_core, >::group_validators, core_index_enabled, )?; @@ -673,10 +669,13 @@ impl Pallet { let processed = ParachainsInherentData { bitfields, - backed_candidates: backed_candidates_with_core - .into_iter() - .map(|(candidate, _)| candidate) - .collect(), + backed_candidates: backed_candidates_with_core.into_iter().fold( + Vec::with_capacity(count), + |mut acc, (_id, candidates)| { + acc.extend(candidates.into_iter().map(|(c, _)| c)); + acc + }, + ), disputes, parent_header, }; @@ -945,12 +944,14 @@ pub(crate) fn sanitize_bitfields( struct SanitizedBackedCandidates { // Sanitized backed candidates along with the assigned core. The `Vec` is sorted according to // the occupied core index. - backed_candidates_with_core: Vec<(BackedCandidate, CoreIndex)>, + backed_candidates_with_core: BTreeMap, CoreIndex)>>, // Set to true if any votes from disabled validators were dropped from the input. 
votes_from_disabled_were_dropped: bool, // Set to true if any candidates were dropped due to filtering done in // `map_candidates_to_cores` dropped_unscheduled_candidates: bool, + // Total candidate count after sanitization + count: usize, } /// Filter out: @@ -971,33 +972,43 @@ struct SanitizedBackedCandidates { /// Returns struct `SanitizedBackedCandidates` where `backed_candidates` are sorted according to the /// occupied core index. /// This function must preserve the dependency order between candidates of the same para. -fn sanitize_backed_candidates< - T: crate::inclusion::Config, - F: FnMut(&BackedCandidate) -> bool, ->( - mut backed_candidates: Vec>, +fn sanitize_backed_candidates( + backed_candidates: Vec>, allowed_relay_parents: &AllowedRelayParentsTracker>, - mut candidate_has_concluded_invalid_or_is_descendant_of_invalid: F, + concluded_invalid: BTreeSet, scheduled: BTreeMap>, core_index_enabled: bool, ) -> SanitizedBackedCandidates { + // For all the modifications that will happen below, make sure we're preserving the order. + + let mut candidates_per_para: BTreeMap> = BTreeMap::new(); + for candidate in backed_candidates { + candidates_per_para + .entry(candidate.descriptor().para_id) + .or_default() + .push(candidate); + } + + // TODO: all filters need to return whether or not we dropped any candidates. + + // Check that candidates pertaining to the same para form a chain. Drop the ones that + // don't. + filter_unchained_candidates::(&mut candidates_per_para, allowed_relay_parents); + // Remove any candidates that were concluded invalid or who are descendants of concluded invalid // candidates. - backed_candidates.retain(|backed_candidate| { - !candidate_has_concluded_invalid_or_is_descendant_of_invalid(backed_candidate) + filter_candidates::(&mut candidates_per_para, |_, candidate| { + !concluded_invalid.contains(&candidate.candidate().hash()) }); - let initial_candidate_count = backed_candidates.len(); // Map candidates to scheduled cores. 
Filter out any unscheduled candidates. - let mut backed_candidates_with_core = map_candidates_to_cores::( - &allowed_relay_parents, - scheduled, - core_index_enabled, - backed_candidates, - ); - - let dropped_unscheduled_candidates = - initial_candidate_count != backed_candidates_with_core.len(); + let (mut backed_candidates_with_core, dropped_unscheduled_candidates) = + map_candidates_to_cores::( + &allowed_relay_parents, + scheduled, + core_index_enabled, + candidates_per_para, + ); // Filter out backing statements from disabled validators let votes_from_disabled_were_dropped = filter_backed_statements_from_disabled_validators::( @@ -1009,6 +1020,10 @@ fn sanitize_backed_candidates< SanitizedBackedCandidates { dropped_unscheduled_candidates, votes_from_disabled_were_dropped, + count: backed_candidates_with_core.iter().fold(0, |mut count, (_id, candidates)| { + count += candidates.len(); + count + }), backed_candidates_with_core, } } @@ -1094,14 +1109,74 @@ fn limit_and_sanitize_disputes< } } +fn filter_candidates< + T: inclusion::Config + paras::Config + inclusion::Config, + F: FnMut(ParaId, &BackedCandidate) -> bool, +>( + candidates: &mut BTreeMap>>, + mut pred: F, +) { + for (para_id, candidates) in candidates.iter_mut() { + let mut latest_valid_idx = None; + + for (idx, candidate) in candidates.iter().enumerate() { + if pred(*para_id, candidate) { + // Found a valid candidate. 
+ latest_valid_idx = Some(idx); + } else { + break + } + } + + if let Some(latest_valid_idx) = latest_valid_idx { + candidates.truncate(latest_valid_idx + 1); + } else { + candidates.clear(); + } + } + + candidates.retain(|_, c| !c.is_empty()); +} + +fn filter_candidates_with_core< + T: inclusion::Config + paras::Config + inclusion::Config, + F: FnMut(ParaId, &mut BackedCandidate, CoreIndex) -> bool, +>( + candidates: &mut BTreeMap, CoreIndex)>>, + mut pred: F, +) { + for (para_id, candidates) in candidates.iter_mut() { + let mut latest_valid_idx = None; + + for (idx, (candidate, core_idx)) in candidates.iter_mut().enumerate() { + if pred(*para_id, candidate, *core_idx) { + // Found a valid candidate. + latest_valid_idx = Some(idx); + } else { + break + } + } + + if let Some(latest_valid_idx) = latest_valid_idx { + candidates.truncate(latest_valid_idx + 1); + } else { + candidates.clear(); + } + } + + candidates.retain(|_, c| !c.is_empty()); +} + // Filters statements from disabled validators in `BackedCandidate`, non-scheduled candidates and // few more sanity checks. Returns `true` if at least one statement is removed and `false` // otherwise. -fn filter_backed_statements_from_disabled_validators( - backed_candidates_with_core: &mut Vec<( - BackedCandidate<::Hash>, - CoreIndex, - )>, +fn filter_backed_statements_from_disabled_validators< + T: shared::Config + scheduler::Config + inclusion::Config, +>( + backed_candidates_with_core: &mut BTreeMap< + ParaId, + Vec<(BackedCandidate<::Hash>, CoreIndex)>, + >, allowed_relay_parents: &AllowedRelayParentsTracker>, core_index_enabled: bool, ) -> bool { @@ -1124,23 +1199,26 @@ fn filter_backed_statements_from_disabled_validators(backed_candidates_with_core, |_, bc, core_idx| { + let (validator_indices, maybe_core_index) = + bc.validator_indices_and_core_index(core_index_enabled); let mut validator_indices = BitVec::<_>::from(validator_indices); - // Get relay parent block number of the candidate. 
We need this to get the group index assigned to this core at this block number + // Get relay parent block number of the candidate. We need this to get the group index + // assigned to this core at this block number let relay_parent_block_number = match allowed_relay_parents - .acquire_info(bc.descriptor().relay_parent, None) { - Some((_, block_num)) => block_num, - None => { - log::debug!(target: LOG_TARGET, "Relay parent {:?} for candidate is not in the allowed relay parents. Dropping the candidate.", bc.descriptor().relay_parent); - return false - } - }; + .acquire_info(bc.descriptor().relay_parent, None) + { + Some((_, block_num)) => block_num, + None => { + log::debug!(target: LOG_TARGET, "Relay parent {:?} for candidate is not in the allowed relay parents. Dropping the candidate.", bc.descriptor().relay_parent); + return false + }, + }; // Get the group index for the core let group_idx = match >::group_assigned_to_core( - *core_idx, + core_idx, relay_parent_block_number + One::one(), ) { Some(group_idx) => group_idx, @@ -1156,12 +1234,15 @@ fn filter_backed_statements_from_disabled_validators { log::debug!(target: LOG_TARGET, "Can't get the validators from group {:?}. Dropping the candidate.", group_idx); return false - } + }, }; // Bitmask with the disabled indices within the validator group - let disabled_indices = BitVec::::from_iter(validator_group.iter().map(|idx| disabled_validators.contains(idx))); - // The indices of statements from disabled validators in `BackedCandidate`. We have to drop these. + let disabled_indices = BitVec::::from_iter( + validator_group.iter().map(|idx| disabled_validators.contains(idx)), + ); + // The indices of statements from disabled validators in `BackedCandidate`. We have to drop + // these. 
let indices_to_drop = disabled_indices.clone() & &validator_indices; // Apply the bitmask to drop the disabled validator from `validator_indices` validator_indices &= !disabled_indices; @@ -1181,10 +1262,9 @@ fn filter_backed_statements_from_disabled_validators( + candidates: &mut BTreeMap>>, + allowed_relay_parents: &AllowedRelayParentsTracker>, +) { + let mut para_latest_head_data: BTreeMap = BTreeMap::new(); + for para_id in candidates.keys() { + let maybe_latest_head_data = match >::get(¶_id) + .map(|pending_candidates| { + pending_candidates.back().map(|x| x.candidate_commitments().head_data.clone()) + }) + .flatten() + { + Some(head_data) => Some(head_data), + None => >::para_head(¶_id), + }; + // this cannot be None + let latest_head_data = match maybe_latest_head_data { + None => continue, + Some(latest_head_data) => latest_head_data, + }; + para_latest_head_data.insert(*para_id, latest_head_data); + } + + filter_candidates::(candidates, |para_id, candidate| { + let Some(latest_head_data) = para_latest_head_data.get(¶_id) else { return false }; + + let Some((relay_parent_storage_root, relay_parent_number)) = + allowed_relay_parents.acquire_info(candidate.descriptor().relay_parent, None) + else { + return false + }; + + let persisted_validation_data = make_persisted_validation_data_with_parent::( + relay_parent_number, + relay_parent_storage_root, + latest_head_data.clone(), + ); + + let expected = persisted_validation_data.hash(); + + if expected == candidate.descriptor().persisted_validation_data_hash { + para_latest_head_data + .insert(para_id, candidate.candidate().commitments.head_data.clone()); + + true + } else { + false + } + }); +} + /// Map candidates to scheduled cores. /// If the para only has one scheduled core and no `CoreIndex` is injected, map the candidate to the /// single core. 
If the para has multiple cores scheduled, only map the candidates which have a @@ -1204,35 +1335,85 @@ fn map_candidates_to_cores>, mut scheduled: BTreeMap>, core_index_enabled: bool, - candidates: Vec>, -) -> Vec<(BackedCandidate, CoreIndex)> { - let mut backed_candidates_with_core = Vec::with_capacity(candidates.len()); - - // We keep a candidate if the parachain has only one core assigned or if - // a core index is provided by block author and it's indeed scheduled. - for backed_candidate in candidates { - let maybe_injected_core_index = get_injected_core_index::( - allowed_relay_parents, - &backed_candidate, - core_index_enabled, - ); + candidates: BTreeMap>>, +) -> (BTreeMap, CoreIndex)>>, bool) { + let mut backed_candidates_with_core = BTreeMap::new(); + let mut dropped_unscheduled_candidates = false; + + for (para_id, backed_candidates) in candidates.into_iter() { + // Sanity check, should never be true. + if backed_candidates.len() == 0 { + continue + } + + let scheduled_cores = scheduled.get_mut(¶_id); - let scheduled_cores = scheduled.get_mut(&backed_candidate.descriptor().para_id); - // Candidates without scheduled cores are silently filtered out. + // ParaIds without scheduled cores are silently filtered out. if let Some(scheduled_cores) = scheduled_cores { - if let Some(core_idx) = maybe_injected_core_index { - if scheduled_cores.contains(&core_idx) { - scheduled_cores.remove(&core_idx); - backed_candidates_with_core.push((backed_candidate, core_idx)); + // Non-elastic scaling case. One core per para. + if scheduled_cores.len() == 1 { + backed_candidates_with_core.insert( + para_id, + vec![( + // We need the first one here, as we assume candidates of a para are in + // dependency order. + backed_candidates.into_iter().next().expect("Length is at least 1"), + scheduled_cores.pop_first().expect("Length is 1"), + )], + ); + continue; + + // Elastic scaling case. We only allow candidates which have the right core + // indices injected. 
+ } else if scheduled_cores.len() > 1 && core_index_enabled { + // We must preserve the dependency order given in the input. + let mut temp_backed_candidates = Vec::with_capacity(scheduled_cores.len()); + + for candidate in backed_candidates { + if scheduled_cores.len() == 0 { + // We've got candidates for all of this para's assigned cores. Move on to + // the next para. + break; + } + let maybe_injected_core_index: Option = get_injected_core_index::( + allowed_relay_parents, + &candidate, + core_index_enabled, + ); + + if let Some(core_index) = maybe_injected_core_index { + if scheduled_cores.contains(&core_index) { + scheduled_cores.remove(&core_index); + temp_backed_candidates.push((candidate, core_index)); + } else { + // if we got a candidate for a core index which is not scheduled, stop + // the work for this para. the already processed candidate chain in + // temp_backed_candidates is still fine though. + dropped_unscheduled_candidates = true; + + break; + } + } else { + // if we got a candidate which does not contain its core index, stop the + // work for this para. the already processed candidate chain in + // temp_backed_candidates is still fine though. 
+ dropped_unscheduled_candidates = true; + + break; + } } - } else if scheduled_cores.len() == 1 { + backed_candidates_with_core - .push((backed_candidate, scheduled_cores.pop_first().expect("Length is 1"))); + .entry(para_id) + .or_insert_with(|| vec![]) + .extend(temp_backed_candidates); + } else { + dropped_unscheduled_candidates = true; } } } - backed_candidates_with_core + (backed_candidates_with_core, dropped_unscheduled_candidates) } fn get_injected_core_index( From 0fb7b8cf74edfddff9d597e1838c3850957d09fd Mon Sep 17 00:00:00 2001 From: alindima Date: Wed, 28 Feb 2024 12:54:46 +0200 Subject: [PATCH 06/44] deduplicate some of the logic for freeing cores --- .../runtime/parachains/src/inclusion/mod.rs | 81 +++++++++---------- .../parachains/src/paras_inherent/mod.rs | 1 - 2 files changed, 36 insertions(+), 46 deletions(-) diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index e64d239c9306..72a184eed18e 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -1024,43 +1024,26 @@ impl Pallet { pub(crate) fn collect_timedout( pred: impl Fn(BlockNumberFor) -> AvailabilityTimeoutStatus>, ) -> Vec { - let mut cleaned_up_cores = Vec::new(); - - for (para_id, candidates_pending_availability) in >::iter() { - let mut timed_out = None; - for (idx, candidate) in candidates_pending_availability.iter().enumerate() { - if pred(candidate.backed_in_number).timed_out { - timed_out = Some(idx); - // Found the first timed out candidate of this para. All other successors will - // be timed out as well. 
- break - } - } - - if let Some(idx) = timed_out { - >::mutate(¶_id, |candidates| { - if let Some(candidates) = candidates { - let cleaned_up = candidates.drain(idx..); - for candidate in cleaned_up { - cleaned_up_cores.push(candidate.core); - - let receipt = CandidateReceipt { - descriptor: candidate.descriptor, - commitments_hash: candidate.commitments.hash(), - }; + let timed_out: Vec<_> = + Self::free_cores(|candidate| pred(candidate.backed_in_number).timed_out, None) + .collect(); + let mut timed_out_cores = Vec::with_capacity(timed_out.len()); + for candidate in timed_out.iter() { + timed_out_cores.push(candidate.core); + + let receipt = CandidateReceipt { + descriptor: candidate.descriptor.clone(), + commitments_hash: candidate.commitments.hash(), + }; - Self::deposit_event(Event::::CandidateTimedOut( - receipt, - candidate.commitments.head_data, - candidate.core, - )); - } - } - }); - } + Self::deposit_event(Event::::CandidateTimedOut( + receipt, + candidate.commitments.head_data.clone(), + candidate.core, + )); } - cleaned_up_cores + timed_out_cores } /// Cleans up all cores pending availability occupied by one of the disputed candidates or which @@ -1069,35 +1052,43 @@ impl Pallet { /// Returns a vector of cleaned-up core IDs, along with the evicted candidate hashes. pub(crate) fn collect_disputed( disputed: &BTreeSet, - ) -> Vec<(CoreIndex, CandidateHash)> { - let mut cleaned_up_cores = Vec::with_capacity(disputed.len()); + ) -> impl Iterator + '_ { + Self::free_cores(|candidate| disputed.contains(&candidate.hash), Some(disputed.len())) + .map(|candidate| (candidate.core, candidate.hash)) + } + + fn free_cores>) -> bool>( + pred: P, + capacity_hint: Option, + ) -> impl Iterator>> { + let mut cleaned_up_cores = + if let Some(capacity) = capacity_hint { Vec::with_capacity(capacity) } else { vec![] }; for (para_id, pending_candidates) in >::iter() { // We assume that pending candidates are stored in dependency order. 
So we need to store - // the earliest disputed candidate. All others that follow will get freed as well. - let mut earliest_disputed_idx = None; + // the earliest dropped candidate. All others that follow will get freed as well. + let mut earliest_dropped_idx = None; for (index, candidate) in pending_candidates.iter().enumerate() { - if disputed.contains(&candidate.hash) { - earliest_disputed_idx = Some(index); + if pred(candidate) { + earliest_dropped_idx = Some(index); // Since we're looping the candidates in dependency order, we've found the // earliest disputed index for this paraid. break; } } - if let Some(earliest_disputed_idx) = earliest_disputed_idx { + if let Some(earliest_dropped_idx) = earliest_dropped_idx { // Do cleanups and record the cleaned up cores >::mutate(¶_id, |record| { if let Some(record) = record { - let cleaned_up = record.drain(earliest_disputed_idx..); - cleaned_up_cores - .extend(cleaned_up.map(|candidate| (candidate.core, candidate.hash))); + let cleaned_up = record.drain(earliest_dropped_idx..); + cleaned_up_cores.extend(cleaned_up); } }); } } - cleaned_up_cores + cleaned_up_cores.into_iter() } /// Forcibly enact the candidate with the given ID as though it had been deemed available diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index d0866d9b221a..c2fcf0696da8 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -545,7 +545,6 @@ impl Pallet { BTreeMap, BTreeSet, ) = >::collect_disputed(¤t_concluded_invalid_disputes) - .into_iter() .map(|(core, candidate)| ((core, FreedReason::Concluded), candidate)) .unzip(); From 0ed6fc369f3aad969b608bb40e3e917f95170435 Mon Sep 17 00:00:00 2001 From: alindima Date: Wed, 28 Feb 2024 14:21:43 +0200 Subject: [PATCH 07/44] update some comments --- .../runtime/parachains/src/inclusion/mod.rs | 24 ++++--- .../parachains/src/paras_inherent/mod.rs | 71 
+++++++++++-------- .../parachains/src/runtime_api_impl/v7.rs | 4 -- polkadot/runtime/parachains/src/util.rs | 4 +- 4 files changed, 56 insertions(+), 47 deletions(-) diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 72a184eed18e..b3e44e521bc0 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -538,9 +538,9 @@ impl Pallet { *bit = true; } - // We only care if the first candidate of this para was made - // available. We don't enact candidates until their predecessors - // have been enacted. + // In terms of candidate enactment, we only care if the first + // candidate of this para was made available. We don't enact + // candidates until their predecessors have been enacted. if candidate_idx == 0 && candidate.availability_votes.count_ones() >= threshold { @@ -613,11 +613,12 @@ impl Pallet { freed_cores } - /// Process candidates that have been backed. Provide the relay storage root, a set of - /// candidates and scheduled cores. + /// Process candidates that have been backed. Provide a set of + /// candidates along with their scheduled cores. /// - /// Both should be sorted ascending by core index, and the candidates should be a subset of - /// scheduled cores. If these conditions are not met, the execution of the function fails. + /// Candidates of a paraid should sorted ascending by core index. If this condition is not met, + /// candidates of the para which don't satisfy this criteria will be dropped. (This really + /// should not happen here, if the candidates were properly sanitised in paras_inherent). 
pub(crate) fn process_candidates( allowed_relay_parents: &AllowedRelayParentsTracker>, candidates: &BTreeMap, CoreIndex)>>, @@ -627,12 +628,11 @@ impl Pallet { where GV: Fn(GroupIndex) -> Option>, { - let now = >::block_number(); - if candidates.is_empty() { return Ok(ProcessedCandidates::default()) } + let now = >::block_number(); let validators = shared::Pallet::::active_validator_keys(); // Collect candidate receipts with backers. @@ -641,6 +641,8 @@ impl Pallet { let mut core_indices = Vec::with_capacity(candidates.len()); for (para_id, candidates) in candidates { + // PVD hash should have already been checked in `filter_unchained_candidates`, but do it + // again for safety. let maybe_latest_head_data = match >::get(¶_id) .map(|pending_candidates| { pending_candidates.back().map(|x| x.commitments.head_data.clone()) @@ -650,7 +652,7 @@ impl Pallet { Some(head_data) => Some(head_data), None => >::para_head(¶_id), }; - // this cannot be None + // this cannot be None if the parachain was registered. let mut latest_head_data = match maybe_latest_head_data { None => continue, Some(latest_head_data) => latest_head_data, @@ -742,7 +744,7 @@ impl Pallet { } Ok(ProcessedCandidates:: { - core_indices, // TODO: these may need to be sorted. + core_indices, candidate_receipt_with_backing_validator_indices, }) } diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index c2fcf0696da8..c58fc5322790 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -238,7 +238,7 @@ pub mod pallet { /// the given `freed_concluded`). /// /// The parameter `freed_concluded` contains all core indicies that became - /// free due to candidate that became available. + /// free due to candidates that became available. 
pub(crate) fn collect_all_freed_cores( freed_concluded: I, ) -> BTreeMap @@ -254,7 +254,11 @@ pub mod pallet { Vec::new() }; - // Schedule paras again, given freed cores, and reasons for freeing. + if !freed_timeout.is_empty() { + log::debug!(target: LOG_TARGET, "Evicted timed out cores: {:?}", freed_timeout); + } + + // We'll schedule paras again, given freed cores, and reasons for freeing. let freed = freed_concluded .into_iter() .map(|(c, _hash)| (c, FreedReason::Concluded)) @@ -953,33 +957,32 @@ struct SanitizedBackedCandidates { count: usize, } +/// Performs various filtering on the backed candidates inherent data. +/// Must maintain the invariant that the returned candidate collection contains the candidates +/// sorted in dependency order for each para. When doing any filtering, we must therefore drop any +/// subsequent candidates after the filtered one. +/// /// Filter out: -/// 1. any candidates that have a concluded invalid dispute -/// 2. any unscheduled candidates, as well as candidates whose paraid has multiple cores assigned +/// 1. any candidates which don't form a chain with the other candidates of the paraid (even if they +/// do form a chain but are not in the right order). +/// 2. any candidates that have a concluded invalid dispute or who are descendants of a concluded +/// invalid candidate. +/// 3. any unscheduled candidates, as well as candidates whose paraid has multiple cores assigned /// but have no injected core index. -/// 3. all backing votes from disabled validators -/// 4. any candidates that end up with less than `effective_minimum_backing_votes` backing votes +/// 4. all backing votes from disabled validators +/// 5. any candidates that end up with less than `effective_minimum_backing_votes` backing votes /// -/// `scheduled` follows the same naming scheme as provided in the -/// guide: Currently `free` but might become `occupied`. -/// For the filtering here the relevant part is only the current `free` -/// state. 
-/// -/// `candidate_has_concluded_invalid_dispute` must return `true` if the candidate -/// is disputed or is a descendant of a disputed candidate, false otherwise. -/// -/// Returns struct `SanitizedBackedCandidates` where `backed_candidates` are sorted according to the -/// occupied core index. -/// This function must preserve the dependency order between candidates of the same para. +/// Returns struct `SanitizedBackedCandidates` where `backed_candidates_with_core` are the scheduled +/// backed candidates which passed filtering, mapped by para id and in the right dependency order. fn sanitize_backed_candidates( backed_candidates: Vec>, allowed_relay_parents: &AllowedRelayParentsTracker>, - concluded_invalid: BTreeSet, + concluded_invalid_with_descendants: BTreeSet, scheduled: BTreeMap>, core_index_enabled: bool, ) -> SanitizedBackedCandidates { - // For all the modifications that will happen below, make sure we're preserving the order. - + // Map the candidates to the right paraids, while making sure that the order between candidates + // of the same para is preserved. let mut candidates_per_para: BTreeMap> = BTreeMap::new(); for candidate in backed_candidates { candidates_per_para @@ -991,16 +994,17 @@ fn sanitize_backed_candidates( // TODO: all filters need to return whether or not we dropped any candidates. // Check that candidates pertaining to the same para form a chain. Drop the ones that - // don't. + // don't, along with the rest of candidates which follow them in the input vector. filter_unchained_candidates::(&mut candidates_per_para, allowed_relay_parents); // Remove any candidates that were concluded invalid or who are descendants of concluded invalid - // candidates. + // candidates (along with their descendants). 
filter_candidates::(&mut candidates_per_para, |_, candidate| { - !concluded_invalid.contains(&candidate.candidate().hash()) + !concluded_invalid_with_descendants.contains(&candidate.candidate().hash()) }); - // Map candidates to scheduled cores. Filter out any unscheduled candidates. + // Map candidates to scheduled cores. Filter out any unscheduled candidates along with their + // descendants. let (mut backed_candidates_with_core, dropped_unscheduled_candidates) = map_candidates_to_cores::( &allowed_relay_parents, @@ -1009,7 +1013,9 @@ fn sanitize_backed_candidates( candidates_per_para, ); - // Filter out backing statements from disabled validators + // Filter out backing statements from disabled validators. If by that we render a candidate with + // less backing votes than required, filter that candidate also. As all the other filtering + // operations above, we drop the descendants of the dropped candidates also. let votes_from_disabled_were_dropped = filter_backed_statements_from_disabled_validators::( &mut backed_candidates_with_core, &allowed_relay_parents, @@ -1108,6 +1114,8 @@ fn limit_and_sanitize_disputes< } } +// Helper function for filtering candidates which don't pass the given predicate. When/if the first +// candidate which failes the predicate is found, all the other candidates that follow are dropped. fn filter_candidates< T: inclusion::Config + paras::Config + inclusion::Config, F: FnMut(ParaId, &BackedCandidate) -> bool, @@ -1137,6 +1145,8 @@ fn filter_candidates< candidates.retain(|_, c| !c.is_empty()); } +// Helper function for filtering candidates which don't pass the given predicate. When/if the first +// candidate which failes the predicate is found, all the other candidates that follow are dropped. 
fn filter_candidates_with_core< T: inclusion::Config + paras::Config + inclusion::Config, F: FnMut(ParaId, &mut BackedCandidate, CoreIndex) -> bool, @@ -1166,9 +1176,8 @@ fn filter_candidates_with_core< candidates.retain(|_, c| !c.is_empty()); } -// Filters statements from disabled validators in `BackedCandidate`, non-scheduled candidates and -// few more sanity checks. Returns `true` if at least one statement is removed and `false` -// otherwise. +// Filters statements from disabled validators in `BackedCandidate` and does a few more sanity +// checks. Returns `true` if at least one statement is removed and `false` otherwise. fn filter_backed_statements_from_disabled_validators< T: shared::Config + scheduler::Config + inclusion::Config, >( @@ -1274,6 +1283,8 @@ fn filter_backed_statements_from_disabled_validators< filtered || backed_len_before != backed_candidates_with_core.len() } +// Check that candidates pertaining to the same para form a chain. Drop the ones that +// don't, along with the rest of candidates which follow them in the input vector. fn filter_unchained_candidates( candidates: &mut BTreeMap>>, allowed_relay_parents: &AllowedRelayParentsTracker>, @@ -1326,10 +1337,12 @@ fn filter_unchained_candidates( allowed_relay_parents: &AllowedRelayParentsTracker>, mut scheduled: BTreeMap>, diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs b/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs index 9c9d94eeddaf..9513645dc8fd 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs @@ -465,10 +465,6 @@ pub fn backing_state( }; let pending_availability = { - // Note: the API deals with a `Vec` as it is future-proof for cases - // where there may be multiple candidates pending availability at a time. - // But at the moment only one candidate can be pending availability per - // parachain. 
crate::inclusion::PendingAvailability::::get(¶_id) .map(|pending_candidates| { pending_candidates diff --git a/polkadot/runtime/parachains/src/util.rs b/polkadot/runtime/parachains/src/util.rs index f95e29191384..493a9d055efd 100644 --- a/polkadot/runtime/parachains/src/util.rs +++ b/polkadot/runtime/parachains/src/util.rs @@ -44,9 +44,7 @@ pub fn make_persisted_validation_data( /// Make the persisted validation data for a particular parachain, a specified relay-parent, its /// storage root and parent head data. -/// -/// This ties together the storage of several modules. -pub fn make_persisted_validation_data_with_parent( +pub fn make_persisted_validation_data_with_parent( relay_parent_number: BlockNumberFor, relay_parent_storage_root: T::Hash, parent_head: HeadData, From 0a08fa0d155fce4d4613b089f2afbbad65cc2e3e Mon Sep 17 00:00:00 2001 From: alindima Date: Wed, 28 Feb 2024 14:38:48 +0200 Subject: [PATCH 08/44] unify dropped candidates errors --- .../parachains/src/paras_inherent/mod.rs | 96 +++++++------------ 1 file changed, 33 insertions(+), 63 deletions(-) diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index c58fc5322790..40aefa713fe8 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -141,10 +141,9 @@ pub mod pallet { DisputeStatementsUnsortedOrDuplicates, /// A dispute statement was invalid. DisputeInvalid, - /// A candidate was backed by a disabled validator - BackedByDisabled, - /// A candidate was backed even though the paraid was not scheduled. - BackedOnUnscheduledCore, + /// A candidate was filtered during inherent execution. This should have only been done + /// during creation. + CandidatesFilteredDuringExecution, /// Too many candidates supplied. 
UnscheduledCandidate, } @@ -612,12 +611,8 @@ impl Pallet { scheduled.entry(para_id).or_default().insert(core_idx); } - let SanitizedBackedCandidates { - backed_candidates_with_core, - votes_from_disabled_were_dropped, - dropped_unscheduled_candidates, - count, - } = sanitize_backed_candidates::( + let initial_candidate_count = backed_candidates.len(); + let backed_candidates_with_core = sanitize_backed_candidates::( backed_candidates, &allowed_relay_parents, // TODO: see this old comment // NOTE: this is the only place where we check the @@ -626,23 +621,20 @@ impl Pallet { scheduled, core_index_enabled, ); + let count = count_backed_candidates(&backed_candidates_with_core); ensure!(count <= total_scheduled_cores, Error::::UnscheduledCandidate); METRICS.on_candidates_sanitized(count as u64); - // In `Enter` context (invoked during execution) there should be no backing votes from - // disabled validators because they should have been filtered out during inherent data - // preparation (`ProvideInherent` context). Abort in such cases. + // In `Enter` context (invoked during execution) no more candidates should be filtered, + // because they have already been filtered during `ProvideInherent` context. Abort in such + // cases. if context == ProcessInherentDataContext::Enter { - ensure!(!votes_from_disabled_were_dropped, Error::::BackedByDisabled); - } - - // In `Enter` context (invoked during execution) we shouldn't have filtered any candidates - // due to a para not being scheduled. They have been filtered during inherent data - // preparation (`ProvideInherent` context). Abort in such cases. - if context == ProcessInherentDataContext::Enter { - ensure!(!dropped_unscheduled_candidates, Error::::BackedOnUnscheduledCore); + ensure!( + initial_candidate_count == count, + Error::::CandidatesFilteredDuringExecution + ); } // Process backed candidates according to scheduled cores. 
@@ -942,21 +934,6 @@ pub(crate) fn sanitize_bitfields( bitfields } -// Result from `sanitize_backed_candidates` -#[derive(Debug, PartialEq)] -struct SanitizedBackedCandidates { - // Sanitized backed candidates along with the assigned core. The `Vec` is sorted according to - // the occupied core index. - backed_candidates_with_core: BTreeMap, CoreIndex)>>, - // Set to true if any votes from disabled validators were dropped from the input. - votes_from_disabled_were_dropped: bool, - // Set to true if any candidates were dropped due to filtering done in - // `map_candidates_to_cores` - dropped_unscheduled_candidates: bool, - // Total candidate count after sanitization - count: usize, -} - /// Performs various filtering on the backed candidates inherent data. /// Must maintain the invariant that the returned candidate collection contains the candidates /// sorted in dependency order for each para. When doing any filtering, we must therefore drop any @@ -972,7 +949,7 @@ struct SanitizedBackedCandidates { /// 4. all backing votes from disabled validators /// 5. any candidates that end up with less than `effective_minimum_backing_votes` backing votes /// -/// Returns struct `SanitizedBackedCandidates` where `backed_candidates_with_core` are the scheduled +/// Returns the scheduled /// backed candidates which passed filtering, mapped by para id and in the right dependency order. fn sanitize_backed_candidates( backed_candidates: Vec>, @@ -980,7 +957,7 @@ fn sanitize_backed_candidates( concluded_invalid_with_descendants: BTreeSet, scheduled: BTreeMap>, core_index_enabled: bool, -) -> SanitizedBackedCandidates { +) -> BTreeMap, CoreIndex)>> { // Map the candidates to the right paraids, while making sure that the order between candidates // of the same para is preserved. let mut candidates_per_para: BTreeMap> = BTreeMap::new(); @@ -991,8 +968,6 @@ fn sanitize_backed_candidates( .push(candidate); } - // TODO: all filters need to return whether or not we dropped any candidates. 
- // Check that candidates pertaining to the same para form a chain. Drop the ones that // don't, along with the rest of candidates which follow them in the input vector. filter_unchained_candidates::(&mut candidates_per_para, allowed_relay_parents); @@ -1005,32 +980,30 @@ fn sanitize_backed_candidates( // Map candidates to scheduled cores. Filter out any unscheduled candidates along with their // descendants. - let (mut backed_candidates_with_core, dropped_unscheduled_candidates) = - map_candidates_to_cores::( - &allowed_relay_parents, - scheduled, - core_index_enabled, - candidates_per_para, - ); + let mut backed_candidates_with_core = map_candidates_to_cores::( + &allowed_relay_parents, + scheduled, + core_index_enabled, + candidates_per_para, + ); // Filter out backing statements from disabled validators. If by that we render a candidate with // less backing votes than required, filter that candidate also. As all the other filtering // operations above, we drop the descendants of the dropped candidates also. - let votes_from_disabled_were_dropped = filter_backed_statements_from_disabled_validators::( + filter_backed_statements_from_disabled_validators::( &mut backed_candidates_with_core, &allowed_relay_parents, core_index_enabled, ); - SanitizedBackedCandidates { - dropped_unscheduled_candidates, - votes_from_disabled_were_dropped, - count: backed_candidates_with_core.iter().fold(0, |mut count, (_id, candidates)| { - count += candidates.len(); - count - }), - backed_candidates_with_core, - } + backed_candidates_with_core +} + +fn count_backed_candidates(backed_candidates: &BTreeMap>) -> usize { + backed_candidates.iter().fold(0, |mut count, (_id, candidates)| { + count += candidates.len(); + count + }) } /// Derive entropy from babe provided per block randomness. 
@@ -1348,9 +1321,8 @@ fn map_candidates_to_cores>, core_index_enabled: bool, candidates: BTreeMap>>, -) -> (BTreeMap, CoreIndex)>>, bool) { +) -> BTreeMap, CoreIndex)>> { let mut backed_candidates_with_core = BTreeMap::new(); - let mut dropped_unscheduled_candidates = false; for (para_id, backed_candidates) in candidates.into_iter() { // Sanity check, should never be true. @@ -1401,7 +1373,6 @@ fn map_candidates_to_cores( From 9e51e20883425429d60d92e7b55379b91fef2df8 Mon Sep 17 00:00:00 2001 From: alindima Date: Wed, 28 Feb 2024 15:02:15 +0200 Subject: [PATCH 09/44] add more logs --- .../parachains/src/paras_inherent/mod.rs | 114 +++++++++++++----- 1 file changed, 82 insertions(+), 32 deletions(-) diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 40aefa713fe8..6e715018390f 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -975,7 +975,17 @@ fn sanitize_backed_candidates( // Remove any candidates that were concluded invalid or who are descendants of concluded invalid // candidates (along with their descendants). filter_candidates::(&mut candidates_per_para, |_, candidate| { - !concluded_invalid_with_descendants.contains(&candidate.candidate().hash()) + let keep = !concluded_invalid_with_descendants.contains(&candidate.candidate().hash()); + + if !keep { + log::debug!( + target: LOG_TARGET, + "Found backed candidate {:?} which was concluded invalid or is a descendant of a concluded invalid candidate, for paraid {:?}.", + candidate.candidate().hash(), + candidate.descriptor().para_id + ); + } + keep }); // Map candidates to scheduled cores. Filter out any unscheduled candidates along with their @@ -1150,7 +1160,7 @@ fn filter_candidates_with_core< } // Filters statements from disabled validators in `BackedCandidate` and does a few more sanity -// checks. 
Returns `true` if at least one statement is removed and `false` otherwise. +// checks. fn filter_backed_statements_from_disabled_validators< T: shared::Config + scheduler::Config + inclusion::Config, >( @@ -1160,42 +1170,40 @@ fn filter_backed_statements_from_disabled_validators< >, allowed_relay_parents: &AllowedRelayParentsTracker>, core_index_enabled: bool, -) -> bool { +) { let disabled_validators = BTreeSet::<_>::from_iter(shared::Pallet::::disabled_validators().into_iter()); if disabled_validators.is_empty() { // No disabled validators - nothing to do - return false + return } - let backed_len_before = backed_candidates_with_core.len(); - - // Flag which will be returned. Set to `true` if at least one vote is filtered. - let mut filtered = false; - let minimum_backing_votes = configuration::Pallet::::config().minimum_backing_votes; // Process all backed candidates. `validator_indices` in `BackedCandidates` are indices within // the validator group assigned to the parachain. To obtain this group we need: // 1. Core index assigned to the parachain which has produced the candidate // 2. The relay chain block number of the candidate - filter_candidates_with_core::(backed_candidates_with_core, |_, bc, core_idx| { + filter_candidates_with_core::(backed_candidates_with_core, |para_id, bc, core_idx| { let (validator_indices, maybe_core_index) = bc.validator_indices_and_core_index(core_index_enabled); let mut validator_indices = BitVec::<_>::from(validator_indices); // Get relay parent block number of the candidate. We need this to get the group index // assigned to this core at this block number - let relay_parent_block_number = match allowed_relay_parents - .acquire_info(bc.descriptor().relay_parent, None) - { - Some((_, block_num)) => block_num, - None => { - log::debug!(target: LOG_TARGET, "Relay parent {:?} for candidate is not in the allowed relay parents. 
Dropping the candidate.", bc.descriptor().relay_parent); - return false - }, - }; + let relay_parent_block_number = + match allowed_relay_parents.acquire_info(bc.descriptor().relay_parent, None) { + Some((_, block_num)) => block_num, + None => { + log::debug!( + target: LOG_TARGET, + "Relay parent {:?} for candidate is not in the allowed relay parents. Dropping the candidate.", + bc.descriptor().relay_parent + ); + return false + }, + }; // Get the group index for the core let group_idx = match >::group_assigned_to_core( @@ -1235,25 +1243,24 @@ fn filter_backed_statements_from_disabled_validators< bc.validity_votes_mut().remove(idx); } - // If at least one statement was dropped we need to return `true` - if indices_to_drop.count_ones() > 0 { - filtered = true; - } - // By filtering votes we might render the candidate invalid and cause a failure in // [`process_candidates`]. To avoid this we have to perform a sanity check here. If there // are not enough backing votes after filtering we will remove the whole candidate. if bc.validity_votes().len() < effective_minimum_backing_votes(validator_group.len(), minimum_backing_votes) { + log::debug!( + target: LOG_TARGET, + "Dropping candidate {:?} of paraid {:?} because it was left with too few backing votes after votes from disabled validators were filtered.", + bc.candidate().hash(), + para_id + ); + return false } true }); - - // Also return `true` if a whole candidate was dropped from the set - filtered || backed_len_before != backed_candidates_with_core.len() } // Check that candidates pertaining to the same para form a chain. 
Drop the ones that @@ -1275,7 +1282,10 @@ fn filter_unchained_candidates continue, + None => { + log::warn!(target: LOG_TARGET, "Latest included head data for paraid {:?} is None", para_id); + continue + }, Some(latest_head_data) => latest_head_data, }; para_latest_head_data.insert(*para_id, latest_head_data); @@ -1287,6 +1297,13 @@ fn filter_unchained_candidates { log::debug!( target: LOG_TARGET, - "Relay parent {:?} for candidate {:?} is not in the allowed relay parents. Dropping the candidate.", + "Relay parent {:?} for candidate {:?} is not in the allowed relay parents.", candidate.descriptor().relay_parent, candidate.candidate().hash(), ); @@ -1434,9 +1477,8 @@ fn get_injected_core_index { log::debug!( target: LOG_TARGET, - "Can't get the group index for core idx {:?}. Dropping the candidate {:?}.", + "Can't get the group index for core idx {:?}.", core_idx, - candidate.candidate().hash(), ); return None }, @@ -1450,6 +1492,14 @@ fn get_injected_core_index Date: Wed, 28 Feb 2024 15:34:56 +0200 Subject: [PATCH 10/44] remove some todos --- .../runtime/parachains/src/inclusion/mod.rs | 3 +- .../parachains/src/paras_inherent/mod.rs | 67 +++++++++---------- 2 files changed, 32 insertions(+), 38 deletions(-) diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index b3e44e521bc0..2a2e5dea2fe0 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -661,14 +661,12 @@ impl Pallet { for (candidate, core) in candidates.iter() { let candidate_hash = candidate.candidate().hash(); - // TODO: find out if we're correctly building the context here. let check_ctx = CandidateCheckContext::::new(None); let relay_parent_number = match check_ctx.verify_backed_candidate( &allowed_relay_parents, candidate.candidate(), latest_head_data.clone(), )? { - // TODO: can a PVD mismatch hide some other issue? 
Err(PVDMismatch) => { // This means that this candidate is not a child of // latest_head_data. @@ -1200,6 +1198,7 @@ pub(crate) struct CandidateCheckContext { /// An error indicating that creating Persisted Validation Data failed /// while checking a candidate's validity. +#[derive(PartialEq)] pub(crate) struct PVDMismatch; impl CandidateCheckContext { diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 6e715018390f..3e247eacc0d9 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -24,12 +24,12 @@ use crate::{ configuration, disputes::DisputesHandler, - inclusion, initializer, + inclusion::{self, CandidateCheckContext, PVDMismatch}, + initializer, metrics::METRICS, paras, scheduler::{self, FreedReason}, shared::{self, AllowedRelayParentsTracker}, - util::make_persisted_validation_data_with_parent, ParaId, }; use bitvec::prelude::BitVec; @@ -615,8 +615,6 @@ impl Pallet { let backed_candidates_with_core = sanitize_backed_candidates::( backed_candidates, &allowed_relay_parents, - // TODO: see this old comment // NOTE: this is the only place where we check the - // relay-parent. freed_disputed_candidates, scheduled, core_index_enabled, @@ -1294,40 +1292,37 @@ fn filter_unchained_candidates(candidates, |para_id, candidate| { let Some(latest_head_data) = para_latest_head_data.get(¶_id) else { return false }; - let Some((relay_parent_storage_root, relay_parent_number)) = - allowed_relay_parents.acquire_info(candidate.descriptor().relay_parent, None) - else { - log::debug!( - target: LOG_TARGET, - "Relay parent {:?} for candidate {:?} is not in the allowed relay parents. 
Dropping the candidate.", - candidate.descriptor().relay_parent, - candidate.candidate().hash(), - ); + let prev_context = >::para_most_recent_context(para_id); + let check_ctx = CandidateCheckContext::::new(prev_context); - return false - }; - - let persisted_validation_data = make_persisted_validation_data_with_parent::( - relay_parent_number, - relay_parent_storage_root, + match check_ctx.verify_backed_candidate( + &allowed_relay_parents, + candidate.candidate(), latest_head_data.clone(), - ); - - let expected = persisted_validation_data.hash(); - - if expected == candidate.descriptor().persisted_validation_data_hash { - para_latest_head_data - .insert(para_id, candidate.candidate().commitments.head_data.clone()); - - true - } else { - log::debug!( - target: LOG_TARGET, - "Found backed candidates which don't form a chain for paraid {:?}. The order may also be wrong. Dropping the candidates.", - para_id - ); - - false + ) { + Ok(Err(err)) if err == PVDMismatch => { + log::debug!( + target: LOG_TARGET, + "Found backed candidates which don't form a chain for paraid {:?}. The order may also be wrong. Dropping the candidates.", + para_id + ); + false + }, + Ok(Err(_)) => { + // Currently unreachable as the only error is PVDMismatch. 
+ false + }, + Ok(Ok(_)) => true, + Err(err) => { + log::debug!( + target: LOG_TARGET, + "Backed candidate verification for candidate {:?} of paraid {:?} failed with {:?}", + candidate.candidate().hash(), + para_id, + err + ); + false + }, } }); } From ccef35b79f304a25c8ec8065beda325d20eb5b3e Mon Sep 17 00:00:00 2001 From: alindima Date: Thu, 29 Feb 2024 10:07:15 +0200 Subject: [PATCH 11/44] review comments --- .../runtime/parachains/src/inclusion/mod.rs | 25 ++++--- .../parachains/src/paras_inherent/mod.rs | 75 ++++--------------- 2 files changed, 30 insertions(+), 70 deletions(-) diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 2a2e5dea2fe0..480406435bf5 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -643,17 +643,9 @@ impl Pallet { for (para_id, candidates) in candidates { // PVD hash should have already been checked in `filter_unchained_candidates`, but do it // again for safety. - let maybe_latest_head_data = match >::get(¶_id) - .map(|pending_candidates| { - pending_candidates.back().map(|x| x.commitments.head_data.clone()) - }) - .flatten() - { - Some(head_data) => Some(head_data), - None => >::para_head(¶_id), - }; + // this cannot be None if the parachain was registered. - let mut latest_head_data = match maybe_latest_head_data { + let mut latest_head_data = match Self::para_latest_head_data(para_id) { None => continue, Some(latest_head_data) => latest_head_data, }; @@ -747,6 +739,19 @@ impl Pallet { }) } + // Get the latest backed output head data of this para. 
+ pub(crate) fn para_latest_head_data(para_id: &ParaId) -> Option { + match >::get(para_id) + .map(|pending_candidates| { + pending_candidates.back().map(|x| x.commitments.head_data.clone()) + }) + .flatten() + { + Some(head_data) => Some(head_data), + None => >::para_head(para_id), + } + } + fn check_backing_votes( backed_candidate: &BackedCandidate, validators: &[ValidatorId], diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 3e247eacc0d9..c21de47b3ab8 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -972,7 +972,7 @@ fn sanitize_backed_candidates( // Remove any candidates that were concluded invalid or who are descendants of concluded invalid // candidates (along with their descendants). - filter_candidates::(&mut candidates_per_para, |_, candidate| { + filter_candidates::(&mut candidates_per_para, |_, candidate| { let keep = !concluded_invalid_with_descendants.contains(&candidate.candidate().hash()); if !keep { @@ -1099,15 +1099,16 @@ fn limit_and_sanitize_disputes< // candidate which failes the predicate is found, all the other candidates that follow are dropped. fn filter_candidates< T: inclusion::Config + paras::Config + inclusion::Config, - F: FnMut(ParaId, &BackedCandidate) -> bool, + F: FnMut(ParaId, &mut C) -> bool, + C, >( - candidates: &mut BTreeMap>>, + candidates_per_para: &mut BTreeMap>, mut pred: F, ) { - for (para_id, candidates) in candidates.iter_mut() { + for (para_id, candidates) in candidates_per_para.iter_mut() { let mut latest_valid_idx = None; - for (idx, candidate) in candidates.iter().enumerate() { + for (idx, candidate) in candidates.iter_mut().enumerate() { if pred(*para_id, candidate) { // Found a valid candidate. 
latest_valid_idx = Some(idx); @@ -1123,38 +1124,7 @@ fn filter_candidates< } } - candidates.retain(|_, c| !c.is_empty()); -} - -// Helper function for filtering candidates which don't pass the given predicate. When/if the first -// candidate which failes the predicate is found, all the other candidates that follow are dropped. -fn filter_candidates_with_core< - T: inclusion::Config + paras::Config + inclusion::Config, - F: FnMut(ParaId, &mut BackedCandidate, CoreIndex) -> bool, ->( - candidates: &mut BTreeMap, CoreIndex)>>, - mut pred: F, -) { - for (para_id, candidates) in candidates.iter_mut() { - let mut latest_valid_idx = None; - - for (idx, (candidate, core_idx)) in candidates.iter_mut().enumerate() { - if pred(*para_id, candidate, *core_idx) { - // Found a valid candidate. - latest_valid_idx = Some(idx); - } else { - break - } - } - - if let Some(latest_valid_idx) = latest_valid_idx { - candidates.truncate(latest_valid_idx + 1); - } else { - candidates.clear(); - } - } - - candidates.retain(|_, c| !c.is_empty()); + candidates_per_para.retain(|_, c| !c.is_empty()); } // Filters statements from disabled validators in `BackedCandidate` and does a few more sanity @@ -1183,7 +1153,7 @@ fn filter_backed_statements_from_disabled_validators< // the validator group assigned to the parachain. To obtain this group we need: // 1. Core index assigned to the parachain which has produced the candidate // 2. 
The relay chain block number of the candidate - filter_candidates_with_core::(backed_candidates_with_core, |para_id, bc, core_idx| { + filter_candidates::(backed_candidates_with_core, |para_id, (bc, core_idx)| { let (validator_indices, maybe_core_index) = bc.validator_indices_and_core_index(core_index_enabled); let mut validator_indices = BitVec::<_>::from(validator_indices); @@ -1205,7 +1175,7 @@ fn filter_backed_statements_from_disabled_validators< // Get the group index for the core let group_idx = match >::group_assigned_to_core( - core_idx, + *core_idx, relay_parent_block_number + One::one(), ) { Some(group_idx) => group_idx, @@ -1269,17 +1239,8 @@ fn filter_unchained_candidates = BTreeMap::new(); for para_id in candidates.keys() { - let maybe_latest_head_data = match >::get(¶_id) - .map(|pending_candidates| { - pending_candidates.back().map(|x| x.candidate_commitments().head_data.clone()) - }) - .flatten() - { - Some(head_data) => Some(head_data), - None => >::para_head(¶_id), - }; // this cannot be None - let latest_head_data = match maybe_latest_head_data { + let latest_head_data = match >::para_latest_head_data(¶_id) { None => { log::warn!(target: LOG_TARGET, "Latest included head data for paraid {:?} is None", para_id); continue @@ -1289,7 +1250,7 @@ fn filter_unchained_candidates(candidates, |para_id, candidate| { + filter_candidates::(candidates, |para_id, candidate| { let Some(latest_head_data) = para_latest_head_data.get(¶_id) else { return false }; let prev_context = >::para_most_recent_context(para_id); @@ -1377,15 +1338,11 @@ fn map_candidates_to_cores = get_injected_core_index::( - allowed_relay_parents, - &candidate, - core_index_enabled, - ); + let maybe_injected_core_index: Option = + get_injected_core_index::(allowed_relay_parents, &candidate); if let Some(core_index) = maybe_injected_core_index { - if scheduled_cores.contains(&core_index) { - scheduled_cores.remove(&core_index); + if scheduled_cores.remove(&core_index) { 
temp_backed_candidates.push((candidate, core_index)); } else { // if we got a candidate for a core index which is not scheduled, stop @@ -1439,13 +1396,11 @@ fn map_candidates_to_cores( allowed_relay_parents: &AllowedRelayParentsTracker>, candidate: &BackedCandidate, - core_index_enabled: bool, ) -> Option { // After stripping the 8 bit extensions, the `validator_indices` field length is expected // to be equal to backing group size. If these don't match, the `CoreIndex` is badly encoded, // or not supported. - let (validator_indices, maybe_core_idx) = - candidate.validator_indices_and_core_index(core_index_enabled); + let (validator_indices, maybe_core_idx) = candidate.validator_indices_and_core_index(true); let Some(core_idx) = maybe_core_idx else { return None }; From 3f67898f3e3596337810e445910b866cce398b54 Mon Sep 17 00:00:00 2001 From: alindima Date: Thu, 29 Feb 2024 11:50:51 +0200 Subject: [PATCH 12/44] some more nits --- polkadot/runtime/parachains/src/paras_inherent/mod.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index c21de47b3ab8..3554b886e90a 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -972,7 +972,7 @@ fn sanitize_backed_candidates( // Remove any candidates that were concluded invalid or who are descendants of concluded invalid // candidates (along with their descendants). - filter_candidates::(&mut candidates_per_para, |_, candidate| { + retain_candidates::(&mut candidates_per_para, |_, candidate| { let keep = !concluded_invalid_with_descendants.contains(&candidate.candidate().hash()); if !keep { @@ -1097,7 +1097,7 @@ fn limit_and_sanitize_disputes< // Helper function for filtering candidates which don't pass the given predicate. 
When/if the first // candidate which failes the predicate is found, all the other candidates that follow are dropped. -fn filter_candidates< +fn retain_candidates< T: inclusion::Config + paras::Config + inclusion::Config, F: FnMut(ParaId, &mut C) -> bool, C, @@ -1153,7 +1153,7 @@ fn filter_backed_statements_from_disabled_validators< // the validator group assigned to the parachain. To obtain this group we need: // 1. Core index assigned to the parachain which has produced the candidate // 2. The relay chain block number of the candidate - filter_candidates::(backed_candidates_with_core, |para_id, (bc, core_idx)| { + retain_candidates::(backed_candidates_with_core, |para_id, (bc, core_idx)| { let (validator_indices, maybe_core_index) = bc.validator_indices_and_core_index(core_index_enabled); let mut validator_indices = BitVec::<_>::from(validator_indices); @@ -1233,6 +1233,7 @@ fn filter_backed_statements_from_disabled_validators< // Check that candidates pertaining to the same para form a chain. Drop the ones that // don't, along with the rest of candidates which follow them in the input vector. +// In the process, duplicated candidates will also be dropped (unless they form a valid cycle). 
fn filter_unchained_candidates( candidates: &mut BTreeMap>>, allowed_relay_parents: &AllowedRelayParentsTracker>, @@ -1250,7 +1251,7 @@ fn filter_unchained_candidates(candidates, |para_id, candidate| { + retain_candidates::(candidates, |para_id, candidate| { let Some(latest_head_data) = para_latest_head_data.get(¶_id) else { return false }; let prev_context = >::para_most_recent_context(para_id); From 58b412901144fc24f5fb4e9b6b6992077fc439bb Mon Sep 17 00:00:00 2001 From: alindima Date: Thu, 29 Feb 2024 16:50:06 +0200 Subject: [PATCH 13/44] add runtime migration to inclusion storage --- .../parachains/src/inclusion/migration.rs | 127 ++++++++++++++++++ .../runtime/parachains/src/inclusion/mod.rs | 4 + polkadot/runtime/rococo/src/lib.rs | 2 + polkadot/runtime/westend/src/lib.rs | 1 + 4 files changed, 134 insertions(+) create mode 100644 polkadot/runtime/parachains/src/inclusion/migration.rs diff --git a/polkadot/runtime/parachains/src/inclusion/migration.rs b/polkadot/runtime/parachains/src/inclusion/migration.rs new file mode 100644 index 000000000000..3b529b280eea --- /dev/null +++ b/polkadot/runtime/parachains/src/inclusion/migration.rs @@ -0,0 +1,127 @@ +pub use v1::MigrateToV1; + +mod v0 { + use crate::inclusion::{Config, Pallet}; + use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; + use frame_support::{storage_alias, Twox64Concat}; + use frame_system::pallet_prelude::BlockNumberFor; + use parity_scale_codec::{Decode, Encode}; + use primitives::{ + CandidateCommitments, CandidateDescriptor, CandidateHash, CoreIndex, GroupIndex, + Id as ParaId, + }; + use scale_info::TypeInfo; + + #[derive(Encode, Decode, PartialEq, TypeInfo, Clone)] + pub struct CandidatePendingAvailability { + pub core: CoreIndex, + pub hash: CandidateHash, + pub descriptor: CandidateDescriptor, + pub availability_votes: BitVec, + pub backers: BitVec, + pub relay_parent_number: N, + pub backed_in_number: N, + pub backing_group: GroupIndex, + } + + #[storage_alias] + pub type 
PendingAvailability = StorageMap< + Pallet, + Twox64Concat, + ParaId, + CandidatePendingAvailability<::Hash, BlockNumberFor>, + >; + + #[storage_alias] + pub type PendingAvailabilityCommitments = + StorageMap, Twox64Concat, ParaId, CandidateCommitments>; +} + +mod v1 { + use super::v0::{ + PendingAvailability as V0PendingAvailability, + PendingAvailabilityCommitments as V0PendingAvailabilityCommitments, + }; + use crate::inclusion::{ + CandidatePendingAvailability as V1CandidatePendingAvailability, Config, Pallet, + PendingAvailability as V1PendingAvailability, + }; + use frame_support::{traits::OnRuntimeUpgrade, weights::Weight}; + use sp_core::Get; + use sp_std::{collections::vec_deque::VecDeque, vec::Vec}; + + #[cfg(feature = "try-runtime")] + use frame_support::{ + ensure, + traits::{GetStorageVersion, StorageVersion}, + }; + + pub struct VersionUncheckedMigrateToV1(sp_std::marker::PhantomData); + + impl OnRuntimeUpgrade for VersionUncheckedMigrateToV1 { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + log::trace!(target: crate::inclusion::LOG_TARGET, "Running pre_upgrade() for inclusion MigrateToV1"); + Ok(Vec::new()) + } + + fn on_runtime_upgrade() -> Weight { + let mut weight: Weight = Weight::zero(); + + let v0_candidates: Vec<_> = V0PendingAvailability::::drain().collect(); + + for (para_id, candidate) in v0_candidates { + let commitments = V0PendingAvailabilityCommitments::::take(para_id); + // One write for each removal (one candidate and one commitment). 
+ weight = weight.saturating_add(T::DbWeight::get().writes(2)); + + if let Some(commitments) = commitments { + let mut per_para = VecDeque::new(); + per_para.push_back(V1CandidatePendingAvailability { + core: candidate.core, + hash: candidate.hash, + descriptor: candidate.descriptor, + availability_votes: candidate.availability_votes, + backers: candidate.backers, + relay_parent_number: candidate.relay_parent_number, + backed_in_number: candidate.backed_in_number, + backing_group: candidate.backing_group, + commitments, + }); + V1PendingAvailability::::insert(para_id, per_para); + + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + } + } + + // should've already been drained by the above for loop, but as a sanity check, in case + // there are more commitments than candidates. V0PendingAvailabilityCommitments should + // not contain too many keys so removing everything at once should be safe + let res = V0PendingAvailabilityCommitments::::clear(u32::MAX, None); + weight = weight.saturating_add( + T::DbWeight::get().reads_writes(res.loops as u64, res.backend as u64), + ); + + weight + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + log::trace!(target: crate::inclusion::LOG_TARGET, "Running post_upgrade() for inclusion MigrateToV1"); + ensure!( + Pallet::::on_chain_storage_version() >= StorageVersion::new(1), + "Storage version should be >= 1 after the migration" + ); + + Ok(()) + } + } + + pub type MigrateToV1 = frame_support::migrations::VersionedMigration< + 0, + 1, + VersionUncheckedMigrateToV1, + Pallet, + ::DbWeight, + >; +} diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 480406435bf5..f5c245ea4564 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -61,6 +61,8 @@ pub(crate) mod tests; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; 
+pub mod migration; + pub trait WeightInfo { fn receive_upward_messages(i: u32) -> Weight; } @@ -266,8 +268,10 @@ pub type MaxUmpMessageLenOf = pub mod pallet { use super::*; + const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] #[pallet::without_storage_info] + #[pallet::storage_version(STORAGE_VERSION)] pub struct Pallet(_); #[pallet::config] diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index f3c0c3d6bd8c..9bc701b83285 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -1662,6 +1662,8 @@ pub mod migrations { // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, + + parachains_inclusion::migration::MigrateToV1, ); } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index a7772303a95d..2fd5e73089d1 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1708,6 +1708,7 @@ pub mod migrations { crate::xcm_config::XcmRouter, GetLegacyLeaseImpl, >, + parachains_inclusion::migration::MigrateToV1, ); } From 23a8d348900ffa39f6a7bcd7735d377a2919dccb Mon Sep 17 00:00:00 2001 From: alindima Date: Fri, 1 Mar 2024 09:49:25 +0200 Subject: [PATCH 14/44] add migration tests --- polkadot/runtime/parachains/src/builder.rs | 33 +++--- .../parachains/src/inclusion/migration.rs | 111 ++++++++++++++++++ 2 files changed, 129 insertions(+), 15 deletions(-) diff --git a/polkadot/runtime/parachains/src/builder.rs b/polkadot/runtime/parachains/src/builder.rs index 0e4e659fef27..6331a6c29e62 100644 --- a/polkadot/runtime/parachains/src/builder.rs +++ b/polkadot/runtime/parachains/src/builder.rs @@ -38,7 +38,11 @@ use sp_runtime::{ traits::{Header as HeaderT, One, TrailingZeroInput, Zero}, RuntimeAppPublic, }; -use sp_std::{collections::btree_map::BTreeMap, prelude::Vec, vec}; +use sp_std::{ + collections::{btree_map::BTreeMap, vec_deque::VecDeque}, + prelude::Vec, + vec, +}; fn mock_validation_code() 
-> ValidationCode { ValidationCode(vec![1, 2, 3]) @@ -275,11 +279,13 @@ impl BenchBuilder { core_idx: CoreIndex, candidate_hash: CandidateHash, availability_votes: BitVec, + commitments: CandidateCommitments, ) -> inclusion::CandidatePendingAvailability> { inclusion::CandidatePendingAvailability::>::new( core_idx, // core candidate_hash, // hash Self::candidate_descriptor_mock(), // candidate descriptor + commitments, // commitments availability_votes, // availability votes Default::default(), // backers Zero::zero(), // relay parent @@ -300,12 +306,6 @@ impl BenchBuilder { availability_votes: BitVec, candidate_hash: CandidateHash, ) { - let candidate_availability = Self::candidate_availability_mock( - group_idx, - core_idx, - candidate_hash, - availability_votes, - ); let commitments = CandidateCommitments:: { upward_messages: Default::default(), horizontal_messages: Default::default(), @@ -314,8 +314,17 @@ impl BenchBuilder { processed_downward_messages: 0, hrmp_watermark: 0u32.into(), }; - inclusion::PendingAvailability::::insert(para_id, candidate_availability); - inclusion::PendingAvailabilityCommitments::::insert(¶_id, commitments); + let candidate_availability = Self::candidate_availability_mock( + group_idx, + core_idx, + candidate_hash, + availability_votes, + commitments, + ); + inclusion::PendingAvailability::::insert( + para_id, + [candidate_availability].into_iter().collect::>(), + ); } /// Create an `AvailabilityBitfield` where `concluding` is a map where each key is a core index @@ -668,8 +677,6 @@ impl BenchBuilder { // Make sure relevant storage is cleared. This is just to get the asserts to work when // running tests because it seems the storage is not cleared in between. #[allow(deprecated)] - inclusion::PendingAvailabilityCommitments::::remove_all(None); - #[allow(deprecated)] inclusion::PendingAvailability::::remove_all(None); // We don't allow a core to have both disputes and be marked fully available at this block. 
@@ -701,10 +708,6 @@ impl BenchBuilder { builder.dispute_sessions.as_slice(), ); - assert_eq!( - inclusion::PendingAvailabilityCommitments::::iter().count(), - used_cores as usize, - ); assert_eq!(inclusion::PendingAvailability::::iter().count(), used_cores as usize,); // Mark all the used cores as occupied. We expect that there are diff --git a/polkadot/runtime/parachains/src/inclusion/migration.rs b/polkadot/runtime/parachains/src/inclusion/migration.rs index 3b529b280eea..41b77ad9545a 100644 --- a/polkadot/runtime/parachains/src/inclusion/migration.rs +++ b/polkadot/runtime/parachains/src/inclusion/migration.rs @@ -1,3 +1,16 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + pub use v1::MigrateToV1; mod v0 { @@ -125,3 +138,101 @@ mod v1 { ::DbWeight, >; } + +#[cfg(test)] +mod tests { + use super::{v1::VersionUncheckedMigrateToV1, *}; + use crate::{ + inclusion::{ + CandidatePendingAvailability as V1CandidatePendingAvailability, + PendingAvailability as V1PendingAvailability, *, + }, + mock::{new_test_ext, MockGenesisConfig, Test}, + }; + use frame_support::traits::OnRuntimeUpgrade; + use primitives::Id as ParaId; + use test_helpers::{dummy_candidate_commitments, dummy_candidate_descriptor, dummy_hash}; + + #[test] + fn migrate_to_v1() { + new_test_ext(MockGenesisConfig::default()).execute_with(|| { + // No data to migrate. 
+ assert_eq!( + as OnRuntimeUpgrade>::on_runtime_upgrade(), + Weight::zero() + ); + assert!(V1PendingAvailability::::iter().next().is_none()); + + let mut expected = vec![]; + + for i in 1..5 { + let descriptor = dummy_candidate_descriptor(dummy_hash()); + v0::PendingAvailability::::insert( + ParaId::from(i), + v0::CandidatePendingAvailability { + core: CoreIndex(i), + descriptor: descriptor.clone(), + relay_parent_number: i, + hash: CandidateHash(dummy_hash()), + availability_votes: Default::default(), + backed_in_number: i, + backers: Default::default(), + backing_group: GroupIndex(i), + }, + ); + v0::PendingAvailabilityCommitments::::insert( + ParaId::from(i), + dummy_candidate_commitments(HeadData(vec![i as _])), + ); + + expected.push(( + ParaId::from(i), + [V1CandidatePendingAvailability { + core: CoreIndex(i), + descriptor, + relay_parent_number: i, + hash: CandidateHash(dummy_hash()), + availability_votes: Default::default(), + backed_in_number: i, + backers: Default::default(), + backing_group: GroupIndex(i), + commitments: dummy_candidate_commitments(HeadData(vec![i as _])), + }] + .into_iter() + .collect::>(), + )); + } + // add some wrong data also, candidates without commitments or commitments without + // candidates. + v0::PendingAvailability::::insert( + ParaId::from(6), + v0::CandidatePendingAvailability { + core: CoreIndex(6), + descriptor: dummy_candidate_descriptor(dummy_hash()), + relay_parent_number: 6, + hash: CandidateHash(dummy_hash()), + availability_votes: Default::default(), + backed_in_number: 6, + backers: Default::default(), + backing_group: GroupIndex(6), + }, + ); + v0::PendingAvailabilityCommitments::::insert( + ParaId::from(7), + dummy_candidate_commitments(HeadData(vec![7 as _])), + ); + + // For tests, db weight is zero. 
+ assert_eq!( + as OnRuntimeUpgrade>::on_runtime_upgrade(), + Weight::zero() + ); + + let mut actual = V1PendingAvailability::::iter().collect::>(); + actual.sort_by(|(id1, _), (id2, _)| id1.cmp(id2)); + expected.sort_by(|(id1, _), (id2, _)| id1.cmp(id2)); + + assert_eq!(actual, expected); + }); + } +} From e0d9dffdb42aa751a4e90be6ca198796b3ea6153 Mon Sep 17 00:00:00 2001 From: alindima Date: Fri, 1 Mar 2024 10:32:42 +0200 Subject: [PATCH 15/44] don't allow candidate cycles --- .../parachains/src/paras_inherent/mod.rs | 24 +++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 3554b886e90a..5e2bc4a2b1f0 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -1233,7 +1233,8 @@ fn filter_backed_statements_from_disabled_validators< // Check that candidates pertaining to the same para form a chain. Drop the ones that // don't, along with the rest of candidates which follow them in the input vector. -// In the process, duplicated candidates will also be dropped (unless they form a valid cycle). +// In the process, duplicated candidates will also be dropped (even if they form a valid cycle; +// cycles are not allowed if they entail backing duplicated candidates). 
fn filter_unchained_candidates( candidates: &mut BTreeMap>>, allowed_relay_parents: &AllowedRelayParentsTracker>, @@ -1251,8 +1252,27 @@ fn filter_unchained_candidates> = BTreeMap::new(); + retain_candidates::(candidates, |para_id, candidate| { let Some(latest_head_data) = para_latest_head_data.get(¶_id) else { return false }; + let candidate_hash = candidate.candidate().hash(); + + let visited_candidates = + para_visited_candidates.entry(para_id).or_insert_with(|| BTreeSet::new()); + if visited_candidates.contains(&candidate_hash) { + log::debug!( + target: LOG_TARGET, + "Found duplicate candidates for paraid {:?}. Dropping the candidates with hash {:?}", + para_id, + candidate_hash + ); + + // If we got a duplicate candidate, stop. + return false + } else { + visited_candidates.insert(candidate_hash); + } let prev_context = >::para_most_recent_context(para_id); let check_ctx = CandidateCheckContext::::new(prev_context); @@ -1279,7 +1299,7 @@ fn filter_unchained_candidates Date: Fri, 1 Mar 2024 15:44:34 +0200 Subject: [PATCH 16/44] fix bug --- polkadot/runtime/parachains/src/paras_inherent/mod.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 5e2bc4a2b1f0..f8e6203e4653 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -1277,7 +1277,7 @@ fn filter_unchained_candidates>::para_most_recent_context(para_id); let check_ctx = CandidateCheckContext::::new(prev_context); - match check_ctx.verify_backed_candidate( + let res = match check_ctx.verify_backed_candidate( &allowed_relay_parents, candidate.candidate(), latest_head_data.clone(), @@ -1305,7 +1305,14 @@ fn filter_unchained_candidates Date: Fri, 1 Mar 2024 15:44:52 +0200 Subject: [PATCH 17/44] make tests compile and make paras_inherent tests pass --- .../runtime/parachains/src/inclusion/tests.rs | 
527 ++++++++---------- .../parachains/src/paras_inherent/tests.rs | 313 ++++++++--- 2 files changed, 443 insertions(+), 397 deletions(-) diff --git a/polkadot/runtime/parachains/src/inclusion/tests.rs b/polkadot/runtime/parachains/src/inclusion/tests.rs index 3fe7d7f0c7d4..86b8be638b4d 100644 --- a/polkadot/runtime/parachains/src/inclusion/tests.rs +++ b/polkadot/runtime/parachains/src/inclusion/tests.rs @@ -360,14 +360,12 @@ fn simple_sanitize_bitfields( } /// Process a set of already sanitized bitfields. pub(crate) fn process_bitfields( - expected_bits: usize, signed_bitfields: SignedAvailabilityBitfields, core_lookup: impl Fn(CoreIndex) -> Option, ) -> Vec<(CoreIndex, CandidateHash)> { let validators = shared::Pallet::::active_validator_keys(); ParaInclusion::update_pending_availability_and_get_freed_cores::<_>( - expected_bits, &validators[..], signed_bitfields, core_lookup, @@ -375,7 +373,7 @@ pub(crate) fn process_bitfields( } #[test] -fn collect_pending_cleans_up_pending() { +fn collect_timedout_cleans_up_pending() { let chain_a = ParaId::from(1_u32); let chain_b = ParaId::from(2_u32); let thread_a = ParaId::from(3_u32); @@ -391,7 +389,7 @@ fn collect_pending_cleans_up_pending() { let default_candidate = TestCandidateBuilder::default().build(); >::insert( chain_a, - CandidatePendingAvailability { + [CandidatePendingAvailability { core: CoreIndex::from(0), hash: default_candidate.hash(), descriptor: default_candidate.descriptor.clone(), @@ -400,16 +398,15 @@ fn collect_pending_cleans_up_pending() { backed_in_number: 0, backers: default_backing_bitfield(), backing_group: GroupIndex::from(0), - }, - ); - PendingAvailabilityCommitments::::insert( - chain_a, - default_candidate.commitments.clone(), + commitments: default_candidate.commitments.clone(), + }] + .into_iter() + .collect::>(), ); >::insert( &chain_b, - CandidatePendingAvailability { + [CandidatePendingAvailability { core: CoreIndex::from(1), hash: default_candidate.hash(), descriptor: 
default_candidate.descriptor, @@ -418,23 +415,21 @@ fn collect_pending_cleans_up_pending() { backed_in_number: 5, backers: default_backing_bitfield(), backing_group: GroupIndex::from(1), - }, + commitments: default_candidate.commitments.clone(), + }] + .into_iter() + .collect::>(), ); - PendingAvailabilityCommitments::::insert(chain_b, default_candidate.commitments); run_to_block(5, |_| None); assert!(>::get(&chain_a).is_some()); assert!(>::get(&chain_b).is_some()); - assert!(>::get(&chain_a).is_some()); - assert!(>::get(&chain_b).is_some()); - ParaInclusion::collect_pending(Scheduler::availability_timeout_predicate()); + ParaInclusion::collect_timedout(Scheduler::availability_timeout_predicate()); assert!(>::get(&chain_a).is_none()); assert!(>::get(&chain_b).is_some()); - assert!(>::get(&chain_a).is_none()); - assert!(>::get(&chain_b).is_some()); }); } @@ -550,7 +545,7 @@ fn bitfield_checks() { ); assert_eq!(checked_bitfields.len(), 1, "No bitfields should have been filtered!"); - let x = process_bitfields(expected_bits(), checked_bitfields, core_lookup); + let x = process_bitfields(checked_bitfields, core_lookup); assert!(x.is_empty(), "No core should be freed."); } @@ -571,7 +566,7 @@ fn bitfield_checks() { ); assert_eq!(checked_bitfields.len(), 1, "No bitfields should have been filtered!"); - let x = process_bitfields(expected_bits(), checked_bitfields, core_lookup); + let x = process_bitfields(checked_bitfields, core_lookup); assert!(x.is_empty(), "No core should be freed."); } @@ -584,7 +579,7 @@ fn bitfield_checks() { let default_candidate = TestCandidateBuilder::default().build(); >::insert( chain_a, - CandidatePendingAvailability { + [CandidatePendingAvailability { core: CoreIndex::from(0), hash: default_candidate.hash(), descriptor: default_candidate.descriptor, @@ -593,9 +588,11 @@ fn bitfield_checks() { backed_in_number: 0, backers: default_backing_bitfield(), backing_group: GroupIndex::from(0), - }, + commitments: default_candidate.commitments, + }] + 
.into_iter() + .collect::>(), ); - PendingAvailabilityCommitments::::insert(chain_a, default_candidate.commitments); *bare_bitfield.0.get_mut(0).unwrap() = true; let signed = sign_bitfield( @@ -613,53 +610,10 @@ fn bitfield_checks() { ); assert_eq!(checked_bitfields.len(), 1, "No bitfields should have been filtered!"); - let x = process_bitfields(expected_bits(), checked_bitfields, core_lookup); + let x = process_bitfields(checked_bitfields, core_lookup); assert!(x.is_empty(), "No core should be freed."); >::remove(chain_a); - PendingAvailabilityCommitments::::remove(chain_a); - } - - // bitfield signed with pending bit signed, but no commitments. - { - let mut bare_bitfield = default_bitfield(); - - assert_eq!(core_lookup(CoreIndex::from(0)), Some(chain_a)); - - let default_candidate = TestCandidateBuilder::default().build(); - >::insert( - chain_a, - CandidatePendingAvailability { - core: CoreIndex::from(0), - hash: default_candidate.hash(), - descriptor: default_candidate.descriptor, - availability_votes: default_availability_votes(), - relay_parent_number: 0, - backed_in_number: 0, - backers: default_backing_bitfield(), - backing_group: GroupIndex::from(0), - }, - ); - - *bare_bitfield.0.get_mut(0).unwrap() = true; - let signed = sign_bitfield( - &keystore, - &validators[0], - ValidatorIndex(0), - bare_bitfield, - &signing_context, - ); - - let checked_bitfields = simple_sanitize_bitfields( - vec![signed.into()], - DisputedBitfield::zeros(expected_bits()), - expected_bits(), - ); - assert_eq!(checked_bitfields.len(), 1, "No bitfields should have been filtered!"); - - let x = process_bitfields(expected_bits(), checked_bitfields, core_lookup); - // no core is freed - assert!(x.is_empty(), "No core should be freed."); } }); } @@ -723,7 +677,7 @@ fn supermajority_bitfields_trigger_availability() { >::insert( chain_a, - CandidatePendingAvailability { + [CandidatePendingAvailability { core: CoreIndex::from(0), hash: candidate_a.hash(), descriptor: 
candidate_a.clone().descriptor, @@ -732,9 +686,11 @@ fn supermajority_bitfields_trigger_availability() { backed_in_number: 0, backers: backing_bitfield(&[3, 4]), backing_group: GroupIndex::from(0), - }, + commitments: candidate_a.clone().commitments, + }] + .into_iter() + .collect::>(), ); - PendingAvailabilityCommitments::::insert(chain_a, candidate_a.clone().commitments); let candidate_b = TestCandidateBuilder { para_id: chain_b, @@ -745,7 +701,7 @@ fn supermajority_bitfields_trigger_availability() { >::insert( chain_b, - CandidatePendingAvailability { + [CandidatePendingAvailability { core: CoreIndex::from(1), hash: candidate_b.hash(), descriptor: candidate_b.descriptor, @@ -754,9 +710,11 @@ fn supermajority_bitfields_trigger_availability() { backed_in_number: 0, backers: backing_bitfield(&[0, 2]), backing_group: GroupIndex::from(1), - }, + commitments: candidate_b.commitments, + }] + .into_iter() + .collect::>(), ); - PendingAvailabilityCommitments::::insert(chain_b, candidate_b.commitments); // this bitfield signals that a and b are available. let a_and_b_available = { @@ -815,24 +773,29 @@ fn supermajority_bitfields_trigger_availability() { assert_eq!(checked_bitfields.len(), old_len, "No bitfields should have been filtered!"); // only chain A's core is freed. - let v = process_bitfields(expected_bits(), checked_bitfields, core_lookup); + let v = process_bitfields(checked_bitfields, core_lookup); assert_eq!(vec![(CoreIndex(0), candidate_a.hash())], v); // chain A had 4 signing off, which is >= threshold. // chain B has 3 signing off, which is < threshold. assert!(>::get(&chain_a).is_none()); - assert!(>::get(&chain_a).is_none()); - assert!(>::get(&chain_b).is_some()); - assert_eq!(>::get(&chain_b).unwrap().availability_votes, { - // check that votes from first 3 were tracked. + assert_eq!( + >::get(&chain_b) + .unwrap() + .pop_front() + .unwrap() + .availability_votes, + { + // check that votes from first 3 were tracked. 
- let mut votes = default_availability_votes(); - *votes.get_mut(0).unwrap() = true; - *votes.get_mut(1).unwrap() = true; - *votes.get_mut(2).unwrap() = true; + let mut votes = default_availability_votes(); + *votes.get_mut(0).unwrap() = true; + *votes.get_mut(1).unwrap() = true; + *votes.get_mut(2).unwrap() = true; - votes - }); + votes + } + ); // and check that chain head was enacted. assert_eq!(Paras::para_head(&chain_a), Some(vec![1, 2, 3, 4].into())); @@ -929,7 +892,7 @@ fn candidate_checks() { assert_eq!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![], + &BTreeMap::new(), &group_validators, false ), @@ -985,7 +948,12 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed_b, chain_b_assignment.1), (backed_a, chain_a_assignment.1)], + &vec![ + (chain_b_assignment.0, vec![(backed_b, chain_b_assignment.1)]), + (chain_a_assignment.0, vec![(backed_a, chain_a_assignment.1)]) + ] + .into_iter() + .collect(), &group_validators, false ), @@ -1019,7 +987,9 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed, chain_a_assignment.1)], + &vec![(chain_a_assignment.0, vec![(backed, chain_a_assignment.1)])] + .into_iter() + .collect(), &group_validators, false ), @@ -1078,7 +1048,12 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed_b, chain_b_assignment.1), (backed_a, chain_a_assignment.1)], + &vec![ + (chain_b_assignment.0, vec![(backed_b, chain_b_assignment.1)]), + (chain_a_assignment.0, vec![(backed_a, chain_a_assignment.1)]) + ] + .into_iter() + .collect(), &group_validators, false ), @@ -1117,7 +1092,9 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed, thread_a_assignment.1)], + &vec![(thread_a_assignment.0, vec![(backed, thread_a_assignment.1)])] + .into_iter() + .collect(), &group_validators, false 
), @@ -1125,100 +1102,6 @@ fn candidate_checks() { ); } - // para occupied - reject. - { - let mut candidate = TestCandidateBuilder { - para_id: chain_a, - relay_parent: System::parent_hash(), - pov_hash: Hash::repeat_byte(1), - persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), - hrmp_watermark: RELAY_PARENT_NUM, - ..Default::default() - } - .build(); - - collator_sign_candidate(Sr25519Keyring::One, &mut candidate); - - let backed = back_candidate( - candidate, - &validators, - group_validators(GroupIndex::from(0)).unwrap().as_ref(), - &keystore, - &signing_context, - BackingKind::Threshold, - None, - ); - - let candidate = TestCandidateBuilder::default().build(); - >::insert( - &chain_a, - CandidatePendingAvailability { - core: CoreIndex::from(0), - hash: candidate.hash(), - descriptor: candidate.descriptor, - availability_votes: default_availability_votes(), - relay_parent_number: 3, - backed_in_number: 4, - backers: default_backing_bitfield(), - backing_group: GroupIndex::from(0), - }, - ); - >::insert(&chain_a, candidate.commitments); - - assert_noop!( - ParaInclusion::process_candidates( - &allowed_relay_parents, - vec![(backed, chain_a_assignment.1)], - &group_validators, - false - ), - Error::::CandidateScheduledBeforeParaFree - ); - - >::remove(&chain_a); - >::remove(&chain_a); - } - - // messed up commitments storage - do not panic - reject. 
- { - let mut candidate = TestCandidateBuilder { - para_id: chain_a, - relay_parent: System::parent_hash(), - pov_hash: Hash::repeat_byte(1), - persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), - hrmp_watermark: RELAY_PARENT_NUM, - ..Default::default() - } - .build(); - - collator_sign_candidate(Sr25519Keyring::One, &mut candidate); - - // this is not supposed to happen - >::insert(&chain_a, candidate.commitments.clone()); - - let backed = back_candidate( - candidate, - &validators, - group_validators(GroupIndex::from(0)).unwrap().as_ref(), - &keystore, - &signing_context, - BackingKind::Threshold, - None, - ); - - assert_noop!( - ParaInclusion::process_candidates( - &allowed_relay_parents, - vec![(backed, chain_a_assignment.1)], - &group_validators, - false - ), - Error::::CandidateScheduledBeforeParaFree - ); - - >::remove(&chain_a); - } - // interfering code upgrade - reject { let mut candidate = TestCandidateBuilder { @@ -1260,7 +1143,9 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed, chain_a_assignment.1)], + &vec![(chain_a_assignment.0, vec![(backed, chain_a_assignment.1)])] + .into_iter() + .collect(), &group_validators, false ), @@ -1295,7 +1180,9 @@ fn candidate_checks() { assert_eq!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed, chain_a_assignment.1)], + &vec![(chain_a_assignment.0, vec![(backed, chain_a_assignment.1)])] + .into_iter() + .collect(), &group_validators, false ), @@ -1331,7 +1218,9 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed, chain_a_assignment.1)], + &vec![(chain_a_assignment.0, vec![(backed, chain_a_assignment.1)])] + .into_iter() + .collect(), &group_validators, false ), @@ -1367,7 +1256,9 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed, chain_a_assignment.1)], + &vec![(chain_a_assignment.0, 
vec![(backed, chain_a_assignment.1)])] + .into_iter() + .collect(), &group_validators, false ), @@ -1506,16 +1397,21 @@ fn backing_works() { ); let backed_candidates = vec![ - (backed_a.clone(), chain_a_assignment.1), - (backed_b.clone(), chain_b_assignment.1), - (backed_c, thread_a_assignment.1), - ]; + (chain_a_assignment.0, vec![(backed_a, chain_a_assignment.1)]), + (chain_b_assignment.0, vec![(backed_b, chain_b_assignment.1)]), + (thread_a_assignment.0, vec![(backed_c, thread_a_assignment.1)]), + ] + .into_iter() + .collect::>(); + let get_backing_group_idx = { // the order defines the group implicitly for this test case let backed_candidates_with_groups = backed_candidates - .iter() + .values() .enumerate() - .map(|(idx, (backed_candidate, _))| (backed_candidate.hash(), GroupIndex(idx as _))) + .map(|(idx, backed_candidates)| { + (backed_candidates.iter().next().unwrap().0.hash(), GroupIndex(idx as _)) + }) .collect::>(); move |candidate_hash_x: CandidateHash| -> Option { @@ -1534,7 +1430,7 @@ fn backing_works() { candidate_receipt_with_backing_validator_indices, } = ParaInclusion::process_candidates( &allowed_relay_parents, - backed_candidates.clone(), + &backed_candidates, &group_validators, false, ) @@ -1555,7 +1451,8 @@ fn backing_works() { CandidateHash, (CandidateReceipt, Vec<(ValidatorIndex, ValidityAttestation)>), >::new(); - backed_candidates.into_iter().for_each(|(backed_candidate, _)| { + backed_candidates.values().for_each(|backed_candidates| { + let backed_candidate = backed_candidates.iter().next().unwrap().0.clone(); let candidate_receipt_with_backers = intermediate .entry(backed_candidate.hash()) .or_insert_with(|| (backed_candidate.receipt(), Vec::new())); @@ -1606,20 +1503,21 @@ fn backing_works() { }; assert_eq!( >::get(&chain_a), - Some(CandidatePendingAvailability { - core: CoreIndex::from(0), - hash: candidate_a.hash(), - descriptor: candidate_a.descriptor, - availability_votes: default_availability_votes(), - relay_parent_number: 
System::block_number() - 1, - backed_in_number: System::block_number(), - backers, - backing_group: GroupIndex::from(0), - }) - ); - assert_eq!( - >::get(&chain_a), - Some(candidate_a.commitments), + Some( + [CandidatePendingAvailability { + core: CoreIndex::from(0), + hash: candidate_a.hash(), + descriptor: candidate_a.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + backers, + backing_group: GroupIndex::from(0), + commitments: candidate_a.commitments, + }] + .into_iter() + .collect::>() + ) ); let backers = { @@ -1631,38 +1529,40 @@ fn backing_works() { }; assert_eq!( >::get(&chain_b), - Some(CandidatePendingAvailability { - core: CoreIndex::from(1), - hash: candidate_b.hash(), - descriptor: candidate_b.descriptor, - availability_votes: default_availability_votes(), - relay_parent_number: System::block_number() - 1, - backed_in_number: System::block_number(), - backers, - backing_group: GroupIndex::from(1), - }) - ); - assert_eq!( - >::get(&chain_b), - Some(candidate_b.commitments), + Some( + [CandidatePendingAvailability { + core: CoreIndex::from(1), + hash: candidate_b.hash(), + descriptor: candidate_b.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + backers, + backing_group: GroupIndex::from(1), + commitments: candidate_b.commitments, + }] + .into_iter() + .collect::>() + ) ); assert_eq!( >::get(&thread_a), - Some(CandidatePendingAvailability { - core: CoreIndex::from(2), - hash: candidate_c.hash(), - descriptor: candidate_c.descriptor, - availability_votes: default_availability_votes(), - relay_parent_number: System::block_number() - 1, - backed_in_number: System::block_number(), - backers: backing_bitfield(&[4]), - backing_group: GroupIndex::from(2), - }) - ); - assert_eq!( - >::get(&thread_a), - Some(candidate_c.commitments), + Some( + 
[CandidatePendingAvailability { + core: CoreIndex::from(2), + hash: candidate_c.hash(), + descriptor: candidate_c.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + backers: backing_bitfield(&[4]), + backing_group: GroupIndex::from(2), + commitments: candidate_c.commitments + }] + .into_iter() + .collect::>() + ) ); }); } @@ -1792,16 +1692,21 @@ fn backing_works_with_elastic_scaling_mvp() { ); let backed_candidates = vec![ - (backed_a.clone(), CoreIndex(0)), - (backed_b_1.clone(), CoreIndex(1)), - (backed_b_2.clone(), CoreIndex(2)), - ]; + (chain_a, vec![(backed_a, CoreIndex(0))]), + (chain_b, vec![(backed_b_1, CoreIndex(1))]), + (chain_b, vec![(backed_b_2, CoreIndex(2))]), + ] + .into_iter() + .collect::>(); + let get_backing_group_idx = { // the order defines the group implicitly for this test case let backed_candidates_with_groups = backed_candidates - .iter() + .values() .enumerate() - .map(|(idx, (backed_candidate, _))| (backed_candidate.hash(), GroupIndex(idx as _))) + .map(|(idx, backed_candidates)| { + (backed_candidates.iter().next().unwrap().0.hash(), GroupIndex(idx as _)) + }) .collect::>(); move |candidate_hash_x: CandidateHash| -> Option { @@ -1820,7 +1725,7 @@ fn backing_works_with_elastic_scaling_mvp() { candidate_receipt_with_backing_validator_indices, } = ParaInclusion::process_candidates( &allowed_relay_parents, - backed_candidates.clone(), + &backed_candidates, &group_validators, true, ) @@ -1842,7 +1747,8 @@ fn backing_works_with_elastic_scaling_mvp() { CandidateHash, (CandidateReceipt, Vec<(ValidatorIndex, ValidityAttestation)>), >::new(); - backed_candidates.into_iter().for_each(|(backed_candidate, _)| { + backed_candidates.values().for_each(|backed_candidates| { + let backed_candidate = backed_candidates.iter().next().unwrap().0.clone(); let candidate_receipt_with_backers = expected .entry(backed_candidate.hash()) .or_insert_with(|| 
(backed_candidate.receipt(), Vec::new())); @@ -1881,39 +1787,41 @@ fn backing_works_with_elastic_scaling_mvp() { }; assert_eq!( >::get(&chain_a), - Some(CandidatePendingAvailability { - core: CoreIndex::from(0), - hash: candidate_a.hash(), - descriptor: candidate_a.descriptor, - availability_votes: default_availability_votes(), - relay_parent_number: System::block_number() - 1, - backed_in_number: System::block_number(), - backers, - backing_group: GroupIndex::from(0), - }) - ); - assert_eq!( - >::get(&chain_a), - Some(candidate_a.commitments), + Some( + [CandidatePendingAvailability { + core: CoreIndex::from(0), + hash: candidate_a.hash(), + descriptor: candidate_a.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + backers, + backing_group: GroupIndex::from(0), + commitments: candidate_a.commitments + }] + .into_iter() + .collect::>() + ) ); // Only one candidate for b will be recorded on chain. 
assert_eq!( >::get(&chain_b), - Some(CandidatePendingAvailability { - core: CoreIndex::from(2), - hash: candidate_b_2.hash(), - descriptor: candidate_b_2.descriptor, - availability_votes: default_availability_votes(), - relay_parent_number: System::block_number() - 1, - backed_in_number: System::block_number(), - backers: backing_bitfield(&[4]), - backing_group: GroupIndex::from(2), - }) - ); - assert_eq!( - >::get(&chain_b), - Some(candidate_b_2.commitments), + Some( + [CandidatePendingAvailability { + core: CoreIndex::from(2), + hash: candidate_b_2.hash(), + descriptor: candidate_b_2.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + backers: backing_bitfield(&[4]), + backing_group: GroupIndex::from(2), + commitments: candidate_b_2.commitments + }] + .into_iter() + .collect::>() + ) ); }); } @@ -1998,8 +1906,10 @@ fn can_include_candidate_with_ok_code_upgrade() { let ProcessedCandidates { core_indices: occupied_cores, .. 
} = ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed_a, chain_a_assignment.1)], - &group_validators, + &vec![(chain_a_assignment.0, vec![(backed_a, chain_a_assignment.1)])] + .into_iter() + .collect::>(), + group_validators, false, ) .expect("candidates scheduled, in order, and backed"); @@ -2015,20 +1925,21 @@ fn can_include_candidate_with_ok_code_upgrade() { }; assert_eq!( >::get(&chain_a), - Some(CandidatePendingAvailability { - core: CoreIndex::from(0), - hash: candidate_a.hash(), - descriptor: candidate_a.descriptor, - availability_votes: default_availability_votes(), - relay_parent_number: System::block_number() - 1, - backed_in_number: System::block_number(), - backers, - backing_group: GroupIndex::from(0), - }) - ); - assert_eq!( - >::get(&chain_a), - Some(candidate_a.commitments), + Some( + [CandidatePendingAvailability { + core: CoreIndex::from(0), + hash: candidate_a.hash(), + descriptor: candidate_a.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + backers, + backing_group: GroupIndex::from(0), + commitments: candidate_a.commitments + }] + .into_iter() + .collect::>() + ) ); }); } @@ -2209,14 +2120,16 @@ fn check_allowed_relay_parents() { ); let backed_candidates = vec![ - (backed_a, chain_a_assignment.1), - (backed_b, chain_b_assignment.1), - (backed_c, thread_a_assignment.1), - ]; + (chain_a_assignment.0, vec![(backed_a, chain_a_assignment.1)]), + (chain_b_assignment.0, vec![(backed_b, chain_b_assignment.1)]), + (thread_a_assignment.0, vec![(backed_c, thread_a_assignment.1)]), + ] + .into_iter() + .collect::>(); ParaInclusion::process_candidates( &allowed_relay_parents, - backed_candidates.clone(), + &backed_candidates, &group_validators, false, ) @@ -2282,7 +2195,7 @@ fn session_change_wipes() { let candidate = TestCandidateBuilder::default().build(); >::insert( &chain_a, - CandidatePendingAvailability { + 
[CandidatePendingAvailability { core: CoreIndex::from(0), hash: candidate.hash(), descriptor: candidate.descriptor.clone(), @@ -2291,13 +2204,15 @@ fn session_change_wipes() { backed_in_number: 6, backers: default_backing_bitfield(), backing_group: GroupIndex::from(0), - }, + commitments: candidate.commitments.clone(), + }] + .into_iter() + .collect::>(), ); - >::insert(&chain_a, candidate.commitments.clone()); >::insert( &chain_b, - CandidatePendingAvailability { + [CandidatePendingAvailability { core: CoreIndex::from(1), hash: candidate.hash(), descriptor: candidate.descriptor, @@ -2306,9 +2221,11 @@ fn session_change_wipes() { backed_in_number: 7, backers: default_backing_bitfield(), backing_group: GroupIndex::from(1), - }, + commitments: candidate.commitments, + }] + .into_iter() + .collect::>(), ); - >::insert(&chain_b, candidate.commitments); run_to_block(11, |_| None); @@ -2320,8 +2237,6 @@ fn session_change_wipes() { assert!(>::get(&chain_a).is_some()); assert!(>::get(&chain_b).is_some()); - assert!(>::get(&chain_a).is_some()); - assert!(>::get(&chain_b).is_some()); run_to_block(12, |n| match n { 12 => Some(SessionChangeNotification { @@ -2343,12 +2258,9 @@ fn session_change_wipes() { assert!(>::get(&chain_a).is_none()); assert!(>::get(&chain_b).is_none()); - assert!(>::get(&chain_a).is_none()); - assert!(>::get(&chain_b).is_none()); assert!(>::iter().collect::>().is_empty()); assert!(>::iter().collect::>().is_empty()); - assert!(>::iter().collect::>().is_empty()); }); } @@ -2453,7 +2365,9 @@ fn para_upgrade_delay_scheduled_from_inclusion() { let ProcessedCandidates { core_indices: occupied_cores, .. 
} = ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed_a, chain_a_assignment.1)], + &vec![(chain_a_assignment.0, vec![(backed_a, chain_a_assignment.1)])] + .into_iter() + .collect::>(), &group_validators, false, ) @@ -2488,11 +2402,10 @@ fn para_upgrade_delay_scheduled_from_inclusion() { expected_bits(), ); - let v = process_bitfields(expected_bits(), checked_bitfields, core_lookup); + let v = process_bitfields(checked_bitfields, core_lookup); assert_eq!(vec![(CoreIndex(0), candidate_a.hash())], v); assert!(>::get(&chain_a).is_none()); - assert!(>::get(&chain_a).is_none()); let active_vote_state = paras::Pallet::::active_vote_state(&new_validation_code_hash) .expect("prechecking must be initiated"); diff --git a/polkadot/runtime/parachains/src/paras_inherent/tests.rs b/polkadot/runtime/parachains/src/paras_inherent/tests.rs index b7285ec884ad..3033fae1d3db 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/tests.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/tests.rs @@ -1252,9 +1252,11 @@ mod sanitizers { mod candidates { use crate::{ - mock::set_disabled_validators, + mock::{set_disabled_validators, RuntimeOrigin}, scheduler::{common::Assignment, ParasEntry}, + util::{make_persisted_validation_data, make_persisted_validation_data_with_parent}, }; + use primitives::ValidationCode; use sp_std::collections::vec_deque::VecDeque; use super::*; @@ -1262,7 +1264,7 @@ mod sanitizers { // Backed candidates and scheduled parachains used for `sanitize_backed_candidates` testing struct TestData { backed_candidates: Vec, - all_backed_candidates_with_core: Vec<(BackedCandidate, CoreIndex)>, + all_backed_candidates_with_core: BTreeMap>, scheduled_paras: BTreeMap>, } @@ -1344,6 +1346,24 @@ mod sanitizers { ), ])); + // Set the on-chain included head data for paras. 
+ paras::Pallet::::set_current_head(ParaId::from(1), HeadData(vec![1])); + paras::Pallet::::set_current_head(ParaId::from(2), HeadData(vec![2])); + + // Set the current_code_hash + paras::Pallet::::force_set_current_code( + RuntimeOrigin::root(), + ParaId::from(1), + ValidationCode(vec![1]), + ) + .unwrap(); + paras::Pallet::::force_set_current_code( + RuntimeOrigin::root(), + ParaId::from(2), + ValidationCode(vec![2]), + ) + .unwrap(); + // Callback used for backing candidates let group_validators = |group_index: GroupIndex| { match group_index { @@ -1363,8 +1383,15 @@ mod sanitizers { para_id: ParaId::from(idx1), relay_parent, pov_hash: Hash::repeat_byte(idx1 as u8), - persisted_validation_data_hash: [42u8; 32].into(), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(idx1), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![idx1 as u8]), ..Default::default() } .build(); @@ -1400,21 +1427,16 @@ mod sanitizers { ] ); - let all_backed_candidates_with_core = backed_candidates - .iter() - .map(|candidate| { - // Only one entry for this test data. - ( - candidate.clone(), - scheduled - .get(&candidate.descriptor().para_id) - .unwrap() - .first() - .copied() - .unwrap(), - ) - }) - .collect(); + let mut all_backed_candidates_with_core = BTreeMap::new(); + + for candidate in backed_candidates.iter() { + let para_id = candidate.descriptor().para_id; + + all_backed_candidates_with_core.entry(para_id).or_insert(vec![]).push(( + candidate.clone(), + scheduled.get(¶_id).unwrap().first().copied().unwrap(), + )); + } TestData { backed_candidates, @@ -1537,6 +1559,17 @@ mod sanitizers { ), ])); + // Set the on-chain included head data and current code hash. 
+ for id in 1..=5u32 { + paras::Pallet::::set_current_head(ParaId::from(id), HeadData(vec![id as u8])); + paras::Pallet::::force_set_current_code( + RuntimeOrigin::root(), + ParaId::from(id), + ValidationCode(vec![id as u8]), + ) + .unwrap(); + } + // Callback used for backing candidates let group_validators = |group_index: GroupIndex| { match group_index { @@ -1554,7 +1587,7 @@ mod sanitizers { }; let mut backed_candidates = vec![]; - let mut all_backed_candidates_with_core = vec![]; + let mut all_backed_candidates_with_core = BTreeMap::new(); // Para 1 { @@ -1562,14 +1595,22 @@ mod sanitizers { para_id: ParaId::from(1), relay_parent, pov_hash: Hash::repeat_byte(1 as u8), - persisted_validation_data_hash: [42u8; 32].into(), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(1), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![1]), ..Default::default() } .build(); collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + let prev_candidate = candidate.clone(); let backed: BackedCandidate = back_candidate( candidate, &validators, @@ -1581,15 +1622,26 @@ mod sanitizers { ); backed_candidates.push(backed.clone()); if core_index_enabled { - all_backed_candidates_with_core.push((backed, CoreIndex(0))); + all_backed_candidates_with_core + .entry(ParaId::from(1)) + .or_insert(vec![]) + .push((backed, CoreIndex(0))); } let mut candidate = TestCandidateBuilder { para_id: ParaId::from(1), relay_parent, pov_hash: Hash::repeat_byte(2 as u8), - persisted_validation_data_hash: [42u8; 32].into(), + persisted_validation_data_hash: make_persisted_validation_data_with_parent::< + Test, + >( + RELAY_PARENT_NUM, + Default::default(), + prev_candidate.commitments.head_data, + ) + .hash(), hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![1]), ..Default::default() } .build(); @@ -1607,7 +1659,10 @@ mod sanitizers { ); 
backed_candidates.push(backed.clone()); if core_index_enabled { - all_backed_candidates_with_core.push((backed, CoreIndex(1))); + all_backed_candidates_with_core + .entry(ParaId::from(1)) + .or_insert(vec![]) + .push((backed, CoreIndex(1))); } } @@ -1617,8 +1672,15 @@ mod sanitizers { para_id: ParaId::from(2), relay_parent, pov_hash: Hash::repeat_byte(3 as u8), - persisted_validation_data_hash: [42u8; 32].into(), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(2), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![2]), ..Default::default() } .build(); @@ -1636,7 +1698,10 @@ mod sanitizers { ); backed_candidates.push(backed.clone()); if core_index_enabled { - all_backed_candidates_with_core.push((backed, CoreIndex(2))); + all_backed_candidates_with_core + .entry(ParaId::from(2)) + .or_insert(vec![]) + .push((backed, CoreIndex(2))); } } @@ -1646,8 +1711,15 @@ mod sanitizers { para_id: ParaId::from(3), relay_parent, pov_hash: Hash::repeat_byte(4 as u8), - persisted_validation_data_hash: [42u8; 32].into(), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(3), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![3]), ..Default::default() } .build(); @@ -1664,7 +1736,10 @@ mod sanitizers { core_index_enabled.then_some(CoreIndex(4 as u32)), ); backed_candidates.push(backed.clone()); - all_backed_candidates_with_core.push((backed, CoreIndex(4))); + all_backed_candidates_with_core + .entry(ParaId::from(3)) + .or_insert(vec![]) + .push((backed, CoreIndex(4))); } // Para 4 @@ -1673,14 +1748,22 @@ mod sanitizers { para_id: ParaId::from(4), relay_parent, pov_hash: Hash::repeat_byte(5 as u8), - persisted_validation_data_hash: [42u8; 32].into(), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(4), + 
RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![4]), ..Default::default() } .build(); collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + let prev_candidate = candidate.clone(); let backed = back_candidate( candidate, &validators, @@ -1691,14 +1774,25 @@ mod sanitizers { None, ); backed_candidates.push(backed.clone()); - all_backed_candidates_with_core.push((backed, CoreIndex(5))); + all_backed_candidates_with_core + .entry(ParaId::from(4)) + .or_insert(vec![]) + .push((backed, CoreIndex(5))); let mut candidate = TestCandidateBuilder { para_id: ParaId::from(4), relay_parent, pov_hash: Hash::repeat_byte(6 as u8), - persisted_validation_data_hash: [42u8; 32].into(), + persisted_validation_data_hash: make_persisted_validation_data_with_parent::< + Test, + >( + RELAY_PARENT_NUM, + Default::default(), + prev_candidate.commitments.head_data, + ) + .hash(), hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![4]), ..Default::default() } .build(); @@ -1768,22 +1862,15 @@ mod sanitizers { scheduled_paras: scheduled, } = get_test_data(core_index_enabled); - let has_concluded_invalid = - |_idx: usize, _backed_candidate: &BackedCandidate| -> bool { false }; - assert_eq!( - sanitize_backed_candidates::( + sanitize_backed_candidates::( backed_candidates.clone(), &>::allowed_relay_parents(), - has_concluded_invalid, + BTreeSet::new(), scheduled, core_index_enabled ), - SanitizedBackedCandidates { - backed_candidates_with_core: all_backed_candidates_with_core, - votes_from_disabled_were_dropped: false, - dropped_unscheduled_candidates: false - } + all_backed_candidates_with_core, ); }); } @@ -1799,22 +1886,15 @@ mod sanitizers { scheduled_paras: scheduled, } = get_test_data_multiple_cores_per_para(core_index_enabled); - let has_concluded_invalid = - |_idx: usize, _backed_candidate: &BackedCandidate| -> bool { false }; - assert_eq!( - 
sanitize_backed_candidates::( + sanitize_backed_candidates::( backed_candidates.clone(), &>::allowed_relay_parents(), - has_concluded_invalid, + BTreeSet::new(), scheduled, core_index_enabled ), - SanitizedBackedCandidates { - backed_candidates_with_core: expected_all_backed_candidates_with_core, - votes_from_disabled_were_dropped: false, - dropped_unscheduled_candidates: true - } + expected_all_backed_candidates_with_core, ); }); } @@ -1836,24 +1916,16 @@ mod sanitizers { get_test_data(core_index_enabled) }; let scheduled = BTreeMap::new(); - let has_concluded_invalid = - |_idx: usize, _backed_candidate: &BackedCandidate| -> bool { false }; - - let SanitizedBackedCandidates { - backed_candidates_with_core: sanitized_backed_candidates, - votes_from_disabled_were_dropped, - dropped_unscheduled_candidates, - } = sanitize_backed_candidates::( + + let sanitized_backed_candidates = sanitize_backed_candidates::( backed_candidates.clone(), &>::allowed_relay_parents(), - has_concluded_invalid, + BTreeSet::new(), scheduled, core_index_enabled, ); assert!(sanitized_backed_candidates.is_empty()); - assert!(!votes_from_disabled_were_dropped); - assert!(dropped_unscheduled_candidates); }); } @@ -1868,7 +1940,7 @@ mod sanitizers { // mark every second one as concluded invalid let set = { - let mut set = std::collections::HashSet::new(); + let mut set = std::collections::BTreeSet::new(); for (idx, backed_candidate) in backed_candidates.iter().enumerate() { if idx & 0x01 == 0 { set.insert(backed_candidate.hash()); @@ -1876,23 +1948,18 @@ mod sanitizers { } set }; - let has_concluded_invalid = - |_idx: usize, candidate: &BackedCandidate| set.contains(&candidate.hash()); - let SanitizedBackedCandidates { - backed_candidates_with_core: sanitized_backed_candidates, - votes_from_disabled_were_dropped, - dropped_unscheduled_candidates, - } = sanitize_backed_candidates::( + let sanitized_backed_candidates: BTreeMap< + ParaId, + Vec<(BackedCandidate<_>, CoreIndex)>, + > = 
sanitize_backed_candidates::( backed_candidates.clone(), &>::allowed_relay_parents(), - has_concluded_invalid, + set, scheduled, core_index_enabled, ); assert_eq!(sanitized_backed_candidates.len(), backed_candidates.len() / 2); - assert!(!votes_from_disabled_were_dropped); - assert!(!dropped_unscheduled_candidates); }); } @@ -1911,14 +1978,15 @@ mod sanitizers { // Eve is disabled but no backing statement is signed by it so nothing should be // filtered - assert!(!filter_backed_statements_from_disabled_validators::( + filter_backed_statements_from_disabled_validators::( &mut all_backed_candidates_with_core, &>::allowed_relay_parents(), - core_index_enabled - )); + core_index_enabled, + ); assert_eq!(all_backed_candidates_with_core, before); }); } + #[rstest] #[case(false)] #[case(true)] @@ -1941,11 +2009,22 @@ mod sanitizers { // Verify the initial state is as expected assert_eq!( - all_backed_candidates_with_core.get(0).unwrap().0.validity_votes().len(), + all_backed_candidates_with_core + .get(&ParaId::from(1)) + .unwrap() + .iter() + .next() + .unwrap() + .0 + .validity_votes() + .len(), 2 ); let (validator_indices, maybe_core_index) = all_backed_candidates_with_core - .get(0) + .get(&ParaId::from(1)) + .unwrap() + .iter() + .next() .unwrap() .0 .validator_indices_and_core_index(core_index_enabled); @@ -1957,16 +2036,28 @@ mod sanitizers { assert_eq!(validator_indices.get(0).unwrap(), true); assert_eq!(validator_indices.get(1).unwrap(), true); - let untouched = all_backed_candidates_with_core.get(1).unwrap().0.clone(); + let untouched = all_backed_candidates_with_core + .get(&ParaId::from(2)) + .unwrap() + .iter() + .next() + .unwrap() + .0 + .clone(); - assert!(filter_backed_statements_from_disabled_validators::( + let before = all_backed_candidates_with_core.clone(); + filter_backed_statements_from_disabled_validators::( &mut all_backed_candidates_with_core, &>::allowed_relay_parents(), - core_index_enabled - )); + core_index_enabled, + ); + 
assert_eq!(before.len(), all_backed_candidates_with_core.len()); let (validator_indices, maybe_core_index) = all_backed_candidates_with_core - .get(0) + .get(&ParaId::from(1)) + .unwrap() + .iter() + .next() .unwrap() .0 .validator_indices_and_core_index(core_index_enabled); @@ -1980,14 +2071,31 @@ mod sanitizers { assert_eq!(all_backed_candidates_with_core.len(), 2); // but the first one should have only one validity vote assert_eq!( - all_backed_candidates_with_core.get(0).unwrap().0.validity_votes().len(), + all_backed_candidates_with_core + .get(&ParaId::from(1)) + .unwrap() + .iter() + .next() + .unwrap() + .0 + .validity_votes() + .len(), 1 ); // Validator 0 vote should be dropped, validator 1 - retained assert_eq!(validator_indices.get(0).unwrap(), false); assert_eq!(validator_indices.get(1).unwrap(), true); // the second candidate shouldn't be modified - assert_eq!(all_backed_candidates_with_core.get(1).unwrap().0, untouched); + assert_eq!( + all_backed_candidates_with_core + .get(&ParaId::from(2)) + .unwrap() + .iter() + .next() + .unwrap() + .0, + untouched + ); }); } @@ -2004,19 +2112,44 @@ mod sanitizers { // Verify the initial state is as expected assert_eq!( - all_backed_candidates_with_core.get(0).unwrap().0.validity_votes().len(), + all_backed_candidates_with_core + .get(&ParaId::from(1)) + .unwrap() + .iter() + .next() + .unwrap() + .0 + .validity_votes() + .len(), 2 ); - let untouched = all_backed_candidates_with_core.get(1).unwrap().0.clone(); + let untouched = all_backed_candidates_with_core + .get(&ParaId::from(2)) + .unwrap() + .iter() + .next() + .unwrap() + .0 + .clone(); - assert!(filter_backed_statements_from_disabled_validators::( + filter_backed_statements_from_disabled_validators::( &mut all_backed_candidates_with_core, &>::allowed_relay_parents(), - core_index_enabled - )); + core_index_enabled, + ); assert_eq!(all_backed_candidates_with_core.len(), 1); - assert_eq!(all_backed_candidates_with_core.get(0).unwrap().0, untouched); + 
assert_eq!( + all_backed_candidates_with_core + .get(&ParaId::from(2)) + .unwrap() + .iter() + .next() + .unwrap() + .0, + untouched + ); + assert_eq!(all_backed_candidates_with_core.get(&ParaId::from(1)), None); }); } } From 7733b253994feb33f1d8d4c0df5ee09dfd30f2bf Mon Sep 17 00:00:00 2001 From: alindima Date: Fri, 1 Mar 2024 16:14:53 +0200 Subject: [PATCH 18/44] fix inclusion tests --- .../runtime/parachains/src/inclusion/mod.rs | 2 - .../runtime/parachains/src/inclusion/tests.rs | 146 ++++++++++-------- 2 files changed, 83 insertions(+), 65 deletions(-) diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index f5c245ea4564..2cac75037124 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -336,8 +336,6 @@ pub mod pallet { UnscheduledCandidate, /// Candidate scheduled despite pending candidate already existing for the para. CandidateScheduledBeforeParaFree, - /// Scheduled cores out of order. - ScheduledOutOfOrder, /// Head data exceeds the configured maximum. HeadDataTooLarge, /// Code upgrade prematurely. diff --git a/polkadot/runtime/parachains/src/inclusion/tests.rs b/polkadot/runtime/parachains/src/inclusion/tests.rs index 86b8be638b4d..f0daff2f5580 100644 --- a/polkadot/runtime/parachains/src/inclusion/tests.rs +++ b/polkadot/runtime/parachains/src/inclusion/tests.rs @@ -428,8 +428,8 @@ fn collect_timedout_cleans_up_pending() { ParaInclusion::collect_timedout(Scheduler::availability_timeout_predicate()); - assert!(>::get(&chain_a).is_none()); - assert!(>::get(&chain_b).is_some()); + assert!(>::get(&chain_a).unwrap().is_empty()); + assert!(!>::get(&chain_b).unwrap().is_empty()); }); } @@ -778,7 +778,7 @@ fn supermajority_bitfields_trigger_availability() { // chain A had 4 signing off, which is >= threshold. // chain B has 3 signing off, which is < threshold. 
- assert!(>::get(&chain_a).is_none()); + assert!(>::get(&chain_a).unwrap().is_empty()); assert_eq!( >::get(&chain_b) .unwrap() @@ -944,21 +944,19 @@ fn candidate_checks() { None, ); - // out-of-order manifests as unscheduled. - assert_noop!( - ParaInclusion::process_candidates( - &allowed_relay_parents, - &vec![ - (chain_b_assignment.0, vec![(backed_b, chain_b_assignment.1)]), - (chain_a_assignment.0, vec![(backed_a, chain_a_assignment.1)]) - ] - .into_iter() - .collect(), - &group_validators, - false - ), - Error::::ScheduledOutOfOrder - ); + // no longer needed to be sorted by core index. + assert!(ParaInclusion::process_candidates( + &allowed_relay_parents, + &vec![ + (chain_b_assignment.0, vec![(backed_b, chain_b_assignment.1)]), + (chain_a_assignment.0, vec![(backed_a, chain_a_assignment.1)]) + ] + .into_iter() + .collect(), + &group_validators, + false + ) + .is_ok()); } // candidate not backed. @@ -1650,11 +1648,17 @@ fn backing_works_with_elastic_scaling_mvp() { .build(); collator_sign_candidate(Sr25519Keyring::One, &mut candidate_b_1); + // Make candidate b2 a child of b1. 
let mut candidate_b_2 = TestCandidateBuilder { para_id: chain_b, relay_parent: System::parent_hash(), pov_hash: Hash::repeat_byte(3), - persisted_validation_data_hash: make_vdata_hash(chain_b).unwrap(), + persisted_validation_data_hash: make_persisted_validation_data_with_parent::( + RELAY_PARENT_NUM, + Default::default(), + candidate_b_1.commitments.head_data.clone(), + ) + .hash(), hrmp_watermark: RELAY_PARENT_NUM, ..Default::default() } @@ -1691,13 +1695,10 @@ fn backing_works_with_elastic_scaling_mvp() { Some(CoreIndex(2)), ); - let backed_candidates = vec![ - (chain_a, vec![(backed_a, CoreIndex(0))]), - (chain_b, vec![(backed_b_1, CoreIndex(1))]), - (chain_b, vec![(backed_b_2, CoreIndex(2))]), - ] - .into_iter() - .collect::>(); + let mut backed_candidates = BTreeMap::new(); + backed_candidates.insert(chain_a, vec![(backed_a, CoreIndex(0))]); + backed_candidates + .insert(chain_b, vec![(backed_b_1, CoreIndex(1)), (backed_b_2, CoreIndex(2))]); let get_backing_group_idx = { // the order defines the group implicitly for this test case @@ -1705,9 +1706,14 @@ fn backing_works_with_elastic_scaling_mvp() { .values() .enumerate() .map(|(idx, backed_candidates)| { - (backed_candidates.iter().next().unwrap().0.hash(), GroupIndex(idx as _)) + backed_candidates + .iter() + .enumerate() + .map(|(i, c)| (c.0.hash(), GroupIndex((idx + i) as _))) + .collect() }) - .collect::>(); + .collect::>>() + .concat(); move |candidate_hash_x: CandidateHash| -> Option { backed_candidates_with_groups.iter().find_map(|(candidate_hash, grp)| { @@ -1731,8 +1737,7 @@ fn backing_works_with_elastic_scaling_mvp() { ) .expect("candidates scheduled, in order, and backed"); - // Both b candidates will be backed. However, only one will be recorded on-chain and proceed - // with being made available. + // Both b candidates will be backed. 
assert_eq!( occupied_cores, vec![ @@ -1748,26 +1753,28 @@ fn backing_works_with_elastic_scaling_mvp() { (CandidateReceipt, Vec<(ValidatorIndex, ValidityAttestation)>), >::new(); backed_candidates.values().for_each(|backed_candidates| { - let backed_candidate = backed_candidates.iter().next().unwrap().0.clone(); - let candidate_receipt_with_backers = expected - .entry(backed_candidate.hash()) - .or_insert_with(|| (backed_candidate.receipt(), Vec::new())); - let (validator_indices, _maybe_core_index) = - backed_candidate.validator_indices_and_core_index(true); - assert_eq!(backed_candidate.validity_votes().len(), validator_indices.count_ones()); - candidate_receipt_with_backers.1.extend( - validator_indices - .iter() - .enumerate() - .filter(|(_, signed)| **signed) - .zip(backed_candidate.validity_votes().iter().cloned()) - .filter_map(|((validator_index_within_group, _), attestation)| { - let grp_idx = get_backing_group_idx(backed_candidate.hash()).unwrap(); - group_validators(grp_idx).map(|validator_indices| { - (validator_indices[validator_index_within_group], attestation) - }) - }), - ); + for backed_candidate in backed_candidates { + let backed_candidate = backed_candidate.0.clone(); + let candidate_receipt_with_backers = expected + .entry(backed_candidate.hash()) + .or_insert_with(|| (backed_candidate.receipt(), Vec::new())); + let (validator_indices, _maybe_core_index) = + backed_candidate.validator_indices_and_core_index(true); + assert_eq!(backed_candidate.validity_votes().len(), validator_indices.count_ones()); + candidate_receipt_with_backers.1.extend( + validator_indices + .iter() + .enumerate() + .filter(|(_, signed)| **signed) + .zip(backed_candidate.validity_votes().iter().cloned()) + .filter_map(|((validator_index_within_group, _), attestation)| { + let grp_idx = get_backing_group_idx(backed_candidate.hash()).unwrap(); + group_validators(grp_idx).map(|validator_indices| { + (validator_indices[validator_index_within_group], attestation) + }) + }), + ); 
+ } }); assert_eq!( @@ -1804,21 +1811,34 @@ fn backing_works_with_elastic_scaling_mvp() { ) ); - // Only one candidate for b will be recorded on chain. + // Both candidates of b will be recorded on chain. assert_eq!( >::get(&chain_b), Some( - [CandidatePendingAvailability { - core: CoreIndex::from(2), - hash: candidate_b_2.hash(), - descriptor: candidate_b_2.descriptor, - availability_votes: default_availability_votes(), - relay_parent_number: System::block_number() - 1, - backed_in_number: System::block_number(), - backers: backing_bitfield(&[4]), - backing_group: GroupIndex::from(2), - commitments: candidate_b_2.commitments - }] + [ + CandidatePendingAvailability { + core: CoreIndex::from(1), + hash: candidate_b_1.hash(), + descriptor: candidate_b_1.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + backers: backing_bitfield(&[2, 3]), + backing_group: GroupIndex::from(1), + commitments: candidate_b_1.commitments + }, + CandidatePendingAvailability { + core: CoreIndex::from(2), + hash: candidate_b_2.hash(), + descriptor: candidate_b_2.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + backers: backing_bitfield(&[4]), + backing_group: GroupIndex::from(2), + commitments: candidate_b_2.commitments + } + ] .into_iter() .collect::>() ) @@ -2405,7 +2425,7 @@ fn para_upgrade_delay_scheduled_from_inclusion() { let v = process_bitfields(checked_bitfields, core_lookup); assert_eq!(vec![(CoreIndex(0), candidate_a.hash())], v); - assert!(>::get(&chain_a).is_none()); + assert!(>::get(&chain_a).unwrap().is_empty()); let active_vote_state = paras::Pallet::::active_vote_state(&new_validation_code_hash) .expect("prechecking must be initiated"); From c32f925dbc413861f1af0cbab815b17aa12488d2 Mon Sep 17 00:00:00 2001 From: alindima Date: Fri, 1 Mar 2024 17:11:52 +0200 
Subject: [PATCH 19/44] fix some more tests --- .../runtime/parachains/src/inclusion/mod.rs | 2 -- .../runtime/parachains/src/inclusion/tests.rs | 26 +++++++++++-------- 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 2cac75037124..c15487d2c698 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -356,8 +356,6 @@ pub mod pallet { InvalidBacking, /// Collator did not sign PoV. NotCollatorSigned, - /// The validation data hash does not match expected. - ValidationDataHashMismatch, /// The downward message queue is not processed correctly. IncorrectDownwardMessageHandling, /// At least one upward message sent does not pass the acceptance criteria. diff --git a/polkadot/runtime/parachains/src/inclusion/tests.rs b/polkadot/runtime/parachains/src/inclusion/tests.rs index f0daff2f5580..75dc1de992b6 100644 --- a/polkadot/runtime/parachains/src/inclusion/tests.rs +++ b/polkadot/runtime/parachains/src/inclusion/tests.rs @@ -1175,17 +1175,21 @@ fn candidate_checks() { None, ); - assert_eq!( - ParaInclusion::process_candidates( - &allowed_relay_parents, - &vec![(chain_a_assignment.0, vec![(backed, chain_a_assignment.1)])] - .into_iter() - .collect(), - &group_validators, - false - ), - Err(Error::::ValidationDataHashMismatch.into()), - ); + // validation data hash mismatch is not fatal, but candidates will be dropped. 
+ let ProcessedCandidates { + core_indices: occupied_cores, + candidate_receipt_with_backing_validator_indices, + } = ParaInclusion::process_candidates( + &allowed_relay_parents, + &vec![(chain_a_assignment.0, vec![(backed, chain_a_assignment.1)])] + .into_iter() + .collect(), + &group_validators, + false, + ) + .unwrap(); + assert!(occupied_cores.is_empty()); + assert!(candidate_receipt_with_backing_validator_indices.is_empty()); } // bad validation code hash From 40e2933d3b1d19656e42b60c80b6f5be951b89ed Mon Sep 17 00:00:00 2001 From: alindima Date: Fri, 1 Mar 2024 17:21:06 +0200 Subject: [PATCH 20/44] clippy --- .../runtime/parachains/src/inclusion/mod.rs | 23 ++++++++----------- .../parachains/src/paras_inherent/mod.rs | 6 +---- 2 files changed, 10 insertions(+), 19 deletions(-) diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index c15487d2c698..806a9790fccd 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -741,12 +741,9 @@ impl Pallet { // Get the latest backed output head data of this para. 
pub(crate) fn para_latest_head_data(para_id: &ParaId) -> Option { - match >::get(para_id) - .map(|pending_candidates| { - pending_candidates.back().map(|x| x.commitments.head_data.clone()) - }) - .flatten() - { + match >::get(para_id).and_then(|pending_candidates| { + pending_candidates.back().map(|x| x.commitments.head_data.clone()) + }) { Some(head_data) => Some(head_data), None => >::para_head(para_id), } @@ -1135,14 +1132,12 @@ impl Pallet { pub(crate) fn candidate_pending_availability( para: ParaId, ) -> Option> { - >::get(¶) - .map(|p| { - p.get(0).map(|p| CommittedCandidateReceipt { - descriptor: p.descriptor.clone(), - commitments: p.commitments.clone(), - }) + >::get(¶).and_then(|p| { + p.get(0).map(|p| CommittedCandidateReceipt { + descriptor: p.descriptor.clone(), + commitments: p.commitments.clone(), }) - .flatten() + }) } /// Returns the metadata around the first candidate pending availability for the @@ -1150,7 +1145,7 @@ impl Pallet { pub(crate) fn pending_availability( para: ParaId, ) -> Option>> { - >::get(¶).map(|p| p.get(0).cloned()).flatten() + >::get(¶).and_then(|p| p.get(0).cloned()) } } diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index f8e6203e4653..684b5a31c0a2 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -1282,7 +1282,7 @@ fn filter_unchained_candidates { + Ok(Err(PVDMismatch)) => { log::debug!( target: LOG_TARGET, "Found backed candidates which don't form a chain for paraid {:?}. The order may also be wrong. Dropping the candidates.", @@ -1290,10 +1290,6 @@ fn filter_unchained_candidates { - // Currently unreachable as the only error is PVDMismatch. 
- false - }, Ok(Ok(_)) => true, Err(err) => { log::debug!( From 630261e258d3299abb065985d7e90c3a35ac0328 Mon Sep 17 00:00:00 2001 From: alindima Date: Mon, 11 Mar 2024 12:59:40 +0200 Subject: [PATCH 21/44] some review comments --- .../runtime/parachains/src/inclusion/mod.rs | 54 ++++++++++++------- .../parachains/src/paras_inherent/mod.rs | 10 ++-- 2 files changed, 40 insertions(+), 24 deletions(-) diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 806a9790fccd..1bf6583a8555 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -644,9 +644,11 @@ impl Pallet { // PVD hash should have already been checked in `filter_unchained_candidates`, but do it // again for safety. - // this cannot be None if the parachain was registered. let mut latest_head_data = match Self::para_latest_head_data(para_id) { - None => continue, + None => { + defensive!("Latest included head data for paraid {:?} is None", para_id); + continue + }, Some(latest_head_data) => latest_head_data, }; @@ -1027,7 +1029,7 @@ impl Pallet { pred: impl Fn(BlockNumberFor) -> AvailabilityTimeoutStatus>, ) -> Vec { let timed_out: Vec<_> = - Self::free_cores(|candidate| pred(candidate.backed_in_number).timed_out, None) + Self::free_failed_cores(|candidate| pred(candidate.backed_in_number).timed_out, None) .collect(); let mut timed_out_cores = Vec::with_capacity(timed_out.len()); for candidate in timed_out.iter() { @@ -1055,16 +1057,23 @@ impl Pallet { pub(crate) fn collect_disputed( disputed: &BTreeSet, ) -> impl Iterator + '_ { - Self::free_cores(|candidate| disputed.contains(&candidate.hash), Some(disputed.len())) - .map(|candidate| (candidate.core, candidate.hash)) + Self::free_failed_cores( + |candidate| disputed.contains(&candidate.hash), + Some(disputed.len()), + ) + .map(|candidate| (candidate.core, candidate.hash)) } - fn free_cores>) -> bool>( + // Clean up cores whose 
candidates are deemed as failed by the predicate. `pred` returns true if + // a candidate is considered failed. + // A failed candidate also frees all subsequent cores which hold descendants of said candidate. + fn free_failed_cores< + P: Fn(&CandidatePendingAvailability>) -> bool, + >( pred: P, capacity_hint: Option, ) -> impl Iterator>> { - let mut cleaned_up_cores = - if let Some(capacity) = capacity_hint { Vec::with_capacity(capacity) } else { vec![] }; + let mut earliest_dropped_indices: BTreeMap = BTreeMap::new(); for (para_id, pending_candidates) in >::iter() { // We assume that pending candidates are stored in dependency order. So we need to store @@ -1074,22 +1083,29 @@ impl Pallet { if pred(candidate) { earliest_dropped_idx = Some(index); // Since we're looping the candidates in dependency order, we've found the - // earliest disputed index for this paraid. + // earliest failed index for this paraid. break; } } if let Some(earliest_dropped_idx) = earliest_dropped_idx { - // Do cleanups and record the cleaned up cores - >::mutate(¶_id, |record| { - if let Some(record) = record { - let cleaned_up = record.drain(earliest_dropped_idx..); - cleaned_up_cores.extend(cleaned_up); - } - }); + earliest_dropped_indices.insert(para_id, earliest_dropped_idx); } } + let mut cleaned_up_cores = + if let Some(capacity) = capacity_hint { Vec::with_capacity(capacity) } else { vec![] }; + + for (para_id, earliest_dropped_idx) in earliest_dropped_indices { + // Do cleanups and record the cleaned up cores + >::mutate(¶_id, |record| { + if let Some(record) = record { + let cleaned_up = record.drain(earliest_dropped_idx..); + cleaned_up_cores.extend(cleaned_up); + } + }); + } + cleaned_up_cores.into_iter() } @@ -1104,9 +1120,9 @@ impl Pallet { let enacted_candidate = >::mutate(¶, |candidates| match candidates { Some(candidates) => candidates.pop_front(), - // TODO: this should also check the descendants, as they have been made available - // before their parent. 
Or just change the semantic of force_enact to enact all - // candidates of a para. + // TODO: this should also check the descendants, as they may have been made + // available before their parent. Or just change the semantic of force_enact to + // enact all candidates of a para. _ => None, }); diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 684b5a31c0a2..8f05700a90e1 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -34,6 +34,7 @@ use crate::{ }; use bitvec::prelude::BitVec; use frame_support::{ + defensive, dispatch::{DispatchErrorWithPostInfo, PostDispatchInfo}, inherent::{InherentData, InherentIdentifier, MakeFatalError, ProvideInherent}, pallet_prelude::*, @@ -1241,10 +1242,9 @@ fn filter_unchained_candidates = BTreeMap::new(); for para_id in candidates.keys() { - // this cannot be None let latest_head_data = match >::para_latest_head_data(¶_id) { None => { - log::warn!(target: LOG_TARGET, "Latest included head data for paraid {:?} is None", para_id); + defensive!("Latest included head data for paraid {:?} is None", para_id); continue }, Some(latest_head_data) => latest_head_data, @@ -1328,8 +1328,8 @@ fn map_candidates_to_cores Date: Mon, 11 Mar 2024 15:07:35 +0200 Subject: [PATCH 22/44] simplify update_pending_availability_and_get_freed_cores --- .../runtime/parachains/src/inclusion/mod.rs | 111 +++++++++--------- 1 file changed, 55 insertions(+), 56 deletions(-) diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 1bf6583a8555..18790f1d4299 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -513,7 +513,7 @@ impl Pallet { let now = >::block_number(); let threshold = availability_threshold(validators.len()); - let mut paras_made_available = BTreeSet::new(); + let mut 
votes_per_core: BTreeMap> = BTreeMap::new(); for (checked_bitfield, validator_index) in signed_bitfields.into_iter().map(|signed_bitfield| { @@ -523,11 +523,28 @@ impl Pallet { }) { for (bit_idx, _) in checked_bitfield.0.iter().enumerate().filter(|(_, is_av)| **is_av) { let core_index = CoreIndex(bit_idx as u32); - if let Some(para_id) = core_lookup(core_index) { - >::mutate(¶_id, |candidates| { - if let Some(candidates) = candidates { - for (candidate_idx, candidate) in candidates.iter_mut().enumerate() { - if candidate.core == core_index { + votes_per_core + .entry(core_index) + .or_insert_with(|| BTreeSet::new()) + .insert(validator_index); + } + + let record = + AvailabilityBitfieldRecord { bitfield: checked_bitfield, submitted_at: now }; + + >::insert(&validator_index, record); + } + + // Update the availability votes for each candidate and take note of what cores were made + // available. + let mut candidates_made_available: BTreeMap> = BTreeMap::new(); + for (core_index, validator_indices) in votes_per_core { + if let Some(para_id) = core_lookup(core_index) { + >::mutate(¶_id, |candidates| { + if let Some(candidates) = candidates { + for (index, candidate) in candidates.iter_mut().enumerate() { + if candidate.core == core_index { + for validator_index in validator_indices.iter() { // defensive check - this is constructed by loading the // availability bitfield record, which is always `Some` if // the core is occupied - that's why we're here. @@ -537,61 +554,43 @@ impl Pallet { { *bit = true; } - - // In terms of candidate enactment, we only care if the first - // candidate of this para was made available. We don't enact - // candidates until their predecessors have been enacted. 
- if candidate_idx == 0 && - candidate.availability_votes.count_ones() >= threshold - { - paras_made_available.insert(para_id); - } } } + + if candidate.availability_votes.count_ones() >= threshold { + candidates_made_available + .entry(para_id) + .or_insert_with(|| BTreeSet::new()) + .insert(index); + } } - }); - } else { - // No parachain is occupying that core yet. - } + } + }); + } else { + // No parachain is occupying that core yet. } - - let record = - AvailabilityBitfieldRecord { bitfield: checked_bitfield, submitted_at: now }; - - >::insert(&validator_index, record); } - let mut freed_cores = Vec::with_capacity(paras_made_available.len()); - // Iterate through the paraids that had one of their candidates made available and see if we - // can free any of its occupied cores. - // We can only free cores whose candidates form a chain starting from the included para - // head. - // We assume dependency order is preserved in `PendingAvailability`. - for (para_id, candidates_pending_availability) in paras_made_available - .into_iter() - .filter_map(|para_id| >::get(para_id).map(|c| (para_id, c))) - { - let mut stopped_at_index = None; - - // We try to check all candidates, because some of them may have already been made - // available in the past but their ancestors were not. However, we can stop when we find - // the first one which is not available yet. - for (index, pending_availability) in candidates_pending_availability.iter().enumerate() - { - if pending_availability.availability_votes.count_ones() >= threshold { - freed_cores.push((pending_availability.core, pending_availability.hash)); - stopped_at_index = Some(index); - } else { - break - } - } + let mut freed_cores = Vec::with_capacity(candidates_made_available.len()); + + // Trim the pending availability candidates storage and enact candidates now. 
+ for (para_id, available_candidates) in candidates_made_available { + >::mutate(¶_id, |candidates| { + if let Some(candidates) = candidates { + let mut stopped_at_index = None; + for index in 0..candidates.len() { + if available_candidates.contains(&index) { + stopped_at_index = Some(index); + } else { + break + } + } + + if let Some(stopped_at_index) = stopped_at_index { + let evicted_candidates = candidates.drain(0..=stopped_at_index); + for candidate in evicted_candidates { + freed_cores.push((candidate.core, candidate.hash)); - // Trim the pending availability candidates storage and enact candidates now. - if let Some(stopped_at_index) = stopped_at_index { - >::mutate(¶_id, |candidates| { - if let Some(candidates) = candidates { - let candidates_made_available = candidates.drain(0..=stopped_at_index); - for candidate in candidates_made_available { let receipt = CommittedCandidateReceipt { descriptor: candidate.descriptor, commitments: candidate.commitments, @@ -606,8 +605,8 @@ impl Pallet { ); } } - }); - } + } + }); } freed_cores From 3793f2074da03216b55563c8df8846a731a28de3 Mon Sep 17 00:00:00 2001 From: alindima Date: Mon, 11 Mar 2024 15:22:49 +0200 Subject: [PATCH 23/44] fix clippy --- .../runtime/parachains/src/paras_inherent/benchmarking.rs | 8 -------- 1 file changed, 8 deletions(-) diff --git a/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs b/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs index ad3fa8e0dc71..454fe395ea18 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs @@ -145,10 +145,6 @@ benchmarks! { assert_eq!(backing_validators.1.len(), votes); } - assert_eq!( - inclusion::PendingAvailabilityCommitments::::iter().count(), - cores_with_backed.len() - ); assert_eq!( inclusion::PendingAvailability::::iter().count(), cores_with_backed.len() @@ -209,10 +205,6 @@ benchmarks! 
{ ); } - assert_eq!( - inclusion::PendingAvailabilityCommitments::::iter().count(), - cores_with_backed.len() - ); assert_eq!( inclusion::PendingAvailability::::iter().count(), cores_with_backed.len() From 6c0d062fc1fb05b72be3c5276130a7b69d0898b8 Mon Sep 17 00:00:00 2001 From: alindima Date: Mon, 11 Mar 2024 15:25:24 +0200 Subject: [PATCH 24/44] add prdoc --- prdoc/pr_3479.prdoc | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 prdoc/pr_3479.prdoc diff --git a/prdoc/pr_3479.prdoc b/prdoc/pr_3479.prdoc new file mode 100644 index 000000000000..1e44ce5646b9 --- /dev/null +++ b/prdoc/pr_3479.prdoc @@ -0,0 +1,8 @@ +title: "Elastic scaling: runtime dependency tracking and enactment" + +doc: + - audience: Node Dev + description: | + Adds support in the inclusion and paras_inherent runtime modules for backing and including multiple candidates of the same para if they form a chain. + +crates: [ ] From 74acc46628e9a4871c13b56945571d94c8cca296 Mon Sep 17 00:00:00 2001 From: alindima Date: Mon, 11 Mar 2024 15:48:11 +0200 Subject: [PATCH 25/44] fix availability_cores runtime API --- polkadot/runtime/parachains/src/inclusion/mod.rs | 10 ++++++++++ polkadot/runtime/parachains/src/runtime_api_impl/v7.rs | 8 +++++--- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 18790f1d4299..07b09b18cf8a 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -1162,6 +1162,16 @@ impl Pallet { ) -> Option>> { >::get(¶).and_then(|p| p.get(0).cloned()) } + + /// Returns the metadata around the candidate pending availability occupying the supplied core, + /// if any. 
+ pub(crate) fn pending_availability_with_core( + para: ParaId, + core: CoreIndex, + ) -> Option>> { + >::get(¶) + .and_then(|p| p.iter().find(|c| c.core == core).cloned()) + } } const fn availability_threshold(n_validators: usize) -> usize { diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs b/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs index 9513645dc8fd..7b9c15d2b749 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs @@ -92,9 +92,11 @@ pub fn availability_cores() -> Vec { - let pending_availability = - >::pending_availability(entry.para_id()) - .expect("Occupied core always has pending availability; qed"); + let pending_availability = >::pending_availability_with_core( + entry.para_id(), + CoreIndex(i as u32), + ) + .expect("Occupied core always has pending availability; qed"); let backed_in_number = *pending_availability.backed_in_number(); From 5afb90198e9ce923f834120611848ed7e704b5c0 Mon Sep 17 00:00:00 2001 From: alindima Date: Mon, 11 Mar 2024 16:12:25 +0200 Subject: [PATCH 26/44] remove some unused errors and reintroduce ValidationDataHashMismatch --- .../runtime/parachains/src/inclusion/mod.rs | 54 ++++--------------- .../runtime/parachains/src/inclusion/tests.rs | 26 ++++----- .../parachains/src/paras_inherent/mod.rs | 18 +------ 3 files changed, 23 insertions(+), 75 deletions(-) diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 07b09b18cf8a..7ddee9d71d4a 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -314,28 +314,10 @@ pub mod pallet { #[pallet::error] pub enum Error { - /// Validator indices are out of order or contains duplicates. - UnsortedOrDuplicateValidatorIndices, - /// Dispute statement sets are out of order or contain duplicates. 
- UnsortedOrDuplicateDisputeStatementSet, - /// Backed candidates are out of order (core index) or contain duplicates. - UnsortedOrDuplicateBackedCandidates, - /// A different relay parent was provided compared to the on-chain stored one. - UnexpectedRelayParent, - /// Availability bitfield has unexpected size. - WrongBitfieldSize, - /// Bitfield consists of zeros only. - BitfieldAllZeros, - /// Multiple bitfields submitted by same validator or validators out of order by index. - BitfieldDuplicateOrUnordered, /// Validator index out of bounds. ValidatorIndexOutOfBounds, - /// Invalid signature - InvalidBitfieldSignature, /// Candidate submitted but para not scheduled. UnscheduledCandidate, - /// Candidate scheduled despite pending candidate already existing for the para. - CandidateScheduledBeforeParaFree, /// Head data exceeds the configured maximum. HeadDataTooLarge, /// Code upgrade prematurely. @@ -356,6 +338,8 @@ pub mod pallet { InvalidBacking, /// Collator did not sign PoV. NotCollatorSigned, + /// The validation data hash does not match expected. + ValidationDataHashMismatch, /// The downward message queue is not processed correctly. IncorrectDownwardMessageHandling, /// At least one upward message sent does not pass the acceptance criteria. @@ -369,10 +353,6 @@ pub mod pallet { /// The `para_head` hash in the candidate descriptor doesn't match the hash of the actual /// para head in the commitments. ParaHeadMismatch, - /// A bitfield that references a freed core, - /// either intentionally or as part of a concluded - /// invalid dispute. - BitfieldReferencesFreedCore, } /// The latest bitfield for each validator, referred to by their index in the validator set. @@ -640,9 +620,6 @@ impl Pallet { let mut core_indices = Vec::with_capacity(candidates.len()); for (para_id, candidates) in candidates { - // PVD hash should have already been checked in `filter_unchained_candidates`, but do it - // again for safety. 
- let mut latest_head_data = match Self::para_latest_head_data(para_id) { None => { defensive!("Latest included head data for paraid {:?} is None", para_id); @@ -655,18 +632,11 @@ impl Pallet { let candidate_hash = candidate.candidate().hash(); let check_ctx = CandidateCheckContext::::new(None); - let relay_parent_number = match check_ctx.verify_backed_candidate( + let relay_parent_number = check_ctx.verify_backed_candidate( &allowed_relay_parents, candidate.candidate(), latest_head_data.clone(), - )? { - Err(PVDMismatch) => { - // This means that this candidate is not a child of - // latest_head_data. - break - }, - Ok(relay_parent_number) => relay_parent_number, - }; + )?; // The candidate based upon relay parent `N` should be backed by a // group assigned to core at block `N + 1`. Thus, @@ -1221,11 +1191,6 @@ pub(crate) struct CandidateCheckContext { prev_context: Option>, } -/// An error indicating that creating Persisted Validation Data failed -/// while checking a candidate's validity. 
-#[derive(PartialEq)] -pub(crate) struct PVDMismatch; - impl CandidateCheckContext { pub(crate) fn new(prev_context: Option>) -> Self { Self { config: >::config(), prev_context } @@ -1245,7 +1210,7 @@ impl CandidateCheckContext { allowed_relay_parents: &AllowedRelayParentsTracker>, backed_candidate_receipt: &CommittedCandidateReceipt<::Hash>, parent_head_data: HeadData, - ) -> Result, PVDMismatch>, Error> { + ) -> Result, Error> { let para_id = backed_candidate_receipt.descriptor().para_id; let relay_parent = backed_candidate_receipt.descriptor().relay_parent; @@ -1266,9 +1231,10 @@ impl CandidateCheckContext { let expected = persisted_validation_data.hash(); - if backed_candidate_receipt.descriptor().persisted_validation_data_hash != expected { - return Ok(Err(PVDMismatch)) - } + ensure!( + expected == backed_candidate_receipt.descriptor().persisted_validation_data_hash, + Error::::ValidationDataHashMismatch, + ); } ensure!( @@ -1308,7 +1274,7 @@ impl CandidateCheckContext { ); Err(err.strip_into_dispatch_err::())?; }; - Ok(Ok(relay_parent_number)) + Ok(relay_parent_number) } /// Check the given outputs after candidate validation on whether it passes the acceptance diff --git a/polkadot/runtime/parachains/src/inclusion/tests.rs b/polkadot/runtime/parachains/src/inclusion/tests.rs index 75dc1de992b6..47097dad4ac7 100644 --- a/polkadot/runtime/parachains/src/inclusion/tests.rs +++ b/polkadot/runtime/parachains/src/inclusion/tests.rs @@ -1175,21 +1175,17 @@ fn candidate_checks() { None, ); - // validation data hash mismatch is not fatal, but candidates will be dropped. 
- let ProcessedCandidates { - core_indices: occupied_cores, - candidate_receipt_with_backing_validator_indices, - } = ParaInclusion::process_candidates( - &allowed_relay_parents, - &vec![(chain_a_assignment.0, vec![(backed, chain_a_assignment.1)])] - .into_iter() - .collect(), - &group_validators, - false, - ) - .unwrap(); - assert!(occupied_cores.is_empty()); - assert!(candidate_receipt_with_backing_validator_indices.is_empty()); + assert_noop!( + ParaInclusion::process_candidates( + &allowed_relay_parents, + &vec![(chain_a_assignment.0, vec![(backed, chain_a_assignment.1)])] + .into_iter() + .collect(), + &group_validators, + false, + ), + Error::::ValidationDataHashMismatch + ); } // bad validation code hash diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index c308c15a4bc1..ad8bde4f6a5c 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -24,7 +24,7 @@ use crate::{ configuration, disputes::DisputesHandler, - inclusion::{self, CandidateCheckContext, PVDMismatch}, + inclusion::{self, CandidateCheckContext}, initializer, metrics::METRICS, paras, @@ -134,14 +134,8 @@ pub mod pallet { /// The hash of the submitted parent header doesn't correspond to the saved block hash of /// the parent. InvalidParentHeader, - /// Disputed candidate that was concluded invalid. - CandidateConcludedInvalid, /// The data given to the inherent will result in an overweight block. InherentOverweight, - /// The ordering of dispute statements was invalid. - DisputeStatementsUnsortedOrDuplicates, - /// A dispute statement was invalid. - DisputeInvalid, /// A candidate was filtered during inherent execution. This should have only been done /// during creation. 
CandidatesFilteredDuringExecution, @@ -1318,15 +1312,7 @@ fn filter_unchained_candidates { - log::debug!( - target: LOG_TARGET, - "Found backed candidates which don't form a chain for paraid {:?}. The order may also be wrong. Dropping the candidates.", - para_id - ); - false - }, - Ok(Ok(_)) => true, + Ok(_) => true, Err(err) => { log::debug!( target: LOG_TARGET, From a8c10be09a4d75fc2f3b724c213da0d786e6e075 Mon Sep 17 00:00:00 2001 From: alindima Date: Tue, 12 Mar 2024 15:02:16 +0200 Subject: [PATCH 27/44] add more unit tests to inclusion module --- .../runtime/parachains/src/inclusion/tests.rs | 491 +++++++++++++++--- 1 file changed, 427 insertions(+), 64 deletions(-) diff --git a/polkadot/runtime/parachains/src/inclusion/tests.rs b/polkadot/runtime/parachains/src/inclusion/tests.rs index 47097dad4ac7..5c4486baf3a9 100644 --- a/polkadot/runtime/parachains/src/inclusion/tests.rs +++ b/polkadot/runtime/parachains/src/inclusion/tests.rs @@ -373,63 +373,280 @@ pub(crate) fn process_bitfields( } #[test] -fn collect_timedout_cleans_up_pending() { +fn collect_timedout() { let chain_a = ParaId::from(1_u32); let chain_b = ParaId::from(2_u32); - let thread_a = ParaId::from(3_u32); + let chain_c = ParaId::from(3_u32); + let chain_d = ParaId::from(4_u32); + let chain_e = ParaId::from(5_u32); + let chain_f = ParaId::from(6_u32); + let thread_a = ParaId::from(7_u32); let paras = vec![ (chain_a, ParaKind::Parachain), (chain_b, ParaKind::Parachain), + (chain_c, ParaKind::Parachain), + (chain_d, ParaKind::Parachain), + (chain_e, ParaKind::Parachain), + (chain_f, ParaKind::Parachain), (thread_a, ParaKind::Parathread), ]; let mut config = genesis_config(paras); config.configuration.config.scheduler_params.group_rotation_frequency = 3; new_test_ext(config).execute_with(|| { - let default_candidate = TestCandidateBuilder::default().build(); - >::insert( - chain_a, - [CandidatePendingAvailability { - core: CoreIndex::from(0), + let timed_out_cores = + 
ParaInclusion::collect_timedout(Scheduler::availability_timeout_predicate()); + assert!(timed_out_cores.is_empty()); + + let make_candidate = |core_index: u32, timed_out: bool| { + let default_candidate = TestCandidateBuilder::default().build(); + let backed_in_number = if timed_out { 0 } else { 5 }; + + CandidatePendingAvailability { + core: CoreIndex::from(core_index), hash: default_candidate.hash(), descriptor: default_candidate.descriptor.clone(), availability_votes: default_availability_votes(), relay_parent_number: 0, - backed_in_number: 0, + backed_in_number, backers: default_backing_bitfield(), - backing_group: GroupIndex::from(0), + backing_group: GroupIndex::from(core_index), commitments: default_candidate.commitments.clone(), - }] - .into_iter() - .collect::>(), + } + }; + + >::insert( + chain_a, + [make_candidate(0, true)].into_iter().collect::>(), ); >::insert( &chain_b, - [CandidatePendingAvailability { - core: CoreIndex::from(1), - hash: default_candidate.hash(), - descriptor: default_candidate.descriptor, + [make_candidate(1, false)].into_iter().collect::>(), + ); + + // 2 chained candidates. The first one is timed out. The other will be evicted also. + let mut c_candidates = VecDeque::new(); + c_candidates.push_back(make_candidate(2, true)); + c_candidates.push_back(make_candidate(3, false)); + + >::insert(&chain_c, c_candidates); + + // 2 chained candidates. All are timed out. + let mut d_candidates = VecDeque::new(); + d_candidates.push_back(make_candidate(4, true)); + d_candidates.push_back(make_candidate(5, true)); + + >::insert(&chain_d, d_candidates); + + // 3 chained candidates. The second one is timed out. The first one will remain in place. + // With the current time out predicate this scenario is impossible. But this is not a + // concern for this module. 
+ let mut e_candidates = VecDeque::new(); + e_candidates.push_back(make_candidate(6, false)); + e_candidates.push_back(make_candidate(7, true)); + e_candidates.push_back(make_candidate(8, false)); + + >::insert(&chain_e, e_candidates); + + // 3 chained candidates, none are timed out. + let mut f_candidates = VecDeque::new(); + f_candidates.push_back(make_candidate(9, false)); + f_candidates.push_back(make_candidate(10, false)); + f_candidates.push_back(make_candidate(11, false)); + + >::insert(&chain_f, f_candidates); + + run_to_block(5, |_| None); + + assert_eq!(>::get(&chain_a).unwrap().len(), 1); + assert_eq!(>::get(&chain_b).unwrap().len(), 1); + assert_eq!(>::get(&chain_c).unwrap().len(), 2); + assert_eq!(>::get(&chain_d).unwrap().len(), 2); + assert_eq!(>::get(&chain_e).unwrap().len(), 3); + assert_eq!(>::get(&chain_f).unwrap().len(), 3); + + let timed_out_cores = + ParaInclusion::collect_timedout(Scheduler::availability_timeout_predicate()); + + assert_eq!( + timed_out_cores, + vec![ + CoreIndex(0), + CoreIndex(2), + CoreIndex(3), + CoreIndex(4), + CoreIndex(5), + CoreIndex(7), + CoreIndex(8), + ] + ); + + assert!(>::get(&chain_a).unwrap().is_empty()); + assert_eq!(>::get(&chain_b).unwrap().len(), 1); + assert!(>::get(&chain_c).unwrap().is_empty()); + assert!(>::get(&chain_d).unwrap().is_empty()); + assert_eq!( + >::get(&chain_e) + .unwrap() + .into_iter() + .map(|c| c.core) + .collect::>(), + vec![CoreIndex(6)] + ); + assert_eq!( + >::get(&chain_f) + .unwrap() + .into_iter() + .map(|c| c.core) + .collect::>(), + vec![CoreIndex(9), CoreIndex(10), CoreIndex(11)] + ); + }); +} + +#[test] +fn collect_disputed() { + let chain_a = ParaId::from(1_u32); + let chain_b = ParaId::from(2_u32); + let chain_c = ParaId::from(3_u32); + let chain_d = ParaId::from(4_u32); + let chain_e = ParaId::from(5_u32); + let chain_f = ParaId::from(6_u32); + let thread_a = ParaId::from(7_u32); + + let paras = vec![ + (chain_a, ParaKind::Parachain), + (chain_b, ParaKind::Parachain), + 
(chain_c, ParaKind::Parachain), + (chain_d, ParaKind::Parachain), + (chain_e, ParaKind::Parachain), + (chain_f, ParaKind::Parachain), + (thread_a, ParaKind::Parathread), + ]; + let mut config = genesis_config(paras); + config.configuration.config.scheduler_params.group_rotation_frequency = 3; + new_test_ext(config).execute_with(|| { + let disputed_cores = ParaInclusion::collect_disputed(&BTreeSet::new()).collect::>(); + assert!(disputed_cores.is_empty()); + + let disputed_cores = ParaInclusion::collect_disputed( + &[CandidateHash::default()].into_iter().collect::>(), + ) + .collect::>(); + assert!(disputed_cores.is_empty()); + + let make_candidate = |core_index: u32| { + let default_candidate = TestCandidateBuilder::default().build(); + + CandidatePendingAvailability { + core: CoreIndex::from(core_index), + hash: CandidateHash(Hash::from_low_u64_be(core_index as _)), + descriptor: default_candidate.descriptor.clone(), availability_votes: default_availability_votes(), relay_parent_number: 0, - backed_in_number: 5, + backed_in_number: 0, backers: default_backing_bitfield(), - backing_group: GroupIndex::from(1), + backing_group: GroupIndex::from(core_index), commitments: default_candidate.commitments.clone(), - }] - .into_iter() - .collect::>(), + } + }; + + // Disputed + >::insert( + chain_a, + [make_candidate(0)].into_iter().collect::>(), ); + // Not disputed. + >::insert( + &chain_b, + [make_candidate(1)].into_iter().collect::>(), + ); + + // 2 chained candidates. The first one is disputed. The other will be evicted also. + let mut c_candidates = VecDeque::new(); + c_candidates.push_back(make_candidate(2)); + c_candidates.push_back(make_candidate(3)); + + >::insert(&chain_c, c_candidates); + + // 2 chained candidates. All are disputed. + let mut d_candidates = VecDeque::new(); + d_candidates.push_back(make_candidate(4)); + d_candidates.push_back(make_candidate(5)); + + >::insert(&chain_d, d_candidates); + + // 3 chained candidates. The second one is disputed. 
The first one will remain in place. + let mut e_candidates = VecDeque::new(); + e_candidates.push_back(make_candidate(6)); + e_candidates.push_back(make_candidate(7)); + e_candidates.push_back(make_candidate(8)); + + >::insert(&chain_e, e_candidates); + + // 3 chained candidates, none are disputed. + let mut f_candidates = VecDeque::new(); + f_candidates.push_back(make_candidate(9)); + f_candidates.push_back(make_candidate(10)); + f_candidates.push_back(make_candidate(11)); + + >::insert(&chain_f, f_candidates); + run_to_block(5, |_| None); - assert!(>::get(&chain_a).is_some()); - assert!(>::get(&chain_b).is_some()); + assert_eq!(>::get(&chain_a).unwrap().len(), 1); + assert_eq!(>::get(&chain_b).unwrap().len(), 1); + assert_eq!(>::get(&chain_c).unwrap().len(), 2); + assert_eq!(>::get(&chain_d).unwrap().len(), 2); + assert_eq!(>::get(&chain_e).unwrap().len(), 3); + assert_eq!(>::get(&chain_f).unwrap().len(), 3); + + let disputed_candidates = [ + CandidateHash(Hash::from_low_u64_be(0)), + CandidateHash(Hash::from_low_u64_be(2)), + CandidateHash(Hash::from_low_u64_be(4)), + CandidateHash(Hash::from_low_u64_be(5)), + CandidateHash(Hash::from_low_u64_be(7)), + ] + .into_iter() + .collect::>(); + let disputed_cores = ParaInclusion::collect_disputed(&disputed_candidates); - ParaInclusion::collect_timedout(Scheduler::availability_timeout_predicate()); + assert_eq!( + disputed_cores.map(|(core, _)| core).collect::>(), + vec![ + CoreIndex(0), + CoreIndex(2), + CoreIndex(3), + CoreIndex(4), + CoreIndex(5), + CoreIndex(7), + CoreIndex(8), + ] + ); assert!(>::get(&chain_a).unwrap().is_empty()); - assert!(!>::get(&chain_b).unwrap().is_empty()); + assert_eq!(>::get(&chain_b).unwrap().len(), 1); + assert!(>::get(&chain_c).unwrap().is_empty()); + assert!(>::get(&chain_d).unwrap().is_empty()); + assert_eq!( + >::get(&chain_e) + .unwrap() + .into_iter() + .map(|c| c.core) + .collect::>(), + vec![CoreIndex(6)] + ); + assert_eq!( + >::get(&chain_f) + .unwrap() + .into_iter() + .map(|c| 
c.core) + .collect::>(), + vec![CoreIndex(9), CoreIndex(10), CoreIndex(11)] + ); }); } @@ -627,13 +844,17 @@ fn availability_threshold_is_supermajority() { #[test] fn supermajority_bitfields_trigger_availability() { - let chain_a = ParaId::from(1_u32); - let chain_b = ParaId::from(2_u32); - let thread_a = ParaId::from(3_u32); + let chain_a = ParaId::from(0_u32); + let chain_b = ParaId::from(1_u32); + let chain_c = ParaId::from(2_u32); + let chain_d = ParaId::from(3_u32); + let thread_a = ParaId::from(4_u32); let paras = vec![ (chain_a, ParaKind::Parachain), (chain_b, ParaKind::Parachain), + (chain_c, ParaKind::Parachain), + (chain_d, ParaKind::Parachain), (thread_a, ParaKind::Parathread), ]; let validators = vec![ @@ -642,6 +863,8 @@ fn supermajority_bitfields_trigger_availability() { Sr25519Keyring::Charlie, Sr25519Keyring::Dave, Sr25519Keyring::Ferdie, + Sr25519Keyring::One, + Sr25519Keyring::Two, ]; let keystore: KeystorePtr = Arc::new(LocalKeystore::in_memory()); for validator in validators.iter() { @@ -664,10 +887,14 @@ fn supermajority_bitfields_trigger_availability() { let core_lookup = |core| match core { core if core == CoreIndex::from(0) => Some(chain_a), core if core == CoreIndex::from(1) => Some(chain_b), - core if core == CoreIndex::from(2) => Some(thread_a), - _ => panic!("Core out of bounds for 2 parachains and 1 parathread core."), + core if core == CoreIndex::from(2) => Some(chain_c), + core if core == CoreIndex::from(3) => Some(chain_c), + core if core == CoreIndex::from(4) => Some(chain_c), + core if core == CoreIndex::from(5) => Some(thread_a), + _ => panic!("Core out of bounds"), }; + // Chain A only has one candidate pending availability. It will be made available now. let candidate_a = TestCandidateBuilder { para_id: chain_a, head_data: vec![1, 2, 3, 4].into(), @@ -692,6 +919,7 @@ fn supermajority_bitfields_trigger_availability() { .collect::>(), ); + // Chain B only has one candidate pending availability. It won't be made available now. 
let candidate_b = TestCandidateBuilder { para_id: chain_b, head_data: vec![5, 6, 7, 8].into(), @@ -716,36 +944,93 @@ fn supermajority_bitfields_trigger_availability() { .collect::>(), ); - // this bitfield signals that a and b are available. - let a_and_b_available = { - let mut bare_bitfield = default_bitfield(); - *bare_bitfield.0.get_mut(0).unwrap() = true; - *bare_bitfield.0.get_mut(1).unwrap() = true; + // Chain C has three candidates pending availability. The first and third candidates will be + // made available. Only the first candidate will be evicted from the core and enacted. + let candidate_c_1 = TestCandidateBuilder { + para_id: chain_c, + head_data: vec![7, 8].into(), + ..Default::default() + } + .build(); + let candidate_c_2 = TestCandidateBuilder { + para_id: chain_c, + head_data: vec![9, 10].into(), + ..Default::default() + } + .build(); + let candidate_c_3 = TestCandidateBuilder { + para_id: chain_c, + head_data: vec![11, 12].into(), + ..Default::default() + } + .build(); - bare_bitfield - }; + let mut c_candidates = VecDeque::new(); + c_candidates.push_back(CandidatePendingAvailability { + core: CoreIndex::from(2), + hash: candidate_c_1.hash(), + descriptor: candidate_c_1.descriptor.clone(), + availability_votes: default_availability_votes(), + relay_parent_number: 0, + backed_in_number: 0, + backers: backing_bitfield(&[1]), + backing_group: GroupIndex::from(2), + commitments: candidate_c_1.commitments.clone(), + }); + c_candidates.push_back(CandidatePendingAvailability { + core: CoreIndex::from(3), + hash: candidate_c_2.hash(), + descriptor: candidate_c_2.descriptor.clone(), + availability_votes: default_availability_votes(), + relay_parent_number: 0, + backed_in_number: 0, + backers: backing_bitfield(&[5]), + backing_group: GroupIndex::from(3), + commitments: candidate_c_2.commitments.clone(), + }); + c_candidates.push_back(CandidatePendingAvailability { + core: CoreIndex::from(4), + hash: candidate_c_3.hash(), + descriptor: 
candidate_c_3.descriptor.clone(), + availability_votes: default_availability_votes(), + relay_parent_number: 0, + backed_in_number: 0, + backers: backing_bitfield(&[6]), + backing_group: GroupIndex::from(4), + commitments: candidate_c_3.commitments.clone(), + }); + + >::insert(chain_c, c_candidates); - // this bitfield signals that only a is available. - let a_available = { + // this bitfield signals that a and b are available. + let all_available = { let mut bare_bitfield = default_bitfield(); - *bare_bitfield.0.get_mut(0).unwrap() = true; + for bit in 0..=4 { + *bare_bitfield.0.get_mut(bit).unwrap() = true; + } bare_bitfield }; let threshold = availability_threshold(validators.len()); - // 4 of 5 first value >= 2/3 - assert_eq!(threshold, 4); + // 5 of 7 first value >= 2/3 + assert_eq!(threshold, 5); let signed_bitfields = validators .iter() .enumerate() .filter_map(|(i, key)| { - let to_sign = if i < 3 { - a_and_b_available.clone() - } else if i < 4 { - a_available.clone() + let to_sign = if i < 4 { + all_available.clone() + } else if i < 5 { + // this bitfield signals that only a, c1 and c3 are available. + let mut bare_bitfield = default_bitfield(); + *bare_bitfield.0.get_mut(0).unwrap() = true; + *bare_bitfield.0.get_mut(2).unwrap() = true; + *bare_bitfield.0.get_mut(4).unwrap() = true; + + bare_bitfield } else { // sign nothing. return None @@ -772,12 +1057,22 @@ fn supermajority_bitfields_trigger_availability() { ); assert_eq!(checked_bitfields.len(), old_len, "No bitfields should have been filtered!"); - // only chain A's core is freed. + // only chain A's core and candidate's C1 core are freed. 
let v = process_bitfields(checked_bitfields, core_lookup);
- assert_eq!(vec![(CoreIndex(0), candidate_a.hash())], v);
+ assert_eq!(
+ vec![(CoreIndex(0), candidate_a.hash()), (CoreIndex(2), candidate_c_1.hash())],
+ v
+ );
+
+ let votes = |bits: &[usize]| {
+ let mut votes = default_availability_votes();
+ for bit in bits {
+ *votes.get_mut(*bit).unwrap() = true;
+ }
+
+ votes
+ };

- // chain A had 4 signing off, which is >= threshold.
- // chain B has 3 signing off, which is < threshold.
 assert!(>::get(&chain_a).unwrap().is_empty());
 assert_eq!(
 >::get(&chain_b)
 .unwrap()
 .pop_front()
 .unwrap()
 .availability_votes,
+ votes(&[0, 1, 2, 3])
+ );
+ let mut pending_c = >::get(&chain_c).unwrap();
+ assert_eq!(pending_c.pop_front().unwrap().availability_votes, votes(&[0, 1, 2, 3]));
+ assert_eq!(pending_c.pop_front().unwrap().availability_votes, votes(&[0, 1, 2, 3, 4]));
+ assert!(pending_c.is_empty());
+
+ // and check that the chain heads were correctly updated.
+ assert_eq!(Paras::para_head(&chain_a), Some(vec![1, 2, 3, 4].into()));
+ assert_ne!(Paras::para_head(&chain_b), Some(vec![5, 6, 7, 8].into()));
+ assert_eq!(Paras::para_head(&chain_c), Some(vec![7, 8].into()));
+
+ // Check that rewards are applied. 
+ {
+ let rewards = crate::mock::availability_rewards();
+
+ assert_eq!(rewards.len(), 5);
+ assert_eq!(rewards.get(&ValidatorIndex(0)).unwrap(), &2);
+ assert_eq!(rewards.get(&ValidatorIndex(1)).unwrap(), &2);
+ assert_eq!(rewards.get(&ValidatorIndex(2)).unwrap(), &2);
+ assert_eq!(rewards.get(&ValidatorIndex(3)).unwrap(), &2);
+ assert_eq!(rewards.get(&ValidatorIndex(4)).unwrap(), &2);
+ }
+
+ {
+ let rewards = crate::mock::backing_rewards();
+
+ assert_eq!(rewards.len(), 3);
+ assert_eq!(rewards.get(&ValidatorIndex(3)).unwrap(), &1);
+ assert_eq!(rewards.get(&ValidatorIndex(4)).unwrap(), &1);
+ assert_eq!(rewards.get(&ValidatorIndex(1)).unwrap(), &1);
+ }
+
+ // Add a new bitfield which will make candidate C2 available also. This will also evict and
+ // enact C3.
+ let signed_bitfields = vec![sign_bitfield(
+ &keystore,
+ &validators[5],
+ ValidatorIndex(5), {
- // check that votes from first 3 were tracked.
+ let mut bare_bitfield = default_bitfield();
+ *bare_bitfield.0.get_mut(3).unwrap() = true;
+ bare_bitfield
+ },
+ &signing_context,
+ )
+ .into()];

- let mut votes = default_availability_votes();
- *votes.get_mut(0).unwrap() = true;
- *votes.get_mut(1).unwrap() = true;
- *votes.get_mut(2).unwrap() = true;
+ let old_len = signed_bitfields.len();
+ let checked_bitfields = simple_sanitize_bitfields(
+ signed_bitfields,
+ DisputedBitfield::zeros(expected_bits()),
+ expected_bits(),
+ );
+ assert_eq!(checked_bitfields.len(), old_len, "No bitfields should have been filtered!");

- votes
- }
+ let v = process_bitfields(checked_bitfields, core_lookup);
+ assert_eq!(
+ vec![(CoreIndex(3), candidate_c_2.hash()), (CoreIndex(4), candidate_c_3.hash())],
+ v
 );

- // and check that chain head was enacted.
+ assert!(>::get(&chain_a).unwrap().is_empty());
+ assert_eq!(
+ >::get(&chain_b)
+ .unwrap()
+ .pop_front()
+ .unwrap()
+ .availability_votes,
+ votes(&[0, 1, 2, 3])
+ );
+ assert!(>::get(&chain_c).unwrap().is_empty());
+
+ // and check that the chain heads were correctly updated. 
assert_eq!(Paras::para_head(&chain_a), Some(vec![1, 2, 3, 4].into())); + assert_ne!(Paras::para_head(&chain_b), Some(vec![5, 6, 7, 8].into())); + assert_eq!(Paras::para_head(&chain_c), Some(vec![11, 12].into())); // Check that rewards are applied. { let rewards = crate::mock::availability_rewards(); - assert_eq!(rewards.len(), 4); - assert_eq!(rewards.get(&ValidatorIndex(0)).unwrap(), &1); - assert_eq!(rewards.get(&ValidatorIndex(1)).unwrap(), &1); - assert_eq!(rewards.get(&ValidatorIndex(2)).unwrap(), &1); - assert_eq!(rewards.get(&ValidatorIndex(3)).unwrap(), &1); + assert_eq!(rewards.len(), 6); + assert_eq!(rewards.get(&ValidatorIndex(0)).unwrap(), &4); + assert_eq!(rewards.get(&ValidatorIndex(1)).unwrap(), &4); + assert_eq!(rewards.get(&ValidatorIndex(2)).unwrap(), &4); + assert_eq!(rewards.get(&ValidatorIndex(3)).unwrap(), &4); + assert_eq!(rewards.get(&ValidatorIndex(4)).unwrap(), &3); + assert_eq!(rewards.get(&ValidatorIndex(5)).unwrap(), &1); } { let rewards = crate::mock::backing_rewards(); - assert_eq!(rewards.len(), 2); + assert_eq!(rewards.len(), 5); assert_eq!(rewards.get(&ValidatorIndex(3)).unwrap(), &1); assert_eq!(rewards.get(&ValidatorIndex(4)).unwrap(), &1); + assert_eq!(rewards.get(&ValidatorIndex(1)).unwrap(), &1); + assert_eq!(rewards.get(&ValidatorIndex(5)).unwrap(), &1); + assert_eq!(rewards.get(&ValidatorIndex(6)).unwrap(), &1); } }); } From d2309a7b214ba11e1b9ff4d0ca7f9c4e983f81aa Mon Sep 17 00:00:00 2001 From: alindima Date: Wed, 13 Mar 2024 13:53:05 +0200 Subject: [PATCH 28/44] more inclusion unit tests --- .../runtime/parachains/src/inclusion/mod.rs | 7 +- .../runtime/parachains/src/inclusion/tests.rs | 148 ++++++++++++++++-- 2 files changed, 137 insertions(+), 18 deletions(-) diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 7ddee9d71d4a..2c48cd8ade80 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ 
-595,9 +595,10 @@ impl Pallet { /// Process candidates that have been backed. Provide a set of /// candidates along with their scheduled cores. /// - /// Candidates of a paraid should sorted ascending by core index. If this condition is not met, - /// candidates of the para which don't satisfy this criteria will be dropped. (This really - /// should not happen here, if the candidates were properly sanitised in paras_inherent). + /// Candidates of the same paraid should be sorted according to their dependency order (they + /// should form a chain). If this condition is not met, this function will return an error. + /// (This really should not happen here, if the candidates were properly sanitised in + /// paras_inherent). pub(crate) fn process_candidates( allowed_relay_parents: &AllowedRelayParentsTracker>, candidates: &BTreeMap, CoreIndex)>>, diff --git a/polkadot/runtime/parachains/src/inclusion/tests.rs b/polkadot/runtime/parachains/src/inclusion/tests.rs index 5c4486baf3a9..7b938fc2fa25 100644 --- a/polkadot/runtime/parachains/src/inclusion/tests.rs +++ b/polkadot/runtime/parachains/src/inclusion/tests.rs @@ -1204,6 +1204,7 @@ fn candidate_checks() { Sr25519Keyring::Charlie, Sr25519Keyring::Dave, Sr25519Keyring::Ferdie, + Sr25519Keyring::One, ]; let keystore: KeystorePtr = Arc::new(LocalKeystore::in_memory()); for validator in validators.iter() { @@ -1230,7 +1231,8 @@ fn candidate_checks() { group_index if group_index == GroupIndex::from(0) => Some(vec![0, 1]), group_index if group_index == GroupIndex::from(1) => Some(vec![2, 3]), group_index if group_index == GroupIndex::from(2) => Some(vec![4]), - _ => panic!("Group index out of bounds for 2 parachains and 1 parathread core"), + group_index if group_index == GroupIndex::from(3) => Some(vec![5]), + _ => panic!("Group index out of bounds"), } .map(|m| m.into_iter().map(ValidatorIndex).collect::>()) }; @@ -1240,12 +1242,12 @@ fn candidate_checks() { vec![ValidatorIndex(0), ValidatorIndex(1)], 
vec![ValidatorIndex(2), ValidatorIndex(3)], vec![ValidatorIndex(4)], + vec![ValidatorIndex(5)], ]; Scheduler::set_validator_groups(validator_groups); let thread_collator: CollatorId = Sr25519Keyring::Two.public().into(); let chain_a_assignment = (chain_a, CoreIndex::from(0)); - let chain_b_assignment = (chain_b, CoreIndex::from(1)); let thread_a_assignment = (thread_a, CoreIndex::from(2)); @@ -1262,7 +1264,7 @@ fn candidate_checks() { Ok(ProcessedCandidates::default()) ); - // candidates out of order. + // Check candidate ordering { let mut candidate_a = TestCandidateBuilder { para_id: chain_a, @@ -1273,19 +1275,37 @@ fn candidate_checks() { ..Default::default() } .build(); - let mut candidate_b = TestCandidateBuilder { + let mut candidate_b_1 = TestCandidateBuilder { para_id: chain_b, relay_parent: System::parent_hash(), pov_hash: Hash::repeat_byte(2), persisted_validation_data_hash: make_vdata_hash(chain_b).unwrap(), hrmp_watermark: RELAY_PARENT_NUM, + head_data: HeadData(vec![1, 2, 3]), ..Default::default() } .build(); - collator_sign_candidate(Sr25519Keyring::One, &mut candidate_a); + // Make candidate b2 a child of b1. 
+ let mut candidate_b_2 = TestCandidateBuilder { + para_id: chain_b, + relay_parent: System::parent_hash(), + pov_hash: Hash::repeat_byte(3), + persisted_validation_data_hash: make_persisted_validation_data_with_parent::( + RELAY_PARENT_NUM, + Default::default(), + candidate_b_1.commitments.head_data.clone(), + ) + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + head_data: HeadData(vec![5, 6, 7]), + ..Default::default() + } + .build(); - collator_sign_candidate(Sr25519Keyring::Two, &mut candidate_b); + collator_sign_candidate(Sr25519Keyring::One, &mut candidate_a); + collator_sign_candidate(Sr25519Keyring::Two, &mut candidate_b_1); + collator_sign_candidate(Sr25519Keyring::Two, &mut candidate_b_2); let backed_a = back_candidate( candidate_a, @@ -1297,8 +1317,18 @@ fn candidate_checks() { None, ); - let backed_b = back_candidate( - candidate_b, + let backed_b_1 = back_candidate( + candidate_b_1.clone(), + &validators, + group_validators(GroupIndex::from(2)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + None, + ); + + let backed_b_2 = back_candidate( + candidate_b_2, &validators, group_validators(GroupIndex::from(1)).unwrap().as_ref(), &keystore, @@ -1307,19 +1337,83 @@ fn candidate_checks() { None, ); - // no longer needed to be sorted by core index. - assert!(ParaInclusion::process_candidates( + // candidates are required to be sorted in dependency order. + assert_noop!( + ParaInclusion::process_candidates( + &allowed_relay_parents, + &vec![( + chain_b, + vec![ + (backed_b_2.clone(), CoreIndex(1)), + (backed_b_1.clone(), CoreIndex(2)) + ] + ),] + .into_iter() + .collect(), + &group_validators, + false + ), + Error::::ValidationDataHashMismatch + ); + + // candidates are no longer required to be sorted by core index. 
+ ParaInclusion::process_candidates( &allowed_relay_parents, &vec![ - (chain_b_assignment.0, vec![(backed_b, chain_b_assignment.1)]), - (chain_a_assignment.0, vec![(backed_a, chain_a_assignment.1)]) + ( + chain_b, + vec![ + (backed_b_1.clone(), CoreIndex(2)), + (backed_b_2.clone(), CoreIndex(1)), + ], + ), + (chain_a_assignment.0, vec![(backed_a.clone(), chain_a_assignment.1)]), ] .into_iter() .collect(), &group_validators, - false + false, ) - .is_ok()); + .unwrap(); + + // candidate does not build on top of the latest unincluded head + + let mut candidate_b_3 = TestCandidateBuilder { + para_id: chain_b, + relay_parent: System::parent_hash(), + pov_hash: Hash::repeat_byte(4), + persisted_validation_data_hash: make_persisted_validation_data_with_parent::( + RELAY_PARENT_NUM, + Default::default(), + candidate_b_1.commitments.head_data.clone(), + ) + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + head_data: HeadData(vec![8, 9]), + ..Default::default() + } + .build(); + collator_sign_candidate(Sr25519Keyring::Two, &mut candidate_b_3); + + let backed_b_3 = back_candidate( + candidate_b_3, + &validators, + group_validators(GroupIndex::from(3)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + None, + ); + + assert_noop!( + ParaInclusion::process_candidates( + &allowed_relay_parents, + &vec![(chain_b, vec![(backed_b_3, CoreIndex(3))])].into_iter().collect(), + &group_validators, + false + ), + Error::::ValidationDataHashMismatch + ); } // candidate not backed. @@ -1335,8 +1429,9 @@ fn candidate_checks() { .build(); collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + // Insufficient backing. let backed = back_candidate( - candidate, + candidate.clone(), &validators, group_validators(GroupIndex::from(0)).unwrap().as_ref(), &keystore, @@ -1356,6 +1451,29 @@ fn candidate_checks() { ), Error::::InsufficientBacking ); + + // Wrong backing group. 
+ let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(1)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + None, + ); + + assert_noop!( + ParaInclusion::process_candidates( + &allowed_relay_parents, + &vec![(chain_a_assignment.0, vec![(backed, chain_a_assignment.1)])] + .into_iter() + .collect(), + &group_validators, + false + ), + Error::::InvalidBacking + ); } // one of candidates is not based on allowed relay parent. From c59bce0fe438d91518bb5a8204bdf577b5fbb5bd Mon Sep 17 00:00:00 2001 From: alindima Date: Wed, 13 Mar 2024 17:28:34 +0200 Subject: [PATCH 29/44] more tests to paras_inherent --- .../parachains/src/paras_inherent/tests.rs | 559 +++++++++++++++++- 1 file changed, 550 insertions(+), 9 deletions(-) diff --git a/polkadot/runtime/parachains/src/paras_inherent/tests.rs b/polkadot/runtime/parachains/src/paras_inherent/tests.rs index 7a818502bd56..ac45bcc88013 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/tests.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/tests.rs @@ -1375,6 +1375,7 @@ mod sanitizers { mod candidates { use crate::{ + configuration::HostConfiguration, mock::{set_disabled_validators, RuntimeOrigin}, scheduler::{common::Assignment, ParasEntry}, util::{make_persisted_validation_data, make_persisted_validation_data_with_parent}, @@ -1384,6 +1385,18 @@ mod sanitizers { use super::*; + fn default_config() -> MockGenesisConfig { + MockGenesisConfig { + configuration: configuration::GenesisConfig { + config: HostConfiguration { + max_head_data_size: 0b100000, + ..Default::default() + }, + }, + ..Default::default() + } + } + // Backed candidates and scheduled parachains used for `sanitize_backed_candidates` testing struct TestData { backed_candidates: Vec, @@ -1391,7 +1404,7 @@ mod sanitizers { scheduled_paras: BTreeMap>, } - // Generate test data for the candidates and assert that the evnironment is set as expected + // Generate test data for the 
candidates and assert that the environment is set as expected // (check the comments for details) fn get_test_data(core_index_enabled: bool) -> TestData { const RELAY_PARENT_NUM: u32 = 3; @@ -1568,7 +1581,7 @@ mod sanitizers { } } - // Generate test data for the candidates and assert that the evnironment is set as expected + // Generate test data for the candidates and assert that the environment is set as expected // (check the comments for details) // Para 1 scheduled on core 0 and core 1. Two candidates are supplied. // Para 2 scheduled on cores 2 and 3. One candidate supplied. @@ -1726,6 +1739,7 @@ mod sanitizers { .unwrap() .hash(), hrmp_watermark: RELAY_PARENT_NUM, + head_data: HeadData(vec![1, 1]), validation_code: ValidationCode(vec![1]), ..Default::default() } @@ -1974,11 +1988,514 @@ mod sanitizers { } } + // Para 1 scheduled on core 0 and core 1. Two candidates are supplied. They form a chain but + // in the wrong order. + // Para 2 scheduled on core 2, core 3 and core 4. Three candidates are supplied. The second + // one is not part of the chain. + // Para 3 scheduled on core 5 and 6. Two candidates are supplied and they all form a chain. + // Para 4 scheduled on core 7 and 8. Duplicated candidates. + fn get_test_data_for_order_checks(core_index_enabled: bool) -> TestData { + const RELAY_PARENT_NUM: u32 = 3; + + // Add the relay parent to `shared` pallet. Otherwise some code (e.g. 
filtering backing + // votes) won't behave correctly + shared::Pallet::::add_allowed_relay_parent( + default_header().hash(), + Default::default(), + RELAY_PARENT_NUM, + 1, + ); + + let header = default_header(); + let relay_parent = header.hash(); + let session_index = SessionIndex::from(0_u32); + + let keystore = LocalKeystore::in_memory(); + let keystore = Arc::new(keystore) as KeystorePtr; + let signing_context = SigningContext { parent_hash: relay_parent, session_index }; + + let validators = vec![ + keyring::Sr25519Keyring::Alice, + keyring::Sr25519Keyring::Bob, + keyring::Sr25519Keyring::Charlie, + keyring::Sr25519Keyring::Dave, + keyring::Sr25519Keyring::Eve, + keyring::Sr25519Keyring::Ferdie, + keyring::Sr25519Keyring::One, + keyring::Sr25519Keyring::Two, + keyring::Sr25519Keyring::AliceStash, + ]; + for validator in validators.iter() { + Keystore::sr25519_generate_new( + &*keystore, + PARACHAIN_KEY_TYPE_ID, + Some(&validator.to_seed()), + ) + .unwrap(); + } + + // Set active validators in `shared` pallet + let validator_ids = + validators.iter().map(|v| v.public().into()).collect::>(); + shared::Pallet::::set_active_validators_ascending(validator_ids); + + // Set the validator groups in `scheduler` + scheduler::Pallet::::set_validator_groups(vec![ + vec![ValidatorIndex(0)], + vec![ValidatorIndex(1)], + vec![ValidatorIndex(2)], + vec![ValidatorIndex(3)], + vec![ValidatorIndex(4)], + vec![ValidatorIndex(5)], + vec![ValidatorIndex(6)], + vec![ValidatorIndex(7)], + vec![ValidatorIndex(8)], + ]); + + // Update scheduler's claimqueue with the parachains + scheduler::Pallet::::set_claimqueue(BTreeMap::from([ + ( + CoreIndex::from(0), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(0) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(1), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(1) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(2), + 
VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(2) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(3), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(3) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(4), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(4) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(5), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 3.into(), core_index: CoreIndex(5) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(6), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 3.into(), core_index: CoreIndex(6) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(7), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 4.into(), core_index: CoreIndex(7) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(8), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 4.into(), core_index: CoreIndex(8) }, + RELAY_PARENT_NUM, + )]), + ), + ])); + + // Set the on-chain included head data and current code hash. 
+ for id in 1..=4u32 { + paras::Pallet::::set_current_head(ParaId::from(id), HeadData(vec![id as u8])); + paras::Pallet::::force_set_current_code( + RuntimeOrigin::root(), + ParaId::from(id), + ValidationCode(vec![id as u8]), + ) + .unwrap(); + } + + // Callback used for backing candidates + let group_validators = |group_index: GroupIndex| { + match group_index { + group_index if group_index == GroupIndex::from(0) => Some(vec![0]), + group_index if group_index == GroupIndex::from(1) => Some(vec![1]), + group_index if group_index == GroupIndex::from(2) => Some(vec![2]), + group_index if group_index == GroupIndex::from(3) => Some(vec![3]), + group_index if group_index == GroupIndex::from(4) => Some(vec![4]), + group_index if group_index == GroupIndex::from(5) => Some(vec![5]), + group_index if group_index == GroupIndex::from(6) => Some(vec![6]), + group_index if group_index == GroupIndex::from(7) => Some(vec![7]), + group_index if group_index == GroupIndex::from(8) => Some(vec![8]), + + _ => panic!("Group index out of bounds"), + } + .map(|m| m.into_iter().map(ValidatorIndex).collect::>()) + }; + + let mut backed_candidates = vec![]; + let mut all_backed_candidates_with_core = BTreeMap::new(); + + // Para 1 + { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(1), + relay_parent, + pov_hash: Hash::repeat_byte(1 as u8), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(1), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), + head_data: HeadData(vec![1, 1]), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![1]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let prev_candidate = candidate.clone(); + let prev_backed: BackedCandidate = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(0 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + 
core_index_enabled.then_some(CoreIndex(0 as u32)), + ); + + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(1), + relay_parent, + pov_hash: Hash::repeat_byte(2 as u8), + persisted_validation_data_hash: make_persisted_validation_data_with_parent::< + Test, + >( + RELAY_PARENT_NUM, + Default::default(), + prev_candidate.commitments.head_data, + ) + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![1]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(1 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(1 as u32)), + ); + backed_candidates.push(backed.clone()); + backed_candidates.push(prev_backed.clone()); + } + + // Para 2. + { + let mut candidate_1 = TestCandidateBuilder { + para_id: ParaId::from(2), + relay_parent, + pov_hash: Hash::repeat_byte(3 as u8), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(2), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), + head_data: HeadData(vec![2, 2]), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![2]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate_1); + + let backed_1: BackedCandidate = back_candidate( + candidate_1, + &validators, + group_validators(GroupIndex::from(2 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(2 as u32)), + ); + + backed_candidates.push(backed_1.clone()); + if core_index_enabled { + all_backed_candidates_with_core + .entry(ParaId::from(2)) + .or_insert(vec![]) + .push((backed_1, CoreIndex(2))); + } + + let mut candidate_2 = TestCandidateBuilder { + para_id: ParaId::from(2), + relay_parent, + pov_hash: 
Hash::repeat_byte(4 as u8), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(2), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![2]), + head_data: HeadData(vec![3, 3]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate_2); + + let backed_2 = back_candidate( + candidate_2.clone(), + &validators, + group_validators(GroupIndex::from(3 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(3 as u32)), + ); + backed_candidates.push(backed_2.clone()); + + let mut candidate_3 = TestCandidateBuilder { + para_id: ParaId::from(2), + relay_parent, + pov_hash: Hash::repeat_byte(5 as u8), + persisted_validation_data_hash: make_persisted_validation_data_with_parent::< + Test, + >( + RELAY_PARENT_NUM, + Default::default(), + candidate_2.commitments.head_data, + ) + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![2]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate_3); + + let backed_3 = back_candidate( + candidate_3, + &validators, + group_validators(GroupIndex::from(4 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(4 as u32)), + ); + backed_candidates.push(backed_3.clone()); + } + + // Para 3 + { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(3), + relay_parent, + pov_hash: Hash::repeat_byte(6 as u8), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(3), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), + head_data: HeadData(vec![3, 3]), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![3]), + ..Default::default() + } + .build(); + + 
collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let prev_candidate = candidate.clone(); + let backed: BackedCandidate = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(5 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(5 as u32)), + ); + backed_candidates.push(backed.clone()); + if core_index_enabled { + all_backed_candidates_with_core + .entry(ParaId::from(3)) + .or_insert(vec![]) + .push((backed, CoreIndex(5))); + } + + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(3), + relay_parent, + pov_hash: Hash::repeat_byte(6 as u8), + persisted_validation_data_hash: make_persisted_validation_data_with_parent::< + Test, + >( + RELAY_PARENT_NUM, + Default::default(), + prev_candidate.commitments.head_data, + ) + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![3]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(6 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(6 as u32)), + ); + backed_candidates.push(backed.clone()); + if core_index_enabled { + all_backed_candidates_with_core + .entry(ParaId::from(3)) + .or_insert(vec![]) + .push((backed, CoreIndex(6))); + } + } + + // Para 4 + { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(4), + relay_parent, + pov_hash: Hash::repeat_byte(8 as u8), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(4), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), + head_data: HeadData(vec![4]), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![4]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut 
candidate); + + let backed: BackedCandidate = back_candidate( + candidate.clone(), + &validators, + group_validators(GroupIndex::from(7 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(7 as u32)), + ); + backed_candidates.push(backed.clone()); + if core_index_enabled { + all_backed_candidates_with_core + .entry(ParaId::from(4)) + .or_insert(vec![]) + .push((backed, CoreIndex(7))); + } + + let backed: BackedCandidate = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(7 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(8 as u32)), + ); + backed_candidates.push(backed.clone()); + } + + // State sanity checks + assert_eq!( + >::scheduled_paras().collect::>(), + vec![ + (CoreIndex(0), ParaId::from(1)), + (CoreIndex(1), ParaId::from(1)), + (CoreIndex(2), ParaId::from(2)), + (CoreIndex(3), ParaId::from(2)), + (CoreIndex(4), ParaId::from(2)), + (CoreIndex(5), ParaId::from(3)), + (CoreIndex(6), ParaId::from(3)), + (CoreIndex(7), ParaId::from(4)), + (CoreIndex(8), ParaId::from(4)), + ] + ); + let mut scheduled: BTreeMap> = BTreeMap::new(); + for (core_idx, para_id) in >::scheduled_paras() { + scheduled.entry(para_id).or_default().insert(core_idx); + } + + assert_eq!( + shared::Pallet::::active_validator_indices(), + vec![ + ValidatorIndex(0), + ValidatorIndex(1), + ValidatorIndex(2), + ValidatorIndex(3), + ValidatorIndex(4), + ValidatorIndex(5), + ValidatorIndex(6), + ValidatorIndex(7), + ValidatorIndex(8), + ] + ); + + TestData { + backed_candidates, + scheduled_paras: scheduled, + all_backed_candidates_with_core, + } + } + #[rstest] #[case(false)] #[case(true)] fn happy_path(#[case] core_index_enabled: bool) { - new_test_ext(MockGenesisConfig::default()).execute_with(|| { + new_test_ext(default_config()).execute_with(|| { let TestData { backed_candidates, all_backed_candidates_with_core, 
@@ -2002,7 +2519,7 @@ mod sanitizers { #[case(false)] #[case(true)] fn test_with_multiple_cores_per_para(#[case] core_index_enabled: bool) { - new_test_ext(MockGenesisConfig::default()).execute_with(|| { + new_test_ext(default_config()).execute_with(|| { let TestData { backed_candidates, all_backed_candidates_with_core: expected_all_backed_candidates_with_core, @@ -2022,6 +2539,30 @@ mod sanitizers { }); } + #[rstest] + #[case(false)] + #[case(true)] + fn test_candidate_ordering(#[case] core_index_enabled: bool) { + new_test_ext(default_config()).execute_with(|| { + let TestData { + backed_candidates, + scheduled_paras: scheduled, + all_backed_candidates_with_core, + } = get_test_data_for_order_checks(core_index_enabled); + + assert_eq!( + sanitize_backed_candidates::( + backed_candidates.clone(), + &>::allowed_relay_parents(), + BTreeSet::new(), + scheduled, + core_index_enabled, + ), + all_backed_candidates_with_core + ); + }); + } + // nothing is scheduled, so no paraids match, thus all backed candidates are skipped #[rstest] #[case(false, false)] @@ -2032,7 +2573,7 @@ mod sanitizers { #[case] core_index_enabled: bool, #[case] multiple_cores_per_para: bool, ) { - new_test_ext(MockGenesisConfig::default()).execute_with(|| { + new_test_ext(default_config()).execute_with(|| { let TestData { backed_candidates, .. } = if multiple_cores_per_para { get_test_data_multiple_cores_per_para(core_index_enabled) } else { @@ -2057,7 +2598,7 @@ mod sanitizers { #[case(false)] #[case(true)] fn invalid_are_filtered_out(#[case] core_index_enabled: bool) { - new_test_ext(MockGenesisConfig::default()).execute_with(|| { + new_test_ext(default_config()).execute_with(|| { let TestData { backed_candidates, scheduled_paras: scheduled, .. 
} = get_test_data(core_index_enabled); @@ -2090,7 +2631,7 @@ mod sanitizers { #[case(false)] #[case(true)] fn disabled_non_signing_validator_doesnt_get_filtered(#[case] core_index_enabled: bool) { - new_test_ext(MockGenesisConfig::default()).execute_with(|| { + new_test_ext(default_config()).execute_with(|| { let TestData { mut all_backed_candidates_with_core, .. } = get_test_data(core_index_enabled); @@ -2116,7 +2657,7 @@ mod sanitizers { fn drop_statements_from_disabled_without_dropping_candidate( #[case] core_index_enabled: bool, ) { - new_test_ext(MockGenesisConfig::default()).execute_with(|| { + new_test_ext(default_config()).execute_with(|| { let TestData { mut all_backed_candidates_with_core, .. } = get_test_data(core_index_enabled); @@ -2226,7 +2767,7 @@ mod sanitizers { #[case(false)] #[case(true)] fn drop_candidate_if_all_statements_are_from_disabled(#[case] core_index_enabled: bool) { - new_test_ext(MockGenesisConfig::default()).execute_with(|| { + new_test_ext(default_config()).execute_with(|| { let TestData { mut all_backed_candidates_with_core, .. 
} = get_test_data(core_index_enabled); From 5e1a351da1145799a3df729d2d77f615df47a8f2 Mon Sep 17 00:00:00 2001 From: alindima Date: Thu, 14 Mar 2024 13:52:26 +0200 Subject: [PATCH 30/44] more paras_inherent tests --- .../parachains/src/paras_inherent/mod.rs | 23 +- .../parachains/src/paras_inherent/tests.rs | 333 +++++++++++++++--- 2 files changed, 294 insertions(+), 62 deletions(-) diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index ad8bde4f6a5c..457e51504f2d 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -1382,6 +1382,11 @@ fn map_candidates_to_cores = @@ -1394,7 +1399,6 @@ fn map_candidates_to_cores, - all_backed_candidates_with_core: BTreeMap>, + expected_backed_candidates_with_core: + BTreeMap>, scheduled_paras: BTreeMap>, } // Generate test data for the candidates and assert that the environment is set as expected // (check the comments for details) - fn get_test_data(core_index_enabled: bool) -> TestData { + fn get_test_data_one_core_per_para(core_index_enabled: bool) -> TestData { const RELAY_PARENT_NUM: u32 = 3; // Add the relay parent to `shared` pallet. Otherwise some code (e.g. 
filtering backing @@ -1563,12 +1568,12 @@ mod sanitizers { ] ); - let mut all_backed_candidates_with_core = BTreeMap::new(); + let mut expected_backed_candidates_with_core = BTreeMap::new(); for candidate in backed_candidates.iter() { let para_id = candidate.descriptor().para_id; - all_backed_candidates_with_core.entry(para_id).or_insert(vec![]).push(( + expected_backed_candidates_with_core.entry(para_id).or_insert(vec![]).push(( candidate.clone(), scheduled.get(¶_id).unwrap().first().copied().unwrap(), )); @@ -1577,7 +1582,7 @@ mod sanitizers { TestData { backed_candidates, scheduled_paras: scheduled, - all_backed_candidates_with_core, + expected_backed_candidates_with_core, } } @@ -1588,6 +1593,8 @@ mod sanitizers { // Para 3 scheduled on core 4. One candidate supplied. // Para 4 scheduled on core 5. Two candidates supplied. // Para 5 scheduled on core 6. No candidates supplied. + // Para 6 is not scheduled. One candidate supplied. + // Para 7 is scheduled on core 7 and 8, but the candidate contains the wrong core index. fn get_test_data_multiple_cores_per_para(core_index_enabled: bool) -> TestData { const RELAY_PARENT_NUM: u32 = 3; @@ -1693,10 +1700,24 @@ mod sanitizers { RELAY_PARENT_NUM, )]), ), + ( + CoreIndex::from(7), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 7.into(), core_index: CoreIndex(7) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(8), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 7.into(), core_index: CoreIndex(8) }, + RELAY_PARENT_NUM, + )]), + ), ])); // Set the on-chain included head data and current code hash. 
- for id in 1..=5u32 { + for id in 1..=7u32 { paras::Pallet::::set_current_head(ParaId::from(id), HeadData(vec![id as u8])); paras::Pallet::::force_set_current_code( RuntimeOrigin::root(), @@ -1723,7 +1744,7 @@ mod sanitizers { }; let mut backed_candidates = vec![]; - let mut all_backed_candidates_with_core = BTreeMap::new(); + let mut expected_backed_candidates_with_core = BTreeMap::new(); // Para 1 { @@ -1759,7 +1780,7 @@ mod sanitizers { ); backed_candidates.push(backed.clone()); if core_index_enabled { - all_backed_candidates_with_core + expected_backed_candidates_with_core .entry(ParaId::from(1)) .or_insert(vec![]) .push((backed, CoreIndex(0))); @@ -1796,7 +1817,7 @@ mod sanitizers { ); backed_candidates.push(backed.clone()); if core_index_enabled { - all_backed_candidates_with_core + expected_backed_candidates_with_core .entry(ParaId::from(1)) .or_insert(vec![]) .push((backed, CoreIndex(1))); @@ -1835,7 +1856,7 @@ mod sanitizers { ); backed_candidates.push(backed.clone()); if core_index_enabled { - all_backed_candidates_with_core + expected_backed_candidates_with_core .entry(ParaId::from(2)) .or_insert(vec![]) .push((backed, CoreIndex(2))); @@ -1873,7 +1894,7 @@ mod sanitizers { core_index_enabled.then_some(CoreIndex(4 as u32)), ); backed_candidates.push(backed.clone()); - all_backed_candidates_with_core + expected_backed_candidates_with_core .entry(ParaId::from(3)) .or_insert(vec![]) .push((backed, CoreIndex(4))); @@ -1911,7 +1932,7 @@ mod sanitizers { None, ); backed_candidates.push(backed.clone()); - all_backed_candidates_with_core + expected_backed_candidates_with_core .entry(ParaId::from(4)) .or_insert(vec![]) .push((backed, CoreIndex(5))); @@ -1950,6 +1971,72 @@ mod sanitizers { // No candidate for para 5. + // Para 6. 
+ { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(6), + relay_parent, + pov_hash: Hash::repeat_byte(3 as u8), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(6), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![6]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(6 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(6 as u32)), + ); + backed_candidates.push(backed.clone()); + } + + // Para 7. + { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(7), + relay_parent, + pov_hash: Hash::repeat_byte(3 as u8), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(7), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![7]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(6 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(6 as u32)), + ); + backed_candidates.push(backed.clone()); + } + // State sanity checks assert_eq!( >::scheduled_paras().collect::>(), @@ -1961,6 +2048,8 @@ mod sanitizers { (CoreIndex(4), ParaId::from(3)), (CoreIndex(5), ParaId::from(4)), (CoreIndex(6), ParaId::from(5)), + (CoreIndex(7), ParaId::from(7)), + (CoreIndex(8), ParaId::from(7)), ] ); let mut scheduled: BTreeMap> = BTreeMap::new(); @@ -1984,7 +2073,7 @@ mod sanitizers { TestData { backed_candidates, scheduled_paras: scheduled, - 
all_backed_candidates_with_core, + expected_backed_candidates_with_core, } } @@ -2149,7 +2238,7 @@ mod sanitizers { }; let mut backed_candidates = vec![]; - let mut all_backed_candidates_with_core = BTreeMap::new(); + let mut expected_backed_candidates_with_core = BTreeMap::new(); // Para 1 { @@ -2251,7 +2340,7 @@ mod sanitizers { backed_candidates.push(backed_1.clone()); if core_index_enabled { - all_backed_candidates_with_core + expected_backed_candidates_with_core .entry(ParaId::from(2)) .or_insert(vec![]) .push((backed_1, CoreIndex(2))); @@ -2354,7 +2443,7 @@ mod sanitizers { ); backed_candidates.push(backed.clone()); if core_index_enabled { - all_backed_candidates_with_core + expected_backed_candidates_with_core .entry(ParaId::from(3)) .or_insert(vec![]) .push((backed, CoreIndex(5))); @@ -2391,7 +2480,7 @@ mod sanitizers { ); backed_candidates.push(backed.clone()); if core_index_enabled { - all_backed_candidates_with_core + expected_backed_candidates_with_core .entry(ParaId::from(3)) .or_insert(vec![]) .push((backed, CoreIndex(6))); @@ -2431,7 +2520,7 @@ mod sanitizers { ); backed_candidates.push(backed.clone()); if core_index_enabled { - all_backed_candidates_with_core + expected_backed_candidates_with_core .entry(ParaId::from(4)) .or_insert(vec![]) .push((backed, CoreIndex(7))); @@ -2487,20 +2576,20 @@ mod sanitizers { TestData { backed_candidates, scheduled_paras: scheduled, - all_backed_candidates_with_core, + expected_backed_candidates_with_core, } } #[rstest] #[case(false)] #[case(true)] - fn happy_path(#[case] core_index_enabled: bool) { + fn happy_path_one_core_per_para(#[case] core_index_enabled: bool) { new_test_ext(default_config()).execute_with(|| { let TestData { backed_candidates, - all_backed_candidates_with_core, + expected_backed_candidates_with_core, scheduled_paras: scheduled, - } = get_test_data(core_index_enabled); + } = get_test_data_one_core_per_para(core_index_enabled); assert_eq!( sanitize_backed_candidates::( @@ -2510,7 +2599,7 @@ mod 
sanitizers { scheduled, core_index_enabled ), - all_backed_candidates_with_core, + expected_backed_candidates_with_core, ); }); } @@ -2522,7 +2611,7 @@ mod sanitizers { new_test_ext(default_config()).execute_with(|| { let TestData { backed_candidates, - all_backed_candidates_with_core: expected_all_backed_candidates_with_core, + expected_backed_candidates_with_core, scheduled_paras: scheduled, } = get_test_data_multiple_cores_per_para(core_index_enabled); @@ -2534,7 +2623,7 @@ mod sanitizers { scheduled, core_index_enabled ), - expected_all_backed_candidates_with_core, + expected_backed_candidates_with_core, ); }); } @@ -2547,7 +2636,7 @@ mod sanitizers { let TestData { backed_candidates, scheduled_paras: scheduled, - all_backed_candidates_with_core, + expected_backed_candidates_with_core, } = get_test_data_for_order_checks(core_index_enabled); assert_eq!( @@ -2558,7 +2647,7 @@ mod sanitizers { scheduled, core_index_enabled, ), - all_backed_candidates_with_core + expected_backed_candidates_with_core ); }); } @@ -2577,7 +2666,7 @@ mod sanitizers { let TestData { backed_candidates, .. } = if multiple_cores_per_para { get_test_data_multiple_cores_per_para(core_index_enabled) } else { - get_test_data(core_index_enabled) + get_test_data_one_core_per_para(core_index_enabled) }; let scheduled = BTreeMap::new(); @@ -2597,10 +2686,12 @@ mod sanitizers { #[rstest] #[case(false)] #[case(true)] - fn invalid_are_filtered_out(#[case] core_index_enabled: bool) { + fn concluded_invalid_are_filtered_out_single_core_per_para( + #[case] core_index_enabled: bool, + ) { new_test_ext(default_config()).execute_with(|| { let TestData { backed_candidates, scheduled_paras: scheduled, .. } = - get_test_data(core_index_enabled); + get_test_data_one_core_per_para(core_index_enabled); // mark every second one as concluded invalid let set = { @@ -2627,27 +2718,107 @@ mod sanitizers { }); } + // candidates that have concluded as invalid are filtered out, as well as their descendants. 
+ #[test] + fn concluded_invalid_are_filtered_out_multiple_cores_per_para() { + // Mark the first candidate of paraid 1 as invalid. Its descendant should also + be dropped. Also mark the candidate of paraid 3 as invalid. + new_test_ext(default_config()).execute_with(|| { + let TestData { + backed_candidates, + scheduled_paras: scheduled, + mut expected_backed_candidates_with_core, + .. + } = get_test_data_multiple_cores_per_para(true); + + let mut invalid_set = std::collections::BTreeSet::new(); + + for (idx, backed_candidate) in backed_candidates.iter().enumerate() { + if backed_candidate.descriptor().para_id == ParaId::from(1) && idx == 0 { + invalid_set.insert(backed_candidate.hash()); + } else if backed_candidate.descriptor().para_id == ParaId::from(3) { + invalid_set.insert(backed_candidate.hash()); + } + } + let sanitized_backed_candidates: BTreeMap< + ParaId, + Vec<(BackedCandidate<_>, CoreIndex)>, + > = sanitize_backed_candidates::( + backed_candidates.clone(), + &>::allowed_relay_parents(), + invalid_set, + scheduled, + true, + ); + + // We'll be left with candidates from paraid 2 and 4. + + expected_backed_candidates_with_core.remove(&ParaId::from(1)).unwrap(); + expected_backed_candidates_with_core.remove(&ParaId::from(3)).unwrap(); + + assert_eq!(sanitized_backed_candidates, expected_backed_candidates_with_core); + }); + + // Mark the second candidate of paraid 1 as invalid. Its predecessor should be left + in place. + new_test_ext(default_config()).execute_with(|| { + let TestData { + backed_candidates, + scheduled_paras: scheduled, + mut expected_backed_candidates_with_core, + ..
+ } = get_test_data_multiple_cores_per_para(true); + + let mut invalid_set = std::collections::BTreeSet::new(); + + for (idx, backed_candidate) in backed_candidates.iter().enumerate() { + if backed_candidate.descriptor().para_id == ParaId::from(1) && idx == 1 { + invalid_set.insert(backed_candidate.hash()); + } + } + let sanitized_backed_candidates: BTreeMap< + ParaId, + Vec<(BackedCandidate<_>, CoreIndex)>, + > = sanitize_backed_candidates::( + backed_candidates.clone(), + &>::allowed_relay_parents(), + invalid_set, + scheduled, + true, + ); + + // Only the second candidate of paraid 1 should be removed. + expected_backed_candidates_with_core + .get_mut(&ParaId::from(1)) + .unwrap() + .remove(1); + + // We'll be left with candidates from paraid 1, 2, 3 and 4. + assert_eq!(sanitized_backed_candidates, expected_backed_candidates_with_core); + }); + } + #[rstest] #[case(false)] #[case(true)] fn disabled_non_signing_validator_doesnt_get_filtered(#[case] core_index_enabled: bool) { new_test_ext(default_config()).execute_with(|| { - let TestData { mut all_backed_candidates_with_core, .. } = - get_test_data(core_index_enabled); + let TestData { mut expected_backed_candidates_with_core, .. 
} = + get_test_data_one_core_per_para(core_index_enabled); // Disable Eve set_disabled_validators(vec![4]); - let before = all_backed_candidates_with_core.clone(); + let before = expected_backed_candidates_with_core.clone(); // Eve is disabled but no backing statement is signed by it so nothing should be // filtered filter_backed_statements_from_disabled_validators::( - &mut all_backed_candidates_with_core, + &mut expected_backed_candidates_with_core, &>::allowed_relay_parents(), core_index_enabled, ); - assert_eq!(all_backed_candidates_with_core, before); + assert_eq!(expected_backed_candidates_with_core, before); }); } @@ -2658,8 +2829,8 @@ mod sanitizers { #[case] core_index_enabled: bool, ) { new_test_ext(default_config()).execute_with(|| { - let TestData { mut all_backed_candidates_with_core, .. } = - get_test_data(core_index_enabled); + let TestData { mut expected_backed_candidates_with_core, .. } = + get_test_data_one_core_per_para(core_index_enabled); // Disable Alice set_disabled_validators(vec![0]); @@ -2673,7 +2844,7 @@ mod sanitizers { // Verify the initial state is as expected assert_eq!( - all_backed_candidates_with_core + expected_backed_candidates_with_core .get(&ParaId::from(1)) .unwrap() .iter() @@ -2684,7 +2855,7 @@ mod sanitizers { .len(), 2 ); - let (validator_indices, maybe_core_index) = all_backed_candidates_with_core + let (validator_indices, maybe_core_index) = expected_backed_candidates_with_core .get(&ParaId::from(1)) .unwrap() .iter() @@ -2700,7 +2871,7 @@ mod sanitizers { assert_eq!(validator_indices.get(0).unwrap(), true); assert_eq!(validator_indices.get(1).unwrap(), true); - let untouched = all_backed_candidates_with_core + let untouched = expected_backed_candidates_with_core .get(&ParaId::from(2)) .unwrap() .iter() @@ -2709,15 +2880,15 @@ mod sanitizers { .0 .clone(); - let before = all_backed_candidates_with_core.clone(); + let before = expected_backed_candidates_with_core.clone(); 
filter_backed_statements_from_disabled_validators::( - &mut all_backed_candidates_with_core, + &mut expected_backed_candidates_with_core, &>::allowed_relay_parents(), core_index_enabled, ); - assert_eq!(before.len(), all_backed_candidates_with_core.len()); + assert_eq!(before.len(), expected_backed_candidates_with_core.len()); - let (validator_indices, maybe_core_index) = all_backed_candidates_with_core + let (validator_indices, maybe_core_index) = expected_backed_candidates_with_core .get(&ParaId::from(1)) .unwrap() .iter() @@ -2732,10 +2903,10 @@ mod sanitizers { } // there should still be two backed candidates - assert_eq!(all_backed_candidates_with_core.len(), 2); + assert_eq!(expected_backed_candidates_with_core.len(), 2); // but the first one should have only one validity vote assert_eq!( - all_backed_candidates_with_core + expected_backed_candidates_with_core .get(&ParaId::from(1)) .unwrap() .iter() @@ -2751,7 +2922,7 @@ mod sanitizers { assert_eq!(validator_indices.get(1).unwrap(), true); // the second candidate shouldn't be modified assert_eq!( - all_backed_candidates_with_core + expected_backed_candidates_with_core .get(&ParaId::from(2)) .unwrap() .iter() @@ -2766,17 +2937,19 @@ mod sanitizers { #[rstest] #[case(false)] #[case(true)] - fn drop_candidate_if_all_statements_are_from_disabled(#[case] core_index_enabled: bool) { + fn drop_candidate_if_all_statements_are_from_disabled_single_core_per_para( + #[case] core_index_enabled: bool, + ) { new_test_ext(default_config()).execute_with(|| { - let TestData { mut all_backed_candidates_with_core, .. } = - get_test_data(core_index_enabled); + let TestData { mut expected_backed_candidates_with_core, .. 
} = + get_test_data_one_core_per_para(core_index_enabled); // Disable Alice and Bob set_disabled_validators(vec![0, 1]); // Verify the initial state is as expected assert_eq!( - all_backed_candidates_with_core + expected_backed_candidates_with_core .get(&ParaId::from(1)) .unwrap() .iter() @@ -2787,7 +2960,7 @@ mod sanitizers { .len(), 2 ); - let untouched = all_backed_candidates_with_core + let untouched = expected_backed_candidates_with_core .get(&ParaId::from(2)) .unwrap() .iter() @@ -2797,14 +2970,14 @@ mod sanitizers { .clone(); filter_backed_statements_from_disabled_validators::( - &mut all_backed_candidates_with_core, + &mut expected_backed_candidates_with_core, &>::allowed_relay_parents(), core_index_enabled, ); - assert_eq!(all_backed_candidates_with_core.len(), 1); + assert_eq!(expected_backed_candidates_with_core.len(), 1); assert_eq!( - all_backed_candidates_with_core + expected_backed_candidates_with_core .get(&ParaId::from(2)) .unwrap() .iter() @@ -2813,8 +2986,54 @@ mod sanitizers { .0, untouched ); - assert_eq!(all_backed_candidates_with_core.get(&ParaId::from(1)), None); + assert_eq!(expected_backed_candidates_with_core.get(&ParaId::from(1)), None); }); } + + #[test] + fn drop_candidate_if_all_statements_are_from_disabled_multiple_cores_per_para() { + // Disable Bob, only the second candidate of paraid 1 should be removed. + new_test_ext(default_config()).execute_with(|| { + let TestData { mut expected_backed_candidates_with_core, .. 
} = + get_test_data_multiple_cores_per_para(true); + + set_disabled_validators(vec![1]); + + let mut untouched = expected_backed_candidates_with_core.clone(); + + filter_backed_statements_from_disabled_validators::( + &mut expected_backed_candidates_with_core, + &>::allowed_relay_parents(), + true, + ); + + untouched.get_mut(&ParaId::from(1)).unwrap().remove(1); + + assert_eq!(expected_backed_candidates_with_core, untouched); + }); + + // Disable Alice or disable both Alice and Bob, all candidates of paraid 1 should be + // removed. + for disabled in [vec![0], vec![0, 1]] { + new_test_ext(default_config()).execute_with(|| { + let TestData { mut expected_backed_candidates_with_core, .. } = + get_test_data_multiple_cores_per_para(true); + + set_disabled_validators(disabled); + + let mut untouched = expected_backed_candidates_with_core.clone(); + + filter_backed_statements_from_disabled_validators::( + &mut expected_backed_candidates_with_core, + &>::allowed_relay_parents(), + true, + ); + + untouched.remove(&ParaId::from(1)).unwrap(); + + assert_eq!(expected_backed_candidates_with_core, untouched); + }); + } + } } } From 37ff4c5a70cc475b3f77c28fe8899758a0095716 Mon Sep 17 00:00:00 2001 From: alindima Date: Fri, 15 Mar 2024 10:30:19 +0200 Subject: [PATCH 31/44] optimise update_pending_availability_and_get_freed_cores --- .../src/runtime/scheduler.md | 1 - .../runtime/parachains/src/inclusion/mod.rs | 92 +++++++++---------- .../runtime/parachains/src/inclusion/tests.rs | 43 ++------- .../parachains/src/paras_inherent/mod.rs | 3 +- polkadot/runtime/parachains/src/scheduler.rs | 10 -- 5 files changed, 50 insertions(+), 99 deletions(-) diff --git a/polkadot/roadmap/implementers-guide/src/runtime/scheduler.md b/polkadot/roadmap/implementers-guide/src/runtime/scheduler.md index 32a7fe652dbc..04b221a83e58 100644 --- a/polkadot/roadmap/implementers-guide/src/runtime/scheduler.md +++ b/polkadot/roadmap/implementers-guide/src/runtime/scheduler.md @@ -285,7 +285,6 @@ No 
finalization routine runs for this module. - This clears them from `Scheduled` and marks each corresponding `core` in the `AvailabilityCores` as occupied. - Since both the availability cores and the newly-occupied cores lists are sorted ascending, this method can be implemented efficiently. -- `core_para(CoreIndex) -> ParaId`: return the currently-scheduled or occupied ParaId for the given core. - `group_validators(GroupIndex) -> Option>`: return all validators in a given group, if the group index is valid for this session. - `availability_timeout_predicate() -> Option bool>`: returns an optional predicate diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 2c48cd8ade80..14d5889906cf 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -482,14 +482,10 @@ impl Pallet { /// /// Returns a `Vec` of `CandidateHash`es and their respective `AvailabilityCore`s that became /// available, and cores free. - pub(crate) fn update_pending_availability_and_get_freed_cores( + pub(crate) fn update_pending_availability_and_get_freed_cores( validators: &[ValidatorId], signed_bitfields: SignedAvailabilityBitfields, - core_lookup: F, - ) -> Vec<(CoreIndex, CandidateHash)> - where - F: Fn(CoreIndex) -> Option, - { + ) -> Vec<(CoreIndex, CandidateHash)> { let now = >::block_number(); let threshold = availability_threshold(validators.len()); @@ -515,59 +511,53 @@ impl Pallet { >::insert(&validator_index, record); } - // Update the availability votes for each candidate and take note of what cores were made - // available. 
- let mut candidates_made_available: BTreeMap> = BTreeMap::new(); - for (core_index, validator_indices) in votes_per_core { - if let Some(para_id) = core_lookup(core_index) { - >::mutate(¶_id, |candidates| { - if let Some(candidates) = candidates { - for (index, candidate) in candidates.iter_mut().enumerate() { - if candidate.core == core_index { - for validator_index in validator_indices.iter() { - // defensive check - this is constructed by loading the - // availability bitfield record, which is always `Some` if - // the core is occupied - that's why we're here. - if let Some(mut bit) = candidate - .availability_votes - .get_mut(validator_index.0 as usize) - { - *bit = true; - } - } - } + let mut freed_cores = vec![]; - if candidate.availability_votes.count_ones() >= threshold { - candidates_made_available - .entry(para_id) - .or_insert_with(|| BTreeSet::new()) - .insert(index); + let pending_paraids: Vec<_> = >::iter_keys().collect(); + for paraid in pending_paraids { + >::mutate(paraid, |candidates| { + if let Some(candidates) = candidates { + let mut last_enacted_index: Option = None; + + for (candidate_index, candidate) in candidates.iter_mut().enumerate() { + if let Some(validator_indices) = votes_per_core.remove(&candidate.core) { + for validator_index in validator_indices.iter() { + // defensive check - this is constructed by loading the + // availability bitfield record, which is always `Some` if + // the core is occupied - that's why we're here. + if let Some(mut bit) = + candidate.availability_votes.get_mut(validator_index.0 as usize) + { + *bit = true; + } } } - } - }); - } else { - // No parachain is occupying that core yet. - } - } - let mut freed_cores = Vec::with_capacity(candidates_made_available.len()); + // We check for the candidate's availability even if we didn't get any new + // bitfields for its core, as it may have already been available at a + // previous block but wasn't enacted due to its predecessors not being + // available. 
+ if candidate.availability_votes.count_ones() >= threshold { + // We can only enact a candidate if we've enacted all of its + // predecessors already. + let can_enact = if candidate_index == 0 { + last_enacted_index == None + } else { + let prev_candidate_index = usize::try_from(candidate_index - 1) + .expect("Previous `if` would have caught a 0 candidate index."); + matches!(last_enacted_index, Some(old_index) if old_index == prev_candidate_index) + }; - // Trim the pending availability candidates storage and enact candidates now. - for (para_id, available_candidates) in candidates_made_available { - >::mutate(¶_id, |candidates| { - if let Some(candidates) = candidates { - let mut stopped_at_index = None; - for index in 0..candidates.len() { - if available_candidates.contains(&index) { - stopped_at_index = Some(index); - } else { - break + if can_enact { + last_enacted_index = Some(candidate_index); + } } } - if let Some(stopped_at_index) = stopped_at_index { - let evicted_candidates = candidates.drain(0..=stopped_at_index); + // Trim the pending availability candidates storage and enact candidates of this + // para now. + if let Some(last_enacted_index) = last_enacted_index { + let evicted_candidates = candidates.drain(0..=last_enacted_index); for candidate in evicted_candidates { freed_cores.push((candidate.core, candidate.hash)); diff --git a/polkadot/runtime/parachains/src/inclusion/tests.rs b/polkadot/runtime/parachains/src/inclusion/tests.rs index 7b938fc2fa25..36e99eed53ea 100644 --- a/polkadot/runtime/parachains/src/inclusion/tests.rs +++ b/polkadot/runtime/parachains/src/inclusion/tests.rs @@ -361,14 +361,12 @@ fn simple_sanitize_bitfields( /// Process a set of already sanitized bitfields. 
pub(crate) fn process_bitfields( signed_bitfields: SignedAvailabilityBitfields, - core_lookup: impl Fn(CoreIndex) -> Option, ) -> Vec<(CoreIndex, CandidateHash)> { let validators = shared::Pallet::::active_validator_keys(); - ParaInclusion::update_pending_availability_and_get_freed_cores::<_>( + ParaInclusion::update_pending_availability_and_get_freed_cores( &validators[..], signed_bitfields, - core_lookup, ) } @@ -686,14 +684,6 @@ fn bitfield_checks() { let signing_context = SigningContext { parent_hash: System::parent_hash(), session_index: 5 }; - let core_lookup = |core| match core { - core if core == CoreIndex::from(0) => Some(chain_a), - core if core == CoreIndex::from(1) => Some(chain_b), - core if core == CoreIndex::from(2) => Some(thread_a), - core if core == CoreIndex::from(3) => None, // for the expected_cores() + 1 test below. - _ => panic!("out of bounds for testing"), - }; - // too many bits in bitfield { let mut bare_bitfield = default_bitfield(); @@ -762,7 +752,7 @@ fn bitfield_checks() { ); assert_eq!(checked_bitfields.len(), 1, "No bitfields should have been filtered!"); - let x = process_bitfields(checked_bitfields, core_lookup); + let x = process_bitfields(checked_bitfields); assert!(x.is_empty(), "No core should be freed."); } @@ -783,7 +773,7 @@ fn bitfield_checks() { ); assert_eq!(checked_bitfields.len(), 1, "No bitfields should have been filtered!"); - let x = process_bitfields(checked_bitfields, core_lookup); + let x = process_bitfields(checked_bitfields); assert!(x.is_empty(), "No core should be freed."); } @@ -791,8 +781,6 @@ fn bitfield_checks() { { let mut bare_bitfield = default_bitfield(); - assert_eq!(core_lookup(CoreIndex::from(0)), Some(chain_a)); - let default_candidate = TestCandidateBuilder::default().build(); >::insert( chain_a, @@ -827,7 +815,7 @@ fn bitfield_checks() { ); assert_eq!(checked_bitfields.len(), 1, "No bitfields should have been filtered!"); - let x = process_bitfields(checked_bitfields, core_lookup); + let x = 
process_bitfields(checked_bitfields); assert!(x.is_empty(), "No core should be freed."); >::remove(chain_a); @@ -884,16 +872,6 @@ fn supermajority_bitfields_trigger_availability() { let signing_context = SigningContext { parent_hash: System::parent_hash(), session_index: 5 }; - let core_lookup = |core| match core { - core if core == CoreIndex::from(0) => Some(chain_a), - core if core == CoreIndex::from(1) => Some(chain_b), - core if core == CoreIndex::from(2) => Some(chain_c), - core if core == CoreIndex::from(3) => Some(chain_c), - core if core == CoreIndex::from(4) => Some(chain_c), - core if core == CoreIndex::from(5) => Some(thread_a), - _ => panic!("Core out of bounds"), - }; - // Chain A only has one candidate pending availability. It will be made available now. let candidate_a = TestCandidateBuilder { para_id: chain_a, @@ -1058,9 +1036,9 @@ fn supermajority_bitfields_trigger_availability() { assert_eq!(checked_bitfields.len(), old_len, "No bitfields should have been filtered!"); // only chain A's core and candidate's C1 core are freed. 
- let v = process_bitfields(checked_bitfields, core_lookup); + let v = process_bitfields(checked_bitfields); assert_eq!( - vec![(CoreIndex(0), candidate_a.hash()), (CoreIndex(2), candidate_c_1.hash())], + vec![(CoreIndex(2), candidate_c_1.hash()), (CoreIndex(0), candidate_a.hash())], v ); @@ -1136,7 +1114,7 @@ fn supermajority_bitfields_trigger_availability() { ); assert_eq!(checked_bitfields.len(), old_len, "No bitfields should have been filtered!"); - let v = process_bitfields(checked_bitfields, core_lookup); + let v = process_bitfields(checked_bitfields); assert_eq!( vec![(CoreIndex(3), candidate_c_2.hash()), (CoreIndex(4), candidate_c_3.hash())], v @@ -2833,11 +2811,6 @@ fn para_upgrade_delay_scheduled_from_inclusion() { ]]; Scheduler::set_validator_groups(validator_groups); - let core_lookup = |core| match core { - core if core == CoreIndex::from(0) => Some(chain_a), - _ => None, - }; - let allowed_relay_parents = default_allowed_relay_parent_tracker(); let chain_a_assignment = (chain_a, CoreIndex::from(0)); @@ -2903,7 +2876,7 @@ fn para_upgrade_delay_scheduled_from_inclusion() { expected_bits(), ); - let v = process_bitfields(checked_bitfields, core_lookup); + let v = process_bitfields(checked_bitfields); assert_eq!(vec![(CoreIndex(0), candidate_a.hash())], v); assert!(>::get(&chain_a).unwrap().is_empty()); diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 457e51504f2d..098404d72602 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -573,10 +573,9 @@ impl Pallet { // Process new availability bitfields, yielding any availability cores whose // work has now concluded. 
let freed_concluded = - >::update_pending_availability_and_get_freed_cores::<_>( + >::update_pending_availability_and_get_freed_cores( &validator_public[..], bitfields.clone(), - >::core_para, ); // Inform the disputes module of all included candidates. diff --git a/polkadot/runtime/parachains/src/scheduler.rs b/polkadot/runtime/parachains/src/scheduler.rs index 61ab36dbe96c..fb2a4362dfc5 100644 --- a/polkadot/runtime/parachains/src/scheduler.rs +++ b/polkadot/runtime/parachains/src/scheduler.rs @@ -391,16 +391,6 @@ impl Pallet { }); } - /// Get the para (chain or thread) ID assigned to a particular core or index, if any. Core - /// indices out of bounds will return `None`, as will indices of unassigned cores. - pub(crate) fn core_para(core_index: CoreIndex) -> Option { - let cores = AvailabilityCores::::get(); - match cores.get(core_index.0 as usize) { - None | Some(CoreOccupied::Free) => None, - Some(CoreOccupied::Paras(entry)) => Some(entry.para_id()), - } - } - /// Get the validators in the given group, if the group index is valid for this session. 
pub(crate) fn group_validators(group_index: GroupIndex) -> Option> { ValidatorGroups::::get().get(group_index.0 as usize).map(|g| g.clone()) From a92523b2ee137fc912d643846ede8dc26beb31ba Mon Sep 17 00:00:00 2001 From: alindima Date: Fri, 15 Mar 2024 10:42:10 +0200 Subject: [PATCH 32/44] make force-enact work on all cores --- .../src/runtime/inclusion.md | 9 ++-- .../runtime/parachains/src/inclusion/mod.rs | 49 +++++++++---------- .../parachains/src/runtime_api_impl/v7.rs | 4 +- 3 files changed, 29 insertions(+), 33 deletions(-) diff --git a/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md b/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md index f6a32a01d502..90f8da249ee6 100644 --- a/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md +++ b/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md @@ -147,10 +147,11 @@ All failed checks should lead to an unrecoverable error making the block invalid // return a vector of cleaned-up core IDs. } ``` -* `force_enact(ParaId)`: Forcibly enact the candidate with the given ID as though it had been deemed available by - bitfields. Is a no-op if there is no candidate pending availability for this para-id. This should generally not be - used but it is useful during execution of Runtime APIs, where the changes to the state are expected to be discarded - directly after. +* `force_enact(ParaId)`: Forcibly enact the pending candidates of the given paraid as though they had been deemed + available by bitfields. Is a no-op if there is no candidate pending availability for this para-id. + If there are multiple candidates pending availability for this para-id, it will enact all of + them. This should generally not be used but it is useful during execution of Runtime APIs, + where the changes to the state are expected to be discarded directly after. * `candidate_pending_availability(ParaId) -> Option`: returns the `CommittedCandidateReceipt` pending availability for the para provided, if any. 
* `pending_availability(ParaId) -> Option`: returns the metadata around the candidate diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 14d5889906cf..3658cca45774 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -1069,38 +1069,33 @@ impl Pallet { cleaned_up_cores.into_iter() } - /// Forcibly enact the candidate with the given ID as though it had been deemed available - /// by bitfields. + /// Forcibly enact the pending candidates of the given paraid as though they had been deemed + /// available by bitfields. /// /// Is a no-op if there is no candidate pending availability for this para-id. - /// This should generally not be used but it is useful during execution of Runtime APIs, + /// If there are multiple candidates pending availability for this para-id, it will enact all of + /// them. This should generally not be used but it is useful during execution of Runtime APIs, /// where the changes to the state are expected to be discarded directly after. pub(crate) fn force_enact(para: ParaId) { - // This does not take elastic-scaling into account, it enacts the first candidate. - let enacted_candidate = - >::mutate(¶, |candidates| match candidates { - Some(candidates) => candidates.pop_front(), - // TODO: this should also check the descendants, as they may have been made - // available before their parent. Or just change the semantic of force_enact to - // enact all candidates of a para. - _ => None, - }); - - if let Some(candidate) = enacted_candidate { - let receipt = CommittedCandidateReceipt { - descriptor: candidate.descriptor, - commitments: candidate.commitments, - }; + >::mutate(¶, |candidates| { + if let Some(candidates) = candidates { + for candidate in candidates.drain(..) 
{ + let receipt = CommittedCandidateReceipt { + descriptor: candidate.descriptor, + commitments: candidate.commitments, + }; - Self::enact_candidate( - candidate.relay_parent_number, - receipt, - candidate.backers, - candidate.availability_votes, - candidate.core, - candidate.backing_group, - ); - } + Self::enact_candidate( + candidate.relay_parent_number, + receipt, + candidate.backers, + candidate.availability_votes, + candidate.core, + candidate.backing_group, + ); + } + } + }); } /// Returns the first `CommittedCandidateReceipt` pending availability for the para provided, if diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs b/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs index 7b9c15d2b749..e20516abda0b 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs @@ -202,8 +202,8 @@ pub fn assumed_validation_data( }; let persisted_validation_data = make_validation_data().or_else(|| { - // Try again with force enacting the core. This check only makes sense if - // the core is occupied. + // Try again with force enacting the pending candidates. This check only makes sense if + // there are any pending candidates. 
>::pending_availability(para_id).and_then(|_| { >::force_enact(para_id); make_validation_data() From b77806ec04cb95398b3e9fa851cda8996ac27526 Mon Sep 17 00:00:00 2001 From: alindima Date: Fri, 15 Mar 2024 10:51:17 +0200 Subject: [PATCH 33/44] some review comments --- .../src/runtime/inclusion.md | 2 +- .../src/runtime/parainherent.md | 2 +- .../runtime/parachains/src/inclusion/mod.rs | 29 +++++++++---------- .../runtime/parachains/src/inclusion/tests.rs | 16 +++++----- .../parachains/src/paras_inherent/mod.rs | 7 ++--- 5 files changed, 26 insertions(+), 30 deletions(-) diff --git a/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md b/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md index 90f8da249ee6..fd74f33253b7 100644 --- a/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md +++ b/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md @@ -156,7 +156,7 @@ All failed checks should lead to an unrecoverable error making the block invalid pending availability for the para provided, if any. * `pending_availability(ParaId) -> Option`: returns the metadata around the candidate pending availability for the para, if any. -* `collect_disputed(disputed: Vec) -> Vec`: Sweeps through all paras pending availability. If +* `free_disputed(disputed: Vec) -> Vec`: Sweeps through all paras pending availability. If the candidate hash is one of the disputed candidates, then clean up the corresponding storage for that candidate and the commitments. Return a vector of cleaned-up core IDs. diff --git a/polkadot/roadmap/implementers-guide/src/runtime/parainherent.md b/polkadot/roadmap/implementers-guide/src/runtime/parainherent.md index 5419ddae83d4..1345f0eea95e 100644 --- a/polkadot/roadmap/implementers-guide/src/runtime/parainherent.md +++ b/polkadot/roadmap/implementers-guide/src/runtime/parainherent.md @@ -17,7 +17,7 @@ There are a couple of important notes to the operations in this inherent as they this fork. 1. 
When disputes are initiated, we remove the block from pending availability. This allows us to roll back chains to the block before blocks are included as opposed to backing. It's important to do this before processing bitfields. -1. `Inclusion::collect_disputed` is kind of expensive so it's important to gate this on whether there are actually any +1. `Inclusion::free_disputed` is kind of expensive so it's important to gate this on whether there are actually any new disputes. Which should be never. 1. And we don't accept parablocks that have open disputes or disputes that have concluded against the candidate. It's important to import dispute statements before backing, but this is already the case as disputes are imported before diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 3658cca45774..d51054dd247f 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -23,7 +23,7 @@ use crate::{ configuration::{self, HostConfiguration}, disputes, dmp, hrmp, paras::{self, SetGoAhead}, - scheduler::{self, AvailabilityTimeoutStatus}, + scheduler, shared::{self, AllowedRelayParentsTracker}, util::make_persisted_validation_data_with_parent, }; @@ -610,7 +610,7 @@ impl Pallet { Vec::with_capacity(candidates.len()); let mut core_indices = Vec::with_capacity(candidates.len()); - for (para_id, candidates) in candidates { + for (para_id, para_candidates) in candidates { let mut latest_head_data = match Self::para_latest_head_data(para_id) { None => { defensive!("Latest included head data for paraid {:?} is None", para_id); @@ -619,7 +619,7 @@ impl Pallet { Some(latest_head_data) => latest_head_data, }; - for (candidate, core) in candidates.iter() { + for (candidate, core) in para_candidates.iter() { let candidate_hash = candidate.candidate().hash(); let check_ctx = CandidateCheckContext::::new(None); @@ -978,19 +978,18 @@ impl Pallet { weight } - /// 
Cleans up all timed out candidates that the predicate returns true for. - /// Also cleans up their descendant candidates. - /// - /// The predicate accepts the block number the core has been occupied - /// since (i.e. the block number the candidate was backed at in this fork of the relay chain). + /// Cleans up all timed out candidates as well as their descendant candidates. /// /// Returns a vector of cleaned-up core IDs. - pub(crate) fn collect_timedout( - pred: impl Fn(BlockNumberFor) -> AvailabilityTimeoutStatus>, - ) -> Vec { - let timed_out: Vec<_> = - Self::free_failed_cores(|candidate| pred(candidate.backed_in_number).timed_out, None) - .collect(); + pub(crate) fn free_timedout() -> Vec { + let timeout_pred = >::availability_timeout_predicate(); + + let timed_out: Vec<_> = Self::free_failed_cores( + |candidate| timeout_pred(candidate.backed_in_number).timed_out, + None, + ) + .collect(); + let mut timed_out_cores = Vec::with_capacity(timed_out.len()); for candidate in timed_out.iter() { timed_out_cores.push(candidate.core); @@ -1014,7 +1013,7 @@ impl Pallet { /// are descendants of a disputed candidate. /// /// Returns a vector of cleaned-up core IDs, along with the evicted candidate hashes. 
- pub(crate) fn collect_disputed( + pub(crate) fn free_disputed( disputed: &BTreeSet, ) -> impl Iterator + '_ { Self::free_failed_cores( diff --git a/polkadot/runtime/parachains/src/inclusion/tests.rs b/polkadot/runtime/parachains/src/inclusion/tests.rs index 36e99eed53ea..c51b0c498f89 100644 --- a/polkadot/runtime/parachains/src/inclusion/tests.rs +++ b/polkadot/runtime/parachains/src/inclusion/tests.rs @@ -371,7 +371,7 @@ pub(crate) fn process_bitfields( } #[test] -fn collect_timedout() { +fn free_timedout() { let chain_a = ParaId::from(1_u32); let chain_b = ParaId::from(2_u32); let chain_c = ParaId::from(3_u32); @@ -392,8 +392,7 @@ fn collect_timedout() { let mut config = genesis_config(paras); config.configuration.config.scheduler_params.group_rotation_frequency = 3; new_test_ext(config).execute_with(|| { - let timed_out_cores = - ParaInclusion::collect_timedout(Scheduler::availability_timeout_predicate()); + let timed_out_cores = ParaInclusion::free_timedout(); assert!(timed_out_cores.is_empty()); let make_candidate = |core_index: u32, timed_out: bool| { @@ -464,8 +463,7 @@ fn collect_timedout() { assert_eq!(>::get(&chain_e).unwrap().len(), 3); assert_eq!(>::get(&chain_f).unwrap().len(), 3); - let timed_out_cores = - ParaInclusion::collect_timedout(Scheduler::availability_timeout_predicate()); + let timed_out_cores = ParaInclusion::free_timedout(); assert_eq!( timed_out_cores, @@ -504,7 +502,7 @@ fn collect_timedout() { } #[test] -fn collect_disputed() { +fn free_disputed() { let chain_a = ParaId::from(1_u32); let chain_b = ParaId::from(2_u32); let chain_c = ParaId::from(3_u32); @@ -525,10 +523,10 @@ fn collect_disputed() { let mut config = genesis_config(paras); config.configuration.config.scheduler_params.group_rotation_frequency = 3; new_test_ext(config).execute_with(|| { - let disputed_cores = ParaInclusion::collect_disputed(&BTreeSet::new()).collect::>(); + let disputed_cores = ParaInclusion::free_disputed(&BTreeSet::new()).collect::>(); 
assert!(disputed_cores.is_empty()); - let disputed_cores = ParaInclusion::collect_disputed( + let disputed_cores = ParaInclusion::free_disputed( &[CandidateHash::default()].into_iter().collect::>(), ) .collect::>(); @@ -610,7 +608,7 @@ fn collect_disputed() { ] .into_iter() .collect::>(); - let disputed_cores = ParaInclusion::collect_disputed(&disputed_candidates); + let disputed_cores = ParaInclusion::free_disputed(&disputed_candidates); assert_eq!( disputed_cores.map(|(core, _)| core).collect::>(), diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 098404d72602..e1423c5d21d3 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -242,8 +242,7 @@ pub mod pallet { { // Handle timeouts for any availability core work. let freed_timeout = if >::availability_timeout_check_required() { - let pred = >::availability_timeout_predicate(); - >::collect_timedout(pred) + >::free_timedout() } else { Vec::new() }; @@ -523,7 +522,7 @@ impl Pallet { // Contains the disputes that are concluded in the current session only, // since these are the only ones that are relevant for the occupied cores - // and lightens the load on `collect_disputed` significantly. + // and lightens the load on `free_disputed` significantly. // Cores can't be occupied with candidates of the previous sessions, and only // things with new votes can have just concluded. 
We only need to collect // cores with disputes that conclude just now, because disputes that @@ -542,7 +541,7 @@ impl Pallet { let (freed_disputed_cores, freed_disputed_candidates): ( BTreeMap, BTreeSet, - ) = >::collect_disputed(¤t_concluded_invalid_disputes) + ) = >::free_disputed(¤t_concluded_invalid_disputes) .map(|(core, candidate)| ((core, FreedReason::Concluded), candidate)) .unzip(); From 318b8da25f1a12e8630b8fb895ad39775b127b96 Mon Sep 17 00:00:00 2001 From: alindima Date: Fri, 15 Mar 2024 11:33:34 +0200 Subject: [PATCH 34/44] call free_cores_and_fill_claimqueue only once --- .../runtime/parachains/src/inclusion/mod.rs | 3 +- .../runtime/parachains/src/inclusion/tests.rs | 7 ++--- .../parachains/src/paras_inherent/mod.rs | 28 +++++++------------ 3 files changed, 15 insertions(+), 23 deletions(-) diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index d51054dd247f..54c6dbfea7a1 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -1015,12 +1015,13 @@ impl Pallet { /// Returns a vector of cleaned-up core IDs, along with the evicted candidate hashes. pub(crate) fn free_disputed( disputed: &BTreeSet, - ) -> impl Iterator + '_ { + ) -> Vec<(CoreIndex, CandidateHash)> { Self::free_failed_cores( |candidate| disputed.contains(&candidate.hash), Some(disputed.len()), ) .map(|candidate| (candidate.core, candidate.hash)) + .collect() } // Clean up cores whose candidates are deemed as failed by the predicate. 
`pred` returns true if diff --git a/polkadot/runtime/parachains/src/inclusion/tests.rs b/polkadot/runtime/parachains/src/inclusion/tests.rs index c51b0c498f89..c8d99b28ca90 100644 --- a/polkadot/runtime/parachains/src/inclusion/tests.rs +++ b/polkadot/runtime/parachains/src/inclusion/tests.rs @@ -523,13 +523,12 @@ fn free_disputed() { let mut config = genesis_config(paras); config.configuration.config.scheduler_params.group_rotation_frequency = 3; new_test_ext(config).execute_with(|| { - let disputed_cores = ParaInclusion::free_disputed(&BTreeSet::new()).collect::>(); + let disputed_cores = ParaInclusion::free_disputed(&BTreeSet::new()); assert!(disputed_cores.is_empty()); let disputed_cores = ParaInclusion::free_disputed( &[CandidateHash::default()].into_iter().collect::>(), - ) - .collect::>(); + ); assert!(disputed_cores.is_empty()); let make_candidate = |core_index: u32| { @@ -611,7 +610,7 @@ fn free_disputed() { let disputed_cores = ParaInclusion::free_disputed(&disputed_candidates); assert_eq!( - disputed_cores.map(|(core, _)| core).collect::>(), + disputed_cores.into_iter().map(|(core, _)| core).collect::>(), vec![ CoreIndex(0), CoreIndex(2), diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index e1423c5d21d3..6a95f724d0c0 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -232,7 +232,7 @@ pub mod pallet { /// the given `freed_concluded`). /// /// The parameter `freed_concluded` contains all core indicies that became - /// free due to candidates that became available. + /// free due to candidates that became available or due to candidates being disputed. 
pub(crate) fn collect_all_freed_cores( freed_concluded: I, ) -> BTreeMap @@ -538,26 +538,15 @@ impl Pallet { .map(|(_session, candidate)| candidate) .collect::>(); - let (freed_disputed_cores, freed_disputed_candidates): ( - BTreeMap, - BTreeSet, - ) = >::free_disputed(¤t_concluded_invalid_disputes) - .map(|(core, candidate)| ((core, FreedReason::Concluded), candidate)) - .unzip(); + let freed_disputed = + >::free_disputed(¤t_concluded_invalid_disputes); // Create a bit index from the set of core indices where each index corresponds to // a core index that was freed due to a dispute. // // I.e. 010100 would indicate, the candidates on Core 1 and 3 would be disputed. let disputed_bitfield = - create_disputed_bitfield(expected_bits, freed_disputed_cores.keys()); - - if !freed_disputed_cores.is_empty() { - >::free_cores_and_fill_claimqueue( - freed_disputed_cores.clone(), - now, - ); - } + create_disputed_bitfield(expected_bits, freed_disputed.iter().map(|(core, _)| core)); let bitfields = sanitize_bitfields::( bitfields, @@ -571,7 +560,7 @@ impl Pallet { // Process new availability bitfields, yielding any availability cores whose // work has now concluded. - let freed_concluded = + let mut freed_concluded = >::update_pending_availability_and_get_freed_cores( &validator_public[..], bitfields.clone(), @@ -584,7 +573,10 @@ impl Pallet { METRICS.on_candidates_included(freed_concluded.len() as u64); - let freed = collect_all_freed_cores::(freed_concluded.iter().cloned()); + // Add the disputed candidates to the concluded collection. 
+ freed_concluded.extend(freed_disputed.iter()); + + let freed = collect_all_freed_cores::(freed_concluded); >::free_cores_and_fill_claimqueue(freed, now); @@ -608,7 +600,7 @@ impl Pallet { let backed_candidates_with_core = sanitize_backed_candidates::( backed_candidates, &allowed_relay_parents, - freed_disputed_candidates, + freed_disputed.into_iter().map(|(_, hash)| hash).collect(), scheduled, core_index_enabled, ); From 376d19ce96bd1906c9ec7e5c2b4602a0494b7daf Mon Sep 17 00:00:00 2001 From: alindima Date: Fri, 15 Mar 2024 14:39:47 +0200 Subject: [PATCH 35/44] add removal of pendingbitfields storage to the migration and add more checks --- .../parachains/src/inclusion/migration.rs | 97 +++++++++++++++++-- .../runtime/parachains/src/inclusion/mod.rs | 40 ++------ .../runtime/parachains/src/inclusion/tests.rs | 29 +----- 3 files changed, 99 insertions(+), 67 deletions(-) diff --git a/polkadot/runtime/parachains/src/inclusion/migration.rs b/polkadot/runtime/parachains/src/inclusion/migration.rs index 41b77ad9545a..b9e65bc33edb 100644 --- a/polkadot/runtime/parachains/src/inclusion/migration.rs +++ b/polkadot/runtime/parachains/src/inclusion/migration.rs @@ -20,12 +20,12 @@ mod v0 { use frame_system::pallet_prelude::BlockNumberFor; use parity_scale_codec::{Decode, Encode}; use primitives::{ - CandidateCommitments, CandidateDescriptor, CandidateHash, CoreIndex, GroupIndex, - Id as ParaId, + AvailabilityBitfield, CandidateCommitments, CandidateDescriptor, CandidateHash, CoreIndex, + GroupIndex, Id as ParaId, ValidatorIndex, }; use scale_info::TypeInfo; - #[derive(Encode, Decode, PartialEq, TypeInfo, Clone)] + #[derive(Encode, Decode, PartialEq, TypeInfo, Clone, Debug)] pub struct CandidatePendingAvailability { pub core: CoreIndex, pub hash: CandidateHash, @@ -37,6 +37,12 @@ mod v0 { pub backing_group: GroupIndex, } + #[derive(Encode, Decode, TypeInfo, Debug, PartialEq)] + pub struct AvailabilityBitfieldRecord { + pub bitfield: AvailabilityBitfield, + pub submitted_at: 
N, + } + #[storage_alias] pub type PendingAvailability = StorageMap< Pallet, @@ -48,11 +54,19 @@ mod v0 { #[storage_alias] pub type PendingAvailabilityCommitments = StorageMap, Twox64Concat, ParaId, CandidateCommitments>; + + #[storage_alias] + pub type AvailabilityBitfields = StorageMap< + Pallet, + Twox64Concat, + ValidatorIndex, + AvailabilityBitfieldRecord>, + >; } mod v1 { use super::v0::{ - PendingAvailability as V0PendingAvailability, + AvailabilityBitfields, PendingAvailability as V0PendingAvailability, PendingAvailabilityCommitments as V0PendingAvailabilityCommitments, }; use crate::inclusion::{ @@ -68,6 +82,8 @@ mod v1 { ensure, traits::{GetStorageVersion, StorageVersion}, }; + #[cfg(feature = "try-runtime")] + use parity_scale_codec::{Decode, Encode}; pub struct VersionUncheckedMigrateToV1(sp_std::marker::PhantomData); @@ -75,7 +91,19 @@ mod v1 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { log::trace!(target: crate::inclusion::LOG_TARGET, "Running pre_upgrade() for inclusion MigrateToV1"); - Ok(Vec::new()) + let candidates_before_upgrade = V0PendingAvailability::::iter().count(); + let commitments_before_upgrade = V0PendingAvailabilityCommitments::::iter().count(); + + if candidates_before_upgrade != commitments_before_upgrade { + log::warn!( + target: crate::inclusion::LOG_TARGET, + "Number of pending candidates differ from the number of pending commitments. {} vs {}", + candidates_before_upgrade, + commitments_before_upgrade + ); + } + + Ok((candidates_before_upgrade as u32).encode()) } fn on_runtime_upgrade() -> Weight { @@ -108,28 +136,67 @@ mod v1 { } // should've already been drained by the above for loop, but as a sanity check, in case - // there are more commitments than candidates. V0PendingAvailabilityCommitments should - // not contain too many keys so removing everything at once should be safe + // there are more commitments than candidates. 
+ // V0PendingAvailabilityCommitments should not contain too many keys so removing + // everything at once should be safe let res = V0PendingAvailabilityCommitments::::clear(u32::MAX, None); weight = weight.saturating_add( T::DbWeight::get().reads_writes(res.loops as u64, res.backend as u64), ); + // AvailabilityBitfields should not contain too many keys so removing everything at once + // should be safe. + let res = AvailabilityBitfields::::clear(u32::MAX, None); + weight = weight.saturating_add( + T::DbWeight::get().reads_writes(res.loops as u64, res.backend as u64), + ); + weight } #[cfg(feature = "try-runtime")] - fn post_upgrade(_state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + fn post_upgrade(state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { log::trace!(target: crate::inclusion::LOG_TARGET, "Running post_upgrade() for inclusion MigrateToV1"); ensure!( Pallet::::on_chain_storage_version() >= StorageVersion::new(1), "Storage version should be >= 1 after the migration" ); + let candidates_before_upgrade = + u32::decode(&mut &state[..]).expect("Was properly encoded") as usize; + let candidates_after_upgrade = V1PendingAvailability::::iter().fold( + 0usize, + |mut acc, (_paraid, para_candidates)| { + acc += para_candidates.len(); + acc + }, + ); + + ensure!( + candidates_before_upgrade == candidates_after_upgrade, + "Number of pending candidates should be the same as the one before the upgrade." + ); + ensure!( + V0PendingAvailability::::iter().next() == None, + "Pending availability candidates storage v0 should have been removed" + ); + ensure!( + V0PendingAvailabilityCommitments::::iter().next() == None, + "Pending availability commitments storage should have been removed" + ); + ensure!( + AvailabilityBitfields::::iter().next() == None, + "Availability bitfields storage should have been removed" + ); + Ok(()) } } + /// Migrate to v1 inclusion module storage. 
+ /// - merges the `PendingAvailabilityCommitments` into the `CandidatePendingAvailability` + /// storage + /// - removes the `AvailabilityBitfields` storage, which was never read. pub type MigrateToV1 = frame_support::migrations::VersionedMigration< 0, 1, @@ -150,7 +217,7 @@ mod tests { mock::{new_test_ext, MockGenesisConfig, Test}, }; use frame_support::traits::OnRuntimeUpgrade; - use primitives::Id as ParaId; + use primitives::{AvailabilityBitfield, Id as ParaId}; use test_helpers::{dummy_candidate_commitments, dummy_candidate_descriptor, dummy_hash}; #[test] @@ -185,6 +252,14 @@ mod tests { dummy_candidate_commitments(HeadData(vec![i as _])), ); + v0::AvailabilityBitfields::::insert( + ValidatorIndex(i), + v0::AvailabilityBitfieldRecord { + bitfield: AvailabilityBitfield(Default::default()), + submitted_at: i, + }, + ); + expected.push(( ParaId::from(i), [V1CandidatePendingAvailability { @@ -228,6 +303,10 @@ mod tests { Weight::zero() ); + assert_eq!(v0::PendingAvailabilityCommitments::::iter().next(), None); + assert_eq!(v0::PendingAvailability::::iter().next(), None); + assert_eq!(v0::AvailabilityBitfields::::iter().next(), None); + let mut actual = V1PendingAvailability::::iter().collect::>(); actual.sort_by(|(id1, _), (id2, _)| id1.cmp(id2)); expected.sort_by(|(id1, _), (id2, _)| id1.cmp(id2)); diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 54c6dbfea7a1..381727265423 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -38,11 +38,11 @@ use frame_system::pallet_prelude::*; use pallet_message_queue::OnQueueChanged; use parity_scale_codec::{Decode, Encode}; use primitives::{ - effective_minimum_backing_votes, supermajority_threshold, well_known_keys, - AvailabilityBitfield, BackedCandidate, CandidateCommitments, CandidateDescriptor, - CandidateHash, CandidateReceipt, CommittedCandidateReceipt, CoreIndex, GroupIndex, Hash, - 
HeadData, Id as ParaId, SignedAvailabilityBitfields, SigningContext, UpwardMessage, - ValidatorId, ValidatorIndex, ValidityAttestation, + effective_minimum_backing_votes, supermajority_threshold, well_known_keys, BackedCandidate, + CandidateCommitments, CandidateDescriptor, CandidateHash, CandidateReceipt, + CommittedCandidateReceipt, CoreIndex, GroupIndex, Hash, HeadData, Id as ParaId, + SignedAvailabilityBitfields, SigningContext, UpwardMessage, ValidatorId, ValidatorIndex, + ValidityAttestation, }; use scale_info::TypeInfo; use sp_runtime::{traits::One, DispatchError, SaturatedConversion, Saturating}; @@ -86,18 +86,6 @@ impl WeightInfo for () { /// `configuration` pallet to check these values before setting. pub const MAX_UPWARD_MESSAGE_SIZE_BOUND: u32 = 128 * 1024; -/// A bitfield signed by a validator indicating that it is keeping its piece of the erasure-coding -/// for any backed candidates referred to by a `1` bit available. -/// -/// The bitfield's signature should be checked at the point of submission. Afterwards it can be -/// dropped. -#[derive(Encode, Decode, TypeInfo)] -#[cfg_attr(test, derive(Debug))] -pub struct AvailabilityBitfieldRecord { - bitfield: AvailabilityBitfield, // one bit per core. - submitted_at: N, // for accounting, as meaning of bits may change over time. -} - /// A backed candidate pending availability. #[derive(Encode, Decode, PartialEq, TypeInfo, Clone)] #[cfg_attr(test, derive(Debug))] @@ -355,14 +343,13 @@ pub mod pallet { ParaHeadMismatch, } - /// The latest bitfield for each validator, referred to by their index in the validator set. - #[pallet::storage] - pub(crate) type AvailabilityBitfields = - StorageMap<_, Twox64Concat, ValidatorIndex, AvailabilityBitfieldRecord>>; - /// Candidates pending availability by `ParaId`. They form a chain starting from the latest /// included head of the para. 
+ /// Use a different prefix post-migration to v1, since the v0 `PendingAvailability` storage + /// would otherwise have the exact same prefix which could cause undefined behaviour when doing + /// the migration. #[pallet::storage] + #[pallet::storage_prefix = "V1"] pub(crate) type PendingAvailability = StorageMap< _, Twox64Concat, @@ -459,7 +446,6 @@ impl Pallet { // unlike most drain methods, drained elements are not cleared on `Drop` of the iterator // and require consumption. for _ in >::drain() {} - for _ in >::drain() {} Self::cleanup_outgoing_ump_dispatch_queues(outgoing_paras); } @@ -478,7 +464,7 @@ impl Pallet { /// /// Bitfields are expected to have been sanitized already. E.g. via `sanitize_bitfields`! /// - /// Updates storage items `PendingAvailability` and `AvailabilityBitfields`. + /// Updates storage items `PendingAvailability`. /// /// Returns a `Vec` of `CandidateHash`es and their respective `AvailabilityCore`s that became /// available, and cores free. @@ -486,7 +472,6 @@ impl Pallet { validators: &[ValidatorId], signed_bitfields: SignedAvailabilityBitfields, ) -> Vec<(CoreIndex, CandidateHash)> { - let now = >::block_number(); let threshold = availability_threshold(validators.len()); let mut votes_per_core: BTreeMap> = BTreeMap::new(); @@ -504,11 +489,6 @@ impl Pallet { .or_insert_with(|| BTreeSet::new()) .insert(validator_index); } - - let record = - AvailabilityBitfieldRecord { bitfield: checked_bitfield, submitted_at: now }; - - >::insert(&validator_index, record); } let mut freed_cores = vec![]; diff --git a/polkadot/runtime/parachains/src/inclusion/tests.rs b/polkadot/runtime/parachains/src/inclusion/tests.rs index c8d99b28ca90..5ab3a13324d2 100644 --- a/polkadot/runtime/parachains/src/inclusion/tests.rs +++ b/polkadot/runtime/parachains/src/inclusion/tests.rs @@ -27,7 +27,7 @@ use crate::{ shared::AllowedRelayParentsTracker, }; use primitives::{ - effective_minimum_backing_votes, SignedAvailabilityBitfields, + 
effective_minimum_backing_votes, AvailabilityBitfield, SignedAvailabilityBitfields, UncheckedSignedAvailabilityBitfields, }; @@ -2653,21 +2653,6 @@ fn session_change_wipes() { run_to_block(10, |_| None); - >::insert( - &ValidatorIndex(0), - AvailabilityBitfieldRecord { bitfield: default_bitfield(), submitted_at: 9 }, - ); - - >::insert( - &ValidatorIndex(1), - AvailabilityBitfieldRecord { bitfield: default_bitfield(), submitted_at: 9 }, - ); - - >::insert( - &ValidatorIndex(4), - AvailabilityBitfieldRecord { bitfield: default_bitfield(), submitted_at: 9 }, - ); - let candidate = TestCandidateBuilder::default().build(); >::insert( &chain_a, @@ -2707,10 +2692,6 @@ fn session_change_wipes() { assert_eq!(shared::Pallet::::session_index(), 5); - assert!(>::get(&ValidatorIndex(0)).is_some()); - assert!(>::get(&ValidatorIndex(1)).is_some()); - assert!(>::get(&ValidatorIndex(4)).is_some()); - assert!(>::get(&chain_a).is_some()); assert!(>::get(&chain_b).is_some()); @@ -2728,14 +2709,6 @@ fn session_change_wipes() { assert_eq!(shared::Pallet::::session_index(), 6); - assert!(>::get(&ValidatorIndex(0)).is_none()); - assert!(>::get(&ValidatorIndex(1)).is_none()); - assert!(>::get(&ValidatorIndex(4)).is_none()); - - assert!(>::get(&chain_a).is_none()); - assert!(>::get(&chain_b).is_none()); - - assert!(>::iter().collect::>().is_empty()); assert!(>::iter().collect::>().is_empty()); }); } From 98f4a0eee6cdff298ea87f89f4f800241a746c2a Mon Sep 17 00:00:00 2001 From: alindima Date: Mon, 18 Mar 2024 11:44:05 +0200 Subject: [PATCH 36/44] one more test for paras_inherent --- polkadot/runtime/parachains/src/builder.rs | 31 +-- .../parachains/src/paras_inherent/mod.rs | 2 +- .../parachains/src/paras_inherent/tests.rs | 198 +++++++++++++++--- 3 files changed, 191 insertions(+), 40 deletions(-) diff --git a/polkadot/runtime/parachains/src/builder.rs b/polkadot/runtime/parachains/src/builder.rs index 39f53911e1ec..e41f241b387e 100644 --- a/polkadot/runtime/parachains/src/builder.rs +++ 
b/polkadot/runtime/parachains/src/builder.rs @@ -159,7 +159,6 @@ impl BenchBuilder { } /// Set a map from para id seed to number of cores assigned to it. - #[cfg(feature = "runtime-benchmarks")] pub(crate) fn set_elastic_paras(mut self, elastic_paras: BTreeMap) -> Self { self.elastic_paras = elastic_paras; self @@ -326,10 +325,14 @@ impl BenchBuilder { availability_votes, commitments, ); - inclusion::PendingAvailability::::insert( - para_id, - [candidate_availability].into_iter().collect::>(), - ); + inclusion::PendingAvailability::::mutate(para_id, |maybe_andidates| { + if let Some(candidates) = maybe_andidates { + candidates.push_back(candidate_availability); + } else { + *maybe_andidates = + Some([candidate_availability].into_iter().collect::>()); + } + }); } /// Create an `AvailabilityBitfield` where `concluding` is a map where each key is a core index @@ -361,13 +364,13 @@ impl BenchBuilder { } } - /// Register `cores` count of parachains. + /// Register `n_paras` count of parachains. /// /// Note that this must be called at least 2 sessions before the target session as there is a /// n+2 session delay for the scheduled actions to take effect. - fn setup_para_ids(cores: usize) { + fn setup_para_ids(n_paras: usize) { // make sure parachains exist prior to session change. - for i in 0..cores { + for i in 0..n_paras { let para_id = ParaId::from(i as u32); let validation_code = mock_validation_code(); @@ -527,7 +530,7 @@ impl BenchBuilder { /// validity votes. 
fn create_backed_candidates( &self, - cores_with_backed_candidates: &BTreeMap, + paras_with_backed_candidates: &BTreeMap, elastic_paras: &BTreeMap, includes_code_upgrade: Option, ) -> Vec> { @@ -536,7 +539,7 @@ impl BenchBuilder { let config = configuration::Pallet::::config(); let mut current_core_idx = 0u32; - cores_with_backed_candidates + paras_with_backed_candidates .iter() .flat_map(|(seed, num_votes)| { assert!(*num_votes <= validators.len() as u32); @@ -765,7 +768,7 @@ impl BenchBuilder { // NOTE: there is an n+2 session delay for these actions to take effect. // We are currently in Session 0, so these changes will take effect in Session 2. - Self::setup_para_ids(used_cores); + Self::setup_para_ids(used_cores - extra_cores); configuration::ActiveConfig::::mutate(|c| { c.scheduler_params.num_cores = used_cores as u32; }); @@ -787,11 +790,11 @@ impl BenchBuilder { let disputes = builder.create_disputes( builder.backed_and_concluding_paras.len() as u32, - used_cores as u32, + (used_cores - extra_cores) as u32, builder.dispute_sessions.as_slice(), ); let mut disputed_cores = (builder.backed_and_concluding_paras.len() as u32.. - used_cores as u32) + ((used_cores - extra_cores) as u32)) .into_iter() .map(|idx| (idx, 0)) .collect::>(); @@ -799,7 +802,7 @@ impl BenchBuilder { let mut all_cores = builder.backed_and_concluding_paras.clone(); all_cores.append(&mut disputed_cores); - assert_eq!(inclusion::PendingAvailability::::iter().count(), used_cores as usize,); + assert_eq!(inclusion::PendingAvailability::::iter().count(), used_cores - extra_cores); // Mark all the used cores as occupied. 
We expect that there are // `backed_and_concluding_paras` that are pending availability and that there are diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 6a95f724d0c0..37cbc73e8ce4 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -315,7 +315,7 @@ impl Pallet { /// Process inherent data. /// /// The given inherent data is processed and state is altered accordingly. If any data could - /// not be applied (inconsitencies, weight limit, ...) it is removed. + /// not be applied (inconsistencies, weight limit, ...) it is removed. /// /// When called from `create_inherent` the `context` must be set to /// `ProcessInherentDataContext::ProvideInherent` so it guarantees the invariant that inherent diff --git a/polkadot/runtime/parachains/src/paras_inherent/tests.rs b/polkadot/runtime/parachains/src/paras_inherent/tests.rs index 12d6b25a3f52..60c49ce92d81 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/tests.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/tests.rs @@ -16,6 +16,28 @@ use super::*; +use crate::{ + configuration::{self, HostConfiguration}, + mock::MockGenesisConfig, +}; +use primitives::vstaging::SchedulerParams; + +fn default_config() -> MockGenesisConfig { + MockGenesisConfig { + configuration: configuration::GenesisConfig { + config: HostConfiguration { + max_head_data_size: 0b100000, + scheduler_params: SchedulerParams { + group_rotation_frequency: u32::MAX, + ..Default::default() + }, + ..Default::default() + }, + }, + ..Default::default() + } +} + // In order to facilitate benchmarks as tests we have a benchmark feature gated `WeightInfo` impl // that uses 0 for all the weights. Because all the weights are 0, the tests that rely on // weights for limiting data will fail, so we don't run them when using the benchmark feature. 
@@ -25,7 +47,7 @@ mod enter { use super::{inclusion::tests::TestCandidateBuilder, *}; use crate::{ builder::{Bench, BenchBuilder}, - mock::{mock_assigner, new_test_ext, BlockLength, BlockWeights, MockGenesisConfig, Test}, + mock::{mock_assigner, new_test_ext, BlockLength, BlockWeights, RuntimeOrigin, Test}, scheduler::{ common::{Assignment, AssignmentProvider}, ParasEntry, @@ -45,6 +67,7 @@ mod enter { num_validators_per_core: u32, code_upgrade: Option, fill_claimqueue: bool, + elastic_paras: BTreeMap, } fn make_inherent_data( @@ -55,13 +78,21 @@ mod enter { num_validators_per_core, code_upgrade, fill_claimqueue, + elastic_paras, }: TestConfig, ) -> Bench { + let extra_cores = elastic_paras + .values() + .map(|count| *count as usize) + .sum::() + .saturating_sub(elastic_paras.len() as usize); + let builder = BenchBuilder::::new() .set_max_validators( - (dispute_sessions.len() + backed_and_concluding.len()) as u32 * + (dispute_sessions.len() + backed_and_concluding.len() + extra_cores) as u32 * num_validators_per_core, ) + .set_elastic_paras(elastic_paras.clone()) .set_max_validators_per_core(num_validators_per_core) .set_dispute_statements(dispute_statements) .set_backed_and_concluding_paras(backed_and_concluding) @@ -70,10 +101,16 @@ mod enter { // Setup some assignments as needed: mock_assigner::Pallet::::set_core_count(builder.max_cores()); - for core_index in 0..builder.max_cores() { - // Core index == para_id in this case - mock_assigner::Pallet::::add_test_assignment(Assignment::Bulk(core_index.into())); - } + + (0..(builder.max_cores() as usize - extra_cores)).for_each(|para_id| { + (0..elastic_paras.get(&(para_id as u32)).cloned().unwrap_or(1)).for_each( + |_para_local_core_idx| { + mock_assigner::Pallet::::add_test_assignment(Assignment::Bulk( + para_id.into(), + )); + }, + ); + }); if let Some(code_size) = code_upgrade { builder.set_code_upgrade(code_size).build() @@ -104,6 +141,7 @@ mod enter { num_validators_per_core: 1, code_upgrade: None, 
fill_claimqueue: false, + elastic_paras: BTreeMap::new(), }); // We expect the scenario to have cores 0 & 1 with pending availability. The backed @@ -145,6 +183,122 @@ mod enter { Pallet::::on_chain_votes().unwrap().session, 2 ); + + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(0)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![CoreIndex(0)] + ); + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(1)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![CoreIndex(1)] + ); + }); + } + + #[test] + // ParaId 1 has one pending candidate on core 0. + // ParaId 2 has one pending candidate on core 1. + // ParaId 3 has three pending candidates on cores 2, 3 and 4. + // All of them are being made available in this block. Propose 5 more candidates (one for each + // core) and check that they're successfully backed and the old ones enacted. + fn include_backed_candidates_elastic_scaling() { + let config = default_config(); + assert!(config.configuration.config.scheduler_params.lookahead > 0); + + new_test_ext(config).execute_with(|| { + // Set the elastic scaling MVP feature. 
+ >::set_node_feature( + RuntimeOrigin::root(), + FeatureIndex::ElasticScalingMVP as u8, + true, + ) + .unwrap(); + + let dispute_statements = BTreeMap::new(); + + let mut backed_and_concluding = BTreeMap::new(); + backed_and_concluding.insert(0, 1); + backed_and_concluding.insert(1, 1); + backed_and_concluding.insert(2, 1); + + let scenario = make_inherent_data(TestConfig { + dispute_statements, + dispute_sessions: vec![], // No disputes + backed_and_concluding, + num_validators_per_core: 1, + code_upgrade: None, + fill_claimqueue: false, + elastic_paras: [(2, 3)].into_iter().collect(), + }); + + let expected_para_inherent_data = scenario.data.clone(); + + // Check the para inherent data is as expected: + // * 1 bitfield per validator (5 validators) + assert_eq!(expected_para_inherent_data.bitfields.len(), 5); + // * 1 backed candidate per core (5 cores) + assert_eq!(expected_para_inherent_data.backed_candidates.len(), 5); + // * 0 disputes. + assert_eq!(expected_para_inherent_data.disputes.len(), 0); + let mut inherent_data = InherentData::new(); + inherent_data + .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data) + .unwrap(); + + // The current schedule is empty prior to calling `create_inherent_enter`. + assert!(>::claimqueue_is_empty()); + + // Nothing is filtered out (including the backed candidates.) 
+ assert_eq!( + Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(), + expected_para_inherent_data + ); + + assert_eq!( + // The length of this vec is equal to the number of candidates, so we know our 5 + // backed candidates did not get filtered out + Pallet::::on_chain_votes().unwrap().backing_validators_per_candidate.len(), + 5 + ); + + assert_eq!( + // The session of the on chain votes should equal the current session, which is 2 + Pallet::::on_chain_votes().unwrap().session, + 2 + ); + + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(0)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![CoreIndex(0)] + ); + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(1)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![CoreIndex(1)] + ); + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(2)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![CoreIndex(2), CoreIndex(3), CoreIndex(4)] + ); }); } @@ -255,6 +409,7 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), }); let expected_para_inherent_data = scenario.data.clone(); @@ -326,6 +481,7 @@ mod enter { num_validators_per_core: 6, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), }); let expected_para_inherent_data = scenario.data.clone(); @@ -395,6 +551,7 @@ mod enter { num_validators_per_core: 4, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), }); let expected_para_inherent_data = scenario.data.clone(); @@ -480,6 +637,7 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), }); let expected_para_inherent_data = scenario.data.clone(); @@ -565,6 +723,7 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), }); let 
expected_para_inherent_data = scenario.data.clone(); @@ -649,6 +808,7 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), }); let expected_para_inherent_data = scenario.data.clone(); @@ -754,6 +914,7 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), }); let expected_para_inherent_data = scenario.data.clone(); @@ -820,6 +981,7 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), }); let expected_para_inherent_data = scenario.data.clone(); @@ -884,6 +1046,7 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), }); let expected_para_inherent_data = scenario.data.clone(); @@ -985,6 +1148,7 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), }); let mut para_inherent_data = scenario.data.clone(); @@ -1072,6 +1236,7 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), }); let expected_para_inherent_data = scenario.data.clone(); @@ -1119,7 +1284,7 @@ mod sanitizers { inclusion::tests::{ back_candidate, collator_sign_candidate, BackingKind, TestCandidateBuilder, }, - mock::{new_test_ext, MockGenesisConfig}, + mock::new_test_ext, }; use bitvec::order::Lsb0; use primitives::{ @@ -1375,32 +1540,15 @@ mod sanitizers { mod candidates { use crate::{ - configuration::HostConfiguration, mock::{set_disabled_validators, RuntimeOrigin}, scheduler::{common::Assignment, ParasEntry}, util::{make_persisted_validation_data, make_persisted_validation_data_with_parent}, }; - use primitives::{vstaging::SchedulerParams, ValidationCode}; + use primitives::ValidationCode; use sp_std::collections::vec_deque::VecDeque; use super::*; - fn default_config() -> MockGenesisConfig { - 
MockGenesisConfig { - configuration: configuration::GenesisConfig { - config: HostConfiguration { - max_head_data_size: 0b100000, - scheduler_params: SchedulerParams { - group_rotation_frequency: u32::MAX, - ..Default::default() - }, - ..Default::default() - }, - }, - ..Default::default() - } - } - // Backed candidates and scheduled parachains used for `sanitize_backed_candidates` testing struct TestData { backed_candidates: Vec, From e53751318f225cb1efd30942d470b4d4f5742160 Mon Sep 17 00:00:00 2001 From: command-bot <> Date: Mon, 18 Mar 2024 12:15:00 +0000 Subject: [PATCH 37/44] ".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime=westend --target_dir=polkadot --pallet=runtime_parachains::paras_inherent --- .../runtime_parachains_paras_inherent.rs | 583 +++++++++--------- 1 file changed, 298 insertions(+), 285 deletions(-) diff --git a/polkadot/runtime/westend/src/weights/runtime_parachains_paras_inherent.rs b/polkadot/runtime/westend/src/weights/runtime_parachains_paras_inherent.rs index 0dd64f054d00..aa99ac9438c4 100644 --- a/polkadot/runtime/westend/src/weights/runtime_parachains_paras_inherent.rs +++ b/polkadot/runtime/westend/src/weights/runtime_parachains_paras_inherent.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_parachains::paras_inherent` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-xerhrdyb-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot @@ -29,14 +29,13 @@ // --steps=50 // --repeat=20 // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=runtime_parachains::paras_inherent // --chain=westend-dev -// --header=./file_header.txt -// --output=./runtime/westend/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -49,297 +48,311 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::paras_inherent`. pub struct WeightInfo(PhantomData); impl runtime_parachains::paras_inherent::WeightInfo for WeightInfo { - /// Storage: ParaInherent Included (r:1 w:1) - /// Proof Skipped: ParaInherent Included (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System ParentHash (r:1 w:0) - /// Proof: System ParentHash (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler AvailabilityCores (r:1 w:1) - /// Proof Skipped: ParaScheduler AvailabilityCores (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Babe AuthorVrfRandomness (r:1 w:0) - /// Proof: Babe AuthorVrfRandomness (max_values: Some(1), max_size: Some(33), added: 528, mode: MaxEncodedLen) - /// Storage: ParaSessionInfo Sessions (r:1 w:0) - /// Proof Skipped: ParaSessionInfo Sessions (max_values: None, 
max_size: None, mode: Measured) - /// Storage: ParasDisputes Disputes (r:1 w:1) - /// Proof Skipped: ParasDisputes Disputes (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasDisputes BackersOnDisputes (r:1 w:1) - /// Proof Skipped: ParasDisputes BackersOnDisputes (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasDisputes Included (r:1 w:1) - /// Proof Skipped: ParasDisputes Included (max_values: None, max_size: None, mode: Measured) - /// Storage: ParaSessionInfo AccountKeys (r:1 w:0) - /// Proof Skipped: ParaSessionInfo AccountKeys (max_values: None, max_size: None, mode: Measured) - /// Storage: Session Validators (r:1 w:0) - /// Proof Skipped: Session Validators (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Staking ActiveEra (r:1 w:0) - /// Proof: Staking ActiveEra (max_values: Some(1), max_size: Some(13), added: 508, mode: MaxEncodedLen) - /// Storage: Staking ErasRewardPoints (r:1 w:1) - /// Proof Skipped: Staking ErasRewardPoints (max_values: None, max_size: None, mode: Measured) - /// Storage: ParaInherent OnChainVotes (r:1 w:1) - /// Proof Skipped: ParaInherent OnChainVotes (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasDisputes Frozen (r:1 w:0) - /// Proof Skipped: ParasDisputes Frozen (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaInclusion PendingAvailability (r:2 w:1) - /// Proof Skipped: ParaInclusion PendingAvailability (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras Parachains (r:1 w:0) - /// Proof Skipped: Paras Parachains (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) - /// Proof Skipped: ParaInclusion PendingAvailabilityCommitments (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - 
/// Storage: Dmp DeliveryFeeFactor (r:1 w:1) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpChannelDigests (r:1 w:1) - /// Proof Skipped: Hrmp HrmpChannelDigests (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras FutureCodeUpgrades (r:1 w:0) - /// Proof Skipped: Paras FutureCodeUpgrades (max_values: None, max_size: None, mode: Measured) - /// Storage: ParaScheduler SessionStartBlock (r:1 w:0) - /// Proof Skipped: ParaScheduler SessionStartBlock (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler ParathreadQueue (r:1 w:1) - /// Proof Skipped: ParaScheduler ParathreadQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler Scheduled (r:1 w:1) - /// Proof Skipped: ParaScheduler Scheduled (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler ValidatorGroups (r:1 w:0) - /// Proof Skipped: ParaScheduler ValidatorGroups (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Hrmp HrmpWatermarks (r:0 w:1) - /// Proof Skipped: Hrmp HrmpWatermarks (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras Heads (r:0 w:1) - /// Proof Skipped: Paras Heads (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpgradeGoAheadSignal (r:0 w:1) - /// Proof Skipped: Paras UpgradeGoAheadSignal (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` 
(r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) + /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `ParaSessionInfo::Sessions` (r:1 w:0) + /// Proof: `ParaSessionInfo::Sessions` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 w:1) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::BackersOnDisputes` (r:1 w:1) + /// Proof: `ParasDisputes::BackersOnDisputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:1 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaSessionInfo::AccountKeys` (r:1 w:0) + /// Proof: `ParaSessionInfo::AccountKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::Validators` (r:1 w:0) + /// Proof: `Session::Validators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ActiveEra` (r:1 w:0) + /// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasRewardPoints` (r:1 w:1) + /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: 
`Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::V1` (r:2 w:1) + /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: 
None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `v` is `[10, 200]`. fn enter_variable_disputes(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `50518` - // Estimated: `56458 + v * (23 ±0)` - // Minimum execution time: 998_338_000 picoseconds. - Weight::from_parts(468_412_001, 0) - .saturating_add(Weight::from_parts(0, 56458)) - // Standard Error: 20_559 - .saturating_add(Weight::from_parts(56_965_025, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(27)) - .saturating_add(T::DbWeight::get().writes(15)) + // Measured: `67518` + // Estimated: `73458 + v * (23 ±0)` + // Minimum execution time: 844_022_000 picoseconds. 
+ Weight::from_parts(456_682_337, 0) + .saturating_add(Weight::from_parts(0, 73458)) + // Standard Error: 16_403 + .saturating_add(Weight::from_parts(41_871_245, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(28)) + .saturating_add(T::DbWeight::get().writes(16)) .saturating_add(Weight::from_parts(0, 23).saturating_mul(v.into())) } - /// Storage: ParaInherent Included (r:1 w:1) - /// Proof Skipped: ParaInherent Included (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System ParentHash (r:1 w:0) - /// Proof: System ParentHash (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler AvailabilityCores (r:1 w:1) - /// Proof Skipped: ParaScheduler AvailabilityCores (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Babe AuthorVrfRandomness (r:1 w:0) - /// Proof: Babe AuthorVrfRandomness (max_values: Some(1), max_size: Some(33), added: 528, mode: MaxEncodedLen) - /// Storage: ParaInherent OnChainVotes (r:1 w:1) - /// Proof Skipped: ParaInherent OnChainVotes (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasDisputes Frozen (r:1 w:0) - /// Proof Skipped: ParasDisputes Frozen (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaInclusion PendingAvailability (r:2 w:1) - /// Proof Skipped: ParaInclusion PendingAvailability (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras Parachains (r:1 w:0) - /// Proof Skipped: Paras Parachains (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) - /// Proof Skipped: 
ParaInclusion PendingAvailabilityCommitments (max_values: None, max_size: None, mode: Measured) - /// Storage: ParaSessionInfo AccountKeys (r:1 w:0) - /// Proof Skipped: ParaSessionInfo AccountKeys (max_values: None, max_size: None, mode: Measured) - /// Storage: Session Validators (r:1 w:0) - /// Proof Skipped: Session Validators (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Staking ActiveEra (r:1 w:0) - /// Proof: Staking ActiveEra (max_values: Some(1), max_size: Some(13), added: 508, mode: MaxEncodedLen) - /// Storage: Staking ErasRewardPoints (r:1 w:1) - /// Proof Skipped: Staking ErasRewardPoints (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:1) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpChannelDigests (r:1 w:1) - /// Proof Skipped: Hrmp HrmpChannelDigests (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras FutureCodeUpgrades (r:1 w:0) - /// Proof Skipped: Paras FutureCodeUpgrades (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasDisputes Disputes (r:1 w:0) - /// Proof Skipped: ParasDisputes Disputes (max_values: None, max_size: None, mode: Measured) - /// Storage: ParaScheduler SessionStartBlock (r:1 w:0) - /// Proof Skipped: ParaScheduler SessionStartBlock (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler ParathreadQueue (r:1 w:1) - /// Proof Skipped: ParaScheduler ParathreadQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler Scheduled (r:1 w:1) - /// Proof Skipped: ParaScheduler Scheduled (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler ValidatorGroups (r:1 w:0) - /// Proof Skipped: ParaScheduler ValidatorGroups (max_values: 
Some(1), max_size: None, mode: Measured) - /// Storage: ParaInclusion AvailabilityBitfields (r:0 w:1) - /// Proof Skipped: ParaInclusion AvailabilityBitfields (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasDisputes Included (r:0 w:1) - /// Proof Skipped: ParasDisputes Included (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpWatermarks (r:0 w:1) - /// Proof Skipped: Hrmp HrmpWatermarks (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras Heads (r:0 w:1) - /// Proof Skipped: Paras Heads (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpgradeGoAheadSignal (r:0 w:1) - /// Proof Skipped: Paras UpgradeGoAheadSignal (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) + /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: 
`ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::V1` (r:2 w:1) + /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaSessionInfo::AccountKeys` (r:1 w:0) + /// Proof: `ParaSessionInfo::AccountKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::Validators` (r:1 w:0) + /// Proof: `Session::Validators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ActiveEra` (r:1 w:0) + /// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasRewardPoints` (r:1 w:1) + /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 w:0) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` 
(`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:0 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) fn enter_bitfields() -> Weight { // Proof Size summary in bytes: - // Measured: `42352` - // Estimated: `48292` - // Minimum execution time: 457_404_000 picoseconds. - Weight::from_parts(485_416_000, 0) - .saturating_add(Weight::from_parts(0, 48292)) - .saturating_add(T::DbWeight::get().reads(25)) + // Measured: `43196` + // Estimated: `49136` + // Minimum execution time: 438_637_000 picoseconds. 
+ Weight::from_parts(458_342_000, 0) + .saturating_add(Weight::from_parts(0, 49136)) + .saturating_add(T::DbWeight::get().reads(26)) .saturating_add(T::DbWeight::get().writes(16)) } - /// Storage: ParaInherent Included (r:1 w:1) - /// Proof Skipped: ParaInherent Included (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System ParentHash (r:1 w:0) - /// Proof: System ParentHash (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler AvailabilityCores (r:1 w:1) - /// Proof Skipped: ParaScheduler AvailabilityCores (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Babe AuthorVrfRandomness (r:1 w:0) - /// Proof: Babe AuthorVrfRandomness (max_values: Some(1), max_size: Some(33), added: 528, mode: MaxEncodedLen) - /// Storage: ParaInherent OnChainVotes (r:1 w:1) - /// Proof Skipped: ParaInherent OnChainVotes (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasDisputes Frozen (r:1 w:0) - /// Proof Skipped: ParasDisputes Frozen (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaInclusion PendingAvailability (r:2 w:1) - /// Proof Skipped: ParaInclusion PendingAvailability (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras Parachains (r:1 w:0) - /// Proof Skipped: Paras Parachains (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) - /// Proof Skipped: ParaInclusion PendingAvailabilityCommitments (max_values: None, max_size: None, mode: Measured) - /// Storage: ParaSessionInfo AccountKeys (r:1 w:0) - /// Proof Skipped: ParaSessionInfo 
AccountKeys (max_values: None, max_size: None, mode: Measured) - /// Storage: Session Validators (r:1 w:0) - /// Proof Skipped: Session Validators (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Staking ActiveEra (r:1 w:0) - /// Proof: Staking ActiveEra (max_values: Some(1), max_size: Some(13), added: 508, mode: MaxEncodedLen) - /// Storage: Staking ErasRewardPoints (r:1 w:1) - /// Proof Skipped: Staking ErasRewardPoints (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:1) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpChannelDigests (r:1 w:1) - /// Proof Skipped: Hrmp HrmpChannelDigests (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras FutureCodeUpgrades (r:1 w:0) - /// Proof Skipped: Paras FutureCodeUpgrades (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasDisputes Disputes (r:1 w:0) - /// Proof Skipped: ParasDisputes Disputes (max_values: None, max_size: None, mode: Measured) - /// Storage: ParaScheduler SessionStartBlock (r:1 w:0) - /// Proof Skipped: ParaScheduler SessionStartBlock (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler ParathreadQueue (r:1 w:1) - /// Proof Skipped: ParaScheduler ParathreadQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler Scheduled (r:1 w:1) - /// Proof Skipped: ParaScheduler Scheduled (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler ValidatorGroups (r:1 w:0) - /// Proof Skipped: ParaScheduler ValidatorGroups (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras CurrentCodeHash (r:1 w:0) - /// Proof Skipped: Paras CurrentCodeHash (max_values: None, max_size: None, mode: Measured) - 
/// Storage: Paras ParaLifecycles (r:1 w:0) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) - /// Storage: ParasDisputes Included (r:0 w:1) - /// Proof Skipped: ParasDisputes Included (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpWatermarks (r:0 w:1) - /// Proof Skipped: Hrmp HrmpWatermarks (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras Heads (r:0 w:1) - /// Proof Skipped: Paras Heads (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpgradeGoAheadSignal (r:0 w:1) - /// Proof Skipped: Paras UpgradeGoAheadSignal (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) + /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: 
`MaxEncodedLen`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::V1` (r:2 w:1) + /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaSessionInfo::AccountKeys` (r:1 w:0) + /// Proof: `ParaSessionInfo::AccountKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::Validators` (r:1 w:0) + /// Proof: `Session::Validators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ActiveEra` (r:1 w:0) + /// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasRewardPoints` (r:1 w:1) + /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 w:0) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: 
`ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:0 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: 
`Measured`) /// The range of component `v` is `[101, 200]`. fn enter_backed_candidates_variable(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `42387` - // Estimated: `48327` - // Minimum execution time: 6_864_029_000 picoseconds. - Weight::from_parts(1_237_704_892, 0) - .saturating_add(Weight::from_parts(0, 48327)) - // Standard Error: 33_413 - .saturating_add(Weight::from_parts(56_199_819, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(28)) - .saturating_add(T::DbWeight::get().writes(15)) + // Measured: `43269` + // Estimated: `49209` + // Minimum execution time: 5_955_361_000 picoseconds. + Weight::from_parts(1_285_398_956, 0) + .saturating_add(Weight::from_parts(0, 49209)) + // Standard Error: 57_369 + .saturating_add(Weight::from_parts(47_073_853, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(29)) + .saturating_add(T::DbWeight::get().writes(16)) } - /// Storage: ParaInherent Included (r:1 w:1) - /// Proof Skipped: ParaInherent Included (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System ParentHash (r:1 w:0) - /// Proof: System ParentHash (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler AvailabilityCores (r:1 w:1) - /// Proof Skipped: ParaScheduler AvailabilityCores (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Babe AuthorVrfRandomness (r:1 w:0) - /// Proof: Babe AuthorVrfRandomness (max_values: Some(1), max_size: Some(33), added: 528, mode: MaxEncodedLen) - /// Storage: ParaInherent OnChainVotes (r:1 w:1) - /// Proof Skipped: ParaInherent OnChainVotes (max_values: 
Some(1), max_size: None, mode: Measured) - /// Storage: ParasDisputes Frozen (r:1 w:0) - /// Proof Skipped: ParasDisputes Frozen (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaInclusion PendingAvailability (r:2 w:1) - /// Proof Skipped: ParaInclusion PendingAvailability (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras Parachains (r:1 w:0) - /// Proof Skipped: Paras Parachains (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) - /// Proof Skipped: ParaInclusion PendingAvailabilityCommitments (max_values: None, max_size: None, mode: Measured) - /// Storage: ParaSessionInfo AccountKeys (r:1 w:0) - /// Proof Skipped: ParaSessionInfo AccountKeys (max_values: None, max_size: None, mode: Measured) - /// Storage: Session Validators (r:1 w:0) - /// Proof Skipped: Session Validators (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Staking ActiveEra (r:1 w:0) - /// Proof: Staking ActiveEra (max_values: Some(1), max_size: Some(13), added: 508, mode: MaxEncodedLen) - /// Storage: Staking ErasRewardPoints (r:1 w:1) - /// Proof Skipped: Staking ErasRewardPoints (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:1) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpChannelDigests (r:1 w:1) - /// Proof Skipped: Hrmp HrmpChannelDigests (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras FutureCodeUpgrades (r:1 w:0) - /// Proof Skipped: Paras FutureCodeUpgrades (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasDisputes Disputes (r:1 w:0) - /// Proof Skipped: ParasDisputes Disputes (max_values: None, max_size: None, mode: Measured) - /// Storage: 
ParaScheduler SessionStartBlock (r:1 w:0) - /// Proof Skipped: ParaScheduler SessionStartBlock (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler ParathreadQueue (r:1 w:1) - /// Proof Skipped: ParaScheduler ParathreadQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler Scheduled (r:1 w:1) - /// Proof Skipped: ParaScheduler Scheduled (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler ValidatorGroups (r:1 w:0) - /// Proof Skipped: ParaScheduler ValidatorGroups (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras CurrentCodeHash (r:1 w:0) - /// Proof Skipped: Paras CurrentCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras FutureCodeHash (r:1 w:0) - /// Proof Skipped: Paras FutureCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpgradeRestrictionSignal (r:1 w:0) - /// Proof Skipped: Paras UpgradeRestrictionSignal (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:1 w:0) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) - /// Storage: ParasDisputes Included (r:0 w:1) - /// Proof Skipped: ParasDisputes Included (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpWatermarks (r:0 w:1) - /// Proof Skipped: Hrmp HrmpWatermarks (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras Heads (r:0 w:1) - /// Proof Skipped: Paras Heads (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpgradeGoAheadSignal (r:0 w:1) - /// Proof Skipped: Paras UpgradeGoAheadSignal (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` 
(`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) + /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::V1` (r:2 w:1) + /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaSessionInfo::AccountKeys` (r:1 w:0) + /// Proof: `ParaSessionInfo::AccountKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::Validators` (r:1 w:0) + /// Proof: `Session::Validators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ActiveEra` (r:1 w:0) + /// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`) + /// Storage: 
`Staking::ErasRewardPoints` (r:1 w:1) + /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 w:0) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeHash` (r:1 w:0) + /// Proof: `Paras::FutureCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeRestrictionSignal` (r:1 w:0) + /// Proof: `Paras::UpgradeRestrictionSignal` (`max_values`: None, `max_size`: None, mode: 
`Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:0 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) fn enter_backed_candidate_code_upgrade() -> Weight { // Proof Size summary in bytes: - // Measured: `42414` - // Estimated: `48354` - // Minimum execution time: 43_320_529_000 picoseconds. - Weight::from_parts(45_622_613_000, 0) - .saturating_add(Weight::from_parts(0, 48354)) - .saturating_add(T::DbWeight::get().reads(30)) - .saturating_add(T::DbWeight::get().writes(15)) + // Measured: `43282` + // Estimated: `49222` + // Minimum execution time: 42_128_606_000 picoseconds. 
+ Weight::from_parts(42_822_806_000, 0) + .saturating_add(Weight::from_parts(0, 49222)) + .saturating_add(T::DbWeight::get().reads(31)) + .saturating_add(T::DbWeight::get().writes(16)) } } From f4402174d9c2d49cc8994783c78e69d17391403a Mon Sep 17 00:00:00 2001 From: command-bot <> Date: Mon, 18 Mar 2024 13:58:45 +0000 Subject: [PATCH 38/44] ".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime=rococo --target_dir=polkadot --pallet=runtime_parachains::paras_inherent --- .../runtime_parachains_paras_inherent.rs | 421 ++++++++++++------ 1 file changed, 291 insertions(+), 130 deletions(-) diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs index a102d1903b2f..c250c86665be 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs @@ -13,161 +13,322 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see <http://www.gnu.org/licenses/>. + //! Autogenerated weights for `runtime_parachains::paras_inherent` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2021-11-20, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 128 +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//!
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/release/polkadot +// target/production/polkadot // benchmark -// --chain=rococo-dev +// pallet // --steps=50 // --repeat=20 -// --pallet=runtime_parachains::paras_inherent // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs -// --header=./file_header.txt +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=runtime_parachains::paras_inherent +// --chain=rococo-dev +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions for `runtime_parachains::paras_inherent`. 
pub struct WeightInfo<T>(PhantomData<T>); impl<T: frame_system::Config> runtime_parachains::paras_inherent::WeightInfo for WeightInfo<T> { - // Storage: ParaInherent Included (r:1 w:1) - // Storage: System ParentHash (r:1 w:0) - // Storage: ParaScheduler AvailabilityCores (r:1 w:1) - // Storage: ParasShared CurrentSessionIndex (r:1 w:0) - // Storage: Configuration ActiveConfig (r:1 w:0) - // Storage: ParaSessionInfo Sessions (r:1 w:0) - // Storage: ParasDisputes Disputes (r:1 w:1) - // Storage: ParasDisputes Included (r:1 w:1) - // Storage: ParasDisputes SpamSlots (r:1 w:1) - // Storage: ParasDisputes Frozen (r:1 w:0) - // Storage: ParaInclusion PendingAvailability (r:2 w:1) - // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - // Storage: Paras Parachains (r:1 w:0) - // Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) - // Storage: Dmp DownwardMessageQueues (r:1 w:1) - // Storage: Hrmp HrmpChannelDigests (r:1 w:1) - // Storage: Paras FutureCodeUpgrades (r:1 w:0) - // Storage: ParaScheduler SessionStartBlock (r:1 w:0) - // Storage: ParaScheduler ParathreadQueue (r:1 w:1) - // Storage: ParaScheduler Scheduled (r:1 w:1) - // Storage: ParaScheduler ValidatorGroups (r:1 w:0) - // Storage: Ump NeedsDispatch (r:1 w:1) - // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) - // Storage: ParaInherent OnChainVotes (r:0 w:1) - // Storage: Hrmp HrmpWatermarks (r:0 w:1) - // Storage: Paras Heads (r:0 w:1) + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` 
(`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) + /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `ParaSessionInfo::Sessions` (r:1 w:0) + /// Proof: `ParaSessionInfo::Sessions` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 w:1) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::BackersOnDisputes` (r:1 w:1) + /// Proof: `ParasDisputes::BackersOnDisputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:1 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::V1` (r:2 w:1) + /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: 
`Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Registrar::Paras` (r:1 w:0) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `v` is `[10, 200]`. 
fn enter_variable_disputes(v: u32, ) -> Weight { - Weight::from_parts(352_590_000 as u64, 0) - // Standard Error: 13_000 - .saturating_add(Weight::from_parts(49_254_000 as u64, 0).saturating_mul(v as u64)) - .saturating_add(T::DbWeight::get().reads(24 as u64)) - .saturating_add(T::DbWeight::get().writes(16 as u64)) + // Proof Size summary in bytes: + // Measured: `67785` + // Estimated: `73725 + v * (23 ±0)` + // Minimum execution time: 949_716_000 picoseconds. + Weight::from_parts(482_361_515, 0) + .saturating_add(Weight::from_parts(0, 73725)) + // Standard Error: 17_471 + .saturating_add(Weight::from_parts(50_100_764, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(25)) + .saturating_add(T::DbWeight::get().writes(15)) + .saturating_add(Weight::from_parts(0, 23).saturating_mul(v.into())) } - // Storage: ParaInherent Included (r:1 w:1) - // Storage: System ParentHash (r:1 w:0) - // Storage: ParaScheduler AvailabilityCores (r:1 w:1) - // Storage: ParasShared CurrentSessionIndex (r:1 w:0) - // Storage: Configuration ActiveConfig (r:1 w:0) - // Storage: ParasDisputes Frozen (r:1 w:0) - // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - // Storage: Paras Parachains (r:1 w:0) - // Storage: ParaInclusion PendingAvailability (r:2 w:1) - // Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) - // Storage: Dmp DownwardMessageQueues (r:1 w:1) - // Storage: Hrmp HrmpChannelDigests (r:1 w:1) - // Storage: Paras FutureCodeUpgrades (r:1 w:0) - // Storage: ParasDisputes Disputes (r:1 w:0) - // Storage: ParaScheduler SessionStartBlock (r:1 w:0) - // Storage: ParaScheduler ParathreadQueue (r:1 w:1) - // Storage: ParaScheduler Scheduled (r:1 w:1) - // Storage: ParaScheduler ValidatorGroups (r:1 w:0) - // Storage: Ump NeedsDispatch (r:1 w:1) - // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) - // Storage: ParaInclusion AvailabilityBitfields (r:0 w:1) - // Storage: ParaInherent OnChainVotes (r:0 w:1) - // Storage: ParasDisputes Included 
(r:0 w:1) - // Storage: Hrmp HrmpWatermarks (r:0 w:1) - // Storage: Paras Heads (r:0 w:1) + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) + /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::V1` (r:2 w:1) + /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: 
`Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Registrar::Paras` (r:1 w:0) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 w:0) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:0 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: 
`Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) fn enter_bitfields() -> Weight { - Weight::from_parts(299_878_000 as u64, 0) - .saturating_add(T::DbWeight::get().reads(21 as u64)) - .saturating_add(T::DbWeight::get().writes(15 as u64)) + // Proof Size summary in bytes: + // Measured: `42757` + // Estimated: `48697` + // Minimum execution time: 437_627_000 picoseconds. + Weight::from_parts(460_975_000, 0) + .saturating_add(Weight::from_parts(0, 48697)) + .saturating_add(T::DbWeight::get().reads(23)) + .saturating_add(T::DbWeight::get().writes(15)) } - // Storage: ParaInherent Included (r:1 w:1) - // Storage: System ParentHash (r:1 w:0) - // Storage: ParaScheduler AvailabilityCores (r:1 w:1) - // Storage: ParasShared CurrentSessionIndex (r:1 w:0) - // Storage: Configuration ActiveConfig (r:1 w:0) - // Storage: ParasDisputes Frozen (r:1 w:0) - // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - // Storage: Paras Parachains (r:1 w:0) - // Storage: ParaInclusion PendingAvailability (r:2 w:1) - // Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) - // Storage: Dmp DownwardMessageQueues (r:1 w:1) - // Storage: Hrmp HrmpChannelDigests (r:1 w:1) - // Storage: Paras FutureCodeUpgrades (r:1 w:0) - // Storage: ParasDisputes Disputes (r:2 w:0) - // Storage: ParaScheduler SessionStartBlock (r:1 w:0) - // Storage: ParaScheduler ParathreadQueue (r:1 w:1) - // Storage: ParaScheduler Scheduled (r:1 w:1) - // Storage: ParaScheduler ValidatorGroups (r:1 w:0) - // Storage: Paras PastCodeMeta (r:1 w:0) - // Storage: Paras CurrentCodeHash (r:1 w:0) - // Storage: Ump RelayDispatchQueueSize (r:1 w:0) - // Storage: Ump NeedsDispatch (r:1 w:1) - // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) - // Storage: ParaInherent OnChainVotes 
(r:0 w:1) - // Storage: ParasDisputes Included (r:0 w:1) - // Storage: Hrmp HrmpWatermarks (r:0 w:1) - // Storage: Paras Heads (r:0 w:1) - fn enter_backed_candidates_variable(_v: u32) -> Weight { - Weight::from_parts(442_472_000 as u64, 0) - .saturating_add(T::DbWeight::get().reads(25 as u64)) - .saturating_add(T::DbWeight::get().writes(14 as u64)) + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) + /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::V1` (r:2 w:1) + /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + 
/// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Registrar::Paras` (r:1 w:0) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 w:0) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: 
`ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:0 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `v` is `[101, 200]`. + fn enter_backed_candidates_variable(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `42829` + // Estimated: `48769` + // Minimum execution time: 1_305_254_000 picoseconds. 
+ Weight::from_parts(1_347_160_667, 0) + .saturating_add(Weight::from_parts(0, 48769)) + // Standard Error: 22_128 + .saturating_add(Weight::from_parts(57_229, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(26)) + .saturating_add(T::DbWeight::get().writes(15)) } - // Storage: ParaInherent Included (r:1 w:1) - // Storage: System ParentHash (r:1 w:0) - // Storage: ParaScheduler AvailabilityCores (r:1 w:1) - // Storage: ParasShared CurrentSessionIndex (r:1 w:0) - // Storage: Configuration ActiveConfig (r:1 w:0) - // Storage: ParasDisputes Frozen (r:1 w:0) - // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - // Storage: Paras Parachains (r:1 w:0) - // Storage: ParaInclusion PendingAvailability (r:2 w:1) - // Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) - // Storage: Dmp DownwardMessageQueues (r:1 w:1) - // Storage: Hrmp HrmpChannelDigests (r:1 w:1) - // Storage: Paras FutureCodeUpgrades (r:1 w:0) - // Storage: ParasDisputes Disputes (r:2 w:0) - // Storage: ParaScheduler SessionStartBlock (r:1 w:0) - // Storage: ParaScheduler ParathreadQueue (r:1 w:1) - // Storage: ParaScheduler Scheduled (r:1 w:1) - // Storage: ParaScheduler ValidatorGroups (r:1 w:0) - // Storage: Paras PastCodeMeta (r:1 w:0) - // Storage: Paras CurrentCodeHash (r:1 w:0) - // Storage: Ump RelayDispatchQueueSize (r:1 w:0) - // Storage: Ump NeedsDispatch (r:1 w:1) - // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) - // Storage: ParaInherent OnChainVotes (r:0 w:1) - // Storage: ParasDisputes Included (r:0 w:1) - // Storage: Hrmp HrmpWatermarks (r:0 w:1) - // Storage: Paras Heads (r:0 w:1) + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + 
/// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) + /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::V1` (r:2 w:1) + /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Registrar::Paras` (r:1 w:0) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 
w:0) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeHash` (r:1 w:0) + /// Proof: `Paras::FutureCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeRestrictionSignal` (r:1 w:0) + /// Proof: `Paras::UpgradeRestrictionSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:0 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: 
None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) fn enter_backed_candidate_code_upgrade() -> Weight { - Weight::from_parts(36_903_411_000 as u64, 0) - .saturating_add(T::DbWeight::get().reads(25 as u64)) - .saturating_add(T::DbWeight::get().writes(14 as u64)) + // Proof Size summary in bytes: + // Measured: `42842` + // Estimated: `48782` + // Minimum execution time: 38_637_547_000 picoseconds. + Weight::from_parts(41_447_412_000, 0) + .saturating_add(Weight::from_parts(0, 48782)) + .saturating_add(T::DbWeight::get().reads(28)) + .saturating_add(T::DbWeight::get().writes(15)) } } From a745a635d5e823b9e69a65c7df1b3fd615ca18c4 Mon Sep 17 00:00:00 2001 From: alindima Date: Tue, 19 Mar 2024 09:38:03 +0200 Subject: [PATCH 39/44] fix runtime API panic --- .../parachains/src/inclusion/migration.rs | 2 +- .../runtime/parachains/src/inclusion/mod.rs | 15 +++++- .../parachains/src/runtime_api_impl/v7.rs | 48 ++++++++++++++----- 3 files changed, 51 insertions(+), 14 deletions(-) diff --git a/polkadot/runtime/parachains/src/inclusion/migration.rs b/polkadot/runtime/parachains/src/inclusion/migration.rs index b9e65bc33edb..1e63b209f4e7 100644 --- a/polkadot/runtime/parachains/src/inclusion/migration.rs +++ b/polkadot/runtime/parachains/src/inclusion/migration.rs @@ -13,7 +13,7 @@ pub use v1::MigrateToV1; -mod v0 { +pub mod v0 { use crate::inclusion::{Config, Pallet}; use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; use 
frame_support::{storage_alias, Twox64Concat}; diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 381727265423..68d7afdd5803 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -117,7 +117,10 @@ impl CandidatePendingAvailability { } /// Get the relay-chain block number this was backed in. - pub(crate) fn backed_in_number(&self) -> &N { + pub(crate) fn backed_in_number(&self) -> &N + where + N: Clone, + { &self.backed_in_number } @@ -149,6 +152,16 @@ impl CandidatePendingAvailability { self.relay_parent_number.clone() } + /// Get the candidate backing group. + pub(crate) fn backing_group(&self) -> GroupIndex { + self.backing_group + } + + /// Get the candidate's backers. + pub(crate) fn backers(&self) -> &BitVec { + &self.backers + } + #[cfg(any(feature = "runtime-benchmarks", test))] pub(crate) fn new( core: CoreIndex, diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs b/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs index e20516abda0b..bfebc207ce71 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs @@ -22,6 +22,7 @@ use crate::{ scheduler::{self, CoreOccupied}, session_info, shared, }; +use frame_support::traits::{GetStorageVersion, StorageVersion}; use frame_system::pallet_prelude::*; use primitives::{ async_backing::{ @@ -92,18 +93,41 @@ pub fn availability_cores() -> Vec { - let pending_availability = >::pending_availability_with_core( - entry.para_id(), - CoreIndex(i as u32), - ) - .expect("Occupied core always has pending availability; qed"); - - let backed_in_number = *pending_availability.backed_in_number(); + // Due to https://github.com/paritytech/polkadot-sdk/issues/64, using the new storage types would cause + // this runtime API to panic. We explicitly handle the storage for version 0 to + // prevent that. 
When removing the inclusion v0 -> v1 migration, this bit of code + // can also be removed. + let pending_availability = if >::on_chain_storage_version() == + StorageVersion::new(0) + { + inclusion::migration::v0::PendingAvailability::::get(entry.para_id()) + .expect("Occupied core always has pending availability; qed") + } else { + let candidate = >::pending_availability_with_core( + entry.para_id(), + CoreIndex(i as u32), + ) + .expect("Occupied core always has pending availability; qed"); + + // Translate to the old candidate format, as we don't need the commitments now. + inclusion::migration::v0::CandidatePendingAvailability { + core: candidate.core_occupied(), + hash: candidate.candidate_hash(), + descriptor: candidate.candidate_descriptor().clone(), + availability_votes: candidate.availability_votes().clone(), + backers: candidate.backers().clone(), + relay_parent_number: candidate.relay_parent_number(), + backed_in_number: candidate.backed_in_number().clone(), + backing_group: candidate.backing_group(), + } + }; + + let backed_in_number = pending_availability.backed_in_number; // Use the same block number for determining the responsible group as what the // backing subsystem would use when it calls validator_groups api. 
let backing_group_allocation_time = - pending_availability.relay_parent_number() + One::one(); + pending_availability.relay_parent_number + One::one(); CoreState::Occupied(OccupiedCore { next_up_on_available: >::next_up_on_available(CoreIndex( i as u32, @@ -113,13 +137,13 @@ pub fn availability_cores() -> Vec>::next_up_on_time_out(CoreIndex( i as u32, )), - availability: pending_availability.availability_votes().clone(), + availability: pending_availability.availability_votes.clone(), group_responsible: group_responsible_for( backing_group_allocation_time, - pending_availability.core_occupied(), + pending_availability.core, ), - candidate_hash: pending_availability.candidate_hash(), - candidate_descriptor: pending_availability.candidate_descriptor().clone(), + candidate_hash: pending_availability.hash, + candidate_descriptor: pending_availability.descriptor, }) }, CoreOccupied::Free => { From c3060bdd436f2a49657d9307df1dd013dae51143 Mon Sep 17 00:00:00 2001 From: alindima Date: Tue, 19 Mar 2024 10:23:33 +0200 Subject: [PATCH 40/44] fix clippy --- polkadot/runtime/parachains/src/inclusion/mod.rs | 4 ++-- polkadot/runtime/parachains/src/runtime_api_impl/v7.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 68d7afdd5803..e77f8d15b40d 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -117,11 +117,11 @@ impl CandidatePendingAvailability { } /// Get the relay-chain block number this was backed in. - pub(crate) fn backed_in_number(&self) -> &N + pub(crate) fn backed_in_number(&self) -> N where N: Clone, { - &self.backed_in_number + self.backed_in_number.clone() } /// Get the core index. 
diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs b/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs index bfebc207ce71..171f3f746a82 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs @@ -117,7 +117,7 @@ pub fn availability_cores() -> Vec Date: Wed, 20 Mar 2024 13:04:10 +0200 Subject: [PATCH 41/44] map_candidates_to_cores: check core index for single core if ElasticScalingMVP is enabled --- .../runtime/parachains/src/paras_inherent/mod.rs | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 37cbc73e8ce4..88d44155d26a 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -1349,8 +1349,16 @@ fn map_candidates_to_cores 1 && core_index_enabled { + } else if scheduled_cores.len() >= 1 && core_index_enabled { // We must preserve the dependency order given in the input. 
let mut temp_backed_candidates = Vec::with_capacity(scheduled_cores.len()); @@ -1374,7 +1382,7 @@ fn map_candidates_to_cores Date: Wed, 20 Mar 2024 13:05:02 +0200 Subject: [PATCH 42/44] add a couple more test cases --- polkadot/runtime/parachains/src/builder.rs | 65 ++++-- .../parachains/src/paras_inherent/tests.rs | 208 ++++++++++++++++-- 2 files changed, 235 insertions(+), 38 deletions(-) diff --git a/polkadot/runtime/parachains/src/builder.rs b/polkadot/runtime/parachains/src/builder.rs index e41f241b387e..045d3270122b 100644 --- a/polkadot/runtime/parachains/src/builder.rs +++ b/polkadot/runtime/parachains/src/builder.rs @@ -40,7 +40,7 @@ use sp_runtime::{ RuntimeAppPublic, }; use sp_std::{ - collections::{btree_map::BTreeMap, vec_deque::VecDeque}, + collections::{btree_map::BTreeMap, btree_set::BTreeSet, vec_deque::VecDeque}, prelude::Vec, vec, }; @@ -104,6 +104,8 @@ pub(crate) struct BenchBuilder { code_upgrade: Option, /// Specifies whether the claimqueue should be filled. fill_claimqueue: bool, + /// Cores which should not be available when being populated with pending candidates. + unavailable_cores: Vec, _phantom: sp_std::marker::PhantomData, } @@ -133,6 +135,7 @@ impl BenchBuilder { elastic_paras: Default::default(), code_upgrade: None, fill_claimqueue: true, + unavailable_cores: vec![], _phantom: sp_std::marker::PhantomData::, } } @@ -149,6 +152,12 @@ impl BenchBuilder { self } + /// Set the cores which should not be available when being populated with pending candidates. + pub(crate) fn set_unavailable_cores(mut self, unavailable_cores: Vec) -> Self { + self.unavailable_cores = unavailable_cores; + self + } + /// Set a map from para id seed to number of validity votes. pub(crate) fn set_backed_and_concluding_paras( mut self, @@ -337,10 +346,10 @@ impl BenchBuilder { /// Create an `AvailabilityBitfield` where `concluding` is a map where each key is a core index /// that is concluding and `cores` is the total number of cores in the system. 
- fn availability_bitvec(concluding: &BTreeMap, cores: usize) -> AvailabilityBitfield { + fn availability_bitvec(concluding_cores: &BTreeSet, cores: usize) -> AvailabilityBitfield { let mut bitfields = bitvec::bitvec![u8, bitvec::order::Lsb0; 0; 0]; for i in 0..cores { - if concluding.get(&(i as u32)).is_some() { + if concluding_cores.contains(&(i as u32)) { bitfields.push(true); } else { bitfields.push(false) @@ -480,24 +489,8 @@ impl BenchBuilder { let validators = self.validators.as_ref().expect("must have some validators prior to calling"); - let availability_bitvec = Self::availability_bitvec(concluding_paras, total_cores); - - let bitfields: Vec> = validators - .iter() - .enumerate() - .map(|(i, public)| { - let unchecked_signed = UncheckedSigned::::benchmark_sign( - public, - availability_bitvec.clone(), - &self.signing_context(), - ValidatorIndex(i as u32), - ); - - unchecked_signed - }) - .collect(); - let mut current_core_idx = 0u32; + let mut concluding_cores = BTreeSet::new(); for (seed, _) in concluding_paras.iter() { // make sure the candidates that will be concluding are marked as pending availability. @@ -513,13 +506,34 @@ impl BenchBuilder { para_id, core_idx, group_idx, - Self::validator_availability_votes_yes(validators.len()), + // No validators have made this candidate available yet. 
+ bitvec::bitvec![u8, bitvec::order::Lsb0; 0; validators.len()], CandidateHash(H256::from(byte32_slice_from(current_core_idx))), ); + if !self.unavailable_cores.contains(¤t_core_idx) { + concluding_cores.insert(current_core_idx); + } current_core_idx += 1; } } + let availability_bitvec = Self::availability_bitvec(&concluding_cores, total_cores); + + let bitfields: Vec> = validators + .iter() + .enumerate() + .map(|(i, public)| { + let unchecked_signed = UncheckedSigned::::benchmark_sign( + public, + availability_bitvec.clone(), + &self.signing_context(), + ValidatorIndex(i as u32), + ); + + unchecked_signed + }) + .collect(); + bitfields } @@ -839,7 +853,7 @@ impl BenchBuilder { .keys() .flat_map(|para_id| { (0..elastic_paras.get(¶_id).cloned().unwrap_or(1)) - .map(|_para_local_core_idx| { + .filter_map(|_para_local_core_idx| { let ttl = configuration::Pallet::::config().scheduler_params.ttl; // Load an assignment into provider so that one is present to pop let assignment = @@ -852,8 +866,13 @@ impl BenchBuilder { CoreIndex(core_idx), [ParasEntry::new(assignment, now + ttl)].into(), ); + let res = if builder.unavailable_cores.contains(&core_idx) { + None + } else { + Some(entry) + }; core_idx += 1; - entry + res }) .collect::>)>>() }) diff --git a/polkadot/runtime/parachains/src/paras_inherent/tests.rs b/polkadot/runtime/parachains/src/paras_inherent/tests.rs index 60c49ce92d81..6ab115fc1927 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/tests.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/tests.rs @@ -43,7 +43,6 @@ fn default_config() -> MockGenesisConfig { // weights for limiting data will fail, so we don't run them when using the benchmark feature. 
#[cfg(not(feature = "runtime-benchmarks"))] mod enter { - use super::{inclusion::tests::TestCandidateBuilder, *}; use crate::{ builder::{Bench, BenchBuilder}, @@ -54,6 +53,7 @@ mod enter { }, }; use assert_matches::assert_matches; + use core::panic; use frame_support::assert_ok; use frame_system::limits; use primitives::vstaging::SchedulerParams; @@ -68,6 +68,7 @@ mod enter { code_upgrade: Option, fill_claimqueue: bool, elastic_paras: BTreeMap, + unavailable_cores: Vec, } fn make_inherent_data( @@ -79,6 +80,7 @@ mod enter { code_upgrade, fill_claimqueue, elastic_paras, + unavailable_cores, }: TestConfig, ) -> Bench { let extra_cores = elastic_paras @@ -86,18 +88,17 @@ mod enter { .map(|count| *count as usize) .sum::() .saturating_sub(elastic_paras.len() as usize); + let total_cores = dispute_sessions.len() + backed_and_concluding.len() + extra_cores; let builder = BenchBuilder::::new() - .set_max_validators( - (dispute_sessions.len() + backed_and_concluding.len() + extra_cores) as u32 * - num_validators_per_core, - ) + .set_max_validators((total_cores) as u32 * num_validators_per_core) .set_elastic_paras(elastic_paras.clone()) .set_max_validators_per_core(num_validators_per_core) .set_dispute_statements(dispute_statements) - .set_backed_and_concluding_paras(backed_and_concluding) + .set_backed_and_concluding_paras(backed_and_concluding.clone()) .set_dispute_sessions(&dispute_sessions[..]) - .set_fill_claimqueue(fill_claimqueue); + .set_fill_claimqueue(fill_claimqueue) + .set_unavailable_cores(unavailable_cores); // Setup some assignments as needed: mock_assigner::Pallet::::set_core_count(builder.max_cores()); @@ -142,6 +143,7 @@ mod enter { code_upgrade: None, fill_claimqueue: false, elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); // We expect the scenario to have cores 0 & 1 with pending availability. The backed @@ -204,15 +206,14 @@ mod enter { } #[test] - // ParaId 1 has one pending candidate on core 0. 
- // ParaId 2 has one pending candidate on core 1. - // ParaId 3 has three pending candidates on cores 2, 3 and 4. - // All of them are being made available in this block. Propose 5 more candidates (one for each - // core) and check that they're successfully backed and the old ones enacted. fn include_backed_candidates_elastic_scaling() { + // ParaId 0 has one pending candidate on core 0. + // ParaId 1 has one pending candidate on core 1. + // ParaId 2 has three pending candidates on cores 2, 3 and 4. + // All of them are being made available in this block. Propose 5 more candidates (one for + // each core) and check that they're successfully backed and the old ones enacted. let config = default_config(); assert!(config.configuration.config.scheduler_params.lookahead > 0); - new_test_ext(config).execute_with(|| { // Set the elastic scaling MVP feature. >::set_node_feature( @@ -237,6 +238,7 @@ mod enter { code_upgrade: None, fill_claimqueue: false, elastic_paras: [(2, 3)].into_iter().collect(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -256,6 +258,8 @@ mod enter { // The current schedule is empty prior to calling `create_inherent_enter`. assert!(>::claimqueue_is_empty()); + assert!(Pallet::::on_chain_votes().is_none()); + // Nothing is filtered out (including the backed candidates.) assert_eq!( Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(), @@ -300,6 +304,117 @@ mod enter { vec![CoreIndex(2), CoreIndex(3), CoreIndex(4)] ); }); + + // ParaId 0 has one pending candidate on core 0. + // ParaId 1 has one pending candidate on core 1. + // ParaId 2 has 4 pending candidates on cores 2, 3, 4 and 5. + // Cores 1, 2 and 3 are being made available in this block. Propose 6 more candidates (one + // for each core) and check that the right ones are successfully backed and the old ones + // enacted. 
+ let config = default_config(); + assert!(config.configuration.config.scheduler_params.lookahead > 0); + new_test_ext(config).execute_with(|| { + // Set the elastic scaling MVP feature. + >::set_node_feature( + RuntimeOrigin::root(), + FeatureIndex::ElasticScalingMVP as u8, + true, + ) + .unwrap(); + + let dispute_statements = BTreeMap::new(); + + let mut backed_and_concluding = BTreeMap::new(); + backed_and_concluding.insert(0, 1); + backed_and_concluding.insert(1, 1); + backed_and_concluding.insert(2, 1); + + // Modify the availability bitfields so that cores 0, 4 and 5 are not being made + // available. + let unavailable_cores = vec![0, 4, 5]; + + let scenario = make_inherent_data(TestConfig { + dispute_statements, + dispute_sessions: vec![], // No disputes + backed_and_concluding, + num_validators_per_core: 1, + code_upgrade: None, + fill_claimqueue: true, + elastic_paras: [(2, 4)].into_iter().collect(), + unavailable_cores: unavailable_cores.clone(), + }); + + let mut expected_para_inherent_data = scenario.data.clone(); + + // Check the para inherent data is as expected: + // * 1 bitfield per validator (6 validators) + assert_eq!(expected_para_inherent_data.bitfields.len(), 6); + // * 1 backed candidate per core (6 cores) + assert_eq!(expected_para_inherent_data.backed_candidates.len(), 6); + // * 0 disputes. 
+ assert_eq!(expected_para_inherent_data.disputes.len(), 0); + assert!(Pallet::::on_chain_votes().is_none()); + + expected_para_inherent_data.backed_candidates = expected_para_inherent_data + .backed_candidates + .into_iter() + .filter(|candidate| { + let (_, Some(core_index)) = candidate.validator_indices_and_core_index(true) + else { + panic!("Core index must have been injected"); + }; + !unavailable_cores.contains(&core_index.0) + }) + .collect(); + + let mut inherent_data = InherentData::new(); + inherent_data.put_data(PARACHAINS_INHERENT_IDENTIFIER, &scenario.data).unwrap(); + + assert!(!>::claimqueue_is_empty()); + + // The right candidates have been filtered out (the ones for cores 0,4,5) + assert_eq!( + Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(), + expected_para_inherent_data + ); + + // 3 candidates have been enacted (for cores 1,2 and 3) + assert_eq!( + Pallet::::on_chain_votes().unwrap().backing_validators_per_candidate.len(), + 3 + ); + + assert_eq!( + // The session of the on chain votes should equal the current session, which is 2 + Pallet::::on_chain_votes().unwrap().session, + 2 + ); + + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(0)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![CoreIndex(0)] + ); + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(1)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![CoreIndex(1)] + ); + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(2)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![CoreIndex(4), CoreIndex(5), CoreIndex(2), CoreIndex(3)] + ); + }); } #[test] @@ -410,6 +525,7 @@ mod enter { code_upgrade: None, fill_claimqueue: false, elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -482,6 +598,7 @@ mod enter { code_upgrade: None, fill_claimqueue: false, 
elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -552,6 +669,7 @@ mod enter { code_upgrade: None, fill_claimqueue: false, elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -638,6 +756,7 @@ mod enter { code_upgrade: None, fill_claimqueue: false, elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -724,6 +843,7 @@ mod enter { code_upgrade: None, fill_claimqueue: false, elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -809,6 +929,7 @@ mod enter { code_upgrade: None, fill_claimqueue: false, elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -915,6 +1036,7 @@ mod enter { code_upgrade: None, fill_claimqueue: false, elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -982,6 +1104,7 @@ mod enter { code_upgrade: None, fill_claimqueue: false, elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -1047,6 +1170,7 @@ mod enter { code_upgrade: None, fill_claimqueue: false, elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -1149,6 +1273,7 @@ mod enter { code_upgrade: None, fill_claimqueue: false, elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let mut para_inherent_data = scenario.data.clone(); @@ -1237,6 +1362,7 @@ mod enter { code_upgrade: None, fill_claimqueue: false, elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -1743,6 +1869,7 @@ mod sanitizers { // Para 5 scheduled on core 6. 
No candidates supplied. // Para 6 is not scheduled. One candidate supplied. // Para 7 is scheduled on core 7 and 8, but the candidate contains the wrong core index. + // Para 8 is scheduled on core 9, but the candidate contains the wrong core index. fn get_test_data_multiple_cores_per_para(core_index_enabled: bool) -> TestData { const RELAY_PARENT_NUM: u32 = 3; @@ -1771,6 +1898,7 @@ mod sanitizers { keyring::Sr25519Keyring::Eve, keyring::Sr25519Keyring::Ferdie, keyring::Sr25519Keyring::One, + keyring::Sr25519Keyring::Two, ]; for validator in validators.iter() { Keystore::sr25519_generate_new( @@ -1795,6 +1923,7 @@ mod sanitizers { vec![ValidatorIndex(4)], vec![ValidatorIndex(5)], vec![ValidatorIndex(6)], + vec![ValidatorIndex(7)], ]); // Update scheduler's claimqueue with the parachains @@ -1862,10 +1991,17 @@ mod sanitizers { RELAY_PARENT_NUM, )]), ), + ( + CoreIndex::from(9), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 8.into(), core_index: CoreIndex(9) }, + RELAY_PARENT_NUM, + )]), + ), ])); // Set the on-chain included head data and current code hash. - for id in 1..=7u32 { + for id in 1..=8u32 { paras::Pallet::::set_current_head(ParaId::from(id), HeadData(vec![id as u8])); paras::Pallet::::force_set_current_code( RuntimeOrigin::root(), @@ -1885,6 +2021,7 @@ mod sanitizers { group_index if group_index == GroupIndex::from(4) => Some(vec![4]), group_index if group_index == GroupIndex::from(5) => Some(vec![5]), group_index if group_index == GroupIndex::from(6) => Some(vec![6]), + group_index if group_index == GroupIndex::from(7) => Some(vec![7]), _ => panic!("Group index out of bounds"), } @@ -2077,7 +2214,7 @@ mod sanitizers { &keystore, &signing_context, BackingKind::Threshold, - None, + core_index_enabled.then_some(CoreIndex(5 as u32)), ); backed_candidates.push(backed.clone()); expected_backed_candidates_with_core @@ -2185,6 +2322,45 @@ mod sanitizers { backed_candidates.push(backed.clone()); } + // Para 8. 
+ { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(8), + relay_parent, + pov_hash: Hash::repeat_byte(3 as u8), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(8), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![8]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(6 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(7 as u32)), + ); + backed_candidates.push(backed.clone()); + if !core_index_enabled { + expected_backed_candidates_with_core + .entry(ParaId::from(8)) + .or_insert(vec![]) + .push((backed, CoreIndex(9))); + } + } + // State sanity checks assert_eq!( >::scheduled_paras().collect::>(), @@ -2198,6 +2374,7 @@ mod sanitizers { (CoreIndex(6), ParaId::from(5)), (CoreIndex(7), ParaId::from(7)), (CoreIndex(8), ParaId::from(7)), + (CoreIndex(9), ParaId::from(8)), ] ); let mut scheduled: BTreeMap> = BTreeMap::new(); @@ -2215,6 +2392,7 @@ mod sanitizers { ValidatorIndex(4), ValidatorIndex(5), ValidatorIndex(6), + ValidatorIndex(7), ] ); From e05693c6dd1060d6f8bc9a52c24d9ac5f3193b52 Mon Sep 17 00:00:00 2001 From: alindima Date: Wed, 20 Mar 2024 13:44:50 +0200 Subject: [PATCH 43/44] review comment --- .../parachains/src/paras_inherent/mod.rs | 65 +++++++------------ 1 file changed, 24 insertions(+), 41 deletions(-) diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 88d44155d26a..02ddfd0accab 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -228,38 +228,6 @@ pub mod pallet { } } - /// Collect all freed cores based on storage data. (i.e. 
append cores freed from timeouts to - /// the given `freed_concluded`). - /// - /// The parameter `freed_concluded` contains all core indicies that became - /// free due to candidates that became available or due to candidates being disputed. - pub(crate) fn collect_all_freed_cores( - freed_concluded: I, - ) -> BTreeMap - where - I: core::iter::IntoIterator, - T: Config, - { - // Handle timeouts for any availability core work. - let freed_timeout = if >::availability_timeout_check_required() { - >::free_timedout() - } else { - Vec::new() - }; - - if !freed_timeout.is_empty() { - log::debug!(target: LOG_TARGET, "Evicted timed out cores: {:?}", freed_timeout); - } - - // We'll schedule paras again, given freed cores, and reasons for freeing. - let freed = freed_concluded - .into_iter() - .map(|(c, _hash)| (c, FreedReason::Concluded)) - .chain(freed_timeout.into_iter().map(|c| (c, FreedReason::TimedOut))) - .collect::>(); - freed - } - #[pallet::call] impl Pallet { /// Enter the paras inherent. This will process bitfields and backed candidates. @@ -538,15 +506,17 @@ impl Pallet { .map(|(_session, candidate)| candidate) .collect::>(); - let freed_disputed = - >::free_disputed(¤t_concluded_invalid_disputes); + // Get the cores freed as a result of concluded invalid candidates. + let (freed_disputed, concluded_invalid_hashes): (Vec, BTreeSet) = + >::free_disputed(¤t_concluded_invalid_disputes) + .into_iter() + .unzip(); // Create a bit index from the set of core indices where each index corresponds to // a core index that was freed due to a dispute. // // I.e. 010100 would indicate, the candidates on Core 1 and 3 would be disputed. 
- let disputed_bitfield = - create_disputed_bitfield(expected_bits, freed_disputed.iter().map(|(core, _)| core)); + let disputed_bitfield = create_disputed_bitfield(expected_bits, freed_disputed.iter()); let bitfields = sanitize_bitfields::( bitfields, @@ -560,7 +530,7 @@ impl Pallet { // Process new availability bitfields, yielding any availability cores whose // work has now concluded. - let mut freed_concluded = + let freed_concluded = >::update_pending_availability_and_get_freed_cores( &validator_public[..], bitfields.clone(), @@ -573,11 +543,24 @@ impl Pallet { METRICS.on_candidates_included(freed_concluded.len() as u64); - // Add the disputed candidates to the concluded collection. - freed_concluded.extend(freed_disputed.iter()); + // Get the timed out candidates + let freed_timeout = if >::availability_timeout_check_required() { + >::free_timedout() + } else { + Vec::new() + }; - let freed = collect_all_freed_cores::(freed_concluded); + if !freed_timeout.is_empty() { + log::debug!(target: LOG_TARGET, "Evicted timed out cores: {:?}", freed_timeout); + } + // We'll schedule paras again, given freed cores, and reasons for freeing. 
+ let freed = freed_concluded + .into_iter() + .map(|(c, _hash)| (c, FreedReason::Concluded)) + .chain(freed_disputed.into_iter().map(|core| (core, FreedReason::Concluded))) + .chain(freed_timeout.into_iter().map(|c| (c, FreedReason::TimedOut))) + .collect::>(); >::free_cores_and_fill_claimqueue(freed, now); METRICS.on_candidates_processed_total(backed_candidates.len() as u64); @@ -600,7 +583,7 @@ impl Pallet { let backed_candidates_with_core = sanitize_backed_candidates::( backed_candidates, &allowed_relay_parents, - freed_disputed.into_iter().map(|(_, hash)| hash).collect(), + concluded_invalid_hashes, scheduled, core_index_enabled, ); From 91f705cc3b1ac0bc583e20ff2dfcd0f190198499 Mon Sep 17 00:00:00 2001 From: alindima Date: Thu, 21 Mar 2024 10:40:47 +0200 Subject: [PATCH 44/44] improve test --- .../parachains/src/paras_inherent/tests.rs | 87 +++++++++++++++++-- 1 file changed, 79 insertions(+), 8 deletions(-) diff --git a/polkadot/runtime/parachains/src/paras_inherent/tests.rs b/polkadot/runtime/parachains/src/paras_inherent/tests.rs index 6ab115fc1927..c5e65622c76e 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/tests.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/tests.rs @@ -51,12 +51,13 @@ mod enter { common::{Assignment, AssignmentProvider}, ParasEntry, }, + session_info, }; use assert_matches::assert_matches; use core::panic; use frame_support::assert_ok; use frame_system::limits; - use primitives::vstaging::SchedulerParams; + use primitives::{vstaging::SchedulerParams, AvailabilityBitfield, UncheckedSigned}; use sp_runtime::Perbill; use sp_std::collections::btree_map::BTreeMap; @@ -322,8 +323,6 @@ mod enter { ) .unwrap(); - let dispute_statements = BTreeMap::new(); - let mut backed_and_concluding = BTreeMap::new(); backed_and_concluding.insert(0, 1); backed_and_concluding.insert(1, 1); @@ -334,7 +333,7 @@ mod enter { let unavailable_cores = vec![0, 4, 5]; let scenario = make_inherent_data(TestConfig { - dispute_statements, + 
dispute_statements: BTreeMap::new(), dispute_sessions: vec![], // No disputes backed_and_concluding, num_validators_per_core: 1, @@ -378,7 +377,7 @@ mod enter { expected_para_inherent_data ); - // 3 candidates have been enacted (for cores 1,2 and 3) + // 3 candidates have been backed (for cores 1,2 and 3) assert_eq!( Pallet::::on_chain_votes().unwrap().backing_validators_per_candidate.len(), 3 @@ -390,13 +389,77 @@ mod enter { 2 ); + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(1)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![CoreIndex(1)] + ); + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(2)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![CoreIndex(4), CoreIndex(5), CoreIndex(2), CoreIndex(3)] + ); + + let expected_heads = (0..=2) + .map(|id| { + inclusion::PendingAvailability::::get(ParaId::from(id)) + .unwrap() + .back() + .unwrap() + .candidate_commitments() + .head_data + .clone() + }) + .collect::>(); + + // Now just make all candidates available. + let mut data = scenario.data.clone(); + let validators = session_info::Pallet::::session_info(2).unwrap().validators; + let signing_context = SigningContext { + parent_hash: BenchBuilder::::header(4).hash(), + session_index: 2, + }; + + data.backed_candidates.clear(); + + data.bitfields.iter_mut().enumerate().for_each(|(i, bitfield)| { + let unchecked_signed = UncheckedSigned::::benchmark_sign( + validators.get(ValidatorIndex(i as u32)).unwrap(), + bitvec::bitvec![u8, bitvec::order::Lsb0; 1; 6].into(), + &signing_context, + ValidatorIndex(i as u32), + ); + *bitfield = unchecked_signed; + }); + let mut inherent_data = InherentData::new(); + inherent_data.put_data(PARACHAINS_INHERENT_IDENTIFIER, &data).unwrap(); + + // Nothing has been filtered out. 
+ assert_eq!( + Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(), + data + ); + + // No more candidates have been backed + assert!(Pallet::::on_chain_votes() + .unwrap() + .backing_validators_per_candidate + .is_empty()); + + // No more pending availability candidates assert_eq!( inclusion::PendingAvailability::::get(ParaId::from(0)) .unwrap() .into_iter() .map(|c| c.core_occupied()) .collect::>(), - vec![CoreIndex(0)] + vec![] ); assert_eq!( inclusion::PendingAvailability::::get(ParaId::from(1)) @@ -404,7 +467,7 @@ mod enter { .into_iter() .map(|c| c.core_occupied()) .collect::>(), - vec![CoreIndex(1)] + vec![] ); assert_eq!( inclusion::PendingAvailability::::get(ParaId::from(2)) @@ -412,8 +475,16 @@ mod enter { .into_iter() .map(|c| c.core_occupied()) .collect::>(), - vec![CoreIndex(4), CoreIndex(5), CoreIndex(2), CoreIndex(3)] + vec![] ); + + // Paras have the right on-chain heads now + expected_heads.into_iter().enumerate().for_each(|(id, head)| { + assert_eq!( + paras::Pallet::::para_head(ParaId::from(id as u32)).unwrap(), + head + ); + }); }); }