From cab6ab9c06995ab90da3b77bd276143052be5574 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Thu, 1 Feb 2024 22:29:58 +0200 Subject: [PATCH 01/51] Switch statement table from ParaId to CoreIndex Signed-off-by: Andrei Sandu --- polkadot/node/core/backing/src/lib.rs | 141 ++++++++++++++++-------- polkadot/primitives/Cargo.toml | 2 + polkadot/primitives/src/v6/mod.rs | 33 +++++- polkadot/statement-table/src/generic.rs | 13 +-- polkadot/statement-table/src/lib.rs | 6 +- 5 files changed, 136 insertions(+), 59 deletions(-) diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 98bbd6232add..01ca089d8c46 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -105,9 +105,9 @@ use polkadot_node_subsystem_util::{ }; use polkadot_primitives::{ BackedCandidate, CandidateCommitments, CandidateHash, CandidateReceipt, - CommittedCandidateReceipt, CoreIndex, CoreState, ExecutorParams, Hash, Id as ParaId, - PersistedValidationData, PvfExecKind, SigningContext, ValidationCode, ValidatorId, - ValidatorIndex, ValidatorSignature, ValidityAttestation, + CommittedCandidateReceipt, CoreIndex, CoreState, ExecutorParams, GroupIndex, Hash, + Id as ParaId, PersistedValidationData, PvfExecKind, SigningContext, ValidationCode, + ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, }; use sp_keystore::KeystorePtr; use statement_table::{ @@ -208,8 +208,10 @@ struct PerRelayParentState { prospective_parachains_mode: ProspectiveParachainsMode, /// The hash of the relay parent on top of which this job is doing it's work. parent: Hash, - /// The `ParaId` assigned to the local validator at this relay parent. - assignment: Option, + /// The `CoreIndex` assigned to the local validator at this relay parent. + assigned_para: Option, + /// The `CoreIndex` assigned to the local validator at this relay parent. + assigned_core: Option, /// The candidates that are backed by enough validators in their group, by hash. backed: HashSet, /// The table of candidates and statements under this relay-parent. 
@@ -382,7 +384,7 @@ struct AttestingData { #[derive(Default)] struct TableContext { validator: Option, - groups: HashMap>, + groups: HashMap>, validators: Vec, disabled_validators: Vec, } @@ -404,7 +406,7 @@ impl TableContext { impl TableContextTrait for TableContext { type AuthorityId = ValidatorIndex; type Digest = CandidateHash; - type GroupId = ParaId; + type GroupId = CoreIndex; type Signature = ValidatorSignature; type Candidate = CommittedCandidateReceipt; @@ -412,15 +414,11 @@ impl TableContextTrait for TableContext { candidate.hash() } - fn candidate_group(candidate: &CommittedCandidateReceipt) -> ParaId { - candidate.descriptor().para_id + fn is_member_of(&self, authority: &ValidatorIndex, core: &CoreIndex) -> bool { + self.groups.get(core).map_or(false, |g| g.iter().any(|a| a == authority)) } - fn is_member_of(&self, authority: &ValidatorIndex, group: &ParaId) -> bool { - self.groups.get(group).map_or(false, |g| g.iter().any(|a| a == authority)) - } - - fn get_group_size(&self, group: &ParaId) -> Option { + fn get_group_size(&self, group: &CoreIndex) -> Option { self.groups.get(group).map(|g| g.len()) } } @@ -442,20 +440,22 @@ fn primitive_statement_to_table(s: &SignedFullStatementWithPVD) -> TableSignedSt fn table_attested_to_backed( attested: TableAttestedCandidate< - ParaId, + CoreIndex, CommittedCandidateReceipt, ValidatorIndex, ValidatorSignature, >, table_context: &TableContext, ) -> Option { - let TableAttestedCandidate { candidate, validity_votes, group_id: para_id } = attested; + let TableAttestedCandidate { candidate, validity_votes, group_id: core_index } = attested; let (ids, validity_votes): (Vec<_>, Vec) = validity_votes.into_iter().map(|(id, vote)| (id, vote.into())).unzip(); - let group = table_context.groups.get(¶_id)?; + let group = table_context.groups.get(&core_index)?; + // TODO: This si a temporary fix and will not work if a para is assigned to + // different sized backing groups. We need core index in the candidate descriptor let mut validator_indices = BitVec::with_capacity(group.len()); validator_indices.resize(group.len(), false); @@ -981,6 +981,56 @@ async fn handle_active_leaves_update( Ok(()) } +macro_rules! try_runtime_api { + ($x: expr) => { + match $x { + Ok(x) => x, + Err(err) => { + // Only bubble up fatal errors. + error::log_error(Err(Into::::into(err).into()))?; + + // We can't do candidate validation work if we don't have the + // requisite runtime API data. But these errors should not take + // down the node. 
+ return Ok(None) + }, + } + }; +} + +#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +async fn core_index_from_statement( + ctx: &mut Context, + relay_parent: Hash, + statement: &SignedFullStatementWithPVD, +) -> Result, Error> { + let parent = relay_parent; + + let (groups, cores) = futures::try_join!( + request_validator_groups(parent, ctx.sender()).await, + request_from_runtime(parent, ctx.sender(), |tx| { + RuntimeApiRequest::AvailabilityCores(tx) + },) + .await, + ) + .map_err(Error::JoinMultiple)?; + let (validator_groups, group_rotation_info) = try_runtime_api!(groups); + let cores = try_runtime_api!(cores); + + let statement_validator_index = statement.validator_index(); + for (group_index, group) in validator_groups.iter().enumerate() { + for validator_index in group { + if *validator_index == statement_validator_index { + return Ok(Some( + group_rotation_info.core_for_group(GroupIndex(group_index as u32), cores.len()), + )) + } + } + } + + Ok(None) +} + /// Load the data necessary to do backing work on top of a relay-parent. #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] async fn construct_per_relay_parent_state( @@ -989,23 +1039,6 @@ async fn construct_per_relay_parent_state( keystore: &KeystorePtr, mode: ProspectiveParachainsMode, ) -> Result, Error> { - macro_rules! try_runtime_api { - ($x: expr) => { - match $x { - Ok(x) => x, - Err(err) => { - // Only bubble up fatal errors. - error::log_error(Err(Into::::into(err).into()))?; - - // We can't do candidate validation work if we don't have the - // requisite runtime API data. But these errors should not take - // down the node. - return Ok(None) - }, - } - }; - } - let parent = relay_parent; let (session_index, validators, groups, cores) = futures::try_join!( @@ -1055,9 +1088,11 @@ async fn construct_per_relay_parent_state( }, }; - let mut groups = HashMap::new(); let n_cores = cores.len(); - let mut assignment = None; + + let mut groups = HashMap::>::new(); + let mut assigned_core = None; + let mut assigned_para = None; for (idx, core) in cores.into_iter().enumerate() { let core_para_id = match core { @@ -1077,11 +1112,13 @@ async fn construct_per_relay_parent_state( let group_index = group_rotation_info.group_for_core(core_index, n_cores); if let Some(g) = validator_groups.get(group_index.0 as usize) { if validator.as_ref().map_or(false, |v| g.contains(&v.index())) { - assignment = Some(core_para_id); + assigned_para = Some(core_para_id); + assigned_core = Some(core_index); } - groups.insert(core_para_id, g.clone()); + groups.insert(core_index, g.clone()); } } + gum::debug!(target: LOG_TARGET, ?groups, "TableContext" ); let table_context = TableContext { validator, groups, validators, disabled_validators }; let table_config = TableConfig { @@ -1094,7 +1131,8 @@ async fn construct_per_relay_parent_state( Ok(Some(PerRelayParentState { prospective_parachains_mode: mode, parent, - assignment, + assigned_core, + assigned_para, backed: HashSet::new(), table: Table::new(table_config), table_context, @@ -1519,15 +1557,16 @@ async fn import_statement( per_candidate: &mut HashMap, statement: &SignedFullStatementWithPVD, ) -> Result, Error> { + let candidate_hash = statement.payload().candidate_hash(); + gum::debug!( target: LOG_TARGET, statement = ?statement.payload().to_compact(), validator_index = statement.validator_index().0, + ?candidate_hash, "Importing statement", ); - let candidate_hash = statement.payload().candidate_hash(); - // If this is a new candidate (statement is 'seconded' 
and candidate is unknown), // we need to create an entry in the `PerCandidateState` map. // @@ -1593,7 +1632,11 @@ async fn import_statement( let stmt = primitive_statement_to_table(statement); - Ok(rp_state.table.import_statement(&rp_state.table_context, stmt)) + let core = core_index_from_statement(ctx, rp_state.parent, statement) + .await + .unwrap() + .unwrap(); + Ok(rp_state.table.import_statement(&rp_state.table_context, core, stmt)) } /// Handles a summary received from [`import_statement`] and dispatches `Backed` notifications and @@ -1654,8 +1697,14 @@ async fn post_import_statement_actions( ); ctx.send_unbounded_message(message); } + } else { + gum::debug!(target: LOG_TARGET, ?candidate_hash, "Cannot get BackedCandidate"); } + } else { + gum::debug!(target: LOG_TARGET, ?candidate_hash, "Candidate already known"); } + } else { + gum::debug!(target: LOG_TARGET, "No attested candidate"); } issue_new_misbehaviors(ctx, rp_state.parent, &mut rp_state.table); @@ -1859,9 +1908,10 @@ async fn maybe_validate_and_import( let candidate_hash = summary.candidate; - if Some(summary.group_id) != rp_state.assignment { + if Some(summary.group_id) != rp_state.assigned_core { return Ok(()) } + let attesting = match statement.payload() { StatementWithPVD::Seconded(receipt, _) => { let attesting = AttestingData { @@ -2004,10 +2054,11 @@ async fn handle_second_message( } // Sanity check that candidate is from our assignment. - if Some(candidate.descriptor().para_id) != rp_state.assignment { + if Some(candidate.descriptor().para_id) != rp_state.assigned_para { gum::debug!( target: LOG_TARGET, - our_assignment = ?rp_state.assignment, + our_assignment_core = ?rp_state.assigned_core, + our_assignment_para = ?rp_state.assigned_para, collation = ?candidate.descriptor().para_id, "Subsystem asked to second for para outside of our assignment", ); diff --git a/polkadot/primitives/Cargo.toml b/polkadot/primitives/Cargo.toml index c2fdf331568d..27827fcd7d78 100644 --- a/polkadot/primitives/Cargo.toml +++ b/polkadot/primitives/Cargo.toml @@ -15,6 +15,7 @@ hex-literal = "0.4.1" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive", "serde"] } serde = { version = "1.0.195", default-features = false, features = ["alloc", "derive"] } +log = { version = "0.4.17", default-features = false } application-crypto = { package = "sp-application-crypto", path = "../../substrate/primitives/application-crypto", default-features = false, features = ["serde"] } inherents = { package = "sp-inherents", path = "../../substrate/primitives/inherents", default-features = false } @@ -44,6 +45,7 @@ std = [ "primitives/std", "runtime_primitives/std", "scale-info/std", + "log/std", "serde/std", "sp-api/std", "sp-arithmetic/std", diff --git a/polkadot/primitives/src/v6/mod.rs b/polkadot/primitives/src/v6/mod.rs index fd0b32db7994..c9c78499cbb5 100644 --- a/polkadot/primitives/src/v6/mod.rs +++ b/polkadot/primitives/src/v6/mod.rs @@ -72,6 +72,7 @@ pub use metrics::{ /// The key type ID for a collator key. pub const COLLATOR_KEY_TYPE_ID: KeyTypeId = KeyTypeId(*b"coll"); +const LOG_TARGET: &str = "runtime::primitives"; mod collator_app { use application_crypto::{app_crypto, sr25519}; @@ -743,17 +744,35 @@ impl BackedCandidate { /// /// Returns either an error, indicating that one of the signatures was invalid or that the index /// was out-of-bounds, or the number of signatures checked. 
-pub fn check_candidate_backing + Clone + Encode>( +pub fn check_candidate_backing + Clone + Encode + core::fmt::Debug>( backed: &BackedCandidate, signing_context: &SigningContext, group_len: usize, validator_lookup: impl Fn(usize) -> Option, ) -> Result { + log::debug!( + target: LOG_TARGET, + "checking candidate {:?}", + backed + ); + if backed.validator_indices.len() != group_len { + log::debug!( + target: LOG_TARGET, + "indices mismatch: group_len = {} , indices_len = {}", + group_len, + backed.validator_indices.len(), + ); return Err(()) } if backed.validity_votes.len() > group_len { + log::debug!( + target: LOG_TARGET, + "Too many votes, expected: {}, found: {}", + group_len, + backed.validity_votes.len(), + ); return Err(()) } @@ -775,11 +794,23 @@ pub fn check_candidate_backing + Clone + Encode>( if sig.verify(&payload[..], &validator_id) { signed += 1; } else { + log::debug!( + target: LOG_TARGET, + "Invalid signature. validator_id = {:?}, validator_index = {} ", + validator_id, + val_in_group_idx, + ); return Err(()) } } if signed != backed.validity_votes.len() { + log::error!( + target: LOG_TARGET, + "Too many signatures, expected = {}, found = {}", + backed.validity_votes.len() , + signed, + ); return Err(()) } diff --git a/polkadot/statement-table/src/generic.rs b/polkadot/statement-table/src/generic.rs index 22bffde5acc1..cef2c87151c0 100644 --- a/polkadot/statement-table/src/generic.rs +++ b/polkadot/statement-table/src/generic.rs @@ -53,9 +53,6 @@ pub trait Context { /// get the digest of a candidate. fn candidate_digest(candidate: &Self::Candidate) -> Self::Digest; - /// get the group of a candidate. - fn candidate_group(candidate: &Self::Candidate) -> Self::GroupId; - /// Whether a authority is a member of a group. /// Members are meant to submit candidates and vote on validity. 
fn is_member_of(&self, authority: &Self::AuthorityId, group: &Self::GroupId) -> bool; @@ -342,13 +339,13 @@ impl Table { pub fn import_statement( &mut self, context: &Ctx, + group_id: Ctx::GroupId, statement: SignedStatement, ) -> Option> { let SignedStatement { statement, signature, sender: signer } = statement; - let res = match statement { Statement::Seconded(candidate) => - self.import_candidate(context, signer.clone(), candidate, signature), + self.import_candidate(context, signer.clone(), candidate, signature, group_id), Statement::Valid(digest) => self.validity_vote(context, signer.clone(), digest, ValidityVote::Valid(signature)), }; @@ -387,8 +384,8 @@ impl Table { authority: Ctx::AuthorityId, candidate: Ctx::Candidate, signature: Ctx::Signature, + group: Ctx::GroupId, ) -> ImportResult { - let group = Ctx::candidate_group(&candidate); if !context.is_member_of(&authority, &group) { return Err(Misbehavior::UnauthorizedStatement(UnauthorizedStatement { statement: SignedStatement { @@ -634,10 +631,6 @@ mod tests { Digest(candidate.1) } - fn candidate_group(candidate: &Candidate) -> GroupId { - GroupId(candidate.0) - } - fn is_member_of(&self, authority: &AuthorityId, group: &GroupId) -> bool { self.authorities.get(authority).map(|v| v == group).unwrap_or(false) } diff --git a/polkadot/statement-table/src/lib.rs b/polkadot/statement-table/src/lib.rs index d4629330ac01..3740d15cc4f3 100644 --- a/polkadot/statement-table/src/lib.rs +++ b/polkadot/statement-table/src/lib.rs @@ -35,8 +35,8 @@ pub use generic::{Config, Context, Table}; pub mod v2 { use crate::generic; use primitives::{ - CandidateHash, CommittedCandidateReceipt, CompactStatement as PrimitiveStatement, Id, - ValidatorIndex, ValidatorSignature, + CandidateHash, CommittedCandidateReceipt, CompactStatement as PrimitiveStatement, + CoreIndex, ValidatorIndex, ValidatorSignature, }; /// Statements about candidates on the network. @@ -59,7 +59,7 @@ pub mod v2 { >; /// A summary of import of a statement. - pub type Summary = generic::Summary; + pub type Summary = generic::Summary; impl<'a> From<&'a Statement> for PrimitiveStatement { fn from(s: &'a Statement) -> PrimitiveStatement { From d2df658335e3525a3efec59a5a8286e96555b0bb Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Fri, 2 Feb 2024 23:06:21 +0200 Subject: [PATCH 02/51] cargo lock Signed-off-by: Andrei Sandu --- Cargo.lock | 1 + 1 file changed, 1 insertion(+) diff --git a/Cargo.lock b/Cargo.lock index 3e7e59ac1174..c38490bebdd2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13030,6 +13030,7 @@ version = "7.0.0" dependencies = [ "bitvec", "hex-literal", + "log", "parity-scale-codec", "polkadot-core-primitives", "polkadot-parachain-primitives", From 4a8b8d5a724c42e8236b7595530686f0e7239a1d Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Tue, 6 Feb 2024 17:58:16 +0200 Subject: [PATCH 03/51] add experimental feature Signed-off-by: Andrei Sandu --- polkadot/primitives/src/vstaging/mod.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs index 630bcf8679ad..dfae1af07f38 100644 --- a/polkadot/primitives/src/vstaging/mod.rs +++ b/polkadot/primitives/src/vstaging/mod.rs @@ -68,5 +68,12 @@ pub mod node_features { /// Every time a new feature flag is assigned it should take this value. /// and this should be incremented. FirstUnassigned = 1, + /// Experimental features start at bit 16. Note that experimental features pop in and out + /// of exsitence without warning. 
+ /// + /// This feature enables the extension of `BackedCandidate::validator_indices` by 8 bit. + /// The value stored there represents the assumed core index where the candidates + /// are backed. + InjectCoreIndex = 16, } } From 9244632c43f52521d667f581fb164a7067358625 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Tue, 6 Feb 2024 17:59:29 +0200 Subject: [PATCH 04/51] inject core_index from statements Signed-off-by: Andrei Sandu --- polkadot/node/core/backing/src/lib.rs | 41 +++++++++++++++++++++++---- 1 file changed, 35 insertions(+), 6 deletions(-) diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 01ca089d8c46..723db247f42a 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -70,7 +70,7 @@ use std::{ sync::Arc, }; -use bitvec::vec::BitVec; +use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; use futures::{ channel::{mpsc, oneshot}, future::BoxFuture, @@ -104,6 +104,7 @@ use polkadot_node_subsystem_util::{ Validator, }; use polkadot_primitives::{ + vstaging::{node_features::FeatureIndex, NodeFeatures}, BackedCandidate, CandidateCommitments, CandidateHash, CandidateReceipt, CommittedCandidateReceipt, CoreIndex, CoreState, ExecutorParams, GroupIndex, Hash, Id as ParaId, PersistedValidationData, PvfExecKind, SigningContext, ValidationCode, @@ -118,7 +119,7 @@ use statement_table::{ }, Config as TableConfig, Context as TableContextTrait, Table, }; -use util::vstaging::get_disabled_validators_with_fallback; +use util::{runtime::request_node_features, vstaging::get_disabled_validators_with_fallback}; mod error; @@ -226,6 +227,8 @@ struct PerRelayParentState { fallbacks: HashMap, /// The minimum backing votes threshold. minimum_backing_votes: u32, + /// If true, we're appendindg extra bits in the BackedCandidate validator indices bitfield. + inject_core_index: bool, } struct PerCandidateState { @@ -446,6 +449,7 @@ fn table_attested_to_backed( ValidatorSignature, >, table_context: &TableContext, + inject_core_index: bool, ) -> Option { let TableAttestedCandidate { candidate, validity_votes, group_id: core_index } = attested; @@ -454,8 +458,6 @@ fn table_attested_to_backed( let group = table_context.groups.get(&core_index)?; - // TODO: This si a temporary fix and will not work if a para is assigned to - // different sized backing groups. We need core index in the candidate descriptor let mut validator_indices = BitVec::with_capacity(group.len()); validator_indices.resize(group.len(), false); @@ -479,6 +481,12 @@ fn table_attested_to_backed( } vote_positions.sort_by_key(|(_orig, pos_in_group)| *pos_in_group); + if inject_core_index { + let core_index_to_inject: BitVec = + BitVec::from_vec(vec![core_index.0 as u8]); + validator_indices.extend(core_index_to_inject); + } + Some(BackedCandidate { candidate, validity_votes: vote_positions @@ -1053,6 +1061,16 @@ async fn construct_per_relay_parent_state( .map_err(Error::JoinMultiple)?; let session_index = try_runtime_api!(session_index); + + let inject_core_index = request_node_features(parent, session_index, ctx.sender()) + .await? 
+ .unwrap_or(NodeFeatures::EMPTY) + .get(FeatureIndex::InjectCoreIndex as usize) + .map(|b| *b) + .unwrap_or(false); + + gum::debug!(target: LOG_TARGET, inject_core_index, ?parent, "New state"); + let validators: Vec<_> = try_runtime_api!(validators); let (validator_groups, group_rotation_info) = try_runtime_api!(groups); let cores = try_runtime_api!(cores); @@ -1140,6 +1158,7 @@ async fn construct_per_relay_parent_state( awaiting_validation: HashSet::new(), fallbacks: HashMap::new(), minimum_backing_votes, + inject_core_index, })) } @@ -1658,7 +1677,11 @@ async fn post_import_statement_actions( // `HashSet::insert` returns true if the thing wasn't in there already. if rp_state.backed.insert(candidate_hash) { - if let Some(backed) = table_attested_to_backed(attested, &rp_state.table_context) { + if let Some(backed) = table_attested_to_backed( + attested, + &rp_state.table_context, + rp_state.inject_core_index, + ) { let para_id = backed.candidate.descriptor.para_id; gum::debug!( target: LOG_TARGET, @@ -2138,7 +2161,13 @@ fn handle_get_backed_candidates_message( &rp_state.table_context, rp_state.minimum_backing_votes, ) - .and_then(|attested| table_attested_to_backed(attested, &rp_state.table_context)) + .and_then(|attested| { + table_attested_to_backed( + attested, + &rp_state.table_context, + rp_state.inject_core_index, + ) + }) }) .collect(); From 22e017b4e2d12880429b675c0588f0ac6ccf658e Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Tue, 6 Feb 2024 17:59:45 +0200 Subject: [PATCH 05/51] temporary provisioner fix Signed-off-by: Andrei Sandu --- polkadot/node/core/provisioner/src/lib.rs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs index 3970b8572612..4257cc3b511f 100644 --- a/polkadot/node/core/provisioner/src/lib.rs +++ b/polkadot/node/core/provisioner/src/lib.rs @@ -681,10 +681,16 @@ async fn request_backable_candidates( CoreState::Free => continue, }; + // We're currently fetching based on para id. This has to be chagned to query prospective + // parachains via core index. We should be calling this once per para rather than per core. 
+ // TODO: Fix after https://github.com/paritytech/polkadot-sdk/pull/3160 let response = get_backable_candidate(relay_parent, para_id, required_path, sender).await?; - match response { - Some((hash, relay_parent)) => selected_candidates.push((hash, relay_parent)), + Some((hash, relay_parent)) => { + if selected_candidates.iter().position(|bc| &(hash, relay_parent) == bc).is_none() { + selected_candidates.push((hash, relay_parent)) + } + }, None => { gum::debug!( target: LOG_TARGET, @@ -726,6 +732,7 @@ async fn select_candidates( ) .await?, }; + gum::debug!(target: LOG_TARGET, ?selected_candidates, "Got backedable candidates"); // now get the backed candidates corresponding to these candidate receipts let (tx, rx) = oneshot::channel(); From 9dc8927454a96a95ab20200d0dde8b4fed3ecb96 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Tue, 6 Feb 2024 18:02:21 +0200 Subject: [PATCH 06/51] Support injected `CoreIndex` Signed-off-by: Andrei Sandu --- .../runtime/parachains/src/inclusion/mod.rs | 36 +++++--- .../parachains/src/paras_inherent/mod.rs | 25 ++++-- polkadot/runtime/parachains/src/util.rs | 82 ++++++++++++++++++- 3 files changed, 124 insertions(+), 19 deletions(-) diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 90af9cde00a8..49375864f23a 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -25,6 +25,7 @@ use crate::{ paras::{self, SetGoAhead}, scheduler::{self, AvailabilityTimeoutStatus}, shared::{self, AllowedRelayParentsTracker}, + util::strip_candidate_core_index, }; use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; use frame_support::{ @@ -37,11 +38,12 @@ use frame_system::pallet_prelude::*; use pallet_message_queue::OnQueueChanged; use parity_scale_codec::{Decode, Encode}; use primitives::{ - effective_minimum_backing_votes, supermajority_threshold, well_known_keys, - AvailabilityBitfield, BackedCandidate, CandidateCommitments, CandidateDescriptor, - CandidateHash, CandidateReceipt, CommittedCandidateReceipt, CoreIndex, GroupIndex, Hash, - HeadData, Id as ParaId, SignedAvailabilityBitfields, SigningContext, UpwardMessage, - ValidatorId, ValidatorIndex, ValidityAttestation, + effective_minimum_backing_votes, supermajority_threshold, + vstaging::node_features::FeatureIndex, well_known_keys, AvailabilityBitfield, BackedCandidate, + CandidateCommitments, CandidateDescriptor, CandidateHash, CandidateReceipt, + CommittedCandidateReceipt, CoreIndex, GroupIndex, Hash, HeadData, Id as ParaId, + SignedAvailabilityBitfields, SigningContext, UpwardMessage, ValidatorId, ValidatorIndex, + ValidityAttestation, }; use scale_info::TypeInfo; use sp_runtime::{traits::One, DispatchError, SaturatedConversion, Saturating}; @@ -601,8 +603,9 @@ impl Pallet { /// scheduled cores. If these conditions are not met, the execution of the function fails. 
pub(crate) fn process_candidates( allowed_relay_parents: &AllowedRelayParentsTracker>, - candidates: Vec>, + mut candidates: Vec>, scheduled: &BTreeMap, + scheduled_by_core: &BTreeMap, group_validators: GV, ) -> Result, DispatchError> where @@ -610,7 +613,7 @@ impl Pallet { { let now = >::block_number(); - ensure!(candidates.len() <= scheduled.len(), Error::::UnscheduledCandidate); + ensure!(candidates.len() <= scheduled_by_core.len(), Error::::UnscheduledCandidate); if scheduled.is_empty() { return Ok(ProcessedCandidates::default()) @@ -648,7 +651,7 @@ impl Pallet { // // In the meantime, we do certain sanity checks on the candidates and on the scheduled // list. - for (candidate_idx, backed_candidate) in candidates.iter().enumerate() { + for (candidate_idx, backed_candidate) in candidates.iter_mut().enumerate() { let relay_parent_hash = backed_candidate.descriptor().relay_parent; let para_id = backed_candidate.descriptor().para_id; @@ -680,10 +683,23 @@ impl Pallet { }; let para_id = backed_candidate.descriptor().para_id; - let mut backers = bitvec::bitvec![u8, BitOrderLsb0; 0; validators.len()]; + let core_idx = + if let Some(core_idx) = strip_candidate_core_index::(backed_candidate) { + core_idx + } else { + *scheduled.get(¶_id).ok_or(Error::::UnscheduledCandidate)? + }; + + log::debug!(target: LOG_TARGET, "Candidate {:?} on {:?}, core_index_hack = {}", backed_candidate.hash(), core_idx, configuration::Pallet::::config() + .node_features + .get(FeatureIndex::InjectCoreIndex as usize) + .map(|b| *b) + .unwrap_or(false)); - let core_idx = *scheduled.get(¶_id).ok_or(Error::::UnscheduledCandidate)?; check_assignment_in_order(core_idx)?; + + let mut backers = bitvec::bitvec![u8, BitOrderLsb0; 0; validators.len()]; + ensure!( >::get(¶_id).is_none() && >::get(¶_id).is_none(), diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 81e092f0a991..0744d1639b2e 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -31,7 +31,7 @@ use crate::{ paras, scheduler::{self, FreedReason}, shared::{self, AllowedRelayParentsTracker}, - ParaId, + ParaId, util::elastic_scaling_mvp_filter, }; use bitvec::prelude::BitVec; use frame_support::{ @@ -40,6 +40,9 @@ use frame_support::{ pallet_prelude::*, traits::Randomness, }; + +use crate::util::strip_candidate_core_index; + use frame_system::pallet_prelude::*; use pallet_babe::{self, ParentBlockRandomness}; use primitives::{ @@ -591,6 +594,8 @@ impl Pallet { METRICS.on_candidates_processed_total(backed_candidates.len() as u64); + elastic_scaling_mvp_filter::(&mut backed_candidates); + let SanitizedBackedCandidates { backed_candidates, votes_from_disabled_were_dropped } = sanitize_backed_candidates::( backed_candidates, @@ -625,6 +630,7 @@ impl Pallet { if context == ProcessInherentDataContext::Enter { ensure!(!votes_from_disabled_were_dropped, Error::::BackedByDisabled); } + let scheduled_by_core = >::scheduled_paras().collect(); // Process backed candidates according to scheduled cores. let inclusion::ProcessedCandidates::< as HeaderT>::Hash> { @@ -634,6 +640,7 @@ impl Pallet { &allowed_relay_parents, backed_candidates.clone(), &scheduled, + &scheduled_by_core, >::group_validators, )?; // Note which of the scheduled cores were actually occupied by a backed candidate. 
@@ -1095,12 +1102,16 @@ fn filter_backed_statements_from_disabled_validators *core_idx, - None => { - log::debug!(target: LOG_TARGET, "Can't get core idx of a backed candidate for para id {:?}. Dropping the candidate.", bc.descriptor().para_id); - return false + let core_idx = if let Some(core_idx) = strip_candidate_core_index::(bc) { + core_idx + } else { + // Get `core_idx` assigned to the `para_id` of the candidate + match scheduled.get(&bc.descriptor().para_id) { + Some(core_idx) => *core_idx, + None => { + log::debug!(target: LOG_TARGET, "Can't get core idx of a backed candidate for para id {:?}. Dropping the candidate.", bc.descriptor().para_id); + return false + } } }; diff --git a/polkadot/runtime/parachains/src/util.rs b/polkadot/runtime/parachains/src/util.rs index aa07ef080055..3524caa46a1b 100644 --- a/polkadot/runtime/parachains/src/util.rs +++ b/polkadot/runtime/parachains/src/util.rs @@ -17,11 +17,15 @@ //! Utilities that don't belong to any particular module but may draw //! on all modules. +use bitvec::{field::BitField, vec::BitVec}; use frame_system::pallet_prelude::BlockNumberFor; -use primitives::{Id as ParaId, PersistedValidationData, ValidatorIndex}; +use primitives::{ + vstaging::node_features::FeatureIndex, BackedCandidate, CoreIndex, Id as ParaId, + PersistedValidationData, ValidatorIndex, +}; use sp_std::{collections::btree_set::BTreeSet, vec::Vec}; -use crate::{configuration, hrmp, paras}; +use crate::{configuration, hrmp, paras, scheduler}; /// Make the persisted validation data for a particular parachain, a specified relay-parent and it's /// storage root. @@ -117,3 +121,77 @@ mod tests { assert_eq!(selected, vec![1, 3, 7]); } } + +/// Filters out all candidates that have multiple cores assigned and no +/// `CoreIndex` injected. +pub(crate) fn elastic_scaling_mvp_filter(candidates: &mut Vec>) { + if !configuration::Pallet::::config() + .node_features + .get(FeatureIndex::InjectCoreIndex as usize) + .map(|b| *b) + .unwrap_or(false) { + // we don't touch the candidates, since we don't expect block producers + // to inject `CoreIndex`. + return + } + // TODO: determine cores assigned to this para. + let multiple_cores_assigned = true; + candidates.retain(|candidate| !multiple_cores_assigned || has_core_index::(candidate) ); +} + +// Returns `true` if the candidate contains an injected `CoreIndex`. +fn has_core_index(candidate: &BackedCandidate) -> bool { + // After stripping the 8 bit extension, the `validator_indices` field length is expected + // to be equal to backing group size. If these don't match, the `CoreIndex` is badly encoded, + // or not supported. + let core_idx_offset = candidate.validator_indices.len().saturating_sub(8); + let (validator_indices_slice, core_idx_slice) = + candidate.validator_indices.split_at(core_idx_offset); + let core_idx: u8 = core_idx_slice.load(); + + let current_block = frame_system::Pallet::::block_number(); + + // Get the backing group of the candidate backed at `core_idx`. + let group_idx = match >::group_assigned_to_core( + CoreIndex(core_idx as u32), + current_block, + ) { + Some(group_idx) => group_idx, + None => return false + }; + + + let group_validators = match >::group_validators(group_idx) { + Some(validators) => validators, + None => return false + }; + + group_validators.len() == validator_indices_slice.len() +} + +/// Strips and returns the `CoreIndex` encoded in the `validator_indices` of `BackedCandidate` +/// if `FeatureIndex::InjectCoreIndex` is enabled and supported by the block producer.
+/// +/// Otherwise it returns `None`. +pub(crate) fn strip_candidate_core_index( + backed_candidate: &mut BackedCandidate, +) -> Option { + // This flag tells us if the block producers must enable the Elastic Scaling MVP hack. + // It extends `BackedCandidate::validator_indices` to store an 8 bit core index. + let core_index_hack = configuration::Pallet::::config() + .node_features + .get(FeatureIndex::InjectCoreIndex as usize) + .map(|b| *b) + .unwrap_or(false); + + if core_index_hack { + let core_idx_offset = backed_candidate.validator_indices.len().saturating_sub(8); + let (validator_indices_slice, core_idx_slice) = + backed_candidate.validator_indices.split_at(core_idx_offset); + let core_idx: u8 = core_idx_slice.load(); + backed_candidate.validator_indices = BitVec::from(validator_indices_slice); + Some(CoreIndex(core_idx as u32)) + } else { + None + } +} From fbb7351d3b186bf2fa29281310d50a067e1efaa8 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Tue, 6 Feb 2024 18:21:18 +0200 Subject: [PATCH 07/51] cargo lock Signed-off-by: Andrei Sandu --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b49b77dd9333..abd137e1ab15 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -21235,9 +21235,9 @@ dependencies = [ [[package]] name = "wasmi" -version = "0.31.2" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a8281d1d660cdf54c76a3efa9ddd0c270cada1383a995db3ccb43d166456c7" +checksum = "1f341edb80021141d4ae6468cbeefc50798716a347d4085c3811900049ea8945" dependencies = [ "smallvec", "spin 0.9.8", @@ -21248,9 +21248,9 @@ dependencies = [ [[package]] name = "wasmi_arena" -version = "0.4.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "104a7f73be44570cac297b3035d76b169d6599637631cf37a1703326a0727073" +checksum = "401c1f35e413fac1846d4843745589d9ec678977ab35a384db8ae7830525d468" [[package]] name = "wasmi_core" From 6c72918a0e68cacfcf629e2974ae1404dd5b1e9b Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Mon, 12 Feb 2024 14:35:50 +0200 Subject: [PATCH 08/51] It was damn hard to fix these tests Signed-off-by: Andrei Sandu --- Cargo.lock | 1 + polkadot/node/core/backing/src/error.rs | 3 + polkadot/node/core/backing/src/lib.rs | 19 ++++- polkadot/node/core/backing/src/tests/mod.rs | 81 +++++++++++++++++-- .../src/tests/prospective_parachains.rs | 67 ++++++++++++++- polkadot/node/core/provisioner/src/lib.rs | 2 +- polkadot/statement-table/Cargo.toml | 1 + polkadot/statement-table/src/generic.rs | 2 + 8 files changed, 164 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index abd137e1ab15..fb345e3d36ff 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13460,6 +13460,7 @@ dependencies = [ "parity-scale-codec", "polkadot-primitives", "sp-core", + "tracing-gum", ] [[package]] diff --git a/polkadot/node/core/backing/src/error.rs b/polkadot/node/core/backing/src/error.rs index 1b00a62510b7..64955a393962 100644 --- a/polkadot/node/core/backing/src/error.rs +++ b/polkadot/node/core/backing/src/error.rs @@ -48,6 +48,9 @@ pub enum Error { #[error("Candidate is not found")] CandidateNotFound, + #[error("CoreIndex cannot be determined for a candidate")] + CoreIndexUnavailable, + #[error("Signature is invalid")] InvalidSignature, diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 723db247f42a..4bdbe0214f8f 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@
-384,7 +384,7 @@ struct AttestingData { backing: Vec, } -#[derive(Default)] +#[derive(Default, Debug)] struct TableContext { validator: Option, groups: HashMap>, @@ -1025,6 +1025,10 @@ async fn core_index_from_statement( let (validator_groups, group_rotation_info) = try_runtime_api!(groups); let cores = try_runtime_api!(cores); + let compact_statement = statement.as_unchecked(); + let candidate_hash = CandidateHash(*compact_statement.unchecked_payload().candidate_hash()); + + gum::trace!(target: LOG_TARGET, ?group_rotation_info, ?statement, ?validator_groups, ?cores, ?candidate_hash, "Extracting core index from statement"); let statement_validator_index = statement.validator_index(); for (group_index, group) in validator_groups.iter().enumerate() { for validator_index in group { @@ -1653,8 +1657,9 @@ async fn import_statement( let core = core_index_from_statement(ctx, rp_state.parent, statement) .await - .unwrap() - .unwrap(); + .map_err(|_| Error::CoreIndexUnavailable)? + .ok_or(Error::CoreIndexUnavailable)?; + Ok(rp_state.table.import_statement(&rp_state.table_context, core, stmt)) } @@ -2089,6 +2094,14 @@ async fn handle_second_message( return Ok(()) } + gum::debug!( + target: LOG_TARGET, + our_assignment_core = ?rp_state.assigned_core, + our_assignment_para = ?rp_state.assigned_para, + collation = ?candidate.descriptor().para_id, + "Current assignments vs collation", + ); + // If the message is a `CandidateBackingMessage::Second`, sign and dispatch a // Seconded statement only if we have not signed a Valid statement for the requested candidate. // diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index 1957f4e19c54..97e1f2ea10c3 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -65,7 +65,7 @@ fn dummy_pvd() -> PersistedValidationData { } } -struct TestState { +pub(crate) struct TestState { chain_ids: Vec, keystore: KeystorePtr, validators: Vec, @@ -161,6 +161,7 @@ fn test_harness>( test: impl FnOnce(VirtualOverseer) -> T, ) { let pool = sp_core::testing::TaskExecutor::new(); + sp_tracing::init_for_tests(); let (context, virtual_overseer) = test_helpers::make_subsystem_context(pool.clone()); @@ -285,6 +286,16 @@ async fn test_startup(virtual_overseer: &mut VirtualOverseer, test_state: &TestS } ); + // Node features request from runtime: all features are disabled. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_parent, RuntimeApiRequest::NodeFeatures(_session_index, tx)) + ) => { + tx.send(Ok(Default::default())).unwrap(); + } + ); + // Check if subsystem job issues a request for the minimum backing votes. assert_matches!( virtual_overseer.recv().await, @@ -317,6 +328,30 @@ async fn test_startup(virtual_overseer: &mut VirtualOverseer, test_state: &TestS ); } +pub(crate) async fn assert_core_index_from_statement( + virtual_overseer: &mut VirtualOverseer, + test_state: &TestState, +) { + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_parent, RuntimeApiRequest::ValidatorGroups(tx)) + ) => { + tx.send(Ok(test_state.validator_groups.clone())).unwrap(); + } + ); + + // Check that subsystem job issues a request for the availability cores. 
+ assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_parent, RuntimeApiRequest::AvailabilityCores(tx)) + ) => { + tx.send(Ok(test_state.availability_cores.clone())).unwrap(); + } + ); +} + async fn assert_validation_requests( virtual_overseer: &mut VirtualOverseer, validation_code: ValidationCode, @@ -449,6 +484,8 @@ fn backing_second_works() { } ); + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -545,6 +582,7 @@ fn backing_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_validation_requests(&mut virtual_overseer, validation_code_ab.clone()).await; // Sending a `Statement::Seconded` for our assignment will start @@ -604,6 +642,8 @@ fn backing_works() { } ); + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -630,6 +670,8 @@ fn backing_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + virtual_overseer .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves( ActiveLeavesUpdate::stop_work(test_state.relay_parent), @@ -722,6 +764,7 @@ fn backing_works_while_validation_ongoing() { let statement = CandidateBackingMessage::Statement(test_state.relay_parent, signed_a.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_validation_requests(&mut virtual_overseer, validation_code_abc.clone()).await; @@ -771,6 +814,7 @@ fn backing_works_while_validation_ongoing() { CandidateBackingMessage::Statement(test_state.relay_parent, signed_b.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; // Candidate gets backed entirely by other votes. 
assert_matches!( @@ -791,6 +835,8 @@ fn backing_works_while_validation_ongoing() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + let (tx, rx) = oneshot::channel(); let msg = CandidateBackingMessage::GetBackedCandidates( vec![(candidate_a.hash(), test_state.relay_parent)], @@ -889,6 +935,7 @@ fn backing_misbehavior_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; assert_matches!( @@ -944,6 +991,8 @@ fn backing_misbehavior_works() { } ); + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -975,6 +1024,8 @@ fn backing_misbehavior_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + assert_matches!( virtual_overseer.recv().await, AllMessages::Provisioner( @@ -1150,6 +1201,7 @@ fn backing_dont_second_invalid() { tx.send(Ok(())).unwrap(); } ); + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_matches!( virtual_overseer.recv().await, @@ -1221,6 +1273,7 @@ fn backing_second_after_first_fails_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; // Subsystem requests PoV and requests validation. @@ -1365,6 +1418,7 @@ fn backing_works_after_failed_validation() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; // Subsystem requests PoV and requests validation. 
@@ -1422,7 +1476,7 @@ fn backing_works_after_failed_validation() { fn candidate_backing_reorders_votes() { use sp_core::Encode; - let para_id = ParaId::from(10); + let core_idx = CoreIndex(10); let validators = vec![ Sr25519Keyring::Alice, Sr25519Keyring::Bob, @@ -1436,7 +1490,7 @@ fn candidate_backing_reorders_votes() { let validator_groups = { let mut validator_groups = HashMap::new(); validator_groups - .insert(para_id, vec![0, 1, 2, 3, 4, 5].into_iter().map(ValidatorIndex).collect()); + .insert(core_idx, vec![0, 1, 2, 3, 4, 5].into_iter().map(ValidatorIndex).collect()); validator_groups }; @@ -1466,10 +1520,10 @@ fn candidate_backing_reorders_votes() { (ValidatorIndex(3), fake_attestation(3)), (ValidatorIndex(1), fake_attestation(1)), ], - group_id: para_id, + group_id: core_idx, }; - let backed = table_attested_to_backed(attested, &table_context).unwrap(); + let backed = table_attested_to_backed(attested, &table_context, false).unwrap(); let expected_bitvec = { let mut validator_indices = BitVec::::with_capacity(6); @@ -1569,6 +1623,7 @@ fn retry_works() { CandidateBackingMessage::Statement(test_state.relay_parent, signed_a.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; // Subsystem requests PoV and requests validation. @@ -1590,6 +1645,8 @@ fn retry_works() { CandidateBackingMessage::Statement(test_state.relay_parent, signed_b.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + // Not deterministic which message comes first: for _ in 0u32..5 { match virtual_overseer.recv().await { @@ -1632,6 +1689,7 @@ fn retry_works() { CandidateBackingMessage::Statement(test_state.relay_parent, signed_c.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; assert_matches!( @@ -1756,11 +1814,14 @@ fn observes_backing_even_if_not_validator() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + let statement = CandidateBackingMessage::Statement(test_state.relay_parent, signed_b.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_matches!( virtual_overseer.recv().await, AllMessages::Provisioner( @@ -1778,6 +1839,8 @@ fn observes_backing_even_if_not_validator() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + virtual_overseer .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves( ActiveLeavesUpdate::stop_work(test_state.relay_parent), @@ -1844,6 +1907,8 @@ fn cannot_second_multiple_candidates_per_parent() { } ); + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -2078,6 +2143,8 @@ fn disabled_validator_doesnt_distribute_statement_on_receiving_statement() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + 
assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + // Ensure backing subsystem is not doing any work assert_matches!(virtual_overseer.recv().timeout(Duration::from_secs(1)).await, None); @@ -2169,6 +2236,8 @@ fn validator_ignores_statements_from_disabled_validators() { virtual_overseer.send(FromOrchestra::Communication { msg: statement_3 }).await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( @@ -2255,6 +2324,8 @@ fn validator_ignores_statements_from_disabled_validators() { } ); + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( diff --git a/polkadot/node/core/backing/src/tests/prospective_parachains.rs b/polkadot/node/core/backing/src/tests/prospective_parachains.rs index 578f21bef665..0fbf52403ea5 100644 --- a/polkadot/node/core/backing/src/tests/prospective_parachains.rs +++ b/polkadot/node/core/backing/src/tests/prospective_parachains.rs @@ -185,6 +185,16 @@ async fn activate_leaf( } ); + // Node features request from runtime: all features are disabled. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::NodeFeatures(_session_index, tx)) + ) if parent == hash => { + tx.send(Ok(Default::default())).unwrap(); + } + ); + // Check if subsystem job issues a request for the minimum backing votes. assert_matches!( virtual_overseer.recv().await, @@ -305,10 +315,11 @@ async fn assert_hypothetical_frontier_requests( ) => { let idx = match expected_requests.iter().position(|r| r.0 == request) { Some(idx) => idx, - None => panic!( + None => + panic!( "unexpected hypothetical frontier request, no match found for {:?}", request - ), + ), }; let resp = std::mem::take(&mut expected_requests[idx].1); tx.send(resp).unwrap(); @@ -451,6 +462,8 @@ fn seconding_sanity_check_allowed() { )) ); + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -586,6 +599,8 @@ fn seconding_sanity_check_disallowed() { )) ); + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -840,6 +855,8 @@ fn prospective_parachains_reject_candidate() { )) ); + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -976,6 +993,8 @@ fn second_multiple_candidates_per_relay_parent() { ) ); + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -1106,6 +1125,8 @@ fn backing_works() { )) ); + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + assert_validate_seconded_candidate( &mut virtual_overseer, candidate_a.descriptor().relay_parent, @@ -1118,6 +1139,7 @@ fn backing_works() { ) .await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -1154,6 +1176,8 @@ fn backing_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + 
virtual_overseer }); } @@ -1268,6 +1292,7 @@ fn concurrent_dependent_candidates() { let statement_b = CandidateBackingMessage::Statement(leaf_parent, signed_b.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement_a }).await; + // At this point the subsystem waits for response, the previous message is received, // send a second one without blocking. let _ = virtual_overseer @@ -1388,7 +1413,19 @@ fn concurrent_dependent_candidates() { assert_eq!(sess_idx, 1); tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); }, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _parent, + RuntimeApiRequest::ValidatorGroups(tx), + )) => { + tx.send(Ok(test_state.validator_groups.clone())).unwrap(); + }, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _parent, + RuntimeApiRequest::AvailabilityCores(tx), + )) => { + tx.send(Ok(test_state.availability_cores.clone())).unwrap(); + }, _ => panic!("unexpected message received from overseer: {:?}", msg), } } @@ -1419,7 +1456,6 @@ fn seconding_sanity_check_occupy_same_depth() { let leaf_parent = get_parent_hash(leaf_hash); let activated = new_leaf(leaf_hash, LEAF_BLOCK_NUMBER); - let min_block_number = LEAF_BLOCK_NUMBER - LEAF_ANCESTRY_LEN; let min_relay_parents = vec![(para_id_a, min_block_number), (para_id_b, min_block_number)]; let test_leaf_a = TestLeaf { activated, min_relay_parents }; @@ -1523,6 +1559,29 @@ fn seconding_sanity_check_occupy_same_depth() { ) ); + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_parent, RuntimeApiRequest::ValidatorGroups(tx)) + ) => { + let (groups, mut rotation) = test_state.validator_groups.clone(); + if leaf_hash == _parent { + rotation.now = 100; + } + tx.send(Ok((groups, rotation))).unwrap(); + } + ); + + // Check that subsystem job issues a request for the availability cores. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_parent, RuntimeApiRequest::AvailabilityCores(tx)) + ) => { + tx.send(Ok(test_state.availability_cores.clone())).unwrap(); + } + ); + assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -1662,6 +1721,8 @@ fn occupied_core_assignment() { )) ); + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs index 06fd3c31524e..ddbf3a0529d7 100644 --- a/polkadot/node/core/provisioner/src/lib.rs +++ b/polkadot/node/core/provisioner/src/lib.rs @@ -683,7 +683,7 @@ async fn request_backable_candidates( // We're currently fetching based on para id. This has to be chagned to query prospective // parachains via core index. We should be calling this once per para rather than per core. 
- // TODO: Fix after https://github.com/paritytech/polkadot-sdk/pull/3160 + // TODO: Fix after https://github.com/paritytech/polkadot-sdk/pull/3233 let response = get_backable_candidate(relay_parent, para_id, required_path, sender).await?; match response { Some((hash, relay_parent)) => { diff --git a/polkadot/statement-table/Cargo.toml b/polkadot/statement-table/Cargo.toml index 6403b822ed9b..37b8a99d640a 100644 --- a/polkadot/statement-table/Cargo.toml +++ b/polkadot/statement-table/Cargo.toml @@ -13,3 +13,4 @@ workspace = true parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } sp-core = { path = "../../substrate/primitives/core" } primitives = { package = "polkadot-primitives", path = "../primitives" } +gum = { package = "tracing-gum", path = "../node/gum" } diff --git a/polkadot/statement-table/src/generic.rs b/polkadot/statement-table/src/generic.rs index cef2c87151c0..825b6474e663 100644 --- a/polkadot/statement-table/src/generic.rs +++ b/polkadot/statement-table/src/generic.rs @@ -36,6 +36,7 @@ use primitives::{ }; use parity_scale_codec::{Decode, Encode}; +const LOG_TARGET: &str = "parachain::statement-table"; /// Context for the statement table. pub trait Context { @@ -387,6 +388,7 @@ impl Table { group: Ctx::GroupId, ) -> ImportResult { if !context.is_member_of(&authority, &group) { + gum::debug!(target: LOG_TARGET, authority = ?authority, group = ?group, "New `Misbehavior::UnauthorizedStatement`, candidate backed by validator that doesn't belong to expected group" ); return Err(Misbehavior::UnauthorizedStatement(UnauthorizedStatement { statement: SignedStatement { signature, From fc5c1091b66af4a3d8dc7ee5a2aabcab774e1a00 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Mon, 12 Feb 2024 14:47:41 +0200 Subject: [PATCH 09/51] These tests were easy to fix Signed-off-by: Andrei Sandu --- polkadot/statement-table/src/generic.rs | 35 +++++++++++++------------ 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/polkadot/statement-table/src/generic.rs b/polkadot/statement-table/src/generic.rs index 825b6474e663..2ee6f6a4f781 100644 --- a/polkadot/statement-table/src/generic.rs +++ b/polkadot/statement-table/src/generic.rs @@ -670,10 +670,10 @@ mod tests { sender: AuthorityId(1), }; - table.import_statement(&context, statement_a); + table.import_statement(&context, GroupId(2), statement_a); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); - table.import_statement(&context, statement_b); + table.import_statement(&context, GroupId(2), statement_b); assert_eq!( table.detected_misbehavior[&AuthorityId(1)][0], Misbehavior::MultipleCandidates(MultipleCandidates { @@ -706,10 +706,10 @@ mod tests { sender: AuthorityId(1), }; - table.import_statement(&context, statement_a); + table.import_statement(&context, GroupId(2), statement_a); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); - table.import_statement(&context, statement_b); + table.import_statement(&context, GroupId(2), statement_b); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); } @@ -730,7 +730,7 @@ mod tests { sender: AuthorityId(1), }; - table.import_statement(&context, statement); + table.import_statement(&context, GroupId(2), statement); assert_eq!( table.detected_misbehavior[&AuthorityId(1)][0], @@ -764,7 +764,7 @@ mod tests { }; let candidate_a_digest = Digest(100); - table.import_statement(&context, candidate_a); + table.import_statement(&context, GroupId(2), candidate_a); 
assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(2))); @@ -774,7 +774,7 @@ mod tests { signature: Signature(2), sender: AuthorityId(2), }; - table.import_statement(&context, bad_validity_vote); + table.import_statement(&context, GroupId(3), bad_validity_vote); assert_eq!( table.detected_misbehavior[&AuthorityId(2)][0], @@ -806,7 +806,7 @@ mod tests { sender: AuthorityId(1), }; - table.import_statement(&context, statement); + table.import_statement(&context, GroupId(2), statement); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); let invalid_statement = SignedStatement { @@ -815,7 +815,7 @@ mod tests { sender: AuthorityId(1), }; - table.import_statement(&context, invalid_statement); + table.import_statement(&context, GroupId(2), invalid_statement); assert!(table.detected_misbehavior.contains_key(&AuthorityId(1))); } @@ -837,7 +837,7 @@ mod tests { }; let candidate_digest = Digest(100); - table.import_statement(&context, statement); + table.import_statement(&context, GroupId(2), statement); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); let extra_vote = SignedStatement { @@ -846,7 +846,7 @@ mod tests { sender: AuthorityId(1), }; - table.import_statement(&context, extra_vote); + table.import_statement(&context, GroupId(2), extra_vote); assert_eq!( table.detected_misbehavior[&AuthorityId(1)][0], Misbehavior::ValidityDoubleVote(ValidityDoubleVote::IssuedAndValidity( @@ -905,7 +905,7 @@ mod tests { }; let candidate_digest = Digest(100); - table.import_statement(&context, statement); + table.import_statement(&context, GroupId(2), statement); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); assert!(table.attested_candidate(&candidate_digest, &context, 2).is_none()); @@ -916,7 +916,7 @@ mod tests { sender: AuthorityId(2), }; - table.import_statement(&context, vote); + table.import_statement(&context, GroupId(2), vote); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(2))); assert!(table.attested_candidate(&candidate_digest, &context, 2).is_some()); } @@ -939,7 +939,7 @@ mod tests { }; let summary = table - .import_statement(&context, statement) + .import_statement(&context, GroupId(2), statement) .expect("candidate import to give summary"); assert_eq!(summary.candidate, Digest(100)); @@ -966,7 +966,7 @@ mod tests { }; let candidate_digest = Digest(100); - table.import_statement(&context, statement); + table.import_statement(&context, GroupId(2), statement); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); let vote = SignedStatement { @@ -975,8 +975,9 @@ mod tests { sender: AuthorityId(2), }; - let summary = - table.import_statement(&context, vote).expect("candidate vote to give summary"); + let summary = table + .import_statement(&context, GroupId(2), vote) + .expect("candidate vote to give summary"); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(2))); From 33351b42efa03cdbbff2e805561e9e9154d3f5e8 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Mon, 12 Feb 2024 14:51:20 +0200 Subject: [PATCH 10/51] Fix comment Signed-off-by: Andrei Sandu --- polkadot/node/core/backing/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 4bdbe0214f8f..c3ae1ddac7a3 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -209,7 +209,7 @@ struct PerRelayParentState { 
prospective_parachains_mode: ProspectiveParachainsMode, /// The hash of the relay parent on top of which this job is doing it's work. parent: Hash, - /// The `CoreIndex` assigned to the local validator at this relay parent. + /// The `ParaId` assigned to the local validator at this relay parent. assigned_para: Option, /// The `CoreIndex` assigned to the local validator at this relay parent. assigned_core: Option, From 0d994bfb7d9a5c85b7c1993f92cf990f0e44935a Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Mon, 12 Feb 2024 15:10:27 +0200 Subject: [PATCH 11/51] clippy was angry Signed-off-by: Andrei Sandu --- polkadot/node/core/provisioner/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs index ddbf3a0529d7..8d094eaa7673 100644 --- a/polkadot/node/core/provisioner/src/lib.rs +++ b/polkadot/node/core/provisioner/src/lib.rs @@ -687,7 +687,7 @@ async fn request_backable_candidates( let response = get_backable_candidate(relay_parent, para_id, required_path, sender).await?; match response { Some((hash, relay_parent)) => { - if selected_candidates.iter().position(|bc| &(hash, relay_parent) == bc).is_none() { + if !selected_candidates.iter().any(|bc| &(hash, relay_parent) == bc) { selected_candidates.push((hash, relay_parent)) } }, From 534c019dd7e038d42734d999007217730a1656ed Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Mon, 12 Feb 2024 18:36:59 +0200 Subject: [PATCH 12/51] A bit refactor and add a test Signed-off-by: Andrei Sandu --- polkadot/node/core/backing/src/error.rs | 6 ++ polkadot/node/core/backing/src/lib.rs | 59 +++++++++------ polkadot/node/core/backing/src/tests/mod.rs | 79 ++++++++++++++++++++- 3 files changed, 121 insertions(+), 23 deletions(-) diff --git a/polkadot/node/core/backing/src/error.rs b/polkadot/node/core/backing/src/error.rs index 64955a393962..c41084911d22 100644 --- a/polkadot/node/core/backing/src/error.rs +++ b/polkadot/node/core/backing/src/error.rs @@ -63,6 +63,12 @@ pub enum Error { #[error("Fetching validation code by hash failed {0:?}, {1:?}")] FetchValidationCode(ValidationCodeHash, RuntimeApiError), + #[error("Fetching validator groups failed")] + FetchValidatorGroups(RuntimeApiError), + + #[error("Fetching availability cores failed")] + FetchAvailabilityCores(RuntimeApiError), + #[error("Fetching Runtime API version failed {0:?}")] FetchRuntimeApiVersion(RuntimeApiError), diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index c3ae1ddac7a3..c4e8ea7c1fe2 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -92,12 +92,13 @@ use polkadot_node_subsystem::{ RuntimeApiRequest, StatementDistributionMessage, StoreAvailableDataError, }, overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, + SubsystemSender, }; use polkadot_node_subsystem_util::{ self as util, backing_implicit_view::{FetchError as ImplicitViewFetchError, View as ImplicitView}, - executor_params_at_relay_parent, request_from_runtime, request_session_index_for_child, - request_validator_groups, request_validators, + executor_params_at_relay_parent, request_availability_cores, request_from_runtime, + request_session_index_for_child, request_validator_groups, request_validators, runtime::{ self, prospective_parachains_mode, request_min_backing_votes, ProspectiveParachainsMode, }, @@ -106,8 +107,8 @@ use polkadot_node_subsystem_util::{ use polkadot_primitives::{ 
vstaging::{node_features::FeatureIndex, NodeFeatures}, BackedCandidate, CandidateCommitments, CandidateHash, CandidateReceipt, - CommittedCandidateReceipt, CoreIndex, CoreState, ExecutorParams, GroupIndex, Hash, - Id as ParaId, PersistedValidationData, PvfExecKind, SigningContext, ValidationCode, + CommittedCandidateReceipt, CoreIndex, CoreState, ExecutorParams, GroupIndex, GroupRotationInfo, + Hash, Id as ParaId, PersistedValidationData, PvfExecKind, SigningContext, ValidationCode, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, }; use sp_keystore::KeystorePtr; @@ -1006,41 +1007,55 @@ macro_rules! try_runtime_api { }; } -#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] -async fn core_index_from_statement( - ctx: &mut Context, +#[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)] +async fn core_index_from_statement( + sender: &mut Sender, relay_parent: Hash, statement: &SignedFullStatementWithPVD, -) -> Result, Error> { +) -> Result, Error> +where + Sender: SubsystemSender + Clone, +{ let parent = relay_parent; - let (groups, cores) = futures::try_join!( - request_validator_groups(parent, ctx.sender()).await, - request_from_runtime(parent, ctx.sender(), |tx| { - RuntimeApiRequest::AvailabilityCores(tx) - },) - .await, - ) - .map_err(Error::JoinMultiple)?; - let (validator_groups, group_rotation_info) = try_runtime_api!(groups); - let cores = try_runtime_api!(cores); + let (validator_groups, group_rotation_info) = request_validator_groups(parent, sender) + .await + .await + .map_err(Error::RuntimeApiUnavailable)? + .map_err(Error::FetchValidatorGroups)?; + + let cores = request_availability_cores(parent, sender) + .await + .await + .map_err(Error::RuntimeApiUnavailable)? + .map_err(Error::FetchAvailabilityCores)?; let compact_statement = statement.as_unchecked(); let candidate_hash = CandidateHash(*compact_statement.unchecked_payload().candidate_hash()); gum::trace!(target: LOG_TARGET, ?group_rotation_info, ?statement, ?validator_groups, ?cores, ?candidate_hash, "Extracting core index from statement"); + + Ok(core_index_from_statement_inner(&cores, &validator_groups, &group_rotation_info, statement)) +} + +pub(crate) fn core_index_from_statement_inner( + cores: &[CoreState], + validator_groups: &[Vec], + group_rotation_info: &GroupRotationInfo, + statement: &SignedFullStatementWithPVD, +) -> Option { let statement_validator_index = statement.validator_index(); for (group_index, group) in validator_groups.iter().enumerate() { for validator_index in group { if *validator_index == statement_validator_index { - return Ok(Some( + return Some( group_rotation_info.core_for_group(GroupIndex(group_index as u32), cores.len()), - )) + ) } } } - Ok(None) + None } /// Load the data necessary to do backing work on top of a relay-parent. @@ -1655,7 +1670,7 @@ async fn import_statement( let stmt = primitive_statement_to_table(statement); - let core = core_index_from_statement(ctx, rp_state.parent, statement) + let core = core_index_from_statement(ctx.sender(), rp_state.parent, statement) .await .map_err(|_| Error::CoreIndexUnavailable)? 
.ok_or(Error::CoreIndexUnavailable)?; diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index 97e1f2ea10c3..2bf32c407395 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -161,7 +161,6 @@ fn test_harness>( test: impl FnOnce(VirtualOverseer) -> T, ) { let pool = sp_core::testing::TaskExecutor::new(); - sp_tracing::init_for_tests(); let (context, virtual_overseer) = test_helpers::make_subsystem_context(pool.clone()); @@ -681,6 +680,84 @@ fn backing_works() { }); } +#[test] +fn extract_core_index_from_statement_works() { + let test_state = TestState::default(); + + let pov_a = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd_a = dummy_pvd(); + let validation_code_a = ValidationCode(vec![1, 2, 3]); + + let pov_hash = pov_a.hash(); + + let candidate = TestCandidateBuilder { + para_id: test_state.chain_ids[0], + relay_parent: test_state.relay_parent, + pov_hash, + erasure_root: make_erasure_root(&test_state, pov_a.clone(), pvd_a.clone()), + persisted_validation_data_hash: pvd_a.hash(), + validation_code: validation_code_a.0.clone(), + ..Default::default() + } + .build(); + + let public2 = Keystore::sr25519_generate_new( + &*test_state.keystore, + ValidatorId::ID, + Some(&test_state.validators[2].to_seed()), + ) + .expect("Insert key into keystore"); + + let signed_statement_1 = SignedFullStatementWithPVD::sign( + &test_state.keystore, + StatementWithPVD::Seconded(candidate.clone(), pvd_a.clone()), + &test_state.signing_context, + ValidatorIndex(2), + &public2.into(), + ) + .ok() + .flatten() + .expect("should be signed"); + + let public1 = Keystore::sr25519_generate_new( + &*test_state.keystore, + ValidatorId::ID, + Some(&test_state.validators[1].to_seed()), + ) + .expect("Insert key into keystore"); + + let signed_statement_2 = SignedFullStatementWithPVD::sign( + &test_state.keystore, + StatementWithPVD::Seconded(candidate.clone(), pvd_a.clone()), + &test_state.signing_context, + ValidatorIndex(1), + &public1.into(), + ) + .ok() + .flatten() + .expect("should be signed"); + + let core_index_1 = core_index_from_statement_inner( + &test_state.availability_cores, + &test_state.validator_groups.0, + &test_state.validator_groups.1, + &signed_statement_1, + ) + .unwrap(); + + assert_eq!(core_index_1, CoreIndex(0)); + + let core_index_2 = core_index_from_statement_inner( + &test_state.availability_cores, + &test_state.validator_groups.0, + &test_state.validator_groups.1, + &signed_statement_2, + ) + .unwrap(); + + assert_eq!(core_index_2, CoreIndex(1)); +} + #[test] fn backing_works_while_validation_ongoing() { let test_state = TestState::default(); From 10d86ddfe3616356528daa8302cbd9e5034500fa Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Mon, 12 Feb 2024 18:45:20 +0200 Subject: [PATCH 13/51] taplo happy Signed-off-by: Andrei Sandu --- polkadot/primitives/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/primitives/Cargo.toml b/polkadot/primitives/Cargo.toml index 27827fcd7d78..9d366f6cfaa5 100644 --- a/polkadot/primitives/Cargo.toml +++ b/polkadot/primitives/Cargo.toml @@ -39,13 +39,13 @@ std = [ "application-crypto/std", "bitvec/std", "inherents/std", + "log/std", "parity-scale-codec/std", "polkadot-core-primitives/std", "polkadot-parachain-primitives/std", "primitives/std", "runtime_primitives/std", "scale-info/std", - "log/std", "serde/std", "sp-api/std", "sp-arithmetic/std", From fbe1ad516de65b20807c13b502051103547ad538 Mon Sep 17 
00:00:00 2001 From: Andrei Sandu Date: Tue, 13 Feb 2024 12:17:57 +0200 Subject: [PATCH 14/51] BackedCandidate: make all members private and provide an interface Signed-off-by: Andrei Sandu --- polkadot/primitives/src/v6/mod.rs | 64 +++++++++++++++++++++++++++++-- 1 file changed, 60 insertions(+), 4 deletions(-) diff --git a/polkadot/primitives/src/v6/mod.rs b/polkadot/primitives/src/v6/mod.rs index c9c78499cbb5..d5c3ec017ad3 100644 --- a/polkadot/primitives/src/v6/mod.rs +++ b/polkadot/primitives/src/v6/mod.rs @@ -16,7 +16,7 @@ //! `V6` Primitives. -use bitvec::vec::BitVec; +use bitvec::{field::BitField, vec::BitVec}; use parity_scale_codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_std::{ @@ -704,19 +704,43 @@ pub type UncheckedSignedAvailabilityBitfields = Vec { /// The candidate referred to. - pub candidate: CommittedCandidateReceipt, + candidate: CommittedCandidateReceipt, /// The validity votes themselves, expressed as signatures. - pub validity_votes: Vec, + validity_votes: Vec, /// The indices of the validators within the group, expressed as a bitfield. - pub validator_indices: BitVec, + validator_indices: BitVec, } impl BackedCandidate { + /// Constructor + pub fn new( + candidate: CommittedCandidateReceipt, + validity_votes: Vec, + validator_indices: BitVec, + ) -> Self { + Self { candidate, validity_votes, validator_indices } + } + /// Get a reference to the descriptor of the para. pub fn descriptor(&self) -> &CandidateDescriptor { &self.candidate.descriptor } + /// Get a reference to the descriptor of the para. + pub fn candidate(&self) -> &CommittedCandidateReceipt { + &self.candidate + } + + /// Get a reference to the descriptor of the para. + pub fn validity_votes(&self) -> &[ValidityAttestation] { + &self.validity_votes + } + + /// Get a reference to the descriptor of the para. + pub fn validity_votes_mut(&mut self) -> &mut Vec { + &mut self.validity_votes + } + /// Compute this candidate's hash. pub fn hash(&self) -> CandidateHash where @@ -732,6 +756,38 @@ impl BackedCandidate { { self.candidate.to_plain() } + + /// Get validator indices mutable reference + pub fn validator_indices(&self, core_index_enabled: bool) -> BitVec { + // This flag tells us if the block producers must enable Elastic Scaling MVP hack. + // It extends `BackedCandidate::validity_indices` to store a 8 bit core index. + if core_index_enabled { + let core_idx_offset = self.validator_indices.len().saturating_sub(8); + let (validator_indices_slice, _core_idx_slice) = + self.validator_indices.split_at(core_idx_offset); + BitVec::from(validator_indices_slice) + } else { + self.validator_indices.clone() + } + } + + /// Update the validator indices in the candidate + pub fn set_validator_indices(&mut self, new_indices: BitVec) { + self.validator_indices = new_indices; + } + + /// Return the assumed core index of the backed candidate if any. + pub fn assumed_core_index(&self, core_index_enabled: bool) -> Option { + if core_index_enabled { + let core_idx_offset = self.validator_indices.len().saturating_sub(8); + let (_validator_indices_slice, core_idx_slice) = + self.validator_indices.split_at(core_idx_offset); + let core_idx: u8 = core_idx_slice.load(); + Some(CoreIndex(core_idx as u32)) + } else { + None + } + } } /// Verify the backing of the given candidate. 
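Editor's note on the accessors introduced in PATCH 14: when the node feature is enabled, `validator_indices` carries the usual one-bit-per-group-member prefix followed by eight extra bits holding the core index. The standalone sketch below is illustrative only (it is not the polkadot-primitives code); it assumes the `bitvec` crate, and `pack`/`unpack` are made-up helper names that mirror what `validator_indices(true)` and `assumed_core_index(true)` do above.

// Illustrative sketch only: packing/unpacking a backing-group bitfield plus an
// 8-bit core index, as in the elastic scaling MVP encoding described above.
use bitvec::{bitvec, field::BitField, order::Lsb0, vec::BitVec};

fn pack(group_bits: BitVec<u8, Lsb0>, core_index: u8) -> BitVec<u8, Lsb0> {
    let mut out = group_bits;
    // Reserve 8 trailing bits and store the core index in them.
    let mut core_bits: BitVec<u8, Lsb0> = BitVec::repeat(false, 8);
    core_bits.store(core_index);
    out.extend(core_bits);
    out
}

fn unpack(extended: &BitVec<u8, Lsb0>) -> (BitVec<u8, Lsb0>, u8) {
    // Everything before the last 8 bits addresses validators within the group;
    // the last 8 bits are read back as the assumed core index.
    let offset = extended.len().saturating_sub(8);
    let (group_bits, core_bits) = extended.split_at(offset);
    (group_bits.to_bitvec(), core_bits.load::<u8>())
}

fn main() {
    // A backing group of five validators, three of whom signed.
    let group_bits = bitvec![u8, Lsb0; 1, 0, 1, 1, 0];
    let extended = pack(group_bits.clone(), 2);
    let (recovered_bits, core_index) = unpack(&extended);
    assert_eq!(recovered_bits, group_bits);
    assert_eq!(core_index, 2);
}

Because only eight trailing bits are appended, the injected value can distinguish at most 256 cores, and a reader that does not know the feature is active cannot tell the extra bits from validator positions — which is why both the node and the runtime gate the decoding behind the node feature flag.
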
From e74f038eb6a537287ab3047dba70cb303d5a4086 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Tue, 13 Feb 2024 12:18:19 +0200 Subject: [PATCH 15/51] refactor based on new BackedCandidate Signed-off-by: Andrei Sandu --- polkadot/node/core/backing/src/lib.rs | 10 +-- polkadot/node/core/provisioner/src/lib.rs | 2 +- .../runtime/parachains/src/inclusion/mod.rs | 59 +++++++++-------- .../parachains/src/paras_inherent/mod.rs | 40 +++++++----- .../parachains/src/paras_inherent/weights.rs | 4 +- polkadot/runtime/parachains/src/util.rs | 65 +++++++------------ 6 files changed, 84 insertions(+), 96 deletions(-) diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index c4e8ea7c1fe2..cbc4936c5264 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -488,14 +488,14 @@ fn table_attested_to_backed( validator_indices.extend(core_index_to_inject); } - Some(BackedCandidate { + Some(BackedCandidate::new( candidate, - validity_votes: vote_positions + vote_positions .into_iter() .map(|(pos_in_votes, _pos_in_group)| validity_votes[pos_in_votes].clone()) .collect(), validator_indices, - }) + )) } async fn store_available_data( @@ -1702,7 +1702,7 @@ async fn post_import_statement_actions( &rp_state.table_context, rp_state.inject_core_index, ) { - let para_id = backed.candidate.descriptor.para_id; + let para_id = backed.candidate().descriptor.para_id; gum::debug!( target: LOG_TARGET, candidate_hash = ?candidate_hash, @@ -1723,7 +1723,7 @@ async fn post_import_statement_actions( // notify collator protocol. ctx.send_message(CollatorProtocolMessage::Backed { para_id, - para_head: backed.candidate.descriptor.para_head, + para_head: backed.candidate().descriptor.para_head, }) .await; // Notify statement distribution of backed candidate. diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs index 8d094eaa7673..61f332656531 100644 --- a/polkadot/node/core/provisioner/src/lib.rs +++ b/polkadot/node/core/provisioner/src/lib.rs @@ -765,7 +765,7 @@ async fn select_candidates( // keep only one candidate with validation code. 
let mut with_validation_code = false; candidates.retain(|c| { - if c.candidate.commitments.new_validation_code.is_some() { + if c.candidate().commitments.new_validation_code.is_some() { if with_validation_code { return false } diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 49375864f23a..a606a421ac97 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -25,7 +25,6 @@ use crate::{ paras::{self, SetGoAhead}, scheduler::{self, AvailabilityTimeoutStatus}, shared::{self, AllowedRelayParentsTracker}, - util::strip_candidate_core_index, }; use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; use frame_support::{ @@ -619,6 +618,11 @@ impl Pallet { return Ok(ProcessedCandidates::default()) } + let core_index_enabled = configuration::Pallet::::config() + .node_features + .get(FeatureIndex::InjectCoreIndex as usize) + .map(|b| *b) + .unwrap_or(false); let minimum_backing_votes = configuration::Pallet::::config().minimum_backing_votes; let validators = shared::Pallet::::active_validator_keys(); @@ -683,18 +687,15 @@ impl Pallet { }; let para_id = backed_candidate.descriptor().para_id; - let core_idx = - if let Some(core_idx) = strip_candidate_core_index::(backed_candidate) { - core_idx - } else { - *scheduled.get(¶_id).ok_or(Error::::UnscheduledCandidate)? - }; - - log::debug!(target: LOG_TARGET, "Candidate {:?} on {:?}, core_index_hack = {}", backed_candidate.hash(), core_idx, configuration::Pallet::::config() - .node_features - .get(FeatureIndex::InjectCoreIndex as usize) - .map(|b| *b) - .unwrap_or(false)); + let core_idx = if let Some(core_idx) = + backed_candidate.assumed_core_index(core_index_enabled) + { + core_idx + } else { + *scheduled.get(¶_id).ok_or(Error::::UnscheduledCandidate)? 
+ }; + + log::debug!(target: LOG_TARGET, "Candidate {:?} on {:?}, core_index_enabled = {}", backed_candidate.hash(), core_idx, core_index_enabled); check_assignment_in_order(core_idx)?; @@ -754,16 +755,16 @@ impl Pallet { let mut backer_idx_and_attestation = Vec::<(ValidatorIndex, ValidityAttestation)>::with_capacity( - backed_candidate.validator_indices.count_ones(), + backed_candidate.validator_indices(core_index_enabled).count_ones(), ); let candidate_receipt = backed_candidate.receipt(); for ((bit_idx, _), attestation) in backed_candidate - .validator_indices + .validator_indices(core_index_enabled) .iter() .enumerate() .filter(|(_, signed)| **signed) - .zip(backed_candidate.validity_votes.iter().cloned()) + .zip(backed_candidate.validity_votes().iter().cloned()) { let val_idx = group_vals.get(bit_idx).expect("this query succeeded above; qed"); @@ -798,16 +799,18 @@ impl Pallet { bitvec::bitvec![u8, BitOrderLsb0; 0; validators.len()]; Self::deposit_event(Event::::CandidateBacked( - candidate.candidate.to_plain(), - candidate.candidate.commitments.head_data.clone(), + candidate.candidate().to_plain(), + candidate.candidate().commitments.head_data.clone(), core.0, group, )); - let candidate_hash = candidate.candidate.hash(); + let candidate_hash = candidate.candidate().hash(); - let (descriptor, commitments) = - (candidate.candidate.descriptor, candidate.candidate.commitments); + let (descriptor, commitments) = ( + candidate.candidate().descriptor.clone(), + candidate.candidate().commitments.clone(), + ); >::insert( ¶_id, @@ -1259,19 +1262,19 @@ impl CandidateCheckContext { ensure!( backed_candidate.descriptor().para_head == - backed_candidate.candidate.commitments.head_data.hash(), + backed_candidate.candidate().commitments.head_data.hash(), Error::::ParaHeadMismatch, ); if let Err(err) = self.check_validation_outputs( para_id, relay_parent_number, - &backed_candidate.candidate.commitments.head_data, - &backed_candidate.candidate.commitments.new_validation_code, - backed_candidate.candidate.commitments.processed_downward_messages, - &backed_candidate.candidate.commitments.upward_messages, - BlockNumberFor::::from(backed_candidate.candidate.commitments.hrmp_watermark), - &backed_candidate.candidate.commitments.horizontal_messages, + &backed_candidate.candidate().commitments.head_data, + &backed_candidate.candidate().commitments.new_validation_code, + backed_candidate.candidate().commitments.processed_downward_messages, + &backed_candidate.candidate().commitments.upward_messages, + BlockNumberFor::::from(backed_candidate.candidate().commitments.hrmp_watermark), + &backed_candidate.candidate().commitments.horizontal_messages, ) { log::debug!( target: LOG_TARGET, diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 0744d1639b2e..932a649bb443 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -31,7 +31,8 @@ use crate::{ paras, scheduler::{self, FreedReason}, shared::{self, AllowedRelayParentsTracker}, - ParaId, util::elastic_scaling_mvp_filter, + util::elastic_scaling_mvp_filter, + ParaId, }; use bitvec::prelude::BitVec; use frame_support::{ @@ -41,20 +42,17 @@ use frame_support::{ traits::Randomness, }; -use crate::util::strip_candidate_core_index; - use frame_system::pallet_prelude::*; use pallet_babe::{self, ParentBlockRandomness}; use primitives::{ - effective_minimum_backing_votes, BackedCandidate, CandidateHash, CandidateReceipt, - 
CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, CoreIndex, DisputeStatementSet, - InherentData as ParachainsInherentData, MultiDisputeStatementSet, ScrapedOnChainVotes, - SessionIndex, SignedAvailabilityBitfields, SigningContext, UncheckedSignedAvailabilityBitfield, - UncheckedSignedAvailabilityBitfields, ValidatorId, ValidatorIndex, ValidityAttestation, - PARACHAINS_INHERENT_IDENTIFIER, + effective_minimum_backing_votes, vstaging::node_features::FeatureIndex, BackedCandidate, + CandidateHash, CandidateReceipt, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, + CoreIndex, DisputeStatementSet, InherentData as ParachainsInherentData, + MultiDisputeStatementSet, ScrapedOnChainVotes, SessionIndex, SignedAvailabilityBitfields, + SigningContext, UncheckedSignedAvailabilityBitfield, UncheckedSignedAvailabilityBitfields, + ValidatorId, ValidatorIndex, ValidityAttestation, PARACHAINS_INHERENT_IDENTIFIER, }; use rand::{seq::SliceRandom, SeedableRng}; - use scale_info::TypeInfo; use sp_runtime::traits::{Header as HeaderT, One}; use sp_std::{ @@ -781,7 +779,7 @@ fn apply_weight_limit( .iter() .enumerate() .filter_map(|(idx, candidate)| { - candidate.candidate.commitments.new_validation_code.as_ref().map(|_code| idx) + candidate.candidate().commitments.new_validation_code.as_ref().map(|_code| idx) }) .collect::>(); @@ -1096,13 +1094,18 @@ fn filter_backed_statements_from_disabled_validators::config().minimum_backing_votes; + let core_index_enabled = configuration::Pallet::::config() + .node_features + .get(FeatureIndex::InjectCoreIndex as usize) + .map(|b| *b) + .unwrap_or(false); // Process all backed candidates. `validator_indices` in `BackedCandidates` are indices within // the validator group assigned to the parachain. To obtain this group we need: // 1. Core index assigned to the parachain which has produced the candidate // 2. The relay chain block number of the candidate backed_candidates.retain_mut(|bc| { - let core_idx = if let Some(core_idx) = strip_candidate_core_index::(bc) { + let core_idx = if let Some(core_idx) = bc.assumed_core_index(core_index_enabled) { core_idx } else { // Get `core_idx` assigned to the `para_id` of the candidate @@ -1148,13 +1151,17 @@ fn filter_backed_statements_from_disabled_validators::from_iter(validator_group.iter().map(|idx| disabled_validators.contains(idx))); + let mut validator_indices = bc.validator_indices(core_index_enabled); // The indices of statements from disabled validators in `BackedCandidate`. We have to drop these. 
- let indices_to_drop = disabled_indices.clone() & &bc.validator_indices; + let indices_to_drop = disabled_indices.clone() & &validator_indices; // Apply the bitmask to drop the disabled validator from `validator_indices` - bc.validator_indices &= !disabled_indices; + validator_indices &= !disabled_indices; + // Update the backed candidate + bc.set_validator_indices(validator_indices); + // Remove the corresponding votes from `validity_votes` for idx in indices_to_drop.iter_ones().rev() { - bc.validity_votes.remove(idx); + bc.validity_votes_mut().remove(idx); } // If at least one statement was dropped we need to return `true` @@ -1165,10 +1172,9 @@ fn filter_backed_statements_from_disabled_validators( candidate: &BackedCandidate, ) -> Weight { set_proof_size_to_tx_size( - if candidate.candidate.commitments.new_validation_code.is_some() { + if candidate.candidate().commitments.new_validation_code.is_some() { <::WeightInfo as WeightInfo>::enter_backed_candidate_code_upgrade() } else { <::WeightInfo as WeightInfo>::enter_backed_candidates_variable( - candidate.validity_votes.len() as u32, + candidate.validity_votes().len() as u32, ) }, candidate, diff --git a/polkadot/runtime/parachains/src/util.rs b/polkadot/runtime/parachains/src/util.rs index 3524caa46a1b..6fec97741094 100644 --- a/polkadot/runtime/parachains/src/util.rs +++ b/polkadot/runtime/parachains/src/util.rs @@ -17,7 +17,7 @@ //! Utilities that don't belong to any particular module but may draw //! on all modules. -use bitvec::{field::BitField, vec::BitVec}; +use bitvec::field::BitField; use frame_system::pallet_prelude::BlockNumberFor; use primitives::{ vstaging::node_features::FeatureIndex, BackedCandidate, CoreIndex, Id as ParaId, @@ -124,29 +124,36 @@ mod tests { /// Filters out all candidates that have multiple cores assigned and no /// `CoreIndex` injected. -pub(crate) fn elastic_scaling_mvp_filter(candidates: &mut Vec>) { +pub(crate) fn elastic_scaling_mvp_filter( + candidates: &mut Vec>, +) { if !configuration::Pallet::::config() .node_features .get(FeatureIndex::InjectCoreIndex as usize) .map(|b| *b) - .unwrap_or(false) { - // we don't touch the candidates, since we don't expect block producers - // to inject `CoreIndex`. - return - } + .unwrap_or(false) + { + // we don't touch the candidates, since we don't expect block producers + // to inject `CoreIndex`. + return + } + // TODO: determine cores assigned to this para. let multiple_cores_asigned = true; - candidates.retain(|candidate| !multiple_cores_asigned || has_core_index::(candidate) ); + candidates.retain(|candidate| !multiple_cores_asigned || has_core_index::(candidate, true)); } // Returns `true` if the candidate contains an injected `CoreIndex`. -fn has_core_index(candidate: &BackedCandidate) -> bool { +fn has_core_index( + candidate: &BackedCandidate, + core_index_enabled: bool, +) -> bool { // After stripping the 8 bit extensions, the `validator_indices` field length is expected // to be equal to backing group size. If these don't match, the `CoreIndex` is badly encoded, // or not supported. 
- let core_idx_offset = candidate.validator_indices.len().saturating_sub(8); - let (validator_indices_slice, core_idx_slice) = - candidate.validator_indices.split_at(core_idx_offset); + let core_idx_offset = candidate.validator_indices(core_index_enabled).len().saturating_sub(8); + let validator_indices_raw = candidate.validator_indices(core_index_enabled); + let (validator_indices_slice, core_idx_slice) = validator_indices_raw.split_at(core_idx_offset); let core_idx: u8 = core_idx_slice.load(); let current_block = frame_system::Pallet::::block_number(); @@ -157,41 +164,13 @@ fn has_core_index(candidate: &Back current_block, ) { Some(group_idx) => group_idx, - None => return false + None => return false, }; - let group_validators = match >::group_validators(group_idx) { Some(validators) => validators, - None => return false + None => return false, }; - - group_validators.len() == validator_indices_slice.len() -} -/// Strips and returns the `CoreIndex` encoded in the `validator_indices` of `BackedCandidate` -/// if `FeatureIndex::InjectCoreIndex` is enabled and supported by block producer. -/// -/// Otherwise it returns `None`. -pub(crate) fn strip_candidate_core_index( - backed_candidate: &mut BackedCandidate, -) -> Option { - // This flag tells us if the block producers must enable Elastic Scaling MVP hack. - // It extends `BackedCandidate::validity_indices` to store a 8 bit core index. - let core_index_hack = configuration::Pallet::::config() - .node_features - .get(FeatureIndex::InjectCoreIndex as usize) - .map(|b| *b) - .unwrap_or(false); - - if core_index_hack { - let core_idx_offset = backed_candidate.validator_indices.len().saturating_sub(8); - let (validator_indices_slice, core_idx_slice) = - backed_candidate.validator_indices.split_at(core_idx_offset); - let core_idx: u8 = core_idx_slice.load(); - backed_candidate.validator_indices = BitVec::from(validator_indices_slice); - Some(CoreIndex(core_idx as u32)) - } else { - None - } + group_validators.len() == validator_indices_slice.len() } From d898740a15e3eb3dc958aad4d0c2e4763bdbbe77 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Tue, 13 Feb 2024 12:32:38 +0200 Subject: [PATCH 16/51] Fix all parachain runtime tests affected Signed-off-by: Andrei Sandu --- polkadot/runtime/parachains/src/builder.rs | 6 +-- .../runtime/parachains/src/inclusion/tests.rs | 47 +++++++++++++++++-- .../parachains/src/paras_inherent/tests.rs | 14 +++--- 3 files changed, 52 insertions(+), 15 deletions(-) diff --git a/polkadot/runtime/parachains/src/builder.rs b/polkadot/runtime/parachains/src/builder.rs index 016b3fca589a..97e26371a206 100644 --- a/polkadot/runtime/parachains/src/builder.rs +++ b/polkadot/runtime/parachains/src/builder.rs @@ -587,11 +587,11 @@ impl BenchBuilder { }) .collect(); - BackedCandidate:: { + BackedCandidate::::new( candidate, validity_votes, - validator_indices: bitvec::bitvec![u8, bitvec::order::Lsb0; 1; group_validators.len()], - } + bitvec::bitvec![u8, bitvec::order::Lsb0; 1; group_validators.len()], + ) }) .collect() } diff --git a/polkadot/runtime/parachains/src/inclusion/tests.rs b/polkadot/runtime/parachains/src/inclusion/tests.rs index 232e65d78ed2..5199b006fac4 100644 --- a/polkadot/runtime/parachains/src/inclusion/tests.rs +++ b/polkadot/runtime/parachains/src/inclusion/tests.rs @@ -155,7 +155,7 @@ pub(crate) fn back_candidate( validity_votes.push(ValidityAttestation::Explicit(signature).into()); } - let backed = BackedCandidate { candidate, validity_votes, validator_indices }; + let backed = 
BackedCandidate::new(candidate, validity_votes, validator_indices); let successfully_backed = primitives::check_candidate_backing(&backed, signing_context, group.len(), |i| { @@ -946,6 +946,7 @@ fn candidate_checks() { &allowed_relay_parents, vec![backed], &[chain_b_assignment].into_iter().collect(), + &[(chain_b_assignment.1, chain_b_assignment.0)].into_iter().collect(), &group_validators, ), Error::::UnscheduledCandidate @@ -1001,6 +1002,12 @@ fn candidate_checks() { &allowed_relay_parents, vec![backed_b, backed_a], &[chain_a_assignment, chain_b_assignment].into_iter().collect(), + &[ + (chain_a_assignment.1, chain_a_assignment.0), + (chain_b_assignment.1, chain_b_assignment.0) + ] + .into_iter() + .collect(), &group_validators, ), Error::::ScheduledOutOfOrder @@ -1034,6 +1041,7 @@ fn candidate_checks() { &allowed_relay_parents, vec![backed], &[chain_a_assignment].into_iter().collect(), + &[(chain_a_assignment.1, chain_a_assignment.0)].into_iter().collect(), &group_validators, ), Error::::InsufficientBacking @@ -1091,6 +1099,12 @@ fn candidate_checks() { &allowed_relay_parents, vec![backed_b, backed_a], &[chain_a_assignment, chain_b_assignment].into_iter().collect(), + &[ + (chain_a_assignment.1, chain_a_assignment.0), + (chain_b_assignment.1, chain_b_assignment.0) + ] + .into_iter() + .collect(), &group_validators, ), Error::::DisallowedRelayParent @@ -1129,6 +1143,7 @@ fn candidate_checks() { &allowed_relay_parents, vec![backed], &[thread_a_assignment].into_iter().collect(), + &[(thread_a_assignment.1, thread_a_assignment.0)].into_iter().collect(), &group_validators, ), Error::::NotCollatorSigned @@ -1179,6 +1194,7 @@ fn candidate_checks() { &allowed_relay_parents, vec![backed], &[chain_a_assignment].into_iter().collect(), + &[(chain_a_assignment.1, chain_a_assignment.0)].into_iter().collect(), &group_validators, ), Error::::CandidateScheduledBeforeParaFree @@ -1219,6 +1235,7 @@ fn candidate_checks() { &allowed_relay_parents, vec![backed], &[chain_a_assignment].into_iter().collect(), + &[(chain_a_assignment.1, chain_a_assignment.0)].into_iter().collect(), &group_validators, ), Error::::CandidateScheduledBeforeParaFree @@ -1269,6 +1286,7 @@ fn candidate_checks() { &allowed_relay_parents, vec![backed], &[chain_a_assignment].into_iter().collect(), + &[(chain_a_assignment.1, chain_a_assignment.0)].into_iter().collect(), &group_validators, ), Error::::PrematureCodeUpgrade @@ -1303,6 +1321,7 @@ fn candidate_checks() { &allowed_relay_parents, vec![backed], &[chain_a_assignment].into_iter().collect(), + &[(chain_a_assignment.1, chain_a_assignment.0)].into_iter().collect(), &group_validators, ), Err(Error::::ValidationDataHashMismatch.into()), @@ -1338,6 +1357,7 @@ fn candidate_checks() { &allowed_relay_parents, vec![backed], &[chain_a_assignment].into_iter().collect(), + &[(chain_a_assignment.1, chain_a_assignment.0)].into_iter().collect(), &group_validators, ), Error::::InvalidValidationCodeHash @@ -1373,6 +1393,7 @@ fn candidate_checks() { &allowed_relay_parents, vec![backed], &[chain_a_assignment].into_iter().collect(), + &[(chain_a_assignment.1, chain_a_assignment.0)].into_iter().collect(), &group_validators, ), Error::::ParaHeadMismatch @@ -1535,6 +1556,13 @@ fn backing_works() { &[chain_a_assignment, chain_b_assignment, thread_a_assignment] .into_iter() .collect(), + &[ + (chain_a_assignment.1, chain_a_assignment.0), + (chain_b_assignment.1, chain_b_assignment.0), + (thread_a_assignment.1, thread_a_assignment.0), + ] + .into_iter() + .collect(), &group_validators, ) .expect("candidates 
scheduled, in order, and backed"); @@ -1560,16 +1588,16 @@ fn backing_works() { .or_insert_with(|| (backed_candidate.receipt(), Vec::new())); assert_eq!( - backed_candidate.validity_votes.len(), - backed_candidate.validator_indices.count_ones() + backed_candidate.validity_votes().len(), + backed_candidate.validator_indices(false).count_ones() ); candidate_receipt_with_backers.1.extend( backed_candidate - .validator_indices + .validator_indices(false) .iter() .enumerate() .filter(|(_, signed)| **signed) - .zip(backed_candidate.validity_votes.iter().cloned()) + .zip(backed_candidate.validity_votes().iter().cloned()) .filter_map(|((validator_index_within_group, _), attestation)| { let grp_idx = get_backing_group_idx(backed_candidate.hash()).unwrap(); group_validators(grp_idx).map(|validator_indices| { @@ -1747,6 +1775,7 @@ fn can_include_candidate_with_ok_code_upgrade() { &allowed_relay_parents, vec![backed_a], &[chain_a_assignment].into_iter().collect(), + &[(chain_a_assignment.1, chain_a_assignment.0)].into_iter().collect(), &group_validators, ) .expect("candidates scheduled, in order, and backed"); @@ -1960,6 +1989,13 @@ fn check_allowed_relay_parents() { &[chain_a_assignment, chain_b_assignment, thread_a_assignment] .into_iter() .collect(), + &[ + (chain_a_assignment.1, chain_a_assignment.0), + (chain_b_assignment.1, chain_b_assignment.0), + (thread_a_assignment.1, thread_a_assignment.0), + ] + .into_iter() + .collect(), &group_validators, ) .expect("candidates scheduled, in order, and backed"); @@ -2196,6 +2232,7 @@ fn para_upgrade_delay_scheduled_from_inclusion() { &allowed_relay_parents, vec![backed_a], &[chain_a_assignment].into_iter().collect(), + &[(chain_a_assignment.1, chain_a_assignment.0)].into_iter().collect(), &group_validators, ) .expect("candidates scheduled, in order, and backed"); diff --git a/polkadot/runtime/parachains/src/paras_inherent/tests.rs b/polkadot/runtime/parachains/src/paras_inherent/tests.rs index 6f3eac35685a..c96cc3902fe3 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/tests.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/tests.rs @@ -1491,13 +1491,13 @@ mod sanitizers { configuration::Pallet::::force_set_active_config(hc); // Verify the initial state is as expected - assert_eq!(backed_candidates.get(0).unwrap().validity_votes.len(), 2); + assert_eq!(backed_candidates.get(0).unwrap().validity_votes().len(), 2); assert_eq!( - backed_candidates.get(0).unwrap().validator_indices.get(0).unwrap(), + backed_candidates.get(0).unwrap().validator_indices(false).get(0).unwrap(), true ); assert_eq!( - backed_candidates.get(0).unwrap().validator_indices.get(1).unwrap(), + backed_candidates.get(0).unwrap().validator_indices(false).get(1).unwrap(), true ); let untouched = backed_candidates.get(1).unwrap().clone(); @@ -1511,14 +1511,14 @@ mod sanitizers { // there should still be two backed candidates assert_eq!(backed_candidates.len(), 2); // but the first one should have only one validity vote - assert_eq!(backed_candidates.get(0).unwrap().validity_votes.len(), 1); + assert_eq!(backed_candidates.get(0).unwrap().validity_votes().len(), 1); // Validator 0 vote should be dropped, validator 1 - retained assert_eq!( - backed_candidates.get(0).unwrap().validator_indices.get(0).unwrap(), + backed_candidates.get(0).unwrap().validator_indices(false).get(0).unwrap(), false ); assert_eq!( - backed_candidates.get(0).unwrap().validator_indices.get(1).unwrap(), + backed_candidates.get(0).unwrap().validator_indices(false).get(1).unwrap(), true ); // the second 
candidate shouldn't be modified @@ -1535,7 +1535,7 @@ mod sanitizers { set_disabled_validators(vec![0, 1]); // Verify the initial state is as expected - assert_eq!(backed_candidates.get(0).unwrap().validity_votes.len(), 2); + assert_eq!(backed_candidates.get(0).unwrap().validity_votes().len(), 2); let untouched = backed_candidates.get(1).unwrap().clone(); assert!(filter_backed_statements_from_disabled_validators::( From 9e4049039b1148effe5d8ef55e54f402aeebddd2 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Tue, 13 Feb 2024 14:24:39 +0200 Subject: [PATCH 17/51] fix more broken test on node side Signed-off-by: Andrei Sandu --- polkadot/node/core/backing/src/tests/mod.rs | 14 ++--- polkadot/node/core/provisioner/src/tests.rs | 66 ++++++++++++--------- 2 files changed, 44 insertions(+), 36 deletions(-) diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index 2bf32c407395..03c0ac3762fa 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -924,19 +924,19 @@ fn backing_works_while_validation_ongoing() { let candidates = rx.await.unwrap(); assert_eq!(1, candidates.len()); - assert_eq!(candidates[0].validity_votes.len(), 3); + assert_eq!(candidates[0].validity_votes().len(), 3); assert!(candidates[0] - .validity_votes + .validity_votes() .contains(&ValidityAttestation::Implicit(signed_a.signature().clone()))); assert!(candidates[0] - .validity_votes + .validity_votes() .contains(&ValidityAttestation::Explicit(signed_b.signature().clone()))); assert!(candidates[0] - .validity_votes + .validity_votes() .contains(&ValidityAttestation::Explicit(signed_c.signature().clone()))); assert_eq!( - candidates[0].validator_indices, + candidates[0].validator_indices(false), bitvec::bitvec![u8, bitvec::order::Lsb0; 1, 0, 1, 1], ); @@ -1617,8 +1617,8 @@ fn candidate_backing_reorders_votes() { let expected_attestations = vec![fake_attestation(1).into(), fake_attestation(3).into(), fake_attestation(5).into()]; - assert_eq!(backed.validator_indices, expected_bitvec); - assert_eq!(backed.validity_votes, expected_attestations); + assert_eq!(backed.validator_indices(false), expected_bitvec); + assert_eq!(backed.validity_votes(), expected_attestations); } // Test whether we retry on failed PoV fetching. 
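Editor's note on the runtime side of this change set: the inclusion code earlier in the series now prefers the core index injected by the backers and only falls back to the scheduler's per-para assignment when none is present. A minimal sketch of that selection follows; all names (`pick_core`, `injected`, `scheduled_for_para`) are invented stand-ins for the pallet's types and storage lookups, not pallet APIs.

// Minimal sketch, not pallet code: the fallback around `assumed_core_index(..)`
// used when processing backed candidates in the inclusion pallet above.
fn pick_core(
    injected: Option<u32>,           // BackedCandidate::assumed_core_index(enabled)
    scheduled_for_para: Option<u32>, // scheduler's core for the candidate's para
) -> Result<u32, &'static str> {
    match injected {
        // The block producer appended a core index: use it (later checks still apply).
        Some(core) => Ok(core),
        // Otherwise fall back to the single core the para is scheduled on.
        None => scheduled_for_para.ok_or("UnscheduledCandidate"),
    }
}

fn main() {
    assert_eq!(pick_core(Some(3), Some(1)), Ok(3));
    assert_eq!(pick_core(None, Some(1)), Ok(1));
    assert_eq!(pick_core(None, None), Err("UnscheduledCandidate"));
}
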
diff --git a/polkadot/node/core/provisioner/src/tests.rs b/polkadot/node/core/provisioner/src/tests.rs index b26df8ddb910..5d9425196844 100644 --- a/polkadot/node/core/provisioner/src/tests.rs +++ b/polkadot/node/core/provisioner/src/tests.rs @@ -460,13 +460,15 @@ mod select_candidates { let expected_backed = expected_candidates .iter() - .map(|c| BackedCandidate { - candidate: CommittedCandidateReceipt { - descriptor: c.descriptor.clone(), - commitments: Default::default(), - }, - validity_votes: Vec::new(), - validator_indices: default_bitvec(MOCK_GROUP_SIZE), + .map(|c| { + BackedCandidate::new( + CommittedCandidateReceipt { + descriptor: c.descriptor().clone(), + commitments: Default::default(), + }, + Vec::new(), + default_bitvec(MOCK_GROUP_SIZE), + ) }) .collect(); @@ -486,7 +488,7 @@ mod select_candidates { result.into_iter().for_each(|c| { assert!( - expected_candidates.iter().any(|c2| c.candidate.corresponds_to(c2)), + expected_candidates.iter().any(|c2| c.candidate().corresponds_to(c2)), "Failed to find candidate: {:?}", c, ) @@ -532,10 +534,12 @@ mod select_candidates { // Build possible outputs from select_candidates let backed_candidates: Vec<_> = committed_receipts .iter() - .map(|committed_receipt| BackedCandidate { - candidate: committed_receipt.clone(), - validity_votes: Vec::new(), - validator_indices: default_bitvec(MOCK_GROUP_SIZE), + .map(|committed_receipt| { + BackedCandidate::new( + committed_receipt.clone(), + Vec::new(), + default_bitvec(MOCK_GROUP_SIZE), + ) }) .collect(); @@ -566,7 +570,7 @@ mod select_candidates { result.into_iter().for_each(|c| { assert!( - expected_backed_filtered.iter().any(|c2| c.candidate.corresponds_to(c2)), + expected_backed_filtered.iter().any(|c2| c.candidate().corresponds_to(c2)), "Failed to find candidate: {:?}", c, ) @@ -605,13 +609,15 @@ mod select_candidates { let expected_backed = expected_candidates .iter() - .map(|c| BackedCandidate { - candidate: CommittedCandidateReceipt { - descriptor: c.descriptor.clone(), - commitments: Default::default(), - }, - validity_votes: Vec::new(), - validator_indices: default_bitvec(MOCK_GROUP_SIZE), + .map(|c| { + BackedCandidate::new( + CommittedCandidateReceipt { + descriptor: c.descriptor.clone(), + commitments: Default::default(), + }, + Vec::new(), + default_bitvec(MOCK_GROUP_SIZE), + ) }) .collect(); @@ -631,7 +637,7 @@ mod select_candidates { result.into_iter().for_each(|c| { assert!( - expected_candidates.iter().any(|c2| c.candidate.corresponds_to(c2)), + expected_candidates.iter().any(|c2| c.candidate().corresponds_to(c2)), "Failed to find candidate: {:?}", c, ) @@ -671,13 +677,15 @@ mod select_candidates { let expected_backed = expected_candidates .iter() - .map(|c| BackedCandidate { - candidate: CommittedCandidateReceipt { - descriptor: c.descriptor.clone(), - commitments: Default::default(), - }, - validity_votes: Vec::new(), - validator_indices: default_bitvec(MOCK_GROUP_SIZE), + .map(|c| { + BackedCandidate::new( + CommittedCandidateReceipt { + descriptor: c.descriptor().clone(), + commitments: Default::default(), + }, + Vec::new(), + default_bitvec(MOCK_GROUP_SIZE), + ) }) .collect(); @@ -697,7 +705,7 @@ mod select_candidates { result.into_iter().for_each(|c| { assert!( - expected_candidates.iter().any(|c2| c.candidate.corresponds_to(c2)), + expected_candidates.iter().any(|c2| c.candidate().corresponds_to(c2)), "Failed to find candidate: {:?}", c, ) From 42f46a0dcb3d924aebe1dfb27bb18efe59759023 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Wed, 14 Feb 2024 09:03:38 +0200 
Subject: [PATCH 18/51] wip new test Signed-off-by: Andrei Sandu --- polkadot/runtime/parachains/src/util.rs | 69 +++++++++++++++++-------- 1 file changed, 48 insertions(+), 21 deletions(-) diff --git a/polkadot/runtime/parachains/src/util.rs b/polkadot/runtime/parachains/src/util.rs index 6fec97741094..2fa172169d08 100644 --- a/polkadot/runtime/parachains/src/util.rs +++ b/polkadot/runtime/parachains/src/util.rs @@ -26,7 +26,6 @@ use primitives::{ use sp_std::{collections::btree_set::BTreeSet, vec::Vec}; use crate::{configuration, hrmp, paras, scheduler}; - /// Make the persisted validation data for a particular parachain, a specified relay-parent and it's /// storage root. /// @@ -102,26 +101,6 @@ pub fn take_active_subset(active: &[ValidatorIndex], set: &[T]) -> Vec subset } -#[cfg(test)] -mod tests { - - use sp_std::vec::Vec; - - use crate::util::{split_active_subset, take_active_subset}; - use primitives::ValidatorIndex; - - #[test] - fn take_active_subset_is_compatible_with_split_active_subset() { - let active: Vec<_> = vec![ValidatorIndex(1), ValidatorIndex(7), ValidatorIndex(3)]; - let validators = vec![9, 1, 6, 7, 4, 5, 2, 3, 0, 8]; - let (selected, unselected) = split_active_subset(&active, &validators); - let selected2 = take_active_subset(&active, &validators); - assert_eq!(selected, selected2); - assert_eq!(unselected, vec![9, 6, 4, 5, 2, 0, 8]); - assert_eq!(selected, vec![1, 3, 7]); - } -} - /// Filters out all candidates that have multiple cores assigned and no /// `CoreIndex` injected. pub(crate) fn elastic_scaling_mvp_filter( @@ -174,3 +153,51 @@ fn has_core_index( group_validators.len() == validator_indices_slice.len() } + +#[cfg(test)] +mod tests { + +use bitvec::vec::BitVec; +use sp_std::vec::Vec; +use test_helpers::{dummy_candidate_descriptor, dummy_hash}; + + use crate::util::{has_core_index, split_active_subset, take_active_subset}; + use primitives::{BackedCandidate, CandidateCommitments, CommittedCandidateReceipt, PersistedValidationData, ValidatorIndex}; + use bitvec::bitvec; + + #[test] + fn take_active_subset_is_compatible_with_split_active_subset() { + let active: Vec<_> = vec![ValidatorIndex(1), ValidatorIndex(7), ValidatorIndex(3)]; + let validators = vec![9, 1, 6, 7, 4, 5, 2, 3, 0, 8]; + let (selected, unselected) = split_active_subset(&active, &validators); + let selected2 = take_active_subset(&active, &validators); + assert_eq!(selected, selected2); + assert_eq!(unselected, vec![9, 6, 4, 5, 2, 0, 8]); + assert_eq!(selected, vec![1, 3, 7]); + } + + pub fn dummy_bitvec(size: usize) -> BitVec { + bitvec![u8, bitvec::order::Lsb0; 0; size] + } + + #[test] + fn has_core_index_works() { + let mut descriptor = dummy_candidate_descriptor(dummy_hash()); + let empty_hash = sp_core::H256::zero(); + + descriptor.para_id = 1000.into(); + descriptor.persisted_validation_data_hash = empty_hash; + let committed_receipt = CommittedCandidateReceipt { + descriptor, + commitments: CandidateCommitments::default(), + }; + + let candidate = BackedCandidate::new( + committed_receipt.clone(), + Vec::new(), + dummy_bitvec(5), + ); + + assert_eq!(has_core_index::(&candidate, false), false); + } +} From 222609c23a59bb3d9277d21f9ddf14216e7e2a2f Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Wed, 14 Feb 2024 14:04:45 +0200 Subject: [PATCH 19/51] review feedback Signed-off-by: Andrei Sandu --- Cargo.lock | 8 +-- polkadot/node/core/backing/src/error.rs | 6 -- polkadot/node/core/backing/src/lib.rs | 70 ++++++++----------- polkadot/node/core/backing/src/tests/mod.rs | 68 
++---------------- .../src/tests/prospective_parachains.rs | 38 ---------- polkadot/node/core/provisioner/src/lib.rs | 7 +- polkadot/primitives/src/vstaging/mod.rs | 13 ++-- 7 files changed, 44 insertions(+), 166 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fb345e3d36ff..332c05825ec0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -21236,9 +21236,9 @@ dependencies = [ [[package]] name = "wasmi" -version = "0.31.0" +version = "0.31.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f341edb80021141d4ae6468cbeefc50798716a347d4085c3811900049ea8945" +checksum = "77a8281d1d660cdf54c76a3efa9ddd0c270cada1383a995db3ccb43d166456c7" dependencies = [ "smallvec", "spin 0.9.8", @@ -21249,9 +21249,9 @@ dependencies = [ [[package]] name = "wasmi_arena" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "401c1f35e413fac1846d4843745589d9ec678977ab35a384db8ae7830525d468" +checksum = "104a7f73be44570cac297b3035d76b169d6599637631cf37a1703326a0727073" [[package]] name = "wasmi_core" diff --git a/polkadot/node/core/backing/src/error.rs b/polkadot/node/core/backing/src/error.rs index c41084911d22..64955a393962 100644 --- a/polkadot/node/core/backing/src/error.rs +++ b/polkadot/node/core/backing/src/error.rs @@ -63,12 +63,6 @@ pub enum Error { #[error("Fetching validation code by hash failed {0:?}, {1:?}")] FetchValidationCode(ValidationCodeHash, RuntimeApiError), - #[error("Fetching validator groups failed")] - FetchValidatorGroups(RuntimeApiError), - - #[error("Fetching availability cores failed")] - FetchAvailabilityCores(RuntimeApiError), - #[error("Fetching Runtime API version failed {0:?}")] FetchRuntimeApiVersion(RuntimeApiError), diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index c4e8ea7c1fe2..a8ccbd17b3d3 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -92,13 +92,12 @@ use polkadot_node_subsystem::{ RuntimeApiRequest, StatementDistributionMessage, StoreAvailableDataError, }, overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, - SubsystemSender, }; use polkadot_node_subsystem_util::{ self as util, backing_implicit_view::{FetchError as ImplicitViewFetchError, View as ImplicitView}, - executor_params_at_relay_parent, request_availability_cores, request_from_runtime, - request_session_index_for_child, request_validator_groups, request_validators, + executor_params_at_relay_parent, request_from_runtime, request_session_index_for_child, + request_validator_groups, request_validators, runtime::{ self, prospective_parachains_mode, request_min_backing_votes, ProspectiveParachainsMode, }, @@ -228,8 +227,15 @@ struct PerRelayParentState { fallbacks: HashMap, /// The minimum backing votes threshold. minimum_backing_votes: u32, - /// If true, we're appendindg extra bits in the BackedCandidate validator indices bitfield. + /// If true, we're appendindg extra bits in the BackedCandidate validator indices bitfield, + /// which represent the assigned core index. inject_core_index: bool, + /// Number of cores. + n_cores: usize, + /// The validator groups at this relay parent. + validator_groups: Vec>, + /// The associated group rotation information. + group_rotation_info: GroupRotationInfo, } struct PerCandidateState { @@ -1007,49 +1013,23 @@ macro_rules! 
try_runtime_api { }; } -#[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)] -async fn core_index_from_statement( - sender: &mut Sender, - relay_parent: Hash, +fn core_index_from_statement( + validator_groups: &[Vec], + group_rotation_info: &GroupRotationInfo, + n_cores: usize, statement: &SignedFullStatementWithPVD, -) -> Result, Error> -where - Sender: SubsystemSender + Clone, -{ - let parent = relay_parent; - - let (validator_groups, group_rotation_info) = request_validator_groups(parent, sender) - .await - .await - .map_err(Error::RuntimeApiUnavailable)? - .map_err(Error::FetchValidatorGroups)?; - - let cores = request_availability_cores(parent, sender) - .await - .await - .map_err(Error::RuntimeApiUnavailable)? - .map_err(Error::FetchAvailabilityCores)?; - +) -> Option { let compact_statement = statement.as_unchecked(); let candidate_hash = CandidateHash(*compact_statement.unchecked_payload().candidate_hash()); - gum::trace!(target: LOG_TARGET, ?group_rotation_info, ?statement, ?validator_groups, ?cores, ?candidate_hash, "Extracting core index from statement"); - - Ok(core_index_from_statement_inner(&cores, &validator_groups, &group_rotation_info, statement)) -} + gum::trace!(target: LOG_TARGET, ?group_rotation_info, ?statement, ?validator_groups, ?n_cores, ?candidate_hash, "Extracting core index from statement"); -pub(crate) fn core_index_from_statement_inner( - cores: &[CoreState], - validator_groups: &[Vec], - group_rotation_info: &GroupRotationInfo, - statement: &SignedFullStatementWithPVD, -) -> Option { let statement_validator_index = statement.validator_index(); for (group_index, group) in validator_groups.iter().enumerate() { for validator_index in group { if *validator_index == statement_validator_index { return Some( - group_rotation_info.core_for_group(GroupIndex(group_index as u32), cores.len()), + group_rotation_info.core_for_group(GroupIndex(group_index as u32), n_cores), ) } } @@ -1084,7 +1064,7 @@ async fn construct_per_relay_parent_state( let inject_core_index = request_node_features(parent, session_index, ctx.sender()) .await? .unwrap_or(NodeFeatures::EMPTY) - .get(FeatureIndex::InjectCoreIndex as usize) + .get(FeatureIndex::ElasticScalingCoreIndex as usize) .map(|b| *b) .unwrap_or(false); @@ -1178,6 +1158,9 @@ async fn construct_per_relay_parent_state( fallbacks: HashMap::new(), minimum_backing_votes, inject_core_index, + n_cores, + validator_groups, + group_rotation_info, })) } @@ -1670,10 +1653,13 @@ async fn import_statement( let stmt = primitive_statement_to_table(statement); - let core = core_index_from_statement(ctx.sender(), rp_state.parent, statement) - .await - .map_err(|_| Error::CoreIndexUnavailable)? 
- .ok_or(Error::CoreIndexUnavailable)?; + let core = core_index_from_statement( + &rp_state.validator_groups, + &rp_state.group_rotation_info, + rp_state.n_cores, + statement, + ) + .ok_or(Error::CoreIndexUnavailable)?; Ok(rp_state.table.import_statement(&rp_state.table_context, core, stmt)) } diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index 2bf32c407395..316dcdba6d86 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -327,30 +327,6 @@ async fn test_startup(virtual_overseer: &mut VirtualOverseer, test_state: &TestS ); } -pub(crate) async fn assert_core_index_from_statement( - virtual_overseer: &mut VirtualOverseer, - test_state: &TestState, -) { - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(_parent, RuntimeApiRequest::ValidatorGroups(tx)) - ) => { - tx.send(Ok(test_state.validator_groups.clone())).unwrap(); - } - ); - - // Check that subsystem job issues a request for the availability cores. - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(_parent, RuntimeApiRequest::AvailabilityCores(tx)) - ) => { - tx.send(Ok(test_state.availability_cores.clone())).unwrap(); - } - ); -} - async fn assert_validation_requests( virtual_overseer: &mut VirtualOverseer, validation_code: ValidationCode, @@ -483,8 +459,6 @@ fn backing_second_works() { } ); - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -581,7 +555,6 @@ fn backing_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_validation_requests(&mut virtual_overseer, validation_code_ab.clone()).await; // Sending a `Statement::Seconded` for our assignment will start @@ -641,8 +614,6 @@ fn backing_works() { } ); - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -669,8 +640,6 @@ fn backing_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - virtual_overseer .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves( ActiveLeavesUpdate::stop_work(test_state.relay_parent), @@ -737,20 +706,20 @@ fn extract_core_index_from_statement_works() { .flatten() .expect("should be signed"); - let core_index_1 = core_index_from_statement_inner( - &test_state.availability_cores, + let core_index_1 = core_index_from_statement( &test_state.validator_groups.0, &test_state.validator_groups.1, + test_state.availability_cores.len(), &signed_statement_1, ) .unwrap(); assert_eq!(core_index_1, CoreIndex(0)); - let core_index_2 = core_index_from_statement_inner( - &test_state.availability_cores, + let core_index_2 = core_index_from_statement( &test_state.validator_groups.0, &test_state.validator_groups.1, + test_state.availability_cores.len(), &signed_statement_2, ) .unwrap(); @@ -841,7 +810,6 @@ fn backing_works_while_validation_ongoing() { let statement = CandidateBackingMessage::Statement(test_state.relay_parent, signed_a.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_core_index_from_statement(&mut virtual_overseer, 
&test_state).await; assert_validation_requests(&mut virtual_overseer, validation_code_abc.clone()).await; @@ -891,7 +859,6 @@ fn backing_works_while_validation_ongoing() { CandidateBackingMessage::Statement(test_state.relay_parent, signed_b.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; // Candidate gets backed entirely by other votes. assert_matches!( @@ -912,8 +879,6 @@ fn backing_works_while_validation_ongoing() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - let (tx, rx) = oneshot::channel(); let msg = CandidateBackingMessage::GetBackedCandidates( vec![(candidate_a.hash(), test_state.relay_parent)], @@ -1012,7 +977,6 @@ fn backing_misbehavior_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; assert_matches!( @@ -1068,8 +1032,6 @@ fn backing_misbehavior_works() { } ); - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -1101,8 +1063,6 @@ fn backing_misbehavior_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - assert_matches!( virtual_overseer.recv().await, AllMessages::Provisioner( @@ -1278,7 +1238,6 @@ fn backing_dont_second_invalid() { tx.send(Ok(())).unwrap(); } ); - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_matches!( virtual_overseer.recv().await, @@ -1350,7 +1309,6 @@ fn backing_second_after_first_fails_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; // Subsystem requests PoV and requests validation. @@ -1495,7 +1453,6 @@ fn backing_works_after_failed_validation() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; // Subsystem requests PoV and requests validation. @@ -1700,7 +1657,6 @@ fn retry_works() { CandidateBackingMessage::Statement(test_state.relay_parent, signed_a.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; // Subsystem requests PoV and requests validation. 
@@ -1722,8 +1678,6 @@ fn retry_works() { CandidateBackingMessage::Statement(test_state.relay_parent, signed_b.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - // Not deterministic which message comes first: for _ in 0u32..5 { match virtual_overseer.recv().await { @@ -1766,7 +1720,6 @@ fn retry_works() { CandidateBackingMessage::Statement(test_state.relay_parent, signed_c.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; assert_matches!( @@ -1891,14 +1844,11 @@ fn observes_backing_even_if_not_validator() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - let statement = CandidateBackingMessage::Statement(test_state.relay_parent, signed_b.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_matches!( virtual_overseer.recv().await, AllMessages::Provisioner( @@ -1916,8 +1866,6 @@ fn observes_backing_even_if_not_validator() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - virtual_overseer .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves( ActiveLeavesUpdate::stop_work(test_state.relay_parent), @@ -1984,8 +1932,6 @@ fn cannot_second_multiple_candidates_per_parent() { } ); - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -2220,8 +2166,6 @@ fn disabled_validator_doesnt_distribute_statement_on_receiving_statement() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - // Ensure backing subsystem is not doing any work assert_matches!(virtual_overseer.recv().timeout(Duration::from_secs(1)).await, None); @@ -2313,8 +2257,6 @@ fn validator_ignores_statements_from_disabled_validators() { virtual_overseer.send(FromOrchestra::Communication { msg: statement_3 }).await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( @@ -2401,8 +2343,6 @@ fn validator_ignores_statements_from_disabled_validators() { } ); - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( diff --git a/polkadot/node/core/backing/src/tests/prospective_parachains.rs b/polkadot/node/core/backing/src/tests/prospective_parachains.rs index 0fbf52403ea5..165d39b4fcc0 100644 --- a/polkadot/node/core/backing/src/tests/prospective_parachains.rs +++ b/polkadot/node/core/backing/src/tests/prospective_parachains.rs @@ -462,8 +462,6 @@ fn seconding_sanity_check_allowed() { )) ); - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -599,8 +597,6 @@ fn seconding_sanity_check_disallowed() { )) ); - assert_core_index_from_statement(&mut virtual_overseer, 
&test_state).await; - assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -855,8 +851,6 @@ fn prospective_parachains_reject_candidate() { )) ); - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -993,8 +987,6 @@ fn second_multiple_candidates_per_relay_parent() { ) ); - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -1125,8 +1117,6 @@ fn backing_works() { )) ); - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - assert_validate_seconded_candidate( &mut virtual_overseer, candidate_a.descriptor().relay_parent, @@ -1139,7 +1129,6 @@ fn backing_works() { ) .await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -1176,8 +1165,6 @@ fn backing_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - virtual_overseer }); } @@ -1559,29 +1546,6 @@ fn seconding_sanity_check_occupy_same_depth() { ) ); - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(_parent, RuntimeApiRequest::ValidatorGroups(tx)) - ) => { - let (groups, mut rotation) = test_state.validator_groups.clone(); - if leaf_hash == _parent { - rotation.now = 100; - } - tx.send(Ok((groups, rotation))).unwrap(); - } - ); - - // Check that subsystem job issues a request for the availability cores. - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(_parent, RuntimeApiRequest::AvailabilityCores(tx)) - ) => { - tx.send(Ok(test_state.availability_cores.clone())).unwrap(); - } - ); - assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -1721,8 +1685,6 @@ fn occupied_core_assignment() { )) ); - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs index 8d094eaa7673..e130780c2f3d 100644 --- a/polkadot/node/core/provisioner/src/lib.rs +++ b/polkadot/node/core/provisioner/src/lib.rs @@ -681,9 +681,8 @@ async fn request_backable_candidates( CoreState::Free => continue, }; - // We're currently fetching based on para id. This has to be chagned to query prospective - // parachains via core index. We should be calling this once per para rather than per core. - // TODO: Fix after https://github.com/paritytech/polkadot-sdk/pull/3233 + // We should be calling this once per para rather than per core. 
+ // TODO: Will be fixed in https://github.com/paritytech/polkadot-sdk/pull/3233 let response = get_backable_candidate(relay_parent, para_id, required_path, sender).await?; match response { Some((hash, relay_parent)) => { @@ -732,7 +731,7 @@ async fn select_candidates( ) .await?, }; - gum::debug!(target: LOG_TARGET, ?selected_candidates, "Got backedable candidates"); + gum::debug!(target: LOG_TARGET, ?selected_candidates, "Got backable candidates"); // now get the backed candidates corresponding to these candidate receipts let (tx, rx) = oneshot::channel();
diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs index dfae1af07f38..8f4806be85f8 100644 --- a/polkadot/primitives/src/vstaging/mod.rs +++ b/polkadot/primitives/src/vstaging/mod.rs @@ -64,16 +64,13 @@ pub mod node_features { /// Tells if tranch0 assignments could be sent in a single certificate. /// Reserved for: `` EnableAssignmentsV2 = 0, - /// First unassigned feature bit. - /// Every time a new feature flag is assigned it should take this value. - /// and this should be incremented. - FirstUnassigned = 1, - /// Experimental features start at bit 16. Note that experimental features pop in and out - /// of exsitence without warning. - /// /// This feature enables the extension of `BackedCandidate::validator_indices` by 8 bit. /// The value stored there represents the assumed core index where the candidates /// are backed. - InjectCoreIndex = 16, + ElasticScalingCoreIndex = 1, + /// First unassigned feature bit. + /// Every time a new feature flag is assigned it should take this value. + /// and this should be incremented. + FirstUnassigned = 2, } }
From ccb2a88a13c793f64ad46e3687527ae35bd0e056 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Wed, 14 Feb 2024 14:14:11 +0200 Subject: [PATCH 20/51] ElasticScalingCoreIndex Signed-off-by: Andrei Sandu --- polkadot/runtime/parachains/src/paras_inherent/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 932a649bb443..a9545aeb3519 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -1096,7 +1096,7 @@ fn filter_backed_statements_from_disabled_validators::config().minimum_backing_votes; let core_index_enabled = configuration::Pallet::::config() .node_features - .get(FeatureIndex::InjectCoreIndex as usize) + .get(FeatureIndex::ElasticScalingCoreIndex as usize) .map(|b| *b) .unwrap_or(false);
From a5ba15722208e0aeeeff3ca0dd161bedc31a8b1d Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Wed, 14 Feb 2024 15:35:27 +0200 Subject: [PATCH 21/51] finish filtering of candidates for elastic scaling Signed-off-by: Andrei Sandu --- .../runtime/parachains/src/inclusion/mod.rs | 2 +- .../parachains/src/paras_inherent/mod.rs | 4 +- polkadot/runtime/parachains/src/util.rs | 102 ++++++++++++------ 3 files changed, 73 insertions(+), 35 deletions(-)
diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index a606a421ac97..978e7a5e94c6 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -620,7 +620,7 @@ impl Pallet { let core_index_enabled = configuration::Pallet::::config() .node_features - .get(FeatureIndex::InjectCoreIndex as usize) + .get(FeatureIndex::ElasticScalingCoreIndex as usize) .map(|b| *b) .unwrap_or(false); let
minimum_backing_votes = configuration::Pallet::::config().minimum_backing_votes; diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index a9545aeb3519..5385e41270a2 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -31,7 +31,7 @@ use crate::{ paras, scheduler::{self, FreedReason}, shared::{self, AllowedRelayParentsTracker}, - util::elastic_scaling_mvp_filter, + util::filter_elastic_scaling_candidates, ParaId, }; use bitvec::prelude::BitVec; @@ -592,7 +592,7 @@ impl Pallet { METRICS.on_candidates_processed_total(backed_candidates.len() as u64); - elastic_scaling_mvp_filter::(&mut backed_candidates); + filter_elastic_scaling_candidates::(&mut backed_candidates); let SanitizedBackedCandidates { backed_candidates, votes_from_disabled_were_dropped } = sanitize_backed_candidates::( diff --git a/polkadot/runtime/parachains/src/util.rs b/polkadot/runtime/parachains/src/util.rs index 2fa172169d08..70d03005399b 100644 --- a/polkadot/runtime/parachains/src/util.rs +++ b/polkadot/runtime/parachains/src/util.rs @@ -23,7 +23,10 @@ use primitives::{ vstaging::node_features::FeatureIndex, BackedCandidate, CoreIndex, Id as ParaId, PersistedValidationData, ValidatorIndex, }; -use sp_std::{collections::btree_set::BTreeSet, vec::Vec}; +use sp_std::{ + collections::{btree_map::BTreeMap, btree_set::BTreeSet}, + vec::Vec, +}; use crate::{configuration, hrmp, paras, scheduler}; /// Make the persisted validation data for a particular parachain, a specified relay-parent and it's @@ -101,14 +104,17 @@ pub fn take_active_subset(active: &[ValidatorIndex], set: &[T]) -> Vec subset } -/// Filters out all candidates that have multiple cores assigned and no +/// On first pass it filters out all candidates that have multiple cores assigned and no /// `CoreIndex` injected. -pub(crate) fn elastic_scaling_mvp_filter( +/// +/// On second pass we filter out candidates with the same core index. This can happen if for example +/// collators distribute collations to multiple backing groups. +pub(crate) fn filter_elastic_scaling_candidates( candidates: &mut Vec>, ) { if !configuration::Pallet::::config() .node_features - .get(FeatureIndex::InjectCoreIndex as usize) + .get(FeatureIndex::ElasticScalingCoreIndex as usize) .map(|b| *b) .unwrap_or(false) { @@ -117,9 +123,38 @@ pub(crate) fn elastic_scaling_mvp_filter(candidate, true)); + // A mapping from parachain to all assigned cores. + let mut cores_per_parachain: BTreeMap> = BTreeMap::new(); + + for (core_index, para_id) in >::scheduled_paras() { + cores_per_parachain.entry(para_id).or_default().push(core_index); + } + + // We keep a candidate if the parachain has only one core assigned or if + // a core index is provided by block author. + candidates.retain(|candidate| { + !cores_per_parachain + .get(&candidate.candidate().descriptor.para_id) + .map(|cores| cores.len()) + .unwrap_or(0) > + 1 || has_core_index::(candidate, true) + }); + + let mut used_cores = BTreeSet::new(); + + // We keep one candidate per core in case multiple candidates of same para end up backed on same + // core. This can be further refined to pick the candidate that has the parenthead equal + // to the one in storage. + candidates.retain(|candidate| { + if let Some(core_index) = candidate.assumed_core_index(true) { + // Drop candidate if the core was already used by a previous candidate. 
+ used_cores.insert(core_index) + } else { + // This shouldn't happen, but at this point the candidate without core index is fine + // since we know the para:core mapping is unique. + true + } + }) } // Returns `true` if the candidate contains an injected `CoreIndex`. @@ -157,13 +192,16 @@ fn has_core_index( #[cfg(test)] mod tests { -use bitvec::vec::BitVec; -use sp_std::vec::Vec; -use test_helpers::{dummy_candidate_descriptor, dummy_hash}; + use bitvec::vec::BitVec; + use sp_std::vec::Vec; + use test_helpers::{dummy_candidate_descriptor, dummy_hash}; use crate::util::{has_core_index, split_active_subset, take_active_subset}; - use primitives::{BackedCandidate, CandidateCommitments, CommittedCandidateReceipt, PersistedValidationData, ValidatorIndex}; use bitvec::bitvec; + use primitives::{ + BackedCandidate, CandidateCommitments, CommittedCandidateReceipt, PersistedValidationData, + ValidatorIndex, + }; #[test] fn take_active_subset_is_compatible_with_split_active_subset() { @@ -176,28 +214,28 @@ use test_helpers::{dummy_candidate_descriptor, dummy_hash}; assert_eq!(selected, vec![1, 3, 7]); } - pub fn dummy_bitvec(size: usize) -> BitVec { + pub fn dummy_bitvec(size: usize) -> BitVec { bitvec![u8, bitvec::order::Lsb0; 0; size] } - - #[test] - fn has_core_index_works() { - let mut descriptor = dummy_candidate_descriptor(dummy_hash()); - let empty_hash = sp_core::H256::zero(); - - descriptor.para_id = 1000.into(); - descriptor.persisted_validation_data_hash = empty_hash; - let committed_receipt = CommittedCandidateReceipt { - descriptor, - commitments: CandidateCommitments::default(), - }; - - let candidate = BackedCandidate::new( - committed_receipt.clone(), - Vec::new(), - dummy_bitvec(5), - ); - assert_eq!(has_core_index::(&candidate, false), false); - } + // #[test] + // fn has_core_index_works() { + // let mut descriptor = dummy_candidate_descriptor(dummy_hash()); + // let empty_hash = sp_core::H256::zero(); + + // descriptor.para_id = 1000.into(); + // descriptor.persisted_validation_data_hash = empty_hash; + // let committed_receipt = CommittedCandidateReceipt { + // descriptor, + // commitments: CandidateCommitments::default(), + // }; + + // let candidate = BackedCandidate::new( + // committed_receipt.clone(), + // Vec::new(), + // dummy_bitvec(5), + // ); + + // assert_eq!(has_core_index::(&candidate, false), + // false); } } From a02e896f1970a9dfd299358525f8dc67a4721c43 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Wed, 14 Feb 2024 15:39:09 +0200 Subject: [PATCH 22/51] remove log Signed-off-by: Andrei Sandu --- polkadot/primitives/src/v6/mod.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/polkadot/primitives/src/v6/mod.rs b/polkadot/primitives/src/v6/mod.rs index c9c78499cbb5..7723b8a1d74d 100644 --- a/polkadot/primitives/src/v6/mod.rs +++ b/polkadot/primitives/src/v6/mod.rs @@ -750,12 +750,6 @@ pub fn check_candidate_backing + Clone + Encode + core::fmt::Debu group_len: usize, validator_lookup: impl Fn(usize) -> Option, ) -> Result { - log::debug!( - target: LOG_TARGET, - "checking candidate {:?}", - backed - ); - if backed.validator_indices.len() != group_len { log::debug!( target: LOG_TARGET, From dd34850d5c933f31c49f03dedb0b32ed400a6ece Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Wed, 14 Feb 2024 19:47:24 +0200 Subject: [PATCH 23/51] more feedback Signed-off-by: Andrei Sandu --- polkadot/node/core/backing/src/lib.rs | 47 ++++++++++++++++----- polkadot/node/core/backing/src/tests/mod.rs | 31 ++++++++++++-- 2 files changed, 64 insertions(+), 14 deletions(-) 
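For orientation before the next diff: the candidate filtering that PATCH 21 above adds to `util.rs` works in two passes. It first drops any candidate whose para has more than one core assigned but carries no injected core index, and it then keeps at most one candidate per core. The standalone sketch below restates that intended logic with simplified stand-in types; `Candidate`, `filter_elastic_scaling` and the `cores_per_para` map are invented for the illustration and are not the runtime types.

use std::collections::{BTreeMap, BTreeSet};

struct Candidate {
    para_id: u32,
    core_index: Option<u32>,
}

fn filter_elastic_scaling(
    candidates: &mut Vec<Candidate>,
    cores_per_para: &BTreeMap<u32, usize>,
) {
    // Pass 1: keep a candidate if its para has at most one core assigned,
    // or if the block author injected a core index.
    candidates.retain(|c| {
        *cores_per_para.get(&c.para_id).unwrap_or(&0) <= 1 || c.core_index.is_some()
    });

    // Pass 2: keep only the first candidate seen for any given core.
    let mut used_cores = BTreeSet::new();
    candidates.retain(|c| match c.core_index {
        // `insert` returns false when the core was already taken by an earlier candidate.
        Some(core) => used_cores.insert(core),
        // No core index is fine at this point: pass 1 guarantees the para:core mapping is unique.
        None => true,
    });
}

fn main() {
    let cores_per_para: BTreeMap<u32, usize> = [(100, 2), (200, 1)].into_iter().collect();
    let mut candidates = vec![
        Candidate { para_id: 100, core_index: Some(0) },
        Candidate { para_id: 100, core_index: Some(0) }, // same core twice: second one dropped
        Candidate { para_id: 100, core_index: None },    // multi-core para without index: dropped
        Candidate { para_id: 200, core_index: None },    // single-core para: kept
    ];
    filter_elastic_scaling(&mut candidates, &cores_per_para);
    assert_eq!(candidates.len(), 2);
}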
diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index a8ccbd17b3d3..48bb5121554f 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -230,8 +230,8 @@ struct PerRelayParentState { /// If true, we're appendindg extra bits in the BackedCandidate validator indices bitfield, /// which represent the assigned core index. inject_core_index: bool, - /// Number of cores. - n_cores: usize, + /// The core state for all cores + cores: Vec, /// The validator groups at this relay parent. validator_groups: Vec>, /// The associated group rotation information. @@ -1016,21 +1016,48 @@ macro_rules! try_runtime_api { fn core_index_from_statement( validator_groups: &[Vec], group_rotation_info: &GroupRotationInfo, - n_cores: usize, + cores: &[CoreState], statement: &SignedFullStatementWithPVD, ) -> Option { let compact_statement = statement.as_unchecked(); let candidate_hash = CandidateHash(*compact_statement.unchecked_payload().candidate_hash()); - gum::trace!(target: LOG_TARGET, ?group_rotation_info, ?statement, ?validator_groups, ?n_cores, ?candidate_hash, "Extracting core index from statement"); + let n_cores = cores.len(); + + gum::trace!(target: LOG_TARGET, ?group_rotation_info, ?statement, ?validator_groups, n_cores = ?cores.len() , ?candidate_hash, "Extracting core index from statement"); let statement_validator_index = statement.validator_index(); for (group_index, group) in validator_groups.iter().enumerate() { for validator_index in group { if *validator_index == statement_validator_index { - return Some( - group_rotation_info.core_for_group(GroupIndex(group_index as u32), n_cores), - ) + // First check if the statement para id matches the core assignment. + let core_index = + group_rotation_info.core_for_group(GroupIndex(group_index as u32), n_cores); + + if core_index.0 as usize > n_cores { + gum::warn!(target: LOG_TARGET, ?candidate_hash, ?core_index, n_cores, "Invalid CoreIndex"); + return None + } + + if let StatementWithPVD::Seconded(candidate, _pvd) = statement.payload() { + let candidate_para_id = candidate.descriptor.para_id; + let assigned_para_id = match &cores[core_index.0 as usize] { + CoreState::Free => { + gum::debug!(target: LOG_TARGET, ?candidate_hash, "Invalid CoreIndex, core is not assigned to any para_id"); + return None + }, + CoreState::Occupied(occupied) => occupied.candidate_descriptor.para_id, + CoreState::Scheduled(scheduled) => scheduled.para_id, + }; + + if assigned_para_id != candidate_para_id { + gum::debug!(target: LOG_TARGET, ?candidate_hash, ?core_index, ?assigned_para_id, ?candidate_para_id, "Invalid CoreIndex, core is assigned to a different para_id"); + return None + } + return Some(core_index) + } else { + return Some(core_index) + } } } } @@ -1111,7 +1138,7 @@ async fn construct_per_relay_parent_state( let mut assigned_core = None; let mut assigned_para = None; - for (idx, core) in cores.into_iter().enumerate() { + for (idx, core) in cores.iter().enumerate() { let core_para_id = match core { CoreState::Scheduled(scheduled) => scheduled.para_id, CoreState::Occupied(occupied) => @@ -1158,7 +1185,7 @@ async fn construct_per_relay_parent_state( fallbacks: HashMap::new(), minimum_backing_votes, inject_core_index, - n_cores, + cores, validator_groups, group_rotation_info, })) @@ -1656,7 +1683,7 @@ async fn import_statement( let core = core_index_from_statement( &rp_state.validator_groups, &rp_state.group_rotation_info, - rp_state.n_cores, + &rp_state.cores, statement, ) 
.ok_or(Error::CoreIndexUnavailable)?; diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index 316dcdba6d86..a4f77817427c 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -659,7 +659,7 @@ fn extract_core_index_from_statement_works() { let pov_hash = pov_a.hash(); - let candidate = TestCandidateBuilder { + let mut candidate = TestCandidateBuilder { para_id: test_state.chain_ids[0], relay_parent: test_state.relay_parent, pov_hash, @@ -706,10 +706,23 @@ fn extract_core_index_from_statement_works() { .flatten() .expect("should be signed"); + candidate.descriptor.para_id = test_state.chain_ids[1]; + + let signed_statement_3 = SignedFullStatementWithPVD::sign( + &test_state.keystore, + StatementWithPVD::Seconded(candidate, pvd_a.clone()), + &test_state.signing_context, + ValidatorIndex(1), + &public1.into(), + ) + .ok() + .flatten() + .expect("should be signed"); + let core_index_1 = core_index_from_statement( &test_state.validator_groups.0, &test_state.validator_groups.1, - test_state.availability_cores.len(), + &test_state.availability_cores, &signed_statement_1, ) .unwrap(); @@ -719,12 +732,22 @@ fn extract_core_index_from_statement_works() { let core_index_2 = core_index_from_statement( &test_state.validator_groups.0, &test_state.validator_groups.1, - test_state.availability_cores.len(), + &test_state.availability_cores, &signed_statement_2, + ); + + // Must be none, para_id in descriptor is different than para assigned to core + assert_eq!(core_index_2, None); + + let core_index_3 = core_index_from_statement( + &test_state.validator_groups.0, + &test_state.validator_groups.1, + &test_state.availability_cores, + &signed_statement_3, ) .unwrap(); - assert_eq!(core_index_2, CoreIndex(1)); + assert_eq!(core_index_3, CoreIndex(1)); } #[test] From ad98f18fce42f090cd86574d972416ce6a431393 Mon Sep 17 00:00:00 2001 From: alindima Date: Mon, 19 Feb 2024 12:18:24 +0200 Subject: [PATCH 24/51] use next up on available instead of occupied core index --- polkadot/node/core/backing/src/lib.rs | 14 ++++++++++++-- .../backing/src/tests/prospective_parachains.rs | 5 +++-- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 48bb5121554f..a44f3c47f46e 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -1046,7 +1046,13 @@ fn core_index_from_statement( gum::debug!(target: LOG_TARGET, ?candidate_hash, "Invalid CoreIndex, core is not assigned to any para_id"); return None }, - CoreState::Occupied(occupied) => occupied.candidate_descriptor.para_id, + CoreState::Occupied(occupied) => { + if let Some(next) = &occupied.next_up_on_available { + next.para_id + } else { + return None + } + }, CoreState::Scheduled(scheduled) => scheduled.para_id, }; @@ -1145,7 +1151,11 @@ async fn construct_per_relay_parent_state( if mode.is_enabled() { // Async backing makes it legal to build on top of // occupied core. 
- occupied.candidate_descriptor.para_id + if let Some(next) = &occupied.next_up_on_available { + next.para_id + } else { + continue + } } else { continue }, diff --git a/polkadot/node/core/backing/src/tests/prospective_parachains.rs b/polkadot/node/core/backing/src/tests/prospective_parachains.rs index 165d39b4fcc0..94310d2aa164 100644 --- a/polkadot/node/core/backing/src/tests/prospective_parachains.rs +++ b/polkadot/node/core/backing/src/tests/prospective_parachains.rs @@ -1578,13 +1578,14 @@ fn occupied_core_assignment() { const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; const LEAF_A_ANCESTRY_LEN: BlockNumber = 3; let para_id = test_state.chain_ids[0]; + let previous_para_id = test_state.chain_ids[1]; // Set the core state to occupied. let mut candidate_descriptor = ::test_helpers::dummy_candidate_descriptor(Hash::zero()); - candidate_descriptor.para_id = para_id; + candidate_descriptor.para_id = previous_para_id; test_state.availability_cores[0] = CoreState::Occupied(OccupiedCore { group_responsible: Default::default(), - next_up_on_available: None, + next_up_on_available: Some(ScheduledCore { para_id, collator: None }), occupied_since: 100_u32, time_out_at: 200_u32, next_up_on_time_out: None, From 606d7c4ef7c1d28d38cc25f63e5442d78c74daea Mon Sep 17 00:00:00 2001 From: alindima Date: Mon, 19 Feb 2024 13:45:31 +0200 Subject: [PATCH 25/51] ElasticScalingCoreIndex -> ElasticScalingMVP --- polkadot/node/core/backing/src/lib.rs | 2 +- polkadot/primitives/src/vstaging/mod.rs | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index a44f3c47f46e..50084e1658b4 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -1097,7 +1097,7 @@ async fn construct_per_relay_parent_state( let inject_core_index = request_node_features(parent, session_index, ctx.sender()) .await? .unwrap_or(NodeFeatures::EMPTY) - .get(FeatureIndex::ElasticScalingCoreIndex as usize) + .get(FeatureIndex::ElasticScalingMVP as usize) .map(|b| *b) .unwrap_or(false); diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs index 8f4806be85f8..39d9dfc02c5b 100644 --- a/polkadot/primitives/src/vstaging/mod.rs +++ b/polkadot/primitives/src/vstaging/mod.rs @@ -64,10 +64,10 @@ pub mod node_features { /// Tells if tranch0 assignments could be sent in a single certificate. /// Reserved for: `` EnableAssignmentsV2 = 0, - /// This feature enables the extension of `BackedCandidate::validator_indices` by 8 bit. + /// This feature enables the extension of `BackedCandidate::validator_indices` by 8 bits. /// The value stored there represents the assumed core index where the candidates - /// are backed. - ElasticScalingCoreIndex = 1, + /// are backed. This is needed for the elastic scaling MVP. + ElasticScalingMVP = 1, /// First unassigned feature bit. /// Every time a new feature flag is assigned it should take this value. /// and this should be incremented. 
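To make the renamed `ElasticScalingMVP` feature bit above more concrete: when it is enabled, `BackedCandidate::validator_indices` is extended by one extra byte, and those trailing 8 bits encode the core index the candidate is assumed to be backed on. The following standalone sketch shows that encoding with the `bitvec` crate; the helper names `inject_core_index` and `split_core_index` are made up for the example and are not part of the patch, which handles the extra bits inside the backing subsystem and `BackedCandidate`.

use bitvec::{field::BitField, order::Lsb0, slice::BitSlice, vec::BitVec};

// Append one extra byte holding the core index to the validator bitfield.
fn inject_core_index(mut validator_indices: BitVec<u8, Lsb0>, core_index: u8) -> BitVec<u8, Lsb0> {
    validator_indices.extend(BitVec::<u8, Lsb0>::from_vec(vec![core_index]));
    validator_indices
}

// Split the extended bitfield back into the validator bits and the core index.
fn split_core_index(bits: &BitVec<u8, Lsb0>) -> (&BitSlice<u8, Lsb0>, u8) {
    let offset = bits.len().saturating_sub(8);
    let (validator_indices, core_idx_bits) = bits.split_at(offset);
    (validator_indices, core_idx_bits.load::<u8>())
}

fn main() {
    // Three out of five group members voted; the candidate is assumed to occupy core 2.
    let group_bits = bitvec::bitvec![u8, Lsb0; 1, 0, 1, 1, 0];
    let extended = inject_core_index(group_bits, 2);
    let (indices, core) = split_core_index(&extended);
    assert_eq!(indices.len(), 5);
    assert_eq!(core, 2);
}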
From 10f6486034037b53cee52070709fc648edc97166 Mon Sep 17 00:00:00 2001 From: alindima Date: Tue, 20 Feb 2024 10:07:15 +0200 Subject: [PATCH 26/51] rename ElasticScalingCoreIndex --- polkadot/runtime/parachains/src/inclusion/mod.rs | 2 +- polkadot/runtime/parachains/src/paras_inherent/mod.rs | 2 +- polkadot/runtime/parachains/src/util.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 978e7a5e94c6..4ae458ded5cc 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -620,7 +620,7 @@ impl Pallet { let core_index_enabled = configuration::Pallet::::config() .node_features - .get(FeatureIndex::ElasticScalingCoreIndex as usize) + .get(FeatureIndex::ElasticScalingMVP as usize) .map(|b| *b) .unwrap_or(false); let minimum_backing_votes = configuration::Pallet::::config().minimum_backing_votes; diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 5385e41270a2..a9aa9338bedc 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -1096,7 +1096,7 @@ fn filter_backed_statements_from_disabled_validators::config().minimum_backing_votes; let core_index_enabled = configuration::Pallet::::config() .node_features - .get(FeatureIndex::ElasticScalingCoreIndex as usize) + .get(FeatureIndex::ElasticScalingMVP as usize) .map(|b| *b) .unwrap_or(false); diff --git a/polkadot/runtime/parachains/src/util.rs b/polkadot/runtime/parachains/src/util.rs index 70d03005399b..597a8f7d512d 100644 --- a/polkadot/runtime/parachains/src/util.rs +++ b/polkadot/runtime/parachains/src/util.rs @@ -114,7 +114,7 @@ pub(crate) fn filter_elastic_scaling_candidates::config() .node_features - .get(FeatureIndex::ElasticScalingCoreIndex as usize) + .get(FeatureIndex::ElasticScalingMVP as usize) .map(|b| *b) .unwrap_or(false) { From f9e178d2c6950b62d83f967d49fc2828042392d7 Mon Sep 17 00:00:00 2001 From: alindima Date: Tue, 20 Feb 2024 11:41:19 +0200 Subject: [PATCH 27/51] address some comments --- polkadot/node/core/backing/src/lib.rs | 18 +-- polkadot/primitives/src/v6/mod.rs | 44 +++++--- .../runtime/parachains/src/inclusion/mod.rs | 24 +++- .../parachains/src/paras_inherent/mod.rs | 104 ++++++++++++++++-- polkadot/runtime/parachains/src/util.rs | 100 +---------------- 5 files changed, 151 insertions(+), 139 deletions(-) diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 19fcb57661b6..fe38f14ec599 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -228,7 +228,7 @@ struct PerRelayParentState { /// The minimum backing votes threshold. minimum_backing_votes: u32, /// If true, we're appendindg extra bits in the BackedCandidate validator indices bitfield, - /// which represent the assigned core index. + /// which represent the assigned core index. True when ElasticScalingMVP feature is enabled. 
inject_core_index: bool, /// The core state for all cores cores: Vec, @@ -488,20 +488,20 @@ fn table_attested_to_backed( } vote_positions.sort_by_key(|(_orig, pos_in_group)| *pos_in_group); - if inject_core_index { - let core_index_to_inject: BitVec = - BitVec::from_vec(vec![core_index.0 as u8]); - validator_indices.extend(core_index_to_inject); - } - - Some(BackedCandidate::new( + let mut backed_candidate = BackedCandidate::new( candidate, vote_positions .into_iter() .map(|(pos_in_votes, _pos_in_group)| validity_votes[pos_in_votes].clone()) .collect(), validator_indices, - )) + ); + + if inject_core_index { + backed_candidate.set_validator_indices_and_core_index(validator_indices, Some(core_index)); + } + + Some(backed_candidate) } async fn store_available_data( diff --git a/polkadot/primitives/src/v6/mod.rs b/polkadot/primitives/src/v6/mod.rs index bd8a0193e4e4..3e26eb03f505 100644 --- a/polkadot/primitives/src/v6/mod.rs +++ b/polkadot/primitives/src/v6/mod.rs @@ -760,36 +760,44 @@ impl BackedCandidate { self.candidate.to_plain() } - /// Get validator indices mutable reference - pub fn validator_indices(&self, core_index_enabled: bool) -> BitVec { + /// Get a copy of the validator indices and the assumed core index, if any. + pub fn validator_indices_and_core_index( + &self, + core_index_enabled: bool, + ) -> (BitVec, Option) { // This flag tells us if the block producers must enable Elastic Scaling MVP hack. // It extends `BackedCandidate::validity_indices` to store a 8 bit core index. if core_index_enabled { let core_idx_offset = self.validator_indices.len().saturating_sub(8); - let (validator_indices_slice, _core_idx_slice) = + let (validator_indices_slice, core_idx_slice) = self.validator_indices.split_at(core_idx_offset); - BitVec::from(validator_indices_slice) + ( + BitVec::from(validator_indices_slice), + Some(CoreIndex(core_idx_slice.load::() as u32)), + ) } else { - self.validator_indices.clone() + (self.validator_indices.clone(), None) } } - /// Update the validator indices in the candidate - pub fn set_validator_indices(&mut self, new_indices: BitVec) { + /// Update the validator indices and core index in the candidate. + pub fn set_validator_indices_and_core_index( + &mut self, + mut new_indices: BitVec, + core_index: Option, + ) { + if let Some(core_index) = core_index { + let core_index_to_inject: BitVec = + BitVec::from_vec(vec![core_index.0 as u8]); + new_indices.extend(core_index_to_inject); + } self.validator_indices = new_indices; } - /// Return the assumed core index of the backed candidate if any. - pub fn assumed_core_index(&self, core_index_enabled: bool) -> Option { - if core_index_enabled { - let core_idx_offset = self.validator_indices.len().saturating_sub(8); - let (_validator_indices_slice, core_idx_slice) = - self.validator_indices.split_at(core_idx_offset); - let core_idx: u8 = core_idx_slice.load(); - Some(CoreIndex(core_idx as u32)) - } else { - None - } + /// Get a copy of the validator indices. Note that it may contain an encoded core index also as + /// the last 8 bits. You must make sure to handle it properly or to have removed it beforehand. 
+ pub fn validator_indices(&self) -> BitVec { + self.validator_indices.clone() } } diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 4ae458ded5cc..5934e58e415f 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -687,9 +687,21 @@ impl Pallet { }; let para_id = backed_candidate.descriptor().para_id; - let core_idx = if let Some(core_idx) = - backed_candidate.assumed_core_index(core_index_enabled) - { + let (validator_indices, maybe_core_index) = + backed_candidate.validator_indices_and_core_index(core_index_enabled); + let core_idx = if let Some(core_idx) = maybe_core_index { + ensure!( + scheduled_by_core.get(&core_idx) == Some(¶_id), + Error::::UnscheduledCandidate + ); + + // We assume the core index is valid because of the checks done in + // `filter_elastic_scaling_candidates`. + + // Remove the core index from the validator indices if present. We'll no longer + // need it. + backed_candidate.set_validator_indices_and_core_index(validator_indices, None); + core_idx } else { *scheduled.get(¶_id).ok_or(Error::::UnscheduledCandidate)? @@ -753,14 +765,14 @@ impl Pallet { }, } + let validator_indices = backed_candidate.validator_indices(); let mut backer_idx_and_attestation = Vec::<(ValidatorIndex, ValidityAttestation)>::with_capacity( - backed_candidate.validator_indices(core_index_enabled).count_ones(), + validator_indices.count_ones(), ); let candidate_receipt = backed_candidate.receipt(); - for ((bit_idx, _), attestation) in backed_candidate - .validator_indices(core_index_enabled) + for ((bit_idx, _), attestation) in validator_indices .iter() .enumerate() .filter(|(_, signed)| **signed) diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index a9aa9338bedc..88fce191e214 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -31,7 +31,6 @@ use crate::{ paras, scheduler::{self, FreedReason}, shared::{self, AllowedRelayParentsTracker}, - util::filter_elastic_scaling_candidates, ParaId, }; use bitvec::prelude::BitVec; @@ -592,7 +591,17 @@ impl Pallet { METRICS.on_candidates_processed_total(backed_candidates.len() as u64); - filter_elastic_scaling_candidates::(&mut backed_candidates); + let core_index_enabled = configuration::Pallet::::config() + .node_features + .get(FeatureIndex::ElasticScalingMVP as usize) + .map(|b| *b) + .unwrap_or(false); + + filter_elastic_scaling_candidates::( + &allowed_relay_parents, + core_index_enabled, + &mut backed_candidates, + ); let SanitizedBackedCandidates { backed_candidates, votes_from_disabled_were_dropped } = sanitize_backed_candidates::( @@ -618,6 +627,7 @@ impl Pallet { .is_err() }, &scheduled, + core_index_enabled, ); METRICS.on_candidates_sanitized(backed_candidates.len() as u64); @@ -950,6 +960,7 @@ fn sanitize_backed_candidates< allowed_relay_parents: &AllowedRelayParentsTracker>, mut candidate_has_concluded_invalid_dispute_or_is_invalid: F, scheduled: &BTreeMap, + core_index_enabled: bool, ) -> SanitizedBackedCandidates { // Remove any candidates that were concluded invalid. // This does not assume sorting. 
@@ -973,6 +984,7 @@ fn sanitize_backed_candidates< &mut backed_candidates, &allowed_relay_parents, scheduled, + core_index_enabled, ); // Sort the `Vec` last, once there is a guarantee that these @@ -1079,6 +1091,7 @@ fn filter_backed_statements_from_disabled_validators::Hash>>, allowed_relay_parents: &AllowedRelayParentsTracker>, scheduled: &BTreeMap, + core_index_enabled: bool, ) -> bool { let disabled_validators = BTreeSet::<_>::from_iter(shared::Pallet::::disabled_validators().into_iter()); @@ -1094,18 +1107,15 @@ fn filter_backed_statements_from_disabled_validators::config().minimum_backing_votes; - let core_index_enabled = configuration::Pallet::::config() - .node_features - .get(FeatureIndex::ElasticScalingMVP as usize) - .map(|b| *b) - .unwrap_or(false); // Process all backed candidates. `validator_indices` in `BackedCandidates` are indices within // the validator group assigned to the parachain. To obtain this group we need: // 1. Core index assigned to the parachain which has produced the candidate // 2. The relay chain block number of the candidate backed_candidates.retain_mut(|bc| { - let core_idx = if let Some(core_idx) = bc.assumed_core_index(core_index_enabled) { + let (mut validator_indices, maybe_core_index) = bc.validator_indices_and_core_index(core_index_enabled); + + let core_idx = if let Some(core_idx) = maybe_core_index { core_idx } else { // Get `core_idx` assigned to the `para_id` of the candidate @@ -1151,13 +1161,12 @@ fn filter_backed_statements_from_disabled_validators::from_iter(validator_group.iter().map(|idx| disabled_validators.contains(idx))); - let mut validator_indices = bc.validator_indices(core_index_enabled); // The indices of statements from disabled validators in `BackedCandidate`. We have to drop these. let indices_to_drop = disabled_indices.clone() & &validator_indices; // Apply the bitmask to drop the disabled validator from `validator_indices` validator_indices &= !disabled_indices; // Update the backed candidate - bc.set_validator_indices(validator_indices); + bc.set_validator_indices_and_core_index(validator_indices, maybe_core_index); // Remove the corresponding votes from `validity_votes` for idx in indices_to_drop.iter_ones().rev() { @@ -1185,3 +1194,78 @@ fn filter_backed_statements_from_disabled_validators( + allowed_relay_parents: &AllowedRelayParentsTracker>, + core_index_enabled: bool, + candidates: &mut Vec>, +) { + // Count how many scheduled cores each paraid has. + let mut cores_per_parachain: BTreeMap = BTreeMap::new(); + + for (_, para_id) in >::scheduled_paras() { + *cores_per_parachain.entry(para_id).or_default() += 1; + } + + // We keep a candidate if the parachain has only one core assigned or if + // a core index is provided by block author. + candidates.retain(|candidate| { + *cores_per_parachain.get(&candidate.descriptor().para_id).unwrap_or(&0) <= 1 || + has_core_index::(allowed_relay_parents, candidate, core_index_enabled) + }); +} + +// Returns `true` if the candidate contains a valid injected `CoreIndex`. +fn has_core_index( + allowed_relay_parents: &AllowedRelayParentsTracker>, + candidate: &BackedCandidate, + core_index_enabled: bool, +) -> bool { + // After stripping the 8 bit extensions, the `validator_indices` field length is expected + // to be equal to backing group size. If these don't match, the `CoreIndex` is badly encoded, + // or not supported. 
+ let (validator_indices, maybe_core_idx) = + candidate.validator_indices_and_core_index(core_index_enabled); + + let Some(core_idx) = maybe_core_idx else { return false }; + + let relay_parent_block_number = + match allowed_relay_parents.acquire_info(candidate.descriptor().relay_parent, None) { + Some((_, block_num)) => block_num, + None => { + log::debug!( + target: LOG_TARGET, + "Relay parent {:?} for candidate {:?} is not in the allowed relay parents. Dropping the candidate.", + candidate.descriptor().relay_parent, + candidate.candidate().hash(), + ); + return false + }, + }; + + // Get the backing group of the candidate backed at `core_idx`. + let group_idx = + match >::group_assigned_to_core(core_idx, relay_parent_block_number) { + Some(group_idx) => group_idx, + None => { + log::debug!( + target: LOG_TARGET, + "Can't get the group index for core idx {:?}. Dropping the candidate {:?}.", + core_idx, + candidate.candidate().hash(), + ); + return false + }, + }; + + let group_validators = match >::group_validators(group_idx) { + Some(validators) => validators, + None => return false, + }; + + group_validators.len() == validator_indices.len() +} diff --git a/polkadot/runtime/parachains/src/util.rs b/polkadot/runtime/parachains/src/util.rs index 597a8f7d512d..588c224079ec 100644 --- a/polkadot/runtime/parachains/src/util.rs +++ b/polkadot/runtime/parachains/src/util.rs @@ -17,18 +17,11 @@ //! Utilities that don't belong to any particular module but may draw //! on all modules. -use bitvec::field::BitField; use frame_system::pallet_prelude::BlockNumberFor; -use primitives::{ - vstaging::node_features::FeatureIndex, BackedCandidate, CoreIndex, Id as ParaId, - PersistedValidationData, ValidatorIndex, -}; -use sp_std::{ - collections::{btree_map::BTreeMap, btree_set::BTreeSet}, - vec::Vec, -}; - -use crate::{configuration, hrmp, paras, scheduler}; +use primitives::{Id as ParaId, PersistedValidationData, ValidatorIndex}; +use sp_std::{collections::btree_set::BTreeSet, vec::Vec}; + +use crate::{configuration, hrmp, paras}; /// Make the persisted validation data for a particular parachain, a specified relay-parent and it's /// storage root. /// @@ -104,91 +97,6 @@ pub fn take_active_subset(active: &[ValidatorIndex], set: &[T]) -> Vec subset } -/// On first pass it filters out all candidates that have multiple cores assigned and no -/// `CoreIndex` injected. -/// -/// On second pass we filter out candidates with the same core index. This can happen if for example -/// collators distribute collations to multiple backing groups. -pub(crate) fn filter_elastic_scaling_candidates( - candidates: &mut Vec>, -) { - if !configuration::Pallet::::config() - .node_features - .get(FeatureIndex::ElasticScalingMVP as usize) - .map(|b| *b) - .unwrap_or(false) - { - // we don't touch the candidates, since we don't expect block producers - // to inject `CoreIndex`. - return - } - - // A mapping from parachain to all assigned cores. - let mut cores_per_parachain: BTreeMap> = BTreeMap::new(); - - for (core_index, para_id) in >::scheduled_paras() { - cores_per_parachain.entry(para_id).or_default().push(core_index); - } - - // We keep a candidate if the parachain has only one core assigned or if - // a core index is provided by block author. 
- candidates.retain(|candidate| { - !cores_per_parachain - .get(&candidate.candidate().descriptor.para_id) - .map(|cores| cores.len()) - .unwrap_or(0) > - 1 || has_core_index::(candidate, true) - }); - - let mut used_cores = BTreeSet::new(); - - // We keep one candidate per core in case multiple candidates of same para end up backed on same - // core. This can be further refined to pick the candidate that has the parenthead equal - // to the one in storage. - candidates.retain(|candidate| { - if let Some(core_index) = candidate.assumed_core_index(true) { - // Drop candidate if the core was already used by a previous candidate. - used_cores.insert(core_index) - } else { - // This shouldn't happen, but at this point the candidate without core index is fine - // since we know the para:core mapping is unique. - true - } - }) -} - -// Returns `true` if the candidate contains an injected `CoreIndex`. -fn has_core_index( - candidate: &BackedCandidate, - core_index_enabled: bool, -) -> bool { - // After stripping the 8 bit extensions, the `validator_indices` field length is expected - // to be equal to backing group size. If these don't match, the `CoreIndex` is badly encoded, - // or not supported. - let core_idx_offset = candidate.validator_indices(core_index_enabled).len().saturating_sub(8); - let validator_indices_raw = candidate.validator_indices(core_index_enabled); - let (validator_indices_slice, core_idx_slice) = validator_indices_raw.split_at(core_idx_offset); - let core_idx: u8 = core_idx_slice.load(); - - let current_block = frame_system::Pallet::::block_number(); - - // Get the backing group of the candidate backed at `core_idx`. - let group_idx = match >::group_assigned_to_core( - CoreIndex(core_idx as u32), - current_block, - ) { - Some(group_idx) => group_idx, - None => return false, - }; - - let group_validators = match >::group_validators(group_idx) { - Some(validators) => validators, - None => return false, - }; - - group_validators.len() == validator_indices_slice.len() -} - #[cfg(test)] mod tests { From ec7b6602073936448c9458f2fe2e2e9f606c741e Mon Sep 17 00:00:00 2001 From: alindima Date: Tue, 20 Feb 2024 12:01:29 +0200 Subject: [PATCH 28/51] +1 --- .../parachains/src/paras_inherent/mod.rs | 28 ++++++++++--------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 88fce191e214..aead47d12206 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -1248,19 +1248,21 @@ fn has_core_index>::group_assigned_to_core(core_idx, relay_parent_block_number) { - Some(group_idx) => group_idx, - None => { - log::debug!( - target: LOG_TARGET, - "Can't get the group index for core idx {:?}. Dropping the candidate {:?}.", - core_idx, - candidate.candidate().hash(), - ); - return false - }, - }; + let group_idx = match >::group_assigned_to_core( + core_idx, + relay_parent_block_number + One::one(), + ) { + Some(group_idx) => group_idx, + None => { + log::debug!( + target: LOG_TARGET, + "Can't get the group index for core idx {:?}. 
Dropping the candidate {:?}.", + core_idx, + candidate.candidate().hash(), + ); + return false + }, + }; let group_validators = match >::group_validators(group_idx) { Some(validators) => validators, From 578850bac9e3ac19ff327dfa9e72756546254f2b Mon Sep 17 00:00:00 2001 From: alindima Date: Tue, 20 Feb 2024 13:21:01 +0200 Subject: [PATCH 29/51] more comments --- polkadot/primitives/src/v6/mod.rs | 51 ++++++++----------- .../runtime/parachains/src/inclusion/mod.rs | 44 ++++++++-------- .../parachains/src/paras_inherent/mod.rs | 20 ++++++-- 3 files changed, 56 insertions(+), 59 deletions(-) diff --git a/polkadot/primitives/src/v6/mod.rs b/polkadot/primitives/src/v6/mod.rs index 3e26eb03f505..5551ba660a22 100644 --- a/polkadot/primitives/src/v6/mod.rs +++ b/polkadot/primitives/src/v6/mod.rs @@ -16,7 +16,7 @@ //! `V6` Primitives. -use bitvec::{field::BitField, vec::BitVec}; +use bitvec::{field::BitField, slice::BitSlice, vec::BitVec}; use parity_scale_codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_std::{ @@ -724,22 +724,22 @@ impl BackedCandidate { Self { candidate, validity_votes, validator_indices } } - /// Get a reference to the descriptor of the para. + /// Get a reference to the descriptor of the candidate. pub fn descriptor(&self) -> &CandidateDescriptor { &self.candidate.descriptor } - /// Get a reference to the descriptor of the para. + /// Get a reference to the committed candidate receipt of the candidate. pub fn candidate(&self) -> &CommittedCandidateReceipt { &self.candidate } - /// Get a reference to the descriptor of the para. + /// Get a reference to the validity votes of the candidate. pub fn validity_votes(&self) -> &[ValidityAttestation] { &self.validity_votes } - /// Get a reference to the descriptor of the para. + /// Get a mutable reference to validity votes of the para. pub fn validity_votes_mut(&mut self) -> &mut Vec { &mut self.validity_votes } @@ -764,19 +764,16 @@ impl BackedCandidate { pub fn validator_indices_and_core_index( &self, core_index_enabled: bool, - ) -> (BitVec, Option) { + ) -> (&BitSlice, Option) { // This flag tells us if the block producers must enable Elastic Scaling MVP hack. // It extends `BackedCandidate::validity_indices` to store a 8 bit core index. if core_index_enabled { let core_idx_offset = self.validator_indices.len().saturating_sub(8); let (validator_indices_slice, core_idx_slice) = self.validator_indices.split_at(core_idx_offset); - ( - BitVec::from(validator_indices_slice), - Some(CoreIndex(core_idx_slice.load::() as u32)), - ) + (validator_indices_slice, Some(CoreIndex(core_idx_slice.load::() as u32))) } else { - (self.validator_indices.clone(), None) + (&self.validator_indices, None) } } @@ -793,12 +790,6 @@ impl BackedCandidate { } self.validator_indices = new_indices; } - - /// Get a copy of the validator indices. Note that it may contain an encoded core index also as - /// the last 8 bits. You must make sure to handle it properly or to have removed it beforehand. - pub fn validator_indices(&self) -> BitVec { - self.validator_indices.clone() - } } /// Verify the backing of the given candidate. @@ -812,44 +803,42 @@ impl BackedCandidate { /// Returns either an error, indicating that one of the signatures was invalid or that the index /// was out-of-bounds, or the number of signatures checked. 
pub fn check_candidate_backing + Clone + Encode + core::fmt::Debug>( - backed: &BackedCandidate, + candidate_hash: CandidateHash, + validity_votes: &[ValidityAttestation], + validator_indices: &BitSlice, signing_context: &SigningContext, group_len: usize, validator_lookup: impl Fn(usize) -> Option, ) -> Result { - if backed.validator_indices.len() != group_len { + if validator_indices.len() != group_len { log::debug!( target: LOG_TARGET, "indices mismatch: group_len = {} , indices_len = {}", group_len, - backed.validator_indices.len(), + validator_indices.len(), ); return Err(()) } - if backed.validity_votes.len() > group_len { + if validity_votes.len() > group_len { log::debug!( target: LOG_TARGET, "Too many votes, expected: {}, found: {}", group_len, - backed.validity_votes.len(), + validity_votes.len(), ); return Err(()) } - // this is known, even in runtime, to be blake2-256. - let hash = backed.candidate.hash(); - let mut signed = 0; - for ((val_in_group_idx, _), attestation) in backed - .validator_indices + for ((val_in_group_idx, _), attestation) in validator_indices .iter() .enumerate() .filter(|(_, signed)| **signed) - .zip(backed.validity_votes.iter()) + .zip(validity_votes.iter()) { let validator_id = validator_lookup(val_in_group_idx).ok_or(())?; - let payload = attestation.signed_payload(hash, signing_context); + let payload = attestation.signed_payload(candidate_hash, signing_context); let sig = attestation.signature(); if sig.verify(&payload[..], &validator_id) { @@ -865,11 +854,11 @@ pub fn check_candidate_backing + Clone + Encode + core::fmt::Debu } } - if signed != backed.validity_votes.len() { + if signed != validity_votes.len() { log::error!( target: LOG_TARGET, "Too many signatures, expected = {}, found = {}", - backed.validity_votes.len() , + validity_votes.len() , signed, ); return Err(()) diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 5934e58e415f..cda75cf650a9 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -602,7 +602,7 @@ impl Pallet { /// scheduled cores. If these conditions are not met, the execution of the function fails. pub(crate) fn process_candidates( allowed_relay_parents: &AllowedRelayParentsTracker>, - mut candidates: Vec>, + candidates: Vec>, scheduled: &BTreeMap, scheduled_by_core: &BTreeMap, group_validators: GV, @@ -655,7 +655,7 @@ impl Pallet { // // In the meantime, we do certain sanity checks on the candidates and on the scheduled // list. - for (candidate_idx, backed_candidate) in candidates.iter_mut().enumerate() { + for (candidate_idx, backed_candidate) in candidates.iter().enumerate() { let relay_parent_hash = backed_candidate.descriptor().relay_parent; let para_id = backed_candidate.descriptor().para_id; @@ -670,7 +670,7 @@ impl Pallet { let relay_parent_number = match check_ctx.verify_backed_candidate( &allowed_relay_parents, candidate_idx, - backed_candidate, + backed_candidate.candidate(), )? { Err(FailedToCreatePVD) => { log::debug!( @@ -686,7 +686,6 @@ impl Pallet { Ok(rpn) => rpn, }; - let para_id = backed_candidate.descriptor().para_id; let (validator_indices, maybe_core_index) = backed_candidate.validator_indices_and_core_index(core_index_enabled); let core_idx = if let Some(core_idx) = maybe_core_index { @@ -698,10 +697,6 @@ impl Pallet { // We assume the core index is valid because of the checks done in // `filter_elastic_scaling_candidates`. 
- // Remove the core index from the validator indices if present. We'll no longer - // need it. - backed_candidate.set_validator_indices_and_core_index(validator_indices, None); - core_idx } else { *scheduled.get(¶_id).ok_or(Error::::UnscheduledCandidate)? @@ -740,7 +735,9 @@ impl Pallet { // check the signatures in the backing and that it is a majority. { let maybe_amount_validated = primitives::check_candidate_backing( - &backed_candidate, + backed_candidate.candidate().hash(), + backed_candidate.validity_votes(), + validator_indices, &signing_context, group_vals.len(), |intra_group_vi| { @@ -765,7 +762,6 @@ impl Pallet { }, } - let validator_indices = backed_candidate.validator_indices(); let mut backer_idx_and_attestation = Vec::<(ValidatorIndex, ValidityAttestation)>::with_capacity( validator_indices.count_ones(), @@ -1226,10 +1222,10 @@ impl CandidateCheckContext { &self, allowed_relay_parents: &AllowedRelayParentsTracker>, candidate_idx: usize, - backed_candidate: &BackedCandidate<::Hash>, + backed_candidate_receipt: &CommittedCandidateReceipt<::Hash>, ) -> Result, FailedToCreatePVD>, Error> { - let para_id = backed_candidate.descriptor().para_id; - let relay_parent = backed_candidate.descriptor().relay_parent; + let para_id = backed_candidate_receipt.descriptor().para_id; + let relay_parent = backed_candidate_receipt.descriptor().relay_parent; // Check that the relay-parent is one of the allowed relay-parents. let (relay_parent_storage_root, relay_parent_number) = { @@ -1254,13 +1250,13 @@ impl CandidateCheckContext { let expected = persisted_validation_data.hash(); ensure!( - expected == backed_candidate.descriptor().persisted_validation_data_hash, + expected == backed_candidate_receipt.descriptor().persisted_validation_data_hash, Error::::ValidationDataHashMismatch, ); } ensure!( - backed_candidate.descriptor().check_collator_signature().is_ok(), + backed_candidate_receipt.descriptor().check_collator_signature().is_ok(), Error::::NotCollatorSigned, ); @@ -1268,25 +1264,25 @@ impl CandidateCheckContext { // A candidate for a parachain without current validation code is not scheduled. 
.ok_or_else(|| Error::::UnscheduledCandidate)?; ensure!( - backed_candidate.descriptor().validation_code_hash == validation_code_hash, + backed_candidate_receipt.descriptor().validation_code_hash == validation_code_hash, Error::::InvalidValidationCodeHash, ); ensure!( - backed_candidate.descriptor().para_head == - backed_candidate.candidate().commitments.head_data.hash(), + backed_candidate_receipt.descriptor().para_head == + backed_candidate_receipt.commitments.head_data.hash(), Error::::ParaHeadMismatch, ); if let Err(err) = self.check_validation_outputs( para_id, relay_parent_number, - &backed_candidate.candidate().commitments.head_data, - &backed_candidate.candidate().commitments.new_validation_code, - backed_candidate.candidate().commitments.processed_downward_messages, - &backed_candidate.candidate().commitments.upward_messages, - BlockNumberFor::::from(backed_candidate.candidate().commitments.hrmp_watermark), - &backed_candidate.candidate().commitments.horizontal_messages, + &backed_candidate_receipt.commitments.head_data, + &backed_candidate_receipt.commitments.new_validation_code, + backed_candidate_receipt.commitments.processed_downward_messages, + &backed_candidate_receipt.commitments.upward_messages, + BlockNumberFor::::from(backed_candidate_receipt.commitments.hrmp_watermark), + &backed_candidate_receipt.commitments.horizontal_messages, ) { log::debug!( target: LOG_TARGET, diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index aead47d12206..94d3417cc596 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -597,11 +597,18 @@ impl Pallet { .map(|b| *b) .unwrap_or(false); - filter_elastic_scaling_candidates::( + let dropped_elastic_scaling_candidates = filter_elastic_scaling_candidates::( &allowed_relay_parents, core_index_enabled, &mut backed_candidates, ); + // In `Enter` context (invoked during execution) we shouldn't have filtered any candidates + // due to a para having multiple cores assigned and no injected core index. They have been + // filtered during inherent data preparation (`ProvideInherent` context). Abort in such + // cases. + if context == ProcessInherentDataContext::Enter { + ensure!(!dropped_elastic_scaling_candidates, Error::::BackedByDisabled); + } let SanitizedBackedCandidates { backed_candidates, votes_from_disabled_were_dropped } = sanitize_backed_candidates::( @@ -623,7 +630,7 @@ impl Pallet { // // NOTE: this is the only place where we check the relay-parent. check_ctx - .verify_backed_candidate(&allowed_relay_parents, candidate_idx, backed_candidate) + .verify_backed_candidate(&allowed_relay_parents, candidate_idx, backed_candidate.candidate()) .is_err() }, &scheduled, @@ -1113,7 +1120,8 @@ fn filter_backed_statements_from_disabled_validators::from(validator_indices); let core_idx = if let Some(core_idx) = maybe_core_index { core_idx @@ -1197,13 +1205,14 @@ fn filter_backed_statements_from_disabled_validators( allowed_relay_parents: &AllowedRelayParentsTracker>, core_index_enabled: bool, candidates: &mut Vec>, -) { +) -> bool { // Count how many scheduled cores each paraid has. let mut cores_per_parachain: BTreeMap = BTreeMap::new(); @@ -1211,12 +1220,15 @@ fn filter_elastic_scaling_candidates< *cores_per_parachain.entry(para_id).or_default() += 1; } + let prev_count = candidates.len(); // We keep a candidate if the parachain has only one core assigned or if // a core index is provided by block author. 
candidates.retain(|candidate| { *cores_per_parachain.get(&candidate.descriptor().para_id).unwrap_or(&0) <= 1 || has_core_index::(allowed_relay_parents, candidate, core_index_enabled) }); + + prev_count != candidates.len() } // Returns `true` if the candidate contains a valid injected `CoreIndex`. From 362ff1eac2335092899acdf72400fea1cc03f251 Mon Sep 17 00:00:00 2001 From: alindima Date: Tue, 20 Feb 2024 17:35:38 +0200 Subject: [PATCH 30/51] add a backing test --- polkadot/node/core/backing/src/lib.rs | 4 +- polkadot/node/core/backing/src/tests/mod.rs | 60 +++++++++++++++++---- 2 files changed, 53 insertions(+), 11 deletions(-) diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index fe38f14ec599..92e2d8c84289 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -70,7 +70,7 @@ use std::{ sync::Arc, }; -use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; +use bitvec::vec::BitVec; use futures::{ channel::{mpsc, oneshot}, future::BoxFuture, @@ -494,7 +494,7 @@ fn table_attested_to_backed( .into_iter() .map(|(pos_in_votes, _pos_in_group)| validity_votes[pos_in_votes].clone()) .collect(), - validator_indices, + validator_indices.clone(), ); if inject_core_index { diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index 5aca6ee951f0..34c0ff6f2067 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -33,9 +33,10 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_primitives::{ - CandidateDescriptor, GroupRotationInfo, HeadData, PersistedValidationData, PvfExecKind, - ScheduledCore, SessionIndex, LEGACY_MIN_BACKING_VOTES, + vstaging::node_features, CandidateDescriptor, GroupRotationInfo, HeadData, + PersistedValidationData, PvfExecKind, ScheduledCore, SessionIndex, LEGACY_MIN_BACKING_VOTES, }; +use rstest::rstest; use sp_application_crypto::AppCrypto; use sp_keyring::Sr25519Keyring; use sp_keystore::Keystore; @@ -78,6 +79,7 @@ pub(crate) struct TestState { relay_parent: Hash, minimum_backing_votes: u32, disabled_validators: Vec, + node_features: NodeFeatures, } impl TestState { @@ -150,6 +152,7 @@ impl Default for TestState { relay_parent, minimum_backing_votes: LEGACY_MIN_BACKING_VOTES, disabled_validators: Vec::new(), + node_features: Default::default(), } } } @@ -291,7 +294,7 @@ async fn test_startup(virtual_overseer: &mut VirtualOverseer, test_state: &TestS AllMessages::RuntimeApi( RuntimeApiMessage::Request(_parent, RuntimeApiRequest::NodeFeatures(_session_index, tx)) ) => { - tx.send(Ok(Default::default())).unwrap(); + tx.send(Ok(test_state.node_features.clone())).unwrap(); } ); @@ -487,9 +490,20 @@ fn backing_second_works() { } // Test that the candidate reaches quorum successfully. 
-#[test] -fn backing_works() { - let test_state = TestState::default(); +#[rstest] +#[case(true)] +#[case(false)] +fn backing_works(#[case] elastic_scaling_mvp: bool) { + let mut test_state = TestState::default(); + if elastic_scaling_mvp { + test_state + .node_features + .resize((node_features::FeatureIndex::ElasticScalingMVP as u8 + 1) as usize, false); + test_state + .node_features + .set(node_features::FeatureIndex::ElasticScalingMVP as u8 as usize, true); + } + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { test_startup(&mut virtual_overseer, &test_state).await; @@ -640,6 +654,31 @@ fn backing_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + let (tx, rx) = oneshot::channel(); + let msg = CandidateBackingMessage::GetBackedCandidates( + vec![(candidate_a_hash, test_state.relay_parent)], + tx, + ); + + virtual_overseer.send(FromOrchestra::Communication { msg }).await; + + let candidates = rx.await.unwrap(); + assert_eq!(1, candidates.len()); + assert_eq!(candidates[0].validity_votes().len(), 3); + + let (validator_indices, maybe_core_index) = + candidates[0].validator_indices_and_core_index(elastic_scaling_mvp); + if elastic_scaling_mvp { + assert_eq!(maybe_core_index.unwrap(), CoreIndex(0)); + } else { + assert!(maybe_core_index.is_none()); + } + + assert_eq!( + validator_indices, + bitvec::bitvec![u8, bitvec::order::Lsb0; 1, 1, 0, 1].as_bitslice() + ); + virtual_overseer .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves( ActiveLeavesUpdate::stop_work(test_state.relay_parent), @@ -924,8 +963,8 @@ fn backing_works_while_validation_ongoing() { .validity_votes() .contains(&ValidityAttestation::Explicit(signed_c.signature().clone()))); assert_eq!( - candidates[0].validator_indices(false), - bitvec::bitvec![u8, bitvec::order::Lsb0; 1, 0, 1, 1], + candidates[0].validator_indices_and_core_index(false), + (bitvec::bitvec![u8, bitvec::order::Lsb0; 1, 0, 1, 1].as_bitslice(), None) ); virtual_overseer @@ -1597,7 +1636,10 @@ fn candidate_backing_reorders_votes() { let expected_attestations = vec![fake_attestation(1).into(), fake_attestation(3).into(), fake_attestation(5).into()]; - assert_eq!(backed.validator_indices(false), expected_bitvec); + assert_eq!( + backed.validator_indices_and_core_index(false), + (expected_bitvec.as_bitslice(), None) + ); assert_eq!(backed.validity_votes(), expected_attestations); } From 238536937abe9bacb39c98d0369b2af85c83974e Mon Sep 17 00:00:00 2001 From: alindima Date: Tue, 20 Feb 2024 17:36:28 +0200 Subject: [PATCH 31/51] add rstest --- Cargo.lock | 36 +++++++++++++++++++++++++++ polkadot/node/core/backing/Cargo.toml | 1 + 2 files changed, 37 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index b26a9d5d3351..e417639e8a34 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12486,6 +12486,7 @@ dependencies = [ "polkadot-primitives", "polkadot-primitives-test-helpers", "polkadot-statement-table", + "rstest", "sc-keystore", "sp-application-crypto", "sp-core", @@ -14763,6 +14764,12 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +[[package]] +name = "relative-path" +version = "1.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e898588f33fdd5b9420719948f9f2a32c922a246964576f71ba7f24f80610fbc" + [[package]] name = "remote-ext-tests-bags-list" version = "1.0.0" @@ -15145,6 +15152,35 @@ dependencies = [ "winapi", ] +[[package]] +name = "rstest" 
+version = "0.18.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97eeab2f3c0a199bc4be135c36c924b6590b88c377d416494288c14f2db30199" +dependencies = [ + "futures", + "futures-timer", + "rstest_macros", + "rustc_version 0.4.0", +] + +[[package]] +name = "rstest_macros" +version = "0.18.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d428f8247852f894ee1be110b375111b586d4fa431f6c46e64ba5a0dcccbe605" +dependencies = [ + "cfg-if", + "glob", + "proc-macro2", + "quote", + "regex", + "relative-path", + "rustc_version 0.4.0", + "syn 2.0.49", + "unicode-ident", +] + [[package]] name = "rtnetlink" version = "0.10.1" diff --git a/polkadot/node/core/backing/Cargo.toml b/polkadot/node/core/backing/Cargo.toml index 1c69fc441b32..c1c6443ce05d 100644 --- a/polkadot/node/core/backing/Cargo.toml +++ b/polkadot/node/core/backing/Cargo.toml @@ -31,5 +31,6 @@ sc-keystore = { path = "../../../../substrate/client/keystore" } sp-tracing = { path = "../../../../substrate/primitives/tracing" } futures = { version = "0.3.21", features = ["thread-pool"] } assert_matches = "1.4.0" +rstest = "0.18.2" polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" } From c793b89835a01136e0cfdc1c987b6026858864e8 Mon Sep 17 00:00:00 2001 From: alindima Date: Tue, 20 Feb 2024 17:47:29 +0200 Subject: [PATCH 32/51] small nits and typos Signed-off-by: alindima --- polkadot/node/core/backing/src/lib.rs | 6 +++--- polkadot/node/core/provisioner/src/lib.rs | 6 ++++-- polkadot/primitives/src/v6/mod.rs | 8 ++++---- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 50084e1658b4..a6fe0ddbfb91 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -227,10 +227,10 @@ struct PerRelayParentState { fallbacks: HashMap, /// The minimum backing votes threshold. minimum_backing_votes: u32, - /// If true, we're appendindg extra bits in the BackedCandidate validator indices bitfield, - /// which represent the assigned core index. + /// If true, we're appending extra bits in the BackedCandidate validator indices bitfield, + /// which represent the assigned core index. True if ElasticScalingMVP is enabled. inject_core_index: bool, - /// The core state for all cores + /// The core states for all cores. cores: Vec, /// The validator groups at this relay parent. validator_groups: Vec>, diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs index e130780c2f3d..d98f6ebfe428 100644 --- a/polkadot/node/core/provisioner/src/lib.rs +++ b/polkadot/node/core/provisioner/src/lib.rs @@ -682,7 +682,9 @@ async fn request_backable_candidates( }; // We should be calling this once per para rather than per core. - // TODO: Will be fixed in https://github.com/paritytech/polkadot-sdk/pull/3233 + // TODO: Will be fixed in https://github.com/paritytech/polkadot-sdk/pull/3233. + // For now, at least make sure we don't supply the same candidate multiple times in case a + // para has multiple cores scheduled. 
let response = get_backable_candidate(relay_parent, para_id, required_path, sender).await?; match response { Some((hash, relay_parent)) => { @@ -731,7 +733,7 @@ async fn select_candidates( ) .await?, }; - gum::debug!(target: LOG_TARGET, ?selected_candidates, "Got backeable candidates"); + gum::debug!(target: LOG_TARGET, ?selected_candidates, "Got backable candidates"); // now get the backed candidates corresponding to these candidate receipts let (tx, rx) = oneshot::channel(); diff --git a/polkadot/primitives/src/v6/mod.rs b/polkadot/primitives/src/v6/mod.rs index ffb287e581d6..538eb3855848 100644 --- a/polkadot/primitives/src/v6/mod.rs +++ b/polkadot/primitives/src/v6/mod.rs @@ -756,7 +756,7 @@ pub fn check_candidate_backing + Clone + Encode + core::fmt::Debu if backed.validator_indices.len() != group_len { log::debug!( target: LOG_TARGET, - "indices mismatch: group_len = {} , indices_len = {}", + "Check candidate backing: indices mismatch: group_len = {} , indices_len = {}", group_len, backed.validator_indices.len(), ); @@ -766,7 +766,7 @@ pub fn check_candidate_backing + Clone + Encode + core::fmt::Debu if backed.validity_votes.len() > group_len { log::debug!( target: LOG_TARGET, - "Too many votes, expected: {}, found: {}", + "Check candidate backing: Too many votes, expected: {}, found: {}", group_len, backed.validity_votes.len(), ); @@ -793,7 +793,7 @@ pub fn check_candidate_backing + Clone + Encode + core::fmt::Debu } else { log::debug!( target: LOG_TARGET, - "Invalid signature. validator_id = {:?}, validator_index = {} ", + "Check candidate backing: Invalid signature. validator_id = {:?}, validator_index = {} ", validator_id, val_in_group_idx, ); @@ -804,7 +804,7 @@ pub fn check_candidate_backing + Clone + Encode + core::fmt::Debu if signed != backed.validity_votes.len() { log::error!( target: LOG_TARGET, - "Too many signatures, expected = {}, found = {}", + "Check candidate backing: Too many signatures, expected = {}, found = {}", backed.validity_votes.len() , signed, ); From 19c9a67e264520f4bc813abec7a2901bea837c52 Mon Sep 17 00:00:00 2001 From: alindima Date: Wed, 21 Feb 2024 10:01:21 +0200 Subject: [PATCH 33/51] review comments --- polkadot/node/core/backing/src/lib.rs | 13 +++----- polkadot/node/core/provisioner/src/tests.rs | 4 +++ polkadot/primitives/src/v6/mod.rs | 33 ++++++++++++++----- .../runtime/parachains/src/inclusion/tests.rs | 2 +- .../parachains/src/paras_inherent/mod.rs | 8 ++++- polkadot/runtime/parachains/src/util.rs | 33 ++----------------- 6 files changed, 43 insertions(+), 50 deletions(-) diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 2a73a6e0c37b..734582485ea8 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -488,20 +488,15 @@ fn table_attested_to_backed( } vote_positions.sort_by_key(|(_orig, pos_in_group)| *pos_in_group); - let mut backed_candidate = BackedCandidate::new( + Some(BackedCandidate::new( candidate, vote_positions .into_iter() .map(|(pos_in_votes, _pos_in_group)| validity_votes[pos_in_votes].clone()) .collect(), - validator_indices.clone(), - ); - - if inject_core_index { - backed_candidate.set_validator_indices_and_core_index(validator_indices, Some(core_index)); - } - - Some(backed_candidate) + validator_indices, + inject_core_index.then_some(core_index), + )) } async fn store_available_data( diff --git a/polkadot/node/core/provisioner/src/tests.rs b/polkadot/node/core/provisioner/src/tests.rs index 5d9425196844..87c0e7a65d35 100644 --- 
a/polkadot/node/core/provisioner/src/tests.rs +++ b/polkadot/node/core/provisioner/src/tests.rs @@ -468,6 +468,7 @@ mod select_candidates { }, Vec::new(), default_bitvec(MOCK_GROUP_SIZE), + None, ) }) .collect(); @@ -539,6 +540,7 @@ mod select_candidates { committed_receipt.clone(), Vec::new(), default_bitvec(MOCK_GROUP_SIZE), + None, ) }) .collect(); @@ -617,6 +619,7 @@ mod select_candidates { }, Vec::new(), default_bitvec(MOCK_GROUP_SIZE), + None, ) }) .collect(); @@ -685,6 +688,7 @@ mod select_candidates { }, Vec::new(), default_bitvec(MOCK_GROUP_SIZE), + None, ) }) .collect(); diff --git a/polkadot/primitives/src/v6/mod.rs b/polkadot/primitives/src/v6/mod.rs index e192ccf6a558..32f16b0b3c0e 100644 --- a/polkadot/primitives/src/v6/mod.rs +++ b/polkadot/primitives/src/v6/mod.rs @@ -710,7 +710,9 @@ pub struct BackedCandidate { candidate: CommittedCandidateReceipt, /// The validity votes themselves, expressed as signatures. validity_votes: Vec, - /// The indices of the validators within the group, expressed as a bitfield. + /// The indices of the validators within the group, expressed as a bitfield. May be extended + /// beyond the backing group size to contain the assigned core index, if ElasticScalingMVP is + /// enabled. validator_indices: BitVec, } @@ -720,8 +722,13 @@ impl BackedCandidate { candidate: CommittedCandidateReceipt, validity_votes: Vec, validator_indices: BitVec, + core_index: Option, ) -> Self { - Self { candidate, validity_votes, validator_indices } + let mut instance = Self { candidate, validity_votes, validator_indices }; + if let Some(core_index) = core_index { + instance.inject_core_index(core_index); + } + instance } /// Get a reference to the descriptor of the candidate. @@ -777,18 +784,24 @@ impl BackedCandidate { } } + /// Inject a core index in the validator_indices bitvec. + fn inject_core_index(&mut self, core_index: CoreIndex) { + let core_index_to_inject: BitVec = + BitVec::from_vec(vec![core_index.0 as u8]); + self.validator_indices.extend(core_index_to_inject); + } + /// Update the validator indices and core index in the candidate. 
pub fn set_validator_indices_and_core_index( &mut self, - mut new_indices: BitVec, - core_index: Option, + new_indices: BitVec, + maybe_core_index: Option, ) { - if let Some(core_index) = core_index { - let core_index_to_inject: BitVec = - BitVec::from_vec(vec![core_index.0 as u8]); - new_indices.extend(core_index_to_inject); - } self.validator_indices = new_indices; + + if let Some(core_index) = maybe_core_index { + self.inject_core_index(core_index); + } } } @@ -2011,4 +2024,6 @@ mod tests { assert!(zero_b.leading_zeros() >= zero_u.leading_zeros()); } + + // TODO: test validator_indices_and_core_index and set_validator_indices_and_core_index } diff --git a/polkadot/runtime/parachains/src/inclusion/tests.rs b/polkadot/runtime/parachains/src/inclusion/tests.rs index 65cbf54e0af2..c9b8692f166a 100644 --- a/polkadot/runtime/parachains/src/inclusion/tests.rs +++ b/polkadot/runtime/parachains/src/inclusion/tests.rs @@ -155,7 +155,7 @@ pub(crate) fn back_candidate( validity_votes.push(ValidityAttestation::Explicit(signature).into()); } - let backed = BackedCandidate::new(candidate, validity_votes, validator_indices); + let backed = BackedCandidate::new(candidate, validity_votes, validator_indices, None); let successfully_backed = primitives::check_candidate_backing(&backed, signing_context, group.len(), |i| { diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 94d3417cc596..900e1bbff503 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -145,6 +145,9 @@ pub mod pallet { DisputeInvalid, /// A candidate was backed by a disabled validator BackedByDisabled, + /// A candidate was backed even though the paraid had multiple cores assigned and no + /// injected core index. + BackedByElasticScalingWithNoCoreIndex, } /// Whether the paras inherent was included within this block. @@ -607,7 +610,10 @@ impl Pallet { // filtered during inherent data preparation (`ProvideInherent` context). Abort in such // cases. 
if context == ProcessInherentDataContext::Enter { - ensure!(!dropped_elastic_scaling_candidates, Error::::BackedByDisabled); + ensure!( + !dropped_elastic_scaling_candidates, + Error::::BackedByElasticScalingWithNoCoreIndex + ); } let SanitizedBackedCandidates { backed_candidates, votes_from_disabled_were_dropped } = diff --git a/polkadot/runtime/parachains/src/util.rs b/polkadot/runtime/parachains/src/util.rs index 588c224079ec..238e24dfc078 100644 --- a/polkadot/runtime/parachains/src/util.rs +++ b/polkadot/runtime/parachains/src/util.rs @@ -100,16 +100,10 @@ pub fn take_active_subset(active: &[ValidatorIndex], set: &[T]) -> Vec #[cfg(test)] mod tests { - use bitvec::vec::BitVec; + use crate::util::{split_active_subset, take_active_subset}; + use bitvec::{bitvec, vec::BitVec}; + use primitives::ValidatorIndex; use sp_std::vec::Vec; - use test_helpers::{dummy_candidate_descriptor, dummy_hash}; - - use crate::util::{has_core_index, split_active_subset, take_active_subset}; - use bitvec::bitvec; - use primitives::{ - BackedCandidate, CandidateCommitments, CommittedCandidateReceipt, PersistedValidationData, - ValidatorIndex, - }; #[test] fn take_active_subset_is_compatible_with_split_active_subset() { @@ -125,25 +119,4 @@ mod tests { pub fn dummy_bitvec(size: usize) -> BitVec { bitvec![u8, bitvec::order::Lsb0; 0; size] } - - // #[test] - // fn has_core_index_works() { - // let mut descriptor = dummy_candidate_descriptor(dummy_hash()); - // let empty_hash = sp_core::H256::zero(); - - // descriptor.para_id = 1000.into(); - // descriptor.persisted_validation_data_hash = empty_hash; - // let committed_receipt = CommittedCandidateReceipt { - // descriptor, - // commitments: CandidateCommitments::default(), - // }; - - // let candidate = BackedCandidate::new( - // committed_receipt.clone(), - // Vec::new(), - // dummy_bitvec(5), - // ); - - // assert_eq!(has_core_index::(&candidate, false), - // false); } } From d7b6ce8ce539d6e8f490ab0ff4a0d9a84e334f7a Mon Sep 17 00:00:00 2001 From: alindima Date: Wed, 21 Feb 2024 12:17:56 +0200 Subject: [PATCH 34/51] add zombienet test --- .gitlab/pipeline/zombienet/polkadot.yml | 8 ++++ .../functional/0012-elastic-scaling-mvp.toml | 38 +++++++++++++++++++ .../functional/0012-elastic-scaling-mvp.zndsl | 27 +++++++++++++ .../functional/0012-enable-node-feature.js | 37 ++++++++++++++++++ .../functional/0012-register-para.js | 37 ++++++++++++++++++ 5 files changed, 147 insertions(+) create mode 100644 polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.toml create mode 100644 polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.zndsl create mode 100644 polkadot/zombienet_tests/functional/0012-enable-node-feature.js create mode 100644 polkadot/zombienet_tests/functional/0012-register-para.js diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml index 54eb6db48cae..97572f029d00 100644 --- a/.gitlab/pipeline/zombienet/polkadot.yml +++ b/.gitlab/pipeline/zombienet/polkadot.yml @@ -158,6 +158,14 @@ zombienet-polkadot-functional-0011-async-backing-6-seconds-rate: --local-dir="${LOCAL_DIR}/functional" --test="0011-async-backing-6-seconds-rate.zndsl" +zombienet-polkadot-functional-0012-elastic-scaling-mvp: + extends: + - .zombienet-polkadot-common + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/functional" + --test="0012-elastic-scaling-mvp.zndsl" + zombienet-polkadot-smoke-0001-parachains-smoke-test: extends: - .zombienet-polkadot-common diff --git 
a/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.toml b/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.toml new file mode 100644 index 000000000000..0dfd814e10a5 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.toml @@ -0,0 +1,38 @@ +[settings] +timeout = 1000 +bootnode = true + +[relaychain.genesis.runtimeGenesis.patch.configuration.config] + max_validators_per_core = 2 + needed_approvals = 4 + coretime_cores = 2 + +[relaychain] +default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" +chain = "rococo-local" +default_command = "polkadot" + +[relaychain.default_resources] +limits = { memory = "4G", cpu = "2" } +requests = { memory = "2G", cpu = "1" } + + [[relaychain.nodes]] + name = "alice" + validator = "true" + + [[relaychain.node_groups]] + name = "validator" + count = 3 + args = [ "-lparachain=debug,runtime=debug"] + +[[parachains]] +id = 2000 +default_command = "polkadot-parachain" +add_to_genesis = false +register_para = true +onboard_as_parachain = false + + [parachains.collator] + name = "collator2000" + command = "polkadot-parachain" + args = [ "-lparachain=debug" ] diff --git a/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.zndsl b/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.zndsl new file mode 100644 index 000000000000..f0dfa343e724 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.zndsl @@ -0,0 +1,27 @@ +Description: Test that a paraid acquiring multiple cores does not brick itself if ElasticScalingMVP feature is enabled +Network: ./0012-elastic-scaling-mvp.toml +Creds: config + +# Check authority status. +validator: reports node_roles is 4 + +validator: reports substrate_block_height{status="finalized"} is at least 10 within 100 seconds + +# Ensure parachain was able to make progress. +validator: parachain 2000 block height is at least 10 within 200 seconds + +# Register the second core assigned to this parachain. +alice: js-script ./0012-register-para.js return is 0 within 600 seconds + +validator: reports substrate_block_height{status="finalized"} is at least 35 within 100 seconds + +# Parachain will now be stalled + +# Enable the ElasticScalingMVP node feature. +alice: js-script ./0012-enable-node-feature.js with "1" return is 0 within 600 seconds + +# Wait two sessions for the config to be updated. +sleep 120 seconds + +# Ensure parachain is now making progress. 
+validator: parachain 2000 block height is at least 30 within 200 seconds diff --git a/polkadot/zombienet_tests/functional/0012-enable-node-feature.js b/polkadot/zombienet_tests/functional/0012-enable-node-feature.js new file mode 100644 index 000000000000..ae3406fca686 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0012-enable-node-feature.js @@ -0,0 +1,37 @@ +async function run(nodeName, networkInfo, index) { + const { wsUri, userDefinedTypes } = networkInfo.nodesByName[nodeName]; + const api = await zombie.connect(wsUri, userDefinedTypes); + + await zombie.util.cryptoWaitReady(); + + // account to submit tx + const keyring = new zombie.Keyring({ type: "sr25519" }); + const alice = keyring.addFromUri("//Alice"); + + await new Promise(async (resolve, reject) => { + const unsub = await api.tx.sudo + .sudo(api.tx.configuration.setNodeFeature(Number(index), true)) + .signAndSend(alice, ({ status, isError }) => { + if (status.isInBlock) { + console.log( + `Transaction included at blockhash ${status.asInBlock}`, + ); + } else if (status.isFinalized) { + console.log( + `Transaction finalized at blockHash ${status.asFinalized}`, + ); + unsub(); + return resolve(); + } else if (isError) { + console.log(`Transaction error`); + reject(`Transaction error`); + } + }); + }); + + + + return 0; +} + +module.exports = { run }; \ No newline at end of file diff --git a/polkadot/zombienet_tests/functional/0012-register-para.js b/polkadot/zombienet_tests/functional/0012-register-para.js new file mode 100644 index 000000000000..e7619474fae2 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0012-register-para.js @@ -0,0 +1,37 @@ +async function run(nodeName, networkInfo, _jsArgs) { + const { wsUri, userDefinedTypes } = networkInfo.nodesByName[nodeName]; + const api = await zombie.connect(wsUri, userDefinedTypes); + + await zombie.util.cryptoWaitReady(); + + // account to submit tx + const keyring = new zombie.Keyring({ type: "sr25519" }); + const alice = keyring.addFromUri("//Alice"); + + await new Promise(async (resolve, reject) => { + const unsub = await api.tx.sudo + .sudo(api.tx.coretime.assignCore(0, 35, [[{ task: 2000 }, 57600]], null)) + .signAndSend(alice, ({ status, isError }) => { + if (status.isInBlock) { + console.log( + `Transaction included at blockhash ${status.asInBlock}`, + ); + } else if (status.isFinalized) { + console.log( + `Transaction finalized at blockHash ${status.asFinalized}`, + ); + unsub(); + return resolve(); + } else if (isError) { + console.log(`Transaction error`); + reject(`Transaction error`); + } + }); + }); + + + + return 0; +} + +module.exports = { run }; \ No newline at end of file From afed2a8512ce52b2ee336db624c1beed1f848d79 Mon Sep 17 00:00:00 2001 From: alindima Date: Wed, 21 Feb 2024 12:37:41 +0200 Subject: [PATCH 35/51] fix existing unit tests --- polkadot/runtime/parachains/src/builder.rs | 1 + .../runtime/parachains/src/inclusion/tests.rs | 34 +++++++------- .../parachains/src/paras_inherent/tests.rs | 44 ++++++++++--------- polkadot/runtime/parachains/src/util.rs | 5 --- 4 files changed, 44 insertions(+), 40 deletions(-) diff --git a/polkadot/runtime/parachains/src/builder.rs b/polkadot/runtime/parachains/src/builder.rs index 97e26371a206..500bc70cfa75 100644 --- a/polkadot/runtime/parachains/src/builder.rs +++ b/polkadot/runtime/parachains/src/builder.rs @@ -591,6 +591,7 @@ impl BenchBuilder { candidate, validity_votes, bitvec::bitvec![u8, bitvec::order::Lsb0; 1; group_validators.len()], + None, ) }) .collect() diff --git 
a/polkadot/runtime/parachains/src/inclusion/tests.rs b/polkadot/runtime/parachains/src/inclusion/tests.rs index c9b8692f166a..340e0ab1dd93 100644 --- a/polkadot/runtime/parachains/src/inclusion/tests.rs +++ b/polkadot/runtime/parachains/src/inclusion/tests.rs @@ -155,15 +155,19 @@ pub(crate) fn back_candidate( validity_votes.push(ValidityAttestation::Explicit(signature).into()); } - let backed = BackedCandidate::new(candidate, validity_votes, validator_indices, None); + let backed = BackedCandidate::new(candidate, validity_votes, validator_indices.clone(), None); - let successfully_backed = - primitives::check_candidate_backing(&backed, signing_context, group.len(), |i| { - Some(validators[group[i].0 as usize].public().into()) - }) - .ok() - .unwrap_or(0) >= - threshold; + let successfully_backed = primitives::check_candidate_backing( + backed.candidate().hash(), + backed.validity_votes(), + validator_indices.as_bitslice(), + signing_context, + group.len(), + |i| Some(validators[group[i].0 as usize].public().into()), + ) + .ok() + .unwrap_or(0) >= + threshold; match kind { BackingKind::Unanimous | BackingKind::Threshold => assert!(successfully_backed), @@ -1586,14 +1590,14 @@ fn backing_works() { let candidate_receipt_with_backers = intermediate .entry(backed_candidate.hash()) .or_insert_with(|| (backed_candidate.receipt(), Vec::new())); - - assert_eq!( - backed_candidate.validity_votes().len(), - backed_candidate.validator_indices(false).count_ones() - ); + let (validator_indices, None) = + backed_candidate.validator_indices_and_core_index(false) + else { + panic!("Expected no injected core index") + }; + assert_eq!(backed_candidate.validity_votes().len(), validator_indices.count_ones()); candidate_receipt_with_backers.1.extend( - backed_candidate - .validator_indices(false) + validator_indices .iter() .enumerate() .filter(|(_, signed)| **signed) diff --git a/polkadot/runtime/parachains/src/paras_inherent/tests.rs b/polkadot/runtime/parachains/src/paras_inherent/tests.rs index c96cc3902fe3..ecee338e7a15 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/tests.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/tests.rs @@ -1385,7 +1385,8 @@ mod sanitizers { backed_candidates.clone(), &>::allowed_relay_parents(), has_concluded_invalid, - &scheduled + &scheduled, + false ), SanitizedBackedCandidates { backed_candidates, @@ -1414,6 +1415,7 @@ mod sanitizers { &>::allowed_relay_parents(), has_concluded_invalid, &scheduled, + false, ); assert!(sanitized_backed_candidates.is_empty()); @@ -1447,6 +1449,7 @@ mod sanitizers { &>::allowed_relay_parents(), has_concluded_invalid, &scheduled, + false, ); assert_eq!(sanitized_backed_candidates.len(), backed_candidates.len() / 2); @@ -1469,7 +1472,8 @@ mod sanitizers { assert!(!filter_backed_statements_from_disabled_validators::( &mut backed_candidates, &>::allowed_relay_parents(), - &scheduled_paras + &scheduled_paras, + false )); assert_eq!(backed_candidates, before); }); @@ -1492,35 +1496,34 @@ mod sanitizers { // Verify the initial state is as expected assert_eq!(backed_candidates.get(0).unwrap().validity_votes().len(), 2); - assert_eq!( - backed_candidates.get(0).unwrap().validator_indices(false).get(0).unwrap(), - true - ); - assert_eq!( - backed_candidates.get(0).unwrap().validator_indices(false).get(1).unwrap(), - true - ); + let (validator_indices, None) = + backed_candidates.get(0).unwrap().validator_indices_and_core_index(false) + else { + panic!("Expected no injected core index") + }; + 
assert_eq!(validator_indices.get(0).unwrap(), true); + assert_eq!(validator_indices.get(1).unwrap(), true); let untouched = backed_candidates.get(1).unwrap().clone(); assert!(filter_backed_statements_from_disabled_validators::( &mut backed_candidates, &>::allowed_relay_parents(), - &scheduled_paras + &scheduled_paras, + false )); + let (validator_indices, None) = + backed_candidates.get(0).unwrap().validator_indices_and_core_index(false) + else { + panic!("Expected no injected core index") + }; // there should still be two backed candidates assert_eq!(backed_candidates.len(), 2); // but the first one should have only one validity vote assert_eq!(backed_candidates.get(0).unwrap().validity_votes().len(), 1); // Validator 0 vote should be dropped, validator 1 - retained - assert_eq!( - backed_candidates.get(0).unwrap().validator_indices(false).get(0).unwrap(), - false - ); - assert_eq!( - backed_candidates.get(0).unwrap().validator_indices(false).get(1).unwrap(), - true - ); + assert_eq!(validator_indices.get(0).unwrap(), false); + assert_eq!(validator_indices.get(1).unwrap(), true); // the second candidate shouldn't be modified assert_eq!(*backed_candidates.get(1).unwrap(), untouched); }); @@ -1541,7 +1544,8 @@ mod sanitizers { assert!(filter_backed_statements_from_disabled_validators::( &mut backed_candidates, &>::allowed_relay_parents(), - &scheduled_paras + &scheduled_paras, + false )); assert_eq!(backed_candidates.len(), 1); diff --git a/polkadot/runtime/parachains/src/util.rs b/polkadot/runtime/parachains/src/util.rs index 238e24dfc078..08bb009ad9a8 100644 --- a/polkadot/runtime/parachains/src/util.rs +++ b/polkadot/runtime/parachains/src/util.rs @@ -101,7 +101,6 @@ pub fn take_active_subset(active: &[ValidatorIndex], set: &[T]) -> Vec mod tests { use crate::util::{split_active_subset, take_active_subset}; - use bitvec::{bitvec, vec::BitVec}; use primitives::ValidatorIndex; use sp_std::vec::Vec; @@ -115,8 +114,4 @@ mod tests { assert_eq!(unselected, vec![9, 6, 4, 5, 2, 0, 8]); assert_eq!(selected, vec![1, 3, 7]); } - - pub fn dummy_bitvec(size: usize) -> BitVec { - bitvec![u8, bitvec::order::Lsb0; 0; size] - } } From 7ea040d4d5ffb93a77d8df73d90fc6ce9f021ca5 Mon Sep 17 00:00:00 2001 From: alindima Date: Wed, 21 Feb 2024 12:50:34 +0200 Subject: [PATCH 36/51] add prdoc --- prdoc/pr_3231.prdoc | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 prdoc/pr_3231.prdoc diff --git a/prdoc/pr_3231.prdoc b/prdoc/pr_3231.prdoc new file mode 100644 index 000000000000..9efdb2092ca5 --- /dev/null +++ b/prdoc/pr_3231.prdoc @@ -0,0 +1,9 @@ +title: Allow parachain which acquires multiple coretime cores to make progress + +doc: + - audience: Node Operator + description: | + Adds the needed changes so that parachains which acquire multiple coretime cores can still make progress. + Only one of the cores will be able to be occupied at a time. + Only works if the ElasticScalingMVP node feature is enabled in the runtime and the block author validator is + updated to include this change. 
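A note on the feature gating the description above relies on: ElasticScalingMVP is simply one bit in the runtime's `NodeFeatures` bitfield, and both the backing subsystem and the inherent processing read it with the same `get(..).map(|b| *b).unwrap_or(false)` pattern. Below is a minimal, self-contained sketch of that check; the constant value 1 matches the index the zombienet script enables, but the exact index and helper names are illustrative and not part of this patch set.

    use bitvec::{order::Lsb0, vec::BitVec};

    // Stand-in for `primitives::vstaging::NodeFeatures` (a `BitVec<u8, Lsb0>`).
    type NodeFeatures = BitVec<u8, Lsb0>;

    // Illustrative index only; the canonical value comes from
    // `FeatureIndex::ElasticScalingMVP`.
    const ELASTIC_SCALING_MVP: usize = 1;

    // Missing or unset bits both read as "disabled", mirroring the
    // `.map(|b| *b).unwrap_or(false)` pattern used throughout the series.
    fn elastic_scaling_enabled(features: &NodeFeatures) -> bool {
        features.get(ELASTIC_SCALING_MVP).map(|b| *b).unwrap_or(false)
    }

    fn main() {
        let mut features = NodeFeatures::new();
        assert!(!elastic_scaling_enabled(&features));

        // Enabling the feature grows the bitfield and sets the bit, which is
        // what the `setNodeFeature` sudo call in the zombienet test does
        // on-chain.
        features.resize(ELASTIC_SCALING_MVP + 1, false);
        features.set(ELASTIC_SCALING_MVP, true);
        assert!(elastic_scaling_enabled(&features));
    }

With the bit unset, behaviour falls back to the pre-elastic-scaling path: no core index is injected into backed candidates, and candidates for paras with multiple assigned cores are filtered out during inherent data preparation.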
From 79f281b835d2ae079ab89a412ccc059207dbecbe Mon Sep 17 00:00:00 2001 From: alindima Date: Wed, 21 Feb 2024 12:52:00 +0200 Subject: [PATCH 37/51] fix clippy --- .../runtime/parachains/src/paras_inherent/benchmarking.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs b/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs index 0f6b23ae1b39..ad3fa8e0dc71 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs @@ -120,7 +120,7 @@ benchmarks! { // with `v` validity votes. // let votes = v as usize; let votes = min(scheduler::Pallet::::group_validators(GroupIndex::from(0)).unwrap().len(), v as usize); - assert_eq!(benchmark.backed_candidates.get(0).unwrap().validity_votes.len(), votes); + assert_eq!(benchmark.backed_candidates.get(0).unwrap().validity_votes().len(), votes); benchmark.bitfields.clear(); benchmark.disputes.clear(); @@ -177,7 +177,7 @@ benchmarks! { // There is 1 backed assert_eq!(benchmark.backed_candidates.len(), 1); assert_eq!( - benchmark.backed_candidates.get(0).unwrap().validity_votes.len(), + benchmark.backed_candidates.get(0).unwrap().validity_votes().len(), votes, ); From 7521ed98e71d2991030b2b6d03923ccf0bf82b58 Mon Sep 17 00:00:00 2001 From: alindima Date: Wed, 21 Feb 2024 13:21:20 +0200 Subject: [PATCH 38/51] try fixing prdoc --- prdoc/pr_3231.prdoc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/prdoc/pr_3231.prdoc b/prdoc/pr_3231.prdoc index 9efdb2092ca5..26e96d3635b1 100644 --- a/prdoc/pr_3231.prdoc +++ b/prdoc/pr_3231.prdoc @@ -7,3 +7,5 @@ doc: Only one of the cores will be able to be occupied at a time. Only works if the ElasticScalingMVP node feature is enabled in the runtime and the block author validator is updated to include this change. 
+ +crates: [ ] From 9c3dd5cb54b53abdfde8ccaaef94b4d238303f1c Mon Sep 17 00:00:00 2001 From: alindima Date: Wed, 21 Feb 2024 15:25:12 +0200 Subject: [PATCH 39/51] cache Validator->Group mapping --- Cargo.lock | 1 + polkadot/node/core/backing/Cargo.toml | 1 + polkadot/node/core/backing/src/lib.rs | 143 +++++++++++++------- polkadot/node/core/backing/src/tests/mod.rs | 13 +- 4 files changed, 108 insertions(+), 50 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 378cf44c8cf7..0142532cb050 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12487,6 +12487,7 @@ dependencies = [ "polkadot-primitives-test-helpers", "polkadot-statement-table", "sc-keystore", + "schnellru", "sp-application-crypto", "sp-core", "sp-keyring", diff --git a/polkadot/node/core/backing/Cargo.toml b/polkadot/node/core/backing/Cargo.toml index b0cf041e38da..f71b8df80dd2 100644 --- a/polkadot/node/core/backing/Cargo.toml +++ b/polkadot/node/core/backing/Cargo.toml @@ -22,6 +22,7 @@ bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } gum = { package = "tracing-gum", path = "../../gum" } thiserror = { workspace = true } fatality = "0.0.6" +schnellru = "0.2.1" [dev-dependencies] sp-core = { path = "../../../../substrate/primitives/core" } diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index a6fe0ddbfb91..04b24a417066 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -77,6 +77,7 @@ use futures::{ stream::FuturesOrdered, FutureExt, SinkExt, StreamExt, TryFutureExt, }; +use schnellru::{ByLength, LruMap}; use error::{Error, FatalResult}; use polkadot_node_primitives::{ @@ -107,8 +108,9 @@ use polkadot_primitives::{ vstaging::{node_features::FeatureIndex, NodeFeatures}, BackedCandidate, CandidateCommitments, CandidateHash, CandidateReceipt, CommittedCandidateReceipt, CoreIndex, CoreState, ExecutorParams, GroupIndex, GroupRotationInfo, - Hash, Id as ParaId, PersistedValidationData, PvfExecKind, SigningContext, ValidationCode, - ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, + Hash, Id as ParaId, IndexedVec, PersistedValidationData, PvfExecKind, SessionIndex, + SigningContext, ValidationCode, ValidatorId, ValidatorIndex, ValidatorSignature, + ValidityAttestation, }; use sp_keystore::KeystorePtr; use statement_table::{ @@ -232,8 +234,8 @@ struct PerRelayParentState { inject_core_index: bool, /// The core states for all cores. cores: Vec, - /// The validator groups at this relay parent. - validator_groups: Vec>, + /// The validator index -> group mapping at this relay parent. + validator_to_group: IndexedVec>, /// The associated group rotation information. group_rotation_info: GroupRotationInfo, } @@ -287,6 +289,8 @@ struct State { /// This is guaranteed to have an entry for each candidate with a relay parent in the implicit /// or explicit view for which a `Seconded` statement has been successfully imported. per_candidate: HashMap, + /// Cache the per-session Validator->Group mapping. + validator_to_group_cache: LruMap>>, /// A cloneable sender which is dispatched to background candidate validation tasks to inform /// the main task of the result. 
background_validation_tx: mpsc::Sender<(Hash, ValidatedCandidateCommand)>, @@ -304,6 +308,7 @@ impl State { per_leaf: HashMap::default(), per_relay_parent: HashMap::default(), per_candidate: HashMap::new(), + validator_to_group_cache: LruMap::new(ByLength::new(2)), background_validation_tx, keystore, } @@ -986,7 +991,14 @@ async fn handle_active_leaves_update( // construct a `PerRelayParent` from the runtime API // and insert it. - let per = construct_per_relay_parent_state(ctx, maybe_new, &state.keystore, mode).await?; + let per = construct_per_relay_parent_state( + ctx, + maybe_new, + &state.keystore, + &mut state.validator_to_group_cache, + mode, + ) + .await?; if let Some(per) = per { state.per_relay_parent.insert(maybe_new, per); @@ -1014,7 +1026,7 @@ macro_rules! try_runtime_api { } fn core_index_from_statement( - validator_groups: &[Vec], + validator_to_group: &IndexedVec>, group_rotation_info: &GroupRotationInfo, cores: &[CoreState], statement: &SignedFullStatementWithPVD, @@ -1024,51 +1036,70 @@ fn core_index_from_statement( let n_cores = cores.len(); - gum::trace!(target: LOG_TARGET, ?group_rotation_info, ?statement, ?validator_groups, n_cores = ?cores.len() , ?candidate_hash, "Extracting core index from statement"); + gum::trace!( + target:LOG_TARGET, + ?group_rotation_info, + ?statement, + ?validator_to_group, + n_cores = ?cores.len(), + ?candidate_hash, + "Extracting core index from statement" + ); let statement_validator_index = statement.validator_index(); - for (group_index, group) in validator_groups.iter().enumerate() { - for validator_index in group { - if *validator_index == statement_validator_index { - // First check if the statement para id matches the core assignment. - let core_index = - group_rotation_info.core_for_group(GroupIndex(group_index as u32), n_cores); - - if core_index.0 as usize > n_cores { - gum::warn!(target: LOG_TARGET, ?candidate_hash, ?core_index, n_cores, "Invalid CoreIndex"); - return None - } + let Some(Some(group_index)) = validator_to_group.get(statement_validator_index) else { + gum::debug!( + target: LOG_TARGET, + ?group_rotation_info, + ?statement, + ?validator_to_group, + n_cores = ?cores.len() , + ?candidate_hash, + "Invalid validator index: {:?}", + statement_validator_index + ); + return None + }; - if let StatementWithPVD::Seconded(candidate, _pvd) = statement.payload() { - let candidate_para_id = candidate.descriptor.para_id; - let assigned_para_id = match &cores[core_index.0 as usize] { - CoreState::Free => { - gum::debug!(target: LOG_TARGET, ?candidate_hash, "Invalid CoreIndex, core is not assigned to any para_id"); - return None - }, - CoreState::Occupied(occupied) => { - if let Some(next) = &occupied.next_up_on_available { - next.para_id - } else { - return None - } - }, - CoreState::Scheduled(scheduled) => scheduled.para_id, - }; + // First check if the statement para id matches the core assignment. 
+ let core_index = group_rotation_info.core_for_group(*group_index, n_cores); - if assigned_para_id != candidate_para_id { - gum::debug!(target: LOG_TARGET, ?candidate_hash, ?core_index, ?assigned_para_id, ?candidate_para_id, "Invalid CoreIndex, core is assigned to a different para_id"); - return None - } - return Some(core_index) + if core_index.0 as usize > n_cores { + gum::warn!(target: LOG_TARGET, ?candidate_hash, ?core_index, n_cores, "Invalid CoreIndex"); + return None + } + + if let StatementWithPVD::Seconded(candidate, _pvd) = statement.payload() { + let candidate_para_id = candidate.descriptor.para_id; + let assigned_para_id = match &cores[core_index.0 as usize] { + CoreState::Free => { + gum::debug!(target: LOG_TARGET, ?candidate_hash, "Invalid CoreIndex, core is not assigned to any para_id"); + return None + }, + CoreState::Occupied(occupied) => + if let Some(next) = &occupied.next_up_on_available { + next.para_id } else { - return Some(core_index) - } - } + return None + }, + CoreState::Scheduled(scheduled) => scheduled.para_id, + }; + + if assigned_para_id != candidate_para_id { + gum::debug!( + target: LOG_TARGET, + ?candidate_hash, + ?core_index, + ?assigned_para_id, + ?candidate_para_id, + "Invalid CoreIndex, core is assigned to a different para_id" + ); + return None } + return Some(core_index) + } else { + return Some(core_index) } - - None } /// Load the data necessary to do backing work on top of a relay-parent. @@ -1077,6 +1108,10 @@ async fn construct_per_relay_parent_state( ctx: &mut Context, relay_parent: Hash, keystore: &KeystorePtr, + validator_to_group_cache: &mut LruMap< + SessionIndex, + IndexedVec>, + >, mode: ProspectiveParachainsMode, ) -> Result, Error> { let parent = relay_parent; @@ -1172,7 +1207,21 @@ async fn construct_per_relay_parent_state( groups.insert(core_index, g.clone()); } } - gum::debug!(target: LOG_TARGET, ?groups, "TableContext" ); + gum::debug!(target: LOG_TARGET, ?groups, "TableContext"); + + let validator_to_group = validator_to_group_cache + .get_or_insert(session_index, || { + let mut vector = vec![None; validators.len()]; + + for (group_idx, validator_group) in validator_groups.iter().enumerate() { + for validator in validator_group { + vector[validator.0 as usize] = Some(GroupIndex(group_idx as u32)); + } + } + + IndexedVec::<_, _>::from(vector) + }) + .expect("Just inserted"); let table_context = TableContext { validator, groups, validators, disabled_validators }; let table_config = TableConfig { @@ -1196,7 +1245,7 @@ async fn construct_per_relay_parent_state( minimum_backing_votes, inject_core_index, cores, - validator_groups, + validator_to_group: validator_to_group.clone(), group_rotation_info, })) } @@ -1691,7 +1740,7 @@ async fn import_statement( let stmt = primitive_statement_to_table(statement); let core = core_index_from_statement( - &rp_state.validator_groups, + &rp_state.validator_to_group, &rp_state.group_rotation_info, &rp_state.cores, statement, diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index a4f77817427c..7223f1e1dfb0 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -72,6 +72,7 @@ pub(crate) struct TestState { validator_public: Vec, validation_data: PersistedValidationData, validator_groups: (Vec>, GroupRotationInfo), + validator_to_group: IndexedVec>, availability_cores: Vec, head_data: HashMap, signing_context: SigningContext, @@ -114,6 +115,11 @@ impl Default for TestState { .into_iter() .map(|g| 
g.into_iter().map(ValidatorIndex).collect()) .collect(); + let validator_to_group: IndexedVec<_, _> = + vec![Some(0), Some(1), Some(0), Some(0), None, Some(0)] + .into_iter() + .map(|x| x.map(|x| GroupIndex(x))) + .collect(); let group_rotation_info = GroupRotationInfo { session_start_block: 0, group_rotation_frequency: 100, now: 1 }; @@ -143,6 +149,7 @@ impl Default for TestState { validators, validator_public, validator_groups: (validator_groups, group_rotation_info), + validator_to_group, availability_cores, head_data, validation_data, @@ -720,7 +727,7 @@ fn extract_core_index_from_statement_works() { .expect("should be signed"); let core_index_1 = core_index_from_statement( - &test_state.validator_groups.0, + &test_state.validator_to_group, &test_state.validator_groups.1, &test_state.availability_cores, &signed_statement_1, @@ -730,7 +737,7 @@ fn extract_core_index_from_statement_works() { assert_eq!(core_index_1, CoreIndex(0)); let core_index_2 = core_index_from_statement( - &test_state.validator_groups.0, + &test_state.validator_to_group, &test_state.validator_groups.1, &test_state.availability_cores, &signed_statement_2, @@ -740,7 +747,7 @@ fn extract_core_index_from_statement_works() { assert_eq!(core_index_2, None); let core_index_3 = core_index_from_statement( - &test_state.validator_groups.0, + &test_state.validator_to_group, &test_state.validator_groups.1, &test_state.availability_cores, &signed_statement_3, From 7976e2f936bdda913291d27a43659a48044ab5df Mon Sep 17 00:00:00 2001 From: alindima Date: Wed, 21 Feb 2024 15:36:18 +0200 Subject: [PATCH 40/51] lockfile --- Cargo.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 5a85cb23dda1..b0cffbb0573b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -15178,7 +15178,7 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.0", - "syn 2.0.49", + "syn 2.0.50", "unicode-ident", ] From 4d6e797dc42bdac440aea892988a6f36be3c19b5 Mon Sep 17 00:00:00 2001 From: alindima Date: Wed, 21 Feb 2024 17:33:11 +0200 Subject: [PATCH 41/51] add tests for backedcandidate functions --- polkadot/primitives/src/v6/mod.rs | 112 ++++++++++++++++++++++-- polkadot/runtime/parachains/src/util.rs | 4 +- 2 files changed, 109 insertions(+), 7 deletions(-) diff --git a/polkadot/primitives/src/v6/mod.rs b/polkadot/primitives/src/v6/mod.rs index 32f16b0b3c0e..89431f7801f7 100644 --- a/polkadot/primitives/src/v6/mod.rs +++ b/polkadot/primitives/src/v6/mod.rs @@ -776,12 +776,17 @@ impl BackedCandidate { // It extends `BackedCandidate::validity_indices` to store a 8 bit core index. if core_index_enabled { let core_idx_offset = self.validator_indices.len().saturating_sub(8); - let (validator_indices_slice, core_idx_slice) = - self.validator_indices.split_at(core_idx_offset); - (validator_indices_slice, Some(CoreIndex(core_idx_slice.load::() as u32))) - } else { - (&self.validator_indices, None) + if core_idx_offset > 0 { + let (validator_indices_slice, core_idx_slice) = + self.validator_indices.split_at(core_idx_offset); + return ( + validator_indices_slice, + Some(CoreIndex(core_idx_slice.load::() as u32)), + ); + } } + + (&self.validator_indices, None) } /// Inject a core index in the validator_indices bitvec. 
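The accessor in the hunk above only splits off a core index when the bitfield is strictly longer than the 8 injected bits, so a plain backing-group bitfield (groups of eight validators or fewer) still round-trips unchanged. A rough sketch of that layout outside the `BackedCandidate` type, assuming the same `bitvec` conventions as the surrounding code (the free functions and the bare `u8` core index are simplifications for illustration):

    use bitvec::{field::BitField, order::Lsb0, slice::BitSlice, vec::BitVec};

    // Append an 8-bit core index after the per-validator bits, as
    // `BackedCandidate::inject_core_index` does.
    fn inject(validator_bits: &mut BitVec<u8, Lsb0>, core_index: u8) {
        let tail: BitVec<u8, Lsb0> = BitVec::from_vec(vec![core_index]);
        validator_bits.extend(tail);
    }

    // Split the combined bitfield back into (validator bits, optional core
    // index). Nothing is decoded when there are no bits beyond the first 8,
    // matching the `core_idx_offset > 0` guard above.
    fn split(bits: &BitVec<u8, Lsb0>) -> (&BitSlice<u8, Lsb0>, Option<u8>) {
        let offset = bits.len().saturating_sub(8);
        if offset == 0 {
            return (bits.as_bitslice(), None);
        }
        let (validators, core) = bits.split_at(offset);
        (validators, Some(core.load::<u8>()))
    }

    fn main() {
        let mut bits = bitvec::bitvec![u8, bitvec::order::Lsb0; 0, 1, 0, 1];
        inject(&mut bits, 10);
        let (validators, core) = split(&bits);
        assert_eq!(validators.count_ones(), 2);
        assert_eq!(core, Some(10));
    }

This is also why the caller must know whether ElasticScalingMVP is enabled before interpreting `validator_indices`: with the feature off, the trailing byte is treated as ordinary validator bits and the group-size check in `check_candidate_backing` rejects it, as the tests added in the next hunk exercise.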
@@ -1950,6 +1955,34 @@ pub enum PvfExecKind { #[cfg(test)] mod tests { use super::*; + use bitvec::bitvec; + use primitives::sr25519; + + pub fn dummy_committed_candidate_receipt() -> CommittedCandidateReceipt { + let zeros = Hash::zero(); + + CommittedCandidateReceipt { + descriptor: CandidateDescriptor { + para_id: 0.into(), + relay_parent: zeros, + collator: CollatorId::from(sr25519::Public::from_raw([0; 32])), + persisted_validation_data_hash: zeros, + pov_hash: zeros, + erasure_root: zeros, + signature: CollatorSignature::from(sr25519::Signature([0u8; 64])), + para_head: zeros, + validation_code_hash: ValidationCode(vec![1, 2, 3, 4, 5, 6, 7, 8, 9]).hash(), + }, + commitments: CandidateCommitments { + head_data: HeadData(vec![]), + upward_messages: vec![].try_into().expect("empty vec fits within bounds"), + new_validation_code: None, + horizontal_messages: vec![].try_into().expect("empty vec fits within bounds"), + processed_downward_messages: 0, + hrmp_watermark: 0_u32, + }, + } + } #[test] fn group_rotation_info_calculations() { @@ -2025,5 +2058,72 @@ mod tests { assert!(zero_b.leading_zeros() >= zero_u.leading_zeros()); } - // TODO: test validator_indices_and_core_index and set_validator_indices_and_core_index + #[test] + fn test_backed_candidate_injected_core_index() { + let initial_validator_indices = bitvec![u8, bitvec::order::Lsb0; 0, 1, 0, 1]; + let mut candidate = BackedCandidate::new( + dummy_committed_candidate_receipt(), + vec![], + initial_validator_indices.clone(), + None, + ); + + // No core index supplied, ElasticScalingMVP is off. + let (validator_indices, core_index) = candidate.validator_indices_and_core_index(false); + assert_eq!(validator_indices, initial_validator_indices.as_bitslice()); + assert!(core_index.is_none()); + + // No core index supplied, ElasticScalingMVP is on. Still, decoding will be ok if backing + // group size is <= 8, to give a chance to parachains that don't have multiple cores + // assigned. + let (validator_indices, core_index) = candidate.validator_indices_and_core_index(true); + assert_eq!(validator_indices, initial_validator_indices.as_bitslice()); + assert!(core_index.is_none()); + + let encoded_validator_indices = candidate.validator_indices.clone(); + candidate.set_validator_indices_and_core_index(validator_indices.into(), core_index); + assert_eq!(candidate.validator_indices, encoded_validator_indices); + + // No core index supplied, ElasticScalingMVP is on. Decoding is corrupted if backing group + // size larger than 8. + let candidate = BackedCandidate::new( + dummy_committed_candidate_receipt(), + vec![], + bitvec![u8, bitvec::order::Lsb0; 0, 1, 0, 1, 0, 1, 0, 1, 0], + None, + ); + let (validator_indices, core_index) = candidate.validator_indices_and_core_index(true); + assert_eq!(validator_indices, bitvec![u8, bitvec::order::Lsb0; 0].as_bitslice()); + assert!(core_index.is_some()); + + // Core index supplied, ElasticScalingMVP is off. Core index will be treated as normal + // validator indices. Runtime will check against this. + let candidate = BackedCandidate::new( + dummy_committed_candidate_receipt(), + vec![], + bitvec![u8, bitvec::order::Lsb0; 0, 1, 0, 1], + Some(CoreIndex(10)), + ); + let (validator_indices, core_index) = candidate.validator_indices_and_core_index(false); + assert_eq!( + validator_indices, + bitvec![u8, bitvec::order::Lsb0; 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0] + ); + assert!(core_index.is_none()); + + // Core index supplied, ElasticScalingMVP is on. 
+ let mut candidate = BackedCandidate::new( + dummy_committed_candidate_receipt(), + vec![], + bitvec![u8, bitvec::order::Lsb0; 0, 1, 0, 1], + Some(CoreIndex(10)), + ); + let (validator_indices, core_index) = candidate.validator_indices_and_core_index(true); + assert_eq!(validator_indices, bitvec![u8, bitvec::order::Lsb0; 0, 1, 0, 1]); + assert_eq!(core_index, Some(CoreIndex(10))); + + let encoded_validator_indices = candidate.validator_indices.clone(); + candidate.set_validator_indices_and_core_index(validator_indices.into(), core_index); + assert_eq!(candidate.validator_indices, encoded_validator_indices); + } } diff --git a/polkadot/runtime/parachains/src/util.rs b/polkadot/runtime/parachains/src/util.rs index 08bb009ad9a8..aa07ef080055 100644 --- a/polkadot/runtime/parachains/src/util.rs +++ b/polkadot/runtime/parachains/src/util.rs @@ -22,6 +22,7 @@ use primitives::{Id as ParaId, PersistedValidationData, ValidatorIndex}; use sp_std::{collections::btree_set::BTreeSet, vec::Vec}; use crate::{configuration, hrmp, paras}; + /// Make the persisted validation data for a particular parachain, a specified relay-parent and it's /// storage root. /// @@ -100,9 +101,10 @@ pub fn take_active_subset(active: &[ValidatorIndex], set: &[T]) -> Vec #[cfg(test)] mod tests { + use sp_std::vec::Vec; + use crate::util::{split_active_subset, take_active_subset}; use primitives::ValidatorIndex; - use sp_std::vec::Vec; #[test] fn take_active_subset_is_compatible_with_split_active_subset() { From 4c36440b77ff015da412b28a29b514cbec508e41 Mon Sep 17 00:00:00 2001 From: alindima Date: Thu, 22 Feb 2024 09:04:43 +0200 Subject: [PATCH 42/51] newlines --- polkadot/zombienet_tests/functional/0012-enable-node-feature.js | 2 +- polkadot/zombienet_tests/functional/0012-register-para.js | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/polkadot/zombienet_tests/functional/0012-enable-node-feature.js b/polkadot/zombienet_tests/functional/0012-enable-node-feature.js index ae3406fca686..4822e1f66447 100644 --- a/polkadot/zombienet_tests/functional/0012-enable-node-feature.js +++ b/polkadot/zombienet_tests/functional/0012-enable-node-feature.js @@ -34,4 +34,4 @@ async function run(nodeName, networkInfo, index) { return 0; } -module.exports = { run }; \ No newline at end of file +module.exports = { run }; diff --git a/polkadot/zombienet_tests/functional/0012-register-para.js b/polkadot/zombienet_tests/functional/0012-register-para.js index e7619474fae2..25c7e4f5ffdd 100644 --- a/polkadot/zombienet_tests/functional/0012-register-para.js +++ b/polkadot/zombienet_tests/functional/0012-register-para.js @@ -34,4 +34,4 @@ async function run(nodeName, networkInfo, _jsArgs) { return 0; } -module.exports = { run }; \ No newline at end of file +module.exports = { run }; From af1cd821d3fb348af2fa25109604ca5a5ffde5dc Mon Sep 17 00:00:00 2001 From: alindima Date: Thu, 22 Feb 2024 09:27:42 +0200 Subject: [PATCH 43/51] use Arc to avoid cloning --- polkadot/node/core/backing/src/lib.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 04b24a417066..cc192607cea0 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -235,7 +235,7 @@ struct PerRelayParentState { /// The core states for all cores. cores: Vec, /// The validator index -> group mapping at this relay parent. 
- validator_to_group: IndexedVec>, + validator_to_group: Arc>>, /// The associated group rotation information. group_rotation_info: GroupRotationInfo, } @@ -290,7 +290,8 @@ struct State { /// or explicit view for which a `Seconded` statement has been successfully imported. per_candidate: HashMap, /// Cache the per-session Validator->Group mapping. - validator_to_group_cache: LruMap>>, + validator_to_group_cache: + LruMap>>>, /// A cloneable sender which is dispatched to background candidate validation tasks to inform /// the main task of the result. background_validation_tx: mpsc::Sender<(Hash, ValidatedCandidateCommand)>, @@ -1110,7 +1111,7 @@ async fn construct_per_relay_parent_state( keystore: &KeystorePtr, validator_to_group_cache: &mut LruMap< SessionIndex, - IndexedVec>, + Arc>>, >, mode: ProspectiveParachainsMode, ) -> Result, Error> { @@ -1219,7 +1220,7 @@ async fn construct_per_relay_parent_state( } } - IndexedVec::<_, _>::from(vector) + Arc::new(IndexedVec::<_, _>::from(vector)) }) .expect("Just inserted"); From dc57adbacda46f306bfc3a9bd8c3f0ffdec719e7 Mon Sep 17 00:00:00 2001 From: alindima Date: Thu, 22 Feb 2024 09:56:05 +0200 Subject: [PATCH 44/51] add check for parachain stall to zombienet test --- .../zombienet_tests/functional/0012-elastic-scaling-mvp.zndsl | 1 + 1 file changed, 1 insertion(+) diff --git a/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.zndsl b/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.zndsl index f0dfa343e724..a7193c9282b9 100644 --- a/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.zndsl +++ b/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.zndsl @@ -16,6 +16,7 @@ alice: js-script ./0012-register-para.js return is 0 within 600 seconds validator: reports substrate_block_height{status="finalized"} is at least 35 within 100 seconds # Parachain will now be stalled +validator: parachain 2000 block height is lower than 20 within 300 seconds # Enable the ElasticScalingMVP node feature. 
alice: js-script ./0012-enable-node-feature.js with "1" return is 0 within 600 seconds From bb5968cd1d56d3eefd5b73f53def2a1cfdf7b515 Mon Sep 17 00:00:00 2001 From: alindima Date: Thu, 22 Feb 2024 11:38:18 +0200 Subject: [PATCH 45/51] add more unit tests --- .../runtime/parachains/src/inclusion/mod.rs | 17 +- .../runtime/parachains/src/inclusion/tests.rs | 377 +++++++++++++++++- .../parachains/src/paras_inherent/mod.rs | 1 + .../parachains/src/paras_inherent/tests.rs | 1 + 4 files changed, 384 insertions(+), 12 deletions(-) diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index cda75cf650a9..2d6dac8b3b19 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -37,12 +37,11 @@ use frame_system::pallet_prelude::*; use pallet_message_queue::OnQueueChanged; use parity_scale_codec::{Decode, Encode}; use primitives::{ - effective_minimum_backing_votes, supermajority_threshold, - vstaging::node_features::FeatureIndex, well_known_keys, AvailabilityBitfield, BackedCandidate, - CandidateCommitments, CandidateDescriptor, CandidateHash, CandidateReceipt, - CommittedCandidateReceipt, CoreIndex, GroupIndex, Hash, HeadData, Id as ParaId, - SignedAvailabilityBitfields, SigningContext, UpwardMessage, ValidatorId, ValidatorIndex, - ValidityAttestation, + effective_minimum_backing_votes, supermajority_threshold, well_known_keys, + AvailabilityBitfield, BackedCandidate, CandidateCommitments, CandidateDescriptor, + CandidateHash, CandidateReceipt, CommittedCandidateReceipt, CoreIndex, GroupIndex, Hash, + HeadData, Id as ParaId, SignedAvailabilityBitfields, SigningContext, UpwardMessage, + ValidatorId, ValidatorIndex, ValidityAttestation, }; use scale_info::TypeInfo; use sp_runtime::{traits::One, DispatchError, SaturatedConversion, Saturating}; @@ -606,6 +605,7 @@ impl Pallet { scheduled: &BTreeMap, scheduled_by_core: &BTreeMap, group_validators: GV, + core_index_enabled: bool, ) -> Result, DispatchError> where GV: Fn(GroupIndex) -> Option>, @@ -618,11 +618,6 @@ impl Pallet { return Ok(ProcessedCandidates::default()) } - let core_index_enabled = configuration::Pallet::::config() - .node_features - .get(FeatureIndex::ElasticScalingMVP as usize) - .map(|b| *b) - .unwrap_or(false); let minimum_backing_votes = configuration::Pallet::::config().minimum_backing_votes; let validators = shared::Pallet::::active_validator_keys(); diff --git a/polkadot/runtime/parachains/src/inclusion/tests.rs b/polkadot/runtime/parachains/src/inclusion/tests.rs index 340e0ab1dd93..02bf5f7186b4 100644 --- a/polkadot/runtime/parachains/src/inclusion/tests.rs +++ b/polkadot/runtime/parachains/src/inclusion/tests.rs @@ -120,6 +120,7 @@ pub(crate) fn back_candidate( keystore: &KeystorePtr, signing_context: &SigningContext, kind: BackingKind, + core_index: Option, ) -> BackedCandidate { let mut validator_indices = bitvec::bitvec![u8, BitOrderLsb0; 0; group.len()]; let threshold = effective_minimum_backing_votes( @@ -155,7 +156,8 @@ pub(crate) fn back_candidate( validity_votes.push(ValidityAttestation::Explicit(signature).into()); } - let backed = BackedCandidate::new(candidate, validity_votes, validator_indices.clone(), None); + let backed = + BackedCandidate::new(candidate, validity_votes, validator_indices.clone(), core_index); let successfully_backed = primitives::check_candidate_backing( backed.candidate().hash(), @@ -943,8 +945,89 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Threshold, + 
None, ); + // No scheduled cores. + assert_noop!( + ParaInclusion::process_candidates( + &allowed_relay_parents, + vec![backed.clone()], + &BTreeMap::new(), + &BTreeMap::new(), + &group_validators, + false + ), + Error::::UnscheduledCandidate + ); + + // Core scheduled for another para. + assert_noop!( + ParaInclusion::process_candidates( + &allowed_relay_parents, + vec![backed.clone()], + &[chain_b_assignment].into_iter().collect(), + &[(chain_b_assignment.1, chain_b_assignment.0)].into_iter().collect(), + &group_validators, + false + ), + Error::::UnscheduledCandidate + ); + + // another candidate for the same para, but only one core scheduled for the para. + let mut another_candidate = TestCandidateBuilder { + para_id: chain_a, + relay_parent: System::parent_hash(), + pov_hash: Hash::repeat_byte(2), + persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), + hrmp_watermark: RELAY_PARENT_NUM, + ..Default::default() + } + .build(); + collator_sign_candidate(Sr25519Keyring::One, &mut another_candidate); + let another_backed = back_candidate( + another_candidate, + &validators, + group_validators(GroupIndex::from(0)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + None, + ); + + assert_noop!( + ParaInclusion::process_candidates( + &allowed_relay_parents, + vec![backed, another_backed], + &[chain_a_assignment].into_iter().collect(), + &[(chain_a_assignment.1, chain_a_assignment.0)].into_iter().collect(), + &group_validators, + false + ), + Error::::UnscheduledCandidate + ); + + // core scheduled for another para. ElasticScalingMVP enabled and cores supplied. + let mut candidate = TestCandidateBuilder { + para_id: chain_a, + relay_parent: System::parent_hash(), + pov_hash: Hash::repeat_byte(1), + persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), + hrmp_watermark: RELAY_PARENT_NUM, + ..Default::default() + } + .build(); + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(0)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + Some(chain_a_assignment.1), + ); assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, @@ -952,6 +1035,7 @@ fn candidate_checks() { &[chain_b_assignment].into_iter().collect(), &[(chain_b_assignment.1, chain_b_assignment.0)].into_iter().collect(), &group_validators, + true ), Error::::UnscheduledCandidate ); @@ -989,6 +1073,7 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Threshold, + None, ); let backed_b = back_candidate( @@ -998,6 +1083,7 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Threshold, + None, ); // out-of-order manifests as unscheduled. 
@@ -1013,6 +1099,7 @@ fn candidate_checks() { .into_iter() .collect(), &group_validators, + false ), Error::::ScheduledOutOfOrder ); @@ -1038,6 +1125,7 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Lacking, + None, ); assert_noop!( @@ -1047,6 +1135,7 @@ fn candidate_checks() { &[chain_a_assignment].into_iter().collect(), &[(chain_a_assignment.1, chain_a_assignment.0)].into_iter().collect(), &group_validators, + false ), Error::::InsufficientBacking ); @@ -1087,6 +1176,7 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Threshold, + None, ); let backed_b = back_candidate( @@ -1096,6 +1186,7 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Threshold, + None, ); assert_noop!( @@ -1110,6 +1201,7 @@ fn candidate_checks() { .into_iter() .collect(), &group_validators, + false ), Error::::DisallowedRelayParent ); @@ -1140,6 +1232,7 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Threshold, + None, ); assert_noop!( @@ -1149,6 +1242,7 @@ fn candidate_checks() { &[thread_a_assignment].into_iter().collect(), &[(thread_a_assignment.1, thread_a_assignment.0)].into_iter().collect(), &group_validators, + false ), Error::::NotCollatorSigned ); @@ -1175,6 +1269,7 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Threshold, + None, ); let candidate = TestCandidateBuilder::default().build(); @@ -1200,6 +1295,7 @@ fn candidate_checks() { &[chain_a_assignment].into_iter().collect(), &[(chain_a_assignment.1, chain_a_assignment.0)].into_iter().collect(), &group_validators, + false ), Error::::CandidateScheduledBeforeParaFree ); @@ -1232,6 +1328,7 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Threshold, + None, ); assert_noop!( @@ -1241,6 +1338,7 @@ fn candidate_checks() { &[chain_a_assignment].into_iter().collect(), &[(chain_a_assignment.1, chain_a_assignment.0)].into_iter().collect(), &group_validators, + false ), Error::::CandidateScheduledBeforeParaFree ); @@ -1270,6 +1368,7 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Threshold, + None, ); { @@ -1292,6 +1391,7 @@ fn candidate_checks() { &[chain_a_assignment].into_iter().collect(), &[(chain_a_assignment.1, chain_a_assignment.0)].into_iter().collect(), &group_validators, + false ), Error::::PrematureCodeUpgrade ); @@ -1318,6 +1418,7 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Threshold, + None, ); assert_eq!( @@ -1327,6 +1428,7 @@ fn candidate_checks() { &[chain_a_assignment].into_iter().collect(), &[(chain_a_assignment.1, chain_a_assignment.0)].into_iter().collect(), &group_validators, + false ), Err(Error::::ValidationDataHashMismatch.into()), ); @@ -1354,6 +1456,7 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Threshold, + None, ); assert_noop!( @@ -1363,6 +1466,7 @@ fn candidate_checks() { &[chain_a_assignment].into_iter().collect(), &[(chain_a_assignment.1, chain_a_assignment.0)].into_iter().collect(), &group_validators, + false ), Error::::InvalidValidationCodeHash ); @@ -1390,6 +1494,7 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Threshold, + None, ); assert_noop!( @@ -1399,6 +1504,7 @@ fn candidate_checks() { &[chain_a_assignment].into_iter().collect(), &[(chain_a_assignment.1, chain_a_assignment.0)].into_iter().collect(), &group_validators, + false ), Error::::ParaHeadMismatch ); @@ -1511,6 +1617,7 @@ fn backing_works() { &keystore, &signing_context, BackingKind::Threshold, + None, ); let backed_b = back_candidate( @@ -1520,6 +1627,7 @@ 
fn backing_works() { &keystore, &signing_context, BackingKind::Threshold, + None, ); let backed_c = back_candidate( @@ -1529,6 +1637,7 @@ fn backing_works() { &keystore, &signing_context, BackingKind::Threshold, + None, ); let backed_candidates = vec![backed_a.clone(), backed_b.clone(), backed_c]; @@ -1568,6 +1677,7 @@ fn backing_works() { .into_iter() .collect(), &group_validators, + false, ) .expect("candidates scheduled, in order, and backed"); @@ -1698,6 +1808,263 @@ fn backing_works() { }); } +#[test] +fn backing_works_with_elastic_scaling_mvp() { + let chain_a = ParaId::from(1_u32); + let chain_b = ParaId::from(2_u32); + let thread_a = ParaId::from(3_u32); + + // The block number of the relay-parent for testing. + const RELAY_PARENT_NUM: BlockNumber = 4; + + let paras = vec![ + (chain_a, ParaKind::Parachain), + (chain_b, ParaKind::Parachain), + (thread_a, ParaKind::Parathread), + ]; + let validators = vec![ + Sr25519Keyring::Alice, + Sr25519Keyring::Bob, + Sr25519Keyring::Charlie, + Sr25519Keyring::Dave, + Sr25519Keyring::Ferdie, + ]; + let keystore: KeystorePtr = Arc::new(LocalKeystore::in_memory()); + for validator in validators.iter() { + Keystore::sr25519_generate_new( + &*keystore, + PARACHAIN_KEY_TYPE_ID, + Some(&validator.to_seed()), + ) + .unwrap(); + } + let validator_public = validator_pubkeys(&validators); + + new_test_ext(genesis_config(paras)).execute_with(|| { + shared::Pallet::::set_active_validators_ascending(validator_public.clone()); + shared::Pallet::::set_session_index(5); + + run_to_block(5, |_| None); + + let signing_context = + SigningContext { parent_hash: System::parent_hash(), session_index: 5 }; + + let group_validators = |group_index: GroupIndex| { + match group_index { + group_index if group_index == GroupIndex::from(0) => Some(vec![0, 1]), + group_index if group_index == GroupIndex::from(1) => Some(vec![2, 3]), + group_index if group_index == GroupIndex::from(2) => Some(vec![4]), + _ => panic!("Group index out of bounds for 2 parachains and 1 parathread core"), + } + .map(|vs| vs.into_iter().map(ValidatorIndex).collect::>()) + }; + + // When processing candidates, we compute the group index from scheduler. 
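+        // (Group 0 backs core 0 for para A; groups 1 and 2 back cores 1 and 2, which are both
+        // assigned to para B in this test.)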
+ let validator_groups = vec![ + vec![ValidatorIndex(0), ValidatorIndex(1)], + vec![ValidatorIndex(2), ValidatorIndex(3)], + vec![ValidatorIndex(4)], + ]; + Scheduler::set_validator_groups(validator_groups); + + let allowed_relay_parents = default_allowed_relay_parent_tracker(); + + let chain_a_assignment = (chain_a, CoreIndex::from(0)); + + let mut candidate_a = TestCandidateBuilder { + para_id: chain_a, + relay_parent: System::parent_hash(), + pov_hash: Hash::repeat_byte(1), + persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), + hrmp_watermark: RELAY_PARENT_NUM, + ..Default::default() + } + .build(); + collator_sign_candidate(Sr25519Keyring::One, &mut candidate_a); + + let mut candidate_b_1 = TestCandidateBuilder { + para_id: chain_b, + relay_parent: System::parent_hash(), + pov_hash: Hash::repeat_byte(2), + persisted_validation_data_hash: make_vdata_hash(chain_b).unwrap(), + hrmp_watermark: RELAY_PARENT_NUM, + ..Default::default() + } + .build(); + collator_sign_candidate(Sr25519Keyring::One, &mut candidate_b_1); + + let mut candidate_b_2 = TestCandidateBuilder { + para_id: chain_b, + relay_parent: System::parent_hash(), + pov_hash: Hash::repeat_byte(3), + persisted_validation_data_hash: make_vdata_hash(chain_b).unwrap(), + hrmp_watermark: RELAY_PARENT_NUM, + ..Default::default() + } + .build(); + collator_sign_candidate(Sr25519Keyring::One, &mut candidate_b_2); + + let backed_a = back_candidate( + candidate_a.clone(), + &validators, + group_validators(GroupIndex::from(0)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + None, + ); + + let backed_b_1 = back_candidate( + candidate_b_1.clone(), + &validators, + group_validators(GroupIndex::from(1)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + Some(CoreIndex(1)), + ); + + let backed_b_2 = back_candidate( + candidate_b_2.clone(), + &validators, + group_validators(GroupIndex::from(2)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + Some(CoreIndex(2)), + ); + + let backed_candidates = vec![backed_a.clone(), backed_b_1.clone(), backed_b_2.clone()]; + let get_backing_group_idx = { + // the order defines the group implicitly for this test case + let backed_candidates_with_groups = backed_candidates + .iter() + .enumerate() + .map(|(idx, backed_candidate)| (backed_candidate.hash(), GroupIndex(idx as _))) + .collect::>(); + + move |candidate_hash_x: CandidateHash| -> Option { + backed_candidates_with_groups.iter().find_map(|(candidate_hash, grp)| { + if *candidate_hash == candidate_hash_x { + Some(*grp) + } else { + None + } + }) + } + }; + + let ProcessedCandidates { + core_indices: occupied_cores, + candidate_receipt_with_backing_validator_indices, + } = ParaInclusion::process_candidates( + &allowed_relay_parents, + backed_candidates.clone(), + &[chain_a_assignment, (chain_b, CoreIndex::from(2))].into_iter().collect(), + &[ + (chain_a_assignment.1, chain_a_assignment.0), + (CoreIndex::from(2), chain_b), + (CoreIndex::from(1), chain_b), + ] + .into_iter() + .collect(), + &group_validators, + true, + ) + .expect("candidates scheduled, in order, and backed"); + + // Both b candidates will be backed. However, only one will be recorded on-chain and proceed + // with being made available. 
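+        // (`PendingAvailability` is keyed by `ParaId`, so the storage checks further down only
+        // see one of the two backed candidates for para B, namely the one on core 2.)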
+ assert_eq!( + occupied_cores, + vec![ + (CoreIndex::from(0), chain_a), + (CoreIndex::from(1), chain_b), + (CoreIndex::from(2), chain_b), + ] + ); + + // Transform the votes into the setup we expect + let mut expected = std::collections::HashMap::< + CandidateHash, + (CandidateReceipt, Vec<(ValidatorIndex, ValidityAttestation)>), + >::new(); + backed_candidates.into_iter().for_each(|backed_candidate| { + let candidate_receipt_with_backers = expected + .entry(backed_candidate.hash()) + .or_insert_with(|| (backed_candidate.receipt(), Vec::new())); + let (validator_indices, _maybe_core_index) = + backed_candidate.validator_indices_and_core_index(true); + assert_eq!(backed_candidate.validity_votes().len(), validator_indices.count_ones()); + candidate_receipt_with_backers.1.extend( + validator_indices + .iter() + .enumerate() + .filter(|(_, signed)| **signed) + .zip(backed_candidate.validity_votes().iter().cloned()) + .filter_map(|((validator_index_within_group, _), attestation)| { + let grp_idx = get_backing_group_idx(backed_candidate.hash()).unwrap(); + group_validators(grp_idx).map(|validator_indices| { + (validator_indices[validator_index_within_group], attestation) + }) + }), + ); + }); + + assert_eq!( + expected, + candidate_receipt_with_backing_validator_indices + .into_iter() + .map(|c| (c.0.hash(), c)) + .collect() + ); + + let backers = { + let num_backers = effective_minimum_backing_votes( + group_validators(GroupIndex(0)).unwrap().len(), + configuration::Pallet::::config().minimum_backing_votes, + ); + backing_bitfield(&(0..num_backers).collect::>()) + }; + assert_eq!( + >::get(&chain_a), + Some(CandidatePendingAvailability { + core: CoreIndex::from(0), + hash: candidate_a.hash(), + descriptor: candidate_a.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + backers, + backing_group: GroupIndex::from(0), + }) + ); + assert_eq!( + >::get(&chain_a), + Some(candidate_a.commitments), + ); + + // Only one candidate for b will be recorded on chain. + assert_eq!( + >::get(&chain_b), + Some(CandidatePendingAvailability { + core: CoreIndex::from(2), + hash: candidate_b_2.hash(), + descriptor: candidate_b_2.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + backers: backing_bitfield(&[4]), + backing_group: GroupIndex::from(2), + }) + ); + assert_eq!( + >::get(&chain_b), + Some(candidate_b_2.commitments), + ); + }); +} + #[test] fn can_include_candidate_with_ok_code_upgrade() { let chain_a = ParaId::from(1_u32); @@ -1772,6 +2139,7 @@ fn can_include_candidate_with_ok_code_upgrade() { &keystore, &signing_context, BackingKind::Threshold, + None, ); let ProcessedCandidates { core_indices: occupied_cores, .. 
} = @@ -1781,6 +2149,7 @@ fn can_include_candidate_with_ok_code_upgrade() { &[chain_a_assignment].into_iter().collect(), &[(chain_a_assignment.1, chain_a_assignment.0)].into_iter().collect(), &group_validators, + false, ) .expect("candidates scheduled, in order, and backed"); @@ -1965,6 +2334,7 @@ fn check_allowed_relay_parents() { &keystore, &signing_context_a, BackingKind::Threshold, + None, ); let backed_b = back_candidate( @@ -1974,6 +2344,7 @@ fn check_allowed_relay_parents() { &keystore, &signing_context_b, BackingKind::Threshold, + None, ); let backed_c = back_candidate( @@ -1983,6 +2354,7 @@ fn check_allowed_relay_parents() { &keystore, &signing_context_c, BackingKind::Threshold, + None, ); let backed_candidates = vec![backed_a, backed_b, backed_c]; @@ -2001,6 +2373,7 @@ fn check_allowed_relay_parents() { .into_iter() .collect(), &group_validators, + false, ) .expect("candidates scheduled, in order, and backed"); }); @@ -2229,6 +2602,7 @@ fn para_upgrade_delay_scheduled_from_inclusion() { &keystore, &signing_context, BackingKind::Threshold, + None, ); let ProcessedCandidates { core_indices: occupied_cores, .. } = @@ -2238,6 +2612,7 @@ fn para_upgrade_delay_scheduled_from_inclusion() { &[chain_a_assignment].into_iter().collect(), &[(chain_a_assignment.1, chain_a_assignment.0)].into_iter().collect(), &group_validators, + false, ) .expect("candidates scheduled, in order, and backed"); diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 900e1bbff503..335002692fa9 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -663,6 +663,7 @@ impl Pallet { &scheduled, &scheduled_by_core, >::group_validators, + core_index_enabled, )?; // Note which of the scheduled cores were actually occupied by a backed candidate. 
>::occupied(occupied.into_iter().map(|e| (e.0, e.1)).collect()); diff --git a/polkadot/runtime/parachains/src/paras_inherent/tests.rs b/polkadot/runtime/parachains/src/paras_inherent/tests.rs index ecee338e7a15..9c9308db8844 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/tests.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/tests.rs @@ -1348,6 +1348,7 @@ mod sanitizers { &keystore, &signing_context, BackingKind::Threshold, + None, ); backed }) From eb345b06868caa507b431f6ae15c676ff35e4771 Mon Sep 17 00:00:00 2001 From: alindima Date: Thu, 22 Feb 2024 12:06:28 +0200 Subject: [PATCH 46/51] fix clippy --- polkadot/node/core/backing/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 852026d80097..e2072b42e9cb 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -70,7 +70,7 @@ use std::{ sync::Arc, }; -use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; +use bitvec::vec::BitVec; use futures::{ channel::{mpsc, oneshot}, future::BoxFuture, From 058c0c23e810fa26657947169e48f24cf00ff662 Mon Sep 17 00:00:00 2001 From: alindima Date: Thu, 22 Feb 2024 17:50:49 +0200 Subject: [PATCH 47/51] refactor --- .../runtime/parachains/src/inclusion/mod.rs | 47 ++-- .../runtime/parachains/src/inclusion/tests.rs | 239 +++--------------- .../parachains/src/paras_inherent/mod.rs | 227 +++++++++-------- .../parachains/src/paras_inherent/tests.rs | 120 ++++++--- 4 files changed, 259 insertions(+), 374 deletions(-) diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 2d6dac8b3b19..16e2e93b5617 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -47,10 +47,7 @@ use scale_info::TypeInfo; use sp_runtime::{traits::One, DispatchError, SaturatedConversion, Saturating}; #[cfg(feature = "std")] use sp_std::fmt; -use sp_std::{ - collections::{btree_map::BTreeMap, btree_set::BTreeSet}, - prelude::*, -}; +use sp_std::{collections::btree_set::BTreeSet, prelude::*}; pub use pallet::*; @@ -601,9 +598,7 @@ impl Pallet { /// scheduled cores. If these conditions are not met, the execution of the function fails. pub(crate) fn process_candidates( allowed_relay_parents: &AllowedRelayParentsTracker>, - candidates: Vec>, - scheduled: &BTreeMap, - scheduled_by_core: &BTreeMap, + candidates: Vec<(BackedCandidate, CoreIndex)>, group_validators: GV, core_index_enabled: bool, ) -> Result, DispatchError> @@ -612,9 +607,7 @@ impl Pallet { { let now = >::block_number(); - ensure!(candidates.len() <= scheduled_by_core.len(), Error::::UnscheduledCandidate); - - if scheduled.is_empty() { + if candidates.is_empty() { return Ok(ProcessedCandidates::default()) } @@ -650,7 +643,7 @@ impl Pallet { // // In the meantime, we do certain sanity checks on the candidates and on the scheduled // list. 
- for (candidate_idx, backed_candidate) in candidates.iter().enumerate() { + for (candidate_idx, (backed_candidate, core_index)) in candidates.iter().enumerate() { let relay_parent_hash = backed_candidate.descriptor().relay_parent; let para_id = backed_candidate.descriptor().para_id; @@ -681,25 +674,19 @@ impl Pallet { Ok(rpn) => rpn, }; - let (validator_indices, maybe_core_index) = + let (validator_indices, _) = backed_candidate.validator_indices_and_core_index(core_index_enabled); - let core_idx = if let Some(core_idx) = maybe_core_index { - ensure!( - scheduled_by_core.get(&core_idx) == Some(¶_id), - Error::::UnscheduledCandidate - ); - - // We assume the core index is valid because of the checks done in - // `filter_elastic_scaling_candidates`. - core_idx - } else { - *scheduled.get(¶_id).ok_or(Error::::UnscheduledCandidate)? - }; - - log::debug!(target: LOG_TARGET, "Candidate {:?} on {:?}, core_index_enabled = {}", backed_candidate.hash(), core_idx, core_index_enabled); + log::debug!( + target: LOG_TARGET, + "Candidate {:?} on {:?}, + core_index_enabled = {}", + backed_candidate.hash(), + core_index, + core_index_enabled + ); - check_assignment_in_order(core_idx)?; + check_assignment_in_order(core_index)?; let mut backers = bitvec::bitvec![u8, BitOrderLsb0; 0; validators.len()]; @@ -713,7 +700,7 @@ impl Pallet { // assigned to core at block `N + 1`. Thus, `relay_parent_number + 1` // will always land in the current session. let group_idx = >::group_assigned_to_core( - core_idx, + *core_index, relay_parent_number + One::one(), ) .ok_or_else(|| { @@ -780,7 +767,7 @@ impl Pallet { } core_indices_and_backers.push(( - (core_idx, para_id), + (*core_index, para_id), backers, group_idx, relay_parent_number, @@ -792,7 +779,7 @@ impl Pallet { // one more sweep for actually writing to storage. let core_indices = core_indices_and_backers.iter().map(|(c, ..)| *c).collect(); - for (candidate, (core, backers, group, relay_parent_number)) in + for ((candidate, _), (core, backers, group, relay_parent_number)) in candidates.into_iter().zip(core_indices_and_backers) { let para_id = candidate.descriptor().para_id; diff --git a/polkadot/runtime/parachains/src/inclusion/tests.rs b/polkadot/runtime/parachains/src/inclusion/tests.rs index 02bf5f7186b4..d2b5a67c3e45 100644 --- a/polkadot/runtime/parachains/src/inclusion/tests.rs +++ b/polkadot/runtime/parachains/src/inclusion/tests.rs @@ -925,121 +925,16 @@ fn candidate_checks() { let thread_a_assignment = (thread_a, CoreIndex::from(2)); let allowed_relay_parents = default_allowed_relay_parent_tracker(); - // unscheduled candidate. - { - let mut candidate = TestCandidateBuilder { - para_id: chain_a, - relay_parent: System::parent_hash(), - pov_hash: Hash::repeat_byte(1), - persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), - hrmp_watermark: RELAY_PARENT_NUM, - ..Default::default() - } - .build(); - collator_sign_candidate(Sr25519Keyring::One, &mut candidate); - - let backed = back_candidate( - candidate, - &validators, - group_validators(GroupIndex::from(0)).unwrap().as_ref(), - &keystore, - &signing_context, - BackingKind::Threshold, - None, - ); - - // No scheduled cores. - assert_noop!( - ParaInclusion::process_candidates( - &allowed_relay_parents, - vec![backed.clone()], - &BTreeMap::new(), - &BTreeMap::new(), - &group_validators, - false - ), - Error::::UnscheduledCandidate - ); - - // Core scheduled for another para. 
- assert_noop!( - ParaInclusion::process_candidates( - &allowed_relay_parents, - vec![backed.clone()], - &[chain_b_assignment].into_iter().collect(), - &[(chain_b_assignment.1, chain_b_assignment.0)].into_iter().collect(), - &group_validators, - false - ), - Error::::UnscheduledCandidate - ); - - // another candidate for the same para, but only one core scheduled for the para. - let mut another_candidate = TestCandidateBuilder { - para_id: chain_a, - relay_parent: System::parent_hash(), - pov_hash: Hash::repeat_byte(2), - persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), - hrmp_watermark: RELAY_PARENT_NUM, - ..Default::default() - } - .build(); - collator_sign_candidate(Sr25519Keyring::One, &mut another_candidate); - let another_backed = back_candidate( - another_candidate, - &validators, - group_validators(GroupIndex::from(0)).unwrap().as_ref(), - &keystore, - &signing_context, - BackingKind::Threshold, - None, - ); - - assert_noop!( - ParaInclusion::process_candidates( - &allowed_relay_parents, - vec![backed, another_backed], - &[chain_a_assignment].into_iter().collect(), - &[(chain_a_assignment.1, chain_a_assignment.0)].into_iter().collect(), - &group_validators, - false - ), - Error::::UnscheduledCandidate - ); - - // core scheduled for another para. ElasticScalingMVP enabled and cores supplied. - let mut candidate = TestCandidateBuilder { - para_id: chain_a, - relay_parent: System::parent_hash(), - pov_hash: Hash::repeat_byte(1), - persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), - hrmp_watermark: RELAY_PARENT_NUM, - ..Default::default() - } - .build(); - collator_sign_candidate(Sr25519Keyring::One, &mut candidate); - - let backed = back_candidate( - candidate, - &validators, - group_validators(GroupIndex::from(0)).unwrap().as_ref(), - &keystore, - &signing_context, - BackingKind::Threshold, - Some(chain_a_assignment.1), - ); - assert_noop!( - ParaInclusion::process_candidates( - &allowed_relay_parents, - vec![backed], - &[chain_b_assignment].into_iter().collect(), - &[(chain_b_assignment.1, chain_b_assignment.0)].into_iter().collect(), - &group_validators, - true - ), - Error::::UnscheduledCandidate - ); - } + // no candidates. + assert_eq!( + ParaInclusion::process_candidates( + &allowed_relay_parents, + vec![], + &group_validators, + false + ), + Ok(ProcessedCandidates::default()) + ); // candidates out of order. 
{ @@ -1090,14 +985,7 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed_b, backed_a], - &[chain_a_assignment, chain_b_assignment].into_iter().collect(), - &[ - (chain_a_assignment.1, chain_a_assignment.0), - (chain_b_assignment.1, chain_b_assignment.0) - ] - .into_iter() - .collect(), + vec![(backed_b, chain_b_assignment.1), (backed_a, chain_a_assignment.1)], &group_validators, false ), @@ -1131,9 +1019,7 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed], - &[chain_a_assignment].into_iter().collect(), - &[(chain_a_assignment.1, chain_a_assignment.0)].into_iter().collect(), + vec![(backed, chain_a_assignment.1)], &group_validators, false ), @@ -1192,14 +1078,7 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed_b, backed_a], - &[chain_a_assignment, chain_b_assignment].into_iter().collect(), - &[ - (chain_a_assignment.1, chain_a_assignment.0), - (chain_b_assignment.1, chain_b_assignment.0) - ] - .into_iter() - .collect(), + vec![(backed_b, chain_b_assignment.1), (backed_a, chain_a_assignment.1)], &group_validators, false ), @@ -1238,9 +1117,7 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed], - &[thread_a_assignment].into_iter().collect(), - &[(thread_a_assignment.1, thread_a_assignment.0)].into_iter().collect(), + vec![(backed, thread_a_assignment.1)], &group_validators, false ), @@ -1291,9 +1168,7 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed], - &[chain_a_assignment].into_iter().collect(), - &[(chain_a_assignment.1, chain_a_assignment.0)].into_iter().collect(), + vec![(backed, chain_a_assignment.1)], &group_validators, false ), @@ -1334,9 +1209,7 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed], - &[chain_a_assignment].into_iter().collect(), - &[(chain_a_assignment.1, chain_a_assignment.0)].into_iter().collect(), + vec![(backed, chain_a_assignment.1)], &group_validators, false ), @@ -1387,9 +1260,7 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed], - &[chain_a_assignment].into_iter().collect(), - &[(chain_a_assignment.1, chain_a_assignment.0)].into_iter().collect(), + vec![(backed, chain_a_assignment.1)], &group_validators, false ), @@ -1424,9 +1295,7 @@ fn candidate_checks() { assert_eq!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed], - &[chain_a_assignment].into_iter().collect(), - &[(chain_a_assignment.1, chain_a_assignment.0)].into_iter().collect(), + vec![(backed, chain_a_assignment.1)], &group_validators, false ), @@ -1462,9 +1331,7 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed], - &[chain_a_assignment].into_iter().collect(), - &[(chain_a_assignment.1, chain_a_assignment.0)].into_iter().collect(), + vec![(backed, chain_a_assignment.1)], &group_validators, false ), @@ -1500,9 +1367,7 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed], - &[chain_a_assignment].into_iter().collect(), - &[(chain_a_assignment.1, chain_a_assignment.0)].into_iter().collect(), + vec![(backed, chain_a_assignment.1)], &group_validators, false ), @@ -1640,13 +1505,17 @@ fn backing_works() { None, ); - let 
backed_candidates = vec![backed_a.clone(), backed_b.clone(), backed_c]; + let backed_candidates = vec![ + (backed_a.clone(), chain_a_assignment.1), + (backed_b.clone(), chain_b_assignment.1), + (backed_c, thread_a_assignment.1), + ]; let get_backing_group_idx = { // the order defines the group implicitly for this test case let backed_candidates_with_groups = backed_candidates .iter() .enumerate() - .map(|(idx, backed_candidate)| (backed_candidate.hash(), GroupIndex(idx as _))) + .map(|(idx, (backed_candidate, _))| (backed_candidate.hash(), GroupIndex(idx as _))) .collect::>(); move |candidate_hash_x: CandidateHash| -> Option { @@ -1666,16 +1535,6 @@ fn backing_works() { } = ParaInclusion::process_candidates( &allowed_relay_parents, backed_candidates.clone(), - &[chain_a_assignment, chain_b_assignment, thread_a_assignment] - .into_iter() - .collect(), - &[ - (chain_a_assignment.1, chain_a_assignment.0), - (chain_b_assignment.1, chain_b_assignment.0), - (thread_a_assignment.1, thread_a_assignment.0), - ] - .into_iter() - .collect(), &group_validators, false, ) @@ -1696,7 +1555,7 @@ fn backing_works() { CandidateHash, (CandidateReceipt, Vec<(ValidatorIndex, ValidityAttestation)>), >::new(); - backed_candidates.into_iter().for_each(|backed_candidate| { + backed_candidates.into_iter().for_each(|(backed_candidate, _)| { let candidate_receipt_with_backers = intermediate .entry(backed_candidate.hash()) .or_insert_with(|| (backed_candidate.receipt(), Vec::new())); @@ -1869,8 +1728,6 @@ fn backing_works_with_elastic_scaling_mvp() { let allowed_relay_parents = default_allowed_relay_parent_tracker(); - let chain_a_assignment = (chain_a, CoreIndex::from(0)); - let mut candidate_a = TestCandidateBuilder { para_id: chain_a, relay_parent: System::parent_hash(), @@ -1934,13 +1791,17 @@ fn backing_works_with_elastic_scaling_mvp() { Some(CoreIndex(2)), ); - let backed_candidates = vec![backed_a.clone(), backed_b_1.clone(), backed_b_2.clone()]; + let backed_candidates = vec![ + (backed_a.clone(), CoreIndex(0)), + (backed_b_1.clone(), CoreIndex(1)), + (backed_b_2.clone(), CoreIndex(2)), + ]; let get_backing_group_idx = { // the order defines the group implicitly for this test case let backed_candidates_with_groups = backed_candidates .iter() .enumerate() - .map(|(idx, backed_candidate)| (backed_candidate.hash(), GroupIndex(idx as _))) + .map(|(idx, (backed_candidate, _))| (backed_candidate.hash(), GroupIndex(idx as _))) .collect::>(); move |candidate_hash_x: CandidateHash| -> Option { @@ -1960,14 +1821,6 @@ fn backing_works_with_elastic_scaling_mvp() { } = ParaInclusion::process_candidates( &allowed_relay_parents, backed_candidates.clone(), - &[chain_a_assignment, (chain_b, CoreIndex::from(2))].into_iter().collect(), - &[ - (chain_a_assignment.1, chain_a_assignment.0), - (CoreIndex::from(2), chain_b), - (CoreIndex::from(1), chain_b), - ] - .into_iter() - .collect(), &group_validators, true, ) @@ -1989,7 +1842,7 @@ fn backing_works_with_elastic_scaling_mvp() { CandidateHash, (CandidateReceipt, Vec<(ValidatorIndex, ValidityAttestation)>), >::new(); - backed_candidates.into_iter().for_each(|backed_candidate| { + backed_candidates.into_iter().for_each(|(backed_candidate, _)| { let candidate_receipt_with_backers = expected .entry(backed_candidate.hash()) .or_insert_with(|| (backed_candidate.receipt(), Vec::new())); @@ -2145,9 +1998,7 @@ fn can_include_candidate_with_ok_code_upgrade() { let ProcessedCandidates { core_indices: occupied_cores, .. 
} = ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed_a], - &[chain_a_assignment].into_iter().collect(), - &[(chain_a_assignment.1, chain_a_assignment.0)].into_iter().collect(), + vec![(backed_a, chain_a_assignment.1)], &group_validators, false, ) @@ -2357,21 +2208,15 @@ fn check_allowed_relay_parents() { None, ); - let backed_candidates = vec![backed_a, backed_b, backed_c]; + let backed_candidates = vec![ + (backed_a, chain_a_assignment.1), + (backed_b, chain_b_assignment.1), + (backed_c, thread_a_assignment.1), + ]; ParaInclusion::process_candidates( &allowed_relay_parents, backed_candidates.clone(), - &[chain_a_assignment, chain_b_assignment, thread_a_assignment] - .into_iter() - .collect(), - &[ - (chain_a_assignment.1, chain_a_assignment.0), - (chain_b_assignment.1, chain_b_assignment.0), - (thread_a_assignment.1, thread_a_assignment.0), - ] - .into_iter() - .collect(), &group_validators, false, ) @@ -2608,9 +2453,7 @@ fn para_upgrade_delay_scheduled_from_inclusion() { let ProcessedCandidates { core_indices: occupied_cores, .. } = ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed_a], - &[chain_a_assignment].into_iter().collect(), - &[(chain_a_assignment.1, chain_a_assignment.0)].into_iter().collect(), + vec![(backed_a, chain_a_assignment.1)], &group_validators, false, ) diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 335002692fa9..80e8b38004c6 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -588,9 +588,6 @@ impl Pallet { let freed = collect_all_freed_cores::(freed_concluded.iter().cloned()); >::free_cores_and_fill_claimqueue(freed, now); - let scheduled = >::scheduled_paras() - .map(|(core_idx, para_id)| (para_id, core_idx)) - .collect(); METRICS.on_candidates_processed_total(backed_candidates.len() as u64); @@ -600,35 +597,27 @@ impl Pallet { .map(|b| *b) .unwrap_or(false); - let dropped_elastic_scaling_candidates = filter_elastic_scaling_candidates::( - &allowed_relay_parents, - core_index_enabled, - &mut backed_candidates, - ); - // In `Enter` context (invoked during execution) we shouldn't have filtered any candidates - // due to a para having multiple cores assigned and no injected core index. They have been - // filtered during inherent data preparation (`ProvideInherent` context). Abort in such - // cases. 
- if context == ProcessInherentDataContext::Enter { - ensure!( - !dropped_elastic_scaling_candidates, - Error::::BackedByElasticScalingWithNoCoreIndex - ); + let mut scheduled: BTreeMap> = BTreeMap::new(); + for (core_idx, para_id) in >::scheduled_paras() { + scheduled.entry(para_id).or_default().insert(core_idx); } - let SanitizedBackedCandidates { backed_candidates, votes_from_disabled_were_dropped } = - sanitize_backed_candidates::( - backed_candidates, - &allowed_relay_parents, - |candidate_idx: usize, - backed_candidate: &BackedCandidate<::Hash>| - -> bool { - let para_id = backed_candidate.descriptor().para_id; - let prev_context = >::para_most_recent_context(para_id); - let check_ctx = CandidateCheckContext::::new(prev_context); - - // never include a concluded-invalid candidate - current_concluded_invalid_disputes.contains(&backed_candidate.hash()) || + let SanitizedBackedCandidates { + backed_candidates_with_core, + votes_from_disabled_were_dropped, + dropped_elastic_scaling_candidates, + } = sanitize_backed_candidates::( + backed_candidates, + &allowed_relay_parents, + |candidate_idx: usize, + backed_candidate: &BackedCandidate<::Hash>| + -> bool { + let para_id = backed_candidate.descriptor().para_id; + let prev_context = >::para_most_recent_context(para_id); + let check_ctx = CandidateCheckContext::::new(prev_context); + + // never include a concluded-invalid candidate + current_concluded_invalid_disputes.contains(&backed_candidate.hash()) || // Instead of checking the candidates with code upgrades twice // move the checking up here and skip it in the training wheels fallback. // That way we avoid possible duplicate checks while assuring all @@ -638,12 +627,12 @@ impl Pallet { check_ctx .verify_backed_candidate(&allowed_relay_parents, candidate_idx, backed_candidate.candidate()) .is_err() - }, - &scheduled, - core_index_enabled, - ); + }, + &scheduled, + core_index_enabled, + ); - METRICS.on_candidates_sanitized(backed_candidates.len() as u64); + METRICS.on_candidates_sanitized(backed_candidates_with_core.len() as u64); // In `Enter` context (invoked during execution) there should be no backing votes from // disabled validators because they should have been filtered out during inherent data @@ -651,7 +640,17 @@ impl Pallet { if context == ProcessInherentDataContext::Enter { ensure!(!votes_from_disabled_were_dropped, Error::::BackedByDisabled); } - let scheduled_by_core = >::scheduled_paras().collect(); + + // In `Enter` context (invoked during execution) we shouldn't have filtered any candidates + // due to a para having multiple cores assigned and no injected core index. They have been + // filtered during inherent data preparation (`ProvideInherent` context). Abort in such + // cases. + if context == ProcessInherentDataContext::Enter { + ensure!( + !dropped_elastic_scaling_candidates, + Error::::BackedByElasticScalingWithNoCoreIndex + ); + } // Process backed candidates according to scheduled cores. 
let inclusion::ProcessedCandidates::< as HeaderT>::Hash> { @@ -659,9 +658,7 @@ impl Pallet { candidate_receipt_with_backing_validator_indices, } = >::process_candidates( &allowed_relay_parents, - backed_candidates.clone(), - &scheduled, - &scheduled_by_core, + backed_candidates_with_core.clone(), >::group_validators, core_index_enabled, )?; @@ -680,8 +677,15 @@ impl Pallet { let bitfields = bitfields.into_iter().map(|v| v.into_unchecked()).collect(); - let processed = - ParachainsInherentData { bitfields, backed_candidates, disputes, parent_header }; + let processed = ParachainsInherentData { + bitfields, + backed_candidates: backed_candidates_with_core + .into_iter() + .map(|(candidate, _)| candidate) + .collect(), + disputes, + parent_header, + }; Ok((processed, Some(all_weight_after).into())) } } @@ -946,9 +950,12 @@ pub(crate) fn sanitize_bitfields( #[derive(Debug, PartialEq)] struct SanitizedBackedCandidates { // Sanitized backed candidates. The `Vec` is sorted according to the occupied core index. - backed_candidates: Vec>, + backed_candidates_with_core: Vec<(BackedCandidate, CoreIndex)>, // Set to true if any votes from disabled validators were dropped from the input. votes_from_disabled_were_dropped: bool, + // Set to true if any candidates were dropped due to filtering done in + // `map_candidates_to_cores` + dropped_elastic_scaling_candidates: bool, } /// Filter out: @@ -973,7 +980,7 @@ fn sanitize_backed_candidates< mut backed_candidates: Vec>, allowed_relay_parents: &AllowedRelayParentsTracker>, mut candidate_has_concluded_invalid_dispute_or_is_invalid: F, - scheduled: &BTreeMap, + scheduled: &BTreeMap>, core_index_enabled: bool, ) -> SanitizedBackedCandidates { // Remove any candidates that were concluded invalid. @@ -982,22 +989,19 @@ fn sanitize_backed_candidates< !candidate_has_concluded_invalid_dispute_or_is_invalid(candidate_idx, backed_candidate) }); - // Assure the backed candidate's `ParaId`'s core is free. - // This holds under the assumption that `Scheduler::schedule` is called _before_. - // We don't check the relay-parent because this is done in the closure when - // constructing the inherent and during actual processing otherwise. - - backed_candidates.retain(|backed_candidate| { - let desc = backed_candidate.descriptor(); - - scheduled.get(&desc.para_id).is_some() - }); + // Map candidates to scheduled cores. + let (mut backed_candidates_with_core, dropped_elastic_scaling_candidates) = + map_candidates_to_cores::( + &allowed_relay_parents, + scheduled, + core_index_enabled, + backed_candidates, + ); // Filter out backing statements from disabled validators - let dropped_disabled = filter_backed_statements_from_disabled_validators::( - &mut backed_candidates, + let votes_from_disabled_were_dropped = filter_backed_statements_from_disabled_validators::( + &mut backed_candidates_with_core, &allowed_relay_parents, - scheduled, core_index_enabled, ); @@ -1006,14 +1010,12 @@ fn sanitize_backed_candidates< // but more importantly are scheduled for a free core. // This both avoids extra work for obviously invalid candidates, // but also allows this to be done in place. - backed_candidates.sort_by(|x, y| { - // Never panics, since we filtered all panic arguments out in the previous `fn retain`. 
- scheduled[&x.descriptor().para_id].cmp(&scheduled[&y.descriptor().para_id]) - }); + backed_candidates_with_core.sort_by(|(_x, core_x), (_y, core_y)| core_x.cmp(&core_y)); SanitizedBackedCandidates { - backed_candidates, - votes_from_disabled_were_dropped: dropped_disabled, + dropped_elastic_scaling_candidates, + votes_from_disabled_were_dropped, + backed_candidates_with_core, } } @@ -1102,9 +1104,11 @@ fn limit_and_sanitize_disputes< // few more sanity checks. Returns `true` if at least one statement is removed and `false` // otherwise. fn filter_backed_statements_from_disabled_validators( - backed_candidates: &mut Vec::Hash>>, + backed_candidates_with_core: &mut Vec<( + BackedCandidate<::Hash>, + CoreIndex, + )>, allowed_relay_parents: &AllowedRelayParentsTracker>, - scheduled: &BTreeMap, core_index_enabled: bool, ) -> bool { let disabled_validators = @@ -1115,7 +1119,7 @@ fn filter_backed_statements_from_disabled_validators::from(validator_indices); - let core_idx = if let Some(core_idx) = maybe_core_index { - core_idx - } else { - // Get `core_idx` assigned to the `para_id` of the candidate - match scheduled.get(&bc.descriptor().para_id) { - Some(core_idx) => *core_idx, - None => { - log::debug!(target: LOG_TARGET, "Can't get core idx of a backed candidate for para id {:?}. Dropping the candidate.", bc.descriptor().para_id); - return false - } - } - }; - // Get relay parent block number of the candidate. We need this to get the group index assigned to this core at this block number let relay_parent_block_number = match allowed_relay_parents .acquire_info(bc.descriptor().relay_parent, None) { @@ -1155,7 +1146,7 @@ fn filter_backed_statements_from_disabled_validators>::group_assigned_to_core( - core_idx, + *core_idx, relay_parent_block_number + One::one(), ) { Some(group_idx) => group_idx, @@ -1207,50 +1198,64 @@ fn filter_backed_statements_from_disabled_validators( +/// Map candidates to scheduled cores. +/// If the para only has one scheduled core and no `CoreIndex` is injected, map the candidate to the +/// single core. If the para has multiple cores scheduled, only map the candidates which have a +/// proper core injected. Filter out the rest. +fn map_candidates_to_cores( allowed_relay_parents: &AllowedRelayParentsTracker>, + scheduled: &BTreeMap>, core_index_enabled: bool, - candidates: &mut Vec>, -) -> bool { - // Count how many scheduled cores each paraid has. - let mut cores_per_parachain: BTreeMap = BTreeMap::new(); + candidates: Vec>, +) -> (Vec<(BackedCandidate, CoreIndex)>, bool) { + let mut dropped_elastic_scaling_candidates = false; + let mut backed_candidates_with_core = Vec::with_capacity(candidates.len()); - for (_, para_id) in >::scheduled_paras() { - *cores_per_parachain.entry(para_id).or_default() += 1; - } - - let prev_count = candidates.len(); // We keep a candidate if the parachain has only one core assigned or if - // a core index is provided by block author. - candidates.retain(|candidate| { - *cores_per_parachain.get(&candidate.descriptor().para_id).unwrap_or(&0) <= 1 || - has_core_index::(allowed_relay_parents, candidate, core_index_enabled) - }); + // a core index is provided by block author and it's indeed scheduled. 
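+    // A candidate for a multi-core para that carries no injected core index sets
+    // `dropped_elastic_scaling_candidates`; one whose injected core is not actually scheduled
+    // for its para is dropped without setting the flag.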
+ for backed_candidate in candidates { + let maybe_injected_core_index = get_injected_core_index::( + allowed_relay_parents, + &backed_candidate, + core_index_enabled, + ); + + let scheduled_cores = scheduled.get(&backed_candidate.descriptor().para_id); + if let Some(scheduled_cores) = scheduled_cores { + if let Some(core_idx) = maybe_injected_core_index { + if scheduled_cores.contains(&core_idx) { + backed_candidates_with_core.push((backed_candidate, core_idx)); + } + } else if scheduled_cores.len() == 1 { + backed_candidates_with_core.push(( + backed_candidate, + scheduled_cores.first().copied().expect("Length is 1"), + )); + } else { + dropped_elastic_scaling_candidates = true; + } + } + } - prev_count != candidates.len() + (backed_candidates_with_core, dropped_elastic_scaling_candidates) } // Returns `true` if the candidate contains a valid injected `CoreIndex`. -fn has_core_index( +fn get_injected_core_index( allowed_relay_parents: &AllowedRelayParentsTracker>, candidate: &BackedCandidate, core_index_enabled: bool, -) -> bool { +) -> Option { // After stripping the 8 bit extensions, the `validator_indices` field length is expected // to be equal to backing group size. If these don't match, the `CoreIndex` is badly encoded, // or not supported. let (validator_indices, maybe_core_idx) = candidate.validator_indices_and_core_index(core_index_enabled); - let Some(core_idx) = maybe_core_idx else { return false }; + let Some(core_idx) = maybe_core_idx else { return None }; let relay_parent_block_number = match allowed_relay_parents.acquire_info(candidate.descriptor().relay_parent, None) { @@ -1262,7 +1267,7 @@ fn has_core_index>::group_validators(group_idx) { Some(validators) => validators, - None => return false, + None => return None, }; - group_validators.len() == validator_indices.len() + if group_validators.len() == validator_indices.len() { + Some(core_idx) + } else { + None + } } diff --git a/polkadot/runtime/parachains/src/paras_inherent/tests.rs b/polkadot/runtime/parachains/src/paras_inherent/tests.rs index 9c9308db8844..8c26119b35db 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/tests.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/tests.rs @@ -1238,7 +1238,8 @@ mod sanitizers { // Backed candidates and scheduled parachains used for `sanitize_backed_candidates` testing struct TestData { backed_candidates: Vec, - scheduled_paras: BTreeMap, + all_backed_candidates_with_core: Vec<(BackedCandidate, CoreIndex)>, + scheduled_paras: BTreeMap>, } // Generate test data for the candidates and assert that the evnironment is set as expected @@ -1285,9 +1286,14 @@ mod sanitizers { shared::Pallet::::set_active_validators_ascending(validator_ids); // Two scheduled parachains - ParaId(1) on CoreIndex(0) and ParaId(2) on CoreIndex(1) - let scheduled = (0_usize..2) + let scheduled: BTreeMap> = (0_usize..2) .into_iter() - .map(|idx| (ParaId::from(1_u32 + idx as u32), CoreIndex::from(idx as u32))) + .map(|idx| { + ( + ParaId::from(1_u32 + idx as u32), + [CoreIndex::from(idx as u32)].into_iter().collect(), + ) + }) .collect::>(); // Set the validator groups in `scheduler` @@ -1370,13 +1376,37 @@ mod sanitizers { ] ); - TestData { backed_candidates, scheduled_paras: scheduled } + let all_backed_candidates_with_core = backed_candidates + .iter() + .map(|candidate| { + // Only one entry for this test data. 
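+                    // (Each para id maps to exactly one scheduled core in this test data, so
+                    // `first()` is unambiguous.)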
+ ( + candidate.clone(), + scheduled + .get(&candidate.descriptor().para_id) + .unwrap() + .first() + .copied() + .unwrap(), + ) + }) + .collect(); + + TestData { + backed_candidates, + scheduled_paras: scheduled, + all_backed_candidates_with_core, + } } #[test] fn happy_path() { new_test_ext(MockGenesisConfig::default()).execute_with(|| { - let TestData { backed_candidates, scheduled_paras: scheduled } = get_test_data(); + let TestData { + backed_candidates, + all_backed_candidates_with_core, + scheduled_paras: scheduled, + } = get_test_data(); let has_concluded_invalid = |_idx: usize, _backed_candidate: &BackedCandidate| -> bool { false }; @@ -1390,12 +1420,11 @@ mod sanitizers { false ), SanitizedBackedCandidates { - backed_candidates, - votes_from_disabled_were_dropped: false + backed_candidates_with_core: all_backed_candidates_with_core, + votes_from_disabled_were_dropped: false, + dropped_elastic_scaling_candidates: false } ); - - {} }); } @@ -1403,14 +1432,15 @@ mod sanitizers { #[test] fn nothing_scheduled() { new_test_ext(MockGenesisConfig::default()).execute_with(|| { - let TestData { backed_candidates, scheduled_paras: _ } = get_test_data(); + let TestData { backed_candidates, .. } = get_test_data(); let scheduled = &BTreeMap::new(); let has_concluded_invalid = |_idx: usize, _backed_candidate: &BackedCandidate| -> bool { false }; let SanitizedBackedCandidates { - backed_candidates: sanitized_backed_candidates, + backed_candidates_with_core: sanitized_backed_candidates, votes_from_disabled_were_dropped, + dropped_elastic_scaling_candidates, } = sanitize_backed_candidates::( backed_candidates.clone(), &>::allowed_relay_parents(), @@ -1421,6 +1451,7 @@ mod sanitizers { assert!(sanitized_backed_candidates.is_empty()); assert!(!votes_from_disabled_were_dropped); + assert!(!dropped_elastic_scaling_candidates); }); } @@ -1428,7 +1459,8 @@ mod sanitizers { #[test] fn invalid_are_filtered_out() { new_test_ext(MockGenesisConfig::default()).execute_with(|| { - let TestData { backed_candidates, scheduled_paras: scheduled } = get_test_data(); + let TestData { backed_candidates, scheduled_paras: scheduled, .. } = + get_test_data(); // mark every second one as concluded invalid let set = { @@ -1443,8 +1475,9 @@ mod sanitizers { let has_concluded_invalid = |_idx: usize, candidate: &BackedCandidate| set.contains(&candidate.hash()); let SanitizedBackedCandidates { - backed_candidates: sanitized_backed_candidates, + backed_candidates_with_core: sanitized_backed_candidates, votes_from_disabled_were_dropped, + dropped_elastic_scaling_candidates, } = sanitize_backed_candidates::( backed_candidates.clone(), &>::allowed_relay_parents(), @@ -1455,35 +1488,35 @@ mod sanitizers { assert_eq!(sanitized_backed_candidates.len(), backed_candidates.len() / 2); assert!(!votes_from_disabled_were_dropped); + assert!(!dropped_elastic_scaling_candidates); }); } #[test] fn disabled_non_signing_validator_doesnt_get_filtered() { new_test_ext(MockGenesisConfig::default()).execute_with(|| { - let TestData { mut backed_candidates, scheduled_paras } = get_test_data(); + let TestData { mut all_backed_candidates_with_core, .. 
} = get_test_data(); // Disable Eve set_disabled_validators(vec![4]); - let before = backed_candidates.clone(); + let before = all_backed_candidates_with_core.clone(); // Eve is disabled but no backing statement is signed by it so nothing should be // filtered assert!(!filter_backed_statements_from_disabled_validators::( - &mut backed_candidates, + &mut all_backed_candidates_with_core, &>::allowed_relay_parents(), - &scheduled_paras, false )); - assert_eq!(backed_candidates, before); + assert_eq!(all_backed_candidates_with_core, before); }); } #[test] fn drop_statements_from_disabled_without_dropping_candidate() { new_test_ext(MockGenesisConfig::default()).execute_with(|| { - let TestData { mut backed_candidates, scheduled_paras } = get_test_data(); + let TestData { mut all_backed_candidates_with_core, .. } = get_test_data(); // Disable Alice set_disabled_validators(vec![0]); @@ -1496,61 +1529,74 @@ mod sanitizers { configuration::Pallet::::force_set_active_config(hc); // Verify the initial state is as expected - assert_eq!(backed_candidates.get(0).unwrap().validity_votes().len(), 2); - let (validator_indices, None) = - backed_candidates.get(0).unwrap().validator_indices_and_core_index(false) + assert_eq!( + all_backed_candidates_with_core.get(0).unwrap().0.validity_votes().len(), + 2 + ); + let (validator_indices, None) = all_backed_candidates_with_core + .get(0) + .unwrap() + .0 + .validator_indices_and_core_index(false) else { panic!("Expected no injected core index") }; assert_eq!(validator_indices.get(0).unwrap(), true); assert_eq!(validator_indices.get(1).unwrap(), true); - let untouched = backed_candidates.get(1).unwrap().clone(); + let untouched = all_backed_candidates_with_core.get(1).unwrap().0.clone(); assert!(filter_backed_statements_from_disabled_validators::( - &mut backed_candidates, + &mut all_backed_candidates_with_core, &>::allowed_relay_parents(), - &scheduled_paras, false )); - let (validator_indices, None) = - backed_candidates.get(0).unwrap().validator_indices_and_core_index(false) + let (validator_indices, None) = all_backed_candidates_with_core + .get(0) + .unwrap() + .0 + .validator_indices_and_core_index(false) else { panic!("Expected no injected core index") }; // there should still be two backed candidates - assert_eq!(backed_candidates.len(), 2); + assert_eq!(all_backed_candidates_with_core.len(), 2); // but the first one should have only one validity vote - assert_eq!(backed_candidates.get(0).unwrap().validity_votes().len(), 1); + assert_eq!( + all_backed_candidates_with_core.get(0).unwrap().0.validity_votes().len(), + 1 + ); // Validator 0 vote should be dropped, validator 1 - retained assert_eq!(validator_indices.get(0).unwrap(), false); assert_eq!(validator_indices.get(1).unwrap(), true); // the second candidate shouldn't be modified - assert_eq!(*backed_candidates.get(1).unwrap(), untouched); + assert_eq!(all_backed_candidates_with_core.get(1).unwrap().0, untouched); }); } #[test] fn drop_candidate_if_all_statements_are_from_disabled() { new_test_ext(MockGenesisConfig::default()).execute_with(|| { - let TestData { mut backed_candidates, scheduled_paras } = get_test_data(); + let TestData { mut all_backed_candidates_with_core, .. 
} = get_test_data(); // Disable Alice and Bob set_disabled_validators(vec![0, 1]); // Verify the initial state is as expected - assert_eq!(backed_candidates.get(0).unwrap().validity_votes().len(), 2); - let untouched = backed_candidates.get(1).unwrap().clone(); + assert_eq!( + all_backed_candidates_with_core.get(0).unwrap().0.validity_votes().len(), + 2 + ); + let untouched = all_backed_candidates_with_core.get(1).unwrap().0.clone(); assert!(filter_backed_statements_from_disabled_validators::( - &mut backed_candidates, + &mut all_backed_candidates_with_core, &>::allowed_relay_parents(), - &scheduled_paras, false )); - assert_eq!(backed_candidates.len(), 1); - assert_eq!(*backed_candidates.get(0).unwrap(), untouched); + assert_eq!(all_backed_candidates_with_core.len(), 1); + assert_eq!(all_backed_candidates_with_core.get(0).unwrap().0, untouched); }); } } From d4c58bd3baca40687bf3f686850445e1cb320aa5 Mon Sep 17 00:00:00 2001 From: alindima Date: Fri, 23 Feb 2024 12:23:32 +0200 Subject: [PATCH 48/51] fix some bugs and add more unit tests --- Cargo.lock | 1 + polkadot/runtime/parachains/Cargo.toml | 1 + .../parachains/src/paras_inherent/mod.rs | 27 +- .../parachains/src/paras_inherent/tests.rs | 485 ++++++++++++++++-- 4 files changed, 461 insertions(+), 53 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 053778c3fdcf..7870081a63fe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13328,6 +13328,7 @@ dependencies = [ "polkadot-runtime-metrics", "rand", "rand_chacha 0.3.1", + "rstest", "rustc-hex", "sc-keystore", "scale-info", diff --git a/polkadot/runtime/parachains/Cargo.toml b/polkadot/runtime/parachains/Cargo.toml index 311a62b6c917..610401454763 100644 --- a/polkadot/runtime/parachains/Cargo.toml +++ b/polkadot/runtime/parachains/Cargo.toml @@ -69,6 +69,7 @@ sp-tracing = { path = "../../../substrate/primitives/tracing" } sp-crypto-hashing = { path = "../../../substrate/primitives/crypto/hashing" } thousands = "0.2.0" assert_matches = "1" +rstest = "0.18.2" serde_json = { workspace = true, default-features = true } [features] diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 80e8b38004c6..3856135e0c08 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -148,6 +148,8 @@ pub mod pallet { /// A candidate was backed even though the paraid had multiple cores assigned and no /// injected core index. BackedByElasticScalingWithNoCoreIndex, + /// Too many candidates supplied. + UnscheduledCandidate, } /// Whether the paras inherent was included within this block. 
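The hunks that follow cap the number of accepted backed candidates by the number of scheduled cores. A minimal, self-contained sketch of that bookkeeping is shown here; plain `u32`s stand in for `ParaId` and `CoreIndex`, and a slice stands in for the scheduler's claim-queue iterator, so none of these names are the pallet's actual types.

    use std::collections::{BTreeMap, BTreeSet};

    // Build a para -> cores map from a claim-queue view and count the
    // scheduled cores. Plain `u32`s stand in for `ParaId` and `CoreIndex`.
    fn scheduled_cores(claim_queue: &[(u32, u32)]) -> (BTreeMap<u32, BTreeSet<u32>>, usize) {
        let mut scheduled: BTreeMap<u32, BTreeSet<u32>> = BTreeMap::new();
        let mut total_scheduled_cores = 0;
        for (core_idx, para_id) in claim_queue {
            total_scheduled_cores += 1;
            scheduled.entry(*para_id).or_default().insert(*core_idx);
        }
        (scheduled, total_scheduled_cores)
    }

    fn main() {
        // Cores 0 and 1 assigned to para 1, core 2 to para 2.
        let claim_queue = vec![(0, 1), (1, 1), (2, 2)];
        let (scheduled, total_scheduled_cores) = scheduled_cores(&claim_queue);
        assert_eq!(scheduled[&1].len(), 2);
        assert_eq!(total_scheduled_cores, 3);

        // Supplying more backed candidates than scheduled cores must be rejected.
        let backed_candidates_with_core_len: usize = 3;
        assert!(backed_candidates_with_core_len <= total_scheduled_cores);
    }

When the number of sanitized candidates exceeds `total_scheduled_cores`, the runtime bails out with the new `UnscheduledCandidate` error instead of processing the inherent.
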
@@ -598,7 +600,10 @@ impl Pallet { .unwrap_or(false); let mut scheduled: BTreeMap> = BTreeMap::new(); + let mut total_scheduled_cores = 0; + for (core_idx, para_id) in >::scheduled_paras() { + total_scheduled_cores += 1; scheduled.entry(para_id).or_default().insert(core_idx); } @@ -628,10 +633,15 @@ impl Pallet { .verify_backed_candidate(&allowed_relay_parents, candidate_idx, backed_candidate.candidate()) .is_err() }, - &scheduled, + scheduled, core_index_enabled, ); + ensure!( + backed_candidates_with_core.len() <= total_scheduled_cores, + Error::::UnscheduledCandidate + ); + METRICS.on_candidates_sanitized(backed_candidates_with_core.len() as u64); // In `Enter` context (invoked during execution) there should be no backing votes from @@ -980,7 +990,7 @@ fn sanitize_backed_candidates< mut backed_candidates: Vec>, allowed_relay_parents: &AllowedRelayParentsTracker>, mut candidate_has_concluded_invalid_dispute_or_is_invalid: F, - scheduled: &BTreeMap>, + scheduled: BTreeMap>, core_index_enabled: bool, ) -> SanitizedBackedCandidates { // Remove any candidates that were concluded invalid. @@ -989,7 +999,7 @@ fn sanitize_backed_candidates< !candidate_has_concluded_invalid_dispute_or_is_invalid(candidate_idx, backed_candidate) }); - // Map candidates to scheduled cores. + // Map candidates to scheduled cores. Filter out any unscheduled candidates. let (mut backed_candidates_with_core, dropped_elastic_scaling_candidates) = map_candidates_to_cores::( &allowed_relay_parents, @@ -1207,7 +1217,7 @@ fn filter_backed_statements_from_disabled_validators( allowed_relay_parents: &AllowedRelayParentsTracker>, - scheduled: &BTreeMap>, + mut scheduled: BTreeMap>, core_index_enabled: bool, candidates: Vec>, ) -> (Vec<(BackedCandidate, CoreIndex)>, bool) { @@ -1223,17 +1233,16 @@ fn map_candidates_to_cores TestData { + fn get_test_data(core_index_enabled: bool) -> TestData { const RELAY_PARENT_NUM: u32 = 3; // Add the relay parent to `shared` pallet. Otherwise some code (e.g. filtering backing @@ -1307,7 +1308,7 @@ mod sanitizers { ( CoreIndex::from(0), VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(1) }, + Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(0) }, RELAY_PARENT_NUM, )]), ), @@ -1325,12 +1326,12 @@ mod sanitizers { match group_index { group_index if group_index == GroupIndex::from(0) => Some(vec![0, 1]), group_index if group_index == GroupIndex::from(1) => Some(vec![2, 3]), - _ => panic!("Group index out of bounds for 2 parachains and 1 parathread core"), + _ => panic!("Group index out of bounds"), } .map(|m| m.into_iter().map(ValidatorIndex).collect::>()) }; - // Two backed candidates from each parachain + // One backed candidate from each parachain let backed_candidates = (0_usize..2) .into_iter() .map(|idx0| { @@ -1354,7 +1355,7 @@ mod sanitizers { &keystore, &signing_context, BackingKind::Threshold, - None, + core_index_enabled.then_some(CoreIndex(idx0 as u32)), ); backed }) @@ -1399,14 +1400,350 @@ mod sanitizers { } } - #[test] - fn happy_path() { + // Generate test data for the candidates and assert that the evnironment is set as expected + // (check the comments for details) + // Para 1 scheduled on core 0 and core 1. Two candidates are supplied. + // Para 2 scheduled on cores 2 and 3. One candidate supplied. + // Para 3 scheduled on core 4. One candidate supplied. + // Para 4 scheduled on core 5. Two candidates supplied. + // Para 5 scheduled on core 6. No candidates supplied. 
+ fn get_test_data_multiple_cores_per_para(core_index_enabled: bool) -> TestData { + const RELAY_PARENT_NUM: u32 = 3; + + // Add the relay parent to `shared` pallet. Otherwise some code (e.g. filtering backing + // votes) won't behave correctly + shared::Pallet::::add_allowed_relay_parent( + default_header().hash(), + Default::default(), + RELAY_PARENT_NUM, + 1, + ); + + let header = default_header(); + let relay_parent = header.hash(); + let session_index = SessionIndex::from(0_u32); + + let keystore = LocalKeystore::in_memory(); + let keystore = Arc::new(keystore) as KeystorePtr; + let signing_context = SigningContext { parent_hash: relay_parent, session_index }; + + let validators = vec![ + keyring::Sr25519Keyring::Alice, + keyring::Sr25519Keyring::Bob, + keyring::Sr25519Keyring::Charlie, + keyring::Sr25519Keyring::Dave, + keyring::Sr25519Keyring::Eve, + keyring::Sr25519Keyring::Ferdie, + keyring::Sr25519Keyring::One, + ]; + for validator in validators.iter() { + Keystore::sr25519_generate_new( + &*keystore, + PARACHAIN_KEY_TYPE_ID, + Some(&validator.to_seed()), + ) + .unwrap(); + } + + // Set active validators in `shared` pallet + let validator_ids = + validators.iter().map(|v| v.public().into()).collect::>(); + shared::Pallet::::set_active_validators_ascending(validator_ids); + + // Set the validator groups in `scheduler` + scheduler::Pallet::::set_validator_groups(vec![ + vec![ValidatorIndex(0)], + vec![ValidatorIndex(1)], + vec![ValidatorIndex(2)], + vec![ValidatorIndex(3)], + vec![ValidatorIndex(4)], + vec![ValidatorIndex(5)], + vec![ValidatorIndex(6)], + ]); + + // Update scheduler's claimqueue with the parachains + scheduler::Pallet::::set_claimqueue(BTreeMap::from([ + ( + CoreIndex::from(0), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(0) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(1), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(1) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(2), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(2) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(3), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(3) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(4), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 3.into(), core_index: CoreIndex(4) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(5), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 4.into(), core_index: CoreIndex(5) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(6), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 5.into(), core_index: CoreIndex(6) }, + RELAY_PARENT_NUM, + )]), + ), + ])); + + // Callback used for backing candidates + let group_validators = |group_index: GroupIndex| { + match group_index { + group_index if group_index == GroupIndex::from(0) => Some(vec![0]), + group_index if group_index == GroupIndex::from(1) => Some(vec![1]), + group_index if group_index == GroupIndex::from(2) => Some(vec![2]), + group_index if group_index == GroupIndex::from(3) => Some(vec![3]), + group_index if group_index == GroupIndex::from(4) => Some(vec![4]), + group_index if group_index == GroupIndex::from(5) => Some(vec![5]), + group_index if group_index == GroupIndex::from(6) => Some(vec![6]), + + _ => panic!("Group index out of bounds"), + } + .map(|m| 
m.into_iter().map(ValidatorIndex).collect::>()) + }; + + let mut backed_candidates = vec![]; + let mut all_backed_candidates_with_core = vec![]; + + // Para 1 + { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(1), + relay_parent, + pov_hash: Hash::repeat_byte(1 as u8), + persisted_validation_data_hash: [42u8; 32].into(), + hrmp_watermark: RELAY_PARENT_NUM, + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed: BackedCandidate = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(0 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(0 as u32)), + ); + backed_candidates.push(backed.clone()); + if core_index_enabled { + all_backed_candidates_with_core.push((backed, CoreIndex(0))); + } + + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(1), + relay_parent, + pov_hash: Hash::repeat_byte(2 as u8), + persisted_validation_data_hash: [42u8; 32].into(), + hrmp_watermark: RELAY_PARENT_NUM, + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(1 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(1 as u32)), + ); + backed_candidates.push(backed.clone()); + if core_index_enabled { + all_backed_candidates_with_core.push((backed, CoreIndex(1))); + } + } + + // Para 2 + { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(2), + relay_parent, + pov_hash: Hash::repeat_byte(3 as u8), + persisted_validation_data_hash: [42u8; 32].into(), + hrmp_watermark: RELAY_PARENT_NUM, + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(2 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(2 as u32)), + ); + backed_candidates.push(backed.clone()); + if core_index_enabled { + all_backed_candidates_with_core.push((backed, CoreIndex(2))); + } + } + + // Para 3 + { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(3), + relay_parent, + pov_hash: Hash::repeat_byte(4 as u8), + persisted_validation_data_hash: [42u8; 32].into(), + hrmp_watermark: RELAY_PARENT_NUM, + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(4 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(4 as u32)), + ); + backed_candidates.push(backed.clone()); + all_backed_candidates_with_core.push((backed, CoreIndex(4))); + } + + // Para 4 + { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(4), + relay_parent, + pov_hash: Hash::repeat_byte(5 as u8), + persisted_validation_data_hash: [42u8; 32].into(), + hrmp_watermark: RELAY_PARENT_NUM, + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(5 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + None, + ); + 
backed_candidates.push(backed.clone()); + all_backed_candidates_with_core.push((backed, CoreIndex(5))); + + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(4), + relay_parent, + pov_hash: Hash::repeat_byte(6 as u8), + persisted_validation_data_hash: [42u8; 32].into(), + hrmp_watermark: RELAY_PARENT_NUM, + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(5 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(5 as u32)), + ); + backed_candidates.push(backed.clone()); + } + + // No candidate for para 5. + + // State sanity checks + assert_eq!( + >::scheduled_paras().collect::>(), + vec![ + (CoreIndex(0), ParaId::from(1)), + (CoreIndex(1), ParaId::from(1)), + (CoreIndex(2), ParaId::from(2)), + (CoreIndex(3), ParaId::from(2)), + (CoreIndex(4), ParaId::from(3)), + (CoreIndex(5), ParaId::from(4)), + (CoreIndex(6), ParaId::from(5)), + ] + ); + let mut scheduled: BTreeMap> = BTreeMap::new(); + for (core_idx, para_id) in >::scheduled_paras() { + scheduled.entry(para_id).or_default().insert(core_idx); + } + + assert_eq!( + shared::Pallet::::active_validator_indices(), + vec![ + ValidatorIndex(0), + ValidatorIndex(1), + ValidatorIndex(2), + ValidatorIndex(3), + ValidatorIndex(4), + ValidatorIndex(5), + ValidatorIndex(6), + ] + ); + + TestData { + backed_candidates, + scheduled_paras: scheduled, + all_backed_candidates_with_core, + } + } + + #[rstest] + #[case(false)] + #[case(true)] + fn happy_path(#[case] core_index_enabled: bool) { new_test_ext(MockGenesisConfig::default()).execute_with(|| { let TestData { backed_candidates, all_backed_candidates_with_core, scheduled_paras: scheduled, - } = get_test_data(); + } = get_test_data(core_index_enabled); let has_concluded_invalid = |_idx: usize, _backed_candidate: &BackedCandidate| -> bool { false }; @@ -1416,8 +1753,8 @@ mod sanitizers { backed_candidates.clone(), &>::allowed_relay_parents(), has_concluded_invalid, - &scheduled, - false + scheduled, + core_index_enabled ), SanitizedBackedCandidates { backed_candidates_with_core: all_backed_candidates_with_core, @@ -1428,12 +1765,54 @@ mod sanitizers { }); } + #[rstest] + #[case(false)] + #[case(true)] + fn test_with_multiple_cores_per_para(#[case] core_index_enabled: bool) { + new_test_ext(MockGenesisConfig::default()).execute_with(|| { + let TestData { + backed_candidates, + all_backed_candidates_with_core: expected_all_backed_candidates_with_core, + scheduled_paras: scheduled, + } = get_test_data_multiple_cores_per_para(core_index_enabled); + + let has_concluded_invalid = + |_idx: usize, _backed_candidate: &BackedCandidate| -> bool { false }; + + assert_eq!( + sanitize_backed_candidates::( + backed_candidates.clone(), + &>::allowed_relay_parents(), + has_concluded_invalid, + scheduled, + core_index_enabled + ), + SanitizedBackedCandidates { + backed_candidates_with_core: expected_all_backed_candidates_with_core, + votes_from_disabled_were_dropped: false, + dropped_elastic_scaling_candidates: !core_index_enabled + } + ); + }); + } + // nothing is scheduled, so no paraids match, thus all backed candidates are skipped - #[test] - fn nothing_scheduled() { + #[rstest] + #[case(false, false)] + #[case(true, true)] + #[case(false, true)] + #[case(true, false)] + fn nothing_scheduled( + #[case] core_index_enabled: bool, + #[case] multiple_cores_per_para: bool, + ) { 
new_test_ext(MockGenesisConfig::default()).execute_with(|| { - let TestData { backed_candidates, .. } = get_test_data(); - let scheduled = &BTreeMap::new(); + let TestData { backed_candidates, .. } = if multiple_cores_per_para { + get_test_data_multiple_cores_per_para(core_index_enabled) + } else { + get_test_data(core_index_enabled) + }; + let scheduled = BTreeMap::new(); let has_concluded_invalid = |_idx: usize, _backed_candidate: &BackedCandidate| -> bool { false }; @@ -1445,8 +1824,8 @@ mod sanitizers { backed_candidates.clone(), &>::allowed_relay_parents(), has_concluded_invalid, - &scheduled, - false, + scheduled, + core_index_enabled, ); assert!(sanitized_backed_candidates.is_empty()); @@ -1456,11 +1835,13 @@ mod sanitizers { } // candidates that have concluded as invalid are filtered out - #[test] - fn invalid_are_filtered_out() { + #[rstest] + #[case(false)] + #[case(true)] + fn invalid_are_filtered_out(#[case] core_index_enabled: bool) { new_test_ext(MockGenesisConfig::default()).execute_with(|| { let TestData { backed_candidates, scheduled_paras: scheduled, .. } = - get_test_data(); + get_test_data(core_index_enabled); // mark every second one as concluded invalid let set = { @@ -1482,8 +1863,8 @@ mod sanitizers { backed_candidates.clone(), &>::allowed_relay_parents(), has_concluded_invalid, - &scheduled, - false, + scheduled, + core_index_enabled, ); assert_eq!(sanitized_backed_candidates.len(), backed_candidates.len() / 2); @@ -1492,10 +1873,13 @@ mod sanitizers { }); } - #[test] - fn disabled_non_signing_validator_doesnt_get_filtered() { + #[rstest] + #[case(false)] + #[case(true)] + fn disabled_non_signing_validator_doesnt_get_filtered(#[case] core_index_enabled: bool) { new_test_ext(MockGenesisConfig::default()).execute_with(|| { - let TestData { mut all_backed_candidates_with_core, .. } = get_test_data(); + let TestData { mut all_backed_candidates_with_core, .. } = + get_test_data(core_index_enabled); // Disable Eve set_disabled_validators(vec![4]); @@ -1507,16 +1891,20 @@ mod sanitizers { assert!(!filter_backed_statements_from_disabled_validators::( &mut all_backed_candidates_with_core, &>::allowed_relay_parents(), - false + core_index_enabled )); assert_eq!(all_backed_candidates_with_core, before); }); } - - #[test] - fn drop_statements_from_disabled_without_dropping_candidate() { + #[rstest] + #[case(false)] + #[case(true)] + fn drop_statements_from_disabled_without_dropping_candidate( + #[case] core_index_enabled: bool, + ) { new_test_ext(MockGenesisConfig::default()).execute_with(|| { - let TestData { mut all_backed_candidates_with_core, .. } = get_test_data(); + let TestData { mut all_backed_candidates_with_core, .. 
} = + get_test_data(core_index_enabled); // Disable Alice set_disabled_validators(vec![0]); @@ -1533,14 +1921,17 @@ mod sanitizers { all_backed_candidates_with_core.get(0).unwrap().0.validity_votes().len(), 2 ); - let (validator_indices, None) = all_backed_candidates_with_core + let (validator_indices, maybe_core_index) = all_backed_candidates_with_core .get(0) .unwrap() .0 - .validator_indices_and_core_index(false) - else { - panic!("Expected no injected core index") - }; + .validator_indices_and_core_index(core_index_enabled); + if core_index_enabled { + assert!(maybe_core_index.is_some()); + } else { + assert!(maybe_core_index.is_none()); + } + assert_eq!(validator_indices.get(0).unwrap(), true); assert_eq!(validator_indices.get(1).unwrap(), true); let untouched = all_backed_candidates_with_core.get(1).unwrap().0.clone(); @@ -1548,17 +1939,20 @@ mod sanitizers { assert!(filter_backed_statements_from_disabled_validators::( &mut all_backed_candidates_with_core, &>::allowed_relay_parents(), - false + core_index_enabled )); - let (validator_indices, None) = all_backed_candidates_with_core + let (validator_indices, maybe_core_index) = all_backed_candidates_with_core .get(0) .unwrap() .0 - .validator_indices_and_core_index(false) - else { - panic!("Expected no injected core index") - }; + .validator_indices_and_core_index(core_index_enabled); + if core_index_enabled { + assert!(maybe_core_index.is_some()); + } else { + assert!(maybe_core_index.is_none()); + } + // there should still be two backed candidates assert_eq!(all_backed_candidates_with_core.len(), 2); // but the first one should have only one validity vote @@ -1574,10 +1968,13 @@ mod sanitizers { }); } - #[test] - fn drop_candidate_if_all_statements_are_from_disabled() { + #[rstest] + #[case(false)] + #[case(true)] + fn drop_candidate_if_all_statements_are_from_disabled(#[case] core_index_enabled: bool) { new_test_ext(MockGenesisConfig::default()).execute_with(|| { - let TestData { mut all_backed_candidates_with_core, .. } = get_test_data(); + let TestData { mut all_backed_candidates_with_core, .. } = + get_test_data(core_index_enabled); // Disable Alice and Bob set_disabled_validators(vec![0, 1]); @@ -1592,7 +1989,7 @@ mod sanitizers { assert!(filter_backed_statements_from_disabled_validators::( &mut all_backed_candidates_with_core, &>::allowed_relay_parents(), - false + core_index_enabled )); assert_eq!(all_backed_candidates_with_core.len(), 1); From cb41758fe9a38ee26821bff93e615eb10848f7ad Mon Sep 17 00:00:00 2001 From: alindima Date: Fri, 23 Feb 2024 12:36:34 +0200 Subject: [PATCH 49/51] update some comments --- polkadot/runtime/parachains/src/paras_inherent/mod.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 3856135e0c08..720573c1e91c 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -959,7 +959,8 @@ pub(crate) fn sanitize_bitfields( // Result from `sanitize_backed_candidates` #[derive(Debug, PartialEq)] struct SanitizedBackedCandidates { - // Sanitized backed candidates. The `Vec` is sorted according to the occupied core index. + // Sanitized backed candidates along with the assigned core. The `Vec` is sorted according to + // the occupied core index. backed_candidates_with_core: Vec<(BackedCandidate, CoreIndex)>, // Set to true if any votes from disabled validators were dropped from the input. 
votes_from_disabled_were_dropped: bool, @@ -970,8 +971,10 @@ struct SanitizedBackedCandidates { /// Filter out: /// 1. any candidates that have a concluded invalid dispute -/// 2. all backing votes from disabled validators -/// 3. any candidates that end up with less than `effective_minimum_backing_votes` backing votes +/// 2. any unscheduled candidates, as well as candidates whose paraid has multiple cores assigned +/// but have no injected core index. +/// 3. all backing votes from disabled validators +/// 4. any candidates that end up with less than `effective_minimum_backing_votes` backing votes /// /// `scheduled` follows the same naming scheme as provided in the /// guide: Currently `free` but might become `occupied`. From bffa4e957a65af9a6ffa9906e30931804dd8fdc6 Mon Sep 17 00:00:00 2001 From: alindima Date: Fri, 23 Feb 2024 17:03:21 +0200 Subject: [PATCH 50/51] review comments --- .../parachains/src/paras_inherent/mod.rs | 49 +++++++++---------- .../parachains/src/paras_inherent/tests.rs | 36 +++++++++++--- 2 files changed, 51 insertions(+), 34 deletions(-) diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 720573c1e91c..cebf858c24ab 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -40,7 +40,6 @@ use frame_support::{ pallet_prelude::*, traits::Randomness, }; - use frame_system::pallet_prelude::*; use pallet_babe::{self, ParentBlockRandomness}; use primitives::{ @@ -145,9 +144,8 @@ pub mod pallet { DisputeInvalid, /// A candidate was backed by a disabled validator BackedByDisabled, - /// A candidate was backed even though the paraid had multiple cores assigned and no - /// injected core index. - BackedByElasticScalingWithNoCoreIndex, + /// A candidate was backed even though the paraid was not scheduled. + BackedOnUnscheduledCore, /// Too many candidates supplied. UnscheduledCandidate, } @@ -610,7 +608,7 @@ impl Pallet { let SanitizedBackedCandidates { backed_candidates_with_core, votes_from_disabled_were_dropped, - dropped_elastic_scaling_candidates, + dropped_unscheduled_candidates, } = sanitize_backed_candidates::( backed_candidates, &allowed_relay_parents, @@ -652,14 +650,10 @@ impl Pallet { } // In `Enter` context (invoked during execution) we shouldn't have filtered any candidates - // due to a para having multiple cores assigned and no injected core index. They have been - // filtered during inherent data preparation (`ProvideInherent` context). Abort in such - // cases. + // due to a para not being scheduled. They have been filtered during inherent data + // preparation (`ProvideInherent` context). Abort in such cases. if context == ProcessInherentDataContext::Enter { - ensure!( - !dropped_elastic_scaling_candidates, - Error::::BackedByElasticScalingWithNoCoreIndex - ); + ensure!(!dropped_unscheduled_candidates, Error::::BackedOnUnscheduledCore); } // Process backed candidates according to scheduled cores. 
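The `map_candidates_to_cores` changes below rest on one rule: a para scheduled on exactly one core needs no injected core index, while a para scheduled on several cores must name one of its scheduled cores or be dropped. A simplified sketch of that decision follows, with plain integers in place of `ParaId`/`CoreIndex`; it deliberately ignores the descriptor and relay-parent checks the real code also performs, so treat it as an illustration of the scheduling rule only.

    use std::collections::{BTreeMap, BTreeSet};

    // Pick the core a backed candidate should occupy, or `None` if it has to
    // be dropped. Plain integers stand in for `ParaId` and `CoreIndex`.
    fn candidate_core(
        scheduled: &BTreeMap<u32, BTreeSet<u32>>,
        para_id: u32,
        injected_core: Option<u32>,
    ) -> Option<u32> {
        let cores = scheduled.get(&para_id)?; // unscheduled para: drop the candidate
        match (cores.len(), injected_core) {
            // A single scheduled core: the candidate implicitly occupies it.
            (1, _) => cores.iter().next().copied(),
            // Several scheduled cores (elastic scaling): accept the candidate
            // only if it names one of them.
            (_, Some(core)) if cores.contains(&core) => Some(core),
            // Several cores but no usable injected index: drop the candidate.
            _ => None,
        }
    }

    fn main() {
        let mut scheduled = BTreeMap::new();
        scheduled.insert(1, BTreeSet::from([0, 1])); // para 1 on cores 0 and 1
        scheduled.insert(2, BTreeSet::from([2])); // para 2 on core 2

        assert_eq!(candidate_core(&scheduled, 2, None), Some(2));
        assert_eq!(candidate_core(&scheduled, 1, Some(1)), Some(1));
        assert_eq!(candidate_core(&scheduled, 1, None), None);
        assert_eq!(candidate_core(&scheduled, 3, Some(0)), None);
    }

Any candidate dropped by this mapping sets the `dropped_unscheduled_candidates` flag, which the `Enter` path turns into a hard error.
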
@@ -966,7 +960,7 @@ struct SanitizedBackedCandidates { votes_from_disabled_were_dropped: bool, // Set to true if any candidates were dropped due to filtering done in // `map_candidates_to_cores` - dropped_elastic_scaling_candidates: bool, + dropped_unscheduled_candidates: bool, } /// Filter out: @@ -1002,14 +996,17 @@ fn sanitize_backed_candidates< !candidate_has_concluded_invalid_dispute_or_is_invalid(candidate_idx, backed_candidate) }); + let initial_candidate_count = backed_candidates.len(); // Map candidates to scheduled cores. Filter out any unscheduled candidates. - let (mut backed_candidates_with_core, dropped_elastic_scaling_candidates) = - map_candidates_to_cores::( - &allowed_relay_parents, - scheduled, - core_index_enabled, - backed_candidates, - ); + let mut backed_candidates_with_core = map_candidates_to_cores::( + &allowed_relay_parents, + scheduled, + core_index_enabled, + backed_candidates, + ); + + let dropped_unscheduled_candidates = + initial_candidate_count != backed_candidates_with_core.len(); // Filter out backing statements from disabled validators let votes_from_disabled_were_dropped = filter_backed_statements_from_disabled_validators::( @@ -1026,7 +1023,7 @@ fn sanitize_backed_candidates< backed_candidates_with_core.sort_by(|(_x, core_x), (_y, core_y)| core_x.cmp(&core_y)); SanitizedBackedCandidates { - dropped_elastic_scaling_candidates, + dropped_unscheduled_candidates, votes_from_disabled_were_dropped, backed_candidates_with_core, } @@ -1218,13 +1215,13 @@ fn filter_backed_statements_from_disabled_validators( allowed_relay_parents: &AllowedRelayParentsTracker>, mut scheduled: BTreeMap>, core_index_enabled: bool, candidates: Vec>, -) -> (Vec<(BackedCandidate, CoreIndex)>, bool) { - let mut dropped_elastic_scaling_candidates = false; +) -> Vec<(BackedCandidate, CoreIndex)> { let mut backed_candidates_with_core = Vec::with_capacity(candidates.len()); // We keep a candidate if the parachain has only one core assigned or if @@ -1237,6 +1234,7 @@ fn map_candidates_to_cores( allowed_relay_parents: &AllowedRelayParentsTracker>, candidate: &BackedCandidate, diff --git a/polkadot/runtime/parachains/src/paras_inherent/tests.rs b/polkadot/runtime/parachains/src/paras_inherent/tests.rs index 3dbeb5634571..898fffef733f 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/tests.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/tests.rs @@ -26,7 +26,10 @@ mod enter { use crate::{ builder::{Bench, BenchBuilder}, mock::{mock_assigner, new_test_ext, BlockLength, BlockWeights, MockGenesisConfig, Test}, - scheduler::common::Assignment, + scheduler::{ + common::{Assignment, AssignmentProvider, AssignmentProviderConfig}, + ParasEntry, + }, }; use assert_matches::assert_matches; use frame_support::assert_ok; @@ -697,6 +700,25 @@ mod enter { 2 ); + // One core was scheduled. We should put the assignment back, before calling enter(). + let now = >::block_number() + 1; + let used_cores = 5; + let cores = (0..used_cores) + .into_iter() + .map(|i| { + let AssignmentProviderConfig { ttl, .. 
} = + scheduler::Pallet::::assignment_provider_config(CoreIndex(i)); + // Load an assignment into provider so that one is present to pop + let assignment = + ::AssignmentProvider::get_mock_assignment( + CoreIndex(i), + ParaId::from(i), + ); + (CoreIndex(i), [ParasEntry::new(assignment, now + ttl)].into()) + }) + .collect(); + scheduler::ClaimQueue::::set(cores); + assert_ok!(Pallet::::enter( frame_system::RawOrigin::None.into(), limit_inherent_data, @@ -1759,7 +1781,7 @@ mod sanitizers { SanitizedBackedCandidates { backed_candidates_with_core: all_backed_candidates_with_core, votes_from_disabled_were_dropped: false, - dropped_elastic_scaling_candidates: false + dropped_unscheduled_candidates: false } ); }); @@ -1790,7 +1812,7 @@ mod sanitizers { SanitizedBackedCandidates { backed_candidates_with_core: expected_all_backed_candidates_with_core, votes_from_disabled_were_dropped: false, - dropped_elastic_scaling_candidates: !core_index_enabled + dropped_unscheduled_candidates: !core_index_enabled } ); }); @@ -1819,7 +1841,7 @@ mod sanitizers { let SanitizedBackedCandidates { backed_candidates_with_core: sanitized_backed_candidates, votes_from_disabled_were_dropped, - dropped_elastic_scaling_candidates, + dropped_unscheduled_candidates, } = sanitize_backed_candidates::( backed_candidates.clone(), &>::allowed_relay_parents(), @@ -1830,7 +1852,7 @@ mod sanitizers { assert!(sanitized_backed_candidates.is_empty()); assert!(!votes_from_disabled_were_dropped); - assert!(!dropped_elastic_scaling_candidates); + assert!(!dropped_unscheduled_candidates); }); } @@ -1858,7 +1880,7 @@ mod sanitizers { let SanitizedBackedCandidates { backed_candidates_with_core: sanitized_backed_candidates, votes_from_disabled_were_dropped, - dropped_elastic_scaling_candidates, + dropped_unscheduled_candidates, } = sanitize_backed_candidates::( backed_candidates.clone(), &>::allowed_relay_parents(), @@ -1869,7 +1891,7 @@ mod sanitizers { assert_eq!(sanitized_backed_candidates.len(), backed_candidates.len() / 2); assert!(!votes_from_disabled_were_dropped); - assert!(!dropped_elastic_scaling_candidates); + assert!(!dropped_unscheduled_candidates); }); } From 5d3a85d593ad7079ab2bc2b0a07495e916f5a612 Mon Sep 17 00:00:00 2001 From: alindima Date: Fri, 23 Feb 2024 17:40:06 +0200 Subject: [PATCH 51/51] fix unit test --- polkadot/runtime/parachains/src/paras_inherent/tests.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/polkadot/runtime/parachains/src/paras_inherent/tests.rs b/polkadot/runtime/parachains/src/paras_inherent/tests.rs index 898fffef733f..defb2f4404f5 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/tests.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/tests.rs @@ -1812,7 +1812,7 @@ mod sanitizers { SanitizedBackedCandidates { backed_candidates_with_core: expected_all_backed_candidates_with_core, votes_from_disabled_were_dropped: false, - dropped_unscheduled_candidates: !core_index_enabled + dropped_unscheduled_candidates: true } ); }); @@ -1852,7 +1852,7 @@ mod sanitizers { assert!(sanitized_backed_candidates.is_empty()); assert!(!votes_from_disabled_were_dropped); - assert!(!dropped_unscheduled_candidates); + assert!(dropped_unscheduled_candidates); }); }
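
The reworked tests rely on `rstest` case-based parameterization (the dev-dependency added earlier in this series) so that each test body runs once per configuration. A minimal standalone example of the pattern, independent of the runtime mocks and using a toy helper rather than the real sanitizer:

    // Requires `rstest` as a dev-dependency, as added to
    // polkadot/runtime/parachains/Cargo.toml in this series.
    #[cfg(test)]
    mod rstest_example {
        use rstest::rstest;

        // Toy stand-in for the sanitizer: it can never keep more candidates
        // than there are scheduled cores.
        fn sanitized_len(candidates: usize, scheduled_cores: usize) -> usize {
            candidates.min(scheduled_cores)
        }

        // Each `#[case]` expands into its own test, so the same body covers
        // both the enabled and the disabled core-index path.
        #[rstest]
        #[case(true)]
        #[case(false)]
        fn runs_for_both_configurations(#[case] core_index_enabled: bool) {
            let scheduled_cores = if core_index_enabled { 2 } else { 1 };
            assert!(sanitized_len(3, scheduled_cores) <= scheduled_cores);
        }
    }

This is the same mechanism by which `core_index_enabled` (and, in `nothing_scheduled`, `multiple_cores_per_para`) is threaded through the sanitizer tests above.
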