From cab6ab9c06995ab90da3b77bd276143052be5574 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Thu, 1 Feb 2024 22:29:58 +0200 Subject: [PATCH 01/20] Switch statement table from ParaId to CoreIndex Signed-off-by: Andrei Sandu --- polkadot/node/core/backing/src/lib.rs | 141 ++++++++++++++++-------- polkadot/primitives/Cargo.toml | 2 + polkadot/primitives/src/v6/mod.rs | 33 +++++- polkadot/statement-table/src/generic.rs | 13 +-- polkadot/statement-table/src/lib.rs | 6 +- 5 files changed, 136 insertions(+), 59 deletions(-) diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 98bbd6232add..01ca089d8c46 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -105,9 +105,9 @@ use polkadot_node_subsystem_util::{ }; use polkadot_primitives::{ BackedCandidate, CandidateCommitments, CandidateHash, CandidateReceipt, - CommittedCandidateReceipt, CoreIndex, CoreState, ExecutorParams, Hash, Id as ParaId, - PersistedValidationData, PvfExecKind, SigningContext, ValidationCode, ValidatorId, - ValidatorIndex, ValidatorSignature, ValidityAttestation, + CommittedCandidateReceipt, CoreIndex, CoreState, ExecutorParams, GroupIndex, Hash, + Id as ParaId, PersistedValidationData, PvfExecKind, SigningContext, ValidationCode, + ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, }; use sp_keystore::KeystorePtr; use statement_table::{ @@ -208,8 +208,10 @@ struct PerRelayParentState { prospective_parachains_mode: ProspectiveParachainsMode, /// The hash of the relay parent on top of which this job is doing it's work. parent: Hash, - /// The `ParaId` assigned to the local validator at this relay parent. - assignment: Option, + /// The `CoreIndex` assigned to the local validator at this relay parent. + assigned_para: Option, + /// The `CoreIndex` assigned to the local validator at this relay parent. + assigned_core: Option, /// The candidates that are backed by enough validators in their group, by hash. backed: HashSet, /// The table of candidates and statements under this relay-parent. 
@@ -382,7 +384,7 @@ struct AttestingData { #[derive(Default)] struct TableContext { validator: Option, - groups: HashMap>, + groups: HashMap>, validators: Vec, disabled_validators: Vec, } @@ -404,7 +406,7 @@ impl TableContext { impl TableContextTrait for TableContext { type AuthorityId = ValidatorIndex; type Digest = CandidateHash; - type GroupId = ParaId; + type GroupId = CoreIndex; type Signature = ValidatorSignature; type Candidate = CommittedCandidateReceipt; @@ -412,15 +414,11 @@ impl TableContextTrait for TableContext { candidate.hash() } - fn candidate_group(candidate: &CommittedCandidateReceipt) -> ParaId { - candidate.descriptor().para_id + fn is_member_of(&self, authority: &ValidatorIndex, core: &CoreIndex) -> bool { + self.groups.get(core).map_or(false, |g| g.iter().any(|a| a == authority)) } - fn is_member_of(&self, authority: &ValidatorIndex, group: &ParaId) -> bool { - self.groups.get(group).map_or(false, |g| g.iter().any(|a| a == authority)) - } - - fn get_group_size(&self, group: &ParaId) -> Option { + fn get_group_size(&self, group: &CoreIndex) -> Option { self.groups.get(group).map(|g| g.len()) } } @@ -442,20 +440,22 @@ fn primitive_statement_to_table(s: &SignedFullStatementWithPVD) -> TableSignedSt fn table_attested_to_backed( attested: TableAttestedCandidate< - ParaId, + CoreIndex, CommittedCandidateReceipt, ValidatorIndex, ValidatorSignature, >, table_context: &TableContext, ) -> Option { - let TableAttestedCandidate { candidate, validity_votes, group_id: para_id } = attested; + let TableAttestedCandidate { candidate, validity_votes, group_id: core_index } = attested; let (ids, validity_votes): (Vec<_>, Vec) = validity_votes.into_iter().map(|(id, vote)| (id, vote.into())).unzip(); - let group = table_context.groups.get(¶_id)?; + let group = table_context.groups.get(&core_index)?; + // TODO: This si a temporary fix and will not work if a para is assigned to + // different sized backing groups. We need core index in the candidate descriptor let mut validator_indices = BitVec::with_capacity(group.len()); validator_indices.resize(group.len(), false); @@ -981,6 +981,56 @@ async fn handle_active_leaves_update( Ok(()) } +macro_rules! try_runtime_api { + ($x: expr) => { + match $x { + Ok(x) => x, + Err(err) => { + // Only bubble up fatal errors. + error::log_error(Err(Into::::into(err).into()))?; + + // We can't do candidate validation work if we don't have the + // requisite runtime API data. But these errors should not take + // down the node. 
+ return Ok(None) + }, + } + }; +} + +#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +async fn core_index_from_statement( + ctx: &mut Context, + relay_parent: Hash, + statement: &SignedFullStatementWithPVD, +) -> Result, Error> { + let parent = relay_parent; + + let (groups, cores) = futures::try_join!( + request_validator_groups(parent, ctx.sender()).await, + request_from_runtime(parent, ctx.sender(), |tx| { + RuntimeApiRequest::AvailabilityCores(tx) + },) + .await, + ) + .map_err(Error::JoinMultiple)?; + let (validator_groups, group_rotation_info) = try_runtime_api!(groups); + let cores = try_runtime_api!(cores); + + let statement_validator_index = statement.validator_index(); + for (group_index, group) in validator_groups.iter().enumerate() { + for validator_index in group { + if *validator_index == statement_validator_index { + return Ok(Some( + group_rotation_info.core_for_group(GroupIndex(group_index as u32), cores.len()), + )) + } + } + } + + Ok(None) +} + /// Load the data necessary to do backing work on top of a relay-parent. #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] async fn construct_per_relay_parent_state( @@ -989,23 +1039,6 @@ async fn construct_per_relay_parent_state( keystore: &KeystorePtr, mode: ProspectiveParachainsMode, ) -> Result, Error> { - macro_rules! try_runtime_api { - ($x: expr) => { - match $x { - Ok(x) => x, - Err(err) => { - // Only bubble up fatal errors. - error::log_error(Err(Into::::into(err).into()))?; - - // We can't do candidate validation work if we don't have the - // requisite runtime API data. But these errors should not take - // down the node. - return Ok(None) - }, - } - }; - } - let parent = relay_parent; let (session_index, validators, groups, cores) = futures::try_join!( @@ -1055,9 +1088,11 @@ async fn construct_per_relay_parent_state( }, }; - let mut groups = HashMap::new(); let n_cores = cores.len(); - let mut assignment = None; + + let mut groups = HashMap::>::new(); + let mut assigned_core = None; + let mut assigned_para = None; for (idx, core) in cores.into_iter().enumerate() { let core_para_id = match core { @@ -1077,11 +1112,13 @@ async fn construct_per_relay_parent_state( let group_index = group_rotation_info.group_for_core(core_index, n_cores); if let Some(g) = validator_groups.get(group_index.0 as usize) { if validator.as_ref().map_or(false, |v| g.contains(&v.index())) { - assignment = Some(core_para_id); + assigned_para = Some(core_para_id); + assigned_core = Some(core_index); } - groups.insert(core_para_id, g.clone()); + groups.insert(core_index, g.clone()); } } + gum::debug!(target: LOG_TARGET, ?groups, "TableContext" ); let table_context = TableContext { validator, groups, validators, disabled_validators }; let table_config = TableConfig { @@ -1094,7 +1131,8 @@ async fn construct_per_relay_parent_state( Ok(Some(PerRelayParentState { prospective_parachains_mode: mode, parent, - assignment, + assigned_core, + assigned_para, backed: HashSet::new(), table: Table::new(table_config), table_context, @@ -1519,15 +1557,16 @@ async fn import_statement( per_candidate: &mut HashMap, statement: &SignedFullStatementWithPVD, ) -> Result, Error> { + let candidate_hash = statement.payload().candidate_hash(); + gum::debug!( target: LOG_TARGET, statement = ?statement.payload().to_compact(), validator_index = statement.validator_index().0, + ?candidate_hash, "Importing statement", ); - let candidate_hash = statement.payload().candidate_hash(); - // If this is a new candidate (statement is 'seconded' 
and candidate is unknown), // we need to create an entry in the `PerCandidateState` map. // @@ -1593,7 +1632,11 @@ async fn import_statement( let stmt = primitive_statement_to_table(statement); - Ok(rp_state.table.import_statement(&rp_state.table_context, stmt)) + let core = core_index_from_statement(ctx, rp_state.parent, statement) + .await + .unwrap() + .unwrap(); + Ok(rp_state.table.import_statement(&rp_state.table_context, core, stmt)) } /// Handles a summary received from [`import_statement`] and dispatches `Backed` notifications and @@ -1654,8 +1697,14 @@ async fn post_import_statement_actions( ); ctx.send_unbounded_message(message); } + } else { + gum::debug!(target: LOG_TARGET, ?candidate_hash, "Cannot get BackedCandidate"); } + } else { + gum::debug!(target: LOG_TARGET, ?candidate_hash, "Candidate already known"); } + } else { + gum::debug!(target: LOG_TARGET, "No attested candidate"); } issue_new_misbehaviors(ctx, rp_state.parent, &mut rp_state.table); @@ -1859,9 +1908,10 @@ async fn maybe_validate_and_import( let candidate_hash = summary.candidate; - if Some(summary.group_id) != rp_state.assignment { + if Some(summary.group_id) != rp_state.assigned_core { return Ok(()) } + let attesting = match statement.payload() { StatementWithPVD::Seconded(receipt, _) => { let attesting = AttestingData { @@ -2004,10 +2054,11 @@ async fn handle_second_message( } // Sanity check that candidate is from our assignment. - if Some(candidate.descriptor().para_id) != rp_state.assignment { + if Some(candidate.descriptor().para_id) != rp_state.assigned_para { gum::debug!( target: LOG_TARGET, - our_assignment = ?rp_state.assignment, + our_assignment_core = ?rp_state.assigned_core, + our_assignment_para = ?rp_state.assigned_para, collation = ?candidate.descriptor().para_id, "Subsystem asked to second for para outside of our assignment", ); diff --git a/polkadot/primitives/Cargo.toml b/polkadot/primitives/Cargo.toml index c2fdf331568d..27827fcd7d78 100644 --- a/polkadot/primitives/Cargo.toml +++ b/polkadot/primitives/Cargo.toml @@ -15,6 +15,7 @@ hex-literal = "0.4.1" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive", "serde"] } serde = { version = "1.0.195", default-features = false, features = ["alloc", "derive"] } +log = { version = "0.4.17", default-features = false } application-crypto = { package = "sp-application-crypto", path = "../../substrate/primitives/application-crypto", default-features = false, features = ["serde"] } inherents = { package = "sp-inherents", path = "../../substrate/primitives/inherents", default-features = false } @@ -44,6 +45,7 @@ std = [ "primitives/std", "runtime_primitives/std", "scale-info/std", + "log/std", "serde/std", "sp-api/std", "sp-arithmetic/std", diff --git a/polkadot/primitives/src/v6/mod.rs b/polkadot/primitives/src/v6/mod.rs index fd0b32db7994..c9c78499cbb5 100644 --- a/polkadot/primitives/src/v6/mod.rs +++ b/polkadot/primitives/src/v6/mod.rs @@ -72,6 +72,7 @@ pub use metrics::{ /// The key type ID for a collator key. pub const COLLATOR_KEY_TYPE_ID: KeyTypeId = KeyTypeId(*b"coll"); +const LOG_TARGET: &str = "runtime::primitives"; mod collator_app { use application_crypto::{app_crypto, sr25519}; @@ -743,17 +744,35 @@ impl BackedCandidate { /// /// Returns either an error, indicating that one of the signatures was invalid or that the index /// was out-of-bounds, or the number of signatures checked. 
-pub fn check_candidate_backing + Clone + Encode>( +pub fn check_candidate_backing + Clone + Encode + core::fmt::Debug>( backed: &BackedCandidate, signing_context: &SigningContext, group_len: usize, validator_lookup: impl Fn(usize) -> Option, ) -> Result { + log::debug!( + target: LOG_TARGET, + "checking candidate {:?}", + backed + ); + if backed.validator_indices.len() != group_len { + log::debug!( + target: LOG_TARGET, + "indices mismatch: group_len = {} , indices_len = {}", + group_len, + backed.validator_indices.len(), + ); return Err(()) } if backed.validity_votes.len() > group_len { + log::debug!( + target: LOG_TARGET, + "Too many votes, expected: {}, found: {}", + group_len, + backed.validity_votes.len(), + ); return Err(()) } @@ -775,11 +794,23 @@ pub fn check_candidate_backing + Clone + Encode>( if sig.verify(&payload[..], &validator_id) { signed += 1; } else { + log::debug!( + target: LOG_TARGET, + "Invalid signature. validator_id = {:?}, validator_index = {} ", + validator_id, + val_in_group_idx, + ); return Err(()) } } if signed != backed.validity_votes.len() { + log::error!( + target: LOG_TARGET, + "Too many signatures, expected = {}, found = {}", + backed.validity_votes.len() , + signed, + ); return Err(()) } diff --git a/polkadot/statement-table/src/generic.rs b/polkadot/statement-table/src/generic.rs index 22bffde5acc1..cef2c87151c0 100644 --- a/polkadot/statement-table/src/generic.rs +++ b/polkadot/statement-table/src/generic.rs @@ -53,9 +53,6 @@ pub trait Context { /// get the digest of a candidate. fn candidate_digest(candidate: &Self::Candidate) -> Self::Digest; - /// get the group of a candidate. - fn candidate_group(candidate: &Self::Candidate) -> Self::GroupId; - /// Whether a authority is a member of a group. /// Members are meant to submit candidates and vote on validity. 
fn is_member_of(&self, authority: &Self::AuthorityId, group: &Self::GroupId) -> bool; @@ -342,13 +339,13 @@ impl Table { pub fn import_statement( &mut self, context: &Ctx, + group_id: Ctx::GroupId, statement: SignedStatement, ) -> Option> { let SignedStatement { statement, signature, sender: signer } = statement; - let res = match statement { Statement::Seconded(candidate) => - self.import_candidate(context, signer.clone(), candidate, signature), + self.import_candidate(context, signer.clone(), candidate, signature, group_id), Statement::Valid(digest) => self.validity_vote(context, signer.clone(), digest, ValidityVote::Valid(signature)), }; @@ -387,8 +384,8 @@ impl Table { authority: Ctx::AuthorityId, candidate: Ctx::Candidate, signature: Ctx::Signature, + group: Ctx::GroupId, ) -> ImportResult { - let group = Ctx::candidate_group(&candidate); if !context.is_member_of(&authority, &group) { return Err(Misbehavior::UnauthorizedStatement(UnauthorizedStatement { statement: SignedStatement { @@ -634,10 +631,6 @@ mod tests { Digest(candidate.1) } - fn candidate_group(candidate: &Candidate) -> GroupId { - GroupId(candidate.0) - } - fn is_member_of(&self, authority: &AuthorityId, group: &GroupId) -> bool { self.authorities.get(authority).map(|v| v == group).unwrap_or(false) } diff --git a/polkadot/statement-table/src/lib.rs b/polkadot/statement-table/src/lib.rs index d4629330ac01..3740d15cc4f3 100644 --- a/polkadot/statement-table/src/lib.rs +++ b/polkadot/statement-table/src/lib.rs @@ -35,8 +35,8 @@ pub use generic::{Config, Context, Table}; pub mod v2 { use crate::generic; use primitives::{ - CandidateHash, CommittedCandidateReceipt, CompactStatement as PrimitiveStatement, Id, - ValidatorIndex, ValidatorSignature, + CandidateHash, CommittedCandidateReceipt, CompactStatement as PrimitiveStatement, + CoreIndex, ValidatorIndex, ValidatorSignature, }; /// Statements about candidates on the network. @@ -59,7 +59,7 @@ pub mod v2 { >; /// A summary of import of a statement. - pub type Summary = generic::Summary; + pub type Summary = generic::Summary; impl<'a> From<&'a Statement> for PrimitiveStatement { fn from(s: &'a Statement) -> PrimitiveStatement { From d2df658335e3525a3efec59a5a8286e96555b0bb Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Fri, 2 Feb 2024 23:06:21 +0200 Subject: [PATCH 02/20] cargo lock Signed-off-by: Andrei Sandu --- Cargo.lock | 1 + 1 file changed, 1 insertion(+) diff --git a/Cargo.lock b/Cargo.lock index 3e7e59ac1174..c38490bebdd2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13030,6 +13030,7 @@ version = "7.0.0" dependencies = [ "bitvec", "hex-literal", + "log", "parity-scale-codec", "polkadot-core-primitives", "polkadot-parachain-primitives", From 4a8b8d5a724c42e8236b7595530686f0e7239a1d Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Tue, 6 Feb 2024 17:58:16 +0200 Subject: [PATCH 03/20] add experimental feature Signed-off-by: Andrei Sandu --- polkadot/primitives/src/vstaging/mod.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs index 630bcf8679ad..dfae1af07f38 100644 --- a/polkadot/primitives/src/vstaging/mod.rs +++ b/polkadot/primitives/src/vstaging/mod.rs @@ -68,5 +68,12 @@ pub mod node_features { /// Every time a new feature flag is assigned it should take this value. /// and this should be incremented. FirstUnassigned = 1, + /// Experimental features start at bit 16. Note that experimental features pop in and out + /// of exsitence without warning. 
+ /// + /// This feature enables the extension of `BackedCandidate::validator_indices` by 8 bit. + /// The value stored there represents the assumed core index where the candidates + /// are backed. + InjectCoreIndex = 16, } } From 9244632c43f52521d667f581fb164a7067358625 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Tue, 6 Feb 2024 17:59:29 +0200 Subject: [PATCH 04/20] inject core_index from statements Signed-off-by: Andrei Sandu --- polkadot/node/core/backing/src/lib.rs | 41 +++++++++++++++++++++++---- 1 file changed, 35 insertions(+), 6 deletions(-) diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 01ca089d8c46..723db247f42a 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -70,7 +70,7 @@ use std::{ sync::Arc, }; -use bitvec::vec::BitVec; +use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; use futures::{ channel::{mpsc, oneshot}, future::BoxFuture, @@ -104,6 +104,7 @@ use polkadot_node_subsystem_util::{ Validator, }; use polkadot_primitives::{ + vstaging::{node_features::FeatureIndex, NodeFeatures}, BackedCandidate, CandidateCommitments, CandidateHash, CandidateReceipt, CommittedCandidateReceipt, CoreIndex, CoreState, ExecutorParams, GroupIndex, Hash, Id as ParaId, PersistedValidationData, PvfExecKind, SigningContext, ValidationCode, @@ -118,7 +119,7 @@ use statement_table::{ }, Config as TableConfig, Context as TableContextTrait, Table, }; -use util::vstaging::get_disabled_validators_with_fallback; +use util::{runtime::request_node_features, vstaging::get_disabled_validators_with_fallback}; mod error; @@ -226,6 +227,8 @@ struct PerRelayParentState { fallbacks: HashMap, /// The minimum backing votes threshold. minimum_backing_votes: u32, + /// If true, we're appendindg extra bits in the BackedCandidate validator indices bitfield. + inject_core_index: bool, } struct PerCandidateState { @@ -446,6 +449,7 @@ fn table_attested_to_backed( ValidatorSignature, >, table_context: &TableContext, + inject_core_index: bool, ) -> Option { let TableAttestedCandidate { candidate, validity_votes, group_id: core_index } = attested; @@ -454,8 +458,6 @@ fn table_attested_to_backed( let group = table_context.groups.get(&core_index)?; - // TODO: This si a temporary fix and will not work if a para is assigned to - // different sized backing groups. We need core index in the candidate descriptor let mut validator_indices = BitVec::with_capacity(group.len()); validator_indices.resize(group.len(), false); @@ -479,6 +481,12 @@ fn table_attested_to_backed( } vote_positions.sort_by_key(|(_orig, pos_in_group)| *pos_in_group); + if inject_core_index { + let core_index_to_inject: BitVec = + BitVec::from_vec(vec![core_index.0 as u8]); + validator_indices.extend(core_index_to_inject); + } + Some(BackedCandidate { candidate, validity_votes: vote_positions @@ -1053,6 +1061,16 @@ async fn construct_per_relay_parent_state( .map_err(Error::JoinMultiple)?; let session_index = try_runtime_api!(session_index); + + let inject_core_index = request_node_features(parent, session_index, ctx.sender()) + .await? 
+ .unwrap_or(NodeFeatures::EMPTY) + .get(FeatureIndex::InjectCoreIndex as usize) + .map(|b| *b) + .unwrap_or(false); + + gum::debug!(target: LOG_TARGET, inject_core_index, ?parent, "New state"); + let validators: Vec<_> = try_runtime_api!(validators); let (validator_groups, group_rotation_info) = try_runtime_api!(groups); let cores = try_runtime_api!(cores); @@ -1140,6 +1158,7 @@ async fn construct_per_relay_parent_state( awaiting_validation: HashSet::new(), fallbacks: HashMap::new(), minimum_backing_votes, + inject_core_index, })) } @@ -1658,7 +1677,11 @@ async fn post_import_statement_actions( // `HashSet::insert` returns true if the thing wasn't in there already. if rp_state.backed.insert(candidate_hash) { - if let Some(backed) = table_attested_to_backed(attested, &rp_state.table_context) { + if let Some(backed) = table_attested_to_backed( + attested, + &rp_state.table_context, + rp_state.inject_core_index, + ) { let para_id = backed.candidate.descriptor.para_id; gum::debug!( target: LOG_TARGET, @@ -2138,7 +2161,13 @@ fn handle_get_backed_candidates_message( &rp_state.table_context, rp_state.minimum_backing_votes, ) - .and_then(|attested| table_attested_to_backed(attested, &rp_state.table_context)) + .and_then(|attested| { + table_attested_to_backed( + attested, + &rp_state.table_context, + rp_state.inject_core_index, + ) + }) }) .collect(); From 22e017b4e2d12880429b675c0588f0ac6ccf658e Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Tue, 6 Feb 2024 17:59:45 +0200 Subject: [PATCH 05/20] temporary provisioner fix Signed-off-by: Andrei Sandu --- polkadot/node/core/provisioner/src/lib.rs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs index 3970b8572612..4257cc3b511f 100644 --- a/polkadot/node/core/provisioner/src/lib.rs +++ b/polkadot/node/core/provisioner/src/lib.rs @@ -681,10 +681,16 @@ async fn request_backable_candidates( CoreState::Free => continue, }; + // We're currently fetching based on para id. This has to be chagned to query prospective + // parachains via core index. We should be calling this once per para rather than per core. 
+ // TODO: Fix after https://github.com/paritytech/polkadot-sdk/pull/3160 let response = get_backable_candidate(relay_parent, para_id, required_path, sender).await?; - match response { - Some((hash, relay_parent)) => selected_candidates.push((hash, relay_parent)), + Some((hash, relay_parent)) => { + if selected_candidates.iter().position(|bc| &(hash, relay_parent) == bc).is_none() { + selected_candidates.push((hash, relay_parent)) + } + }, None => { gum::debug!( target: LOG_TARGET, @@ -726,6 +732,7 @@ async fn select_candidates( ) .await?, }; + gum::debug!(target: LOG_TARGET, ?selected_candidates, "Got backedable candidates"); // now get the backed candidates corresponding to these candidate receipts let (tx, rx) = oneshot::channel(); From fbb7351d3b186bf2fa29281310d50a067e1efaa8 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Tue, 6 Feb 2024 18:21:18 +0200 Subject: [PATCH 06/20] cargo lock Signed-off-by: Andrei Sandu --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b49b77dd9333..abd137e1ab15 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -21235,9 +21235,9 @@ dependencies = [ [[package]] name = "wasmi" -version = "0.31.2" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a8281d1d660cdf54c76a3efa9ddd0c270cada1383a995db3ccb43d166456c7" +checksum = "1f341edb80021141d4ae6468cbeefc50798716a347d4085c3811900049ea8945" dependencies = [ "smallvec", "spin 0.9.8", @@ -21248,9 +21248,9 @@ dependencies = [ [[package]] name = "wasmi_arena" -version = "0.4.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "104a7f73be44570cac297b3035d76b169d6599637631cf37a1703326a0727073" +checksum = "401c1f35e413fac1846d4843745589d9ec678977ab35a384db8ae7830525d468" [[package]] name = "wasmi_core" From 6c72918a0e68cacfcf629e2974ae1404dd5b1e9b Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Mon, 12 Feb 2024 14:35:50 +0200 Subject: [PATCH 07/20] It was damn hard to fix these tests Signed-off-by: Andrei Sandu --- Cargo.lock | 1 + polkadot/node/core/backing/src/error.rs | 3 + polkadot/node/core/backing/src/lib.rs | 19 ++++- polkadot/node/core/backing/src/tests/mod.rs | 81 +++++++++++++++++-- .../src/tests/prospective_parachains.rs | 67 ++++++++++++++- polkadot/node/core/provisioner/src/lib.rs | 2 +- polkadot/statement-table/Cargo.toml | 1 + polkadot/statement-table/src/generic.rs | 2 + 8 files changed, 164 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index abd137e1ab15..fb345e3d36ff 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13460,6 +13460,7 @@ dependencies = [ "parity-scale-codec", "polkadot-primitives", "sp-core", + "tracing-gum", ] [[package]] diff --git a/polkadot/node/core/backing/src/error.rs b/polkadot/node/core/backing/src/error.rs index 1b00a62510b7..64955a393962 100644 --- a/polkadot/node/core/backing/src/error.rs +++ b/polkadot/node/core/backing/src/error.rs @@ -48,6 +48,9 @@ pub enum Error { #[error("Candidate is not found")] CandidateNotFound, + #[error("CoreIndex cannot be determined for a candidate")] + CoreIndexUnavailable, + #[error("Signature is invalid")] InvalidSignature, diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 723db247f42a..4bdbe0214f8f 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -384,7 +384,7 @@ struct AttestingData { backing: Vec, } -#[derive(Default)] +#[derive(Default, Debug)] struct TableContext { 
validator: Option, groups: HashMap>, @@ -1025,6 +1025,10 @@ async fn core_index_from_statement( let (validator_groups, group_rotation_info) = try_runtime_api!(groups); let cores = try_runtime_api!(cores); + let compact_statement = statement.as_unchecked(); + let candidate_hash = CandidateHash(*compact_statement.unchecked_payload().candidate_hash()); + + gum::trace!(target: LOG_TARGET, ?group_rotation_info, ?statement, ?validator_groups, ?cores, ?candidate_hash, "Extracting core index from statement"); let statement_validator_index = statement.validator_index(); for (group_index, group) in validator_groups.iter().enumerate() { for validator_index in group { @@ -1653,8 +1657,9 @@ async fn import_statement( let core = core_index_from_statement(ctx, rp_state.parent, statement) .await - .unwrap() - .unwrap(); + .map_err(|_| Error::CoreIndexUnavailable)? + .ok_or(Error::CoreIndexUnavailable)?; + Ok(rp_state.table.import_statement(&rp_state.table_context, core, stmt)) } @@ -2089,6 +2094,14 @@ async fn handle_second_message( return Ok(()) } + gum::debug!( + target: LOG_TARGET, + our_assignment_core = ?rp_state.assigned_core, + our_assignment_para = ?rp_state.assigned_para, + collation = ?candidate.descriptor().para_id, + "Current assignments vs collation", + ); + // If the message is a `CandidateBackingMessage::Second`, sign and dispatch a // Seconded statement only if we have not signed a Valid statement for the requested candidate. // diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index 1957f4e19c54..97e1f2ea10c3 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -65,7 +65,7 @@ fn dummy_pvd() -> PersistedValidationData { } } -struct TestState { +pub(crate) struct TestState { chain_ids: Vec, keystore: KeystorePtr, validators: Vec, @@ -161,6 +161,7 @@ fn test_harness>( test: impl FnOnce(VirtualOverseer) -> T, ) { let pool = sp_core::testing::TaskExecutor::new(); + sp_tracing::init_for_tests(); let (context, virtual_overseer) = test_helpers::make_subsystem_context(pool.clone()); @@ -285,6 +286,16 @@ async fn test_startup(virtual_overseer: &mut VirtualOverseer, test_state: &TestS } ); + // Node features request from runtime: all features are disabled. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_parent, RuntimeApiRequest::NodeFeatures(_session_index, tx)) + ) => { + tx.send(Ok(Default::default())).unwrap(); + } + ); + // Check if subsystem job issues a request for the minimum backing votes. assert_matches!( virtual_overseer.recv().await, @@ -317,6 +328,30 @@ async fn test_startup(virtual_overseer: &mut VirtualOverseer, test_state: &TestS ); } +pub(crate) async fn assert_core_index_from_statement( + virtual_overseer: &mut VirtualOverseer, + test_state: &TestState, +) { + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_parent, RuntimeApiRequest::ValidatorGroups(tx)) + ) => { + tx.send(Ok(test_state.validator_groups.clone())).unwrap(); + } + ); + + // Check that subsystem job issues a request for the availability cores. 
+ assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_parent, RuntimeApiRequest::AvailabilityCores(tx)) + ) => { + tx.send(Ok(test_state.availability_cores.clone())).unwrap(); + } + ); +} + async fn assert_validation_requests( virtual_overseer: &mut VirtualOverseer, validation_code: ValidationCode, @@ -449,6 +484,8 @@ fn backing_second_works() { } ); + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -545,6 +582,7 @@ fn backing_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_validation_requests(&mut virtual_overseer, validation_code_ab.clone()).await; // Sending a `Statement::Seconded` for our assignment will start @@ -604,6 +642,8 @@ fn backing_works() { } ); + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -630,6 +670,8 @@ fn backing_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + virtual_overseer .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves( ActiveLeavesUpdate::stop_work(test_state.relay_parent), @@ -722,6 +764,7 @@ fn backing_works_while_validation_ongoing() { let statement = CandidateBackingMessage::Statement(test_state.relay_parent, signed_a.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_validation_requests(&mut virtual_overseer, validation_code_abc.clone()).await; @@ -771,6 +814,7 @@ fn backing_works_while_validation_ongoing() { CandidateBackingMessage::Statement(test_state.relay_parent, signed_b.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; // Candidate gets backed entirely by other votes. 
assert_matches!( @@ -791,6 +835,8 @@ fn backing_works_while_validation_ongoing() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + let (tx, rx) = oneshot::channel(); let msg = CandidateBackingMessage::GetBackedCandidates( vec![(candidate_a.hash(), test_state.relay_parent)], @@ -889,6 +935,7 @@ fn backing_misbehavior_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; assert_matches!( @@ -944,6 +991,8 @@ fn backing_misbehavior_works() { } ); + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -975,6 +1024,8 @@ fn backing_misbehavior_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + assert_matches!( virtual_overseer.recv().await, AllMessages::Provisioner( @@ -1150,6 +1201,7 @@ fn backing_dont_second_invalid() { tx.send(Ok(())).unwrap(); } ); + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_matches!( virtual_overseer.recv().await, @@ -1221,6 +1273,7 @@ fn backing_second_after_first_fails_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; // Subsystem requests PoV and requests validation. @@ -1365,6 +1418,7 @@ fn backing_works_after_failed_validation() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; // Subsystem requests PoV and requests validation. 
@@ -1422,7 +1476,7 @@ fn backing_works_after_failed_validation() { fn candidate_backing_reorders_votes() { use sp_core::Encode; - let para_id = ParaId::from(10); + let core_idx = CoreIndex(10); let validators = vec![ Sr25519Keyring::Alice, Sr25519Keyring::Bob, @@ -1436,7 +1490,7 @@ fn candidate_backing_reorders_votes() { let validator_groups = { let mut validator_groups = HashMap::new(); validator_groups - .insert(para_id, vec![0, 1, 2, 3, 4, 5].into_iter().map(ValidatorIndex).collect()); + .insert(core_idx, vec![0, 1, 2, 3, 4, 5].into_iter().map(ValidatorIndex).collect()); validator_groups }; @@ -1466,10 +1520,10 @@ fn candidate_backing_reorders_votes() { (ValidatorIndex(3), fake_attestation(3)), (ValidatorIndex(1), fake_attestation(1)), ], - group_id: para_id, + group_id: core_idx, }; - let backed = table_attested_to_backed(attested, &table_context).unwrap(); + let backed = table_attested_to_backed(attested, &table_context, false).unwrap(); let expected_bitvec = { let mut validator_indices = BitVec::::with_capacity(6); @@ -1569,6 +1623,7 @@ fn retry_works() { CandidateBackingMessage::Statement(test_state.relay_parent, signed_a.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; // Subsystem requests PoV and requests validation. @@ -1590,6 +1645,8 @@ fn retry_works() { CandidateBackingMessage::Statement(test_state.relay_parent, signed_b.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + // Not deterministic which message comes first: for _ in 0u32..5 { match virtual_overseer.recv().await { @@ -1632,6 +1689,7 @@ fn retry_works() { CandidateBackingMessage::Statement(test_state.relay_parent, signed_c.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; assert_matches!( @@ -1756,11 +1814,14 @@ fn observes_backing_even_if_not_validator() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + let statement = CandidateBackingMessage::Statement(test_state.relay_parent, signed_b.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_matches!( virtual_overseer.recv().await, AllMessages::Provisioner( @@ -1778,6 +1839,8 @@ fn observes_backing_even_if_not_validator() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + virtual_overseer .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves( ActiveLeavesUpdate::stop_work(test_state.relay_parent), @@ -1844,6 +1907,8 @@ fn cannot_second_multiple_candidates_per_parent() { } ); + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -2078,6 +2143,8 @@ fn disabled_validator_doesnt_distribute_statement_on_receiving_statement() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + 
assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + // Ensure backing subsystem is not doing any work assert_matches!(virtual_overseer.recv().timeout(Duration::from_secs(1)).await, None); @@ -2169,6 +2236,8 @@ fn validator_ignores_statements_from_disabled_validators() { virtual_overseer.send(FromOrchestra::Communication { msg: statement_3 }).await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( @@ -2255,6 +2324,8 @@ fn validator_ignores_statements_from_disabled_validators() { } ); + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( diff --git a/polkadot/node/core/backing/src/tests/prospective_parachains.rs b/polkadot/node/core/backing/src/tests/prospective_parachains.rs index 578f21bef665..0fbf52403ea5 100644 --- a/polkadot/node/core/backing/src/tests/prospective_parachains.rs +++ b/polkadot/node/core/backing/src/tests/prospective_parachains.rs @@ -185,6 +185,16 @@ async fn activate_leaf( } ); + // Node features request from runtime: all features are disabled. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::NodeFeatures(_session_index, tx)) + ) if parent == hash => { + tx.send(Ok(Default::default())).unwrap(); + } + ); + // Check if subsystem job issues a request for the minimum backing votes. assert_matches!( virtual_overseer.recv().await, @@ -305,10 +315,11 @@ async fn assert_hypothetical_frontier_requests( ) => { let idx = match expected_requests.iter().position(|r| r.0 == request) { Some(idx) => idx, - None => panic!( + None => + panic!( "unexpected hypothetical frontier request, no match found for {:?}", request - ), + ), }; let resp = std::mem::take(&mut expected_requests[idx].1); tx.send(resp).unwrap(); @@ -451,6 +462,8 @@ fn seconding_sanity_check_allowed() { )) ); + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -586,6 +599,8 @@ fn seconding_sanity_check_disallowed() { )) ); + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -840,6 +855,8 @@ fn prospective_parachains_reject_candidate() { )) ); + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -976,6 +993,8 @@ fn second_multiple_candidates_per_relay_parent() { ) ); + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -1106,6 +1125,8 @@ fn backing_works() { )) ); + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + assert_validate_seconded_candidate( &mut virtual_overseer, candidate_a.descriptor().relay_parent, @@ -1118,6 +1139,7 @@ fn backing_works() { ) .await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -1154,6 +1176,8 @@ fn backing_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + 
virtual_overseer }); } @@ -1268,6 +1292,7 @@ fn concurrent_dependent_candidates() { let statement_b = CandidateBackingMessage::Statement(leaf_parent, signed_b.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement_a }).await; + // At this point the subsystem waits for response, the previous message is received, // send a second one without blocking. let _ = virtual_overseer @@ -1388,7 +1413,19 @@ fn concurrent_dependent_candidates() { assert_eq!(sess_idx, 1); tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); }, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _parent, + RuntimeApiRequest::ValidatorGroups(tx), + )) => { + tx.send(Ok(test_state.validator_groups.clone())).unwrap(); + }, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _parent, + RuntimeApiRequest::AvailabilityCores(tx), + )) => { + tx.send(Ok(test_state.availability_cores.clone())).unwrap(); + }, _ => panic!("unexpected message received from overseer: {:?}", msg), } } @@ -1419,7 +1456,6 @@ fn seconding_sanity_check_occupy_same_depth() { let leaf_parent = get_parent_hash(leaf_hash); let activated = new_leaf(leaf_hash, LEAF_BLOCK_NUMBER); - let min_block_number = LEAF_BLOCK_NUMBER - LEAF_ANCESTRY_LEN; let min_relay_parents = vec![(para_id_a, min_block_number), (para_id_b, min_block_number)]; let test_leaf_a = TestLeaf { activated, min_relay_parents }; @@ -1523,6 +1559,29 @@ fn seconding_sanity_check_occupy_same_depth() { ) ); + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_parent, RuntimeApiRequest::ValidatorGroups(tx)) + ) => { + let (groups, mut rotation) = test_state.validator_groups.clone(); + if leaf_hash == _parent { + rotation.now = 100; + } + tx.send(Ok((groups, rotation))).unwrap(); + } + ); + + // Check that subsystem job issues a request for the availability cores. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_parent, RuntimeApiRequest::AvailabilityCores(tx)) + ) => { + tx.send(Ok(test_state.availability_cores.clone())).unwrap(); + } + ); + assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -1662,6 +1721,8 @@ fn occupied_core_assignment() { )) ); + assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; + assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs index 06fd3c31524e..ddbf3a0529d7 100644 --- a/polkadot/node/core/provisioner/src/lib.rs +++ b/polkadot/node/core/provisioner/src/lib.rs @@ -683,7 +683,7 @@ async fn request_backable_candidates( // We're currently fetching based on para id. This has to be chagned to query prospective // parachains via core index. We should be calling this once per para rather than per core. 
- // TODO: Fix after https://github.com/paritytech/polkadot-sdk/pull/3160 + // TODO: Fix after https://github.com/paritytech/polkadot-sdk/pull/3233 let response = get_backable_candidate(relay_parent, para_id, required_path, sender).await?; match response { Some((hash, relay_parent)) => { diff --git a/polkadot/statement-table/Cargo.toml b/polkadot/statement-table/Cargo.toml index 6403b822ed9b..37b8a99d640a 100644 --- a/polkadot/statement-table/Cargo.toml +++ b/polkadot/statement-table/Cargo.toml @@ -13,3 +13,4 @@ workspace = true parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } sp-core = { path = "../../substrate/primitives/core" } primitives = { package = "polkadot-primitives", path = "../primitives" } +gum = { package = "tracing-gum", path = "../node/gum" } diff --git a/polkadot/statement-table/src/generic.rs b/polkadot/statement-table/src/generic.rs index cef2c87151c0..825b6474e663 100644 --- a/polkadot/statement-table/src/generic.rs +++ b/polkadot/statement-table/src/generic.rs @@ -36,6 +36,7 @@ use primitives::{ }; use parity_scale_codec::{Decode, Encode}; +const LOG_TARGET: &str = "parachain::statement-table"; /// Context for the statement table. pub trait Context { @@ -387,6 +388,7 @@ impl Table { group: Ctx::GroupId, ) -> ImportResult { if !context.is_member_of(&authority, &group) { + gum::debug!(target: LOG_TARGET, authority = ?authority, group = ?group, "New `Misbehavior::UnauthorizedStatement`, candidate backed by validator that doesn't belong to expected group" ); return Err(Misbehavior::UnauthorizedStatement(UnauthorizedStatement { statement: SignedStatement { signature, From fc5c1091b66af4a3d8dc7ee5a2aabcab774e1a00 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Mon, 12 Feb 2024 14:47:41 +0200 Subject: [PATCH 08/20] These tests were easy to fix Signed-off-by: Andrei Sandu --- polkadot/statement-table/src/generic.rs | 35 +++++++++++++------------ 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/polkadot/statement-table/src/generic.rs b/polkadot/statement-table/src/generic.rs index 825b6474e663..2ee6f6a4f781 100644 --- a/polkadot/statement-table/src/generic.rs +++ b/polkadot/statement-table/src/generic.rs @@ -670,10 +670,10 @@ mod tests { sender: AuthorityId(1), }; - table.import_statement(&context, statement_a); + table.import_statement(&context, GroupId(2), statement_a); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); - table.import_statement(&context, statement_b); + table.import_statement(&context, GroupId(2), statement_b); assert_eq!( table.detected_misbehavior[&AuthorityId(1)][0], Misbehavior::MultipleCandidates(MultipleCandidates { @@ -706,10 +706,10 @@ mod tests { sender: AuthorityId(1), }; - table.import_statement(&context, statement_a); + table.import_statement(&context, GroupId(2), statement_a); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); - table.import_statement(&context, statement_b); + table.import_statement(&context, GroupId(2), statement_b); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); } @@ -730,7 +730,7 @@ mod tests { sender: AuthorityId(1), }; - table.import_statement(&context, statement); + table.import_statement(&context, GroupId(2), statement); assert_eq!( table.detected_misbehavior[&AuthorityId(1)][0], @@ -764,7 +764,7 @@ mod tests { }; let candidate_a_digest = Digest(100); - table.import_statement(&context, candidate_a); + table.import_statement(&context, GroupId(2), candidate_a); 
assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(2))); @@ -774,7 +774,7 @@ mod tests { signature: Signature(2), sender: AuthorityId(2), }; - table.import_statement(&context, bad_validity_vote); + table.import_statement(&context, GroupId(3), bad_validity_vote); assert_eq!( table.detected_misbehavior[&AuthorityId(2)][0], @@ -806,7 +806,7 @@ mod tests { sender: AuthorityId(1), }; - table.import_statement(&context, statement); + table.import_statement(&context, GroupId(2), statement); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); let invalid_statement = SignedStatement { @@ -815,7 +815,7 @@ mod tests { sender: AuthorityId(1), }; - table.import_statement(&context, invalid_statement); + table.import_statement(&context, GroupId(2), invalid_statement); assert!(table.detected_misbehavior.contains_key(&AuthorityId(1))); } @@ -837,7 +837,7 @@ mod tests { }; let candidate_digest = Digest(100); - table.import_statement(&context, statement); + table.import_statement(&context, GroupId(2), statement); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); let extra_vote = SignedStatement { @@ -846,7 +846,7 @@ mod tests { sender: AuthorityId(1), }; - table.import_statement(&context, extra_vote); + table.import_statement(&context, GroupId(2), extra_vote); assert_eq!( table.detected_misbehavior[&AuthorityId(1)][0], Misbehavior::ValidityDoubleVote(ValidityDoubleVote::IssuedAndValidity( @@ -905,7 +905,7 @@ mod tests { }; let candidate_digest = Digest(100); - table.import_statement(&context, statement); + table.import_statement(&context, GroupId(2), statement); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); assert!(table.attested_candidate(&candidate_digest, &context, 2).is_none()); @@ -916,7 +916,7 @@ mod tests { sender: AuthorityId(2), }; - table.import_statement(&context, vote); + table.import_statement(&context, GroupId(2), vote); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(2))); assert!(table.attested_candidate(&candidate_digest, &context, 2).is_some()); } @@ -939,7 +939,7 @@ mod tests { }; let summary = table - .import_statement(&context, statement) + .import_statement(&context, GroupId(2), statement) .expect("candidate import to give summary"); assert_eq!(summary.candidate, Digest(100)); @@ -966,7 +966,7 @@ mod tests { }; let candidate_digest = Digest(100); - table.import_statement(&context, statement); + table.import_statement(&context, GroupId(2), statement); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); let vote = SignedStatement { @@ -975,8 +975,9 @@ mod tests { sender: AuthorityId(2), }; - let summary = - table.import_statement(&context, vote).expect("candidate vote to give summary"); + let summary = table + .import_statement(&context, GroupId(2), vote) + .expect("candidate vote to give summary"); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(2))); From 33351b42efa03cdbbff2e805561e9e9154d3f5e8 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Mon, 12 Feb 2024 14:51:20 +0200 Subject: [PATCH 09/20] Fix comment Signed-off-by: Andrei Sandu --- polkadot/node/core/backing/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 4bdbe0214f8f..c3ae1ddac7a3 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -209,7 +209,7 @@ struct PerRelayParentState { 
prospective_parachains_mode: ProspectiveParachainsMode, /// The hash of the relay parent on top of which this job is doing it's work. parent: Hash, - /// The `CoreIndex` assigned to the local validator at this relay parent. + /// The `ParaId` assigned to the local validator at this relay parent. assigned_para: Option, /// The `CoreIndex` assigned to the local validator at this relay parent. assigned_core: Option, From 0d994bfb7d9a5c85b7c1993f92cf990f0e44935a Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Mon, 12 Feb 2024 15:10:27 +0200 Subject: [PATCH 10/20] clippy was angry Signed-off-by: Andrei Sandu --- polkadot/node/core/provisioner/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs index ddbf3a0529d7..8d094eaa7673 100644 --- a/polkadot/node/core/provisioner/src/lib.rs +++ b/polkadot/node/core/provisioner/src/lib.rs @@ -687,7 +687,7 @@ async fn request_backable_candidates( let response = get_backable_candidate(relay_parent, para_id, required_path, sender).await?; match response { Some((hash, relay_parent)) => { - if selected_candidates.iter().position(|bc| &(hash, relay_parent) == bc).is_none() { + if !selected_candidates.iter().any(|bc| &(hash, relay_parent) == bc) { selected_candidates.push((hash, relay_parent)) } }, From 534c019dd7e038d42734d999007217730a1656ed Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Mon, 12 Feb 2024 18:36:59 +0200 Subject: [PATCH 11/20] A bit refactor and add a test Signed-off-by: Andrei Sandu --- polkadot/node/core/backing/src/error.rs | 6 ++ polkadot/node/core/backing/src/lib.rs | 59 +++++++++------ polkadot/node/core/backing/src/tests/mod.rs | 79 ++++++++++++++++++++- 3 files changed, 121 insertions(+), 23 deletions(-) diff --git a/polkadot/node/core/backing/src/error.rs b/polkadot/node/core/backing/src/error.rs index 64955a393962..c41084911d22 100644 --- a/polkadot/node/core/backing/src/error.rs +++ b/polkadot/node/core/backing/src/error.rs @@ -63,6 +63,12 @@ pub enum Error { #[error("Fetching validation code by hash failed {0:?}, {1:?}")] FetchValidationCode(ValidationCodeHash, RuntimeApiError), + #[error("Fetching validator groups failed")] + FetchValidatorGroups(RuntimeApiError), + + #[error("Fetching availability cores failed")] + FetchAvailabilityCores(RuntimeApiError), + #[error("Fetching Runtime API version failed {0:?}")] FetchRuntimeApiVersion(RuntimeApiError), diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index c3ae1ddac7a3..c4e8ea7c1fe2 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -92,12 +92,13 @@ use polkadot_node_subsystem::{ RuntimeApiRequest, StatementDistributionMessage, StoreAvailableDataError, }, overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, + SubsystemSender, }; use polkadot_node_subsystem_util::{ self as util, backing_implicit_view::{FetchError as ImplicitViewFetchError, View as ImplicitView}, - executor_params_at_relay_parent, request_from_runtime, request_session_index_for_child, - request_validator_groups, request_validators, + executor_params_at_relay_parent, request_availability_cores, request_from_runtime, + request_session_index_for_child, request_validator_groups, request_validators, runtime::{ self, prospective_parachains_mode, request_min_backing_votes, ProspectiveParachainsMode, }, @@ -106,8 +107,8 @@ use polkadot_node_subsystem_util::{ use polkadot_primitives::{ 
vstaging::{node_features::FeatureIndex, NodeFeatures}, BackedCandidate, CandidateCommitments, CandidateHash, CandidateReceipt, - CommittedCandidateReceipt, CoreIndex, CoreState, ExecutorParams, GroupIndex, Hash, - Id as ParaId, PersistedValidationData, PvfExecKind, SigningContext, ValidationCode, + CommittedCandidateReceipt, CoreIndex, CoreState, ExecutorParams, GroupIndex, GroupRotationInfo, + Hash, Id as ParaId, PersistedValidationData, PvfExecKind, SigningContext, ValidationCode, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, }; use sp_keystore::KeystorePtr; @@ -1006,41 +1007,55 @@ macro_rules! try_runtime_api { }; } -#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] -async fn core_index_from_statement( - ctx: &mut Context, +#[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)] +async fn core_index_from_statement( + sender: &mut Sender, relay_parent: Hash, statement: &SignedFullStatementWithPVD, -) -> Result, Error> { +) -> Result, Error> +where + Sender: SubsystemSender + Clone, +{ let parent = relay_parent; - let (groups, cores) = futures::try_join!( - request_validator_groups(parent, ctx.sender()).await, - request_from_runtime(parent, ctx.sender(), |tx| { - RuntimeApiRequest::AvailabilityCores(tx) - },) - .await, - ) - .map_err(Error::JoinMultiple)?; - let (validator_groups, group_rotation_info) = try_runtime_api!(groups); - let cores = try_runtime_api!(cores); + let (validator_groups, group_rotation_info) = request_validator_groups(parent, sender) + .await + .await + .map_err(Error::RuntimeApiUnavailable)? + .map_err(Error::FetchValidatorGroups)?; + + let cores = request_availability_cores(parent, sender) + .await + .await + .map_err(Error::RuntimeApiUnavailable)? + .map_err(Error::FetchAvailabilityCores)?; let compact_statement = statement.as_unchecked(); let candidate_hash = CandidateHash(*compact_statement.unchecked_payload().candidate_hash()); gum::trace!(target: LOG_TARGET, ?group_rotation_info, ?statement, ?validator_groups, ?cores, ?candidate_hash, "Extracting core index from statement"); + + Ok(core_index_from_statement_inner(&cores, &validator_groups, &group_rotation_info, statement)) +} + +pub(crate) fn core_index_from_statement_inner( + cores: &[CoreState], + validator_groups: &[Vec], + group_rotation_info: &GroupRotationInfo, + statement: &SignedFullStatementWithPVD, +) -> Option { let statement_validator_index = statement.validator_index(); for (group_index, group) in validator_groups.iter().enumerate() { for validator_index in group { if *validator_index == statement_validator_index { - return Ok(Some( + return Some( group_rotation_info.core_for_group(GroupIndex(group_index as u32), cores.len()), - )) + ) } } } - Ok(None) + None } /// Load the data necessary to do backing work on top of a relay-parent. @@ -1655,7 +1670,7 @@ async fn import_statement( let stmt = primitive_statement_to_table(statement); - let core = core_index_from_statement(ctx, rp_state.parent, statement) + let core = core_index_from_statement(ctx.sender(), rp_state.parent, statement) .await .map_err(|_| Error::CoreIndexUnavailable)? 
.ok_or(Error::CoreIndexUnavailable)?; diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index 97e1f2ea10c3..2bf32c407395 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -161,7 +161,6 @@ fn test_harness>( test: impl FnOnce(VirtualOverseer) -> T, ) { let pool = sp_core::testing::TaskExecutor::new(); - sp_tracing::init_for_tests(); let (context, virtual_overseer) = test_helpers::make_subsystem_context(pool.clone()); @@ -681,6 +680,84 @@ fn backing_works() { }); } +#[test] +fn extract_core_index_from_statement_works() { + let test_state = TestState::default(); + + let pov_a = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd_a = dummy_pvd(); + let validation_code_a = ValidationCode(vec![1, 2, 3]); + + let pov_hash = pov_a.hash(); + + let candidate = TestCandidateBuilder { + para_id: test_state.chain_ids[0], + relay_parent: test_state.relay_parent, + pov_hash, + erasure_root: make_erasure_root(&test_state, pov_a.clone(), pvd_a.clone()), + persisted_validation_data_hash: pvd_a.hash(), + validation_code: validation_code_a.0.clone(), + ..Default::default() + } + .build(); + + let public2 = Keystore::sr25519_generate_new( + &*test_state.keystore, + ValidatorId::ID, + Some(&test_state.validators[2].to_seed()), + ) + .expect("Insert key into keystore"); + + let signed_statement_1 = SignedFullStatementWithPVD::sign( + &test_state.keystore, + StatementWithPVD::Seconded(candidate.clone(), pvd_a.clone()), + &test_state.signing_context, + ValidatorIndex(2), + &public2.into(), + ) + .ok() + .flatten() + .expect("should be signed"); + + let public1 = Keystore::sr25519_generate_new( + &*test_state.keystore, + ValidatorId::ID, + Some(&test_state.validators[1].to_seed()), + ) + .expect("Insert key into keystore"); + + let signed_statement_2 = SignedFullStatementWithPVD::sign( + &test_state.keystore, + StatementWithPVD::Seconded(candidate.clone(), pvd_a.clone()), + &test_state.signing_context, + ValidatorIndex(1), + &public1.into(), + ) + .ok() + .flatten() + .expect("should be signed"); + + let core_index_1 = core_index_from_statement_inner( + &test_state.availability_cores, + &test_state.validator_groups.0, + &test_state.validator_groups.1, + &signed_statement_1, + ) + .unwrap(); + + assert_eq!(core_index_1, CoreIndex(0)); + + let core_index_2 = core_index_from_statement_inner( + &test_state.availability_cores, + &test_state.validator_groups.0, + &test_state.validator_groups.1, + &signed_statement_2, + ) + .unwrap(); + + assert_eq!(core_index_2, CoreIndex(1)); +} + #[test] fn backing_works_while_validation_ongoing() { let test_state = TestState::default(); From 10d86ddfe3616356528daa8302cbd9e5034500fa Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Mon, 12 Feb 2024 18:45:20 +0200 Subject: [PATCH 12/20] taplo happy Signed-off-by: Andrei Sandu --- polkadot/primitives/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/primitives/Cargo.toml b/polkadot/primitives/Cargo.toml index 27827fcd7d78..9d366f6cfaa5 100644 --- a/polkadot/primitives/Cargo.toml +++ b/polkadot/primitives/Cargo.toml @@ -39,13 +39,13 @@ std = [ "application-crypto/std", "bitvec/std", "inherents/std", + "log/std", "parity-scale-codec/std", "polkadot-core-primitives/std", "polkadot-parachain-primitives/std", "primitives/std", "runtime_primitives/std", "scale-info/std", - "log/std", "serde/std", "sp-api/std", "sp-arithmetic/std", From 222609c23a59bb3d9277d21f9ddf14216e7e2a2f Mon Sep 17 
00:00:00 2001 From: Andrei Sandu Date: Wed, 14 Feb 2024 14:04:45 +0200 Subject: [PATCH 13/20] review feedback Signed-off-by: Andrei Sandu --- Cargo.lock | 8 +-- polkadot/node/core/backing/src/error.rs | 6 -- polkadot/node/core/backing/src/lib.rs | 70 ++++++++----------- polkadot/node/core/backing/src/tests/mod.rs | 68 ++---------------- .../src/tests/prospective_parachains.rs | 38 ---------- polkadot/node/core/provisioner/src/lib.rs | 7 +- polkadot/primitives/src/vstaging/mod.rs | 13 ++-- 7 files changed, 44 insertions(+), 166 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fb345e3d36ff..332c05825ec0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -21236,9 +21236,9 @@ dependencies = [ [[package]] name = "wasmi" -version = "0.31.0" +version = "0.31.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f341edb80021141d4ae6468cbeefc50798716a347d4085c3811900049ea8945" +checksum = "77a8281d1d660cdf54c76a3efa9ddd0c270cada1383a995db3ccb43d166456c7" dependencies = [ "smallvec", "spin 0.9.8", @@ -21249,9 +21249,9 @@ dependencies = [ [[package]] name = "wasmi_arena" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "401c1f35e413fac1846d4843745589d9ec678977ab35a384db8ae7830525d468" +checksum = "104a7f73be44570cac297b3035d76b169d6599637631cf37a1703326a0727073" [[package]] name = "wasmi_core" diff --git a/polkadot/node/core/backing/src/error.rs b/polkadot/node/core/backing/src/error.rs index c41084911d22..64955a393962 100644 --- a/polkadot/node/core/backing/src/error.rs +++ b/polkadot/node/core/backing/src/error.rs @@ -63,12 +63,6 @@ pub enum Error { #[error("Fetching validation code by hash failed {0:?}, {1:?}")] FetchValidationCode(ValidationCodeHash, RuntimeApiError), - #[error("Fetching validator groups failed")] - FetchValidatorGroups(RuntimeApiError), - - #[error("Fetching availability cores failed")] - FetchAvailabilityCores(RuntimeApiError), - #[error("Fetching Runtime API version failed {0:?}")] FetchRuntimeApiVersion(RuntimeApiError), diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index c4e8ea7c1fe2..a8ccbd17b3d3 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -92,13 +92,12 @@ use polkadot_node_subsystem::{ RuntimeApiRequest, StatementDistributionMessage, StoreAvailableDataError, }, overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, - SubsystemSender, }; use polkadot_node_subsystem_util::{ self as util, backing_implicit_view::{FetchError as ImplicitViewFetchError, View as ImplicitView}, - executor_params_at_relay_parent, request_availability_cores, request_from_runtime, - request_session_index_for_child, request_validator_groups, request_validators, + executor_params_at_relay_parent, request_from_runtime, request_session_index_for_child, + request_validator_groups, request_validators, runtime::{ self, prospective_parachains_mode, request_min_backing_votes, ProspectiveParachainsMode, }, @@ -228,8 +227,15 @@ struct PerRelayParentState { fallbacks: HashMap, /// The minimum backing votes threshold. minimum_backing_votes: u32, - /// If true, we're appendindg extra bits in the BackedCandidate validator indices bitfield. + /// If true, we're appendindg extra bits in the BackedCandidate validator indices bitfield, + /// which represent the assigned core index. inject_core_index: bool, + /// Number of cores. 
+ n_cores: usize, + /// The validator groups at this relay parent. + validator_groups: Vec>, + /// The associated group rotation information. + group_rotation_info: GroupRotationInfo, } struct PerCandidateState { @@ -1007,49 +1013,23 @@ macro_rules! try_runtime_api { }; } -#[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)] -async fn core_index_from_statement( - sender: &mut Sender, - relay_parent: Hash, +fn core_index_from_statement( + validator_groups: &[Vec], + group_rotation_info: &GroupRotationInfo, + n_cores: usize, statement: &SignedFullStatementWithPVD, -) -> Result, Error> -where - Sender: SubsystemSender + Clone, -{ - let parent = relay_parent; - - let (validator_groups, group_rotation_info) = request_validator_groups(parent, sender) - .await - .await - .map_err(Error::RuntimeApiUnavailable)? - .map_err(Error::FetchValidatorGroups)?; - - let cores = request_availability_cores(parent, sender) - .await - .await - .map_err(Error::RuntimeApiUnavailable)? - .map_err(Error::FetchAvailabilityCores)?; - +) -> Option { let compact_statement = statement.as_unchecked(); let candidate_hash = CandidateHash(*compact_statement.unchecked_payload().candidate_hash()); - gum::trace!(target: LOG_TARGET, ?group_rotation_info, ?statement, ?validator_groups, ?cores, ?candidate_hash, "Extracting core index from statement"); - - Ok(core_index_from_statement_inner(&cores, &validator_groups, &group_rotation_info, statement)) -} + gum::trace!(target: LOG_TARGET, ?group_rotation_info, ?statement, ?validator_groups, ?n_cores, ?candidate_hash, "Extracting core index from statement"); -pub(crate) fn core_index_from_statement_inner( - cores: &[CoreState], - validator_groups: &[Vec], - group_rotation_info: &GroupRotationInfo, - statement: &SignedFullStatementWithPVD, -) -> Option { let statement_validator_index = statement.validator_index(); for (group_index, group) in validator_groups.iter().enumerate() { for validator_index in group { if *validator_index == statement_validator_index { return Some( - group_rotation_info.core_for_group(GroupIndex(group_index as u32), cores.len()), + group_rotation_info.core_for_group(GroupIndex(group_index as u32), n_cores), ) } } @@ -1084,7 +1064,7 @@ async fn construct_per_relay_parent_state( let inject_core_index = request_node_features(parent, session_index, ctx.sender()) .await? .unwrap_or(NodeFeatures::EMPTY) - .get(FeatureIndex::InjectCoreIndex as usize) + .get(FeatureIndex::ElasticScalingCoreIndex as usize) .map(|b| *b) .unwrap_or(false); @@ -1178,6 +1158,9 @@ async fn construct_per_relay_parent_state( fallbacks: HashMap::new(), minimum_backing_votes, inject_core_index, + n_cores, + validator_groups, + group_rotation_info, })) } @@ -1670,10 +1653,13 @@ async fn import_statement( let stmt = primitive_statement_to_table(statement); - let core = core_index_from_statement(ctx.sender(), rp_state.parent, statement) - .await - .map_err(|_| Error::CoreIndexUnavailable)? 
- .ok_or(Error::CoreIndexUnavailable)?; + let core = core_index_from_statement( + &rp_state.validator_groups, + &rp_state.group_rotation_info, + rp_state.n_cores, + statement, + ) + .ok_or(Error::CoreIndexUnavailable)?; Ok(rp_state.table.import_statement(&rp_state.table_context, core, stmt)) } diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index 2bf32c407395..316dcdba6d86 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -327,30 +327,6 @@ async fn test_startup(virtual_overseer: &mut VirtualOverseer, test_state: &TestS ); } -pub(crate) async fn assert_core_index_from_statement( - virtual_overseer: &mut VirtualOverseer, - test_state: &TestState, -) { - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(_parent, RuntimeApiRequest::ValidatorGroups(tx)) - ) => { - tx.send(Ok(test_state.validator_groups.clone())).unwrap(); - } - ); - - // Check that subsystem job issues a request for the availability cores. - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(_parent, RuntimeApiRequest::AvailabilityCores(tx)) - ) => { - tx.send(Ok(test_state.availability_cores.clone())).unwrap(); - } - ); -} - async fn assert_validation_requests( virtual_overseer: &mut VirtualOverseer, validation_code: ValidationCode, @@ -483,8 +459,6 @@ fn backing_second_works() { } ); - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -581,7 +555,6 @@ fn backing_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_validation_requests(&mut virtual_overseer, validation_code_ab.clone()).await; // Sending a `Statement::Seconded` for our assignment will start @@ -641,8 +614,6 @@ fn backing_works() { } ); - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -669,8 +640,6 @@ fn backing_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - virtual_overseer .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves( ActiveLeavesUpdate::stop_work(test_state.relay_parent), @@ -737,20 +706,20 @@ fn extract_core_index_from_statement_works() { .flatten() .expect("should be signed"); - let core_index_1 = core_index_from_statement_inner( - &test_state.availability_cores, + let core_index_1 = core_index_from_statement( &test_state.validator_groups.0, &test_state.validator_groups.1, + test_state.availability_cores.len(), &signed_statement_1, ) .unwrap(); assert_eq!(core_index_1, CoreIndex(0)); - let core_index_2 = core_index_from_statement_inner( - &test_state.availability_cores, + let core_index_2 = core_index_from_statement( &test_state.validator_groups.0, &test_state.validator_groups.1, + test_state.availability_cores.len(), &signed_statement_2, ) .unwrap(); @@ -841,7 +810,6 @@ fn backing_works_while_validation_ongoing() { let statement = CandidateBackingMessage::Statement(test_state.relay_parent, signed_a.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_core_index_from_statement(&mut virtual_overseer, 
&test_state).await; assert_validation_requests(&mut virtual_overseer, validation_code_abc.clone()).await; @@ -891,7 +859,6 @@ fn backing_works_while_validation_ongoing() { CandidateBackingMessage::Statement(test_state.relay_parent, signed_b.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; // Candidate gets backed entirely by other votes. assert_matches!( @@ -912,8 +879,6 @@ fn backing_works_while_validation_ongoing() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - let (tx, rx) = oneshot::channel(); let msg = CandidateBackingMessage::GetBackedCandidates( vec![(candidate_a.hash(), test_state.relay_parent)], @@ -1012,7 +977,6 @@ fn backing_misbehavior_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; assert_matches!( @@ -1068,8 +1032,6 @@ fn backing_misbehavior_works() { } ); - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -1101,8 +1063,6 @@ fn backing_misbehavior_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - assert_matches!( virtual_overseer.recv().await, AllMessages::Provisioner( @@ -1278,7 +1238,6 @@ fn backing_dont_second_invalid() { tx.send(Ok(())).unwrap(); } ); - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_matches!( virtual_overseer.recv().await, @@ -1350,7 +1309,6 @@ fn backing_second_after_first_fails_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; // Subsystem requests PoV and requests validation. @@ -1495,7 +1453,6 @@ fn backing_works_after_failed_validation() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; // Subsystem requests PoV and requests validation. @@ -1700,7 +1657,6 @@ fn retry_works() { CandidateBackingMessage::Statement(test_state.relay_parent, signed_a.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; // Subsystem requests PoV and requests validation. 
@@ -1722,8 +1678,6 @@ fn retry_works() { CandidateBackingMessage::Statement(test_state.relay_parent, signed_b.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - // Not deterministic which message comes first: for _ in 0u32..5 { match virtual_overseer.recv().await { @@ -1766,7 +1720,6 @@ fn retry_works() { CandidateBackingMessage::Statement(test_state.relay_parent, signed_c.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; assert_matches!( @@ -1891,14 +1844,11 @@ fn observes_backing_even_if_not_validator() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - let statement = CandidateBackingMessage::Statement(test_state.relay_parent, signed_b.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_matches!( virtual_overseer.recv().await, AllMessages::Provisioner( @@ -1916,8 +1866,6 @@ fn observes_backing_even_if_not_validator() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - virtual_overseer .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves( ActiveLeavesUpdate::stop_work(test_state.relay_parent), @@ -1984,8 +1932,6 @@ fn cannot_second_multiple_candidates_per_parent() { } ); - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -2220,8 +2166,6 @@ fn disabled_validator_doesnt_distribute_statement_on_receiving_statement() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - // Ensure backing subsystem is not doing any work assert_matches!(virtual_overseer.recv().timeout(Duration::from_secs(1)).await, None); @@ -2313,8 +2257,6 @@ fn validator_ignores_statements_from_disabled_validators() { virtual_overseer.send(FromOrchestra::Communication { msg: statement_3 }).await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( @@ -2401,8 +2343,6 @@ fn validator_ignores_statements_from_disabled_validators() { } ); - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( diff --git a/polkadot/node/core/backing/src/tests/prospective_parachains.rs b/polkadot/node/core/backing/src/tests/prospective_parachains.rs index 0fbf52403ea5..165d39b4fcc0 100644 --- a/polkadot/node/core/backing/src/tests/prospective_parachains.rs +++ b/polkadot/node/core/backing/src/tests/prospective_parachains.rs @@ -462,8 +462,6 @@ fn seconding_sanity_check_allowed() { )) ); - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -599,8 +597,6 @@ fn seconding_sanity_check_disallowed() { )) ); - assert_core_index_from_statement(&mut virtual_overseer, 
&test_state).await; - assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -855,8 +851,6 @@ fn prospective_parachains_reject_candidate() { )) ); - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -993,8 +987,6 @@ fn second_multiple_candidates_per_relay_parent() { ) ); - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -1125,8 +1117,6 @@ fn backing_works() { )) ); - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - assert_validate_seconded_candidate( &mut virtual_overseer, candidate_a.descriptor().relay_parent, @@ -1139,7 +1129,6 @@ fn backing_works() { ) .await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -1176,8 +1165,6 @@ fn backing_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - virtual_overseer }); } @@ -1559,29 +1546,6 @@ fn seconding_sanity_check_occupy_same_depth() { ) ); - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(_parent, RuntimeApiRequest::ValidatorGroups(tx)) - ) => { - let (groups, mut rotation) = test_state.validator_groups.clone(); - if leaf_hash == _parent { - rotation.now = 100; - } - tx.send(Ok((groups, rotation))).unwrap(); - } - ); - - // Check that subsystem job issues a request for the availability cores. - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(_parent, RuntimeApiRequest::AvailabilityCores(tx)) - ) => { - tx.send(Ok(test_state.availability_cores.clone())).unwrap(); - } - ); - assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -1721,8 +1685,6 @@ fn occupied_core_assignment() { )) ); - assert_core_index_from_statement(&mut virtual_overseer, &test_state).await; - assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs index 8d094eaa7673..e130780c2f3d 100644 --- a/polkadot/node/core/provisioner/src/lib.rs +++ b/polkadot/node/core/provisioner/src/lib.rs @@ -681,9 +681,8 @@ async fn request_backable_candidates( CoreState::Free => continue, }; - // We're currently fetching based on para id. This has to be chagned to query prospective - // parachains via core index. We should be calling this once per para rather than per core. - // TODO: Fix after https://github.com/paritytech/polkadot-sdk/pull/3233 + // We should be calling this once per para rather than per core. 
+ // TODO: Will be fixed in https://github.com/paritytech/polkadot-sdk/pull/3233 let response = get_backable_candidate(relay_parent, para_id, required_path, sender).await?; match response { Some((hash, relay_parent)) => { @@ -732,7 +731,7 @@ async fn select_candidates( ) .await?, }; - gum::debug!(target: LOG_TARGET, ?selected_candidates, "Got backedable candidates"); + gum::debug!(target: LOG_TARGET, ?selected_candidates, "Got backeable candidates"); // now get the backed candidates corresponding to these candidate receipts let (tx, rx) = oneshot::channel(); diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs index dfae1af07f38..8f4806be85f8 100644 --- a/polkadot/primitives/src/vstaging/mod.rs +++ b/polkadot/primitives/src/vstaging/mod.rs @@ -64,16 +64,13 @@ pub mod node_features { /// Tells if tranch0 assignments could be sent in a single certificate. /// Reserved for: `` EnableAssignmentsV2 = 0, - /// First unassigned feature bit. - /// Every time a new feature flag is assigned it should take this value. - /// and this should be incremented. - FirstUnassigned = 1, - /// Experimental features start at bit 16. Note that experimental features pop in and out - /// of exsitence without warning. - /// /// This feature enables the extension of `BackedCandidate::validator_indices` by 8 bit. /// The value stored there represents the assumed core index where the candidates /// are backed. - InjectCoreIndex = 16, + ElasticScalingCoreIndex = 1, + /// First unassigned feature bit. + /// Every time a new feature flag is assigned it should take this value. + /// and this should be incremented. + FirstUnassigned = 2, } } From a02e896f1970a9dfd299358525f8dc67a4721c43 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Wed, 14 Feb 2024 15:39:09 +0200 Subject: [PATCH 14/20] remove log Signed-off-by: Andrei Sandu --- polkadot/primitives/src/v6/mod.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/polkadot/primitives/src/v6/mod.rs b/polkadot/primitives/src/v6/mod.rs index c9c78499cbb5..7723b8a1d74d 100644 --- a/polkadot/primitives/src/v6/mod.rs +++ b/polkadot/primitives/src/v6/mod.rs @@ -750,12 +750,6 @@ pub fn check_candidate_backing + Clone + Encode + core::fmt::Debu group_len: usize, validator_lookup: impl Fn(usize) -> Option, ) -> Result { - log::debug!( - target: LOG_TARGET, - "checking candidate {:?}", - backed - ); - if backed.validator_indices.len() != group_len { log::debug!( target: LOG_TARGET, From dd34850d5c933f31c49f03dedb0b32ed400a6ece Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Wed, 14 Feb 2024 19:47:24 +0200 Subject: [PATCH 15/20] more feedback Signed-off-by: Andrei Sandu --- polkadot/node/core/backing/src/lib.rs | 47 ++++++++++++++++----- polkadot/node/core/backing/src/tests/mod.rs | 31 ++++++++++++-- 2 files changed, 64 insertions(+), 14 deletions(-) diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index a8ccbd17b3d3..48bb5121554f 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -230,8 +230,8 @@ struct PerRelayParentState { /// If true, we're appendindg extra bits in the BackedCandidate validator indices bitfield, /// which represent the assigned core index. inject_core_index: bool, - /// Number of cores. - n_cores: usize, + /// The core state for all cores + cores: Vec, /// The validator groups at this relay parent. validator_groups: Vec>, /// The associated group rotation information. @@ -1016,21 +1016,48 @@ macro_rules! 
try_runtime_api { fn core_index_from_statement( validator_groups: &[Vec], group_rotation_info: &GroupRotationInfo, - n_cores: usize, + cores: &[CoreState], statement: &SignedFullStatementWithPVD, ) -> Option { let compact_statement = statement.as_unchecked(); let candidate_hash = CandidateHash(*compact_statement.unchecked_payload().candidate_hash()); - gum::trace!(target: LOG_TARGET, ?group_rotation_info, ?statement, ?validator_groups, ?n_cores, ?candidate_hash, "Extracting core index from statement"); + let n_cores = cores.len(); + + gum::trace!(target: LOG_TARGET, ?group_rotation_info, ?statement, ?validator_groups, n_cores = ?cores.len() , ?candidate_hash, "Extracting core index from statement"); let statement_validator_index = statement.validator_index(); for (group_index, group) in validator_groups.iter().enumerate() { for validator_index in group { if *validator_index == statement_validator_index { - return Some( - group_rotation_info.core_for_group(GroupIndex(group_index as u32), n_cores), - ) + // First check if the statement para id matches the core assignment. + let core_index = + group_rotation_info.core_for_group(GroupIndex(group_index as u32), n_cores); + + if core_index.0 as usize > n_cores { + gum::warn!(target: LOG_TARGET, ?candidate_hash, ?core_index, n_cores, "Invalid CoreIndex"); + return None + } + + if let StatementWithPVD::Seconded(candidate, _pvd) = statement.payload() { + let candidate_para_id = candidate.descriptor.para_id; + let assigned_para_id = match &cores[core_index.0 as usize] { + CoreState::Free => { + gum::debug!(target: LOG_TARGET, ?candidate_hash, "Invalid CoreIndex, core is not assigned to any para_id"); + return None + }, + CoreState::Occupied(occupied) => occupied.candidate_descriptor.para_id, + CoreState::Scheduled(scheduled) => scheduled.para_id, + }; + + if assigned_para_id != candidate_para_id { + gum::debug!(target: LOG_TARGET, ?candidate_hash, ?core_index, ?assigned_para_id, ?candidate_para_id, "Invalid CoreIndex, core is assigned to a different para_id"); + return None + } + return Some(core_index) + } else { + return Some(core_index) + } } } } @@ -1111,7 +1138,7 @@ async fn construct_per_relay_parent_state( let mut assigned_core = None; let mut assigned_para = None; - for (idx, core) in cores.into_iter().enumerate() { + for (idx, core) in cores.iter().enumerate() { let core_para_id = match core { CoreState::Scheduled(scheduled) => scheduled.para_id, CoreState::Occupied(occupied) => @@ -1158,7 +1185,7 @@ async fn construct_per_relay_parent_state( fallbacks: HashMap::new(), minimum_backing_votes, inject_core_index, - n_cores, + cores, validator_groups, group_rotation_info, })) @@ -1656,7 +1683,7 @@ async fn import_statement( let core = core_index_from_statement( &rp_state.validator_groups, &rp_state.group_rotation_info, - rp_state.n_cores, + &rp_state.cores, statement, ) .ok_or(Error::CoreIndexUnavailable)?; diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index 316dcdba6d86..a4f77817427c 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -659,7 +659,7 @@ fn extract_core_index_from_statement_works() { let pov_hash = pov_a.hash(); - let candidate = TestCandidateBuilder { + let mut candidate = TestCandidateBuilder { para_id: test_state.chain_ids[0], relay_parent: test_state.relay_parent, pov_hash, @@ -706,10 +706,23 @@ fn extract_core_index_from_statement_works() { .flatten() .expect("should be signed"); + candidate.descriptor.para_id = 
test_state.chain_ids[1]; + + let signed_statement_3 = SignedFullStatementWithPVD::sign( + &test_state.keystore, + StatementWithPVD::Seconded(candidate, pvd_a.clone()), + &test_state.signing_context, + ValidatorIndex(1), + &public1.into(), + ) + .ok() + .flatten() + .expect("should be signed"); + let core_index_1 = core_index_from_statement( &test_state.validator_groups.0, &test_state.validator_groups.1, - test_state.availability_cores.len(), + &test_state.availability_cores, &signed_statement_1, ) .unwrap(); @@ -719,12 +732,22 @@ fn extract_core_index_from_statement_works() { let core_index_2 = core_index_from_statement( &test_state.validator_groups.0, &test_state.validator_groups.1, - test_state.availability_cores.len(), + &test_state.availability_cores, &signed_statement_2, + ); + + // Must be none, para_id in descriptor is different than para assigned to core + assert_eq!(core_index_2, None); + + let core_index_3 = core_index_from_statement( + &test_state.validator_groups.0, + &test_state.validator_groups.1, + &test_state.availability_cores, + &signed_statement_3, ) .unwrap(); - assert_eq!(core_index_2, CoreIndex(1)); + assert_eq!(core_index_3, CoreIndex(1)); } #[test] From ad98f18fce42f090cd86574d972416ce6a431393 Mon Sep 17 00:00:00 2001 From: alindima Date: Mon, 19 Feb 2024 12:18:24 +0200 Subject: [PATCH 16/20] use next up on available instead of occupied core index --- polkadot/node/core/backing/src/lib.rs | 14 ++++++++++++-- .../backing/src/tests/prospective_parachains.rs | 5 +++-- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 48bb5121554f..a44f3c47f46e 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -1046,7 +1046,13 @@ fn core_index_from_statement( gum::debug!(target: LOG_TARGET, ?candidate_hash, "Invalid CoreIndex, core is not assigned to any para_id"); return None }, - CoreState::Occupied(occupied) => occupied.candidate_descriptor.para_id, + CoreState::Occupied(occupied) => { + if let Some(next) = &occupied.next_up_on_available { + next.para_id + } else { + return None + } + }, CoreState::Scheduled(scheduled) => scheduled.para_id, }; @@ -1145,7 +1151,11 @@ async fn construct_per_relay_parent_state( if mode.is_enabled() { // Async backing makes it legal to build on top of // occupied core. - occupied.candidate_descriptor.para_id + if let Some(next) = &occupied.next_up_on_available { + next.para_id + } else { + continue + } } else { continue }, diff --git a/polkadot/node/core/backing/src/tests/prospective_parachains.rs b/polkadot/node/core/backing/src/tests/prospective_parachains.rs index 165d39b4fcc0..94310d2aa164 100644 --- a/polkadot/node/core/backing/src/tests/prospective_parachains.rs +++ b/polkadot/node/core/backing/src/tests/prospective_parachains.rs @@ -1578,13 +1578,14 @@ fn occupied_core_assignment() { const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; const LEAF_A_ANCESTRY_LEN: BlockNumber = 3; let para_id = test_state.chain_ids[0]; + let previous_para_id = test_state.chain_ids[1]; // Set the core state to occupied. 
let mut candidate_descriptor = ::test_helpers::dummy_candidate_descriptor(Hash::zero()); - candidate_descriptor.para_id = para_id; + candidate_descriptor.para_id = previous_para_id; test_state.availability_cores[0] = CoreState::Occupied(OccupiedCore { group_responsible: Default::default(), - next_up_on_available: None, + next_up_on_available: Some(ScheduledCore { para_id, collator: None }), occupied_since: 100_u32, time_out_at: 200_u32, next_up_on_time_out: None, From 606d7c4ef7c1d28d38cc25f63e5442d78c74daea Mon Sep 17 00:00:00 2001 From: alindima Date: Mon, 19 Feb 2024 13:45:31 +0200 Subject: [PATCH 17/20] ElasticScalingCoreIndex -> ElasticScalingMVP --- polkadot/node/core/backing/src/lib.rs | 2 +- polkadot/primitives/src/vstaging/mod.rs | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index a44f3c47f46e..50084e1658b4 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -1097,7 +1097,7 @@ async fn construct_per_relay_parent_state( let inject_core_index = request_node_features(parent, session_index, ctx.sender()) .await? .unwrap_or(NodeFeatures::EMPTY) - .get(FeatureIndex::ElasticScalingCoreIndex as usize) + .get(FeatureIndex::ElasticScalingMVP as usize) .map(|b| *b) .unwrap_or(false); diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs index 8f4806be85f8..39d9dfc02c5b 100644 --- a/polkadot/primitives/src/vstaging/mod.rs +++ b/polkadot/primitives/src/vstaging/mod.rs @@ -64,10 +64,10 @@ pub mod node_features { /// Tells if tranch0 assignments could be sent in a single certificate. /// Reserved for: `` EnableAssignmentsV2 = 0, - /// This feature enables the extension of `BackedCandidate::validator_indices` by 8 bit. + /// This feature enables the extension of `BackedCandidate::validator_indices` by 8 bits. /// The value stored there represents the assumed core index where the candidates - /// are backed. - ElasticScalingCoreIndex = 1, + /// are backed. This is needed for the elastic scaling MVP. + ElasticScalingMVP = 1, /// First unassigned feature bit. /// Every time a new feature flag is assigned it should take this value. /// and this should be incremented. From c793b89835a01136e0cfdc1c987b6026858864e8 Mon Sep 17 00:00:00 2001 From: alindima Date: Tue, 20 Feb 2024 17:47:29 +0200 Subject: [PATCH 18/20] small nits and typos Signed-off-by: alindima --- polkadot/node/core/backing/src/lib.rs | 6 +++--- polkadot/node/core/provisioner/src/lib.rs | 6 ++++-- polkadot/primitives/src/v6/mod.rs | 8 ++++---- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 50084e1658b4..a6fe0ddbfb91 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -227,10 +227,10 @@ struct PerRelayParentState { fallbacks: HashMap, /// The minimum backing votes threshold. minimum_backing_votes: u32, - /// If true, we're appendindg extra bits in the BackedCandidate validator indices bitfield, - /// which represent the assigned core index. + /// If true, we're appending extra bits in the BackedCandidate validator indices bitfield, + /// which represent the assigned core index. True if ElasticScalingMVP is enabled. inject_core_index: bool, - /// The core state for all cores + /// The core states for all cores. cores: Vec, /// The validator groups at this relay parent. 
validator_groups: Vec>, diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs index e130780c2f3d..d98f6ebfe428 100644 --- a/polkadot/node/core/provisioner/src/lib.rs +++ b/polkadot/node/core/provisioner/src/lib.rs @@ -682,7 +682,9 @@ async fn request_backable_candidates( }; // We should be calling this once per para rather than per core. - // TODO: Will be fixed in https://github.com/paritytech/polkadot-sdk/pull/3233 + // TODO: Will be fixed in https://github.com/paritytech/polkadot-sdk/pull/3233. + // For now, at least make sure we don't supply the same candidate multiple times in case a + // para has multiple cores scheduled. let response = get_backable_candidate(relay_parent, para_id, required_path, sender).await?; match response { Some((hash, relay_parent)) => { @@ -731,7 +733,7 @@ async fn select_candidates( ) .await?, }; - gum::debug!(target: LOG_TARGET, ?selected_candidates, "Got backeable candidates"); + gum::debug!(target: LOG_TARGET, ?selected_candidates, "Got backable candidates"); // now get the backed candidates corresponding to these candidate receipts let (tx, rx) = oneshot::channel(); diff --git a/polkadot/primitives/src/v6/mod.rs b/polkadot/primitives/src/v6/mod.rs index ffb287e581d6..538eb3855848 100644 --- a/polkadot/primitives/src/v6/mod.rs +++ b/polkadot/primitives/src/v6/mod.rs @@ -756,7 +756,7 @@ pub fn check_candidate_backing + Clone + Encode + core::fmt::Debu if backed.validator_indices.len() != group_len { log::debug!( target: LOG_TARGET, - "indices mismatch: group_len = {} , indices_len = {}", + "Check candidate backing: indices mismatch: group_len = {} , indices_len = {}", group_len, backed.validator_indices.len(), ); @@ -766,7 +766,7 @@ pub fn check_candidate_backing + Clone + Encode + core::fmt::Debu if backed.validity_votes.len() > group_len { log::debug!( target: LOG_TARGET, - "Too many votes, expected: {}, found: {}", + "Check candidate backing: Too many votes, expected: {}, found: {}", group_len, backed.validity_votes.len(), ); @@ -793,7 +793,7 @@ pub fn check_candidate_backing + Clone + Encode + core::fmt::Debu } else { log::debug!( target: LOG_TARGET, - "Invalid signature. validator_id = {:?}, validator_index = {} ", + "Check candidate backing: Invalid signature. 
validator_id = {:?}, validator_index = {} ", validator_id, val_in_group_idx, ); @@ -804,7 +804,7 @@ pub fn check_candidate_backing + Clone + Encode + core::fmt::Debu if signed != backed.validity_votes.len() { log::error!( target: LOG_TARGET, - "Too many signatures, expected = {}, found = {}", + "Check candidate backing: Too many signatures, expected = {}, found = {}", backed.validity_votes.len() , signed, ); From 9c3dd5cb54b53abdfde8ccaaef94b4d238303f1c Mon Sep 17 00:00:00 2001 From: alindima Date: Wed, 21 Feb 2024 15:25:12 +0200 Subject: [PATCH 19/20] cache Validator->Group mapping --- Cargo.lock | 1 + polkadot/node/core/backing/Cargo.toml | 1 + polkadot/node/core/backing/src/lib.rs | 143 +++++++++++++------- polkadot/node/core/backing/src/tests/mod.rs | 13 +- 4 files changed, 108 insertions(+), 50 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 378cf44c8cf7..0142532cb050 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12487,6 +12487,7 @@ dependencies = [ "polkadot-primitives-test-helpers", "polkadot-statement-table", "sc-keystore", + "schnellru", "sp-application-crypto", "sp-core", "sp-keyring", diff --git a/polkadot/node/core/backing/Cargo.toml b/polkadot/node/core/backing/Cargo.toml index b0cf041e38da..f71b8df80dd2 100644 --- a/polkadot/node/core/backing/Cargo.toml +++ b/polkadot/node/core/backing/Cargo.toml @@ -22,6 +22,7 @@ bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } gum = { package = "tracing-gum", path = "../../gum" } thiserror = { workspace = true } fatality = "0.0.6" +schnellru = "0.2.1" [dev-dependencies] sp-core = { path = "../../../../substrate/primitives/core" } diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index a6fe0ddbfb91..04b24a417066 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -77,6 +77,7 @@ use futures::{ stream::FuturesOrdered, FutureExt, SinkExt, StreamExt, TryFutureExt, }; +use schnellru::{ByLength, LruMap}; use error::{Error, FatalResult}; use polkadot_node_primitives::{ @@ -107,8 +108,9 @@ use polkadot_primitives::{ vstaging::{node_features::FeatureIndex, NodeFeatures}, BackedCandidate, CandidateCommitments, CandidateHash, CandidateReceipt, CommittedCandidateReceipt, CoreIndex, CoreState, ExecutorParams, GroupIndex, GroupRotationInfo, - Hash, Id as ParaId, PersistedValidationData, PvfExecKind, SigningContext, ValidationCode, - ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, + Hash, Id as ParaId, IndexedVec, PersistedValidationData, PvfExecKind, SessionIndex, + SigningContext, ValidationCode, ValidatorId, ValidatorIndex, ValidatorSignature, + ValidityAttestation, }; use sp_keystore::KeystorePtr; use statement_table::{ @@ -232,8 +234,8 @@ struct PerRelayParentState { inject_core_index: bool, /// The core states for all cores. cores: Vec, - /// The validator groups at this relay parent. - validator_groups: Vec>, + /// The validator index -> group mapping at this relay parent. + validator_to_group: IndexedVec>, /// The associated group rotation information. group_rotation_info: GroupRotationInfo, } @@ -287,6 +289,8 @@ struct State { /// This is guaranteed to have an entry for each candidate with a relay parent in the implicit /// or explicit view for which a `Seconded` statement has been successfully imported. per_candidate: HashMap, + /// Cache the per-session Validator->Group mapping. 
+ validator_to_group_cache: LruMap>>, /// A cloneable sender which is dispatched to background candidate validation tasks to inform /// the main task of the result. background_validation_tx: mpsc::Sender<(Hash, ValidatedCandidateCommand)>, @@ -304,6 +308,7 @@ impl State { per_leaf: HashMap::default(), per_relay_parent: HashMap::default(), per_candidate: HashMap::new(), + validator_to_group_cache: LruMap::new(ByLength::new(2)), background_validation_tx, keystore, } @@ -986,7 +991,14 @@ async fn handle_active_leaves_update( // construct a `PerRelayParent` from the runtime API // and insert it. - let per = construct_per_relay_parent_state(ctx, maybe_new, &state.keystore, mode).await?; + let per = construct_per_relay_parent_state( + ctx, + maybe_new, + &state.keystore, + &mut state.validator_to_group_cache, + mode, + ) + .await?; if let Some(per) = per { state.per_relay_parent.insert(maybe_new, per); @@ -1014,7 +1026,7 @@ macro_rules! try_runtime_api { } fn core_index_from_statement( - validator_groups: &[Vec], + validator_to_group: &IndexedVec>, group_rotation_info: &GroupRotationInfo, cores: &[CoreState], statement: &SignedFullStatementWithPVD, @@ -1024,51 +1036,70 @@ fn core_index_from_statement( let n_cores = cores.len(); - gum::trace!(target: LOG_TARGET, ?group_rotation_info, ?statement, ?validator_groups, n_cores = ?cores.len() , ?candidate_hash, "Extracting core index from statement"); + gum::trace!( + target:LOG_TARGET, + ?group_rotation_info, + ?statement, + ?validator_to_group, + n_cores = ?cores.len(), + ?candidate_hash, + "Extracting core index from statement" + ); let statement_validator_index = statement.validator_index(); - for (group_index, group) in validator_groups.iter().enumerate() { - for validator_index in group { - if *validator_index == statement_validator_index { - // First check if the statement para id matches the core assignment. - let core_index = - group_rotation_info.core_for_group(GroupIndex(group_index as u32), n_cores); - - if core_index.0 as usize > n_cores { - gum::warn!(target: LOG_TARGET, ?candidate_hash, ?core_index, n_cores, "Invalid CoreIndex"); - return None - } + let Some(Some(group_index)) = validator_to_group.get(statement_validator_index) else { + gum::debug!( + target: LOG_TARGET, + ?group_rotation_info, + ?statement, + ?validator_to_group, + n_cores = ?cores.len() , + ?candidate_hash, + "Invalid validator index: {:?}", + statement_validator_index + ); + return None + }; - if let StatementWithPVD::Seconded(candidate, _pvd) = statement.payload() { - let candidate_para_id = candidate.descriptor.para_id; - let assigned_para_id = match &cores[core_index.0 as usize] { - CoreState::Free => { - gum::debug!(target: LOG_TARGET, ?candidate_hash, "Invalid CoreIndex, core is not assigned to any para_id"); - return None - }, - CoreState::Occupied(occupied) => { - if let Some(next) = &occupied.next_up_on_available { - next.para_id - } else { - return None - } - }, - CoreState::Scheduled(scheduled) => scheduled.para_id, - }; + // First check if the statement para id matches the core assignment. 
+ let core_index = group_rotation_info.core_for_group(*group_index, n_cores); - if assigned_para_id != candidate_para_id { - gum::debug!(target: LOG_TARGET, ?candidate_hash, ?core_index, ?assigned_para_id, ?candidate_para_id, "Invalid CoreIndex, core is assigned to a different para_id"); - return None - } - return Some(core_index) + if core_index.0 as usize > n_cores { + gum::warn!(target: LOG_TARGET, ?candidate_hash, ?core_index, n_cores, "Invalid CoreIndex"); + return None + } + + if let StatementWithPVD::Seconded(candidate, _pvd) = statement.payload() { + let candidate_para_id = candidate.descriptor.para_id; + let assigned_para_id = match &cores[core_index.0 as usize] { + CoreState::Free => { + gum::debug!(target: LOG_TARGET, ?candidate_hash, "Invalid CoreIndex, core is not assigned to any para_id"); + return None + }, + CoreState::Occupied(occupied) => + if let Some(next) = &occupied.next_up_on_available { + next.para_id } else { - return Some(core_index) - } - } + return None + }, + CoreState::Scheduled(scheduled) => scheduled.para_id, + }; + + if assigned_para_id != candidate_para_id { + gum::debug!( + target: LOG_TARGET, + ?candidate_hash, + ?core_index, + ?assigned_para_id, + ?candidate_para_id, + "Invalid CoreIndex, core is assigned to a different para_id" + ); + return None } + return Some(core_index) + } else { + return Some(core_index) } - - None } /// Load the data necessary to do backing work on top of a relay-parent. @@ -1077,6 +1108,10 @@ async fn construct_per_relay_parent_state( ctx: &mut Context, relay_parent: Hash, keystore: &KeystorePtr, + validator_to_group_cache: &mut LruMap< + SessionIndex, + IndexedVec>, + >, mode: ProspectiveParachainsMode, ) -> Result, Error> { let parent = relay_parent; @@ -1172,7 +1207,21 @@ async fn construct_per_relay_parent_state( groups.insert(core_index, g.clone()); } } - gum::debug!(target: LOG_TARGET, ?groups, "TableContext" ); + gum::debug!(target: LOG_TARGET, ?groups, "TableContext"); + + let validator_to_group = validator_to_group_cache + .get_or_insert(session_index, || { + let mut vector = vec![None; validators.len()]; + + for (group_idx, validator_group) in validator_groups.iter().enumerate() { + for validator in validator_group { + vector[validator.0 as usize] = Some(GroupIndex(group_idx as u32)); + } + } + + IndexedVec::<_, _>::from(vector) + }) + .expect("Just inserted"); let table_context = TableContext { validator, groups, validators, disabled_validators }; let table_config = TableConfig { @@ -1196,7 +1245,7 @@ async fn construct_per_relay_parent_state( minimum_backing_votes, inject_core_index, cores, - validator_groups, + validator_to_group: validator_to_group.clone(), group_rotation_info, })) } @@ -1691,7 +1740,7 @@ async fn import_statement( let stmt = primitive_statement_to_table(statement); let core = core_index_from_statement( - &rp_state.validator_groups, + &rp_state.validator_to_group, &rp_state.group_rotation_info, &rp_state.cores, statement, diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index a4f77817427c..7223f1e1dfb0 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -72,6 +72,7 @@ pub(crate) struct TestState { validator_public: Vec, validation_data: PersistedValidationData, validator_groups: (Vec>, GroupRotationInfo), + validator_to_group: IndexedVec>, availability_cores: Vec, head_data: HashMap, signing_context: SigningContext, @@ -114,6 +115,11 @@ impl Default for TestState { .into_iter() .map(|g| 
g.into_iter().map(ValidatorIndex).collect()) .collect(); + let validator_to_group: IndexedVec<_, _> = + vec![Some(0), Some(1), Some(0), Some(0), None, Some(0)] + .into_iter() + .map(|x| x.map(|x| GroupIndex(x))) + .collect(); let group_rotation_info = GroupRotationInfo { session_start_block: 0, group_rotation_frequency: 100, now: 1 }; @@ -143,6 +149,7 @@ impl Default for TestState { validators, validator_public, validator_groups: (validator_groups, group_rotation_info), + validator_to_group, availability_cores, head_data, validation_data, @@ -720,7 +727,7 @@ fn extract_core_index_from_statement_works() { .expect("should be signed"); let core_index_1 = core_index_from_statement( - &test_state.validator_groups.0, + &test_state.validator_to_group, &test_state.validator_groups.1, &test_state.availability_cores, &signed_statement_1, @@ -730,7 +737,7 @@ fn extract_core_index_from_statement_works() { assert_eq!(core_index_1, CoreIndex(0)); let core_index_2 = core_index_from_statement( - &test_state.validator_groups.0, + &test_state.validator_to_group, &test_state.validator_groups.1, &test_state.availability_cores, &signed_statement_2, @@ -740,7 +747,7 @@ fn extract_core_index_from_statement_works() { assert_eq!(core_index_2, None); let core_index_3 = core_index_from_statement( - &test_state.validator_groups.0, + &test_state.validator_to_group, &test_state.validator_groups.1, &test_state.availability_cores, &signed_statement_3, From af1cd821d3fb348af2fa25109604ca5a5ffde5dc Mon Sep 17 00:00:00 2001 From: alindima Date: Thu, 22 Feb 2024 09:27:42 +0200 Subject: [PATCH 20/20] use Arc to avoid cloning --- polkadot/node/core/backing/src/lib.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 04b24a417066..cc192607cea0 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -235,7 +235,7 @@ struct PerRelayParentState { /// The core states for all cores. cores: Vec, /// The validator index -> group mapping at this relay parent. - validator_to_group: IndexedVec>, + validator_to_group: Arc>>, /// The associated group rotation information. group_rotation_info: GroupRotationInfo, } @@ -290,7 +290,8 @@ struct State { /// or explicit view for which a `Seconded` statement has been successfully imported. per_candidate: HashMap, /// Cache the per-session Validator->Group mapping. - validator_to_group_cache: LruMap>>, + validator_to_group_cache: + LruMap>>>, /// A cloneable sender which is dispatched to background candidate validation tasks to inform /// the main task of the result. background_validation_tx: mpsc::Sender<(Hash, ValidatedCandidateCommand)>, @@ -1110,7 +1111,7 @@ async fn construct_per_relay_parent_state( keystore: &KeystorePtr, validator_to_group_cache: &mut LruMap< SessionIndex, - IndexedVec>, + Arc>>, >, mode: ProspectiveParachainsMode, ) -> Result, Error> { @@ -1219,7 +1220,7 @@ async fn construct_per_relay_parent_state( } } - IndexedVec::<_, _>::from(vector) + Arc::new(IndexedVec::<_, _>::from(vector)) }) .expect("Just inserted");
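
For illustration only, a minimal standalone sketch of the per-session Validator -> Group cache that the last two patches ([PATCH 19/20] and [PATCH 20/20]) introduce: the validator-index -> group-index mapping is built once per `SessionIndex`, wrapped in an `Arc`, and reused for every relay parent of that session instead of being recomputed per statement. The sketch deliberately uses plain std types (a `HashMap` stands in for the subsystem's `schnellru::LruMap` bounded to the two most recent sessions, and a `Vec<Option<GroupIndex>>` for `IndexedVec<ValidatorIndex, Option<GroupIndex>>`); all names and the `main` driver below are illustrative and are not part of the patch set.

    // Illustrative sketch only -- simplified stand-ins for the real subsystem types.
    use std::{collections::HashMap, sync::Arc};

    type SessionIndex = u32;
    type ValidatorIndex = u32;
    type GroupIndex = u32;

    #[derive(Default)]
    struct ValidatorToGroupCache {
        // The real code uses an LRU map capped at two sessions; a HashMap keyed by
        // session index stands in for it here.
        per_session: HashMap<SessionIndex, Arc<Vec<Option<GroupIndex>>>>,
    }

    impl ValidatorToGroupCache {
        // Build the reverse mapping for `session` on first use, then return the
        // shared Arc on every subsequent call for the same session.
        fn get_or_build(
            &mut self,
            session: SessionIndex,
            n_validators: usize,
            validator_groups: &[Vec<ValidatorIndex>],
        ) -> Arc<Vec<Option<GroupIndex>>> {
            self.per_session
                .entry(session)
                .or_insert_with(|| {
                    let mut mapping = vec![None; n_validators];
                    for (group_idx, group) in validator_groups.iter().enumerate() {
                        for validator in group {
                            mapping[*validator as usize] = Some(group_idx as GroupIndex);
                        }
                    }
                    Arc::new(mapping)
                })
                .clone()
        }
    }

    fn main() {
        // Groups shaped like the backing tests: group 0 = {0, 2, 3, 5}, group 1 = {1};
        // validator 4 is not assigned to any group.
        let groups: Vec<Vec<ValidatorIndex>> = vec![vec![0, 2, 3, 5], vec![1]];
        let mut cache = ValidatorToGroupCache::default();
        let mapping = cache.get_or_build(7, 6, &groups);
        assert_eq!(mapping[1], Some(1));
        assert_eq!(mapping[4], None);
        println!("validator -> group: {:?}", mapping);
    }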