diff --git a/.gitlab/pipeline/zombienet/cumulus.yml b/.gitlab/pipeline/zombienet/cumulus.yml index c473f5c5fed7..746c77ee7cf3 100644 --- a/.gitlab/pipeline/zombienet/cumulus.yml +++ b/.gitlab/pipeline/zombienet/cumulus.yml @@ -150,3 +150,15 @@ zombienet-cumulus-0007-full_node_warp_sync: --local-dir="${LOCAL_DIR}" --concurrency=1 --test="0007-full_node_warp_sync.zndsl" + +zombienet-cumulus-0008-elastic_authoring: + extends: + - .zombienet-cumulus-common + - .zombienet-refs + - .zombienet-before-script + - .zombienet-after-script + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}" + --concurrency=1 + --test="0008-elastic_authoring.zndsl" diff --git a/Cargo.lock b/Cargo.lock index 6467f1e8a14f..a727f5809d68 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3631,6 +3631,7 @@ dependencies = [ "sc-consensus-aura", "sc-consensus-babe", "sc-consensus-slots", + "sc-service", "sc-telemetry", "schnellru", "sp-api", @@ -3646,6 +3647,7 @@ dependencies = [ "sp-state-machine", "sp-timestamp", "substrate-prometheus-endpoint", + "tokio", "tracing", ] @@ -4379,7 +4381,6 @@ dependencies = [ "polkadot-test-service", "portpicker", "rand 0.8.5", - "rococo-parachain-runtime", "sc-basic-authorship", "sc-block-builder", "sc-chain-spec", @@ -4404,7 +4405,6 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-consensus-aura", - "sp-consensus-grandpa", "sp-core", "sp-io", "sp-keyring", @@ -13330,7 +13330,6 @@ dependencies = [ "sc-consensus", "sc-executor", "sc-network", - "sc-network-sync", "sc-rpc", "sc-service", "sc-sysinfo", @@ -21121,10 +21120,11 @@ checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.40" +version = "0.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ + "cfg-if", "log", "pin-project-lite 0.2.12", "tracing-attributes", @@ -21133,9 +21133,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.27" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", diff --git a/cumulus/client/consensus/aura/Cargo.toml b/cumulus/client/consensus/aura/Cargo.toml index 70dd67cb9a00..a4ac78efb688 100644 --- a/cumulus/client/consensus/aura/Cargo.toml +++ b/cumulus/client/consensus/aura/Cargo.toml @@ -15,6 +15,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive futures = "0.3.28" tracing = "0.1.37" schnellru = "0.2.1" +tokio = { version = "1.36.0", features = ["sync"] } # Substrate sc-client-api = { path = "../../../../substrate/client/api" } @@ -35,6 +36,7 @@ sp-keystore = { path = "../../../../substrate/primitives/keystore" } sp-runtime = { path = "../../../../substrate/primitives/runtime" } sp-timestamp = { path = "../../../../substrate/primitives/timestamp" } sp-state-machine = { path = "../../../../substrate/primitives/state-machine" } +sc-service = { path = "../../../../substrate/client/service" } substrate-prometheus-endpoint = { path = "../../../../substrate/utils/prometheus" } # Cumulus diff --git a/cumulus/client/consensus/aura/src/collator.rs b/cumulus/client/consensus/aura/src/collator.rs index 
5b7669c88f47..bac1c27c4837 100644 --- a/cumulus/client/consensus/aura/src/collator.rs +++ b/cumulus/client/consensus/aura/src/collator.rs @@ -156,15 +156,8 @@ where Ok((paras_inherent_data, other_inherent_data)) } - /// Propose, seal, and import a block, packaging it into a collation. - /// - /// Provide the slot to build at as well as any other necessary pre-digest logs, - /// the inherent data, and the proposal duration and PoV size limits. - /// - /// The Aura pre-digest should not be explicitly provided and is set internally. - /// - /// This does not announce the collation to the parachain network or the relay chain. - pub async fn collate( + /// Build and import a parachain block on the given parent header, using the given slot claim. + pub async fn build_block_and_import( &mut self, parent_header: &Block::Header, slot_claim: &SlotClaim, @@ -172,10 +165,7 @@ where inherent_data: (ParachainInherentData, InherentData), proposal_duration: Duration, max_pov_size: usize, - ) -> Result< - Option<(Collation, ParachainBlockData, Block::Hash)>, - Box, - > { + ) -> Result>, Box> { let mut digest = additional_pre_digest.into().unwrap_or_default(); digest.push(slot_claim.pre_digest.clone()); @@ -205,7 +195,6 @@ where ) .map_err(|e| e as Box)?; - let post_hash = sealed_importable.post_hash(); let block = Block::new( sealed_importable.post_header(), sealed_importable @@ -220,11 +209,46 @@ where .map_err(|e| Box::new(e) as Box) .await?; - if let Some((collation, block_data)) = self.collator_service.build_collation( - parent_header, - post_hash, - ParachainCandidate { block, proof: proposal.proof }, - ) { + Ok(Some(ParachainCandidate { block, proof: proposal.proof })) + } + + /// Propose, seal, and import a block, packaging it into a collation. + /// + /// Provide the slot to build at as well as any other necessary pre-digest logs, + /// the inherent data, and the proposal duration and PoV size limits. + /// + /// The Aura pre-digest should not be explicitly provided and is set internally. + /// + /// This does not announce the collation to the parachain network or the relay chain. 
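+ ///
+ /// Internally this delegates to [`Self::build_block_and_import`] and then packages the
+ /// resulting candidate into a collation via the collator service.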
+ pub async fn collate( + &mut self, + parent_header: &Block::Header, + slot_claim: &SlotClaim, + additional_pre_digest: impl Into>>, + inherent_data: (ParachainInherentData, InherentData), + proposal_duration: Duration, + max_pov_size: usize, + ) -> Result< + Option<(Collation, ParachainBlockData, Block::Hash)>, + Box, + > { + let maybe_candidate = self + .build_block_and_import( + parent_header, + slot_claim, + additional_pre_digest, + inherent_data, + proposal_duration, + max_pov_size, + ) + .await?; + + let Some(candidate) = maybe_candidate else { return Ok(None) }; + + let hash = candidate.block.header().hash(); + if let Some((collation, block_data)) = + self.collator_service.build_collation(parent_header, hash, candidate) + { tracing::info!( target: crate::LOG_TARGET, "PoV size {{ header: {}kb, extrinsics: {}kb, storage_proof: {}kb }}", @@ -241,7 +265,7 @@ where ); } - Ok(Some((collation, block_data, post_hash))) + Ok(Some((collation, block_data, hash))) } else { Err(Box::::from("Unable to produce collation") as Box) diff --git a/cumulus/client/consensus/aura/src/collators/basic.rs b/cumulus/client/consensus/aura/src/collators/basic.rs index a4c22a45266c..4efd50a04ec6 100644 --- a/cumulus/client/consensus/aura/src/collators/basic.rs +++ b/cumulus/client/consensus/aura/src/collators/basic.rs @@ -41,19 +41,18 @@ use sc_consensus::BlockImport; use sp_api::{CallApiAt, ProvideRuntimeApi}; use sp_application_crypto::AppPublic; use sp_blockchain::HeaderBackend; -use sp_consensus::SyncOracle; use sp_consensus_aura::AuraApi; use sp_core::crypto::Pair; use sp_inherents::CreateInherentDataProviders; use sp_keystore::KeystorePtr; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member}; use sp_state_machine::Backend as _; -use std::{convert::TryFrom, sync::Arc, time::Duration}; +use std::{sync::Arc, time::Duration}; use crate::collator as collator_util; /// Parameters for [`run`]. -pub struct Params { +pub struct Params { /// Inherent data providers. Only non-consensus inherent data should be provided, i.e. /// the timestamp, slot, and paras inherents should be omitted, as they are set by this /// collator. @@ -64,8 +63,6 @@ pub struct Params { pub para_client: Arc, /// A handle to the relay-chain client. pub relay_client: RClient, - /// A chain synchronization oracle. - pub sync_oracle: SO, /// The underlying keystore, which should contain Aura consensus keys. pub keystore: KeystorePtr, /// The collator key used to sign collations before submitting to validators. @@ -89,8 +86,8 @@ pub struct Params { } /// Run bare Aura consensus as a relay-chain-driven collator. 
-pub fn run( - params: Params, +pub fn run( + params: Params, ) -> impl Future + Send + 'static where Block: BlockT + Send, @@ -108,7 +105,6 @@ where CIDP: CreateInherentDataProviders + Send + 'static, CIDP::InherentDataProviders: Send, BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, - SO: SyncOracle + Send + Sync + Clone + 'static, Proposer: ProposerInterface + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + 'static, P: Pair, diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs index 2b774128c1fb..3dcc6f70db53 100644 --- a/cumulus/client/consensus/aura/src/collators/lookahead.rs +++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs @@ -60,19 +60,18 @@ use sc_consensus_aura::standalone as aura_internal; use sp_api::ProvideRuntimeApi; use sp_application_crypto::AppPublic; use sp_blockchain::HeaderBackend; -use sp_consensus::SyncOracle; use sp_consensus_aura::{AuraApi, Slot}; use sp_core::crypto::Pair; use sp_inherents::CreateInherentDataProviders; use sp_keystore::KeystorePtr; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member}; use sp_timestamp::Timestamp; -use std::{convert::TryFrom, sync::Arc, time::Duration}; +use std::{sync::Arc, time::Duration}; use crate::collator::{self as collator_util, SlotClaim}; /// Parameters for [`run`]. -pub struct Params { +pub struct Params { /// Inherent data providers. Only non-consensus inherent data should be provided, i.e. /// the timestamp, slot, and paras inherents should be omitted, as they are set by this /// collator. @@ -87,8 +86,6 @@ pub struct Params { pub relay_client: RClient, /// A validation code hash provider, used to get the current validation code hash. pub code_hash_provider: CHP, - /// A chain synchronization oracle. - pub sync_oracle: SO, /// The underlying keystore, which should contain Aura consensus keys. pub keystore: KeystorePtr, /// The collator key used to sign collations before submitting to validators. @@ -110,8 +107,8 @@ pub struct Params { } /// Run async-backing-friendly Aura. -pub fn run( - mut params: Params, +pub fn run( + mut params: Params, ) -> impl Future + Send + 'static where Block: BlockT, @@ -130,7 +127,6 @@ where CIDP: CreateInherentDataProviders + 'static, CIDP::InherentDataProviders: Send, BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, - SO: SyncOracle + Send + Sync + Clone + 'static, Proposer: ProposerInterface + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + 'static, CHP: consensus_common::ValidationCodeHashProvider + Send + 'static, diff --git a/cumulus/client/consensus/aura/src/collators/mod.rs b/cumulus/client/consensus/aura/src/collators/mod.rs index 6e0067d0cedb..7ee236e910da 100644 --- a/cumulus/client/consensus/aura/src/collators/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/mod.rs @@ -27,6 +27,7 @@ use polkadot_primitives::{ pub mod basic; pub mod lookahead; +pub mod slot_based; /// Check the `local_validation_code_hash` against the validation code hash in the relay chain /// state. diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs new file mode 100644 index 000000000000..83c2a8c859b7 --- /dev/null +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -0,0 +1,462 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. 
+ +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! A collator for Aura that looks ahead of the most recently included parachain block +//! when determining what to build upon. +//! +//! The block building mechanism consists of two parts: +//! 1. A block-builder task that builds parachain blocks at each of our slot. +//! 2. A collator task that transforms the blocks into a collation and submits them to the relay +//! chain. +//! +//! This collator also builds additional blocks when the maximum backlog is not saturated. +//! The size of the backlog is determined by invoking a runtime API. If that runtime API +//! is not supported, this assumes a maximum backlog size of 1. +//! +//! This takes more advantage of asynchronous backing, though not complete advantage. +//! When the backlog is not saturated, this approach lets the backlog temporarily 'catch up' +//! with periods of higher throughput. When the backlog is saturated, we typically +//! fall back to the limited cadence of a single parachain block per relay-chain block. +//! +//! Despite this, the fact that there is a backlog at all allows us to spend more time +//! building the block, as there is some buffer before it can get posted to the relay-chain. +//! The main limitation is block propagation time - i.e. the new blocks created by an author +//! must be propagated to the next author before their turn. + +use codec::{Codec, Encode}; + +use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; +use cumulus_client_consensus_common::{ + self as consensus_common, load_abridged_host_configuration, ParachainBlockImportMarker, + ParentSearchParams, +}; +use cumulus_client_consensus_proposer::ProposerInterface; +use cumulus_primitives_aura::AuraUnincludedSegmentApi; +use cumulus_primitives_core::{ + relay_chain::Hash as PHash, CollectCollationInfo, PersistedValidationData, +}; +use cumulus_relay_chain_interface::RelayChainInterface; + +use polkadot_primitives::{BlockId, Id as ParaId, OccupiedCoreAssumption}; + +use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf, UsageProvider}; +use sc_consensus::BlockImport; +use sc_consensus_aura::standalone as aura_internal; +use sc_consensus_slots::time_until_next_slot; +use sp_api::ProvideRuntimeApi; +use sp_application_crypto::AppPublic; +use sp_blockchain::HeaderBackend; +use sp_consensus_aura::{AuraApi, Slot, SlotDuration}; +use sp_core::crypto::Pair; +use sp_inherents::CreateInherentDataProviders; +use sp_keystore::KeystorePtr; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member}; +use sp_timestamp::Timestamp; +use std::{sync::Arc, time::Duration}; + +use super::{scheduled_cores, CollatorMessage}; +use crate::{ + collator::{self as collator_util, SlotClaim}, + collators::check_validation_code_or_log, + LOG_TARGET, +}; + +const PARENT_SEARCH_DEPTH: usize = 10; + +/// Parameters for [`run_block_builder`]. +pub struct BuilderTaskParams { + /// Inherent data providers. 
Only non-consensus inherent data should be provided, i.e. + /// the timestamp, slot, and paras inherents should be omitted, as they are set by this + /// collator. + pub create_inherent_data_providers: CIDP, + /// Used to actually import blocks. + pub block_import: BI, + /// The underlying para client. + pub para_client: Arc, + /// The para client's backend, used to access the database. + pub para_backend: Arc, + /// A handle to the relay-chain client. + pub relay_client: RClient, + /// A validation code hash provider, used to get the current validation code hash. + pub code_hash_provider: CHP, + /// The underlying keystore, which should contain Aura consensus keys. + pub keystore: KeystorePtr, + /// The para's ID. + pub para_id: ParaId, + /// The underlying block proposer this should call into. + pub proposer: Proposer, + /// The generic collator service used to plug into this consensus engine. + pub collator_service: CS, + /// The amount of time to spend authoring each block. + pub authoring_duration: Duration, + /// Channel to send built blocks to the collation task. + pub collator_sender: tokio::sync::mpsc::Sender>, +} + +#[derive(Debug)] +struct SlotAndTime { + timestamp: Timestamp, + slot: Slot, +} + +#[derive(Debug)] +struct SlotTimer { + slot_duration: SlotDuration, +} + +impl SlotTimer { + pub fn new(slot_duration: SlotDuration) -> Self { + Self { slot_duration } + } + + /// Returns a future that resolves when the next slot arrives. + pub async fn wait_until_next_slot(&self) -> SlotAndTime { + let time_until_next_slot = time_until_next_slot(self.slot_duration.as_duration()); + tokio::time::sleep(time_until_next_slot).await; + let timestamp = sp_timestamp::Timestamp::current(); + SlotAndTime { slot: Slot::from_timestamp(timestamp, self.slot_duration), timestamp } + } +} + +/// Reads allowed ancestry length parameter from the relay chain storage at the given relay parent. +/// +/// Falls back to 0 in case of an error. +async fn max_ancestry_lookback( + relay_parent: PHash, + relay_client: &impl RelayChainInterface, +) -> usize { + match load_abridged_host_configuration(relay_parent, relay_client).await { + Ok(Some(config)) => config.async_backing_params.allowed_ancestry_len as usize, + Ok(None) => { + tracing::error!( + target: crate::LOG_TARGET, + "Active config is missing in relay chain storage", + ); + 0 + }, + Err(err) => { + tracing::error!( + target: crate::LOG_TARGET, + ?err, + ?relay_parent, + "Failed to read active config from relay chain client", + ); + 0 + }, + } +} + +// Checks if we own the slot at the given block and whether there +// is space in the unincluded segment. +async fn can_build_upon( + slot: Slot, + timestamp: Timestamp, + parent_hash: Block::Hash, + included_block: Block::Hash, + client: &Client, + keystore: &KeystorePtr, +) -> Option> +where + Client: ProvideRuntimeApi, + Client::Api: AuraApi + AuraUnincludedSegmentApi, + P: Pair, + P::Public: Codec, + P::Signature: Codec, +{ + let runtime_api = client.runtime_api(); + let authorities = runtime_api.authorities(parent_hash).ok()?; + let author_pub = aura_internal::claim_slot::
<P>
(slot, &authorities, keystore).await?; + + // Here we lean on the property that building on an empty unincluded segment must always + // be legal. Skipping the runtime API query here allows us to seamlessly run this + // collator against chains which have not yet upgraded their runtime. + if parent_hash != included_block { + if !runtime_api.can_build_upon(parent_hash, included_block, slot).ok()? { + tracing::debug!( + target: crate::LOG_TARGET, + ?parent_hash, + ?included_block, + ?slot, + "Cannot build on top of the current block, skipping slot." + ); + return None + } + } + + Some(SlotClaim::unchecked::
<P>
(author_pub, slot, timestamp)) +} + +/// Run block-builder. +pub async fn run_block_builder( + params: BuilderTaskParams, +) where + Block: BlockT, + Client: ProvideRuntimeApi + + UsageProvider + + BlockOf + + AuxStore + + HeaderBackend + + BlockBackend + + Send + + Sync + + 'static, + Client::Api: + AuraApi + CollectCollationInfo + AuraUnincludedSegmentApi, + Backend: sc_client_api::Backend + 'static, + RClient: RelayChainInterface + Clone + 'static, + CIDP: CreateInherentDataProviders + 'static, + CIDP::InherentDataProviders: Send, + BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, + Proposer: ProposerInterface + Send + Sync + 'static, + CS: CollatorServiceInterface + Send + Sync + 'static, + CHP: consensus_common::ValidationCodeHashProvider + Send + 'static, + P: Pair, + P::Public: AppPublic + Member + Codec, + P::Signature: TryFrom> + Member + Codec, +{ + let BuilderTaskParams { + relay_client, + create_inherent_data_providers, + para_client, + keystore, + block_import, + para_id, + proposer, + collator_service, + collator_sender, + code_hash_provider, + authoring_duration, + para_backend, + } = params; + + let slot_duration = match crate::slot_duration(&*para_client) { + Ok(s) => s, + Err(e) => { + tracing::error!(target: crate::LOG_TARGET, ?e, "Failed to fetch slot duration from runtime. Killing collator task."); + return + }, + }; + + let slot_timer = SlotTimer::new(slot_duration); + + let mut collator = { + let params = collator_util::Params { + create_inherent_data_providers, + block_import, + relay_client: relay_client.clone(), + keystore: keystore.clone(), + para_id, + proposer, + collator_service, + }; + + collator_util::Collator::::new(params) + }; + + loop { + // We wait here until the next slot arrives. + let para_slot = slot_timer.wait_until_next_slot().await; + + let Ok(relay_parent) = relay_client.best_block_hash().await else { + tracing::warn!("Unable to fetch latest relay chain block hash, skipping slot."); + continue; + }; + + let scheduled_cores = scheduled_cores(relay_parent, para_id, &relay_client).await; + if scheduled_cores.is_empty() { + tracing::debug!(target: LOG_TARGET, "Parachain not scheduled, skipping slot."); + continue; + } + + let Ok(Some(relay_parent_header)) = relay_client.header(BlockId::Hash(relay_parent)).await + else { + tracing::warn!("Unable to fetch latest relay chain block header."); + continue; + }; + + let max_pov_size = match relay_client + .persisted_validation_data(relay_parent, para_id, OccupiedCoreAssumption::Included) + .await + { + Ok(None) => continue, + Ok(Some(pvd)) => pvd.max_pov_size, + Err(err) => { + tracing::error!(target: crate::LOG_TARGET, ?err, "Failed to gather information from relay-client"); + continue + }, + }; + + let (included_block, parent) = + match find_parent(relay_parent, para_id, &*para_backend, &relay_client).await { + Some(value) => value, + None => continue, + }; + + let parent_header = parent.header; + let parent_hash = parent.hash; + + // We mainly call this to inform users at genesis if there is a mismatch with the + // on-chain data. + collator.collator_service().check_block_status(parent_hash, &parent_header); + + let slot_claim = match can_build_upon::<_, _, P>( + para_slot.slot, + para_slot.timestamp, + parent_hash, + included_block, + &*para_client, + &keystore, + ) + .await + { + Some(slot) => slot, + None => continue, + }; + + tracing::debug!( + target: crate::LOG_TARGET, + ?relay_parent, + slot_claim = ?para_slot.slot, + unincluded_segment_len = parent.depth, + "Slot claimed. 
Building" + ); + + let validation_data = PersistedValidationData { + parent_head: parent_header.encode().into(), + relay_parent_number: *relay_parent_header.number(), + relay_parent_storage_root: *relay_parent_header.state_root(), + max_pov_size, + }; + + // Build and announce collations recursively until + // `can_build_upon` fails or building a collation fails. + let (parachain_inherent_data, other_inherent_data) = match collator + .create_inherent_data( + relay_parent, + &validation_data, + parent_hash, + slot_claim.timestamp(), + ) + .await + { + Err(err) => { + tracing::error!(target: crate::LOG_TARGET, ?err); + break + }, + Ok(x) => x, + }; + + let validation_code_hash = match code_hash_provider.code_hash_at(parent_hash) { + None => { + tracing::error!(target: crate::LOG_TARGET, ?parent_hash, "Could not fetch validation code hash"); + break + }, + Some(v) => v, + }; + + check_validation_code_or_log(&validation_code_hash, para_id, &relay_client, relay_parent) + .await; + + let Ok(Some(candidate)) = collator + .build_block_and_import( + &parent_header, + &slot_claim, + None, + (parachain_inherent_data, other_inherent_data), + authoring_duration, + // Set the block limit to 50% of the maximum PoV size. + // + // TODO: If we got benchmarking that includes the proof size, + // we should be able to use the maximum pov size. + (validation_data.max_pov_size / 2) as usize, + ) + .await + else { + tracing::error!(target: crate::LOG_TARGET, "Unable to build block at slot."); + continue; + }; + + let new_block_hash = candidate.block.header().hash(); + + // Announce the newly built block to our peers. + collator.collator_service().announce_block(new_block_hash, None); + + if let Err(err) = collator_sender + .send(CollatorMessage { + relay_parent: relay_parent_header.hash(), + parent_header, + parachain_candidate: candidate, + hash: new_block_hash, + validation_code_hash, + }) + .await + { + tracing::error!(target: crate::LOG_TARGET, ?err, "Unable to send block to collation task."); + } + } +} + +async fn find_parent( + relay_parent: PHash, + para_id: ParaId, + para_backend: &impl sc_client_api::Backend, + relay_client: &impl RelayChainInterface, +) -> Option<(::Hash, consensus_common::PotentialParent)> +where + Block: BlockT, +{ + let parent_search_params = ParentSearchParams { + relay_parent, + para_id, + ancestry_lookback: max_ancestry_lookback(relay_parent, relay_client).await, + max_depth: PARENT_SEARCH_DEPTH, + ignore_alternative_branches: true, + }; + + let potential_parents = cumulus_client_consensus_common::find_potential_parents::( + parent_search_params, + para_backend, + relay_client, + ) + .await; + + let mut potential_parents = match potential_parents { + Err(e) => { + tracing::error!( + target: crate::LOG_TARGET, + ?relay_parent, + err = ?e, + "Could not fetch potential parents to build upon" + ); + + return None + }, + Ok(x) => x, + }; + + let included_block = match potential_parents.iter().find(|x| x.depth == 0) { + None => return None, // also serves as an `is_empty` check. 
+ Some(b) => b.hash, + }; + potential_parents.sort_by_key(|a| a.depth); + + let parent = match potential_parents.pop() { + None => return None, + Some(p) => p, + }; + + Some((included_block, parent)) +} diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs new file mode 100644 index 000000000000..451551fa2697 --- /dev/null +++ b/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs @@ -0,0 +1,198 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +use std::collections::VecDeque; + +use codec::Encode; + +use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; +use cumulus_relay_chain_interface::RelayChainInterface; + +use polkadot_node_primitives::{MaybeCompressedPoV, SubmitCollationParams}; +use polkadot_node_subsystem::messages::CollationGenerationMessage; +use polkadot_overseer::Handle as OverseerHandle; +use polkadot_primitives::{CollatorPair, CoreIndex, Id as ParaId}; + +use futures::prelude::*; + +use sp_runtime::traits::{Block as BlockT, Header}; + +use super::{scheduled_cores, CollatorMessage}; + +const LOG_TARGET: &str = "aura::cumulus::collation_task"; + +/// Parameters for the collation task. +pub struct Params { + /// A handle to the relay-chain client. + pub relay_client: RClient, + /// The collator key used to sign collations before submitting to validators. + pub collator_key: CollatorPair, + /// The para's ID. + pub para_id: ParaId, + /// A handle to the relay-chain client's "Overseer" or task orchestrator. + pub overseer_handle: OverseerHandle, + /// Whether we should reinitialize the collator config (i.e. we are transitioning to aura). + pub reinitialize: bool, + /// Collator service interface + pub collator_service: CS, + /// Receiver channel for communication with the block builder task. + pub collator_receiver: tokio::sync::mpsc::Receiver>, +} + +/// Asynchronously executes the collation task for a parachain. +/// +/// This function initializes the collator subsystems necessary for producing and submitting +/// collations to the relay chain. It listens for new best relay chain block notifications and +/// handles collator messages. If our parachain is scheduled on a core and we have a candidate, +/// the task will build a collation and send it to the relay chain. 
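+///
+/// Built blocks are queued internally. One collation is submitted per core on which the
+/// parachain is scheduled; any blocks left over wait for the next relay-chain best-block
+/// notification to refresh the core queue.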
+pub async fn run_collation_task(mut params: Params) +where + Block: BlockT, + CS: CollatorServiceInterface + Send + Sync + 'static, + RClient: RelayChainInterface + Clone + 'static, +{ + cumulus_client_collator::initialize_collator_subsystems( + &mut params.overseer_handle, + params.collator_key, + params.para_id, + params.reinitialize, + ) + .await; + + let collator_service = params.collator_service; + let mut best_notifications = match params.relay_client.new_best_notification_stream().await { + Ok(s) => s, + Err(err) => { + tracing::error!( + target: LOG_TARGET, + ?err, + "Failed to initialize consensus: no relay chain import notification stream" + ); + + return + }, + }; + + let mut overseer_handle = params.overseer_handle; + let mut core_queue = Default::default(); + let mut messages = VecDeque::new(); + loop { + tokio::select! { + // Check for scheduled cores. + Some(notification) = best_notifications.next() => { + core_queue = + scheduled_cores(notification.hash(), params.para_id, ¶ms.relay_client).await; + tracing::debug!( + target: LOG_TARGET, + relay_parent = ?notification.hash(), + ?params.para_id, + cores = ?core_queue, + "New best relay block.", + ); + }, + // Add new message from the block builder to the queue. + collator_message = params.collator_receiver.recv() => { + if let Some(message) = collator_message { + tracing::debug!( + target: LOG_TARGET, + hash = ?message.hash, + num_messages = ?messages.len() + 1, + "Pushing new message.", + ); + messages.push_back(message); + } + } + } + + while !core_queue.is_empty() { + // If there are no more messages to process, we wait for new messages. + let Some(message) = messages.pop_front() else { + break; + }; + + handle_collation_message( + message, + &collator_service, + &mut overseer_handle, + &mut core_queue, + ) + .await; + } + } +} + +async fn handle_collation_message( + message: CollatorMessage, + collator_service: &impl CollatorServiceInterface, + overseer_handle: &mut OverseerHandle, + core_queue: &mut VecDeque, +) { + let CollatorMessage { + parent_header, + hash, + parachain_candidate, + validation_code_hash, + relay_parent, + } = message; + + if core_queue.is_empty() { + tracing::warn!(target: crate::LOG_TARGET, cores_for_para = core_queue.len(), "Not submitting since we have no cores left!."); + return; + } + + let number = *parachain_candidate.block.header().number(); + let (collation, block_data) = + match collator_service.build_collation(&parent_header, hash, parachain_candidate) { + Some(collation) => collation, + None => { + tracing::warn!(target: LOG_TARGET, ?hash, ?number, "Unable to build collation."); + return; + }, + }; + + tracing::info!( + target: LOG_TARGET, + "PoV size {{ header: {}kb, extrinsics: {}kb, storage_proof: {}kb }}", + block_data.header().encode().len() as f64 / 1024f64, + block_data.extrinsics().encode().len() as f64 / 1024f64, + block_data.storage_proof().encode().len() as f64 / 1024f64, + ); + + if let MaybeCompressedPoV::Compressed(ref pov) = collation.proof_of_validity { + tracing::info!( + target: LOG_TARGET, + "Compressed PoV size: {}kb", + pov.block_data.0.len() as f64 / 1024f64, + ); + } + + if let Some(core) = core_queue.pop_front() { + tracing::debug!(target: LOG_TARGET, ?core, ?hash, ?number, "Submitting collation for core."); + overseer_handle + .send_msg( + CollationGenerationMessage::SubmitCollation(SubmitCollationParams { + relay_parent, + collation, + parent_head: parent_header.encode().into(), + validation_code_hash, + core_index: core, + result_sender: None, + }), + 
"SubmitCollation", + ) + .await; + } +} diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs new file mode 100644 index 000000000000..d56840425470 --- /dev/null +++ b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs @@ -0,0 +1,215 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! A collator for Aura that looks ahead of the most recently included parachain block +//! when determining what to build upon. +//! +//! The block building mechanism consists of two parts: +//! 1. A block-builder task that builds parachain blocks at each of our slots. +//! 2. A collator task that transforms the blocks into a collation and submits them to the relay +//! chain. +//! +//! This collator also builds additional blocks when the maximum backlog is not saturated. +//! The size of the backlog is determined by invoking a runtime API. If that runtime API +//! is not supported, this assumes a maximum backlog size of 1. +//! +//! This takes more advantage of asynchronous backing, though not complete advantage. +//! When the backlog is not saturated, this approach lets the backlog temporarily 'catch up' +//! with periods of higher throughput. When the backlog is saturated, we typically +//! fall back to the limited cadence of a single parachain block per relay-chain block. +//! +//! Despite this, the fact that there is a backlog at all allows us to spend more time +//! building the block, as there is some buffer before it can get posted to the relay-chain. +//! The main limitation is block propagation time - i.e. the new blocks created by an author +//! must be propagated to the next author before their turn. 
+ +use codec::Codec; +use consensus_common::ParachainCandidate; +use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; +use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; +use cumulus_client_consensus_proposer::ProposerInterface; +use cumulus_primitives_aura::AuraUnincludedSegmentApi; +use cumulus_primitives_core::{relay_chain::Hash as PHash, CollectCollationInfo}; +use cumulus_relay_chain_interface::RelayChainInterface; +use polkadot_overseer::Handle as OverseerHandle; +use polkadot_primitives::{ + CollatorPair, CoreIndex, Hash as RelayHash, Id as ParaId, ValidationCodeHash, +}; + +use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf, UsageProvider}; +use sc_consensus::BlockImport; + +use sp_api::ProvideRuntimeApi; +use sp_application_crypto::AppPublic; +use sp_blockchain::HeaderBackend; +use sp_consensus_aura::AuraApi; +use sp_core::crypto::Pair; +use sp_inherents::CreateInherentDataProviders; +use sp_keystore::KeystorePtr; +use sp_runtime::traits::{Block as BlockT, Member}; + +use std::{collections::VecDeque, sync::Arc, time::Duration}; + +use crate::LOG_TARGET; + +use self::{block_builder_task::run_block_builder, collation_task::run_collation_task}; + +mod block_builder_task; +mod collation_task; + +/// Parameters for [`run`]. +pub struct Params { + /// Inherent data providers. Only non-consensus inherent data should be provided, i.e. + /// the timestamp, slot, and paras inherents should be omitted, as they are set by this + /// collator. + pub create_inherent_data_providers: CIDP, + /// Used to actually import blocks. + pub block_import: BI, + /// The underlying para client. + pub para_client: Arc, + /// The para client's backend, used to access the database. + pub para_backend: Arc, + /// A handle to the relay-chain client. + pub relay_client: RClient, + /// A validation code hash provider, used to get the current validation code hash. + pub code_hash_provider: CHP, + /// The underlying keystore, which should contain Aura consensus keys. + pub keystore: KeystorePtr, + /// The collator key used to sign collations before submitting to validators. + pub collator_key: CollatorPair, + /// The para's ID. + pub para_id: ParaId, + /// A handle to the relay-chain client's "Overseer" or task orchestrator. + pub overseer_handle: OverseerHandle, + /// The length of slots in the relay chain. + pub relay_chain_slot_duration: Duration, + /// The underlying block proposer this should call into. + pub proposer: Proposer, + /// The generic collator service used to plug into this consensus engine. + pub collator_service: CS, + /// The amount of time to spend authoring each block. + pub authoring_duration: Duration, + /// Whether we should reinitialize the collator config (i.e. we are transitioning to aura). + pub reinitialize: bool, +} + +/// Run aura-based block building and collation task. 
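+///
+/// Returns a pair of futures: the collation task first, the block-builder task second.
+/// Both must be spawned by the caller; see the usage sketch in the module documentation.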
+pub fn run( + params: Params, +) -> (impl futures::Future, impl futures::Future) +where + Block: BlockT, + Client: ProvideRuntimeApi + + BlockOf + + AuxStore + + HeaderBackend + + BlockBackend + + UsageProvider + + Send + + Sync + + 'static, + Client::Api: + AuraApi + CollectCollationInfo + AuraUnincludedSegmentApi, + Backend: sc_client_api::Backend + 'static, + RClient: RelayChainInterface + Clone + 'static, + CIDP: CreateInherentDataProviders + 'static, + CIDP::InherentDataProviders: Send, + BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, + Proposer: ProposerInterface + Send + Sync + 'static, + CS: CollatorServiceInterface + Send + Sync + Clone + 'static, + CHP: consensus_common::ValidationCodeHashProvider + Send + 'static, + P: Pair + 'static, + P::Public: AppPublic + Member + Codec, + P::Signature: TryFrom> + Member + Codec, +{ + let (tx, rx) = tokio::sync::mpsc::channel(100); + + let collator_task_params = collation_task::Params { + relay_client: params.relay_client.clone(), + collator_key: params.collator_key.clone(), + para_id: params.para_id, + overseer_handle: params.overseer_handle.clone(), + reinitialize: params.reinitialize, + collator_service: params.collator_service.clone(), + collator_receiver: rx, + }; + + let collation_task_fut = run_collation_task::(collator_task_params); + + let block_builder_params = block_builder_task::BuilderTaskParams { + create_inherent_data_providers: params.create_inherent_data_providers, + block_import: params.block_import, + para_client: params.para_client, + para_backend: params.para_backend, + relay_client: params.relay_client, + code_hash_provider: params.code_hash_provider, + keystore: params.keystore, + para_id: params.para_id, + proposer: params.proposer, + collator_service: params.collator_service, + authoring_duration: params.authoring_duration, + collator_sender: tx, + }; + + let block_builder_fut = + run_block_builder::(block_builder_params); + + (collation_task_fut, block_builder_fut) +} + +/// Message to be sent from the block builder to the collation task. +/// +/// Contains all data necessary to submit a collation to the relay chain. +struct CollatorMessage { + /// The hash of the relay chain block that provides the context for the parachain block. + pub relay_parent: RelayHash, + /// The header of the parent block. + pub parent_header: Block::Header, + /// The parachain block candidate. + pub parachain_candidate: ParachainCandidate, + /// The hash of the parachain block. + pub hash: Block::Hash, + /// The validation code hash at the parent block. + pub validation_code_hash: ValidationCodeHash, +} + +/// Retrieve the scheduled cores for the parachain with id `para_id` from the relay chain. 
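+///
+/// Walks the availability cores reported by the relay chain at `relay_parent` and collects
+/// the indices of all cores assigned to `para_id`, in ascending order.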
+async fn scheduled_cores( + relay_parent: PHash, + para_id: ParaId, + relay_chain_interface: &RClient, +) -> VecDeque { + let cores = match relay_chain_interface.availability_cores(relay_parent).await { + Ok(cores) => cores, + Err(error) => { + tracing::error!( + target: LOG_TARGET, + ?error, + ?relay_parent, + "Failed to query availability cores runtime API", + ); + return VecDeque::new() + }, + }; + + cores + .iter() + .enumerate() + .filter_map(|(idx, core)| { + (core.para_id() == Some(para_id)).then_some(CoreIndex(idx as u32)) + }) + .collect() +} diff --git a/cumulus/client/consensus/common/src/lib.rs b/cumulus/client/consensus/common/src/lib.rs index cebe34e7ea58..db9fe5c9c258 100644 --- a/cumulus/client/consensus/common/src/lib.rs +++ b/cumulus/client/consensus/common/src/lib.rs @@ -46,6 +46,8 @@ pub use level_monitor::{LevelLimit, MAX_LEAVES_PER_LEVEL_SENSIBLE_DEFAULT}; pub mod import_queue; +const PARENT_SEARCH_LOG_TARGET: &str = "consensus::common::find_potential_parents"; + /// Provides the hash of validation code used for authoring/execution of blocks at a given /// hash. pub trait ValidationCodeHashProvider { @@ -248,7 +250,7 @@ pub struct ParentSearchParams { } /// A potential parent block returned from [`find_potential_parents`] -#[derive(Debug, PartialEq)] +#[derive(PartialEq)] pub struct PotentialParent { /// The hash of the block. pub hash: B::Hash, @@ -261,6 +263,17 @@ pub struct PotentialParent { pub aligned_with_pending: bool, } +impl std::fmt::Debug for PotentialParent { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("PotentialParent") + .field("hash", &self.hash) + .field("depth", &self.depth) + .field("aligned_with_pending", &self.aligned_with_pending) + .field("number", &self.header.number()) + .finish() + } +} + /// Perform a recursive search through blocks to find potential /// parent blocks for a new block. /// @@ -278,10 +291,11 @@ pub struct PotentialParent { /// * the block number is within `max_depth` blocks of the included block pub async fn find_potential_parents( params: ParentSearchParams, - client: &impl Backend, + backend: &impl Backend, relay_client: &impl RelayChainInterface, ) -> Result>, RelayChainError> { // 1. Build up the ancestry record of the relay chain to compare against. + tracing::trace!("Parent search parameters: {params:?}"); let rp_ancestry = { let mut ancestry = Vec::with_capacity(params.ancestry_lookback + 1); let mut current_rp = params.relay_parent; @@ -350,36 +364,117 @@ pub async fn find_potential_parents( let included_hash = included_header.hash(); let pending_hash = pending_header.as_ref().map(|hdr| hdr.hash()); - let mut frontier = vec![PotentialParent:: { - hash: included_hash, - header: included_header, - depth: 0, - aligned_with_pending: true, - }]; + if params.max_depth == 0 { + return Ok(vec![PotentialParent:: { + hash: included_hash, + header: included_header, + depth: 0, + aligned_with_pending: true, + }]) + }; + + let maybe_route = pending_hash + .map(|pending| sp_blockchain::tree_route(backend.blockchain(), included_hash, pending)) + .transpose()?; + + // The distance between pending and included block. Is later used to check if a child + // is aligned with pending when it is between pending and included block. + let pending_distance = maybe_route.as_ref().map(|route| route.enacted().len()); + + // If we want to ignore alternative branches there is no reason to start + // the parent search at the included block. 
We can add the included block and + // the path to the pending block to the potential parents directly (limited by max_depth). + let (mut frontier, mut potential_parents) = if let (Some(pending), true, Some(ref route)) = + (pending_header, params.ignore_alternative_branches, &maybe_route) + { + let mut potential_parents = Vec::new(); + // Included block is always a potential parent + potential_parents.push(PotentialParent:: { + hash: included_hash, + header: included_header.clone(), + depth: 0, + aligned_with_pending: true, + }); - // Recursive search through descendants of the included block which have acceptable - // relay parents. - let mut potential_parents = Vec::new(); + // Add all items on the path included -> pending - 1 to the potential parents, but not + // more than `max_depth`. + let num_parents_on_path = route.enacted().len().saturating_sub(1).min(params.max_depth); + for (num, block) in route.enacted().iter().take(num_parents_on_path).enumerate() { + let header = match backend.blockchain().header(block.hash) { + Ok(Some(h)) => h, + Ok(None) => continue, + Err(_) => continue, + }; + + potential_parents.push(PotentialParent:: { + hash: block.hash, + header, + depth: 1 + num, + aligned_with_pending: true, + }); + } + + // The search for additional potential parents should now start at the + // pending block. + ( + vec![PotentialParent:: { + hash: pending.hash(), + header: pending.clone(), + depth: route.enacted().len(), + aligned_with_pending: true, + }], + potential_parents, + ) + } else { + ( + vec![PotentialParent:: { + hash: included_hash, + header: included_header.clone(), + depth: 0, + aligned_with_pending: true, + }], + Default::default(), + ) + }; + + if potential_parents.len() > params.max_depth { + return Ok(potential_parents); + } + + // If a block is on the path included -> pending, we consider it `aligned_with_pending`. + let is_child_in_path_to_pending = |hash| { + maybe_route + .as_ref() + .map_or(true, |route| route.enacted().iter().any(|x| x.hash == hash)) + }; + + tracing::trace!(target: PARENT_SEARCH_LOG_TARGET, ?included_hash, included_num = ?included_header.number(), ?pending_hash ,?rp_ancestry, "Searching relay chain ancestry."); while let Some(entry) = frontier.pop() { - let is_pending = - entry.depth == 1 && pending_hash.as_ref().map_or(false, |h| &entry.hash == h); - let is_included = entry.depth == 0; + // TODO Adjust once we can fetch multiple pending blocks. + // https://github.com/paritytech/polkadot-sdk/issues/3967 + let is_pending = pending_hash.as_ref().map_or(false, |h| &entry.hash == h); + let is_included = included_hash == entry.hash; // note: even if the pending block or included block have a relay parent // outside of the expected part of the relay chain, they are always allowed // because they have already been posted on chain. 
let is_potential = is_pending || is_included || { let digest = entry.header.digest(); - cumulus_primitives_core::extract_relay_parent(digest).map_or(false, is_hash_in_ancestry) || + let is_hash_in_ancestry_check = cumulus_primitives_core::extract_relay_parent(digest) + .map_or(false, is_hash_in_ancestry); + let is_root_in_ancestry_check = cumulus_primitives_core::rpsr_digest::extract_relay_parent_storage_root(digest) .map(|(r, _n)| r) - .map_or(false, is_root_in_ancestry) + .map_or(false, is_root_in_ancestry); + + is_hash_in_ancestry_check || is_root_in_ancestry_check }; let parent_aligned_with_pending = entry.aligned_with_pending; let child_depth = entry.depth + 1; let hash = entry.hash; + tracing::trace!(target: PARENT_SEARCH_LOG_TARGET, root_in_ancestry = is_potential && !is_pending && !is_included, ?hash, is_pending, is_included, "Checking potential parent."); if is_potential { potential_parents.push(entry); } @@ -389,19 +484,19 @@ pub async fn find_potential_parents( } // push children onto search frontier. - for child in client.blockchain().children(hash).ok().into_iter().flatten() { + for child in backend.blockchain().children(hash).ok().into_iter().flatten() { + tracing::trace!(target: PARENT_SEARCH_LOG_TARGET, ?child, child_depth, ?pending_distance, "Looking at child."); let aligned_with_pending = parent_aligned_with_pending && - if child_depth == 1 { - pending_hash.as_ref().map_or(true, |h| &child == h) - } else { - true - }; + (pending_distance.map_or(true, |dist| child_depth > dist) || + pending_hash.as_ref().map_or(true, |h| &child == h) || + is_child_in_path_to_pending(child)); if params.ignore_alternative_branches && !aligned_with_pending { + tracing::trace!(target: PARENT_SEARCH_LOG_TARGET, ?child, "Child is not aligned with pending block."); continue } - let header = match client.blockchain().header(child) { + let header = match backend.blockchain().header(child) { Ok(Some(h)) => h, Ok(None) => continue, Err(_) => continue, diff --git a/cumulus/client/consensus/common/src/parachain_consensus.rs b/cumulus/client/consensus/common/src/parachain_consensus.rs index b4b315bb32be..944917673b11 100644 --- a/cumulus/client/consensus/common/src/parachain_consensus.rs +++ b/cumulus/client/consensus/common/src/parachain_consensus.rs @@ -375,60 +375,61 @@ async fn handle_new_best_parachain_head( target: LOG_TARGET, block_hash = ?hash, "Skipping set new best block, because block is already the best.", - ) - } else { - // Make sure the block is already known or otherwise we skip setting new best. - match parachain.block_status(hash) { - Ok(BlockStatus::InChainWithState) => { - unset_best_header.take(); - tracing::debug!( - target: LOG_TARGET, - ?hash, - "Importing block as new best for parachain.", - ); - import_block_as_new_best(hash, parachain_head, parachain).await; - }, - Ok(BlockStatus::InChainPruned) => { - tracing::error!( - target: LOG_TARGET, - block_hash = ?hash, - "Trying to set pruned block as new best!", - ); - }, - Ok(BlockStatus::Unknown) => { - *unset_best_header = Some(parachain_head); + ); + return; + } - tracing::debug!( - target: LOG_TARGET, - block_hash = ?hash, - "Parachain block not yet imported, waiting for import to enact as best block.", - ); - - if let Some(ref mut recovery_chan_tx) = recovery_chan_tx { - // Best effort channel to actively encourage block recovery. - // An error here is not fatal; the relay chain continuously re-announces - // the best block, thus we will have other opportunities to retry. 
- let req = RecoveryRequest { hash, kind: RecoveryKind::Full }; - if let Err(err) = recovery_chan_tx.try_send(req) { - tracing::warn!( - target: LOG_TARGET, - block_hash = ?hash, - error = ?err, - "Unable to notify block recovery subsystem" - ) - } + // Make sure the block is already known or otherwise we skip setting new best. + match parachain.block_status(hash) { + Ok(BlockStatus::InChainWithState) => { + unset_best_header.take(); + tracing::debug!( + target: LOG_TARGET, + included = ?hash, + "Importing block as new best for parachain.", + ); + import_block_as_new_best(hash, parachain_head, parachain).await; + }, + Ok(BlockStatus::InChainPruned) => { + tracing::error!( + target: LOG_TARGET, + block_hash = ?hash, + "Trying to set pruned block as new best!", + ); + }, + Ok(BlockStatus::Unknown) => { + *unset_best_header = Some(parachain_head); + + tracing::debug!( + target: LOG_TARGET, + block_hash = ?hash, + "Parachain block not yet imported, waiting for import to enact as best block.", + ); + + if let Some(ref mut recovery_chan_tx) = recovery_chan_tx { + // Best effort channel to actively encourage block recovery. + // An error here is not fatal; the relay chain continuously re-announces + // the best block, thus we will have other opportunities to retry. + let req = RecoveryRequest { hash, kind: RecoveryKind::Full }; + if let Err(err) = recovery_chan_tx.try_send(req) { + tracing::warn!( + target: LOG_TARGET, + block_hash = ?hash, + error = ?err, + "Unable to notify block recovery subsystem" + ) } - }, - Err(e) => { - tracing::error!( - target: LOG_TARGET, - block_hash = ?hash, - error = ?e, - "Failed to get block status of block.", - ); - }, - _ => {}, - } + } + }, + Err(e) => { + tracing::error!( + target: LOG_TARGET, + block_hash = ?hash, + error = ?e, + "Failed to get block status of block.", + ); + }, + _ => {}, } } diff --git a/cumulus/client/consensus/common/src/tests.rs b/cumulus/client/consensus/common/src/tests.rs index aca922657072..a66828248920 100644 --- a/cumulus/client/consensus/common/src/tests.rs +++ b/cumulus/client/consensus/common/src/tests.rs @@ -20,7 +20,7 @@ use async_trait::async_trait; use codec::Encode; use cumulus_client_pov_recovery::RecoveryKind; use cumulus_primitives_core::{ - relay_chain::{self, BlockId}, + relay_chain::{BlockId, BlockNumber, CoreState}, CumulusDigestItem, InboundDownwardMessage, InboundHrmpMessage, }; use cumulus_relay_chain_interface::{ @@ -45,11 +45,11 @@ use std::{ time::Duration, }; -fn relay_block_num_from_hash(hash: &PHash) -> relay_chain::BlockNumber { +fn relay_block_num_from_hash(hash: &PHash) -> BlockNumber { hash.to_low_u64_be() as u32 } -fn relay_hash_from_block_num(block_number: relay_chain::BlockNumber) -> PHash { +fn relay_hash_from_block_num(block_number: BlockNumber) -> PHash { PHash::from_low_u64_be(block_number as u64) } @@ -247,6 +247,13 @@ impl RelayChainInterface for Relaychain { extrinsics_root: PHash::zero(), })) } + + async fn availability_cores( + &self, + _relay_parent: PHash, + ) -> RelayChainResult>> { + unimplemented!("Not needed for test"); + } } fn sproof_with_best_parent(client: &Client) -> RelayStateSproofBuilder { @@ -1125,6 +1132,177 @@ fn find_potential_parents_with_max_depth() { } } +/// Test where there is an additional block between included and pending block. 
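+///
+/// The block topology exercised here (as built below):
+///
+/// ```text
+/// included -- in_between -- pending -- (aligned chain)
+///        \
+///         -- (alternative chain)
+/// ```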
+#[test] +fn find_potential_parents_aligned_with_late_pending() { + sp_tracing::try_init_simple(); + + const NON_INCLUDED_CHAIN_LEN: usize = 5; + + let backend = Arc::new(Backend::new_test(1000, 1)); + let client = Arc::new(TestClientBuilder::with_backend(backend.clone()).build()); + let mut para_import = + ParachainBlockImport::new_with_delayed_best_block(client.clone(), backend.clone()); + + let relay_parent = relay_hash_from_block_num(10); + // Choose different relay parent for alternative chain to get new hashes. + let search_relay_parent = relay_hash_from_block_num(11); + let included_block = build_and_import_block_ext( + &client, + BlockOrigin::NetworkInitialSync, + true, + &mut para_import, + None, + None, + Some(relay_parent), + ); + + let in_between_block = build_and_import_block_ext( + &client, + BlockOrigin::NetworkInitialSync, + true, + &mut para_import, + Some(included_block.header().hash()), + None, + Some(relay_parent), + ); + + let pending_block = build_and_import_block_ext( + &client, + BlockOrigin::Own, + true, + &mut para_import, + Some(in_between_block.header().hash()), + None, + Some(relay_parent), + ); + + let relay_chain = Relaychain::new(); + { + let relay_inner = &mut relay_chain.inner.lock().unwrap(); + relay_inner + .relay_chain_hash_to_header + .insert(search_relay_parent, included_block.header().clone()); + relay_inner + .relay_chain_hash_to_header_pending + .insert(search_relay_parent, in_between_block.header().clone()); + relay_inner + .relay_chain_hash_to_header_pending + .insert(search_relay_parent, pending_block.header().clone()); + } + + // Build two sibling chains from the included block. + let mut aligned_blocks = Vec::new(); + let mut parent = pending_block.header().hash(); + for _ in 2..NON_INCLUDED_CHAIN_LEN { + let block = build_and_import_block_ext( + &client, + BlockOrigin::Own, + true, + &mut para_import, + Some(parent), + None, + Some(relay_parent), + ); + parent = block.header().hash(); + aligned_blocks.push(block); + } + + let mut alt_blocks = Vec::new(); + let mut parent = included_block.header().hash(); + for _ in 0..NON_INCLUDED_CHAIN_LEN { + let block = build_and_import_block_ext( + &client, + BlockOrigin::NetworkInitialSync, + true, + &mut para_import, + Some(parent), + None, + Some(search_relay_parent), + ); + parent = block.header().hash(); + alt_blocks.push(block); + } + + // Ignore alternative branch: + for max_depth in 0..=NON_INCLUDED_CHAIN_LEN { + let potential_parents = block_on(find_potential_parents( + ParentSearchParams { + relay_parent: search_relay_parent, + para_id: ParaId::from(100), + ancestry_lookback: 1, // aligned chain is in ancestry. + max_depth, + ignore_alternative_branches: true, + }, + &*backend, + &relay_chain, + )) + .unwrap(); + + assert_eq!(potential_parents.len(), max_depth + 1); + let expected_parents: Vec<_> = [&included_block, &in_between_block, &pending_block] + .into_iter() + .chain(aligned_blocks.iter()) + .take(max_depth + 1) + .collect(); + + for i in 0..(max_depth + 1) { + let parent = &potential_parents[i]; + let expected = &expected_parents[i]; + + assert_eq!(parent.hash, expected.hash()); + assert_eq!(&parent.header, expected.header()); + assert_eq!(parent.depth, i); + assert!(parent.aligned_with_pending); + } + } + + // Do not ignore: + for max_depth in 0..=NON_INCLUDED_CHAIN_LEN { + let potential_parents = block_on(find_potential_parents( + ParentSearchParams { + relay_parent: search_relay_parent, + para_id: ParaId::from(100), + ancestry_lookback: 1, // aligned chain is in ancestry. 
+ max_depth, + ignore_alternative_branches: false, + }, + &*backend, + &relay_chain, + )) + .unwrap(); + + let expected_len = 2 * max_depth + 1; + assert_eq!(potential_parents.len(), expected_len); + let expected_aligned: Vec<_> = [&included_block, &in_between_block, &pending_block] + .into_iter() + .chain(aligned_blocks.iter()) + .take(max_depth + 1) + .collect(); + let expected_alt = alt_blocks.iter().take(max_depth); + + let expected_parents: Vec<_> = + expected_aligned.clone().into_iter().chain(expected_alt).collect(); + // Check correctness. + assert_eq!(expected_parents.len(), expected_len); + + for i in 0..expected_len { + let parent = &potential_parents[i]; + let expected = expected_parents + .iter() + .find(|block| block.header().hash() == parent.hash) + .expect("missing parent"); + + let is_aligned = expected_aligned.contains(&expected); + + assert_eq!(parent.hash, expected.hash()); + assert_eq!(&parent.header, expected.header()); + + assert_eq!(parent.aligned_with_pending, is_aligned); + } + } +} + #[test] fn find_potential_parents_aligned_with_pending() { sp_tracing::try_init_simple(); @@ -1236,6 +1414,7 @@ fn find_potential_parents_aligned_with_pending() { // Do not ignore: for max_depth in 0..=NON_INCLUDED_CHAIN_LEN { + log::info!("Ran with max_depth = {max_depth}"); let potential_parents = block_on(find_potential_parents( ParentSearchParams { relay_parent: search_relay_parent, @@ -1263,6 +1442,7 @@ fn find_potential_parents_aligned_with_pending() { // Check correctness. assert_eq!(expected_parents.len(), expected_len); + potential_parents.iter().for_each(|p| log::info!("result: {:?}", p)); for i in 0..expected_len { let parent = &potential_parents[i]; let expected = expected_parents @@ -1275,6 +1455,12 @@ fn find_potential_parents_aligned_with_pending() { assert_eq!(parent.hash, expected.hash()); assert_eq!(&parent.header, expected.header()); + log::info!( + "Check hash: {:?} expected: {} is: {}", + parent.hash, + is_aligned, + parent.aligned_with_pending, + ); assert_eq!(parent.aligned_with_pending, is_aligned); } } diff --git a/cumulus/client/network/src/tests.rs b/cumulus/client/network/src/tests.rs index 3f5757d5eac1..8649e7f1901b 100644 --- a/cumulus/client/network/src/tests.rs +++ b/cumulus/client/network/src/tests.rs @@ -26,9 +26,10 @@ use futures::{executor::block_on, poll, task::Poll, FutureExt, Stream, StreamExt use parking_lot::Mutex; use polkadot_node_primitives::{SignedFullStatement, Statement}; use polkadot_primitives::{ - CandidateCommitments, CandidateDescriptor, CollatorPair, CommittedCandidateReceipt, - Hash as PHash, HeadData, InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, - PersistedValidationData, SessionIndex, SigningContext, ValidationCodeHash, ValidatorId, + BlockNumber, CandidateCommitments, CandidateDescriptor, CollatorPair, + CommittedCandidateReceipt, CoreState, Hash as PHash, HeadData, InboundDownwardMessage, + InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData, SessionIndex, + SigningContext, ValidationCodeHash, ValidatorId, }; use polkadot_test_client::{ Client as PClient, ClientBlockImportExt, DefaultTestClientBuilderExt, FullBackend as PBackend, @@ -264,6 +265,13 @@ impl RelayChainInterface for DummyRelayChainInterface { Ok(header) } + + async fn availability_cores( + &self, + _relay_parent: PHash, + ) -> RelayChainResult>> { + unimplemented!("Not needed for test"); + } } fn make_validator_and_api() -> ( diff --git a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs 
b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs index 6ea02b2e7c1f..7c8f4376357f 100644 --- a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs +++ b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs @@ -19,9 +19,9 @@ use std::{pin::Pin, sync::Arc, time::Duration}; use async_trait::async_trait; use cumulus_primitives_core::{ relay_chain::{ - runtime_api::ParachainHost, Block as PBlock, BlockId, CommittedCandidateReceipt, - Hash as PHash, Header as PHeader, InboundHrmpMessage, OccupiedCoreAssumption, SessionIndex, - ValidationCodeHash, ValidatorId, + runtime_api::ParachainHost, Block as PBlock, BlockId, BlockNumber, + CommittedCandidateReceipt, CoreState, Hash as PHash, Header as PHeader, InboundHrmpMessage, + OccupiedCoreAssumption, SessionIndex, ValidationCodeHash, ValidatorId, }, InboundDownwardMessage, ParaId, PersistedValidationData, }; @@ -251,6 +251,13 @@ impl RelayChainInterface for RelayChainInProcessInterface { }); Ok(Box::pin(notifications_stream)) } + + async fn availability_cores( + &self, + relay_parent: PHash, + ) -> RelayChainResult>> { + Ok(self.full_client.runtime_api().availability_cores(relay_parent)?) + } } pub enum BlockCheckStatus { diff --git a/cumulus/client/relay-chain-interface/src/lib.rs b/cumulus/client/relay-chain-interface/src/lib.rs index bb93e6a168c8..aacf35483ada 100644 --- a/cumulus/client/relay-chain-interface/src/lib.rs +++ b/cumulus/client/relay-chain-interface/src/lib.rs @@ -29,8 +29,8 @@ use sp_api::ApiError; use cumulus_primitives_core::relay_chain::BlockId; pub use cumulus_primitives_core::{ relay_chain::{ - CommittedCandidateReceipt, Hash as PHash, Header as PHeader, InboundHrmpMessage, - OccupiedCoreAssumption, SessionIndex, ValidationCodeHash, ValidatorId, + BlockNumber, CommittedCandidateReceipt, CoreState, Hash as PHash, Header as PHeader, + InboundHrmpMessage, OccupiedCoreAssumption, SessionIndex, ValidationCodeHash, ValidatorId, }, InboundDownwardMessage, ParaId, PersistedValidationData, }; @@ -203,6 +203,13 @@ pub trait RelayChainInterface: Send + Sync { para_id: ParaId, occupied_core_assumption: OccupiedCoreAssumption, ) -> RelayChainResult>; + + /// Yields information on all availability cores as relevant to the child block. + /// Cores are either free or occupied. Free cores can have paras assigned to them. 
+	async fn availability_cores(
+		&self,
+		relay_parent: PHash,
+	) -> RelayChainResult<Vec<CoreState<PHash, BlockNumber>>>;
 }
 
 #[async_trait]
 impl<T> RelayChainInterface for Arc<T>
@@ -321,4 +328,11 @@ where
 			.validation_code_hash(relay_parent, para_id, occupied_core_assumption)
 			.await
 	}
+
+	async fn availability_cores(
+		&self,
+		relay_parent: PHash,
+	) -> RelayChainResult<Vec<CoreState<PHash, BlockNumber>>> {
+		(**self).availability_cores(relay_parent).await
+	}
 }
diff --git a/cumulus/client/relay-chain-rpc-interface/src/lib.rs b/cumulus/client/relay-chain-rpc-interface/src/lib.rs
index 3a4c186e301e..bfe13f83f5b1 100644
--- a/cumulus/client/relay-chain-rpc-interface/src/lib.rs
+++ b/cumulus/client/relay-chain-rpc-interface/src/lib.rs
@@ -24,7 +24,7 @@ use cumulus_primitives_core::{
 	InboundDownwardMessage, ParaId, PersistedValidationData,
 };
 use cumulus_relay_chain_interface::{
-	PHeader, RelayChainError, RelayChainInterface, RelayChainResult,
+	BlockNumber, CoreState, PHeader, RelayChainError, RelayChainInterface, RelayChainResult,
 };
 use futures::{FutureExt, Stream, StreamExt};
 use polkadot_overseer::Handle;
@@ -237,4 +237,11 @@ impl RelayChainInterface for RelayChainRpcInterface {
 		let imported_headers_stream = self.rpc_client.get_best_heads_stream()?;
 		Ok(imported_headers_stream.boxed())
 	}
+
+	async fn availability_cores(
+		&self,
+		relay_parent: RelayHash,
+	) -> RelayChainResult<Vec<CoreState<RelayHash, BlockNumber>>> {
+		self.rpc_client.parachain_host_availability_cores(relay_parent).await
+	}
 }
diff --git a/cumulus/pallets/aura-ext/src/consensus_hook.rs b/cumulus/pallets/aura-ext/src/consensus_hook.rs
index 592029803391..40276c035f6d 100644
--- a/cumulus/pallets/aura-ext/src/consensus_hook.rs
+++ b/cumulus/pallets/aura-ext/src/consensus_hook.rs
@@ -65,8 +65,17 @@ where
 		let para_slot_from_relay =
 			Slot::from_timestamp(relay_chain_timestamp.into(), para_slot_duration);
 
-		// Perform checks.
-		assert_eq!(slot, para_slot_from_relay, "slot number mismatch");
+		// Check that we are not too far in the future. Since we expect `V` parachain blocks
+		// during the relay chain slot, we can allow for `V` parachain slots into the future.
+		if *slot > *para_slot_from_relay + u64::from(velocity) {
+			panic!(
+				"Parachain slot is too far in the future: parachain_slot: {:?}, derived_from_relay_slot: {:?}, velocity: {:?}",
+				slot,
+				para_slot_from_relay,
+				velocity
+			);
+		}
+
 		if authored > velocity + 1 {
 			panic!("authored blocks limit is reached for the slot")
 		}
@@ -113,6 +122,11 @@ impl<
 			return false
 		}
 
+		// TODO: This logic needs to be adjusted.
+		// It checks that we have not authored more than `V + 1` blocks in the slot.
+		// However, the slot used here is the parachain slot, while velocity should
+		// be measured against the relay chain slot.
+		// https://github.com/paritytech/polkadot-sdk/issues/3967
 		if last_slot == new_slot {
 			authored_so_far < velocity + 1
 		} else {
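The consensus hook change above bounds how far ahead a parachain slot may claim to be: with up to `velocity` parachain blocks expected per relay chain slot, a claimed slot more than `velocity` slots past the one derived from the relay chain timestamp is rejected. A minimal, self-contained sketch of that bound (illustrative names, not the pallet's API):

// Sketch of the future-slot bound enforced by `FixedVelocityConsensusHook` above.
// `para_slot` is the slot claimed by the block author; `relay_derived_slot` is the
// parachain slot derived from the relay chain timestamp.
fn slot_within_velocity_bound(para_slot: u64, relay_derived_slot: u64, velocity: u32) -> bool {
    para_slot <= relay_derived_slot + u64::from(velocity)
}

fn main() {
    // If the relay chain timestamp maps to parachain slot 100 and velocity is 3,
    // slots 100..=103 are acceptable; slot 104 panics in the hook above.
    assert!(slot_within_velocity_bound(103, 100, 3));
    assert!(!slot_within_velocity_bound(104, 100, 3));
}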
diff --git a/cumulus/pallets/aura-ext/src/lib.rs b/cumulus/pallets/aura-ext/src/lib.rs
index 7ca84dff7c51..4605dd325bee 100644
--- a/cumulus/pallets/aura-ext/src/lib.rs
+++ b/cumulus/pallets/aura-ext/src/lib.rs
@@ -83,7 +83,7 @@ pub mod pallet {
 
 		SlotInfo::<T>::put((new_slot, authored));
 
-		T::DbWeight::get().reads_writes(2, 1)
+		T::DbWeight::get().reads_writes(4, 2)
 	}
 }
diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml
index 280ece30fb68..6e74c2587fed 100644
--- a/cumulus/polkadot-parachain/Cargo.toml
+++ b/cumulus/polkadot-parachain/Cargo.toml
@@ -66,7 +66,6 @@ sc-telemetry = { path = "../../substrate/client/telemetry" }
 sc-transaction-pool = { path = "../../substrate/client/transaction-pool" }
 sp-transaction-pool = { path = "../../substrate/primitives/transaction-pool" }
 sc-network = { path = "../../substrate/client/network" }
-sc-network-sync = { path = "../../substrate/client/network/sync" }
 sc-basic-authorship = { path = "../../substrate/client/basic-authorship" }
 sp-timestamp = { path = "../../substrate/primitives/timestamp" }
 sp-blockchain = { path = "../../substrate/primitives/blockchain" }
@@ -112,6 +111,7 @@ cumulus-primitives-aura = { path = "../primitives/aura" }
 cumulus-primitives-core = { path = "../primitives/core" }
 cumulus-relay-chain-interface = { path = "../client/relay-chain-interface" }
 color-print = "0.3.4"
+tokio = { version = "1.32.0", features = ["macros", "parking_lot", "time"] }
 
 [build-dependencies]
 substrate-build-script-utils = { path = "../../substrate/utils/build-script-utils" }
@@ -120,7 +120,6 @@ substrate-build-script-utils = { path = "../../substrate/utils/build-script-util
 assert_cmd = "2.0"
 nix = { version = "0.26.1", features = ["signal"] }
 tempfile = "3.8.0"
-tokio = { version = "1.32.0", features = ["macros", "parking_lot", "time"] }
 wait-timeout = "0.2"
 
 [features]
diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs
index 2dd3541e85f4..c4facd3ef8d5 100644
--- a/cumulus/polkadot-parachain/src/service.rs
+++ b/cumulus/polkadot-parachain/src/service.rs
@@ -52,7 +52,6 @@ use sc_consensus::{
 };
 use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY};
 use sc_network::{config::FullNetworkConfiguration, service::traits::NetworkBackend, NetworkBlock};
-use sc_network_sync::SyncingService;
 use sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager};
 use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle};
 use sp_api::{ApiExt, ConstructRuntimeApi, ProvideRuntimeApi};
@@ -235,7 +234,6 @@ where
 			&TaskManager,
 			Arc<dyn RelayChainInterface>,
 			Arc<sc_transaction_pool::FullPool<Block, ParachainClient<RuntimeApi>>>,
-			Arc<SyncingService<Block>>,
 			KeystorePtr,
 			Duration,
 			ParaId,
@@ -369,7 +367,6 @@ where
 		&task_manager,
 		relay_chain_interface.clone(),
 		transaction_pool,
-		sync_service.clone(),
 		params.keystore_container.keystore(),
 		relay_chain_slot_duration,
 		para_id,
@@ -710,7 +707,6 @@ pub async fn start_generic_aura_node<Net: NetworkBackend<Block, Hash>>(
 		task_manager,
 		relay_chain_interface,
 		transaction_pool,
-		sync_oracle,
 		keystore,
 		relay_chain_slot_duration,
 		para_id,
@@ -739,7 +735,6 @@ pub async fn start_generic_aura_node<Net: NetworkBackend<Block, Hash>>(
 		block_import,
 		para_client: client,
 		relay_client: relay_chain_interface,
-		sync_oracle,
 		keystore,
 		collator_key,
 		para_id,
@@ -753,7 +748,7 @@ pub async fn start_generic_aura_node<Net: NetworkBackend<Block, Hash>>(
 	};
 
 	let fut =
-		basic_aura::run::<Block, <AuraId as AppCrypto>::Pair, _, _, _, _, _, _, _>(params);
+		basic_aura::run::<Block, <AuraId as AppCrypto>::Pair, _, _, _, _, _, _>(params);
task_manager.spawn_essential_handle().spawn("aura", None, fut); Ok(()) @@ -828,7 +823,6 @@ where task_manager, relay_chain_interface, transaction_pool, - sync_oracle, keystore, relay_chain_slot_duration, para_id, @@ -896,7 +890,6 @@ where block_import, para_client: client, relay_client: relay_chain_interface2, - sync_oracle, keystore, collator_key, para_id, @@ -909,7 +902,7 @@ where collation_request_receiver: Some(request_stream), }; - basic_aura::run::::Pair, _, _, _, _, _, _, _>(params) + basic_aura::run::::Pair, _, _, _, _, _, _>(params) .await }); @@ -972,7 +965,6 @@ where task_manager, relay_chain_interface, transaction_pool, - sync_oracle, keystore, relay_chain_slot_duration, para_id, @@ -1045,7 +1037,6 @@ where code_hash_provider: move |block_hash| { client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) }, - sync_oracle, keystore, collator_key, para_id, @@ -1058,7 +1049,7 @@ where * to aura */ }; - aura::run::::Pair, _, _, _, _, _, _, _, _, _>(params) + aura::run::::Pair, _, _, _, _, _, _, _, _>(params) .await }); @@ -1082,7 +1073,6 @@ fn start_relay_chain_consensus( task_manager: &TaskManager, relay_chain_interface: Arc, transaction_pool: Arc>>, - _sync_oracle: Arc>, _keystore: KeystorePtr, _relay_chain_slot_duration: Duration, para_id: ParaId, @@ -1153,7 +1143,6 @@ fn start_lookahead_aura_consensus( task_manager: &TaskManager, relay_chain_interface: Arc, transaction_pool: Arc>>, - sync_oracle: Arc>, keystore: KeystorePtr, relay_chain_slot_duration: Duration, para_id: ParaId, @@ -1186,7 +1175,6 @@ fn start_lookahead_aura_consensus( code_hash_provider: move |block_hash| { client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) }, - sync_oracle, keystore, collator_key, para_id, @@ -1198,7 +1186,7 @@ fn start_lookahead_aura_consensus( reinitialize: false, }; - let fut = aura::run::::Pair, _, _, _, _, _, _, _, _, _>(params); + let fut = aura::run::::Pair, _, _, _, _, _, _, _, _>(params); task_manager.spawn_essential_handle().spawn("aura", None, fut); Ok(()) diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs index a39a662553b0..6ccea43fb7cb 100644 --- a/cumulus/test/client/src/lib.rs +++ b/cumulus/test/client/src/lib.rs @@ -94,6 +94,7 @@ impl substrate_test_client::GenesisInit for GenesisParameters { cumulus_test_service::chain_spec::get_chain_spec_with_extra_endowed( None, self.endowed_accounts.clone(), + cumulus_test_runtime::WASM_BINARY.expect("WASM binary not compiled!"), ) .build_storage() .expect("Builds test runtime genesis storage") diff --git a/cumulus/test/runtime/Cargo.toml b/cumulus/test/runtime/Cargo.toml index 1969045640ed..aebc1033db8d 100644 --- a/cumulus/test/runtime/Cargo.toml +++ b/cumulus/test/runtime/Cargo.toml @@ -93,3 +93,4 @@ std = [ "substrate-wasm-builder", ] increment-spec-version = [] +elastic-scaling = [] diff --git a/cumulus/test/runtime/build.rs b/cumulus/test/runtime/build.rs index 5e5f6a35a505..bf01795ac127 100644 --- a/cumulus/test/runtime/build.rs +++ b/cumulus/test/runtime/build.rs @@ -30,6 +30,13 @@ fn main() { .import_memory() .set_file_name("wasm_binary_spec_version_incremented.rs") .build(); + + WasmBuilder::new() + .with_current_project() + .enable_feature("elastic-scaling") + .import_memory() + .set_file_name("wasm_binary_elastic_scaling.rs") + .build(); } #[cfg(not(feature = "std"))] diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 22dc5d857b7c..da9fd2768b43 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -27,6 
+27,11 @@ pub mod wasm_spec_version_incremented {
 	include!(concat!(env!("OUT_DIR"), "/wasm_binary_spec_version_incremented.rs"));
 }
 
+pub mod elastic_scaling {
+	#[cfg(feature = "std")]
+	include!(concat!(env!("OUT_DIR"), "/wasm_binary_elastic_scaling.rs"));
+}
+
 mod test_pallet;
 use frame_support::{derive_impl, traits::OnRuntimeUpgrade, PalletId};
 use sp_api::{decl_runtime_apis, impl_runtime_apis};
@@ -83,8 +88,23 @@ impl_opaque_keys! {
 /// The para-id used in this runtime.
 pub const PARACHAIN_ID: u32 = 100;
 
-const UNINCLUDED_SEGMENT_CAPACITY: u32 = 3;
+#[cfg(not(feature = "elastic-scaling"))]
+const UNINCLUDED_SEGMENT_CAPACITY: u32 = 4;
+#[cfg(not(feature = "elastic-scaling"))]
 const BLOCK_PROCESSING_VELOCITY: u32 = 1;
+
+#[cfg(feature = "elastic-scaling")]
+const UNINCLUDED_SEGMENT_CAPACITY: u32 = 6;
+#[cfg(feature = "elastic-scaling")]
+const BLOCK_PROCESSING_VELOCITY: u32 = 3;
+
+#[cfg(not(feature = "elastic-scaling"))]
+pub const MILLISECS_PER_BLOCK: u64 = 6000;
+#[cfg(feature = "elastic-scaling")]
+pub const MILLISECS_PER_BLOCK: u64 = 2000;
+
+pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK;
+
 const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000;
 
 // The only difference between the two declarations below is the `spec_version`. With the
@@ -126,10 +146,6 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	state_version: 1,
 };
 
-pub const MILLISECS_PER_BLOCK: u64 = 6000;
-
-pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK;
-
 pub const EPOCH_DURATION_IN_BLOCKS: u32 = 10 * MINUTES;
 
 // These time units are defined in number of blocks.
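The cfg-gated constants above encode the elastic-scaling throughput model: a 2-second block time yields up to three parachain blocks per 6-second relay chain slot (the block processing velocity), and the unincluded segment is sized so that, under these numbers, it can buffer two relay slots' worth of not-yet-included blocks. A quick sanity check of that arithmetic (a sketch of the relationship implied by the constants, not code from the runtime):

fn main() {
    const RELAY_CHAIN_SLOT_DURATION_MILLIS: u64 = 6000;
    const MILLISECS_PER_BLOCK: u64 = 2000; // elastic-scaling block time from above
    const BLOCK_PROCESSING_VELOCITY: u64 = 3;
    const UNINCLUDED_SEGMENT_CAPACITY: u64 = 6;

    // Velocity: parachain blocks per relay chain slot.
    assert_eq!(
        RELAY_CHAIN_SLOT_DURATION_MILLIS / MILLISECS_PER_BLOCK,
        BLOCK_PROCESSING_VELOCITY
    );

    // Capacity 6 leaves room for two relay slots of unincluded blocks
    // (a reading of the chosen constants, not a documented invariant).
    assert_eq!(UNINCLUDED_SEGMENT_CAPACITY, 2 * BLOCK_PROCESSING_VELOCITY);
}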
diff --git a/cumulus/test/service/Cargo.toml b/cumulus/test/service/Cargo.toml
index 18213b2f6326..3444fd6192d2 100644
--- a/cumulus/test/service/Cargo.toml
+++ b/cumulus/test/service/Cargo.toml
@@ -62,7 +62,7 @@ sc-executor-common = { path = "../../../substrate/client/executor/common" }
 
 # Polkadot
 polkadot-primitives = { path = "../../../polkadot/primitives" }
-polkadot-service = { path = "../../../polkadot/node/service" }
+polkadot-service = { path = "../../../polkadot/node/service", features = ["elastic-scaling-experimental"] }
 polkadot-test-service = { path = "../../../polkadot/node/test/service" }
 polkadot-cli = { path = "../../../polkadot/cli" }
 polkadot-node-subsystem = { path = "../../../polkadot/node/subsystem" }
@@ -92,8 +92,6 @@ pallet-timestamp = { path = "../../../substrate/frame/timestamp" }
 [dev-dependencies]
 futures = "0.3.28"
 portpicker = "0.1.1"
-rococo-parachain-runtime = { path = "../../parachains/runtimes/testing/rococo-parachain" }
-sp-consensus-grandpa = { path = "../../../substrate/primitives/consensus/grandpa" }
 sp-authority-discovery = { path = "../../../substrate/primitives/authority-discovery" }
 cumulus-test-client = { path = "../client" }
@@ -116,7 +114,6 @@ runtime-benchmarks = [
 	"polkadot-primitives/runtime-benchmarks",
 	"polkadot-service/runtime-benchmarks",
 	"polkadot-test-service/runtime-benchmarks",
-	"rococo-parachain-runtime/runtime-benchmarks",
 	"sc-service/runtime-benchmarks",
 	"sp-runtime/runtime-benchmarks",
 ]
diff --git a/cumulus/test/service/src/chain_spec.rs b/cumulus/test/service/src/chain_spec.rs
index 4db2513e2b63..7b44bdf2fee0 100644
--- a/cumulus/test/service/src/chain_spec.rs
+++ b/cumulus/test/service/src/chain_spec.rs
@@ -66,9 +66,10 @@ where
 pub fn get_chain_spec_with_extra_endowed(
 	id: Option<ParaId>,
 	extra_endowed_accounts: Vec<AccountId>,
+	code: &[u8],
 ) -> ChainSpec {
 	ChainSpec::builder(
-		cumulus_test_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"),
+		code,
 		Extensions { para_id: id.unwrap_or(cumulus_test_runtime::PARACHAIN_ID.into()).into() },
 	)
 	.with_name("Local Testnet")
@@ -83,7 +84,21 @@ pub fn get_chain_spec_with_extra_endowed(
 
 /// Get the chain spec for a specific parachain ID.
 pub fn get_chain_spec(id: Option<ParaId>) -> ChainSpec {
-	get_chain_spec_with_extra_endowed(id, Default::default())
+	get_chain_spec_with_extra_endowed(
+		id,
+		Default::default(),
+		cumulus_test_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"),
+	)
+}
+
+/// Get the chain spec for the elastic-scaling parachain.
+pub fn get_elastic_scaling_chain_spec(id: Option<ParaId>) -> ChainSpec {
+	get_chain_spec_with_extra_endowed(
+		id,
+		Default::default(),
+		cumulus_test_runtime::elastic_scaling::WASM_BINARY
+			.expect("WASM binary was not built, please build it!"),
+	)
 }
 
 /// Local testnet genesis for testing.
diff --git a/cumulus/test/service/src/cli.rs b/cumulus/test/service/src/cli.rs
index 87d1d4af8a95..79d557e9db82 100644
--- a/cumulus/test/service/src/cli.rs
+++ b/cumulus/test/service/src/cli.rs
@@ -45,6 +45,9 @@ pub struct TestCollatorCli {
 	#[arg(long)]
 	pub use_null_consensus: bool,
 
+	#[arg(long)]
+	pub use_slot_authoring: bool,
+
 	#[arg(long)]
 	pub disable_block_announcements: bool,
 
@@ -253,8 +256,16 @@ impl SubstrateCli for TestCollatorCli {
 
 	fn load_spec(&self, id: &str) -> std::result::Result<Box<dyn sc_service::ChainSpec>, String> {
 		Ok(match id {
-			"" =>
-				Box::new(cumulus_test_service::get_chain_spec(Some(ParaId::from(2000)))) as Box<_>,
+			"" => {
+				tracing::info!("Using default test service chain spec.");
+				Box::new(cumulus_test_service::get_chain_spec(Some(ParaId::from(2000)))) as Box<_>
+			},
+			"elastic-scaling" => {
+				tracing::info!("Using elastic-scaling chain spec.");
+				Box::new(cumulus_test_service::get_elastic_scaling_chain_spec(Some(ParaId::from(
+					2100,
+				)))) as Box<_>
+			},
 			path => {
 				let chain_spec =
 					cumulus_test_service::chain_spec::ChainSpec::from_json_file(path.into())?;
diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs
index 11aa2e5b9f35..836cf8bbc531 100644
--- a/cumulus/test/service/src/lib.rs
+++ b/cumulus/test/service/src/lib.rs
@@ -25,7 +25,10 @@ pub mod chain_spec;
 
 use cumulus_client_collator::service::CollatorService;
 use cumulus_client_consensus_aura::{
-	collators::lookahead::{self as aura, Params as AuraParams},
+	collators::{
+		lookahead::{self as aura, Params as AuraParams},
+		slot_based::{self as slot_based, Params as SlotBasedParams},
+	},
 	ImportQueueParams,
 };
 use cumulus_client_consensus_proposer::Proposer;
@@ -303,7 +306,7 @@ async fn build_relay_chain_interface(
 
 /// Start a node with the given parachain `Configuration` and relay chain `Configuration`.
 ///
 /// This is the actual implementation that is abstract over the executor and the runtime api.
-#[sc_tracing::logging::prefix_logs_with(parachain_config.network.node_name.as_str())]
+#[sc_tracing::logging::prefix_logs_with("Parachain")]
 pub async fn start_node_impl<RB, Net: NetworkBackend<Block, Hash>>(
 	parachain_config: Configuration,
 	collator_key: Option<CollatorPair>,
@@ -315,6 +318,7 @@ pub async fn start_node_impl<RB, Net: NetworkBackend<Block, Hash>>(
 	consensus: Consensus,
 	collator_options: CollatorOptions,
 	proof_recording_during_import: bool,
+	slot_based_authoring: bool,
 ) -> sc_service::error::Result<(
 	TaskManager,
 	Arc<Client>,
@@ -460,29 +464,72 @@ where
 		);
 
 		let client_for_aura = client.clone();
-		let params = AuraParams {
-			create_inherent_data_providers: move |_, ()| async move { Ok(()) },
-			block_import,
-			para_client: client.clone(),
-			para_backend: backend.clone(),
-			relay_client: relay_chain_interface,
-			code_hash_provider: move |block_hash| {
-				client_for_aura.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash())
-			},
-			sync_oracle: sync_service,
-			keystore,
-			collator_key,
-			para_id,
-			overseer_handle,
-			relay_chain_slot_duration,
-			proposer,
-			collator_service,
-			authoring_duration: Duration::from_millis(2000),
-			reinitialize: false,
-		};
 
-		let fut = aura::run::(params);
-		task_manager.spawn_essential_handle().spawn("aura", None, fut);
+		if slot_based_authoring {
+			tracing::info!(target: LOG_TARGET, "Starting block authoring with slot based authoring.");
+			let params = SlotBasedParams {
+				create_inherent_data_providers: move |_, ()| async move { Ok(()) },
+				block_import,
+				para_client: client.clone(),
+				para_backend: backend.clone(),
+				relay_client: relay_chain_interface,
+				code_hash_provider: move |block_hash| {
+					client_for_aura
+						.code_at(block_hash)
+						.ok()
+						.map(|c| ValidationCode::from(c).hash())
+				},
+				keystore,
+				collator_key,
+				para_id,
+				overseer_handle,
+				relay_chain_slot_duration,
+				proposer,
+				collator_service,
+				authoring_duration: Duration::from_millis(2000),
+				reinitialize: false,
+			};
+
+			let (collation_future, block_builder_future) =
+				slot_based::run::(params);
+			task_manager.spawn_essential_handle().spawn(
+				"collation-task",
+				None,
+				collation_future,
+			);
+			task_manager.spawn_essential_handle().spawn(
+				"block-builder-task",
+				None,
+				block_builder_future,
+			);
+		} else {
+			tracing::info!(target: LOG_TARGET, "Starting block authoring with lookahead collator.");
+			let params = AuraParams {
+				create_inherent_data_providers: move |_, ()| async move { Ok(()) },
+				block_import,
+				para_client: client.clone(),
+				para_backend: backend.clone(),
+				relay_client: relay_chain_interface,
+				code_hash_provider: move |block_hash| {
+					client_for_aura
+						.code_at(block_hash)
+						.ok()
+						.map(|c| ValidationCode::from(c).hash())
+				},
+				keystore,
+				collator_key,
+				para_id,
+				overseer_handle,
+				relay_chain_slot_duration,
+				proposer,
+				collator_service,
+				authoring_duration: Duration::from_millis(2000),
+				reinitialize: false,
+			};
+
+			let fut = aura::run::(params);
+			task_manager.spawn_essential_handle().spawn("aura", None, fut);
+		}
 	}
 }
@@ -719,6 +766,7 @@ impl TestNodeBuilder {
 				self.consensus,
 				collator_options,
 				self.record_proof_during_import,
+				false,
 			)
 			.await
 			.expect("could not create Cumulus test service"),
@@ -734,6 +782,7 @@ impl TestNodeBuilder {
 				self.consensus,
 				collator_options,
 				self.record_proof_during_import,
+				false,
 			)
 			.await
 			.expect("could not create Cumulus test service"),
@@ -765,8 +814,11 @@ pub fn node_config(
 	let root = base_path.path().join(format!("cumulus_test_service_{}", key));
 	let role = if is_collator { Role::Authority } else { Role::Full };
 	let key_seed = key.to_seed();
-	let mut spec =
-		Box::new(chain_spec::get_chain_spec_with_extra_endowed(Some(para_id), endowed_accounts));
+	let mut spec = Box::new(chain_spec::get_chain_spec_with_extra_endowed(
+		Some(para_id),
+		endowed_accounts,
+		cumulus_test_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"),
+	));
 
 	let mut storage = spec.as_storage_builder().build_storage().expect("could not build storage");
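Note the structural difference from the lookahead path: `slot_based::run` hands back two futures, and both are spawned as essential tasks so the node shuts down if either the block builder or the collation side stops. A minimal sketch of that two-task pattern using plain tokio (illustrative stand-ins; the real futures come from `slot_based::run` as above):

use std::future::pending;

// Stand-ins for the two long-running futures returned by the slot-based collator:
// one authors blocks on parachain slots, the other turns them into collations.
async fn block_builder_task() {
    pending::<()>().await
}

async fn collation_task() {
    pending::<()>().await
}

#[tokio::main]
async fn main() {
    let builder = tokio::spawn(block_builder_task());
    let collator = tokio::spawn(collation_task());

    // Mirror `spawn_essential_handle`: if either task ends, treat it as fatal.
    tokio::select! {
        _ = builder => panic!("block builder task stopped"),
        _ = collator => panic!("collation task stopped"),
    }
}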
diff --git a/cumulus/test/service/src/main.rs b/cumulus/test/service/src/main.rs
index 90d37173dd59..96a1155f3abe 100644
--- a/cumulus/test/service/src/main.rs
+++ b/cumulus/test/service/src/main.rs
@@ -118,6 +118,7 @@ fn main() -> Result<(), sc_cli::Error> {
 						consensus,
 						collator_options,
 						true,
+						cli.use_slot_authoring,
 					)
 					.await,
 				sc_network::config::NetworkBackendType::Litep2p =>
@@ -135,6 +136,7 @@ fn main() -> Result<(), sc_cli::Error> {
 						consensus,
 						collator_options,
 						true,
+						cli.use_slot_authoring,
 					)
 					.await,
 			}
diff --git a/cumulus/zombienet/tests/0008-configure-broker.js b/cumulus/zombienet/tests/0008-configure-broker.js
new file mode 100644
index 000000000000..f38a3bc03331
--- /dev/null
+++ b/cumulus/zombienet/tests/0008-configure-broker.js
@@ -0,0 +1,82 @@
+const assert = require("assert");
+
+async function run(nodeName, networkInfo, _jsArgs) {
+  const { wsUri, userDefinedTypes } = networkInfo.nodesByName[nodeName];
+  const api = await zombie.connect(wsUri, userDefinedTypes);
+
+  await zombie.util.cryptoWaitReady();
+
+  // account to submit tx
+  const keyring = new zombie.Keyring({ type: "sr25519" });
+  const alice = keyring.addFromUri("//Alice");
+
+  const calls = [
+    // Default broker configuration
+    api.tx.broker.configure({
+      advanceNotice: 5,
+      interludeLength: 1,
+      leadinLength: 1,
+      regionLength: 1,
+      idealBulkProportion: 100,
+      limitCoresOffered: null,
+      renewalBump: 10,
+      contributionTimeout: 5,
+    }),
+    // We need more cores.
+    api.tx.broker.requestCoreCount(5),
+    // Set a lease for the broker chain itself.
+    api.tx.broker.setLease(
+      1005,
+      1000,
+    ),
+    // Three cores for para 2100.
+    api.tx.broker.setLease(
+      2100,
+      1000,
+    ),
+    api.tx.broker.setLease(
+      2100,
+      1000,
+    ),
+    api.tx.broker.setLease(
+      2100,
+      1000,
+    ),
+    // One core for para 2000.
+    api.tx.broker.setLease(
+      2000,
+      1000,
+    ),
+    // Start sales so the broker chain does its work; we don't offer any cores
+    // for sale, since we rely on fixed leases only anyway.
+    api.tx.broker.startSales(1, 0),
+  ];
+  const sudo_batch = api.tx.sudo.sudo(api.tx.utility.batch(calls));
+
+  await new Promise(async (resolve, reject) => {
+    const unsub = await sudo_batch.signAndSend(alice, (result) => {
+      console.log(`Current status is ${result.status}`);
+      if (result.status.isInBlock) {
+        console.log(
+          `Transaction included at blockHash ${result.status.asInBlock}`
+        );
+      } else if (result.status.isFinalized) {
+        console.log(
+          `Transaction finalized at blockHash ${result.status.asFinalized}`
+        );
+        unsub();
+        return resolve();
+      } else if (result.isError) {
+        // Probably happens because of: https://github.com/paritytech/polkadot-sdk/issues/1202.
+        console.log(`Transaction error`);
+        // We ignore the error, as it is very likely misleading due to the issue mentioned above.
+        unsub();
+        return resolve();
+      }
+    });
+  });
+
+  return 0;
+}
+
+module.exports = { run };
diff --git a/cumulus/zombienet/tests/0008-configure-relay.js b/cumulus/zombienet/tests/0008-configure-relay.js
new file mode 100644
index 000000000000..c546ac3f5fdd
--- /dev/null
+++ b/cumulus/zombienet/tests/0008-configure-relay.js
@@ -0,0 +1,78 @@
+const assert = require("assert");
+
+async function run(nodeName, networkInfo, _jsArgs) {
+  const init = networkInfo.nodesByName[nodeName];
+  let wsUri = init.wsUri;
+  let userDefinedTypes = init.userDefinedTypes;
+  const api = await zombie.connect(wsUri, userDefinedTypes);
+
+  const collatorElastic = networkInfo.nodesByName["collator-elastic"];
+  wsUri = collatorElastic.wsUri;
+  userDefinedTypes = collatorElastic.userDefinedTypes;
+  const apiCollatorElastic = await zombie.connect(wsUri, userDefinedTypes);
+
+  const collatorSingleCore = networkInfo.nodesByName["collator-single-core"];
+  const wsUriSingleCore = collatorSingleCore.wsUri;
+  const userDefinedTypesSingleCore = collatorSingleCore.userDefinedTypes;
+
+  const apiCollatorSingleCore = await zombie.connect(wsUriSingleCore, userDefinedTypesSingleCore);
+
+  await zombie.util.cryptoWaitReady();
+
+  // Get the genesis header and the validation code of parachain 2100
+  const genesisHeaderElastic = await apiCollatorElastic.rpc.chain.getHeader();
+  const validationCodeElastic = await apiCollatorElastic.rpc.state.getStorage("0x3A636F6465");
+
+  // Get the genesis header and the validation code of parachain 2000
+  const genesisHeaderSingleCore = await apiCollatorSingleCore.rpc.chain.getHeader();
+  const validationCodeSingleCore = await apiCollatorSingleCore.rpc.state.getStorage("0x3A636F6465");
+
+  // account to submit tx
+  const keyring = new zombie.Keyring({ type: "sr25519" });
+  const alice = keyring.addFromUri("//Alice");
+
+  const calls = [
+    api.tx.configuration.setCoretimeCores({ new: 7 }),
+    api.tx.coretime.assignCore(0, 20, [[{ task: 1005 }, 57600]], null),
+    api.tx.registrar.forceRegister(
+      alice.address,
+      0,
+      2100,
+      genesisHeaderElastic.toHex(),
+      validationCodeElastic.toHex(),
+    ),
+    api.tx.registrar.forceRegister(
+      alice.address,
+      0,
+      2000,
+      genesisHeaderSingleCore.toHex(),
+      validationCodeSingleCore.toHex(),
+    )
+  ];
+  const sudo_batch = api.tx.sudo.sudo(api.tx.utility.batch(calls));
+
+  await new Promise(async (resolve, reject) => {
+    const unsub = await sudo_batch.signAndSend(alice, (result) => {
+      console.log(`Current status is ${result.status}`);
+      if (result.status.isInBlock) {
+        console.log(
+          `Transaction included at blockHash ${result.status.asInBlock}`
+        );
+      } else if (result.status.isFinalized) {
+        console.log(
+          `Transaction finalized at blockHash ${result.status.asFinalized}`
+        );
+        unsub();
+        return resolve();
+      } else if (result.isError) {
+        console.log(`Transaction Error`);
+        unsub();
+        return reject();
+      }
+    });
+  });
+
+  return 0;
+}
+
+module.exports = { run };
diff --git a/cumulus/zombienet/tests/0008-elastic_authoring.toml b/cumulus/zombienet/tests/0008-elastic_authoring.toml
new file mode 100644
index 000000000000..9d3769d7cb0a
--- /dev/null
+++ b/cumulus/zombienet/tests/0008-elastic_authoring.toml
@@ -0,0 +1,57 @@
+[settings]
+timeout = 1000
+
+[relaychain.genesis.runtimeGenesis.patch.configuration.config.async_backing_params]
+  max_candidate_depth = 6
+  allowed_ancestry_len = 3
+
+[relaychain]
+default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}"
+chain = "rococo-local"
+command = "polkadot"
+
+  [[relaychain.nodes]]
+  name = "alice"
+  args = [""]
+
+  [[relaychain.node_groups]]
+  name = "validator"
= "validator" + args = ["-lruntime=debug,parachain=trace" ] + count = 8 + +[[parachains]] +id = 1005 +chain = "coretime-rococo-local" + + [parachains.collator] + name = "coretime-collator" + image = "{{CUMULUS_IMAGE}}" + command = "polkadot-parachain" + args = [ "-lruntime=info" ,"--force-authoring"] + +# Slot based authoring with 3 cores and 2s slot duration +[[parachains]] +id = 2100 +add_to_genesis = false +register_para = false +onboard_as_parachain = false +chain = "elastic-scaling" + + [[parachains.collators]] + name = "collator-elastic" + image = "{{CUMULUS_IMAGE}}" + command = "test-parachain" + args = ["-laura=trace,runtime=info,cumulus-consensus=trace,consensus::common=trace,parachain::collation-generation=trace,parachain::collator-protocol=trace,parachain=debug", "--force-authoring", "--use-slot-authoring"] + +# Slot based authoring with 3 cores and 2s slot duration +[[parachains]] +id = 2000 +add_to_genesis = false +register_para = false +onboard_as_parachain = false + + [[parachains.collators]] + name = "collator-single-core" + image = "{{CUMULUS_IMAGE}}" + command = "test-parachain" + args = ["-laura=trace,runtime=info,cumulus-consensus=trace,consensus::common=trace,parachain::collation-generation=trace,parachain::collator-protocol=trace,parachain=debug", "--force-authoring", "--use-slot-authoring"] diff --git a/cumulus/zombienet/tests/0008-elastic_authoring.zndsl b/cumulus/zombienet/tests/0008-elastic_authoring.zndsl new file mode 100644 index 000000000000..8f83ba8b1a2c --- /dev/null +++ b/cumulus/zombienet/tests/0008-elastic_authoring.zndsl @@ -0,0 +1,18 @@ +Description: Slot based authoring for elastic scaling +Network: ./0008-elastic_authoring.toml +Creds: config + +alice: is up +coretime-collator: is up + +# configure relay chain +alice: js-script ./0008-configure-relay.js with "" return is 0 within 600 secs + +# configure broker chain +coretime-collator: js-script ./0008-configure-broker.js with "" return is 0 within 600 secs + +# Ensure that parachain 2100 got onboarded +alice: parachain 2100 block height is at least 30 within 45000 seconds + +# Ensure that parachain 2000 got onboarded +alice: parachain 2000 block height is at least 10 within 45000 seconds diff --git a/substrate/client/basic-authorship/src/basic_authorship.rs b/substrate/client/basic-authorship/src/basic_authorship.rs index 1519c76c42c0..74805488792a 100644 --- a/substrate/client/basic-authorship/src/basic_authorship.rs +++ b/substrate/client/basic-authorship/src/basic_authorship.rs @@ -205,7 +205,11 @@ where ) -> Proposer { let parent_hash = parent_header.hash(); - info!("🙌 Starting consensus session on top of parent {:?}", parent_hash); + info!( + "🙌 Starting consensus session on top of parent {:?} (#{})", + parent_hash, + parent_header.number() + ); let proposer = Proposer::<_, _, _, PR> { spawn_handle: self.spawn_handle.clone(), diff --git a/substrate/client/consensus/aura/src/standalone.rs b/substrate/client/consensus/aura/src/standalone.rs index 0f9b8668d447..c1536d9ef73f 100644 --- a/substrate/client/consensus/aura/src/standalone.rs +++ b/substrate/client/consensus/aura/src/standalone.rs @@ -24,7 +24,7 @@ use log::trace; use codec::Codec; -use sc_client_api::{backend::AuxStore, UsageProvider}; +use sc_client_api::UsageProvider; use sp_api::{Core, ProvideRuntimeApi}; use sp_application_crypto::{AppCrypto, AppPublic}; use sp_blockchain::Result as CResult; @@ -48,7 +48,7 @@ pub fn slot_duration(client: &C) -> CResult where A: Codec, B: BlockT, - C: AuxStore + ProvideRuntimeApi + UsageProvider, + C: 
ProvideRuntimeApi + UsageProvider, C::Api: AuraApi, { slot_duration_at(client, client.usage_info().chain.best_hash) @@ -59,7 +59,7 @@ pub fn slot_duration_at(client: &C, block_hash: B::Hash) -> CResult, + C: ProvideRuntimeApi, C::Api: AuraApi, { client.runtime_api().slot_duration(block_hash).map_err(|err| err.into()) diff --git a/substrate/client/consensus/slots/src/lib.rs b/substrate/client/consensus/slots/src/lib.rs index d9d792005312..7cdf90877dff 100644 --- a/substrate/client/consensus/slots/src/lib.rs +++ b/substrate/client/consensus/slots/src/lib.rs @@ -29,8 +29,8 @@ mod aux_schema; mod slots; pub use aux_schema::{check_equivocation, MAX_SLOT_CAPACITY, PRUNING_BOUND}; -pub use slots::SlotInfo; use slots::Slots; +pub use slots::{time_until_next_slot, SlotInfo}; use futures::{future::Either, Future, TryFutureExt}; use futures_timer::Delay; diff --git a/templates/parachain/node/src/service.rs b/templates/parachain/node/src/service.rs index 373df01b0c43..0b1b6259c72d 100644 --- a/templates/parachain/node/src/service.rs +++ b/templates/parachain/node/src/service.rs @@ -28,7 +28,6 @@ use sc_client_api::Backend; use sc_consensus::ImportQueue; use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY}; use sc_network::NetworkBlock; -use sc_network_sync::SyncingService; use sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager}; use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle}; use sc_transaction_pool_api::OffchainTransactionPoolFactory; @@ -162,7 +161,6 @@ fn start_consensus( task_manager: &TaskManager, relay_chain_interface: Arc, transaction_pool: Arc>, - sync_oracle: Arc>, keystore: KeystorePtr, relay_chain_slot_duration: Duration, para_id: ParaId, @@ -196,7 +194,6 @@ fn start_consensus( block_import, para_client: client, relay_client: relay_chain_interface, - sync_oracle, keystore, collator_key, para_id, @@ -209,10 +206,9 @@ fn start_consensus( collation_request_receiver: None, }; - let fut = - basic_aura::run::( - params, - ); + let fut = basic_aura::run::( + params, + ); task_manager.spawn_essential_handle().spawn("aura", None, fut); Ok(()) @@ -388,7 +384,6 @@ pub async fn start_parachain_node( &task_manager, relay_chain_interface.clone(), transaction_pool, - sync_service.clone(), params.keystore_container.keystore(), relay_chain_slot_duration, para_id,
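Finally, `time_until_next_slot` is now exported from `sc-consensus-slots`, letting the slot-based collator drive its own timer instead of consuming the internal `Slots` stream. A sketch of the computation such a helper performs, assuming fixed-length slots aligned to the Unix epoch (this mirrors the helper's purpose, not the crate's exact code):

use std::time::{Duration, SystemTime, UNIX_EPOCH};

// Remaining time until the next slot boundary for epoch-aligned slots.
fn time_until_next_slot(slot_duration: Duration) -> Duration {
    let now_ms = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system time is after the Unix epoch")
        .as_millis() as u64;
    let slot_ms = slot_duration.as_millis() as u64;
    Duration::from_millis(slot_ms - (now_ms % slot_ms))
}

fn main() {
    // With the 2s elastic-scaling block time, the next slot is at most 2s away.
    let remaining = time_until_next_slot(Duration::from_millis(2000));
    assert!(remaining <= Duration::from_millis(2000));
    println!("next slot starts in {remaining:?}");
}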