From 7b836a2d35d6715b20c2a4c83dd16b9b373742c1 Mon Sep 17 00:00:00 2001
From: Ermal Kaleci
Date: Wed, 21 Oct 2020 17:52:12 +0200
Subject: [PATCH] Support instant sealing (#170)

* support both manual and instant sealing
* fix name
* remove sealing.unwrap()
* make MockTimestamp to start from 0
* update docs
* use thread_local to avoid unsafe
* remove expect
---
 template/node/src/cli.rs     | 18 +++++--
 template/node/src/command.rs | 12 ++---
 template/node/src/service.rs | 100 ++++++++++++++++++++++++++---------
 ts-tests/tests/test-block.ts | 11 ++--
 ts-tests/tests/util.ts       | 2 +-
 5 files changed, 100 insertions(+), 43 deletions(-)

diff --git a/template/node/src/cli.rs b/template/node/src/cli.rs
index 1fb494063a..1bca696b7a 100644
--- a/template/node/src/cli.rs
+++ b/template/node/src/cli.rs
@@ -15,7 +15,19 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use structopt::StructOpt;
+use structopt::{StructOpt, clap::arg_enum};
+
+arg_enum! {
+	/// Available Sealing methods.
+	#[allow(missing_docs)]
+	#[derive(Debug, Copy, Clone, StructOpt)]
+	pub enum Sealing {
+		// Seal using rpc method.
+		Manual,
+		// Seal when transaction is executed.
+		Instant,
+	}
+}
 
 #[allow(missing_docs)]
 #[derive(Debug, StructOpt)]
@@ -25,8 +37,8 @@ pub struct RunCmd {
 	pub base: sc_cli::RunCmd,
 
 	/// Force using Kusama native runtime.
-	#[structopt(long = "manual-seal")]
-	pub manual_seal: bool,
+	#[structopt(long = "sealing")]
+	pub sealing: Option<Sealing>,
 }
 
 #[derive(Debug, StructOpt)]
diff --git a/template/node/src/command.rs b/template/node/src/command.rs
index 2cfdcc1582..48f8f617e2 100644
--- a/template/node/src/command.rs
+++ b/template/node/src/command.rs
@@ -75,7 +75,7 @@ pub fn run() -> sc_cli::Result<()> {
 			let runner = cli.create_runner(cmd)?;
 			runner.async_run(|config| {
 				let PartialComponents { client, task_manager, import_queue, ..}
-					= new_partial(&config, cli.run.manual_seal)?;
+					= new_partial(&config, cli.run.sealing)?;
 				Ok((cmd.run(client, import_queue), task_manager))
 			})
 		},
@@ -83,7 +83,7 @@ pub fn run() -> sc_cli::Result<()> {
 			let runner = cli.create_runner(cmd)?;
 			runner.async_run(|config| {
 				let PartialComponents { client, task_manager, ..}
-					= new_partial(&config, cli.run.manual_seal)?;
+					= new_partial(&config, cli.run.sealing)?;
 				Ok((cmd.run(client, config.database), task_manager))
 			})
 		},
@@ -91,7 +91,7 @@ pub fn run() -> sc_cli::Result<()> {
 			let runner = cli.create_runner(cmd)?;
 			runner.async_run(|config| {
 				let PartialComponents { client, task_manager, ..}
-					= new_partial(&config, cli.run.manual_seal)?;
+					= new_partial(&config, cli.run.sealing)?;
 				Ok((cmd.run(client, config.chain_spec), task_manager))
 			})
 		},
@@ -99,7 +99,7 @@ pub fn run() -> sc_cli::Result<()> {
 			let runner = cli.create_runner(cmd)?;
 			runner.async_run(|config| {
 				let PartialComponents { client, task_manager, import_queue, ..}
-					= new_partial(&config, cli.run.manual_seal)?;
+					= new_partial(&config, cli.run.sealing)?;
 				Ok((cmd.run(client, import_queue), task_manager))
 			})
 		},
@@ -111,7 +111,7 @@ pub fn run() -> sc_cli::Result<()> {
 			let runner = cli.create_runner(cmd)?;
 			runner.async_run(|config| {
 				let PartialComponents { client, task_manager, backend, ..}
-					= new_partial(&config, cli.run.manual_seal)?;
+					= new_partial(&config, cli.run.sealing)?;
 				Ok((cmd.run(client, backend), task_manager))
 			})
 		},
@@ -119,7 +119,7 @@ pub fn run() -> sc_cli::Result<()> {
 			let runner = cli.create_runner(&cli.run.base)?;
 			runner.run_node_until_exit(|config| match config.role {
 				Role::Light => service::new_light(config),
-				_ => service::new_full(config, cli.run.manual_seal),
+				_ => service::new_full(config, cli.run.sealing),
 			})
 		}
 	}
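
For context on the CLI change above: a minimal sketch, not part of this diff, of how the new --sealing flag is expected to resolve. It assumes the FromStr impl that clap's arg_enum! macro derives for Sealing (matching variant names case-insensitively); the helper name is hypothetical.

use std::str::FromStr;
use crate::cli::Sealing;

// Hypothetical helper: map the raw --sealing value to the parsed enum.
fn parse_sealing_flag(raw: Option<&str>) -> Option<Sealing> {
	// Flag omitted => None, and the node keeps the default Aura + GRANDPA pipeline.
	// "manual" / "Manual"   => blocks are authored on demand over RPC.
	// "instant" / "Instant" => a block is sealed as soon as a transaction is imported.
	raw.and_then(|value| Sealing::from_str(value).ok())
}
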
diff --git a/template/node/src/service.rs b/template/node/src/service.rs
index 6997440b8e..112706da52 100644
--- a/template/node/src/service.rs
+++ b/template/node/src/service.rs
@@ -1,19 +1,20 @@
 //! Service and ServiceFactory implementation. Specialized wrapper over substrate service.
 
-use std::sync::Arc;
-use std::time::Duration;
+use std::{sync::Arc, cell::RefCell, time::Duration};
 use sc_client_api::{ExecutorProvider, RemoteBackend};
 use sc_consensus_manual_seal::{self as manual_seal};
 use frontier_consensus::FrontierBlockImport;
-use frontier_template_runtime::{self, opaque::Block, RuntimeApi};
+use frontier_template_runtime::{self, opaque::Block, RuntimeApi, SLOT_DURATION};
 use sc_service::{error::Error as ServiceError, Configuration, TaskManager};
-use sp_inherents::InherentDataProviders;
+use sp_inherents::{InherentDataProviders, ProvideInherentData, InherentIdentifier, InherentData};
 use sc_executor::native_executor_instance;
 pub use sc_executor::NativeExecutor;
 use sp_consensus_aura::sr25519::{AuthorityPair as AuraPair};
 use sc_finality_grandpa::{
 	FinalityProofProvider as GrandpaFinalityProofProvider, SharedVoterState,
 };
+use sp_timestamp::InherentError;
+use crate::cli::Sealing;
 
 // Our native executor instance.
 native_executor_instance!(
@@ -40,10 +41,38 @@ pub enum ConsensusResult {
 		>,
 		sc_finality_grandpa::LinkHalf<Block, FullClient, FullSelectChain>
 	),
-	ManualSeal(FrontierBlockImport<Block, Arc<FullClient>, FullClient>)
+	ManualSeal(FrontierBlockImport<Block, Arc<FullClient>, FullClient>, Sealing)
 }
 
-pub fn new_partial(config: &Configuration, manual_seal: bool) -> Result<
+/// Provide a mock duration starting at 0 in millisecond for timestamp inherent.
+/// Each call will increment timestamp by slot_duration making Aura think time has passed.
+pub struct MockTimestampInherentDataProvider;
+
+pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"timstap0";
+
+thread_local!(static TIMESTAMP: RefCell<u64> = RefCell::new(0));
+
+impl ProvideInherentData for MockTimestampInherentDataProvider {
+	fn inherent_identifier(&self) -> &'static InherentIdentifier {
+		&INHERENT_IDENTIFIER
+	}
+
+	fn provide_inherent_data(
+		&self,
+		inherent_data: &mut InherentData,
+	) -> Result<(), sp_inherents::Error> {
+		TIMESTAMP.with(|x| {
+			*x.borrow_mut() += SLOT_DURATION;
+			inherent_data.put_data(INHERENT_IDENTIFIER, &*x.borrow())
+		})
+	}
+
+	fn error_to_string(&self, error: &[u8]) -> Option<String> {
+		InherentError::try_from(&INHERENT_IDENTIFIER, error).map(|e| format!("{:?}", e))
+	}
+}
+
+pub fn new_partial(config: &Configuration, sealing: Option<Sealing>) -> Result<
 	sc_service::PartialComponents<
 		FullClient, FullBackend, FullSelectChain,
 		sp_consensus::import_queue::BasicQueue<Block, sp_api::TransactionFor<FullClient, Block>>,
 		sc_transaction_pool::FullPool<Block, FullClient>,
 		ConsensusResult,
 >, ServiceError> {
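
To make the mock timestamp behaviour concrete, here is an illustrative test, not part of this diff, that could sit next to the provider in service.rs. It assumes SLOT_DURATION is 6000 ms in the template runtime and relies only on InherentData::new/get_data from sp-inherents; the module and test names are made up.

#[cfg(test)]
mod mock_timestamp_sketch {
	use super::*;

	#[test]
	fn advances_one_slot_per_call() {
		let provider = MockTimestampInherentDataProvider;

		// Each block gets a fresh InherentData container, while the
		// thread-local counter keeps accumulating across calls.
		let mut first = InherentData::new();
		provider.provide_inherent_data(&mut first).unwrap();

		let mut second = InherentData::new();
		provider.provide_inherent_data(&mut second).unwrap();

		// Starting from 0, every call adds SLOT_DURATION, so consecutive
		// blocks look to Aura like consecutive slots (6000 ms, 12000 ms, ...).
		assert_eq!(first.get_data::<u64>(&INHERENT_IDENTIFIER).unwrap(), Some(SLOT_DURATION));
		assert_eq!(second.get_data::<u64>(&INHERENT_IDENTIFIER).unwrap(), Some(2 * SLOT_DURATION));
	}
}
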
@@ -65,9 +94,9 @@ pub fn new_partial(config: &Configuration, manual_seal: bool) -> Result<
 		client.clone(),
 	);
 
-	if manual_seal {
+	if let Some(sealing) = sealing {
 		inherent_data_providers
-			.register_provider(sp_timestamp::InherentDataProvider)
+			.register_provider(MockTimestampInherentDataProvider)
 			.map_err(Into::into)
 			.map_err(sp_consensus::error::Error::InherentData)?;
@@ -86,7 +115,7 @@ pub fn new_partial(config: &Configuration, manual_seal: bool) -> Result<
 		return Ok(sc_service::PartialComponents {
 			client, backend, task_manager, import_queue, keystore,
 			select_chain, transaction_pool, inherent_data_providers,
-			other: ConsensusResult::ManualSeal(frontier_block_import)
+			other: ConsensusResult::ManualSeal(frontier_block_import, sealing)
 		})
 	}
 
@@ -124,15 +153,15 @@ pub fn new_partial(config: &Configuration, manual_seal: bool) -> Result<
 }
 
 /// Builds a new service for a full client.
-pub fn new_full(config: Configuration, manual_seal: bool) -> Result<TaskManager, ServiceError> {
+pub fn new_full(config: Configuration, sealing: Option<Sealing>) -> Result<TaskManager, ServiceError> {
 	let sc_service::PartialComponents {
 		client, backend, mut task_manager, import_queue, keystore,
 		select_chain, transaction_pool, inherent_data_providers,
 		other: consensus_result
-	} = new_partial(&config, manual_seal)?;
+	} = new_partial(&config, sealing)?;
 
 	let (network, network_status_sinks, system_rpc_tx, network_starter) = match consensus_result {
-		ConsensusResult::ManualSeal(_) => {
+		ConsensusResult::ManualSeal(_, _) => {
 			sc_service::build_network(sc_service::BuildNetworkParams {
 				config: &config,
 				client: client.clone(),
@@ -213,7 +242,7 @@ pub fn new_full(config: Configuration, manual_seal: bool) -> Result<TaskManager
-		ConsensusResult::ManualSeal(block_import) => {
+		ConsensusResult::ManualSeal(block_import, sealing) => {
 			if role.is_authority() {
 				let env = sc_basic_authorship::ProposerFactory::new(
 					client.clone(),
@@ -222,21 +251,40 @@ pub fn new_full(config: Configuration, manual_seal: bool) -> Result<TaskManager
+				match sealing {
+					Sealing::Manual => {
+						let authorship_future = manual_seal::run_manual_seal(
+							manual_seal::ManualSealParams {
+								block_import,
+								env,
+								client,
+								pool: transaction_pool.pool().clone(),
+								commands_stream,
+								select_chain,
+								consensus_data_provider: None,
+								inherent_data_providers,
+							}
+						);
+						// we spawn the future on a background thread managed by service.
+						task_manager.spawn_essential_handle().spawn_blocking("manual-seal", authorship_future);
+					},
+					Sealing::Instant => {
+						let authorship_future = manual_seal::run_instant_seal(
+							manual_seal::InstantSealParams {
+								block_import,
+								env,
+								client: client.clone(),
+								pool: transaction_pool.pool().clone(),
+								select_chain,
+								consensus_data_provider: None,
+								inherent_data_providers,
+							}
+						);
+						// we spawn the future on a background thread managed by service.
+						task_manager.spawn_essential_handle().spawn_blocking("instant-seal", authorship_future);
 					}
-				);
+				};
 
-				// we spawn the future on a background thread managed by service.
-				task_manager.spawn_essential_handle().spawn_blocking("manual-seal", authorship_future);
 			}
 			log::info!("Manual Seal Ready");
 		},
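
For readers wondering how blocks actually get requested in Manual mode: a rough sketch, not from this patch, of pushing an EngineCommand into the same channel whose receiving half is passed as commands_stream above. It assumes sc-consensus-manual-seal re-exports EngineCommand, that the command channel is a futures mpsc channel, and that the block hash type is H256; the helper name is illustrative. Instant mode needs none of this, since run_instant_seal authors a block whenever a transaction enters the pool.

use futures::channel::mpsc::Sender;
use sc_consensus_manual_seal::EngineCommand;
use sp_core::H256;

// Illustrative helper: ask the "manual-seal" task to author one block.
// The template normally triggers this through the engine_createBlock RPC.
fn request_block(command_sink: &mut Sender<EngineCommand<H256>>) {
	let _ = command_sink.try_send(EngineCommand::SealNewBlock {
		create_empty: true,  // author a block even if the transaction pool is empty
		finalize: false,     // finalization can be requested separately
		parent_hash: None,   // build on the current best block
		sender: None,        // no oneshot channel to report the created block back
	});
}
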
diff --git a/ts-tests/tests/test-block.ts b/ts-tests/tests/test-block.ts
index aba85a1d2f..ff2381083a 100644
--- a/ts-tests/tests/test-block.ts
+++ b/ts-tests/tests/test-block.ts
@@ -32,7 +32,7 @@ describeWithFrontier("Frontier RPC (Block)", `simple-specs.json`, (context) => {
 			receiptsRoot: "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
 			size: 533,
 			stateRoot: "0x0000000000000000000000000000000000000000000000000000000000000000",
-			//timestamp: 1595012243836,
+			timestamp: 0,
 			totalDifficulty: null,
 			//transactions: [],
 			transactionsRoot: "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
@@ -60,10 +60,7 @@ describeWithFrontier("Frontier RPC (Block)", `simple-specs.json`, (context) => {
 
 	step("should have valid timestamp after block production", async function () {
 		const block = await context.web3.eth.getBlock("latest");
-		const last5Minutes = (Date.now() / 1000) - 300;
-		const next5Minutes = (Date.now() / 1000) + 300;
-		expect(block.timestamp).to.be.least(last5Minutes);
-		expect(block.timestamp).to.be.below(next5Minutes);
+		expect(block.timestamp).to.be.eq(6);
 	});
 
 	step("retrieve block information", async function () {
@@ -83,9 +80,9 @@ describeWithFrontier("Frontier RPC (Block)", `simple-specs.json`, (context) => {
 			number: 1,
 			//parentHash: "0x04540257811b46d103d9896e7807040e7de5080e285841c5430d1a81588a0ce4",
 			receiptsRoot: "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
-			size: 539,
+			size: 535,
 			stateRoot: "0x0000000000000000000000000000000000000000000000000000000000000000",
-			//timestamp: 1595012243836,
+			timestamp: 6,
 			totalDifficulty: null,
 			//transactions: [],
 			transactionsRoot: "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
diff --git a/ts-tests/tests/util.ts b/ts-tests/tests/util.ts
index c9394a899e..dd159dfb64 100644
--- a/ts-tests/tests/util.ts
+++ b/ts-tests/tests/util.ts
@@ -55,7 +55,7 @@ export async function startFrontierNode(specFilename: string): Promise<{ web3: W
 		`--execution=Native`, // Faster execution using native
 		`--no-telemetry`,
 		`--no-prometheus`,
-		`--manual-seal`,
+		`--sealing=Manual`,
 		`--no-grandpa`,
 		`--force-authoring`,
 		`-l${FRONTIER_LOG}`,
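
A short sanity check, not part of this diff, of why the tests above now pin block 1's timestamp to 6: it assumes SLOT_DURATION is 6000 ms in the template runtime and that Frontier reports Ethereum block timestamps in whole seconds; the helper is hypothetical.

const SLOT_DURATION_MS: u64 = 6_000;

// The mock provider hands block N a timestamp of N * SLOT_DURATION milliseconds,
// which the Ethereum block exposes as whole seconds.
fn expected_eth_timestamp(block_number: u64) -> u64 {
	block_number * SLOT_DURATION_MS / 1_000
}

#[test]
fn first_block_timestamp_is_six_seconds() {
	assert_eq!(expected_eth_timestamp(0), 0); // genesis keeps timestamp 0
	assert_eq!(expected_eth_timestamp(1), 6); // matches expect(block.timestamp).to.be.eq(6)
}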