From 01b858a2f2762a35f98c85a13696862618c850d4 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Fri, 25 Aug 2023 17:16:48 +0200 Subject: [PATCH 01/61] Transfer substrate clawback to monorepo --- Cargo.lock | 2 + cumulus/test/client/src/block_builder.rs | 2 +- .../node/test/client/src/block_builder.rs | 2 +- polkadot/node/test/client/src/lib.rs | 2 +- .../bin/node/cli/benches/block_production.rs | 10 +- substrate/bin/node/cli/src/service.rs | 3 +- substrate/bin/node/testing/src/bench.rs | 3 +- .../client/api/src/execution_extensions.rs | 9 ++ .../basic-authorship/src/basic_authorship.rs | 41 +++++- substrate/client/block-builder/Cargo.toml | 1 + substrate/client/block-builder/src/lib.rs | 43 +++++- substrate/client/consensus/babe/src/tests.rs | 2 +- substrate/client/consensus/beefy/src/tests.rs | 8 +- .../client/consensus/grandpa/src/tests.rs | 6 +- .../merkle-mountain-range/src/test_utils.rs | 2 +- substrate/client/network/sync/src/lib.rs | 5 +- substrate/client/network/test/src/lib.rs | 2 +- .../rpc-spec-v2/src/chain_head/tests.rs | 28 ++-- substrate/client/service/src/builder.rs | 42 +++++- .../service/src/client/call_executor.rs | 1 + substrate/client/service/src/client/client.rs | 29 +++- substrate/client/service/src/lib.rs | 6 +- .../client/service/test/src/client/mod.rs | 129 +++++++++--------- .../api/proc-macro/src/impl_runtime_apis.rs | 4 + .../proc-macro/src/mock_impl_runtime_apis.rs | 4 + substrate/primitives/api/src/lib.rs | 14 ++ .../api/test/tests/runtime_calls.rs | 2 +- substrate/primitives/externalities/Cargo.toml | 9 +- .../externalities/src/extensions.rs | 6 + .../runtime-interface/proc-macro/Cargo.toml | 1 + .../proc-macro/src/runtime_interface/mod.rs | 6 + substrate/primitives/trie/src/lib.rs | 4 + substrate/primitives/trie/src/recorder.rs | 6 + substrate/test-utils/client/src/lib.rs | 3 +- .../runtime/client/src/trait_tests.rs | 54 ++++---- 35 files changed, 347 insertions(+), 144 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 
4b0028a64513..316eafbd0002 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14728,6 +14728,7 @@ dependencies = [ name = "sc-block-builder" version = "0.10.0-dev" dependencies = [ + "log", "parity-scale-codec", "sc-client-api", "sp-api", @@ -17496,6 +17497,7 @@ name = "sp-runtime-interface-proc-macro" version = "11.0.0" dependencies = [ "Inflector", + "expander 2.0.0", "proc-macro-crate", "proc-macro2", "quote", diff --git a/cumulus/test/client/src/block_builder.rs b/cumulus/test/client/src/block_builder.rs index 06c7416be67f..b6f82f8411f6 100644 --- a/cumulus/test/client/src/block_builder.rs +++ b/cumulus/test/client/src/block_builder.rs @@ -72,7 +72,7 @@ fn init_block_builder( timestamp: u64, ) -> BlockBuilder<'_, Block, Client, Backend> { let mut block_builder = client - .new_block_at(at, Default::default(), true) + .new_block_at(at, Default::default(), true, None) .expect("Creates new block builder for test runtime"); let mut inherent_data = sp_inherents::InherentData::new(); diff --git a/polkadot/node/test/client/src/block_builder.rs b/polkadot/node/test/client/src/block_builder.rs index 0987cef55c1f..6ffdb9b7262f 100644 --- a/polkadot/node/test/client/src/block_builder.rs +++ b/polkadot/node/test/client/src/block_builder.rs @@ -91,7 +91,7 @@ impl InitPolkadotBlockBuilder for Client { }; let mut block_builder = self - .new_block_at(hash, digest, false) + .new_block_at(hash, digest, false, None) .expect("Creates new block builder for test runtime"); let mut inherent_data = sp_inherents::InherentData::new(); diff --git a/polkadot/node/test/client/src/lib.rs b/polkadot/node/test/client/src/lib.rs index 5d97ffcdf1da..bd52d2c1b84c 100644 --- a/polkadot/node/test/client/src/lib.rs +++ b/polkadot/node/test/client/src/lib.rs @@ -75,7 +75,7 @@ impl TestClientBuilderExt for TestClientBuilder { self.backend().clone(), executor.clone(), Default::default(), - ExecutionExtensions::new(Default::default(), Arc::new(executor)), + ExecutionExtensions::new(Default::default(), 
Arc::new(executor), None), ) .unwrap(); diff --git a/substrate/bin/node/cli/benches/block_production.rs b/substrate/bin/node/cli/benches/block_production.rs index b877aa735022..3a7a09c6d768 100644 --- a/substrate/bin/node/cli/benches/block_production.rs +++ b/substrate/bin/node/cli/benches/block_production.rs @@ -191,8 +191,9 @@ fn block_production(c: &mut Criterion) { b.iter_batched( || extrinsics.clone(), |extrinsics| { - let mut block_builder = - client.new_block_at(best_hash, Default::default(), RecordProof::No).unwrap(); + let mut block_builder = client + .new_block_at(best_hash, Default::default(), RecordProof::No, None) + .unwrap(); for extrinsic in extrinsics { block_builder.push(extrinsic).unwrap(); } @@ -206,8 +207,9 @@ fn block_production(c: &mut Criterion) { b.iter_batched( || extrinsics.clone(), |extrinsics| { - let mut block_builder = - client.new_block_at(best_hash, Default::default(), RecordProof::Yes).unwrap(); + let mut block_builder = client + .new_block_at(best_hash, Default::default(), RecordProof::Yes, None) + .unwrap(); for extrinsic in extrinsics { block_builder.push(extrinsic).unwrap(); } diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs index ecca5c60db51..cd20a87a902b 100644 --- a/substrate/bin/node/cli/src/service.rs +++ b/substrate/bin/node/cli/src/service.rs @@ -173,10 +173,11 @@ pub fn new_partial( let executor = sc_service::new_native_or_wasm_executor(&config); let (client, backend, keystore_container, task_manager) = - sc_service::new_full_parts::( + sc_service::new_full_parts_extension::( config, telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), executor, + None, )?; let client = Arc::new(client); diff --git a/substrate/bin/node/testing/src/bench.rs b/substrate/bin/node/testing/src/bench.rs index f1ab2212239b..279c8089ae8a 100644 --- a/substrate/bin/node/testing/src/bench.rs +++ b/substrate/bin/node/testing/src/bench.rs @@ -405,13 +405,14 @@ impl BenchDb { ) .expect("Failed to 
create genesis block builder"); + // TODO skunert Check back if this is correct let client = sc_service::new_client( backend.clone(), executor.clone(), genesis_block_builder, None, None, - ExecutionExtensions::new(None, Arc::new(executor)), + ExecutionExtensions::new(None, Arc::new(executor), None), Box::new(task_executor.clone()), None, None, diff --git a/substrate/client/api/src/execution_extensions.rs b/substrate/client/api/src/execution_extensions.rs index 6f927105df0b..dd8aef2ede00 100644 --- a/substrate/client/api/src/execution_extensions.rs +++ b/substrate/client/api/src/execution_extensions.rs @@ -23,6 +23,7 @@ //! extensions to support APIs for particular execution context & capabilities. use parking_lot::RwLock; +use sp_api::ExtensionProducer; use sp_core::traits::{ReadRuntimeVersion, ReadRuntimeVersionExt}; use sp_externalities::{Extension, Extensions}; use sp_runtime::traits::{Block as BlockT, NumberFor}; @@ -95,6 +96,7 @@ impl ExtensionsFactory pub struct ExecutionExtensions { extensions_factory: RwLock>>, read_runtime_version: Arc, + import_extension: Option, } impl ExecutionExtensions { @@ -102,15 +104,22 @@ impl ExecutionExtensions { pub fn new( extensions_factory: Option>>, read_runtime_version: Arc, + import_extension: Option, ) -> Self { Self { extensions_factory: extensions_factory .map(RwLock::new) .unwrap_or_else(|| RwLock::new(Box::new(()))), read_runtime_version, + import_extension, } } + /// Get extension that should be registered during block import + pub fn get_import_extension(&self) -> Option { + self.import_extension.clone() + } + /// Set the new extensions_factory pub fn set_extensions_factory(&self, maker: impl ExtensionsFactory + 'static) { *self.extensions_factory.write() = Box::new(maker); diff --git a/substrate/client/basic-authorship/src/basic_authorship.rs b/substrate/client/basic-authorship/src/basic_authorship.rs index b3a8f0d8970b..102df1c91847 100644 --- a/substrate/client/basic-authorship/src/basic_authorship.rs +++ 
b/substrate/client/basic-authorship/src/basic_authorship.rs @@ -32,7 +32,7 @@ use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider}; use sc_client_api::backend; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_INFO}; use sc_transaction_pool_api::{InPoolTransaction, TransactionPool}; -use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_api::{ApiExt, ExtensionProducer, ProvideRuntimeApi}; use sp_blockchain::{ApplyExtrinsicFailed::Validity, Error::ApplyExtrinsicFailed, HeaderBackend}; use sp_consensus::{DisableProofRecording, EnableProofRecording, ProofRecording, Proposal}; use sp_core::traits::SpawnNamed; @@ -84,7 +84,9 @@ pub struct ProposerFactory { telemetry: Option, /// When estimating the block size, should the proof be included? include_proof_in_block_size_estimation: bool, - /// phantom member to pin the `Backend`/`ProofRecording` type. + /// Externalities extension to be used for block authoring + extension: Option, + /// Phantom member to pin the `Backend`/`ProofRecording` type. 
_phantom: PhantomData<(B, PR)>, } @@ -109,6 +111,7 @@ impl ProposerFactory { telemetry, client, include_proof_in_block_size_estimation: false, + extension: None, _phantom: PhantomData, } } @@ -127,6 +130,24 @@ impl ProposerFactory { transaction_pool: Arc, prometheus: Option<&PrometheusRegistry>, telemetry: Option, + ) -> Self { + Self::with_proof_recording_extension( + spawn_handle, + client, + transaction_pool, + prometheus, + telemetry, + None, + ) + } + + pub fn with_proof_recording_extension( + spawn_handle: impl SpawnNamed + 'static, + client: Arc, + transaction_pool: Arc, + prometheus: Option<&PrometheusRegistry>, + telemetry: Option, + extension: Option, ) -> Self { ProposerFactory { client, @@ -137,6 +158,7 @@ impl ProposerFactory { soft_deadline_percent: DEFAULT_SOFT_DEADLINE_PERCENT, telemetry, include_proof_in_block_size_estimation: true, + extension, _phantom: PhantomData, } } @@ -211,6 +233,7 @@ where telemetry: self.telemetry.clone(), _phantom: PhantomData, include_proof_in_block_size_estimation: self.include_proof_in_block_size_estimation, + extension: self.extension.clone(), }; proposer @@ -252,6 +275,7 @@ pub struct Proposer { default_block_size_limit: usize, include_proof_in_block_size_estimation: bool, soft_deadline_percent: Percent, + extension: Option, telemetry: Option, _phantom: PhantomData<(B, PR)>, } @@ -328,15 +352,20 @@ where PR: ProofRecording, { async fn propose_with( - self, + mut self, inherent_data: InherentData, inherent_digests: Digest, deadline: time::Instant, block_size_limit: Option, ) -> Result, sp_blockchain::Error> { let propose_with_timer = time::Instant::now(); - let mut block_builder = - self.client.new_block_at(self.parent_hash, inherent_digests, PR::ENABLED)?; + + let mut block_builder = self.client.new_block_at( + self.parent_hash, + inherent_digests, + PR::ENABLED, + self.extension.take(), + )?; self.apply_inherents(&mut block_builder, inherent_data)?; @@ -969,7 +998,7 @@ mod tests { // 99 (header_size) + 718 
(proof@initialize_block) + 246 (one Transfer extrinsic) let block_limit = { let builder = - client.new_block_at(genesis_header.hash(), Default::default(), true).unwrap(); + client.new_block_at(genesis_header.hash(), Default::default(), true, None).unwrap(); builder.estimate_block_size(true) + extrinsics[0].encoded_size() }; let block = block_on(proposer.propose( diff --git a/substrate/client/block-builder/Cargo.toml b/substrate/client/block-builder/Cargo.toml index 3a6a0ea184d2..8b9cc5938507 100644 --- a/substrate/client/block-builder/Cargo.toml +++ b/substrate/client/block-builder/Cargo.toml @@ -16,6 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", features = [ "derive", ] } +log = "0.4.20" sc-client-api = { path = "../api" } sp-api = { path = "../../primitives/api" } sp-block-builder = { path = "../../primitives/block-builder" } diff --git a/substrate/client/block-builder/src/lib.rs b/substrate/client/block-builder/src/lib.rs index 1878e7627480..20a9f14c56de 100644 --- a/substrate/client/block-builder/src/lib.rs +++ b/substrate/client/block-builder/src/lib.rs @@ -29,7 +29,8 @@ use codec::Encode; use sp_api::{ - ApiExt, ApiRef, Core, ProvideRuntimeApi, StorageChanges, StorageProof, TransactionOutcome, + ApiExt, ApiRef, Core, ExtensionProducer, ProvideRuntimeApi, StorageChanges, StorageProof, + TransactionOutcome, }; use sp_blockchain::{ApplyExtrinsicFailed, Error}; use sp_core::traits::CallContext; @@ -119,6 +120,7 @@ where parent: Block::Hash, inherent_digests: Digest, record_proof: R, + extension: Option, ) -> sp_blockchain::Result>; /// Create a new block, built on the head of the chain. @@ -151,13 +153,16 @@ where /// While proof recording is enabled, all accessed trie nodes are saved. /// These recorded trie nodes can be used by a third party to prove the /// output of this block builder without having access to the full storage. 
- pub fn new( + /// The given externality extension `extension` will be registered for the + /// runtime instance used to build the block. + pub fn new_with_extension( api: &'a A, parent_hash: Block::Hash, parent_number: NumberFor, record_proof: RecordProof, inherent_digests: Digest, backend: &'a B, + extension_producer: Option, ) -> Result { let header = <::Header as HeaderT>::new( parent_number + One::one(), @@ -175,6 +180,16 @@ where api.record_proof(); } + if let Some(proof_recorder) = api.proof_recorder() { + if let Some(extension_producer) = extension_producer { + log::info!(target:"skunert", "Registering extension in Block-builder"); + let extension = extension_producer(Box::new(proof_recorder)); + api.register_extension_with_type_id(extension.0, extension.1); + } else { + log::info!(target:"skunert", "not registering extension in Block-builder"); + } + } + api.set_call_context(CallContext::Onchain); api.initialize_block(parent_hash, &header)?; @@ -193,6 +208,30 @@ where }) } + /// Create a new instance of builder based on the given `parent_hash` and `parent_number`. + /// + /// While proof recording is enabled, all accessed trie nodes are saved. + /// These recorded trie nodes can be used by a third party to prove the + /// output of this block builder without having access to the full storage. + pub fn new( + api: &'a A, + parent_hash: Block::Hash, + parent_number: NumberFor, + record_proof: RecordProof, + inherent_digests: Digest, + backend: &'a B, + ) -> Result { + Self::new_with_extension( + api, + parent_hash, + parent_number, + record_proof, + inherent_digests, + backend, + None, + ) + } + /// Push onto the block's list of extrinsics. /// /// This will ensure the extrinsic can be validly executed (by executing it). 
diff --git a/substrate/client/consensus/babe/src/tests.rs b/substrate/client/consensus/babe/src/tests.rs index b3843f8acfa0..420a0177e2d0 100644 --- a/substrate/client/consensus/babe/src/tests.rs +++ b/substrate/client/consensus/babe/src/tests.rs @@ -99,7 +99,7 @@ impl DummyProposer { pre_digests: Digest, ) -> future::Ready, Error>> { let block_builder = - self.factory.client.new_block_at(self.parent_hash, pre_digests, false).unwrap(); + self.factory.client.new_block_at(self.parent_hash, pre_digests, false, None).unwrap(); let mut block = match block_builder.build().map_err(|e| e.into()) { Ok(b) => b.block, diff --git a/substrate/client/consensus/beefy/src/tests.rs b/substrate/client/consensus/beefy/src/tests.rs index 3bb65e9d57f4..a74a9023ffcb 100644 --- a/substrate/client/consensus/beefy/src/tests.rs +++ b/substrate/client/consensus/beefy/src/tests.rs @@ -775,7 +775,7 @@ async fn beefy_importing_justifications() { }; let builder = full_client - .new_block_at(full_client.chain_info().genesis_hash, Default::default(), false) + .new_block_at(full_client.chain_info().genesis_hash, Default::default(), false, None) .unwrap(); let block = builder.build().unwrap().block; let hashof1 = block.header.hash(); @@ -792,7 +792,7 @@ async fn beefy_importing_justifications() { // Import block 2 with "valid" justification (beefy pallet genesis block not yet reached). let block_num = 2; - let builder = full_client.new_block_at(hashof1, Default::default(), false).unwrap(); + let builder = full_client.new_block_at(hashof1, Default::default(), false, None).unwrap(); let block = builder.build().unwrap().block; let hashof2 = block.header.hash(); @@ -824,7 +824,7 @@ async fn beefy_importing_justifications() { // Import block 3 with valid justification. 
let block_num = 3; - let builder = full_client.new_block_at(hashof2, Default::default(), false).unwrap(); + let builder = full_client.new_block_at(hashof2, Default::default(), false, None).unwrap(); let block = builder.build().unwrap().block; let hashof3 = block.header.hash(); let proof = crate::justification::tests::new_finality_proof(block_num, &good_set, keys); @@ -858,7 +858,7 @@ async fn beefy_importing_justifications() { // Import block 4 with invalid justification (incorrect validator set). let block_num = 4; - let builder = full_client.new_block_at(hashof3, Default::default(), false).unwrap(); + let builder = full_client.new_block_at(hashof3, Default::default(), false, None).unwrap(); let block = builder.build().unwrap().block; let hashof4 = block.header.hash(); let keys = &[BeefyKeyring::Alice]; diff --git a/substrate/client/consensus/grandpa/src/tests.rs b/substrate/client/consensus/grandpa/src/tests.rs index 0175f7d1b473..3f1a9701af70 100644 --- a/substrate/client/consensus/grandpa/src/tests.rs +++ b/substrate/client/consensus/grandpa/src/tests.rs @@ -898,7 +898,7 @@ async fn allows_reimporting_change_blocks() { let full_client = client.as_client(); let mut builder = full_client - .new_block_at(full_client.chain_info().genesis_hash, Default::default(), false) + .new_block_at(full_client.chain_info().genesis_hash, Default::default(), false, None) .unwrap(); add_scheduled_change( @@ -943,7 +943,7 @@ async fn test_bad_justification() { let full_client = client.as_client(); let mut builder = full_client - .new_block_at(full_client.chain_info().genesis_hash, Default::default(), false) + .new_block_at(full_client.chain_info().genesis_hash, Default::default(), false, None) .unwrap(); add_scheduled_change( @@ -1913,7 +1913,7 @@ async fn imports_justification_for_regular_blocks_on_import() { // create a new block (without importing it) let generate_block = |parent| { - let builder = full_client.new_block_at(parent, Default::default(), false).unwrap(); + let 
builder = full_client.new_block_at(parent, Default::default(), false, None).unwrap(); builder.build().unwrap().block }; diff --git a/substrate/client/merkle-mountain-range/src/test_utils.rs b/substrate/client/merkle-mountain-range/src/test_utils.rs index 010b48bb3d7d..1eb7ba78442f 100644 --- a/substrate/client/merkle-mountain-range/src/test_utils.rs +++ b/substrate/client/merkle-mountain-range/src/test_utils.rs @@ -125,7 +125,7 @@ impl MockClient { let mut client = self.client.lock(); let hash = client.expect_block_hash_from_id(&at).unwrap(); - let mut block_builder = client.new_block_at(hash, Default::default(), false).unwrap(); + let mut block_builder = client.new_block_at(hash, Default::default(), false, None).unwrap(); // Make sure the block has a different hash than its siblings block_builder .push_storage_change(b"name".to_vec(), Some(name.to_vec())) diff --git a/substrate/client/network/sync/src/lib.rs b/substrate/client/network/sync/src/lib.rs index 175c1c43f46f..beb10ef820c5 100644 --- a/substrate/client/network/sync/src/lib.rs +++ b/substrate/client/network/sync/src/lib.rs @@ -3408,7 +3408,7 @@ mod test { fn build_block(client: &mut Arc, at: Option, fork: bool) -> Block { let at = at.unwrap_or_else(|| client.info().best_hash); - let mut block_builder = client.new_block_at(at, Default::default(), false).unwrap(); + let mut block_builder = client.new_block_at(at, Default::default(), false, None).unwrap(); if fork { block_builder.push_storage_change(vec![1, 2, 3], Some(vec![4, 5, 6])).unwrap(); @@ -3462,7 +3462,8 @@ mod test { let mut client2 = client.clone(); let mut build_block_at = |at, import| { - let mut block_builder = client2.new_block_at(at, Default::default(), false).unwrap(); + let mut block_builder = + client2.new_block_at(at, Default::default(), false, None).unwrap(); // Make sure we generate a different block as fork block_builder.push_storage_change(vec![1, 2, 3], Some(vec![4, 5, 6])).unwrap(); diff --git 
a/substrate/client/network/test/src/lib.rs b/substrate/client/network/test/src/lib.rs index 2a20da5a556b..d5e0fceb26fb 100644 --- a/substrate/client/network/test/src/lib.rs +++ b/substrate/client/network/test/src/lib.rs @@ -372,7 +372,7 @@ where let full_client = self.client.as_client(); let mut at = full_client.block_hash_from_id(&at).unwrap().unwrap(); for _ in 0..count { - let builder = full_client.new_block_at(at, Default::default(), false).unwrap(); + let builder = full_client.new_block_at(at, Default::default(), false, None).unwrap(); let block = edit_block(builder); let hash = block.header.hash(); trace!( diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index 00ed9089058e..8883cfd3b9e7 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -1260,8 +1260,9 @@ async fn follow_generates_initial_blocks() { let block_2_hash = block_2.header.hash(); client.import(BlockOrigin::Own, block_2.clone()).await.unwrap(); - let mut block_builder = - client.new_block_at(block_1.header.hash(), Default::default(), false).unwrap(); + let mut block_builder = client + .new_block_at(block_1.header.hash(), Default::default(), false, None) + .unwrap(); // This push is required as otherwise block 3 has the same hash as block 2 and won't get // imported block_builder @@ -1556,7 +1557,8 @@ async fn follow_prune_best_block() { client.import(BlockOrigin::Own, block_4.clone()).await.unwrap(); // Import block 2 as best on the fork. 
- let mut block_builder = client.new_block_at(block_1_hash, Default::default(), false).unwrap(); + let mut block_builder = + client.new_block_at(block_1_hash, Default::default(), false, None).unwrap(); // This push is required as otherwise block 3 has the same hash as block 2 and won't get // imported block_builder @@ -1698,8 +1700,9 @@ async fn follow_forks_pruned_block() { client.import(BlockOrigin::Own, block_3.clone()).await.unwrap(); // Block 4 with parent Block 1 is not the best imported. - let mut block_builder = - client.new_block_at(block_1.header.hash(), Default::default(), false).unwrap(); + let mut block_builder = client + .new_block_at(block_1.header.hash(), Default::default(), false, None) + .unwrap(); // This push is required as otherwise block 4 has the same hash as block 2 and won't get // imported block_builder @@ -1713,8 +1716,9 @@ async fn follow_forks_pruned_block() { let block_4 = block_builder.build().unwrap().block; client.import(BlockOrigin::Own, block_4.clone()).await.unwrap(); - let mut block_builder = - client.new_block_at(block_4.header.hash(), Default::default(), false).unwrap(); + let mut block_builder = client + .new_block_at(block_4.header.hash(), Default::default(), false, None) + .unwrap(); block_builder .push_transfer(Transfer { from: AccountKeyring::Bob.into(), @@ -1819,8 +1823,9 @@ async fn follow_report_multiple_pruned_block() { client.import(BlockOrigin::Own, block_3.clone()).await.unwrap(); // Block 4 with parent Block 1 is not the best imported. 
- let mut block_builder = - client.new_block_at(block_1.header.hash(), Default::default(), false).unwrap(); + let mut block_builder = client + .new_block_at(block_1.header.hash(), Default::default(), false, None) + .unwrap(); // This push is required as otherwise block 4 has the same hash as block 2 and won't get // imported block_builder @@ -1835,8 +1840,9 @@ async fn follow_report_multiple_pruned_block() { let block_4_hash = block_4.header.hash(); client.import(BlockOrigin::Own, block_4.clone()).await.unwrap(); - let mut block_builder = - client.new_block_at(block_4.header.hash(), Default::default(), false).unwrap(); + let mut block_builder = client + .new_block_at(block_4.header.hash(), Default::default(), false, None) + .unwrap(); block_builder .push_transfer(Transfer { from: AccountKeyring::Bob.into(), diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs index fe18d1d002d5..65af33c6b836 100644 --- a/substrate/client/service/src/builder.rs +++ b/substrate/client/service/src/builder.rs @@ -66,7 +66,7 @@ use sc_rpc_spec_v2::{chain_head::ChainHeadApiServer, transaction::TransactionApi use sc_telemetry::{telemetry, ConnectionMessage, Telemetry, TelemetryHandle, SUBSTRATE_INFO}; use sc_transaction_pool_api::{MaintainedTransactionPool, TransactionPool}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; -use sp_api::{CallApiAt, ProvideRuntimeApi}; +use sp_api::{CallApiAt, ExtensionProducer, ProvideRuntimeApi}; use sp_blockchain::{HeaderBackend, HeaderMetadata}; use sp_consensus::block_validation::{ BlockAnnounceValidator, Chain, DefaultBlockAnnounceValidator, @@ -128,6 +128,35 @@ where new_full_parts(config, telemetry, executor).map(|parts| parts.0) } +/// Create the initial parts of a full node with the default genesis block builder. 
+pub fn new_full_parts_extension( + config: &Configuration, + telemetry: Option, + executor: TExec, + import_extension_factory: Option, +) -> Result, Error> +where + TBl: BlockT, + TExec: CodeExecutor + RuntimeVersionOf + Clone, +{ + let backend = new_db_backend(config.db_config())?; + + let genesis_block_builder = GenesisBlockBuilder::new( + config.chain_spec.as_storage_builder(), + !config.no_genesis(), + backend.clone(), + executor.clone(), + )?; + + new_full_parts_with_genesis_builder( + config, + telemetry, + executor, + backend, + genesis_block_builder, + import_extension_factory, + ) +} /// Create the initial parts of a full node with the default genesis block builder. pub fn new_full_parts( config: &Configuration, @@ -147,7 +176,14 @@ where executor.clone(), )?; - new_full_parts_with_genesis_builder(config, telemetry, executor, backend, genesis_block_builder) + new_full_parts_with_genesis_builder( + config, + telemetry, + executor, + backend, + genesis_block_builder, + None, + ) } /// Create the initial parts of a full node. 
@@ -157,6 +193,7 @@ pub fn new_full_parts_with_genesis_builder>, genesis_block_builder: TBuildGenesisBlock, + import_extension_factory: Option, ) -> Result, Error> where TBl: BlockT, @@ -186,6 +223,7 @@ where let extensions = sc_client_api::execution_extensions::ExecutionExtensions::new( None, Arc::new(executor.clone()), + import_extension_factory.clone(), ); let wasm_runtime_substitutes = config diff --git a/substrate/client/service/src/client/call_executor.rs b/substrate/client/service/src/client/call_executor.rs index 86b5c7c61fcd..2a33f71e1860 100644 --- a/substrate/client/service/src/client/call_executor.rs +++ b/substrate/client/service/src/client/call_executor.rs @@ -406,6 +406,7 @@ mod tests { execution_extensions: Arc::new(ExecutionExtensions::new( None, Arc::new(executor.clone()), + None, )), }; diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index a0983d823e5b..10b16f6e438e 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -48,7 +48,7 @@ use sc_executor::RuntimeVersion; use sc_telemetry::{telemetry, TelemetryHandle, SUBSTRATE_INFO}; use sp_api::{ ApiExt, ApiRef, CallApiAt, CallApiAtParams, ConstructRuntimeApi, Core as CoreApi, - ProvideRuntimeApi, + ExtensionProducer, ProvideRuntimeApi, }; use sp_blockchain::{ self as blockchain, Backend as ChainBackend, CachedHeaderMetadata, Error, @@ -234,7 +234,8 @@ where Block: BlockT, B: backend::LocalBackend + 'static, { - let extensions = ExecutionExtensions::new(None, Arc::new(executor.clone())); + // TODO skunert Check if we need to pass something here + let extensions = ExecutionExtensions::new(None, Arc::new(executor.clone()), None); let call_executor = LocalCallExecutor::new(backend.clone(), executor, config.clone(), extensions)?; @@ -854,7 +855,10 @@ where let storage_changes = match (enact_state, storage_changes, &import_block.body) { // We have storage changes and should enact the 
state, so we don't need to do anything // here - (true, changes @ Some(_), _) => changes, + (true, changes @ Some(_), _) => { + log::info!(target: "skunert", "Changes were provided, block is not re-executed."); + changes + }, // We should enact state, but don't have any storage changes, so we need to execute the // block. (true, None, Some(ref body)) => { @@ -862,6 +866,21 @@ where runtime_api.set_call_context(CallContext::Onchain); + if let Some(extension) = + self.executor.execution_extensions().get_import_extension().clone() + { + runtime_api.record_proof(); + if let Some(proof_recorder) = runtime_api.proof_recorder() { + log::info!(target:"skunert", "Block import with extension and proof recording."); + let extension = extension(Box::new(proof_recorder)); + runtime_api.register_extension_with_type_id(extension.0, extension.1); + } else { + log::info!(target:"skunert", "Block import without proof recorder"); + } + } else { + log::info!(target:"skunert", "Block import without extension"); + } + runtime_api.execute_block( *parent_hash, Block::new(import_block.header.clone(), body.clone()), @@ -1418,14 +1437,16 @@ where parent: Block::Hash, inherent_digests: Digest, record_proof: R, + extension: Option, ) -> sp_blockchain::Result> { - sc_block_builder::BlockBuilder::new( + sc_block_builder::BlockBuilder::new_with_extension( self, parent, self.expect_block_number_from_id(&BlockId::Hash(parent))?, record_proof.into(), inherent_digests, &self.backend, + extension, ) } diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs index 0961967f9ca2..c4281cfb768f 100644 --- a/substrate/client/service/src/lib.rs +++ b/substrate/client/service/src/lib.rs @@ -56,9 +56,9 @@ use sp_runtime::{ pub use self::{ builder::{ build_network, new_client, new_db_backend, new_full_client, new_full_parts, - new_full_parts_with_genesis_builder, new_native_or_wasm_executor, new_wasm_executor, - spawn_tasks, BuildNetworkParams, KeystoreContainer, NetworkStarter, 
SpawnTasksParams, - TFullBackend, TFullCallExecutor, TFullClient, + new_full_parts_extension, new_full_parts_with_genesis_builder, new_native_or_wasm_executor, + new_wasm_executor, spawn_tasks, BuildNetworkParams, KeystoreContainer, NetworkStarter, + SpawnTasksParams, TFullBackend, TFullCallExecutor, TFullClient, }, client::{ClientConfig, LocalCallExecutor}, error::Error, diff --git a/substrate/client/service/test/src/client/mod.rs b/substrate/client/service/test/src/client/mod.rs index c40ac33da4bb..dcb66b9a05c2 100644 --- a/substrate/client/service/test/src/client/mod.rs +++ b/substrate/client/service/test/src/client/mod.rs @@ -413,7 +413,7 @@ fn uncles_with_multiple_forks() { // A1 -> A2 let a2 = client - .new_block_at(a1.hash(), Default::default(), false) + .new_block_at(a1.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -422,7 +422,7 @@ fn uncles_with_multiple_forks() { // A2 -> A3 let a3 = client - .new_block_at(a2.hash(), Default::default(), false) + .new_block_at(a2.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -431,7 +431,7 @@ fn uncles_with_multiple_forks() { // A3 -> A4 let a4 = client - .new_block_at(a3.hash(), Default::default(), false) + .new_block_at(a3.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -440,7 +440,7 @@ fn uncles_with_multiple_forks() { // A4 -> A5 let a5 = client - .new_block_at(a4.hash(), Default::default(), false) + .new_block_at(a4.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -448,7 +448,7 @@ fn uncles_with_multiple_forks() { block_on(client.import(BlockOrigin::Own, a5.clone())).unwrap(); // A1 -> B2 - let mut builder = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); + let mut builder = client.new_block_at(a1.hash(), Default::default(), false, None).unwrap(); // this push is required as otherwise B2 has the same hash as A2 and won't get imported builder .push_transfer(Transfer { @@ -463,7 +463,7 @@ fn 
uncles_with_multiple_forks() { // B2 -> B3 let b3 = client - .new_block_at(b2.hash(), Default::default(), false) + .new_block_at(b2.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -472,7 +472,7 @@ fn uncles_with_multiple_forks() { // B3 -> B4 let b4 = client - .new_block_at(b3.hash(), Default::default(), false) + .new_block_at(b3.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -480,7 +480,7 @@ fn uncles_with_multiple_forks() { block_on(client.import(BlockOrigin::Own, b4.clone())).unwrap(); // // B2 -> C3 - let mut builder = client.new_block_at(b2.hash(), Default::default(), false).unwrap(); + let mut builder = client.new_block_at(b2.hash(), Default::default(), false, None).unwrap(); // this push is required as otherwise C3 has the same hash as B3 and won't get imported builder .push_transfer(Transfer { @@ -494,7 +494,7 @@ fn uncles_with_multiple_forks() { block_on(client.import(BlockOrigin::Own, c3.clone())).unwrap(); // A1 -> D2 - let mut builder = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); + let mut builder = client.new_block_at(a1.hash(), Default::default(), false, None).unwrap(); // this push is required as otherwise D2 has the same hash as B2 and won't get imported builder .push_transfer(Transfer { @@ -568,7 +568,7 @@ fn finality_target_on_longest_chain_with_multiple_forks() { // A1 -> A2 let a2 = client - .new_block_at(a1.hash(), Default::default(), false) + .new_block_at(a1.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -577,7 +577,7 @@ fn finality_target_on_longest_chain_with_multiple_forks() { // A2 -> A3 let a3 = client - .new_block_at(a2.hash(), Default::default(), false) + .new_block_at(a2.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -586,7 +586,7 @@ fn finality_target_on_longest_chain_with_multiple_forks() { // A3 -> A4 let a4 = client - .new_block_at(a3.hash(), Default::default(), false) + .new_block_at(a3.hash(), 
Default::default(), false, None) .unwrap() .build() .unwrap() @@ -595,7 +595,7 @@ fn finality_target_on_longest_chain_with_multiple_forks() { // A4 -> A5 let a5 = client - .new_block_at(a4.hash(), Default::default(), false) + .new_block_at(a4.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -603,7 +603,7 @@ fn finality_target_on_longest_chain_with_multiple_forks() { block_on(client.import(BlockOrigin::Own, a5.clone())).unwrap(); // A1 -> B2 - let mut builder = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); + let mut builder = client.new_block_at(a1.hash(), Default::default(), false, None).unwrap(); // this push is required as otherwise B2 has the same hash as A2 and won't get imported builder .push_transfer(Transfer { @@ -618,7 +618,7 @@ fn finality_target_on_longest_chain_with_multiple_forks() { // B2 -> B3 let b3 = client - .new_block_at(b2.hash(), Default::default(), false) + .new_block_at(b2.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -627,7 +627,7 @@ fn finality_target_on_longest_chain_with_multiple_forks() { // B3 -> B4 let b4 = client - .new_block_at(b3.hash(), Default::default(), false) + .new_block_at(b3.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -635,7 +635,7 @@ fn finality_target_on_longest_chain_with_multiple_forks() { block_on(client.import(BlockOrigin::Own, b4.clone())).unwrap(); // B2 -> C3 - let mut builder = client.new_block_at(b2.hash(), Default::default(), false).unwrap(); + let mut builder = client.new_block_at(b2.hash(), Default::default(), false, None).unwrap(); // this push is required as otherwise C3 has the same hash as B3 and won't get imported builder .push_transfer(Transfer { @@ -649,7 +649,7 @@ fn finality_target_on_longest_chain_with_multiple_forks() { block_on(client.import(BlockOrigin::Own, c3.clone())).unwrap(); // A1 -> D2 - let mut builder = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); + let mut builder = 
client.new_block_at(a1.hash(), Default::default(), false, None).unwrap(); // this push is required as otherwise D2 has the same hash as B2 and won't get imported builder .push_transfer(Transfer { @@ -825,7 +825,7 @@ fn finality_target_with_best_not_on_longest_chain() { block_on(client.import(BlockOrigin::Own, a5.clone())).unwrap(); // A1 -> B2 - let mut builder = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); + let mut builder = client.new_block_at(a1.hash(), Default::default(), false, None).unwrap(); // this push is required as otherwise B2 has the same hash as A2 and won't get imported builder .push_transfer(Transfer { @@ -847,7 +847,7 @@ fn finality_target_with_best_not_on_longest_chain() { // B2 -> B3 let b3 = client - .new_block_at(b2.hash(), Default::default(), false) + .new_block_at(b2.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -856,7 +856,7 @@ fn finality_target_with_best_not_on_longest_chain() { // B3 -> B4 let b4 = client - .new_block_at(b3.hash(), Default::default(), false) + .new_block_at(b3.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -892,7 +892,7 @@ fn import_with_justification() { // A1 -> A2 let a2 = client - .new_block_at(a1.hash(), Default::default(), false) + .new_block_at(a1.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -903,7 +903,7 @@ fn import_with_justification() { // A2 -> A3 let justification = Justifications::from((TEST_ENGINE_ID, vec![1, 2, 3])); let a3 = client - .new_block_at(a2.hash(), Default::default(), false) + .new_block_at(a2.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -934,7 +934,7 @@ fn importing_diverged_finalized_block_should_trigger_reorg() { let mut finality_notifications = client.finality_notification_stream(); let a1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) 
.unwrap() .build() .unwrap() @@ -942,7 +942,7 @@ fn importing_diverged_finalized_block_should_trigger_reorg() { block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); let a2 = client - .new_block_at(a1.hash(), Default::default(), false) + .new_block_at(a1.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -950,7 +950,7 @@ fn importing_diverged_finalized_block_should_trigger_reorg() { block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); let mut b1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) .unwrap(); // needed to make sure B1 gets a different hash from A1 b1.push_transfer(Transfer { @@ -989,7 +989,7 @@ fn finalizing_diverged_block_should_trigger_reorg() { let mut finality_notifications = client.finality_notification_stream(); let a1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) .unwrap() .build() .unwrap() @@ -997,7 +997,7 @@ fn finalizing_diverged_block_should_trigger_reorg() { block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); let a2 = client - .new_block_at(a1.hash(), Default::default(), false) + .new_block_at(a1.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -1005,7 +1005,7 @@ fn finalizing_diverged_block_should_trigger_reorg() { block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); let mut b1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) .unwrap(); // needed to make sure B1 gets a different hash from A1 b1.push_transfer(Transfer { @@ -1019,7 +1019,7 @@ fn finalizing_diverged_block_should_trigger_reorg() { block_on(client.import(BlockOrigin::Own, b1.clone())).unwrap(); let b2 = client - 
.new_block_at(b1.hash(), Default::default(), false) + .new_block_at(b1.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -1045,7 +1045,7 @@ fn finalizing_diverged_block_should_trigger_reorg() { // after we build B3 on top of B2 and import it, it should be the new best block let b3 = client - .new_block_at(b2.hash(), Default::default(), false) + .new_block_at(b2.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -1077,7 +1077,7 @@ fn finality_notifications_content() { let mut finality_notifications = client.finality_notification_stream(); let a1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) .unwrap() .build() .unwrap() @@ -1085,7 +1085,7 @@ fn finality_notifications_content() { block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); let a2 = client - .new_block_at(a1.hash(), Default::default(), false) + .new_block_at(a1.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -1093,7 +1093,7 @@ fn finality_notifications_content() { block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); let a3 = client - .new_block_at(a2.hash(), Default::default(), false) + .new_block_at(a2.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -1101,7 +1101,7 @@ fn finality_notifications_content() { block_on(client.import(BlockOrigin::Own, a3.clone())).unwrap(); let mut b1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) .unwrap(); // needed to make sure B1 gets a different hash from A1 b1.push_transfer(Transfer { @@ -1115,7 +1115,7 @@ fn finality_notifications_content() { block_on(client.import(BlockOrigin::Own, b1.clone())).unwrap(); let b2 = client - .new_block_at(b1.hash(), Default::default(), false) + .new_block_at(b1.hash(), Default::default(), 
false, None) .unwrap() .build() .unwrap() @@ -1123,7 +1123,7 @@ fn finality_notifications_content() { block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); let mut c1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) .unwrap(); // needed to make sure B1 gets a different hash from A1 c1.push_transfer(Transfer { @@ -1136,7 +1136,7 @@ fn finality_notifications_content() { let c1 = c1.build().unwrap().block; block_on(client.import(BlockOrigin::Own, c1.clone())).unwrap(); - let mut d3 = client.new_block_at(a2.hash(), Default::default(), false).unwrap(); + let mut d3 = client.new_block_at(a2.hash(), Default::default(), false, None).unwrap(); // needed to make sure D3 gets a different hash from A3 d3.push_transfer(Transfer { from: AccountKeyring::Alice.into(), @@ -1149,7 +1149,7 @@ fn finality_notifications_content() { block_on(client.import(BlockOrigin::Own, d3.clone())).unwrap(); let d4 = client - .new_block_at(d3.hash(), Default::default(), false) + .new_block_at(d3.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -1210,7 +1210,7 @@ fn state_reverted_on_reorg() { // \ // -> B1 let mut a1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) .unwrap(); a1.push_transfer(Transfer { from: AccountKeyring::Alice.into(), @@ -1223,7 +1223,7 @@ fn state_reverted_on_reorg() { block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); let mut b1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) .unwrap(); b1.push_transfer(Transfer { from: AccountKeyring::Alice.into(), @@ -1237,7 +1237,7 @@ fn state_reverted_on_reorg() { block_on(client.import_as_best(BlockOrigin::Own, b1.clone())).unwrap(); 
assert_eq!(950 * DOLLARS, current_balance(&client)); - let mut a2 = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); + let mut a2 = client.new_block_at(a1.hash(), Default::default(), false, None).unwrap(); a2.push_transfer(Transfer { from: AccountKeyring::Alice.into(), to: AccountKeyring::Charlie.into(), @@ -1282,7 +1282,7 @@ fn doesnt_import_blocks_that_revert_finality() { // -> B1 -> B2 -> B3 let a1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) .unwrap() .build() .unwrap() @@ -1290,7 +1290,7 @@ fn doesnt_import_blocks_that_revert_finality() { block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); let a2 = client - .new_block_at(a1.hash(), Default::default(), false) + .new_block_at(a1.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -1298,7 +1298,7 @@ fn doesnt_import_blocks_that_revert_finality() { block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); let mut b1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) .unwrap(); // needed to make sure B1 gets a different hash from A1 @@ -1313,7 +1313,7 @@ fn doesnt_import_blocks_that_revert_finality() { block_on(client.import(BlockOrigin::Own, b1.clone())).unwrap(); let b2 = client - .new_block_at(b1.hash(), Default::default(), false) + .new_block_at(b1.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -1323,7 +1323,7 @@ fn doesnt_import_blocks_that_revert_finality() { // prepare B3 before we finalize A2, because otherwise we won't be able to // read changes trie configuration after A2 is finalized let b3 = client - .new_block_at(b2.hash(), Default::default(), false) + .new_block_at(b2.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -1342,7 +1342,7 @@ fn 
doesnt_import_blocks_that_revert_finality() { // adding a C1 block which is lower than the last finalized should also // fail (with a cheaper check that doesn't require checking ancestry). let mut c1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) .unwrap(); // needed to make sure C1 gets a different hash from A1 and B1 @@ -1362,7 +1362,7 @@ fn doesnt_import_blocks_that_revert_finality() { assert_eq!(import_err.to_string(), expected_err.to_string()); let a3 = client - .new_block_at(a2.hash(), Default::default(), false) + .new_block_at(a2.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -1410,7 +1410,7 @@ fn respects_block_rules() { // build B[1] let block_ok = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) .unwrap() .build() .unwrap() @@ -1429,7 +1429,7 @@ fn respects_block_rules() { // build B'[1] let mut block_not_ok = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) .unwrap(); block_not_ok.push_storage_change(vec![0], Some(vec![1])).unwrap(); let block_not_ok = block_not_ok.build().unwrap().block; @@ -1452,7 +1452,8 @@ fn respects_block_rules() { block_on(client.import_as_final(BlockOrigin::Own, block_ok)).unwrap(); // And check good fork (build B[2]) - let mut block_ok = client.new_block_at(block_ok_1_hash, Default::default(), false).unwrap(); + let mut block_ok = + client.new_block_at(block_ok_1_hash, Default::default(), false, None).unwrap(); block_ok.push_storage_change(vec![0], Some(vec![2])).unwrap(); let block_ok = block_ok.build().unwrap().block; assert_eq!(*block_ok.header().number(), 2); @@ -1472,7 +1473,7 @@ fn respects_block_rules() { // And now try bad fork (build B'[2]) 
let mut block_not_ok = - client.new_block_at(block_ok_1_hash, Default::default(), false).unwrap(); + client.new_block_at(block_ok_1_hash, Default::default(), false, None).unwrap(); block_not_ok.push_storage_change(vec![0], Some(vec![3])).unwrap(); let block_not_ok = block_not_ok.build().unwrap().block; assert_eq!(*block_not_ok.header().number(), 2); @@ -1528,14 +1529,14 @@ fn returns_status_for_pruned_blocks() { let mut client = TestClientBuilder::with_backend(backend).build(); let a1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) .unwrap() .build() .unwrap() .block; let mut b1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) .unwrap(); // b1 is created, but not imported @@ -1572,7 +1573,7 @@ fn returns_status_for_pruned_blocks() { assert_eq!(client.block_status(check_block_a1.hash).unwrap(), BlockStatus::InChainWithState); let a2 = client - .new_block_at(a1.hash(), Default::default(), false) + .new_block_at(a1.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -1600,7 +1601,7 @@ fn returns_status_for_pruned_blocks() { assert_eq!(client.block_status(check_block_a2.hash).unwrap(), BlockStatus::InChainWithState); let a3 = client - .new_block_at(a2.hash(), Default::default(), false) + .new_block_at(a2.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -1908,7 +1909,7 @@ fn reorg_triggers_a_notification_even_for_sources_that_should_not_trigger_notifi futures::executor::block_on_stream(client.import_notification_stream()); let a1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) .unwrap() .build() .unwrap() @@ -1916,7 +1917,7 @@ fn 
reorg_triggers_a_notification_even_for_sources_that_should_not_trigger_notifi block_on(client.import(BlockOrigin::NetworkInitialSync, a1.clone())).unwrap(); let a2 = client - .new_block_at(a1.hash(), Default::default(), false) + .new_block_at(a1.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -1924,7 +1925,7 @@ fn reorg_triggers_a_notification_even_for_sources_that_should_not_trigger_notifi block_on(client.import(BlockOrigin::NetworkInitialSync, a2.clone())).unwrap(); let mut b1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) .unwrap(); // needed to make sure B1 gets a different hash from A1 b1.push_transfer(Transfer { @@ -1938,7 +1939,7 @@ fn reorg_triggers_a_notification_even_for_sources_that_should_not_trigger_notifi block_on(client.import(BlockOrigin::NetworkInitialSync, b1.clone())).unwrap(); let b2 = client - .new_block_at(b1.hash(), Default::default(), false) + .new_block_at(b1.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -1974,7 +1975,7 @@ fn use_dalek_ext_works() { ); let a1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) .unwrap() .build() .unwrap() @@ -2002,7 +2003,7 @@ fn finalize_after_best_block_updates_best() { // A1 -> A2 let a2 = client - .new_block_at(a1.hash(), Default::default(), false) + .new_block_at(a1.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -2011,7 +2012,7 @@ fn finalize_after_best_block_updates_best() { // A2 -> A3 let a3 = client - .new_block_at(a2.hash(), Default::default(), false) + .new_block_at(a2.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() diff --git a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs index 
74cfa0980623..446110442e72 100644 --- a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -365,6 +365,10 @@ fn generate_runtime_api_base_structures() -> Result { fn register_extension(&mut self, extension: E) { std::cell::RefCell::borrow_mut(&self.extensions).register(extension); } + + fn register_extension_with_type_id(&mut self, type_id: core::any::TypeId, extension: Box) { + let _ = std::cell::RefCell::borrow_mut(&self.extensions).register_with_type_id(type_id, extension); + } } impl #crate_::ConstructRuntimeApi diff --git a/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs index c1339ff6621b..c58cf01e922b 100644 --- a/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs +++ b/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs @@ -127,6 +127,10 @@ fn implement_common_api_traits(block_type: TypePath, self_ty: Type) -> Result(&mut self, _: E) { unimplemented!("`register_extension` not implemented for runtime api mocks") } + + fn register_extension_with_type_id(&mut self, type_id: core::any::TypeId, extension: Box) { + unimplemented!("`register_extension_with_type_id` not implemented for runtime api mocks") + } } impl #crate_::Core<#block_type> for #self_ty { diff --git a/substrate/primitives/api/src/lib.rs b/substrate/primitives/api/src/lib.rs index c3f80acf09ae..e50fcafb1b93 100644 --- a/substrate/primitives/api/src/lib.rs +++ b/substrate/primitives/api/src/lib.rs @@ -72,6 +72,7 @@ extern crate self as sp_api; #[doc(hidden)] pub use codec::{self, Decode, DecodeLimit, Encode}; +use core::any::TypeId; #[doc(hidden)] #[cfg(feature = "std")] pub use hash_db::Hasher; @@ -504,6 +505,16 @@ pub use sp_api_proc_macro::mock_impl_runtime_apis; #[cfg(feature = "std")] pub type ProofRecorder = sp_trie::recorder::Recorder>; +#[cfg(feature = "std")] +pub type ExtensionProducer = 
sp_std::sync::Arc< + dyn Fn( + Box, + ) -> (core::any::TypeId, Box) + + Send + + Sync, +>; + +/// A type that is used as cache for the storage transactions. #[cfg(feature = "std")] pub type StorageChanges = sp_state_machine::StorageChanges>; @@ -620,6 +631,9 @@ pub trait ApiExt { /// Register an [`Extension`] that will be accessible while executing a runtime api call. fn register_extension(&mut self, extension: E); + + /// Register an [`Extension`] that will be accessible while executing a runtime api call. + fn register_extension_with_type_id(&mut self, type_id: TypeId, extension: Box); } /// Parameters for [`CallApiAt::call_api_at`]. diff --git a/substrate/primitives/api/test/tests/runtime_calls.rs b/substrate/primitives/api/test/tests/runtime_calls.rs index 353be73dcccd..c659b7fbdcbf 100644 --- a/substrate/primitives/api/test/tests/runtime_calls.rs +++ b/substrate/primitives/api/test/tests/runtime_calls.rs @@ -106,7 +106,7 @@ fn record_proof_works() { // Build the block and record proof let mut builder = client - .new_block_at(client.chain_info().best_hash, Default::default(), true) + .new_block_at(client.chain_info().best_hash, Default::default(), true, None) .expect("Creates block builder"); builder.push(transaction.clone()).unwrap(); let (block, _, proof) = builder.build().expect("Bake block").into_inner(); diff --git a/substrate/primitives/externalities/Cargo.toml b/substrate/primitives/externalities/Cargo.toml index 7468237c2a0f..7795ed3b890a 100644 --- a/substrate/primitives/externalities/Cargo.toml +++ b/substrate/primitives/externalities/Cargo.toml @@ -20,5 +20,10 @@ sp-std = { path = "../std", default-features = false} sp-storage = { path = "../storage", default-features = false} [features] -default = [ "std" ] -std = [ "codec/std", "environmental/std", "sp-std/std", "sp-storage/std" ] +default = ["std"] +std = [ + "codec/std", + "environmental/std", + "sp-std/std", + "sp-storage/std", +] diff --git a/substrate/primitives/externalities/src/extensions.rs 
b/substrate/primitives/externalities/src/extensions.rs index 8b0bbd2c5921..616ac6e827d1 100644 --- a/substrate/primitives/externalities/src/extensions.rs +++ b/substrate/primitives/externalities/src/extensions.rs @@ -48,6 +48,12 @@ impl Extension for Box { } } +impl Extension for Box { + fn as_mut_any(&mut self) -> &mut dyn Any { + (**self).as_mut_any() + } +} + /// Macro for declaring an extension that usable with [`Extensions`]. /// /// The extension will be an unit wrapper struct that implements [`Extension`], `Deref` and diff --git a/substrate/primitives/runtime-interface/proc-macro/Cargo.toml b/substrate/primitives/runtime-interface/proc-macro/Cargo.toml index 4b50dfe2a7a1..207e076dde87 100644 --- a/substrate/primitives/runtime-interface/proc-macro/Cargo.toml +++ b/substrate/primitives/runtime-interface/proc-macro/Cargo.toml @@ -20,4 +20,5 @@ Inflector = "0.11.4" proc-macro-crate = "1.1.3" proc-macro2 = "1.0.56" quote = "1.0.28" +expander = "2.0.0" syn = { version = "2.0.16", features = ["full", "visit", "fold", "extra-traits"] } diff --git a/substrate/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs b/substrate/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs index 008d69b32100..1c4274121aaa 100644 --- a/substrate/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs +++ b/substrate/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs @@ -68,5 +68,11 @@ pub fn runtime_interface_impl( } }; + let res = expander::Expander::new("runtime_interface") + .dry(std::env::var("SP_RUNTIME_INTERFACE_EXPAND").is_err()) + .verbose(true) + .write_to_out_dir(res) + .expect("Does not fail because of IO in OUT_DIR; qed"); + Ok(res) } diff --git a/substrate/primitives/trie/src/lib.rs b/substrate/primitives/trie/src/lib.rs index 94155458569b..c5a3d4ace338 100644 --- a/substrate/primitives/trie/src/lib.rs +++ b/substrate/primitives/trie/src/lib.rs @@ -30,6 +30,10 @@ mod storage_proof; mod trie_codec; mod 
trie_stream; +pub trait ProofSizeEstimationProvider { + fn estimate_proof_size(&self) -> usize; +} + /// Our `NodeCodec`-specific error. pub use error::Error; /// Various re-exports from the `hash-db` crate. diff --git a/substrate/primitives/trie/src/recorder.rs b/substrate/primitives/trie/src/recorder.rs index 728dc836205b..557df1e6f883 100644 --- a/substrate/primitives/trie/src/recorder.rs +++ b/substrate/primitives/trie/src/recorder.rs @@ -393,6 +393,12 @@ impl>> trie_db::TrieRecord } } +impl crate::ProofSizeEstimationProvider for Recorder { + fn estimate_proof_size(&self) -> usize { + self.estimate_encoded_size() + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/substrate/test-utils/client/src/lib.rs b/substrate/test-utils/client/src/lib.rs index 90e15e0f8d53..dfa20e647096 100644 --- a/substrate/test-utils/client/src/lib.rs +++ b/substrate/test-utils/client/src/lib.rs @@ -266,11 +266,12 @@ impl let executor = executor.into().unwrap_or_else(|| { NativeElseWasmExecutor::new_with_wasm_executor(WasmExecutor::builder().build()) }); + // TODO skunert Check back on this let executor = LocalCallExecutor::new( self.backend.clone(), executor.clone(), Default::default(), - ExecutionExtensions::new(None, Arc::new(executor)), + ExecutionExtensions::new(None, Arc::new(executor), None), ) .expect("Creates LocalCallExecutor"); diff --git a/substrate/test-utils/runtime/client/src/trait_tests.rs b/substrate/test-utils/runtime/client/src/trait_tests.rs index 5fce7a2860b7..789314b4ff66 100644 --- a/substrate/test-utils/runtime/client/src/trait_tests.rs +++ b/substrate/test-utils/runtime/client/src/trait_tests.rs @@ -60,7 +60,7 @@ where // A1 -> A2 let a2 = client - .new_block_at(a1.hash(), Default::default(), false) + .new_block_at(a1.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -71,7 +71,7 @@ where // A2 -> A3 let a3 = client - .new_block_at(a2.hash(), Default::default(), false) + .new_block_at(a2.hash(), Default::default(), false, None) 
.unwrap() .build() .unwrap() @@ -82,7 +82,7 @@ where // A3 -> A4 let a4 = client - .new_block_at(a3.hash(), Default::default(), false) + .new_block_at(a3.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -92,7 +92,7 @@ where // A4 -> A5 let a5 = client - .new_block_at(a4.hash(), Default::default(), false) + .new_block_at(a4.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -102,7 +102,7 @@ where assert_eq!(blockchain.leaves().unwrap(), vec![a5.hash()]); // A1 -> B2 - let mut builder = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); + let mut builder = client.new_block_at(a1.hash(), Default::default(), false, None).unwrap(); // this push is required as otherwise B2 has the same hash as A2 and won't get imported builder @@ -119,7 +119,7 @@ where // B2 -> B3 let b3 = client - .new_block_at(b2.hash(), Default::default(), false) + .new_block_at(b2.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -130,7 +130,7 @@ where // B3 -> B4 let b4 = client - .new_block_at(b3.hash(), Default::default(), false) + .new_block_at(b3.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -139,7 +139,7 @@ where assert_eq!(blockchain.leaves().unwrap(), vec![a5.hash(), b4.hash()]); // // B2 -> C3 - let mut builder = client.new_block_at(b2.hash(), Default::default(), false).unwrap(); + let mut builder = client.new_block_at(b2.hash(), Default::default(), false, None).unwrap(); // this push is required as otherwise C3 has the same hash as B3 and won't get imported builder .push_transfer(Transfer { @@ -154,7 +154,7 @@ where assert_eq!(blockchain.leaves().unwrap(), vec![a5.hash(), b4.hash(), c3.hash()]); // A1 -> D2 - let mut builder = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); + let mut builder = client.new_block_at(a1.hash(), Default::default(), false, None).unwrap(); // this push is required as otherwise D2 has the same hash as B2 and won't get imported builder 
.push_transfer(Transfer { @@ -189,7 +189,7 @@ where // A1 -> A2 let a2 = client - .new_block_at(a1.hash(), Default::default(), false) + .new_block_at(a1.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -198,7 +198,7 @@ where // A2 -> A3 let a3 = client - .new_block_at(a2.hash(), Default::default(), false) + .new_block_at(a2.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -207,7 +207,7 @@ where // A3 -> A4 let a4 = client - .new_block_at(a3.hash(), Default::default(), false) + .new_block_at(a3.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -216,7 +216,7 @@ where // A4 -> A5 let a5 = client - .new_block_at(a4.hash(), Default::default(), false) + .new_block_at(a4.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -224,7 +224,7 @@ where block_on(client.import(BlockOrigin::Own, a5.clone())).unwrap(); // A1 -> B2 - let mut builder = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); + let mut builder = client.new_block_at(a1.hash(), Default::default(), false, None).unwrap(); // this push is required as otherwise B2 has the same hash as A2 and won't get imported builder .push_transfer(Transfer { @@ -239,7 +239,7 @@ where // B2 -> B3 let b3 = client - .new_block_at(b2.hash(), Default::default(), false) + .new_block_at(b2.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -248,7 +248,7 @@ where // B3 -> B4 let b4 = client - .new_block_at(b3.hash(), Default::default(), false) + .new_block_at(b3.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -256,7 +256,7 @@ where block_on(client.import(BlockOrigin::Own, b4)).unwrap(); // // B2 -> C3 - let mut builder = client.new_block_at(b2.hash(), Default::default(), false).unwrap(); + let mut builder = client.new_block_at(b2.hash(), Default::default(), false, None).unwrap(); // this push is required as otherwise C3 has the same hash as B3 and won't get imported builder 
.push_transfer(Transfer { @@ -270,7 +270,7 @@ where block_on(client.import(BlockOrigin::Own, c3.clone())).unwrap(); // A1 -> D2 - let mut builder = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); + let mut builder = client.new_block_at(a1.hash(), Default::default(), false, None).unwrap(); // this push is required as otherwise D2 has the same hash as B2 and won't get imported builder .push_transfer(Transfer { @@ -316,7 +316,7 @@ where // A1 -> A2 let a2 = client - .new_block_at(a1.hash(), Default::default(), false) + .new_block_at(a1.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -325,7 +325,7 @@ where // A2 -> A3 let a3 = client - .new_block_at(a2.hash(), Default::default(), false) + .new_block_at(a2.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -334,7 +334,7 @@ where // A3 -> A4 let a4 = client - .new_block_at(a3.hash(), Default::default(), false) + .new_block_at(a3.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -343,7 +343,7 @@ where // A4 -> A5 let a5 = client - .new_block_at(a4.hash(), Default::default(), false) + .new_block_at(a4.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -351,7 +351,7 @@ where block_on(client.import(BlockOrigin::Own, a5.clone())).unwrap(); // A1 -> B2 - let mut builder = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); + let mut builder = client.new_block_at(a1.hash(), Default::default(), false, None).unwrap(); // this push is required as otherwise B2 has the same hash as A2 and won't get imported builder .push_transfer(Transfer { @@ -366,7 +366,7 @@ where // B2 -> B3 let b3 = client - .new_block_at(b2.hash(), Default::default(), false) + .new_block_at(b2.hash(), Default::default(), false, None) .unwrap() .build() .unwrap() @@ -375,7 +375,7 @@ where // B3 -> B4 let b4 = client - .new_block_at(b3.hash(), Default::default(), false) + .new_block_at(b3.hash(), Default::default(), false, None) 
.unwrap() .build() .unwrap() @@ -383,7 +383,7 @@ where block_on(client.import(BlockOrigin::Own, b4)).unwrap(); // // B2 -> C3 - let mut builder = client.new_block_at(b2.hash(), Default::default(), false).unwrap(); + let mut builder = client.new_block_at(b2.hash(), Default::default(), false, None).unwrap(); // this push is required as otherwise C3 has the same hash as B3 and won't get imported builder .push_transfer(Transfer { @@ -397,7 +397,7 @@ where block_on(client.import(BlockOrigin::Own, c3)).unwrap(); // A1 -> D2 - let mut builder = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); + let mut builder = client.new_block_at(a1.hash(), Default::default(), false, None).unwrap(); // this push is required as otherwise D2 has the same hash as B2 and won't get imported builder .push_transfer(Transfer { From 9c21d3ca4b6dd5f3de4a1f06ccd34696b9c8e26f Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Fri, 25 Aug 2023 17:36:48 +0200 Subject: [PATCH 02/61] Clawback cumulus monorepo transfer --- Cargo.lock | 18 ++++ cumulus/client/clawback/Cargo.toml | 27 ++++++ cumulus/client/clawback/src/lib.rs | 82 +++++++++++++++++++ cumulus/parachain-template/node/Cargo.toml | 1 + .../parachain-template/node/src/service.rs | 7 +- .../assets/asset-hub-kusama/Cargo.toml | 2 + .../assets/asset-hub-kusama/src/lib.rs | 6 ++ cumulus/polkadot-parachain/Cargo.toml | 1 + cumulus/polkadot-parachain/src/service.rs | 80 ++++++++++-------- cumulus/test/service/Cargo.toml | 1 + cumulus/test/service/benches/block_import.rs | 5 +- .../service/benches/block_import_glutton.rs | 2 +- .../test/service/benches/block_production.rs | 4 +- .../benches/block_production_glutton.rs | 4 +- cumulus/test/service/src/lib.rs | 7 +- cumulus/zombienet/examples/small_network.toml | 2 +- 16 files changed, 206 insertions(+), 43 deletions(-) create mode 100644 cumulus/client/clawback/Cargo.toml create mode 100644 cumulus/client/clawback/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 
316eafbd0002..ec057dbd14a8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -727,6 +727,7 @@ version = "0.9.420" dependencies = [ "asset-test-utils", "assets-common", + "cumulus-client-clawback", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", @@ -3274,6 +3275,20 @@ dependencies = [ "cipher 0.4.4", ] +[[package]] +name = "cumulus-client-clawback" +version = "0.0.1" +dependencies = [ + "sp-api", + "sp-core", + "sp-externalities", + "sp-runtime", + "sp-runtime-interface", + "sp-std", + "sp-trie", + "tracing", +] + [[package]] name = "cumulus-client-cli" version = "0.1.0" @@ -3973,6 +3988,7 @@ dependencies = [ "async-trait", "clap 4.4.0", "criterion 0.5.1", + "cumulus-client-clawback", "cumulus-client-cli", "cumulus-client-consensus-common", "cumulus-client-consensus-relay-chain", @@ -11021,6 +11037,7 @@ version = "0.1.0" dependencies = [ "clap 4.4.0", "color-print", + "cumulus-client-clawback", "cumulus-client-cli", "cumulus-client-collator", "cumulus-client-consensus-aura", @@ -12565,6 +12582,7 @@ dependencies = [ "collectives-polkadot-runtime", "color-print", "contracts-rococo-runtime", + "cumulus-client-clawback", "cumulus-client-cli", "cumulus-client-collator", "cumulus-client-consensus-aura", diff --git a/cumulus/client/clawback/Cargo.toml b/cumulus/client/clawback/Cargo.toml new file mode 100644 index 000000000000..7123d69f226d --- /dev/null +++ b/cumulus/client/clawback/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "cumulus-client-clawback" +version = "0.0.1" +authors = [ "Sebastian Kunert " ] + +[dependencies] +sp-runtime-interface = { path = "../../../substrate/primitives/runtime-interface", default-features = false } +sp-externalities = { path = "../../../substrate/primitives/externalities", default-features = false } +sp-std = { path = "../../../substrate/primitives/std", default-features = false } +sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } +sp-core = { path = 
"../../../substrate/primitives/core", default-features = false } +sp-api = { path = "../../../substrate/primitives/api", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +tracing = { version = "0.1.37", default-features = false } + +[features] +default = ["std"] +std = [ + "sp-runtime-interface/std", + "sp-externalities/std", + "sp-runtime/std", + "sp-std/std", + "sp-api/std", + "sp-trie/std", + "sp-core/std", + "tracing/std" +] diff --git a/cumulus/client/clawback/src/lib.rs b/cumulus/client/clawback/src/lib.rs new file mode 100644 index 000000000000..4ecfec26e852 --- /dev/null +++ b/cumulus/client/clawback/src/lib.rs @@ -0,0 +1,82 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . 
+// +#![cfg_attr(not(feature = "std"), no_std)] + +extern crate sp_api; +extern crate sp_core; +extern crate sp_externalities; +extern crate sp_runtime; +extern crate sp_runtime_interface; +extern crate sp_std; +extern crate sp_trie; + +use sp_externalities::Extension; +use sp_runtime_interface::runtime_interface; +use sp_trie::ProofSizeEstimationProvider; +#[cfg(feature = "std")] +use std::sync::Arc; + +#[cfg(feature = "std")] +use sp_api::ExtensionProducer; + +use sp_std::boxed::Box; + +#[cfg(feature = "std")] +use sp_runtime_interface::ExternalitiesExt; + +#[runtime_interface] +pub trait ClawbackHostFunctions { + fn current_storage_proof_size(&mut self) -> u32 { + match self.extension::() { + Some(ext) => ext.current_storage_proof_size(), + None => 0, + } + } +} + +pub trait ReportPovUsage: Send + Sync { + fn current_storage_proof_size(&self) -> u32; +} + +#[cfg(feature = "std")] +sp_externalities::decl_extension! { + pub struct PovUsageExt(PovUsageReporter); +} + +pub struct PovUsageReporter { + recorder: Box, +} + +impl PovUsageReporter { + fn new(recorder: Box) -> Self { + PovUsageReporter { recorder } + } + + fn current_storage_proof_size(&self) -> u32 { + self.recorder.estimate_proof_size() as u32 + } +} + +#[cfg(feature = "std")] +pub fn get_extension_factory() -> ExtensionProducer { + std::sync::Arc::new(|recorder| { + ( + core::any::TypeId::of::(), + Box::new(PovUsageExt(PovUsageReporter::new(recorder))) + as Box, + ) + }) as Arc<_> +} diff --git a/cumulus/parachain-template/node/Cargo.toml b/cumulus/parachain-template/node/Cargo.toml index 21eb00760f56..d8159659a802 100644 --- a/cumulus/parachain-template/node/Cargo.toml +++ b/cumulus/parachain-template/node/Cargo.toml @@ -61,6 +61,7 @@ xcm = { path = "../../../polkadot/xcm", default-features = false} cumulus-client-cli = { path = "../../client/cli" } cumulus-client-collator = { path = "../../client/collator" } cumulus-client-consensus-aura = { path = "../../client/consensus/aura" } 
+cumulus-client-clawback = { path = "../../client/clawback" } cumulus-client-consensus-common = { path = "../../client/consensus/common" } cumulus-client-consensus-proposer = { path = "../../client/consensus/proposer" } cumulus-client-service = { path = "../../client/service" } diff --git a/cumulus/parachain-template/node/src/service.rs b/cumulus/parachain-template/node/src/service.rs index 9fa6d60c2e74..441d08f75323 100644 --- a/cumulus/parachain-template/node/src/service.rs +++ b/cumulus/parachain-template/node/src/service.rs @@ -3,6 +3,7 @@ // std use std::{sync::Arc, time::Duration}; +use cumulus_client_clawback::get_extension_factory; use cumulus_client_cli::CollatorOptions; // Local Runtime Types use parachain_template_runtime::{ @@ -102,10 +103,11 @@ pub fn new_partial( let executor = ParachainExecutor::new_with_wasm_executor(wasm); let (client, backend, keystore_container, task_manager) = - sc_service::new_full_parts::( + sc_service::new_full_parts_extension::( config, telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), executor, + Some(get_extension_factory()), )?; let client = Arc::new(client); @@ -378,12 +380,13 @@ fn start_consensus( let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( + let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording_extension( task_manager.spawn_handle(), client.clone(), transaction_pool, prometheus_registry, telemetry.clone(), + Some(get_extension_factory()), ); let proposer = Proposer::new(proposer_factory); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml index 27e8031136a2..a0b00efe3fa4 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml @@ -74,6 +74,7 @@ cumulus-pallet-xcm = { path = 
"../../../../pallets/xcm", default-features = fals cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } +cumulus-client-clawback = { path = "../../../../client/clawback", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachain-info = { path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } @@ -204,6 +205,7 @@ std = [ "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-core/std", "cumulus-primitives-utility/std", + "cumulus-client-clawback/std", "pallet-collator-selection/std", "parachain-info/std", "parachains-common/std", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs index 2130c6502f8e..a2056202431a 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs @@ -973,10 +973,16 @@ impl_runtime_apis! 
{ impl sp_block_builder::BlockBuilder for Runtime { fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { + log::info!("apply_extrinsic"); + let proof_size = cumulus_client_clawback::clawback_host_functions::current_storage_proof_size(); + log::info!("Got proof size: {}", proof_size); Executive::apply_extrinsic(extrinsic) } fn finalize_block() -> ::Header { + log::info!("finalize_block"); + let proof_size = cumulus_client_clawback::clawback_host_functions::current_storage_proof_size(); + log::info!("Got proof size: {}", proof_size); Executive::finalize_block() } diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml index 4049f1001a18..831a2c7ec2b9 100644 --- a/cumulus/polkadot-parachain/Cargo.toml +++ b/cumulus/polkadot-parachain/Cargo.toml @@ -81,6 +81,7 @@ xcm = { path = "../../polkadot/xcm" } # Cumulus cumulus-client-cli = { path = "../client/cli" } +cumulus-client-clawback = { path = "../client/clawback" } cumulus-client-collator = { path = "../client/collator" } cumulus-client-consensus-aura = { path = "../client/consensus/aura" } cumulus-client-consensus-relay-chain = { path = "../client/consensus/relay-chain" } diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs index 3899814cb0e8..8513a7534813 100644 --- a/cumulus/polkadot-parachain/src/service.rs +++ b/cumulus/polkadot-parachain/src/service.rs @@ -15,6 +15,7 @@ // along with Cumulus. If not, see . 
use codec::Codec; +use cumulus_client_clawback::get_extension_factory; use cumulus_client_cli::CollatorOptions; use cumulus_client_collator::service::CollatorService; use cumulus_client_consensus_aura::collators::basic::{ @@ -63,11 +64,17 @@ use substrate_prometheus_endpoint::Registry; use polkadot_primitives::CollatorPair; #[cfg(not(feature = "runtime-benchmarks"))] -type HostFunctions = sp_io::SubstrateHostFunctions; +type HostFunctions = ( + sp_io::SubstrateHostFunctions, + cumulus_client_clawback::clawback_host_functions::HostFunctions, +); #[cfg(feature = "runtime-benchmarks")] -type HostFunctions = - (sp_io::SubstrateHostFunctions, frame_benchmarking::benchmarking::HostFunctions); +type HostFunctions = ( + sp_io::SubstrateHostFunctions, + cumulus_client_clawback::clawback_host_functions::HostFunctions, + frame_benchmarking::benchmarking::HostFunctions, +); type ParachainClient = TFullClient>; @@ -284,10 +291,11 @@ where .build(); let (client, backend, keystore_container, task_manager) = - sc_service::new_full_parts::( + sc_service::new_full_parts_extension::( config, telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), executor, + Some(get_extension_factory()), )?; let client = Arc::new(client); @@ -769,13 +777,15 @@ pub async fn start_rococo_parachain_node( announce_block| { let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( - task_manager.spawn_handle(), - client.clone(), - transaction_pool, - prometheus_registry, - telemetry.clone(), - ); + let proposer_factory = + sc_basic_authorship::ProposerFactory::with_proof_recording_extension( + task_manager.spawn_handle(), + client.clone(), + transaction_pool, + prometheus_registry, + telemetry.clone(), + Some(get_extension_factory()), + ); let proposer = Proposer::new(proposer_factory); let collator_service = CollatorService::new( @@ -890,13 +900,15 @@ where collator_key, overseer_handle, 
announce_block| { - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( - task_manager.spawn_handle(), - client.clone(), - transaction_pool, - prometheus_registry, - telemetry, - ); + let proposer_factory = + sc_basic_authorship::ProposerFactory::with_proof_recording_extension( + task_manager.spawn_handle(), + client.clone(), + transaction_pool, + prometheus_registry, + telemetry.clone(), + Some(get_extension_factory()), + ); let free_for_all = cumulus_client_consensus_relay_chain::build_relay_chain_consensus( cumulus_client_consensus_relay_chain::BuildRelayChainConsensusParams { @@ -1161,13 +1173,15 @@ where announce_block| { let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( - task_manager.spawn_handle(), - client.clone(), - transaction_pool, - prometheus_registry, - telemetry.clone(), - ); + let proposer_factory = + sc_basic_authorship::ProposerFactory::with_proof_recording_extension( + task_manager.spawn_handle(), + client.clone(), + transaction_pool, + prometheus_registry, + telemetry.clone(), + Some(get_extension_factory()), + ); let proposer = Proposer::new(proposer_factory); let collator_service = CollatorService::new( @@ -1465,13 +1479,15 @@ pub async fn start_contracts_rococo_node( announce_block| { let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( - task_manager.spawn_handle(), - client.clone(), - transaction_pool, - prometheus_registry, - telemetry.clone(), - ); + let proposer_factory = + sc_basic_authorship::ProposerFactory::with_proof_recording_extension( + task_manager.spawn_handle(), + client.clone(), + transaction_pool, + prometheus_registry, + telemetry.clone(), + Some(get_extension_factory()), + ); let proposer = Proposer::new(proposer_factory); let collator_service = CollatorService::new( diff 
--git a/cumulus/test/service/Cargo.toml b/cumulus/test/service/Cargo.toml index 70c21abdfb93..9832bf3e17c1 100644 --- a/cumulus/test/service/Cargo.toml +++ b/cumulus/test/service/Cargo.toml @@ -65,6 +65,7 @@ polkadot-overseer = { path = "../../../polkadot/node/overseer" } cumulus-client-cli = { path = "../../client/cli" } parachains-common = { path = "../../parachains/common" } cumulus-client-consensus-common = { path = "../../client/consensus/common" } +cumulus-client-clawback = { path = "../../client/clawback" } cumulus-client-consensus-relay-chain = { path = "../../client/consensus/relay-chain" } cumulus-client-service = { path = "../../client/service" } cumulus-primitives-core = { path = "../../primitives/core" } diff --git a/cumulus/test/service/benches/block_import.rs b/cumulus/test/service/benches/block_import.rs index b79598b15302..bf8db8a7ed6b 100644 --- a/cumulus/test/service/benches/block_import.rs +++ b/cumulus/test/service/benches/block_import.rs @@ -52,8 +52,9 @@ fn benchmark_block_import(c: &mut Criterion) { utils::create_benchmarking_transfer_extrinsics(&client, &src_accounts, &dst_accounts); let parent_hash = client.usage_info().chain.best_hash; - let mut block_builder = - client.new_block_at(parent_hash, Default::default(), RecordProof::No).unwrap(); + let mut block_builder = client + .new_block_at(parent_hash, Default::default(), RecordProof::No, None) + .unwrap(); for extrinsic in extrinsics { block_builder.push(extrinsic).unwrap(); } diff --git a/cumulus/test/service/benches/block_import_glutton.rs b/cumulus/test/service/benches/block_import_glutton.rs index b49db9f449e9..19bf8ff3fee2 100644 --- a/cumulus/test/service/benches/block_import_glutton.rs +++ b/cumulus/test/service/benches/block_import_glutton.rs @@ -64,7 +64,7 @@ fn benchmark_block_import(c: &mut Criterion) { let parent_hash = client.usage_info().chain.best_hash; let parent_header = client.header(parent_hash).expect("Just fetched this hash.").unwrap(); let mut block_builder = - 
client.new_block_at(parent_hash, Default::default(), RecordProof::No).unwrap(); + client.new_block_at(parent_hash, Default::default(), RecordProof::No, None).unwrap(); block_builder .push(utils::extrinsic_set_validation_data(parent_header.clone()).clone()) .unwrap(); diff --git a/cumulus/test/service/benches/block_production.rs b/cumulus/test/service/benches/block_production.rs index 1b868d736302..7881a5f44731 100644 --- a/cumulus/test/service/benches/block_production.rs +++ b/cumulus/test/service/benches/block_production.rs @@ -75,7 +75,7 @@ fn benchmark_block_production(c: &mut Criterion) { || extrinsics.clone(), |extrinsics| { let mut block_builder = client - .new_block_at(best_hash, Default::default(), RecordProof::Yes) + .new_block_at(best_hash, Default::default(), RecordProof::Yes, None) .unwrap(); for extrinsic in extrinsics { block_builder.push(extrinsic).unwrap(); @@ -94,7 +94,7 @@ fn benchmark_block_production(c: &mut Criterion) { || extrinsics.clone(), |extrinsics| { let mut block_builder = client - .new_block_at(best_hash, Default::default(), RecordProof::No) + .new_block_at(best_hash, Default::default(), RecordProof::No, None) .unwrap(); for extrinsic in extrinsics { block_builder.push(extrinsic).unwrap(); diff --git a/cumulus/test/service/benches/block_production_glutton.rs b/cumulus/test/service/benches/block_production_glutton.rs index 92a368c88c8d..bd8f9aba5cfa 100644 --- a/cumulus/test/service/benches/block_production_glutton.rs +++ b/cumulus/test/service/benches/block_production_glutton.rs @@ -77,7 +77,7 @@ fn benchmark_block_production_compute(c: &mut Criterion) { || (set_validation_data_extrinsic.clone(), set_time_extrinsic.clone()), |(validation_data, time)| { let mut block_builder = client - .new_block_at(best_hash, Default::default(), RecordProof::Yes) + .new_block_at(best_hash, Default::default(), RecordProof::Yes, None) .unwrap(); block_builder.push(validation_data).unwrap(); block_builder.push(time).unwrap(); @@ -99,7 +99,7 @@ fn 
benchmark_block_production_compute(c: &mut Criterion) { || (set_validation_data_extrinsic.clone(), set_time_extrinsic.clone()), |(validation_data, time)| { let mut block_builder = client - .new_block_at(best_hash, Default::default(), RecordProof::No) + .new_block_at(best_hash, Default::default(), RecordProof::No, None) .unwrap(); block_builder.push(validation_data).unwrap(); block_builder.push(time).unwrap(); diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index 7dcab7c5076c..faec18d11465 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -208,7 +208,12 @@ pub fn new_partial( sc_executor::NativeElseWasmExecutor::::new_with_wasm_executor(wasm); let (client, backend, keystore_container, task_manager) = - sc_service::new_full_parts::(config, None, executor)?; + sc_service::new_full_parts_extension::( + config, + None, + executor, + Some(cumulus_client_clawback::get_extension_factory()), + )?; let client = Arc::new(client); let block_import = ParachainBlockImport::new(client.clone(), backend.clone()); diff --git a/cumulus/zombienet/examples/small_network.toml b/cumulus/zombienet/examples/small_network.toml index 06ac0d0e5e78..482dece4af7e 100644 --- a/cumulus/zombienet/examples/small_network.toml +++ b/cumulus/zombienet/examples/small_network.toml @@ -22,4 +22,4 @@ chain = "asset-hub-kusama-local" validator = true image = "parity/polkadot-parachain:latest" command = "polkadot-parachain" - args = ["--force-authoring"] + args = ["--force-authoring -laura=debug,basic-authorship=debug"] From f71f137756ae644ba06726ba3891202089c68d21 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 29 Aug 2023 10:55:30 +0200 Subject: [PATCH 03/61] debugging: Implement SignedExtension --- Cargo.lock | 3 + cumulus/client/clawback/src/lib.rs | 1 + cumulus/parachains/common/Cargo.toml | 1 + cumulus/parachains/common/src/impls.rs | 81 ++++++++++++++++++- .../assets/asset-hub-kusama/src/lib.rs | 1 + 
cumulus/test/service/src/lib.rs | 2 +- cumulus/zombienet/examples/small_network.toml | 13 ++- 7 files changed, 96 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ec057dbd14a8..72aad2f49087 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3579,6 +3579,7 @@ version = "0.1.0" dependencies = [ "assert_matches", "bytes", + "cumulus-client-clawback", "cumulus-pallet-parachain-system-proc-macro", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", @@ -3595,6 +3596,7 @@ dependencies = [ "polkadot-parachain", "sc-client-api", "scale-info", + "sp-api", "sp-core", "sp-externalities", "sp-inherents", @@ -11148,6 +11150,7 @@ dependencies = [ name = "parachains-common" version = "1.0.0" dependencies = [ + "cumulus-client-clawback", "cumulus-primitives-core", "cumulus-primitives-utility", "frame-support", diff --git a/cumulus/client/clawback/src/lib.rs b/cumulus/client/clawback/src/lib.rs index 4ecfec26e852..f15dcdfacb98 100644 --- a/cumulus/client/clawback/src/lib.rs +++ b/cumulus/client/clawback/src/lib.rs @@ -40,6 +40,7 @@ use sp_runtime_interface::ExternalitiesExt; #[runtime_interface] pub trait ClawbackHostFunctions { fn current_storage_proof_size(&mut self) -> u32 { + tracing::info!(target:"skunert", "current_storage_proof_size is called"); match self.extension::() { Some(ext) => ext.current_storage_proof_size(), None => 0, diff --git a/cumulus/parachains/common/Cargo.toml b/cumulus/parachains/common/Cargo.toml index 7862f798059a..760a18f998e6 100644 --- a/cumulus/parachains/common/Cargo.toml +++ b/cumulus/parachains/common/Cargo.toml @@ -37,6 +37,7 @@ xcm-executor = { path = "../../../polkadot/xcm/xcm-executor", default-features = pallet-collator-selection = { path = "../../pallets/collator-selection", default-features = false } cumulus-primitives-core = { path = "../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../primitives/utility", default-features = false } +cumulus-client-clawback = { 
path = "../../client/clawback", default-features = false } [dev-dependencies] pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false} diff --git a/cumulus/parachains/common/src/impls.rs b/cumulus/parachains/common/src/impls.rs index 4a1f4f90d055..68241bef43a1 100644 --- a/cumulus/parachains/common/src/impls.rs +++ b/cumulus/parachains/common/src/impls.rs @@ -16,12 +16,22 @@ //! Auxiliary struct/enums for parachain runtimes. //! Taken from polkadot/runtime/common (at a21cd64) and adapted for parachains. -use frame_support::traits::{ - fungibles::{self, Balanced, Credit}, - Contains, ContainsPair, Currency, Get, Imbalance, OnUnbalanced, +use codec::{Decode, Encode}; +use frame_support::{ + dispatch::{DispatchInfo, PostDispatchInfo}, + traits::{ + fungibles::{self, Balanced, Credit}, + Contains, ContainsPair, Currency, Get, Imbalance, OnUnbalanced, + }, }; +use frame_system::Config; use pallet_asset_tx_payment::HandleCredit; -use sp_runtime::traits::Zero; +use scale_info::TypeInfo; +use sp_runtime::{ + traits::{DispatchInfoOf, Dispatchable, PostDispatchInfoOf, Zero}, + transaction_validity::TransactionValidityError, + DispatchResult, +}; use sp_std::marker::PhantomData; use xcm::latest::{AssetId, Fungibility::Fungible, MultiAsset, MultiLocation}; @@ -278,3 +288,66 @@ mod tests { ); } } + +#[derive(Encode, Decode, Clone, Eq, PartialEq, Default, TypeInfo)] +#[scale_info(skip_type_params(T))] +pub struct ClawbackExtension(sp_std::marker::PhantomData); + +impl core::fmt::Debug for ClawbackExtension { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + f.write_str("jap"); + Ok(()) + } +} + +impl ClawbackExtension where + T::RuntimeCall: Dispatchable +{ +} + +impl sp_runtime::traits::SignedExtension for ClawbackExtension +where + T::RuntimeCall: Dispatchable, +{ + const IDENTIFIER: &'static str = "Clawback"; + + type AccountId = T::AccountId; + type Call = T::RuntimeCall; + type AdditionalSigned = (); + 
type Pre = (); + + fn additional_signed( + &self, + ) -> Result + { + Ok(()) + } + + fn pre_dispatch( + self, + who: &Self::AccountId, + call: &Self::Call, + info: &sp_runtime::traits::DispatchInfoOf, + len: usize, + ) -> Result { + log::info!(target: "skunert", "Calling pre dispatch of my extension"); + let proof_size = + cumulus_client_clawback::clawback_host_functions::current_storage_proof_size(); + log::info!(target: "skunert","Got proof size: {}", proof_size); + Ok(()) + } + + fn post_dispatch( + _pre: Option, + _info: &DispatchInfoOf, + _post_info: &PostDispatchInfoOf, + _len: usize, + _result: &DispatchResult, + ) -> Result<(), TransactionValidityError> { + log::info!(target: "skunert", "Calling post dispatch of my extension"); + let proof_size = + cumulus_client_clawback::clawback_host_functions::current_storage_proof_size(); + log::info!(target: "skunert","Got proof size: {}", proof_size); + Ok(()) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs index a2056202431a..2099841c4772 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs @@ -884,6 +884,7 @@ pub type SignedExtra = ( frame_system::CheckNonce, frame_system::CheckWeight, pallet_asset_conversion_tx_payment::ChargeAssetTxPayment, + parachains_common::impls::ClawbackExtension, ); /// Unchecked extrinsic type as expected by this runtime. 
pub type UncheckedExtrinsic = diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index faec18d11465..efd9d19bfa17 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -113,7 +113,7 @@ pub type AnnounceBlockFn = Arc>) + Send + Sync>; pub struct RuntimeExecutor; impl sc_executor::NativeExecutionDispatch for RuntimeExecutor { - type ExtendHostFunctions = (); + type ExtendHostFunctions = (cumulus_client_clawback::clawback_host_functions::HostFunctions); fn dispatch(method: &str, data: &[u8]) -> Option> { cumulus_test_runtime::api::dispatch(method, data) diff --git a/cumulus/zombienet/examples/small_network.toml b/cumulus/zombienet/examples/small_network.toml index 482dece4af7e..1364a1b0dcc6 100644 --- a/cumulus/zombienet/examples/small_network.toml +++ b/cumulus/zombienet/examples/small_network.toml @@ -11,6 +11,10 @@ chain = "rococo-local" name = "bob" validator = true + [[relaychain.nodes]] + name = "charlie" + validator = true + [[parachains]] id = 2000 cumulus_based = true @@ -18,7 +22,14 @@ chain = "asset-hub-kusama-local" # run charlie as parachain collator [[parachains.collators]] - name = "charlie" + name = "one" + validator = true + image = "parity/polkadot-parachain:latest" + command = "polkadot-parachain" + args = ["--force-authoring -laura=debug,basic-authorship=debug"] + + [[parachains.collators]] + name = "two" validator = true image = "parity/polkadot-parachain:latest" command = "polkadot-parachain" From a30abdeb458d6939d86e8ba69f763fbf09451315 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 29 Aug 2023 10:56:12 +0200 Subject: [PATCH 04/61] Recorder Experiment --- cumulus/pallets/parachain-system/Cargo.toml | 2 + .../src/validate_block/implementation.rs | 39 ++++- .../state-machine/src/trie_backend.rs | 149 +++++++++++------- .../state-machine/src/trie_backend_essence.rs | 113 +++++++------ substrate/primitives/trie/src/lib.rs | 10 ++ substrate/primitives/trie/src/recorder.rs | 35 
++-- 6 files changed, 233 insertions(+), 115 deletions(-) diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 125fe01af4af..efe561bcf66a 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -21,6 +21,7 @@ sp-core = { path = "../../../substrate/primitives/core", default-features = fals sp-externalities = { path = "../../../substrate/primitives/externalities", default-features = false} sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false} sp-io = { path = "../../../substrate/primitives/io", default-features = false} +sp-api = { path = "../../../substrate/primitives/api", default-features = false} sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} sp-state-machine = { path = "../../../substrate/primitives/state-machine", default-features = false} sp-std = { path = "../../../substrate/primitives/std", default-features = false} @@ -35,6 +36,7 @@ xcm = { path = "../../../polkadot/xcm", default-features = false} cumulus-pallet-parachain-system-proc-macro = { path = "proc-macro", default-features = false } cumulus-primitives-core = { path = "../../primitives/core", default-features = false } cumulus-primitives-parachain-inherent = { path = "../../primitives/parachain-inherent", default-features = false } +cumulus-client-clawback = { path = "../../client/clawback", default-features = false } [dev-dependencies] assert_matches = "1.5" diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index 0eb83639018b..f2a1e230c851 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -32,12 +32,14 @@ use sp_externalities::{set_and_run_with_externalities, Externalities}; use 
sp_io::KillStorageResult; use sp_runtime::traits::{Block as BlockT, Extrinsic, HashingFor, Header as HeaderT}; use sp_std::prelude::*; -use sp_trie::MemoryDB; +use sp_trie::{MemoryDB, StorageProof}; +use trie_db::{RecordedForKey, TrieAccess}; type TrieBackend = sp_state_machine::TrieBackend< MemoryDB>, HashingFor, trie_cache::CacheProvider>, + RecorderImpl, >; type Ext<'a, B> = sp_state_machine::Ext<'a, HashingFor, TrieBackend>; @@ -46,6 +48,30 @@ fn with_externalities R, R>(f: F) -> R { sp_externalities::with_externalities(f).expect("Environmental externalities not set.") } +struct RecorderImpl {} +impl trie_db::TrieRecorder for RecorderImpl { + fn record<'a>(&mut self, access: TrieAccess<'a, H>) {} + + fn trie_nodes_recorded_for_key(&self, key: &[u8]) -> RecordedForKey { + RecordedForKey::None + } +} + +impl sp_trie::TrieRecorderProvider for RecorderImpl { + type Recorder<'a> = RecorderImpl; + + fn drain_storage_proof(self) -> StorageProof { + todo!() + } + + fn as_trie_recorder(&self, storage_root: H::Out) -> Self::Recorder<'_> { + todo!() + } + + fn estimate_encoded_size(&self) -> usize { + todo!() + } +} /// Validate the given parachain block. /// /// This function is doing roughly the following: @@ -90,9 +116,11 @@ where B::Extrinsic: ExtrinsicCall, ::Call: IsSubType>, { + sp_api::init_runtime_logger(); let block_data = codec::decode_from_bytes::>(block_data) .expect("Invalid parachain block data"); + log::info!(target:"skunert", "Hello World from validate block!"); let parent_header = codec::decode_from_bytes::(parent_head.clone()).expect("Invalid parent head"); @@ -118,6 +146,7 @@ where sp_std::mem::drop(storage_proof); + let recorder = RecorderImpl {}; let cache_provider = trie_cache::CacheProvider::new(); // We use the storage root of the `parent_head` to ensure that it is the correct root. // This is already being done above while creating the in-memory db, but let's be paranoid!! 
@@ -126,6 +155,7 @@ where *parent_header.state_root(), cache_provider, ) + .with_recorder(recorder) .build(); let _guard = ( @@ -165,6 +195,8 @@ where .replace_implementation(host_default_child_storage_next_key), sp_io::offchain_index::host_set.replace_implementation(host_offchain_index_set), sp_io::offchain_index::host_clear.replace_implementation(host_offchain_index_clear), + cumulus_client_clawback::clawback_host_functions::host_current_storage_proof_size + .replace_implementation(reclaim_pov_weight), ); run_with_externalities::(&backend, || { @@ -303,6 +335,11 @@ fn host_storage_clear(key: &[u8]) { with_externalities(|ext| ext.place_storage(key.to_vec(), None)) } +fn reclaim_pov_weight() -> u32 { + log::info!(target: "skunert", "Calling my replaced method."); + 0 +} + fn host_storage_root(version: StateVersion) -> Vec { with_externalities(|ext| ext.storage_root(version)) } diff --git a/substrate/primitives/state-machine/src/trie_backend.rs b/substrate/primitives/state-machine/src/trie_backend.rs index cc7132181f90..8038589531dc 100644 --- a/substrate/primitives/state-machine/src/trie_backend.rs +++ b/substrate/primitives/state-machine/src/trie_backend.rs @@ -26,19 +26,20 @@ use crate::{ }; use codec::Codec; +use core::marker::PhantomData; #[cfg(feature = "std")] use hash_db::HashDB; use hash_db::Hasher; use sp_core::storage::{ChildInfo, StateVersion}; -use sp_trie::PrefixedMemoryDB; #[cfg(feature = "std")] use sp_trie::{ cache::{LocalTrieCache, TrieCache}, recorder::Recorder, - MemoryDB, StorageProof, + MemoryDB, }; #[cfg(not(feature = "std"))] use sp_trie::{Error, NodeCodec}; +use sp_trie::{PrefixedMemoryDB, StorageProof, TrieRecorderProvider}; use trie_db::TrieCache as TrieCacheT; #[cfg(not(feature = "std"))] use trie_db::{node::NodeOwned, CachedValue}; @@ -153,52 +154,87 @@ impl TrieCacheProvider for UnimplementedCacheProvider { } } +#[cfg(not(feature = "std"))] +pub struct UnimplementedRecorderProvider { + // Not strictly necessary, but the H bound allows 
to use this as a drop-in + // replacement for the `LocalTrieCache` in no-std contexts. + _phantom: core::marker::PhantomData, + // Statically prevents construction. + _infallible: core::convert::Infallible, +} + +#[cfg(not(feature = "std"))] +impl trie_db::TrieRecorder for UnimplementedRecorderProvider { + fn record<'a>(&mut self, access: trie_db::TrieAccess<'a, H::Out>) { + todo!() + } + + fn trie_nodes_recorded_for_key(&self, key: &[u8]) -> trie_db::RecordedForKey { + todo!() + } +} + +#[cfg(not(feature = "std"))] +impl TrieRecorderProvider for UnimplementedRecorderProvider { + type Recorder<'a> = UnimplementedRecorderProvider where H: 'a; + + fn drain_storage_proof(self) -> StorageProof { + unimplemented!() + } + + fn as_trie_recorder(&self, storage_root: H::Out) -> Self::Recorder<'_> { + unimplemented!() + } + + fn estimate_encoded_size(&self) -> usize { + unimplemented!() + } +} + #[cfg(feature = "std")] type DefaultCache = LocalTrieCache; #[cfg(not(feature = "std"))] type DefaultCache = UnimplementedCacheProvider; +#[cfg(feature = "std")] +type DefaultRecorder = sp_trie::recorder::Recorder; + +#[cfg(not(feature = "std"))] +type DefaultRecorder = UnimplementedRecorderProvider; + /// Builder for creating a [`TrieBackend`]. -pub struct TrieBackendBuilder, H: Hasher, C = DefaultCache> { +pub struct TrieBackendBuilder< + S: TrieBackendStorage, + H: Hasher, + C = DefaultCache, + R = DefaultRecorder, +> { storage: S, root: H::Out, - #[cfg(feature = "std")] - recorder: Option>, + recorder: Option, cache: Option, } -impl TrieBackendBuilder> +impl TrieBackendBuilder where S: TrieBackendStorage, H: Hasher, { /// Create a new builder instance. pub fn new(storage: S, root: H::Out) -> Self { - Self { - storage, - root, - #[cfg(feature = "std")] - recorder: None, - cache: None, - } + Self { storage, root, recorder: None, cache: None } } } -impl TrieBackendBuilder +impl TrieBackendBuilder where S: TrieBackendStorage, H: Hasher, { /// Create a new builder instance. 
pub fn new_with_cache(storage: S, root: H::Out, cache: C) -> Self { - Self { - storage, - root, - #[cfg(feature = "std")] - recorder: None, - cache: Some(cache), - } + Self { storage, root, recorder: None, cache: Some(cache) } } /// Wrap the given [`TrieBackend`]. /// @@ -207,53 +243,47 @@ where /// backend. /// /// The backend storage and the cache will be taken from `other`. - pub fn wrap(other: &TrieBackend) -> TrieBackendBuilder<&S, H, &C> { + pub fn wrap(other: &TrieBackend) -> TrieBackendBuilder<&S, H, &C, R> { TrieBackendBuilder { storage: other.essence.backend_storage(), root: *other.essence.root(), - #[cfg(feature = "std")] recorder: None, cache: other.essence.trie_node_cache.as_ref(), } } /// Use the given optional `recorder` for the to be configured [`TrieBackend`]. - #[cfg(feature = "std")] - pub fn with_optional_recorder(self, recorder: Option>) -> Self { + pub fn with_optional_recorder(self, recorder: Option) -> Self { Self { recorder, ..self } } /// Use the given `recorder` for the to be configured [`TrieBackend`]. - #[cfg(feature = "std")] - pub fn with_recorder(self, recorder: Recorder) -> Self { + pub fn with_recorder(self, recorder: R) -> Self { Self { recorder: Some(recorder), ..self } } /// Use the given optional `cache` for the to be configured [`TrieBackend`]. - pub fn with_optional_cache(self, cache: Option) -> TrieBackendBuilder { + pub fn with_optional_cache(self, cache: Option) -> TrieBackendBuilder { TrieBackendBuilder { cache, root: self.root, storage: self.storage, - #[cfg(feature = "std")] recorder: self.recorder, } } /// Use the given `cache` for the to be configured [`TrieBackend`]. - pub fn with_cache(self, cache: LC) -> TrieBackendBuilder { + pub fn with_cache(self, cache: LC) -> TrieBackendBuilder { TrieBackendBuilder { cache: Some(cache), root: self.root, storage: self.storage, - #[cfg(feature = "std")] recorder: self.recorder, } } /// Build the configured [`TrieBackend`]. 
- #[cfg(feature = "std")] - pub fn build(self) -> TrieBackend { + pub fn build(self) -> TrieBackend { TrieBackend { essence: TrieBackendEssence::new_with_cache_and_recorder( self.storage, @@ -264,27 +294,18 @@ where next_storage_key_cache: Default::default(), } } - - /// Build the configured [`TrieBackend`]. - #[cfg(not(feature = "std"))] - pub fn build(self) -> TrieBackend { - TrieBackend { - essence: TrieBackendEssence::new_with_cache(self.storage, self.root, self.cache), - next_storage_key_cache: Default::default(), - } - } } /// A cached iterator. -struct CachedIter +struct CachedIter where H: Hasher, { last_key: sp_std::vec::Vec, - iter: RawIter, + iter: RawIter, } -impl Default for CachedIter +impl Default for CachedIter where H: Hasher, { @@ -310,23 +331,32 @@ fn access_cache(cell: &CacheCell, callback: impl FnOnce(&mut T) -> R) - } /// Patricia trie-based backend. Transaction type is an overlay of changes to commit. -pub struct TrieBackend, H: Hasher, C = DefaultCache> { - pub(crate) essence: TrieBackendEssence, - next_storage_key_cache: CacheCell>>, +pub struct TrieBackend< + S: TrieBackendStorage, + H: Hasher, + C = DefaultCache, + R = DefaultRecorder, +> { + pub(crate) essence: TrieBackendEssence, + next_storage_key_cache: CacheCell>>, } -impl, H: Hasher, C: TrieCacheProvider + Send + Sync> - TrieBackend +impl< + S: TrieBackendStorage, + H: Hasher, + C: TrieCacheProvider + Send + Sync, + R: TrieRecorderProvider + Send + Sync, + > TrieBackend where H::Out: Codec, { #[cfg(test)] - pub(crate) fn from_essence(essence: TrieBackendEssence) -> Self { + pub(crate) fn from_essence(essence: TrieBackendEssence) -> Self { Self { essence, next_storage_key_cache: Default::default() } } /// Get backend essence reference. - pub fn essence(&self) -> &TrieBackendEssence { + pub fn essence(&self) -> &TrieBackendEssence { &self.essence } @@ -358,28 +388,31 @@ where /// Extract the [`StorageProof`]. /// /// This only returns `Some` when there was a recorder set. 
- #[cfg(feature = "std")] pub fn extract_proof(mut self) -> Option { self.essence.recorder.take().map(|r| r.drain_storage_proof()) } } -impl, H: Hasher, C: TrieCacheProvider> sp_std::fmt::Debug - for TrieBackend +impl, H: Hasher, C: TrieCacheProvider, R: TrieRecorderProvider> + sp_std::fmt::Debug for TrieBackend { fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { write!(f, "TrieBackend") } } -impl, H: Hasher, C: TrieCacheProvider + Send + Sync> Backend - for TrieBackend +impl< + S: TrieBackendStorage, + H: Hasher, + C: TrieCacheProvider + Send + Sync, + R: TrieRecorderProvider + Send + Sync, + > Backend for TrieBackend where H::Out: Ord + Codec, { type Error = crate::DefaultError; type TrieBackendStorage = S; - type RawIter = crate::trie_backend_essence::RawIter; + type RawIter = crate::trie_backend_essence::RawIter; fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { self.essence.storage_hash(key) diff --git a/substrate/primitives/state-machine/src/trie_backend_essence.rs b/substrate/primitives/state-machine/src/trie_backend_essence.rs index 4bb51f4a1343..18e19b4c4cdb 100644 --- a/substrate/primitives/state-machine/src/trie_backend_essence.rs +++ b/substrate/primitives/state-machine/src/trie_backend_essence.rs @@ -28,7 +28,7 @@ use hash_db::{self, AsHashDB, HashDB, HashDBRef, Hasher, Prefix}; #[cfg(feature = "std")] use parking_lot::RwLock; use sp_core::storage::{ChildInfo, ChildType, StateVersion}; -use sp_std::{boxed::Box, marker::PhantomData, vec::Vec}; +use sp_std::{boxed::Box, marker::PhantomData, sync::Arc, vec::Vec}; #[cfg(feature = "std")] use sp_trie::recorder::Recorder; use sp_trie::{ @@ -36,10 +36,10 @@ use sp_trie::{ read_child_trie_value, read_trie_value, trie_types::{TrieDBBuilder, TrieError}, DBValue, KeySpacedDB, NodeCodec, PrefixedMemoryDB, Trie, TrieCache, TrieDBRawIterator, - TrieRecorder, + TrieRecorder, TrieRecorderProvider, }; #[cfg(feature = "std")] -use std::{collections::HashMap, sync::Arc}; +use 
std::collections::HashMap; // In this module, we only use layout for read operation and empty root, // where V1 and V0 are equivalent. use sp_trie::LayoutV1 as Layout; @@ -82,7 +82,7 @@ enum IterState { } /// A raw iterator over the storage. -pub struct RawIter +pub struct RawIter where H: Hasher, { @@ -92,25 +92,26 @@ where child_info: Option, trie_iter: TrieDBRawIterator>, state: IterState, - _phantom: PhantomData<(S, C)>, + _phantom: PhantomData<(S, C, R)>, } -impl RawIter +impl RawIter where H: Hasher, S: TrieBackendStorage, H::Out: Codec + Ord, C: TrieCacheProvider + Send + Sync, + R: TrieRecorderProvider + Send + Sync, { #[inline] - fn prepare( + fn prepare( &mut self, - backend: &TrieBackendEssence, + backend: &TrieBackendEssence, callback: impl FnOnce( &sp_trie::TrieDB>, &mut TrieDBRawIterator>, - ) -> Option::Out>>>>, - ) -> Option> { + ) -> Option::Out>>>>, + ) -> Option> { if !matches!(self.state, IterState::Pending) { return None } @@ -138,7 +139,7 @@ where } } -impl Default for RawIter +impl Default for RawIter where H: Hasher, { @@ -155,14 +156,15 @@ where } } -impl StorageIterator for RawIter +impl StorageIterator for RawIter where H: Hasher, S: TrieBackendStorage, H::Out: Codec + Ord, C: TrieCacheProvider + Send + Sync, + R: TrieRecorderProvider + Send + Sync, { - type Backend = crate::TrieBackend; + type Backend = crate::TrieBackend; type Error = crate::DefaultError; #[inline] @@ -203,18 +205,17 @@ where } /// Patricia trie-based pairs storage essence. -pub struct TrieBackendEssence, H: Hasher, C> { +pub struct TrieBackendEssence, H: Hasher, C, R> { storage: S, root: H::Out, empty: H::Out, #[cfg(feature = "std")] pub(crate) cache: Arc>>, pub(crate) trie_node_cache: Option, - #[cfg(feature = "std")] - pub(crate) recorder: Option>, + pub(crate) recorder: Option, } -impl, H: Hasher, C> TrieBackendEssence { +impl, H: Hasher, C, R> TrieBackendEssence { /// Create new trie-based backend. 
pub fn new(storage: S, root: H::Out) -> Self { Self::new_with_cache(storage, root, None) @@ -229,23 +230,22 @@ impl, H: Hasher, C> TrieBackendEssence { #[cfg(feature = "std")] cache: Arc::new(RwLock::new(Cache::new())), trie_node_cache: cache, - #[cfg(feature = "std")] recorder: None, } } /// Create new trie-based backend. - #[cfg(feature = "std")] pub fn new_with_cache_and_recorder( storage: S, root: H::Out, cache: Option, - recorder: Option>, + recorder: Option, ) -> Self { TrieBackendEssence { storage, root, empty: H::hash(&[0u8]), + #[cfg(feature = "std")] cache: Arc::new(RwLock::new(Cache::new())), trie_node_cache: cache, recorder, @@ -288,19 +288,21 @@ impl, H: Hasher, C> TrieBackendEssence { } } -impl, H: Hasher, C: TrieCacheProvider> TrieBackendEssence { +impl, H: Hasher, C: TrieCacheProvider, R: TrieRecorderProvider> + TrieBackendEssence +{ /// Call the given closure passing it the recorder and the cache. /// /// If the given `storage_root` is `None`, `self.root` will be used. #[inline] - fn with_recorder_and_cache( + fn with_recorder_and_cache( &self, storage_root: Option, callback: impl FnOnce( Option<&mut dyn TrieRecorder>, Option<&mut dyn TrieCache>>, - ) -> R, - ) -> R { + ) -> RE, + ) -> RE { let storage_root = storage_root.unwrap_or_else(|| self.root); let mut cache = self.trie_node_cache.as_ref().map(|c| c.as_trie_db_cache(storage_root)); let cache = cache.as_mut().map(|c| c as _); @@ -329,14 +331,14 @@ impl, H: Hasher, C: TrieCacheProvider> TrieBackendEs /// for the correct storage root. The given `storage_root` corresponds to the root of the "old" /// trie. If the value is not given, `self.root` is used. 
#[cfg(feature = "std")] - fn with_recorder_and_cache_for_storage_root( + fn with_recorder_and_cache_for_storage_root( &self, storage_root: Option, callback: impl FnOnce( Option<&mut dyn TrieRecorder>, Option<&mut dyn TrieCache>>, - ) -> (Option, R), - ) -> R { + ) -> (Option, RE), + ) -> RE { let storage_root = storage_root.unwrap_or_else(|| self.root); let mut recorder = self.recorder.as_ref().map(|r| r.as_trie_recorder(storage_root)); let recorder = match recorder.as_mut() { @@ -362,14 +364,14 @@ impl, H: Hasher, C: TrieCacheProvider> TrieBackendEs } #[cfg(not(feature = "std"))] - fn with_recorder_and_cache_for_storage_root( + fn with_recorder_and_cache_for_storage_root( &self, _storage_root: Option, callback: impl FnOnce( Option<&mut dyn TrieRecorder>, Option<&mut dyn TrieCache>>, - ) -> (Option, R), - ) -> R { + ) -> (Option, RE), + ) -> RE { if let Some(local_cache) = self.trie_node_cache.as_ref() { let mut cache = local_cache.as_trie_db_mut_cache(); @@ -386,20 +388,24 @@ impl, H: Hasher, C: TrieCacheProvider> TrieBackendEs } } -impl, H: Hasher, C: TrieCacheProvider + Send + Sync> - TrieBackendEssence +impl< + S: TrieBackendStorage, + H: Hasher, + C: TrieCacheProvider + Send + Sync, + R: TrieRecorderProvider + Send + Sync, + > TrieBackendEssence where H::Out: Codec + Ord, { /// Calls the given closure with a [`TrieDb`] constructed for the given /// storage root and (optionally) child trie. #[inline] - fn with_trie_db( + fn with_trie_db( &self, root: H::Out, child_info: Option<&ChildInfo>, - callback: impl FnOnce(&sp_trie::TrieDB>) -> R, - ) -> R { + callback: impl FnOnce(&sp_trie::TrieDB>) -> RE, + ) -> RE { let backend = self as &dyn HashDBRef>; let db = child_info .as_ref() @@ -575,7 +581,7 @@ where } /// Create a raw iterator over the storage. 
- pub fn raw_iter(&self, args: IterArgs) -> Result> { + pub fn raw_iter(&self, args: IterArgs) -> Result> { let root = if let Some(child_info) = args.child_info.as_ref() { let root = match self.child_root(&child_info)? { Some(root) => root, @@ -797,19 +803,28 @@ where } } -impl, H: Hasher, C: TrieCacheProvider + Send + Sync> - AsHashDB for TrieBackendEssence +impl< + S: TrieBackendStorage, + H: Hasher, + C: TrieCacheProvider + Send + Sync, + R: TrieRecorderProvider + Send + Sync, + > AsHashDB for TrieBackendEssence { fn as_hash_db<'b>(&'b self) -> &'b (dyn HashDB + 'b) { self } + fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn HashDB + 'b) { self } } -impl, H: Hasher, C: TrieCacheProvider + Send + Sync> HashDB - for TrieBackendEssence +impl< + S: TrieBackendStorage, + H: Hasher, + C: TrieCacheProvider + Send + Sync, + R: TrieRecorderProvider + Send + Sync, + > HashDB for TrieBackendEssence { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { if *key == self.empty { @@ -841,8 +856,12 @@ impl, H: Hasher, C: TrieCacheProvider + Send + Sync> } } -impl, H: Hasher, C: TrieCacheProvider + Send + Sync> - HashDBRef for TrieBackendEssence +impl< + S: TrieBackendStorage, + H: Hasher, + C: TrieCacheProvider + Send + Sync, + R: TrieRecorderProvider + Send + Sync, + > HashDBRef for TrieBackendEssence { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { HashDB::get(self, key, prefix) @@ -894,7 +913,10 @@ mod test { .expect("insert failed"); }; - let essence_1 = TrieBackendEssence::<_, _, LocalTrieCache<_>>::new(mdb, root_1); + let essence_1 = + TrieBackendEssence::<_, _, LocalTrieCache<_>, sp_trie::recorder::Recorder<_>>::new( + mdb, root_1, + ); let mdb = essence_1.backend_storage().clone(); let essence_1 = TrieBackend::from_essence(essence_1); @@ -904,7 +926,10 @@ mod test { assert_eq!(essence_1.next_storage_key(b"5"), Ok(Some(b"6".to_vec()))); assert_eq!(essence_1.next_storage_key(b"6"), Ok(None)); - let essence_2 = TrieBackendEssence::<_, _, 
LocalTrieCache<_>>::new(mdb, root_2); + let essence_2 = + TrieBackendEssence::<_, _, LocalTrieCache<_>, sp_trie::recorder::Recorder<_>>::new( + mdb, root_2, + ); assert_eq!(essence_2.next_child_storage_key(child_info, b"2"), Ok(Some(b"3".to_vec()))); assert_eq!(essence_2.next_child_storage_key(child_info, b"3"), Ok(Some(b"4".to_vec()))); diff --git a/substrate/primitives/trie/src/lib.rs b/substrate/primitives/trie/src/lib.rs index c5a3d4ace338..f5edbd2c488f 100644 --- a/substrate/primitives/trie/src/lib.rs +++ b/substrate/primitives/trie/src/lib.rs @@ -150,6 +150,16 @@ where } } +pub trait TrieRecorderProvider { + type Recorder<'a>: trie_db::TrieRecorder + 'a + where + Self: 'a; + + fn drain_storage_proof(self) -> StorageProof; + fn as_trie_recorder(&self, storage_root: H::Out) -> Self::Recorder<'_>; + fn estimate_encoded_size(&self) -> usize; +} + /// TrieDB error over `TrieConfiguration` trait. pub type TrieError = trie_db::TrieError, CError>; /// Reexport from `hash_db`, with genericity set for `Hasher` trait. diff --git a/substrate/primitives/trie/src/recorder.rs b/substrate/primitives/trie/src/recorder.rs index 557df1e6f883..000ff8383341 100644 --- a/substrate/primitives/trie/src/recorder.rs +++ b/substrate/primitives/trie/src/recorder.rs @@ -23,7 +23,7 @@ use crate::{NodeCodec, StorageProof}; use codec::Encode; use hash_db::Hasher; -use parking_lot::Mutex; +use parking_lot::{Mutex, MutexGuard}; use std::{ collections::{HashMap, HashSet}, marker::PhantomData, @@ -112,11 +112,8 @@ impl Recorder { /// /// NOTE: This locks a mutex that stays locked until the return value is dropped. 
#[inline] - pub fn as_trie_recorder( - &self, - storage_root: H::Out, - ) -> impl trie_db::TrieRecorder + '_ { - TrieRecorder:: { + pub fn as_trie_recorder(&self, storage_root: H::Out) -> TrieRecorder<'_, H> { + TrieRecorder:: { inner: self.inner.lock(), storage_root, encoded_size_estimation: self.encoded_size_estimation.clone(), @@ -232,14 +229,30 @@ impl Recorder { } /// The [`TrieRecorder`](trie_db::TrieRecorder) implementation. -struct TrieRecorder { - inner: I, +pub struct TrieRecorder<'a, H: Hasher> { + inner: MutexGuard<'a, RecorderInner>, storage_root: H::Out, encoded_size_estimation: Arc, _phantom: PhantomData, } -impl>> TrieRecorder { +impl crate::TrieRecorderProvider for Recorder { + type Recorder<'a> = TrieRecorder<'a, H> where H: 'a; + + fn drain_storage_proof(self) -> StorageProof { + Recorder::drain_storage_proof(self) + } + + fn as_trie_recorder(&self, storage_root: H::Out) -> Self::Recorder<'_> { + self.as_trie_recorder(storage_root) + } + + fn estimate_encoded_size(&self) -> usize { + self.estimate_encoded_size() + } +} + +impl<'a, H: Hasher> TrieRecorder<'a, H> { /// Update the recorded keys entry for the given `full_key`. 
fn update_recorded_keys(&mut self, full_key: &[u8], access: RecordedForKey) { let inner = self.inner.deref_mut(); @@ -283,9 +296,7 @@ impl>> TrieRecorder } } -impl>> trie_db::TrieRecorder - for TrieRecorder -{ +impl<'a, H: Hasher> trie_db::TrieRecorder for TrieRecorder<'a, H> { fn record(&mut self, access: TrieAccess) { let mut encoded_size_update = 0; From 84eb1e767548ea936095a7e433a0223977bdb903 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Mon, 4 Sep 2023 16:49:14 +0200 Subject: [PATCH 05/61] Make recorder impl compile --- .../src/validate_block/implementation.rs | 38 ++--- .../src/validate_block/mod.rs | 4 + .../src/validate_block/trie_recorder.rs | 151 ++++++++++++++++++ .../state-machine/src/trie_backend.rs | 6 +- .../state-machine/src/trie_backend_essence.rs | 69 +++----- 5 files changed, 185 insertions(+), 83 deletions(-) create mode 100644 cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index 587b10e3b4e4..4c5d0fcc3e83 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -16,7 +16,7 @@ //! The actual implementation of the validate block functionality. 
-use super::{trie_cache, MemoryOptimizedValidationParams}; +use super::{trie_cache, trie_recorder, MemoryOptimizedValidationParams}; use cumulus_primitives_core::{ relay_chain::Hash as RHash, ParachainBlockData, PersistedValidationData, }; @@ -25,21 +25,27 @@ use cumulus_primitives_parachain_inherent::ParachainInherentData; use polkadot_parachain::primitives::{HeadData, RelayChainBlockNumber, ValidationResult}; use codec::Encode; - use frame_support::traits::{ExecuteBlock, ExtrinsicCall, Get, IsSubType}; use sp_core::storage::{ChildInfo, StateVersion}; use sp_externalities::{set_and_run_with_externalities, Externalities}; use sp_io::KillStorageResult; use sp_runtime::traits::{Block as BlockT, Extrinsic, HashingFor, Header as HeaderT}; use sp_std::prelude::*; +use sp_std::{ + boxed::Box, + cell::{RefCell, RefMut}, + collections::btree_set::BTreeSet, +}; +use sp_trie::NodeCodec; use sp_trie::{MemoryDB, StorageProof}; use trie_db::{RecordedForKey, TrieAccess}; +use trie_recorder::RecorderProvider; type TrieBackend = sp_state_machine::TrieBackend< MemoryDB>, HashingFor, trie_cache::CacheProvider>, - RecorderImpl, + RecorderProvider>, >; type Ext<'a, B> = sp_state_machine::Ext<'a, HashingFor, TrieBackend>; @@ -48,30 +54,6 @@ fn with_externalities R, R>(f: F) -> R { sp_externalities::with_externalities(f).expect("Environmental externalities not set.") } -struct RecorderImpl {} -impl trie_db::TrieRecorder for RecorderImpl { - fn record<'a>(&mut self, access: TrieAccess<'a, H>) {} - - fn trie_nodes_recorded_for_key(&self, key: &[u8]) -> RecordedForKey { - RecordedForKey::None - } -} - -impl sp_trie::TrieRecorderProvider for RecorderImpl { - type Recorder<'a> = RecorderImpl; - - fn drain_storage_proof(self) -> StorageProof { - todo!() - } - - fn as_trie_recorder(&self, storage_root: H::Out) -> Self::Recorder<'_> { - todo!() - } - - fn estimate_encoded_size(&self) -> usize { - todo!() - } -} /// Validate the given parachain block. 
/// /// This function is doing roughly the following: @@ -146,7 +128,7 @@ where sp_std::mem::drop(storage_proof); - let recorder = RecorderImpl {}; + let recorder = RecorderProvider::new(); let cache_provider = trie_cache::CacheProvider::new(); // We use the storage root of the `parent_head` to ensure that it is the correct root. // This is already being done above while creating the in-memory db, but let's be paranoid!! diff --git a/cumulus/pallets/parachain-system/src/validate_block/mod.rs b/cumulus/pallets/parachain-system/src/validate_block/mod.rs index ab8ea43ec7c6..8865d243bc45 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/mod.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/mod.rs @@ -26,6 +26,10 @@ mod tests; #[doc(hidden)] mod trie_cache; +#[cfg(not(feature = "std"))] +#[doc(hidden)] +mod trie_recorder; + #[cfg(not(feature = "std"))] #[doc(hidden)] pub use bytes; diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs new file mode 100644 index 000000000000..29c1543bb89c --- /dev/null +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs @@ -0,0 +1,151 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! The actual implementation of the validate block functionality. 
+ +use super::{trie_cache, MemoryOptimizedValidationParams}; +use cumulus_primitives_core::{ + relay_chain::Hash as RHash, ParachainBlockData, PersistedValidationData, +}; +use cumulus_primitives_parachain_inherent::ParachainInherentData; + +use polkadot_parachain::primitives::{HeadData, RelayChainBlockNumber, ValidationResult}; + +use codec::Encode; + +use core::borrow::BorrowMut; +use frame_support::traits::{ExecuteBlock, ExtrinsicCall, Get, IsSubType}; +use sp_core::storage::{ChildInfo, StateVersion}; +use sp_externalities::{set_and_run_with_externalities, Externalities}; +use sp_io::KillStorageResult; +use sp_runtime::traits::{Block as BlockT, Extrinsic, HashingFor, Header as HeaderT}; +use sp_std::prelude::*; +use sp_std::{ + boxed::Box, + cell::{RefCell, RefMut}, + collections::btree_set::BTreeSet, +}; +use sp_trie::NodeCodec; +use sp_trie::{MemoryDB, StorageProof}; +use trie_db::{Hasher, RecordedForKey, TrieAccess}; + +type TrieBackend = sp_state_machine::TrieBackend< + MemoryDB>, + HashingFor, + trie_cache::CacheProvider>, + RecorderProvider>, +>; + +type Ext<'a, B> = sp_state_machine::Ext<'a, HashingFor, TrieBackend>; + +fn with_externalities R, R>(f: F) -> R { + sp_externalities::with_externalities(f).expect("Environmental externalities not set.") +} + +pub(crate) struct RecorderProvider { + seen_nodes: RefCell>, + encoded_size: RefCell, +} + +impl RecorderProvider { + pub fn new() -> Self { + Self { seen_nodes: Default::default(), encoded_size: Default::default() } + } +} + +pub(crate) struct SizeRecorder<'a, H: Hasher> { + seen_nodes: RefMut<'a, BTreeSet>, + encoded_size: RefMut<'a, usize>, +} + +impl<'a, H: trie_db::Hasher> trie_db::TrieRecorder for SizeRecorder<'a, H> { + fn record<'b>(&mut self, access: TrieAccess<'b, H::Out>) { + log::info!(target: "skunert", "recorder: record"); + let mut encoded_size_update = 0; + + match access { + TrieAccess::NodeOwned { hash, node_owned } => { + if !self.seen_nodes.get(&hash).is_some() { + let node = 
node_owned.to_encoded::>(); + encoded_size_update += node.encoded_size(); + log::info!( + target: "skunert", + "Recording node({encoded_size_update})", + ); + //TODO skunert: Check if this is correct, original has transaction handling + self.seen_nodes.insert(hash); + } + }, + TrieAccess::EncodedNode { hash, encoded_node } => { + if !self.seen_nodes.get(&hash).is_some() { + let node = encoded_node.into_owned(); + encoded_size_update += node.encoded_size(); + log::info!( + target: "skunert", + "Recording node ({encoded_size_update} bytes)", + ); + self.seen_nodes.insert(hash); + } + }, + TrieAccess::Value { hash, value, .. } => { + if !self.seen_nodes.get(&hash).is_some() { + let value = value.into_owned(); + + encoded_size_update += value.encoded_size(); + log::info!( + target: "skunert", + "Recording value ({encoded_size_update} bytes)", + ); + + self.seen_nodes.insert(hash); + } + }, + TrieAccess::Hash { .. } => {}, + TrieAccess::NonExisting { .. } => {}, + }; + + *self.encoded_size += encoded_size_update; + } + + fn trie_nodes_recorded_for_key(&self, key: &[u8]) -> RecordedForKey { + RecordedForKey::None + } +} + +impl sp_trie::TrieRecorderProvider for RecorderProvider { + type Recorder<'a> = SizeRecorder<'a, H> where H: 'a; + + fn drain_storage_proof(self) -> StorageProof { + panic!("Tried to drain storage proof") + } + + fn as_trie_recorder(&self, storage_root: H::Out) -> Self::Recorder<'_> { + log::info!(target: "skunert", "validate_block: as_trie_recorder"); + SizeRecorder { + encoded_size: self.encoded_size.borrow_mut(), + seen_nodes: self.seen_nodes.borrow_mut(), + } + } + + fn estimate_encoded_size(&self) -> usize { + log::info!(target: "skunert", "validate_block: estimate_encoded_size"); + *self.encoded_size.borrow() + } +} + +// This is safe here since we are single-threaded in WASM +unsafe impl Send for RecorderProvider {} +unsafe impl Sync for RecorderProvider {} diff --git a/substrate/primitives/state-machine/src/trie_backend.rs 
b/substrate/primitives/state-machine/src/trie_backend.rs index 8038589531dc..f84ea5a4eee5 100644 --- a/substrate/primitives/state-machine/src/trie_backend.rs +++ b/substrate/primitives/state-machine/src/trie_backend.rs @@ -26,7 +26,6 @@ use crate::{ }; use codec::Codec; -use core::marker::PhantomData; #[cfg(feature = "std")] use hash_db::HashDB; use hash_db::Hasher; @@ -34,7 +33,6 @@ use sp_core::storage::{ChildInfo, StateVersion}; #[cfg(feature = "std")] use sp_trie::{ cache::{LocalTrieCache, TrieCache}, - recorder::Recorder, MemoryDB, }; #[cfg(not(feature = "std"))] @@ -995,8 +993,8 @@ pub mod tests { .storage_root(iter::once((&b"new-key"[..], Some(&b"new-value"[..]))), state_version); assert!(!tx.drain().is_empty()); assert!( - new_root != - test_trie(state_version, None, None) + new_root + != test_trie(state_version, None, None) .storage_root(iter::empty(), state_version) .0 ); diff --git a/substrate/primitives/state-machine/src/trie_backend_essence.rs b/substrate/primitives/state-machine/src/trie_backend_essence.rs index 18e19b4c4cdb..d96d33f1853d 100644 --- a/substrate/primitives/state-machine/src/trie_backend_essence.rs +++ b/substrate/primitives/state-machine/src/trie_backend_essence.rs @@ -29,8 +29,6 @@ use hash_db::{self, AsHashDB, HashDB, HashDBRef, Hasher, Prefix}; use parking_lot::RwLock; use sp_core::storage::{ChildInfo, ChildType, StateVersion}; use sp_std::{boxed::Box, marker::PhantomData, sync::Arc, vec::Vec}; -#[cfg(feature = "std")] -use sp_trie::recorder::Recorder; use sp_trie::{ child_delta_trie_root, delta_trie_root, empty_child_trie_root, read_child_trie_hash, read_child_trie_value, read_trie_value, @@ -113,7 +111,7 @@ where ) -> Option::Out>>>>, ) -> Option> { if !matches!(self.state, IterState::Pending) { - return None + return None; } let result = backend.with_trie_db(self.root, self.child_info.as_ref(), |db| { @@ -127,8 +125,8 @@ where }, Some(Err(error)) => { self.state = IterState::FinishedIncomplete; - if matches!(*error, 
TrieError::IncompleteDatabase(_)) && - self.stop_on_incomplete_database + if matches!(*error, TrieError::IncompleteDatabase(_)) + && self.stop_on_incomplete_database { None } else { @@ -307,20 +305,12 @@ impl, H: Hasher, C: TrieCacheProvider, R: TrieRecord let mut cache = self.trie_node_cache.as_ref().map(|c| c.as_trie_db_cache(storage_root)); let cache = cache.as_mut().map(|c| c as _); - #[cfg(feature = "std")] - { - let mut recorder = self.recorder.as_ref().map(|r| r.as_trie_recorder(storage_root)); - let recorder = match recorder.as_mut() { - Some(recorder) => Some(recorder as &mut dyn TrieRecorder), - None => None, - }; - callback(recorder, cache) - } - - #[cfg(not(feature = "std"))] - { - callback(None, cache) - } + let mut recorder = self.recorder.as_ref().map(|r| r.as_trie_recorder(storage_root)); + let recorder = match recorder.as_mut() { + Some(recorder) => Some(recorder as &mut dyn TrieRecorder), + None => None, + }; + callback(recorder, cache) } /// Call the given closure passing it the recorder and the cache. @@ -330,7 +320,6 @@ impl, H: Hasher, C: TrieCacheProvider, R: TrieRecord /// the new storage root. This is required to register the changes in the cache /// for the correct storage root. The given `storage_root` corresponds to the root of the "old" /// trie. If the value is not given, `self.root` is used. 
- #[cfg(feature = "std")] fn with_recorder_and_cache_for_storage_root( &self, storage_root: Option, @@ -362,30 +351,6 @@ impl, H: Hasher, C: TrieCacheProvider, R: TrieRecord result } - - #[cfg(not(feature = "std"))] - fn with_recorder_and_cache_for_storage_root( - &self, - _storage_root: Option, - callback: impl FnOnce( - Option<&mut dyn TrieRecorder>, - Option<&mut dyn TrieCache>>, - ) -> (Option, RE), - ) -> RE { - if let Some(local_cache) = self.trie_node_cache.as_ref() { - let mut cache = local_cache.as_trie_db_mut_cache(); - - let (new_root, r) = callback(None, Some(&mut cache)); - - if let Some(new_root) = new_root { - local_cache.merge(cache, new_root); - } - - r - } else { - callback(None, None).1 - } - } } impl< @@ -438,7 +403,7 @@ where #[cfg(feature = "std")] { if let Some(result) = self.cache.read().child_root.get(child_info.storage_key()) { - return Ok(*result) + return Ok(*result); } } @@ -594,7 +559,7 @@ where if self.root == Default::default() { // A special-case for an empty storage root. 
- return Ok(Default::default()) + return Ok(Default::default()); } let trie_iter = self @@ -679,7 +644,7 @@ where self.with_recorder_and_cache_for_storage_root(Some(child_root), |recorder, cache| { let mut eph = Ephemeral::new(self.backend_storage(), &mut write_overlay); match match state_version { - StateVersion::V0 => + StateVersion::V0 => { child_delta_trie_root::, _, _, _, _, _, _>( child_info.keyspace(), &mut eph, @@ -687,8 +652,9 @@ where delta, recorder, cache, - ), - StateVersion::V1 => + ) + }, + StateVersion::V1 => { child_delta_trie_root::, _, _, _, _, _, _>( child_info.keyspace(), &mut eph, @@ -696,7 +662,8 @@ where delta, recorder, cache, - ), + ) + }, } { Ok(ret) => (Some(ret), ret), Err(e) => { @@ -828,7 +795,7 @@ impl< { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { if *key == self.empty { - return Some([0u8].to_vec()) + return Some([0u8].to_vec()); } match self.storage.get(key, prefix) { Ok(x) => x, From 649ec11e0908e788b2408a7a8d45b224f31d7af7 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 5 Sep 2023 15:46:09 +0200 Subject: [PATCH 06/61] Imports --- .../src/validate_block/implementation.rs | 10 ++-------- .../src/validate_block/trie_recorder.rs | 11 ++--------- 2 files changed, 4 insertions(+), 17 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index 4c5d0fcc3e83..b6c36ccfb745 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -31,14 +31,8 @@ use sp_externalities::{set_and_run_with_externalities, Externalities}; use sp_io::KillStorageResult; use sp_runtime::traits::{Block as BlockT, Extrinsic, HashingFor, Header as HeaderT}; use sp_std::prelude::*; -use sp_std::{ - boxed::Box, - cell::{RefCell, RefMut}, - collections::btree_set::BTreeSet, -}; -use sp_trie::NodeCodec; -use sp_trie::{MemoryDB, 
StorageProof}; -use trie_db::{RecordedForKey, TrieAccess}; +use sp_std::sync::Arc; +use sp_trie::{MemoryDB, TrieRecorderProvider}; use trie_recorder::RecorderProvider; type TrieBackend = sp_state_machine::TrieBackend< diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs index 29c1543bb89c..903d82559325 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs @@ -16,19 +16,12 @@ //! The actual implementation of the validate block functionality. -use super::{trie_cache, MemoryOptimizedValidationParams}; -use cumulus_primitives_core::{ - relay_chain::Hash as RHash, ParachainBlockData, PersistedValidationData, -}; -use cumulus_primitives_parachain_inherent::ParachainInherentData; - -use polkadot_parachain::primitives::{HeadData, RelayChainBlockNumber, ValidationResult}; +use super::trie_cache; use codec::Encode; use core::borrow::BorrowMut; -use frame_support::traits::{ExecuteBlock, ExtrinsicCall, Get, IsSubType}; -use sp_core::storage::{ChildInfo, StateVersion}; +use frame_support::traits::Get; use sp_externalities::{set_and_run_with_externalities, Externalities}; use sp_io::KillStorageResult; use sp_runtime::traits::{Block as BlockT, Extrinsic, HashingFor, Header as HeaderT}; From b17e91c9487adf9beb7c2823e471ae77a2f24593 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 5 Sep 2023 16:34:31 +0200 Subject: [PATCH 07/61] Simplify --- cumulus/client/clawback/src/lib.rs | 39 +---------------- .../src/validate_block/implementation.rs | 2 +- substrate/client/block-builder/src/lib.rs | 43 +------------------ .../state-machine/src/trie_backend.rs | 7 +++ 4 files changed, 11 insertions(+), 80 deletions(-) diff --git a/cumulus/client/clawback/src/lib.rs b/cumulus/client/clawback/src/lib.rs index f15dcdfacb98..7cf10341b71f 100644 --- a/cumulus/client/clawback/src/lib.rs +++ 
b/cumulus/client/clawback/src/lib.rs @@ -41,43 +41,6 @@ use sp_runtime_interface::ExternalitiesExt; pub trait ClawbackHostFunctions { fn current_storage_proof_size(&mut self) -> u32 { tracing::info!(target:"skunert", "current_storage_proof_size is called"); - match self.extension::() { - Some(ext) => ext.current_storage_proof_size(), - None => 0, - } + self.proof_size().unwrap_or_default() } } - -pub trait ReportPovUsage: Send + Sync { - fn current_storage_proof_size(&self) -> u32; -} - -#[cfg(feature = "std")] -sp_externalities::decl_extension! { - pub struct PovUsageExt(PovUsageReporter); -} - -pub struct PovUsageReporter { - recorder: Box, -} - -impl PovUsageReporter { - fn new(recorder: Box) -> Self { - PovUsageReporter { recorder } - } - - fn current_storage_proof_size(&self) -> u32 { - self.recorder.estimate_proof_size() as u32 - } -} - -#[cfg(feature = "std")] -pub fn get_extension_factory() -> ExtensionProducer { - std::sync::Arc::new(|recorder| { - ( - core::any::TypeId::of::(), - Box::new(PovUsageExt(PovUsageReporter::new(recorder))) - as Box, - ) - }) as Arc<_> -} diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index b6c36ccfb745..436c5299d90b 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -313,7 +313,7 @@ fn host_storage_clear(key: &[u8]) { fn reclaim_pov_weight() -> u32 { log::info!(target: "skunert", "Calling my replaced method."); - 0 + with_externalities(|ext| ext.proof_size()).unwrap_or(0) } fn host_storage_root(version: StateVersion) -> Vec { diff --git a/substrate/client/block-builder/src/lib.rs b/substrate/client/block-builder/src/lib.rs index 20a9f14c56de..1878e7627480 100644 --- a/substrate/client/block-builder/src/lib.rs +++ b/substrate/client/block-builder/src/lib.rs @@ -29,8 +29,7 @@ use codec::Encode; use sp_api::{ - 
ApiExt, ApiRef, Core, ExtensionProducer, ProvideRuntimeApi, StorageChanges, StorageProof, - TransactionOutcome, + ApiExt, ApiRef, Core, ProvideRuntimeApi, StorageChanges, StorageProof, TransactionOutcome, }; use sp_blockchain::{ApplyExtrinsicFailed, Error}; use sp_core::traits::CallContext; @@ -120,7 +119,6 @@ where parent: Block::Hash, inherent_digests: Digest, record_proof: R, - extension: Option, ) -> sp_blockchain::Result>; /// Create a new block, built on the head of the chain. @@ -153,16 +151,13 @@ where /// While proof recording is enabled, all accessed trie nodes are saved. /// These recorded trie nodes can be used by a third party to prove the /// output of this block builder without having access to the full storage. - /// The given externality extension `extension` will be registered for the - /// runtime instance used to build the block. - pub fn new_with_extension( + pub fn new( api: &'a A, parent_hash: Block::Hash, parent_number: NumberFor, record_proof: RecordProof, inherent_digests: Digest, backend: &'a B, - extension_producer: Option, ) -> Result { let header = <::Header as HeaderT>::new( parent_number + One::one(), @@ -180,16 +175,6 @@ where api.record_proof(); } - if let Some(proof_recorder) = api.proof_recorder() { - if let Some(extension_producer) = extension_producer { - log::info!(target:"skunert", "Registering extension in Block-builder"); - let extension = extension_producer(Box::new(proof_recorder)); - api.register_extension_with_type_id(extension.0, extension.1); - } else { - log::info!(target:"skunert", "not registering extension in Block-builder"); - } - } - api.set_call_context(CallContext::Onchain); api.initialize_block(parent_hash, &header)?; @@ -208,30 +193,6 @@ where }) } - /// Create a new instance of builder based on the given `parent_hash` and `parent_number`. - /// - /// While proof recording is enabled, all accessed trie nodes are saved. 
- /// These recorded trie nodes can be used by a third party to prove the - /// output of this block builder without having access to the full storage. - pub fn new( - api: &'a A, - parent_hash: Block::Hash, - parent_number: NumberFor, - record_proof: RecordProof, - inherent_digests: Digest, - backend: &'a B, - ) -> Result { - Self::new_with_extension( - api, - parent_hash, - parent_number, - record_proof, - inherent_digests, - backend, - None, - ) - } - /// Push onto the block's list of extrinsics. /// /// This will ensure the extrinsic can be validly executed (by executing it). diff --git a/substrate/primitives/state-machine/src/trie_backend.rs b/substrate/primitives/state-machine/src/trie_backend.rs index f84ea5a4eee5..3fc8391f94d8 100644 --- a/substrate/primitives/state-machine/src/trie_backend.rs +++ b/substrate/primitives/state-machine/src/trie_backend.rs @@ -517,6 +517,13 @@ where fn wipe(&self) -> Result<(), Self::Error> { Ok(()) } + + fn proof_size(&self) -> Option { + self.essence + .recorder + .as_ref() + .map(|rec| rec.estimate_encoded_size().try_into().unwrap_or(0)) + } } #[cfg(feature = "std")] From dfefa16744d1f06ed11a528282cf32eca1b608f0 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 5 Sep 2023 17:26:30 +0200 Subject: [PATCH 08/61] Simplify --- cumulus/client/clawback/src/lib.rs | 12 -- .../parachain-template/node/src/service.rs | 8 +- cumulus/polkadot-parachain/src/service.rs | 69 ++++----- cumulus/test/client/src/block_builder.rs | 2 +- .../service/benches/block_import_glutton.rs | 2 +- .../test/service/benches/block_production.rs | 4 +- .../benches/block_production_glutton.rs | 4 +- cumulus/test/service/src/lib.rs | 19 ++- .../node/test/client/src/block_builder.rs | 2 +- polkadot/node/test/client/src/lib.rs | 2 +- .../bin/node/cli/benches/block_production.rs | 10 +- substrate/bin/node/cli/src/service.rs | 6 +- substrate/bin/node/testing/src/bench.rs | 22 +-- .../client/api/src/execution_extensions.rs | 9 -- 
.../basic-authorship/src/basic_authorship.rs | 41 +---- substrate/client/consensus/babe/src/tests.rs | 2 +- substrate/client/consensus/beefy/src/tests.rs | 8 +- .../client/consensus/grandpa/src/tests.rs | 6 +- .../merkle-mountain-range/src/test_utils.rs | 2 +- substrate/client/network/sync/src/lib.rs | 5 +- substrate/client/network/test/src/lib.rs | 2 +- .../rpc-spec-v2/src/chain_head/tests.rs | 28 ++-- substrate/client/service/src/builder.rs | 54 +++---- .../service/src/chain_ops/export_blocks.rs | 9 +- .../service/src/chain_ops/export_raw_state.rs | 2 +- .../service/src/chain_ops/import_blocks.rs | 25 +-- .../client/service/src/client/block_rules.rs | 4 +- .../service/src/client/call_executor.rs | 1 - substrate/client/service/src/client/client.rs | 145 +++++++++--------- .../service/src/client/wasm_override.rs | 4 +- substrate/client/service/src/lib.rs | 20 +-- .../client/service/test/src/client/mod.rs | 129 ++++++++-------- .../api/proc-macro/src/impl_runtime_apis.rs | 4 - .../proc-macro/src/mock_impl_runtime_apis.rs | 4 - substrate/primitives/api/src/lib.rs | 14 -- .../api/test/tests/runtime_calls.rs | 2 +- .../externalities/src/extensions.rs | 6 - substrate/test-utils/client/src/lib.rs | 8 +- .../runtime/client/src/trait_tests.rs | 54 +++---- 39 files changed, 323 insertions(+), 427 deletions(-) diff --git a/cumulus/client/clawback/src/lib.rs b/cumulus/client/clawback/src/lib.rs index 7cf10341b71f..5290359e3649 100644 --- a/cumulus/client/clawback/src/lib.rs +++ b/cumulus/client/clawback/src/lib.rs @@ -23,19 +23,7 @@ extern crate sp_runtime_interface; extern crate sp_std; extern crate sp_trie; -use sp_externalities::Extension; use sp_runtime_interface::runtime_interface; -use sp_trie::ProofSizeEstimationProvider; -#[cfg(feature = "std")] -use std::sync::Arc; - -#[cfg(feature = "std")] -use sp_api::ExtensionProducer; - -use sp_std::boxed::Box; - -#[cfg(feature = "std")] -use sp_runtime_interface::ExternalitiesExt; #[runtime_interface] pub trait 
ClawbackHostFunctions { diff --git a/cumulus/parachain-template/node/src/service.rs b/cumulus/parachain-template/node/src/service.rs index 441d08f75323..bb7daff8d943 100644 --- a/cumulus/parachain-template/node/src/service.rs +++ b/cumulus/parachain-template/node/src/service.rs @@ -3,7 +3,6 @@ // std use std::{sync::Arc, time::Duration}; -use cumulus_client_clawback::get_extension_factory; use cumulus_client_cli::CollatorOptions; // Local Runtime Types use parachain_template_runtime::{ @@ -103,11 +102,11 @@ pub fn new_partial( let executor = ParachainExecutor::new_with_wasm_executor(wasm); let (client, backend, keystore_container, task_manager) = - sc_service::new_full_parts_extension::( + sc_service::new_full_parts_record_import::( config, telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), executor, - Some(get_extension_factory()), + true, )?; let client = Arc::new(client); @@ -380,13 +379,12 @@ fn start_consensus( let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording_extension( + let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( task_manager.spawn_handle(), client.clone(), transaction_pool, prometheus_registry, telemetry.clone(), - Some(get_extension_factory()), ); let proposer = Proposer::new(proposer_factory); diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs index f250b50a15f4..10808d0910a3 100644 --- a/cumulus/polkadot-parachain/src/service.rs +++ b/cumulus/polkadot-parachain/src/service.rs @@ -15,7 +15,6 @@ // along with Cumulus. If not, see . 
use codec::Codec; -use cumulus_client_clawback::get_extension_factory; use cumulus_client_cli::CollatorOptions; use cumulus_client_collator::service::CollatorService; use cumulus_client_consensus_aura::collators::basic::{ @@ -291,11 +290,11 @@ where .build(); let (client, backend, keystore_container, task_manager) = - sc_service::new_full_parts_extension::( + sc_service::new_full_parts_record_import::( config, telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), executor, - Some(get_extension_factory()), + true, )?; let client = Arc::new(client); @@ -777,15 +776,13 @@ pub async fn start_rococo_parachain_node( announce_block| { let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; - let proposer_factory = - sc_basic_authorship::ProposerFactory::with_proof_recording_extension( - task_manager.spawn_handle(), - client.clone(), - transaction_pool, - prometheus_registry, - telemetry.clone(), - Some(get_extension_factory()), - ); + let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( + task_manager.spawn_handle(), + client.clone(), + transaction_pool, + prometheus_registry, + telemetry.clone(), + ); let proposer = Proposer::new(proposer_factory); let collator_service = CollatorService::new( @@ -900,15 +897,13 @@ where collator_key, overseer_handle, announce_block| { - let proposer_factory = - sc_basic_authorship::ProposerFactory::with_proof_recording_extension( - task_manager.spawn_handle(), - client.clone(), - transaction_pool, - prometheus_registry, - telemetry.clone(), - Some(get_extension_factory()), - ); + let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( + task_manager.spawn_handle(), + client.clone(), + transaction_pool, + prometheus_registry, + telemetry, + ); let free_for_all = cumulus_client_consensus_relay_chain::build_relay_chain_consensus( cumulus_client_consensus_relay_chain::BuildRelayChainConsensusParams { @@ -1173,15 +1168,13 @@ where announce_block| { let 
slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; - let proposer_factory = - sc_basic_authorship::ProposerFactory::with_proof_recording_extension( - task_manager.spawn_handle(), - client.clone(), - transaction_pool, - prometheus_registry, - telemetry.clone(), - Some(get_extension_factory()), - ); + let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( + task_manager.spawn_handle(), + client.clone(), + transaction_pool, + prometheus_registry, + telemetry.clone(), + ); let proposer = Proposer::new(proposer_factory); let collator_service = CollatorService::new( @@ -1479,15 +1472,13 @@ pub async fn start_contracts_rococo_node( announce_block| { let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; - let proposer_factory = - sc_basic_authorship::ProposerFactory::with_proof_recording_extension( - task_manager.spawn_handle(), - client.clone(), - transaction_pool, - prometheus_registry, - telemetry.clone(), - Some(get_extension_factory()), - ); + let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( + task_manager.spawn_handle(), + client.clone(), + transaction_pool, + prometheus_registry, + telemetry.clone(), + ); let proposer = Proposer::new(proposer_factory); let collator_service = CollatorService::new( diff --git a/cumulus/test/client/src/block_builder.rs b/cumulus/test/client/src/block_builder.rs index 974440e56c5d..2d930d1be597 100644 --- a/cumulus/test/client/src/block_builder.rs +++ b/cumulus/test/client/src/block_builder.rs @@ -72,7 +72,7 @@ fn init_block_builder( timestamp: u64, ) -> BlockBuilder<'_, Block, Client, Backend> { let mut block_builder = client - .new_block_at(at, Default::default(), true, None) + .new_block_at(at, Default::default(), true) .expect("Creates new block builder for test runtime"); let mut inherent_data = sp_inherents::InherentData::new(); diff --git a/cumulus/test/service/benches/block_import_glutton.rs 
b/cumulus/test/service/benches/block_import_glutton.rs index 19bf8ff3fee2..b49db9f449e9 100644 --- a/cumulus/test/service/benches/block_import_glutton.rs +++ b/cumulus/test/service/benches/block_import_glutton.rs @@ -64,7 +64,7 @@ fn benchmark_block_import(c: &mut Criterion) { let parent_hash = client.usage_info().chain.best_hash; let parent_header = client.header(parent_hash).expect("Just fetched this hash.").unwrap(); let mut block_builder = - client.new_block_at(parent_hash, Default::default(), RecordProof::No, None).unwrap(); + client.new_block_at(parent_hash, Default::default(), RecordProof::No).unwrap(); block_builder .push(utils::extrinsic_set_validation_data(parent_header.clone()).clone()) .unwrap(); diff --git a/cumulus/test/service/benches/block_production.rs b/cumulus/test/service/benches/block_production.rs index 7881a5f44731..1b868d736302 100644 --- a/cumulus/test/service/benches/block_production.rs +++ b/cumulus/test/service/benches/block_production.rs @@ -75,7 +75,7 @@ fn benchmark_block_production(c: &mut Criterion) { || extrinsics.clone(), |extrinsics| { let mut block_builder = client - .new_block_at(best_hash, Default::default(), RecordProof::Yes, None) + .new_block_at(best_hash, Default::default(), RecordProof::Yes) .unwrap(); for extrinsic in extrinsics { block_builder.push(extrinsic).unwrap(); @@ -94,7 +94,7 @@ fn benchmark_block_production(c: &mut Criterion) { || extrinsics.clone(), |extrinsics| { let mut block_builder = client - .new_block_at(best_hash, Default::default(), RecordProof::No, None) + .new_block_at(best_hash, Default::default(), RecordProof::No) .unwrap(); for extrinsic in extrinsics { block_builder.push(extrinsic).unwrap(); diff --git a/cumulus/test/service/benches/block_production_glutton.rs b/cumulus/test/service/benches/block_production_glutton.rs index bd8f9aba5cfa..92a368c88c8d 100644 --- a/cumulus/test/service/benches/block_production_glutton.rs +++ b/cumulus/test/service/benches/block_production_glutton.rs @@ -77,7 +77,7 
@@ fn benchmark_block_production_compute(c: &mut Criterion) { || (set_validation_data_extrinsic.clone(), set_time_extrinsic.clone()), |(validation_data, time)| { let mut block_builder = client - .new_block_at(best_hash, Default::default(), RecordProof::Yes, None) + .new_block_at(best_hash, Default::default(), RecordProof::Yes) .unwrap(); block_builder.push(validation_data).unwrap(); block_builder.push(time).unwrap(); @@ -99,7 +99,7 @@ fn benchmark_block_production_compute(c: &mut Criterion) { || (set_validation_data_extrinsic.clone(), set_time_extrinsic.clone()), |(validation_data, time)| { let mut block_builder = client - .new_block_at(best_hash, Default::default(), RecordProof::No, None) + .new_block_at(best_hash, Default::default(), RecordProof::No) .unwrap(); block_builder.push(validation_data).unwrap(); block_builder.push(time).unwrap(); diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index 0690ce1c661e..daa6f5b66fa7 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -113,7 +113,7 @@ pub type AnnounceBlockFn = Arc>) + Send + Sync>; pub struct RuntimeExecutor; impl sc_executor::NativeExecutionDispatch for RuntimeExecutor { - type ExtendHostFunctions = (cumulus_client_clawback::clawback_host_functions::HostFunctions); + type ExtendHostFunctions = cumulus_client_clawback::clawback_host_functions::HostFunctions; fn dispatch(method: &str, data: &[u8]) -> Option> { cumulus_test_runtime::api::dispatch(method, data) @@ -208,11 +208,8 @@ pub fn new_partial( sc_executor::NativeElseWasmExecutor::::new_with_wasm_executor(wasm); let (client, backend, keystore_container, task_manager) = - sc_service::new_full_parts_extension::( - config, - None, - executor, - Some(cumulus_client_clawback::get_extension_factory()), + sc_service::new_full_parts_record_import::( + config, None, executor, true, )?; let client = Arc::new(client); @@ -267,18 +264,20 @@ async fn build_relay_chain_interface( None, ) .map_err(|e| 
RelayChainError::Application(Box::new(e) as Box<_>))?, - cumulus_client_cli::RelayChainMode::ExternalRpc(rpc_target_urls) => + cumulus_client_cli::RelayChainMode::ExternalRpc(rpc_target_urls) => { return build_minimal_relay_chain_node_with_rpc( relay_chain_config, task_manager, rpc_target_urls, ) .await - .map(|r| r.0), - cumulus_client_cli::RelayChainMode::LightClient => + .map(|r| r.0) + }, + cumulus_client_cli::RelayChainMode::LightClient => { return build_minimal_relay_chain_node_light_client(relay_chain_config, task_manager) .await - .map(|r| r.0), + .map(|r| r.0) + }, }; task_manager.add_child(relay_chain_full_node.task_manager); diff --git a/polkadot/node/test/client/src/block_builder.rs b/polkadot/node/test/client/src/block_builder.rs index 203ca3e90626..b4ff050ff152 100644 --- a/polkadot/node/test/client/src/block_builder.rs +++ b/polkadot/node/test/client/src/block_builder.rs @@ -91,7 +91,7 @@ impl InitPolkadotBlockBuilder for Client { }; let mut block_builder = self - .new_block_at(hash, digest, false, None) + .new_block_at(hash, digest, false) .expect("Creates new block builder for test runtime"); let mut inherent_data = sp_inherents::InherentData::new(); diff --git a/polkadot/node/test/client/src/lib.rs b/polkadot/node/test/client/src/lib.rs index bd52d2c1b84c..5d97ffcdf1da 100644 --- a/polkadot/node/test/client/src/lib.rs +++ b/polkadot/node/test/client/src/lib.rs @@ -75,7 +75,7 @@ impl TestClientBuilderExt for TestClientBuilder { self.backend().clone(), executor.clone(), Default::default(), - ExecutionExtensions::new(Default::default(), Arc::new(executor), None), + ExecutionExtensions::new(Default::default(), Arc::new(executor)), ) .unwrap(); diff --git a/substrate/bin/node/cli/benches/block_production.rs b/substrate/bin/node/cli/benches/block_production.rs index 3a7a09c6d768..b877aa735022 100644 --- a/substrate/bin/node/cli/benches/block_production.rs +++ b/substrate/bin/node/cli/benches/block_production.rs @@ -191,9 +191,8 @@ fn block_production(c: 
&mut Criterion) { b.iter_batched( || extrinsics.clone(), |extrinsics| { - let mut block_builder = client - .new_block_at(best_hash, Default::default(), RecordProof::No, None) - .unwrap(); + let mut block_builder = + client.new_block_at(best_hash, Default::default(), RecordProof::No).unwrap(); for extrinsic in extrinsics { block_builder.push(extrinsic).unwrap(); } @@ -207,9 +206,8 @@ fn block_production(c: &mut Criterion) { b.iter_batched( || extrinsics.clone(), |extrinsics| { - let mut block_builder = client - .new_block_at(best_hash, Default::default(), RecordProof::Yes, None) - .unwrap(); + let mut block_builder = + client.new_block_at(best_hash, Default::default(), RecordProof::Yes).unwrap(); for extrinsic in extrinsics { block_builder.push(extrinsic).unwrap(); } diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs index cd20a87a902b..10c1e569f123 100644 --- a/substrate/bin/node/cli/src/service.rs +++ b/substrate/bin/node/cli/src/service.rs @@ -173,11 +173,11 @@ pub fn new_partial( let executor = sc_service::new_native_or_wasm_executor(&config); let (client, backend, keystore_container, task_manager) = - sc_service::new_full_parts_extension::( + sc_service::new_full_parts_record_import::( config, telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), executor, - None, + false, )?; let client = Arc::new(client); @@ -765,7 +765,7 @@ mod tests { sc_consensus_babe::authorship::claim_slot(slot.into(), &epoch, &keystore) .map(|(digest, _)| digest) { - break (babe_pre_digest, epoch_descriptor) + break (babe_pre_digest, epoch_descriptor); } slot += 1; diff --git a/substrate/bin/node/testing/src/bench.rs b/substrate/bin/node/testing/src/bench.rs index 279c8089ae8a..2ef3252762fb 100644 --- a/substrate/bin/node/testing/src/bench.rs +++ b/substrate/bin/node/testing/src/bench.rs @@ -91,7 +91,7 @@ pub fn drop_system_cache() { target: "bench-logistics", "Clearing system cache on windows is not supported. 
Benchmark might totally be wrong.", ); - return + return; } std::process::Command::new("sync") @@ -283,7 +283,7 @@ impl<'a> Iterator for BlockContentIterator<'a> { fn next(&mut self) -> Option { if self.content.size.map(|size| size <= self.iteration).unwrap_or(false) { - return None + return None; } let sender = self.keyring.at(self.iteration); @@ -299,22 +299,24 @@ impl<'a> Iterator for BlockContentIterator<'a> { signed_extra(0, kitchensink_runtime::ExistentialDeposit::get() + 1), )), function: match self.content.block_type { - BlockType::RandomTransfersKeepAlive => + BlockType::RandomTransfersKeepAlive => { RuntimeCall::Balances(BalancesCall::transfer_keep_alive { dest: sp_runtime::MultiAddress::Id(receiver), value: kitchensink_runtime::ExistentialDeposit::get() + 1, - }), + }) + }, BlockType::RandomTransfersReaping => { RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: sp_runtime::MultiAddress::Id(receiver), // Transfer so that ending balance would be 1 less than existential // deposit so that we kill the sender account. 
- value: 100 * DOLLARS - - (kitchensink_runtime::ExistentialDeposit::get() - 1), + value: 100 * DOLLARS + - (kitchensink_runtime::ExistentialDeposit::get() - 1), }) }, - BlockType::Noop => - RuntimeCall::System(SystemCall::remark { remark: Vec::new() }), + BlockType::Noop => { + RuntimeCall::System(SystemCall::remark { remark: Vec::new() }) + }, }, }, self.runtime_version.spec_version, @@ -405,18 +407,18 @@ impl BenchDb { ) .expect("Failed to create genesis block builder"); - // TODO skunert Check back if this is correct let client = sc_service::new_client( backend.clone(), executor.clone(), genesis_block_builder, None, None, - ExecutionExtensions::new(None, Arc::new(executor), None), + ExecutionExtensions::new(None, Arc::new(executor)), Box::new(task_executor.clone()), None, None, client_config, + false, ) .expect("Should not fail"); diff --git a/substrate/client/api/src/execution_extensions.rs b/substrate/client/api/src/execution_extensions.rs index dd8aef2ede00..6f927105df0b 100644 --- a/substrate/client/api/src/execution_extensions.rs +++ b/substrate/client/api/src/execution_extensions.rs @@ -23,7 +23,6 @@ //! extensions to support APIs for particular execution context & capabilities. 
use parking_lot::RwLock; -use sp_api::ExtensionProducer; use sp_core::traits::{ReadRuntimeVersion, ReadRuntimeVersionExt}; use sp_externalities::{Extension, Extensions}; use sp_runtime::traits::{Block as BlockT, NumberFor}; @@ -96,7 +95,6 @@ impl ExtensionsFactory pub struct ExecutionExtensions { extensions_factory: RwLock>>, read_runtime_version: Arc, - import_extension: Option, } impl ExecutionExtensions { @@ -104,22 +102,15 @@ impl ExecutionExtensions { pub fn new( extensions_factory: Option>>, read_runtime_version: Arc, - import_extension: Option, ) -> Self { Self { extensions_factory: extensions_factory .map(RwLock::new) .unwrap_or_else(|| RwLock::new(Box::new(()))), read_runtime_version, - import_extension, } } - /// Get extension that should be registered during block import - pub fn get_import_extension(&self) -> Option { - self.import_extension.clone() - } - /// Set the new extensions_factory pub fn set_extensions_factory(&self, maker: impl ExtensionsFactory + 'static) { *self.extensions_factory.write() = Box::new(maker); diff --git a/substrate/client/basic-authorship/src/basic_authorship.rs b/substrate/client/basic-authorship/src/basic_authorship.rs index 102df1c91847..b3a8f0d8970b 100644 --- a/substrate/client/basic-authorship/src/basic_authorship.rs +++ b/substrate/client/basic-authorship/src/basic_authorship.rs @@ -32,7 +32,7 @@ use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider}; use sc_client_api::backend; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_INFO}; use sc_transaction_pool_api::{InPoolTransaction, TransactionPool}; -use sp_api::{ApiExt, ExtensionProducer, ProvideRuntimeApi}; +use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_blockchain::{ApplyExtrinsicFailed::Validity, Error::ApplyExtrinsicFailed, HeaderBackend}; use sp_consensus::{DisableProofRecording, EnableProofRecording, ProofRecording, Proposal}; use sp_core::traits::SpawnNamed; @@ -84,9 +84,7 @@ pub struct ProposerFactory { telemetry: Option, /// When estimating the 
block size, should the proof be included? include_proof_in_block_size_estimation: bool, - /// Externalities extension to be used for block authoring - extension: Option, - /// Phantom member to pin the `Backend`/`ProofRecording` type. + /// phantom member to pin the `Backend`/`ProofRecording` type. _phantom: PhantomData<(B, PR)>, } @@ -111,7 +109,6 @@ impl ProposerFactory { telemetry, client, include_proof_in_block_size_estimation: false, - extension: None, _phantom: PhantomData, } } @@ -130,24 +127,6 @@ impl ProposerFactory { transaction_pool: Arc, prometheus: Option<&PrometheusRegistry>, telemetry: Option, - ) -> Self { - Self::with_proof_recording_extension( - spawn_handle, - client, - transaction_pool, - prometheus, - telemetry, - None, - ) - } - - pub fn with_proof_recording_extension( - spawn_handle: impl SpawnNamed + 'static, - client: Arc, - transaction_pool: Arc, - prometheus: Option<&PrometheusRegistry>, - telemetry: Option, - extension: Option, ) -> Self { ProposerFactory { client, @@ -158,7 +137,6 @@ impl ProposerFactory { soft_deadline_percent: DEFAULT_SOFT_DEADLINE_PERCENT, telemetry, include_proof_in_block_size_estimation: true, - extension, _phantom: PhantomData, } } @@ -233,7 +211,6 @@ where telemetry: self.telemetry.clone(), _phantom: PhantomData, include_proof_in_block_size_estimation: self.include_proof_in_block_size_estimation, - extension: self.extension.clone(), }; proposer @@ -275,7 +252,6 @@ pub struct Proposer { default_block_size_limit: usize, include_proof_in_block_size_estimation: bool, soft_deadline_percent: Percent, - extension: Option, telemetry: Option, _phantom: PhantomData<(B, PR)>, } @@ -352,20 +328,15 @@ where PR: ProofRecording, { async fn propose_with( - mut self, + self, inherent_data: InherentData, inherent_digests: Digest, deadline: time::Instant, block_size_limit: Option, ) -> Result, sp_blockchain::Error> { let propose_with_timer = time::Instant::now(); - - let mut block_builder = self.client.new_block_at( - 
self.parent_hash, - inherent_digests, - PR::ENABLED, - self.extension.take(), - )?; + let mut block_builder = + self.client.new_block_at(self.parent_hash, inherent_digests, PR::ENABLED)?; self.apply_inherents(&mut block_builder, inherent_data)?; @@ -998,7 +969,7 @@ mod tests { // 99 (header_size) + 718 (proof@initialize_block) + 246 (one Transfer extrinsic) let block_limit = { let builder = - client.new_block_at(genesis_header.hash(), Default::default(), true, None).unwrap(); + client.new_block_at(genesis_header.hash(), Default::default(), true).unwrap(); builder.estimate_block_size(true) + extrinsics[0].encoded_size() }; let block = block_on(proposer.propose( diff --git a/substrate/client/consensus/babe/src/tests.rs b/substrate/client/consensus/babe/src/tests.rs index 420a0177e2d0..b3843f8acfa0 100644 --- a/substrate/client/consensus/babe/src/tests.rs +++ b/substrate/client/consensus/babe/src/tests.rs @@ -99,7 +99,7 @@ impl DummyProposer { pre_digests: Digest, ) -> future::Ready, Error>> { let block_builder = - self.factory.client.new_block_at(self.parent_hash, pre_digests, false, None).unwrap(); + self.factory.client.new_block_at(self.parent_hash, pre_digests, false).unwrap(); let mut block = match block_builder.build().map_err(|e| e.into()) { Ok(b) => b.block, diff --git a/substrate/client/consensus/beefy/src/tests.rs b/substrate/client/consensus/beefy/src/tests.rs index a74a9023ffcb..3bb65e9d57f4 100644 --- a/substrate/client/consensus/beefy/src/tests.rs +++ b/substrate/client/consensus/beefy/src/tests.rs @@ -775,7 +775,7 @@ async fn beefy_importing_justifications() { }; let builder = full_client - .new_block_at(full_client.chain_info().genesis_hash, Default::default(), false, None) + .new_block_at(full_client.chain_info().genesis_hash, Default::default(), false) .unwrap(); let block = builder.build().unwrap().block; let hashof1 = block.header.hash(); @@ -792,7 +792,7 @@ async fn beefy_importing_justifications() { // Import block 2 with "valid" justification 
(beefy pallet genesis block not yet reached). let block_num = 2; - let builder = full_client.new_block_at(hashof1, Default::default(), false, None).unwrap(); + let builder = full_client.new_block_at(hashof1, Default::default(), false).unwrap(); let block = builder.build().unwrap().block; let hashof2 = block.header.hash(); @@ -824,7 +824,7 @@ async fn beefy_importing_justifications() { // Import block 3 with valid justification. let block_num = 3; - let builder = full_client.new_block_at(hashof2, Default::default(), false, None).unwrap(); + let builder = full_client.new_block_at(hashof2, Default::default(), false).unwrap(); let block = builder.build().unwrap().block; let hashof3 = block.header.hash(); let proof = crate::justification::tests::new_finality_proof(block_num, &good_set, keys); @@ -858,7 +858,7 @@ async fn beefy_importing_justifications() { // Import block 4 with invalid justification (incorrect validator set). let block_num = 4; - let builder = full_client.new_block_at(hashof3, Default::default(), false, None).unwrap(); + let builder = full_client.new_block_at(hashof3, Default::default(), false).unwrap(); let block = builder.build().unwrap().block; let hashof4 = block.header.hash(); let keys = &[BeefyKeyring::Alice]; diff --git a/substrate/client/consensus/grandpa/src/tests.rs b/substrate/client/consensus/grandpa/src/tests.rs index 3f1a9701af70..0175f7d1b473 100644 --- a/substrate/client/consensus/grandpa/src/tests.rs +++ b/substrate/client/consensus/grandpa/src/tests.rs @@ -898,7 +898,7 @@ async fn allows_reimporting_change_blocks() { let full_client = client.as_client(); let mut builder = full_client - .new_block_at(full_client.chain_info().genesis_hash, Default::default(), false, None) + .new_block_at(full_client.chain_info().genesis_hash, Default::default(), false) .unwrap(); add_scheduled_change( @@ -943,7 +943,7 @@ async fn test_bad_justification() { let full_client = client.as_client(); let mut builder = full_client - 
.new_block_at(full_client.chain_info().genesis_hash, Default::default(), false, None) + .new_block_at(full_client.chain_info().genesis_hash, Default::default(), false) .unwrap(); add_scheduled_change( @@ -1913,7 +1913,7 @@ async fn imports_justification_for_regular_blocks_on_import() { // create a new block (without importing it) let generate_block = |parent| { - let builder = full_client.new_block_at(parent, Default::default(), false, None).unwrap(); + let builder = full_client.new_block_at(parent, Default::default(), false).unwrap(); builder.build().unwrap().block }; diff --git a/substrate/client/merkle-mountain-range/src/test_utils.rs b/substrate/client/merkle-mountain-range/src/test_utils.rs index 1eb7ba78442f..010b48bb3d7d 100644 --- a/substrate/client/merkle-mountain-range/src/test_utils.rs +++ b/substrate/client/merkle-mountain-range/src/test_utils.rs @@ -125,7 +125,7 @@ impl MockClient { let mut client = self.client.lock(); let hash = client.expect_block_hash_from_id(&at).unwrap(); - let mut block_builder = client.new_block_at(hash, Default::default(), false, None).unwrap(); + let mut block_builder = client.new_block_at(hash, Default::default(), false).unwrap(); // Make sure the block has a different hash than its siblings block_builder .push_storage_change(b"name".to_vec(), Some(name.to_vec())) diff --git a/substrate/client/network/sync/src/lib.rs b/substrate/client/network/sync/src/lib.rs index beb10ef820c5..175c1c43f46f 100644 --- a/substrate/client/network/sync/src/lib.rs +++ b/substrate/client/network/sync/src/lib.rs @@ -3408,7 +3408,7 @@ mod test { fn build_block(client: &mut Arc, at: Option, fork: bool) -> Block { let at = at.unwrap_or_else(|| client.info().best_hash); - let mut block_builder = client.new_block_at(at, Default::default(), false, None).unwrap(); + let mut block_builder = client.new_block_at(at, Default::default(), false).unwrap(); if fork { block_builder.push_storage_change(vec![1, 2, 3], Some(vec![4, 5, 6])).unwrap(); @@ -3462,8 
+3462,7 @@ mod test { let mut client2 = client.clone(); let mut build_block_at = |at, import| { - let mut block_builder = - client2.new_block_at(at, Default::default(), false, None).unwrap(); + let mut block_builder = client2.new_block_at(at, Default::default(), false).unwrap(); // Make sure we generate a different block as fork block_builder.push_storage_change(vec![1, 2, 3], Some(vec![4, 5, 6])).unwrap(); diff --git a/substrate/client/network/test/src/lib.rs b/substrate/client/network/test/src/lib.rs index d5e0fceb26fb..2a20da5a556b 100644 --- a/substrate/client/network/test/src/lib.rs +++ b/substrate/client/network/test/src/lib.rs @@ -372,7 +372,7 @@ where let full_client = self.client.as_client(); let mut at = full_client.block_hash_from_id(&at).unwrap().unwrap(); for _ in 0..count { - let builder = full_client.new_block_at(at, Default::default(), false, None).unwrap(); + let builder = full_client.new_block_at(at, Default::default(), false).unwrap(); let block = edit_block(builder); let hash = block.header.hash(); trace!( diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index 725d74119c6d..1336cff84b6f 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -1278,9 +1278,8 @@ async fn follow_generates_initial_blocks() { let block_2_hash = block_2.header.hash(); client.import(BlockOrigin::Own, block_2.clone()).await.unwrap(); - let mut block_builder = client - .new_block_at(block_1.header.hash(), Default::default(), false, None) - .unwrap(); + let mut block_builder = + client.new_block_at(block_1.header.hash(), Default::default(), false).unwrap(); // This push is required as otherwise block 3 has the same hash as block 2 and won't get // imported block_builder @@ -1575,8 +1574,7 @@ async fn follow_prune_best_block() { client.import(BlockOrigin::Own, block_4.clone()).await.unwrap(); // Import block 2 as best on the fork. 
- let mut block_builder = - client.new_block_at(block_1_hash, Default::default(), false, None).unwrap(); + let mut block_builder = client.new_block_at(block_1_hash, Default::default(), false).unwrap(); // This push is required as otherwise block 3 has the same hash as block 2 and won't get // imported block_builder @@ -1718,9 +1716,8 @@ async fn follow_forks_pruned_block() { client.import(BlockOrigin::Own, block_3.clone()).await.unwrap(); // Block 4 with parent Block 1 is not the best imported. - let mut block_builder = client - .new_block_at(block_1.header.hash(), Default::default(), false, None) - .unwrap(); + let mut block_builder = + client.new_block_at(block_1.header.hash(), Default::default(), false).unwrap(); // This push is required as otherwise block 4 has the same hash as block 2 and won't get // imported block_builder @@ -1734,9 +1731,8 @@ async fn follow_forks_pruned_block() { let block_4 = block_builder.build().unwrap().block; client.import(BlockOrigin::Own, block_4.clone()).await.unwrap(); - let mut block_builder = client - .new_block_at(block_4.header.hash(), Default::default(), false, None) - .unwrap(); + let mut block_builder = + client.new_block_at(block_4.header.hash(), Default::default(), false).unwrap(); block_builder .push_transfer(Transfer { from: AccountKeyring::Bob.into(), @@ -1841,9 +1837,8 @@ async fn follow_report_multiple_pruned_block() { client.import(BlockOrigin::Own, block_3.clone()).await.unwrap(); // Block 4 with parent Block 1 is not the best imported. 
- let mut block_builder = client - .new_block_at(block_1.header.hash(), Default::default(), false, None) - .unwrap(); + let mut block_builder = + client.new_block_at(block_1.header.hash(), Default::default(), false).unwrap(); // This push is required as otherwise block 4 has the same hash as block 2 and won't get // imported block_builder @@ -1858,9 +1853,8 @@ async fn follow_report_multiple_pruned_block() { let block_4_hash = block_4.header.hash(); client.import(BlockOrigin::Own, block_4.clone()).await.unwrap(); - let mut block_builder = client - .new_block_at(block_4.header.hash(), Default::default(), false, None) - .unwrap(); + let mut block_builder = + client.new_block_at(block_4.header.hash(), Default::default(), false).unwrap(); block_builder .push_transfer(Transfer { from: AccountKeyring::Bob.into(), diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs index 65af33c6b836..0b971858dd9b 100644 --- a/substrate/client/service/src/builder.rs +++ b/substrate/client/service/src/builder.rs @@ -66,7 +66,7 @@ use sc_rpc_spec_v2::{chain_head::ChainHeadApiServer, transaction::TransactionApi use sc_telemetry::{telemetry, ConnectionMessage, Telemetry, TelemetryHandle, SUBSTRATE_INFO}; use sc_transaction_pool_api::{MaintainedTransactionPool, TransactionPool}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; -use sp_api::{CallApiAt, ExtensionProducer, ProvideRuntimeApi}; +use sp_api::{CallApiAt, ProvideRuntimeApi}; use sp_blockchain::{HeaderBackend, HeaderMetadata}; use sp_consensus::block_validation::{ BlockAnnounceValidator, Chain, DefaultBlockAnnounceValidator, @@ -96,8 +96,9 @@ impl KeystoreContainer { /// Construct KeystoreContainer pub fn new(config: &KeystoreConfig) -> Result { let keystore = Arc::new(match config { - KeystoreConfig::Path { path, password } => - LocalKeystore::open(path.clone(), password.clone())?, + KeystoreConfig::Path { path, password } => { + LocalKeystore::open(path.clone(), 
password.clone())? + }, KeystoreConfig::InMemory => LocalKeystore::in_memory(), }); @@ -129,11 +130,11 @@ where } /// Create the initial parts of a full node with the default genesis block builder. -pub fn new_full_parts_extension( +pub fn new_full_parts_record_import( config: &Configuration, telemetry: Option, executor: TExec, - import_extension_factory: Option, + enable_import_proof_recording: bool, ) -> Result, Error> where TBl: BlockT, @@ -154,7 +155,7 @@ where executor, backend, genesis_block_builder, - import_extension_factory, + enable_import_proof_recording, ) } /// Create the initial parts of a full node with the default genesis block builder. @@ -167,23 +168,7 @@ where TBl: BlockT, TExec: CodeExecutor + RuntimeVersionOf + Clone, { - let backend = new_db_backend(config.db_config())?; - - let genesis_block_builder = GenesisBlockBuilder::new( - config.chain_spec.as_storage_builder(), - !config.no_genesis(), - backend.clone(), - executor.clone(), - )?; - - new_full_parts_with_genesis_builder( - config, - telemetry, - executor, - backend, - genesis_block_builder, - None, - ) + new_full_parts_record_import(config, telemetry, executor, false) } /// Create the initial parts of a full node. 
@@ -193,7 +178,7 @@ pub fn new_full_parts_with_genesis_builder>, genesis_block_builder: TBuildGenesisBlock, - import_extension_factory: Option, + enable_import_proof_recording: bool, ) -> Result, Error> where TBl: BlockT, @@ -223,7 +208,6 @@ where let extensions = sc_client_api::execution_extensions::ExecutionExtensions::new( None, Arc::new(executor.clone()), - import_extension_factory.clone(), ); let wasm_runtime_substitutes = config @@ -263,6 +247,7 @@ where ), wasm_runtime_substitutes, }, + enable_import_proof_recording, )?; client @@ -316,6 +301,7 @@ pub fn new_client( prometheus_registry: Option, telemetry: Option, config: ClientConfig, + enable_import_proof_recording: bool, ) -> Result< Client< Backend, @@ -350,6 +336,7 @@ where prometheus_registry, telemetry, config, + enable_import_proof_recording, ) } @@ -774,13 +761,14 @@ where } = params; if warp_sync_params.is_none() && config.network.sync_mode.is_warp() { - return Err("Warp sync enabled, but no warp sync provider configured.".into()) + return Err("Warp sync enabled, but no warp sync provider configured.".into()); } if client.requires_full_sync() { match config.network.sync_mode { - SyncMode::LightState { .. } => - return Err("Fast sync doesn't work for archive nodes".into()), + SyncMode::LightState { .. 
} => { + return Err("Fast sync doesn't work for archive nodes".into()) + }, SyncMode::Warp => return Err("Warp sync doesn't work for archive nodes".into()), SyncMode::Full => {}, } @@ -800,8 +788,8 @@ where &protocol_id, config.chain_spec.fork_id(), client.clone(), - net_config.network_config.default_peers_set.in_peers as usize + - net_config.network_config.default_peers_set.out_peers as usize, + net_config.network_config.default_peers_set.in_peers as usize + + net_config.network_config.default_peers_set.out_peers as usize, ); let config_name = protocol_config.name.clone(); spawn_handle.spawn("block-request-handler", Some("networking"), handler.run()); @@ -809,8 +797,8 @@ where }; let (state_request_protocol_config, state_request_protocol_name) = { - let num_peer_hint = net_config.network_config.default_peers_set_num_full as usize + - net_config.network_config.default_peers_set.reserved_nodes.len(); + let num_peer_hint = net_config.network_config.default_peers_set_num_full as usize + + net_config.network_config.default_peers_set.reserved_nodes.len(); // Allow both outgoing and incoming requests. let (handler, protocol_config) = StateRequestHandler::new( &protocol_id, @@ -1003,7 +991,7 @@ where ); // This `return` might seem unnecessary, but we don't want to make it look like // everything is working as normal even though the user is clearly misusing the API. - return + return; } future.await diff --git a/substrate/client/service/src/chain_ops/export_blocks.rs b/substrate/client/service/src/chain_ops/export_blocks.rs index 8d66f1f96baf..2538581b1df1 100644 --- a/substrate/client/service/src/chain_ops/export_blocks.rs +++ b/substrate/client/service/src/chain_ops/export_blocks.rs @@ -61,7 +61,7 @@ where let client = &client; if last < block { - return Poll::Ready(Err("Invalid block range specified".into())) + return Poll::Ready(Err("Invalid block range specified".into())); } if !wrote_header { @@ -81,20 +81,21 @@ where .transpose()? 
.flatten() { - Some(block) => + Some(block) => { if binary { output.write_all(&block.encode())?; } else { serde_json::to_writer(&mut output, &block) .map_err(|e| format!("Error writing JSON: {}", e))?; - }, + } + }, None => return Poll::Ready(Ok(())), } if (block % 10000u32.into()).is_zero() { info!("#{}", block); } if block == last { - return Poll::Ready(Ok(())) + return Poll::Ready(Ok(())); } block += One::one(); diff --git a/substrate/client/service/src/chain_ops/export_raw_state.rs b/substrate/client/service/src/chain_ops/export_raw_state.rs index fde2c5617cb4..aa5aeaf95426 100644 --- a/substrate/client/service/src/chain_ops/export_raw_state.rs +++ b/substrate/client/service/src/chain_ops/export_raw_state.rs @@ -53,7 +53,7 @@ where } children_default.insert(child_root_key.0, StorageChild { child_info, data: pairs }); - continue + continue; } top.insert(key.0, value.0); diff --git a/substrate/client/service/src/chain_ops/import_blocks.rs b/substrate/client/service/src/chain_ops/import_blocks.rs index 34f7669d0106..f30236c8dc3f 100644 --- a/substrate/client/service/src/chain_ops/import_blocks.rs +++ b/substrate/client/service/src/chain_ops/import_blocks.rs @@ -102,8 +102,8 @@ where /// Returns the number of blocks read thus far. fn read_block_count(&self) -> u64 { match self { - BlockIter::Binary { read_block_count, .. } | - BlockIter::Json { read_block_count, .. } => *read_block_count, + BlockIter::Binary { read_block_count, .. } + | BlockIter::Json { read_block_count, .. 
} => *read_block_count, } } @@ -227,8 +227,8 @@ impl Speedometer { let speed = diff .saturating_mul(10_000) .checked_div(u128::from(elapsed_ms)) - .map_or(0.0, |s| s as f64) / - 10.0; + .map_or(0.0, |s| s as f64) + / 10.0; info!("📦 Current best block: {} ({:4.1} bps)", self.best_number, speed); } else { // If the number of blocks can't be converted to a regular integer, then we need a more @@ -324,7 +324,7 @@ where if let (Err(err), hash) = result { warn!("There was an error importing block with hash {:?}: {}", hash, err); self.has_error = true; - break + break; } } } @@ -338,7 +338,7 @@ where Err(e) => { // We've encountered an error while creating the block iterator // so we can just return a future that returns an error. - return future::ready(Err(Error::Other(e))).boxed() + return future::ready(Err(Error::Other(e))).boxed(); }, }; @@ -388,11 +388,12 @@ where state = Some(ImportState::Reading { block_iter }); } }, - Err(e) => + Err(e) => { return Poll::Ready(Err(Error::Other(format!( "Error reading block #{}: {}", read_block_count, e - )))), + )))) + }, } }, } @@ -408,7 +409,7 @@ where delay, block, }); - return Poll::Pending + return Poll::Pending; }, Poll::Ready(_) => { delay.reset(Duration::from_millis(DELAY_TIME)); @@ -440,7 +441,7 @@ where read_block_count, client.info().best_number ); - return Poll::Ready(Ok(())) + return Poll::Ready(Ok(())); } else { // Importing is not done, we still have to wait for the queue to finish. // Wait for the delay, because we know the queue is lagging behind. 
@@ -451,7 +452,7 @@ where read_block_count, delay, }); - return Poll::Pending + return Poll::Pending; }, Poll::Ready(_) => { delay.reset(Duration::from_millis(DELAY_TIME)); @@ -476,7 +477,7 @@ where return Poll::Ready(Err(Error::Other(format!( "Stopping after #{} blocks because of an error", link.imported_blocks - )))) + )))); } cx.waker().wake_by_ref(); diff --git a/substrate/client/service/src/client/block_rules.rs b/substrate/client/service/src/client/block_rules.rs index 532cde1ae78f..c8391c0e17b0 100644 --- a/substrate/client/service/src/client/block_rules.rs +++ b/substrate/client/service/src/client/block_rules.rs @@ -61,12 +61,12 @@ impl BlockRules { pub fn lookup(&self, number: NumberFor, hash: &B::Hash) -> LookupResult { if let Some(hash_for_height) = self.forks.get(&number) { if hash_for_height != hash { - return LookupResult::Expected(*hash_for_height) + return LookupResult::Expected(*hash_for_height); } } if self.bad.contains(hash) { - return LookupResult::KnownBad + return LookupResult::KnownBad; } LookupResult::NotSpecial diff --git a/substrate/client/service/src/client/call_executor.rs b/substrate/client/service/src/client/call_executor.rs index 2a33f71e1860..86b5c7c61fcd 100644 --- a/substrate/client/service/src/client/call_executor.rs +++ b/substrate/client/service/src/client/call_executor.rs @@ -406,7 +406,6 @@ mod tests { execution_extensions: Arc::new(ExecutionExtensions::new( None, Arc::new(executor.clone()), - None, )), }; diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index 10b16f6e438e..2413ce3d3a94 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -48,7 +48,7 @@ use sc_executor::RuntimeVersion; use sc_telemetry::{telemetry, TelemetryHandle, SUBSTRATE_INFO}; use sp_api::{ ApiExt, ApiRef, CallApiAt, CallApiAtParams, ConstructRuntimeApi, Core as CoreApi, - ExtensionProducer, ProvideRuntimeApi, + ProvideRuntimeApi, }; use 
sp_blockchain::{ self as blockchain, Backend as ChainBackend, CachedHeaderMetadata, Error, @@ -116,6 +116,7 @@ where config: ClientConfig, telemetry: Option, unpin_worker_sender: TracingUnboundedSender, + enable_import_proof_recording: bool, _phantom: PhantomData, } @@ -234,8 +235,7 @@ where Block: BlockT, B: backend::LocalBackend + 'static, { - // TODO skunert Check if we need to pass something here - let extensions = ExecutionExtensions::new(None, Arc::new(executor.clone()), None); + let extensions = ExecutionExtensions::new(None, Arc::new(executor.clone())); let call_executor = LocalCallExecutor::new(backend.clone(), executor, config.clone(), extensions)?; @@ -250,6 +250,7 @@ where prometheus_registry, telemetry, config, + false, ) } @@ -387,6 +388,7 @@ where prometheus_registry: Option, telemetry: Option, config: ClientConfig, + enable_import_proof_recording: bool, ) -> sp_blockchain::Result where G: BuildGenesisBlock< @@ -427,7 +429,7 @@ where backend.unpin_block(message); } else { log::debug!("Terminating unpin-worker, backend reference was dropped."); - return + return; } } log::debug!("Terminating unpin-worker, stream terminated.") @@ -449,6 +451,7 @@ where config, telemetry, unpin_worker_sender, + enable_import_proof_recording, _phantom: Default::default(), }) } @@ -513,7 +516,7 @@ where } = import_block; if !intermediates.is_empty() { - return Err(Error::IncompletePipeline) + return Err(Error::IncompletePipeline); } let fork_choice = fork_choice.ok_or(Error::IncompletePipeline)?; @@ -608,19 +611,20 @@ where // the block is lower than our last finalized block so it must revert // finality, refusing import. 
- if status == blockchain::BlockStatus::Unknown && - *import_headers.post().number() <= info.finalized_number && - !gap_block + if status == blockchain::BlockStatus::Unknown + && *import_headers.post().number() <= info.finalized_number + && !gap_block { - return Err(sp_blockchain::Error::NotInFinalizedChain) + return Err(sp_blockchain::Error::NotInFinalizedChain); } // this is a fairly arbitrary choice of where to draw the line on making notifications, // but the general goal is to only make notifications when we are already fully synced // and get a new chain head. let make_notifications = match origin { - BlockOrigin::NetworkBroadcast | BlockOrigin::Own | BlockOrigin::ConsensusBroadcast => - true, + BlockOrigin::NetworkBroadcast | BlockOrigin::Own | BlockOrigin::ConsensusBroadcast => { + true + }, BlockOrigin::Genesis | BlockOrigin::NetworkInitialSync | BlockOrigin::File => false, }; @@ -654,12 +658,14 @@ where let storage_key = PrefixedStorageKey::new_ref(&parent_storage); let storage_key = match ChildType::from_prefixed_key(storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => - storage_key, - None => + Some((ChildType::ParentKeyId, storage_key)) => { + storage_key + }, + None => { return Err(Error::Backend( "Invalid child storage key.".to_string(), - )), + )) + }, }; let entry = storage .children_default @@ -684,7 +690,7 @@ where // State root mismatch when importing state. This should not happen in // safe fast sync mode, but may happen in unsafe mode. 
warn!("Error importing state: State root mismatch."); - return Err(Error::InvalidStateRoot) + return Err(Error::InvalidStateRoot); } None }, @@ -707,11 +713,12 @@ where )?; } - let is_new_best = !gap_block && - (finalized || - match fork_choice { - ForkChoiceStrategy::LongestChain => - import_headers.post().number() > &info.best_number, + let is_new_best = !gap_block + && (finalized + || match fork_choice { + ForkChoiceStrategy::LongestChain => { + import_headers.post().number() > &info.best_number + }, ForkChoiceStrategy::Custom(v) => v, }); @@ -835,18 +842,21 @@ where let state_action = std::mem::replace(&mut import_block.state_action, StateAction::Skip); let (enact_state, storage_changes) = match (self.block_status(*parent_hash)?, state_action) { - (BlockStatus::KnownBad, _) => - return Ok(PrepareStorageChangesResult::Discard(ImportResult::KnownBad)), + (BlockStatus::KnownBad, _) => { + return Ok(PrepareStorageChangesResult::Discard(ImportResult::KnownBad)) + }, ( BlockStatus::InChainPruned, StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(_)), ) => return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)), (_, StateAction::ApplyChanges(changes)) => (true, Some(changes)), - (BlockStatus::Unknown, _) => - return Ok(PrepareStorageChangesResult::Discard(ImportResult::UnknownParent)), + (BlockStatus::Unknown, _) => { + return Ok(PrepareStorageChangesResult::Discard(ImportResult::UnknownParent)) + }, (_, StateAction::Skip) => (false, None), - (BlockStatus::InChainPruned, StateAction::Execute) => - return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)), + (BlockStatus::InChainPruned, StateAction::Execute) => { + return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)) + }, (BlockStatus::InChainPruned, StateAction::ExecuteIfPossible) => (false, None), (_, StateAction::Execute) => (true, None), (_, StateAction::ExecuteIfPossible) => (true, None), @@ -866,19 +876,9 @@ where 
runtime_api.set_call_context(CallContext::Onchain); - if let Some(extension) = - self.executor.execution_extensions().get_import_extension().clone() - { + if self.enable_import_proof_recording { + log::info!(target:"skunert", "Block import with proof recording."); runtime_api.record_proof(); - if let Some(proof_recorder) = runtime_api.proof_recorder() { - log::info!(target:"skunert", "Block import with extension and proof recording."); - let extension = extension(Box::new(proof_recorder)); - runtime_api.register_extension_with_type_id(extension.0, extension.1); - } else { - log::info!(target:"skunert", "Block import without proof recorder"); - } - } else { - log::info!(target:"skunert", "Block import without extension"); } runtime_api.execute_block( @@ -893,7 +893,7 @@ where if import_block.header.state_root() != &gen_storage_changes.transaction_storage_root { - return Err(Error::InvalidStateRoot) + return Err(Error::InvalidStateRoot); } Some(sc_consensus::StorageChanges::Changes(gen_storage_changes)) }, @@ -919,7 +919,7 @@ where "Possible safety violation: attempted to re-finalize last finalized block {:?} ", hash, ); - return Ok(()) + return Ok(()); } // Find tree route from last finalized to given block. @@ -933,7 +933,7 @@ where retracted, info.finalized_hash ); - return Err(sp_blockchain::Error::NotInFinalizedChain) + return Err(sp_blockchain::Error::NotInFinalizedChain); } // We may need to coercively update the best block if there is more than one @@ -1013,7 +1013,7 @@ where // since we won't be running the loop below which // would also remove any closed sinks. 
sinks.retain(|sink| !sink.is_closed()); - return Ok(()) + return Ok(()); }, }; @@ -1049,7 +1049,7 @@ where self.every_import_notification_sinks.lock().retain(|sink| !sink.is_closed()); - return Ok(()) + return Ok(()); }, }; @@ -1148,17 +1148,18 @@ where .as_ref() .map_or(false, |importing| &hash == importing) { - return Ok(BlockStatus::Queued) + return Ok(BlockStatus::Queued); } let hash_and_number = self.backend.blockchain().number(hash)?.map(|n| (hash, n)); match hash_and_number { - Some((hash, number)) => + Some((hash, number)) => { if self.backend.have_state_at(hash, number) { Ok(BlockStatus::InChainWithState) } else { Ok(BlockStatus::InChainPruned) - }, + } + }, None => Ok(BlockStatus::Unknown), } } @@ -1194,7 +1195,7 @@ where let genesis_hash = self.backend.blockchain().info().genesis_hash; if genesis_hash == target_hash { - return Ok(Vec::new()) + return Ok(Vec::new()); } let mut current_hash = target_hash; @@ -1210,7 +1211,7 @@ where current_hash = ancestor_hash; if genesis_hash == current_hash { - break + break; } current = ancestor; @@ -1295,14 +1296,15 @@ where size_limit: usize, ) -> sp_blockchain::Result> { if start_key.len() > MAX_NESTED_TRIE_DEPTH { - return Err(Error::Backend("Invalid start key.".to_string())) + return Err(Error::Backend("Invalid start key.".to_string())); } let state = self.state_at(hash)?; let child_info = |storage_key: &Vec| -> sp_blockchain::Result { let storage_key = PrefixedStorageKey::new_ref(storage_key); match ChildType::from_prefixed_key(storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => - Ok(ChildInfo::new_default(storage_key)), + Some((ChildType::ParentKeyId, storage_key)) => { + Ok(ChildInfo::new_default(storage_key)) + }, None => Err(Error::Backend("Invalid child storage key.".to_string())), } }; @@ -1314,7 +1316,7 @@ where { Some((child_info(start_key)?, child_root)) } else { - return Err(Error::Backend("Invalid root start key.".to_string())) + return Err(Error::Backend("Invalid root start 
key.".to_string())); } } else { None @@ -1358,18 +1360,18 @@ where let size = value.len() + next_key.len(); if total_size + size > size_limit && !entries.is_empty() { complete = false; - break + break; } total_size += size; - if current_child.is_none() && - sp_core::storage::well_known_keys::is_child_storage_key(next_key.as_slice()) && - !child_roots.contains(value.as_slice()) + if current_child.is_none() + && sp_core::storage::well_known_keys::is_child_storage_key(next_key.as_slice()) + && !child_roots.contains(value.as_slice()) { child_roots.insert(value.clone()); switch_child_key = Some((next_key.clone(), value.clone())); entries.push((next_key.clone(), value)); - break + break; } entries.push((next_key.clone(), value)); current_key = next_key; @@ -1389,12 +1391,12 @@ where complete, )); if !complete { - break + break; } } else { result[0].0.key_values.extend(entries.into_iter()); result[0].1 = complete; - break + break; } } Ok(result) @@ -1437,16 +1439,14 @@ where parent: Block::Hash, inherent_digests: Digest, record_proof: R, - extension: Option, ) -> sp_blockchain::Result> { - sc_block_builder::BlockBuilder::new_with_extension( + sc_block_builder::BlockBuilder::new( self, parent, self.expect_block_number_from_id(&BlockId::Hash(parent))?, record_proof.into(), inherent_digests, &self.backend, - extension, ) } @@ -1821,7 +1821,7 @@ where match self.block_rules.lookup(number, &hash) { BlockLookupResult::KnownBad => { trace!("Rejecting known bad block: #{} {:?}", number, hash); - return Ok(ImportResult::KnownBad) + return Ok(ImportResult::KnownBad); }, BlockLookupResult::Expected(expected_hash) => { trace!( @@ -1830,7 +1830,7 @@ where expected_hash, number ); - return Ok(ImportResult::KnownBad) + return Ok(ImportResult::KnownBad); }, BlockLookupResult::NotSpecial => {}, } @@ -1841,10 +1841,12 @@ where .block_status(hash) .map_err(|e| ConsensusError::ClientImport(e.to_string()))? 
{ - BlockStatus::InChainWithState | BlockStatus::Queued => - return Ok(ImportResult::AlreadyInChain), - BlockStatus::InChainPruned if !import_existing => - return Ok(ImportResult::AlreadyInChain), + BlockStatus::InChainWithState | BlockStatus::Queued => { + return Ok(ImportResult::AlreadyInChain) + }, + BlockStatus::InChainPruned if !import_existing => { + return Ok(ImportResult::AlreadyInChain) + }, BlockStatus::InChainPruned => {}, BlockStatus::Unknown => {}, BlockStatus::KnownBad => return Ok(ImportResult::KnownBad), @@ -2010,8 +2012,9 @@ where fn block(&self, hash: Block::Hash) -> sp_blockchain::Result>> { Ok(match (self.header(hash)?, self.body(hash)?, self.justifications(hash)?) { - (Some(header), Some(extrinsics), justifications) => - Some(SignedBlock { block: Block::new(header, extrinsics), justifications }), + (Some(header), Some(extrinsics), justifications) => { + Some(SignedBlock { block: Block::new(header, extrinsics), justifications }) + }, _ => None, }) } diff --git a/substrate/client/service/src/client/wasm_override.rs b/substrate/client/service/src/client/wasm_override.rs index 725c8ab9429a..f83bc820fd20 100644 --- a/substrate/client/service/src/client/wasm_override.rs +++ b/substrate/client/service/src/client/wasm_override.rs @@ -178,7 +178,7 @@ impl WasmOverride { }; if !dir.is_dir() { - return Err(WasmOverrideError::NotADirectory(dir.to_owned()).into()) + return Err(WasmOverrideError::NotADirectory(dir.to_owned()).into()); } let mut overrides = HashMap::new(); @@ -213,7 +213,7 @@ impl WasmOverride { } if !duplicates.is_empty() { - return Err(WasmOverrideError::DuplicateRuntime(duplicates).into()) + return Err(WasmOverrideError::DuplicateRuntime(duplicates).into()); } Ok(overrides) diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs index c4281cfb768f..50a1ff4fd99b 100644 --- a/substrate/client/service/src/lib.rs +++ b/substrate/client/service/src/lib.rs @@ -56,9 +56,10 @@ use sp_runtime::{ pub use self::{ 
builder::{ build_network, new_client, new_db_backend, new_full_client, new_full_parts, - new_full_parts_extension, new_full_parts_with_genesis_builder, new_native_or_wasm_executor, - new_wasm_executor, spawn_tasks, BuildNetworkParams, KeystoreContainer, NetworkStarter, - SpawnTasksParams, TFullBackend, TFullCallExecutor, TFullClient, + new_full_parts_record_import, new_full_parts_with_genesis_builder, + new_native_or_wasm_executor, new_wasm_executor, spawn_tasks, BuildNetworkParams, + KeystoreContainer, NetworkStarter, SpawnTasksParams, TFullBackend, TFullCallExecutor, + TFullClient, }, client::{ClientConfig, LocalCallExecutor}, error::Error, @@ -237,7 +238,7 @@ pub async fn build_system_rpc_future< // Answer incoming RPC requests. let Some(req) = rpc_rx.next().await else { debug!("RPC requests stream has terminated, shutting down the system RPC future."); - return + return; }; match req { @@ -286,7 +287,7 @@ pub async fn build_system_rpc_future< let _ = sender.send(network_state); } } else { - break + break; } }, sc_rpc::system::Request::NetworkAddReservedPeer(peer_addr, sender) => { @@ -315,7 +316,7 @@ pub async fn build_system_rpc_future< reserved_peers.iter().map(|peer_id| peer_id.to_base58()).collect(); let _ = sender.send(reserved_peers); } else { - break + break; } }, sc_rpc::system::Request::NodeRoles(sender) => { @@ -477,7 +478,7 @@ where Ok(uxt) => uxt, Err(e) => { debug!("Transaction invalid: {:?}", e); - return Box::pin(futures::future::ready(TransactionImport::Bad)) + return Box::pin(futures::future::ready(TransactionImport::Bad)); }, }; @@ -492,8 +493,9 @@ where match import_future.await { Ok(_) => TransactionImport::NewGood, Err(e) => match e.into_pool_error() { - Ok(sc_transaction_pool_api::error::Error::AlreadyImported(_)) => - TransactionImport::KnownGood, + Ok(sc_transaction_pool_api::error::Error::AlreadyImported(_)) => { + TransactionImport::KnownGood + }, Ok(e) => { debug!("Error adding transaction to the pool: {:?}", e); 
TransactionImport::Bad diff --git a/substrate/client/service/test/src/client/mod.rs b/substrate/client/service/test/src/client/mod.rs index dcb66b9a05c2..c40ac33da4bb 100644 --- a/substrate/client/service/test/src/client/mod.rs +++ b/substrate/client/service/test/src/client/mod.rs @@ -413,7 +413,7 @@ fn uncles_with_multiple_forks() { // A1 -> A2 let a2 = client - .new_block_at(a1.hash(), Default::default(), false, None) + .new_block_at(a1.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -422,7 +422,7 @@ fn uncles_with_multiple_forks() { // A2 -> A3 let a3 = client - .new_block_at(a2.hash(), Default::default(), false, None) + .new_block_at(a2.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -431,7 +431,7 @@ fn uncles_with_multiple_forks() { // A3 -> A4 let a4 = client - .new_block_at(a3.hash(), Default::default(), false, None) + .new_block_at(a3.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -440,7 +440,7 @@ fn uncles_with_multiple_forks() { // A4 -> A5 let a5 = client - .new_block_at(a4.hash(), Default::default(), false, None) + .new_block_at(a4.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -448,7 +448,7 @@ fn uncles_with_multiple_forks() { block_on(client.import(BlockOrigin::Own, a5.clone())).unwrap(); // A1 -> B2 - let mut builder = client.new_block_at(a1.hash(), Default::default(), false, None).unwrap(); + let mut builder = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); // this push is required as otherwise B2 has the same hash as A2 and won't get imported builder .push_transfer(Transfer { @@ -463,7 +463,7 @@ fn uncles_with_multiple_forks() { // B2 -> B3 let b3 = client - .new_block_at(b2.hash(), Default::default(), false, None) + .new_block_at(b2.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -472,7 +472,7 @@ fn uncles_with_multiple_forks() { // B3 -> B4 let b4 = client - .new_block_at(b3.hash(), Default::default(), false, None) + 
.new_block_at(b3.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -480,7 +480,7 @@ fn uncles_with_multiple_forks() { block_on(client.import(BlockOrigin::Own, b4.clone())).unwrap(); // // B2 -> C3 - let mut builder = client.new_block_at(b2.hash(), Default::default(), false, None).unwrap(); + let mut builder = client.new_block_at(b2.hash(), Default::default(), false).unwrap(); // this push is required as otherwise C3 has the same hash as B3 and won't get imported builder .push_transfer(Transfer { @@ -494,7 +494,7 @@ fn uncles_with_multiple_forks() { block_on(client.import(BlockOrigin::Own, c3.clone())).unwrap(); // A1 -> D2 - let mut builder = client.new_block_at(a1.hash(), Default::default(), false, None).unwrap(); + let mut builder = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); // this push is required as otherwise D2 has the same hash as B2 and won't get imported builder .push_transfer(Transfer { @@ -568,7 +568,7 @@ fn finality_target_on_longest_chain_with_multiple_forks() { // A1 -> A2 let a2 = client - .new_block_at(a1.hash(), Default::default(), false, None) + .new_block_at(a1.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -577,7 +577,7 @@ fn finality_target_on_longest_chain_with_multiple_forks() { // A2 -> A3 let a3 = client - .new_block_at(a2.hash(), Default::default(), false, None) + .new_block_at(a2.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -586,7 +586,7 @@ fn finality_target_on_longest_chain_with_multiple_forks() { // A3 -> A4 let a4 = client - .new_block_at(a3.hash(), Default::default(), false, None) + .new_block_at(a3.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -595,7 +595,7 @@ fn finality_target_on_longest_chain_with_multiple_forks() { // A4 -> A5 let a5 = client - .new_block_at(a4.hash(), Default::default(), false, None) + .new_block_at(a4.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -603,7 +603,7 @@ fn 
finality_target_on_longest_chain_with_multiple_forks() { block_on(client.import(BlockOrigin::Own, a5.clone())).unwrap(); // A1 -> B2 - let mut builder = client.new_block_at(a1.hash(), Default::default(), false, None).unwrap(); + let mut builder = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); // this push is required as otherwise B2 has the same hash as A2 and won't get imported builder .push_transfer(Transfer { @@ -618,7 +618,7 @@ fn finality_target_on_longest_chain_with_multiple_forks() { // B2 -> B3 let b3 = client - .new_block_at(b2.hash(), Default::default(), false, None) + .new_block_at(b2.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -627,7 +627,7 @@ fn finality_target_on_longest_chain_with_multiple_forks() { // B3 -> B4 let b4 = client - .new_block_at(b3.hash(), Default::default(), false, None) + .new_block_at(b3.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -635,7 +635,7 @@ fn finality_target_on_longest_chain_with_multiple_forks() { block_on(client.import(BlockOrigin::Own, b4.clone())).unwrap(); // B2 -> C3 - let mut builder = client.new_block_at(b2.hash(), Default::default(), false, None).unwrap(); + let mut builder = client.new_block_at(b2.hash(), Default::default(), false).unwrap(); // this push is required as otherwise C3 has the same hash as B3 and won't get imported builder .push_transfer(Transfer { @@ -649,7 +649,7 @@ fn finality_target_on_longest_chain_with_multiple_forks() { block_on(client.import(BlockOrigin::Own, c3.clone())).unwrap(); // A1 -> D2 - let mut builder = client.new_block_at(a1.hash(), Default::default(), false, None).unwrap(); + let mut builder = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); // this push is required as otherwise D2 has the same hash as B2 and won't get imported builder .push_transfer(Transfer { @@ -825,7 +825,7 @@ fn finality_target_with_best_not_on_longest_chain() { block_on(client.import(BlockOrigin::Own, a5.clone())).unwrap(); // 
A1 -> B2 - let mut builder = client.new_block_at(a1.hash(), Default::default(), false, None).unwrap(); + let mut builder = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); // this push is required as otherwise B2 has the same hash as A2 and won't get imported builder .push_transfer(Transfer { @@ -847,7 +847,7 @@ fn finality_target_with_best_not_on_longest_chain() { // B2 -> B3 let b3 = client - .new_block_at(b2.hash(), Default::default(), false, None) + .new_block_at(b2.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -856,7 +856,7 @@ fn finality_target_with_best_not_on_longest_chain() { // B3 -> B4 let b4 = client - .new_block_at(b3.hash(), Default::default(), false, None) + .new_block_at(b3.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -892,7 +892,7 @@ fn import_with_justification() { // A1 -> A2 let a2 = client - .new_block_at(a1.hash(), Default::default(), false, None) + .new_block_at(a1.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -903,7 +903,7 @@ fn import_with_justification() { // A2 -> A3 let justification = Justifications::from((TEST_ENGINE_ID, vec![1, 2, 3])); let a3 = client - .new_block_at(a2.hash(), Default::default(), false, None) + .new_block_at(a2.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -934,7 +934,7 @@ fn importing_diverged_finalized_block_should_trigger_reorg() { let mut finality_notifications = client.finality_notification_stream(); let a1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false) .unwrap() .build() .unwrap() @@ -942,7 +942,7 @@ fn importing_diverged_finalized_block_should_trigger_reorg() { block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); let a2 = client - .new_block_at(a1.hash(), Default::default(), false, None) + .new_block_at(a1.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ 
-950,7 +950,7 @@ fn importing_diverged_finalized_block_should_trigger_reorg() { block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); let mut b1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false) .unwrap(); // needed to make sure B1 gets a different hash from A1 b1.push_transfer(Transfer { @@ -989,7 +989,7 @@ fn finalizing_diverged_block_should_trigger_reorg() { let mut finality_notifications = client.finality_notification_stream(); let a1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false) .unwrap() .build() .unwrap() @@ -997,7 +997,7 @@ fn finalizing_diverged_block_should_trigger_reorg() { block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); let a2 = client - .new_block_at(a1.hash(), Default::default(), false, None) + .new_block_at(a1.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -1005,7 +1005,7 @@ fn finalizing_diverged_block_should_trigger_reorg() { block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); let mut b1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false) .unwrap(); // needed to make sure B1 gets a different hash from A1 b1.push_transfer(Transfer { @@ -1019,7 +1019,7 @@ fn finalizing_diverged_block_should_trigger_reorg() { block_on(client.import(BlockOrigin::Own, b1.clone())).unwrap(); let b2 = client - .new_block_at(b1.hash(), Default::default(), false, None) + .new_block_at(b1.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -1045,7 +1045,7 @@ fn finalizing_diverged_block_should_trigger_reorg() { // after we build B3 on top of B2 and import it, it should be the new best block let b3 = client - .new_block_at(b2.hash(), 
Default::default(), false, None) + .new_block_at(b2.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -1077,7 +1077,7 @@ fn finality_notifications_content() { let mut finality_notifications = client.finality_notification_stream(); let a1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false) .unwrap() .build() .unwrap() @@ -1085,7 +1085,7 @@ fn finality_notifications_content() { block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); let a2 = client - .new_block_at(a1.hash(), Default::default(), false, None) + .new_block_at(a1.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -1093,7 +1093,7 @@ fn finality_notifications_content() { block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); let a3 = client - .new_block_at(a2.hash(), Default::default(), false, None) + .new_block_at(a2.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -1101,7 +1101,7 @@ fn finality_notifications_content() { block_on(client.import(BlockOrigin::Own, a3.clone())).unwrap(); let mut b1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false) .unwrap(); // needed to make sure B1 gets a different hash from A1 b1.push_transfer(Transfer { @@ -1115,7 +1115,7 @@ fn finality_notifications_content() { block_on(client.import(BlockOrigin::Own, b1.clone())).unwrap(); let b2 = client - .new_block_at(b1.hash(), Default::default(), false, None) + .new_block_at(b1.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -1123,7 +1123,7 @@ fn finality_notifications_content() { block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); let mut c1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) + .new_block_at(client.chain_info().genesis_hash, Default::default(), 
false) .unwrap(); // needed to make sure B1 gets a different hash from A1 c1.push_transfer(Transfer { @@ -1136,7 +1136,7 @@ fn finality_notifications_content() { let c1 = c1.build().unwrap().block; block_on(client.import(BlockOrigin::Own, c1.clone())).unwrap(); - let mut d3 = client.new_block_at(a2.hash(), Default::default(), false, None).unwrap(); + let mut d3 = client.new_block_at(a2.hash(), Default::default(), false).unwrap(); // needed to make sure D3 gets a different hash from A3 d3.push_transfer(Transfer { from: AccountKeyring::Alice.into(), @@ -1149,7 +1149,7 @@ fn finality_notifications_content() { block_on(client.import(BlockOrigin::Own, d3.clone())).unwrap(); let d4 = client - .new_block_at(d3.hash(), Default::default(), false, None) + .new_block_at(d3.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -1210,7 +1210,7 @@ fn state_reverted_on_reorg() { // \ // -> B1 let mut a1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false) .unwrap(); a1.push_transfer(Transfer { from: AccountKeyring::Alice.into(), @@ -1223,7 +1223,7 @@ fn state_reverted_on_reorg() { block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); let mut b1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false) .unwrap(); b1.push_transfer(Transfer { from: AccountKeyring::Alice.into(), @@ -1237,7 +1237,7 @@ fn state_reverted_on_reorg() { block_on(client.import_as_best(BlockOrigin::Own, b1.clone())).unwrap(); assert_eq!(950 * DOLLARS, current_balance(&client)); - let mut a2 = client.new_block_at(a1.hash(), Default::default(), false, None).unwrap(); + let mut a2 = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); a2.push_transfer(Transfer { from: AccountKeyring::Alice.into(), to: AccountKeyring::Charlie.into(), @@ -1282,7 +1282,7 
@@ fn doesnt_import_blocks_that_revert_finality() { // -> B1 -> B2 -> B3 let a1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false) .unwrap() .build() .unwrap() @@ -1290,7 +1290,7 @@ fn doesnt_import_blocks_that_revert_finality() { block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); let a2 = client - .new_block_at(a1.hash(), Default::default(), false, None) + .new_block_at(a1.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -1298,7 +1298,7 @@ fn doesnt_import_blocks_that_revert_finality() { block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); let mut b1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false) .unwrap(); // needed to make sure B1 gets a different hash from A1 @@ -1313,7 +1313,7 @@ fn doesnt_import_blocks_that_revert_finality() { block_on(client.import(BlockOrigin::Own, b1.clone())).unwrap(); let b2 = client - .new_block_at(b1.hash(), Default::default(), false, None) + .new_block_at(b1.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -1323,7 +1323,7 @@ fn doesnt_import_blocks_that_revert_finality() { // prepare B3 before we finalize A2, because otherwise we won't be able to // read changes trie configuration after A2 is finalized let b3 = client - .new_block_at(b2.hash(), Default::default(), false, None) + .new_block_at(b2.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -1342,7 +1342,7 @@ fn doesnt_import_blocks_that_revert_finality() { // adding a C1 block which is lower than the last finalized should also // fail (with a cheaper check that doesn't require checking ancestry). 
let mut c1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false) .unwrap(); // needed to make sure C1 gets a different hash from A1 and B1 @@ -1362,7 +1362,7 @@ fn doesnt_import_blocks_that_revert_finality() { assert_eq!(import_err.to_string(), expected_err.to_string()); let a3 = client - .new_block_at(a2.hash(), Default::default(), false, None) + .new_block_at(a2.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -1410,7 +1410,7 @@ fn respects_block_rules() { // build B[1] let block_ok = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false) .unwrap() .build() .unwrap() @@ -1429,7 +1429,7 @@ fn respects_block_rules() { // build B'[1] let mut block_not_ok = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false) .unwrap(); block_not_ok.push_storage_change(vec![0], Some(vec![1])).unwrap(); let block_not_ok = block_not_ok.build().unwrap().block; @@ -1452,8 +1452,7 @@ fn respects_block_rules() { block_on(client.import_as_final(BlockOrigin::Own, block_ok)).unwrap(); // And check good fork (build B[2]) - let mut block_ok = - client.new_block_at(block_ok_1_hash, Default::default(), false, None).unwrap(); + let mut block_ok = client.new_block_at(block_ok_1_hash, Default::default(), false).unwrap(); block_ok.push_storage_change(vec![0], Some(vec![2])).unwrap(); let block_ok = block_ok.build().unwrap().block; assert_eq!(*block_ok.header().number(), 2); @@ -1473,7 +1472,7 @@ fn respects_block_rules() { // And now try bad fork (build B'[2]) let mut block_not_ok = - client.new_block_at(block_ok_1_hash, Default::default(), false, None).unwrap(); + client.new_block_at(block_ok_1_hash, Default::default(), false).unwrap(); 
block_not_ok.push_storage_change(vec![0], Some(vec![3])).unwrap(); let block_not_ok = block_not_ok.build().unwrap().block; assert_eq!(*block_not_ok.header().number(), 2); @@ -1529,14 +1528,14 @@ fn returns_status_for_pruned_blocks() { let mut client = TestClientBuilder::with_backend(backend).build(); let a1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false) .unwrap() .build() .unwrap() .block; let mut b1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false) .unwrap(); // b1 is created, but not imported @@ -1573,7 +1572,7 @@ fn returns_status_for_pruned_blocks() { assert_eq!(client.block_status(check_block_a1.hash).unwrap(), BlockStatus::InChainWithState); let a2 = client - .new_block_at(a1.hash(), Default::default(), false, None) + .new_block_at(a1.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -1601,7 +1600,7 @@ fn returns_status_for_pruned_blocks() { assert_eq!(client.block_status(check_block_a2.hash).unwrap(), BlockStatus::InChainWithState); let a3 = client - .new_block_at(a2.hash(), Default::default(), false, None) + .new_block_at(a2.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -1909,7 +1908,7 @@ fn reorg_triggers_a_notification_even_for_sources_that_should_not_trigger_notifi futures::executor::block_on_stream(client.import_notification_stream()); let a1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false) .unwrap() .build() .unwrap() @@ -1917,7 +1916,7 @@ fn reorg_triggers_a_notification_even_for_sources_that_should_not_trigger_notifi block_on(client.import(BlockOrigin::NetworkInitialSync, a1.clone())).unwrap(); let a2 = client - .new_block_at(a1.hash(), Default::default(), 
false, None) + .new_block_at(a1.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -1925,7 +1924,7 @@ fn reorg_triggers_a_notification_even_for_sources_that_should_not_trigger_notifi block_on(client.import(BlockOrigin::NetworkInitialSync, a2.clone())).unwrap(); let mut b1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false) .unwrap(); // needed to make sure B1 gets a different hash from A1 b1.push_transfer(Transfer { @@ -1939,7 +1938,7 @@ fn reorg_triggers_a_notification_even_for_sources_that_should_not_trigger_notifi block_on(client.import(BlockOrigin::NetworkInitialSync, b1.clone())).unwrap(); let b2 = client - .new_block_at(b1.hash(), Default::default(), false, None) + .new_block_at(b1.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -1975,7 +1974,7 @@ fn use_dalek_ext_works() { ); let a1 = client - .new_block_at(client.chain_info().genesis_hash, Default::default(), false, None) + .new_block_at(client.chain_info().genesis_hash, Default::default(), false) .unwrap() .build() .unwrap() @@ -2003,7 +2002,7 @@ fn finalize_after_best_block_updates_best() { // A1 -> A2 let a2 = client - .new_block_at(a1.hash(), Default::default(), false, None) + .new_block_at(a1.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -2012,7 +2011,7 @@ fn finalize_after_best_block_updates_best() { // A2 -> A3 let a3 = client - .new_block_at(a2.hash(), Default::default(), false, None) + .new_block_at(a2.hash(), Default::default(), false) .unwrap() .build() .unwrap() diff --git a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs index 446110442e72..74cfa0980623 100644 --- a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -365,10 +365,6 @@ fn 
generate_runtime_api_base_structures() -> Result { fn register_extension(&mut self, extension: E) { std::cell::RefCell::borrow_mut(&self.extensions).register(extension); } - - fn register_extension_with_type_id(&mut self, type_id: core::any::TypeId, extension: Box) { - let _ = std::cell::RefCell::borrow_mut(&self.extensions).register_with_type_id(type_id, extension); - } } impl #crate_::ConstructRuntimeApi diff --git a/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs index c58cf01e922b..c1339ff6621b 100644 --- a/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs +++ b/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs @@ -127,10 +127,6 @@ fn implement_common_api_traits(block_type: TypePath, self_ty: Type) -> Result(&mut self, _: E) { unimplemented!("`register_extension` not implemented for runtime api mocks") } - - fn register_extension_with_type_id(&mut self, type_id: core::any::TypeId, extension: Box) { - unimplemented!("`register_extension_with_type_id` not implemented for runtime api mocks") - } } impl #crate_::Core<#block_type> for #self_ty { diff --git a/substrate/primitives/api/src/lib.rs b/substrate/primitives/api/src/lib.rs index e50fcafb1b93..c3f80acf09ae 100644 --- a/substrate/primitives/api/src/lib.rs +++ b/substrate/primitives/api/src/lib.rs @@ -72,7 +72,6 @@ extern crate self as sp_api; #[doc(hidden)] pub use codec::{self, Decode, DecodeLimit, Encode}; -use core::any::TypeId; #[doc(hidden)] #[cfg(feature = "std")] pub use hash_db::Hasher; @@ -505,16 +504,6 @@ pub use sp_api_proc_macro::mock_impl_runtime_apis; #[cfg(feature = "std")] pub type ProofRecorder = sp_trie::recorder::Recorder>; -#[cfg(feature = "std")] -pub type ExtensionProducer = sp_std::sync::Arc< - dyn Fn( - Box, - ) -> (core::any::TypeId, Box) - + Send - + Sync, ->; - -/// A type that is used as cache for the storage transactions. 
#[cfg(feature = "std")] pub type StorageChanges = sp_state_machine::StorageChanges>; @@ -631,9 +620,6 @@ pub trait ApiExt { /// Register an [`Extension`] that will be accessible while executing a runtime api call. fn register_extension(&mut self, extension: E); - - /// Register an [`Extension`] that will be accessible while executing a runtime api call. - fn register_extension_with_type_id(&mut self, type_id: TypeId, extension: Box); } /// Parameters for [`CallApiAt::call_api_at`]. diff --git a/substrate/primitives/api/test/tests/runtime_calls.rs b/substrate/primitives/api/test/tests/runtime_calls.rs index c659b7fbdcbf..353be73dcccd 100644 --- a/substrate/primitives/api/test/tests/runtime_calls.rs +++ b/substrate/primitives/api/test/tests/runtime_calls.rs @@ -106,7 +106,7 @@ fn record_proof_works() { // Build the block and record proof let mut builder = client - .new_block_at(client.chain_info().best_hash, Default::default(), true, None) + .new_block_at(client.chain_info().best_hash, Default::default(), true) .expect("Creates block builder"); builder.push(transaction.clone()).unwrap(); let (block, _, proof) = builder.build().expect("Bake block").into_inner(); diff --git a/substrate/primitives/externalities/src/extensions.rs b/substrate/primitives/externalities/src/extensions.rs index 616ac6e827d1..8b0bbd2c5921 100644 --- a/substrate/primitives/externalities/src/extensions.rs +++ b/substrate/primitives/externalities/src/extensions.rs @@ -48,12 +48,6 @@ impl Extension for Box { } } -impl Extension for Box { - fn as_mut_any(&mut self) -> &mut dyn Any { - (**self).as_mut_any() - } -} - /// Macro for declaring an extension that usable with [`Extensions`]. 
/// /// The extension will be an unit wrapper struct that implements [`Extension`], `Deref` and diff --git a/substrate/test-utils/client/src/lib.rs b/substrate/test-utils/client/src/lib.rs index dfa20e647096..eace869b2961 100644 --- a/substrate/test-utils/client/src/lib.rs +++ b/substrate/test-utils/client/src/lib.rs @@ -227,6 +227,7 @@ impl None, None, client_config, + false, ) .expect("Creates new client"); @@ -266,12 +267,11 @@ impl let executor = executor.into().unwrap_or_else(|| { NativeElseWasmExecutor::new_with_wasm_executor(WasmExecutor::builder().build()) }); - // TODO skunert Check back on this let executor = LocalCallExecutor::new( self.backend.clone(), executor.clone(), Default::default(), - ExecutionExtensions::new(None, Arc::new(executor), None), + ExecutionExtensions::new(None, Arc::new(executor)), ) .expect("Creates LocalCallExecutor"); @@ -352,7 +352,7 @@ pub(crate) fn parse_rpc_result( if let Some(error) = error { return Err(serde_json::from_value(error.clone()) - .expect("the JSONRPC result's error is always valid; qed")) + .expect("the JSONRPC result's error is always valid; qed")); } Ok(RpcTransactionOutput { result, receiver }) @@ -386,7 +386,7 @@ where if notification.is_new_best { blocks.insert(*notification.header.number()); if blocks.len() == count { - break + break; } } } diff --git a/substrate/test-utils/runtime/client/src/trait_tests.rs b/substrate/test-utils/runtime/client/src/trait_tests.rs index 789314b4ff66..5fce7a2860b7 100644 --- a/substrate/test-utils/runtime/client/src/trait_tests.rs +++ b/substrate/test-utils/runtime/client/src/trait_tests.rs @@ -60,7 +60,7 @@ where // A1 -> A2 let a2 = client - .new_block_at(a1.hash(), Default::default(), false, None) + .new_block_at(a1.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -71,7 +71,7 @@ where // A2 -> A3 let a3 = client - .new_block_at(a2.hash(), Default::default(), false, None) + .new_block_at(a2.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ 
-82,7 +82,7 @@ where // A3 -> A4 let a4 = client - .new_block_at(a3.hash(), Default::default(), false, None) + .new_block_at(a3.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -92,7 +92,7 @@ where // A4 -> A5 let a5 = client - .new_block_at(a4.hash(), Default::default(), false, None) + .new_block_at(a4.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -102,7 +102,7 @@ where assert_eq!(blockchain.leaves().unwrap(), vec![a5.hash()]); // A1 -> B2 - let mut builder = client.new_block_at(a1.hash(), Default::default(), false, None).unwrap(); + let mut builder = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); // this push is required as otherwise B2 has the same hash as A2 and won't get imported builder @@ -119,7 +119,7 @@ where // B2 -> B3 let b3 = client - .new_block_at(b2.hash(), Default::default(), false, None) + .new_block_at(b2.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -130,7 +130,7 @@ where // B3 -> B4 let b4 = client - .new_block_at(b3.hash(), Default::default(), false, None) + .new_block_at(b3.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -139,7 +139,7 @@ where assert_eq!(blockchain.leaves().unwrap(), vec![a5.hash(), b4.hash()]); // // B2 -> C3 - let mut builder = client.new_block_at(b2.hash(), Default::default(), false, None).unwrap(); + let mut builder = client.new_block_at(b2.hash(), Default::default(), false).unwrap(); // this push is required as otherwise C3 has the same hash as B3 and won't get imported builder .push_transfer(Transfer { @@ -154,7 +154,7 @@ where assert_eq!(blockchain.leaves().unwrap(), vec![a5.hash(), b4.hash(), c3.hash()]); // A1 -> D2 - let mut builder = client.new_block_at(a1.hash(), Default::default(), false, None).unwrap(); + let mut builder = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); // this push is required as otherwise D2 has the same hash as B2 and won't get imported builder .push_transfer(Transfer { @@ 
-189,7 +189,7 @@ where // A1 -> A2 let a2 = client - .new_block_at(a1.hash(), Default::default(), false, None) + .new_block_at(a1.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -198,7 +198,7 @@ where // A2 -> A3 let a3 = client - .new_block_at(a2.hash(), Default::default(), false, None) + .new_block_at(a2.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -207,7 +207,7 @@ where // A3 -> A4 let a4 = client - .new_block_at(a3.hash(), Default::default(), false, None) + .new_block_at(a3.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -216,7 +216,7 @@ where // A4 -> A5 let a5 = client - .new_block_at(a4.hash(), Default::default(), false, None) + .new_block_at(a4.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -224,7 +224,7 @@ where block_on(client.import(BlockOrigin::Own, a5.clone())).unwrap(); // A1 -> B2 - let mut builder = client.new_block_at(a1.hash(), Default::default(), false, None).unwrap(); + let mut builder = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); // this push is required as otherwise B2 has the same hash as A2 and won't get imported builder .push_transfer(Transfer { @@ -239,7 +239,7 @@ where // B2 -> B3 let b3 = client - .new_block_at(b2.hash(), Default::default(), false, None) + .new_block_at(b2.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -248,7 +248,7 @@ where // B3 -> B4 let b4 = client - .new_block_at(b3.hash(), Default::default(), false, None) + .new_block_at(b3.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -256,7 +256,7 @@ where block_on(client.import(BlockOrigin::Own, b4)).unwrap(); // // B2 -> C3 - let mut builder = client.new_block_at(b2.hash(), Default::default(), false, None).unwrap(); + let mut builder = client.new_block_at(b2.hash(), Default::default(), false).unwrap(); // this push is required as otherwise C3 has the same hash as B3 and won't get imported builder .push_transfer(Transfer { @@ -270,7 
+270,7 @@ where block_on(client.import(BlockOrigin::Own, c3.clone())).unwrap(); // A1 -> D2 - let mut builder = client.new_block_at(a1.hash(), Default::default(), false, None).unwrap(); + let mut builder = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); // this push is required as otherwise D2 has the same hash as B2 and won't get imported builder .push_transfer(Transfer { @@ -316,7 +316,7 @@ where // A1 -> A2 let a2 = client - .new_block_at(a1.hash(), Default::default(), false, None) + .new_block_at(a1.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -325,7 +325,7 @@ where // A2 -> A3 let a3 = client - .new_block_at(a2.hash(), Default::default(), false, None) + .new_block_at(a2.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -334,7 +334,7 @@ where // A3 -> A4 let a4 = client - .new_block_at(a3.hash(), Default::default(), false, None) + .new_block_at(a3.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -343,7 +343,7 @@ where // A4 -> A5 let a5 = client - .new_block_at(a4.hash(), Default::default(), false, None) + .new_block_at(a4.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -351,7 +351,7 @@ where block_on(client.import(BlockOrigin::Own, a5.clone())).unwrap(); // A1 -> B2 - let mut builder = client.new_block_at(a1.hash(), Default::default(), false, None).unwrap(); + let mut builder = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); // this push is required as otherwise B2 has the same hash as A2 and won't get imported builder .push_transfer(Transfer { @@ -366,7 +366,7 @@ where // B2 -> B3 let b3 = client - .new_block_at(b2.hash(), Default::default(), false, None) + .new_block_at(b2.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -375,7 +375,7 @@ where // B3 -> B4 let b4 = client - .new_block_at(b3.hash(), Default::default(), false, None) + .new_block_at(b3.hash(), Default::default(), false) .unwrap() .build() .unwrap() @@ -383,7 
+383,7 @@ where block_on(client.import(BlockOrigin::Own, b4)).unwrap(); // // B2 -> C3 - let mut builder = client.new_block_at(b2.hash(), Default::default(), false, None).unwrap(); + let mut builder = client.new_block_at(b2.hash(), Default::default(), false).unwrap(); // this push is required as otherwise C3 has the same hash as B3 and won't get imported builder .push_transfer(Transfer { @@ -397,7 +397,7 @@ where block_on(client.import(BlockOrigin::Own, c3)).unwrap(); // A1 -> D2 - let mut builder = client.new_block_at(a1.hash(), Default::default(), false, None).unwrap(); + let mut builder = client.new_block_at(a1.hash(), Default::default(), false).unwrap(); // this push is required as otherwise D2 has the same hash as B2 and won't get imported builder .push_transfer(Transfer { From 177d2ddb5be341a70287d18b0e74816986d89b2f Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 5 Sep 2023 20:42:54 +0200 Subject: [PATCH 09/61] Clean up and implement externalities --- .../src/validate_block/trie_recorder.rs | 42 +++++++------------ cumulus/test/service/src/lib.rs | 18 +++++++- cumulus/test/service/src/main.rs | 1 + .../node/core/pvf/common/src/executor_intf.rs | 4 ++ substrate/primitives/externalities/src/lib.rs | 14 ++----- .../primitives/state-machine/src/basic.rs | 4 ++ .../primitives/state-machine/src/read_only.rs | 4 ++ 7 files changed, 47 insertions(+), 40 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs index 903d82559325..e8cf023badfd 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs @@ -35,30 +35,7 @@ use sp_trie::NodeCodec; use sp_trie::{MemoryDB, StorageProof}; use trie_db::{Hasher, RecordedForKey, TrieAccess}; -type TrieBackend = sp_state_machine::TrieBackend< - MemoryDB>, - HashingFor, - trie_cache::CacheProvider>, - 
RecorderProvider>, ->; - -type Ext<'a, B> = sp_state_machine::Ext<'a, HashingFor, TrieBackend>; - -fn with_externalities R, R>(f: F) -> R { - sp_externalities::with_externalities(f).expect("Environmental externalities not set.") -} - -pub(crate) struct RecorderProvider { - seen_nodes: RefCell>, - encoded_size: RefCell, -} - -impl RecorderProvider { - pub fn new() -> Self { - Self { seen_nodes: Default::default(), encoded_size: Default::default() } - } -} - +/// A trie recorder that only keeps track of the proof size. pub(crate) struct SizeRecorder<'a, H: Hasher> { seen_nodes: RefMut<'a, BTreeSet>, encoded_size: RefMut<'a, usize>, @@ -66,9 +43,7 @@ pub(crate) struct SizeRecorder<'a, H: Hasher> { impl<'a, H: trie_db::Hasher> trie_db::TrieRecorder for SizeRecorder<'a, H> { fn record<'b>(&mut self, access: TrieAccess<'b, H::Out>) { - log::info!(target: "skunert", "recorder: record"); let mut encoded_size_update = 0; - match access { TrieAccess::NodeOwned { hash, node_owned } => { if !self.seen_nodes.get(&hash).is_some() { @@ -76,7 +51,7 @@ impl<'a, H: trie_db::Hasher> trie_db::TrieRecorder for SizeRecorder<'a, encoded_size_update += node.encoded_size(); log::info!( target: "skunert", - "Recording node({encoded_size_update})", + "Recording node ({encoded_size_update} bytes)", ); //TODO skunert: Check if this is correct, original has transaction handling self.seen_nodes.insert(hash); @@ -88,7 +63,7 @@ impl<'a, H: trie_db::Hasher> trie_db::TrieRecorder for SizeRecorder<'a, encoded_size_update += node.encoded_size(); log::info!( target: "skunert", - "Recording node ({encoded_size_update} bytes)", + "Recording encoded node ({encoded_size_update} bytes)", ); self.seen_nodes.insert(hash); } @@ -118,6 +93,17 @@ impl<'a, H: trie_db::Hasher> trie_db::TrieRecorder for SizeRecorder<'a, } } +pub(crate) struct RecorderProvider { + seen_nodes: RefCell>, + encoded_size: RefCell, +} + +impl RecorderProvider { + pub fn new() -> Self { + Self { seen_nodes: Default::default(), 
encoded_size: Default::default() } + } +} + impl sp_trie::TrieRecorderProvider for RecorderProvider { type Recorder<'a> = SizeRecorder<'a, H> where H: 'a; diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index daa6f5b66fa7..b69d447de3a1 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -181,6 +181,7 @@ impl RecoveryHandle for FailingRecoveryHandle { /// be able to perform chain operations. pub fn new_partial( config: &mut Configuration, + enable_import_proof_record: bool, ) -> Result< PartialComponents< Client, @@ -209,7 +210,10 @@ pub fn new_partial( let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts_record_import::( - config, None, executor, true, + config, + None, + executor, + enable_import_proof_record, )?; let client = Arc::new(client); @@ -306,6 +310,7 @@ pub async fn start_node_impl( rpc_ext_builder: RB, consensus: Consensus, collator_options: CollatorOptions, + proof_recording_during_import: bool, ) -> sc_service::error::Result<( TaskManager, Arc, @@ -318,7 +323,7 @@ where { let mut parachain_config = prepare_node_config(parachain_config); - let params = new_partial(&mut parachain_config)?; + let params = new_partial(&mut parachain_config, proof_recording_during_import)?; let transaction_pool = params.transaction_pool.clone(); let mut task_manager = params.task_manager; @@ -517,6 +522,7 @@ pub struct TestNodeBuilder { consensus: Consensus, relay_chain_mode: RelayChainMode, endowed_accounts: Vec, + record_proof_during_import: bool, } impl TestNodeBuilder { @@ -541,6 +547,7 @@ impl TestNodeBuilder { consensus: Consensus::RelayChain, endowed_accounts: Default::default(), relay_chain_mode: RelayChainMode::Embedded, + record_proof_during_import: true, } } @@ -653,6 +660,12 @@ impl TestNodeBuilder { self } + /// Record proofs during import. 
+ pub fn disable_import_proof_recording(mut self) -> TestNodeBuilder { + self.record_proof_during_import = false; + self + } + /// Build the [`TestNode`]. pub async fn build(self) -> TestNode { let parachain_config = node_config( @@ -691,6 +704,7 @@ impl TestNodeBuilder { |_| Ok(jsonrpsee::RpcModule::new(())), self.consensus, collator_options, + self.record_proof_during_import, ) .await .expect("could not create Cumulus test service"); diff --git a/cumulus/test/service/src/main.rs b/cumulus/test/service/src/main.rs index 5946e9cc3506..1a83e7b1ade7 100644 --- a/cumulus/test/service/src/main.rs +++ b/cumulus/test/service/src/main.rs @@ -139,6 +139,7 @@ fn main() -> Result<(), sc_cli::Error> { |_| Ok(jsonrpsee::RpcModule::new(())), consensus, collator_options, + true, )) .expect("could not create Cumulus test service"); diff --git a/polkadot/node/core/pvf/common/src/executor_intf.rs b/polkadot/node/core/pvf/common/src/executor_intf.rs index 79839149ebdf..3bb23d038093 100644 --- a/polkadot/node/core/pvf/common/src/executor_intf.rs +++ b/polkadot/node/core/pvf/common/src/executor_intf.rs @@ -322,6 +322,10 @@ impl sp_externalities::Externalities for ValidationExternalities { fn get_read_and_written_keys(&self) -> Vec<(Vec, u32, u32, bool)> { panic!("get_read_and_written_keys: unsupported feature for parachain validation") } + + fn proof_size(&self) -> Option { + panic!("proof_size: unsupported feature for parachain validation") + } } impl sp_externalities::ExtensionStore for ValidationExternalities { diff --git a/substrate/primitives/externalities/src/lib.rs b/substrate/primitives/externalities/src/lib.rs index 411ec97a6b82..a86d4e68a8ef 100644 --- a/substrate/primitives/externalities/src/lib.rs +++ b/substrate/primitives/externalities/src/lib.rs @@ -239,6 +239,10 @@ pub trait Externalities: ExtensionStore { /// no transaction is open that can be closed. 
fn storage_commit_transaction(&mut self) -> Result<(), ()>; + /// Returns estimated proof size for the state queries so far. + /// Proof is reset on commit and wipe. + fn proof_size(&self) -> Option; + /// Index specified transaction slice and store it. fn storage_index_transaction(&mut self, _index: u32, _hash: &[u8], _size: u32) { unimplemented!("storage_index_transaction"); @@ -293,16 +297,6 @@ pub trait Externalities: ExtensionStore { /// Adds new storage keys to the DB tracking whitelist. fn set_whitelist(&mut self, new: Vec); - /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - /// Benchmarking related functionality and shouldn't be used anywhere else! - /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - /// - /// Returns estimated proof size for the state queries so far. - /// Proof is reset on commit and wipe. - fn proof_size(&self) -> Option { - None - } - /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! /// Benchmarking related functionality and shouldn't be used anywhere else! /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
diff --git a/substrate/primitives/state-machine/src/basic.rs b/substrate/primitives/state-machine/src/basic.rs index ace88aee2628..f30df99876f5 100644 --- a/substrate/primitives/state-machine/src/basic.rs +++ b/substrate/primitives/state-machine/src/basic.rs @@ -307,6 +307,10 @@ impl Externalities for BasicExternalities { fn commit(&mut self) {} + fn proof_size(&self) -> Option { + None + } + fn read_write_count(&self) -> (u32, u32, u32, u32) { unimplemented!("read_write_count is not supported in Basic") } diff --git a/substrate/primitives/state-machine/src/read_only.rs b/substrate/primitives/state-machine/src/read_only.rs index 2056bf986635..9fbfb8411452 100644 --- a/substrate/primitives/state-machine/src/read_only.rs +++ b/substrate/primitives/state-machine/src/read_only.rs @@ -218,6 +218,10 @@ where fn get_read_and_written_keys(&self) -> Vec<(Vec, u32, u32, bool)> { unimplemented!("get_read_and_written_keys is not supported in ReadOnlyExternalities") } + + fn proof_size(&self) -> Option { + self.backend.proof_size() + } } impl<'a, H: Hasher, B: 'a + Backend> sp_externalities::ExtensionStore From aeee13576d6dcc2014dd41344c0e71b8700d3731 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 5 Sep 2023 21:04:44 +0200 Subject: [PATCH 10/61] Rename clawback -> reclaim --- Cargo.lock | 34 +- cumulus/client/clawback/Cargo.toml | 27 -- cumulus/pallets/parachain-system/Cargo.toml | 2 +- .../src/validate_block/implementation.rs | 2 +- cumulus/parachain-template/node/Cargo.toml | 2 +- cumulus/parachains/common/Cargo.toml | 2 +- cumulus/parachains/common/src/impls.rs | 8 +- .../assets/asset-hub-kusama/Cargo.toml | 4 +- .../assets/asset-hub-kusama/src/lib.rs | 155 ++++---- .../asset-hub-kusama/src/weights/xcm/mod.rs | 5 +- .../assets/asset-hub-kusama/src/xcm_config.rs | 354 +++++++++--------- cumulus/polkadot-parachain/Cargo.toml | 2 +- cumulus/polkadot-parachain/src/service.rs | 4 +- cumulus/primitives/pov-reclaim/Cargo.toml | 15 + .../pov-reclaim}/src/lib.rs | 8 +- 
cumulus/test/service/Cargo.toml | 2 +- cumulus/test/service/src/lib.rs | 3 +- 17 files changed, 301 insertions(+), 328 deletions(-) delete mode 100644 cumulus/client/clawback/Cargo.toml create mode 100644 cumulus/primitives/pov-reclaim/Cargo.toml rename cumulus/{client/clawback => primitives/pov-reclaim}/src/lib.rs (85%) diff --git a/Cargo.lock b/Cargo.lock index 6025b7705d62..33f5543ae951 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -727,7 +727,6 @@ version = "0.9.420" dependencies = [ "asset-test-utils", "assets-common", - "cumulus-client-clawback", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", @@ -735,6 +734,7 @@ dependencies = [ "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-primitives-core", + "cumulus-primitives-reclaim", "cumulus-primitives-utility", "frame-benchmarking", "frame-executive", @@ -3274,20 +3274,6 @@ dependencies = [ "cipher 0.4.4", ] -[[package]] -name = "cumulus-client-clawback" -version = "0.0.1" -dependencies = [ - "sp-api", - "sp-core", - "sp-externalities", - "sp-runtime", - "sp-runtime-interface", - "sp-std", - "sp-trie", - "tracing", -] - [[package]] name = "cumulus-client-cli" version = "0.1.0" @@ -3578,10 +3564,10 @@ version = "0.1.0" dependencies = [ "assert_matches", "bytes", - "cumulus-client-clawback", "cumulus-pallet-parachain-system-proc-macro", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", + "cumulus-primitives-reclaim", "cumulus-test-client", "cumulus-test-relay-sproof-builder", "environmental", @@ -3754,6 +3740,14 @@ dependencies = [ "tracing", ] +[[package]] +name = "cumulus-primitives-reclaim" +version = "0.0.1" +dependencies = [ + "sp-runtime-interface", + "tracing", +] + [[package]] name = "cumulus-primitives-timestamp" version = "0.1.0" @@ -3989,7 +3983,6 @@ dependencies = [ "async-trait", "clap 4.4.2", "criterion 0.5.1", - "cumulus-client-clawback", "cumulus-client-cli", "cumulus-client-consensus-common", "cumulus-client-consensus-relay-chain", 
@@ -3998,6 +3991,7 @@ dependencies = [ "cumulus-pallet-parachain-system", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", + "cumulus-primitives-reclaim", "cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", "cumulus-relay-chain-minimal-node", @@ -10921,7 +10915,6 @@ version = "0.1.0" dependencies = [ "clap 4.4.2", "color-print", - "cumulus-client-clawback", "cumulus-client-cli", "cumulus-client-collator", "cumulus-client-consensus-aura", @@ -10930,6 +10923,7 @@ dependencies = [ "cumulus-client-service", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", + "cumulus-primitives-reclaim", "cumulus-relay-chain-interface", "frame-benchmarking", "frame-benchmarking-cli", @@ -11032,8 +11026,8 @@ dependencies = [ name = "parachains-common" version = "1.0.0" dependencies = [ - "cumulus-client-clawback", "cumulus-primitives-core", + "cumulus-primitives-reclaim", "cumulus-primitives-utility", "frame-support", "frame-system", @@ -12471,7 +12465,6 @@ dependencies = [ "collectives-polkadot-runtime", "color-print", "contracts-rococo-runtime", - "cumulus-client-clawback", "cumulus-client-cli", "cumulus-client-collator", "cumulus-client-consensus-aura", @@ -12481,6 +12474,7 @@ dependencies = [ "cumulus-client-service", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", + "cumulus-primitives-reclaim", "cumulus-relay-chain-interface", "frame-benchmarking", "frame-benchmarking-cli", diff --git a/cumulus/client/clawback/Cargo.toml b/cumulus/client/clawback/Cargo.toml deleted file mode 100644 index 7123d69f226d..000000000000 --- a/cumulus/client/clawback/Cargo.toml +++ /dev/null @@ -1,27 +0,0 @@ -[package] -name = "cumulus-client-clawback" -version = "0.0.1" -authors = [ "Sebastian Kunert " ] - -[dependencies] -sp-runtime-interface = { path = "../../../substrate/primitives/runtime-interface", default-features = false } -sp-externalities = { path = "../../../substrate/primitives/externalities", default-features = 
false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -tracing = { version = "0.1.37", default-features = false } - -[features] -default = ["std"] -std = [ - "sp-runtime-interface/std", - "sp-externalities/std", - "sp-runtime/std", - "sp-std/std", - "sp-api/std", - "sp-trie/std", - "sp-core/std", - "tracing/std" -] diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index e5d6d2e4a6a9..85e34c5e6244 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -36,7 +36,7 @@ xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-feature cumulus-pallet-parachain-system-proc-macro = { path = "proc-macro", default-features = false } cumulus-primitives-core = { path = "../../primitives/core", default-features = false } cumulus-primitives-parachain-inherent = { path = "../../primitives/parachain-inherent", default-features = false } -cumulus-client-clawback = { path = "../../client/clawback", default-features = false } +cumulus-primitives-reclaim = { path = "../../primitives/pov-reclaim", default-features = false } [dev-dependencies] assert_matches = "1.5" diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index 8cf4fd01bca7..326c93553396 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -173,7 +173,7 @@ where .replace_implementation(host_default_child_storage_next_key), 
sp_io::offchain_index::host_set.replace_implementation(host_offchain_index_set), sp_io::offchain_index::host_clear.replace_implementation(host_offchain_index_clear), - cumulus_client_clawback::clawback_host_functions::host_current_storage_proof_size + cumulus_primitives_reclaim::pov_reclaim_host_functions::host_current_storage_proof_size .replace_implementation(reclaim_pov_weight), ); diff --git a/cumulus/parachain-template/node/Cargo.toml b/cumulus/parachain-template/node/Cargo.toml index 39ec9f9d8033..60bd3fa1d791 100644 --- a/cumulus/parachain-template/node/Cargo.toml +++ b/cumulus/parachain-template/node/Cargo.toml @@ -62,7 +62,7 @@ xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-feature cumulus-client-cli = { path = "../../client/cli" } cumulus-client-collator = { path = "../../client/collator" } cumulus-client-consensus-aura = { path = "../../client/consensus/aura" } -cumulus-client-clawback = { path = "../../client/clawback" } +cumulus-primitives-reclaim = { path = "../../primitives/pov-reclaim" } cumulus-client-consensus-common = { path = "../../client/consensus/common" } cumulus-client-consensus-proposer = { path = "../../client/consensus/proposer" } cumulus-client-service = { path = "../../client/service" } diff --git a/cumulus/parachains/common/Cargo.toml b/cumulus/parachains/common/Cargo.toml index 23cf328da7ac..33a4cb2675f6 100644 --- a/cumulus/parachains/common/Cargo.toml +++ b/cumulus/parachains/common/Cargo.toml @@ -37,7 +37,7 @@ xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm pallet-collator-selection = { path = "../../pallets/collator-selection", default-features = false } cumulus-primitives-core = { path = "../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../primitives/utility", default-features = false } -cumulus-client-clawback = { path = "../../client/clawback", default-features = false } +cumulus-primitives-reclaim = { path = 
"../../primitives/pov-reclaim", default-features = false } [dev-dependencies] pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false} diff --git a/cumulus/parachains/common/src/impls.rs b/cumulus/parachains/common/src/impls.rs index 5a9dc23e922c..73ff884e151a 100644 --- a/cumulus/parachains/common/src/impls.rs +++ b/cumulus/parachains/common/src/impls.rs @@ -122,8 +122,8 @@ pub struct AssetsFrom(PhantomData); impl> ContainsPair for AssetsFrom { fn contains(asset: &MultiAsset, origin: &MultiLocation) -> bool { let loc = T::get(); - &loc == origin && - matches!(asset, MultiAsset { id: AssetId::Concrete(asset_loc), fun: Fungible(_a) } + &loc == origin + && matches!(asset, MultiAsset { id: AssetId::Concrete(asset_loc), fun: Fungible(_a) } if asset_loc.match_and_split(&loc).is_some()) } } @@ -332,7 +332,7 @@ where ) -> Result { log::info!(target: "skunert", "Calling pre dispatch of my extension"); let proof_size = - cumulus_client_clawback::clawback_host_functions::current_storage_proof_size(); + cumulus_primitives_reclaim::pov_reclaim_host_functions::current_storage_proof_size(); log::info!(target: "skunert","Got proof size: {}", proof_size); Ok(()) } @@ -346,7 +346,7 @@ where ) -> Result<(), TransactionValidityError> { log::info!(target: "skunert", "Calling post dispatch of my extension"); let proof_size = - cumulus_client_clawback::clawback_host_functions::current_storage_proof_size(); + cumulus_primitives_reclaim::pov_reclaim_host_functions::current_storage_proof_size(); log::info!(target: "skunert","Got proof size: {}", proof_size); Ok(()) } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml index a8c8827da7ce..f90d32e6c168 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml @@ -74,7 +74,7 @@ cumulus-pallet-xcm = { path = "../../../../pallets/xcm", 
default-features = fals cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } -cumulus-client-clawback = { path = "../../../../client/clawback", default-features = false } +cumulus-primitives-reclaim = { path = "../../../../primitives/pov-reclaim", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachain-info = { path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } @@ -168,7 +168,7 @@ std = [ "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-core/std", "cumulus-primitives-utility/std", - "cumulus-client-clawback/std", + "cumulus-primitives-reclaim/std", "frame-benchmarking?/std", "frame-executive/std", "frame-support/std", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs index 07bc40db89d1..a8470a021941 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs @@ -477,97 +477,98 @@ impl InstanceFilter for ProxyType { ProxyType::Any => true, ProxyType::NonTransfer => !matches!( c, - RuntimeCall::Balances { .. } | - RuntimeCall::Assets { .. } | - RuntimeCall::NftFractionalization { .. } | - RuntimeCall::Nfts { .. } | - RuntimeCall::Uniques { .. } + RuntimeCall::Balances { .. } + | RuntimeCall::Assets { .. } + | RuntimeCall::NftFractionalization { .. } + | RuntimeCall::Nfts { .. } + | RuntimeCall::Uniques { .. } ), ProxyType::CancelProxy => matches!( c, - RuntimeCall::Proxy(pallet_proxy::Call::reject_announcement { .. }) | - RuntimeCall::Utility { .. } | - RuntimeCall::Multisig { .. 
} + RuntimeCall::Proxy(pallet_proxy::Call::reject_announcement { .. }) + | RuntimeCall::Utility { .. } + | RuntimeCall::Multisig { .. } ), ProxyType::Assets => { matches!( c, - RuntimeCall::Assets { .. } | - RuntimeCall::Utility { .. } | - RuntimeCall::Multisig { .. } | - RuntimeCall::NftFractionalization { .. } | - RuntimeCall::Nfts { .. } | RuntimeCall::Uniques { .. } + RuntimeCall::Assets { .. } + | RuntimeCall::Utility { .. } + | RuntimeCall::Multisig { .. } + | RuntimeCall::NftFractionalization { .. } + | RuntimeCall::Nfts { .. } + | RuntimeCall::Uniques { .. } ) }, ProxyType::AssetOwner => matches!( c, - RuntimeCall::Assets(TrustBackedAssetsCall::create { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::start_destroy { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::destroy_accounts { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::destroy_approvals { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::finish_destroy { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::transfer_ownership { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::set_team { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::set_metadata { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::clear_metadata { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::set_min_balance { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::create { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::destroy { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::redeposit { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::transfer_ownership { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::set_team { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::set_collection_max_supply { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::lock_collection { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::create { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::destroy { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::transfer_ownership { .. 
}) | - RuntimeCall::Uniques(pallet_uniques::Call::set_team { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::set_metadata { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::set_attribute { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::set_collection_metadata { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::clear_metadata { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::clear_attribute { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::clear_collection_metadata { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::set_collection_max_supply { .. }) | - RuntimeCall::Utility { .. } | - RuntimeCall::Multisig { .. } + RuntimeCall::Assets(TrustBackedAssetsCall::create { .. }) + | RuntimeCall::Assets(TrustBackedAssetsCall::start_destroy { .. }) + | RuntimeCall::Assets(TrustBackedAssetsCall::destroy_accounts { .. }) + | RuntimeCall::Assets(TrustBackedAssetsCall::destroy_approvals { .. }) + | RuntimeCall::Assets(TrustBackedAssetsCall::finish_destroy { .. }) + | RuntimeCall::Assets(TrustBackedAssetsCall::transfer_ownership { .. }) + | RuntimeCall::Assets(TrustBackedAssetsCall::set_team { .. }) + | RuntimeCall::Assets(TrustBackedAssetsCall::set_metadata { .. }) + | RuntimeCall::Assets(TrustBackedAssetsCall::clear_metadata { .. }) + | RuntimeCall::Assets(TrustBackedAssetsCall::set_min_balance { .. }) + | RuntimeCall::Nfts(pallet_nfts::Call::create { .. }) + | RuntimeCall::Nfts(pallet_nfts::Call::destroy { .. }) + | RuntimeCall::Nfts(pallet_nfts::Call::redeposit { .. }) + | RuntimeCall::Nfts(pallet_nfts::Call::transfer_ownership { .. }) + | RuntimeCall::Nfts(pallet_nfts::Call::set_team { .. }) + | RuntimeCall::Nfts(pallet_nfts::Call::set_collection_max_supply { .. }) + | RuntimeCall::Nfts(pallet_nfts::Call::lock_collection { .. }) + | RuntimeCall::Uniques(pallet_uniques::Call::create { .. }) + | RuntimeCall::Uniques(pallet_uniques::Call::destroy { .. }) + | RuntimeCall::Uniques(pallet_uniques::Call::transfer_ownership { .. 
}) + | RuntimeCall::Uniques(pallet_uniques::Call::set_team { .. }) + | RuntimeCall::Uniques(pallet_uniques::Call::set_metadata { .. }) + | RuntimeCall::Uniques(pallet_uniques::Call::set_attribute { .. }) + | RuntimeCall::Uniques(pallet_uniques::Call::set_collection_metadata { .. }) + | RuntimeCall::Uniques(pallet_uniques::Call::clear_metadata { .. }) + | RuntimeCall::Uniques(pallet_uniques::Call::clear_attribute { .. }) + | RuntimeCall::Uniques(pallet_uniques::Call::clear_collection_metadata { .. }) + | RuntimeCall::Uniques(pallet_uniques::Call::set_collection_max_supply { .. }) + | RuntimeCall::Utility { .. } + | RuntimeCall::Multisig { .. } ), ProxyType::AssetManager => matches!( c, - RuntimeCall::Assets(TrustBackedAssetsCall::mint { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::burn { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::freeze { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::block { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::thaw { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::freeze_asset { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::thaw_asset { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::touch_other { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::refund_other { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::force_mint { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::update_mint_settings { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::mint_pre_signed { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::set_attributes_pre_signed { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::lock_item_transfer { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::unlock_item_transfer { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::lock_item_properties { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::set_metadata { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::clear_metadata { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::set_collection_metadata { .. 
}) | - RuntimeCall::Nfts(pallet_nfts::Call::clear_collection_metadata { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::mint { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::burn { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::freeze { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::thaw { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::freeze_collection { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::thaw_collection { .. }) | - RuntimeCall::Utility { .. } | - RuntimeCall::Multisig { .. } + RuntimeCall::Assets(TrustBackedAssetsCall::mint { .. }) + | RuntimeCall::Assets(TrustBackedAssetsCall::burn { .. }) + | RuntimeCall::Assets(TrustBackedAssetsCall::freeze { .. }) + | RuntimeCall::Assets(TrustBackedAssetsCall::block { .. }) + | RuntimeCall::Assets(TrustBackedAssetsCall::thaw { .. }) + | RuntimeCall::Assets(TrustBackedAssetsCall::freeze_asset { .. }) + | RuntimeCall::Assets(TrustBackedAssetsCall::thaw_asset { .. }) + | RuntimeCall::Assets(TrustBackedAssetsCall::touch_other { .. }) + | RuntimeCall::Assets(TrustBackedAssetsCall::refund_other { .. }) + | RuntimeCall::Nfts(pallet_nfts::Call::force_mint { .. }) + | RuntimeCall::Nfts(pallet_nfts::Call::update_mint_settings { .. }) + | RuntimeCall::Nfts(pallet_nfts::Call::mint_pre_signed { .. }) + | RuntimeCall::Nfts(pallet_nfts::Call::set_attributes_pre_signed { .. }) + | RuntimeCall::Nfts(pallet_nfts::Call::lock_item_transfer { .. }) + | RuntimeCall::Nfts(pallet_nfts::Call::unlock_item_transfer { .. }) + | RuntimeCall::Nfts(pallet_nfts::Call::lock_item_properties { .. }) + | RuntimeCall::Nfts(pallet_nfts::Call::set_metadata { .. }) + | RuntimeCall::Nfts(pallet_nfts::Call::clear_metadata { .. }) + | RuntimeCall::Nfts(pallet_nfts::Call::set_collection_metadata { .. }) + | RuntimeCall::Nfts(pallet_nfts::Call::clear_collection_metadata { .. }) + | RuntimeCall::Uniques(pallet_uniques::Call::mint { .. }) + | RuntimeCall::Uniques(pallet_uniques::Call::burn { .. 
}) + | RuntimeCall::Uniques(pallet_uniques::Call::freeze { .. }) + | RuntimeCall::Uniques(pallet_uniques::Call::thaw { .. }) + | RuntimeCall::Uniques(pallet_uniques::Call::freeze_collection { .. }) + | RuntimeCall::Uniques(pallet_uniques::Call::thaw_collection { .. }) + | RuntimeCall::Utility { .. } + | RuntimeCall::Multisig { .. } ), ProxyType::Collator => matches!( c, - RuntimeCall::CollatorSelection { .. } | - RuntimeCall::Utility { .. } | - RuntimeCall::Multisig { .. } + RuntimeCall::CollatorSelection { .. } + | RuntimeCall::Utility { .. } + | RuntimeCall::Multisig { .. } ), } } @@ -974,16 +975,10 @@ impl_runtime_apis! { impl sp_block_builder::BlockBuilder for Runtime { fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { - log::info!("apply_extrinsic"); - let proof_size = cumulus_client_clawback::clawback_host_functions::current_storage_proof_size(); - log::info!("Got proof size: {}", proof_size); Executive::apply_extrinsic(extrinsic) } fn finalize_block() -> ::Header { - log::info!("finalize_block"); - let proof_size = cumulus_client_clawback::clawback_host_functions::current_storage_proof_size(); - log::info!("Got proof size: {}", proof_size); Executive::finalize_block() } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/mod.rs index 9aff4902d15b..98de7180774a 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/mod.rs @@ -40,8 +40,9 @@ impl WeighMultiAssets for MultiAssetFilter { WildFungibility::Fungible => weight, // Magic number 2 has to do with the fact that we could have up to 2 times // MaxAssetsIntoHolding in the worst-case scenario. 
- WildFungibility::NonFungible => - weight.saturating_mul((MaxAssetsIntoHolding::get() * 2) as u64), + WildFungibility::NonFungible => { + weight.saturating_mul((MaxAssetsIntoHolding::get() * 2) as u64) + }, }, AllCounted(count) => weight.saturating_mul(MAX_ASSETS.min(*count as u64)), AllOfCounted { count, .. } => weight.saturating_mul(MAX_ASSETS.min(*count as u64)), diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/xcm_config.rs index 0c197598f889..994905d76a86 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/xcm_config.rs @@ -245,194 +245,194 @@ impl Contains for SafeCallFilter { #[cfg(feature = "runtime-benchmarks")] { if matches!(call, RuntimeCall::System(frame_system::Call::remark_with_event { .. })) { - return true + return true; } } matches!( call, - RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. }) | - RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | - RuntimeCall::Timestamp(..) | - RuntimeCall::Balances(..) | - RuntimeCall::CollatorSelection( - pallet_collator_selection::Call::set_desired_candidates { .. } | - pallet_collator_selection::Call::set_candidacy_bond { .. } | - pallet_collator_selection::Call::register_as_candidate { .. } | - pallet_collator_selection::Call::leave_intent { .. } | - pallet_collator_selection::Call::set_invulnerables { .. } | - pallet_collator_selection::Call::add_invulnerable { .. } | - pallet_collator_selection::Call::remove_invulnerable { .. }, - ) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | - RuntimeCall::XcmpQueue(..) | - RuntimeCall::DmpQueue(..) | - RuntimeCall::Assets( - pallet_assets::Call::create { .. 
} | - pallet_assets::Call::force_create { .. } | - pallet_assets::Call::start_destroy { .. } | - pallet_assets::Call::destroy_accounts { .. } | - pallet_assets::Call::destroy_approvals { .. } | - pallet_assets::Call::finish_destroy { .. } | - pallet_assets::Call::block { .. } | - pallet_assets::Call::mint { .. } | - pallet_assets::Call::burn { .. } | - pallet_assets::Call::transfer { .. } | - pallet_assets::Call::transfer_keep_alive { .. } | - pallet_assets::Call::force_transfer { .. } | - pallet_assets::Call::freeze { .. } | - pallet_assets::Call::thaw { .. } | - pallet_assets::Call::freeze_asset { .. } | - pallet_assets::Call::thaw_asset { .. } | - pallet_assets::Call::transfer_ownership { .. } | - pallet_assets::Call::set_team { .. } | - pallet_assets::Call::set_metadata { .. } | - pallet_assets::Call::clear_metadata { .. } | - pallet_assets::Call::force_set_metadata { .. } | - pallet_assets::Call::force_clear_metadata { .. } | - pallet_assets::Call::force_asset_status { .. } | - pallet_assets::Call::approve_transfer { .. } | - pallet_assets::Call::cancel_approval { .. } | - pallet_assets::Call::force_cancel_approval { .. } | - pallet_assets::Call::transfer_approved { .. } | - pallet_assets::Call::touch { .. } | - pallet_assets::Call::touch_other { .. } | - pallet_assets::Call::refund { .. } | - pallet_assets::Call::refund_other { .. }, + RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. }) + | RuntimeCall::System( + frame_system::Call::set_heap_pages { .. } + | frame_system::Call::set_code { .. } + | frame_system::Call::set_code_without_checks { .. } + | frame_system::Call::kill_prefix { .. }, + ) | RuntimeCall::ParachainSystem(..) + | RuntimeCall::Timestamp(..) + | RuntimeCall::Balances(..) + | RuntimeCall::CollatorSelection( + pallet_collator_selection::Call::set_desired_candidates { .. } + | pallet_collator_selection::Call::set_candidacy_bond { .. } + | pallet_collator_selection::Call::register_as_candidate { .. 
} + | pallet_collator_selection::Call::leave_intent { .. } + | pallet_collator_selection::Call::set_invulnerables { .. } + | pallet_collator_selection::Call::add_invulnerable { .. } + | pallet_collator_selection::Call::remove_invulnerable { .. }, + ) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) + | RuntimeCall::XcmpQueue(..) + | RuntimeCall::DmpQueue(..) + | RuntimeCall::Assets( + pallet_assets::Call::create { .. } + | pallet_assets::Call::force_create { .. } + | pallet_assets::Call::start_destroy { .. } + | pallet_assets::Call::destroy_accounts { .. } + | pallet_assets::Call::destroy_approvals { .. } + | pallet_assets::Call::finish_destroy { .. } + | pallet_assets::Call::block { .. } + | pallet_assets::Call::mint { .. } + | pallet_assets::Call::burn { .. } + | pallet_assets::Call::transfer { .. } + | pallet_assets::Call::transfer_keep_alive { .. } + | pallet_assets::Call::force_transfer { .. } + | pallet_assets::Call::freeze { .. } + | pallet_assets::Call::thaw { .. } + | pallet_assets::Call::freeze_asset { .. } + | pallet_assets::Call::thaw_asset { .. } + | pallet_assets::Call::transfer_ownership { .. } + | pallet_assets::Call::set_team { .. } + | pallet_assets::Call::set_metadata { .. } + | pallet_assets::Call::clear_metadata { .. } + | pallet_assets::Call::force_set_metadata { .. } + | pallet_assets::Call::force_clear_metadata { .. } + | pallet_assets::Call::force_asset_status { .. } + | pallet_assets::Call::approve_transfer { .. } + | pallet_assets::Call::cancel_approval { .. } + | pallet_assets::Call::force_cancel_approval { .. } + | pallet_assets::Call::transfer_approved { .. } + | pallet_assets::Call::touch { .. } + | pallet_assets::Call::touch_other { .. } + | pallet_assets::Call::refund { .. } + | pallet_assets::Call::refund_other { .. }, ) | RuntimeCall::ForeignAssets( - pallet_assets::Call::create { .. } | - pallet_assets::Call::force_create { .. } | - pallet_assets::Call::start_destroy { .. 
} | - pallet_assets::Call::destroy_accounts { .. } | - pallet_assets::Call::destroy_approvals { .. } | - pallet_assets::Call::finish_destroy { .. } | - pallet_assets::Call::block { .. } | - pallet_assets::Call::mint { .. } | - pallet_assets::Call::burn { .. } | - pallet_assets::Call::transfer { .. } | - pallet_assets::Call::transfer_keep_alive { .. } | - pallet_assets::Call::force_transfer { .. } | - pallet_assets::Call::freeze { .. } | - pallet_assets::Call::thaw { .. } | - pallet_assets::Call::freeze_asset { .. } | - pallet_assets::Call::thaw_asset { .. } | - pallet_assets::Call::transfer_ownership { .. } | - pallet_assets::Call::set_team { .. } | - pallet_assets::Call::set_metadata { .. } | - pallet_assets::Call::clear_metadata { .. } | - pallet_assets::Call::force_set_metadata { .. } | - pallet_assets::Call::force_clear_metadata { .. } | - pallet_assets::Call::force_asset_status { .. } | - pallet_assets::Call::approve_transfer { .. } | - pallet_assets::Call::cancel_approval { .. } | - pallet_assets::Call::force_cancel_approval { .. } | - pallet_assets::Call::transfer_approved { .. } | - pallet_assets::Call::touch { .. } | - pallet_assets::Call::touch_other { .. } | - pallet_assets::Call::refund { .. } | - pallet_assets::Call::refund_other { .. }, + pallet_assets::Call::create { .. } + | pallet_assets::Call::force_create { .. } + | pallet_assets::Call::start_destroy { .. } + | pallet_assets::Call::destroy_accounts { .. } + | pallet_assets::Call::destroy_approvals { .. } + | pallet_assets::Call::finish_destroy { .. } + | pallet_assets::Call::block { .. } + | pallet_assets::Call::mint { .. } + | pallet_assets::Call::burn { .. } + | pallet_assets::Call::transfer { .. } + | pallet_assets::Call::transfer_keep_alive { .. } + | pallet_assets::Call::force_transfer { .. } + | pallet_assets::Call::freeze { .. } + | pallet_assets::Call::thaw { .. } + | pallet_assets::Call::freeze_asset { .. } + | pallet_assets::Call::thaw_asset { .. 
} + | pallet_assets::Call::transfer_ownership { .. } + | pallet_assets::Call::set_team { .. } + | pallet_assets::Call::set_metadata { .. } + | pallet_assets::Call::clear_metadata { .. } + | pallet_assets::Call::force_set_metadata { .. } + | pallet_assets::Call::force_clear_metadata { .. } + | pallet_assets::Call::force_asset_status { .. } + | pallet_assets::Call::approve_transfer { .. } + | pallet_assets::Call::cancel_approval { .. } + | pallet_assets::Call::force_cancel_approval { .. } + | pallet_assets::Call::transfer_approved { .. } + | pallet_assets::Call::touch { .. } + | pallet_assets::Call::touch_other { .. } + | pallet_assets::Call::refund { .. } + | pallet_assets::Call::refund_other { .. }, ) | RuntimeCall::PoolAssets( - pallet_assets::Call::force_create { .. } | - pallet_assets::Call::block { .. } | - pallet_assets::Call::burn { .. } | - pallet_assets::Call::transfer { .. } | - pallet_assets::Call::transfer_keep_alive { .. } | - pallet_assets::Call::force_transfer { .. } | - pallet_assets::Call::freeze { .. } | - pallet_assets::Call::thaw { .. } | - pallet_assets::Call::freeze_asset { .. } | - pallet_assets::Call::thaw_asset { .. } | - pallet_assets::Call::transfer_ownership { .. } | - pallet_assets::Call::set_team { .. } | - pallet_assets::Call::set_metadata { .. } | - pallet_assets::Call::clear_metadata { .. } | - pallet_assets::Call::force_set_metadata { .. } | - pallet_assets::Call::force_clear_metadata { .. } | - pallet_assets::Call::force_asset_status { .. } | - pallet_assets::Call::approve_transfer { .. } | - pallet_assets::Call::cancel_approval { .. } | - pallet_assets::Call::force_cancel_approval { .. } | - pallet_assets::Call::transfer_approved { .. } | - pallet_assets::Call::touch { .. } | - pallet_assets::Call::touch_other { .. } | - pallet_assets::Call::refund { .. } | - pallet_assets::Call::refund_other { .. }, + pallet_assets::Call::force_create { .. } + | pallet_assets::Call::block { .. } + | pallet_assets::Call::burn { .. 
} + | pallet_assets::Call::transfer { .. } + | pallet_assets::Call::transfer_keep_alive { .. } + | pallet_assets::Call::force_transfer { .. } + | pallet_assets::Call::freeze { .. } + | pallet_assets::Call::thaw { .. } + | pallet_assets::Call::freeze_asset { .. } + | pallet_assets::Call::thaw_asset { .. } + | pallet_assets::Call::transfer_ownership { .. } + | pallet_assets::Call::set_team { .. } + | pallet_assets::Call::set_metadata { .. } + | pallet_assets::Call::clear_metadata { .. } + | pallet_assets::Call::force_set_metadata { .. } + | pallet_assets::Call::force_clear_metadata { .. } + | pallet_assets::Call::force_asset_status { .. } + | pallet_assets::Call::approve_transfer { .. } + | pallet_assets::Call::cancel_approval { .. } + | pallet_assets::Call::force_cancel_approval { .. } + | pallet_assets::Call::transfer_approved { .. } + | pallet_assets::Call::touch { .. } + | pallet_assets::Call::touch_other { .. } + | pallet_assets::Call::refund { .. } + | pallet_assets::Call::refund_other { .. }, ) | RuntimeCall::AssetConversion( - pallet_asset_conversion::Call::create_pool { .. } | - pallet_asset_conversion::Call::add_liquidity { .. } | - pallet_asset_conversion::Call::remove_liquidity { .. } | - pallet_asset_conversion::Call::swap_tokens_for_exact_tokens { .. } | - pallet_asset_conversion::Call::swap_exact_tokens_for_tokens { .. }, + pallet_asset_conversion::Call::create_pool { .. } + | pallet_asset_conversion::Call::add_liquidity { .. } + | pallet_asset_conversion::Call::remove_liquidity { .. } + | pallet_asset_conversion::Call::swap_tokens_for_exact_tokens { .. } + | pallet_asset_conversion::Call::swap_exact_tokens_for_tokens { .. }, ) | RuntimeCall::NftFractionalization( - pallet_nft_fractionalization::Call::fractionalize { .. } | - pallet_nft_fractionalization::Call::unify { .. }, + pallet_nft_fractionalization::Call::fractionalize { .. } + | pallet_nft_fractionalization::Call::unify { .. }, ) | RuntimeCall::Nfts( - pallet_nfts::Call::create { .. 
} | - pallet_nfts::Call::force_create { .. } | - pallet_nfts::Call::destroy { .. } | - pallet_nfts::Call::mint { .. } | - pallet_nfts::Call::force_mint { .. } | - pallet_nfts::Call::burn { .. } | - pallet_nfts::Call::transfer { .. } | - pallet_nfts::Call::lock_item_transfer { .. } | - pallet_nfts::Call::unlock_item_transfer { .. } | - pallet_nfts::Call::lock_collection { .. } | - pallet_nfts::Call::transfer_ownership { .. } | - pallet_nfts::Call::set_team { .. } | - pallet_nfts::Call::force_collection_owner { .. } | - pallet_nfts::Call::force_collection_config { .. } | - pallet_nfts::Call::approve_transfer { .. } | - pallet_nfts::Call::cancel_approval { .. } | - pallet_nfts::Call::clear_all_transfer_approvals { .. } | - pallet_nfts::Call::lock_item_properties { .. } | - pallet_nfts::Call::set_attribute { .. } | - pallet_nfts::Call::force_set_attribute { .. } | - pallet_nfts::Call::clear_attribute { .. } | - pallet_nfts::Call::approve_item_attributes { .. } | - pallet_nfts::Call::cancel_item_attributes_approval { .. } | - pallet_nfts::Call::set_metadata { .. } | - pallet_nfts::Call::clear_metadata { .. } | - pallet_nfts::Call::set_collection_metadata { .. } | - pallet_nfts::Call::clear_collection_metadata { .. } | - pallet_nfts::Call::set_accept_ownership { .. } | - pallet_nfts::Call::set_collection_max_supply { .. } | - pallet_nfts::Call::update_mint_settings { .. } | - pallet_nfts::Call::set_price { .. } | - pallet_nfts::Call::buy_item { .. } | - pallet_nfts::Call::pay_tips { .. } | - pallet_nfts::Call::create_swap { .. } | - pallet_nfts::Call::cancel_swap { .. } | - pallet_nfts::Call::claim_swap { .. }, + pallet_nfts::Call::create { .. } + | pallet_nfts::Call::force_create { .. } + | pallet_nfts::Call::destroy { .. } + | pallet_nfts::Call::mint { .. } + | pallet_nfts::Call::force_mint { .. } + | pallet_nfts::Call::burn { .. } + | pallet_nfts::Call::transfer { .. } + | pallet_nfts::Call::lock_item_transfer { .. } + | pallet_nfts::Call::unlock_item_transfer { .. 
} + | pallet_nfts::Call::lock_collection { .. } + | pallet_nfts::Call::transfer_ownership { .. } + | pallet_nfts::Call::set_team { .. } + | pallet_nfts::Call::force_collection_owner { .. } + | pallet_nfts::Call::force_collection_config { .. } + | pallet_nfts::Call::approve_transfer { .. } + | pallet_nfts::Call::cancel_approval { .. } + | pallet_nfts::Call::clear_all_transfer_approvals { .. } + | pallet_nfts::Call::lock_item_properties { .. } + | pallet_nfts::Call::set_attribute { .. } + | pallet_nfts::Call::force_set_attribute { .. } + | pallet_nfts::Call::clear_attribute { .. } + | pallet_nfts::Call::approve_item_attributes { .. } + | pallet_nfts::Call::cancel_item_attributes_approval { .. } + | pallet_nfts::Call::set_metadata { .. } + | pallet_nfts::Call::clear_metadata { .. } + | pallet_nfts::Call::set_collection_metadata { .. } + | pallet_nfts::Call::clear_collection_metadata { .. } + | pallet_nfts::Call::set_accept_ownership { .. } + | pallet_nfts::Call::set_collection_max_supply { .. } + | pallet_nfts::Call::update_mint_settings { .. } + | pallet_nfts::Call::set_price { .. } + | pallet_nfts::Call::buy_item { .. } + | pallet_nfts::Call::pay_tips { .. } + | pallet_nfts::Call::create_swap { .. } + | pallet_nfts::Call::cancel_swap { .. } + | pallet_nfts::Call::claim_swap { .. }, ) | RuntimeCall::Uniques( - pallet_uniques::Call::create { .. } | - pallet_uniques::Call::force_create { .. } | - pallet_uniques::Call::destroy { .. } | - pallet_uniques::Call::mint { .. } | - pallet_uniques::Call::burn { .. } | - pallet_uniques::Call::transfer { .. } | - pallet_uniques::Call::freeze { .. } | - pallet_uniques::Call::thaw { .. } | - pallet_uniques::Call::freeze_collection { .. } | - pallet_uniques::Call::thaw_collection { .. } | - pallet_uniques::Call::transfer_ownership { .. } | - pallet_uniques::Call::set_team { .. } | - pallet_uniques::Call::approve_transfer { .. } | - pallet_uniques::Call::cancel_approval { .. } | - pallet_uniques::Call::force_item_status { .. 
} | - pallet_uniques::Call::set_attribute { .. } | - pallet_uniques::Call::clear_attribute { .. } | - pallet_uniques::Call::set_metadata { .. } | - pallet_uniques::Call::clear_metadata { .. } | - pallet_uniques::Call::set_collection_metadata { .. } | - pallet_uniques::Call::clear_collection_metadata { .. } | - pallet_uniques::Call::set_accept_ownership { .. } | - pallet_uniques::Call::set_collection_max_supply { .. } | - pallet_uniques::Call::set_price { .. } | - pallet_uniques::Call::buy_item { .. } + pallet_uniques::Call::create { .. } + | pallet_uniques::Call::force_create { .. } + | pallet_uniques::Call::destroy { .. } + | pallet_uniques::Call::mint { .. } + | pallet_uniques::Call::burn { .. } + | pallet_uniques::Call::transfer { .. } + | pallet_uniques::Call::freeze { .. } + | pallet_uniques::Call::thaw { .. } + | pallet_uniques::Call::freeze_collection { .. } + | pallet_uniques::Call::thaw_collection { .. } + | pallet_uniques::Call::transfer_ownership { .. } + | pallet_uniques::Call::set_team { .. } + | pallet_uniques::Call::approve_transfer { .. } + | pallet_uniques::Call::cancel_approval { .. } + | pallet_uniques::Call::force_item_status { .. } + | pallet_uniques::Call::set_attribute { .. } + | pallet_uniques::Call::clear_attribute { .. } + | pallet_uniques::Call::set_metadata { .. } + | pallet_uniques::Call::clear_metadata { .. } + | pallet_uniques::Call::set_collection_metadata { .. } + | pallet_uniques::Call::clear_collection_metadata { .. } + | pallet_uniques::Call::set_accept_ownership { .. } + | pallet_uniques::Call::set_collection_max_supply { .. } + | pallet_uniques::Call::set_price { .. } + | pallet_uniques::Call::buy_item { .. 
} ) ) } diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml index 73f3f0e9904d..ce35a59da583 100644 --- a/cumulus/polkadot-parachain/Cargo.toml +++ b/cumulus/polkadot-parachain/Cargo.toml @@ -81,7 +81,7 @@ xcm = { package = "staging-xcm", path = "../../polkadot/xcm" } # Cumulus cumulus-client-cli = { path = "../client/cli" } -cumulus-client-clawback = { path = "../client/clawback" } +cumulus-primitives-reclaim = { path = "../primitives/pov-reclaim" } cumulus-client-collator = { path = "../client/collator" } cumulus-client-consensus-aura = { path = "../client/consensus/aura" } cumulus-client-consensus-relay-chain = { path = "../client/consensus/relay-chain" } diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs index 10808d0910a3..8f88b044653d 100644 --- a/cumulus/polkadot-parachain/src/service.rs +++ b/cumulus/polkadot-parachain/src/service.rs @@ -65,13 +65,13 @@ use polkadot_primitives::CollatorPair; #[cfg(not(feature = "runtime-benchmarks"))] type HostFunctions = ( sp_io::SubstrateHostFunctions, - cumulus_client_clawback::clawback_host_functions::HostFunctions, + cumulus_primitives_reclaim::pov_reclaim_host_functions::HostFunctions, ); #[cfg(feature = "runtime-benchmarks")] type HostFunctions = ( sp_io::SubstrateHostFunctions, - cumulus_client_clawback::clawback_host_functions::HostFunctions, + cumulus_primitives_reclaim::pov_reclaim_host_functions::HostFunctions, frame_benchmarking::benchmarking::HostFunctions, ); diff --git a/cumulus/primitives/pov-reclaim/Cargo.toml b/cumulus/primitives/pov-reclaim/Cargo.toml new file mode 100644 index 000000000000..8e4fc701443e --- /dev/null +++ b/cumulus/primitives/pov-reclaim/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "cumulus-primitives-reclaim" +version = "0.0.1" +authors = [ "Sebastian Kunert " ] + +[dependencies] +sp-runtime-interface = { path = "../../../substrate/primitives/runtime-interface", default-features = false } +tracing = 
{ version = "0.1.37", default-features = false } + +[features] +default = ["std"] +std = [ + "sp-runtime-interface/std", + "tracing/std" +] diff --git a/cumulus/client/clawback/src/lib.rs b/cumulus/primitives/pov-reclaim/src/lib.rs similarity index 85% rename from cumulus/client/clawback/src/lib.rs rename to cumulus/primitives/pov-reclaim/src/lib.rs index 5290359e3649..5b6913ebfdc7 100644 --- a/cumulus/client/clawback/src/lib.rs +++ b/cumulus/primitives/pov-reclaim/src/lib.rs @@ -15,18 +15,12 @@ // #![cfg_attr(not(feature = "std"), no_std)] -extern crate sp_api; -extern crate sp_core; -extern crate sp_externalities; -extern crate sp_runtime; extern crate sp_runtime_interface; -extern crate sp_std; -extern crate sp_trie; use sp_runtime_interface::runtime_interface; #[runtime_interface] -pub trait ClawbackHostFunctions { +pub trait PovReclaimHostFunctions { fn current_storage_proof_size(&mut self) -> u32 { tracing::info!(target:"skunert", "current_storage_proof_size is called"); self.proof_size().unwrap_or_default() diff --git a/cumulus/test/service/Cargo.toml b/cumulus/test/service/Cargo.toml index c066b5e132ae..540e50c42ed4 100644 --- a/cumulus/test/service/Cargo.toml +++ b/cumulus/test/service/Cargo.toml @@ -66,7 +66,7 @@ polkadot-overseer = { path = "../../../polkadot/node/overseer" } cumulus-client-cli = { path = "../../client/cli" } parachains-common = { path = "../../parachains/common" } cumulus-client-consensus-common = { path = "../../client/consensus/common" } -cumulus-client-clawback = { path = "../../client/clawback" } +cumulus-primitives-reclaim = { path = "../../primitives/pov-reclaim" } cumulus-client-consensus-relay-chain = { path = "../../client/consensus/relay-chain" } cumulus-client-service = { path = "../../client/service" } cumulus-primitives-core = { path = "../../primitives/core" } diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index b69d447de3a1..6dc19a8e4564 100644 --- a/cumulus/test/service/src/lib.rs +++ 
b/cumulus/test/service/src/lib.rs @@ -113,7 +113,8 @@ pub type AnnounceBlockFn = Arc>) + Send + Sync>; pub struct RuntimeExecutor; impl sc_executor::NativeExecutionDispatch for RuntimeExecutor { - type ExtendHostFunctions = cumulus_client_clawback::clawback_host_functions::HostFunctions; + type ExtendHostFunctions = + cumulus_primitives_reclaim::pov_reclaim_host_functions::HostFunctions; fn dispatch(method: &str, data: &[u8]) -> Option> { cumulus_test_runtime::api::dispatch(method, data) From 42688a390c4b7ee8aaab76d97e1109729660afdb Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 5 Sep 2023 21:31:03 +0200 Subject: [PATCH 11/61] Cleanup --- .../src/validate_block/implementation.rs | 6 ++-- .../src/validate_block/trie_recorder.rs | 32 ++++--------------- cumulus/test/service/benches/block_import.rs | 5 ++- substrate/client/block-builder/Cargo.toml | 1 - .../service/src/chain_ops/export_blocks.rs | 9 +++--- .../service/src/chain_ops/export_raw_state.rs | 2 +- .../service/src/chain_ops/import_blocks.rs | 25 +++++++-------- .../client/service/src/client/block_rules.rs | 4 +-- 8 files changed, 30 insertions(+), 54 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index 326c93553396..3c609c4bbd41 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -32,8 +32,7 @@ use sp_core::storage::{ChildInfo, StateVersion}; use sp_externalities::{set_and_run_with_externalities, Externalities}; use sp_io::KillStorageResult; use sp_runtime::traits::{Block as BlockT, Extrinsic, HashingFor, Header as HeaderT}; -use sp_std::prelude::*; -use sp_std::sync::Arc; +use sp_std::{prelude::*, sync::Arc}; use sp_trie::{MemoryDB, TrieRecorderProvider}; use trie_recorder::RecorderProvider; @@ -314,8 +313,7 @@ fn host_storage_clear(key: &[u8]) { } fn 
reclaim_pov_weight() -> u32 { - log::info!(target: "skunert", "Calling my replaced method."); - with_externalities(|ext| ext.proof_size()).unwrap_or(0) + with_externalities(|ext| ext.proof_size()).unwrap_or_default() } fn host_storage_root(version: StateVersion) -> Vec { diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs index e8cf023badfd..16948cbe9b4e 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs @@ -25,14 +25,13 @@ use frame_support::traits::Get; use sp_externalities::{set_and_run_with_externalities, Externalities}; use sp_io::KillStorageResult; use sp_runtime::traits::{Block as BlockT, Extrinsic, HashingFor, Header as HeaderT}; -use sp_std::prelude::*; use sp_std::{ boxed::Box, cell::{RefCell, RefMut}, collections::btree_set::BTreeSet, + prelude::*, }; -use sp_trie::NodeCodec; -use sp_trie::{MemoryDB, StorageProof}; +use sp_trie::{MemoryDB, NodeCodec, StorageProof}; use trie_db::{Hasher, RecordedForKey, TrieAccess}; /// A trie recorder that only keeps track of the proof size. 
@@ -45,44 +44,27 @@ impl<'a, H: trie_db::Hasher> trie_db::TrieRecorder for SizeRecorder<'a, fn record<'b>(&mut self, access: TrieAccess<'b, H::Out>) { let mut encoded_size_update = 0; match access { - TrieAccess::NodeOwned { hash, node_owned } => { + TrieAccess::NodeOwned { hash, node_owned } => if !self.seen_nodes.get(&hash).is_some() { let node = node_owned.to_encoded::>(); encoded_size_update += node.encoded_size(); - log::info!( - target: "skunert", - "Recording node ({encoded_size_update} bytes)", - ); - //TODO skunert: Check if this is correct, original has transaction handling self.seen_nodes.insert(hash); - } - }, + }, TrieAccess::EncodedNode { hash, encoded_node } => { if !self.seen_nodes.get(&hash).is_some() { let node = encoded_node.into_owned(); encoded_size_update += node.encoded_size(); - log::info!( - target: "skunert", - "Recording encoded node ({encoded_size_update} bytes)", - ); self.seen_nodes.insert(hash); } }, - TrieAccess::Value { hash, value, .. } => { + TrieAccess::Value { hash, value, .. } => if !self.seen_nodes.get(&hash).is_some() { let value = value.into_owned(); encoded_size_update += value.encoded_size(); - log::info!( - target: "skunert", - "Recording value ({encoded_size_update} bytes)", - ); - self.seen_nodes.insert(hash); - } - }, - TrieAccess::Hash { .. } => {}, - TrieAccess::NonExisting { .. } => {}, + }, + TrieAccess::Hash { .. } | TrieAccess::NonExisting { .. 
} => {}, }; *self.encoded_size += encoded_size_update; diff --git a/cumulus/test/service/benches/block_import.rs b/cumulus/test/service/benches/block_import.rs index bf8db8a7ed6b..b79598b15302 100644 --- a/cumulus/test/service/benches/block_import.rs +++ b/cumulus/test/service/benches/block_import.rs @@ -52,9 +52,8 @@ fn benchmark_block_import(c: &mut Criterion) { utils::create_benchmarking_transfer_extrinsics(&client, &src_accounts, &dst_accounts); let parent_hash = client.usage_info().chain.best_hash; - let mut block_builder = client - .new_block_at(parent_hash, Default::default(), RecordProof::No, None) - .unwrap(); + let mut block_builder = + client.new_block_at(parent_hash, Default::default(), RecordProof::No).unwrap(); for extrinsic in extrinsics { block_builder.push(extrinsic).unwrap(); } diff --git a/substrate/client/block-builder/Cargo.toml b/substrate/client/block-builder/Cargo.toml index 7c192f676a0b..ff2f9635b7a2 100644 --- a/substrate/client/block-builder/Cargo.toml +++ b/substrate/client/block-builder/Cargo.toml @@ -16,7 +16,6 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", features = [ "derive", ] } -log = "0.4.20" sc-client-api = { path = "../api" } sp-api = { path = "../../primitives/api" } sp-block-builder = { path = "../../primitives/block-builder" } diff --git a/substrate/client/service/src/chain_ops/export_blocks.rs b/substrate/client/service/src/chain_ops/export_blocks.rs index 2538581b1df1..8d66f1f96baf 100644 --- a/substrate/client/service/src/chain_ops/export_blocks.rs +++ b/substrate/client/service/src/chain_ops/export_blocks.rs @@ -61,7 +61,7 @@ where let client = &client; if last < block { - return Poll::Ready(Err("Invalid block range specified".into())); + return Poll::Ready(Err("Invalid block range specified".into())) } if !wrote_header { @@ -81,21 +81,20 @@ where .transpose()? 
.flatten() { - Some(block) => { + Some(block) => if binary { output.write_all(&block.encode())?; } else { serde_json::to_writer(&mut output, &block) .map_err(|e| format!("Error writing JSON: {}", e))?; - } - }, + }, None => return Poll::Ready(Ok(())), } if (block % 10000u32.into()).is_zero() { info!("#{}", block); } if block == last { - return Poll::Ready(Ok(())); + return Poll::Ready(Ok(())) } block += One::one(); diff --git a/substrate/client/service/src/chain_ops/export_raw_state.rs b/substrate/client/service/src/chain_ops/export_raw_state.rs index aa5aeaf95426..fde2c5617cb4 100644 --- a/substrate/client/service/src/chain_ops/export_raw_state.rs +++ b/substrate/client/service/src/chain_ops/export_raw_state.rs @@ -53,7 +53,7 @@ where } children_default.insert(child_root_key.0, StorageChild { child_info, data: pairs }); - continue; + continue } top.insert(key.0, value.0); diff --git a/substrate/client/service/src/chain_ops/import_blocks.rs b/substrate/client/service/src/chain_ops/import_blocks.rs index f30236c8dc3f..34f7669d0106 100644 --- a/substrate/client/service/src/chain_ops/import_blocks.rs +++ b/substrate/client/service/src/chain_ops/import_blocks.rs @@ -102,8 +102,8 @@ where /// Returns the number of blocks read thus far. fn read_block_count(&self) -> u64 { match self { - BlockIter::Binary { read_block_count, .. } - | BlockIter::Json { read_block_count, .. } => *read_block_count, + BlockIter::Binary { read_block_count, .. } | + BlockIter::Json { read_block_count, .. 
} => *read_block_count, } } @@ -227,8 +227,8 @@ impl Speedometer { let speed = diff .saturating_mul(10_000) .checked_div(u128::from(elapsed_ms)) - .map_or(0.0, |s| s as f64) - / 10.0; + .map_or(0.0, |s| s as f64) / + 10.0; info!("📦 Current best block: {} ({:4.1} bps)", self.best_number, speed); } else { // If the number of blocks can't be converted to a regular integer, then we need a more @@ -324,7 +324,7 @@ where if let (Err(err), hash) = result { warn!("There was an error importing block with hash {:?}: {}", hash, err); self.has_error = true; - break; + break } } } @@ -338,7 +338,7 @@ where Err(e) => { // We've encountered an error while creating the block iterator // so we can just return a future that returns an error. - return future::ready(Err(Error::Other(e))).boxed(); + return future::ready(Err(Error::Other(e))).boxed() }, }; @@ -388,12 +388,11 @@ where state = Some(ImportState::Reading { block_iter }); } }, - Err(e) => { + Err(e) => return Poll::Ready(Err(Error::Other(format!( "Error reading block #{}: {}", read_block_count, e - )))) - }, + )))), } }, } @@ -409,7 +408,7 @@ where delay, block, }); - return Poll::Pending; + return Poll::Pending }, Poll::Ready(_) => { delay.reset(Duration::from_millis(DELAY_TIME)); @@ -441,7 +440,7 @@ where read_block_count, client.info().best_number ); - return Poll::Ready(Ok(())); + return Poll::Ready(Ok(())) } else { // Importing is not done, we still have to wait for the queue to finish. // Wait for the delay, because we know the queue is lagging behind. 
@@ -452,7 +451,7 @@ where read_block_count, delay, }); - return Poll::Pending; + return Poll::Pending }, Poll::Ready(_) => { delay.reset(Duration::from_millis(DELAY_TIME)); @@ -477,7 +476,7 @@ where return Poll::Ready(Err(Error::Other(format!( "Stopping after #{} blocks because of an error", link.imported_blocks - )))); + )))) } cx.waker().wake_by_ref(); diff --git a/substrate/client/service/src/client/block_rules.rs b/substrate/client/service/src/client/block_rules.rs index c8391c0e17b0..532cde1ae78f 100644 --- a/substrate/client/service/src/client/block_rules.rs +++ b/substrate/client/service/src/client/block_rules.rs @@ -61,12 +61,12 @@ impl BlockRules { pub fn lookup(&self, number: NumberFor, hash: &B::Hash) -> LookupResult { if let Some(hash_for_height) = self.forks.get(&number) { if hash_for_height != hash { - return LookupResult::Expected(*hash_for_height); + return LookupResult::Expected(*hash_for_height) } } if self.bad.contains(hash) { - return LookupResult::KnownBad; + return LookupResult::KnownBad } LookupResult::NotSpecial From d9ee3e35f7b505b6f88dcc20deccb882f497e7a3 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 5 Sep 2023 22:12:05 +0200 Subject: [PATCH 12/61] Remove debug logs --- Cargo.lock | 1 - .../src/validate_block/trie_recorder.rs | 2 - .../asset-hub-kusama/src/weights/xcm/mod.rs | 5 +- cumulus/primitives/pov-reclaim/src/lib.rs | 9 +- cumulus/zombienet/examples/small_network.toml | 15 +-- substrate/client/service/src/client/client.rs | 123 ++++++++---------- .../procedural/src/construct_runtime/mod.rs | 2 +- .../api/proc-macro/src/decl_runtime_apis.rs | 2 +- .../api/proc-macro/src/impl_runtime_apis.rs | 2 +- .../proc-macro/src/runtime_interface/mod.rs | 2 +- .../state-machine/src/trie_backend.rs | 10 +- substrate/primitives/trie/src/lib.rs | 4 - substrate/primitives/trie/src/recorder.rs | 10 +- 13 files changed, 74 insertions(+), 113 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9944c52fd81f..a4094e4efc78 100644 --- 
a/Cargo.lock +++ b/Cargo.lock @@ -14612,7 +14612,6 @@ dependencies = [ name = "sc-block-builder" version = "0.10.0-dev" dependencies = [ - "log", "parity-scale-codec", "sc-client-api", "sp-api", diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs index 16948cbe9b4e..ea2efe0976e4 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs @@ -94,7 +94,6 @@ impl sp_trie::TrieRecorderProvider for RecorderProvider Self::Recorder<'_> { - log::info!(target: "skunert", "validate_block: as_trie_recorder"); SizeRecorder { encoded_size: self.encoded_size.borrow_mut(), seen_nodes: self.seen_nodes.borrow_mut(), @@ -102,7 +101,6 @@ impl sp_trie::TrieRecorderProvider for RecorderProvider usize { - log::info!(target: "skunert", "validate_block: estimate_encoded_size"); *self.encoded_size.borrow() } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/mod.rs index 98de7180774a..9aff4902d15b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/mod.rs @@ -40,9 +40,8 @@ impl WeighMultiAssets for MultiAssetFilter { WildFungibility::Fungible => weight, // Magic number 2 has to do with the fact that we could have up to 2 times // MaxAssetsIntoHolding in the worst-case scenario. - WildFungibility::NonFungible => { - weight.saturating_mul((MaxAssetsIntoHolding::get() * 2) as u64) - }, + WildFungibility::NonFungible => + weight.saturating_mul((MaxAssetsIntoHolding::get() * 2) as u64), }, AllCounted(count) => weight.saturating_mul(MAX_ASSETS.min(*count as u64)), AllOfCounted { count, .. 
} => weight.saturating_mul(MAX_ASSETS.min(*count as u64)), diff --git a/cumulus/primitives/pov-reclaim/src/lib.rs b/cumulus/primitives/pov-reclaim/src/lib.rs index 5b6913ebfdc7..f117db58c4bd 100644 --- a/cumulus/primitives/pov-reclaim/src/lib.rs +++ b/cumulus/primitives/pov-reclaim/src/lib.rs @@ -1,18 +1,19 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// Copyright (C) Parity Technologies (UK) Ltd. // This file is part of Cumulus. -// Substrate is free software: you can redistribute it and/or modify +// Cumulus is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// Polkadot is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. + // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . 
-// + #![cfg_attr(not(feature = "std"), no_std)] extern crate sp_runtime_interface; diff --git a/cumulus/zombienet/examples/small_network.toml b/cumulus/zombienet/examples/small_network.toml index 1364a1b0dcc6..06ac0d0e5e78 100644 --- a/cumulus/zombienet/examples/small_network.toml +++ b/cumulus/zombienet/examples/small_network.toml @@ -11,10 +11,6 @@ chain = "rococo-local" name = "bob" validator = true - [[relaychain.nodes]] - name = "charlie" - validator = true - [[parachains]] id = 2000 cumulus_based = true @@ -22,15 +18,8 @@ chain = "asset-hub-kusama-local" # run charlie as parachain collator [[parachains.collators]] - name = "one" - validator = true - image = "parity/polkadot-parachain:latest" - command = "polkadot-parachain" - args = ["--force-authoring -laura=debug,basic-authorship=debug"] - - [[parachains.collators]] - name = "two" + name = "charlie" validator = true image = "parity/polkadot-parachain:latest" command = "polkadot-parachain" - args = ["--force-authoring -laura=debug,basic-authorship=debug"] + args = ["--force-authoring"] diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index 2413ce3d3a94..a01c94edda62 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -429,7 +429,7 @@ where backend.unpin_block(message); } else { log::debug!("Terminating unpin-worker, backend reference was dropped."); - return; + return } } log::debug!("Terminating unpin-worker, stream terminated.") @@ -516,7 +516,7 @@ where } = import_block; if !intermediates.is_empty() { - return Err(Error::IncompletePipeline); + return Err(Error::IncompletePipeline) } let fork_choice = fork_choice.ok_or(Error::IncompletePipeline)?; @@ -611,20 +611,19 @@ where // the block is lower than our last finalized block so it must revert // finality, refusing import. 
- if status == blockchain::BlockStatus::Unknown - && *import_headers.post().number() <= info.finalized_number - && !gap_block + if status == blockchain::BlockStatus::Unknown && + *import_headers.post().number() <= info.finalized_number && + !gap_block { - return Err(sp_blockchain::Error::NotInFinalizedChain); + return Err(sp_blockchain::Error::NotInFinalizedChain) } // this is a fairly arbitrary choice of where to draw the line on making notifications, // but the general goal is to only make notifications when we are already fully synced // and get a new chain head. let make_notifications = match origin { - BlockOrigin::NetworkBroadcast | BlockOrigin::Own | BlockOrigin::ConsensusBroadcast => { - true - }, + BlockOrigin::NetworkBroadcast | BlockOrigin::Own | BlockOrigin::ConsensusBroadcast => + true, BlockOrigin::Genesis | BlockOrigin::NetworkInitialSync | BlockOrigin::File => false, }; @@ -658,14 +657,12 @@ where let storage_key = PrefixedStorageKey::new_ref(&parent_storage); let storage_key = match ChildType::from_prefixed_key(storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => { - storage_key - }, - None => { + Some((ChildType::ParentKeyId, storage_key)) => + storage_key, + None => return Err(Error::Backend( "Invalid child storage key.".to_string(), - )) - }, + )), }; let entry = storage .children_default @@ -690,7 +687,7 @@ where // State root mismatch when importing state. This should not happen in // safe fast sync mode, but may happen in unsafe mode. 
warn!("Error importing state: State root mismatch."); - return Err(Error::InvalidStateRoot); + return Err(Error::InvalidStateRoot) } None }, @@ -713,12 +710,11 @@ where )?; } - let is_new_best = !gap_block - && (finalized - || match fork_choice { - ForkChoiceStrategy::LongestChain => { - import_headers.post().number() > &info.best_number - }, + let is_new_best = !gap_block && + (finalized || + match fork_choice { + ForkChoiceStrategy::LongestChain => + import_headers.post().number() > &info.best_number, ForkChoiceStrategy::Custom(v) => v, }); @@ -842,21 +838,18 @@ where let state_action = std::mem::replace(&mut import_block.state_action, StateAction::Skip); let (enact_state, storage_changes) = match (self.block_status(*parent_hash)?, state_action) { - (BlockStatus::KnownBad, _) => { - return Ok(PrepareStorageChangesResult::Discard(ImportResult::KnownBad)) - }, + (BlockStatus::KnownBad, _) => + return Ok(PrepareStorageChangesResult::Discard(ImportResult::KnownBad)), ( BlockStatus::InChainPruned, StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(_)), ) => return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)), (_, StateAction::ApplyChanges(changes)) => (true, Some(changes)), - (BlockStatus::Unknown, _) => { - return Ok(PrepareStorageChangesResult::Discard(ImportResult::UnknownParent)) - }, + (BlockStatus::Unknown, _) => + return Ok(PrepareStorageChangesResult::Discard(ImportResult::UnknownParent)), (_, StateAction::Skip) => (false, None), - (BlockStatus::InChainPruned, StateAction::Execute) => { - return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)) - }, + (BlockStatus::InChainPruned, StateAction::Execute) => + return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)), (BlockStatus::InChainPruned, StateAction::ExecuteIfPossible) => (false, None), (_, StateAction::Execute) => (true, None), (_, StateAction::ExecuteIfPossible) => (true, None), @@ -865,10 +858,7 @@ where let storage_changes = 
match (enact_state, storage_changes, &import_block.body) { // We have storage changes and should enact the state, so we don't need to do anything // here - (true, changes @ Some(_), _) => { - log::info!(target: "skunert", "Changes were provided, block is not re-executed."); - changes - }, + (true, changes @ Some(_), _) => changes, // We should enact state, but don't have any storage changes, so we need to execute the // block. (true, None, Some(ref body)) => { @@ -893,7 +883,7 @@ where if import_block.header.state_root() != &gen_storage_changes.transaction_storage_root { - return Err(Error::InvalidStateRoot); + return Err(Error::InvalidStateRoot) } Some(sc_consensus::StorageChanges::Changes(gen_storage_changes)) }, @@ -919,7 +909,7 @@ where "Possible safety violation: attempted to re-finalize last finalized block {:?} ", hash, ); - return Ok(()); + return Ok(()) } // Find tree route from last finalized to given block. @@ -933,7 +923,7 @@ where retracted, info.finalized_hash ); - return Err(sp_blockchain::Error::NotInFinalizedChain); + return Err(sp_blockchain::Error::NotInFinalizedChain) } // We may need to coercively update the best block if there is more than one @@ -1013,7 +1003,7 @@ where // since we won't be running the loop below which // would also remove any closed sinks. 
sinks.retain(|sink| !sink.is_closed()); - return Ok(()); + return Ok(()) }, }; @@ -1049,7 +1039,7 @@ where self.every_import_notification_sinks.lock().retain(|sink| !sink.is_closed()); - return Ok(()); + return Ok(()) }, }; @@ -1148,18 +1138,17 @@ where .as_ref() .map_or(false, |importing| &hash == importing) { - return Ok(BlockStatus::Queued); + return Ok(BlockStatus::Queued) } let hash_and_number = self.backend.blockchain().number(hash)?.map(|n| (hash, n)); match hash_and_number { - Some((hash, number)) => { + Some((hash, number)) => if self.backend.have_state_at(hash, number) { Ok(BlockStatus::InChainWithState) } else { Ok(BlockStatus::InChainPruned) - } - }, + }, None => Ok(BlockStatus::Unknown), } } @@ -1195,7 +1184,7 @@ where let genesis_hash = self.backend.blockchain().info().genesis_hash; if genesis_hash == target_hash { - return Ok(Vec::new()); + return Ok(Vec::new()) } let mut current_hash = target_hash; @@ -1211,7 +1200,7 @@ where current_hash = ancestor_hash; if genesis_hash == current_hash { - break; + break } current = ancestor; @@ -1296,15 +1285,14 @@ where size_limit: usize, ) -> sp_blockchain::Result> { if start_key.len() > MAX_NESTED_TRIE_DEPTH { - return Err(Error::Backend("Invalid start key.".to_string())); + return Err(Error::Backend("Invalid start key.".to_string())) } let state = self.state_at(hash)?; let child_info = |storage_key: &Vec| -> sp_blockchain::Result { let storage_key = PrefixedStorageKey::new_ref(storage_key); match ChildType::from_prefixed_key(storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => { - Ok(ChildInfo::new_default(storage_key)) - }, + Some((ChildType::ParentKeyId, storage_key)) => + Ok(ChildInfo::new_default(storage_key)), None => Err(Error::Backend("Invalid child storage key.".to_string())), } }; @@ -1316,7 +1304,7 @@ where { Some((child_info(start_key)?, child_root)) } else { - return Err(Error::Backend("Invalid root start key.".to_string())); + return Err(Error::Backend("Invalid root start 
key.".to_string())) } } else { None @@ -1360,18 +1348,18 @@ where let size = value.len() + next_key.len(); if total_size + size > size_limit && !entries.is_empty() { complete = false; - break; + break } total_size += size; - if current_child.is_none() - && sp_core::storage::well_known_keys::is_child_storage_key(next_key.as_slice()) - && !child_roots.contains(value.as_slice()) + if current_child.is_none() && + sp_core::storage::well_known_keys::is_child_storage_key(next_key.as_slice()) && + !child_roots.contains(value.as_slice()) { child_roots.insert(value.clone()); switch_child_key = Some((next_key.clone(), value.clone())); entries.push((next_key.clone(), value)); - break; + break } entries.push((next_key.clone(), value)); current_key = next_key; @@ -1391,12 +1379,12 @@ where complete, )); if !complete { - break; + break } } else { result[0].0.key_values.extend(entries.into_iter()); result[0].1 = complete; - break; + break } } Ok(result) @@ -1821,7 +1809,7 @@ where match self.block_rules.lookup(number, &hash) { BlockLookupResult::KnownBad => { trace!("Rejecting known bad block: #{} {:?}", number, hash); - return Ok(ImportResult::KnownBad); + return Ok(ImportResult::KnownBad) }, BlockLookupResult::Expected(expected_hash) => { trace!( @@ -1830,7 +1818,7 @@ where expected_hash, number ); - return Ok(ImportResult::KnownBad); + return Ok(ImportResult::KnownBad) }, BlockLookupResult::NotSpecial => {}, } @@ -1841,12 +1829,10 @@ where .block_status(hash) .map_err(|e| ConsensusError::ClientImport(e.to_string()))? 
{ - BlockStatus::InChainWithState | BlockStatus::Queued => { - return Ok(ImportResult::AlreadyInChain) - }, - BlockStatus::InChainPruned if !import_existing => { - return Ok(ImportResult::AlreadyInChain) - }, + BlockStatus::InChainWithState | BlockStatus::Queued => + return Ok(ImportResult::AlreadyInChain), + BlockStatus::InChainPruned if !import_existing => + return Ok(ImportResult::AlreadyInChain), BlockStatus::InChainPruned => {}, BlockStatus::Unknown => {}, BlockStatus::KnownBad => return Ok(ImportResult::KnownBad), @@ -2012,9 +1998,8 @@ where fn block(&self, hash: Block::Hash) -> sp_blockchain::Result>> { Ok(match (self.header(hash)?, self.body(hash)?, self.justifications(hash)?) { - (Some(header), Some(extrinsics), justifications) => { - Some(SignedBlock { block: Block::new(header, extrinsics), justifications }) - }, + (Some(header), Some(extrinsics), justifications) => + Some(SignedBlock { block: Block::new(header, extrinsics), justifications }), _ => None, }) } diff --git a/substrate/frame/support/procedural/src/construct_runtime/mod.rs b/substrate/frame/support/procedural/src/construct_runtime/mod.rs index f42dd837e3a9..38ec0c647944 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/mod.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/mod.rs @@ -252,7 +252,7 @@ pub fn construct_runtime(input: TokenStream) -> TokenStream { let res = res.unwrap_or_else(|e| e.to_compile_error()); let res = expander::Expander::new("construct_runtime") - .dry(std::env::var("FRAME_EXPAND").is_err()) + .dry(std::env::var("EXPAND_MACROS").is_err()) .verbose(true) .write_to_out_dir(res) .expect("Does not fail because of IO in OUT_DIR; qed"); diff --git a/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs index 370735819f94..2b1e65ec8852 100644 --- a/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs +++ b/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs 
@@ -729,7 +729,7 @@ fn decl_runtime_apis_impl_inner(api_decls: &[ItemTrait]) -> Result }; let decl = expander::Expander::new("decl_runtime_apis") - .dry(std::env::var("SP_API_EXPAND").is_err()) + .dry(std::env::var("EXPAND_MACROS").is_err()) .verbose(true) .write_to_out_dir(decl) .expect("Does not fail because of IO in OUT_DIR; qed"); diff --git a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs index 74cfa0980623..1167e9ac3783 100644 --- a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -837,7 +837,7 @@ fn impl_runtime_apis_impl_inner(api_impls: &[ItemImpl]) -> Result { ); let impl_ = expander::Expander::new("impl_runtime_apis") - .dry(std::env::var("SP_API_EXPAND").is_err()) + .dry(std::env::var("EXPAND_MACROS").is_err()) .verbose(true) .write_to_out_dir(impl_) .expect("Does not fail because of IO in OUT_DIR; qed"); diff --git a/substrate/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs b/substrate/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs index 1c4274121aaa..d0cc9e7b96ba 100644 --- a/substrate/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs +++ b/substrate/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs @@ -69,7 +69,7 @@ pub fn runtime_interface_impl( }; let res = expander::Expander::new("runtime_interface") - .dry(std::env::var("SP_RUNTIME_INTERFACE_EXPAND").is_err()) + .dry(std::env::var("EXPAND_MACROS").is_err()) .verbose(true) .write_to_out_dir(res) .expect("Does not fail because of IO in OUT_DIR; qed"); diff --git a/substrate/primitives/state-machine/src/trie_backend.rs b/substrate/primitives/state-machine/src/trie_backend.rs index 3fc8391f94d8..11844917b767 100644 --- a/substrate/primitives/state-machine/src/trie_backend.rs +++ b/substrate/primitives/state-machine/src/trie_backend.rs @@ -155,7 +155,7 @@ impl 
TrieCacheProvider for UnimplementedCacheProvider { #[cfg(not(feature = "std"))] pub struct UnimplementedRecorderProvider { // Not strictly necessary, but the H bound allows to use this as a drop-in - // replacement for the `LocalTrieCache` in no-std contexts. + // replacement for the [`sp_trie::recorder::Recorder`] in no-std contexts. _phantom: core::marker::PhantomData, // Statically prevents construction. _infallible: core::convert::Infallible, @@ -164,11 +164,11 @@ pub struct UnimplementedRecorderProvider { #[cfg(not(feature = "std"))] impl trie_db::TrieRecorder for UnimplementedRecorderProvider { fn record<'a>(&mut self, access: trie_db::TrieAccess<'a, H::Out>) { - todo!() + unimplemented!() } fn trie_nodes_recorded_for_key(&self, key: &[u8]) -> trie_db::RecordedForKey { - todo!() + unimplemented!() } } @@ -1000,8 +1000,8 @@ pub mod tests { .storage_root(iter::once((&b"new-key"[..], Some(&b"new-value"[..]))), state_version); assert!(!tx.drain().is_empty()); assert!( - new_root - != test_trie(state_version, None, None) + new_root != + test_trie(state_version, None, None) .storage_root(iter::empty(), state_version) .0 ); diff --git a/substrate/primitives/trie/src/lib.rs b/substrate/primitives/trie/src/lib.rs index f5edbd2c488f..ba817f0a63e4 100644 --- a/substrate/primitives/trie/src/lib.rs +++ b/substrate/primitives/trie/src/lib.rs @@ -30,10 +30,6 @@ mod storage_proof; mod trie_codec; mod trie_stream; -pub trait ProofSizeEstimationProvider { - fn estimate_proof_size(&self) -> usize; -} - /// Our `NodeCodec`-specific error. pub use error::Error; /// Various re-exports from the `hash-db` crate. 
diff --git a/substrate/primitives/trie/src/recorder.rs b/substrate/primitives/trie/src/recorder.rs index 000ff8383341..28f029f75344 100644 --- a/substrate/primitives/trie/src/recorder.rs +++ b/substrate/primitives/trie/src/recorder.rs @@ -244,11 +244,11 @@ impl crate::TrieRecorderProvider for Recorder { } fn as_trie_recorder(&self, storage_root: H::Out) -> Self::Recorder<'_> { - self.as_trie_recorder(storage_root) + Recorder::as_trie_recorder(&self, storage_root) } fn estimate_encoded_size(&self) -> usize { - self.estimate_encoded_size() + Recorder::estimate_encoded_size(&self) } } @@ -404,12 +404,6 @@ impl<'a, H: Hasher> trie_db::TrieRecorder for TrieRecorder<'a, H> { } } -impl crate::ProofSizeEstimationProvider for Recorder { - fn estimate_proof_size(&self) -> usize { - self.estimate_encoded_size() - } -} - #[cfg(test)] mod tests { use super::*; From 678bb9725b93639d2f8a41872f9ab9a496a22c05 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Wed, 6 Sep 2023 08:03:35 +0200 Subject: [PATCH 13/61] Implement `trie_nodes_recorded_for_key` --- .../src/validate_block/trie_recorder.rs | 39 +++++++++++++++---- 1 file changed, 32 insertions(+), 7 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs index ea2efe0976e4..cd33ee5765d9 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs @@ -28,8 +28,10 @@ use sp_runtime::traits::{Block as BlockT, Extrinsic, HashingFor, Header as Heade use sp_std::{ boxed::Box, cell::{RefCell, RefMut}, + collections::btree_map::BTreeMap, collections::btree_set::BTreeSet, prelude::*, + sync::Arc, }; use sp_trie::{MemoryDB, NodeCodec, StorageProof}; use trie_db::{Hasher, RecordedForKey, TrieAccess}; @@ -38,18 +40,20 @@ use trie_db::{Hasher, RecordedForKey, TrieAccess}; pub(crate) struct SizeRecorder<'a, H: Hasher> { seen_nodes: 
RefMut<'a, BTreeSet>, encoded_size: RefMut<'a, usize>, + recorded_keys: RefMut<'a, BTreeMap, RecordedForKey>>, } impl<'a, H: trie_db::Hasher> trie_db::TrieRecorder for SizeRecorder<'a, H> { fn record<'b>(&mut self, access: TrieAccess<'b, H::Out>) { let mut encoded_size_update = 0; match access { - TrieAccess::NodeOwned { hash, node_owned } => + TrieAccess::NodeOwned { hash, node_owned } => { if !self.seen_nodes.get(&hash).is_some() { let node = node_owned.to_encoded::>(); encoded_size_update += node.encoded_size(); self.seen_nodes.insert(hash); - }, + } + }, TrieAccess::EncodedNode { hash, encoded_node } => { if !self.seen_nodes.get(&hash).is_some() { let node = encoded_node.into_owned(); @@ -57,32 +61,52 @@ impl<'a, H: trie_db::Hasher> trie_db::TrieRecorder for SizeRecorder<'a, self.seen_nodes.insert(hash); } }, - TrieAccess::Value { hash, value, .. } => + TrieAccess::Value { hash, value, full_key } => { if !self.seen_nodes.get(&hash).is_some() { let value = value.into_owned(); encoded_size_update += value.encoded_size(); self.seen_nodes.insert(hash); - }, - TrieAccess::Hash { .. } | TrieAccess::NonExisting { .. 
} => {}, + } + self.recorded_keys + .entry(full_key.into()) + .and_modify(|e| *e = RecordedForKey::Value) + .or_insert_with(|| RecordedForKey::Value); + }, + TrieAccess::Hash { full_key } => { + self.recorded_keys + .entry(full_key.into()) + .or_insert_with(|| RecordedForKey::Hash); + }, + TrieAccess::NonExisting { full_key } => { + self.recorded_keys + .entry(full_key.into()) + .and_modify(|e| *e = RecordedForKey::Value) + .or_insert_with(|| RecordedForKey::Value); + }, }; *self.encoded_size += encoded_size_update; } fn trie_nodes_recorded_for_key(&self, key: &[u8]) -> RecordedForKey { - RecordedForKey::None + self.recorded_keys.get(key).copied().unwrap_or(RecordedForKey::None) } } pub(crate) struct RecorderProvider { seen_nodes: RefCell>, encoded_size: RefCell, + recorded_keys: RefCell, RecordedForKey>>, } impl RecorderProvider { pub fn new() -> Self { - Self { seen_nodes: Default::default(), encoded_size: Default::default() } + Self { + seen_nodes: Default::default(), + encoded_size: Default::default(), + recorded_keys: Default::default(), + } } } @@ -97,6 +121,7 @@ impl sp_trie::TrieRecorderProvider for RecorderProvider Date: Wed, 6 Sep 2023 20:40:23 +0200 Subject: [PATCH 14/61] Improve validate_block bench --- cumulus/test/client/Cargo.toml | 1 + cumulus/test/client/src/lib.rs | 3 ++- cumulus/test/service/benches/validate_block.rs | 7 +++++-- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/cumulus/test/client/Cargo.toml b/cumulus/test/client/Cargo.toml index 290cfd7e4d89..e4c281267bbb 100644 --- a/cumulus/test/client/Cargo.toml +++ b/cumulus/test/client/Cargo.toml @@ -36,4 +36,5 @@ cumulus-test-runtime = { path = "../runtime" } cumulus-test-service = { path = "../service" } cumulus-test-relay-sproof-builder = { path = "../relay-sproof-builder" } cumulus-primitives-core = { path = "../../primitives/core" } +cumulus-primitives-reclaim = { path = "../../primitives/pov-reclaim" } cumulus-primitives-parachain-inherent = { path = 
"../../primitives/parachain-inherent" } diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs index 61249bdb066a..1791f8ca3ef3 100644 --- a/cumulus/test/client/src/lib.rs +++ b/cumulus/test/client/src/lib.rs @@ -44,7 +44,8 @@ mod local_executor { pub struct LocalExecutor; impl sc_executor::NativeExecutionDispatch for LocalExecutor { - type ExtendHostFunctions = (); + type ExtendHostFunctions = + cumulus_primitives_reclaim::pov_reclaim_host_functions::HostFunctions; fn dispatch(method: &str, data: &[u8]) -> Option> { cumulus_test_runtime::api::dispatch(method, data) diff --git a/cumulus/test/service/benches/validate_block.rs b/cumulus/test/service/benches/validate_block.rs index f3b4d0b12144..9dc672c50fd1 100644 --- a/cumulus/test/service/benches/validate_block.rs +++ b/cumulus/test/service/benches/validate_block.rs @@ -94,7 +94,11 @@ fn benchmark_block_validation(c: &mut Criterion) { ..Default::default() }; - let mut block_builder = client.init_block_builder(Some(validation_data), Default::default()); + let mut sproof_builder: RelayStateSproofBuilder = Default::default(); + sproof_builder.included_para_head = Some(parent_header.clone().encode().into()); + + let mut block_builder = + client.init_block_builder(Some(validation_data), sproof_builder.clone()); for extrinsic in extrinsics { block_builder.push(extrinsic).unwrap(); } @@ -104,7 +108,6 @@ fn benchmark_block_validation(c: &mut Criterion) { let proof_size_in_kb = parachain_block.storage_proof().encode().len() as f64 / 1024f64; let runtime = utils::get_wasm_module(); - let sproof_builder: RelayStateSproofBuilder = Default::default(); let (relay_parent_storage_root, _) = sproof_builder.into_state_root_and_proof(); let encoded_params = ValidationParams { block_data: cumulus_test_client::BlockData(parachain_block.encode()), From 9ebc861892477d1d345c39e3eb5ecf0a624e5ae1 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Thu, 7 Sep 2023 14:19:45 +0200 Subject: [PATCH 15/61] Move client 
parameter to config --- Cargo.lock | 1 + .../src/validate_block/trie_recorder.rs | 2 +- cumulus/primitives/pov-reclaim/src/lib.rs | 1 - cumulus/test/service/benches/block_import.rs | 80 +++++++++++-------- .../test/service/benches/validate_block.rs | 8 +- cumulus/test/service/src/bench_utils.rs | 7 +- cumulus/test/service/src/lib.rs | 4 +- substrate/bin/node/testing/src/bench.rs | 1 - substrate/client/service/src/builder.rs | 4 +- substrate/client/service/src/client/client.rs | 11 ++- substrate/test-utils/client/src/lib.rs | 1 - 11 files changed, 69 insertions(+), 51 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a4094e4efc78..87ddcd6da1be 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3873,6 +3873,7 @@ version = "0.1.0" dependencies = [ "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", + "cumulus-primitives-reclaim", "cumulus-test-relay-sproof-builder", "cumulus-test-runtime", "cumulus-test-service", diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs index cd33ee5765d9..e618bc0f857b 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs @@ -114,7 +114,7 @@ impl sp_trie::TrieRecorderProvider for RecorderProvider = SizeRecorder<'a, H> where H: 'a; fn drain_storage_proof(self) -> StorageProof { - panic!("Tried to drain storage proof") + unimplemented!("Draining storage proof not supported!") } fn as_trie_recorder(&self, storage_root: H::Out) -> Self::Recorder<'_> { diff --git a/cumulus/primitives/pov-reclaim/src/lib.rs b/cumulus/primitives/pov-reclaim/src/lib.rs index f117db58c4bd..212dfed1c2be 100644 --- a/cumulus/primitives/pov-reclaim/src/lib.rs +++ b/cumulus/primitives/pov-reclaim/src/lib.rs @@ -23,7 +23,6 @@ use sp_runtime_interface::runtime_interface; #[runtime_interface] pub trait PovReclaimHostFunctions { fn 
current_storage_proof_size(&mut self) -> u32 { - tracing::info!(target:"skunert", "current_storage_proof_size is called"); self.proof_size().unwrap_or_default() } } diff --git a/cumulus/test/service/benches/block_import.rs b/cumulus/test/service/benches/block_import.rs index b79598b15302..85a681813785 100644 --- a/cumulus/test/service/benches/block_import.rs +++ b/cumulus/test/service/benches/block_import.rs @@ -24,7 +24,7 @@ use cumulus_primitives_core::ParaId; use sc_block_builder::{BlockBuilderProvider, RecordProof}; use sp_api::{Core, ProvideRuntimeApi}; -use sp_keyring::Sr25519Keyring::Alice; +use sp_keyring::Sr25519Keyring::{Alice, Bob}; use cumulus_test_service::bench_utils as utils; @@ -32,47 +32,61 @@ fn benchmark_block_import(c: &mut Criterion) { sp_tracing::try_init_simple(); let runtime = tokio::runtime::Runtime::new().expect("creating tokio runtime doesn't fail; qed"); - let para_id = ParaId::from(100); + + let para_id = ParaId::from(cumulus_test_runtime::PARACHAIN_ID); let tokio_handle = runtime.handle(); // Create enough accounts to fill the block with transactions. // Each account should only be included in one transfer. 
let (src_accounts, dst_accounts, account_ids) = utils::create_benchmark_accounts(); - let alice = runtime.block_on( - cumulus_test_service::TestNodeBuilder::new(para_id, tokio_handle.clone(), Alice) + for bench_parameters in &[(true, Alice), (false, Bob)] { + let alice = runtime.block_on( + cumulus_test_service::TestNodeBuilder::new( + para_id, + tokio_handle.clone(), + bench_parameters.1, + ) // Preload all accounts with funds for the transfers - .endowed_accounts(account_ids) + .endowed_accounts(account_ids.clone()) + .import_proof_recording(bench_parameters.0) .build(), - ); - - let client = alice.client; - - let (max_transfer_count, extrinsics) = - utils::create_benchmarking_transfer_extrinsics(&client, &src_accounts, &dst_accounts); - - let parent_hash = client.usage_info().chain.best_hash; - let mut block_builder = - client.new_block_at(parent_hash, Default::default(), RecordProof::No).unwrap(); - for extrinsic in extrinsics { - block_builder.push(extrinsic).unwrap(); - } - let benchmark_block = block_builder.build().unwrap(); - - let mut group = c.benchmark_group("Block import"); - group.sample_size(20); - group.measurement_time(Duration::from_secs(120)); - group.throughput(Throughput::Elements(max_transfer_count as u64)); - - group.bench_function(format!("(transfers = {}) block import", max_transfer_count), |b| { - b.iter_batched( - || benchmark_block.block.clone(), - |block| { - client.runtime_api().execute_block(parent_hash, block).unwrap(); + ); + + let client = alice.client; + + let (max_transfer_count, extrinsics) = + utils::create_benchmarking_transfer_extrinsics(&client, &src_accounts, &dst_accounts); + + let parent_hash = client.usage_info().chain.best_hash; + let mut block_builder = + client.new_block_at(parent_hash, Default::default(), RecordProof::No).unwrap(); + for extrinsic in extrinsics { + block_builder.push(extrinsic).unwrap(); + } + let benchmark_block = block_builder.build().unwrap(); + + let mut group = c.benchmark_group("Block import"); 
+ group.sample_size(20); + group.measurement_time(Duration::from_secs(120)); + group.throughput(Throughput::Elements(max_transfer_count as u64)); + + group.bench_function( + format!( + "(transfers = {max_transfer_count}, import = {}) block import", + bench_parameters.0 + ), + |b| { + b.iter_batched( + || benchmark_block.block.clone(), + |block| { + client.runtime_api().execute_block(parent_hash, block).unwrap(); + }, + BatchSize::SmallInput, + ) }, - BatchSize::SmallInput, - ) - }); + ); + } } criterion_group!(benches, benchmark_block_import); diff --git a/cumulus/test/service/benches/validate_block.rs b/cumulus/test/service/benches/validate_block.rs index 9dc672c50fd1..abcf7ad8e625 100644 --- a/cumulus/test/service/benches/validate_block.rs +++ b/cumulus/test/service/benches/validate_block.rs @@ -79,6 +79,7 @@ fn benchmark_block_validation(c: &mut Criterion) { // Each account should only be included in one transfer. let (src_accounts, dst_accounts, account_ids) = utils::create_benchmark_accounts(); + let para_id = ParaId::from(cumulus_test_runtime::PARACHAIN_ID); let mut test_client_builder = TestClientBuilder::with_default_backend(); let genesis_init = test_client_builder.genesis_init_mut(); *genesis_init = cumulus_test_client::GenesisParameters { endowed_accounts: account_ids }; @@ -94,8 +95,11 @@ fn benchmark_block_validation(c: &mut Criterion) { ..Default::default() }; - let mut sproof_builder: RelayStateSproofBuilder = Default::default(); - sproof_builder.included_para_head = Some(parent_header.clone().encode().into()); + let mut sproof_builder = RelayStateSproofBuilder { + included_para_head: Some(parent_header.clone().encode().into()), + para_id, + ..Default::default() + }; let mut block_builder = client.init_block_builder(Some(validation_data), sproof_builder.clone()); diff --git a/cumulus/test/service/src/bench_utils.rs b/cumulus/test/service/src/bench_utils.rs index 172c9e504196..1e9bda6155ff 100644 --- a/cumulus/test/service/src/bench_utils.rs +++ 
b/cumulus/test/service/src/bench_utils.rs @@ -81,8 +81,13 @@ pub fn extrinsic_set_time(client: &TestClient) -> OpaqueExtrinsic { pub fn extrinsic_set_validation_data( parent_header: cumulus_test_runtime::Header, ) -> OpaqueExtrinsic { - let sproof_builder = RelayStateSproofBuilder { para_id: 100.into(), ..Default::default() }; let parent_head = HeadData(parent_header.encode()); + let sproof_builder = RelayStateSproofBuilder { + para_id: cumulus_test_runtime::PARACHAIN_ID.into(), + included_para_head: parent_head.clone().into(), + ..Default::default() + }; + let (relay_parent_storage_root, relay_chain_state) = sproof_builder.into_state_root_and_proof(); let data = ParachainInherentData { validation_data: PersistedValidationData { diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index 6dc19a8e4564..16866eeb1dcd 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -662,8 +662,8 @@ impl TestNodeBuilder { } /// Record proofs during import. - pub fn disable_import_proof_recording(mut self) -> TestNodeBuilder { - self.record_proof_during_import = false; + pub fn import_proof_recording(mut self, should_record_proof: bool) -> TestNodeBuilder { + self.record_proof_during_import = should_record_proof; self } diff --git a/substrate/bin/node/testing/src/bench.rs b/substrate/bin/node/testing/src/bench.rs index 2ef3252762fb..915e1e3cd4b2 100644 --- a/substrate/bin/node/testing/src/bench.rs +++ b/substrate/bin/node/testing/src/bench.rs @@ -418,7 +418,6 @@ impl BenchDb { None, None, client_config, - false, ) .expect("Should not fail"); diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs index b86311607549..167b2dff6753 100644 --- a/substrate/client/service/src/builder.rs +++ b/substrate/client/service/src/builder.rs @@ -247,8 +247,8 @@ where SyncMode::LightState { .. } | SyncMode::Warp { .. 
} ), wasm_runtime_substitutes, + enable_import_proof_recording, }, - enable_import_proof_recording, )?; client @@ -302,7 +302,6 @@ pub fn new_client( prometheus_registry: Option, telemetry: Option, config: ClientConfig, - enable_import_proof_recording: bool, ) -> Result< Client< Backend, @@ -337,7 +336,6 @@ where prometheus_registry, telemetry, config, - enable_import_proof_recording, ) } diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index a01c94edda62..8b87c3f5a859 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -116,7 +116,6 @@ where config: ClientConfig, telemetry: Option, unpin_worker_sender: TracingUnboundedSender, - enable_import_proof_recording: bool, _phantom: PhantomData, } @@ -186,7 +185,7 @@ where ) } -/// Relevant client configuration items relevant for the client. +/// Client configuration items. #[derive(Debug, Clone)] pub struct ClientConfig { /// Enable the offchain worker db. @@ -200,6 +199,8 @@ pub struct ClientConfig { /// Map of WASM runtime substitute starting at the child of the given block until the runtime /// version doesn't match anymore. 
pub wasm_runtime_substitutes: HashMap, Vec>, + /// Enable recording of storage proofs during block import + pub enable_import_proof_recording: bool, } impl Default for ClientConfig { @@ -210,6 +211,7 @@ impl Default for ClientConfig { wasm_runtime_overrides: None, no_genesis: false, wasm_runtime_substitutes: HashMap::new(), + enable_import_proof_recording: false, } } } @@ -250,7 +252,6 @@ where prometheus_registry, telemetry, config, - false, ) } @@ -388,7 +389,6 @@ where prometheus_registry: Option, telemetry: Option, config: ClientConfig, - enable_import_proof_recording: bool, ) -> sp_blockchain::Result where G: BuildGenesisBlock< @@ -451,7 +451,6 @@ where config, telemetry, unpin_worker_sender, - enable_import_proof_recording, _phantom: Default::default(), }) } @@ -866,7 +865,7 @@ where runtime_api.set_call_context(CallContext::Onchain); - if self.enable_import_proof_recording { + if self.config.enable_import_proof_recording { log::info!(target:"skunert", "Block import with proof recording."); runtime_api.record_proof(); } diff --git a/substrate/test-utils/client/src/lib.rs b/substrate/test-utils/client/src/lib.rs index eace869b2961..192580c8447d 100644 --- a/substrate/test-utils/client/src/lib.rs +++ b/substrate/test-utils/client/src/lib.rs @@ -227,7 +227,6 @@ impl None, None, client_config, - false, ) .expect("Creates new client"); From 99ccf64c9a4df8fd6192ff662136a9b41433d4cd Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Thu, 7 Sep 2023 15:33:55 +0200 Subject: [PATCH 16/61] Remove unwanted changes --- Cargo.lock | 4 +- cumulus/pallets/parachain-system/Cargo.toml | 1 - .../src/validate_block/implementation.rs | 2 - cumulus/parachains/common/src/impls.rs | 4 +- .../assets/asset-hub-kusama/src/lib.rs | 149 ++++---- .../assets/asset-hub-kusama/src/xcm_config.rs | 354 +++++++++--------- cumulus/primitives/pov-reclaim/Cargo.toml | 7 +- cumulus/primitives/pov-reclaim/src/lib.rs | 2 - cumulus/test/service/src/lib.rs | 10 +- 
substrate/bin/node/cli/src/service.rs | 2 +- substrate/bin/node/testing/src/bench.rs | 18 +- substrate/client/service/src/builder.rs | 22 +- substrate/client/service/src/client/client.rs | 119 +++--- .../service/src/client/wasm_override.rs | 4 +- substrate/client/service/src/lib.rs | 13 +- .../state-machine/src/trie_backend_essence.rs | 20 +- substrate/test-utils/client/src/lib.rs | 4 +- 17 files changed, 364 insertions(+), 371 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5fc218290534..b3bb6a623bfd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3555,7 +3555,6 @@ dependencies = [ "polkadot-parachain-primitives", "sc-client-api", "scale-info", - "sp-api", "sp-core", "sp-externalities", "sp-inherents", @@ -3716,10 +3715,9 @@ dependencies = [ [[package]] name = "cumulus-primitives-reclaim" -version = "0.0.1" +version = "0.1.0" dependencies = [ "sp-runtime-interface", - "tracing", ] [[package]] diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 85e34c5e6244..689295437daa 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -21,7 +21,6 @@ sp-core = { path = "../../../substrate/primitives/core", default-features = fals sp-externalities = { path = "../../../substrate/primitives/externalities", default-features = false} sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false} sp-io = { path = "../../../substrate/primitives/io", default-features = false} -sp-api = { path = "../../../substrate/primitives/api", default-features = false} sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} sp-state-machine = { path = "../../../substrate/primitives/state-machine", default-features = false} sp-std = { path = "../../../substrate/primitives/std", default-features = false} diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs 
b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index 3c609c4bbd41..7dd339b211f6 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -93,11 +93,9 @@ where B::Extrinsic: ExtrinsicCall, ::Call: IsSubType>, { - sp_api::init_runtime_logger(); let block_data = codec::decode_from_bytes::>(block_data) .expect("Invalid parachain block data"); - log::info!(target:"skunert", "Hello World from validate block!"); let parent_header = codec::decode_from_bytes::(parent_head.clone()).expect("Invalid parent head"); diff --git a/cumulus/parachains/common/src/impls.rs b/cumulus/parachains/common/src/impls.rs index 73ff884e151a..f78cdcb00e7e 100644 --- a/cumulus/parachains/common/src/impls.rs +++ b/cumulus/parachains/common/src/impls.rs @@ -122,8 +122,8 @@ pub struct AssetsFrom(PhantomData); impl> ContainsPair for AssetsFrom { fn contains(asset: &MultiAsset, origin: &MultiLocation) -> bool { let loc = T::get(); - &loc == origin - && matches!(asset, MultiAsset { id: AssetId::Concrete(asset_loc), fun: Fungible(_a) } + &loc == origin && + matches!(asset, MultiAsset { id: AssetId::Concrete(asset_loc), fun: Fungible(_a) } if asset_loc.match_and_split(&loc).is_some()) } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs index 5d52a91b0f03..58d3c85bbc9d 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs @@ -477,98 +477,97 @@ impl InstanceFilter for ProxyType { ProxyType::Any => true, ProxyType::NonTransfer => !matches!( c, - RuntimeCall::Balances { .. } - | RuntimeCall::Assets { .. } - | RuntimeCall::NftFractionalization { .. } - | RuntimeCall::Nfts { .. } - | RuntimeCall::Uniques { .. } + RuntimeCall::Balances { .. } | + RuntimeCall::Assets { .. 
} | + RuntimeCall::NftFractionalization { .. } | + RuntimeCall::Nfts { .. } | + RuntimeCall::Uniques { .. } ), ProxyType::CancelProxy => matches!( c, - RuntimeCall::Proxy(pallet_proxy::Call::reject_announcement { .. }) - | RuntimeCall::Utility { .. } - | RuntimeCall::Multisig { .. } + RuntimeCall::Proxy(pallet_proxy::Call::reject_announcement { .. }) | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } ), ProxyType::Assets => { matches!( c, - RuntimeCall::Assets { .. } - | RuntimeCall::Utility { .. } - | RuntimeCall::Multisig { .. } - | RuntimeCall::NftFractionalization { .. } - | RuntimeCall::Nfts { .. } - | RuntimeCall::Uniques { .. } + RuntimeCall::Assets { .. } | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } | + RuntimeCall::NftFractionalization { .. } | + RuntimeCall::Nfts { .. } | RuntimeCall::Uniques { .. } ) }, ProxyType::AssetOwner => matches!( c, - RuntimeCall::Assets(TrustBackedAssetsCall::create { .. }) - | RuntimeCall::Assets(TrustBackedAssetsCall::start_destroy { .. }) - | RuntimeCall::Assets(TrustBackedAssetsCall::destroy_accounts { .. }) - | RuntimeCall::Assets(TrustBackedAssetsCall::destroy_approvals { .. }) - | RuntimeCall::Assets(TrustBackedAssetsCall::finish_destroy { .. }) - | RuntimeCall::Assets(TrustBackedAssetsCall::transfer_ownership { .. }) - | RuntimeCall::Assets(TrustBackedAssetsCall::set_team { .. }) - | RuntimeCall::Assets(TrustBackedAssetsCall::set_metadata { .. }) - | RuntimeCall::Assets(TrustBackedAssetsCall::clear_metadata { .. }) - | RuntimeCall::Assets(TrustBackedAssetsCall::set_min_balance { .. }) - | RuntimeCall::Nfts(pallet_nfts::Call::create { .. }) - | RuntimeCall::Nfts(pallet_nfts::Call::destroy { .. }) - | RuntimeCall::Nfts(pallet_nfts::Call::redeposit { .. }) - | RuntimeCall::Nfts(pallet_nfts::Call::transfer_ownership { .. }) - | RuntimeCall::Nfts(pallet_nfts::Call::set_team { .. }) - | RuntimeCall::Nfts(pallet_nfts::Call::set_collection_max_supply { .. 
}) - | RuntimeCall::Nfts(pallet_nfts::Call::lock_collection { .. }) - | RuntimeCall::Uniques(pallet_uniques::Call::create { .. }) - | RuntimeCall::Uniques(pallet_uniques::Call::destroy { .. }) - | RuntimeCall::Uniques(pallet_uniques::Call::transfer_ownership { .. }) - | RuntimeCall::Uniques(pallet_uniques::Call::set_team { .. }) - | RuntimeCall::Uniques(pallet_uniques::Call::set_metadata { .. }) - | RuntimeCall::Uniques(pallet_uniques::Call::set_attribute { .. }) - | RuntimeCall::Uniques(pallet_uniques::Call::set_collection_metadata { .. }) - | RuntimeCall::Uniques(pallet_uniques::Call::clear_metadata { .. }) - | RuntimeCall::Uniques(pallet_uniques::Call::clear_attribute { .. }) - | RuntimeCall::Uniques(pallet_uniques::Call::clear_collection_metadata { .. }) - | RuntimeCall::Uniques(pallet_uniques::Call::set_collection_max_supply { .. }) - | RuntimeCall::Utility { .. } - | RuntimeCall::Multisig { .. } + RuntimeCall::Assets(TrustBackedAssetsCall::create { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::start_destroy { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::destroy_accounts { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::destroy_approvals { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::finish_destroy { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::transfer_ownership { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::set_team { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::set_metadata { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::clear_metadata { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::set_min_balance { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::create { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::destroy { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::redeposit { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::transfer_ownership { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::set_team { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::set_collection_max_supply { .. 
}) | + RuntimeCall::Nfts(pallet_nfts::Call::lock_collection { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::create { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::destroy { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::transfer_ownership { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::set_team { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::set_metadata { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::set_attribute { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::set_collection_metadata { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::clear_metadata { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::clear_attribute { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::clear_collection_metadata { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::set_collection_max_supply { .. }) | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } ), ProxyType::AssetManager => matches!( c, - RuntimeCall::Assets(TrustBackedAssetsCall::mint { .. }) - | RuntimeCall::Assets(TrustBackedAssetsCall::burn { .. }) - | RuntimeCall::Assets(TrustBackedAssetsCall::freeze { .. }) - | RuntimeCall::Assets(TrustBackedAssetsCall::block { .. }) - | RuntimeCall::Assets(TrustBackedAssetsCall::thaw { .. }) - | RuntimeCall::Assets(TrustBackedAssetsCall::freeze_asset { .. }) - | RuntimeCall::Assets(TrustBackedAssetsCall::thaw_asset { .. }) - | RuntimeCall::Assets(TrustBackedAssetsCall::touch_other { .. }) - | RuntimeCall::Assets(TrustBackedAssetsCall::refund_other { .. }) - | RuntimeCall::Nfts(pallet_nfts::Call::force_mint { .. }) - | RuntimeCall::Nfts(pallet_nfts::Call::update_mint_settings { .. }) - | RuntimeCall::Nfts(pallet_nfts::Call::mint_pre_signed { .. }) - | RuntimeCall::Nfts(pallet_nfts::Call::set_attributes_pre_signed { .. }) - | RuntimeCall::Nfts(pallet_nfts::Call::lock_item_transfer { .. }) - | RuntimeCall::Nfts(pallet_nfts::Call::unlock_item_transfer { .. 
}) - | RuntimeCall::Nfts(pallet_nfts::Call::lock_item_properties { .. }) - | RuntimeCall::Nfts(pallet_nfts::Call::set_metadata { .. }) - | RuntimeCall::Nfts(pallet_nfts::Call::clear_metadata { .. }) - | RuntimeCall::Nfts(pallet_nfts::Call::set_collection_metadata { .. }) - | RuntimeCall::Nfts(pallet_nfts::Call::clear_collection_metadata { .. }) - | RuntimeCall::Uniques(pallet_uniques::Call::mint { .. }) - | RuntimeCall::Uniques(pallet_uniques::Call::burn { .. }) - | RuntimeCall::Uniques(pallet_uniques::Call::freeze { .. }) - | RuntimeCall::Uniques(pallet_uniques::Call::thaw { .. }) - | RuntimeCall::Uniques(pallet_uniques::Call::freeze_collection { .. }) - | RuntimeCall::Uniques(pallet_uniques::Call::thaw_collection { .. }) - | RuntimeCall::Utility { .. } - | RuntimeCall::Multisig { .. } + RuntimeCall::Assets(TrustBackedAssetsCall::mint { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::burn { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::freeze { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::block { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::thaw { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::freeze_asset { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::thaw_asset { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::touch_other { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::refund_other { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::force_mint { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::update_mint_settings { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::mint_pre_signed { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::set_attributes_pre_signed { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::lock_item_transfer { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::unlock_item_transfer { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::lock_item_properties { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::set_metadata { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::clear_metadata { .. 
}) | + RuntimeCall::Nfts(pallet_nfts::Call::set_collection_metadata { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::clear_collection_metadata { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::mint { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::burn { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::freeze { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::thaw { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::freeze_collection { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::thaw_collection { .. }) | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } ), ProxyType::Collator => matches!( c, - RuntimeCall::CollatorSelection { .. } - | RuntimeCall::Utility { .. } - | RuntimeCall::Multisig { .. } + RuntimeCall::CollatorSelection { .. } | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } ), } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/xcm_config.rs index 994905d76a86..0c197598f889 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/xcm_config.rs @@ -245,194 +245,194 @@ impl Contains for SafeCallFilter { #[cfg(feature = "runtime-benchmarks")] { if matches!(call, RuntimeCall::System(frame_system::Call::remark_with_event { .. })) { - return true; + return true } } matches!( call, - RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. }) - | RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } - | frame_system::Call::set_code { .. } - | frame_system::Call::set_code_without_checks { .. } - | frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) - | RuntimeCall::Timestamp(..) - | RuntimeCall::Balances(..) - | RuntimeCall::CollatorSelection( - pallet_collator_selection::Call::set_desired_candidates { .. } - | pallet_collator_selection::Call::set_candidacy_bond { .. 
} - | pallet_collator_selection::Call::register_as_candidate { .. } - | pallet_collator_selection::Call::leave_intent { .. } - | pallet_collator_selection::Call::set_invulnerables { .. } - | pallet_collator_selection::Call::add_invulnerable { .. } - | pallet_collator_selection::Call::remove_invulnerable { .. }, - ) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) - | RuntimeCall::XcmpQueue(..) - | RuntimeCall::DmpQueue(..) - | RuntimeCall::Assets( - pallet_assets::Call::create { .. } - | pallet_assets::Call::force_create { .. } - | pallet_assets::Call::start_destroy { .. } - | pallet_assets::Call::destroy_accounts { .. } - | pallet_assets::Call::destroy_approvals { .. } - | pallet_assets::Call::finish_destroy { .. } - | pallet_assets::Call::block { .. } - | pallet_assets::Call::mint { .. } - | pallet_assets::Call::burn { .. } - | pallet_assets::Call::transfer { .. } - | pallet_assets::Call::transfer_keep_alive { .. } - | pallet_assets::Call::force_transfer { .. } - | pallet_assets::Call::freeze { .. } - | pallet_assets::Call::thaw { .. } - | pallet_assets::Call::freeze_asset { .. } - | pallet_assets::Call::thaw_asset { .. } - | pallet_assets::Call::transfer_ownership { .. } - | pallet_assets::Call::set_team { .. } - | pallet_assets::Call::set_metadata { .. } - | pallet_assets::Call::clear_metadata { .. } - | pallet_assets::Call::force_set_metadata { .. } - | pallet_assets::Call::force_clear_metadata { .. } - | pallet_assets::Call::force_asset_status { .. } - | pallet_assets::Call::approve_transfer { .. } - | pallet_assets::Call::cancel_approval { .. } - | pallet_assets::Call::force_cancel_approval { .. } - | pallet_assets::Call::transfer_approved { .. } - | pallet_assets::Call::touch { .. } - | pallet_assets::Call::touch_other { .. } - | pallet_assets::Call::refund { .. } - | pallet_assets::Call::refund_other { .. }, + RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. 
}) | + RuntimeCall::System( + frame_system::Call::set_heap_pages { .. } | + frame_system::Call::set_code { .. } | + frame_system::Call::set_code_without_checks { .. } | + frame_system::Call::kill_prefix { .. }, + ) | RuntimeCall::ParachainSystem(..) | + RuntimeCall::Timestamp(..) | + RuntimeCall::Balances(..) | + RuntimeCall::CollatorSelection( + pallet_collator_selection::Call::set_desired_candidates { .. } | + pallet_collator_selection::Call::set_candidacy_bond { .. } | + pallet_collator_selection::Call::register_as_candidate { .. } | + pallet_collator_selection::Call::leave_intent { .. } | + pallet_collator_selection::Call::set_invulnerables { .. } | + pallet_collator_selection::Call::add_invulnerable { .. } | + pallet_collator_selection::Call::remove_invulnerable { .. }, + ) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | + RuntimeCall::XcmpQueue(..) | + RuntimeCall::DmpQueue(..) | + RuntimeCall::Assets( + pallet_assets::Call::create { .. } | + pallet_assets::Call::force_create { .. } | + pallet_assets::Call::start_destroy { .. } | + pallet_assets::Call::destroy_accounts { .. } | + pallet_assets::Call::destroy_approvals { .. } | + pallet_assets::Call::finish_destroy { .. } | + pallet_assets::Call::block { .. } | + pallet_assets::Call::mint { .. } | + pallet_assets::Call::burn { .. } | + pallet_assets::Call::transfer { .. } | + pallet_assets::Call::transfer_keep_alive { .. } | + pallet_assets::Call::force_transfer { .. } | + pallet_assets::Call::freeze { .. } | + pallet_assets::Call::thaw { .. } | + pallet_assets::Call::freeze_asset { .. } | + pallet_assets::Call::thaw_asset { .. } | + pallet_assets::Call::transfer_ownership { .. } | + pallet_assets::Call::set_team { .. } | + pallet_assets::Call::set_metadata { .. } | + pallet_assets::Call::clear_metadata { .. } | + pallet_assets::Call::force_set_metadata { .. } | + pallet_assets::Call::force_clear_metadata { .. } | + pallet_assets::Call::force_asset_status { .. 
} | + pallet_assets::Call::approve_transfer { .. } | + pallet_assets::Call::cancel_approval { .. } | + pallet_assets::Call::force_cancel_approval { .. } | + pallet_assets::Call::transfer_approved { .. } | + pallet_assets::Call::touch { .. } | + pallet_assets::Call::touch_other { .. } | + pallet_assets::Call::refund { .. } | + pallet_assets::Call::refund_other { .. }, ) | RuntimeCall::ForeignAssets( - pallet_assets::Call::create { .. } - | pallet_assets::Call::force_create { .. } - | pallet_assets::Call::start_destroy { .. } - | pallet_assets::Call::destroy_accounts { .. } - | pallet_assets::Call::destroy_approvals { .. } - | pallet_assets::Call::finish_destroy { .. } - | pallet_assets::Call::block { .. } - | pallet_assets::Call::mint { .. } - | pallet_assets::Call::burn { .. } - | pallet_assets::Call::transfer { .. } - | pallet_assets::Call::transfer_keep_alive { .. } - | pallet_assets::Call::force_transfer { .. } - | pallet_assets::Call::freeze { .. } - | pallet_assets::Call::thaw { .. } - | pallet_assets::Call::freeze_asset { .. } - | pallet_assets::Call::thaw_asset { .. } - | pallet_assets::Call::transfer_ownership { .. } - | pallet_assets::Call::set_team { .. } - | pallet_assets::Call::set_metadata { .. } - | pallet_assets::Call::clear_metadata { .. } - | pallet_assets::Call::force_set_metadata { .. } - | pallet_assets::Call::force_clear_metadata { .. } - | pallet_assets::Call::force_asset_status { .. } - | pallet_assets::Call::approve_transfer { .. } - | pallet_assets::Call::cancel_approval { .. } - | pallet_assets::Call::force_cancel_approval { .. } - | pallet_assets::Call::transfer_approved { .. } - | pallet_assets::Call::touch { .. } - | pallet_assets::Call::touch_other { .. } - | pallet_assets::Call::refund { .. } - | pallet_assets::Call::refund_other { .. }, + pallet_assets::Call::create { .. } | + pallet_assets::Call::force_create { .. } | + pallet_assets::Call::start_destroy { .. } | + pallet_assets::Call::destroy_accounts { .. 
} | + pallet_assets::Call::destroy_approvals { .. } | + pallet_assets::Call::finish_destroy { .. } | + pallet_assets::Call::block { .. } | + pallet_assets::Call::mint { .. } | + pallet_assets::Call::burn { .. } | + pallet_assets::Call::transfer { .. } | + pallet_assets::Call::transfer_keep_alive { .. } | + pallet_assets::Call::force_transfer { .. } | + pallet_assets::Call::freeze { .. } | + pallet_assets::Call::thaw { .. } | + pallet_assets::Call::freeze_asset { .. } | + pallet_assets::Call::thaw_asset { .. } | + pallet_assets::Call::transfer_ownership { .. } | + pallet_assets::Call::set_team { .. } | + pallet_assets::Call::set_metadata { .. } | + pallet_assets::Call::clear_metadata { .. } | + pallet_assets::Call::force_set_metadata { .. } | + pallet_assets::Call::force_clear_metadata { .. } | + pallet_assets::Call::force_asset_status { .. } | + pallet_assets::Call::approve_transfer { .. } | + pallet_assets::Call::cancel_approval { .. } | + pallet_assets::Call::force_cancel_approval { .. } | + pallet_assets::Call::transfer_approved { .. } | + pallet_assets::Call::touch { .. } | + pallet_assets::Call::touch_other { .. } | + pallet_assets::Call::refund { .. } | + pallet_assets::Call::refund_other { .. }, ) | RuntimeCall::PoolAssets( - pallet_assets::Call::force_create { .. } - | pallet_assets::Call::block { .. } - | pallet_assets::Call::burn { .. } - | pallet_assets::Call::transfer { .. } - | pallet_assets::Call::transfer_keep_alive { .. } - | pallet_assets::Call::force_transfer { .. } - | pallet_assets::Call::freeze { .. } - | pallet_assets::Call::thaw { .. } - | pallet_assets::Call::freeze_asset { .. } - | pallet_assets::Call::thaw_asset { .. } - | pallet_assets::Call::transfer_ownership { .. } - | pallet_assets::Call::set_team { .. } - | pallet_assets::Call::set_metadata { .. } - | pallet_assets::Call::clear_metadata { .. } - | pallet_assets::Call::force_set_metadata { .. } - | pallet_assets::Call::force_clear_metadata { .. 
} - | pallet_assets::Call::force_asset_status { .. } - | pallet_assets::Call::approve_transfer { .. } - | pallet_assets::Call::cancel_approval { .. } - | pallet_assets::Call::force_cancel_approval { .. } - | pallet_assets::Call::transfer_approved { .. } - | pallet_assets::Call::touch { .. } - | pallet_assets::Call::touch_other { .. } - | pallet_assets::Call::refund { .. } - | pallet_assets::Call::refund_other { .. }, + pallet_assets::Call::force_create { .. } | + pallet_assets::Call::block { .. } | + pallet_assets::Call::burn { .. } | + pallet_assets::Call::transfer { .. } | + pallet_assets::Call::transfer_keep_alive { .. } | + pallet_assets::Call::force_transfer { .. } | + pallet_assets::Call::freeze { .. } | + pallet_assets::Call::thaw { .. } | + pallet_assets::Call::freeze_asset { .. } | + pallet_assets::Call::thaw_asset { .. } | + pallet_assets::Call::transfer_ownership { .. } | + pallet_assets::Call::set_team { .. } | + pallet_assets::Call::set_metadata { .. } | + pallet_assets::Call::clear_metadata { .. } | + pallet_assets::Call::force_set_metadata { .. } | + pallet_assets::Call::force_clear_metadata { .. } | + pallet_assets::Call::force_asset_status { .. } | + pallet_assets::Call::approve_transfer { .. } | + pallet_assets::Call::cancel_approval { .. } | + pallet_assets::Call::force_cancel_approval { .. } | + pallet_assets::Call::transfer_approved { .. } | + pallet_assets::Call::touch { .. } | + pallet_assets::Call::touch_other { .. } | + pallet_assets::Call::refund { .. } | + pallet_assets::Call::refund_other { .. }, ) | RuntimeCall::AssetConversion( - pallet_asset_conversion::Call::create_pool { .. } - | pallet_asset_conversion::Call::add_liquidity { .. } - | pallet_asset_conversion::Call::remove_liquidity { .. } - | pallet_asset_conversion::Call::swap_tokens_for_exact_tokens { .. } - | pallet_asset_conversion::Call::swap_exact_tokens_for_tokens { .. }, + pallet_asset_conversion::Call::create_pool { .. } | + pallet_asset_conversion::Call::add_liquidity { .. 
} | + pallet_asset_conversion::Call::remove_liquidity { .. } | + pallet_asset_conversion::Call::swap_tokens_for_exact_tokens { .. } | + pallet_asset_conversion::Call::swap_exact_tokens_for_tokens { .. }, ) | RuntimeCall::NftFractionalization( - pallet_nft_fractionalization::Call::fractionalize { .. } - | pallet_nft_fractionalization::Call::unify { .. }, + pallet_nft_fractionalization::Call::fractionalize { .. } | + pallet_nft_fractionalization::Call::unify { .. }, ) | RuntimeCall::Nfts( - pallet_nfts::Call::create { .. } - | pallet_nfts::Call::force_create { .. } - | pallet_nfts::Call::destroy { .. } - | pallet_nfts::Call::mint { .. } - | pallet_nfts::Call::force_mint { .. } - | pallet_nfts::Call::burn { .. } - | pallet_nfts::Call::transfer { .. } - | pallet_nfts::Call::lock_item_transfer { .. } - | pallet_nfts::Call::unlock_item_transfer { .. } - | pallet_nfts::Call::lock_collection { .. } - | pallet_nfts::Call::transfer_ownership { .. } - | pallet_nfts::Call::set_team { .. } - | pallet_nfts::Call::force_collection_owner { .. } - | pallet_nfts::Call::force_collection_config { .. } - | pallet_nfts::Call::approve_transfer { .. } - | pallet_nfts::Call::cancel_approval { .. } - | pallet_nfts::Call::clear_all_transfer_approvals { .. } - | pallet_nfts::Call::lock_item_properties { .. } - | pallet_nfts::Call::set_attribute { .. } - | pallet_nfts::Call::force_set_attribute { .. } - | pallet_nfts::Call::clear_attribute { .. } - | pallet_nfts::Call::approve_item_attributes { .. } - | pallet_nfts::Call::cancel_item_attributes_approval { .. } - | pallet_nfts::Call::set_metadata { .. } - | pallet_nfts::Call::clear_metadata { .. } - | pallet_nfts::Call::set_collection_metadata { .. } - | pallet_nfts::Call::clear_collection_metadata { .. } - | pallet_nfts::Call::set_accept_ownership { .. } - | pallet_nfts::Call::set_collection_max_supply { .. } - | pallet_nfts::Call::update_mint_settings { .. } - | pallet_nfts::Call::set_price { .. } - | pallet_nfts::Call::buy_item { .. 
} - | pallet_nfts::Call::pay_tips { .. } - | pallet_nfts::Call::create_swap { .. } - | pallet_nfts::Call::cancel_swap { .. } - | pallet_nfts::Call::claim_swap { .. }, + pallet_nfts::Call::create { .. } | + pallet_nfts::Call::force_create { .. } | + pallet_nfts::Call::destroy { .. } | + pallet_nfts::Call::mint { .. } | + pallet_nfts::Call::force_mint { .. } | + pallet_nfts::Call::burn { .. } | + pallet_nfts::Call::transfer { .. } | + pallet_nfts::Call::lock_item_transfer { .. } | + pallet_nfts::Call::unlock_item_transfer { .. } | + pallet_nfts::Call::lock_collection { .. } | + pallet_nfts::Call::transfer_ownership { .. } | + pallet_nfts::Call::set_team { .. } | + pallet_nfts::Call::force_collection_owner { .. } | + pallet_nfts::Call::force_collection_config { .. } | + pallet_nfts::Call::approve_transfer { .. } | + pallet_nfts::Call::cancel_approval { .. } | + pallet_nfts::Call::clear_all_transfer_approvals { .. } | + pallet_nfts::Call::lock_item_properties { .. } | + pallet_nfts::Call::set_attribute { .. } | + pallet_nfts::Call::force_set_attribute { .. } | + pallet_nfts::Call::clear_attribute { .. } | + pallet_nfts::Call::approve_item_attributes { .. } | + pallet_nfts::Call::cancel_item_attributes_approval { .. } | + pallet_nfts::Call::set_metadata { .. } | + pallet_nfts::Call::clear_metadata { .. } | + pallet_nfts::Call::set_collection_metadata { .. } | + pallet_nfts::Call::clear_collection_metadata { .. } | + pallet_nfts::Call::set_accept_ownership { .. } | + pallet_nfts::Call::set_collection_max_supply { .. } | + pallet_nfts::Call::update_mint_settings { .. } | + pallet_nfts::Call::set_price { .. } | + pallet_nfts::Call::buy_item { .. } | + pallet_nfts::Call::pay_tips { .. } | + pallet_nfts::Call::create_swap { .. } | + pallet_nfts::Call::cancel_swap { .. } | + pallet_nfts::Call::claim_swap { .. }, ) | RuntimeCall::Uniques( - pallet_uniques::Call::create { .. } - | pallet_uniques::Call::force_create { .. } - | pallet_uniques::Call::destroy { .. 
} - | pallet_uniques::Call::mint { .. } - | pallet_uniques::Call::burn { .. } - | pallet_uniques::Call::transfer { .. } - | pallet_uniques::Call::freeze { .. } - | pallet_uniques::Call::thaw { .. } - | pallet_uniques::Call::freeze_collection { .. } - | pallet_uniques::Call::thaw_collection { .. } - | pallet_uniques::Call::transfer_ownership { .. } - | pallet_uniques::Call::set_team { .. } - | pallet_uniques::Call::approve_transfer { .. } - | pallet_uniques::Call::cancel_approval { .. } - | pallet_uniques::Call::force_item_status { .. } - | pallet_uniques::Call::set_attribute { .. } - | pallet_uniques::Call::clear_attribute { .. } - | pallet_uniques::Call::set_metadata { .. } - | pallet_uniques::Call::clear_metadata { .. } - | pallet_uniques::Call::set_collection_metadata { .. } - | pallet_uniques::Call::clear_collection_metadata { .. } - | pallet_uniques::Call::set_accept_ownership { .. } - | pallet_uniques::Call::set_collection_max_supply { .. } - | pallet_uniques::Call::set_price { .. } - | pallet_uniques::Call::buy_item { .. } + pallet_uniques::Call::create { .. } | + pallet_uniques::Call::force_create { .. } | + pallet_uniques::Call::destroy { .. } | + pallet_uniques::Call::mint { .. } | + pallet_uniques::Call::burn { .. } | + pallet_uniques::Call::transfer { .. } | + pallet_uniques::Call::freeze { .. } | + pallet_uniques::Call::thaw { .. } | + pallet_uniques::Call::freeze_collection { .. } | + pallet_uniques::Call::thaw_collection { .. } | + pallet_uniques::Call::transfer_ownership { .. } | + pallet_uniques::Call::set_team { .. } | + pallet_uniques::Call::approve_transfer { .. } | + pallet_uniques::Call::cancel_approval { .. } | + pallet_uniques::Call::force_item_status { .. } | + pallet_uniques::Call::set_attribute { .. } | + pallet_uniques::Call::clear_attribute { .. } | + pallet_uniques::Call::set_metadata { .. } | + pallet_uniques::Call::clear_metadata { .. } | + pallet_uniques::Call::set_collection_metadata { .. 
} | + pallet_uniques::Call::clear_collection_metadata { .. } | + pallet_uniques::Call::set_accept_ownership { .. } | + pallet_uniques::Call::set_collection_max_supply { .. } | + pallet_uniques::Call::set_price { .. } | + pallet_uniques::Call::buy_item { .. } ) ) } diff --git a/cumulus/primitives/pov-reclaim/Cargo.toml b/cumulus/primitives/pov-reclaim/Cargo.toml index 8e4fc701443e..2db48d8b9aae 100644 --- a/cumulus/primitives/pov-reclaim/Cargo.toml +++ b/cumulus/primitives/pov-reclaim/Cargo.toml @@ -1,15 +1,14 @@ [package] name = "cumulus-primitives-reclaim" -version = "0.0.1" -authors = [ "Sebastian Kunert " ] +version = "0.1.0" +authors.workspace = true +edition.workspace = true [dependencies] sp-runtime-interface = { path = "../../../substrate/primitives/runtime-interface", default-features = false } -tracing = { version = "0.1.37", default-features = false } [features] default = ["std"] std = [ "sp-runtime-interface/std", - "tracing/std" ] diff --git a/cumulus/primitives/pov-reclaim/src/lib.rs b/cumulus/primitives/pov-reclaim/src/lib.rs index 212dfed1c2be..d48ee703a6af 100644 --- a/cumulus/primitives/pov-reclaim/src/lib.rs +++ b/cumulus/primitives/pov-reclaim/src/lib.rs @@ -16,8 +16,6 @@ #![cfg_attr(not(feature = "std"), no_std)] -extern crate sp_runtime_interface; - use sp_runtime_interface::runtime_interface; #[runtime_interface] diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index 16866eeb1dcd..eed9575f3041 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -269,20 +269,18 @@ async fn build_relay_chain_interface( None, ) .map_err(|e| RelayChainError::Application(Box::new(e) as Box<_>))?, - cumulus_client_cli::RelayChainMode::ExternalRpc(rpc_target_urls) => { + cumulus_client_cli::RelayChainMode::ExternalRpc(rpc_target_urls) => return build_minimal_relay_chain_node_with_rpc( relay_chain_config, task_manager, rpc_target_urls, ) .await - .map(|r| r.0) - }, - 
cumulus_client_cli::RelayChainMode::LightClient => { + .map(|r| r.0), + cumulus_client_cli::RelayChainMode::LightClient => return build_minimal_relay_chain_node_light_client(relay_chain_config, task_manager) .await - .map(|r| r.0) - }, + .map(|r| r.0), }; task_manager.add_child(relay_chain_full_node.task_manager); diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs index 56b056687670..ad0c69b2cd01 100644 --- a/substrate/bin/node/cli/src/service.rs +++ b/substrate/bin/node/cli/src/service.rs @@ -764,7 +764,7 @@ mod tests { sc_consensus_babe::authorship::claim_slot(slot.into(), &epoch, &keystore) .map(|(digest, _)| digest) { - break (babe_pre_digest, epoch_descriptor); + break (babe_pre_digest, epoch_descriptor) } slot += 1; diff --git a/substrate/bin/node/testing/src/bench.rs b/substrate/bin/node/testing/src/bench.rs index 915e1e3cd4b2..f1ab2212239b 100644 --- a/substrate/bin/node/testing/src/bench.rs +++ b/substrate/bin/node/testing/src/bench.rs @@ -91,7 +91,7 @@ pub fn drop_system_cache() { target: "bench-logistics", "Clearing system cache on windows is not supported. 
Benchmark might totally be wrong.", ); - return; + return } std::process::Command::new("sync") @@ -283,7 +283,7 @@ impl<'a> Iterator for BlockContentIterator<'a> { fn next(&mut self) -> Option { if self.content.size.map(|size| size <= self.iteration).unwrap_or(false) { - return None; + return None } let sender = self.keyring.at(self.iteration); @@ -299,24 +299,22 @@ impl<'a> Iterator for BlockContentIterator<'a> { signed_extra(0, kitchensink_runtime::ExistentialDeposit::get() + 1), )), function: match self.content.block_type { - BlockType::RandomTransfersKeepAlive => { + BlockType::RandomTransfersKeepAlive => RuntimeCall::Balances(BalancesCall::transfer_keep_alive { dest: sp_runtime::MultiAddress::Id(receiver), value: kitchensink_runtime::ExistentialDeposit::get() + 1, - }) - }, + }), BlockType::RandomTransfersReaping => { RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: sp_runtime::MultiAddress::Id(receiver), // Transfer so that ending balance would be 1 less than existential // deposit so that we kill the sender account. - value: 100 * DOLLARS - - (kitchensink_runtime::ExistentialDeposit::get() - 1), + value: 100 * DOLLARS - + (kitchensink_runtime::ExistentialDeposit::get() - 1), }) }, - BlockType::Noop => { - RuntimeCall::System(SystemCall::remark { remark: Vec::new() }) - }, + BlockType::Noop => + RuntimeCall::System(SystemCall::remark { remark: Vec::new() }), }, }, self.runtime_version.spec_version, diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs index 167b2dff6753..85cbc3e421b0 100644 --- a/substrate/client/service/src/builder.rs +++ b/substrate/client/service/src/builder.rs @@ -97,9 +97,8 @@ impl KeystoreContainer { /// Construct KeystoreContainer pub fn new(config: &KeystoreConfig) -> Result { let keystore = Arc::new(match config { - KeystoreConfig::Path { path, password } => { - LocalKeystore::open(path.clone(), password.clone())? 
- }, + KeystoreConfig::Path { path, password } => + LocalKeystore::open(path.clone(), password.clone())?, KeystoreConfig::InMemory => LocalKeystore::in_memory(), }); @@ -760,14 +759,13 @@ where } = params; if warp_sync_params.is_none() && config.network.sync_mode.is_warp() { - return Err("Warp sync enabled, but no warp sync provider configured.".into()); + return Err("Warp sync enabled, but no warp sync provider configured.".into()) } if client.requires_full_sync() { match config.network.sync_mode { - SyncMode::LightState { .. } => { - return Err("Fast sync doesn't work for archive nodes".into()) - }, + SyncMode::LightState { .. } => + return Err("Fast sync doesn't work for archive nodes".into()), SyncMode::Warp => return Err("Warp sync doesn't work for archive nodes".into()), SyncMode::Full => {}, } @@ -787,8 +785,8 @@ where &protocol_id, config.chain_spec.fork_id(), client.clone(), - net_config.network_config.default_peers_set.in_peers as usize - + net_config.network_config.default_peers_set.out_peers as usize, + net_config.network_config.default_peers_set.in_peers as usize + + net_config.network_config.default_peers_set.out_peers as usize, ); let config_name = protocol_config.name.clone(); spawn_handle.spawn("block-request-handler", Some("networking"), handler.run()); @@ -796,8 +794,8 @@ where }; let (state_request_protocol_config, state_request_protocol_name) = { - let num_peer_hint = net_config.network_config.default_peers_set_num_full as usize - + net_config.network_config.default_peers_set.reserved_nodes.len(); + let num_peer_hint = net_config.network_config.default_peers_set_num_full as usize + + net_config.network_config.default_peers_set.reserved_nodes.len(); // Allow both outgoing and incoming requests. 
let (handler, protocol_config) = StateRequestHandler::new( &protocol_id, @@ -990,7 +988,7 @@ where ); // This `return` might seem unnecessary, but we don't want to make it look like // everything is working as normal even though the user is clearly misusing the API. - return; + return } future.await diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index 8b87c3f5a859..fed7811744a8 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -429,7 +429,7 @@ where backend.unpin_block(message); } else { log::debug!("Terminating unpin-worker, backend reference was dropped."); - return + return; } } log::debug!("Terminating unpin-worker, stream terminated.") @@ -515,7 +515,7 @@ where } = import_block; if !intermediates.is_empty() { - return Err(Error::IncompletePipeline) + return Err(Error::IncompletePipeline); } let fork_choice = fork_choice.ok_or(Error::IncompletePipeline)?; @@ -610,19 +610,20 @@ where // the block is lower than our last finalized block so it must revert // finality, refusing import. - if status == blockchain::BlockStatus::Unknown && - *import_headers.post().number() <= info.finalized_number && - !gap_block + if status == blockchain::BlockStatus::Unknown + && *import_headers.post().number() <= info.finalized_number + && !gap_block { - return Err(sp_blockchain::Error::NotInFinalizedChain) + return Err(sp_blockchain::Error::NotInFinalizedChain); } // this is a fairly arbitrary choice of where to draw the line on making notifications, // but the general goal is to only make notifications when we are already fully synced // and get a new chain head. 
let make_notifications = match origin { - BlockOrigin::NetworkBroadcast | BlockOrigin::Own | BlockOrigin::ConsensusBroadcast => - true, + BlockOrigin::NetworkBroadcast | BlockOrigin::Own | BlockOrigin::ConsensusBroadcast => { + true + }, BlockOrigin::Genesis | BlockOrigin::NetworkInitialSync | BlockOrigin::File => false, }; @@ -656,12 +657,14 @@ where let storage_key = PrefixedStorageKey::new_ref(&parent_storage); let storage_key = match ChildType::from_prefixed_key(storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => - storage_key, - None => + Some((ChildType::ParentKeyId, storage_key)) => { + storage_key + }, + None => { return Err(Error::Backend( "Invalid child storage key.".to_string(), - )), + )) + }, }; let entry = storage .children_default @@ -686,7 +689,7 @@ where // State root mismatch when importing state. This should not happen in // safe fast sync mode, but may happen in unsafe mode. warn!("Error importing state: State root mismatch."); - return Err(Error::InvalidStateRoot) + return Err(Error::InvalidStateRoot); } None }, @@ -709,11 +712,12 @@ where )?; } - let is_new_best = !gap_block && - (finalized || - match fork_choice { - ForkChoiceStrategy::LongestChain => - import_headers.post().number() > &info.best_number, + let is_new_best = !gap_block + && (finalized + || match fork_choice { + ForkChoiceStrategy::LongestChain => { + import_headers.post().number() > &info.best_number + }, ForkChoiceStrategy::Custom(v) => v, }); @@ -837,18 +841,21 @@ where let state_action = std::mem::replace(&mut import_block.state_action, StateAction::Skip); let (enact_state, storage_changes) = match (self.block_status(*parent_hash)?, state_action) { - (BlockStatus::KnownBad, _) => - return Ok(PrepareStorageChangesResult::Discard(ImportResult::KnownBad)), + (BlockStatus::KnownBad, _) => { + return Ok(PrepareStorageChangesResult::Discard(ImportResult::KnownBad)) + }, ( BlockStatus::InChainPruned, StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(_)), 
) => return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)), (_, StateAction::ApplyChanges(changes)) => (true, Some(changes)), - (BlockStatus::Unknown, _) => - return Ok(PrepareStorageChangesResult::Discard(ImportResult::UnknownParent)), + (BlockStatus::Unknown, _) => { + return Ok(PrepareStorageChangesResult::Discard(ImportResult::UnknownParent)) + }, (_, StateAction::Skip) => (false, None), - (BlockStatus::InChainPruned, StateAction::Execute) => - return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)), + (BlockStatus::InChainPruned, StateAction::Execute) => { + return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)) + }, (BlockStatus::InChainPruned, StateAction::ExecuteIfPossible) => (false, None), (_, StateAction::Execute) => (true, None), (_, StateAction::ExecuteIfPossible) => (true, None), @@ -866,7 +873,6 @@ where runtime_api.set_call_context(CallContext::Onchain); if self.config.enable_import_proof_recording { - log::info!(target:"skunert", "Block import with proof recording."); runtime_api.record_proof(); } @@ -882,7 +888,7 @@ where if import_block.header.state_root() != &gen_storage_changes.transaction_storage_root { - return Err(Error::InvalidStateRoot) + return Err(Error::InvalidStateRoot); } Some(sc_consensus::StorageChanges::Changes(gen_storage_changes)) }, @@ -908,7 +914,7 @@ where "Possible safety violation: attempted to re-finalize last finalized block {:?} ", hash, ); - return Ok(()) + return Ok(()); } // Find tree route from last finalized to given block. @@ -922,7 +928,7 @@ where retracted, info.finalized_hash ); - return Err(sp_blockchain::Error::NotInFinalizedChain) + return Err(sp_blockchain::Error::NotInFinalizedChain); } // We may need to coercively update the best block if there is more than one @@ -1002,7 +1008,7 @@ where // since we won't be running the loop below which // would also remove any closed sinks. 
sinks.retain(|sink| !sink.is_closed()); - return Ok(()) + return Ok(()); }, }; @@ -1038,7 +1044,7 @@ where self.every_import_notification_sinks.lock().retain(|sink| !sink.is_closed()); - return Ok(()) + return Ok(()); }, }; @@ -1137,17 +1143,18 @@ where .as_ref() .map_or(false, |importing| &hash == importing) { - return Ok(BlockStatus::Queued) + return Ok(BlockStatus::Queued); } let hash_and_number = self.backend.blockchain().number(hash)?.map(|n| (hash, n)); match hash_and_number { - Some((hash, number)) => + Some((hash, number)) => { if self.backend.have_state_at(hash, number) { Ok(BlockStatus::InChainWithState) } else { Ok(BlockStatus::InChainPruned) - }, + } + }, None => Ok(BlockStatus::Unknown), } } @@ -1183,7 +1190,7 @@ where let genesis_hash = self.backend.blockchain().info().genesis_hash; if genesis_hash == target_hash { - return Ok(Vec::new()) + return Ok(Vec::new()); } let mut current_hash = target_hash; @@ -1199,7 +1206,7 @@ where current_hash = ancestor_hash; if genesis_hash == current_hash { - break + break; } current = ancestor; @@ -1284,14 +1291,15 @@ where size_limit: usize, ) -> sp_blockchain::Result> { if start_key.len() > MAX_NESTED_TRIE_DEPTH { - return Err(Error::Backend("Invalid start key.".to_string())) + return Err(Error::Backend("Invalid start key.".to_string())); } let state = self.state_at(hash)?; let child_info = |storage_key: &Vec| -> sp_blockchain::Result { let storage_key = PrefixedStorageKey::new_ref(storage_key); match ChildType::from_prefixed_key(storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => - Ok(ChildInfo::new_default(storage_key)), + Some((ChildType::ParentKeyId, storage_key)) => { + Ok(ChildInfo::new_default(storage_key)) + }, None => Err(Error::Backend("Invalid child storage key.".to_string())), } }; @@ -1303,7 +1311,7 @@ where { Some((child_info(start_key)?, child_root)) } else { - return Err(Error::Backend("Invalid root start key.".to_string())) + return Err(Error::Backend("Invalid root start 
key.".to_string())); } } else { None @@ -1347,18 +1355,18 @@ where let size = value.len() + next_key.len(); if total_size + size > size_limit && !entries.is_empty() { complete = false; - break + break; } total_size += size; - if current_child.is_none() && - sp_core::storage::well_known_keys::is_child_storage_key(next_key.as_slice()) && - !child_roots.contains(value.as_slice()) + if current_child.is_none() + && sp_core::storage::well_known_keys::is_child_storage_key(next_key.as_slice()) + && !child_roots.contains(value.as_slice()) { child_roots.insert(value.clone()); switch_child_key = Some((next_key.clone(), value.clone())); entries.push((next_key.clone(), value)); - break + break; } entries.push((next_key.clone(), value)); current_key = next_key; @@ -1378,12 +1386,12 @@ where complete, )); if !complete { - break + break; } } else { result[0].0.key_values.extend(entries.into_iter()); result[0].1 = complete; - break + break; } } Ok(result) @@ -1808,7 +1816,7 @@ where match self.block_rules.lookup(number, &hash) { BlockLookupResult::KnownBad => { trace!("Rejecting known bad block: #{} {:?}", number, hash); - return Ok(ImportResult::KnownBad) + return Ok(ImportResult::KnownBad); }, BlockLookupResult::Expected(expected_hash) => { trace!( @@ -1817,7 +1825,7 @@ where expected_hash, number ); - return Ok(ImportResult::KnownBad) + return Ok(ImportResult::KnownBad); }, BlockLookupResult::NotSpecial => {}, } @@ -1828,10 +1836,12 @@ where .block_status(hash) .map_err(|e| ConsensusError::ClientImport(e.to_string()))? 
{ - BlockStatus::InChainWithState | BlockStatus::Queued => - return Ok(ImportResult::AlreadyInChain), - BlockStatus::InChainPruned if !import_existing => - return Ok(ImportResult::AlreadyInChain), + BlockStatus::InChainWithState | BlockStatus::Queued => { + return Ok(ImportResult::AlreadyInChain) + }, + BlockStatus::InChainPruned if !import_existing => { + return Ok(ImportResult::AlreadyInChain) + }, BlockStatus::InChainPruned => {}, BlockStatus::Unknown => {}, BlockStatus::KnownBad => return Ok(ImportResult::KnownBad), @@ -1997,8 +2007,9 @@ where fn block(&self, hash: Block::Hash) -> sp_blockchain::Result>> { Ok(match (self.header(hash)?, self.body(hash)?, self.justifications(hash)?) { - (Some(header), Some(extrinsics), justifications) => - Some(SignedBlock { block: Block::new(header, extrinsics), justifications }), + (Some(header), Some(extrinsics), justifications) => { + Some(SignedBlock { block: Block::new(header, extrinsics), justifications }) + }, _ => None, }) } diff --git a/substrate/client/service/src/client/wasm_override.rs b/substrate/client/service/src/client/wasm_override.rs index f83bc820fd20..725c8ab9429a 100644 --- a/substrate/client/service/src/client/wasm_override.rs +++ b/substrate/client/service/src/client/wasm_override.rs @@ -178,7 +178,7 @@ impl WasmOverride { }; if !dir.is_dir() { - return Err(WasmOverrideError::NotADirectory(dir.to_owned()).into()); + return Err(WasmOverrideError::NotADirectory(dir.to_owned()).into()) } let mut overrides = HashMap::new(); @@ -213,7 +213,7 @@ impl WasmOverride { } if !duplicates.is_empty() { - return Err(WasmOverrideError::DuplicateRuntime(duplicates).into()); + return Err(WasmOverrideError::DuplicateRuntime(duplicates).into()) } Ok(overrides) diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs index 8c45ad254b39..ee893fc20ac2 100644 --- a/substrate/client/service/src/lib.rs +++ b/substrate/client/service/src/lib.rs @@ -238,7 +238,7 @@ pub async fn build_system_rpc_future< 
// Answer incoming RPC requests. let Some(req) = rpc_rx.next().await else { debug!("RPC requests stream has terminated, shutting down the system RPC future."); - return; + return }; match req { @@ -287,7 +287,7 @@ pub async fn build_system_rpc_future< let _ = sender.send(network_state); } } else { - break; + break } }, sc_rpc::system::Request::NetworkAddReservedPeer(peer_addr, sender) => { @@ -316,7 +316,7 @@ pub async fn build_system_rpc_future< reserved_peers.iter().map(|peer_id| peer_id.to_base58()).collect(); let _ = sender.send(reserved_peers); } else { - break; + break } }, sc_rpc::system::Request::NodeRoles(sender) => { @@ -478,7 +478,7 @@ where Ok(uxt) => uxt, Err(e) => { debug!("Transaction invalid: {:?}", e); - return Box::pin(futures::future::ready(TransactionImport::Bad)); + return Box::pin(futures::future::ready(TransactionImport::Bad)) }, }; @@ -493,9 +493,8 @@ where match import_future.await { Ok(_) => TransactionImport::NewGood, Err(e) => match e.into_pool_error() { - Ok(sc_transaction_pool_api::error::Error::AlreadyImported(_)) => { - TransactionImport::KnownGood - }, + Ok(sc_transaction_pool_api::error::Error::AlreadyImported(_)) => + TransactionImport::KnownGood, Ok(e) => { debug!("Error adding transaction to the pool: {:?}", e); TransactionImport::Bad diff --git a/substrate/primitives/state-machine/src/trie_backend_essence.rs b/substrate/primitives/state-machine/src/trie_backend_essence.rs index d96d33f1853d..7e155f9a65b3 100644 --- a/substrate/primitives/state-machine/src/trie_backend_essence.rs +++ b/substrate/primitives/state-machine/src/trie_backend_essence.rs @@ -125,8 +125,8 @@ where }, Some(Err(error)) => { self.state = IterState::FinishedIncomplete; - if matches!(*error, TrieError::IncompleteDatabase(_)) - && self.stop_on_incomplete_database + if matches!(*error, TrieError::IncompleteDatabase(_)) && + self.stop_on_incomplete_database { None } else { @@ -403,7 +403,7 @@ where #[cfg(feature = "std")] { if let Some(result) = 
self.cache.read().child_root.get(child_info.storage_key()) { - return Ok(*result); + return Ok(*result) } } @@ -559,7 +559,7 @@ where if self.root == Default::default() { // A special-case for an empty storage root. - return Ok(Default::default()); + return Ok(Default::default()) } let trie_iter = self @@ -644,7 +644,7 @@ where self.with_recorder_and_cache_for_storage_root(Some(child_root), |recorder, cache| { let mut eph = Ephemeral::new(self.backend_storage(), &mut write_overlay); match match state_version { - StateVersion::V0 => { + StateVersion::V0 => child_delta_trie_root::, _, _, _, _, _, _>( child_info.keyspace(), &mut eph, @@ -652,9 +652,8 @@ where delta, recorder, cache, - ) - }, - StateVersion::V1 => { + ), + StateVersion::V1 => child_delta_trie_root::, _, _, _, _, _, _>( child_info.keyspace(), &mut eph, @@ -662,8 +661,7 @@ where delta, recorder, cache, - ) - }, + ), } { Ok(ret) => (Some(ret), ret), Err(e) => { @@ -795,7 +793,7 @@ impl< { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { if *key == self.empty { - return Some([0u8].to_vec()); + return Some([0u8].to_vec()) } match self.storage.get(key, prefix) { Ok(x) => x, diff --git a/substrate/test-utils/client/src/lib.rs b/substrate/test-utils/client/src/lib.rs index 192580c8447d..90e15e0f8d53 100644 --- a/substrate/test-utils/client/src/lib.rs +++ b/substrate/test-utils/client/src/lib.rs @@ -351,7 +351,7 @@ pub(crate) fn parse_rpc_result( if let Some(error) = error { return Err(serde_json::from_value(error.clone()) - .expect("the JSONRPC result's error is always valid; qed")); + .expect("the JSONRPC result's error is always valid; qed")) } Ok(RpcTransactionOutput { result, receiver }) @@ -385,7 +385,7 @@ where if notification.is_new_best { blocks.insert(*notification.header.number()); if blocks.len() == count { - break; + break } } } From 783e0f308efd7e3adc6b38a33476a3fcccca7cc3 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Thu, 7 Sep 2023 19:03:08 +0200 Subject: [PATCH 17/61] Add some 
tests for recorder --- Cargo.lock | 2 + cumulus/pallets/parachain-system/Cargo.toml | 3 + .../src/validate_block/mod.rs | 1 - .../src/validate_block/tests.rs | 75 ++++++++++++++++++- .../src/validate_block/trie_recorder.rs | 14 +--- .../parachain-template/node/src/service.rs | 5 +- cumulus/test/client/src/lib.rs | 17 +++-- 7 files changed, 96 insertions(+), 21 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b3bb6a623bfd..d4dc84709f03 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3553,6 +3553,7 @@ dependencies = [ "log", "parity-scale-codec", "polkadot-parachain-primitives", + "rand 0.8.5", "sc-client-api", "scale-info", "sp-core", @@ -3568,6 +3569,7 @@ dependencies = [ "sp-version", "staging-xcm", "trie-db", + "trie-standardmap", ] [[package]] diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 689295437daa..48e2209ff291 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -41,6 +41,8 @@ cumulus-primitives-reclaim = { path = "../../primitives/pov-reclaim", default-fe assert_matches = "1.5" hex-literal = "0.4.1" lazy_static = "1.4" +trie-standardmap = "0.16.0" +rand = "0.8.5" # Substrate sc-client-api = { path = "../../../substrate/client/api" } @@ -52,6 +54,7 @@ sp-version = { path = "../../../substrate/primitives/version" } cumulus-test-client = { path = "../../test/client" } cumulus-test-relay-sproof-builder = { path = "../../test/relay-sproof-builder" } + [features] default = [ "std" ] std = [ diff --git a/cumulus/pallets/parachain-system/src/validate_block/mod.rs b/cumulus/pallets/parachain-system/src/validate_block/mod.rs index 9815f8023915..b159481079aa 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/mod.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/mod.rs @@ -26,7 +26,6 @@ mod tests; #[doc(hidden)] mod trie_cache; -#[cfg(not(feature = "std"))] #[doc(hidden)] mod trie_recorder; diff --git 
a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index 0cf68f25cc34..15fd67d5431e 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -26,9 +26,12 @@ use cumulus_test_client::{ }; use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; use sp_keyring::AccountKeyring::*; -use sp_runtime::traits::Header as HeaderT; +use sp_runtime::traits::{HashingFor, Header as HeaderT}; +use sp_trie::TrieRecorderProvider; use std::{env, process::Command}; +use trie_standardmap::{Alphabet, StandardMap, ValueMode}; +use crate::validate_block::trie_recorder::RecorderProvider; use crate::validate_block::MemoryOptimizedValidationParams; fn call_validate_block_encoded_header( @@ -290,3 +293,73 @@ fn validation_params_and_memory_optimized_validation_params_encode_and_decode() let decoded = ValidationParams::decode_all(&mut &encoded[..]).unwrap(); assert_eq!(decoded, validation_params); } + +const TEST_DATA: &[(&[u8], &[u8])] = + &[(b"key1", &[1; 64]), (b"key2", &[2; 64]), (b"key3", &[3; 64]), (b"key4", &[4; 64])]; + +use trie_db::{Trie, TrieDBBuilder, TrieDBMutBuilder, TrieHash, TrieMut, TrieRecorder}; +type MemoryDB = sp_trie::MemoryDB; +type Layout = sp_trie::LayoutV1; +type Recorder = sp_trie::recorder::Recorder; + +fn create_trie() -> (MemoryDB, TrieHash, Vec<(Vec, Vec)>) { + let mut db = MemoryDB::default(); + let mut root = Default::default(); + + let mut seed = Default::default(); + let x = StandardMap { + alphabet: Alphabet::Low, + min_key: 5, + journal_key: 0, + value_mode: ValueMode::Random, + count: 1000, + } + .make_with(&mut seed); + + { + let mut trie = TrieDBMutBuilder::::new(&mut db, &mut root).build(); + for (k, v) in x.iter() { + trie.insert(k, v).expect("Inserts data"); + } + } + + (db, root, x) +} +use rand::Rng; +#[test] +fn recorder_does_its_thing() { + let (db, root, values) = create_trie(); 
+ + let mut rng = rand::thread_rng(); + for _ in 1..100 { + let reference_recorder = Recorder::default(); + let recorder_under_test: RecorderProvider = RecorderProvider::new(); + { + let mut reference_trie_recorder = reference_recorder.as_trie_recorder(root); + let reference_trie = TrieDBBuilder::::new(&db, &root) + .with_recorder(&mut reference_trie_recorder) + .build(); + + let mut trie_recorder_under_test = recorder_under_test.as_trie_recorder(root); + let test_trie = TrieDBBuilder::::new(&db, &root) + .with_recorder(&mut trie_recorder_under_test) + .build(); + + for _ in 0..100 { + let index: usize = rng.gen_range(0..values.len()); + test_trie.get(&values[index].0).unwrap().unwrap(); + reference_trie.get(&values[index].0).unwrap().unwrap(); + } + + for _ in 0..100 { + let index: usize = rng.gen_range(0..values.len()); + test_trie.get_hash(&values[index].0); + reference_trie.get_hash(&values[index].0); + } + } + assert_eq!( + reference_recorder.estimate_encoded_size(), + recorder_under_test.estimate_encoded_size() + ); + } +} diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs index e618bc0f857b..aeeb0c117e00 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs @@ -15,25 +15,17 @@ // along with Cumulus. If not, see . //! The actual implementation of the validate block functionality. 
- -use super::trie_cache; +#![cfg_attr(not(feature = "std"), no_std)] use codec::Encode; -use core::borrow::BorrowMut; -use frame_support::traits::Get; -use sp_externalities::{set_and_run_with_externalities, Externalities}; -use sp_io::KillStorageResult; -use sp_runtime::traits::{Block as BlockT, Extrinsic, HashingFor, Header as HeaderT}; use sp_std::{ - boxed::Box, cell::{RefCell, RefMut}, collections::btree_map::BTreeMap, collections::btree_set::BTreeSet, - prelude::*, sync::Arc, }; -use sp_trie::{MemoryDB, NodeCodec, StorageProof}; +use sp_trie::{NodeCodec, StorageProof}; use trie_db::{Hasher, RecordedForKey, TrieAccess}; /// A trie recorder that only keeps track of the proof size. @@ -117,7 +109,7 @@ impl sp_trie::TrieRecorderProvider for RecorderProvider Self::Recorder<'_> { + fn as_trie_recorder(&self, _storage_root: H::Out) -> Self::Recorder<'_> { SizeRecorder { encoded_size: self.encoded_size.borrow_mut(), seen_nodes: self.seen_nodes.borrow_mut(), diff --git a/cumulus/parachain-template/node/src/service.rs b/cumulus/parachain-template/node/src/service.rs index bb7daff8d943..3054b4b7ca3c 100644 --- a/cumulus/parachain-template/node/src/service.rs +++ b/cumulus/parachain-template/node/src/service.rs @@ -40,7 +40,10 @@ use substrate_prometheus_endpoint::Registry; pub struct ParachainNativeExecutor; impl sc_executor::NativeExecutionDispatch for ParachainNativeExecutor { - type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; + type ExtendHostFunctions = ( + cumulus_primitives_reclaim::pov_reclaim_host_functions::HostFunctions, + frame_benchmarking::benchmarking::HostFunctions, + ); fn dispatch(method: &str, data: &[u8]) -> Option> { parachain_template_runtime::api::dispatch(method, data) diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs index 1791f8ca3ef3..309b57059dbb 100644 --- a/cumulus/test/client/src/lib.rs +++ b/cumulus/test/client/src/lib.rs @@ -211,13 +211,16 @@ pub fn validate_block( let mut ext_ext = 
ext.ext(); let heap_pages = HeapAllocStrategy::Static { extra_pages: 1024 }; - let executor = WasmExecutor::::builder() - .with_execution_method(WasmExecutionMethod::default()) - .with_max_runtime_instances(1) - .with_runtime_cache_size(2) - .with_onchain_heap_alloc_strategy(heap_pages) - .with_offchain_heap_alloc_strategy(heap_pages) - .build(); + let executor = WasmExecutor::<( + sp_io::SubstrateHostFunctions, + cumulus_primitives_reclaim::pov_reclaim_host_functions::HostFunctions, + )>::builder() + .with_execution_method(WasmExecutionMethod::default()) + .with_max_runtime_instances(1) + .with_runtime_cache_size(2) + .with_onchain_heap_alloc_strategy(heap_pages) + .with_offchain_heap_alloc_strategy(heap_pages) + .build(); executor .uncached_call( From 8726f7881f744648ff2ae5c928a5ca9af703bed9 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Fri, 8 Sep 2023 10:09:42 +0200 Subject: [PATCH 18/61] Add cache to test --- Cargo.lock | 1 + cumulus/pallets/parachain-system/Cargo.toml | 1 + .../src/validate_block/implementation.rs | 1 + .../src/validate_block/tests.rs | 31 +++++++++++++------ .../src/validate_block/trie_recorder.rs | 13 ++++---- 5 files changed, 32 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d4dc84709f03..04c36109a0c2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3556,6 +3556,7 @@ dependencies = [ "rand 0.8.5", "sc-client-api", "scale-info", + "sp-api", "sp-core", "sp-externalities", "sp-inherents", diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 48e2209ff291..d89d2b2d0679 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -20,6 +20,7 @@ frame-system = { path = "../../../substrate/frame/system", default-features = fa sp-core = { path = "../../../substrate/primitives/core", default-features = false} sp-externalities = { path = "../../../substrate/primitives/externalities", default-features = false} 
sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false} +sp-api = { path = "../../../substrate/primitives/api", default-features = false} sp-io = { path = "../../../substrate/primitives/io", default-features = false} sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} sp-state-machine = { path = "../../../substrate/primitives/state-machine", default-features = false} diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index 7dd339b211f6..78df38947d40 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -93,6 +93,7 @@ where B::Extrinsic: ExtrinsicCall, ::Call: IsSubType>, { + sp_api::init_runtime_logger(); let block_data = codec::decode_from_bytes::>(block_data) .expect("Invalid parachain block data"); diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index 15fd67d5431e..d6b9cadb15db 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -31,8 +31,7 @@ use sp_trie::TrieRecorderProvider; use std::{env, process::Command}; use trie_standardmap::{Alphabet, StandardMap, ValueMode}; -use crate::validate_block::trie_recorder::RecorderProvider; -use crate::validate_block::MemoryOptimizedValidationParams; +use crate::validate_block::{trie_recorder::RecorderProvider, MemoryOptimizedValidationParams}; fn call_validate_block_encoded_header( parent_head: Header, @@ -326,32 +325,46 @@ fn create_trie() -> (MemoryDB, TrieHash, Vec<(Vec, Vec)>) { (db, root, x) } use rand::Rng; +use sp_trie::cache::{CacheSize, SharedTrieCache}; #[test] fn recorder_does_its_thing() { + sp_tracing::try_init_simple(); let (db, root, values) = 
create_trie(); let mut rng = rand::thread_rng(); - for _ in 1..100 { - let reference_recorder = Recorder::default(); - let recorder_under_test: RecorderProvider = RecorderProvider::new(); + for _ in 1..2 { + let mut reference_recorder = Recorder::default(); + let recorder_for_test: RecorderProvider = RecorderProvider::new(); + let reference_cache: SharedTrieCache = + SharedTrieCache::new(CacheSize::new(1024 * 5)); + let cache_for_test: SharedTrieCache = + SharedTrieCache::new(CacheSize::new(1024 * 5)); { + let mut local_cache = cache_for_test.local_cache(); + let mut trie_cache_for_reference = local_cache.as_trie_db_cache(root); let mut reference_trie_recorder = reference_recorder.as_trie_recorder(root); let reference_trie = TrieDBBuilder::::new(&db, &root) .with_recorder(&mut reference_trie_recorder) + .with_cache(&mut trie_cache_for_reference) .build(); - let mut trie_recorder_under_test = recorder_under_test.as_trie_recorder(root); + let mut local_cache_for_test = cache_for_test.local_cache(); + let mut trie_cache_for_test = local_cache_for_test.as_trie_db_cache(root); + let mut trie_recorder_under_test = recorder_for_test.as_trie_recorder(root); let test_trie = TrieDBBuilder::::new(&db, &root) .with_recorder(&mut trie_recorder_under_test) + .with_cache(&mut trie_cache_for_test) .build(); - for _ in 0..100 { + log::info!("just get"); + for _ in 0..10 { let index: usize = rng.gen_range(0..values.len()); test_trie.get(&values[index].0).unwrap().unwrap(); reference_trie.get(&values[index].0).unwrap().unwrap(); } - for _ in 0..100 { + log::info!("hash access"); + for _ in 0..10 { let index: usize = rng.gen_range(0..values.len()); test_trie.get_hash(&values[index].0); reference_trie.get_hash(&values[index].0); @@ -359,7 +372,7 @@ fn recorder_does_its_thing() { } assert_eq!( reference_recorder.estimate_encoded_size(), - recorder_under_test.estimate_encoded_size() + recorder_for_test.estimate_encoded_size() ); } } diff --git 
a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs index aeeb0c117e00..28ceb9ea7a00 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs @@ -21,8 +21,7 @@ use codec::Encode; use sp_std::{ cell::{RefCell, RefMut}, - collections::btree_map::BTreeMap, - collections::btree_set::BTreeSet, + collections::{btree_map::BTreeMap, btree_set::BTreeSet}, sync::Arc, }; use sp_trie::{NodeCodec, StorageProof}; @@ -39,16 +38,18 @@ impl<'a, H: trie_db::Hasher> trie_db::TrieRecorder for SizeRecorder<'a, fn record<'b>(&mut self, access: TrieAccess<'b, H::Out>) { let mut encoded_size_update = 0; match access { - TrieAccess::NodeOwned { hash, node_owned } => { + TrieAccess::NodeOwned { hash, node_owned } => if !self.seen_nodes.get(&hash).is_some() { let node = node_owned.to_encoded::>(); + log::info!(target: "skunert", "TrieAccess::NodeOwned"); encoded_size_update += node.encoded_size(); self.seen_nodes.insert(hash); - } - }, + }, TrieAccess::EncodedNode { hash, encoded_node } => { if !self.seen_nodes.get(&hash).is_some() { let node = encoded_node.into_owned(); + + log::info!(target: "skunert", "TrieAccess::EncodedNode"); encoded_size_update += node.encoded_size(); self.seen_nodes.insert(hash); } @@ -56,7 +57,7 @@ impl<'a, H: trie_db::Hasher> trie_db::TrieRecorder for SizeRecorder<'a, TrieAccess::Value { hash, value, full_key } => { if !self.seen_nodes.get(&hash).is_some() { let value = value.into_owned(); - + log::info!(target: "skunert", "TrieAccess::Value"); encoded_size_update += value.encoded_size(); self.seen_nodes.insert(hash); } From f44111f0c48792620e4d04027c58d1cba4933cb2 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Fri, 8 Sep 2023 11:37:27 +0200 Subject: [PATCH 19/61] Improve tests and rename Recorder --- Cargo.lock | 1 - cumulus/pallets/parachain-system/Cargo.toml | 2 - 
.../src/validate_block/implementation.rs | 7 +- .../src/validate_block/tests.rs | 90 +-------- .../src/validate_block/trie_recorder.rs | 181 ++++++++++++++++-- substrate/client/service/src/client/client.rs | 118 +++++------- 6 files changed, 225 insertions(+), 174 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 04c36109a0c2..d4dc84709f03 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3556,7 +3556,6 @@ dependencies = [ "rand 0.8.5", "sc-client-api", "scale-info", - "sp-api", "sp-core", "sp-externalities", "sp-inherents", diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index d89d2b2d0679..70a4586416e4 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -20,7 +20,6 @@ frame-system = { path = "../../../substrate/frame/system", default-features = fa sp-core = { path = "../../../substrate/primitives/core", default-features = false} sp-externalities = { path = "../../../substrate/primitives/externalities", default-features = false} sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false} -sp-api = { path = "../../../substrate/primitives/api", default-features = false} sp-io = { path = "../../../substrate/primitives/io", default-features = false} sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} sp-state-machine = { path = "../../../substrate/primitives/state-machine", default-features = false} @@ -55,7 +54,6 @@ sp-version = { path = "../../../substrate/primitives/version" } cumulus-test-client = { path = "../../test/client" } cumulus-test-relay-sproof-builder = { path = "../../test/relay-sproof-builder" } - [features] default = [ "std" ] std = [ diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index 78df38947d40..e6176b05acbf 100644 --- 
a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -34,13 +34,13 @@ use sp_io::KillStorageResult; use sp_runtime::traits::{Block as BlockT, Extrinsic, HashingFor, Header as HeaderT}; use sp_std::{prelude::*, sync::Arc}; use sp_trie::{MemoryDB, TrieRecorderProvider}; -use trie_recorder::RecorderProvider; +use trie_recorder::SizeOnlyRecorderProvider; type TrieBackend = sp_state_machine::TrieBackend< MemoryDB>, HashingFor, trie_cache::CacheProvider>, - RecorderProvider>, + SizeOnlyRecorderProvider>, >; type Ext<'a, B> = sp_state_machine::Ext<'a, HashingFor, TrieBackend>; @@ -93,7 +93,6 @@ where B::Extrinsic: ExtrinsicCall, ::Call: IsSubType>, { - sp_api::init_runtime_logger(); let block_data = codec::decode_from_bytes::>(block_data) .expect("Invalid parachain block data"); @@ -122,7 +121,7 @@ where sp_std::mem::drop(storage_proof); - let recorder = RecorderProvider::new(); + let recorder = SizeOnlyRecorderProvider::new(); let cache_provider = trie_cache::CacheProvider::new(); // We use the storage root of the `parent_head` to ensure that it is the correct root. // This is already being done above while creating the in-memory db, but let's be paranoid!! 
diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index d6b9cadb15db..0cf68f25cc34 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -26,12 +26,10 @@ use cumulus_test_client::{ }; use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; use sp_keyring::AccountKeyring::*; -use sp_runtime::traits::{HashingFor, Header as HeaderT}; -use sp_trie::TrieRecorderProvider; +use sp_runtime::traits::Header as HeaderT; use std::{env, process::Command}; -use trie_standardmap::{Alphabet, StandardMap, ValueMode}; -use crate::validate_block::{trie_recorder::RecorderProvider, MemoryOptimizedValidationParams}; +use crate::validate_block::MemoryOptimizedValidationParams; fn call_validate_block_encoded_header( parent_head: Header, @@ -292,87 +290,3 @@ fn validation_params_and_memory_optimized_validation_params_encode_and_decode() let decoded = ValidationParams::decode_all(&mut &encoded[..]).unwrap(); assert_eq!(decoded, validation_params); } - -const TEST_DATA: &[(&[u8], &[u8])] = - &[(b"key1", &[1; 64]), (b"key2", &[2; 64]), (b"key3", &[3; 64]), (b"key4", &[4; 64])]; - -use trie_db::{Trie, TrieDBBuilder, TrieDBMutBuilder, TrieHash, TrieMut, TrieRecorder}; -type MemoryDB = sp_trie::MemoryDB; -type Layout = sp_trie::LayoutV1; -type Recorder = sp_trie::recorder::Recorder; - -fn create_trie() -> (MemoryDB, TrieHash, Vec<(Vec, Vec)>) { - let mut db = MemoryDB::default(); - let mut root = Default::default(); - - let mut seed = Default::default(); - let x = StandardMap { - alphabet: Alphabet::Low, - min_key: 5, - journal_key: 0, - value_mode: ValueMode::Random, - count: 1000, - } - .make_with(&mut seed); - - { - let mut trie = TrieDBMutBuilder::::new(&mut db, &mut root).build(); - for (k, v) in x.iter() { - trie.insert(k, v).expect("Inserts data"); - } - } - - (db, root, x) -} -use rand::Rng; -use 
sp_trie::cache::{CacheSize, SharedTrieCache}; -#[test] -fn recorder_does_its_thing() { - sp_tracing::try_init_simple(); - let (db, root, values) = create_trie(); - - let mut rng = rand::thread_rng(); - for _ in 1..2 { - let mut reference_recorder = Recorder::default(); - let recorder_for_test: RecorderProvider = RecorderProvider::new(); - let reference_cache: SharedTrieCache = - SharedTrieCache::new(CacheSize::new(1024 * 5)); - let cache_for_test: SharedTrieCache = - SharedTrieCache::new(CacheSize::new(1024 * 5)); - { - let mut local_cache = cache_for_test.local_cache(); - let mut trie_cache_for_reference = local_cache.as_trie_db_cache(root); - let mut reference_trie_recorder = reference_recorder.as_trie_recorder(root); - let reference_trie = TrieDBBuilder::::new(&db, &root) - .with_recorder(&mut reference_trie_recorder) - .with_cache(&mut trie_cache_for_reference) - .build(); - - let mut local_cache_for_test = cache_for_test.local_cache(); - let mut trie_cache_for_test = local_cache_for_test.as_trie_db_cache(root); - let mut trie_recorder_under_test = recorder_for_test.as_trie_recorder(root); - let test_trie = TrieDBBuilder::::new(&db, &root) - .with_recorder(&mut trie_recorder_under_test) - .with_cache(&mut trie_cache_for_test) - .build(); - - log::info!("just get"); - for _ in 0..10 { - let index: usize = rng.gen_range(0..values.len()); - test_trie.get(&values[index].0).unwrap().unwrap(); - reference_trie.get(&values[index].0).unwrap().unwrap(); - } - - log::info!("hash access"); - for _ in 0..10 { - let index: usize = rng.gen_range(0..values.len()); - test_trie.get_hash(&values[index].0); - reference_trie.get_hash(&values[index].0); - } - } - assert_eq!( - reference_recorder.estimate_encoded_size(), - recorder_for_test.estimate_encoded_size() - ); - } -} diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs index 28ceb9ea7a00..95629750434e 100644 --- 
a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs @@ -28,28 +28,27 @@ use sp_trie::{NodeCodec, StorageProof}; use trie_db::{Hasher, RecordedForKey, TrieAccess}; /// A trie recorder that only keeps track of the proof size. -pub(crate) struct SizeRecorder<'a, H: Hasher> { +pub(crate) struct SizeOnlyRecorder<'a, H: Hasher> { seen_nodes: RefMut<'a, BTreeSet>, encoded_size: RefMut<'a, usize>, recorded_keys: RefMut<'a, BTreeMap, RecordedForKey>>, } -impl<'a, H: trie_db::Hasher> trie_db::TrieRecorder for SizeRecorder<'a, H> { +impl<'a, H: trie_db::Hasher> trie_db::TrieRecorder for SizeOnlyRecorder<'a, H> { fn record<'b>(&mut self, access: TrieAccess<'b, H::Out>) { let mut encoded_size_update = 0; match access { - TrieAccess::NodeOwned { hash, node_owned } => + TrieAccess::NodeOwned { hash, node_owned } => { if !self.seen_nodes.get(&hash).is_some() { let node = node_owned.to_encoded::>(); - log::info!(target: "skunert", "TrieAccess::NodeOwned"); encoded_size_update += node.encoded_size(); self.seen_nodes.insert(hash); - }, + } + }, TrieAccess::EncodedNode { hash, encoded_node } => { if !self.seen_nodes.get(&hash).is_some() { let node = encoded_node.into_owned(); - log::info!(target: "skunert", "TrieAccess::EncodedNode"); encoded_size_update += node.encoded_size(); self.seen_nodes.insert(hash); } @@ -57,7 +56,6 @@ impl<'a, H: trie_db::Hasher> trie_db::TrieRecorder for SizeRecorder<'a, TrieAccess::Value { hash, value, full_key } => { if !self.seen_nodes.get(&hash).is_some() { let value = value.into_owned(); - log::info!(target: "skunert", "TrieAccess::Value"); encoded_size_update += value.encoded_size(); self.seen_nodes.insert(hash); } @@ -87,13 +85,13 @@ impl<'a, H: trie_db::Hasher> trie_db::TrieRecorder for SizeRecorder<'a, } } -pub(crate) struct RecorderProvider { +pub(crate) struct SizeOnlyRecorderProvider { seen_nodes: RefCell>, encoded_size: RefCell, recorded_keys: RefCell, 
RecordedForKey>>, } -impl RecorderProvider { +impl SizeOnlyRecorderProvider { pub fn new() -> Self { Self { seen_nodes: Default::default(), @@ -103,15 +101,15 @@ impl RecorderProvider { } } -impl sp_trie::TrieRecorderProvider for RecorderProvider { - type Recorder<'a> = SizeRecorder<'a, H> where H: 'a; +impl sp_trie::TrieRecorderProvider for SizeOnlyRecorderProvider { + type Recorder<'a> = SizeOnlyRecorder<'a, H> where H: 'a; fn drain_storage_proof(self) -> StorageProof { unimplemented!("Draining storage proof not supported!") } fn as_trie_recorder(&self, _storage_root: H::Out) -> Self::Recorder<'_> { - SizeRecorder { + SizeOnlyRecorder { encoded_size: self.encoded_size.borrow_mut(), seen_nodes: self.seen_nodes.borrow_mut(), recorded_keys: self.recorded_keys.borrow_mut(), @@ -124,5 +122,160 @@ impl sp_trie::TrieRecorderProvider for RecorderProvider Send for RecorderProvider {} -unsafe impl Sync for RecorderProvider {} +unsafe impl Send for SizeOnlyRecorderProvider {} +unsafe impl Sync for SizeOnlyRecorderProvider {} + +#[cfg(test)] +mod tests { + use rand::Rng; + use sp_trie::{ + cache::{CacheSize, SharedTrieCache}, + MemoryDB, TrieRecorderProvider, + }; + use trie_db::{Trie, TrieDBBuilder, TrieDBMutBuilder, TrieHash, TrieMut, TrieRecorder}; + use trie_standardmap::{Alphabet, StandardMap, ValueMode}; + + use crate::validate_block::trie_recorder::SizeOnlyRecorderProvider; + + type Recorder = sp_trie::recorder::Recorder; + + fn create_trie() -> ( + sp_trie::MemoryDB, + TrieHash>, + Vec<(Vec, Vec)>, + ) { + let mut db = MemoryDB::default(); + let mut root = Default::default(); + + let mut seed = Default::default(); + let test_data: Vec<(Vec, Vec)> = StandardMap { + alphabet: Alphabet::Low, + min_key: 16, + journal_key: 0, + value_mode: ValueMode::Random, + count: 1000, + } + .make_with(&mut seed) + .into_iter() + .map(|(k, v)| { + // Double the length so we end up with some values of 2 bytes and some of 64 + let v = [v.clone(), v].concat(); + (k, v) + }) + .collect(); 
+ + // Fill database with values + { + let mut trie = TrieDBMutBuilder::>::new( + &mut db, &mut root, + ) + .build(); + for (k, v) in &test_data { + trie.insert(k, v).expect("Inserts data"); + } + } + + (db, root, test_data) + } + + #[test] + fn recorder_equivalence_cache() { + sp_tracing::try_init_simple(); + let (db, root, test_data) = create_trie(); + + let mut rng = rand::thread_rng(); + for _ in 1..10 { + let reference_recorder = Recorder::default(); + let recorder_for_test: SizeOnlyRecorderProvider = + SizeOnlyRecorderProvider::new(); + let reference_cache: SharedTrieCache = + SharedTrieCache::new(CacheSize::new(1024 * 5)); + let cache_for_test: SharedTrieCache = + SharedTrieCache::new(CacheSize::new(1024 * 5)); + { + let local_cache = cache_for_test.local_cache(); + let mut trie_cache_for_reference = local_cache.as_trie_db_cache(root); + let mut reference_trie_recorder = reference_recorder.as_trie_recorder(root); + let reference_trie = + TrieDBBuilder::>::new(&db, &root) + .with_recorder(&mut reference_trie_recorder) + .with_cache(&mut trie_cache_for_reference) + .build(); + + let local_cache_for_test = reference_cache.local_cache(); + let mut trie_cache_for_test = local_cache_for_test.as_trie_db_cache(root); + let mut trie_recorder_under_test = recorder_for_test.as_trie_recorder(root); + let test_trie = + TrieDBBuilder::>::new(&db, &root) + .with_recorder(&mut trie_recorder_under_test) + .with_cache(&mut trie_cache_for_test) + .build(); + + // Access random values from the test data + for _ in 0..100 { + let index: usize = rng.gen_range(0..test_data.len()); + test_trie.get(&test_data[index].0).unwrap().unwrap(); + reference_trie.get(&test_data[index].0).unwrap().unwrap(); + } + + // Check that we have the same nodes recorded for both recorders + for (key, _) in test_data.iter() { + let refe = reference_trie_recorder.trie_nodes_recorded_for_key(key); + let comp = trie_recorder_under_test.trie_nodes_recorded_for_key(key); + assert!(matches!(refe, comp)); + } 
+ } + + // Check that we have the same size recorded for both recorders + assert_eq!( + reference_recorder.estimate_encoded_size(), + recorder_for_test.estimate_encoded_size() + ); + } + } + + #[test] + fn recorder_equivalence_no_cache() { + sp_tracing::try_init_simple(); + let (db, root, test_data) = create_trie(); + + let mut rng = rand::thread_rng(); + for _ in 1..10 { + let reference_recorder = Recorder::default(); + let recorder_for_test: SizeOnlyRecorderProvider = + SizeOnlyRecorderProvider::new(); + { + let mut reference_trie_recorder = reference_recorder.as_trie_recorder(root); + let reference_trie = + TrieDBBuilder::>::new(&db, &root) + .with_recorder(&mut reference_trie_recorder) + .build(); + + let mut trie_recorder_under_test = recorder_for_test.as_trie_recorder(root); + let test_trie = + TrieDBBuilder::>::new(&db, &root) + .with_recorder(&mut trie_recorder_under_test) + .build(); + + for _ in 0..200 { + let index: usize = rng.gen_range(0..test_data.len()); + test_trie.get(&test_data[index].0).unwrap().unwrap(); + reference_trie.get(&test_data[index].0).unwrap().unwrap(); + } + + // Check that we have the same nodes recorded for both recorders + for (key, _) in test_data.iter() { + let refe = reference_trie_recorder.trie_nodes_recorded_for_key(key); + let comp = trie_recorder_under_test.trie_nodes_recorded_for_key(key); + assert!(matches!(refe, comp)); + } + } + + // Check that we have the same size recorded for both recorders + assert_eq!( + reference_recorder.estimate_encoded_size(), + recorder_for_test.estimate_encoded_size() + ); + } + } +} diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index fed7811744a8..467daa88d328 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -429,7 +429,7 @@ where backend.unpin_block(message); } else { log::debug!("Terminating unpin-worker, backend reference was dropped."); - return; + return } } 
log::debug!("Terminating unpin-worker, stream terminated.") @@ -515,7 +515,7 @@ where } = import_block; if !intermediates.is_empty() { - return Err(Error::IncompletePipeline); + return Err(Error::IncompletePipeline) } let fork_choice = fork_choice.ok_or(Error::IncompletePipeline)?; @@ -610,20 +610,19 @@ where // the block is lower than our last finalized block so it must revert // finality, refusing import. - if status == blockchain::BlockStatus::Unknown - && *import_headers.post().number() <= info.finalized_number - && !gap_block + if status == blockchain::BlockStatus::Unknown && + *import_headers.post().number() <= info.finalized_number && + !gap_block { - return Err(sp_blockchain::Error::NotInFinalizedChain); + return Err(sp_blockchain::Error::NotInFinalizedChain) } // this is a fairly arbitrary choice of where to draw the line on making notifications, // but the general goal is to only make notifications when we are already fully synced // and get a new chain head. let make_notifications = match origin { - BlockOrigin::NetworkBroadcast | BlockOrigin::Own | BlockOrigin::ConsensusBroadcast => { - true - }, + BlockOrigin::NetworkBroadcast | BlockOrigin::Own | BlockOrigin::ConsensusBroadcast => + true, BlockOrigin::Genesis | BlockOrigin::NetworkInitialSync | BlockOrigin::File => false, }; @@ -657,14 +656,12 @@ where let storage_key = PrefixedStorageKey::new_ref(&parent_storage); let storage_key = match ChildType::from_prefixed_key(storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => { - storage_key - }, - None => { + Some((ChildType::ParentKeyId, storage_key)) => + storage_key, + None => return Err(Error::Backend( "Invalid child storage key.".to_string(), - )) - }, + )), }; let entry = storage .children_default @@ -689,7 +686,7 @@ where // State root mismatch when importing state. This should not happen in // safe fast sync mode, but may happen in unsafe mode. 
warn!("Error importing state: State root mismatch."); - return Err(Error::InvalidStateRoot); + return Err(Error::InvalidStateRoot) } None }, @@ -712,12 +709,11 @@ where )?; } - let is_new_best = !gap_block - && (finalized - || match fork_choice { - ForkChoiceStrategy::LongestChain => { - import_headers.post().number() > &info.best_number - }, + let is_new_best = !gap_block && + (finalized || + match fork_choice { + ForkChoiceStrategy::LongestChain => + import_headers.post().number() > &info.best_number, ForkChoiceStrategy::Custom(v) => v, }); @@ -841,21 +837,18 @@ where let state_action = std::mem::replace(&mut import_block.state_action, StateAction::Skip); let (enact_state, storage_changes) = match (self.block_status(*parent_hash)?, state_action) { - (BlockStatus::KnownBad, _) => { - return Ok(PrepareStorageChangesResult::Discard(ImportResult::KnownBad)) - }, + (BlockStatus::KnownBad, _) => + return Ok(PrepareStorageChangesResult::Discard(ImportResult::KnownBad)), ( BlockStatus::InChainPruned, StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(_)), ) => return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)), (_, StateAction::ApplyChanges(changes)) => (true, Some(changes)), - (BlockStatus::Unknown, _) => { - return Ok(PrepareStorageChangesResult::Discard(ImportResult::UnknownParent)) - }, + (BlockStatus::Unknown, _) => + return Ok(PrepareStorageChangesResult::Discard(ImportResult::UnknownParent)), (_, StateAction::Skip) => (false, None), - (BlockStatus::InChainPruned, StateAction::Execute) => { - return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)) - }, + (BlockStatus::InChainPruned, StateAction::Execute) => + return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)), (BlockStatus::InChainPruned, StateAction::ExecuteIfPossible) => (false, None), (_, StateAction::Execute) => (true, None), (_, StateAction::ExecuteIfPossible) => (true, None), @@ -888,7 +881,7 @@ where if 
import_block.header.state_root() != &gen_storage_changes.transaction_storage_root { - return Err(Error::InvalidStateRoot); + return Err(Error::InvalidStateRoot) } Some(sc_consensus::StorageChanges::Changes(gen_storage_changes)) }, @@ -914,7 +907,7 @@ where "Possible safety violation: attempted to re-finalize last finalized block {:?} ", hash, ); - return Ok(()); + return Ok(()) } // Find tree route from last finalized to given block. @@ -928,7 +921,7 @@ where retracted, info.finalized_hash ); - return Err(sp_blockchain::Error::NotInFinalizedChain); + return Err(sp_blockchain::Error::NotInFinalizedChain) } // We may need to coercively update the best block if there is more than one @@ -1008,7 +1001,7 @@ where // since we won't be running the loop below which // would also remove any closed sinks. sinks.retain(|sink| !sink.is_closed()); - return Ok(()); + return Ok(()) }, }; @@ -1044,7 +1037,7 @@ where self.every_import_notification_sinks.lock().retain(|sink| !sink.is_closed()); - return Ok(()); + return Ok(()) }, }; @@ -1143,18 +1136,17 @@ where .as_ref() .map_or(false, |importing| &hash == importing) { - return Ok(BlockStatus::Queued); + return Ok(BlockStatus::Queued) } let hash_and_number = self.backend.blockchain().number(hash)?.map(|n| (hash, n)); match hash_and_number { - Some((hash, number)) => { + Some((hash, number)) => if self.backend.have_state_at(hash, number) { Ok(BlockStatus::InChainWithState) } else { Ok(BlockStatus::InChainPruned) - } - }, + }, None => Ok(BlockStatus::Unknown), } } @@ -1190,7 +1182,7 @@ where let genesis_hash = self.backend.blockchain().info().genesis_hash; if genesis_hash == target_hash { - return Ok(Vec::new()); + return Ok(Vec::new()) } let mut current_hash = target_hash; @@ -1206,7 +1198,7 @@ where current_hash = ancestor_hash; if genesis_hash == current_hash { - break; + break } current = ancestor; @@ -1291,15 +1283,14 @@ where size_limit: usize, ) -> sp_blockchain::Result> { if start_key.len() > MAX_NESTED_TRIE_DEPTH { - return 
Err(Error::Backend("Invalid start key.".to_string())); + return Err(Error::Backend("Invalid start key.".to_string())) } let state = self.state_at(hash)?; let child_info = |storage_key: &Vec| -> sp_blockchain::Result { let storage_key = PrefixedStorageKey::new_ref(storage_key); match ChildType::from_prefixed_key(storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => { - Ok(ChildInfo::new_default(storage_key)) - }, + Some((ChildType::ParentKeyId, storage_key)) => + Ok(ChildInfo::new_default(storage_key)), None => Err(Error::Backend("Invalid child storage key.".to_string())), } }; @@ -1311,7 +1302,7 @@ where { Some((child_info(start_key)?, child_root)) } else { - return Err(Error::Backend("Invalid root start key.".to_string())); + return Err(Error::Backend("Invalid root start key.".to_string())) } } else { None @@ -1355,18 +1346,18 @@ where let size = value.len() + next_key.len(); if total_size + size > size_limit && !entries.is_empty() { complete = false; - break; + break } total_size += size; - if current_child.is_none() - && sp_core::storage::well_known_keys::is_child_storage_key(next_key.as_slice()) - && !child_roots.contains(value.as_slice()) + if current_child.is_none() && + sp_core::storage::well_known_keys::is_child_storage_key(next_key.as_slice()) && + !child_roots.contains(value.as_slice()) { child_roots.insert(value.clone()); switch_child_key = Some((next_key.clone(), value.clone())); entries.push((next_key.clone(), value)); - break; + break } entries.push((next_key.clone(), value)); current_key = next_key; @@ -1386,12 +1377,12 @@ where complete, )); if !complete { - break; + break } } else { result[0].0.key_values.extend(entries.into_iter()); result[0].1 = complete; - break; + break } } Ok(result) @@ -1816,7 +1807,7 @@ where match self.block_rules.lookup(number, &hash) { BlockLookupResult::KnownBad => { trace!("Rejecting known bad block: #{} {:?}", number, hash); - return Ok(ImportResult::KnownBad); + return Ok(ImportResult::KnownBad) }, 
BlockLookupResult::Expected(expected_hash) => { trace!( @@ -1825,7 +1816,7 @@ where expected_hash, number ); - return Ok(ImportResult::KnownBad); + return Ok(ImportResult::KnownBad) }, BlockLookupResult::NotSpecial => {}, } @@ -1836,12 +1827,10 @@ where .block_status(hash) .map_err(|e| ConsensusError::ClientImport(e.to_string()))? { - BlockStatus::InChainWithState | BlockStatus::Queued => { - return Ok(ImportResult::AlreadyInChain) - }, - BlockStatus::InChainPruned if !import_existing => { - return Ok(ImportResult::AlreadyInChain) - }, + BlockStatus::InChainWithState | BlockStatus::Queued => + return Ok(ImportResult::AlreadyInChain), + BlockStatus::InChainPruned if !import_existing => + return Ok(ImportResult::AlreadyInChain), BlockStatus::InChainPruned => {}, BlockStatus::Unknown => {}, BlockStatus::KnownBad => return Ok(ImportResult::KnownBad), @@ -2007,9 +1996,8 @@ where fn block(&self, hash: Block::Hash) -> sp_blockchain::Result>> { Ok(match (self.header(hash)?, self.body(hash)?, self.justifications(hash)?) 
{ - (Some(header), Some(extrinsics), justifications) => { - Some(SignedBlock { block: Block::new(header, extrinsics), justifications }) - }, + (Some(header), Some(extrinsics), justifications) => + Some(SignedBlock { block: Block::new(header, extrinsics), justifications }), _ => None, }) } From d57e689365e49e034d163f28bb91697b43b7f6fc Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Fri, 8 Sep 2023 12:09:07 +0200 Subject: [PATCH 20/61] Remove last debugging relicts --- cumulus/parachains/common/src/impls.rs | 63 ------------------- .../assets/asset-hub-kusama/src/lib.rs | 1 - 2 files changed, 64 deletions(-) diff --git a/cumulus/parachains/common/src/impls.rs b/cumulus/parachains/common/src/impls.rs index f78cdcb00e7e..35d775c6fb0d 100644 --- a/cumulus/parachains/common/src/impls.rs +++ b/cumulus/parachains/common/src/impls.rs @@ -288,66 +288,3 @@ mod tests { ); } } - -#[derive(Encode, Decode, Clone, Eq, PartialEq, Default, TypeInfo)] -#[scale_info(skip_type_params(T))] -pub struct ClawbackExtension(sp_std::marker::PhantomData); - -impl core::fmt::Debug for ClawbackExtension { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - f.write_str("jap"); - Ok(()) - } -} - -impl ClawbackExtension where - T::RuntimeCall: Dispatchable -{ -} - -impl sp_runtime::traits::SignedExtension for ClawbackExtension -where - T::RuntimeCall: Dispatchable, -{ - const IDENTIFIER: &'static str = "Clawback"; - - type AccountId = T::AccountId; - type Call = T::RuntimeCall; - type AdditionalSigned = (); - type Pre = (); - - fn additional_signed( - &self, - ) -> Result - { - Ok(()) - } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &sp_runtime::traits::DispatchInfoOf, - len: usize, - ) -> Result { - log::info!(target: "skunert", "Calling pre dispatch of my extension"); - let proof_size = - cumulus_primitives_reclaim::pov_reclaim_host_functions::current_storage_proof_size(); - log::info!(target: "skunert","Got proof size: 
{}", proof_size); - Ok(()) - } - - fn post_dispatch( - _pre: Option, - _info: &DispatchInfoOf, - _post_info: &PostDispatchInfoOf, - _len: usize, - _result: &DispatchResult, - ) -> Result<(), TransactionValidityError> { - log::info!(target: "skunert", "Calling post dispatch of my extension"); - let proof_size = - cumulus_primitives_reclaim::pov_reclaim_host_functions::current_storage_proof_size(); - log::info!(target: "skunert","Got proof size: {}", proof_size); - Ok(()) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs index 58d3c85bbc9d..828d1b4750a3 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs @@ -884,7 +884,6 @@ pub type SignedExtra = ( frame_system::CheckNonce, frame_system::CheckWeight, pallet_asset_conversion_tx_payment::ChargeAssetTxPayment, - parachains_common::impls::ClawbackExtension, ); /// Unchecked extrinsic type as expected by this runtime. 
pub type UncheckedExtrinsic = From 5b317fda3be205f4136f10d4490387ccd4f9765d Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Fri, 8 Sep 2023 12:12:32 +0200 Subject: [PATCH 21/61] Fix name in validate_block implementation --- .../parachain-system/src/validate_block/implementation.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index e6176b05acbf..971b357f2142 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -171,7 +171,7 @@ where sp_io::offchain_index::host_set.replace_implementation(host_offchain_index_set), sp_io::offchain_index::host_clear.replace_implementation(host_offchain_index_clear), cumulus_primitives_reclaim::pov_reclaim_host_functions::host_current_storage_proof_size - .replace_implementation(reclaim_pov_weight), + .replace_implementation(host_current_storage_proof_size), ); run_with_externalities::(&backend, || { @@ -310,7 +310,7 @@ fn host_storage_clear(key: &[u8]) { with_externalities(|ext| ext.place_storage(key.to_vec(), None)) } -fn reclaim_pov_weight() -> u32 { +fn host_current_storage_proof_size() -> u32 { with_externalities(|ext| ext.proof_size()).unwrap_or_default() } From 2793696eaf06ebd43df0abe5d9fe10bd62d74fbd Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Fri, 8 Sep 2023 12:48:54 +0200 Subject: [PATCH 22/61] Reset trie-cache between runs --- cumulus/parachains/common/src/impls.rs | 22 ++++-------- cumulus/test/service/benches/block_import.rs | 8 +++-- .../service/benches/block_import_glutton.rs | 6 +++- cumulus/test/service/src/lib.rs | 36 ++++++++++--------- cumulus/test/service/src/main.rs | 2 +- 5 files changed, 38 insertions(+), 36 deletions(-) diff --git a/cumulus/parachains/common/src/impls.rs b/cumulus/parachains/common/src/impls.rs index 
35d775c6fb0d..8078e5174cb3 100644 --- a/cumulus/parachains/common/src/impls.rs +++ b/cumulus/parachains/common/src/impls.rs @@ -16,22 +16,12 @@ //! Auxiliary struct/enums for parachain runtimes. //! Taken from polkadot/runtime/common (at a21cd64) and adapted for parachains. -use codec::{Decode, Encode}; -use frame_support::{ - dispatch::{DispatchInfo, PostDispatchInfo}, - traits::{ - fungibles::{self, Balanced, Credit}, - Contains, ContainsPair, Currency, Get, Imbalance, OnUnbalanced, - }, +use frame_support::traits::{ + fungibles::{self, Balanced, Credit}, + Contains, ContainsPair, Currency, Get, Imbalance, OnUnbalanced, }; -use frame_system::Config; use pallet_asset_tx_payment::HandleCredit; -use scale_info::TypeInfo; -use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, PostDispatchInfoOf, Zero}, - transaction_validity::TransactionValidityError, - DispatchResult, -}; +use sp_runtime::traits::Zero; use sp_std::marker::PhantomData; use xcm::latest::{AssetId, Fungibility::Fungible, MultiAsset, MultiLocation}; @@ -122,8 +112,8 @@ pub struct AssetsFrom(PhantomData); impl> ContainsPair for AssetsFrom { fn contains(asset: &MultiAsset, origin: &MultiLocation) -> bool { let loc = T::get(); - &loc == origin && - matches!(asset, MultiAsset { id: AssetId::Concrete(asset_loc), fun: Fungible(_a) } + &loc == origin + && matches!(asset, MultiAsset { id: AssetId::Concrete(asset_loc), fun: Fungible(_a) } if asset_loc.match_and_split(&loc).is_some()) } } diff --git a/cumulus/test/service/benches/block_import.rs b/cumulus/test/service/benches/block_import.rs index 85a681813785..99fef006d50d 100644 --- a/cumulus/test/service/benches/block_import.rs +++ b/cumulus/test/service/benches/block_import.rs @@ -54,6 +54,7 @@ fn benchmark_block_import(c: &mut Criterion) { ); let client = alice.client; + let backend = alice.backend; let (max_transfer_count, extrinsics) = utils::create_benchmarking_transfer_extrinsics(&client, &src_accounts, &dst_accounts); @@ -73,12 +74,15 @@ fn 
benchmark_block_import(c: &mut Criterion) { group.bench_function( format!( - "(transfers = {max_transfer_count}, import = {}) block import", + "(transfers = {max_transfer_count}, proof_recording = {}) block import", bench_parameters.0 ), |b| { b.iter_batched( - || benchmark_block.block.clone(), + || { + backend.reset_trie_cache(); + benchmark_block.block.clone() + }, |block| { client.runtime_api().execute_block(parent_hash, block).unwrap(); }, diff --git a/cumulus/test/service/benches/block_import_glutton.rs b/cumulus/test/service/benches/block_import_glutton.rs index b49db9f449e9..82e68b5bcf82 100644 --- a/cumulus/test/service/benches/block_import_glutton.rs +++ b/cumulus/test/service/benches/block_import_glutton.rs @@ -43,6 +43,7 @@ fn benchmark_block_import(c: &mut Criterion) { cumulus_test_service::TestNodeBuilder::new(para_id, tokio_handle.clone(), Alice).build(), ); let client = alice.client; + let backend = alice.backend; let mut group = c.benchmark_group("Block import"); group.sample_size(20); @@ -79,7 +80,10 @@ fn benchmark_block_import(c: &mut Criterion) { ), |b| { b.iter_batched( - || benchmark_block.block.clone(), + || { + backend.reset_trie_cache(); + benchmark_block.block.clone() + }, |block| { client.runtime_api().execute_block(parent_hash, block).unwrap(); }, diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index eed9575f3041..097b57d988a5 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -316,6 +316,7 @@ pub async fn start_node_impl( Arc>, RpcHandlers, TransactionPool, + Arc, )> where RB: Fn(Arc) -> Result, sc_service::Error> + Send + 'static, @@ -478,7 +479,7 @@ where start_network.start_network(); - Ok((task_manager, client, network, rpc_handlers, transaction_pool)) + Ok((task_manager, client, network, rpc_handlers, transaction_pool, backend)) } /// A Cumulus test node instance used for testing. 
@@ -496,6 +497,8 @@ pub struct TestNode { pub rpc_handlers: RpcHandlers, /// Node's transaction pool pub transaction_pool: TransactionPool, + /// Node's backend + pub backend: Arc, } #[allow(missing_docs)] @@ -693,25 +696,26 @@ impl TestNodeBuilder { format!("{} (relay chain)", relay_chain_config.network.node_name); let multiaddr = parachain_config.network.listen_addresses[0].clone(); - let (task_manager, client, network, rpc_handlers, transaction_pool) = start_node_impl( - parachain_config, - self.collator_key, - relay_chain_config, - self.para_id, - self.wrap_announce_block, - false, - |_| Ok(jsonrpsee::RpcModule::new(())), - self.consensus, - collator_options, - self.record_proof_during_import, - ) - .await - .expect("could not create Cumulus test service"); + let (task_manager, client, network, rpc_handlers, transaction_pool, backend) = + start_node_impl( + parachain_config, + self.collator_key, + relay_chain_config, + self.para_id, + self.wrap_announce_block, + false, + |_| Ok(jsonrpsee::RpcModule::new(())), + self.consensus, + collator_options, + self.record_proof_during_import, + ) + .await + .expect("could not create Cumulus test service"); let peer_id = network.local_peer_id(); let addr = MultiaddrWithPeerId { multiaddr, peer_id }; - TestNode { task_manager, client, network, addr, rpc_handlers, transaction_pool } + TestNode { task_manager, client, network, addr, rpc_handlers, transaction_pool, backend } } } diff --git a/cumulus/test/service/src/main.rs b/cumulus/test/service/src/main.rs index 1a83e7b1ade7..e3829a078da3 100644 --- a/cumulus/test/service/src/main.rs +++ b/cumulus/test/service/src/main.rs @@ -128,7 +128,7 @@ fn main() -> Result<(), sc_cli::Error> { }) .unwrap_or(cumulus_test_service::Consensus::RelayChain); - let (mut task_manager, _, _, _, _) = tokio_runtime + let (mut task_manager, _, _, _, _, _) = tokio_runtime .block_on(cumulus_test_service::start_node_impl( config, collator_key, From 652d1d812b360bd61c84fa5670a719233071221f Mon Sep 17 
00:00:00 2001 From: Sebastian Kunert Date: Fri, 8 Sep 2023 14:11:53 +0200 Subject: [PATCH 23/61] Improve benchmarks --- cumulus/test/service/benches/block_import.rs | 6 ++-- .../service/benches/block_import_glutton.rs | 33 ++++++++++++------- 2 files changed, 25 insertions(+), 14 deletions(-) diff --git a/cumulus/test/service/benches/block_import.rs b/cumulus/test/service/benches/block_import.rs index 99fef006d50d..9f88e464012c 100644 --- a/cumulus/test/service/benches/block_import.rs +++ b/cumulus/test/service/benches/block_import.rs @@ -41,7 +41,7 @@ fn benchmark_block_import(c: &mut Criterion) { let (src_accounts, dst_accounts, account_ids) = utils::create_benchmark_accounts(); for bench_parameters in &[(true, Alice), (false, Bob)] { - let alice = runtime.block_on( + let node = runtime.block_on( cumulus_test_service::TestNodeBuilder::new( para_id, tokio_handle.clone(), @@ -53,8 +53,8 @@ fn benchmark_block_import(c: &mut Criterion) { .build(), ); - let client = alice.client; - let backend = alice.backend; + let client = node.client; + let backend = node.backend; let (max_transfer_count, extrinsics) = utils::create_benchmarking_transfer_extrinsics(&client, &src_accounts, &dst_accounts); diff --git a/cumulus/test/service/benches/block_import_glutton.rs b/cumulus/test/service/benches/block_import_glutton.rs index 82e68b5bcf82..6e19dfa35b8a 100644 --- a/cumulus/test/service/benches/block_import_glutton.rs +++ b/cumulus/test/service/benches/block_import_glutton.rs @@ -28,7 +28,7 @@ use core::time::Duration; use cumulus_primitives_core::ParaId; use sc_block_builder::{BlockBuilderProvider, RecordProof}; -use sp_keyring::Sr25519Keyring::Alice; +use sp_keyring::Sr25519Keyring::{Alice, Bob, Charlie, Ferdie}; use cumulus_test_service::bench_utils as utils; @@ -39,18 +39,29 @@ fn benchmark_block_import(c: &mut Criterion) { let para_id = ParaId::from(100); let tokio_handle = runtime.handle(); - let alice = runtime.block_on( - 
cumulus_test_service::TestNodeBuilder::new(para_id, tokio_handle.clone(), Alice).build(), - ); - let client = alice.client; - let backend = alice.backend; + let mut initialize_glutton_pallet = true; + for (compute_ratio, storage_ratio, proof_on_import, keyring_identity) in &[ + (One::one(), Zero::zero(), true, Alice), + (One::one(), One::one(), true, Bob), + (One::one(), Zero::zero(), false, Charlie), + (One::one(), One::one(), false, Ferdie), + ] { + let node = runtime.block_on( + cumulus_test_service::TestNodeBuilder::new( + para_id, + tokio_handle.clone(), + *keyring_identity, + ) + .import_proof_recording(*proof_on_import) + .build(), + ); + let client = node.client; + let backend = node.backend; - let mut group = c.benchmark_group("Block import"); - group.sample_size(20); - group.measurement_time(Duration::from_secs(120)); + let mut group = c.benchmark_group("Block import"); + group.sample_size(20); + group.measurement_time(Duration::from_secs(120)); - let mut initialize_glutton_pallet = true; - for (compute_ratio, storage_ratio) in &[(One::one(), Zero::zero()), (One::one(), One::one())] { let block = utils::set_glutton_parameters( &client, initialize_glutton_pallet, From 00c734a18293dcb5c01b795cab2e772d838560a7 Mon Sep 17 00:00:00 2001 From: command-bot <> Date: Fri, 8 Sep 2023 12:26:56 +0000 Subject: [PATCH 24/61] ".git/.scripts/commands/fmt/fmt.sh" --- .../parachain-system/src/validate_block/trie_recorder.rs | 5 ++--- cumulus/parachains/common/src/impls.rs | 4 ++-- .../primitives/state-machine/src/trie_backend_essence.rs | 2 +- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs index 95629750434e..72535516153e 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs @@ -38,13 +38,12 @@ impl<'a, H: 
trie_db::Hasher> trie_db::TrieRecorder for SizeOnlyRecorder< fn record<'b>(&mut self, access: TrieAccess<'b, H::Out>) { let mut encoded_size_update = 0; match access { - TrieAccess::NodeOwned { hash, node_owned } => { + TrieAccess::NodeOwned { hash, node_owned } => if !self.seen_nodes.get(&hash).is_some() { let node = node_owned.to_encoded::>(); encoded_size_update += node.encoded_size(); self.seen_nodes.insert(hash); - } - }, + }, TrieAccess::EncodedNode { hash, encoded_node } => { if !self.seen_nodes.get(&hash).is_some() { let node = encoded_node.into_owned(); diff --git a/cumulus/parachains/common/src/impls.rs b/cumulus/parachains/common/src/impls.rs index 8078e5174cb3..107cd5c68732 100644 --- a/cumulus/parachains/common/src/impls.rs +++ b/cumulus/parachains/common/src/impls.rs @@ -112,8 +112,8 @@ pub struct AssetsFrom(PhantomData); impl> ContainsPair for AssetsFrom { fn contains(asset: &MultiAsset, origin: &MultiLocation) -> bool { let loc = T::get(); - &loc == origin - && matches!(asset, MultiAsset { id: AssetId::Concrete(asset_loc), fun: Fungible(_a) } + &loc == origin && + matches!(asset, MultiAsset { id: AssetId::Concrete(asset_loc), fun: Fungible(_a) } if asset_loc.match_and_split(&loc).is_some()) } } diff --git a/substrate/primitives/state-machine/src/trie_backend_essence.rs b/substrate/primitives/state-machine/src/trie_backend_essence.rs index 7e155f9a65b3..8e1a680414e0 100644 --- a/substrate/primitives/state-machine/src/trie_backend_essence.rs +++ b/substrate/primitives/state-machine/src/trie_backend_essence.rs @@ -111,7 +111,7 @@ where ) -> Option::Out>>>>, ) -> Option> { if !matches!(self.state, IterState::Pending) { - return None; + return None } let result = backend.with_trie_db(self.root, self.child_info.as_ref(), |db| { From 961f7500ca65dc524646606d8876ef9e0ce5d14a Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Fri, 8 Sep 2023 15:38:45 +0200 Subject: [PATCH 25/61] CI fixes --- Cargo.lock | 1 - cumulus/pallets/parachain-system/Cargo.toml | 1 
+ .../src/validate_block/mod.rs | 1 + .../src/validate_block/trie_recorder.rs | 19 ++++++++++--------- cumulus/parachains/common/Cargo.toml | 1 - .../test/service/benches/validate_block.rs | 6 ++++-- .../state-machine/src/trie_backend.rs | 10 +++++----- .../state-machine/src/trie_backend_essence.rs | 4 +++- 8 files changed, 24 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d4dc84709f03..a6197bc4a216 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10989,7 +10989,6 @@ name = "parachains-common" version = "1.0.0" dependencies = [ "cumulus-primitives-core", - "cumulus-primitives-reclaim", "cumulus-primitives-utility", "frame-support", "frame-system", diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 70a4586416e4..2e35acf4fac3 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -61,6 +61,7 @@ std = [ "cumulus-pallet-parachain-system-proc-macro/std", "cumulus-primitives-core/std", "cumulus-primitives-parachain-inherent/std", + "cumulus-primitives-reclaim/std", "environmental/std", "frame-support/std", "frame-system/std", diff --git a/cumulus/pallets/parachain-system/src/validate_block/mod.rs b/cumulus/pallets/parachain-system/src/validate_block/mod.rs index b159481079aa..763a4cffd77f 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/mod.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/mod.rs @@ -26,6 +26,7 @@ mod tests; #[doc(hidden)] mod trie_cache; +#[cfg(any(test, not(feature = "std")))] #[doc(hidden)] mod trie_recorder; diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs index 72535516153e..ece7ae1016b1 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs @@ -35,15 +35,16 @@ pub(crate) struct 
SizeOnlyRecorder<'a, H: Hasher> { } impl<'a, H: trie_db::Hasher> trie_db::TrieRecorder for SizeOnlyRecorder<'a, H> { - fn record<'b>(&mut self, access: TrieAccess<'b, H::Out>) { + fn record(&mut self, access: TrieAccess<'_, H::Out>) { let mut encoded_size_update = 0; match access { - TrieAccess::NodeOwned { hash, node_owned } => + TrieAccess::NodeOwned { hash, node_owned } => { if !self.seen_nodes.get(&hash).is_some() { let node = node_owned.to_encoded::>(); encoded_size_update += node.encoded_size(); self.seen_nodes.insert(hash); - }, + } + }, TrieAccess::EncodedNode { hash, encoded_node } => { if !self.seen_nodes.get(&hash).is_some() { let node = encoded_node.into_owned(); @@ -219,9 +220,9 @@ mod tests { // Check that we have the same nodes recorded for both recorders for (key, _) in test_data.iter() { - let refe = reference_trie_recorder.trie_nodes_recorded_for_key(key); - let comp = trie_recorder_under_test.trie_nodes_recorded_for_key(key); - assert!(matches!(refe, comp)); + let reference = reference_trie_recorder.trie_nodes_recorded_for_key(key); + let test_value = trie_recorder_under_test.trie_nodes_recorded_for_key(key); + assert_eq!(format!("{:?}", reference), format!("{:?}", test_value)); } } @@ -264,9 +265,9 @@ mod tests { // Check that we have the same nodes recorded for both recorders for (key, _) in test_data.iter() { - let refe = reference_trie_recorder.trie_nodes_recorded_for_key(key); - let comp = trie_recorder_under_test.trie_nodes_recorded_for_key(key); - assert!(matches!(refe, comp)); + let reference = reference_trie_recorder.trie_nodes_recorded_for_key(key); + let test_value = trie_recorder_under_test.trie_nodes_recorded_for_key(key); + assert_eq!(format!("{:?}", reference), format!("{:?}", test_value)); } } diff --git a/cumulus/parachains/common/Cargo.toml b/cumulus/parachains/common/Cargo.toml index 1d928898880d..18cafde0d303 100644 --- a/cumulus/parachains/common/Cargo.toml +++ b/cumulus/parachains/common/Cargo.toml @@ -43,7 +43,6 @@ 
xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm pallet-collator-selection = { path = "../../pallets/collator-selection", default-features = false } cumulus-primitives-core = { path = "../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../primitives/utility", default-features = false } -cumulus-primitives-reclaim = { path = "../../primitives/pov-reclaim", default-features = false } [dev-dependencies] pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false} diff --git a/cumulus/test/service/benches/validate_block.rs b/cumulus/test/service/benches/validate_block.rs index abcf7ad8e625..48b1cf1f5a1f 100644 --- a/cumulus/test/service/benches/validate_block.rs +++ b/cumulus/test/service/benches/validate_block.rs @@ -18,7 +18,9 @@ use codec::{Decode, Encode}; use core::time::Duration; use criterion::{criterion_group, criterion_main, BatchSize, Criterion, Throughput}; -use cumulus_primitives_core::{relay_chain::AccountId, PersistedValidationData, ValidationParams}; +use cumulus_primitives_core::{ + relay_chain::AccountId, ParaId, PersistedValidationData, ValidationParams, +}; use cumulus_test_client::{ generate_extrinsic_with_pair, BuildParachainBlockData, InitBlockBuilder, TestClientBuilder, ValidationResult, @@ -95,7 +97,7 @@ fn benchmark_block_validation(c: &mut Criterion) { ..Default::default() }; - let mut sproof_builder = RelayStateSproofBuilder { + let sproof_builder = RelayStateSproofBuilder { included_para_head: Some(parent_header.clone().encode().into()), para_id, ..Default::default() diff --git a/substrate/primitives/state-machine/src/trie_backend.rs b/substrate/primitives/state-machine/src/trie_backend.rs index 11844917b767..60ddb5327a63 100644 --- a/substrate/primitives/state-machine/src/trie_backend.rs +++ b/substrate/primitives/state-machine/src/trie_backend.rs @@ -163,11 +163,11 @@ pub struct UnimplementedRecorderProvider { #[cfg(not(feature = 
"std"))] impl trie_db::TrieRecorder for UnimplementedRecorderProvider { - fn record<'a>(&mut self, access: trie_db::TrieAccess<'a, H::Out>) { + fn record<'a>(&mut self, _access: trie_db::TrieAccess<'a, H::Out>) { unimplemented!() } - fn trie_nodes_recorded_for_key(&self, key: &[u8]) -> trie_db::RecordedForKey { + fn trie_nodes_recorded_for_key(&self, _key: &[u8]) -> trie_db::RecordedForKey { unimplemented!() } } @@ -180,7 +180,7 @@ impl TrieRecorderProvider for UnimplementedRecorderProvider { unimplemented!() } - fn as_trie_recorder(&self, storage_root: H::Out) -> Self::Recorder<'_> { + fn as_trie_recorder(&self, _storage_root: H::Out) -> Self::Recorder<'_> { unimplemented!() } @@ -1000,8 +1000,8 @@ pub mod tests { .storage_root(iter::once((&b"new-key"[..], Some(&b"new-value"[..]))), state_version); assert!(!tx.drain().is_empty()); assert!( - new_root != - test_trie(state_version, None, None) + new_root + != test_trie(state_version, None, None) .storage_root(iter::empty(), state_version) .0 ); diff --git a/substrate/primitives/state-machine/src/trie_backend_essence.rs b/substrate/primitives/state-machine/src/trie_backend_essence.rs index 8e1a680414e0..82b8a168a2de 100644 --- a/substrate/primitives/state-machine/src/trie_backend_essence.rs +++ b/substrate/primitives/state-machine/src/trie_backend_essence.rs @@ -28,7 +28,9 @@ use hash_db::{self, AsHashDB, HashDB, HashDBRef, Hasher, Prefix}; #[cfg(feature = "std")] use parking_lot::RwLock; use sp_core::storage::{ChildInfo, ChildType, StateVersion}; -use sp_std::{boxed::Box, marker::PhantomData, sync::Arc, vec::Vec}; +#[cfg(feature = "std")] +use sp_std::sync::Arc; +use sp_std::{boxed::Box, marker::PhantomData, vec::Vec}; use sp_trie::{ child_delta_trie_root, delta_trie_root, empty_child_trie_root, read_child_trie_hash, read_child_trie_value, read_trie_value, From 15f8c5dbf5e1be057bc2d27a9d1a8d8b3f4a2ff6 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Mon, 11 Sep 2023 13:58:48 +0200 Subject: [PATCH 26/61] 
Reintroduce extension --- Cargo.lock | 13 +++++++ Cargo.toml | 1 + cumulus/primitives/pov-reclaim/Cargo.toml | 4 +++ cumulus/primitives/pov-reclaim/src/lib.rs | 9 ++++- substrate/client/block-builder/Cargo.toml | 1 + substrate/client/block-builder/src/lib.rs | 3 ++ substrate/client/service/Cargo.toml | 1 + substrate/client/service/src/client/client.rs | 4 +++ .../primitives/proof-size-ext/Cargo.toml | 25 +++++++++++++ .../primitives/proof-size-ext/src/lib.rs | 35 +++++++++++++++++++ substrate/primitives/trie/src/lib.rs | 5 +++ substrate/primitives/trie/src/recorder.rs | 6 ++++ 12 files changed, 106 insertions(+), 1 deletion(-) create mode 100644 substrate/primitives/proof-size-ext/Cargo.toml create mode 100644 substrate/primitives/proof-size-ext/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index a6197bc4a216..e20b20ee9193 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3719,6 +3719,8 @@ dependencies = [ name = "cumulus-primitives-reclaim" version = "0.1.0" dependencies = [ + "sp-externalities", + "sp-proof-size-ext", "sp-runtime-interface", ] @@ -14626,6 +14628,7 @@ dependencies = [ "sp-blockchain", "sp-core", "sp-inherents", + "sp-proof-size-ext", "sp-runtime", "sp-state-machine", "substrate-test-runtime-client", @@ -15680,6 +15683,7 @@ dependencies = [ "sp-core", "sp-externalities", "sp-keystore", + "sp-proof-size-ext", "sp-runtime", "sp-session", "sp-state-machine", @@ -17337,6 +17341,15 @@ dependencies = [ "regex", ] +[[package]] +name = "sp-proof-size-ext" +version = "4.0.0-dev" +dependencies = [ + "sp-externalities", + "sp-runtime", + "sp-trie", +] + [[package]] name = "sp-rpc" version = "6.0.0" diff --git a/Cargo.toml b/Cargo.toml index 4db27b98e907..d4bbd73f52e0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -397,6 +397,7 @@ members = [ "substrate/primitives/genesis-builder", "substrate/primitives/inherents", "substrate/primitives/io", + "substrate/primitives/proof-size-ext", "substrate/primitives/keyring", "substrate/primitives/keystore", 
"substrate/primitives/maybe-compressed-blob", diff --git a/cumulus/primitives/pov-reclaim/Cargo.toml b/cumulus/primitives/pov-reclaim/Cargo.toml index 2db48d8b9aae..79b856d8f7df 100644 --- a/cumulus/primitives/pov-reclaim/Cargo.toml +++ b/cumulus/primitives/pov-reclaim/Cargo.toml @@ -6,9 +6,13 @@ edition.workspace = true [dependencies] sp-runtime-interface = { path = "../../../substrate/primitives/runtime-interface", default-features = false } +sp-externalities = { path = "../../../substrate/primitives/externalities", default-features = false } +sp-proof-size-ext = { path = "../../../substrate/primitives/proof-size-ext", default-features = false } [features] default = ["std"] std = [ "sp-runtime-interface/std", + "sp-proof-size-ext/std", + "sp-externalities/std", ] diff --git a/cumulus/primitives/pov-reclaim/src/lib.rs b/cumulus/primitives/pov-reclaim/src/lib.rs index d48ee703a6af..44092bb15853 100644 --- a/cumulus/primitives/pov-reclaim/src/lib.rs +++ b/cumulus/primitives/pov-reclaim/src/lib.rs @@ -16,11 +16,18 @@ #![cfg_attr(not(feature = "std"), no_std)] +use sp_externalities::ExternalitiesExt; + +#[cfg(feature = "std")] +use sp_proof_size_ext::ProofSizeExt; use sp_runtime_interface::runtime_interface; #[runtime_interface] pub trait PovReclaimHostFunctions { fn current_storage_proof_size(&mut self) -> u32 { - self.proof_size().unwrap_or_default() + match self.extension::() { + Some(ext) => ext.current_storage_proof_size(), + None => 0, + } } } diff --git a/substrate/client/block-builder/Cargo.toml b/substrate/client/block-builder/Cargo.toml index ff2f9635b7a2..74c75d8e7d17 100644 --- a/substrate/client/block-builder/Cargo.toml +++ b/substrate/client/block-builder/Cargo.toml @@ -20,6 +20,7 @@ sc-client-api = { path = "../api" } sp-api = { path = "../../primitives/api" } sp-block-builder = { path = "../../primitives/block-builder" } sp-blockchain = { path = "../../primitives/blockchain" } +sp-proof-size-ext = { path = "../../primitives/proof-size-ext" } sp-core = 
{ path = "../../primitives/core" } sp-inherents = { path = "../../primitives/inherents" } sp-runtime = { path = "../../primitives/runtime" } diff --git a/substrate/client/block-builder/src/lib.rs b/substrate/client/block-builder/src/lib.rs index 1878e7627480..e68f2e8095ad 100644 --- a/substrate/client/block-builder/src/lib.rs +++ b/substrate/client/block-builder/src/lib.rs @@ -173,6 +173,9 @@ where if record_proof.yes() { api.record_proof(); + let recorder = + api.proof_recorder().expect("We enable proof recording the line before; qed"); + api.register_extension(sp_proof_size_ext::ProofSizeExt::new(recorder)); } api.set_call_context(CallContext::Onchain); diff --git a/substrate/client/service/Cargo.toml b/substrate/client/service/Cargo.toml index 87b341fe3123..90cee44268bd 100644 --- a/substrate/client/service/Cargo.toml +++ b/substrate/client/service/Cargo.toml @@ -47,6 +47,7 @@ sp-core = { path = "../../primitives/core" } sp-keystore = { path = "../../primitives/keystore" } sp-session = { path = "../../primitives/session" } sp-state-machine = { path = "../../primitives/state-machine" } +sp-proof-size-ext = { path = "../../primitives/proof-size-ext" } sp-consensus = { path = "../../primitives/consensus/common" } sc-consensus = { path = "../consensus/common" } sp-storage = { path = "../../primitives/storage" } diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index 467daa88d328..bd3333b0cf8a 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -867,6 +867,10 @@ where if self.config.enable_import_proof_recording { runtime_api.record_proof(); + let recorder = runtime_api + .proof_recorder() + .expect("We enable proof recording the line before; qed"); + runtime_api.register_extension(sp_proof_size_ext::ProofSizeExt::new(recorder)); } runtime_api.execute_block( diff --git a/substrate/primitives/proof-size-ext/Cargo.toml 
b/substrate/primitives/proof-size-ext/Cargo.toml new file mode 100644 index 000000000000..c3c1eb199f43 --- /dev/null +++ b/substrate/primitives/proof-size-ext/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "sp-proof-size-ext" +version = "4.0.0-dev" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +homepage = "https://substrate.io" +repository.workspace = true +description = "A crate which contains an externalities extension to fetch the proof size." + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +sp-externalities = { path = "../externalities", default-features = false} +sp-trie = { path = "../trie", default-features = false} +sp-runtime = { path = "../runtime", default-features = false} + +[features] +default = ["std"] +std = [ + "sp-externalities/std", + "sp-trie/std", + "sp-runtime/std", +] diff --git a/substrate/primitives/proof-size-ext/src/lib.rs b/substrate/primitives/proof-size-ext/src/lib.rs new file mode 100644 index 000000000000..981b1095473a --- /dev/null +++ b/substrate/primitives/proof-size-ext/src/lib.rs @@ -0,0 +1,35 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use sp_trie::ProofSizeProvider; + +#[cfg(feature = "std")] +sp_externalities::decl_extension! 
{ + /// The proof size extension to fetch the current storage proof size + /// in externalities. + pub struct ProofSizeExt(Box); +} + +#[cfg(feature = "std")] +impl ProofSizeExt { + pub fn new(recorder: T) -> Self { + ProofSizeExt(Box::new(recorder)) + } + + pub fn current_storage_proof_size(&self) -> u32 { + self.0.estimate_encoded_size() as u32 + } +} diff --git a/substrate/primitives/trie/src/lib.rs b/substrate/primitives/trie/src/lib.rs index ba817f0a63e4..3cd6beefdd39 100644 --- a/substrate/primitives/trie/src/lib.rs +++ b/substrate/primitives/trie/src/lib.rs @@ -156,6 +156,11 @@ pub trait TrieRecorderProvider { fn estimate_encoded_size(&self) -> usize; } +pub trait ProofSizeProvider { + fn estimate_encoded_size(&self) -> usize; +} + + /// TrieDB error over `TrieConfiguration` trait. pub type TrieError = trie_db::TrieError, CError>; /// Reexport from `hash_db`, with genericity set for `Hasher` trait. diff --git a/substrate/primitives/trie/src/recorder.rs b/substrate/primitives/trie/src/recorder.rs index 28f029f75344..2e3f71d3d338 100644 --- a/substrate/primitives/trie/src/recorder.rs +++ b/substrate/primitives/trie/src/recorder.rs @@ -228,6 +228,12 @@ impl Recorder { } } +impl crate::ProofSizeProvider for Recorder { + fn estimate_encoded_size(&self) -> usize { + self.estimate_encoded_size() + } +} + /// The [`TrieRecorder`](trie_db::TrieRecorder) implementation. 
pub struct TrieRecorder<'a, H: Hasher> { inner: MutexGuard<'a, RecorderInner>, From 0a30b70dd92ed74f4c33ac4d12b9db9329c27cb2 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 12 Sep 2023 10:45:36 +0200 Subject: [PATCH 27/61] Use recorder in validate_block --- .../src/validate_block/implementation.rs | 107 ++++++++++-------- .../src/validate_block/trie_recorder.rs | 19 ++-- 2 files changed, 70 insertions(+), 56 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index 971b357f2142..d221c9072ea1 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -27,13 +27,14 @@ use polkadot_parachain_primitives::primitives::{ }; use codec::Encode; +use environmental::environmental; use frame_support::traits::{ExecuteBlock, ExtrinsicCall, Get, IsSubType}; use sp_core::storage::{ChildInfo, StateVersion}; use sp_externalities::{set_and_run_with_externalities, Externalities}; use sp_io::KillStorageResult; use sp_runtime::traits::{Block as BlockT, Extrinsic, HashingFor, Header as HeaderT}; use sp_std::{prelude::*, sync::Arc}; -use sp_trie::{MemoryDB, TrieRecorderProvider}; +use sp_trie::{MemoryDB, ProofSizeProvider, TrieRecorderProvider}; use trie_recorder::SizeOnlyRecorderProvider; type TrieBackend = sp_state_machine::TrieBackend< @@ -44,11 +45,12 @@ type TrieBackend = sp_state_machine::TrieBackend< >; type Ext<'a, B> = sp_state_machine::Ext<'a, HashingFor, TrieBackend>; - fn with_externalities R, R>(f: F) -> R { sp_externalities::with_externalities(f).expect("Environmental externalities not set.") } +environmental!(rec: trait ProofSizeProvider); + /// Validate the given parachain block. 
/// /// This function is doing roughly the following: @@ -121,7 +123,7 @@ where sp_std::mem::drop(storage_proof); - let recorder = SizeOnlyRecorderProvider::new(); + let mut recorder = SizeOnlyRecorderProvider::new(); let cache_provider = trie_cache::CacheProvider::new(); // We use the storage root of the `parent_head` to ensure that it is the correct root. // This is already being done above while creating the in-memory db, but let's be paranoid!! @@ -130,7 +132,7 @@ where *parent_header.state_root(), cache_provider, ) - .with_recorder(recorder) + .with_recorder(recorder.clone()) .build(); let _guard = ( @@ -174,57 +176,61 @@ where .replace_implementation(host_current_storage_proof_size), ); - run_with_externalities::(&backend, || { - let relay_chain_proof = crate::RelayChainStateProof::new( - PSC::SelfParaId::get(), - inherent_data.validation_data.relay_parent_storage_root, - inherent_data.relay_chain_state.clone(), - ) - .expect("Invalid relay chain state proof"); - - let res = CI::check_inherents(&block, &relay_chain_proof); - - if !res.ok() { - if log::log_enabled!(log::Level::Error) { - res.into_errors().for_each(|e| { - log::error!("Checking inherent with identifier `{:?}` failed", e.0) - }); + rec::using(&mut recorder, || { + run_with_externalities::(&backend, || { + let relay_chain_proof = crate::RelayChainStateProof::new( + PSC::SelfParaId::get(), + inherent_data.validation_data.relay_parent_storage_root, + inherent_data.relay_chain_state.clone(), + ) + .expect("Invalid relay chain state proof"); + + let res = CI::check_inherents(&block, &relay_chain_proof); + + if !res.ok() { + if log::log_enabled!(log::Level::Error) { + res.into_errors().for_each(|e| { + log::error!("Checking inherent with identifier `{:?}` failed", e.0) + }); + } + + panic!("Checking inherents failed"); } - - panic!("Checking inherents failed"); - } + }) }); - run_with_externalities::(&backend, || { - let head_data = HeadData(block.header().encode()); + rec::using(&mut recorder, || { + 
run_with_externalities::(&backend, || { + let head_data = HeadData(block.header().encode()); - E::execute_block(block); + E::execute_block(block); - let new_validation_code = crate::NewValidationCode::::get(); - let upward_messages = crate::UpwardMessages::::get().try_into().expect( - "Number of upward messages should not be greater than `MAX_UPWARD_MESSAGE_NUM`", - ); - let processed_downward_messages = crate::ProcessedDownwardMessages::::get(); - let horizontal_messages = crate::HrmpOutboundMessages::::get().try_into().expect( + let new_validation_code = crate::NewValidationCode::::get(); + let upward_messages = crate::UpwardMessages::::get().try_into().expect( + "Number of upward messages should not be greater than `MAX_UPWARD_MESSAGE_NUM`", + ); + let processed_downward_messages = crate::ProcessedDownwardMessages::::get(); + let horizontal_messages = crate::HrmpOutboundMessages::::get().try_into().expect( "Number of horizontal messages should not be greater than `MAX_HORIZONTAL_MESSAGE_NUM`", ); - let hrmp_watermark = crate::HrmpWatermark::::get(); - - let head_data = - if let Some(custom_head_data) = crate::CustomValidationHeadData::::get() { - HeadData(custom_head_data) - } else { - head_data - }; - - ValidationResult { - head_data, - new_validation_code: new_validation_code.map(Into::into), - upward_messages, - processed_downward_messages, - horizontal_messages, - hrmp_watermark, - } + let hrmp_watermark = crate::HrmpWatermark::::get(); + + let head_data = + if let Some(custom_head_data) = crate::CustomValidationHeadData::::get() { + HeadData(custom_head_data) + } else { + head_data + }; + + ValidationResult { + head_data, + new_validation_code: new_validation_code.map(Into::into), + upward_messages, + processed_downward_messages, + horizontal_messages, + hrmp_watermark, + } + }) }) } @@ -311,7 +317,10 @@ fn host_storage_clear(key: &[u8]) { } fn host_current_storage_proof_size() -> u32 { - with_externalities(|ext| ext.proof_size()).unwrap_or_default() + 
rec::with(|rec| rec.estimate_encoded_size()) + .unwrap_or_default() + .try_into() + .unwrap() } fn host_storage_root(version: StateVersion) -> Vec { diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs index ece7ae1016b1..2257ac6ddf58 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs @@ -24,7 +24,7 @@ use sp_std::{ collections::{btree_map::BTreeMap, btree_set::BTreeSet}, sync::Arc, }; -use sp_trie::{NodeCodec, StorageProof}; +use sp_trie::{NodeCodec, ProofSizeProvider, StorageProof}; use trie_db::{Hasher, RecordedForKey, TrieAccess}; /// A trie recorder that only keeps track of the proof size. @@ -38,13 +38,12 @@ impl<'a, H: trie_db::Hasher> trie_db::TrieRecorder for SizeOnlyRecorder< fn record(&mut self, access: TrieAccess<'_, H::Out>) { let mut encoded_size_update = 0; match access { - TrieAccess::NodeOwned { hash, node_owned } => { + TrieAccess::NodeOwned { hash, node_owned } => if !self.seen_nodes.get(&hash).is_some() { let node = node_owned.to_encoded::>(); encoded_size_update += node.encoded_size(); self.seen_nodes.insert(hash); - } - }, + }, TrieAccess::EncodedNode { hash, encoded_node } => { if !self.seen_nodes.get(&hash).is_some() { let node = encoded_node.into_owned(); @@ -85,10 +84,11 @@ impl<'a, H: trie_db::Hasher> trie_db::TrieRecorder for SizeOnlyRecorder< } } +#[derive(Clone)] pub(crate) struct SizeOnlyRecorderProvider { - seen_nodes: RefCell>, - encoded_size: RefCell, - recorded_keys: RefCell, RecordedForKey>>, + seen_nodes: Arc>>, + encoded_size: Arc>, + recorded_keys: Arc, RecordedForKey>>>, } impl SizeOnlyRecorderProvider { @@ -120,6 +120,11 @@ impl sp_trie::TrieRecorderProvider for SizeOnlyRecorderPr *self.encoded_size.borrow() } } +impl ProofSizeProvider for SizeOnlyRecorderProvider { + fn estimate_encoded_size(&self) -> usize { + 
*self.encoded_size.borrow() + } +} // This is safe here since we are single-threaded in WASM unsafe impl Send for SizeOnlyRecorderProvider {} From a1d7db5ff4bc2d12c2eb5ef61ecd8a79da3f3050 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 12 Sep 2023 14:10:17 +0200 Subject: [PATCH 28/61] Move recorder into static mut --- .../src/validate_block/implementation.rs | 117 ++++++++++-------- .../src/validate_block/trie_recorder.rs | 15 +-- substrate/client/block-builder/src/lib.rs | 6 +- substrate/client/service/src/client/client.rs | 8 +- .../primitives/proof-size-ext/src/lib.rs | 2 - 5 files changed, 79 insertions(+), 69 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index d221c9072ea1..6d5ac628e7bf 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -27,7 +27,6 @@ use polkadot_parachain_primitives::primitives::{ }; use codec::Encode; -use environmental::environmental; use frame_support::traits::{ExecuteBlock, ExtrinsicCall, Get, IsSubType}; use sp_core::storage::{ChildInfo, StateVersion}; use sp_externalities::{set_and_run_with_externalities, Externalities}; @@ -49,7 +48,9 @@ fn with_externalities R, R>(f: F) -> R { sp_externalities::with_externalities(f).expect("Environmental externalities not set.") } -environmental!(rec: trait ProofSizeProvider); +/// Recorder instance to be used during this validate_block call. +/// Static mut is fine here because we are single-threaded in WASM. +static mut RECORDER: Option> = None; /// Validate the given parachain block. 
/// @@ -123,7 +124,7 @@ where sp_std::mem::drop(storage_proof); - let mut recorder = SizeOnlyRecorderProvider::new(); + let recorder = SizeOnlyRecorderProvider::new(); let cache_provider = trie_cache::CacheProvider::new(); // We use the storage root of the `parent_head` to ensure that it is the correct root. // This is already being done above while creating the in-memory db, but let's be paranoid!! @@ -135,6 +136,8 @@ where .with_recorder(recorder.clone()) .build(); + set_recorder(recorder); + let _guard = ( // Replace storage calls with our own implementations sp_io::storage::host_read.replace_implementation(host_storage_read), @@ -176,61 +179,57 @@ where .replace_implementation(host_current_storage_proof_size), ); - rec::using(&mut recorder, || { - run_with_externalities::(&backend, || { - let relay_chain_proof = crate::RelayChainStateProof::new( - PSC::SelfParaId::get(), - inherent_data.validation_data.relay_parent_storage_root, - inherent_data.relay_chain_state.clone(), - ) - .expect("Invalid relay chain state proof"); - - let res = CI::check_inherents(&block, &relay_chain_proof); - - if !res.ok() { - if log::log_enabled!(log::Level::Error) { - res.into_errors().for_each(|e| { - log::error!("Checking inherent with identifier `{:?}` failed", e.0) - }); - } - - panic!("Checking inherents failed"); + run_with_externalities::(&backend, || { + let relay_chain_proof = crate::RelayChainStateProof::new( + PSC::SelfParaId::get(), + inherent_data.validation_data.relay_parent_storage_root, + inherent_data.relay_chain_state.clone(), + ) + .expect("Invalid relay chain state proof"); + + let res = CI::check_inherents(&block, &relay_chain_proof); + + if !res.ok() { + if log::log_enabled!(log::Level::Error) { + res.into_errors().for_each(|e| { + log::error!("Checking inherent with identifier `{:?}` failed", e.0) + }); } - }) + + panic!("Checking inherents failed"); + } }); - rec::using(&mut recorder, || { - run_with_externalities::(&backend, || { - let head_data = 
HeadData(block.header().encode()); + run_with_externalities::(&backend, || { + let head_data = HeadData(block.header().encode()); - E::execute_block(block); + E::execute_block(block); - let new_validation_code = crate::NewValidationCode::::get(); - let upward_messages = crate::UpwardMessages::::get().try_into().expect( - "Number of upward messages should not be greater than `MAX_UPWARD_MESSAGE_NUM`", - ); - let processed_downward_messages = crate::ProcessedDownwardMessages::::get(); - let horizontal_messages = crate::HrmpOutboundMessages::::get().try_into().expect( + let new_validation_code = crate::NewValidationCode::::get(); + let upward_messages = crate::UpwardMessages::::get().try_into().expect( + "Number of upward messages should not be greater than `MAX_UPWARD_MESSAGE_NUM`", + ); + let processed_downward_messages = crate::ProcessedDownwardMessages::::get(); + let horizontal_messages = crate::HrmpOutboundMessages::::get().try_into().expect( "Number of horizontal messages should not be greater than `MAX_HORIZONTAL_MESSAGE_NUM`", ); - let hrmp_watermark = crate::HrmpWatermark::::get(); - - let head_data = - if let Some(custom_head_data) = crate::CustomValidationHeadData::::get() { - HeadData(custom_head_data) - } else { - head_data - }; - - ValidationResult { - head_data, - new_validation_code: new_validation_code.map(Into::into), - upward_messages, - processed_downward_messages, - horizontal_messages, - hrmp_watermark, - } - }) + let hrmp_watermark = crate::HrmpWatermark::::get(); + + let head_data = + if let Some(custom_head_data) = crate::CustomValidationHeadData::::get() { + HeadData(custom_head_data) + } else { + head_data + }; + + ValidationResult { + head_data, + new_validation_code: new_validation_code.map(Into::into), + upward_messages, + processed_downward_messages, + horizontal_messages, + hrmp_watermark, + } }) } @@ -317,10 +316,11 @@ fn host_storage_clear(key: &[u8]) { } fn host_current_storage_proof_size() -> u32 { - rec::with(|rec| 
rec.estimate_encoded_size()) + get_recorder_ref() + .map(|r| r.estimate_encoded_size()) .unwrap_or_default() .try_into() - .unwrap() + .unwrap_or_default() } fn host_storage_root(version: StateVersion) -> Vec { @@ -424,3 +424,14 @@ fn host_default_child_storage_next_key(storage_key: &[u8], key: &[u8]) -> Option fn host_offchain_index_set(_key: &[u8], _value: &[u8]) {} fn host_offchain_index_clear(_key: &[u8]) {} + +fn set_recorder(recorder: SizeOnlyRecorderProvider) { + // This is safe here, there is strictly sequential access. + unsafe { + RECORDER = Some(Box::new(recorder)); + } +} + +fn get_recorder_ref() -> Option<&'static Box> { + unsafe { RECORDER.as_ref() } +} diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs index 2257ac6ddf58..ce49f3065ce0 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs @@ -20,18 +20,21 @@ use codec::Encode; use sp_std::{ + alloc::rc::Rc, cell::{RefCell, RefMut}, collections::{btree_map::BTreeMap, btree_set::BTreeSet}, - sync::Arc, }; use sp_trie::{NodeCodec, ProofSizeProvider, StorageProof}; use trie_db::{Hasher, RecordedForKey, TrieAccess}; /// A trie recorder that only keeps track of the proof size. +/// +/// The internal size counting logic should align +/// with ['sp_trie::recorder::Recorder']. 
pub(crate) struct SizeOnlyRecorder<'a, H: Hasher> { seen_nodes: RefMut<'a, BTreeSet>, encoded_size: RefMut<'a, usize>, - recorded_keys: RefMut<'a, BTreeMap, RecordedForKey>>, + recorded_keys: RefMut<'a, BTreeMap, RecordedForKey>>, } impl<'a, H: trie_db::Hasher> trie_db::TrieRecorder for SizeOnlyRecorder<'a, H> { @@ -86,9 +89,9 @@ impl<'a, H: trie_db::Hasher> trie_db::TrieRecorder for SizeOnlyRecorder< #[derive(Clone)] pub(crate) struct SizeOnlyRecorderProvider { - seen_nodes: Arc>>, - encoded_size: Arc>, - recorded_keys: Arc, RecordedForKey>>>, + seen_nodes: Rc>>, + encoded_size: Rc>, + recorded_keys: Rc, RecordedForKey>>>, } impl SizeOnlyRecorderProvider { @@ -185,7 +188,6 @@ mod tests { #[test] fn recorder_equivalence_cache() { - sp_tracing::try_init_simple(); let (db, root, test_data) = create_trie(); let mut rng = rand::thread_rng(); @@ -241,7 +243,6 @@ mod tests { #[test] fn recorder_equivalence_no_cache() { - sp_tracing::try_init_simple(); let (db, root, test_data) = create_trie(); let mut rng = rand::thread_rng(); diff --git a/substrate/client/block-builder/src/lib.rs b/substrate/client/block-builder/src/lib.rs index e68f2e8095ad..a5552bb7a794 100644 --- a/substrate/client/block-builder/src/lib.rs +++ b/substrate/client/block-builder/src/lib.rs @@ -173,9 +173,9 @@ where if record_proof.yes() { api.record_proof(); - let recorder = - api.proof_recorder().expect("We enable proof recording hte line before; qed"); - api.register_extension(sp_proof_size_ext::ProofSizeExt::new(recorder)); + if let Some(recorder) = api.proof_recorder() { + api.register_extension(sp_proof_size_ext::ProofSizeExt::new(recorder)); + } } api.set_call_context(CallContext::Onchain); diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index bd3333b0cf8a..de7e725adb1c 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -867,10 +867,10 @@ where if 
self.config.enable_import_proof_recording { runtime_api.record_proof(); - let recorder = runtime_api - .proof_recorder() - .expect("We enable proof recording hte line before; qed"); - runtime_api.register_extension(sp_proof_size_ext::ProofSizeExt::new(recorder)); + if let Some(recorder) = runtime_api.proof_recorder() { + runtime_api + .register_extension(sp_proof_size_ext::ProofSizeExt::new(recorder)); + }; } runtime_api.execute_block( diff --git a/substrate/primitives/proof-size-ext/src/lib.rs b/substrate/primitives/proof-size-ext/src/lib.rs index 981b1095473a..a5e0ed5568b6 100644 --- a/substrate/primitives/proof-size-ext/src/lib.rs +++ b/substrate/primitives/proof-size-ext/src/lib.rs @@ -1,5 +1,3 @@ -// This file is part of Substrate. - // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); From 4abdc431ee7ee09ab6efd9ea30c4fffec5f09430 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 12 Sep 2023 16:13:23 +0200 Subject: [PATCH 29/61] Use `Execution_Extensions` to register the extensions --- Cargo.lock | 3 +++ .../src/validate_block/trie_recorder.rs | 2 +- cumulus/polkadot-parachain/Cargo.toml | 2 +- cumulus/polkadot-parachain/src/command.rs | 3 ++- substrate/client/api/Cargo.toml | 2 ++ substrate/client/api/src/execution_extensions.rs | 11 +++++++---- substrate/client/block-builder/src/lib.rs | 3 --- substrate/client/rpc-spec-v2/Cargo.toml | 1 + .../client/rpc-spec-v2/src/chain_head/test_utils.rs | 5 +++-- substrate/client/service/src/client/call_executor.rs | 8 ++++++-- substrate/client/service/src/client/client.rs | 11 ++++++----- .../api/proc-macro/src/impl_runtime_apis.rs | 1 + substrate/primitives/api/src/lib.rs | 1 + substrate/primitives/trie/src/recorder.rs | 6 ++++++ 14 files changed, 40 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e20b20ee9193..48ff0caeb2f6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14721,11 +14721,13 
@@ dependencies = [ "sp-core", "sp-database", "sp-externalities", + "sp-proof-size-ext", "sp-runtime", "sp-state-machine", "sp-statement-store", "sp-storage", "sp-test-primitives", + "sp-trie", "substrate-prometheus-endpoint", "substrate-test-runtime", "thiserror", @@ -15617,6 +15619,7 @@ dependencies = [ "sp-core", "sp-maybe-compressed-blob", "sp-runtime", + "sp-trie", "sp-version", "substrate-test-runtime", "substrate-test-runtime-client", diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs index ce49f3065ce0..6d10607c5b29 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs @@ -20,9 +20,9 @@ use codec::Encode; use sp_std::{ - alloc::rc::Rc, cell::{RefCell, RefMut}, collections::{btree_map::BTreeMap, btree_set::BTreeSet}, + rc::Rc, }; use sp_trie::{NodeCodec, ProofSizeProvider, StorageProof}; use trie_db::{Hasher, RecordedForKey, TrieAccess}; diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml index ce35a59da583..a6f8a1bbbc7b 100644 --- a/cumulus/polkadot-parachain/Cargo.toml +++ b/cumulus/polkadot-parachain/Cargo.toml @@ -81,7 +81,6 @@ xcm = { package = "staging-xcm", path = "../../polkadot/xcm" } # Cumulus cumulus-client-cli = { path = "../client/cli" } -cumulus-primitives-reclaim = { path = "../primitives/pov-reclaim" } cumulus-client-collator = { path = "../client/collator" } cumulus-client-consensus-aura = { path = "../client/consensus/aura" } cumulus-client-consensus-relay-chain = { path = "../client/consensus/relay-chain" } @@ -90,6 +89,7 @@ cumulus-client-consensus-proposer = { path = "../client/consensus/proposer" } cumulus-client-service = { path = "../client/service" } cumulus-primitives-core = { path = "../primitives/core" } cumulus-primitives-parachain-inherent = { path = "../primitives/parachain-inherent" } 
+cumulus-primitives-reclaim = { path = "../primitives/pov-reclaim" } cumulus-relay-chain-interface = { path = "../client/relay-chain-interface" } color-print = "0.3.4" diff --git a/cumulus/polkadot-parachain/src/command.rs b/cumulus/polkadot-parachain/src/command.rs index 596b7baf6710..97630db41cff 100644 --- a/cumulus/polkadot-parachain/src/command.rs +++ b/cumulus/polkadot-parachain/src/command.rs @@ -20,6 +20,7 @@ use crate::{ service::{new_partial, Block}, }; use cumulus_primitives_core::ParaId; +use cumulus_primitives_reclaim::pov_reclaim_host_functions::HostFunctions as ReclaimHostFunctions; use frame_benchmarking_cli::{BenchmarkCmd, SUBSTRATE_REFERENCE_HARDWARE}; use log::info; use parachains_common::{AssetHubPolkadotAuraId, AuraId}; @@ -697,7 +698,7 @@ pub fn run() -> Result<()> { match cmd { BenchmarkCmd::Pallet(cmd) => if cfg!(feature = "runtime-benchmarks") { - runner.sync_run(|config| cmd.run::(config)) + runner.sync_run(|config| cmd.run::(config)) } else { Err("Benchmarking wasn't enabled when building the node. \ You can enable it with `--features runtime-benchmarks`." 
diff --git a/substrate/client/api/Cargo.toml b/substrate/client/api/Cargo.toml index b59149424ed3..ef39e5067c9a 100644 --- a/substrate/client/api/Cargo.toml +++ b/substrate/client/api/Cargo.toml @@ -32,6 +32,8 @@ sp-core = { path = "../../primitives/core", default-features = false} sp-database = { path = "../../primitives/database" } sp-externalities = { path = "../../primitives/externalities" } sp-runtime = { path = "../../primitives/runtime", default-features = false} +sp-proof-size-ext = { path = "../../primitives/proof-size-ext", default-features = false} +sp-trie = { path = "../../primitives/trie", default-features = false} sp-state-machine = { path = "../../primitives/state-machine" } sp-statement-store = { path = "../../primitives/statement-store" } sp-storage = { path = "../../primitives/storage" } diff --git a/substrate/client/api/src/execution_extensions.rs b/substrate/client/api/src/execution_extensions.rs index 6f927105df0b..b385d5f83145 100644 --- a/substrate/client/api/src/execution_extensions.rs +++ b/substrate/client/api/src/execution_extensions.rs @@ -25,7 +25,8 @@ use parking_lot::RwLock; use sp_core::traits::{ReadRuntimeVersion, ReadRuntimeVersionExt}; use sp_externalities::{Extension, Extensions}; -use sp_runtime::traits::{Block as BlockT, NumberFor}; +use sp_runtime::traits::{Block as BlockT, HashingFor, NumberFor}; +use sp_trie::recorder::Recorder; use std::{marker::PhantomData, sync::Arc}; /// Generate the starting set of [`Extensions`]. @@ -91,7 +92,6 @@ impl ExtensionsFactory /// /// This crate aggregates extensions available for the offchain calls /// and is responsible for producing a correct `Extensions` object. -/// for each call, based on required `Capabilities`. 
pub struct ExecutionExtensions { extensions_factory: RwLock>>, read_runtime_version: Arc, @@ -116,17 +116,20 @@ impl ExecutionExtensions { *self.extensions_factory.write() = Box::new(maker); } - /// Based on the execution context and capabilities it produces - /// the extensions object to support desired set of APIs. + /// Produces default extensions based on the input parameters. pub fn extensions( &self, block_hash: Block::Hash, block_number: NumberFor, + proof_recorder: Option<&Recorder>>, ) -> Extensions { let mut extensions = self.extensions_factory.read().extensions_for(block_hash, block_number); extensions.register(ReadRuntimeVersionExt::new(self.read_runtime_version.clone())); + if let Some(recorder) = proof_recorder { + extensions.register(sp_proof_size_ext::ProofSizeExt::new(recorder.clone())); + }; extensions } diff --git a/substrate/client/block-builder/src/lib.rs b/substrate/client/block-builder/src/lib.rs index a5552bb7a794..1878e7627480 100644 --- a/substrate/client/block-builder/src/lib.rs +++ b/substrate/client/block-builder/src/lib.rs @@ -173,9 +173,6 @@ where if record_proof.yes() { api.record_proof(); - if let Some(recorder) = api.proof_recorder() { - api.register_extension(sp_proof_size_ext::ProofSizeExt::new(recorder)); - } } api.set_call_context(CallContext::Onchain); diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml index c93006753afb..63cdd71908e2 100644 --- a/substrate/client/rpc-spec-v2/Cargo.toml +++ b/substrate/client/rpc-spec-v2/Cargo.toml @@ -46,5 +46,6 @@ sp-consensus = { path = "../../primitives/consensus/common" } sp-maybe-compressed-blob = { path = "../../primitives/maybe-compressed-blob" } sc-block-builder = { path = "../block-builder" } sc-service = { path = "../service", features = ["test-helpers"]} +sp-trie = { path = "../../primitives/trie" } assert_matches = "1.3.0" pretty_assertions = "1.2.1" diff --git a/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs 
b/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs index 6e92e87608b4..8dcbfca1038d 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs @@ -29,7 +29,7 @@ use sp_blockchain::{BlockStatus, CachedHeaderMetadata, HeaderBackend, HeaderMeta use sp_consensus::BlockOrigin; use sp_runtime::{ generic::SignedBlock, - traits::{Block as BlockT, Header as HeaderT}, + traits::{Block as BlockT, HashingFor, Header as HeaderT}, Justifications, }; use std::sync::Arc; @@ -218,9 +218,10 @@ impl> CallApiAt for ChainHeadMock fn initialize_extensions( &self, at: ::Hash, + recorder: Option<&sp_trie::recorder::Recorder>>, extensions: &mut sp_api::Extensions, ) -> Result<(), sp_api::ApiError> { - self.client.initialize_extensions(at, extensions) + self.client.initialize_extensions(at, recorder, extensions) } } diff --git a/substrate/client/service/src/client/call_executor.rs b/substrate/client/service/src/client/call_executor.rs index 86b5c7c61fcd..7a5d859a9631 100644 --- a/substrate/client/service/src/client/call_executor.rs +++ b/substrate/client/service/src/client/call_executor.rs @@ -177,7 +177,7 @@ where let runtime_code = self.check_override(runtime_code, &state, at_hash)?.0; - let mut extensions = self.execution_extensions.extensions(at_hash, at_number); + let mut extensions = self.execution_extensions.extensions(at_hash, at_number, None); let mut sm = StateMachine::new( &state, @@ -290,7 +290,11 @@ where method, call_data, &runtime_code, - &mut self.execution_extensions.extensions(at_hash, at_number), + &mut self.execution_extensions.extensions( + at_hash, + at_number, + None::<&sp_trie::recorder::Recorder>>, + ), ) .map_err(Into::into) } diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index de7e725adb1c..9ac47b707f14 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ 
-867,10 +867,6 @@ where if self.config.enable_import_proof_recording { runtime_api.record_proof(); - if let Some(recorder) = runtime_api.proof_recorder() { - runtime_api - .register_extension(sp_proof_size_ext::ProofSizeExt::new(recorder)); - }; } runtime_api.execute_block( @@ -1733,11 +1729,16 @@ where fn initialize_extensions( &self, at: Block::Hash, + recorder: Option<&sp_trie::recorder::Recorder>>, extensions: &mut sp_externalities::Extensions, ) -> Result<(), sp_api::ApiError> { let block_number = self.expect_block_number_from_id(&BlockId::Hash(at))?; - extensions.merge(self.executor.execution_extensions().extensions(at, block_number)); + extensions.merge(self.executor.execution_extensions().extensions( + at, + block_number, + recorder, + )); Ok(()) } diff --git a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs index 1167e9ac3783..ef124d2530d1 100644 --- a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -584,6 +584,7 @@ impl<'a> ApiRuntimeImplToApiRuntimeApiImpl<'a> { #crate_::CallApiAt::<__SrApiBlock__>::initialize_extensions( self.call, at, + self.recorder.as_ref(), &mut std::cell::RefCell::borrow_mut(&self.extensions), )?; diff --git a/substrate/primitives/api/src/lib.rs b/substrate/primitives/api/src/lib.rs index c3f80acf09ae..4e4be4cf6466 100644 --- a/substrate/primitives/api/src/lib.rs +++ b/substrate/primitives/api/src/lib.rs @@ -661,6 +661,7 @@ pub trait CallApiAt { fn initialize_extensions( &self, at: Block::Hash, + recorder: Option<&sp_trie::recorder::Recorder>>, extensions: &mut Extensions, ) -> Result<(), ApiError>; } diff --git a/substrate/primitives/trie/src/recorder.rs b/substrate/primitives/trie/src/recorder.rs index 2e3f71d3d338..bd3645290e4e 100644 --- a/substrate/primitives/trie/src/recorder.rs +++ b/substrate/primitives/trie/src/recorder.rs @@ -234,6 +234,12 @@ impl 
crate::ProofSizeProvider for Recorder { } } +impl crate::ProofSizeProvider for &Recorder { + fn estimate_encoded_size(&self) -> usize { + (**self).estimate_encoded_size() + } +} + /// The [`TrieRecorder`](trie_db::TrieRecorder) implementation. pub struct TrieRecorder<'a, H: Hasher> { inner: MutexGuard<'a, RecorderInner>, From 77fbed64a09331adc2ac41e5a00871f7e50f8a5f Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 12 Sep 2023 17:51:00 +0200 Subject: [PATCH 30/61] Unchange `proof_size` --- .../src/validate_block/trie_recorder.rs | 3 --- polkadot/node/core/pvf/common/src/executor_intf.rs | 4 ---- substrate/primitives/externalities/src/lib.rs | 14 ++++++++++---- substrate/primitives/state-machine/src/basic.rs | 4 ---- .../primitives/state-machine/src/read_only.rs | 4 ---- .../primitives/state-machine/src/trie_backend.rs | 11 ++--------- substrate/primitives/trie/src/lib.rs | 1 - 7 files changed, 12 insertions(+), 29 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs index 6d10607c5b29..9f4f883e1d1c 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs @@ -14,9 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . -//! The actual implementation of the validate block functionality. 
-#![cfg_attr(not(feature = "std"), no_std)] - use codec::Encode; use sp_std::{ diff --git a/polkadot/node/core/pvf/common/src/executor_intf.rs b/polkadot/node/core/pvf/common/src/executor_intf.rs index 3bb23d038093..79839149ebdf 100644 --- a/polkadot/node/core/pvf/common/src/executor_intf.rs +++ b/polkadot/node/core/pvf/common/src/executor_intf.rs @@ -322,10 +322,6 @@ impl sp_externalities::Externalities for ValidationExternalities { fn get_read_and_written_keys(&self) -> Vec<(Vec, u32, u32, bool)> { panic!("get_read_and_written_keys: unsupported feature for parachain validation") } - - fn proof_size(&self) -> Option { - panic!("proof_size: unsupported feature for parachain validation") - } } impl sp_externalities::ExtensionStore for ValidationExternalities { diff --git a/substrate/primitives/externalities/src/lib.rs b/substrate/primitives/externalities/src/lib.rs index a86d4e68a8ef..411ec97a6b82 100644 --- a/substrate/primitives/externalities/src/lib.rs +++ b/substrate/primitives/externalities/src/lib.rs @@ -239,10 +239,6 @@ pub trait Externalities: ExtensionStore { /// no transaction is open that can be closed. fn storage_commit_transaction(&mut self) -> Result<(), ()>; - /// Returns estimated proof size for the state queries so far. - /// Proof is reset on commit and wipe. - fn proof_size(&self) -> Option; - /// Index specified transaction slice and store it. fn storage_index_transaction(&mut self, _index: u32, _hash: &[u8], _size: u32) { unimplemented!("storage_index_transaction"); @@ -297,6 +293,16 @@ pub trait Externalities: ExtensionStore { /// Adds new storage keys to the DB tracking whitelist. fn set_whitelist(&mut self, new: Vec); + /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + /// Benchmarking related functionality and shouldn't be used anywhere else! + /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + /// + /// Returns estimated proof size for the state queries so far. 
+ /// Proof is reset on commit and wipe. + fn proof_size(&self) -> Option { + None + } + /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! /// Benchmarking related functionality and shouldn't be used anywhere else! /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! diff --git a/substrate/primitives/state-machine/src/basic.rs b/substrate/primitives/state-machine/src/basic.rs index f30df99876f5..ace88aee2628 100644 --- a/substrate/primitives/state-machine/src/basic.rs +++ b/substrate/primitives/state-machine/src/basic.rs @@ -307,10 +307,6 @@ impl Externalities for BasicExternalities { fn commit(&mut self) {} - fn proof_size(&self) -> Option { - None - } - fn read_write_count(&self) -> (u32, u32, u32, u32) { unimplemented!("read_write_count is not supported in Basic") } diff --git a/substrate/primitives/state-machine/src/read_only.rs b/substrate/primitives/state-machine/src/read_only.rs index 9fbfb8411452..2056bf986635 100644 --- a/substrate/primitives/state-machine/src/read_only.rs +++ b/substrate/primitives/state-machine/src/read_only.rs @@ -218,10 +218,6 @@ where fn get_read_and_written_keys(&self) -> Vec<(Vec, u32, u32, bool)> { unimplemented!("get_read_and_written_keys is not supported in ReadOnlyExternalities") } - - fn proof_size(&self) -> Option { - self.backend.proof_size() - } } impl<'a, H: Hasher, B: 'a + Backend> sp_externalities::ExtensionStore diff --git a/substrate/primitives/state-machine/src/trie_backend.rs b/substrate/primitives/state-machine/src/trie_backend.rs index 60ddb5327a63..fad0bd228ae8 100644 --- a/substrate/primitives/state-machine/src/trie_backend.rs +++ b/substrate/primitives/state-machine/src/trie_backend.rs @@ -517,13 +517,6 @@ where fn wipe(&self) -> Result<(), Self::Error> { Ok(()) } - - fn proof_size(&self) -> Option { - self.essence - .recorder - .as_ref() - .map(|rec| rec.estimate_encoded_size().try_into().unwrap_or(0)) - } } #[cfg(feature = "std")] @@ -1000,8 +993,8 @@ pub mod 
tests { .storage_root(iter::once((&b"new-key"[..], Some(&b"new-value"[..]))), state_version); assert!(!tx.drain().is_empty()); assert!( - new_root - != test_trie(state_version, None, None) + new_root != + test_trie(state_version, None, None) .storage_root(iter::empty(), state_version) .0 ); diff --git a/substrate/primitives/trie/src/lib.rs b/substrate/primitives/trie/src/lib.rs index 3cd6beefdd39..940e91d14f47 100644 --- a/substrate/primitives/trie/src/lib.rs +++ b/substrate/primitives/trie/src/lib.rs @@ -160,7 +160,6 @@ pub trait ProofSizeProvider { fn estimate_encoded_size(&self) -> usize; } - /// TrieDB error over `TrieConfiguration` trait. pub type TrieError = trie_db::TrieError, CError>; /// Reexport from `hash_db`, with genericity set for `Hasher` trait. From 7057770503407ad5db40ed7f678fc1d7bd610f99 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 12 Sep 2023 18:25:38 +0200 Subject: [PATCH 31/61] Zepter & CI --- .../runtimes/assets/asset-hub-kusama/Cargo.toml | 2 +- cumulus/primitives/pov-reclaim/Cargo.toml | 10 +++------- substrate/client/api/Cargo.toml | 2 +- substrate/primitives/externalities/Cargo.toml | 9 ++------- substrate/primitives/proof-size-ext/Cargo.toml | 14 +++----------- substrate/primitives/proof-size-ext/src/lib.rs | 3 --- 6 files changed, 10 insertions(+), 30 deletions(-) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml index f90d32e6c168..95bea2984ff3 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml @@ -167,8 +167,8 @@ std = [ "cumulus-pallet-xcm/std", "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-core/std", - "cumulus-primitives-utility/std", "cumulus-primitives-reclaim/std", + "cumulus-primitives-utility/std", "frame-benchmarking?/std", "frame-executive/std", "frame-support/std", diff --git a/cumulus/primitives/pov-reclaim/Cargo.toml 
b/cumulus/primitives/pov-reclaim/Cargo.toml index 79b856d8f7df..bf8461e6c49a 100644 --- a/cumulus/primitives/pov-reclaim/Cargo.toml +++ b/cumulus/primitives/pov-reclaim/Cargo.toml @@ -7,12 +7,8 @@ edition.workspace = true [dependencies] sp-runtime-interface = { path = "../../../substrate/primitives/runtime-interface", default-features = false } sp-externalities = { path = "../../../substrate/primitives/externalities", default-features = false } -sp-proof-size-ext = { path = "../../../substrate/primitives/proof-size-ext", default-features = false } +sp-proof-size-ext = { path = "../../../substrate/primitives/proof-size-ext" } [features] -default = ["std"] -std = [ - "sp-runtime-interface/std", - "sp-proof-size-ext/std", - "sp-externalities/std", -] +default = [ "std" ] +std = [ "sp-externalities/std", "sp-runtime-interface/std" ] diff --git a/substrate/client/api/Cargo.toml b/substrate/client/api/Cargo.toml index ef39e5067c9a..140ca3040ab0 100644 --- a/substrate/client/api/Cargo.toml +++ b/substrate/client/api/Cargo.toml @@ -32,7 +32,7 @@ sp-core = { path = "../../primitives/core", default-features = false} sp-database = { path = "../../primitives/database" } sp-externalities = { path = "../../primitives/externalities" } sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-proof-size-ext = { path = "../../primitives/proof-size-ext", default-features = false} +sp-proof-size-ext = { path = "../../primitives/proof-size-ext" } sp-trie = { path = "../../primitives/trie", default-features = false} sp-state-machine = { path = "../../primitives/state-machine" } sp-statement-store = { path = "../../primitives/statement-store" } diff --git a/substrate/primitives/externalities/Cargo.toml b/substrate/primitives/externalities/Cargo.toml index 18dbbb17ced4..417eb363867b 100644 --- a/substrate/primitives/externalities/Cargo.toml +++ b/substrate/primitives/externalities/Cargo.toml @@ -20,10 +20,5 @@ sp-std = { path = "../std", default-features = false} 
sp-storage = { path = "../storage", default-features = false} [features] -default = ["std"] -std = [ - "codec/std", - "environmental/std", - "sp-std/std", - "sp-storage/std", -] +default = [ "std" ] +std = [ "codec/std", "environmental/std", "sp-std/std", "sp-storage/std" ] diff --git a/substrate/primitives/proof-size-ext/Cargo.toml b/substrate/primitives/proof-size-ext/Cargo.toml index c3c1eb199f43..f8239d387182 100644 --- a/substrate/primitives/proof-size-ext/Cargo.toml +++ b/substrate/primitives/proof-size-ext/Cargo.toml @@ -12,14 +12,6 @@ description = "A crate which contains an externalities extension to fetch the pr targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-externalities = { path = "../externalities", default-features = false} -sp-trie = { path = "../trie", default-features = false} -sp-runtime = { path = "../runtime", default-features = false} - -[features] -default = ["std"] -std = [ - "sp-externalities/std", - "sp-trie/std", - "sp-runtime/std", -] +sp-externalities = { path = "../externalities" } +sp-trie = { path = "../trie" } +sp-runtime = { path = "../runtime" } diff --git a/substrate/primitives/proof-size-ext/src/lib.rs b/substrate/primitives/proof-size-ext/src/lib.rs index a5e0ed5568b6..32edf7819c52 100644 --- a/substrate/primitives/proof-size-ext/src/lib.rs +++ b/substrate/primitives/proof-size-ext/src/lib.rs @@ -11,17 +11,14 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. - use sp_trie::ProofSizeProvider; -#[cfg(feature = "std")] sp_externalities::decl_extension! { /// The proof size extension to fetch the current storage proof size /// in externalities. 
pub struct ProofSizeExt(Box); } -#[cfg(feature = "std")] impl ProofSizeExt { pub fn new(recorder: T) -> Self { ProofSizeExt(Box::new(recorder)) From 9e195ceccbd3b95843de976e4f4ed78a5b5e2475 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 12 Sep 2023 22:31:48 +0200 Subject: [PATCH 32/61] CI --- cumulus/primitives/pov-reclaim/Cargo.toml | 8 ++++++-- cumulus/primitives/pov-reclaim/src/lib.rs | 1 + substrate/primitives/trie/src/recorder.rs | 4 ++-- 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/cumulus/primitives/pov-reclaim/Cargo.toml b/cumulus/primitives/pov-reclaim/Cargo.toml index bf8461e6c49a..02f50de4bbf6 100644 --- a/cumulus/primitives/pov-reclaim/Cargo.toml +++ b/cumulus/primitives/pov-reclaim/Cargo.toml @@ -7,8 +7,12 @@ edition.workspace = true [dependencies] sp-runtime-interface = { path = "../../../substrate/primitives/runtime-interface", default-features = false } sp-externalities = { path = "../../../substrate/primitives/externalities", default-features = false } -sp-proof-size-ext = { path = "../../../substrate/primitives/proof-size-ext" } +sp-proof-size-ext = { path = "../../../substrate/primitives/proof-size-ext", optional = true } [features] default = [ "std" ] -std = [ "sp-externalities/std", "sp-runtime-interface/std" ] +std = [ + "sp-externalities/std", + "sp-proof-size-ext", + "sp-runtime-interface/std", +] diff --git a/cumulus/primitives/pov-reclaim/src/lib.rs b/cumulus/primitives/pov-reclaim/src/lib.rs index 44092bb15853..d4820e623f83 100644 --- a/cumulus/primitives/pov-reclaim/src/lib.rs +++ b/cumulus/primitives/pov-reclaim/src/lib.rs @@ -20,6 +20,7 @@ use sp_externalities::ExternalitiesExt; #[cfg(feature = "std")] use sp_proof_size_ext::ProofSizeExt; + use sp_runtime_interface::runtime_interface; #[runtime_interface] diff --git a/substrate/primitives/trie/src/recorder.rs b/substrate/primitives/trie/src/recorder.rs index bd3645290e4e..e3e31e7f296c 100644 --- a/substrate/primitives/trie/src/recorder.rs +++ 
b/substrate/primitives/trie/src/recorder.rs @@ -230,13 +230,13 @@ impl Recorder { impl crate::ProofSizeProvider for Recorder { fn estimate_encoded_size(&self) -> usize { - self.estimate_encoded_size() + Recorder::estimate_encoded_size(self) } } impl crate::ProofSizeProvider for &Recorder { fn estimate_encoded_size(&self) -> usize { - (**self).estimate_encoded_size() + Recorder::estimate_encoded_size(*self) } } From bd2219fb5b7ab761802e83ad37b5987635b3b796 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Thu, 14 Sep 2023 12:55:02 +0200 Subject: [PATCH 33/61] Add some test for the host function --- Cargo.lock | 4 ++ cumulus/primitives/pov-reclaim/Cargo.toml | 6 +++ cumulus/primitives/pov-reclaim/src/lib.rs | 63 +++++++++++++++++++++++ 3 files changed, 73 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index ac8a0454db62..11f156d7148f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3718,9 +3718,13 @@ dependencies = [ name = "cumulus-primitives-reclaim" version = "0.1.0" dependencies = [ + "sp-core", "sp-externalities", + "sp-io", "sp-proof-size-ext", "sp-runtime-interface", + "sp-state-machine", + "sp-trie", ] [[package]] diff --git a/cumulus/primitives/pov-reclaim/Cargo.toml b/cumulus/primitives/pov-reclaim/Cargo.toml index 02f50de4bbf6..51f6009c30e5 100644 --- a/cumulus/primitives/pov-reclaim/Cargo.toml +++ b/cumulus/primitives/pov-reclaim/Cargo.toml @@ -9,6 +9,12 @@ sp-runtime-interface = { path = "../../../substrate/primitives/runtime-interface sp-externalities = { path = "../../../substrate/primitives/externalities", default-features = false } sp-proof-size-ext = { path = "../../../substrate/primitives/proof-size-ext", optional = true } +[dev-dependencies] +sp-state-machine = { path = "../../../substrate/primitives/state-machine" } +sp-trie = { path = "../../../substrate/primitives/trie" } +sp-core = { path = "../../../substrate/primitives/core" } +sp-io = { path = "../../../substrate/primitives/io" } + [features] default = [ "std" ] std = [ diff --git 
a/cumulus/primitives/pov-reclaim/src/lib.rs b/cumulus/primitives/pov-reclaim/src/lib.rs index d4820e623f83..a4b41c8a037b 100644 --- a/cumulus/primitives/pov-reclaim/src/lib.rs +++ b/cumulus/primitives/pov-reclaim/src/lib.rs @@ -32,3 +32,66 @@ pub trait PovReclaimHostFunctions { } } } + +#[cfg(test)] +mod tests { + use sp_core::Blake2Hasher; + use sp_proof_size_ext::ProofSizeExt; + use sp_state_machine::TestExternalities; + use sp_trie::{recorder::Recorder, LayoutV1, PrefixedMemoryDB, TrieDBMutBuilder, TrieMut}; + + use crate::pov_reclaim_host_functions; + + const TEST_DATA: &[(&[u8], &[u8])] = &[(b"key1", &[1; 64]), (b"key2", &[2; 64])]; + + type TestLayout = LayoutV1; + + fn get_prepared_test_externalities() -> (TestExternalities, Recorder) + { + let mut db = PrefixedMemoryDB::default(); + let mut root = Default::default(); + + { + let mut trie = TrieDBMutBuilder::::new(&mut db, &mut root).build(); + for (k, v) in TEST_DATA { + trie.insert(k, v).expect("Inserts data"); + } + } + + let recorder: sp_trie::recorder::Recorder = Default::default(); + let trie_backend = sp_state_machine::TrieBackendBuilder::new(db, root) + .with_recorder(recorder.clone()) + .build(); + + let mut ext: TestExternalities = TestExternalities::default(); + ext.backend = trie_backend; + (ext, recorder) + } + + #[test] + fn host_function_returns_size_from_recorder() { + let (mut ext, recorder) = get_prepared_test_externalities(); + ext.register_extension(ProofSizeExt::new(recorder)); + + ext.execute_with(|| { + assert_eq!(pov_reclaim_host_functions::current_storage_proof_size(), 0); + sp_io::storage::get(b"key1"); + assert_eq!(pov_reclaim_host_functions::current_storage_proof_size(), 175); + sp_io::storage::get(b"key2"); + assert_eq!(pov_reclaim_host_functions::current_storage_proof_size(), 275); + }); + } + + #[test] + fn host_function_returns_zero_without_extension() { + let (mut ext, _) = get_prepared_test_externalities(); + + ext.execute_with(|| { + 
assert_eq!(pov_reclaim_host_functions::current_storage_proof_size(), 0); + sp_io::storage::get(b"key1"); + assert_eq!(pov_reclaim_host_functions::current_storage_proof_size(), 0); + sp_io::storage::get(b"key2"); + assert_eq!(pov_reclaim_host_functions::current_storage_proof_size(), 0); + }); + } +} From b51bf3af197f699e11867e99c75e0e47ced7a19f Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Mon, 18 Sep 2023 17:56:15 +0200 Subject: [PATCH 34/61] Adjust recorder to match new trie-db version --- .../parachain-system/src/validate_block/trie_recorder.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs index 9f4f883e1d1c..4ab2d90b92e1 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs @@ -74,6 +74,12 @@ impl<'a, H: trie_db::Hasher> trie_db::TrieRecorder for SizeOnlyRecorder< .and_modify(|e| *e = RecordedForKey::Value) .or_insert_with(|| RecordedForKey::Value); }, + TrieAccess::InlineValue { full_key } => { + self.recorded_keys + .entry(full_key.into()) + .and_modify(|e| *e = RecordedForKey::Value) + .or_insert_with(|| RecordedForKey::Value); + }, }; *self.encoded_size += encoded_size_update; From 480d9901c6e02abe86ecf8e1848b7c25004a71d6 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 19 Sep 2023 13:15:13 +0200 Subject: [PATCH 35/61] Apply suggestions from code review Co-authored-by: Davide Galassi --- substrate/client/block-builder/Cargo.toml | 1 - substrate/client/service/src/client/call_executor.rs | 2 +- substrate/primitives/state-machine/src/trie_backend.rs | 2 +- 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/substrate/client/block-builder/Cargo.toml b/substrate/client/block-builder/Cargo.toml index 74c75d8e7d17..ff2f9635b7a2 100644 --- a/substrate/client/block-builder/Cargo.toml +++ 
b/substrate/client/block-builder/Cargo.toml @@ -20,7 +20,6 @@ sc-client-api = { path = "../api" } sp-api = { path = "../../primitives/api" } sp-block-builder = { path = "../../primitives/block-builder" } sp-blockchain = { path = "../../primitives/blockchain" } -sp-proof-size-ext = { path = "../../primitives/proof-size-ext" } sp-core = { path = "../../primitives/core" } sp-inherents = { path = "../../primitives/inherents" } sp-runtime = { path = "../../primitives/runtime" } diff --git a/substrate/client/service/src/client/call_executor.rs b/substrate/client/service/src/client/call_executor.rs index 7a5d859a9631..414035b017eb 100644 --- a/substrate/client/service/src/client/call_executor.rs +++ b/substrate/client/service/src/client/call_executor.rs @@ -293,7 +293,7 @@ where &mut self.execution_extensions.extensions( at_hash, at_number, - None::<&sp_trie::recorder::Recorder>>, + None, ), ) .map_err(Into::into) diff --git a/substrate/primitives/state-machine/src/trie_backend.rs b/substrate/primitives/state-machine/src/trie_backend.rs index fad0bd228ae8..bfc756c5e81f 100644 --- a/substrate/primitives/state-machine/src/trie_backend.rs +++ b/substrate/primitives/state-machine/src/trie_backend.rs @@ -158,7 +158,7 @@ pub struct UnimplementedRecorderProvider { // replacement for the [`sp_trie::recorder::Recorder`] in no-std contexts. _phantom: core::marker::PhantomData, // Statically prevents construction. 
- _infallible: core::convert::Infallible, + _void: sp_core::Void, } #[cfg(not(feature = "std"))] From bb5ecb104fc77a91f46db983cb95267bbe41f900 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 19 Sep 2023 11:55:04 +0200 Subject: [PATCH 36/61] Review comments --- Cargo.lock | 13 ++++++------- cumulus/pallets/parachain-system/Cargo.toml | 4 ++-- .../src/validate_block/implementation.rs | 4 +++- .../src/validate_block/trie_recorder.rs | 10 ++++++++++ cumulus/parachain-template/node/Cargo.toml | 2 +- cumulus/parachain-template/node/src/service.rs | 2 +- .../runtimes/assets/asset-hub-kusama/Cargo.toml | 2 -- cumulus/polkadot-parachain/Cargo.toml | 2 +- cumulus/polkadot-parachain/src/command.rs | 2 +- cumulus/polkadot-parachain/src/service.rs | 4 ++-- cumulus/primitives/pov-reclaim/Cargo.toml | 2 +- cumulus/test/client/Cargo.toml | 2 +- cumulus/test/client/src/lib.rs | 4 ++-- cumulus/test/service/Cargo.toml | 2 +- cumulus/test/service/src/lib.rs | 2 +- .../client/service/src/client/call_executor.rs | 6 +----- .../primitives/state-machine/src/trie_backend.rs | 6 ++++-- substrate/primitives/trie/src/lib.rs | 12 ++++++++++++ 18 files changed, 50 insertions(+), 31 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 98600737e120..c01965f47a8d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -729,7 +729,6 @@ dependencies = [ "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-primitives-core", - "cumulus-primitives-reclaim", "cumulus-primitives-utility", "frame-benchmarking", "frame-executive", @@ -3540,7 +3539,7 @@ dependencies = [ "cumulus-pallet-parachain-system-proc-macro", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", - "cumulus-primitives-reclaim", + "cumulus-primitives-pov-reclaim", "cumulus-test-client", "cumulus-test-relay-sproof-builder", "environmental", @@ -3715,7 +3714,7 @@ dependencies = [ ] [[package]] -name = "cumulus-primitives-reclaim" +name = "cumulus-primitives-pov-reclaim" version = "0.1.0" dependencies = [ "sp-core", 
@@ -3878,7 +3877,7 @@ version = "0.1.0" dependencies = [ "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", - "cumulus-primitives-reclaim", + "cumulus-primitives-pov-reclaim", "cumulus-test-relay-sproof-builder", "cumulus-test-runtime", "cumulus-test-service", @@ -3963,7 +3962,7 @@ dependencies = [ "cumulus-pallet-parachain-system", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", - "cumulus-primitives-reclaim", + "cumulus-primitives-pov-reclaim", "cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", "cumulus-relay-chain-minimal-node", @@ -10824,7 +10823,7 @@ dependencies = [ "cumulus-client-service", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", - "cumulus-primitives-reclaim", + "cumulus-primitives-pov-reclaim", "cumulus-relay-chain-interface", "frame-benchmarking", "frame-benchmarking-cli", @@ -12384,7 +12383,7 @@ dependencies = [ "cumulus-client-service", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", - "cumulus-primitives-reclaim", + "cumulus-primitives-pov-reclaim", "cumulus-relay-chain-interface", "frame-benchmarking", "frame-benchmarking-cli", diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 289d842224e0..89afc7369290 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -35,7 +35,7 @@ xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-feature cumulus-pallet-parachain-system-proc-macro = { path = "proc-macro", default-features = false } cumulus-primitives-core = { path = "../../primitives/core", default-features = false } cumulus-primitives-parachain-inherent = { path = "../../primitives/parachain-inherent", default-features = false } -cumulus-primitives-reclaim = { path = "../../primitives/pov-reclaim", default-features = false } +cumulus-primitives-pov-reclaim = { path = "../../primitives/pov-reclaim", default-features = 
false } [dev-dependencies] assert_matches = "1.5" @@ -61,7 +61,7 @@ std = [ "cumulus-pallet-parachain-system-proc-macro/std", "cumulus-primitives-core/std", "cumulus-primitives-parachain-inherent/std", - "cumulus-primitives-reclaim/std", + "cumulus-primitives-pov-reclaim/std", "environmental/std", "frame-support/std", "frame-system/std", diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index 6d5ac628e7bf..6ca73de9a04e 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -27,6 +27,7 @@ use polkadot_parachain_primitives::primitives::{ }; use codec::Encode; + use frame_support::traits::{ExecuteBlock, ExtrinsicCall, Get, IsSubType}; use sp_core::storage::{ChildInfo, StateVersion}; use sp_externalities::{set_and_run_with_externalities, Externalities}; @@ -44,6 +45,7 @@ type TrieBackend = sp_state_machine::TrieBackend< >; type Ext<'a, B> = sp_state_machine::Ext<'a, HashingFor, TrieBackend>; + fn with_externalities R, R>(f: F) -> R { sp_externalities::with_externalities(f).expect("Environmental externalities not set.") } @@ -175,7 +177,7 @@ where .replace_implementation(host_default_child_storage_next_key), sp_io::offchain_index::host_set.replace_implementation(host_offchain_index_set), sp_io::offchain_index::host_clear.replace_implementation(host_offchain_index_clear), - cumulus_primitives_reclaim::pov_reclaim_host_functions::host_current_storage_proof_size + cumulus_primitives_pov_reclaim::pov_reclaim_host_functions::host_current_storage_proof_size .replace_implementation(host_current_storage_proof_size), ); diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs index 4ab2d90b92e1..8530a5506fa7 100644 --- 
a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs @@ -14,6 +14,16 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . +//! Module for defining classes that provide a specialized trie-recorder +//! and provider for use in validate-block. +//! +//! This file defines two main structs, [`SizeOnlyRecorder`] and +//! [`SizeOnlyRecorderProvider`]. They are used to track the current +//! proof-size without actually recording the accessed nodes themselves. +//! +//! # Panics +//! The `drain_storage_proof` method is not implemented and will panic if called. + use codec::Encode; use sp_std::{ diff --git a/cumulus/parachain-template/node/Cargo.toml b/cumulus/parachain-template/node/Cargo.toml index 78d5780cf531..0cb0e53282e3 100644 --- a/cumulus/parachain-template/node/Cargo.toml +++ b/cumulus/parachain-template/node/Cargo.toml @@ -62,7 +62,7 @@ xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-feature cumulus-client-cli = { path = "../../client/cli" } cumulus-client-collator = { path = "../../client/collator" } cumulus-client-consensus-aura = { path = "../../client/consensus/aura" } -cumulus-primitives-reclaim = { path = "../../primitives/pov-reclaim" } +cumulus-primitives-pov-reclaim = { path = "../../primitives/pov-reclaim" } cumulus-client-consensus-common = { path = "../../client/consensus/common" } cumulus-client-consensus-proposer = { path = "../../client/consensus/proposer" } cumulus-client-service = { path = "../../client/service" } diff --git a/cumulus/parachain-template/node/src/service.rs b/cumulus/parachain-template/node/src/service.rs index 3054b4b7ca3c..c5c388d990d9 100644 --- a/cumulus/parachain-template/node/src/service.rs +++ b/cumulus/parachain-template/node/src/service.rs @@ -41,7 +41,7 @@ pub struct ParachainNativeExecutor; impl sc_executor::NativeExecutionDispatch for 
ParachainNativeExecutor { type ExtendHostFunctions = ( - cumulus_primitives_reclaim::pov_reclaim_host_functions::HostFunctions, + cumulus_primitives_pov_reclaim::pov_reclaim_host_functions::HostFunctions, frame_benchmarking::benchmarking::HostFunctions, ); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml index 9346c0759046..1d0c7bc98563 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml @@ -74,7 +74,6 @@ cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = fals cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } -cumulus-primitives-reclaim = { path = "../../../../primitives/pov-reclaim", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachain-info = { path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } @@ -169,7 +168,6 @@ std = [ "cumulus-pallet-xcm/std", "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-core/std", - "cumulus-primitives-reclaim/std", "cumulus-primitives-utility/std", "frame-benchmarking?/std", "frame-executive/std", diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml index 4e339455a43c..2e6255681cc5 100644 --- a/cumulus/polkadot-parachain/Cargo.toml +++ b/cumulus/polkadot-parachain/Cargo.toml @@ -89,7 +89,7 @@ cumulus-client-consensus-proposer = { path = "../client/consensus/proposer" } cumulus-client-service = { path = "../client/service" } cumulus-primitives-core = { path = 
"../primitives/core" } cumulus-primitives-parachain-inherent = { path = "../primitives/parachain-inherent" } -cumulus-primitives-reclaim = { path = "../primitives/pov-reclaim" } +cumulus-primitives-pov-reclaim = { path = "../primitives/pov-reclaim" } cumulus-relay-chain-interface = { path = "../client/relay-chain-interface" } color-print = "0.3.4" diff --git a/cumulus/polkadot-parachain/src/command.rs b/cumulus/polkadot-parachain/src/command.rs index 1f4961caf06e..710bf2b4968d 100644 --- a/cumulus/polkadot-parachain/src/command.rs +++ b/cumulus/polkadot-parachain/src/command.rs @@ -20,7 +20,7 @@ use crate::{ service::{new_partial, Block}, }; use cumulus_primitives_core::ParaId; -use cumulus_primitives_reclaim::pov_reclaim_host_functions::HostFunctions as ReclaimHostFunctions; +use cumulus_primitives_pov_reclaim::pov_reclaim_host_functions::HostFunctions as ReclaimHostFunctions; use frame_benchmarking_cli::{BenchmarkCmd, SUBSTRATE_REFERENCE_HARDWARE}; use log::info; use parachains_common::{AssetHubPolkadotAuraId, AuraId}; diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs index 8f88b044653d..e941c45b4d95 100644 --- a/cumulus/polkadot-parachain/src/service.rs +++ b/cumulus/polkadot-parachain/src/service.rs @@ -65,13 +65,13 @@ use polkadot_primitives::CollatorPair; #[cfg(not(feature = "runtime-benchmarks"))] type HostFunctions = ( sp_io::SubstrateHostFunctions, - cumulus_primitives_reclaim::pov_reclaim_host_functions::HostFunctions, + cumulus_primitives_pov_reclaim::pov_reclaim_host_functions::HostFunctions, ); #[cfg(feature = "runtime-benchmarks")] type HostFunctions = ( sp_io::SubstrateHostFunctions, - cumulus_primitives_reclaim::pov_reclaim_host_functions::HostFunctions, + cumulus_primitives_pov_reclaim::pov_reclaim_host_functions::HostFunctions, frame_benchmarking::benchmarking::HostFunctions, ); diff --git a/cumulus/primitives/pov-reclaim/Cargo.toml b/cumulus/primitives/pov-reclaim/Cargo.toml index 
51f6009c30e5..8aede0649610 100644 --- a/cumulus/primitives/pov-reclaim/Cargo.toml +++ b/cumulus/primitives/pov-reclaim/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "cumulus-primitives-reclaim" +name = "cumulus-primitives-pov-reclaim" version = "0.1.0" authors.workspace = true edition.workspace = true diff --git a/cumulus/test/client/Cargo.toml b/cumulus/test/client/Cargo.toml index e4c281267bbb..8791486e5202 100644 --- a/cumulus/test/client/Cargo.toml +++ b/cumulus/test/client/Cargo.toml @@ -36,5 +36,5 @@ cumulus-test-runtime = { path = "../runtime" } cumulus-test-service = { path = "../service" } cumulus-test-relay-sproof-builder = { path = "../relay-sproof-builder" } cumulus-primitives-core = { path = "../../primitives/core" } -cumulus-primitives-reclaim = { path = "../../primitives/pov-reclaim" } +cumulus-primitives-pov-reclaim = { path = "../../primitives/pov-reclaim" } cumulus-primitives-parachain-inherent = { path = "../../primitives/parachain-inherent" } diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs index 309b57059dbb..19e6ee970bfc 100644 --- a/cumulus/test/client/src/lib.rs +++ b/cumulus/test/client/src/lib.rs @@ -45,7 +45,7 @@ mod local_executor { impl sc_executor::NativeExecutionDispatch for LocalExecutor { type ExtendHostFunctions = - cumulus_primitives_reclaim::pov_reclaim_host_functions::HostFunctions; + cumulus_primitives_pov_reclaim::pov_reclaim_host_functions::HostFunctions; fn dispatch(method: &str, data: &[u8]) -> Option> { cumulus_test_runtime::api::dispatch(method, data) @@ -213,7 +213,7 @@ pub fn validate_block( let heap_pages = HeapAllocStrategy::Static { extra_pages: 1024 }; let executor = WasmExecutor::<( sp_io::SubstrateHostFunctions, - cumulus_primitives_reclaim::pov_reclaim_host_functions::HostFunctions, + cumulus_primitives_pov_reclaim::pov_reclaim_host_functions::HostFunctions, )>::builder() .with_execution_method(WasmExecutionMethod::default()) .with_max_runtime_instances(1) diff --git 
a/cumulus/test/service/Cargo.toml b/cumulus/test/service/Cargo.toml index 221a55b34421..2db8cc20548e 100644 --- a/cumulus/test/service/Cargo.toml +++ b/cumulus/test/service/Cargo.toml @@ -66,7 +66,7 @@ polkadot-overseer = { path = "../../../polkadot/node/overseer" } cumulus-client-cli = { path = "../../client/cli" } parachains-common = { path = "../../parachains/common" } cumulus-client-consensus-common = { path = "../../client/consensus/common" } -cumulus-primitives-reclaim = { path = "../../primitives/pov-reclaim" } +cumulus-primitives-pov-reclaim = { path = "../../primitives/pov-reclaim" } cumulus-client-consensus-relay-chain = { path = "../../client/consensus/relay-chain" } cumulus-client-service = { path = "../../client/service" } cumulus-primitives-core = { path = "../../primitives/core" } diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index a3011cddc496..0e0af3255775 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -114,7 +114,7 @@ pub struct RuntimeExecutor; impl sc_executor::NativeExecutionDispatch for RuntimeExecutor { type ExtendHostFunctions = - cumulus_primitives_reclaim::pov_reclaim_host_functions::HostFunctions; + cumulus_primitives_pov_reclaim::pov_reclaim_host_functions::HostFunctions; fn dispatch(method: &str, data: &[u8]) -> Option> { cumulus_test_runtime::api::dispatch(method, data) diff --git a/substrate/client/service/src/client/call_executor.rs b/substrate/client/service/src/client/call_executor.rs index 7a5d859a9631..820ad99b2e80 100644 --- a/substrate/client/service/src/client/call_executor.rs +++ b/substrate/client/service/src/client/call_executor.rs @@ -290,11 +290,7 @@ where method, call_data, &runtime_code, - &mut self.execution_extensions.extensions( - at_hash, - at_number, - None::<&sp_trie::recorder::Recorder>>, - ), + &mut self.execution_extensions.extensions(at_hash, at_number, None), ) .map_err(Into::into) } diff --git 
a/substrate/primitives/state-machine/src/trie_backend.rs b/substrate/primitives/state-machine/src/trie_backend.rs index cdca42d874a5..f50ecfacb6c5 100644 --- a/substrate/primitives/state-machine/src/trie_backend.rs +++ b/substrate/primitives/state-machine/src/trie_backend.rs @@ -111,7 +111,7 @@ pub struct UnimplementedCacheProvider { // replacement for the `LocalTrieCache` in no-std contexts. _phantom: core::marker::PhantomData, // Statically prevents construction. - _infallible: core::convert::Infallible, + _void: sp_core::Void, } #[cfg(not(feature = "std"))] @@ -154,13 +154,15 @@ impl TrieCacheProvider for UnimplementedCacheProvider { } } +/// Recorder provider that allows construction of a [`TrieBackend`] and satisfies the requirements, +/// but can never be instantiated. #[cfg(not(feature = "std"))] pub struct UnimplementedRecorderProvider { // Not strictly necessary, but the H bound allows to use this as a drop-in // replacement for the [`sp_trie::recorder::Recorder`] in no-std contexts. _phantom: core::marker::PhantomData, // Statically prevents construction. - _infallible: core::convert::Infallible, + _void: sp_core::Void, } #[cfg(not(feature = "std"))] diff --git a/substrate/primitives/trie/src/lib.rs b/substrate/primitives/trie/src/lib.rs index e5dbdcb5f0b1..6f3871f77fe7 100644 --- a/substrate/primitives/trie/src/lib.rs +++ b/substrate/primitives/trie/src/lib.rs @@ -146,16 +146,28 @@ where } } +/// Type that is able to provide a [`trie_db::TrieRecorder`]. +/// +/// Types implementing this trait can be used to maintain recorded state +/// across operations on different [`trie_db::TrieDB`] instances. pub trait TrieRecorderProvider { + /// Recorder type that is going to be returned by implementors of this trait. type Recorder<'a>: trie_db::TrieRecorder + 'a where Self: 'a; + /// Create a [`StorageProof`] derived from the internal state. fn drain_storage_proof(self) -> StorageProof; + + /// Provide a recorder implementing [`trie_db::TrieRecorder`]. 
fn as_trie_recorder(&self, storage_root: H::Out) -> Self::Recorder<'_>; + + /// Provide an estimation of the current storage proof size. fn estimate_encoded_size(&self) -> usize; } +/// Object-safe trait implemented by types that are able to provide a proof +/// size estimation. pub trait ProofSizeProvider { fn estimate_encoded_size(&self) -> usize; } From 91ffa721286c10d46bcb0207283a4e3f4c08ecad Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 19 Sep 2023 13:58:09 +0200 Subject: [PATCH 37/61] Remove size estimation and make proof optional --- .../src/validate_block/trie_recorder.rs | 9 +++------ .../primitives/state-machine/src/trie_backend.rs | 8 ++------ substrate/primitives/trie/src/lib.rs | 8 ++------ substrate/primitives/trie/src/recorder.rs | 14 ++------------ 4 files changed, 9 insertions(+), 30 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs index 8530a5506fa7..b36ef9d3f133 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs @@ -120,8 +120,8 @@ impl SizeOnlyRecorderProvider { impl sp_trie::TrieRecorderProvider for SizeOnlyRecorderProvider { type Recorder<'a> = SizeOnlyRecorder<'a, H> where H: 'a; - fn drain_storage_proof(self) -> StorageProof { - unimplemented!("Draining storage proof not supported!") + fn drain_storage_proof(self) -> Option { + None } fn as_trie_recorder(&self, _storage_root: H::Out) -> Self::Recorder<'_> { @@ -131,11 +131,8 @@ impl sp_trie::TrieRecorderProvider for SizeOnlyRecorderPr recorded_keys: self.recorded_keys.borrow_mut(), } } - - fn estimate_encoded_size(&self) -> usize { - *self.encoded_size.borrow() - } } + impl ProofSizeProvider for SizeOnlyRecorderProvider { fn estimate_encoded_size(&self) -> usize { *self.encoded_size.borrow() diff --git a/substrate/primitives/state-machine/src/trie_backend.rs 
b/substrate/primitives/state-machine/src/trie_backend.rs index a0f39657713b..bf0aa9be2f37 100644 --- a/substrate/primitives/state-machine/src/trie_backend.rs +++ b/substrate/primitives/state-machine/src/trie_backend.rs @@ -179,17 +179,13 @@ impl trie_db::TrieRecorder for UnimplementedRecorderProvider< impl TrieRecorderProvider for UnimplementedRecorderProvider { type Recorder<'a> = UnimplementedRecorderProvider where H: 'a; - fn drain_storage_proof(self) -> StorageProof { + fn drain_storage_proof(self) -> Option { unimplemented!() } fn as_trie_recorder(&self, _storage_root: H::Out) -> Self::Recorder<'_> { unimplemented!() } - - fn estimate_encoded_size(&self) -> usize { - unimplemented!() - } } #[cfg(feature = "std")] @@ -390,7 +386,7 @@ where /// /// This only returns `Some` when there was a recorder set. pub fn extract_proof(mut self) -> Option { - self.essence.recorder.take().map(|r| r.drain_storage_proof()) + self.essence.recorder.take().map(|r| r.drain_storage_proof()).flatten() } } diff --git a/substrate/primitives/trie/src/lib.rs b/substrate/primitives/trie/src/lib.rs index 6f3871f77fe7..18515dadd754 100644 --- a/substrate/primitives/trie/src/lib.rs +++ b/substrate/primitives/trie/src/lib.rs @@ -157,17 +157,13 @@ pub trait TrieRecorderProvider { Self: 'a; /// Create a [`StorageProof`] derived from the internal state. - fn drain_storage_proof(self) -> StorageProof; + fn drain_storage_proof(self) -> Option; /// Provide a recorder implementing [`trie_db::TrieRecorder`]. fn as_trie_recorder(&self, storage_root: H::Out) -> Self::Recorder<'_>; - - /// Provide an estimation of the current storage proof size. - fn estimate_encoded_size(&self) -> usize; } -/// Object-safe trait implemented by types that are able to provide a proof -/// size estimation. +/// Type that is able to provide a proof size estimation. 
pub trait ProofSizeProvider { fn estimate_encoded_size(&self) -> usize; } diff --git a/substrate/primitives/trie/src/recorder.rs b/substrate/primitives/trie/src/recorder.rs index 1a14d2cbadcf..d99c53f35ea4 100644 --- a/substrate/primitives/trie/src/recorder.rs +++ b/substrate/primitives/trie/src/recorder.rs @@ -234,12 +234,6 @@ impl crate::ProofSizeProvider for Recorder { } } -impl crate::ProofSizeProvider for &Recorder { - fn estimate_encoded_size(&self) -> usize { - Recorder::estimate_encoded_size(*self) - } -} - /// The [`TrieRecorder`](trie_db::TrieRecorder) implementation. pub struct TrieRecorder<'a, H: Hasher> { inner: MutexGuard<'a, RecorderInner>, @@ -251,17 +245,13 @@ pub struct TrieRecorder<'a, H: Hasher> { impl crate::TrieRecorderProvider for Recorder { type Recorder<'a> = TrieRecorder<'a, H> where H: 'a; - fn drain_storage_proof(self) -> StorageProof { - Recorder::drain_storage_proof(self) + fn drain_storage_proof(self) -> Option { + Some(Recorder::drain_storage_proof(self)) } fn as_trie_recorder(&self, storage_root: H::Out) -> Self::Recorder<'_> { Recorder::as_trie_recorder(&self, storage_root) } - - fn estimate_encoded_size(&self) -> usize { - Recorder::estimate_encoded_size(&self) - } } impl<'a, H: Hasher> TrieRecorder<'a, H> { From a7fe651c63344bcc71aa882cc267f78b68dbfb8e Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 19 Sep 2023 14:31:50 +0200 Subject: [PATCH 38/61] Move extension to sp-trie crate --- Cargo.lock | 13 +------------ Cargo.toml | 1 - cumulus/primitives/pov-reclaim/Cargo.toml | 5 ++--- cumulus/primitives/pov-reclaim/src/lib.rs | 12 +++++++----- substrate/client/api/Cargo.toml | 1 - .../client/api/src/execution_extensions.rs | 2 +- substrate/client/service/Cargo.toml | 1 - substrate/primitives/proof-size-ext/Cargo.toml | 17 ----------------- substrate/primitives/trie/Cargo.toml | 2 ++ substrate/primitives/trie/src/lib.rs | 3 +++ .../lib.rs => trie/src/proof_size_extension.rs} | 9 ++++++++- 11 files changed, 24 insertions(+), 
42 deletions(-) delete mode 100644 substrate/primitives/proof-size-ext/Cargo.toml rename substrate/primitives/{proof-size-ext/src/lib.rs => trie/src/proof_size_extension.rs} (86%) diff --git a/Cargo.lock b/Cargo.lock index bd19b35ad058..4d9462826f54 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3720,7 +3720,6 @@ dependencies = [ "sp-core", "sp-externalities", "sp-io", - "sp-proof-size-ext", "sp-runtime-interface", "sp-state-machine", "sp-trie", @@ -14656,7 +14655,6 @@ dependencies = [ "sp-core", "sp-database", "sp-externalities", - "sp-proof-size-ext", "sp-runtime", "sp-state-machine", "sp-statement-store", @@ -15618,7 +15616,6 @@ dependencies = [ "sp-core", "sp-externalities", "sp-keystore", - "sp-proof-size-ext", "sp-runtime", "sp-session", "sp-state-machine", @@ -17263,15 +17260,6 @@ dependencies = [ "regex", ] -[[package]] -name = "sp-proof-size-ext" -version = "4.0.0-dev" -dependencies = [ - "sp-externalities", - "sp-runtime", - "sp-trie", -] - [[package]] name = "sp-rpc" version = "6.0.0" @@ -17547,6 +17535,7 @@ dependencies = [ "scale-info", "schnellru", "sp-core", + "sp-externalities", "sp-runtime", "sp-std", "thiserror", diff --git a/Cargo.toml b/Cargo.toml index 8c42232baefc..d1078e3c86a8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -395,7 +395,6 @@ members = [ "substrate/primitives/genesis-builder", "substrate/primitives/inherents", "substrate/primitives/io", - "substrate/primitives/proof-size-ext", "substrate/primitives/keyring", "substrate/primitives/keystore", "substrate/primitives/maybe-compressed-blob", diff --git a/cumulus/primitives/pov-reclaim/Cargo.toml b/cumulus/primitives/pov-reclaim/Cargo.toml index 8aede0649610..134acb0fc9df 100644 --- a/cumulus/primitives/pov-reclaim/Cargo.toml +++ b/cumulus/primitives/pov-reclaim/Cargo.toml @@ -7,11 +7,10 @@ edition.workspace = true [dependencies] sp-runtime-interface = { path = "../../../substrate/primitives/runtime-interface", default-features = false } sp-externalities = { path = 
"../../../substrate/primitives/externalities", default-features = false } -sp-proof-size-ext = { path = "../../../substrate/primitives/proof-size-ext", optional = true } +sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } [dev-dependencies] sp-state-machine = { path = "../../../substrate/primitives/state-machine" } -sp-trie = { path = "../../../substrate/primitives/trie" } sp-core = { path = "../../../substrate/primitives/core" } sp-io = { path = "../../../substrate/primitives/io" } @@ -19,6 +18,6 @@ sp-io = { path = "../../../substrate/primitives/io" } default = [ "std" ] std = [ "sp-externalities/std", - "sp-proof-size-ext", + "sp-trie/std", "sp-runtime-interface/std", ] diff --git a/cumulus/primitives/pov-reclaim/src/lib.rs b/cumulus/primitives/pov-reclaim/src/lib.rs index a4b41c8a037b..5df373b66eb1 100644 --- a/cumulus/primitives/pov-reclaim/src/lib.rs +++ b/cumulus/primitives/pov-reclaim/src/lib.rs @@ -18,11 +18,11 @@ use sp_externalities::ExternalitiesExt; -#[cfg(feature = "std")] -use sp_proof_size_ext::ProofSizeExt; - use sp_runtime_interface::runtime_interface; +#[cfg(feature = "std")] +use sp_trie::proof_size_extension::ProofSizeExt; + #[runtime_interface] pub trait PovReclaimHostFunctions { fn current_storage_proof_size(&mut self) -> u32 { @@ -36,9 +36,11 @@ pub trait PovReclaimHostFunctions { #[cfg(test)] mod tests { use sp_core::Blake2Hasher; - use sp_proof_size_ext::ProofSizeExt; use sp_state_machine::TestExternalities; - use sp_trie::{recorder::Recorder, LayoutV1, PrefixedMemoryDB, TrieDBMutBuilder, TrieMut}; + use sp_trie::{ + proof_size_extension::ProofSizeExt, recorder::Recorder, LayoutV1, PrefixedMemoryDB, + TrieDBMutBuilder, TrieMut, + }; use crate::pov_reclaim_host_functions; diff --git a/substrate/client/api/Cargo.toml b/substrate/client/api/Cargo.toml index 9f7b6872cd01..2b64c86038dd 100644 --- a/substrate/client/api/Cargo.toml +++ b/substrate/client/api/Cargo.toml @@ -32,7 +32,6 @@ sp-core = { path = 
"../../primitives/core", default-features = false} sp-database = { path = "../../primitives/database" } sp-externalities = { path = "../../primitives/externalities" } sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-proof-size-ext = { path = "../../primitives/proof-size-ext" } sp-state-machine = { path = "../../primitives/state-machine" } sp-statement-store = { path = "../../primitives/statement-store" } sp-storage = { path = "../../primitives/storage" } diff --git a/substrate/client/api/src/execution_extensions.rs b/substrate/client/api/src/execution_extensions.rs index b385d5f83145..dbf305bf8bcd 100644 --- a/substrate/client/api/src/execution_extensions.rs +++ b/substrate/client/api/src/execution_extensions.rs @@ -128,7 +128,7 @@ impl ExecutionExtensions { extensions.register(ReadRuntimeVersionExt::new(self.read_runtime_version.clone())); if let Some(recorder) = proof_recorder { - extensions.register(sp_proof_size_ext::ProofSizeExt::new(recorder.clone())); + extensions.register(sp_trie::proof_size_extension::ProofSizeExt::new(recorder.clone())); }; extensions diff --git a/substrate/client/service/Cargo.toml b/substrate/client/service/Cargo.toml index db0b26cb279f..ccf23bc8994b 100644 --- a/substrate/client/service/Cargo.toml +++ b/substrate/client/service/Cargo.toml @@ -47,7 +47,6 @@ sp-core = { path = "../../primitives/core" } sp-keystore = { path = "../../primitives/keystore" } sp-session = { path = "../../primitives/session" } sp-state-machine = { path = "../../primitives/state-machine" } -sp-proof-size-ext = { path = "../../primitives/proof-size-ext" } sp-consensus = { path = "../../primitives/consensus/common" } sc-consensus = { path = "../consensus/common" } sp-storage = { path = "../../primitives/storage" } diff --git a/substrate/primitives/proof-size-ext/Cargo.toml b/substrate/primitives/proof-size-ext/Cargo.toml deleted file mode 100644 index f8239d387182..000000000000 --- a/substrate/primitives/proof-size-ext/Cargo.toml 
+++ /dev/null @@ -1,17 +0,0 @@ -[package] -name = "sp-proof-size-ext" -version = "4.0.0-dev" -authors.workspace = true -edition.workspace = true -license = "Apache-2.0" -homepage = "https://substrate.io" -repository.workspace = true -description = "A crate which contains an externalities extension to fetch the proof size." - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -sp-externalities = { path = "../externalities" } -sp-trie = { path = "../trie" } -sp-runtime = { path = "../runtime" } diff --git a/substrate/primitives/trie/Cargo.toml b/substrate/primitives/trie/Cargo.toml index 0b54514f6003..aa46f8aa6d19 100644 --- a/substrate/primitives/trie/Cargo.toml +++ b/substrate/primitives/trie/Cargo.toml @@ -33,6 +33,7 @@ trie-db = { version = "0.28.0", default-features = false } trie-root = { version = "0.18.0", default-features = false } sp-core = { path = "../core", default-features = false} sp-std = { path = "../std", default-features = false} +sp-externalities = { path = "../externalities", default-features = false } schnellru = { version = "0.2.1", optional = true } [dev-dependencies] @@ -58,6 +59,7 @@ std = [ "sp-core/std", "sp-runtime/std", "sp-std/std", + "sp-externalities/std", "thiserror", "tracing", "trie-db/std", diff --git a/substrate/primitives/trie/src/lib.rs b/substrate/primitives/trie/src/lib.rs index 18515dadd754..4974aeb9b5c5 100644 --- a/substrate/primitives/trie/src/lib.rs +++ b/substrate/primitives/trie/src/lib.rs @@ -30,6 +30,9 @@ mod storage_proof; mod trie_codec; mod trie_stream; +#[cfg(feature = "std")] +pub mod proof_size_extension; + /// Our `NodeCodec`-specific error. pub use error::Error; /// Various re-exports from the `hash-db` crate. 
diff --git a/substrate/primitives/proof-size-ext/src/lib.rs b/substrate/primitives/trie/src/proof_size_extension.rs similarity index 86% rename from substrate/primitives/proof-size-ext/src/lib.rs rename to substrate/primitives/trie/src/proof_size_extension.rs index 32edf7819c52..05dca2132e04 100644 --- a/substrate/primitives/proof-size-ext/src/lib.rs +++ b/substrate/primitives/trie/src/proof_size_extension.rs @@ -1,5 +1,8 @@ +// This file is part of Substrate. + // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 + // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -11,7 +14,11 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -use sp_trie::ProofSizeProvider; + +//! Externalities extension that provides access to the current proof size +//! of the underlying recorder. + +use crate::ProofSizeProvider; sp_externalities::decl_extension! 
{ /// The proof size extension to fetch the current storage proof size From a367f06bd287ebc9c18f1f13d7880c8562d7292f Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 19 Sep 2023 15:55:44 +0200 Subject: [PATCH 39/61] Improve docs & format features --- cumulus/primitives/pov-reclaim/Cargo.toml | 6 +----- cumulus/primitives/pov-reclaim/src/lib.rs | 4 ++++ substrate/primitives/trie/Cargo.toml | 2 +- substrate/primitives/trie/src/recorder.rs | 4 +++- 4 files changed, 9 insertions(+), 7 deletions(-) diff --git a/cumulus/primitives/pov-reclaim/Cargo.toml b/cumulus/primitives/pov-reclaim/Cargo.toml index 134acb0fc9df..e640362d8763 100644 --- a/cumulus/primitives/pov-reclaim/Cargo.toml +++ b/cumulus/primitives/pov-reclaim/Cargo.toml @@ -16,8 +16,4 @@ sp-io = { path = "../../../substrate/primitives/io" } [features] default = [ "std" ] -std = [ - "sp-externalities/std", - "sp-trie/std", - "sp-runtime-interface/std", -] +std = [ "sp-externalities/std", "sp-runtime-interface/std", "sp-trie/std" ] diff --git a/cumulus/primitives/pov-reclaim/src/lib.rs b/cumulus/primitives/pov-reclaim/src/lib.rs index 5df373b66eb1..a598e66a8818 100644 --- a/cumulus/primitives/pov-reclaim/src/lib.rs +++ b/cumulus/primitives/pov-reclaim/src/lib.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . +//! Tools for reclaiming PoV weight in parachain runtimes. + #![cfg_attr(not(feature = "std"), no_std)] use sp_externalities::ExternalitiesExt; @@ -23,8 +25,10 @@ use sp_runtime_interface::runtime_interface; #[cfg(feature = "std")] use sp_trie::proof_size_extension::ProofSizeExt; +/// Interface that provides access to the current storage proof size. #[runtime_interface] pub trait PovReclaimHostFunctions { + /// Returns the current storage proof size. 
fn current_storage_proof_size(&mut self) -> u32 { match self.extension::() { Some(ext) => ext.current_storage_proof_size(), diff --git a/substrate/primitives/trie/Cargo.toml b/substrate/primitives/trie/Cargo.toml index aa46f8aa6d19..7a381debb86e 100644 --- a/substrate/primitives/trie/Cargo.toml +++ b/substrate/primitives/trie/Cargo.toml @@ -57,9 +57,9 @@ std = [ "scale-info/std", "schnellru", "sp-core/std", + "sp-externalities/std", "sp-runtime/std", "sp-std/std", - "sp-externalities/std", "thiserror", "tracing", "trie-db/std", diff --git a/substrate/primitives/trie/src/recorder.rs b/substrate/primitives/trie/src/recorder.rs index d99c53f35ea4..037034cd4ccd 100644 --- a/substrate/primitives/trie/src/recorder.rs +++ b/substrate/primitives/trie/src/recorder.rs @@ -80,7 +80,9 @@ impl Default for RecorderInner { /// The trie recorder. /// -/// It can be used to record accesses to the trie and then to convert them into a [`StorageProof`]. +/// Owns a [`RecorderInner`] containing the recorded data. Is used to transform data into a storage +/// proof and to provide transaction support. The `as_trie_recorder` method provides a +/// [`trie_db::TrieDB`] compatible recorder that implements the actual recording logic. pub struct Recorder { inner: Arc>>, /// The estimated encoded size of the storage proof this recorder will produce. 
From 3701a7ab664a2d22d86401f5b2bb18a2f26e29a5 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 19 Sep 2023 16:31:22 +0200 Subject: [PATCH 40/61] Sorry clippy --- .../parachain-system/src/validate_block/trie_recorder.rs | 2 +- substrate/primitives/state-machine/src/trie_backend.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs index b36ef9d3f133..7fda88ea3b3c 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs @@ -148,7 +148,7 @@ mod tests { use rand::Rng; use sp_trie::{ cache::{CacheSize, SharedTrieCache}, - MemoryDB, TrieRecorderProvider, + MemoryDB, ProofSizeProvider, TrieRecorderProvider, }; use trie_db::{Trie, TrieDBBuilder, TrieDBMutBuilder, TrieHash, TrieMut, TrieRecorder}; use trie_standardmap::{Alphabet, StandardMap, ValueMode}; diff --git a/substrate/primitives/state-machine/src/trie_backend.rs b/substrate/primitives/state-machine/src/trie_backend.rs index bf0aa9be2f37..e6cc33ce735a 100644 --- a/substrate/primitives/state-machine/src/trie_backend.rs +++ b/substrate/primitives/state-machine/src/trie_backend.rs @@ -386,7 +386,7 @@ where /// /// This only returns `Some` when there was a recorder set. pub fn extract_proof(mut self) -> Option { - self.essence.recorder.take().map(|r| r.drain_storage_proof()).flatten() + self.essence.recorder.take().and_then(|r| r.drain_storage_proof()) } } From 795e68e4379caaf64bec3a0c7fe3207c47dd2d28 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Wed, 20 Sep 2023 11:52:05 +0200 Subject: [PATCH 41/61] Remove reference to private struct. 
--- substrate/primitives/trie/src/recorder.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/substrate/primitives/trie/src/recorder.rs b/substrate/primitives/trie/src/recorder.rs index 037034cd4ccd..b236f281bb10 100644 --- a/substrate/primitives/trie/src/recorder.rs +++ b/substrate/primitives/trie/src/recorder.rs @@ -80,7 +80,7 @@ impl Default for RecorderInner { /// The trie recorder. /// -/// Owns a [`RecorderInner`] containing the recorded data. Is used to transform data into a storage +/// Owns the recorded data. Is used to transform data into a storage /// proof and to provide transaction support. The `as_trie_recorder` method provides a /// [`trie_db::TrieDB`] compatible recorder that implements the actual recording logic. pub struct Recorder { From 57976f69ee86a8ec5bcd02468de53fcd93a6566c Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Fri, 29 Sep 2023 17:31:21 +0200 Subject: [PATCH 42/61] Apply suggestions from code review MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Bastian Köcher --- .../src/validate_block/trie_recorder.rs | 22 +++++-------------- 1 file changed, 6 insertions(+), 16 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs index 7fda88ea3b3c..7f6628fa0bc1 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs @@ -14,15 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . -//! Module for defining classes that provide a specialized trie-recorder -//! and provider for use in validate-block. +//! Provide a specialized trie-recorder and provider for use in validate-block. //! //! This file defines two main structs, [`SizeOnlyRecorder`] and //! [`SizeOnlyRecorderProvider`]. 
They are used to track the current //! proof-size without actually recording the accessed nodes themselves. -//! -//! # Panics -//! The `drain_storage_proof` method is not implemented and will panic if called. use codec::Encode; @@ -49,24 +45,18 @@ impl<'a, H: trie_db::Hasher> trie_db::TrieRecorder for SizeOnlyRecorder< let mut encoded_size_update = 0; match access { TrieAccess::NodeOwned { hash, node_owned } => - if !self.seen_nodes.get(&hash).is_some() { + if self.seen_nodes.insert(hash) { let node = node_owned.to_encoded::>(); encoded_size_update += node.encoded_size(); - self.seen_nodes.insert(hash); }, TrieAccess::EncodedNode { hash, encoded_node } => { - if !self.seen_nodes.get(&hash).is_some() { - let node = encoded_node.into_owned(); - - encoded_size_update += node.encoded_size(); - self.seen_nodes.insert(hash); + if self.seen_nodes.insert(hash) { + encoded_size_update += encoded_node.encoded_size(); } }, TrieAccess::Value { hash, value, full_key } => { - if !self.seen_nodes.get(&hash).is_some() { - let value = value.into_owned(); + if self.seen_nodes.insert(hash) { encoded_size_update += value.encoded_size(); - self.seen_nodes.insert(hash); } self.recorded_keys .entry(full_key.into()) @@ -153,7 +143,7 @@ mod tests { use trie_db::{Trie, TrieDBBuilder, TrieDBMutBuilder, TrieHash, TrieMut, TrieRecorder}; use trie_standardmap::{Alphabet, StandardMap, ValueMode}; - use crate::validate_block::trie_recorder::SizeOnlyRecorderProvider; + use super::*; type Recorder = sp_trie::recorder::Recorder; From 7bda9cc788b1da448c0fa4caecc0737bb277a49c Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Fri, 29 Sep 2023 17:36:52 +0200 Subject: [PATCH 43/61] Use environmental crate instead of `static mut` --- .../src/validate_block/implementation.rs | 30 +++++-------------- 1 file changed, 8 insertions(+), 22 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs 
index 6ca73de9a04e..62ac42d677ce 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -51,8 +51,7 @@ fn with_externalities R, R>(f: F) -> R { } /// Recorder instance to be used during this validate_block call. -/// Static mut is fine here because we are single-threaded in WASM. -static mut RECORDER: Option> = None; +environmental::environmental!(recorder: trait ProofSizeProvider); /// Validate the given parachain block. /// @@ -126,7 +125,7 @@ where sp_std::mem::drop(storage_proof); - let recorder = SizeOnlyRecorderProvider::new(); + let mut recorder = SizeOnlyRecorderProvider::new(); let cache_provider = trie_cache::CacheProvider::new(); // We use the storage root of the `parent_head` to ensure that it is the correct root. // This is already being done above while creating the in-memory db, but let's be paranoid!! @@ -138,8 +137,6 @@ where .with_recorder(recorder.clone()) .build(); - set_recorder(recorder); - let _guard = ( // Replace storage calls with our own implementations sp_io::storage::host_read.replace_implementation(host_storage_read), @@ -181,7 +178,7 @@ where .replace_implementation(host_current_storage_proof_size), ); - run_with_externalities::(&backend, || { + run_with_externalities_and_recorder::(&backend, &mut recorder, || { let relay_chain_proof = crate::RelayChainStateProof::new( PSC::SelfParaId::get(), inherent_data.validation_data.relay_parent_storage_root, @@ -202,7 +199,7 @@ where } }); - run_with_externalities::(&backend, || { + run_with_externalities_and_recorder::(&backend, &mut recorder, || { let head_data = HeadData(block.header().encode()); E::execute_block(block); @@ -278,14 +275,15 @@ fn validate_validation_data( } /// Run the given closure with the externalities set. 
-fn run_with_externalities R>( +fn run_with_externalities_and_recorder R>( backend: &TrieBackend, + recorder: &mut SizeOnlyRecorderProvider>, execute: F, ) -> R { let mut overlay = sp_state_machine::OverlayedChanges::default(); let mut ext = Ext::::new(&mut overlay, backend); - set_and_run_with_externalities(&mut ext, || execute()) + recorder::using(recorder, || set_and_run_with_externalities(&mut ext, || execute())) } fn host_storage_read(key: &[u8], value_out: &mut [u8], value_offset: u32) -> Option { @@ -318,8 +316,7 @@ fn host_storage_clear(key: &[u8]) { } fn host_current_storage_proof_size() -> u32 { - get_recorder_ref() - .map(|r| r.estimate_encoded_size()) + recorder::with(|rec| rec.estimate_encoded_size()) .unwrap_or_default() .try_into() .unwrap_or_default() @@ -426,14 +423,3 @@ fn host_default_child_storage_next_key(storage_key: &[u8], key: &[u8]) -> Option fn host_offchain_index_set(_key: &[u8], _value: &[u8]) {} fn host_offchain_index_clear(_key: &[u8]) {} - -fn set_recorder(recorder: SizeOnlyRecorderProvider) { - // This is safe here, there is strictly sequential access. 
- unsafe { - RECORDER = Some(Box::new(recorder)); - } -} - -fn get_recorder_ref() -> Option<&'static Box> { - unsafe { RECORDER.as_ref() } -} From fa22eb463a218ad9202e070e9f33e12b871901c5 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Fri, 29 Sep 2023 17:48:47 +0200 Subject: [PATCH 44/61] Review comments --- cumulus/client/service/Cargo.toml | 1 + cumulus/client/service/src/lib.rs | 2 ++ cumulus/parachain-template/node/Cargo.toml | 1 - cumulus/parachain-template/node/src/command.rs | 3 ++- cumulus/parachain-template/node/src/service.rs | 2 +- cumulus/polkadot-parachain/Cargo.toml | 1 - cumulus/polkadot-parachain/src/command.rs | 2 +- cumulus/polkadot-parachain/src/service.rs | 4 ++-- cumulus/primitives/pov-reclaim/Cargo.toml | 1 + cumulus/test/service/Cargo.toml | 1 - cumulus/test/service/src/lib.rs | 3 +-- substrate/primitives/state-machine/src/trie_backend.rs | 4 ---- 12 files changed, 11 insertions(+), 14 deletions(-) diff --git a/cumulus/client/service/Cargo.toml b/cumulus/client/service/Cargo.toml index b53bdbdfc815..84ea8bf931e8 100644 --- a/cumulus/client/service/Cargo.toml +++ b/cumulus/client/service/Cargo.toml @@ -36,6 +36,7 @@ cumulus-client-consensus-common = { path = "../consensus/common" } cumulus-client-pov-recovery = { path = "../pov-recovery" } cumulus-client-network = { path = "../network" } cumulus-primitives-core = { path = "../../primitives/core" } +cumulus-primitives-pov-reclaim = { path = "../../primitives/pov-reclaim" } cumulus-relay-chain-interface = { path = "../relay-chain-interface" } cumulus-relay-chain-inprocess-interface = { path = "../relay-chain-inprocess-interface" } cumulus-relay-chain-minimal-node = { path = "../relay-chain-minimal-node" } diff --git a/cumulus/client/service/src/lib.rs b/cumulus/client/service/src/lib.rs index 82890666ebae..45c6d8ede6f7 100644 --- a/cumulus/client/service/src/lib.rs +++ b/cumulus/client/service/src/lib.rs @@ -52,6 +52,8 @@ use sp_core::{traits::SpawnNamed, Decode}; use 
sp_runtime::traits::{Block as BlockT, BlockIdTo}; use std::{sync::Arc, time::Duration}; +pub use cumulus_primitives_pov_reclaim::pov_reclaim_host_functions; + // Given the sporadic nature of the explicit recovery operation and the // possibility to retry infinite times this value is more than enough. // In practice here we expect no more than one queued messages. diff --git a/cumulus/parachain-template/node/Cargo.toml b/cumulus/parachain-template/node/Cargo.toml index 016cb8189636..223a78dacc49 100644 --- a/cumulus/parachain-template/node/Cargo.toml +++ b/cumulus/parachain-template/node/Cargo.toml @@ -62,7 +62,6 @@ xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-feature cumulus-client-cli = { path = "../../client/cli" } cumulus-client-collator = { path = "../../client/collator" } cumulus-client-consensus-aura = { path = "../../client/consensus/aura" } -cumulus-primitives-pov-reclaim = { path = "../../primitives/pov-reclaim" } cumulus-client-consensus-common = { path = "../../client/consensus/common" } cumulus-client-consensus-proposer = { path = "../../client/consensus/proposer" } cumulus-client-service = { path = "../../client/service" } diff --git a/cumulus/parachain-template/node/src/command.rs b/cumulus/parachain-template/node/src/command.rs index 7acfb0c2035e..62a09257131a 100644 --- a/cumulus/parachain-template/node/src/command.rs +++ b/cumulus/parachain-template/node/src/command.rs @@ -1,5 +1,6 @@ use std::net::SocketAddr; +use cumulus_client_service::pov_reclaim_host_functions::HostFunctions as ReclaimHostFunctions; use cumulus_primitives_core::ParaId; use frame_benchmarking_cli::{BenchmarkCmd, SUBSTRATE_REFERENCE_HARDWARE}; use log::info; @@ -183,7 +184,7 @@ pub fn run() -> Result<()> { match cmd { BenchmarkCmd::Pallet(cmd) => if cfg!(feature = "runtime-benchmarks") { - runner.sync_run(|config| cmd.run::(config)) + runner.sync_run(|config| cmd.run::(config)) } else { Err("Benchmarking wasn't enabled when building the node. 
\ You can enable it with `--features runtime-benchmarks`." diff --git a/cumulus/parachain-template/node/src/service.rs b/cumulus/parachain-template/node/src/service.rs index c5c388d990d9..cd06e996c0d3 100644 --- a/cumulus/parachain-template/node/src/service.rs +++ b/cumulus/parachain-template/node/src/service.rs @@ -41,7 +41,7 @@ pub struct ParachainNativeExecutor; impl sc_executor::NativeExecutionDispatch for ParachainNativeExecutor { type ExtendHostFunctions = ( - cumulus_primitives_pov_reclaim::pov_reclaim_host_functions::HostFunctions, + cumulus_client_service::pov_reclaim_host_functions::HostFunctions, frame_benchmarking::benchmarking::HostFunctions, ); diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml index 1d9b3f043600..ac8ad53b5243 100644 --- a/cumulus/polkadot-parachain/Cargo.toml +++ b/cumulus/polkadot-parachain/Cargo.toml @@ -89,7 +89,6 @@ cumulus-client-consensus-proposer = { path = "../client/consensus/proposer" } cumulus-client-service = { path = "../client/service" } cumulus-primitives-core = { path = "../primitives/core" } cumulus-primitives-parachain-inherent = { path = "../primitives/parachain-inherent" } -cumulus-primitives-pov-reclaim = { path = "../primitives/pov-reclaim" } cumulus-relay-chain-interface = { path = "../client/relay-chain-interface" } color-print = "0.3.4" diff --git a/cumulus/polkadot-parachain/src/command.rs b/cumulus/polkadot-parachain/src/command.rs index 710bf2b4968d..431bc3c8f8f0 100644 --- a/cumulus/polkadot-parachain/src/command.rs +++ b/cumulus/polkadot-parachain/src/command.rs @@ -19,8 +19,8 @@ use crate::{ cli::{Cli, RelayChainCli, Subcommand}, service::{new_partial, Block}, }; +use cumulus_client_service::pov_reclaim_host_functions::HostFunctions as ReclaimHostFunctions; use cumulus_primitives_core::ParaId; -use cumulus_primitives_pov_reclaim::pov_reclaim_host_functions::HostFunctions as ReclaimHostFunctions; use frame_benchmarking_cli::{BenchmarkCmd, 
SUBSTRATE_REFERENCE_HARDWARE}; use log::info; use parachains_common::{AssetHubPolkadotAuraId, AuraId}; diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs index e941c45b4d95..18f7b9462a60 100644 --- a/cumulus/polkadot-parachain/src/service.rs +++ b/cumulus/polkadot-parachain/src/service.rs @@ -65,13 +65,13 @@ use polkadot_primitives::CollatorPair; #[cfg(not(feature = "runtime-benchmarks"))] type HostFunctions = ( sp_io::SubstrateHostFunctions, - cumulus_primitives_pov_reclaim::pov_reclaim_host_functions::HostFunctions, + cumulus_client_service::pov_reclaim_host_functions::HostFunctions, ); #[cfg(feature = "runtime-benchmarks")] type HostFunctions = ( sp_io::SubstrateHostFunctions, - cumulus_primitives_pov_reclaim::pov_reclaim_host_functions::HostFunctions, + cumulus_client_service::pov_reclaim_host_functions::HostFunctions, frame_benchmarking::benchmarking::HostFunctions, ); diff --git a/cumulus/primitives/pov-reclaim/Cargo.toml b/cumulus/primitives/pov-reclaim/Cargo.toml index e640362d8763..2e71919e07c1 100644 --- a/cumulus/primitives/pov-reclaim/Cargo.toml +++ b/cumulus/primitives/pov-reclaim/Cargo.toml @@ -3,6 +3,7 @@ name = "cumulus-primitives-pov-reclaim" version = "0.1.0" authors.workspace = true edition.workspace = true +license = "Apache-2.0" [dependencies] sp-runtime-interface = { path = "../../../substrate/primitives/runtime-interface", default-features = false } diff --git a/cumulus/test/service/Cargo.toml b/cumulus/test/service/Cargo.toml index 839cf2448b07..5285376f3d59 100644 --- a/cumulus/test/service/Cargo.toml +++ b/cumulus/test/service/Cargo.toml @@ -66,7 +66,6 @@ polkadot-overseer = { path = "../../../polkadot/node/overseer" } cumulus-client-cli = { path = "../../client/cli" } parachains-common = { path = "../../parachains/common" } cumulus-client-consensus-common = { path = "../../client/consensus/common" } -cumulus-primitives-pov-reclaim = { path = "../../primitives/pov-reclaim" } 
cumulus-client-consensus-relay-chain = { path = "../../client/consensus/relay-chain" } cumulus-client-service = { path = "../../client/service" } cumulus-primitives-core = { path = "../../primitives/core" } diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index 0e0af3255775..2ca7376ff5d1 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -113,8 +113,7 @@ pub type AnnounceBlockFn = Arc>) + Send + Sync>; pub struct RuntimeExecutor; impl sc_executor::NativeExecutionDispatch for RuntimeExecutor { - type ExtendHostFunctions = - cumulus_primitives_pov_reclaim::pov_reclaim_host_functions::HostFunctions; + type ExtendHostFunctions = cumulus_client_service::pov_reclaim_host_functions::HostFunctions; fn dispatch(method: &str, data: &[u8]) -> Option> { cumulus_test_runtime::api::dispatch(method, data) diff --git a/substrate/primitives/state-machine/src/trie_backend.rs b/substrate/primitives/state-machine/src/trie_backend.rs index e6cc33ce735a..fd4284f5a858 100644 --- a/substrate/primitives/state-machine/src/trie_backend.rs +++ b/substrate/primitives/state-machine/src/trie_backend.rs @@ -109,8 +109,6 @@ pub struct UnimplementedCacheProvider { // Not strictly necessary, but the H bound allows to use this as a drop-in // replacement for the `LocalTrieCache` in no-std contexts. _phantom: core::marker::PhantomData, - // Statically prevents construction. - _void: sp_core::Void, } #[cfg(not(feature = "std"))] @@ -160,8 +158,6 @@ pub struct UnimplementedRecorderProvider { // Not strictly necessary, but the H bound allows to use this as a drop-in // replacement for the [`sp_trie::recorder::Recorder`] in no-std contexts. _phantom: core::marker::PhantomData, - // Statically prevents construction. 
- _void: sp_core::Void, } #[cfg(not(feature = "std"))] From 7ef514e5326c47eb4f8ff36352f177591a0739c1 Mon Sep 17 00:00:00 2001 From: command-bot <> Date: Tue, 17 Oct 2023 07:44:21 +0000 Subject: [PATCH 45/61] ".git/.scripts/commands/fmt/fmt.sh" --- .../parachain-system/src/validate_block/trie_recorder.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs index 7f6628fa0bc1..e73aef70aa49 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs @@ -49,11 +49,10 @@ impl<'a, H: trie_db::Hasher> trie_db::TrieRecorder for SizeOnlyRecorder< let node = node_owned.to_encoded::>(); encoded_size_update += node.encoded_size(); }, - TrieAccess::EncodedNode { hash, encoded_node } => { + TrieAccess::EncodedNode { hash, encoded_node } => if self.seen_nodes.insert(hash) { encoded_size_update += encoded_node.encoded_size(); - } - }, + }, TrieAccess::Value { hash, value, full_key } => { if self.seen_nodes.insert(hash) { encoded_size_update += value.encoded_size(); From bb0489eedb592eb4b0a2e4f2532b2df44fcb4cf6 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Fri, 27 Oct 2023 14:04:58 +0200 Subject: [PATCH 46/61] Move to substrate --- Cargo.lock | 30 +++++++++---------- cumulus/client/service/Cargo.toml | 2 +- cumulus/client/service/src/lib.rs | 2 +- cumulus/pallets/parachain-system/Cargo.toml | 4 +-- .../src/validate_block/implementation.rs | 6 ++-- .../parachain-template/node/src/command.rs | 2 +- .../parachain-template/node/src/service.rs | 2 +- cumulus/polkadot-parachain/src/command.rs | 2 +- cumulus/polkadot-parachain/src/service.rs | 4 +-- cumulus/primitives/pov-reclaim/Cargo.toml | 20 ------------- cumulus/test/client/Cargo.toml | 2 +- cumulus/test/client/src/lib.rs | 4 +-- cumulus/test/service/src/lib.rs | 2 +- 
.../primitives/proof-size-provider/Cargo.toml | 20 +++++++++++++ .../proof-size-provider}/src/lib.rs | 20 ++++++------- .../trie/src/proof_size_extension.rs | 2 +- 16 files changed, 62 insertions(+), 62 deletions(-) delete mode 100644 cumulus/primitives/pov-reclaim/Cargo.toml create mode 100644 substrate/primitives/proof-size-provider/Cargo.toml rename {cumulus/primitives/pov-reclaim => substrate/primitives/proof-size-provider}/src/lib.rs (81%) diff --git a/Cargo.lock b/Cargo.lock index b81dceef3a94..6dd881315def 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3413,7 +3413,6 @@ dependencies = [ "cumulus-client-network", "cumulus-client-pov-recovery", "cumulus-primitives-core", - "cumulus-primitives-pov-reclaim", "cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", "cumulus-relay-chain-minimal-node", @@ -3434,6 +3433,7 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-core", + "sp-proof-size-provider", "sp-runtime", "sp-transaction-pool", ] @@ -3482,7 +3482,6 @@ dependencies = [ "cumulus-pallet-parachain-system-proc-macro", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", - "cumulus-primitives-pov-reclaim", "cumulus-test-client", "cumulus-test-relay-sproof-builder", "environmental", @@ -3502,6 +3501,7 @@ dependencies = [ "sp-inherents", "sp-io", "sp-keyring", + "sp-proof-size-provider", "sp-runtime", "sp-state-machine", "sp-std", @@ -3656,18 +3656,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "cumulus-primitives-pov-reclaim" -version = "0.1.0" -dependencies = [ - "sp-core", - "sp-externalities", - "sp-io", - "sp-runtime-interface", - "sp-state-machine", - "sp-trie", -] - [[package]] name = "cumulus-primitives-timestamp" version = "0.1.0" @@ -3819,7 +3807,6 @@ version = "0.1.0" dependencies = [ "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", - "cumulus-primitives-pov-reclaim", "cumulus-test-relay-sproof-builder", "cumulus-test-runtime", "cumulus-test-service", @@ -3840,6 +3827,7 @@ 
dependencies = [ "sp-inherents", "sp-io", "sp-keyring", + "sp-proof-size-provider", "sp-runtime", "sp-timestamp", "substrate-test-client", @@ -17124,6 +17112,18 @@ dependencies = [ "regex", ] +[[package]] +name = "sp-proof-size-provider" +version = "0.1.0" +dependencies = [ + "sp-core", + "sp-externalities", + "sp-io", + "sp-runtime-interface", + "sp-state-machine", + "sp-trie", +] + [[package]] name = "sp-rpc" version = "6.0.0" diff --git a/cumulus/client/service/Cargo.toml b/cumulus/client/service/Cargo.toml index a9acd0acd385..207b7d44f567 100644 --- a/cumulus/client/service/Cargo.toml +++ b/cumulus/client/service/Cargo.toml @@ -36,7 +36,7 @@ cumulus-client-consensus-common = { path = "../consensus/common" } cumulus-client-pov-recovery = { path = "../pov-recovery" } cumulus-client-network = { path = "../network" } cumulus-primitives-core = { path = "../../primitives/core" } -cumulus-primitives-pov-reclaim = { path = "../../primitives/pov-reclaim" } +sp-proof-size-provider = { path = "../../../substrate/primitives/proof-size-provider" } cumulus-relay-chain-interface = { path = "../relay-chain-interface" } cumulus-relay-chain-inprocess-interface = { path = "../relay-chain-inprocess-interface" } cumulus-relay-chain-minimal-node = { path = "../relay-chain-minimal-node" } diff --git a/cumulus/client/service/src/lib.rs b/cumulus/client/service/src/lib.rs index 45c6d8ede6f7..ad851e05ead0 100644 --- a/cumulus/client/service/src/lib.rs +++ b/cumulus/client/service/src/lib.rs @@ -52,7 +52,7 @@ use sp_core::{traits::SpawnNamed, Decode}; use sp_runtime::traits::{Block as BlockT, BlockIdTo}; use std::{sync::Arc, time::Duration}; -pub use cumulus_primitives_pov_reclaim::pov_reclaim_host_functions; +pub use sp_proof_size_provider::storage_proof_size; // Given the sporadic nature of the explicit recovery operation and the // possibility to retry infinite times this value is more than enough. 
diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 89afc7369290..bd4833a0b38d 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -35,7 +35,7 @@ xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-feature cumulus-pallet-parachain-system-proc-macro = { path = "proc-macro", default-features = false } cumulus-primitives-core = { path = "../../primitives/core", default-features = false } cumulus-primitives-parachain-inherent = { path = "../../primitives/parachain-inherent", default-features = false } -cumulus-primitives-pov-reclaim = { path = "../../primitives/pov-reclaim", default-features = false } +sp-proof-size-provider = { path = "../../../substrate/primitives/proof-size-provider", default-features = false } [dev-dependencies] assert_matches = "1.5" @@ -61,7 +61,7 @@ std = [ "cumulus-pallet-parachain-system-proc-macro/std", "cumulus-primitives-core/std", "cumulus-primitives-parachain-inherent/std", - "cumulus-primitives-pov-reclaim/std", + "sp-proof-size-provider/std", "environmental/std", "frame-support/std", "frame-system/std", diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index 62ac42d677ce..7a5bf370e6f4 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -174,8 +174,8 @@ where .replace_implementation(host_default_child_storage_next_key), sp_io::offchain_index::host_set.replace_implementation(host_offchain_index_set), sp_io::offchain_index::host_clear.replace_implementation(host_offchain_index_clear), - cumulus_primitives_pov_reclaim::pov_reclaim_host_functions::host_current_storage_proof_size - .replace_implementation(host_current_storage_proof_size), + 
sp_proof_size_provider::storage_proof_size::host_storage_proof_size + .replace_implementation(host_storage_proof_size), ); run_with_externalities_and_recorder::(&backend, &mut recorder, || { @@ -315,7 +315,7 @@ fn host_storage_clear(key: &[u8]) { with_externalities(|ext| ext.place_storage(key.to_vec(), None)) } -fn host_current_storage_proof_size() -> u32 { +fn host_storage_proof_size() -> u32 { recorder::with(|rec| rec.estimate_encoded_size()) .unwrap_or_default() .try_into() diff --git a/cumulus/parachain-template/node/src/command.rs b/cumulus/parachain-template/node/src/command.rs index a7a3a74e6cab..489ed0634df3 100644 --- a/cumulus/parachain-template/node/src/command.rs +++ b/cumulus/parachain-template/node/src/command.rs @@ -1,6 +1,6 @@ use std::net::SocketAddr; -use cumulus_client_service::pov_reclaim_host_functions::HostFunctions as ReclaimHostFunctions; +use cumulus_client_service::storage_proof_size::HostFunctions as ReclaimHostFunctions; use cumulus_primitives_core::ParaId; use frame_benchmarking_cli::{BenchmarkCmd, SUBSTRATE_REFERENCE_HARDWARE}; use log::info; diff --git a/cumulus/parachain-template/node/src/service.rs b/cumulus/parachain-template/node/src/service.rs index cd06e996c0d3..659d5d8d64fd 100644 --- a/cumulus/parachain-template/node/src/service.rs +++ b/cumulus/parachain-template/node/src/service.rs @@ -41,7 +41,7 @@ pub struct ParachainNativeExecutor; impl sc_executor::NativeExecutionDispatch for ParachainNativeExecutor { type ExtendHostFunctions = ( - cumulus_client_service::pov_reclaim_host_functions::HostFunctions, + cumulus_client_service::storage_proof_size::HostFunctions, frame_benchmarking::benchmarking::HostFunctions, ); diff --git a/cumulus/polkadot-parachain/src/command.rs b/cumulus/polkadot-parachain/src/command.rs index 0727488ccdac..c240cd60458a 100644 --- a/cumulus/polkadot-parachain/src/command.rs +++ b/cumulus/polkadot-parachain/src/command.rs @@ -19,7 +19,7 @@ use crate::{ cli::{Cli, RelayChainCli, Subcommand}, 
service::{new_partial, Block}, }; -use cumulus_client_service::pov_reclaim_host_functions::HostFunctions as ReclaimHostFunctions; +use cumulus_client_service::storage_proof_size::HostFunctions as ReclaimHostFunctions; use cumulus_primitives_core::ParaId; use frame_benchmarking_cli::{BenchmarkCmd, SUBSTRATE_REFERENCE_HARDWARE}; use log::info; diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs index 713716b158ac..37e5235acbcd 100644 --- a/cumulus/polkadot-parachain/src/service.rs +++ b/cumulus/polkadot-parachain/src/service.rs @@ -70,13 +70,13 @@ use polkadot_primitives::CollatorPair; #[cfg(not(feature = "runtime-benchmarks"))] type HostFunctions = ( sp_io::SubstrateHostFunctions, - cumulus_client_service::pov_reclaim_host_functions::HostFunctions, + cumulus_client_service::storage_proof_size::HostFunctions, ); #[cfg(feature = "runtime-benchmarks")] type HostFunctions = ( sp_io::SubstrateHostFunctions, - cumulus_client_service::pov_reclaim_host_functions::HostFunctions, + cumulus_client_service::storage_proof_size::HostFunctions, frame_benchmarking::benchmarking::HostFunctions, ); diff --git a/cumulus/primitives/pov-reclaim/Cargo.toml b/cumulus/primitives/pov-reclaim/Cargo.toml deleted file mode 100644 index 2e71919e07c1..000000000000 --- a/cumulus/primitives/pov-reclaim/Cargo.toml +++ /dev/null @@ -1,20 +0,0 @@ -[package] -name = "cumulus-primitives-pov-reclaim" -version = "0.1.0" -authors.workspace = true -edition.workspace = true -license = "Apache-2.0" - -[dependencies] -sp-runtime-interface = { path = "../../../substrate/primitives/runtime-interface", default-features = false } -sp-externalities = { path = "../../../substrate/primitives/externalities", default-features = false } -sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } - -[dev-dependencies] -sp-state-machine = { path = "../../../substrate/primitives/state-machine" } -sp-core = { path = "../../../substrate/primitives/core" 
} -sp-io = { path = "../../../substrate/primitives/io" } - -[features] -default = [ "std" ] -std = [ "sp-externalities/std", "sp-runtime-interface/std", "sp-trie/std" ] diff --git a/cumulus/test/client/Cargo.toml b/cumulus/test/client/Cargo.toml index 8791486e5202..063da06eaccb 100644 --- a/cumulus/test/client/Cargo.toml +++ b/cumulus/test/client/Cargo.toml @@ -36,5 +36,5 @@ cumulus-test-runtime = { path = "../runtime" } cumulus-test-service = { path = "../service" } cumulus-test-relay-sproof-builder = { path = "../relay-sproof-builder" } cumulus-primitives-core = { path = "../../primitives/core" } -cumulus-primitives-pov-reclaim = { path = "../../primitives/pov-reclaim" } +sp-proof-size-provider = { path = "../../../substrate/primitives/proof-size-provider" } cumulus-primitives-parachain-inherent = { path = "../../primitives/parachain-inherent" } diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs index 19e6ee970bfc..f4436e4933b7 100644 --- a/cumulus/test/client/src/lib.rs +++ b/cumulus/test/client/src/lib.rs @@ -45,7 +45,7 @@ mod local_executor { impl sc_executor::NativeExecutionDispatch for LocalExecutor { type ExtendHostFunctions = - cumulus_primitives_pov_reclaim::pov_reclaim_host_functions::HostFunctions; + sp_proof_size_provider::storage_proof_size::HostFunctions; fn dispatch(method: &str, data: &[u8]) -> Option> { cumulus_test_runtime::api::dispatch(method, data) @@ -213,7 +213,7 @@ pub fn validate_block( let heap_pages = HeapAllocStrategy::Static { extra_pages: 1024 }; let executor = WasmExecutor::<( sp_io::SubstrateHostFunctions, - cumulus_primitives_pov_reclaim::pov_reclaim_host_functions::HostFunctions, + sp_proof_size_provider::storage_proof_size::HostFunctions, )>::builder() .with_execution_method(WasmExecutionMethod::default()) .with_max_runtime_instances(1) diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index 2ca7376ff5d1..51eeea70e43c 100644 --- a/cumulus/test/service/src/lib.rs +++ 
b/cumulus/test/service/src/lib.rs @@ -113,7 +113,7 @@ pub type AnnounceBlockFn = Arc>) + Send + Sync>; pub struct RuntimeExecutor; impl sc_executor::NativeExecutionDispatch for RuntimeExecutor { - type ExtendHostFunctions = cumulus_client_service::pov_reclaim_host_functions::HostFunctions; + type ExtendHostFunctions = cumulus_client_service::storage_proof_size::HostFunctions; fn dispatch(method: &str, data: &[u8]) -> Option> { cumulus_test_runtime::api::dispatch(method, data) diff --git a/substrate/primitives/proof-size-provider/Cargo.toml b/substrate/primitives/proof-size-provider/Cargo.toml new file mode 100644 index 000000000000..0fc1241c7b00 --- /dev/null +++ b/substrate/primitives/proof-size-provider/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "sp-proof-size-provider" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" + +[dependencies] +sp-runtime-interface = { path = "../runtime-interface", default-features = false } +sp-externalities = { path = "../externalities", default-features = false } +sp-trie = { path = "../trie", default-features = false } + +[dev-dependencies] +sp-state-machine = { path = "../state-machine" } +sp-core = { path = "../core" } +sp-io = { path = "../io" } + +[features] +default = [ "std" ] +std = [ "sp-externalities/std", "sp-runtime-interface/std", "sp-trie/std" ] diff --git a/cumulus/primitives/pov-reclaim/src/lib.rs b/substrate/primitives/proof-size-provider/src/lib.rs similarity index 81% rename from cumulus/primitives/pov-reclaim/src/lib.rs rename to substrate/primitives/proof-size-provider/src/lib.rs index a598e66a8818..5b02737a45cd 100644 --- a/cumulus/primitives/pov-reclaim/src/lib.rs +++ b/substrate/primitives/proof-size-provider/src/lib.rs @@ -27,11 +27,11 @@ use sp_trie::proof_size_extension::ProofSizeExt; /// Interface that provides access to the current storage proof size. 
#[runtime_interface] -pub trait PovReclaimHostFunctions { +pub trait StorageProofSize { /// Returns the current storage proof size. - fn current_storage_proof_size(&mut self) -> u32 { + fn storage_proof_size(&mut self) -> u32 { match self.extension::() { - Some(ext) => ext.current_storage_proof_size(), + Some(ext) => ext.storage_proof_size(), None => 0, } } @@ -46,7 +46,7 @@ mod tests { TrieDBMutBuilder, TrieMut, }; - use crate::pov_reclaim_host_functions; + use crate::storage_proof_size; const TEST_DATA: &[(&[u8], &[u8])] = &[(b"key1", &[1; 64]), (b"key2", &[2; 64])]; @@ -80,11 +80,11 @@ mod tests { ext.register_extension(ProofSizeExt::new(recorder)); ext.execute_with(|| { - assert_eq!(pov_reclaim_host_functions::current_storage_proof_size(), 0); + assert_eq!(storage_proof_size::storage_proof_size(), 0); sp_io::storage::get(b"key1"); - assert_eq!(pov_reclaim_host_functions::current_storage_proof_size(), 175); + assert_eq!(storage_proof_size::storage_proof_size(), 175); sp_io::storage::get(b"key2"); - assert_eq!(pov_reclaim_host_functions::current_storage_proof_size(), 275); + assert_eq!(storage_proof_size::storage_proof_size(), 275); }); } @@ -93,11 +93,11 @@ mod tests { let (mut ext, _) = get_prepared_test_externalities(); ext.execute_with(|| { - assert_eq!(pov_reclaim_host_functions::current_storage_proof_size(), 0); + assert_eq!(storage_proof_size::storage_proof_size(), 0); sp_io::storage::get(b"key1"); - assert_eq!(pov_reclaim_host_functions::current_storage_proof_size(), 0); + assert_eq!(storage_proof_size::storage_proof_size(), 0); sp_io::storage::get(b"key2"); - assert_eq!(pov_reclaim_host_functions::current_storage_proof_size(), 0); + assert_eq!(storage_proof_size::storage_proof_size(), 0); }); } } diff --git a/substrate/primitives/trie/src/proof_size_extension.rs b/substrate/primitives/trie/src/proof_size_extension.rs index 05dca2132e04..dfb1ac4d60f1 100644 --- a/substrate/primitives/trie/src/proof_size_extension.rs +++ 
b/substrate/primitives/trie/src/proof_size_extension.rs @@ -31,7 +31,7 @@ impl ProofSizeExt { ProofSizeExt(Box::new(recorder)) } - pub fn current_storage_proof_size(&self) -> u32 { + pub fn storage_proof_size(&self) -> u32 { self.0.estimate_encoded_size() as u32 } } From 205c34d4fe4761b3e71823ddcb0cd495802a463e Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Fri, 27 Oct 2023 15:02:58 +0200 Subject: [PATCH 47/61] u32->u64 --- .../parachain-system/src/validate_block/implementation.rs | 2 +- substrate/primitives/proof-size-provider/src/lib.rs | 2 +- substrate/primitives/trie/src/proof_size_extension.rs | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index 7a5bf370e6f4..151be27727eb 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -315,7 +315,7 @@ fn host_storage_clear(key: &[u8]) { with_externalities(|ext| ext.place_storage(key.to_vec(), None)) } -fn host_storage_proof_size() -> u32 { +fn host_storage_proof_size() -> u64 { recorder::with(|rec| rec.estimate_encoded_size()) .unwrap_or_default() .try_into() diff --git a/substrate/primitives/proof-size-provider/src/lib.rs b/substrate/primitives/proof-size-provider/src/lib.rs index 5b02737a45cd..29f263408c22 100644 --- a/substrate/primitives/proof-size-provider/src/lib.rs +++ b/substrate/primitives/proof-size-provider/src/lib.rs @@ -29,7 +29,7 @@ use sp_trie::proof_size_extension::ProofSizeExt; #[runtime_interface] pub trait StorageProofSize { /// Returns the current storage proof size. 
- fn storage_proof_size(&mut self) -> u32 { + fn storage_proof_size(&mut self) -> u64 { match self.extension::() { Some(ext) => ext.storage_proof_size(), None => 0, diff --git a/substrate/primitives/trie/src/proof_size_extension.rs b/substrate/primitives/trie/src/proof_size_extension.rs index dfb1ac4d60f1..02477ecef8d9 100644 --- a/substrate/primitives/trie/src/proof_size_extension.rs +++ b/substrate/primitives/trie/src/proof_size_extension.rs @@ -31,7 +31,7 @@ impl ProofSizeExt { ProofSizeExt(Box::new(recorder)) } - pub fn storage_proof_size(&self) -> u32 { - self.0.estimate_encoded_size() as u32 + pub fn storage_proof_size(&self) -> u64 { + self.0.estimate_encoded_size().try_into().unwrap_or_default() } } From ae410c7f3084e5c78a39f7cdac7cddc6880161d9 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Fri, 27 Oct 2023 15:21:22 +0200 Subject: [PATCH 48/61] fmt --- cumulus/pallets/parachain-system/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index e9d92e5eb133..d75c367b1814 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -63,7 +63,6 @@ std = [ "cumulus-pallet-parachain-system-proc-macro/std", "cumulus-primitives-core/std", "cumulus-primitives-parachain-inherent/std", - "sp-proof-size-provider/std", "environmental/std", "frame-support/std", "frame-system/std", @@ -75,6 +74,7 @@ std = [ "sp-externalities/std", "sp-inherents/std", "sp-io/std", + "sp-proof-size-provider/std", "sp-runtime/std", "sp-state-machine/std", "sp-std/std", From 14be7972ad2031ec03741d0deed3ac72d4471a0a Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Mon, 30 Oct 2023 09:48:41 +0100 Subject: [PATCH 49/61] Adjust naming to differentiate from trait --- Cargo.lock | 20 +++++++++++++++++++ Cargo.toml | 1 + cumulus/client/service/Cargo.toml | 2 +- cumulus/client/service/src/lib.rs | 2 +- 
cumulus/pallets/parachain-system/Cargo.toml | 4 ++-- .../src/validate_block/implementation.rs | 2 +- cumulus/test/client/Cargo.toml | 2 +- cumulus/test/client/src/lib.rs | 4 ++-- .../Cargo.toml | 2 +- .../src/lib.rs | 0 10 files changed, 30 insertions(+), 9 deletions(-) rename substrate/primitives/{proof-size-provider => proof-size-hostfunction}/Cargo.toml (94%) rename substrate/primitives/{proof-size-provider => proof-size-hostfunction}/src/lib.rs (100%) diff --git a/Cargo.lock b/Cargo.lock index 6c65b1392535..f6b2fed29421 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3569,6 +3569,7 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-core", + "sp-proof-size-hostfunction", "sp-runtime", "sp-transaction-pool", ] @@ -3629,6 +3630,7 @@ dependencies = [ "parity-scale-codec", "polkadot-parachain-primitives", "polkadot-runtime-parachains", + "rand 0.8.5", "sc-client-api", "scale-info", "sp-core", @@ -3636,6 +3638,7 @@ dependencies = [ "sp-inherents", "sp-io", "sp-keyring", + "sp-proof-size-hostfunction", "sp-runtime", "sp-state-machine", "sp-std", @@ -3644,6 +3647,7 @@ dependencies = [ "sp-version", "staging-xcm", "trie-db", + "trie-standardmap", ] [[package]] @@ -3964,6 +3968,7 @@ dependencies = [ "sp-inherents", "sp-io", "sp-keyring", + "sp-proof-size-hostfunction", "sp-runtime", "sp-timestamp", "substrate-test-client", @@ -15711,6 +15716,7 @@ dependencies = [ "sp-core", "sp-maybe-compressed-blob", "sp-runtime", + "sp-trie", "sp-version", "substrate-test-runtime", "substrate-test-runtime-client", @@ -17342,6 +17348,18 @@ dependencies = [ "regex", ] +[[package]] +name = "sp-proof-size-hostfunction" +version = "0.1.0" +dependencies = [ + "sp-core", + "sp-externalities", + "sp-io", + "sp-runtime-interface", + "sp-state-machine", + "sp-trie", +] + [[package]] name = "sp-rpc" version = "6.0.0" @@ -17407,6 +17425,7 @@ name = "sp-runtime-interface-proc-macro" version = "11.0.0" dependencies = [ "Inflector", + "expander 2.0.0", "proc-macro-crate", "proc-macro2", "quote", 
@@ -17617,6 +17636,7 @@ dependencies = [ "scale-info", "schnellru", "sp-core", + "sp-externalities", "sp-runtime", "sp-std", "thiserror", diff --git a/Cargo.toml b/Cargo.toml index 66271139dfd4..ac2ccaed4552 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -410,6 +410,7 @@ members = [ "substrate/primitives/npos-elections/fuzzer", "substrate/primitives/offchain", "substrate/primitives/panic-handler", + "substrate/primitives/proof-size-hostfunction", "substrate/primitives/rpc", "substrate/primitives/runtime-interface", "substrate/primitives/runtime-interface/proc-macro", diff --git a/cumulus/client/service/Cargo.toml b/cumulus/client/service/Cargo.toml index 207b7d44f567..8b9cd67e3b84 100644 --- a/cumulus/client/service/Cargo.toml +++ b/cumulus/client/service/Cargo.toml @@ -36,7 +36,7 @@ cumulus-client-consensus-common = { path = "../consensus/common" } cumulus-client-pov-recovery = { path = "../pov-recovery" } cumulus-client-network = { path = "../network" } cumulus-primitives-core = { path = "../../primitives/core" } -sp-proof-size-provider = { path = "../../../substrate/primitives/proof-size-provider" } +sp-proof-size-hostfunction = { path = "../../../substrate/primitives/proof-size-hostfunction" } cumulus-relay-chain-interface = { path = "../relay-chain-interface" } cumulus-relay-chain-inprocess-interface = { path = "../relay-chain-inprocess-interface" } cumulus-relay-chain-minimal-node = { path = "../relay-chain-minimal-node" } diff --git a/cumulus/client/service/src/lib.rs b/cumulus/client/service/src/lib.rs index ad851e05ead0..470c39b0b725 100644 --- a/cumulus/client/service/src/lib.rs +++ b/cumulus/client/service/src/lib.rs @@ -52,7 +52,7 @@ use sp_core::{traits::SpawnNamed, Decode}; use sp_runtime::traits::{Block as BlockT, BlockIdTo}; use std::{sync::Arc, time::Duration}; -pub use sp_proof_size_provider::storage_proof_size; +pub use sp_proof_size_hostfunction::storage_proof_size; // Given the sporadic nature of the explicit recovery operation and the // 
possibility to retry infinite times this value is more than enough. diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index d75c367b1814..1786f031636f 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -36,7 +36,7 @@ xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-feature cumulus-pallet-parachain-system-proc-macro = { path = "proc-macro", default-features = false } cumulus-primitives-core = { path = "../../primitives/core", default-features = false } cumulus-primitives-parachain-inherent = { path = "../../primitives/parachain-inherent", default-features = false } -sp-proof-size-provider = { path = "../../../substrate/primitives/proof-size-provider", default-features = false } +sp-proof-size-hostfunction = { path = "../../../substrate/primitives/proof-size-hostfunction", default-features = false } [dev-dependencies] assert_matches = "1.5" @@ -74,7 +74,7 @@ std = [ "sp-externalities/std", "sp-inherents/std", "sp-io/std", - "sp-proof-size-provider/std", + "sp-proof-size-hostfunction/std", "sp-runtime/std", "sp-state-machine/std", "sp-std/std", diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index 151be27727eb..93512eac5dcf 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -174,7 +174,7 @@ where .replace_implementation(host_default_child_storage_next_key), sp_io::offchain_index::host_set.replace_implementation(host_offchain_index_set), sp_io::offchain_index::host_clear.replace_implementation(host_offchain_index_clear), - sp_proof_size_provider::storage_proof_size::host_storage_proof_size + sp_proof_size_hostfunction::storage_proof_size::host_storage_proof_size .replace_implementation(host_storage_proof_size), ); diff 
--git a/cumulus/test/client/Cargo.toml b/cumulus/test/client/Cargo.toml index 063da06eaccb..58b108452e85 100644 --- a/cumulus/test/client/Cargo.toml +++ b/cumulus/test/client/Cargo.toml @@ -36,5 +36,5 @@ cumulus-test-runtime = { path = "../runtime" } cumulus-test-service = { path = "../service" } cumulus-test-relay-sproof-builder = { path = "../relay-sproof-builder" } cumulus-primitives-core = { path = "../../primitives/core" } -sp-proof-size-provider = { path = "../../../substrate/primitives/proof-size-provider" } +sp-proof-size-hostfunction = { path = "../../../substrate/primitives/proof-size-hostfunction" } cumulus-primitives-parachain-inherent = { path = "../../primitives/parachain-inherent" } diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs index f4436e4933b7..2b3c6cf6ef53 100644 --- a/cumulus/test/client/src/lib.rs +++ b/cumulus/test/client/src/lib.rs @@ -45,7 +45,7 @@ mod local_executor { impl sc_executor::NativeExecutionDispatch for LocalExecutor { type ExtendHostFunctions = - sp_proof_size_provider::storage_proof_size::HostFunctions; + sp_proof_size_hostfunction::storage_proof_size::HostFunctions; fn dispatch(method: &str, data: &[u8]) -> Option> { cumulus_test_runtime::api::dispatch(method, data) @@ -213,7 +213,7 @@ pub fn validate_block( let heap_pages = HeapAllocStrategy::Static { extra_pages: 1024 }; let executor = WasmExecutor::<( sp_io::SubstrateHostFunctions, - sp_proof_size_provider::storage_proof_size::HostFunctions, + sp_proof_size_hostfunction::storage_proof_size::HostFunctions, )>::builder() .with_execution_method(WasmExecutionMethod::default()) .with_max_runtime_instances(1) diff --git a/substrate/primitives/proof-size-provider/Cargo.toml b/substrate/primitives/proof-size-hostfunction/Cargo.toml similarity index 94% rename from substrate/primitives/proof-size-provider/Cargo.toml rename to substrate/primitives/proof-size-hostfunction/Cargo.toml index 0fc1241c7b00..12d485b8bc1e 100644 --- 
a/substrate/primitives/proof-size-provider/Cargo.toml +++ b/substrate/primitives/proof-size-hostfunction/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "sp-proof-size-provider" +name = "sp-proof-size-hostfunction" version = "0.1.0" authors.workspace = true edition.workspace = true diff --git a/substrate/primitives/proof-size-provider/src/lib.rs b/substrate/primitives/proof-size-hostfunction/src/lib.rs similarity index 100% rename from substrate/primitives/proof-size-provider/src/lib.rs rename to substrate/primitives/proof-size-hostfunction/src/lib.rs From 4d6f547b16dc2e0c2916fdd0f48000f86783ceb6 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Mon, 30 Oct 2023 10:01:34 +0100 Subject: [PATCH 50/61] Review comments --- .../parachain-system/src/validate_block/implementation.rs | 5 +---- substrate/bin/node/cli/src/service.rs | 3 +-- substrate/primitives/trie/src/lib.rs | 1 + substrate/primitives/trie/src/proof_size_extension.rs | 4 +++- 4 files changed, 6 insertions(+), 7 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index 93512eac5dcf..4a348482cc07 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -316,10 +316,7 @@ fn host_storage_clear(key: &[u8]) { } fn host_storage_proof_size() -> u64 { - recorder::with(|rec| rec.estimate_encoded_size()) - .unwrap_or_default() - .try_into() - .unwrap_or_default() + recorder::with(|rec| rec.estimate_encoded_size()).expect("Recorder is always set; qed") as _ } fn host_storage_root(version: StateVersion) -> Vec { diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs index fe3b47f11280..5a85f4cde0ae 100644 --- a/substrate/bin/node/cli/src/service.rs +++ b/substrate/bin/node/cli/src/service.rs @@ -174,11 +174,10 @@ pub fn new_partial( let executor = 
sc_service::new_native_or_wasm_executor(&config); let (client, backend, keystore_container, task_manager) = - sc_service::new_full_parts_record_import::( + sc_service::new_full_parts::( config, telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), executor, - false, )?; let client = Arc::new(client); diff --git a/substrate/primitives/trie/src/lib.rs b/substrate/primitives/trie/src/lib.rs index 4974aeb9b5c5..fd1320b3fbcb 100644 --- a/substrate/primitives/trie/src/lib.rs +++ b/substrate/primitives/trie/src/lib.rs @@ -168,6 +168,7 @@ pub trait TrieRecorderProvider { /// Type that is able to provide a proof size estimation. pub trait ProofSizeProvider { + /// Returns the storage proof size. fn estimate_encoded_size(&self) -> usize; } diff --git a/substrate/primitives/trie/src/proof_size_extension.rs b/substrate/primitives/trie/src/proof_size_extension.rs index 02477ecef8d9..c97f334494af 100644 --- a/substrate/primitives/trie/src/proof_size_extension.rs +++ b/substrate/primitives/trie/src/proof_size_extension.rs @@ -27,11 +27,13 @@ sp_externalities::decl_extension! { } impl ProofSizeExt { + /// Creates a new instance of [`ProofSizeExt`]. pub fn new(recorder: T) -> Self { ProofSizeExt(Box::new(recorder)) } + /// Returns the storage proof size. 
pub fn storage_proof_size(&self) -> u64 { - self.0.estimate_encoded_size().try_into().unwrap_or_default() + self.0.estimate_encoded_size() as _ } } From cdb04cedffed0e4866763ec24ea1698dc0a21fbe Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 31 Oct 2023 15:55:33 +0100 Subject: [PATCH 51/61] Move back to cumulus --- Cargo.lock | 30 +++++++++---------- Cargo.toml | 2 +- cumulus/client/service/Cargo.toml | 2 +- cumulus/client/service/src/lib.rs | 2 +- cumulus/pallets/parachain-system/Cargo.toml | 4 +-- .../src/validate_block/implementation.rs | 2 +- .../proof-size-hostfunction/Cargo.toml | 20 +++++++++++++ .../proof-size-hostfunction/src/lib.rs | 0 cumulus/test/client/Cargo.toml | 2 +- cumulus/test/client/src/lib.rs | 4 +-- .../proof-size-hostfunction/Cargo.toml | 20 ------------- 11 files changed, 44 insertions(+), 44 deletions(-) create mode 100644 cumulus/primitives/proof-size-hostfunction/Cargo.toml rename {substrate => cumulus}/primitives/proof-size-hostfunction/src/lib.rs (100%) delete mode 100644 substrate/primitives/proof-size-hostfunction/Cargo.toml diff --git a/Cargo.lock b/Cargo.lock index 8c74ed924396..15e115c690f0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3550,6 +3550,7 @@ dependencies = [ "cumulus-client-network", "cumulus-client-pov-recovery", "cumulus-primitives-core", + "cumulus-primitives-proof-size-hostfunction", "cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", "cumulus-relay-chain-minimal-node", @@ -3570,7 +3571,6 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-core", - "sp-proof-size-hostfunction", "sp-runtime", "sp-transaction-pool", ] @@ -3619,6 +3619,7 @@ dependencies = [ "cumulus-pallet-parachain-system-proc-macro", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", + "cumulus-primitives-proof-size-hostfunction", "cumulus-test-client", "cumulus-test-relay-sproof-builder", "environmental", @@ -3639,7 +3640,6 @@ dependencies = [ "sp-inherents", "sp-io", "sp-keyring", - 
"sp-proof-size-hostfunction", "sp-runtime", "sp-state-machine", "sp-std", @@ -3796,6 +3796,18 @@ dependencies = [ "tracing", ] +[[package]] +name = "cumulus-primitives-proof-size-hostfunction" +version = "0.1.0" +dependencies = [ + "sp-core", + "sp-externalities", + "sp-io", + "sp-runtime-interface", + "sp-state-machine", + "sp-trie", +] + [[package]] name = "cumulus-primitives-timestamp" version = "0.1.0" @@ -3949,6 +3961,7 @@ version = "0.1.0" dependencies = [ "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", + "cumulus-primitives-proof-size-hostfunction", "cumulus-test-relay-sproof-builder", "cumulus-test-runtime", "cumulus-test-service", @@ -3969,7 +3982,6 @@ dependencies = [ "sp-inherents", "sp-io", "sp-keyring", - "sp-proof-size-hostfunction", "sp-runtime", "sp-timestamp", "substrate-test-client", @@ -17349,18 +17361,6 @@ dependencies = [ "regex", ] -[[package]] -name = "sp-proof-size-hostfunction" -version = "0.1.0" -dependencies = [ - "sp-core", - "sp-externalities", - "sp-io", - "sp-runtime-interface", - "sp-state-machine", - "sp-trie", -] - [[package]] name = "sp-rpc" version = "6.0.0" diff --git a/Cargo.toml b/Cargo.toml index be0963366eb1..c8bd1abdc20d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -91,6 +91,7 @@ members = [ "cumulus/primitives/aura", "cumulus/primitives/core", "cumulus/primitives/parachain-inherent", + "cumulus/primitives/proof-size-hostfunction", "cumulus/primitives/timestamp", "cumulus/primitives/utility", "cumulus/test/client", @@ -410,7 +411,6 @@ members = [ "substrate/primitives/npos-elections/fuzzer", "substrate/primitives/offchain", "substrate/primitives/panic-handler", - "substrate/primitives/proof-size-hostfunction", "substrate/primitives/rpc", "substrate/primitives/runtime-interface", "substrate/primitives/runtime-interface/proc-macro", diff --git a/cumulus/client/service/Cargo.toml b/cumulus/client/service/Cargo.toml index 8b9cd67e3b84..71edc0c2b617 100644 --- a/cumulus/client/service/Cargo.toml +++ 
b/cumulus/client/service/Cargo.toml @@ -36,7 +36,7 @@ cumulus-client-consensus-common = { path = "../consensus/common" } cumulus-client-pov-recovery = { path = "../pov-recovery" } cumulus-client-network = { path = "../network" } cumulus-primitives-core = { path = "../../primitives/core" } -sp-proof-size-hostfunction = { path = "../../../substrate/primitives/proof-size-hostfunction" } +cumulus-primitives-proof-size-hostfunction = { path = "../../primitives/proof-size-hostfunction" } cumulus-relay-chain-interface = { path = "../relay-chain-interface" } cumulus-relay-chain-inprocess-interface = { path = "../relay-chain-inprocess-interface" } cumulus-relay-chain-minimal-node = { path = "../relay-chain-minimal-node" } diff --git a/cumulus/client/service/src/lib.rs b/cumulus/client/service/src/lib.rs index 470c39b0b725..687b53a514f7 100644 --- a/cumulus/client/service/src/lib.rs +++ b/cumulus/client/service/src/lib.rs @@ -52,7 +52,7 @@ use sp_core::{traits::SpawnNamed, Decode}; use sp_runtime::traits::{Block as BlockT, BlockIdTo}; use std::{sync::Arc, time::Duration}; -pub use sp_proof_size_hostfunction::storage_proof_size; +pub use cumulus_primitives_proof_size_hostfunction::storage_proof_size; // Given the sporadic nature of the explicit recovery operation and the // possibility to retry infinite times this value is more than enough. 
diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 1786f031636f..6a51cdad7ded 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -36,7 +36,7 @@ xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-feature cumulus-pallet-parachain-system-proc-macro = { path = "proc-macro", default-features = false } cumulus-primitives-core = { path = "../../primitives/core", default-features = false } cumulus-primitives-parachain-inherent = { path = "../../primitives/parachain-inherent", default-features = false } -sp-proof-size-hostfunction = { path = "../../../substrate/primitives/proof-size-hostfunction", default-features = false } +cumulus-primitives-proof-size-hostfunction = { path = "../../primitives/proof-size-hostfunction", default-features = false } [dev-dependencies] assert_matches = "1.5" @@ -74,7 +74,7 @@ std = [ "sp-externalities/std", "sp-inherents/std", "sp-io/std", - "sp-proof-size-hostfunction/std", + "cumulus-primitives-proof-size-hostfunction/std", "sp-runtime/std", "sp-state-machine/std", "sp-std/std", diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index 4a348482cc07..a07e079fec39 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -174,7 +174,7 @@ where .replace_implementation(host_default_child_storage_next_key), sp_io::offchain_index::host_set.replace_implementation(host_offchain_index_set), sp_io::offchain_index::host_clear.replace_implementation(host_offchain_index_clear), - sp_proof_size_hostfunction::storage_proof_size::host_storage_proof_size + cumulus_primitives_proof_size_hostfunction::storage_proof_size::host_storage_proof_size .replace_implementation(host_storage_proof_size), ); diff --git 
a/cumulus/primitives/proof-size-hostfunction/Cargo.toml b/cumulus/primitives/proof-size-hostfunction/Cargo.toml new file mode 100644 index 000000000000..a904e11a98d5 --- /dev/null +++ b/cumulus/primitives/proof-size-hostfunction/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "cumulus-primitives-proof-size-hostfunction" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" + +[dependencies] +sp-runtime-interface = { path = "../../../substrate/primitives/runtime-interface", default-features = false } +sp-externalities = { path = "../../../substrate/primitives/externalities", default-features = false } +sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } + +[dev-dependencies] +sp-state-machine = { path = "../../../substrate/primitives/state-machine" } +sp-core = { path = "../../../substrate/primitives/core" } +sp-io = { path = "../../../substrate/primitives/io" } + +[features] +default = [ "std" ] +std = [ "sp-externalities/std", "sp-runtime-interface/std", "sp-trie/std" ] diff --git a/substrate/primitives/proof-size-hostfunction/src/lib.rs b/cumulus/primitives/proof-size-hostfunction/src/lib.rs similarity index 100% rename from substrate/primitives/proof-size-hostfunction/src/lib.rs rename to cumulus/primitives/proof-size-hostfunction/src/lib.rs diff --git a/cumulus/test/client/Cargo.toml b/cumulus/test/client/Cargo.toml index 58b108452e85..6c30c968d188 100644 --- a/cumulus/test/client/Cargo.toml +++ b/cumulus/test/client/Cargo.toml @@ -36,5 +36,5 @@ cumulus-test-runtime = { path = "../runtime" } cumulus-test-service = { path = "../service" } cumulus-test-relay-sproof-builder = { path = "../relay-sproof-builder" } cumulus-primitives-core = { path = "../../primitives/core" } -sp-proof-size-hostfunction = { path = "../../../substrate/primitives/proof-size-hostfunction" } +cumulus-primitives-proof-size-hostfunction = { path = "../../primitives/proof-size-hostfunction" } 
cumulus-primitives-parachain-inherent = { path = "../../primitives/parachain-inherent" } diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs index 2b3c6cf6ef53..ee153e68fd55 100644 --- a/cumulus/test/client/src/lib.rs +++ b/cumulus/test/client/src/lib.rs @@ -45,7 +45,7 @@ mod local_executor { impl sc_executor::NativeExecutionDispatch for LocalExecutor { type ExtendHostFunctions = - sp_proof_size_hostfunction::storage_proof_size::HostFunctions; + cumulus_primitives_proof_size_hostfunction::storage_proof_size::HostFunctions; fn dispatch(method: &str, data: &[u8]) -> Option> { cumulus_test_runtime::api::dispatch(method, data) @@ -213,7 +213,7 @@ pub fn validate_block( let heap_pages = HeapAllocStrategy::Static { extra_pages: 1024 }; let executor = WasmExecutor::<( sp_io::SubstrateHostFunctions, - sp_proof_size_hostfunction::storage_proof_size::HostFunctions, + cumulus_primitives_proof_size_hostfunction::storage_proof_size::HostFunctions, )>::builder() .with_execution_method(WasmExecutionMethod::default()) .with_max_runtime_instances(1) diff --git a/substrate/primitives/proof-size-hostfunction/Cargo.toml b/substrate/primitives/proof-size-hostfunction/Cargo.toml deleted file mode 100644 index 12d485b8bc1e..000000000000 --- a/substrate/primitives/proof-size-hostfunction/Cargo.toml +++ /dev/null @@ -1,20 +0,0 @@ -[package] -name = "sp-proof-size-hostfunction" -version = "0.1.0" -authors.workspace = true -edition.workspace = true -license = "Apache-2.0" - -[dependencies] -sp-runtime-interface = { path = "../runtime-interface", default-features = false } -sp-externalities = { path = "../externalities", default-features = false } -sp-trie = { path = "../trie", default-features = false } - -[dev-dependencies] -sp-state-machine = { path = "../state-machine" } -sp-core = { path = "../core" } -sp-io = { path = "../io" } - -[features] -default = [ "std" ] -std = [ "sp-externalities/std", "sp-runtime-interface/std", "sp-trie/std" ] From 
416a507de99cf7552f8ba0c5c13fd51dbd76a2cb Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 31 Oct 2023 17:21:25 +0100 Subject: [PATCH 52/61] Instantiate extension only in block builder and import --- Cargo.lock | 1 + cumulus/pallets/parachain-system/Cargo.toml | 2 +- substrate/client/api/src/execution_extensions.rs | 5 ----- substrate/client/block-builder/Cargo.toml | 1 + substrate/client/block-builder/src/lib.rs | 5 +++++ .../client/service/src/client/call_executor.rs | 4 ++-- substrate/client/service/src/client/client.rs | 13 ++++++------- .../api/proc-macro/src/impl_runtime_apis.rs | 1 - substrate/primitives/api/src/lib.rs | 1 - 9 files changed, 16 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 15e115c690f0..8f3ee1bfe3ad 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14713,6 +14713,7 @@ dependencies = [ "sp-inherents", "sp-runtime", "sp-state-machine", + "sp-trie", "substrate-test-runtime-client", ] diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 6a51cdad7ded..2dbc2a165d4a 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -63,6 +63,7 @@ std = [ "cumulus-pallet-parachain-system-proc-macro/std", "cumulus-primitives-core/std", "cumulus-primitives-parachain-inherent/std", + "cumulus-primitives-proof-size-hostfunction/std", "environmental/std", "frame-support/std", "frame-system/std", @@ -74,7 +75,6 @@ std = [ "sp-externalities/std", "sp-inherents/std", "sp-io/std", - "cumulus-primitives-proof-size-hostfunction/std", "sp-runtime/std", "sp-state-machine/std", "sp-std/std", diff --git a/substrate/client/api/src/execution_extensions.rs b/substrate/client/api/src/execution_extensions.rs index dbf305bf8bcd..9c9577bb8b36 100644 --- a/substrate/client/api/src/execution_extensions.rs +++ b/substrate/client/api/src/execution_extensions.rs @@ -121,16 +121,11 @@ impl ExecutionExtensions { &self, block_hash: Block::Hash, 
block_number: NumberFor, - proof_recorder: Option<&Recorder>>, ) -> Extensions { let mut extensions = self.extensions_factory.read().extensions_for(block_hash, block_number); extensions.register(ReadRuntimeVersionExt::new(self.read_runtime_version.clone())); - if let Some(recorder) = proof_recorder { - extensions.register(sp_trie::proof_size_extension::ProofSizeExt::new(recorder.clone())); - }; - extensions } } diff --git a/substrate/client/block-builder/Cargo.toml b/substrate/client/block-builder/Cargo.toml index ff2f9635b7a2..61acf7b92254 100644 --- a/substrate/client/block-builder/Cargo.toml +++ b/substrate/client/block-builder/Cargo.toml @@ -21,6 +21,7 @@ sp-api = { path = "../../primitives/api" } sp-block-builder = { path = "../../primitives/block-builder" } sp-blockchain = { path = "../../primitives/blockchain" } sp-core = { path = "../../primitives/core" } +sp-trie = { path = "../../primitives/trie" } sp-inherents = { path = "../../primitives/inherents" } sp-runtime = { path = "../../primitives/runtime" } diff --git a/substrate/client/block-builder/src/lib.rs b/substrate/client/block-builder/src/lib.rs index 1878e7627480..4d7575554368 100644 --- a/substrate/client/block-builder/src/lib.rs +++ b/substrate/client/block-builder/src/lib.rs @@ -41,6 +41,7 @@ use sp_runtime::{ use sc_client_api::backend; pub use sp_block_builder::BlockBuilder as BlockBuilderApi; +use sp_trie::proof_size_extension::ProofSizeExt; /// Used as parameter to [`BlockBuilderProvider`] to express if proof recording should be enabled. 
/// @@ -173,6 +174,10 @@ where if record_proof.yes() { api.record_proof(); + let recorder = api + .proof_recorder() + .expect("Proof recording is enabled in the line above; qed."); + api.register_extension(ProofSizeExt::new(recorder)); } api.set_call_context(CallContext::Onchain); diff --git a/substrate/client/service/src/client/call_executor.rs b/substrate/client/service/src/client/call_executor.rs index 820ad99b2e80..86b5c7c61fcd 100644 --- a/substrate/client/service/src/client/call_executor.rs +++ b/substrate/client/service/src/client/call_executor.rs @@ -177,7 +177,7 @@ where let runtime_code = self.check_override(runtime_code, &state, at_hash)?.0; - let mut extensions = self.execution_extensions.extensions(at_hash, at_number, None); + let mut extensions = self.execution_extensions.extensions(at_hash, at_number); let mut sm = StateMachine::new( &state, @@ -290,7 +290,7 @@ where method, call_data, &runtime_code, - &mut self.execution_extensions.extensions(at_hash, at_number, None), + &mut self.execution_extensions.extensions(at_hash, at_number), ) .map_err(Into::into) } diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index 6121f1a628ec..b8aac2a46593 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -78,7 +78,7 @@ use sp_state_machine::{ ChildStorageCollection, KeyValueStates, KeyValueStorageLevel, StorageCollection, MAX_NESTED_TRIE_DEPTH, }; -use sp_trie::{CompactProof, MerkleValue, StorageProof}; +use sp_trie::{proof_size_extension::ProofSizeExt, CompactProof, MerkleValue, StorageProof}; use std::{ collections::{HashMap, HashSet}, marker::PhantomData, @@ -864,6 +864,10 @@ where if self.config.enable_import_proof_recording { runtime_api.record_proof(); + let recorder = runtime_api + .proof_recorder() + .expect("Proof recording is enabled in the line above; qed."); + runtime_api.register_extension(ProofSizeExt::new(recorder)); } 
runtime_api.execute_block( @@ -1747,16 +1751,11 @@ where fn initialize_extensions( &self, at: Block::Hash, - recorder: Option<&sp_trie::recorder::Recorder>>, extensions: &mut sp_externalities::Extensions, ) -> Result<(), sp_api::ApiError> { let block_number = self.expect_block_number_from_id(&BlockId::Hash(at))?; - extensions.merge(self.executor.execution_extensions().extensions( - at, - block_number, - recorder, - )); + extensions.merge(self.executor.execution_extensions().extensions(at, block_number)); Ok(()) } diff --git a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs index a2da116187f1..fd6110220af2 100644 --- a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -584,7 +584,6 @@ impl<'a> ApiRuntimeImplToApiRuntimeApiImpl<'a> { #crate_::CallApiAt::<__SrApiBlock__>::initialize_extensions( self.call, at, - self.recorder.as_ref(), &mut std::cell::RefCell::borrow_mut(&self.extensions), )?; diff --git a/substrate/primitives/api/src/lib.rs b/substrate/primitives/api/src/lib.rs index 4e4be4cf6466..c3f80acf09ae 100644 --- a/substrate/primitives/api/src/lib.rs +++ b/substrate/primitives/api/src/lib.rs @@ -661,7 +661,6 @@ pub trait CallApiAt { fn initialize_extensions( &self, at: Block::Hash, - recorder: Option<&sp_trie::recorder::Recorder>>, extensions: &mut Extensions, ) -> Result<(), ApiError>; } From 8014f90ce66f2d532ea67ba5a317c1dc6712f3e1 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Wed, 1 Nov 2023 13:50:14 +0100 Subject: [PATCH 53/61] Fix compilation & clippy --- substrate/client/api/src/execution_extensions.rs | 3 +-- substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs | 5 ++--- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/substrate/client/api/src/execution_extensions.rs b/substrate/client/api/src/execution_extensions.rs index 9c9577bb8b36..26d3ae73f69f 100644 --- 
a/substrate/client/api/src/execution_extensions.rs +++ b/substrate/client/api/src/execution_extensions.rs @@ -25,8 +25,7 @@ use parking_lot::RwLock; use sp_core::traits::{ReadRuntimeVersion, ReadRuntimeVersionExt}; use sp_externalities::{Extension, Extensions}; -use sp_runtime::traits::{Block as BlockT, HashingFor, NumberFor}; -use sp_trie::recorder::Recorder; +use sp_runtime::traits::{Block as BlockT, NumberFor}; use std::{marker::PhantomData, sync::Arc}; /// Generate the starting set of [`Extensions`]. diff --git a/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs b/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs index d164c7c3708a..a901f3039ffe 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs @@ -29,7 +29,7 @@ use sp_blockchain::{BlockStatus, CachedHeaderMetadata, HeaderBackend, HeaderMeta use sp_consensus::BlockOrigin; use sp_runtime::{ generic::SignedBlock, - traits::{Block as BlockT, HashingFor, Header as HeaderT}, + traits::{Block as BlockT, Header as HeaderT}, Justifications, }; use std::sync::Arc; @@ -235,10 +235,9 @@ impl> CallApiAt for ChainHeadMock fn initialize_extensions( &self, at: ::Hash, - recorder: Option<&sp_trie::recorder::Recorder>>, extensions: &mut sp_api::Extensions, ) -> Result<(), sp_api::ApiError> { - self.client.initialize_extensions(at, recorder, extensions) + self.client.initialize_extensions(at, extensions) } } From c69cf596e57069f5261ee2869ec94c804d236a0e Mon Sep 17 00:00:00 2001 From: command-bot <> Date: Wed, 1 Nov 2023 13:04:13 +0000 Subject: [PATCH 54/61] ".git/.scripts/commands/fmt/fmt.sh" --- cumulus/polkadot-parachain/src/service.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs index 787f6be71e17..95b46f6175eb 100644 --- a/cumulus/polkadot-parachain/src/service.rs +++ b/cumulus/polkadot-parachain/src/service.rs @@ 
-68,10 +68,8 @@ use substrate_prometheus_endpoint::Registry; use polkadot_primitives::CollatorPair; #[cfg(not(feature = "runtime-benchmarks"))] -type HostFunctions = ( - sp_io::SubstrateHostFunctions, - cumulus_client_service::storage_proof_size::HostFunctions, -); +type HostFunctions = + (sp_io::SubstrateHostFunctions, cumulus_client_service::storage_proof_size::HostFunctions); #[cfg(feature = "runtime-benchmarks")] type HostFunctions = ( From 21b388ae6d57a99d4255ba4116de07bf7ffb0c7d Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Wed, 1 Nov 2023 16:04:27 +0100 Subject: [PATCH 55/61] Remove sp-trie from rpc-spec-v2 --- Cargo.lock | 1 - substrate/client/rpc-spec-v2/Cargo.toml | 1 - 2 files changed, 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8f3ee1bfe3ad..7d6ee91fd82c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -15730,7 +15730,6 @@ dependencies = [ "sp-core", "sp-maybe-compressed-blob", "sp-runtime", - "sp-trie", "sp-version", "substrate-test-runtime", "substrate-test-runtime-client", diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml index 1b0bc264c1cc..1eaed65706e0 100644 --- a/substrate/client/rpc-spec-v2/Cargo.toml +++ b/substrate/client/rpc-spec-v2/Cargo.toml @@ -45,6 +45,5 @@ sp-consensus = { path = "../../primitives/consensus/common" } sp-maybe-compressed-blob = { path = "../../primitives/maybe-compressed-blob" } sc-block-builder = { path = "../block-builder" } sc-service = { path = "../service", features = ["test-helpers"]} -sp-trie = { path = "../../primitives/trie" } assert_matches = "1.3.0" pretty_assertions = "1.2.1" From c4b0e05692367078e0935c592a3fff8ba1b639f7 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Thu, 2 Nov 2023 14:12:09 +0100 Subject: [PATCH 56/61] Remove usages of host function --- .../src/validate_block/implementation.rs | 28 +++++-------------- .../parachain-template/node/src/command.rs | 3 +- .../parachain-template/node/src/service.rs | 5 +--- 
cumulus/polkadot-parachain/src/command.rs | 3 +- cumulus/polkadot-parachain/src/service.rs | 10 ++----- cumulus/test/service/src/lib.rs | 2 +- 6 files changed, 14 insertions(+), 37 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index a07e079fec39..ce3b724420f1 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -16,7 +16,7 @@ //! The actual implementation of the validate block functionality. -use super::{trie_cache, trie_recorder, MemoryOptimizedValidationParams}; +use super::{trie_cache, MemoryOptimizedValidationParams}; use cumulus_primitives_core::{ relay_chain::Hash as RHash, ParachainBlockData, PersistedValidationData, }; @@ -33,15 +33,13 @@ use sp_core::storage::{ChildInfo, StateVersion}; use sp_externalities::{set_and_run_with_externalities, Externalities}; use sp_io::KillStorageResult; use sp_runtime::traits::{Block as BlockT, Extrinsic, HashingFor, Header as HeaderT}; -use sp_std::{prelude::*, sync::Arc}; -use sp_trie::{MemoryDB, ProofSizeProvider, TrieRecorderProvider}; -use trie_recorder::SizeOnlyRecorderProvider; +use sp_std::prelude::*; +use sp_trie::MemoryDB; type TrieBackend = sp_state_machine::TrieBackend< MemoryDB>, HashingFor, trie_cache::CacheProvider>, - SizeOnlyRecorderProvider>, >; type Ext<'a, B> = sp_state_machine::Ext<'a, HashingFor, TrieBackend>; @@ -50,9 +48,6 @@ fn with_externalities R, R>(f: F) -> R { sp_externalities::with_externalities(f).expect("Environmental externalities not set.") } -/// Recorder instance to be used during this validate_block call. -environmental::environmental!(recorder: trait ProofSizeProvider); - /// Validate the given parachain block. 
/// /// This function is doing roughly the following: @@ -125,7 +120,6 @@ where sp_std::mem::drop(storage_proof); - let mut recorder = SizeOnlyRecorderProvider::new(); let cache_provider = trie_cache::CacheProvider::new(); // We use the storage root of the `parent_head` to ensure that it is the correct root. // This is already being done above while creating the in-memory db, but let's be paranoid!! @@ -134,7 +128,6 @@ where *parent_header.state_root(), cache_provider, ) - .with_recorder(recorder.clone()) .build(); let _guard = ( @@ -174,11 +167,9 @@ where .replace_implementation(host_default_child_storage_next_key), sp_io::offchain_index::host_set.replace_implementation(host_offchain_index_set), sp_io::offchain_index::host_clear.replace_implementation(host_offchain_index_clear), - cumulus_primitives_proof_size_hostfunction::storage_proof_size::host_storage_proof_size - .replace_implementation(host_storage_proof_size), ); - run_with_externalities_and_recorder::(&backend, &mut recorder, || { + run_with_externalities::(&backend, || { let relay_chain_proof = crate::RelayChainStateProof::new( PSC::SelfParaId::get(), inherent_data.validation_data.relay_parent_storage_root, @@ -199,7 +190,7 @@ where } }); - run_with_externalities_and_recorder::(&backend, &mut recorder, || { + run_with_externalities::(&backend, || { let head_data = HeadData(block.header().encode()); E::execute_block(block); @@ -275,15 +266,14 @@ fn validate_validation_data( } /// Run the given closure with the externalities set. 
-fn run_with_externalities_and_recorder R>( +fn run_with_externalities R>( backend: &TrieBackend, - recorder: &mut SizeOnlyRecorderProvider>, execute: F, ) -> R { let mut overlay = sp_state_machine::OverlayedChanges::default(); let mut ext = Ext::::new(&mut overlay, backend); - recorder::using(recorder, || set_and_run_with_externalities(&mut ext, || execute())) + set_and_run_with_externalities(&mut ext, || execute()) } fn host_storage_read(key: &[u8], value_out: &mut [u8], value_offset: u32) -> Option { @@ -315,10 +305,6 @@ fn host_storage_clear(key: &[u8]) { with_externalities(|ext| ext.place_storage(key.to_vec(), None)) } -fn host_storage_proof_size() -> u64 { - recorder::with(|rec| rec.estimate_encoded_size()).expect("Recorder is always set; qed") as _ -} - fn host_storage_root(version: StateVersion) -> Vec { with_externalities(|ext| ext.storage_root(version)) } diff --git a/cumulus/parachain-template/node/src/command.rs b/cumulus/parachain-template/node/src/command.rs index 489ed0634df3..4dd8463f6be6 100644 --- a/cumulus/parachain-template/node/src/command.rs +++ b/cumulus/parachain-template/node/src/command.rs @@ -1,6 +1,5 @@ use std::net::SocketAddr; -use cumulus_client_service::storage_proof_size::HostFunctions as ReclaimHostFunctions; use cumulus_primitives_core::ParaId; use frame_benchmarking_cli::{BenchmarkCmd, SUBSTRATE_REFERENCE_HARDWARE}; use log::info; @@ -184,7 +183,7 @@ pub fn run() -> Result<()> { match cmd { BenchmarkCmd::Pallet(cmd) => if cfg!(feature = "runtime-benchmarks") { - runner.sync_run(|config| cmd.run::(config)) + runner.sync_run(|config| cmd.run::(config)) } else { Err("Benchmarking wasn't enabled when building the node. \ You can enable it with `--features runtime-benchmarks`." 
diff --git a/cumulus/parachain-template/node/src/service.rs b/cumulus/parachain-template/node/src/service.rs index 4c7445b906e0..686bf31cca16 100644 --- a/cumulus/parachain-template/node/src/service.rs +++ b/cumulus/parachain-template/node/src/service.rs @@ -40,10 +40,7 @@ use substrate_prometheus_endpoint::Registry; pub struct ParachainNativeExecutor; impl sc_executor::NativeExecutionDispatch for ParachainNativeExecutor { - type ExtendHostFunctions = ( - cumulus_client_service::storage_proof_size::HostFunctions, - frame_benchmarking::benchmarking::HostFunctions, - ); + type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; fn dispatch(method: &str, data: &[u8]) -> Option> { parachain_template_runtime::api::dispatch(method, data) diff --git a/cumulus/polkadot-parachain/src/command.rs b/cumulus/polkadot-parachain/src/command.rs index 9b771d709890..870e45e1d551 100644 --- a/cumulus/polkadot-parachain/src/command.rs +++ b/cumulus/polkadot-parachain/src/command.rs @@ -19,7 +19,6 @@ use crate::{ cli::{Cli, RelayChainCli, Subcommand}, service::{new_partial, Block}, }; -use cumulus_client_service::storage_proof_size::HostFunctions as ReclaimHostFunctions; use cumulus_primitives_core::ParaId; use frame_benchmarking_cli::{BenchmarkCmd, SUBSTRATE_REFERENCE_HARDWARE}; use log::info; @@ -787,7 +786,7 @@ pub fn run() -> Result<()> { match cmd { BenchmarkCmd::Pallet(cmd) => if cfg!(feature = "runtime-benchmarks") { - runner.sync_run(|config| cmd.run::(config)) + runner.sync_run(|config| cmd.run::(config)) } else { Err("Benchmarking wasn't enabled when building the node. \ You can enable it with `--features runtime-benchmarks`." 
diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs index 95b46f6175eb..1a33d07c6750 100644 --- a/cumulus/polkadot-parachain/src/service.rs +++ b/cumulus/polkadot-parachain/src/service.rs @@ -68,15 +68,11 @@ use substrate_prometheus_endpoint::Registry; use polkadot_primitives::CollatorPair; #[cfg(not(feature = "runtime-benchmarks"))] -type HostFunctions = - (sp_io::SubstrateHostFunctions, cumulus_client_service::storage_proof_size::HostFunctions); +type HostFunctions = sp_io::SubstrateHostFunctions; #[cfg(feature = "runtime-benchmarks")] -type HostFunctions = ( - sp_io::SubstrateHostFunctions, - cumulus_client_service::storage_proof_size::HostFunctions, - frame_benchmarking::benchmarking::HostFunctions, -); +type HostFunctions = + (sp_io::SubstrateHostFunctions, frame_benchmarking::benchmarking::HostFunctions); type ParachainClient = TFullClient>; diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index 51eeea70e43c..ce5e0692b8d4 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -113,7 +113,7 @@ pub type AnnounceBlockFn = Arc>) + Send + Sync>; pub struct RuntimeExecutor; impl sc_executor::NativeExecutionDispatch for RuntimeExecutor { - type ExtendHostFunctions = cumulus_client_service::storage_proof_size::HostFunctions; + type ExtendHostFunctions = (); fn dispatch(method: &str, data: &[u8]) -> Option> { cumulus_test_runtime::api::dispatch(method, data) From 8739e618ba7272177938c523b5c0b9cd9356415a Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Fri, 3 Nov 2023 17:41:39 +0100 Subject: [PATCH 57/61] Update lib.rs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Bastian Köcher --- cumulus/primitives/proof-size-hostfunction/src/lib.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/cumulus/primitives/proof-size-hostfunction/src/lib.rs 
b/cumulus/primitives/proof-size-hostfunction/src/lib.rs index 29f263408c22..6c1093530033 100644 --- a/cumulus/primitives/proof-size-hostfunction/src/lib.rs +++ b/cumulus/primitives/proof-size-hostfunction/src/lib.rs @@ -30,10 +30,7 @@ use sp_trie::proof_size_extension::ProofSizeExt; pub trait StorageProofSize { /// Returns the current storage proof size. fn storage_proof_size(&mut self) -> u64 { - match self.extension::() { - Some(ext) => ext.storage_proof_size(), - None => 0, - } + self.extension::().map_or_else(0, |e| e.storage_proof_size()) } } From a7ea2a5e9e64145771150cf43d8139398819f55f Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Mon, 6 Nov 2023 18:28:59 +0100 Subject: [PATCH 58/61] Fix map_or --- cumulus/primitives/proof-size-hostfunction/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cumulus/primitives/proof-size-hostfunction/src/lib.rs b/cumulus/primitives/proof-size-hostfunction/src/lib.rs index 6c1093530033..0dbd98cdffb0 100644 --- a/cumulus/primitives/proof-size-hostfunction/src/lib.rs +++ b/cumulus/primitives/proof-size-hostfunction/src/lib.rs @@ -30,7 +30,7 @@ use sp_trie::proof_size_extension::ProofSizeExt; pub trait StorageProofSize { /// Returns the current storage proof size. 
fn storage_proof_size(&mut self) -> u64 { - self.extension::().map_or_else(0, |e| e.storage_proof_size()) + self.extension::().map_or(0, |e| e.storage_proof_size()) } } From 115db201350980b7e922fc0309574cf974b20e0c Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Fri, 10 Nov 2023 16:43:33 +0100 Subject: [PATCH 59/61] Use u64::MAX as proof recording disabled signal --- .../proof-size-hostfunction/src/lib.rs | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/cumulus/primitives/proof-size-hostfunction/src/lib.rs b/cumulus/primitives/proof-size-hostfunction/src/lib.rs index 0dbd98cdffb0..6da6235e585a 100644 --- a/cumulus/primitives/proof-size-hostfunction/src/lib.rs +++ b/cumulus/primitives/proof-size-hostfunction/src/lib.rs @@ -25,12 +25,17 @@ use sp_runtime_interface::runtime_interface; #[cfg(feature = "std")] use sp_trie::proof_size_extension::ProofSizeExt; +pub const PROOF_RECORDING_DISABLED: u64 = u64::MAX; + /// Interface that provides access to the current storage proof size. +/// +/// Should return the current storage proof size if [`ProofSizeExt`] is registered. Otherwise, needs +/// to return u64::MAX. #[runtime_interface] pub trait StorageProofSize { /// Returns the current storage proof size. 
fn storage_proof_size(&mut self) -> u64 { - self.extension::<ProofSizeExt>().map_or(0, |e| e.storage_proof_size()) + self.extension::<ProofSizeExt>().map_or(u64::MAX, |e| e.storage_proof_size()) } } @@ -43,7 +48,7 @@ mod tests { TrieDBMutBuilder, TrieMut, }; - use crate::storage_proof_size; + use crate::{storage_proof_size, PROOF_RECORDING_DISABLED}; const TEST_DATA: &[(&[u8], &[u8])] = &[(b"key1", &[1; 64]), (b"key2", &[2; 64])]; @@ -82,19 +87,21 @@ mod tests { assert_eq!(storage_proof_size::storage_proof_size(), 175); sp_io::storage::get(b"key2"); assert_eq!(storage_proof_size::storage_proof_size(), 275); + sp_io::storage::get(b"key2"); + assert_eq!(storage_proof_size::storage_proof_size(), 275); }); } #[test] - fn host_function_returns_zero_without_extension() { + fn host_function_returns_max_without_extension() { let (mut ext, _) = get_prepared_test_externalities(); ext.execute_with(|| { - assert_eq!(storage_proof_size::storage_proof_size(), 0); + assert_eq!(storage_proof_size::storage_proof_size(), PROOF_RECORDING_DISABLED); sp_io::storage::get(b"key1"); - assert_eq!(storage_proof_size::storage_proof_size(), 0); + assert_eq!(storage_proof_size::storage_proof_size(), PROOF_RECORDING_DISABLED); sp_io::storage::get(b"key2"); - assert_eq!(storage_proof_size::storage_proof_size(), 0); + assert_eq!(storage_proof_size::storage_proof_size(), PROOF_RECORDING_DISABLED); }); } } From 1035709a985451c857177e263e2d603f0e393e23 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Thu, 30 Nov 2023 13:17:33 +0100 Subject: [PATCH 60/61] Add crate description --- cumulus/primitives/proof-size-hostfunction/Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/cumulus/primitives/proof-size-hostfunction/Cargo.toml b/cumulus/primitives/proof-size-hostfunction/Cargo.toml index a904e11a98d5..83dad428d00f 100644 --- a/cumulus/primitives/proof-size-hostfunction/Cargo.toml +++ b/cumulus/primitives/proof-size-hostfunction/Cargo.toml @@ -3,6 +3,7 @@ name = "cumulus-primitives-proof-size-hostfunction" version 
= "0.1.0" authors.workspace = true edition.workspace = true +description = "Hostfunction exposing storage proof size to the runtime." license = "Apache-2.0" [dependencies] From 711e1f1214dd121f2245c8d12178e85fbd57b848 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Thu, 30 Nov 2023 14:05:46 +0100 Subject: [PATCH 61/61] Remove some unwanted proof recordings --- cumulus/parachain-template/node/src/service.rs | 3 +-- cumulus/polkadot-parachain/src/service.rs | 3 +-- cumulus/test/client/src/lib.rs | 17 +++++++---------- 3 files changed, 9 insertions(+), 14 deletions(-) diff --git a/cumulus/parachain-template/node/src/service.rs b/cumulus/parachain-template/node/src/service.rs index 118c27a2f372..43d16ee0d5b7 100644 --- a/cumulus/parachain-template/node/src/service.rs +++ b/cumulus/parachain-template/node/src/service.rs @@ -102,11 +102,10 @@ pub fn new_partial( let executor = ParachainExecutor::new_with_wasm_executor(wasm); let (client, backend, keystore_container, task_manager) = - sc_service::new_full_parts_record_import::<Block, RuntimeApi, _>( + sc_service::new_full_parts::<Block, RuntimeApi, _>( config, telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), executor, - true, )?; let client = Arc::new(client); diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs index 3d6c3cdada48..6280d86e9f9f 100644 --- a/cumulus/polkadot-parachain/src/service.rs +++ b/cumulus/polkadot-parachain/src/service.rs @@ -229,11 +229,10 @@ where .build(); let (client, backend, keystore_container, task_manager) = - sc_service::new_full_parts_record_import::<Block, RuntimeApi, _>( + sc_service::new_full_parts::<Block, RuntimeApi, _>( config, telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), executor, - true, )?; let client = Arc::new(client); diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs index 0593e7d60060..df63f683de6b 100644 --- a/cumulus/test/client/src/lib.rs +++ b/cumulus/test/client/src/lib.rs @@ -203,16 +203,13 @@ pub fn validate_block( let mut ext_ext = ext.ext(); let 
heap_pages = HeapAllocStrategy::Static { extra_pages: 1024 }; - let executor = WasmExecutor::<( - sp_io::SubstrateHostFunctions, - cumulus_primitives_proof_size_hostfunction::storage_proof_size::HostFunctions, - )>::builder() - .with_execution_method(WasmExecutionMethod::default()) - .with_max_runtime_instances(1) - .with_runtime_cache_size(2) - .with_onchain_heap_alloc_strategy(heap_pages) - .with_offchain_heap_alloc_strategy(heap_pages) - .build(); + let executor = WasmExecutor::<sp_io::SubstrateHostFunctions>::builder() + .with_execution_method(WasmExecutionMethod::default()) + .with_max_runtime_instances(1) + .with_runtime_cache_size(2) + .with_onchain_heap_alloc_strategy(heap_pages) + .with_offchain_heap_alloc_strategy(heap_pages) + .build(); executor .uncached_call(