diff --git a/Cargo.lock b/Cargo.lock
index de4471783abf..e7caa46cad83 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3495,6 +3495,7 @@ dependencies = [
"cumulus-client-network",
"cumulus-client-pov-recovery",
"cumulus-primitives-core",
+ "cumulus-primitives-proof-size-hostfunction",
"cumulus-relay-chain-inprocess-interface",
"cumulus-relay-chain-interface",
"cumulus-relay-chain-minimal-node",
@@ -3564,6 +3565,7 @@ dependencies = [
"cumulus-pallet-parachain-system-proc-macro",
"cumulus-primitives-core",
"cumulus-primitives-parachain-inherent",
+ "cumulus-primitives-proof-size-hostfunction",
"cumulus-test-client",
"cumulus-test-relay-sproof-builder",
"environmental",
@@ -3595,6 +3597,7 @@ dependencies = [
"sp-version",
"staging-xcm",
"trie-db",
+ "trie-standardmap",
]
[[package]]
@@ -3743,6 +3746,18 @@ dependencies = [
"tracing",
]
+[[package]]
+name = "cumulus-primitives-proof-size-hostfunction"
+version = "0.1.0"
+dependencies = [
+ "sp-core",
+ "sp-externalities 0.19.0",
+ "sp-io",
+ "sp-runtime-interface 17.0.0",
+ "sp-state-machine",
+ "sp-trie",
+]
+
[[package]]
name = "cumulus-primitives-timestamp"
version = "0.1.0"
@@ -3903,6 +3918,7 @@ version = "0.1.0"
dependencies = [
"cumulus-primitives-core",
"cumulus-primitives-parachain-inherent",
+ "cumulus-primitives-proof-size-hostfunction",
"cumulus-test-relay-sproof-builder",
"cumulus-test-runtime",
"cumulus-test-service",
@@ -14766,6 +14782,7 @@ dependencies = [
"sp-inherents",
"sp-runtime",
"sp-state-machine",
+ "sp-trie",
"substrate-test-runtime-client",
]
@@ -17612,6 +17629,7 @@ name = "sp-runtime-interface-proc-macro"
version = "11.0.0"
dependencies = [
"Inflector",
+ "expander 2.0.0",
"proc-macro-crate",
"proc-macro2",
"quote",
@@ -17864,6 +17882,7 @@ dependencies = [
"scale-info",
"schnellru",
"sp-core",
+ "sp-externalities 0.19.0",
"sp-runtime",
"sp-std 8.0.0",
"thiserror",
diff --git a/Cargo.toml b/Cargo.toml
index a295aca819cc..b585ceb5b441 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -98,6 +98,7 @@ members = [
"cumulus/primitives/aura",
"cumulus/primitives/core",
"cumulus/primitives/parachain-inherent",
+ "cumulus/primitives/proof-size-hostfunction",
"cumulus/primitives/timestamp",
"cumulus/primitives/utility",
"cumulus/test/client",
diff --git a/cumulus/client/service/Cargo.toml b/cumulus/client/service/Cargo.toml
index f80c65128d52..cc2f22e65659 100644
--- a/cumulus/client/service/Cargo.toml
+++ b/cumulus/client/service/Cargo.toml
@@ -38,6 +38,7 @@ cumulus-client-consensus-common = { path = "../consensus/common" }
cumulus-client-pov-recovery = { path = "../pov-recovery" }
cumulus-client-network = { path = "../network" }
cumulus-primitives-core = { path = "../../primitives/core" }
+cumulus-primitives-proof-size-hostfunction = { path = "../../primitives/proof-size-hostfunction" }
cumulus-relay-chain-interface = { path = "../relay-chain-interface" }
cumulus-relay-chain-inprocess-interface = { path = "../relay-chain-inprocess-interface" }
cumulus-relay-chain-minimal-node = { path = "../relay-chain-minimal-node" }
diff --git a/cumulus/client/service/src/lib.rs b/cumulus/client/service/src/lib.rs
index f8ebca11c8c1..950e59aff24e 100644
--- a/cumulus/client/service/src/lib.rs
+++ b/cumulus/client/service/src/lib.rs
@@ -52,6 +52,8 @@ use sp_core::{traits::SpawnNamed, Decode};
use sp_runtime::traits::{Block as BlockT, BlockIdTo, Header};
use std::{sync::Arc, time::Duration};
+pub use cumulus_primitives_proof_size_hostfunction::storage_proof_size;
+
// Given the sporadic nature of the explicit recovery operation and the
// possibility to retry infinite times this value is more than enough.
// In practice here we expect no more than one queued messages.
diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml
index 5600c95a2a60..cf6af4b4786d 100644
--- a/cumulus/pallets/parachain-system/Cargo.toml
+++ b/cumulus/pallets/parachain-system/Cargo.toml
@@ -39,11 +39,13 @@ xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-feature
cumulus-pallet-parachain-system-proc-macro = { path = "proc-macro", default-features = false }
cumulus-primitives-core = { path = "../../primitives/core", default-features = false }
cumulus-primitives-parachain-inherent = { path = "../../primitives/parachain-inherent", default-features = false }
+cumulus-primitives-proof-size-hostfunction = { path = "../../primitives/proof-size-hostfunction", default-features = false }
[dev-dependencies]
assert_matches = "1.5"
hex-literal = "0.4.1"
lazy_static = "1.4"
+trie-standardmap = "0.16.0"
rand = "0.8.5"
futures = "0.3.28"
@@ -65,6 +67,7 @@ std = [
"cumulus-pallet-parachain-system-proc-macro/std",
"cumulus-primitives-core/std",
"cumulus-primitives-parachain-inherent/std",
+ "cumulus-primitives-proof-size-hostfunction/std",
"environmental/std",
"frame-benchmarking/std",
"frame-support/std",
diff --git a/cumulus/pallets/parachain-system/src/validate_block/mod.rs b/cumulus/pallets/parachain-system/src/validate_block/mod.rs
index db149401638a..763a4cffd77f 100644
--- a/cumulus/pallets/parachain-system/src/validate_block/mod.rs
+++ b/cumulus/pallets/parachain-system/src/validate_block/mod.rs
@@ -26,6 +26,10 @@ mod tests;
#[doc(hidden)]
mod trie_cache;
+#[cfg(any(test, not(feature = "std")))]
+#[doc(hidden)]
+mod trie_recorder;
+
#[cfg(not(feature = "std"))]
#[doc(hidden)]
pub use bytes;
diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs
new file mode 100644
index 000000000000..e73aef70aa49
--- /dev/null
+++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs
@@ -0,0 +1,286 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Substrate.
+
+// Substrate is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Substrate is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus. If not, see <http://www.gnu.org/licenses/>.
+
+//! Provide a specialized trie-recorder and provider for use in validate-block.
+//!
+//! This file defines two main structs, [`SizeOnlyRecorder`] and
+//! [`SizeOnlyRecorderProvider`]. They are used to track the current
+//! proof-size without actually recording the accessed nodes themselves.
+
+use codec::Encode;
+
+use sp_std::{
+ cell::{RefCell, RefMut},
+ collections::{btree_map::BTreeMap, btree_set::BTreeSet},
+ rc::Rc,
+};
+use sp_trie::{NodeCodec, ProofSizeProvider, StorageProof};
+use trie_db::{Hasher, RecordedForKey, TrieAccess};
+
+/// A trie recorder that only keeps track of the proof size.
+///
+/// The internal size counting logic should align
+/// with [`sp_trie::recorder::Recorder`].
+pub(crate) struct SizeOnlyRecorder<'a, H: Hasher> {
+ seen_nodes: RefMut<'a, BTreeSet<H::Out>>,
+ encoded_size: RefMut<'a, usize>,
+ recorded_keys: RefMut<'a, BTreeMap<Rc<[u8]>, RecordedForKey>>,
+}
+
+impl<'a, H: trie_db::Hasher> trie_db::TrieRecorder<H::Out> for SizeOnlyRecorder<'a, H> {
+ fn record(&mut self, access: TrieAccess<'_, H::Out>) {
+ let mut encoded_size_update = 0;
+ match access {
+ TrieAccess::NodeOwned { hash, node_owned } =>
+ if self.seen_nodes.insert(hash) {
+ let node = node_owned.to_encoded::<NodeCodec<H>>();
+ encoded_size_update += node.encoded_size();
+ },
+ TrieAccess::EncodedNode { hash, encoded_node } =>
+ if self.seen_nodes.insert(hash) {
+ encoded_size_update += encoded_node.encoded_size();
+ },
+ TrieAccess::Value { hash, value, full_key } => {
+ if self.seen_nodes.insert(hash) {
+ encoded_size_update += value.encoded_size();
+ }
+ self.recorded_keys
+ .entry(full_key.into())
+ .and_modify(|e| *e = RecordedForKey::Value)
+ .or_insert_with(|| RecordedForKey::Value);
+ },
+ TrieAccess::Hash { full_key } => {
+ self.recorded_keys
+ .entry(full_key.into())
+ .or_insert_with(|| RecordedForKey::Hash);
+ },
+ TrieAccess::NonExisting { full_key } => {
+ self.recorded_keys
+ .entry(full_key.into())
+ .and_modify(|e| *e = RecordedForKey::Value)
+ .or_insert_with(|| RecordedForKey::Value);
+ },
+ TrieAccess::InlineValue { full_key } => {
+ self.recorded_keys
+ .entry(full_key.into())
+ .and_modify(|e| *e = RecordedForKey::Value)
+ .or_insert_with(|| RecordedForKey::Value);
+ },
+ };
+
+ *self.encoded_size += encoded_size_update;
+ }
+
+ fn trie_nodes_recorded_for_key(&self, key: &[u8]) -> RecordedForKey {
+ self.recorded_keys.get(key).copied().unwrap_or(RecordedForKey::None)
+ }
+}
+
+#[derive(Clone)]
+pub(crate) struct SizeOnlyRecorderProvider<H: Hasher> {
+ seen_nodes: Rc<RefCell<BTreeSet<H::Out>>>,
+ encoded_size: Rc<RefCell<usize>>,
+ recorded_keys: Rc<RefCell<BTreeMap<Rc<[u8]>, RecordedForKey>>>,
+}
+
+impl<H: Hasher> SizeOnlyRecorderProvider<H> {
+ pub fn new() -> Self {
+ Self {
+ seen_nodes: Default::default(),
+ encoded_size: Default::default(),
+ recorded_keys: Default::default(),
+ }
+ }
+}
+
+impl<H: trie_db::Hasher> sp_trie::TrieRecorderProvider<H> for SizeOnlyRecorderProvider<H> {
+ type Recorder<'a> = SizeOnlyRecorder<'a, H> where H: 'a;
+
+ fn drain_storage_proof(self) -> Option<StorageProof> {
+ None
+ }
+
+ fn as_trie_recorder(&self, _storage_root: H::Out) -> Self::Recorder<'_> {
+ SizeOnlyRecorder {
+ encoded_size: self.encoded_size.borrow_mut(),
+ seen_nodes: self.seen_nodes.borrow_mut(),
+ recorded_keys: self.recorded_keys.borrow_mut(),
+ }
+ }
+}
+
+impl<H: trie_db::Hasher> ProofSizeProvider for SizeOnlyRecorderProvider<H> {
+ fn estimate_encoded_size(&self) -> usize {
+ *self.encoded_size.borrow()
+ }
+}
+
+// This is safe here since we are single-threaded in WASM
+unsafe impl<H: Hasher> Send for SizeOnlyRecorderProvider<H> {}
+unsafe impl<H: Hasher> Sync for SizeOnlyRecorderProvider<H> {}
+
+#[cfg(test)]
+mod tests {
+ use rand::Rng;
+ use sp_trie::{
+ cache::{CacheSize, SharedTrieCache},
+ MemoryDB, ProofSizeProvider, TrieRecorderProvider,
+ };
+ use trie_db::{Trie, TrieDBBuilder, TrieDBMutBuilder, TrieHash, TrieMut, TrieRecorder};
+ use trie_standardmap::{Alphabet, StandardMap, ValueMode};
+
+ use super::*;
+
+ type Recorder = sp_trie::recorder::Recorder<sp_core::Blake2Hasher>;
+
+ fn create_trie() -> (
+ sp_trie::MemoryDB<sp_core::Blake2Hasher>,
+ TrieHash<sp_trie::LayoutV1<sp_core::Blake2Hasher>>,
+ Vec<(Vec<u8>, Vec<u8>)>,
+ ) {
+ let mut db = MemoryDB::default();
+ let mut root = Default::default();
+
+ let mut seed = Default::default();
+ let test_data: Vec<(Vec<u8>, Vec<u8>)> = StandardMap {
+ alphabet: Alphabet::Low,
+ min_key: 16,
+ journal_key: 0,
+ value_mode: ValueMode::Random,
+ count: 1000,
+ }
+ .make_with(&mut seed)
+ .into_iter()
+ .map(|(k, v)| {
+ // Double the length so we end up with some values of 2 bytes and some of 64
+ let v = [v.clone(), v].concat();
+ (k, v)
+ })
+ .collect();
+
+ // Fill database with values
+ {
+ let mut trie = TrieDBMutBuilder::<sp_trie::LayoutV1<sp_core::Blake2Hasher>>::new(
+ &mut db, &mut root,
+ )
+ .build();
+ for (k, v) in &test_data {
+ trie.insert(k, v).expect("Inserts data");
+ }
+ }
+
+ (db, root, test_data)
+ }
+
+ #[test]
+ fn recorder_equivalence_cache() {
+ let (db, root, test_data) = create_trie();
+
+ let mut rng = rand::thread_rng();
+ for _ in 1..10 {
+ let reference_recorder = Recorder::default();
+ let recorder_for_test: SizeOnlyRecorderProvider<sp_core::Blake2Hasher> =
+ SizeOnlyRecorderProvider::new();
+ let reference_cache: SharedTrieCache<sp_core::Blake2Hasher> =
+ SharedTrieCache::new(CacheSize::new(1024 * 5));
+ let cache_for_test: SharedTrieCache<sp_core::Blake2Hasher> =
+ SharedTrieCache::new(CacheSize::new(1024 * 5));
+ {
+ let local_cache = cache_for_test.local_cache();
+ let mut trie_cache_for_reference = local_cache.as_trie_db_cache(root);
+ let mut reference_trie_recorder = reference_recorder.as_trie_recorder(root);
+ let reference_trie =
+ TrieDBBuilder::<sp_trie::LayoutV1<sp_core::Blake2Hasher>>::new(&db, &root)
+ .with_recorder(&mut reference_trie_recorder)
+ .with_cache(&mut trie_cache_for_reference)
+ .build();
+
+ let local_cache_for_test = reference_cache.local_cache();
+ let mut trie_cache_for_test = local_cache_for_test.as_trie_db_cache(root);
+ let mut trie_recorder_under_test = recorder_for_test.as_trie_recorder(root);
+ let test_trie =
+ TrieDBBuilder::<sp_trie::LayoutV1<sp_core::Blake2Hasher>>::new(&db, &root)
+ .with_recorder(&mut trie_recorder_under_test)
+ .with_cache(&mut trie_cache_for_test)
+ .build();
+
+ // Access random values from the test data
+ for _ in 0..100 {
+ let index: usize = rng.gen_range(0..test_data.len());
+ test_trie.get(&test_data[index].0).unwrap().unwrap();
+ reference_trie.get(&test_data[index].0).unwrap().unwrap();
+ }
+
+ // Check that we have the same nodes recorded for both recorders
+ for (key, _) in test_data.iter() {
+ let reference = reference_trie_recorder.trie_nodes_recorded_for_key(key);
+ let test_value = trie_recorder_under_test.trie_nodes_recorded_for_key(key);
+ assert_eq!(format!("{:?}", reference), format!("{:?}", test_value));
+ }
+ }
+
+ // Check that we have the same size recorded for both recorders
+ assert_eq!(
+ reference_recorder.estimate_encoded_size(),
+ recorder_for_test.estimate_encoded_size()
+ );
+ }
+ }
+
+ #[test]
+ fn recorder_equivalence_no_cache() {
+ let (db, root, test_data) = create_trie();
+
+ let mut rng = rand::thread_rng();
+ for _ in 1..10 {
+ let reference_recorder = Recorder::default();
+ let recorder_for_test: SizeOnlyRecorderProvider<sp_core::Blake2Hasher> =
+ SizeOnlyRecorderProvider::new();
+ {
+ let mut reference_trie_recorder = reference_recorder.as_trie_recorder(root);
+ let reference_trie =
+ TrieDBBuilder::<sp_trie::LayoutV1<sp_core::Blake2Hasher>>::new(&db, &root)
+ .with_recorder(&mut reference_trie_recorder)
+ .build();
+
+ let mut trie_recorder_under_test = recorder_for_test.as_trie_recorder(root);
+ let test_trie =
+ TrieDBBuilder::<sp_trie::LayoutV1<sp_core::Blake2Hasher>>::new(&db, &root)
+ .with_recorder(&mut trie_recorder_under_test)
+ .build();
+
+ for _ in 0..200 {
+ let index: usize = rng.gen_range(0..test_data.len());
+ test_trie.get(&test_data[index].0).unwrap().unwrap();
+ reference_trie.get(&test_data[index].0).unwrap().unwrap();
+ }
+
+ // Check that we have the same nodes recorded for both recorders
+ for (key, _) in test_data.iter() {
+ let reference = reference_trie_recorder.trie_nodes_recorded_for_key(key);
+ let test_value = trie_recorder_under_test.trie_nodes_recorded_for_key(key);
+ assert_eq!(format!("{:?}", reference), format!("{:?}", test_value));
+ }
+ }
+
+ // Check that we have the same size recorded for both recorders
+ assert_eq!(
+ reference_recorder.estimate_encoded_size(),
+ recorder_for_test.estimate_encoded_size()
+ );
+ }
+ }
+}
diff --git a/cumulus/primitives/proof-size-hostfunction/Cargo.toml b/cumulus/primitives/proof-size-hostfunction/Cargo.toml
new file mode 100644
index 000000000000..83dad428d00f
--- /dev/null
+++ b/cumulus/primitives/proof-size-hostfunction/Cargo.toml
@@ -0,0 +1,21 @@
+[package]
+name = "cumulus-primitives-proof-size-hostfunction"
+version = "0.1.0"
+authors.workspace = true
+edition.workspace = true
+description = "Hostfunction exposing storage proof size to the runtime."
+license = "Apache-2.0"
+
+[dependencies]
+sp-runtime-interface = { path = "../../../substrate/primitives/runtime-interface", default-features = false }
+sp-externalities = { path = "../../../substrate/primitives/externalities", default-features = false }
+sp-trie = { path = "../../../substrate/primitives/trie", default-features = false }
+
+[dev-dependencies]
+sp-state-machine = { path = "../../../substrate/primitives/state-machine" }
+sp-core = { path = "../../../substrate/primitives/core" }
+sp-io = { path = "../../../substrate/primitives/io" }
+
+[features]
+default = [ "std" ]
+std = [ "sp-externalities/std", "sp-runtime-interface/std", "sp-trie/std" ]
diff --git a/cumulus/primitives/proof-size-hostfunction/src/lib.rs b/cumulus/primitives/proof-size-hostfunction/src/lib.rs
new file mode 100644
index 000000000000..6da6235e585a
--- /dev/null
+++ b/cumulus/primitives/proof-size-hostfunction/src/lib.rs
@@ -0,0 +1,107 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus. If not, see <http://www.gnu.org/licenses/>.
+
+//! Tools for reclaiming PoV weight in parachain runtimes.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+use sp_externalities::ExternalitiesExt;
+
+use sp_runtime_interface::runtime_interface;
+
+#[cfg(feature = "std")]
+use sp_trie::proof_size_extension::ProofSizeExt;
+
+pub const PROOF_RECORDING_DISABLED: u64 = u64::MAX;
+
+/// Interface that provides access to the current storage proof size.
+///
+/// Should return the current storage proof size if [`ProofSizeExt`] is registered. Otherwise, needs
+/// to return u64::MAX.
+#[runtime_interface]
+pub trait StorageProofSize {
+ /// Returns the current storage proof size.
+ fn storage_proof_size(&mut self) -> u64 {
+ self.extension::<ProofSizeExt>().map_or(u64::MAX, |e| e.storage_proof_size())
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use sp_core::Blake2Hasher;
+ use sp_state_machine::TestExternalities;
+ use sp_trie::{
+ proof_size_extension::ProofSizeExt, recorder::Recorder, LayoutV1, PrefixedMemoryDB,
+ TrieDBMutBuilder, TrieMut,
+ };
+
+ use crate::{storage_proof_size, PROOF_RECORDING_DISABLED};
+
+ const TEST_DATA: &[(&[u8], &[u8])] = &[(b"key1", &[1; 64]), (b"key2", &[2; 64])];
+
+ type TestLayout = LayoutV1<Blake2Hasher>;
+
+ fn get_prepared_test_externalities() -> (TestExternalities<Blake2Hasher>, Recorder<Blake2Hasher>)
+ {
+ let mut db = PrefixedMemoryDB::default();
+ let mut root = Default::default();
+
+ {
+ let mut trie = TrieDBMutBuilder::<TestLayout>::new(&mut db, &mut root).build();
+ for (k, v) in TEST_DATA {
+ trie.insert(k, v).expect("Inserts data");
+ }
+ }
+
+ let recorder: sp_trie::recorder::Recorder<Blake2Hasher> = Default::default();
+ let trie_backend = sp_state_machine::TrieBackendBuilder::new(db, root)
+ .with_recorder(recorder.clone())
+ .build();
+
+ let mut ext: TestExternalities<Blake2Hasher> = TestExternalities::default();
+ ext.backend = trie_backend;
+ (ext, recorder)
+ }
+
+ #[test]
+ fn host_function_returns_size_from_recorder() {
+ let (mut ext, recorder) = get_prepared_test_externalities();
+ ext.register_extension(ProofSizeExt::new(recorder));
+
+ ext.execute_with(|| {
+ assert_eq!(storage_proof_size::storage_proof_size(), 0);
+ sp_io::storage::get(b"key1");
+ assert_eq!(storage_proof_size::storage_proof_size(), 175);
+ sp_io::storage::get(b"key2");
+ assert_eq!(storage_proof_size::storage_proof_size(), 275);
+ sp_io::storage::get(b"key2");
+ assert_eq!(storage_proof_size::storage_proof_size(), 275);
+ });
+ }
+
+ #[test]
+ fn host_function_returns_max_without_extension() {
+ let (mut ext, _) = get_prepared_test_externalities();
+
+ ext.execute_with(|| {
+ assert_eq!(storage_proof_size::storage_proof_size(), PROOF_RECORDING_DISABLED);
+ sp_io::storage::get(b"key1");
+ assert_eq!(storage_proof_size::storage_proof_size(), PROOF_RECORDING_DISABLED);
+ sp_io::storage::get(b"key2");
+ assert_eq!(storage_proof_size::storage_proof_size(), PROOF_RECORDING_DISABLED);
+ });
+ }
+}
diff --git a/cumulus/test/client/Cargo.toml b/cumulus/test/client/Cargo.toml
index b760b796ec9a..6ab873d320c8 100644
--- a/cumulus/test/client/Cargo.toml
+++ b/cumulus/test/client/Cargo.toml
@@ -36,6 +36,7 @@ cumulus-test-runtime = { path = "../runtime" }
cumulus-test-service = { path = "../service" }
cumulus-test-relay-sproof-builder = { path = "../relay-sproof-builder" }
cumulus-primitives-core = { path = "../../primitives/core" }
+cumulus-primitives-proof-size-hostfunction = { path = "../../primitives/proof-size-hostfunction" }
cumulus-primitives-parachain-inherent = { path = "../../primitives/parachain-inherent" }
[features]
diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs
index a3c79158f492..df63f683de6b 100644
--- a/cumulus/test/client/src/lib.rs
+++ b/cumulus/test/client/src/lib.rs
@@ -44,7 +44,8 @@ mod local_executor {
pub struct LocalExecutor;
impl sc_executor::NativeExecutionDispatch for LocalExecutor {
- type ExtendHostFunctions = ();
+ type ExtendHostFunctions =
+ cumulus_primitives_proof_size_hostfunction::storage_proof_size::HostFunctions;
fn dispatch(method: &str, data: &[u8]) -> Option<Vec<u8>> {
cumulus_test_runtime::api::dispatch(method, data)
diff --git a/cumulus/test/service/benches/block_import.rs b/cumulus/test/service/benches/block_import.rs
index 254e03b9263a..9d6485d74c59 100644
--- a/cumulus/test/service/benches/block_import.rs
+++ b/cumulus/test/service/benches/block_import.rs
@@ -24,7 +24,7 @@ use core::time::Duration;
use cumulus_primitives_core::ParaId;
use sp_api::{Core, ProvideRuntimeApi};
-use sp_keyring::Sr25519Keyring::Alice;
+use sp_keyring::Sr25519Keyring::{Alice, Bob};
use cumulus_test_service::bench_utils as utils;
@@ -32,51 +32,69 @@ fn benchmark_block_import(c: &mut Criterion) {
sp_tracing::try_init_simple();
let runtime = tokio::runtime::Runtime::new().expect("creating tokio runtime doesn't fail; qed");
- let para_id = ParaId::from(100);
+
+ let para_id = ParaId::from(cumulus_test_runtime::PARACHAIN_ID);
let tokio_handle = runtime.handle();
// Create enough accounts to fill the block with transactions.
// Each account should only be included in one transfer.
let (src_accounts, dst_accounts, account_ids) = utils::create_benchmark_accounts();
- let alice = runtime.block_on(
- cumulus_test_service::TestNodeBuilder::new(para_id, tokio_handle.clone(), Alice)
+ for bench_parameters in &[(true, Alice), (false, Bob)] {
+ let node = runtime.block_on(
+ cumulus_test_service::TestNodeBuilder::new(
+ para_id,
+ tokio_handle.clone(),
+ bench_parameters.1,
+ )
// Preload all accounts with funds for the transfers
- .endowed_accounts(account_ids)
+ .endowed_accounts(account_ids.clone())
+ .import_proof_recording(bench_parameters.0)
.build(),
- );
-
- let client = alice.client;
-
- let (max_transfer_count, extrinsics) =
- utils::create_benchmarking_transfer_extrinsics(&client, &src_accounts, &dst_accounts);
-
- let parent_hash = client.usage_info().chain.best_hash;
- let mut block_builder = BlockBuilderBuilder::new(&*client)
- .on_parent_block(parent_hash)
- .fetch_parent_block_number(&*client)
- .unwrap()
- .build()
- .unwrap();
- for extrinsic in extrinsics {
- block_builder.push(extrinsic).unwrap();
- }
- let benchmark_block = block_builder.build().unwrap();
-
- let mut group = c.benchmark_group("Block import");
- group.sample_size(20);
- group.measurement_time(Duration::from_secs(120));
- group.throughput(Throughput::Elements(max_transfer_count as u64));
-
- group.bench_function(format!("(transfers = {}) block import", max_transfer_count), |b| {
- b.iter_batched(
- || benchmark_block.block.clone(),
- |block| {
- client.runtime_api().execute_block(parent_hash, block).unwrap();
+ );
+
+ let client = node.client;
+ let backend = node.backend;
+
+ let (max_transfer_count, extrinsics) =
+ utils::create_benchmarking_transfer_extrinsics(&client, &src_accounts, &dst_accounts);
+
+ let parent_hash = client.usage_info().chain.best_hash;
+ let mut block_builder = BlockBuilderBuilder::new(&*client)
+ .on_parent_block(parent_hash)
+ .fetch_parent_block_number(&*client)
+ .unwrap()
+ .build()
+ .unwrap();
+ for extrinsic in extrinsics {
+ block_builder.push(extrinsic).unwrap();
+ }
+ let benchmark_block = block_builder.build().unwrap();
+
+ let mut group = c.benchmark_group("Block import");
+ group.sample_size(20);
+ group.measurement_time(Duration::from_secs(120));
+ group.throughput(Throughput::Elements(max_transfer_count as u64));
+
+ group.bench_function(
+ format!(
+ "(transfers = {max_transfer_count}, proof_recording = {}) block import",
+ bench_parameters.0
+ ),
+ |b| {
+ b.iter_batched(
+ || {
+ backend.reset_trie_cache();
+ benchmark_block.block.clone()
+ },
+ |block| {
+ client.runtime_api().execute_block(parent_hash, block).unwrap();
+ },
+ BatchSize::SmallInput,
+ )
},
- BatchSize::SmallInput,
- )
- });
+ );
+ }
}
criterion_group!(benches, benchmark_block_import);
diff --git a/cumulus/test/service/benches/block_import_glutton.rs b/cumulus/test/service/benches/block_import_glutton.rs
index aeaf0722e724..6295fd68286b 100644
--- a/cumulus/test/service/benches/block_import_glutton.rs
+++ b/cumulus/test/service/benches/block_import_glutton.rs
@@ -27,7 +27,7 @@ use core::time::Duration;
use cumulus_primitives_core::ParaId;
use sc_block_builder::BlockBuilderBuilder;
-use sp_keyring::Sr25519Keyring::Alice;
+use sp_keyring::Sr25519Keyring::{Alice, Bob, Charlie, Ferdie};
use cumulus_test_service::bench_utils as utils;
@@ -38,17 +38,29 @@ fn benchmark_block_import(c: &mut Criterion) {
let para_id = ParaId::from(100);
let tokio_handle = runtime.handle();
- let alice = runtime.block_on(
- cumulus_test_service::TestNodeBuilder::new(para_id, tokio_handle.clone(), Alice).build(),
- );
- let client = alice.client;
+ let mut initialize_glutton_pallet = true;
+ for (compute_ratio, storage_ratio, proof_on_import, keyring_identity) in &[
+ (One::one(), Zero::zero(), true, Alice),
+ (One::one(), One::one(), true, Bob),
+ (One::one(), Zero::zero(), false, Charlie),
+ (One::one(), One::one(), false, Ferdie),
+ ] {
+ let node = runtime.block_on(
+ cumulus_test_service::TestNodeBuilder::new(
+ para_id,
+ tokio_handle.clone(),
+ *keyring_identity,
+ )
+ .import_proof_recording(*proof_on_import)
+ .build(),
+ );
+ let client = node.client;
+ let backend = node.backend;
- let mut group = c.benchmark_group("Block import");
- group.sample_size(20);
- group.measurement_time(Duration::from_secs(120));
+ let mut group = c.benchmark_group("Block import");
+ group.sample_size(20);
+ group.measurement_time(Duration::from_secs(120));
- let mut initialize_glutton_pallet = true;
- for (compute_ratio, storage_ratio) in &[(One::one(), Zero::zero()), (One::one(), One::one())] {
let block = utils::set_glutton_parameters(
&client,
initialize_glutton_pallet,
@@ -82,7 +94,10 @@ fn benchmark_block_import(c: &mut Criterion) {
),
|b| {
b.iter_batched(
- || benchmark_block.block.clone(),
+ || {
+ backend.reset_trie_cache();
+ benchmark_block.block.clone()
+ },
|block| {
client.runtime_api().execute_block(parent_hash, block).unwrap();
},
diff --git a/cumulus/test/service/benches/validate_block.rs b/cumulus/test/service/benches/validate_block.rs
index 11a7c4376d4c..a614863803e0 100644
--- a/cumulus/test/service/benches/validate_block.rs
+++ b/cumulus/test/service/benches/validate_block.rs
@@ -18,7 +18,9 @@
use codec::{Decode, Encode};
use core::time::Duration;
use criterion::{criterion_group, criterion_main, BatchSize, Criterion, Throughput};
-use cumulus_primitives_core::{relay_chain::AccountId, PersistedValidationData, ValidationParams};
+use cumulus_primitives_core::{
+ relay_chain::AccountId, ParaId, PersistedValidationData, ValidationParams,
+};
use cumulus_test_client::{
generate_extrinsic_with_pair, BuildParachainBlockData, InitBlockBuilder, TestClientBuilder,
ValidationResult,
@@ -83,6 +85,7 @@ fn benchmark_block_validation(c: &mut Criterion) {
// Each account should only be included in one transfer.
let (src_accounts, dst_accounts, account_ids) = utils::create_benchmark_accounts();
+ let para_id = ParaId::from(cumulus_test_runtime::PARACHAIN_ID);
let mut test_client_builder = TestClientBuilder::with_default_backend();
let genesis_init = test_client_builder.genesis_init_mut();
*genesis_init = cumulus_test_client::GenesisParameters { endowed_accounts: account_ids };
@@ -98,7 +101,14 @@ fn benchmark_block_validation(c: &mut Criterion) {
..Default::default()
};
- let mut block_builder = client.init_block_builder(Some(validation_data), Default::default());
+ let sproof_builder = RelayStateSproofBuilder {
+ included_para_head: Some(parent_header.clone().encode().into()),
+ para_id,
+ ..Default::default()
+ };
+
+ let mut block_builder =
+ client.init_block_builder(Some(validation_data), sproof_builder.clone());
for extrinsic in extrinsics {
block_builder.push(extrinsic).unwrap();
}
@@ -108,7 +118,6 @@ fn benchmark_block_validation(c: &mut Criterion) {
let proof_size_in_kb = parachain_block.storage_proof().encode().len() as f64 / 1024f64;
let runtime = utils::get_wasm_module();
- let sproof_builder: RelayStateSproofBuilder = Default::default();
let (relay_parent_storage_root, _) = sproof_builder.into_state_root_and_proof();
let encoded_params = ValidationParams {
block_data: cumulus_test_client::BlockData(parachain_block.encode()),
diff --git a/cumulus/test/service/src/bench_utils.rs b/cumulus/test/service/src/bench_utils.rs
index 82142f21695f..1894835caec8 100644
--- a/cumulus/test/service/src/bench_utils.rs
+++ b/cumulus/test/service/src/bench_utils.rs
@@ -81,8 +81,13 @@ pub fn extrinsic_set_time(client: &TestClient) -> OpaqueExtrinsic {
pub fn extrinsic_set_validation_data(
parent_header: cumulus_test_runtime::Header,
) -> OpaqueExtrinsic {
- let sproof_builder = RelayStateSproofBuilder { para_id: 100.into(), ..Default::default() };
let parent_head = HeadData(parent_header.encode());
+ let sproof_builder = RelayStateSproofBuilder {
+ para_id: cumulus_test_runtime::PARACHAIN_ID.into(),
+ included_para_head: parent_head.clone().into(),
+ ..Default::default()
+ };
+
let (relay_parent_storage_root, relay_chain_state) = sproof_builder.into_state_root_and_proof();
let data = ParachainInherentData {
validation_data: PersistedValidationData {
diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs
index fb858ce0b714..627d060d8a0c 100644
--- a/cumulus/test/service/src/lib.rs
+++ b/cumulus/test/service/src/lib.rs
@@ -187,6 +187,7 @@ impl RecoveryHandle for FailingRecoveryHandle {
/// be able to perform chain operations.
pub fn new_partial(
config: &mut Configuration,
+ enable_import_proof_record: bool,
) -> Result<
PartialComponents<
Client,
@@ -214,7 +215,12 @@ pub fn new_partial(
sc_executor::NativeElseWasmExecutor::<RuntimeExecutor>::new_with_wasm_executor(wasm);
let (client, backend, keystore_container, task_manager) =
- sc_service::new_full_parts::<Block, RuntimeApi, _>(config, None, executor)?;
+ sc_service::new_full_parts_record_import::<Block, RuntimeApi, _>(
+ config,
+ None,
+ executor,
+ enable_import_proof_record,
+ )?;
let client = Arc::new(client);
let block_import =
@@ -309,19 +315,21 @@ pub async fn start_node_impl(
rpc_ext_builder: RB,
consensus: Consensus,
collator_options: CollatorOptions,
+ proof_recording_during_import: bool,
) -> sc_service::error::Result<(
TaskManager,
Arc<Client>,
Arc<NetworkService<Block, H256>>,
RpcHandlers,
TransactionPool,
+ Arc<Backend>,
)>
where
RB: Fn(Arc<Client>) -> Result<jsonrpsee::RpcModule<()>, sc_service::Error> + Send + 'static,
{
let mut parachain_config = prepare_node_config(parachain_config);
- let params = new_partial(&mut parachain_config)?;
+ let params = new_partial(&mut parachain_config, proof_recording_during_import)?;
let transaction_pool = params.transaction_pool.clone();
let mut task_manager = params.task_manager;
@@ -477,7 +485,7 @@ where
start_network.start_network();
- Ok((task_manager, client, network, rpc_handlers, transaction_pool))
+ Ok((task_manager, client, network, rpc_handlers, transaction_pool, backend))
}
/// A Cumulus test node instance used for testing.
@@ -495,6 +503,8 @@ pub struct TestNode {
pub rpc_handlers: RpcHandlers,
/// Node's transaction pool
pub transaction_pool: TransactionPool,
+ /// Node's backend
+ pub backend: Arc<Backend>,
}
#[allow(missing_docs)]
@@ -520,6 +530,7 @@ pub struct TestNodeBuilder {
consensus: Consensus,
relay_chain_mode: RelayChainMode,
endowed_accounts: Vec<AccountId>,
+ record_proof_during_import: bool,
}
impl TestNodeBuilder {
@@ -544,6 +555,7 @@ impl TestNodeBuilder {
consensus: Consensus::RelayChain,
endowed_accounts: Default::default(),
relay_chain_mode: RelayChainMode::Embedded,
+ record_proof_during_import: true,
}
}
@@ -656,6 +668,12 @@ impl TestNodeBuilder {
self
}
+ /// Record proofs during import.
+ pub fn import_proof_recording(mut self, should_record_proof: bool) -> TestNodeBuilder {
+ self.record_proof_during_import = should_record_proof;
+ self
+ }
+
/// Build the [`TestNode`].
pub async fn build(self) -> TestNode {
let parachain_config = node_config(
@@ -684,24 +702,26 @@ impl TestNodeBuilder {
format!("{} (relay chain)", relay_chain_config.network.node_name);
let multiaddr = parachain_config.network.listen_addresses[0].clone();
- let (task_manager, client, network, rpc_handlers, transaction_pool) = start_node_impl(
- parachain_config,
- self.collator_key,
- relay_chain_config,
- self.para_id,
- self.wrap_announce_block,
- false,
- |_| Ok(jsonrpsee::RpcModule::new(())),
- self.consensus,
- collator_options,
- )
- .await
- .expect("could not create Cumulus test service");
+ let (task_manager, client, network, rpc_handlers, transaction_pool, backend) =
+ start_node_impl(
+ parachain_config,
+ self.collator_key,
+ relay_chain_config,
+ self.para_id,
+ self.wrap_announce_block,
+ false,
+ |_| Ok(jsonrpsee::RpcModule::new(())),
+ self.consensus,
+ collator_options,
+ self.record_proof_during_import,
+ )
+ .await
+ .expect("could not create Cumulus test service");
let peer_id = network.local_peer_id();
let addr = MultiaddrWithPeerId { multiaddr, peer_id };
- TestNode { task_manager, client, network, addr, rpc_handlers, transaction_pool }
+ TestNode { task_manager, client, network, addr, rpc_handlers, transaction_pool, backend }
}
}
diff --git a/cumulus/test/service/src/main.rs b/cumulus/test/service/src/main.rs
index 16b68796bd39..55a0f12d671a 100644
--- a/cumulus/test/service/src/main.rs
+++ b/cumulus/test/service/src/main.rs
@@ -128,7 +128,7 @@ fn main() -> Result<(), sc_cli::Error> {
})
.unwrap_or(cumulus_test_service::Consensus::RelayChain);
- let (mut task_manager, _, _, _, _) = tokio_runtime
+ let (mut task_manager, _, _, _, _, _) = tokio_runtime
.block_on(cumulus_test_service::start_node_impl(
config,
collator_key,
@@ -139,6 +139,7 @@ fn main() -> Result<(), sc_cli::Error> {
|_| Ok(jsonrpsee::RpcModule::new(())),
consensus,
collator_options,
+ true,
))
.expect("could not create Cumulus test service");
diff --git a/substrate/client/api/src/execution_extensions.rs b/substrate/client/api/src/execution_extensions.rs
index 6f927105df0b..26d3ae73f69f 100644
--- a/substrate/client/api/src/execution_extensions.rs
+++ b/substrate/client/api/src/execution_extensions.rs
@@ -91,7 +91,6 @@ impl ExtensionsFactory
///
/// This crate aggregates extensions available for the offchain calls
/// and is responsible for producing a correct `Extensions` object.
-/// for each call, based on required `Capabilities`.
pub struct ExecutionExtensions {
extensions_factory: RwLock>>,
read_runtime_version: Arc,
@@ -116,8 +115,7 @@ impl ExecutionExtensions {
*self.extensions_factory.write() = Box::new(maker);
}
- /// Based on the execution context and capabilities it produces
- /// the extensions object to support desired set of APIs.
+ /// Produces default extensions based on the input parameters.
pub fn extensions(
&self,
block_hash: Block::Hash,
@@ -127,7 +125,6 @@ impl ExecutionExtensions {
self.extensions_factory.read().extensions_for(block_hash, block_number);
extensions.register(ReadRuntimeVersionExt::new(self.read_runtime_version.clone()));
-
extensions
}
}
diff --git a/substrate/client/block-builder/Cargo.toml b/substrate/client/block-builder/Cargo.toml
index 2492c4101b29..852ee84f89b8 100644
--- a/substrate/client/block-builder/Cargo.toml
+++ b/substrate/client/block-builder/Cargo.toml
@@ -20,6 +20,7 @@ sp-api = { path = "../../primitives/api" }
sp-block-builder = { path = "../../primitives/block-builder" }
sp-blockchain = { path = "../../primitives/blockchain" }
sp-core = { path = "../../primitives/core" }
+sp-trie = { path = "../../primitives/trie" }
sp-inherents = { path = "../../primitives/inherents" }
sp-runtime = { path = "../../primitives/runtime" }
diff --git a/substrate/client/block-builder/src/lib.rs b/substrate/client/block-builder/src/lib.rs
index f62b941fdb18..258e39d962b2 100644
--- a/substrate/client/block-builder/src/lib.rs
+++ b/substrate/client/block-builder/src/lib.rs
@@ -42,6 +42,7 @@ use sp_runtime::{
use std::marker::PhantomData;
pub use sp_block_builder::BlockBuilder as BlockBuilderApi;
+use sp_trie::proof_size_extension::ProofSizeExt;
/// A builder for creating an instance of [`BlockBuilder`].
pub struct BlockBuilderBuilder<'a, B, C> {
@@ -235,6 +236,10 @@ where
if record_proof {
api.record_proof();
+ let recorder = api
+ .proof_recorder()
+ .expect("Proof recording is enabled in the line above; qed.");
+ api.register_extension(ProofSizeExt::new(recorder));
}
api.set_call_context(CallContext::Onchain);
diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs
index d078f44f198f..1a3a679c519a 100644
--- a/substrate/client/service/src/builder.rs
+++ b/substrate/client/service/src/builder.rs
@@ -130,10 +130,11 @@ where
}
/// Create the initial parts of a full node with the default genesis block builder.
-pub fn new_full_parts(
+pub fn new_full_parts_record_import(
config: &Configuration,
telemetry: Option,
executor: TExec,
+ enable_import_proof_recording: bool,
) -> Result, Error>
where
TBl: BlockT,
@@ -148,7 +149,26 @@ where
executor.clone(),
)?;
- new_full_parts_with_genesis_builder(config, telemetry, executor, backend, genesis_block_builder)
+ new_full_parts_with_genesis_builder(
+ config,
+ telemetry,
+ executor,
+ backend,
+ genesis_block_builder,
+ enable_import_proof_recording,
+ )
+}
+/// Create the initial parts of a full node with the default genesis block builder.
+pub fn new_full_parts(
+ config: &Configuration,
+ telemetry: Option,
+ executor: TExec,
+) -> Result, Error>
+where
+ TBl: BlockT,
+ TExec: CodeExecutor + RuntimeVersionOf + Clone,
+{
+ new_full_parts_record_import(config, telemetry, executor, false)
}
/// Create the initial parts of a full node.
@@ -158,6 +178,7 @@ pub fn new_full_parts_with_genesis_builder>,
genesis_block_builder: TBuildGenesisBlock,
+ enable_import_proof_recording: bool,
) -> Result, Error>
where
TBl: BlockT,
@@ -225,6 +246,7 @@ where
SyncMode::LightState { .. } | SyncMode::Warp { .. }
),
wasm_runtime_substitutes,
+ enable_import_proof_recording,
},
)?;
diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs
index 9d51aae55b20..aa9c1b80a29a 100644
--- a/substrate/client/service/src/client/client.rs
+++ b/substrate/client/service/src/client/client.rs
@@ -77,7 +77,7 @@ use sp_state_machine::{
ChildStorageCollection, KeyValueStates, KeyValueStorageLevel, StorageCollection,
MAX_NESTED_TRIE_DEPTH,
};
-use sp_trie::{CompactProof, MerkleValue, StorageProof};
+use sp_trie::{proof_size_extension::ProofSizeExt, CompactProof, MerkleValue, StorageProof};
use std::{
collections::{HashMap, HashSet},
marker::PhantomData,
@@ -184,7 +184,7 @@ where
)
}
-/// Relevant client configuration items relevant for the client.
+/// Client configuration items.
#[derive(Debug, Clone)]
pub struct ClientConfig {
/// Enable the offchain worker db.
@@ -198,6 +198,8 @@ pub struct ClientConfig {
/// Map of WASM runtime substitute starting at the child of the given block until the runtime
/// version doesn't match anymore.
pub wasm_runtime_substitutes: HashMap, Vec>,
+ /// Enable recording of storage proofs during block import
+ pub enable_import_proof_recording: bool,
}
impl Default for ClientConfig {
@@ -208,6 +210,7 @@ impl Default for ClientConfig {
wasm_runtime_overrides: None,
no_genesis: false,
wasm_runtime_substitutes: HashMap::new(),
+ enable_import_proof_recording: false,
}
}
}
@@ -858,6 +861,14 @@ where
runtime_api.set_call_context(CallContext::Onchain);
+ if self.config.enable_import_proof_recording {
+ runtime_api.record_proof();
+ let recorder = runtime_api
+ .proof_recorder()
+ .expect("Proof recording is enabled in the line above; qed.");
+ runtime_api.register_extension(ProofSizeExt::new(recorder));
+ }
+
runtime_api.execute_block(
*parent_hash,
Block::new(import_block.header.clone(), body.clone()),
diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs
index ff9eb982b862..0c7e138ce905 100644
--- a/substrate/client/service/src/lib.rs
+++ b/substrate/client/service/src/lib.rs
@@ -53,9 +53,10 @@ use sp_runtime::traits::{Block as BlockT, Header as HeaderT};
pub use self::{
builder::{
build_network, new_client, new_db_backend, new_full_client, new_full_parts,
- new_full_parts_with_genesis_builder, new_native_or_wasm_executor, new_wasm_executor,
- spawn_tasks, BuildNetworkParams, KeystoreContainer, NetworkStarter, SpawnTasksParams,
- TFullBackend, TFullCallExecutor, TFullClient,
+ new_full_parts_record_import, new_full_parts_with_genesis_builder,
+ new_native_or_wasm_executor, new_wasm_executor, spawn_tasks, BuildNetworkParams,
+ KeystoreContainer, NetworkStarter, SpawnTasksParams, TFullBackend, TFullCallExecutor,
+ TFullClient,
},
client::{ClientConfig, LocalCallExecutor},
error::Error,
diff --git a/substrate/frame/support/procedural/src/construct_runtime/mod.rs b/substrate/frame/support/procedural/src/construct_runtime/mod.rs
index d1d6040d33a9..010143574ed5 100644
--- a/substrate/frame/support/procedural/src/construct_runtime/mod.rs
+++ b/substrate/frame/support/procedural/src/construct_runtime/mod.rs
@@ -253,7 +253,7 @@ pub fn construct_runtime(input: TokenStream) -> TokenStream {
let res = res.unwrap_or_else(|e| e.to_compile_error());
let res = expander::Expander::new("construct_runtime")
- .dry(std::env::var("FRAME_EXPAND").is_err())
+ .dry(std::env::var("EXPAND_MACROS").is_err())
.verbose(true)
.write_to_out_dir(res)
.expect("Does not fail because of IO in OUT_DIR; qed");
diff --git a/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs
index 370735819f94..2b1e65ec8852 100644
--- a/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs
+++ b/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs
@@ -729,7 +729,7 @@ fn decl_runtime_apis_impl_inner(api_decls: &[ItemTrait]) -> Result
};
let decl = expander::Expander::new("decl_runtime_apis")
- .dry(std::env::var("SP_API_EXPAND").is_err())
+ .dry(std::env::var("EXPAND_MACROS").is_err())
.verbose(true)
.write_to_out_dir(decl)
.expect("Does not fail because of IO in OUT_DIR; qed");
diff --git a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs
index e97291bc58ad..fd81fdb624c1 100644
--- a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs
+++ b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs
@@ -846,7 +846,7 @@ fn impl_runtime_apis_impl_inner(api_impls: &[ItemImpl]) -> Result {
);
let impl_ = expander::Expander::new("impl_runtime_apis")
- .dry(std::env::var("SP_API_EXPAND").is_err())
+ .dry(std::env::var("EXPAND_MACROS").is_err())
.verbose(true)
.write_to_out_dir(impl_)
.expect("Does not fail because of IO in OUT_DIR; qed");
diff --git a/substrate/primitives/runtime-interface/proc-macro/Cargo.toml b/substrate/primitives/runtime-interface/proc-macro/Cargo.toml
index fbc49785ae97..11236de91ce1 100644
--- a/substrate/primitives/runtime-interface/proc-macro/Cargo.toml
+++ b/substrate/primitives/runtime-interface/proc-macro/Cargo.toml
@@ -20,4 +20,5 @@ Inflector = "0.11.4"
proc-macro-crate = "1.1.3"
proc-macro2 = "1.0.56"
quote = "1.0.28"
+expander = "2.0.0"
syn = { version = "2.0.38", features = ["full", "visit", "fold", "extra-traits"] }
diff --git a/substrate/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs b/substrate/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs
index 008d69b32100..d0cc9e7b96ba 100644
--- a/substrate/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs
+++ b/substrate/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs
@@ -68,5 +68,11 @@ pub fn runtime_interface_impl(
}
};
+ let res = expander::Expander::new("runtime_interface")
+ .dry(std::env::var("EXPAND_MACROS").is_err())
+ .verbose(true)
+ .write_to_out_dir(res)
+ .expect("Does not fail because of IO in OUT_DIR; qed");
+
Ok(res)
}
diff --git a/substrate/primitives/state-machine/src/trie_backend.rs b/substrate/primitives/state-machine/src/trie_backend.rs
index 7b337b5fd540..7496463e6421 100644
--- a/substrate/primitives/state-machine/src/trie_backend.rs
+++ b/substrate/primitives/state-machine/src/trie_backend.rs
@@ -33,12 +33,12 @@ use sp_core::storage::{ChildInfo, StateVersion};
#[cfg(feature = "std")]
use sp_trie::{
cache::{LocalTrieCache, TrieCache},
- recorder::Recorder,
- MemoryDB, StorageProof,
+ MemoryDB,
};
#[cfg(not(feature = "std"))]
use sp_trie::{Error, NodeCodec};
-use sp_trie::{MerkleValue, PrefixedMemoryDB};
+use sp_trie::{MerkleValue, PrefixedMemoryDB, StorageProof, TrieRecorderProvider};
+
use trie_db::TrieCache as TrieCacheT;
#[cfg(not(feature = "std"))]
use trie_db::{node::NodeOwned, CachedValue};
@@ -112,8 +112,6 @@ pub struct UnimplementedCacheProvider {
// Not strictly necessary, but the H bound allows to use this as a drop-in
// replacement for the `LocalTrieCache` in no-std contexts.
_phantom: core::marker::PhantomData,
- // Statically prevents construction.
- _infallible: core::convert::Infallible,
}
#[cfg(not(feature = "std"))]
@@ -156,52 +154,83 @@ impl TrieCacheProvider for UnimplementedCacheProvider {
}
}
+/// Recorder provider that allows construction of a [`TrieBackend`] and satisfies the requirements,
+/// but can never be instantiated.
+#[cfg(not(feature = "std"))]
+pub struct UnimplementedRecorderProvider {
+ // Not strictly necessary, but the H bound allows to use this as a drop-in
+ // replacement for the [`sp_trie::recorder::Recorder`] in no-std contexts.
+ _phantom: core::marker::PhantomData,
+}
+
+#[cfg(not(feature = "std"))]
+impl trie_db::TrieRecorder for UnimplementedRecorderProvider {
+ fn record<'a>(&mut self, _access: trie_db::TrieAccess<'a, H::Out>) {
+ unimplemented!()
+ }
+
+ fn trie_nodes_recorded_for_key(&self, _key: &[u8]) -> trie_db::RecordedForKey {
+ unimplemented!()
+ }
+}
+
+#[cfg(not(feature = "std"))]
+impl TrieRecorderProvider for UnimplementedRecorderProvider {
+ type Recorder<'a> = UnimplementedRecorderProvider where H: 'a;
+
+ fn drain_storage_proof(self) -> Option {
+ unimplemented!()
+ }
+
+ fn as_trie_recorder(&self, _storage_root: H::Out) -> Self::Recorder<'_> {
+ unimplemented!()
+ }
+}
+
#[cfg(feature = "std")]
type DefaultCache = LocalTrieCache;
#[cfg(not(feature = "std"))]
type DefaultCache = UnimplementedCacheProvider;
+#[cfg(feature = "std")]
+type DefaultRecorder = sp_trie::recorder::Recorder;
+
+#[cfg(not(feature = "std"))]
+type DefaultRecorder = UnimplementedRecorderProvider;
+
/// Builder for creating a [`TrieBackend`].
-pub struct TrieBackendBuilder, H: Hasher, C = DefaultCache> {
+pub struct TrieBackendBuilder<
+ S: TrieBackendStorage,
+ H: Hasher,
+ C = DefaultCache,
+ R = DefaultRecorder,
+> {
storage: S,
root: H::Out,
- #[cfg(feature = "std")]
- recorder: Option>,
+ recorder: Option,
cache: Option,
}
-impl TrieBackendBuilder>
+impl TrieBackendBuilder
where
S: TrieBackendStorage,
H: Hasher,
{
/// Create a new builder instance.
pub fn new(storage: S, root: H::Out) -> Self {
- Self {
- storage,
- root,
- #[cfg(feature = "std")]
- recorder: None,
- cache: None,
- }
+ Self { storage, root, recorder: None, cache: None }
}
}
-impl TrieBackendBuilder
+impl TrieBackendBuilder
where
S: TrieBackendStorage,
H: Hasher,
{
/// Create a new builder instance.
pub fn new_with_cache(storage: S, root: H::Out, cache: C) -> Self {
- Self {
- storage,
- root,
- #[cfg(feature = "std")]
- recorder: None,
- cache: Some(cache),
- }
+ Self { storage, root, recorder: None, cache: Some(cache) }
}
/// Wrap the given [`TrieBackend`].
///
@@ -210,53 +239,47 @@ where
/// backend.
///
/// The backend storage and the cache will be taken from `other`.
- pub fn wrap(other: &TrieBackend) -> TrieBackendBuilder<&S, H, &C> {
+ pub fn wrap(other: &TrieBackend) -> TrieBackendBuilder<&S, H, &C, R> {
TrieBackendBuilder {
storage: other.essence.backend_storage(),
root: *other.essence.root(),
- #[cfg(feature = "std")]
recorder: None,
cache: other.essence.trie_node_cache.as_ref(),
}
}
/// Use the given optional `recorder` for the to be configured [`TrieBackend`].
- #[cfg(feature = "std")]
- pub fn with_optional_recorder(self, recorder: Option>) -> Self {
+ pub fn with_optional_recorder(self, recorder: Option) -> Self {
Self { recorder, ..self }
}
/// Use the given `recorder` for the to be configured [`TrieBackend`].
- #[cfg(feature = "std")]
- pub fn with_recorder(self, recorder: Recorder) -> Self {
+ pub fn with_recorder(self, recorder: R) -> Self {
Self { recorder: Some(recorder), ..self }
}
/// Use the given optional `cache` for the to be configured [`TrieBackend`].
- pub fn with_optional_cache(self, cache: Option) -> TrieBackendBuilder {
+ pub fn with_optional_cache(self, cache: Option) -> TrieBackendBuilder {
TrieBackendBuilder {
cache,
root: self.root,
storage: self.storage,
- #[cfg(feature = "std")]
recorder: self.recorder,
}
}
/// Use the given `cache` for the to be configured [`TrieBackend`].
- pub fn with_cache(self, cache: LC) -> TrieBackendBuilder {
+ pub fn with_cache(self, cache: LC) -> TrieBackendBuilder {
TrieBackendBuilder {
cache: Some(cache),
root: self.root,
storage: self.storage,
- #[cfg(feature = "std")]
recorder: self.recorder,
}
}
/// Build the configured [`TrieBackend`].
- #[cfg(feature = "std")]
- pub fn build(self) -> TrieBackend {
+ pub fn build(self) -> TrieBackend {
TrieBackend {
essence: TrieBackendEssence::new_with_cache_and_recorder(
self.storage,
@@ -267,27 +290,18 @@ where
next_storage_key_cache: Default::default(),
}
}
-
- /// Build the configured [`TrieBackend`].
- #[cfg(not(feature = "std"))]
- pub fn build(self) -> TrieBackend {
- TrieBackend {
- essence: TrieBackendEssence::new_with_cache(self.storage, self.root, self.cache),
- next_storage_key_cache: Default::default(),
- }
- }
}
/// A cached iterator.
-struct CachedIter
+struct CachedIter
where
H: Hasher,
{
last_key: sp_std::vec::Vec,
- iter: RawIter,
+ iter: RawIter,
}
-impl Default for CachedIter
+impl Default for CachedIter
where
H: Hasher,
{
@@ -313,23 +327,32 @@ fn access_cache(cell: &CacheCell, callback: impl FnOnce(&mut T) -> R) -
}
/// Patricia trie-based backend. Transaction type is an overlay of changes to commit.
-pub struct TrieBackend, H: Hasher, C = DefaultCache> {
- pub(crate) essence: TrieBackendEssence,
- next_storage_key_cache: CacheCell