diff --git a/Cargo.lock b/Cargo.lock index 698060c70d2f0..6c80692fb0349 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2441,8 +2441,7 @@ dependencies = [ [[package]] name = "hash-db" version = "0.15.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d23bd4e7b5eda0d0f3a307e8b381fdc8ba9000f26fbe912250c0a4cc3956364a" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#97caad53d595c3ecc604763a30b47dc634fd63cf" [[package]] name = "hash256-std-hasher" @@ -2453,6 +2452,14 @@ dependencies = [ "crunchy", ] +[[package]] +name = "hash256-std-hasher" +version = "0.15.2" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#97caad53d595c3ecc604763a30b47dc634fd63cf" +dependencies = [ + "crunchy", +] + [[package]] name = "hashbrown" version = "0.9.1" @@ -3094,7 +3101,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "711adba9940a039f4374fc5724c0a5eaca84a2d558cce62256bfe26f0dbef05e" dependencies = [ "hash-db", - "hash256-std-hasher", + "hash256-std-hasher 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tiny-keccak", +] + +[[package]] +name = "keccak-hasher" +version = "0.15.3" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#97caad53d595c3ecc604763a30b47dc634fd63cf" +dependencies = [ + "hash-db", + "hash256-std-hasher 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new)", "tiny-keccak", ] @@ -3952,8 +3969,7 @@ dependencies = [ [[package]] name = "memory-db" version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de006e09d04fc301a5f7e817b75aa49801c4479a8af753764416b085337ddcc5" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#97caad53d595c3ecc604763a30b47dc634fd63cf" dependencies = [ "hash-db", "hashbrown 0.11.2", @@ -8991,7 +9007,7 @@ dependencies = [ "ed25519-dalek", "futures 0.3.16", "hash-db", - "hash256-std-hasher", + "hash256-std-hasher 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", "hex", "hex-literal", "impl-serde", @@ -9215,7 +9231,7 @@ name = "sp-runtime" version = "4.0.0-dev" dependencies = [ "either", - "hash256-std-hasher", + "hash256-std-hasher 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", "impl-trait-for-tuples", "log 0.4.14", "parity-scale-codec", @@ -9485,7 +9501,7 @@ dependencies = [ "trie-bench", "trie-db", "trie-root", - "trie-standardmap", + "trie-standardmap 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -10412,24 +10428,22 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4edd9bdf0c2e08fd77c0fb2608179cac7ebed997ae18f58d47a2d96425ff51f0" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#97caad53d595c3ecc604763a30b47dc634fd63cf" dependencies = [ "criterion", "hash-db", - "keccak-hasher", + "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new)", "memory-db", "parity-scale-codec", "trie-db", "trie-root", - "trie-standardmap", + "trie-standardmap 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new)", ] [[package]] name = "trie-db" version = "0.22.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9eac131e334e81b6b3be07399482042838adcd7957aa0010231d0813e39e02fa" +source = 
"git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#97caad53d595c3ecc604763a30b47dc634fd63cf" dependencies = [ "hash-db", "hashbrown 0.11.2", @@ -10441,8 +10455,7 @@ dependencies = [ [[package]] name = "trie-root" version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "652931506d2c1244d7217a70b99f56718a7b4161b37f04e7cd868072a99f68cd" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#97caad53d595c3ecc604763a30b47dc634fd63cf" dependencies = [ "hash-db", ] @@ -10454,7 +10467,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3161ba520ab28cd8e6b68e1126f1009f6e335339d1a73b978139011703264c8" dependencies = [ "hash-db", - "keccak-hasher", + "keccak-hasher 0.15.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "trie-standardmap" +version = "0.15.2" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#97caad53d595c3ecc604763a30b47dc634fd63cf" +dependencies = [ + "hash-db", + "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new)", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 64cbbf38966c3..4f7f27a3d36ee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -265,3 +265,10 @@ zeroize = { opt-level = 3 } [profile.release] # Substrate runtime requires unwinding. panic = "unwind" + +[patch.crates-io] +hash-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4-new" } +memory-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4-new" } +trie-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4-new" } +trie-root = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4-new" } +trie-bench = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4-new" } diff --git a/bin/node-template/node/src/chain_spec.rs b/bin/node-template/node/src/chain_spec.rs index 7009b3be5c279..6556167c13363 100644 --- a/bin/node-template/node/src/chain_spec.rs +++ b/bin/node-template/node/src/chain_spec.rs @@ -72,6 +72,8 @@ pub fn development_config() -> Result { None, // Extensions None, + // StateVersion + Default::default(), )) } @@ -119,6 +121,8 @@ pub fn local_testnet_config() -> Result { None, // Extensions None, + // StateVersion + Default::default(), )) } diff --git a/bin/node/bench/src/generator.rs b/bin/node/bench/src/generator.rs index e3aa1192b5d1f..3a4c806daf90a 100644 --- a/bin/node/bench/src/generator.rs +++ b/bin/node/bench/src/generator.rs @@ -23,6 +23,7 @@ use node_primitives::Hash; use sp_trie::{trie_types::TrieDBMut, TrieMut}; use crate::simple_trie::SimpleTrie; +use sp_core::state_version::StateVersion; /// Generate trie from given `key_values`. 
/// @@ -31,6 +32,7 @@ use crate::simple_trie::SimpleTrie; pub fn generate_trie( db: Arc, key_values: impl IntoIterator, Vec)>, + state_version: StateVersion, ) -> Hash { let mut root = Hash::default(); @@ -43,8 +45,15 @@ pub fn generate_trie( ); let mut trie = SimpleTrie { db, overlay: &mut overlay }; { - let mut trie_db = TrieDBMut::new(&mut trie, &mut root); - + let mut trie_db = match state_version { + StateVersion::V0 => TrieDBMut::new(&mut trie, &mut root), + StateVersion::V1 { threshold } => { + let layout = sp_trie::Layout::with_max_inline_value(threshold); + TrieDBMut::::new_with_layout( + &mut trie, &mut root, layout, + ) + }, + }; for (key, value) in key_values { trie_db.insert(&key, &value).expect("trie insertion failed"); } diff --git a/bin/node/bench/src/trie.rs b/bin/node/bench/src/trie.rs index a17e386ca879b..b5df064645e1b 100644 --- a/bin/node/bench/src/trie.rs +++ b/bin/node/bench/src/trie.rs @@ -142,7 +142,7 @@ impl core::BenchmarkDescription for TrieReadBenchmarkDescription { assert_eq!(warmup_keys.len(), SAMPLE_SIZE); assert_eq!(query_keys.len(), SAMPLE_SIZE); - let root = generate_trie(database.open(self.database_type), key_values); + let root = generate_trie(database.open(self.database_type), key_values, Default::default()); Box::new(TrieReadBenchmark { database, @@ -180,7 +180,8 @@ impl core::Benchmark for TrieReadBenchmark { let storage: Arc> = Arc::new(Storage(db.open(self.database_type))); - let trie_backend = sp_state_machine::TrieBackend::new(storage, self.root); + let state_version = Default::default(); + let trie_backend = sp_state_machine::TrieBackend::new(storage, self.root, state_version); for (warmup_key, warmup_value) in self.warmup_keys.iter() { let value = trie_backend .storage(&warmup_key[..]) @@ -248,7 +249,7 @@ impl core::BenchmarkDescription for TrieWriteBenchmarkDescription { assert_eq!(warmup_keys.len(), SAMPLE_SIZE); - let root = generate_trie(database.open(self.database_type), key_values); + let root = generate_trie(database.open(self.database_type), key_values, Default::default()); Box::new(TrieWriteBenchmark { database, diff --git a/bin/node/cli/src/chain_spec.rs b/bin/node/cli/src/chain_spec.rs index bbb2904beab3a..073d058240682 100644 --- a/bin/node/cli/src/chain_spec.rs +++ b/bin/node/cli/src/chain_spec.rs @@ -197,6 +197,7 @@ pub fn staging_testnet_config() -> ChainSpec { None, None, Default::default(), + Default::default(), ) } @@ -387,6 +388,7 @@ pub fn development_config() -> ChainSpec { None, None, Default::default(), + Default::default(), ) } @@ -411,6 +413,7 @@ pub fn local_testnet_config() -> ChainSpec { None, None, Default::default(), + Default::default(), ) } @@ -442,6 +445,7 @@ pub(crate) mod tests { None, None, Default::default(), + Default::default(), ) } @@ -457,6 +461,7 @@ pub(crate) mod tests { None, None, Default::default(), + Default::default(), ) } diff --git a/bin/node/executor/benches/bench.rs b/bin/node/executor/benches/bench.rs index 0058a5c70340f..c589d0ca2fbd2 100644 --- a/bin/node/executor/benches/bench.rs +++ b/bin/node/executor/benches/bench.rs @@ -89,10 +89,10 @@ fn construct_block( let extrinsics = extrinsics.into_iter().map(sign).collect::>(); // calculate the header fields that we can. 
- let extrinsics_root = - Layout::::ordered_trie_root(extrinsics.iter().map(Encode::encode)) - .to_fixed_bytes() - .into(); + let extrinsics_root = Layout::::default() + .ordered_trie_root(extrinsics.iter().map(Encode::encode)) + .to_fixed_bytes() + .into(); let header = Header { parent_hash, diff --git a/bin/node/executor/tests/common.rs b/bin/node/executor/tests/common.rs index a0edb46a0d6ae..f8b5af299dcef 100644 --- a/bin/node/executor/tests/common.rs +++ b/bin/node/executor/tests/common.rs @@ -151,10 +151,10 @@ pub fn construct_block( let extrinsics = extrinsics.into_iter().map(sign).collect::>(); // calculate the header fields that we can. - let extrinsics_root = - Layout::::ordered_trie_root(extrinsics.iter().map(Encode::encode)) - .to_fixed_bytes() - .into(); + let extrinsics_root = Layout::::default() + .ordered_trie_root(extrinsics.iter().map(Encode::encode)) + .to_fixed_bytes() + .into(); let header = Header { parent_hash, diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs index a1f9bc8710565..04ca7288ae531 100644 --- a/bin/node/testing/src/bench.rs +++ b/bin/node/testing/src/bench.rs @@ -387,7 +387,8 @@ impl BenchDb { }; let task_executor = TaskExecutor::new(); - let backend = sc_service::new_db_backend(db_config).expect("Should not fail"); + let backend = + sc_service::new_db_backend(db_config, Default::default()).expect("Should not fail"); let client = sc_service::new_client( backend.clone(), NativeElseWasmExecutor::new(WasmExecutionMethod::Compiled, None, 8), diff --git a/bin/utils/chain-spec-builder/src/main.rs b/bin/utils/chain-spec-builder/src/main.rs index bf5f1a149578e..1b74261638529 100644 --- a/bin/utils/chain-spec-builder/src/main.rs +++ b/bin/utils/chain-spec-builder/src/main.rs @@ -156,6 +156,7 @@ fn generate_chain_spec( None, None, Default::default(), + Default::default(), ); chain_spec.as_json(false).map_err(|err| err) diff --git a/client/api/src/cht.rs b/client/api/src/cht.rs index ee7854b5d8297..718974730ad8d 100644 --- a/client/api/src/cht.rs +++ b/client/api/src/cht.rs @@ -94,9 +94,8 @@ where I: IntoIterator>>, { use sp_trie::TrieConfiguration; - Ok(sp_trie::trie_types::Layout::::trie_root(build_pairs::( - cht_size, cht_num, hashes, - )?)) + Ok(sp_trie::Layout::::default() + .trie_root(build_pairs::(cht_size, cht_num, hashes)?)) } /// Build CHT-based header proof. @@ -117,7 +116,9 @@ where .into_iter() .map(|(k, v)| (k, Some(v))) .collect::>(); - let storage = InMemoryBackend::::default().update(vec![(None, transaction)]); + // No inner hashing for cht. + let backend: InMemoryBackend = sp_runtime::StateVersion::V0.into(); + let storage = backend.update(vec![(None, transaction)]); let trie_storage = storage .as_trie_backend() .expect("InMemoryState::as_trie_backend always returns Some; qed"); diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index e8fce19f8124e..66010e4846066 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -26,7 +26,7 @@ use sp_core::{ use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, HashFor, Header as HeaderT, NumberFor, Zero}, - Justification, Justifications, Storage, + Justification, Justifications, StateVersion, StateVersions, Storage, }; use sp_state_machine::{ Backend as StateBackend, ChangesTrieTransaction, ChildStorageCollection, InMemoryBackend, @@ -112,6 +112,7 @@ struct BlockchainStorage { changes_trie_cht_roots: HashMap, Block::Hash>, leaves: LeafSet>, aux: HashMap, Vec>, + state_versions: StateVersions, } /// In-memory blockchain. Supports concurrent reads. 
@@ -141,6 +142,20 @@ impl Blockchain { } } + /// Get the state version for the given block. + pub fn state_version(&self, id: BlockId) -> StateVersion { + let number = match id { + BlockId::Hash(h) => + if let Ok(Some(header)) = self.header(BlockId::Hash(h)) { + header.number().clone() + } else { + 0u32.into() + }, + BlockId::Number(n) => n, + }; + self.storage.read().state_versions.state_version_at(number) + } + /// Create new in-memory blockchain storage. pub fn new() -> Blockchain { let storage = Arc::new(RwLock::new(BlockchainStorage { @@ -155,6 +170,7 @@ impl Blockchain { changes_trie_cht_roots: HashMap::new(), leaves: LeafSet::new(), aux: HashMap::new(), + state_versions: StateVersions::default(), })); Blockchain { storage } } @@ -855,7 +871,8 @@ where fn state_at(&self, block: BlockId) -> sp_blockchain::Result { match block { - BlockId::Hash(h) if h == Default::default() => return Ok(Self::State::default()), + BlockId::Hash(h) if h == Default::default() => + return Ok(self.blockchain.state_version(BlockId::Hash(h)).into()), _ => {}, } diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index fcdb053c47c16..5b683ba047d65 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -28,7 +28,7 @@ use sp_core::{ storage::{ChildInfo, Storage, StorageChild, StorageData, StorageKey}, Bytes, }; -use sp_runtime::BuildStorage; +use sp_runtime::{BuildStorage, StateVersion}; use std::{borrow::Cow, collections::HashMap, fs::File, path::PathBuf, sync::Arc}; enum GenesisSource { @@ -99,6 +99,10 @@ impl GenesisSource { } impl BuildStorage for ChainSpec { + fn state_version(&self) -> StateVersion { + self.genesis_state_version() + } + fn build_storage(&self) -> Result { match self.genesis.resolve()? { Genesis::Runtime(gc) => gc.build_storage(), @@ -170,6 +174,9 @@ struct ClientSpec { /// block hash onwards. #[serde(default)] code_substitutes: HashMap, + /// Ordered sequence of block numbers and their associated state versions. + #[serde(default)] + state_versions: Vec<(String, StateVersion)>, } /// A type denoting empty extensions. @@ -248,6 +255,7 @@ impl ChainSpec { protocol_id: Option<&str>, properties: Option, extensions: E, + state_version: StateVersion, ) -> Self { let client_spec = ClientSpec { name: name.to_owned(), @@ -261,6 +269,7 @@ impl ChainSpec { consensus_engine: (), genesis: Default::default(), code_substitutes: HashMap::new(), + state_versions: vec![("0".to_string(), state_version)], }; ChainSpec { client_spec, genesis: GenesisSource::Factory(Arc::new(constructor)) } @@ -270,6 +279,23 @@ impl ChainSpec { fn chain_type(&self) -> ChainType { self.client_spec.chain_type.clone() } + + /// State versions defined for the chain. + /// Each entry pairs a block number (as a string) with the state version active from that block. + fn state_versions(&self) -> &Vec<(String, StateVersion)> { + &self.client_spec.state_versions + } + + /// Return genesis state version.
+ fn genesis_state_version(&self) -> StateVersion { + use std::str::FromStr; + self.state_versions() + .get(0) + // This can be incorrect (if the number representation is incompatible with u64). + .and_then(|(n, s)| u64::from_str(n).ok().map(|n| (n, s))) + .and_then(|(n, s)| (n == 0).then(|| s.clone())) + .unwrap_or_default() + } } impl ChainSpec { @@ -401,6 +427,10 @@ where .map(|(h, c)| (h.clone(), c.0.clone())) .collect() } + + fn state_versions(&self) -> &Vec<(String, StateVersion)> { + ChainSpec::state_versions(self) + } } #[cfg(test)] @@ -411,6 +441,10 @@ mod tests { struct Genesis(HashMap); impl BuildStorage for Genesis { + fn state_version(&self) -> StateVersion { + Default::default() + } + fn assimilate_storage(&self, storage: &mut Storage) -> Result<(), String> { storage.top.extend( self.0.iter().map(|(a, b)| (a.clone().into_bytes(), b.clone().into_bytes())), diff --git a/client/chain-spec/src/lib.rs b/client/chain-spec/src/lib.rs index 334d8f8b3d7ac..45757e06cbb8e 100644 --- a/client/chain-spec/src/lib.rs +++ b/client/chain-spec/src/lib.rs @@ -119,7 +119,7 @@ pub use sc_chain_spec_derive::{ChainSpecExtension, ChainSpecGroup}; use sc_network::config::MultiaddrWithPeerId; use sc_telemetry::TelemetryEndpoints; use serde::{de::DeserializeOwned, Serialize}; -use sp_core::storage::Storage; +use sp_core::{state_version::StateVersion, storage::Storage}; use sp_runtime::BuildStorage; /// The type of a chain. @@ -187,6 +187,9 @@ pub trait ChainSpec: BuildStorage + Send + Sync { fn set_storage(&mut self, storage: Storage); /// Returns code substitutes that should be used for the on chain wasm. fn code_substitutes(&self) -> std::collections::HashMap>; + /// State versions for the chain. + /// The default state version is used when there is no definition for block 0. + fn state_versions(&self) -> &Vec<(String, StateVersion)>; } impl std::fmt::Debug for dyn ChainSpec { diff --git a/client/cli/src/commands/insert_key.rs b/client/cli/src/commands/insert_key.rs index 05055dc53c1e2..3addb7889bc84 100644 --- a/client/cli/src/commands/insert_key.rs +++ b/client/cli/src/commands/insert_key.rs @@ -143,6 +143,7 @@ mod tests { None, None, NoExtension::None, + Default::default(), ))) } } diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 946e0b90c4dd4..672b6c2b6a56f 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -707,6 +707,10 @@ mod tests { fn mut_peers)>(&mut self, closure: F) { closure(&mut self.peers); } + fn state_versions(&self) -> Option> { + // Currently no support for aura test net with stateversion update. + None + } } #[test] diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index c033f4535be0b..4e39e083ebf83 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -365,6 +365,11 @@ impl TestNetFactory for BabeTestNet { fn mut_peers)>(&mut self, closure: F) { closure(&mut self.peers); } + + fn state_versions(&self) -> Option> { + // Currently no support for consensus test net with stateversion update.
+ None + } } #[test] diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index d46aca8e8ff78..a111217650b34 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -34,7 +34,7 @@ use sp_core::{ }; use sp_runtime::{ traits::{Block as BlockT, HashFor}, - Storage, + StateVersion, Storage, }; use sp_state_machine::{ backend::Backend as StateBackend, ChildStorageCollection, DBValue, ProofRecorder, @@ -64,7 +64,7 @@ impl sp_state_machine::Storage> for StorageDb>(key.clone(), backend_value.clone()); Ok(backend_value) } else { self.db @@ -73,9 +73,11 @@ impl sp_state_machine::Storage> for StorageDb { root: Cell, + state_version: Cell, genesis_root: B::Hash, state: RefCell>>, db: Cell>>, @@ -113,6 +115,7 @@ impl BenchmarkingState { state: RefCell::new(None), db: Cell::new(None), root: Cell::new(root.clone()), + state_version: Cell::new(Default::default()), genesis: Default::default(), genesis_root: Default::default(), record: Default::default(), @@ -163,7 +166,7 @@ impl BenchmarkingState { _block: Default::default(), }); *self.state.borrow_mut() = Some(State::new( - DbState::::new(storage_db, self.root.get()), + DbState::::new(storage_db, self.root.get(), self.state_version.get()), self.shared_cache.clone(), None, )); @@ -602,7 +605,7 @@ impl StateBackend> for BenchmarkingState { fn proof_size(&self) -> Option { self.proof_recorder.as_ref().map(|recorder| { let proof_size = recorder.estimate_encoded_size() as u32; - let proof = recorder.to_storage_proof(); + let proof = recorder.to_storage_proof::>(); let proof_recorder_root = self.proof_recorder_root.get(); if proof_recorder_root == Default::default() || proof_size == 1 { // empty trie @@ -622,6 +625,10 @@ impl StateBackend> for BenchmarkingState { } }) } + + fn state_version(&self) -> StateVersion { + self.state_version.get() + } } impl std::fmt::Debug for BenchmarkingState { diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 66adb64c0109e..9726b22a575d6 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -88,7 +88,7 @@ use sp_runtime::{ Block as BlockT, Hash, HashFor, Header as HeaderT, NumberFor, One, SaturatedConversion, Zero, }, - Justification, Justifications, Storage, + Justification, Justifications, StateVersion, StateVersions, Storage, }; use sp_state_machine::{ backend::Backend as StateBackend, ChangesTrieCacheAction, ChangesTrieTransaction, @@ -286,6 +286,10 @@ impl StateBackend> for RefTrackingState { fn usage_info(&self) -> StateUsageInfo { self.state.usage_info() } + + fn state_version(&self) -> StateVersion { + self.state.state_version() + } } /// Database settings. @@ -913,7 +917,33 @@ impl sc_client_api::backend::BlockImportOperation } fn reset_storage(&mut self, storage: Storage) -> ClientResult { - let root = self.apply_new_state(storage)?; + if storage.top.keys().any(|k| well_known_keys::is_child_storage_key(&k)) { + return Err(sp_blockchain::Error::GenesisInvalid.into()) + } + + let child_delta = storage.children_default.iter().map(|(_storage_key, child_content)| { + ( + &child_content.child_info, + child_content.data.iter().map(|(k, v)| (&k[..], Some(&v[..]))), + ) + }); + + let mut changes_trie_config: Option = None; + let (root, transaction) = self.old_state.full_storage_root( + storage.top.iter().map(|(k, v)| { + if &k[..] 
== well_known_keys::CHANGES_TRIE_CONFIG { + changes_trie_config = Some( + Decode::decode(&mut &v[..]) + .expect("changes trie configuration is encoded properly at genesis"), + ); + } + (&k[..], Some(&v[..])) + }), + child_delta, + ); + + self.db_updates = transaction; + self.changes_trie_config_update = Some(changes_trie_config); self.commit_state = true; Ok(root) } @@ -1099,15 +1129,20 @@ pub struct Backend { io_stats: FrozenForDuration<(kvdb::IoStats, StateUsageInfo)>, state_usage: Arc, genesis_state: RwLock>>>, + state_versions: StateVersions, } impl Backend { /// Create a new instance of database backend. /// /// The pruning window is how old a block must be before the state is pruned. - pub fn new(config: DatabaseSettings, canonicalization_delay: u64) -> ClientResult { + pub fn new( + config: DatabaseSettings, + canonicalization_delay: u64, + state_versions: StateVersions, + ) -> ClientResult { let db = crate::utils::open_database::(&config, DatabaseType::Full)?; - Self::from_database(db as Arc<_>, canonicalization_delay, &config) + Self::from_database(db as Arc<_>, canonicalization_delay, &config, state_versions) } /// Create new memory-backed client backend for tests. @@ -1126,6 +1161,23 @@ impl Backend { keep_blocks: u32, canonicalization_delay: u64, transaction_storage: TransactionStorageMode, + ) -> Self { + let state_versions = Default::default(); + Self::new_test_with_tx_storage_and_state_versions( + keep_blocks, + canonicalization_delay, + transaction_storage, + state_versions, + ) + } + + /// Create new memory-backed client backend for tests. + #[cfg(any(test, feature = "test-helpers"))] + pub fn new_test_with_tx_storage_and_state_versions( + keep_blocks: u32, + canonicalization_delay: u64, + transaction_storage: TransactionStorageMode, + state_versions: StateVersions, ) -> Self { let db = kvdb_memorydb::create(crate::utils::NUM_COLUMNS); let db = sp_database::as_database(db); @@ -1138,13 +1190,15 @@ impl Backend { transaction_storage, }; - Self::new(db_setting, canonicalization_delay).expect("failed to create test-db") + Self::new(db_setting, canonicalization_delay, state_versions) + .expect("failed to create test-db") } fn from_database( db: Arc>, canonicalization_delay: u64, config: &DatabaseSettings, + state_versions: StateVersions, ) -> ClientResult { let is_archive_pruning = config.state_pruning.is_archive(); let blockchain = BlockchainDb::new(db.clone(), config.transaction_storage.clone())?; @@ -1188,6 +1242,7 @@ impl Backend { keep_blocks: config.keep_blocks.clone(), transaction_storage: config.transaction_storage.clone(), genesis_state: RwLock::new(None), + state_versions, }; // Older DB versions have no last state key. Check if the state is available and set it. @@ -1855,7 +1910,9 @@ impl Backend { fn empty_state(&self) -> ClientResult, Block>> { let root = EmptyStorage::::new().0; // Empty trie - let db_state = DbState::::new(self.storage.clone(), root); + // Using genesis state_version in empty state. 
+ let state_version = self.state_versions.genesis_state_version(); + let db_state = DbState::::new(self.storage.clone(), root, state_version); let state = RefTrackingState::new(db_state, self.storage.clone(), None); let caching_state = CachingState::new(state, self.shared_cache.clone(), None); Ok(SyncingCachingState::new( @@ -2321,14 +2378,14 @@ impl sc_client_api::backend::Backend for Backend { use sc_client_api::blockchain::HeaderBackend as BcHeaderBackend; let is_genesis = match &block { - BlockId::Number(n) if n.is_zero() => true, - BlockId::Hash(h) if h == &self.blockchain.meta.read().genesis_hash => true, - _ => false, + BlockId::Number(n) => n.is_zero(), + BlockId::Hash(h) => h == &self.blockchain.meta.read().genesis_hash, }; if is_genesis { if let Some(genesis_state) = &*self.genesis_state.read() { let root = genesis_state.root.clone(); - let db_state = DbState::::new(genesis_state.clone(), root); + let state_version = self.state_versions.genesis_state_version(); + let db_state = DbState::::new(genesis_state.clone(), root, state_version); let state = RefTrackingState::new(db_state, self.storage.clone(), None); let caching_state = CachingState::new(state, self.shared_cache.clone(), None); let mut state = SyncingCachingState::new( @@ -2359,7 +2416,8 @@ impl sc_client_api::backend::Backend for Backend { } if let Ok(()) = self.storage.state_db.pin(&hash) { let root = hdr.state_root; - let db_state = DbState::::new(self.storage.clone(), root); + let state_version = self.state_versions.state_version_at(hdr.number); + let db_state = DbState::::new(self.storage.clone(), root, state_version); let state = RefTrackingState::new(db_state, self.storage.clone(), Some(hash.clone())); let caching_state = @@ -2546,6 +2604,7 @@ pub(crate) mod tests { transaction_storage: TransactionStorageMode::BlockBody, }, 0, + Default::default(), ) .unwrap(); assert_eq!(backend.blockchain().info().best_number, 9); @@ -2556,7 +2615,17 @@ pub(crate) mod tests { #[test] fn set_state_data() { - let db = Backend::::new_test(2, 0); + set_state_data_inner(true); + set_state_data_inner(false); + } + fn set_state_data_inner(inner_hashing: bool) { + let state_version = if inner_hashing { + StateVersion::V1 { threshold: sp_core::storage::TEST_DEFAULT_INLINE_VALUE_THESHOLD } + } else { + StateVersion::V0 + }; + let mut db = Backend::::new_test(2, 0); + db.state_versions.add((0, state_version)); let hash = { let mut op = db.begin_operation().unwrap(); let mut header = Header { diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index a895324a2e7b9..e51c26f319b77 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -25,7 +25,7 @@ use hash_db::Hasher; use linked_hash_map::{Entry, LinkedHashMap}; use log::trace; use parking_lot::{RwLock, RwLockUpgradableReadGuard}; -use sp_core::{hexdisplay::HexDisplay, storage::ChildInfo}; +use sp_core::{hexdisplay::HexDisplay, state_version::StateVersion, storage::ChildInfo}; use sp_runtime::traits::{Block as BlockT, HashFor, Header, NumberFor}; use sp_state_machine::{ backend::Backend as StateBackend, ChildStorageCollection, StorageCollection, StorageKey, @@ -716,6 +716,10 @@ impl>, B: BlockT> StateBackend> for Cachin info.include_state_machine_states(&self.overlay_stats); info } + + fn state_version(&self) -> StateVersion { + self.state.state_version() + } } /// Extended [`CachingState`] that will sync the caches on drop. 
@@ -915,6 +919,10 @@ impl>, B: BlockT> StateBackend> fn usage_info(&self) -> sp_state_machine::UsageInfo { self.caching_state().usage_info() } + + fn state_version(&self) -> StateVersion { + self.caching_state().state_version() + } } impl Drop for SyncingCachingState { @@ -960,11 +968,12 @@ mod tests { let h3b = H256::random(); let shared = new_shared_cache::(256 * 1024, (0, 1)); + let state_version = StateVersion::default(); // blocks [ 3a(c) 2a(c) 2b 1b 1a(c) 0 ] // state [ 5 5 4 3 2 2 ] let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::from(state_version), shared.clone(), Some(root_parent), ); @@ -978,12 +987,18 @@ mod tests { true, ); - let mut s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h0)); + let mut s = CachingState::new( + InMemoryBackend::::from(state_version), + shared.clone(), + Some(h0), + ); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h1a), Some(1), true); - let mut s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h0)); + let mut s = CachingState::new( + InMemoryBackend::::from(state_version), + shared.clone(), + Some(h0), + ); s.cache.sync_cache( &[], &[], @@ -994,8 +1009,11 @@ mod tests { false, ); - let mut s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1b)); + let mut s = CachingState::new( + InMemoryBackend::::from(state_version), + shared.clone(), + Some(h1b), + ); s.cache.sync_cache( &[], &[], @@ -1006,8 +1024,11 @@ mod tests { false, ); - let mut s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1a)); + let mut s = CachingState::new( + InMemoryBackend::::from(state_version), + shared.clone(), + Some(h1a), + ); s.cache.sync_cache( &[], &[], @@ -1018,30 +1039,48 @@ mod tests { true, ); - let mut s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h2a)); + let mut s = CachingState::new( + InMemoryBackend::::from(state_version), + shared.clone(), + Some(h2a), + ); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h3a), Some(3), true); - let s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h3a)); + let s = CachingState::new( + InMemoryBackend::::from(state_version), + shared.clone(), + Some(h3a), + ); assert_eq!(s.storage(&key).unwrap().unwrap(), vec![5]); - let s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1a)); + let s = CachingState::new( + InMemoryBackend::::from(state_version), + shared.clone(), + Some(h1a), + ); assert!(s.storage(&key).unwrap().is_none()); - let s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h2b)); + let s = CachingState::new( + InMemoryBackend::::from(state_version), + shared.clone(), + Some(h2b), + ); assert!(s.storage(&key).unwrap().is_none()); - let s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1b)); + let s = CachingState::new( + InMemoryBackend::::from(state_version), + shared.clone(), + Some(h1b), + ); assert!(s.storage(&key).unwrap().is_none()); // reorg to 3b // blocks [ 3b(c) 3a 2a 2b(c) 1b 1a 0 ] - let mut s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h2b)); + let mut s = CachingState::new( + InMemoryBackend::::from(state_version), + shared.clone(), + Some(h2b), + ); s.cache.sync_cache( &[h1b, h2b, h3b], &[h1a, h2a, h3a], @@ -1051,8 +1090,11 @@ mod tests { Some(3), true, ); - let s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h3a)); + let s = CachingState::new( + 
InMemoryBackend::::from(state_version), + shared.clone(), + Some(h3a), + ); assert!(s.storage(&key).unwrap().is_none()); } @@ -1068,9 +1110,10 @@ mod tests { let h3b = H256::random(); let shared = new_shared_cache::(256 * 1024, (0, 1)); + let state_version = StateVersion::default(); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::from(state_version), shared.clone(), Some(root_parent), ); @@ -1084,12 +1127,18 @@ mod tests { true, ); - let mut s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1)); + let mut s = CachingState::new( + InMemoryBackend::::from(state_version), + shared.clone(), + Some(h1), + ); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h2a), Some(2), true); - let mut s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1)); + let mut s = CachingState::new( + InMemoryBackend::::from(state_version), + shared.clone(), + Some(h1), + ); s.cache.sync_cache( &[], &[], @@ -1100,8 +1149,11 @@ mod tests { false, ); - let mut s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h2b)); + let mut s = CachingState::new( + InMemoryBackend::::from(state_version), + shared.clone(), + Some(h2b), + ); s.cache.sync_cache( &[], &[], @@ -1112,8 +1164,11 @@ mod tests { false, ); - let s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h2a)); + let s = CachingState::new( + InMemoryBackend::::from(state_version), + shared.clone(), + Some(h2a), + ); assert_eq!(s.storage(&key).unwrap().unwrap(), vec![2]); } @@ -1128,20 +1183,27 @@ mod tests { let h3b = H256::random(); let shared = new_shared_cache::(256 * 1024, (0, 1)); + let state_version = StateVersion::default(); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::from(state_version), shared.clone(), Some(root_parent), ); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h1), Some(1), true); - let mut s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1)); + let mut s = CachingState::new( + InMemoryBackend::::from(state_version), + shared.clone(), + Some(h1), + ); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h2a), Some(2), true); - let mut s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h2a)); + let mut s = CachingState::new( + InMemoryBackend::::from(state_version), + shared.clone(), + Some(h2a), + ); s.cache.sync_cache( &[], &[], @@ -1152,12 +1214,18 @@ mod tests { true, ); - let mut s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1)); + let mut s = CachingState::new( + InMemoryBackend::::from(state_version), + shared.clone(), + Some(h1), + ); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h2b), Some(2), false); - let mut s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h2b)); + let mut s = CachingState::new( + InMemoryBackend::::from(state_version), + shared.clone(), + Some(h2b), + ); s.cache.sync_cache( &[], &[], @@ -1168,8 +1236,11 @@ mod tests { false, ); - let s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h3a)); + let s = CachingState::new( + InMemoryBackend::::from(state_version), + shared.clone(), + Some(h3a), + ); assert_eq!(s.storage(&key).unwrap().unwrap(), vec![2]); } @@ -1181,7 +1252,7 @@ mod tests { let h1b = H256::random(); let shared = new_shared_cache::(256 * 1024, (0, 1)); - let mut backend = InMemoryBackend::::default(); + let mut backend = InMemoryBackend::::from(StateVersion::default()); backend.insert(std::iter::once((None, 
vec![(key.clone(), Some(vec![1]))]))); let mut s = CachingState::new(backend.clone(), shared.clone(), Some(root_parent)); @@ -1209,7 +1280,7 @@ mod tests { let h0 = H256::random(); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::from(StateVersion::default()), shared.clone(), Some(root_parent.clone()), ); @@ -1249,7 +1320,7 @@ mod tests { let h0 = H256::random(); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::from(StateVersion::default()), shared.clone(), Some(root_parent), ); @@ -1292,8 +1363,9 @@ mod tests { let h1 = H256::random(); let shared = new_shared_cache::(256 * 1024, (0, 1)); + let state_version = StateVersion::default(); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::from(state_version), shared.clone(), Some(root_parent.clone()), ); @@ -1307,8 +1379,11 @@ mod tests { true, ); - let mut s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h0)); + let mut s = CachingState::new( + InMemoryBackend::::from(state_version), + shared.clone(), + Some(h0), + ); s.cache.sync_cache( &[], &[], @@ -1319,8 +1394,11 @@ mod tests { true, ); - let mut s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1)); + let mut s = CachingState::new( + InMemoryBackend::::from(state_version), + shared.clone(), + Some(h1), + ); assert_eq!(s.storage(&key).unwrap(), Some(vec![3])); // Restart (or unknown block?), clear caches. @@ -1339,8 +1417,11 @@ mod tests { // New value is propagated. s.cache.sync_cache(&[], &[], vec![], vec![], None, None, true); - let s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1)); + let s = CachingState::new( + InMemoryBackend::::from(state_version), + shared.clone(), + Some(h1), + ); assert_eq!(s.storage(&key).unwrap(), None); } @@ -1354,9 +1435,10 @@ mod tests { let h2 = H256::random(); let shared = new_shared_cache::(256 * 1024, (0, 1)); + let state_version = StateVersion::default(); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::from(state_version), shared.clone(), Some(root_parent), ); @@ -1371,8 +1453,11 @@ mod tests { ); assert_eq!(shared.write().lru_storage.get(&key).unwrap(), &Some(vec![1])); - let mut s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1)); + let mut s = CachingState::new( + InMemoryBackend::::from(state_version), + shared.clone(), + Some(h1), + ); // commit as non-best s.cache.sync_cache( @@ -1387,8 +1472,11 @@ mod tests { assert_eq!(shared.write().lru_storage.get(&key).unwrap(), &Some(vec![1])); - let mut s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1)); + let mut s = CachingState::new( + InMemoryBackend::::from(state_version), + shared.clone(), + Some(h1), + ); // commit again as best with no changes s.cache.sync_cache(&[], &[], vec![], vec![], Some(h2), Some(2), true); @@ -1519,8 +1607,9 @@ mod qc { } fn head_state(&self, hash: H256) -> CachingState, Block> { + let state_version = StateVersion::default(); CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::from(state_version), self.shared.clone(), Some(hash), ) @@ -1554,6 +1643,7 @@ mod qc { &mut self, action: Action, ) -> Result, Block>, ()> { + let state_version = StateVersion::default(); let state = match action { Action::Fork { depth, hash, changes } => { let pos = self.canon.len() as isize - depth as isize; @@ -1591,7 +1681,7 @@ mod qc { }; let mut state = CachingState::new( - InMemoryBackend::::default(), 
+ InMemoryBackend::::from(state_version), self.shared.clone(), Some(parent), ); @@ -1628,7 +1718,7 @@ mod qc { } let mut state = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::from(state_version), self.shared.clone(), Some(parent_hash), ); @@ -1679,7 +1769,7 @@ mod qc { self.canon.push(node); let mut state = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::from(state_version), self.shared.clone(), Some(fork_at), ); diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index dabead4799dc8..cade4450de2c2 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -33,7 +33,7 @@ use sp_core::{ }; use sp_runtime::traits::BlakeTwo256; use sp_state_machine::TestExternalities as CoreTestExternalities; -use sp_trie::{trie_types::Layout, TrieConfiguration}; +use sp_trie::{Layout, TrieConfiguration}; use sp_wasm_interface::HostFunctions as _; use std::sync::Arc; use tracing_subscriber::layer::SubscriberExt; @@ -179,16 +179,20 @@ fn storage_should_work(wasm_method: WasmExecutionMethod) { let mut ext = ext.ext(); ext.set_storage(b"foo".to_vec(), b"bar".to_vec()); - let output = - call_in_wasm("test_data_in", &b"Hello world".to_vec().encode(), wasm_method, &mut ext) - .unwrap(); + let output = call_in_wasm( + "test_data_in", + &b"Hello worldHello worldHello worldHello world".to_vec().encode(), + wasm_method, + &mut ext, + ) + .unwrap(); assert_eq!(output, b"all ok!".to_vec().encode()); } let expected = TestExternalities::new(sp_core::storage::Storage { top: map![ - b"input".to_vec() => b"Hello world".to_vec(), + b"input".to_vec() => b"Hello worldHello worldHello worldHello world".to_vec(), b"foo".to_vec() => b"bar".to_vec(), b"baz".to_vec() => b"bar".to_vec() ], @@ -368,7 +372,10 @@ fn ordered_trie_root_should_work(wasm_method: WasmExecutionMethod) { let trie_input = vec![b"zero".to_vec(), b"one".to_vec(), b"two".to_vec()]; assert_eq!( call_in_wasm("test_ordered_trie_root", &[0], wasm_method, &mut ext.ext(),).unwrap(), - Layout::::ordered_trie_root(trie_input.iter()).as_bytes().encode(), + Layout::::default() + .ordered_trie_root(trie_input.iter()) + .as_bytes() + .encode(), ); } diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 1aef7cd1b017a..b4579c5e98783 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -172,6 +172,11 @@ impl TestNetFactory for GrandpaTestNet { fn mut_peers)>(&mut self, closure: F) { closure(&mut self.peers); } + + fn state_versions(&self) -> Option> { + // Currently no support for grandpa test net with stateversion update. + None + } } #[derive(Default, Clone)] diff --git a/client/light/src/backend.rs b/client/light/src/backend.rs index 3091dce625a3f..4fa32a9b1ba95 100644 --- a/client/light/src/backend.rs +++ b/client/light/src/backend.rs @@ -48,7 +48,7 @@ use sp_core::{ use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, HashFor, Header, NumberFor, Zero}, - Justification, Justifications, Storage, + Justification, Justifications, StateVersion, StateVersions, Storage, }; use sp_state_machine::{ Backend as StateBackend, ChangesTrieTransaction, ChildStorageCollection, InMemoryBackend, @@ -59,9 +59,10 @@ const IN_MEMORY_EXPECT_PROOF: &str = "InMemory state backend has Void error type and always succeeds; qed"; /// Light client backend. 
-pub struct Backend { +pub struct Backend { blockchain: Arc>, - genesis_state: RwLock>>, + genesis_state: RwLock>>>, + state_versions: StateVersions, import_lock: RwLock<()>, } @@ -75,6 +76,8 @@ pub struct ImportOperation { set_head: Option>, storage_update: Option>>, changes_trie_config_update: Option>, + state_version: Option, + genesis_state_version: StateVersion, _phantom: std::marker::PhantomData, } @@ -87,10 +90,15 @@ pub enum GenesisOrUnavailableState { Unavailable, } -impl Backend { +impl Backend { /// Create new light backend. - pub fn new(blockchain: Arc>) -> Self { - Self { blockchain, genesis_state: RwLock::new(None), import_lock: Default::default() } + pub fn new(blockchain: Arc>, state_versions: StateVersions) -> Self { + Self { + blockchain, + genesis_state: RwLock::new(None), + import_lock: Default::default(), + state_versions, + } } /// Get shared blockchain reference. @@ -99,7 +107,7 @@ impl Backend { } } -impl AuxStore for Backend { +impl AuxStore for Backend { fn insert_aux< 'a, 'b: 'a, @@ -119,7 +127,7 @@ impl AuxStore for Backend { } } -impl ClientBackend for Backend> +impl ClientBackend for Backend where Block: BlockT, S: BlockchainStorage, @@ -140,15 +148,20 @@ where set_head: None, storage_update: None, changes_trie_config_update: None, + state_version: None, + genesis_state_version: self.state_versions.genesis_state_version(), _phantom: Default::default(), }) } fn begin_state_operation( &self, - _operation: &mut Self::BlockImportOperation, - _block: BlockId, + operation: &mut Self::BlockImportOperation, + block: BlockId, ) -> ClientResult<()> { + if let Some(number) = self.blockchain.storage().block_number_from_id(&block)? { + operation.state_version = Some(self.state_versions.state_version_at(number)); + } Ok(()) } @@ -261,7 +274,7 @@ where } } -impl RemoteBackend for Backend> +impl RemoteBackend for Backend where Block: BlockT, S: BlockchainStorage + 'static, @@ -355,7 +368,7 @@ where storage.insert(Some(storage_child.child_info), storage_child.data); } - let storage_update = InMemoryBackend::from(storage); + let storage_update = InMemoryBackend::from((storage, self.genesis_state_version)); let (storage_root, _) = storage_update.full_storage_root(std::iter::empty(), child_delta); if commit { self.storage_update = Some(storage_update); @@ -575,4 +588,11 @@ where GenesisOrUnavailableState::Unavailable => None, } } + + fn state_version(&self) -> StateVersion { + match self { + GenesisOrUnavailableState::Genesis(state) => state.state_version(), + GenesisOrUnavailableState::Unavailable => StateVersion::default(), + } + } } diff --git a/client/light/src/fetcher.rs b/client/light/src/fetcher.rs index 5740e407a5e89..75c113744c8bb 100644 --- a/client/light/src/fetcher.rs +++ b/client/light/src/fetcher.rs @@ -29,6 +29,7 @@ use hash_db::{HashDB, Hasher, EMPTY_PREFIX}; use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sp_core::{ convert_hash, + state_version::StateVersion, storage::{ChildInfo, ChildType}, traits::{CodeExecutor, SpawnNamed}, }; @@ -169,7 +170,7 @@ impl> LightDataChecker { remote_roots_proof: StorageProof, ) -> ClientResult<()> { // all the checks are sharing the same storage - let storage = remote_roots_proof.into_memory_db(); + let storage: sp_state_machine::MemoryDB> = remote_roots_proof.into_memory_db(); // remote_roots.keys() are sorted => we can use this to group changes tries roots // that are belongs to the same CHT @@ -201,7 +202,8 @@ impl> LightDataChecker { } // check proof for single changes trie root - let proving_backend = 
TrieBackend::new(storage, local_cht_root); + let proving_backend = + TrieBackend::new(storage, local_cht_root, StateVersion::V0); let remote_changes_trie_root = remote_roots[&block]; cht::check_proof_on_proving_backend::>( local_cht_root, diff --git a/client/light/src/lib.rs b/client/light/src/lib.rs index 0c874326ef2e0..ca71a94496a44 100644 --- a/client/light/src/lib.rs +++ b/client/light/src/lib.rs @@ -19,7 +19,7 @@ //! Light client components. use sp_core::traits::{CodeExecutor, SpawnNamed}; -use sp_runtime::traits::{Block as BlockT, HashFor}; +use sp_runtime::traits::Block as BlockT; use std::sync::Arc; pub mod backend; @@ -50,10 +50,13 @@ pub fn new_light_blockchain>(storage: S) -> A } /// Create an instance of light client backend. -pub fn new_light_backend(blockchain: Arc>) -> Arc>> +pub fn new_light_backend( + blockchain: Arc>, + state_versions: sp_runtime::StateVersions, +) -> Arc> where B: BlockT, S: BlockchainStorage, { - Arc::new(Backend::new(blockchain)) + Arc::new(Backend::new(blockchain, state_versions)) } diff --git a/client/network/src/light_client_requests.rs b/client/network/src/light_client_requests.rs index e18b783f219be..e782b543b9252 100644 --- a/client/network/src/light_client_requests.rs +++ b/client/network/src/light_client_requests.rs @@ -178,8 +178,8 @@ mod tests { type Block = sp_runtime::generic::Block, substrate_test_runtime::Extrinsic>; - fn send_receive(request: sender::Request, pool: &LocalPool) { - let client = Arc::new(substrate_test_runtime_client::new()); + fn send_receive(request: sender::Request, pool: &LocalPool, hashed_value: bool) { + let client = Arc::new(substrate_test_runtime_client::new_with_state(hashed_value)); let (handler, protocol_config) = handler::LightClientRequestHandler::new(&protocol_id(), client); pool.spawner().spawn_obj(handler.run().boxed().into()).unwrap(); @@ -222,6 +222,10 @@ mod tests { #[test] fn send_receive_call() { + send_receive_call_inner(true); + send_receive_call_inner(false); + } + fn send_receive_call_inner(hashed_value: bool) { let chan = oneshot::channel(); let request = light::RemoteCallRequest { block: Default::default(), @@ -232,13 +236,17 @@ mod tests { }; let mut pool = LocalPool::new(); - send_receive(sender::Request::Call { request, sender: chan.0 }, &pool); + send_receive(sender::Request::Call { request, sender: chan.0 }, &pool, hashed_value); assert_eq!(vec![42], pool.run_until(chan.1).unwrap().unwrap()); // ^--- from `DummyFetchChecker::check_execution_proof` } #[test] fn send_receive_read() { + send_receive_read_inner(true); + send_receive_read_inner(false); + } + fn send_receive_read_inner(hashed_value: bool) { let chan = oneshot::channel(); let request = light::RemoteReadRequest { header: dummy_header(), @@ -247,7 +255,7 @@ mod tests { retry_count: None, }; let mut pool = LocalPool::new(); - send_receive(sender::Request::Read { request, sender: chan.0 }, &pool); + send_receive(sender::Request::Read { request, sender: chan.0 }, &pool, hashed_value); assert_eq!( Some(vec![42]), pool.run_until(chan.1).unwrap().unwrap().remove(&b":key"[..]).unwrap() @@ -257,6 +265,10 @@ mod tests { #[test] fn send_receive_read_child() { + send_receive_read_child_inner(true); + send_receive_read_child_inner(false); + } + fn send_receive_read_child_inner(hashed_value: bool) { let chan = oneshot::channel(); let child_info = ChildInfo::new_default(&b":child_storage:default:sub"[..]); let request = light::RemoteReadChildRequest { @@ -267,7 +279,7 @@ mod tests { retry_count: None, }; let mut pool = LocalPool::new(); - 
send_receive(sender::Request::ReadChild { request, sender: chan.0 }, &pool); + send_receive(sender::Request::ReadChild { request, sender: chan.0 }, &pool, hashed_value); assert_eq!( Some(vec![42]), pool.run_until(chan.1).unwrap().unwrap().remove(&b":key"[..]).unwrap() @@ -285,7 +297,7 @@ mod tests { retry_count: None, }; let mut pool = LocalPool::new(); - send_receive(sender::Request::Header { request, sender: chan.0 }, &pool); + send_receive(sender::Request::Header { request, sender: chan.0 }, &pool, true); // The remote does not know block 1: assert_matches!(pool.run_until(chan.1).unwrap(), Err(ClientError::RemoteFetchFailed)); } @@ -308,7 +320,7 @@ mod tests { retry_count: None, }; let mut pool = LocalPool::new(); - send_receive(sender::Request::Changes { request, sender: chan.0 }, &pool); + send_receive(sender::Request::Changes { request, sender: chan.0 }, &pool, true); assert_eq!(vec![(100, 2)], pool.run_until(chan.1).unwrap().unwrap()); // ^--- from `DummyFetchChecker::check_changes_proof` } diff --git a/client/network/src/light_client_requests/handler.rs b/client/network/src/light_client_requests/handler.rs index 609ed35f4a9d1..4359ca1a9aa46 100644 --- a/client/network/src/light_client_requests/handler.rs +++ b/client/network/src/light_client_requests/handler.rs @@ -357,13 +357,13 @@ impl LightClientRequestHandler { Ok(proof) => proof, Err(error) => { log::trace!( - "Remote changes proof request from {} for key {} ({:?}..{:?}) failed with: {}.", - peer, - format!("{} : {}", HexDisplay::from(&request.storage_key), HexDisplay::from(&key.0)), - request.first, - request.last, - error, - ); + "Remote changes proof request from {} for key {} ({:?}..{:?}) failed with: {}.", + peer, + format!("{} : {}", HexDisplay::from(&request.storage_key), HexDisplay::from(&key.0)), + request.first, + request.last, + error, + ); light::ChangesProof:: { max_block: Zero::zero(), diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index 1ffc57de181cf..06f71db70e6f1 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -351,12 +351,13 @@ pub mod generic { let compact = CompactStatus::decode(value)?; let chain_status = match >::decode(value) { Ok(v) => v, - Err(e) => + Err(e) => { if compact.version <= LAST_CHAIN_STATUS_VERSION { - return Err(e) + return Err(e); } else { Vec::new() - }, + } + } }; let CompactStatus { diff --git a/client/network/test/src/block_import.rs b/client/network/test/src/block_import.rs index 7b5804e0edb77..b2957d885569e 100644 --- a/client/network/test/src/block_import.rs +++ b/client/network/test/src/block_import.rs @@ -33,8 +33,8 @@ use substrate_test_runtime_client::{ runtime::{Block, Hash}, }; -fn prepare_good_block() -> (TestClient, Hash, u64, PeerId, IncomingBlock) { - let mut client = substrate_test_runtime_client::new(); +fn prepare_good_block(hashed_value: bool) -> (TestClient, Hash, u64, PeerId, IncomingBlock) { + let mut client = substrate_test_runtime_client::new_with_state(hashed_value); let block = client.new_block(Default::default()).unwrap().build().unwrap().block; block_on(client.import(BlockOrigin::File, block)).unwrap(); @@ -64,13 +64,18 @@ fn prepare_good_block() -> (TestClient, Hash, u64, PeerId, IncomingBlock) #[test] fn import_single_good_block_works() { - let (_, _hash, number, peer_id, block) = prepare_good_block(); + import_single_good_block_works_inner(true); + import_single_good_block_works_inner(false); +} +fn import_single_good_block_works_inner(hashed_value: bool) { + 
let (_, _hash, number, peer_id, block) = prepare_good_block(hashed_value); let mut expected_aux = ImportedAux::default(); expected_aux.is_new_best = true; + let mut client = substrate_test_runtime_client::new_with_state(hashed_value); match block_on(import_single_block( - &mut substrate_test_runtime_client::new(), + &mut client, BlockOrigin::File, block, &mut PassThroughVerifier::new(true), @@ -83,7 +88,7 @@ fn import_single_good_block_works() { #[test] fn import_single_good_known_block_is_ignored() { - let (mut client, _hash, number, _, block) = prepare_good_block(); + let (mut client, _hash, number, _, block) = prepare_good_block(true); match block_on(import_single_block( &mut client, BlockOrigin::File, @@ -97,7 +102,7 @@ fn import_single_good_known_block_is_ignored() { #[test] fn import_single_good_block_without_header_fails() { - let (_, _, _, peer_id, mut block) = prepare_good_block(); + let (_, _, _, peer_id, mut block) = prepare_good_block(true); block.header = None; match block_on(import_single_block( &mut substrate_test_runtime_client::new(), diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index bb49cef8c642c..00751b0554fdb 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -727,13 +727,19 @@ where self.add_full_peer_with_config(Default::default()) } + /// Get state versioning to use with test. + fn state_versions(&self) -> Option>; + /// Add a full peer. fn add_full_peer_with_config(&mut self, config: FullPeerConfig) { let mut test_client_builder = match (config.keep_blocks, config.storage_chain) { - (Some(keep_blocks), true) => TestClientBuilder::with_tx_storage(keep_blocks), - (None, true) => TestClientBuilder::with_tx_storage(u32::MAX), - (Some(keep_blocks), false) => TestClientBuilder::with_pruning_window(keep_blocks), - (None, false) => TestClientBuilder::with_default_backend(), + (Some(keep_blocks), true) => + TestClientBuilder::with_tx_storage(keep_blocks, self.state_versions()), + (None, true) => TestClientBuilder::with_tx_storage(u32::MAX, self.state_versions()), + (Some(keep_blocks), false) => + TestClientBuilder::with_pruning_window(keep_blocks, self.state_versions()), + (None, false) => + TestClientBuilder::with_default_backend_and_state_versions(self.state_versions()), }; if matches!(config.sync_mode, SyncMode::Fast { .. }) { test_client_builder = test_client_builder.set_no_genesis(); @@ -1076,12 +1082,30 @@ where pub struct TestNet { peers: Vec>, fork_choice: ForkChoiceStrategy, + state_versions: Option>, } impl TestNet { /// Create a `TestNet` that used the given fork choice rule. pub fn with_fork_choice(fork_choice: ForkChoiceStrategy) -> Self { - Self { peers: Vec::new(), fork_choice } + Self { peers: Vec::new(), fork_choice, state_versions: None } + } + + /// Create new test network with this many peers. + pub fn new_with_state_versions( + n: usize, + state_versions: sp_runtime::StateVersions, + ) -> Self { + trace!(target: "test_network", "Creating test network with peer config"); + let config = Self::default_config(); + let mut net = Self::from_config(&config); + net.state_versions = Some(state_versions); + + for i in 0..n { + trace!(target: "test_network", "Adding peer {}", i); + net.add_full_peer(); + } + net } } @@ -1092,7 +1116,11 @@ impl TestNetFactory for TestNet { /// Create new test network with peers and given config. 
fn from_config(_config: &ProtocolConfig) -> Self { - TestNet { peers: Vec::new(), fork_choice: ForkChoiceStrategy::LongestChain } + TestNet { + peers: Vec::new(), + fork_choice: ForkChoiceStrategy::LongestChain, + state_versions: None, + } } fn make_verifier( @@ -1126,6 +1154,10 @@ impl TestNetFactory for TestNet { fn mut_peers>)>(&mut self, closure: F) { closure(&mut self.peers); } + + fn state_versions(&self) -> Option> { + self.state_versions.clone() + } } pub struct ForceFinalized(PeersClient); @@ -1195,4 +1227,8 @@ impl TestNetFactory for JustificationTestNet { ) { (client.as_block_import(), Some(Box::new(ForceFinalized(client))), Default::default()) } + + fn state_versions(&self) -> Option> { + self.0.state_versions.clone() + } } diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index c86ccfeac3ed1..2d9bfd2daab5c 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -157,6 +157,21 @@ fn sync_from_two_peers_works() { assert!(!net.peer(0).is_major_syncing()); } +#[test] +fn sync_from_two_peers_with_versioning_switch_works() { + sp_tracing::try_init_simple(); + let mut state_versions = sp_runtime::StateVersions::::default(); + state_versions.add((0, sp_runtime::StateVersion::V0)); + state_versions.add((10, sp_runtime::StateVersion::default())); + let mut net = TestNet::new_with_state_versions(3, state_versions); + net.peer(1).push_blocks(100, false); + net.peer(2).push_blocks(100, false); + net.block_until_sync(); + let peer1 = &net.peers()[1]; + assert!(net.peers()[0].blockchain_canon_equals(peer1)); + assert!(!net.peer(0).is_major_syncing()); +} + #[test] fn sync_from_two_peers_with_ancestry_search_works() { sp_tracing::try_init_simple(); diff --git a/client/rpc/src/chain/tests.rs b/client/rpc/src/chain/tests.rs index caa9f33138b86..7fb76e4bef5e9 100644 --- a/client/rpc/src/chain/tests.rs +++ b/client/rpc/src/chain/tests.rs @@ -65,7 +65,11 @@ fn should_return_header() { #[test] fn should_return_a_block() { - let mut client = Arc::new(substrate_test_runtime_client::new()); + should_return_a_block_inner(true); + should_return_a_block_inner(false); +} +fn should_return_a_block_inner(hashed_value: bool) { + let mut client = Arc::new(substrate_test_runtime_client::new_with_state(hashed_value)); let api = new_full(client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor))); let block = client.new_block(Default::default()).unwrap().build().unwrap().block; diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index ef13b37ce42fe..c9fc837651466 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -411,7 +411,8 @@ fn should_query_storage() { ); } - run_tests(Arc::new(substrate_test_runtime_client::new()), false); + run_tests(Arc::new(substrate_test_runtime_client::new_with_state(false)), false); + run_tests(Arc::new(substrate_test_runtime_client::new_with_state(true)), false); run_tests( Arc::new( TestClientBuilder::new() @@ -433,7 +434,11 @@ fn should_split_ranges() { #[test] fn should_return_runtime_version() { - let client = Arc::new(substrate_test_runtime_client::new()); + should_return_runtime_version_inner(true); + should_return_runtime_version_inner(false); +} +fn should_return_runtime_version_inner(hashed_value: bool) { + let client = Arc::new(substrate_test_runtime_client::new_with_state(hashed_value)); let (api, _child) = new_full( client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor)), diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs 
index f0c037aee232f..f48715717f24f 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -59,8 +59,8 @@ use sp_core::traits::{CodeExecutor, SpawnNamed}; use sp_keystore::{CryptoStore, SyncCryptoStore, SyncCryptoStorePtr}; use sp_runtime::{ generic::BlockId, - traits::{Block as BlockT, BlockIdTo, HashFor, Zero}, - BuildStorage, + traits::{Block as BlockT, BlockIdTo, Zero}, + BuildStorage, StateVersions, }; use std::{str::FromStr, sync::Arc, time::SystemTime}; @@ -143,15 +143,14 @@ pub type TLightClient = TLightClientWithBackend>; /// Light client backend type. -pub type TLightBackend = - sc_light::Backend, HashFor>; +pub type TLightBackend = sc_light::Backend, TBl>; /// Light call executor type. pub type TLightCallExecutor = sc_light::GenesisCallExecutor< - sc_light::Backend, HashFor>, + sc_light::Backend, TBl>, crate::client::LocalCallExecutor< TBl, - sc_light::Backend, HashFor>, + sc_light::Backend, TBl>, TExec, >, >; @@ -309,7 +308,18 @@ where transaction_storage: config.transaction_storage.clone(), }; - let backend = new_db_backend(db_config)?; + let state_versions = StateVersions::from_conf( + config + .chain_spec + .state_versions() + .iter() + .map(|(number, version)| (number.as_str(), *version)), + ) + .ok_or_else(|| { + Error::Application(Box::from("Invalid state versions for chain spec".to_string())) + })?; + + let backend = new_db_backend(db_config, state_versions.clone())?; let extensions = sc_client_api::execution_extensions::ExecutionExtensions::new( config.execution_strategies.clone(), @@ -351,6 +361,7 @@ where sc_network::config::SyncMode::Fast { .. } | sc_network::config::SyncMode::Warp ), wasm_runtime_substitutes, + state_versions, }, )?; @@ -394,7 +405,19 @@ where Box::new(task_manager.spawn_handle()), )); let on_demand = Arc::new(sc_network::config::OnDemand::new(fetch_checker)); - let backend = sc_light::new_light_backend(light_blockchain); + + let state_versions = StateVersions::from_conf( + config + .chain_spec + .state_versions() + .iter() + .map(|(number, version)| (number.as_str(), *version)), + ) + .ok_or_else(|| { + Error::Application(Box::from("Invalid state versions for chain spec".to_string())) + })?; + + let backend = sc_light::new_light_backend(light_blockchain, state_versions); let client = Arc::new(light::new_light( backend.clone(), config.chain_spec.as_storage_builder(), @@ -410,13 +433,14 @@ where /// Create an instance of default DB-backend backend. pub fn new_db_backend( settings: DatabaseSettings, + state_versions: StateVersions, ) -> Result>, sp_blockchain::Error> where Block: BlockT, { const CANONICALIZATION_DELAY: u64 = 4096; - Ok(Arc::new(Backend::new(settings, CANONICALIZATION_DELAY)?)) + Ok(Arc::new(Backend::new(settings, CANONICALIZATION_DELAY, state_versions)?)) } /// Create an instance of client backed by given backend. 
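Aside (illustrative, not part of the patch): the builder hunks above derive the node's `StateVersions` schedule from the chain spec, whose entries key state versions by block-number strings, and thread it into both the full and the light backend. A rough sketch of that conversion, assuming the `StateVersions::from_conf` helper added in `primitives/runtime/src/state_version.rs` further down; the test runtime `Block` and the `33` threshold are illustrative values only (the docs in this patch require the threshold to be at least the hasher output size):

use sp_runtime::{StateVersion, StateVersions};
use substrate_test_runtime_client::runtime::Block;

fn state_versions_from_spec() -> StateVersions<Block> {
    // Chain-spec style entries: (block number as a string, state version).
    let conf = vec![("0", StateVersion::V0), ("1000", StateVersion::V1 { threshold: 33 })];
    // `from_conf` parses the block numbers; a malformed number yields `None`,
    // which the service builder above surfaces as an "Invalid state versions" error.
    let versions =
        StateVersions::<Block>::from_conf(conf).expect("hard-coded configuration is valid");
    assert_eq!(versions.genesis_state_version(), StateVersion::V0);
    assert_eq!(versions.state_version_at(2_000), StateVersion::V1 { threshold: 33 });
    versions
}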
diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs index 9b8774ce6d497..0fed083e85b1b 100644 --- a/client/service/src/client/call_executor.rs +++ b/client/service/src/client/call_executor.rs @@ -395,7 +395,10 @@ mod tests { >( backend.clone(), executor.clone(), - &substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), + &( + substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), + sp_runtime::StateVersion::default(), + ), None, Box::new(TaskExecutor::new()), None, diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index f7d93d036a3fa..b148a6dda4ceb 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -199,6 +199,8 @@ pub struct ClientConfig { /// Map of WASM runtime substitute starting at the child of the given block until the runtime /// version doesn't match anymore. pub wasm_runtime_substitutes: HashMap>, + /// State version to use with chain. + pub state_versions: sp_runtime::StateVersions, } impl Default for ClientConfig { @@ -209,6 +211,7 @@ impl Default for ClientConfig { wasm_runtime_overrides: None, no_genesis: false, wasm_runtime_substitutes: HashMap::new(), + state_versions: Default::default(), } } } @@ -334,6 +337,7 @@ where config: ClientConfig, ) -> sp_blockchain::Result { let info = backend.blockchain().info(); + if info.finalized_state.is_none() { let genesis_storage = build_genesis_storage.build_storage().map_err(sp_blockchain::Error::Storage)?; @@ -1262,6 +1266,11 @@ where trace!("Collected {} uncles", uncles.len()); Ok(uncles) } + + /// Access to configured state versions. + pub fn state_versions(&self) -> &sp_runtime::StateVersions { + &self.config.state_versions + } } impl UsageProvider for Client diff --git a/client/service/src/client/light.rs b/client/service/src/client/light.rs index 7c13b98843e05..e50c41aa3acba 100644 --- a/client/service/src/client/light.rs +++ b/client/service/src/client/light.rs @@ -25,10 +25,7 @@ use sc_executor::RuntimeVersionOf; use sc_telemetry::TelemetryHandle; use sp_blockchain::Result as ClientResult; use sp_core::traits::{CodeExecutor, SpawnNamed}; -use sp_runtime::{ - traits::{Block as BlockT, HashFor}, - BuildStorage, -}; +use sp_runtime::{traits::Block as BlockT, BuildStorage}; use super::{ call_executor::LocalCallExecutor, @@ -39,7 +36,7 @@ use sc_light::{Backend, GenesisCallExecutor}; /// Create an instance of light client. 
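Aside (illustrative, not part of the patch): the call sites above now pass the genesis builder a `(Storage, StateVersion)` pair rather than a bare `Storage`, because the tuple gains a `BuildStorage` implementation later in this patch and thereby tells the client which trie layout to use when computing the genesis state root. A minimal sketch of that pairing:

use sp_core::storage::Storage;
use sp_runtime::{BuildStorage, StateVersion};

fn genesis_with_version() {
    let mut storage = Storage::default();
    storage.top.insert(b"key".to_vec(), b"value".to_vec());

    // `StateVersion::V0` keeps the pre-existing layout; `StateVersion::default()`
    // selects the hashed-value layout introduced by this patch.
    let genesis = (storage, StateVersion::V0);
    assert_eq!(genesis.state_version(), StateVersion::V0);

    let built = genesis.build_storage().expect("assimilating plain storage does not fail");
    assert_eq!(built.top.get(&b"key"[..]), Some(&b"value".to_vec()));
}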
pub fn new_light( - backend: Arc>>, + backend: Arc>, genesis_storage: &dyn BuildStorage, code_executor: E, spawn_handle: Box, @@ -47,11 +44,8 @@ pub fn new_light( telemetry: Option, ) -> ClientResult< Client< - Backend>, - GenesisCallExecutor< - Backend>, - LocalCallExecutor>, E>, - >, + Backend, + GenesisCallExecutor, LocalCallExecutor, E>>, B, RA, >, diff --git a/client/service/src/client/wasm_override.rs b/client/service/src/client/wasm_override.rs index 6d5a071269d4d..52a17a202f15d 100644 --- a/client/service/src/client/wasm_override.rs +++ b/client/service/src/client/wasm_override.rs @@ -181,7 +181,7 @@ where code: &WasmBlob, heap_pages: Option, ) -> Result { - let mut ext = BasicExternalities::default(); + let mut ext: BasicExternalities = sp_runtime::StateVersion::default().into(); executor .runtime_version(&mut ext, &code.runtime_code(heap_pages)) .map_err(|e| WasmOverrideError::VersionInvalid(format!("{:?}", e)).into()) diff --git a/client/service/src/client/wasm_substitutes.rs b/client/service/src/client/wasm_substitutes.rs index 28975790e9b57..b3e980b63c675 100644 --- a/client/service/src/client/wasm_substitutes.rs +++ b/client/service/src/client/wasm_substitutes.rs @@ -178,7 +178,7 @@ where executor: &Executor, code: &WasmSubstitute, ) -> Result { - let mut ext = BasicExternalities::default(); + let mut ext: BasicExternalities = sp_runtime::StateVersion::default().into(); executor .runtime_version(&mut ext, &code.runtime_code(None)) .map_err(|e| WasmSubstituteError::VersionInvalid(format!("{:?}", e)).into()) diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs index fb9566d208f76..4f16f388a0ef0 100644 --- a/client/service/test/src/client/light.rs +++ b/client/service/test/src/client/light.rs @@ -269,8 +269,8 @@ fn local_state_is_created_when_genesis_state_is_available() { let header0 = substrate_test_runtime_client::runtime::Header::new(0, def, def, def, Default::default()); - let backend: Backend<_, BlakeTwo256> = - Backend::new(Arc::new(DummyBlockchain::new(DummyStorage::new()))); + let backend: Backend<_, Block> = + Backend::new(Arc::new(DummyBlockchain::new(DummyStorage::new())), Default::default()); let mut op = backend.begin_operation().unwrap(); op.set_block_data(header0, None, None, None, NewBlockState::Final).unwrap(); op.set_genesis_state(Default::default(), true).unwrap(); @@ -284,8 +284,8 @@ fn local_state_is_created_when_genesis_state_is_available() { #[test] fn unavailable_state_is_created_when_genesis_state_is_unavailable() { - let backend: Backend<_, BlakeTwo256> = - Backend::new(Arc::new(DummyBlockchain::new(DummyStorage::new()))); + let backend: Backend<_, Block> = + Backend::new(Arc::new(DummyBlockchain::new(DummyStorage::new())), Default::default()); match backend.state_at(BlockId::Number(0)).unwrap() { GenesisOrUnavailableState::Unavailable => (), @@ -295,7 +295,8 @@ fn unavailable_state_is_created_when_genesis_state_is_unavailable() { #[test] fn light_aux_store_is_updated_via_non_importing_op() { - let backend = Backend::new(Arc::new(DummyBlockchain::new(DummyStorage::new()))); + let backend = + Backend::new(Arc::new(DummyBlockchain::new(DummyStorage::new())), Default::default()); let mut op = ClientBackend::::begin_operation(&backend).unwrap(); BlockImportOperation::::insert_aux(&mut op, vec![(vec![1], Some(vec![2]))]).unwrap(); ClientBackend::::commit_operation(&backend, op).unwrap(); @@ -305,6 +306,10 @@ fn light_aux_store_is_updated_via_non_importing_op() { #[test] fn execution_proof_is_generated_and_checked() 
{ + execution_proof_is_generated_and_checked_inner(true); + execution_proof_is_generated_and_checked_inner(false); +} +fn execution_proof_is_generated_and_checked_inner(hashed_value: bool) { fn execute(remote_client: &TestClient, at: u64, method: &'static str) -> (Vec, Vec) { let remote_block_id = BlockId::Number(at); let remote_header = remote_client.header(&remote_block_id).unwrap().unwrap(); @@ -378,7 +383,8 @@ fn execution_proof_is_generated_and_checked() { } // prepare remote client - let mut remote_client = substrate_test_runtime_client::new(); + let mut remote_client = substrate_test_runtime_client::new_with_state(hashed_value); + for i in 1u32..3u32 { let mut digest = Digest::default(); digest.push(sp_runtime::generic::DigestItem::Other::(i.to_le_bytes().to_vec())); @@ -452,16 +458,16 @@ type TestChecker = LightDataChecker< DummyStorage, >; -fn prepare_for_read_proof_check() -> (TestChecker, Header, StorageProof, u32) { +fn prepare_for_read_proof_check(hashed_value: bool) -> (TestChecker, Header, StorageProof, u32) { // prepare remote client - let remote_client = substrate_test_runtime_client::new(); + let remote_client = substrate_test_runtime_client::new_with_state(hashed_value); let remote_block_id = BlockId::Number(0); let remote_block_hash = remote_client.block_hash(0).unwrap().unwrap(); let mut remote_block_header = remote_client.header(&remote_block_id).unwrap().unwrap(); remote_block_header.state_root = remote_client .state_at(&remote_block_id) .unwrap() - .storage_root(::std::iter::empty()) + .storage_root(std::iter::empty()) .0 .into(); @@ -502,7 +508,7 @@ fn prepare_for_read_child_proof_check() -> (TestChecker, Header, StorageProof, V remote_block_header.state_root = remote_client .state_at(&remote_block_id) .unwrap() - .storage_root(::std::iter::empty()) + .storage_root(std::iter::empty()) .0 .into(); @@ -530,9 +536,12 @@ fn prepare_for_read_child_proof_check() -> (TestChecker, Header, StorageProof, V (local_checker, remote_block_header, remote_read_proof, child_value) } -fn prepare_for_header_proof_check(insert_cht: bool) -> (TestChecker, Hash, Header, StorageProof) { +fn prepare_for_header_proof_check( + insert_cht: bool, + hashed_value: bool, +) -> (TestChecker, Hash, Header, StorageProof) { // prepare remote client - let mut remote_client = substrate_test_runtime_client::new(); + let mut remote_client = substrate_test_runtime_client::new_with_state(hashed_value); let mut local_headers_hashes = Vec::new(); for i in 0..4 { let block = remote_client.new_block(Default::default()).unwrap().build().unwrap().block; @@ -567,7 +576,7 @@ fn prepare_for_header_proof_check(insert_cht: bool) -> (TestChecker, Hash, Heade fn header_with_computed_extrinsics_root(extrinsics: Vec) -> Header { use sp_trie::{trie_types::Layout, TrieConfiguration}; let iter = extrinsics.iter().map(Encode::encode); - let extrinsics_root = Layout::::ordered_trie_root(iter); + let extrinsics_root = Layout::::default().ordered_trie_root(iter); // only care about `extrinsics_root` Header::new(0, extrinsics_root, H256::zero(), H256::zero(), Default::default()) @@ -575,8 +584,12 @@ fn header_with_computed_extrinsics_root(extrinsics: Vec) -> Header { #[test] fn storage_read_proof_is_generated_and_checked() { + storage_read_proof_is_generated_and_checked_inner(true); + storage_read_proof_is_generated_and_checked_inner(false); +} +fn storage_read_proof_is_generated_and_checked_inner(hashed_value: bool) { let (local_checker, remote_block_header, remote_read_proof, heap_pages) = - prepare_for_read_proof_check(); + 
prepare_for_read_proof_check(hashed_value); assert_eq!( (&local_checker as &dyn FetchChecker) .check_read_proof( @@ -623,8 +636,12 @@ fn storage_child_read_proof_is_generated_and_checked() { #[test] fn header_proof_is_generated_and_checked() { + header_proof_is_generated_and_checked_inner(true); + header_proof_is_generated_and_checked_inner(false); +} +fn header_proof_is_generated_and_checked_inner(hashed: bool) { let (local_checker, local_cht_root, remote_block_header, remote_header_proof) = - prepare_for_header_proof_check(true); + prepare_for_header_proof_check(true, hashed); assert_eq!( (&local_checker as &dyn FetchChecker) .check_header_proof( @@ -644,7 +661,7 @@ fn header_proof_is_generated_and_checked() { #[test] fn check_header_proof_fails_if_cht_root_is_invalid() { let (local_checker, _, mut remote_block_header, remote_header_proof) = - prepare_for_header_proof_check(true); + prepare_for_header_proof_check(true, true); remote_block_header.number = 100; assert!((&local_checker as &dyn FetchChecker) .check_header_proof( @@ -662,7 +679,7 @@ fn check_header_proof_fails_if_cht_root_is_invalid() { #[test] fn check_header_proof_fails_if_invalid_header_provided() { let (local_checker, local_cht_root, mut remote_block_header, remote_header_proof) = - prepare_for_header_proof_check(true); + prepare_for_header_proof_check(true, true); remote_block_header.number = 100; assert!((&local_checker as &dyn FetchChecker) .check_header_proof( diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 295e941f7ceb1..afd0632d71cae 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -40,7 +40,7 @@ use sp_state_machine::{ backend::Backend as _, ExecutionStrategy, InMemoryBackend, OverlayedChanges, StateMachine, }; use sp_storage::{ChildInfo, StorageKey}; -use sp_trie::{trie_types::Layout, TrieConfiguration}; +use sp_trie::{Layout, TrieConfiguration}; use std::{ collections::{HashMap, HashSet}, sync::Arc, @@ -170,7 +170,7 @@ fn construct_block( let transactions = txs.into_iter().map(|tx| tx.into_signed_tx()).collect::>(); let iter = transactions.iter().map(Encode::encode); - let extrinsics_root = Layout::::ordered_trie_root(iter).into(); + let extrinsics_root = Layout::::default().ordered_trie_root(iter).into(); let mut header = Header { parent_hash, @@ -250,6 +250,7 @@ fn block1(genesis_hash: Hash, backend: &InMemoryBackend) -> (Vec A1 -> A2 -> A3 -> A4 -> A5 // A1 -> B2 -> B3 -> B4 - // B2 -> C3 - // A1 -> D2 + // B2 -> C3 + // A1 -> D2 let mut client = substrate_test_runtime_client::new(); // G -> A1 @@ -1443,6 +1454,7 @@ fn doesnt_import_blocks_that_revert_finality() { source: DatabaseSource::RocksDb { path: tmp.path().into(), cache_size: 1024 }, }, u64::MAX, + Default::default(), ) .unwrap(), ); @@ -1658,6 +1670,7 @@ fn returns_status_for_pruned_blocks() { source: DatabaseSource::RocksDb { path: tmp.path().into(), cache_size: 1024 }, }, u64::MAX, + Default::default(), ) .unwrap(), ); @@ -2026,7 +2039,11 @@ fn storage_keys_iter_prefix_and_start_key_works() { #[test] fn storage_keys_iter_works() { - let client = substrate_test_runtime_client::new(); + storage_keys_iter_works_inner(true); + storage_keys_iter_works_inner(false); +} +fn storage_keys_iter_works_inner(hashed_value: bool) { + let client = substrate_test_runtime_client::new_with_state(hashed_value); let prefix = StorageKey(hex!("").to_vec()); @@ -2092,7 +2109,10 @@ fn cleans_up_closed_notification_sinks_on_block_import() { 
substrate_test_runtime_client::runtime::RuntimeApi, >( substrate_test_runtime_client::new_native_executor(), - &substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), + &( + substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), + sp_runtime::StateVersion::default(), + ), None, None, None, diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index 5f1ae23c2f531..ebfd5bcb0ebab 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -846,9 +846,17 @@ mod tests { t.into() } + fn new_test_ext_v0(balance_factor: Balance) -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + pallet_balances::GenesisConfig:: { balances: vec![(1, 111 * balance_factor)] } + .assimilate_storage(&mut t) + .unwrap(); + (t, sp_runtime::StateVersion::V0).into() + } + #[test] fn block_import_works() { - new_test_ext(1).execute_with(|| { + new_test_ext_v0(1).execute_with(|| { Executive::execute_block(Block { header: Header { parent_hash: [69u8; 32].into(), @@ -871,7 +879,7 @@ mod tests { #[test] #[should_panic] fn block_import_of_bad_state_root_fails() { - new_test_ext(1).execute_with(|| { + new_test_ext_v0(1).execute_with(|| { Executive::execute_block(Block { header: Header { parent_hash: [69u8; 32].into(), @@ -891,7 +899,7 @@ mod tests { #[test] #[should_panic] fn block_import_of_bad_extrinsic_root_fails() { - new_test_ext(1).execute_with(|| { + new_test_ext_v0(1).execute_with(|| { Executive::execute_block(Block { header: Header { parent_hash: [69u8; 32].into(), diff --git a/frame/support/procedural/src/construct_runtime/expand/config.rs b/frame/support/procedural/src/construct_runtime/expand/config.rs index 5e1b9d94700e6..fd6bea304b1c9 100644 --- a/frame/support/procedural/src/construct_runtime/expand/config.rs +++ b/frame/support/procedural/src/construct_runtime/expand/config.rs @@ -75,6 +75,10 @@ pub fn expand_outer_config( #[cfg(any(feature = "std", test))] impl #scrate::sp_runtime::BuildStorage for GenesisConfig { + fn state_version(&self) -> #scrate::sp_runtime::StateVersion { + unimplemented!("Genesis build storage do not support state version"); + } + fn assimilate_storage( &self, storage: &mut #scrate::sp_runtime::Storage, diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index bc0f027e1efaa..81287b1fac64b 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -282,7 +282,7 @@ fn generate_runtime_api_base_structures() -> Result { fn extract_proof(&mut self) -> Option<#crate_::StorageProof> { self.recorder .take() - .map(|recorder| recorder.to_storage_proof()) + .map(|recorder| recorder.to_storage_proof::<#crate_::HashFor>()) } fn into_storage_changes( diff --git a/primitives/blockchain/src/error.rs b/primitives/blockchain/src/error.rs index ef3afa5bce942..6e60b2da6549f 100644 --- a/primitives/blockchain/src/error.rs +++ b/primitives/blockchain/src/error.rs @@ -90,6 +90,9 @@ pub enum Error { #[error("Failed to get runtime version: {0}")] VersionInvalid(String), + #[error("Genesis config provided is invalid")] + GenesisInvalid, + #[error("Provided state is invalid")] InvalidState, diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index 0a61c90d71357..20087c459a59c 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -66,6 +66,7 @@ mod hasher; pub mod offchain; pub mod sandbox; pub mod sr25519; +pub 
mod state_version; pub mod testing; #[cfg(feature = "std")] pub mod traits; diff --git a/primitives/core/src/state_version.rs b/primitives/core/src/state_version.rs new file mode 100644 index 0000000000000..5afd404e16ccc --- /dev/null +++ b/primitives/core/src/state_version.rs @@ -0,0 +1,54 @@ +// This file is part of Substrate. + +// Copyright (C) 2021-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Substrate state versioning core types. + +use codec::{Decode, Encode}; + +/// Default state version to use with a new substrate chain. +/// +/// When this value changes, existing chains will need to force their +/// initial state versioning in their chain spec for block 0. +/// Defining the genesis version in the chain spec is therefore good practice, +/// and this default should mostly be used for testing. +pub const DEFAULT_STATE_VERSION: StateVersion = + StateVersion::V1 { threshold: crate::storage::TEST_DEFAULT_INLINE_VALUE_THESHOLD }; + +/// State versions supported by a substrate chain. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Encode, Decode)] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "std", derive(parity_util_mem::MallocSizeOf))] +pub enum StateVersion { + /// Patricia trie Radix 16 without extension node. + V0, + /// Patricia trie Radix 16 without extension node, + /// with inner hashing applied to values of sufficient size. + V1 { + /// Inner hashing applies only when the value + /// size reaches `threshold`. + /// The threshold should ALWAYS be bigger than + /// the hasher output size due to inline nodes + /// (at least 33 with most hashers). + threshold: u32, + }, +} + +impl Default for StateVersion { + fn default() -> Self { + DEFAULT_STATE_VERSION + } +} diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 5faeb59c72db6..bddebaa942610 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -51,11 +51,13 @@ use sp_core::{ offchain::{ HttpError, HttpRequestId, HttpRequestStatus, OpaqueNetworkState, StorageKind, Timestamp, }, - sr25519, LogLevel, LogLevelFilter, OpaquePeerId, H256, + sr25519, + state_version::StateVersion, + LogLevel, LogLevelFilter, OpaquePeerId, H256, }; #[cfg(feature = "std")] -use sp_trie::{trie_types::Layout, TrieConfiguration}; +use sp_trie::{Layout, TrieConfiguration}; use sp_runtime_interface::{ pass_by::{PassBy, PassByCodec}, @@ -396,22 +398,22 @@ pub trait Trie { /// A trie root formed from the iterated items. fn blake2_256_root(input: Vec<(Vec, Vec)>) -> H256 { - Layout::::trie_root(input) + Layout::::default().trie_root(input) } /// A trie root formed from the enumerated items. fn blake2_256_ordered_root(input: Vec>) -> H256 { - Layout::::ordered_trie_root(input) + Layout::::default().ordered_trie_root(input) } /// A trie root formed from the iterated items. 
fn keccak_256_root(input: Vec<(Vec, Vec)>) -> H256 { - Layout::::trie_root(input) + Layout::::default().trie_root(input) } /// A trie root formed from the enumerated items. fn keccak_256_ordered_root(input: Vec>) -> H256 { - Layout::::ordered_trie_root(input) + Layout::::default().ordered_trie_root(input) } /// Verify trie proof @@ -477,7 +479,7 @@ pub trait Misc { fn runtime_version(&mut self, wasm: &[u8]) -> Option> { use sp_core::traits::ReadRuntimeVersionExt; - let mut ext = sp_state_machine::BasicExternalities::default(); + let mut ext: sp_state_machine::BasicExternalities = StateVersion::default().into(); match self .extension::() @@ -1532,7 +1534,8 @@ mod tests { #[test] fn storage_works() { - let mut t = BasicExternalities::default(); + let state_version = StateVersion::default(); + let mut t: BasicExternalities = state_version.into(); t.execute_with(|| { assert_eq!(storage::get(b"hello"), None); storage::set(b"hello", b"world"); @@ -1541,24 +1544,35 @@ mod tests { storage::set(b"foo", &[1, 2, 3][..]); }); - t = BasicExternalities::new(Storage { - top: map![b"foo".to_vec() => b"bar".to_vec()], - children_default: map![], - }); + t = BasicExternalities::new( + Storage { top: map![b"foo".to_vec() => b"bar".to_vec()], children_default: map![] }, + state_version, + ); t.execute_with(|| { assert_eq!(storage::get(b"hello"), None); assert_eq!(storage::get(b"foo"), Some(b"bar".to_vec())); }); + + let value = vec![7u8; 35]; + let storage = + Storage { top: map![b"foo00".to_vec() => value.clone()], children_default: map![] }; + t = BasicExternalities::new(storage, state_version); + + t.execute_with(|| { + assert_eq!(storage::get(b"hello"), None); + assert_eq!(storage::get(b"foo00"), Some(value.clone())); + }); } #[test] fn read_storage_works() { + let state_version = StateVersion::default(); let value = b"\x0b\0\0\0Hello world".to_vec(); - let mut t = BasicExternalities::new(Storage { - top: map![b":test".to_vec() => value.clone()], - children_default: map![], - }); + let mut t = BasicExternalities::new( + Storage { top: map![b":test".to_vec() => value.clone()], children_default: map![] }, + state_version, + ); t.execute_with(|| { let mut v = [0u8; 4]; @@ -1572,15 +1586,19 @@ mod tests { #[test] fn clear_prefix_works() { - let mut t = BasicExternalities::new(Storage { - top: map![ - b":a".to_vec() => b"\x0b\0\0\0Hello world".to_vec(), - b":abcd".to_vec() => b"\x0b\0\0\0Hello world".to_vec(), - b":abc".to_vec() => b"\x0b\0\0\0Hello world".to_vec(), - b":abdd".to_vec() => b"\x0b\0\0\0Hello world".to_vec() - ], - children_default: map![], - }); + let state_version = StateVersion::default(); + let mut t = BasicExternalities::new( + Storage { + top: map![ + b":a".to_vec() => b"\x0b\0\0\0Hello world".to_vec(), + b":abcd".to_vec() => b"\x0b\0\0\0Hello world".to_vec(), + b":abc".to_vec() => b"\x0b\0\0\0Hello world".to_vec(), + b":abdd".to_vec() => b"\x0b\0\0\0Hello world".to_vec() + ], + children_default: map![], + }, + state_version, + ); t.execute_with(|| { assert!(matches!( @@ -1597,7 +1615,8 @@ mod tests { #[test] fn batch_verify_start_finish_works() { - let mut ext = BasicExternalities::default(); + let state_version = StateVersion::default(); + let mut ext: sp_state_machine::BasicExternalities = state_version.into(); ext.register_extension(TaskExecutorExt::new(TaskExecutor::new())); ext.execute_with(|| { @@ -1615,7 +1634,8 @@ mod tests { #[test] fn long_sr25519_batching() { - let mut ext = BasicExternalities::default(); + let state_version = StateVersion::default(); + let mut ext: 
sp_state_machine::BasicExternalities = state_version.into(); ext.register_extension(TaskExecutorExt::new(TaskExecutor::new())); ext.execute_with(|| { let pair = sr25519::Pair::generate_with_phrase(None).0; @@ -1642,7 +1662,8 @@ mod tests { #[test] fn batching_works() { - let mut ext = BasicExternalities::default(); + let state_version = StateVersion::default(); + let mut ext: sp_state_machine::BasicExternalities = state_version.into(); ext.register_extension(TaskExecutorExt::new(TaskExecutor::new())); ext.execute_with(|| { // invalid ed25519 signature diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index 4a9c6087fa5cc..ee7871342b975 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -57,6 +57,7 @@ mod multiaddress; pub mod offchain; pub mod runtime_logger; mod runtime_string; +pub mod state_version; #[cfg(feature = "std")] pub mod testing; pub mod traits; @@ -93,6 +94,8 @@ pub use sp_arithmetic::{ pub use either::Either; +pub use state_version::{StateVersion, StateVersions}; + /// An abstraction over justification for a block's validity under a consensus algorithm. /// /// Essentially a finality proof. The exact formulation will vary between consensus @@ -169,6 +172,8 @@ pub use serde::{de::DeserializeOwned, Deserialize, Serialize}; /// Complex storage builder stuff. #[cfg(feature = "std")] pub trait BuildStorage { + /// State version to use with storage. + fn state_version(&self) -> StateVersion; /// Build the storage out of this builder. fn build_storage(&self) -> Result { let mut storage = Default::default(); @@ -190,10 +195,14 @@ pub trait BuildModuleGenesisStorage: Sized { } #[cfg(feature = "std")] -impl BuildStorage for sp_core::storage::Storage { +impl BuildStorage for (sp_core::storage::Storage, StateVersion) { + fn state_version(&self) -> StateVersion { + self.1 + } + fn assimilate_storage(&self, storage: &mut sp_core::storage::Storage) -> Result<(), String> { - storage.top.extend(self.top.iter().map(|(k, v)| (k.clone(), v.clone()))); - for (k, other_map) in self.children_default.iter() { + storage.top.extend(self.0.top.iter().map(|(k, v)| (k.clone(), v.clone()))); + for (k, other_map) in self.0.children_default.iter() { let k = k.clone(); if let Some(map) = storage.children_default.get_mut(&k) { map.data.extend(other_map.data.iter().map(|(k, v)| (k.clone(), v.clone()))); @@ -210,6 +219,11 @@ impl BuildStorage for sp_core::storage::Storage { #[cfg(feature = "std")] impl BuildStorage for () { + fn state_version(&self) -> StateVersion { + // Warning just a stub implementation, should not be use. 
+ StateVersion::default() + } + fn assimilate_storage(&self, _: &mut sp_core::storage::Storage) -> Result<(), String> { Err("`assimilate_storage` not implemented for `()`".into()) } @@ -992,7 +1006,7 @@ mod tests { #[test] #[should_panic(expected = "Signature verification has not been called")] fn batching_still_finishes_when_not_called_directly() { - let mut ext = sp_state_machine::BasicExternalities::default(); + let mut ext = sp_state_machine::BasicExternalities::new_empty(Default::default()); ext.register_extension(sp_core::traits::TaskExecutorExt::new( sp_core::testing::TaskExecutor::new(), )); @@ -1006,7 +1020,7 @@ mod tests { #[test] #[should_panic(expected = "Hey, I'm an error")] fn batching_does_not_panic_while_thread_is_already_panicking() { - let mut ext = sp_state_machine::BasicExternalities::default(); + let mut ext = sp_state_machine::BasicExternalities::new_empty(Default::default()); ext.register_extension(sp_core::traits::TaskExecutorExt::new( sp_core::testing::TaskExecutor::new(), )); diff --git a/primitives/runtime/src/state_version.rs b/primitives/runtime/src/state_version.rs new file mode 100644 index 0000000000000..2a4cd28a23b3f --- /dev/null +++ b/primitives/runtime/src/state_version.rs @@ -0,0 +1,103 @@ +// This file is part of Substrate. + +// Copyright (C) 2021-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Substrate state versioning and migrations related types. + +use crate::traits::{Block, NumberFor}; +use sp_arithmetic::traits::Zero; +pub use sp_core::state_version::{StateVersion, DEFAULT_STATE_VERSION}; +use sp_std::{str::FromStr, vec::Vec}; + +/// Multiple versions of state in use for a chain. +#[derive(Clone, crate::RuntimeDebug)] +pub struct StateVersions { + canonical_states: Vec<(NumberFor, StateVersion)>, +} + +impl Default for StateVersions { + fn default() -> Self { + StateVersions { canonical_states: Vec::new() } + } +} + +impl StateVersions { + /// Access genesis state version. + /// This uses default state if undefined. + pub fn genesis_state_version(&self) -> StateVersion { + if let Some((number, version)) = self.canonical_states.get(0) { + if number.is_zero() { + return *version + } + } + DEFAULT_STATE_VERSION + } + + /// Resolve state version for a given + /// block height. + pub fn state_version_at(&self, at: NumberFor) -> StateVersion { + let mut version = DEFAULT_STATE_VERSION; + for (number, state) in self.canonical_states.iter() { + if number > &at { + break + } + version = *state; + } + version + } + + /// Modify configuration, mostly for testing. 
+ pub fn add(&mut self, (at, conf): (NumberFor, StateVersion)) { + let mut insert = Some(0); + let mut replace = None; + for (i, (number, _)) in self.canonical_states.iter().enumerate() { + if number == &at { + replace = Some(i); + break + } + if number > &at { + break + } + insert = Some(i + 1); + } + if let Some(i) = replace { + self.canonical_states[i] = (at, conf); + } else if let Some(i) = insert { + self.canonical_states.insert(i, (at, conf)); + } + } + + /// Convert from chainspec conf. + pub fn from_conf<'a, I>(conf: I) -> Option + where + I: IntoIterator, + { + let iter = conf.into_iter(); + let mut canonical_states = match iter.size_hint() { + (s, None) => Vec::with_capacity(s), + (_, Some(s)) => Vec::with_capacity(s), + }; + + for (number, version) in iter { + if let Ok(number) = NumberFor::::from_str(number) { + canonical_states.push((number.into(), version)); + } else { + return None + } + } + Some(StateVersions { canonical_states }) + } +} diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 1b1a732f8d0fc..8aa1135722184 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -205,7 +205,7 @@ pub trait Backend: sp_std::fmt::Debug { } let (root, parent_txs) = self.storage_root( delta - .map(|(k, v)| (k, v.as_ref().map(|v| &v[..]))) + .map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..]))) .chain(child_roots.iter().map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..])))), ); txs.consolidate(parent_txs); @@ -266,6 +266,9 @@ pub trait Backend: sp_std::fmt::Debug { fn get_read_and_written_keys(&self) -> Vec<(Vec, u32, u32, bool)> { unimplemented!() } + + /// Get current state version in use. + fn state_version(&self) -> sp_core::state_version::StateVersion; } /// Trait that allows consolidate two transactions together. @@ -286,7 +289,11 @@ impl Consolidate for Vec<(Option, StorageCollection)> { } } -impl> Consolidate for sp_trie::GenericMemoryDB { +impl Consolidate for sp_trie::GenericMemoryDB +where + H: Hasher, + KF: sp_trie::KeyFunction, +{ fn consolidate(&mut self, other: Self) { sp_trie::GenericMemoryDB::consolidate(self, other) } diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 0bbd2d0a8e8e6..65f1539dd9ee7 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -22,6 +22,7 @@ use codec::Encode; use hash_db::Hasher; use log::warn; use sp_core::{ + state_version::StateVersion, storage::{ well_known_keys::is_child_storage_key, ChildInfo, Storage, StorageChild, TrackedStorageKey, }, @@ -29,11 +30,10 @@ use sp_core::{ Blake2Hasher, }; use sp_externalities::{Extension, Extensions}; -use sp_trie::{empty_child_trie_root, trie_types::Layout, TrieConfiguration}; +use sp_trie::{empty_child_trie_root, Layout, TrieConfiguration}; use std::{ any::{Any, TypeId}, collections::BTreeMap, - iter::FromIterator, ops::Bound, }; @@ -42,17 +42,22 @@ use std::{ pub struct BasicExternalities { inner: Storage, extensions: Extensions, + state_version: Option, } impl BasicExternalities { /// Create a new instance of `BasicExternalities` - pub fn new(inner: Storage) -> Self { - BasicExternalities { inner, extensions: Default::default() } + pub fn new(inner: Storage, state_version: StateVersion) -> Self { + BasicExternalities { + inner, + extensions: Default::default(), + state_version: Some(state_version), + } } /// New basic externalities with empty storage. 
- pub fn new_empty() -> Self { - Self::new(Storage::default()) + pub fn new_empty(state_version: StateVersion) -> Self { + Self::new(Storage::default(), state_version) } /// Insert key/value @@ -68,9 +73,33 @@ impl BasicExternalities { /// Execute the given closure `f` with the externalities set and initialized with `storage`. /// /// Returns the result of the closure and updates `storage` with all changes. + /// + /// Do not support runtime transaction. TODO useless?? + pub fn execute_with_storage_and_state( + storage: &mut sp_core::storage::Storage, + state_version: StateVersion, + f: impl FnOnce() -> R, + ) -> R { + Self::execute_with_storage_inner(storage, Some(state_version), f) + } + + /// Execute the given closure `f` with the externalities set and initialized with `storage`. + /// + /// Returns the result of the closure and updates `storage` with all changes. + /// + /// Do not support runtime transaction and root calculation. + /// This limitation is fine for most genesis runtime storage initialization. pub fn execute_with_storage( storage: &mut sp_core::storage::Storage, f: impl FnOnce() -> R, + ) -> R { + Self::execute_with_storage_inner(storage, None, f) + } + + fn execute_with_storage_inner( + storage: &mut sp_core::storage::Storage, + state_version: Option, + f: impl FnOnce() -> R, ) -> R { let mut ext = Self { inner: Storage { @@ -78,6 +107,7 @@ impl BasicExternalities { children_default: std::mem::take(&mut storage.children_default), }, extensions: Default::default(), + state_version, }; let r = ext.execute_with(f); @@ -112,25 +142,18 @@ impl PartialEq for BasicExternalities { } } -impl FromIterator<(StorageKey, StorageValue)> for BasicExternalities { - fn from_iter>(iter: I) -> Self { - let mut t = Self::default(); - t.inner.top.extend(iter); - t +impl From for BasicExternalities { + fn from(state_version: StateVersion) -> Self { + Self::new(Default::default(), state_version) } } -impl Default for BasicExternalities { - fn default() -> Self { - Self::new(Default::default()) - } -} - -impl From> for BasicExternalities { - fn from(hashmap: BTreeMap) -> Self { +impl From<(BTreeMap, StateVersion)> for BasicExternalities { + fn from((hashmap, state_version): (BTreeMap, StateVersion)) -> Self { BasicExternalities { inner: Storage { top: hashmap, children_default: Default::default() }, extensions: Default::default(), + state_version: Some(state_version), } } } @@ -294,15 +317,25 @@ impl Externalities for BasicExternalities { } } - Layout::::trie_root(self.inner.top.clone()).as_ref().into() + let layout = match self + .state_version + .expect("Unsupported state calculation for genesis storage build.") + { + StateVersion::V0 => Layout::::default(), + StateVersion::V1 { threshold } => + Layout::::with_max_inline_value(threshold), + }; + layout.trie_root(self.inner.top.clone()).as_ref().into() } fn child_storage_root(&mut self, child_info: &ChildInfo) -> Vec { + let state_version = self + .state_version + .expect("Unsupported state calculation for genesis storage build."); if let Some(child) = self.inner.children_default.get(child_info.storage_key()) { let delta = child.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))); - crate::in_memory_backend::new_in_mem::() - .child_storage_root(&child.child_info, delta) - .0 + let in_mem = crate::in_memory_backend::new_in_mem::(state_version); + in_mem.child_storage_root(&child.child_info, delta).0 } else { empty_child_trie_root::>() } @@ -386,7 +419,7 @@ mod tests { #[test] fn commit_should_work() { - let mut ext = 
BasicExternalities::default(); + let mut ext: BasicExternalities = StateVersion::default().into(); ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec()); ext.set_storage(b"dog".to_vec(), b"puppy".to_vec()); ext.set_storage(b"dogglesworth".to_vec(), b"cat".to_vec()); @@ -398,7 +431,7 @@ mod tests { #[test] fn set_and_retrieve_code() { - let mut ext = BasicExternalities::default(); + let mut ext: BasicExternalities = StateVersion::default().into(); let code = vec![1, 2, 3]; ext.set_storage(CODE.to_vec(), code.clone()); @@ -410,15 +443,18 @@ mod tests { fn children_works() { let child_info = ChildInfo::new_default(b"storage_key"); let child_info = &child_info; - let mut ext = BasicExternalities::new(Storage { - top: Default::default(), - children_default: map![ - child_info.storage_key().to_vec() => StorageChild { - data: map![ b"doe".to_vec() => b"reindeer".to_vec() ], - child_info: child_info.to_owned(), - } - ], - }); + let mut ext = BasicExternalities::new( + Storage { + top: Default::default(), + children_default: map![ + child_info.storage_key().to_vec() => StorageChild { + data: map![ b"doe".to_vec() => b"reindeer".to_vec() ], + child_info: child_info.to_owned(), + } + ], + }, + StateVersion::default(), + ); assert_eq!(ext.child_storage(child_info, b"doe"), Some(b"reindeer".to_vec())); @@ -436,19 +472,22 @@ mod tests { fn kill_child_storage_returns_num_elements_removed() { let child_info = ChildInfo::new_default(b"storage_key"); let child_info = &child_info; - let mut ext = BasicExternalities::new(Storage { - top: Default::default(), - children_default: map![ - child_info.storage_key().to_vec() => StorageChild { - data: map![ - b"doe".to_vec() => b"reindeer".to_vec(), - b"dog".to_vec() => b"puppy".to_vec(), - b"hello".to_vec() => b"world".to_vec(), - ], - child_info: child_info.to_owned(), - } - ], - }); + let mut ext = BasicExternalities::new( + Storage { + top: Default::default(), + children_default: map![ + child_info.storage_key().to_vec() => StorageChild { + data: map![ + b"doe".to_vec() => b"reindeer".to_vec(), + b"dog".to_vec() => b"puppy".to_vec(), + b"hello".to_vec() => b"world".to_vec(), + ], + child_info: child_info.to_owned(), + } + ], + }, + StateVersion::default(), + ); let res = ext.kill_child_storage(child_info, None); assert_eq!(res, (true, 3)); @@ -457,7 +496,7 @@ mod tests { #[test] fn basic_externalities_is_empty() { // Make sure no values are set by default in `BasicExternalities`. 
- let storage = BasicExternalities::new_empty().into_storages(); + let storage = BasicExternalities::new_empty(StateVersion::default()).into_storages(); assert!(storage.top.is_empty()); assert!(storage.children_default.is_empty()); } diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index d3c6c12122c4f..37f9d964a0ce3 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -382,17 +382,20 @@ mod test { ) { let child_info_1 = ChildInfo::new_default(b"storage_key1"); let child_info_2 = ChildInfo::new_default(b"storage_key2"); - let backend: InMemoryBackend<_> = vec![ - (vec![100], vec![255]), - (vec![101], vec![255]), - (vec![102], vec![255]), - (vec![103], vec![255]), - (vec![104], vec![255]), - (vec![105], vec![255]), - ] - .into_iter() - .collect::>() - .into(); + let backend: InMemoryBackend<_> = ( + vec![ + (vec![100], vec![255]), + (vec![101], vec![255]), + (vec![102], vec![255]), + (vec![103], vec![255]), + (vec![104], vec![255]), + (vec![105], vec![255]), + ] + .into_iter() + .collect::>(), + sp_core::state_version::StateVersion::V0, + ) + .into(); let prefixed_child_trie_key1 = child_info_1.prefixed_storage_key(); let storage = InMemoryStorage::with_inputs( vec![ diff --git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs index bd5e3a32b5657..6a81edc90ff11 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ b/primitives/state-machine/src/changes_trie/storage.rs @@ -191,7 +191,7 @@ impl Storage for InMemoryStorage Result, String> { - MemoryDB::::get(&self.data.read().mdb, key, prefix) + Ok( as hash_db::HashDBRef>::get(&self.data.read().mdb, key, prefix)) } } diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index c9693ca6a88c1..c07a942f685d8 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -951,6 +951,7 @@ mod tests { use num_traits::Zero; use sp_core::{ map, + state_version::StateVersion, storage::{well_known_keys::EXTRINSIC_INDEX, Storage, StorageChild}, Blake2Hasher, H256, }; @@ -977,7 +978,7 @@ mod tests { fn storage_changes_root_is_none_when_storage_is_not_provided() { let mut overlay = prepare_overlay_with_changes(); let mut cache = StorageTransactionCache::default(); - let backend = TestBackend::default(); + let backend: TestBackend = StateVersion::default().into(); let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); assert_eq!(ext.storage_changes_root(&H256::default().encode()).unwrap(), None); } @@ -986,7 +987,7 @@ mod tests { fn storage_changes_root_is_none_when_state_is_not_provided() { let mut overlay = prepare_overlay_with_changes(); let mut cache = StorageTransactionCache::default(); - let backend = TestBackend::default(); + let backend: TestBackend = StateVersion::default().into(); let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); assert_eq!(ext.storage_changes_root(&H256::default().encode()).unwrap(), None); } @@ -997,7 +998,7 @@ mod tests { let mut cache = StorageTransactionCache::default(); let storage = TestChangesTrieStorage::with_blocks(vec![(99, Default::default())]); let state = Some(ChangesTrieState::new(changes_trie_config(), Zero::zero(), &storage)); - let backend = TestBackend::default(); + let backend: TestBackend = StateVersion::default().into(); let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, state, 
None); assert_eq!( ext.storage_changes_root(&H256::default().encode()).unwrap(), @@ -1013,7 +1014,7 @@ mod tests { overlay.set_storage(vec![1], None); let storage = TestChangesTrieStorage::with_blocks(vec![(99, Default::default())]); let state = Some(ChangesTrieState::new(changes_trie_config(), Zero::zero(), &storage)); - let backend = TestBackend::default(); + let backend: TestBackend = StateVersion::default().into(); let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, state, None); assert_eq!( ext.storage_changes_root(&H256::default().encode()).unwrap(), @@ -1027,15 +1028,18 @@ mod tests { let mut overlay = OverlayedChanges::default(); overlay.set_storage(vec![20], None); overlay.set_storage(vec![30], Some(vec![31])); - let backend = Storage { - top: map![ - vec![10] => vec![10], - vec![20] => vec![20], - vec![40] => vec![40] - ], - children_default: map![], - } - .into(); + let backend = ( + Storage { + top: map![ + vec![10] => vec![10], + vec![20] => vec![20], + vec![40] => vec![40] + ], + children_default: map![], + }, + Default::default(), + ) + .into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); @@ -1073,13 +1077,16 @@ mod tests { overlay.set_storage(vec![27], None); overlay.set_storage(vec![28], None); overlay.set_storage(vec![29], None); - let backend = Storage { - top: map![ - vec![30] => vec![30] - ], - children_default: map![], - } - .into(); + let backend = ( + Storage { + top: map![ + vec![30] => vec![30] + ], + children_default: map![], + }, + Default::default(), + ) + .into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); @@ -1097,20 +1104,23 @@ mod tests { let mut overlay = OverlayedChanges::default(); overlay.set_child_storage(child_info, vec![20], None); overlay.set_child_storage(child_info, vec![30], Some(vec![31])); - let backend = Storage { - top: map![], - children_default: map![ - child_info.storage_key().to_vec() => StorageChild { - data: map![ - vec![10] => vec![10], - vec![20] => vec![20], - vec![40] => vec![40] - ], - child_info: child_info.to_owned(), - } - ], - } - .into(); + let backend = ( + Storage { + top: map![], + children_default: map![ + child_info.storage_key().to_vec() => StorageChild { + data: map![ + vec![10] => vec![10], + vec![20] => vec![20], + vec![40] => vec![40] + ], + child_info: child_info.to_owned(), + } + ], + }, + Default::default(), + ) + .into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); @@ -1142,20 +1152,23 @@ mod tests { let mut overlay = OverlayedChanges::default(); overlay.set_child_storage(child_info, vec![20], None); overlay.set_child_storage(child_info, vec![30], Some(vec![31])); - let backend = Storage { - top: map![], - children_default: map![ - child_info.storage_key().to_vec() => StorageChild { - data: map![ - vec![10] => vec![10], - vec![20] => vec![20], - vec![30] => vec![40] - ], - child_info: child_info.to_owned(), - } - ], - } - .into(); + let backend = ( + Storage { + top: map![], + children_default: map![ + child_info.storage_key().to_vec() => StorageChild { + data: map![ + vec![10] => vec![10], + vec![20] => vec![20], + vec![30] => vec![40] + ], + child_info: child_info.to_owned(), + } + ], + }, + Default::default(), + ) + .into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); @@ -1181,18 +1194,21 @@ mod tests { let child_info = &child_info; let mut cache = StorageTransactionCache::default(); let mut overlay = OverlayedChanges::default(); - let backend = Storage { - top: map![], - 
children_default: map![ - child_info.storage_key().to_vec() => StorageChild { - data: map![ - vec![30] => vec![40] - ], - child_info: child_info.to_owned(), - } - ], - } - .into(); + let backend = ( + Storage { + top: map![], + children_default: map![ + child_info.storage_key().to_vec() => StorageChild { + data: map![ + vec![30] => vec![40] + ], + child_info: child_info.to_owned(), + } + ], + }, + Default::default(), + ) + .into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index f9f94c0c50d60..5768061abc7d7 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -22,17 +22,20 @@ use crate::{ }; use codec::Codec; use hash_db::Hasher; -use sp_core::storage::{ChildInfo, Storage}; +use sp_core::{ + state_version::StateVersion, + storage::{ChildInfo, Storage}, +}; use sp_trie::{empty_trie_root, Layout, MemoryDB}; use std::collections::{BTreeMap, HashMap}; /// Create a new empty instance of in-memory backend. -pub fn new_in_mem() -> TrieBackend, H> +pub fn new_in_mem(state_version: StateVersion) -> TrieBackend, H> where H::Out: Codec + Ord, { let db = MemoryDB::default(); - TrieBackend::new(db, empty_trie_root::>()) + TrieBackend::new(db, empty_trie_root::>(), state_version) } impl TrieBackend, H> @@ -69,14 +72,15 @@ where pub fn update_backend(&self, root: H::Out, changes: MemoryDB) -> Self { let mut clone = self.backend_storage().clone(); clone.consolidate(changes); - Self::new(clone, root) + Self::new(clone, root, self.state_version()) } /// Apply the given transaction to this backend and set the root to the given value. pub fn apply_transaction(&mut self, root: H::Out, transaction: MemoryDB) { + let state_version = self.state_version(); let mut storage = sp_std::mem::take(self).into_storage(); storage.consolidate(transaction); - *self = TrieBackend::new(storage, root); + *self = TrieBackend::new(storage, root, state_version); } /// Compare with another in-memory backend. 
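Aside (illustrative, not part of the patch): with the hunks above, an in-memory `TrieBackend` is always constructed for an explicit `StateVersion`, and the `From<(_, StateVersion)>` conversions carry that choice along. The chosen version decides the trie layout, so the storage root of the same key/value set differs between `V0` and the hashed-value layout once a value exceeds the `V1` threshold. A small sketch of that effect; the divergence asserted at the end is the behaviour this patch introduces, assuming the test default threshold is well below 1000 bytes:

use sp_core::{state_version::StateVersion, Blake2Hasher};
use sp_state_machine::{Backend, InMemoryBackend};
use std::collections::BTreeMap;

fn roots_diverge_for_large_values() {
    let mut top = BTreeMap::new();
    // Large enough to be hashed inside the trie node under `V1 { threshold }`.
    top.insert(b"big".to_vec(), vec![7u8; 1_000]);

    let v0: InMemoryBackend<Blake2Hasher> = (top.clone(), StateVersion::V0).into();
    let v1: InMemoryBackend<Blake2Hasher> = (top, StateVersion::default()).into();

    let root_v0 = v0.storage_root(std::iter::empty()).0;
    let root_v1 = v1.storage_root(std::iter::empty()).0;
    assert_ne!(root_v0, root_v1, "hashed-value layout commits to H(value), not the value");
}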
@@ -90,7 +94,7 @@ where H::Out: Codec + Ord, { fn clone(&self) -> Self { - TrieBackend::new(self.backend_storage().clone(), self.root().clone()) + TrieBackend::new(self.backend_storage().clone(), self.root().clone(), self.state_version()) } } @@ -99,17 +103,31 @@ where H::Out: Codec + Ord, { fn default() -> Self { - new_in_mem() + new_in_mem(Default::default()) } } -impl From, BTreeMap>> +impl From for TrieBackend, H> +where + H::Out: Codec + Ord, +{ + fn from(state_version: StateVersion) -> Self { + new_in_mem(state_version) + } +} + +impl From<(HashMap, BTreeMap>, StateVersion)> for TrieBackend, H> where H::Out: Codec + Ord, { - fn from(inner: HashMap, BTreeMap>) -> Self { - let mut backend = new_in_mem(); + fn from( + (inner, state_version): ( + HashMap, BTreeMap>, + StateVersion, + ), + ) -> Self { + let mut backend = new_in_mem(state_version); backend.insert( inner .into_iter() @@ -119,37 +137,41 @@ where } } -impl From for TrieBackend, H> +impl From<(Storage, StateVersion)> for TrieBackend, H> where H::Out: Codec + Ord, { - fn from(inners: Storage) -> Self { + fn from((inners, state_version): (Storage, StateVersion)) -> Self { let mut inner: HashMap, BTreeMap> = inners .children_default .into_iter() .map(|(_k, c)| (Some(c.child_info), c.data)) .collect(); inner.insert(None, inners.top); - inner.into() + (inner, state_version).into() } } -impl From> for TrieBackend, H> +impl From<(BTreeMap, StateVersion)> + for TrieBackend, H> where H::Out: Codec + Ord, { - fn from(inner: BTreeMap) -> Self { + fn from((inner, state_version): (BTreeMap, StateVersion)) -> Self { let mut expanded = HashMap::new(); expanded.insert(None, inner); - expanded.into() + (expanded, state_version).into() } } -impl From, StorageCollection)>> for TrieBackend, H> +impl From<(Vec<(Option, StorageCollection)>, StateVersion)> + for TrieBackend, H> where H::Out: Codec + Ord, { - fn from(inner: Vec<(Option, StorageCollection)>) -> Self { + fn from( + (inner, state_version): (Vec<(Option, StorageCollection)>, StateVersion), + ) -> Self { let mut expanded: HashMap, BTreeMap> = HashMap::new(); for (child_info, key_values) in inner { @@ -160,7 +182,7 @@ where } } } - expanded.into() + (expanded, state_version).into() } } @@ -173,7 +195,7 @@ mod tests { /// Assert in memory backend with only child trie keys works as trie backend. 
#[test] fn in_memory_with_child_trie_only() { - let storage = new_in_mem::(); + let storage = new_in_mem::(StateVersion::default()); let child_info = ChildInfo::new_default(b"1"); let child_info = &child_info; let storage = storage @@ -186,7 +208,7 @@ mod tests { #[test] fn insert_multiple_times_child_data_works() { - let mut storage = new_in_mem::(); + let mut storage = new_in_mem::(StateVersion::default()); let child_info = ChildInfo::new_default(b"1"); storage diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 032899faeb523..b3a9128a6a485 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -119,6 +119,7 @@ impl sp_std::fmt::Display for DefaultError { pub use crate::{ backend::Backend, + error::{Error, ExecutionError}, ext::Ext, overlayed_changes::{ ChildStorageCollection, IndexOperation, OffchainChangesCollection, @@ -129,7 +130,6 @@ pub use crate::{ trie_backend::TrieBackend, trie_backend_essence::{Storage, TrieBackendStorage}, }; -pub use error::{Error, ExecutionError}; #[cfg(not(feature = "std"))] mod changes_trie { @@ -153,7 +153,6 @@ mod std_reexport { InMemoryStorage as InMemoryChangesTrieStorage, RootsStorage as ChangesTrieRootsStorage, State as ChangesTrieState, Storage as ChangesTrieStorage, }, - error::{Error, ExecutionError}, in_memory_backend::new_in_mem, proving_backend::{ create_proof_check_backend, ProofRecorder, ProvingBackend, ProvingBackendRecorder, @@ -161,10 +160,7 @@ mod std_reexport { read_only::{InspectState, ReadOnlyExternalities}, testing::TestExternalities, }; - pub use sp_trie::{ - trie_types::{Layout, TrieDBMut}, - DBValue, MemoryDB, StorageProof, TrieMut, - }; + pub use sp_trie::{trie_types::TrieDBMut, DBValue, Layout, MemoryDB, StorageProof, TrieMut}; } #[cfg(feature = "std")] @@ -991,7 +987,8 @@ mod tests { use codec::{Decode, Encode}; use sp_core::{ map, - storage::ChildInfo, + state_version::StateVersion, + storage::{ChildInfo, TEST_DEFAULT_INLINE_VALUE_THESHOLD as TRESHOLD}, testing::TaskExecutor, traits::{CodeExecutor, Externalities, RuntimeCode}, NativeOrEncoded, NeverNativeValue, @@ -1062,7 +1059,11 @@ mod tests { #[test] fn execute_works() { - let backend = trie_backend::tests::test_trie(); + execute_works_inner(false); + execute_works_inner(true); + } + fn execute_works_inner(hashed: bool) { + let backend = trie_backend::tests::test_trie(hashed); let mut overlayed_changes = Default::default(); let wasm_code = RuntimeCode::empty(); @@ -1088,7 +1089,11 @@ mod tests { #[test] fn execute_works_with_native_else_wasm() { - let backend = trie_backend::tests::test_trie(); + execute_works_with_native_else_wasm_inner(false); + execute_works_with_native_else_wasm_inner(true); + } + fn execute_works_with_native_else_wasm_inner(hashed: bool) { + let backend = trie_backend::tests::test_trie(hashed); let mut overlayed_changes = Default::default(); let wasm_code = RuntimeCode::empty(); @@ -1114,8 +1119,12 @@ mod tests { #[test] fn dual_execution_strategy_detects_consensus_failure() { + dual_execution_strategy_detects_consensus_failure_inner(false); + dual_execution_strategy_detects_consensus_failure_inner(true); + } + fn dual_execution_strategy_detects_consensus_failure_inner(hashed: bool) { let mut consensus_failed = false; - let backend = trie_backend::tests::test_trie(); + let backend = trie_backend::tests::test_trie(hashed); let mut overlayed_changes = Default::default(); let wasm_code = RuntimeCode::empty(); @@ -1150,6 +1159,10 @@ mod tests { #[test] fn 
prove_execution_and_proof_check_works() { + prove_execution_and_proof_check_works_inner(true); + prove_execution_and_proof_check_works_inner(false); + } + fn prove_execution_and_proof_check_works_inner(flagged: bool) { let executor = DummyCodeExecutor { change_changes_trie_config: false, native_available: true, @@ -1158,7 +1171,7 @@ mod tests { }; // fetch execution proof from 'remote' full node - let mut remote_backend = trie_backend::tests::test_trie(); + let mut remote_backend = trie_backend::tests::test_trie(flagged); let remote_root = remote_backend.storage_root(std::iter::empty()).0; let (remote_result, remote_proof) = prove_execution::<_, _, u64, _, _>( &mut remote_backend, @@ -1197,7 +1210,7 @@ mod tests { b"abc".to_vec() => b"2".to_vec(), b"bbb".to_vec() => b"3".to_vec() ]; - let state = InMemoryBackend::::from(initial); + let state = InMemoryBackend::::from((initial, Default::default())); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); @@ -1278,7 +1291,7 @@ mod tests { b"d".to_vec() => b"3".to_vec() ], ]; - let backend = InMemoryBackend::::from(initial); + let backend = InMemoryBackend::::from((initial, Default::default())); let mut overlay = OverlayedChanges::default(); overlay.set_child_storage(&child_info, b"1".to_vec(), Some(b"1312".to_vec())); @@ -1326,7 +1339,7 @@ mod tests { b"d".to_vec() => b"3".to_vec() ], ]; - let backend = InMemoryBackend::::from(initial); + let backend = InMemoryBackend::::from((initial, Default::default())); let mut overlay = OverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); let mut ext = Ext::new( @@ -1350,7 +1363,7 @@ mod tests { fn set_child_storage_works() { let child_info = ChildInfo::new_default(b"sub1"); let child_info = &child_info; - let state = new_in_mem::(); + let state = new_in_mem::(Default::default()); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); @@ -1372,7 +1385,7 @@ mod tests { fn append_storage_works() { let reference_data = vec![b"data1".to_vec(), b"2".to_vec(), b"D3".to_vec(), b"d4".to_vec()]; let key = b"key".to_vec(); - let state = new_in_mem::(); + let state = new_in_mem::(Default::default()); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); @@ -1427,7 +1440,7 @@ mod tests { let key = b"events".to_vec(); let mut cache = StorageTransactionCache::default(); - let state = new_in_mem::(); + let state = new_in_mem::(Default::default()); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); @@ -1514,12 +1527,16 @@ mod tests { #[test] fn prove_read_and_proof_check_works() { + prove_read_and_proof_check_works_inner(false); + prove_read_and_proof_check_works_inner(true); + } + fn prove_read_and_proof_check_works_inner(flagged: bool) { let child_info = ChildInfo::new_default(b"sub1"); let missing_child_info = ChildInfo::new_default(b"sub1sub2"); // key will include other child root to proof. 
let child_info = &child_info; let missing_child_info = &missing_child_info; // fetch read proof from 'remote' full node - let remote_backend = trie_backend::tests::test_trie(); + let remote_backend = trie_backend::tests::test_trie(flagged); let remote_root = remote_backend.storage_root(std::iter::empty()).0; let remote_proof = prove_read(remote_backend, &[b"value2"]).unwrap(); let remote_proof = test_compact(remote_proof, &remote_root); @@ -1537,7 +1554,7 @@ mod tests { ); assert_eq!(local_result2, false); // on child trie - let remote_backend = trie_backend::tests::test_trie(); + let remote_backend = trie_backend::tests::test_trie(flagged); let remote_root = remote_backend.storage_root(std::iter::empty()).0; let remote_proof = prove_child_read(remote_backend, child_info, &[b"value3"]).unwrap(); let remote_proof = test_compact(remote_proof, &remote_root); @@ -1664,7 +1681,8 @@ mod tests { #[test] fn prove_read_with_size_limit_works() { - let remote_backend = trie_backend::tests::test_trie(); + let hashed_value = false; + let remote_backend = trie_backend::tests::test_trie(hashed_value); let remote_root = remote_backend.storage_root(::std::iter::empty()).0; let (proof, count) = prove_range_read_with_size(remote_backend, None, None, 0, None).unwrap(); @@ -1672,7 +1690,7 @@ mod tests { assert_eq!(proof.into_memory_db::().drain().len(), 3); assert_eq!(count, 1); - let remote_backend = trie_backend::tests::test_trie(); + let remote_backend = trie_backend::tests::test_trie(hashed_value); let (proof, count) = prove_range_read_with_size(remote_backend, None, None, 800, Some(&[])).unwrap(); assert_eq!(proof.clone().into_memory_db::().drain().len(), 9); @@ -1695,7 +1713,7 @@ mod tests { assert_eq!(results.len() as u32, 101); assert_eq!(completed, false); - let remote_backend = trie_backend::tests::test_trie(); + let remote_backend = trie_backend::tests::test_trie(hashed_value); let (proof, count) = prove_range_read_with_size(remote_backend, None, None, 50000, Some(&[])).unwrap(); assert_eq!(proof.clone().into_memory_db::().drain().len(), 11); @@ -1713,22 +1731,96 @@ mod tests { assert_eq!(completed, true); } + #[test] + fn inner_state_hashing_switch_proofs() { + let mut layout = Layout::default(); + let (mut mdb, mut root) = trie_backend::tests::test_db(false); + { + let mut trie = + TrieDBMut::from_existing_with_layout(&mut mdb, &mut root, layout.clone()).unwrap(); + trie.insert(b"foo", vec![1u8; 1_000].as_slice()) // big inner hash + .expect("insert failed"); + trie.insert(b"foo2", vec![3u8; 16].as_slice()) // no inner hash + .expect("insert failed"); + trie.insert(b"foo222", vec![5u8; 100].as_slice()) // inner hash + .expect("insert failed"); + } + + let check_proof = |mdb, root, state_version| -> StorageProof { + let remote_backend = TrieBackend::new(mdb, root, state_version); + let remote_root = remote_backend.storage_root(::std::iter::empty()).0; + let remote_proof = prove_read(remote_backend, &[b"foo222"]).unwrap(); + // check proof locally + let local_result1 = + read_proof_check::(remote_root, remote_proof.clone(), &[b"foo222"]) + .unwrap(); + // check that results are correct + assert_eq!( + local_result1.into_iter().collect::>(), + vec![(b"foo222".to_vec(), Some(vec![5u8; 100]))], + ); + remote_proof + }; + + let remote_proof = check_proof(mdb.clone(), root.clone(), StateVersion::V0); + // check full values in proof + assert!(remote_proof.encode().len() > 1_100); + assert!(remote_proof.encoded_size() > 1_100); + let root1 = root.clone(); + + // do switch + layout = 
Layout::with_max_inline_value(TRESHOLD); + // update with same value do change + { + let mut trie = + TrieDBMut::from_existing_with_layout(&mut mdb, &mut root, layout.clone()).unwrap(); + // changed + trie.insert(b"foo222", vec![5u8; 100].as_slice()) // inner hash + .expect("insert failed"); + // unchange but should also update node. + trie.insert(b"foo", vec![1u8; 1_000].as_slice()) // big inner hash + .expect("insert failed"); + } + let root3 = root.clone(); + assert!(root1 != root3); + let remote_proof = + check_proof(mdb.clone(), root.clone(), StateVersion::V1 { threshold: TRESHOLD }); + // nodes foo is replaced by its hashed value form. + assert!(remote_proof.encode().len() < 1000); + assert!(remote_proof.encoded_size() < 1000); + assert_eq!(remote_proof.encode().len(), remote_proof.encoded_size()); + } + #[test] fn compact_multiple_child_trie() { + let size_inner_hash = compact_multiple_child_trie_inner(true); + let size_no_inner_hash = compact_multiple_child_trie_inner(false); + assert!(size_inner_hash < size_no_inner_hash); + } + fn compact_multiple_child_trie_inner(inner_hashed: bool) -> usize { // this root will be queried let child_info1 = ChildInfo::new_default(b"sub1"); // this root will not be include in proof let child_info2 = ChildInfo::new_default(b"sub2"); // this root will be include in proof let child_info3 = ChildInfo::new_default(b"sub"); - let remote_backend = trie_backend::tests::test_trie(); + let remote_backend = trie_backend::tests::test_trie(inner_hashed); + let long_vec: Vec = (0..1024usize).map(|_| 8u8).collect(); let (remote_root, transaction) = remote_backend.full_storage_root( std::iter::empty(), vec![ ( &child_info1, - vec![(&b"key1"[..], Some(&b"val2"[..])), (&b"key2"[..], Some(&b"val3"[..]))] - .into_iter(), + vec![ + // a inner hashable node + (&b"k"[..], Some(&long_vec[..])), + // need to ensure this is not an inline node + // otherwhise we do not know what is accessed when + // storing proof. 
+ (&b"key1"[..], Some(&vec![5u8; 32][..])), + (&b"key2"[..], Some(&b"val3"[..])), + ] + .into_iter(), ), ( &child_info2, @@ -1743,10 +1835,12 @@ mod tests { ] .into_iter(), ); + let state_version = remote_backend.state_version(); let mut remote_storage = remote_backend.into_storage(); remote_storage.consolidate(transaction); - let remote_backend = TrieBackend::new(remote_storage, remote_root); + let remote_backend = TrieBackend::new(remote_storage, remote_root, state_version); let remote_proof = prove_child_read(remote_backend, &child_info1, &[b"key1"]).unwrap(); + let size = remote_proof.encoded_size(); let remote_proof = test_compact(remote_proof, &remote_root); let local_result1 = read_child_proof_check::( remote_root, @@ -1756,7 +1850,8 @@ mod tests { ) .unwrap(); assert_eq!(local_result1.len(), 1); - assert_eq!(local_result1.get(&b"key1"[..]), Some(&Some(b"val2".to_vec()))); + assert_eq!(local_result1.get(&b"key1"[..]), Some(&Some(vec![5u8; 32]))); + size } #[test] @@ -1768,7 +1863,7 @@ mod tests { let mut overlay = OverlayedChanges::default(); let mut transaction = { - let backend = test_trie(); + let backend = test_trie(false); let mut cache = StorageTransactionCache::default(); let mut ext = Ext::new( &mut overlay, @@ -1799,7 +1894,7 @@ mod tests { b"aaa".to_vec() => b"0".to_vec(), b"bbb".to_vec() => b"".to_vec() ]; - let state = InMemoryBackend::::from(initial); + let state = InMemoryBackend::::from((initial, Default::default())); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); @@ -1836,7 +1931,7 @@ mod tests { struct DummyExt(u32); } - let backend = trie_backend::tests::test_trie(); + let backend = trie_backend::tests::test_trie(false); let mut overlayed_changes = Default::default(); let wasm_code = RuntimeCode::empty(); diff --git a/primitives/state-machine/src/overlayed_changes/mod.rs b/primitives/state-machine/src/overlayed_changes/mod.rs index a0558e06a380e..36a394a99d05a 100644 --- a/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/primitives/state-machine/src/overlayed_changes/mod.rs @@ -931,7 +931,7 @@ mod tests { ] .into_iter() .collect(); - let backend = InMemoryBackend::::from(initial); + let backend = InMemoryBackend::::from((initial, Default::default())); let mut overlay = OverlayedChanges::default(); overlay.set_collect_extrinsics(false); diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 690266dab1e72..e5a1865b4da62 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -26,14 +26,11 @@ use codec::{Codec, Decode, Encode}; use hash_db::{HashDB, Hasher, Prefix, EMPTY_PREFIX}; use log::debug; use parking_lot::RwLock; -use sp_core::storage::ChildInfo; +use sp_core::{state_version::StateVersion, storage::ChildInfo}; +pub use sp_trie::trie_types::TrieError; use sp_trie::{ empty_child_trie_root, read_child_trie_value_with, read_trie_value_with, record_all_keys, - MemoryDB, StorageProof, -}; -pub use sp_trie::{ - trie_types::{Layout, TrieError}, - Recorder, + Layout, MemoryDB, Recorder, StorageProof, }; use std::{ collections::{hash_map::Entry, HashMap}, @@ -117,6 +114,8 @@ struct ProofRecorderInner { records: HashMap>, /// The encoded size of all recorded values. encoded_size: usize, + /// State version in use. + state_version: StateVersion, } /// Global proof recorder, act as a layer over a hash db for recording queried data. 
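// The size estimate in the hunk below combines three terms: the encoded size of every
// recorded value, a SCALE compact length prefix for the number of recorded entries, and
// the footprint of the state version in the proof encoding. A self-contained sketch of
// that arithmetic, using `parity-scale-codec` directly; `state_version_bytes` stands in
// for `sp_trie::state_version_encoded_size`, whose concrete value is not shown here.
#[allow(dead_code)]
fn estimate_proof_size_sketch(recorded_values: &[Vec<u8>], state_version_bytes: usize) -> usize {
    use codec::{Compact, Encode};

    let values: usize = recorded_values.iter().map(|v| v.encoded_size()).sum();
    let length_prefix = Compact(recorded_values.len() as u32).encoded_size();
    values + length_prefix + state_version_bytes
}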
@@ -125,9 +124,9 @@ pub struct ProofRecorder { inner: Arc>>, } -impl ProofRecorder { +impl ProofRecorder { /// Record the given `key` => `val` combination. - pub fn record(&self, key: Hash, val: Option) { + pub fn record(&self, key: Hash, val: Option) { let mut inner = self.inner.write(); let encoded_size = if let Entry::Vacant(entry) = inner.records.entry(key) { let encoded_size = val.as_ref().map(Encode::encoded_size).unwrap_or(0); @@ -152,20 +151,21 @@ impl ProofRecorder { /// encoded proof. pub fn estimate_encoded_size(&self) -> usize { let inner = self.inner.read(); - inner.encoded_size + codec::Compact(inner.records.len() as u32).encoded_size() + inner.encoded_size + + codec::Compact(inner.records.len() as u32).encoded_size() + + sp_trie::state_version_encoded_size(inner.state_version) } /// Convert into a [`StorageProof`]. - pub fn to_storage_proof(&self) -> StorageProof { - let trie_nodes = self - .inner - .read() + pub fn to_storage_proof(&self) -> StorageProof { + let inner = self.inner.read(); + let trie_nodes = inner .records .iter() .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) .collect(); - StorageProof::new(trie_nodes) + StorageProof::new(trie_nodes, inner.state_version) } /// Reset the internal state. @@ -190,7 +190,7 @@ pub struct ProofRecorderBackend<'a, S: 'a + TrieBackendStorage, H: 'a + Hashe impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> where - H::Out: Codec, + H::Out: Codec + Ord, { /// Create new proving backend. pub fn new(backend: &'a TrieBackend) -> Self { @@ -206,12 +206,12 @@ where let essence = backend.essence(); let root = essence.root().clone(); let recorder = ProofRecorderBackend { backend: essence.backend_storage(), proof_recorder }; - ProvingBackend(TrieBackend::new(recorder, root)) + ProvingBackend(TrieBackend::new(recorder, root, backend.state_version())) } /// Extracting the gathered unordered proof. pub fn extract_proof(&self) -> StorageProof { - self.0.essence().backend_storage().proof_recorder.to_storage_proof() + self.0.essence().backend_storage().proof_recorder.to_storage_proof::() } /// Returns the estimated encoded size of the proof. @@ -230,11 +230,11 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorage fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { if let Some(v) = self.proof_recorder.get(key) { - return Ok(v) + return Ok(v); } let backend_value = self.backend.get(key, prefix)?; - self.proof_recorder.record(key.clone(), backend_value.clone()); + self.proof_recorder.record::(key.clone(), backend_value.clone()); Ok(backend_value) } } @@ -356,6 +356,10 @@ where fn usage_info(&self) -> crate::stats::UsageInfo { self.0.usage_info() } + + fn state_version(&self) -> StateVersion { + self.0.state_version() + } } /// Create proof check backend. 
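// A sketch of the record-and-check round trip supported by the proof machinery below,
// reusing the `test_trie` fixture from this crate's trie_backend tests and the
// `BlakeTwo256` hasher; since the proof now carries its state version, the check
// backend can be rebuilt from the proof alone plus the expected root.
#[cfg(test)]
#[allow(dead_code)]
fn proof_round_trip_sketch(hashed: bool) {
    use sp_runtime::traits::BlakeTwo256;

    let remote = crate::trie_backend::tests::test_trie(hashed);
    let remote_root = remote.storage_root(std::iter::empty()).0;

    // Record a read on the "remote" backend.
    let proving = ProvingBackend::new(&remote);
    assert_eq!(proving.storage(b"key").unwrap(), Some(b"value".to_vec()));
    let proof = proving.extract_proof();

    // Rebuild a check backend from the proof and replay the read locally.
    let check = create_proof_check_backend::<BlakeTwo256>(remote_root, proof).unwrap();
    assert_eq!(check.storage(b"key").unwrap(), Some(b"value".to_vec()));
}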
@@ -367,10 +371,11 @@ where H: Hasher, H::Out: Codec, { + let state_version = proof.state_version(); let db = proof.into_memory_db(); if db.contains(&root, EMPTY_PREFIX) { - Ok(TrieBackend::new(db, root)) + Ok(TrieBackend::new(db, root, state_version)) } else { Err(Box::new(ExecutionError::InvalidProof)) } @@ -394,13 +399,21 @@ mod tests { #[test] fn proof_is_empty_until_value_is_read() { - let trie_backend = test_trie(); + proof_is_empty_until_value_is_read_inner(false); + proof_is_empty_until_value_is_read_inner(true); + } + fn proof_is_empty_until_value_is_read_inner(flagged: bool) { + let trie_backend = test_trie(flagged); assert!(test_proving(&trie_backend).extract_proof().is_empty()); } #[test] fn proof_is_non_empty_after_value_is_read() { - let trie_backend = test_trie(); + proof_is_non_empty_after_value_is_read_inner(false); + proof_is_non_empty_after_value_is_read_inner(true); + } + fn proof_is_non_empty_after_value_is_read_inner(flagged: bool) { + let trie_backend = test_trie(flagged); let backend = test_proving(&trie_backend); assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec())); assert!(!backend.extract_proof().is_empty()); @@ -418,7 +431,11 @@ mod tests { #[test] fn passes_through_backend_calls() { - let trie_backend = test_trie(); + passes_through_backend_calls_inner(false); + passes_through_backend_calls_inner(true); + } + fn passes_through_backend_calls_inner(flagged: bool) { + let trie_backend = test_trie(flagged); let proving_backend = test_proving(&trie_backend); assert_eq!(trie_backend.storage(b"key").unwrap(), proving_backend.storage(b"key").unwrap()); assert_eq!(trie_backend.pairs(), proving_backend.pairs()); @@ -430,41 +447,80 @@ mod tests { } #[test] - fn proof_recorded_and_checked() { - let contents = (0..64).map(|i| (vec![i], Some(vec![i]))).collect::>(); - let in_memory = InMemoryBackend::::default(); - let in_memory = in_memory.update(vec![(None, contents)]); - let in_memory_root = in_memory.storage_root(::std::iter::empty()).0; - (0..64).for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i])); + fn proof_recorded_and_checked_top() { + proof_recorded_and_checked_inner(true); + proof_recorded_and_checked_inner(false); + } + fn proof_recorded_and_checked_inner(flagged: bool) { + let size_content = 34; // above hashable value treshold. 
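        // 34-byte values exceed TEST_DEFAULT_INLINE_VALUE_THESHOLD (33), so under the
        // V1 layout they are stored as hashes in the trie nodes while staying inline
        // under V0; the flagged run therefore exercises the hashed-value code path.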
+ let value_range = 0..64; + + let contents = vec![( + None, + value_range + .clone() + .map(|i| (vec![i], Some(vec![i; size_content]))) + .collect::>(), + )]; + let in_memory: InMemoryBackend = if flagged { + ( + contents, + StateVersion::V1 { + threshold: sp_core::storage::TEST_DEFAULT_INLINE_VALUE_THESHOLD, + }, + ) + .into() + } else { + (contents, StateVersion::V0).into() + }; + let in_memory_root = in_memory.storage_root(std::iter::empty()).0; + value_range.clone().for_each(|i| { + assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i; size_content]) + }); let trie = in_memory.as_trie_backend().unwrap(); - let trie_root = trie.storage_root(::std::iter::empty()).0; + let trie_root = trie.storage_root(std::iter::empty()).0; assert_eq!(in_memory_root, trie_root); - (0..64).for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i])); + value_range + .clone() + .for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i; size_content])); let proving = ProvingBackend::new(trie); - assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); + assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42; size_content]); let proof = proving.extract_proof(); let proof_check = create_proof_check_backend::(in_memory_root.into(), proof).unwrap(); - assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42]); + assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42; size_content]); } #[test] fn proof_recorded_and_checked_with_child() { + proof_recorded_and_checked_with_child_inner(false); + proof_recorded_and_checked_with_child_inner(true); + } + fn proof_recorded_and_checked_with_child_inner(flagged: bool) { let child_info_1 = ChildInfo::new_default(b"sub1"); let child_info_2 = ChildInfo::new_default(b"sub2"); let child_info_1 = &child_info_1; let child_info_2 = &child_info_2; let contents = vec![ - (None, (0..64).map(|i| (vec![i], Some(vec![i]))).collect()), + (None, (0..64).map(|i| (vec![i], Some(vec![i]))).collect::>()), (Some(child_info_1.clone()), (28..65).map(|i| (vec![i], Some(vec![i]))).collect()), (Some(child_info_2.clone()), (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), ]; - let in_memory = InMemoryBackend::::default(); - let in_memory = in_memory.update(contents); + let in_memory: InMemoryBackend = if flagged { + ( + contents, + StateVersion::V1 { + threshold: sp_core::storage::TEST_DEFAULT_INLINE_VALUE_THESHOLD, + }, + ) + .into() + } else { + (contents, StateVersion::V0).into() + }; let child_storage_keys = vec![child_info_1.to_owned(), child_info_2.to_owned()]; let in_memory_root = in_memory .full_storage_root( @@ -509,7 +565,11 @@ mod tests { #[test] fn storage_proof_encoded_size_estimation_works() { - let trie_backend = test_trie(); + storage_proof_encoded_size_estimation_works_inner(false); + storage_proof_encoded_size_estimation_works_inner(true); + } + fn storage_proof_encoded_size_estimation_works_inner(flagged: bool) { + let trie_backend = test_trie(flagged); let backend = test_proving(&trie_backend); let check_estimation = diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index ec1772ba8666f..01ff9a23dc1fc 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -36,6 +36,7 @@ use codec::Decode; use hash_db::Hasher; use sp_core::{ offchain::testing::TestPersistentOffchainDB, + state_version::StateVersion, storage::{ well_known_keys::{is_child_storage_key, CHANGES_TRIE_CONFIG, CODE}, Storage, @@ -87,16 +88,31 @@ where /// Create 
a new instance of `TestExternalities` with storage. pub fn new(storage: Storage) -> Self { - Self::new_with_code(&[], storage) + Self::new_with_code_and_state(&[], storage, Default::default()) + } + + /// Create a new instance of `TestExternalities` with storage for a given state version. + pub fn new_with_state_version(storage: Storage, state_version: StateVersion) -> Self { + Self::new_with_code_and_state(&[], storage, state_version) } /// New empty test externalities. pub fn new_empty() -> Self { - Self::new_with_code(&[], Storage::default()) + Self::new_with_code_and_state(&[], Storage::default(), Default::default()) } /// Create a new instance of `TestExternalities` with code and storage. - pub fn new_with_code(code: &[u8], mut storage: Storage) -> Self { + pub fn new_with_code(code: &[u8], storage: Storage) -> Self { + Self::new_with_code_and_state(code, storage, Default::default()) + } + + /// Create a new instance of `TestExternalities` with code and storage for a given state + /// version. + pub fn new_with_code_and_state( + code: &[u8], + mut storage: Storage, + state_version: StateVersion, + ) -> Self { let mut overlay = OverlayedChanges::default(); let changes_trie_config = storage .top @@ -114,13 +130,15 @@ where let offchain_db = TestPersistentOffchainDB::new(); + let backend = (storage, state_version).into(); + TestExternalities { overlay, offchain_db, changes_trie_config, extensions, changes_trie_storage: ChangesTrieInMemoryStorage::new(), - backend: storage.into(), + backend, storage_transaction_cache: Default::default(), } } @@ -241,7 +259,8 @@ where H::Out: Ord + 'static + codec::Codec, { fn default() -> Self { - Self::new(Default::default()) + // default to default version. + Self::new_with_state_version(Storage::default(), Default::default()) } } @@ -250,7 +269,16 @@ where H::Out: Ord + 'static + codec::Codec, { fn from(storage: Storage) -> Self { - Self::new(storage) + Self::new_with_state_version(storage, Default::default()) + } +} + +impl From<(Storage, StateVersion)> for TestExternalities +where + H::Out: Ord + 'static + codec::Codec, +{ + fn from((storage, state_version): (Storage, StateVersion)) -> Self { + Self::new_with_state_version(storage, state_version) } } @@ -312,7 +340,8 @@ mod tests { #[test] fn commit_should_work() { - let mut ext = TestExternalities::::default(); + let storage = Storage::default(); // avoid adding the trie threshold. + let mut ext = TestExternalities::::from((storage, Default::default())); let mut ext = ext.ext(); ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec()); ext.set_storage(b"dog".to_vec(), b"puppy".to_vec()); diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 7cb725a80503d..18808ff552f71 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -24,17 +24,21 @@ use crate::{ }; use codec::{Codec, Decode}; use hash_db::Hasher; -use sp_core::storage::{ChildInfo, ChildType}; +use sp_core::{ + state_version::StateVersion, + storage::{ChildInfo, ChildType}, +}; use sp_std::{boxed::Box, vec::Vec}; use sp_trie::{ child_delta_trie_root, delta_trie_root, empty_child_trie_root, - trie_types::{Layout, TrieDB, TrieError}, - Trie, + trie_types::{TrieDB, TrieError}, + Layout, Trie, }; /// Patricia trie-based backend. Transaction type is an overlay of changes to commit. 
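// The mapping from a backend's `StateVersion` to the trie layout used when computing
// storage roots, shown here as a free-standing helper for readability; the hunks below
// inline exactly this match for both the top trie and the child tries.
#[allow(dead_code)]
fn layout_for_state_version<H: Hasher>(state_version: StateVersion) -> sp_trie::Layout<H> {
    match state_version {
        // V0: the historical layout, values are always stored inline in their node.
        StateVersion::V0 => sp_trie::Layout::default(),
        // V1: values larger than `threshold` are represented by their hash instead.
        StateVersion::V1 { threshold } => sp_trie::Layout::with_max_inline_value(threshold),
    }
}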
pub struct TrieBackend, H: Hasher> { pub(crate) essence: TrieBackendEssence, + state_version: StateVersion, } impl, H: Hasher> TrieBackend @@ -42,8 +46,8 @@ where H::Out: Codec, { /// Create new trie-based backend. - pub fn new(storage: S, root: H::Out) -> Self { - TrieBackend { essence: TrieBackendEssence::new(storage, root) } + pub fn new(storage: S, root: H::Out, state_version: StateVersion) -> Self { + TrieBackend { essence: TrieBackendEssence::new(storage, root), state_version } } /// Get backend essence reference. @@ -61,6 +65,11 @@ where self.essence.root() } + /// State version define for this backend. + pub fn state_version(&self) -> StateVersion { + self.state_version + } + /// Consumes self and returns underlying storage. pub fn into_storage(self) -> S { self.essence.into_storage() @@ -195,8 +204,16 @@ where { let mut eph = Ephemeral::new(self.essence.backend_storage(), &mut write_overlay); - - match delta_trie_root::, _, _, _, _, _>(&mut eph, root, delta) { + let res = || { + let layout = match self.state_version { + StateVersion::V0 => sp_trie::Layout::default(), + StateVersion::V1 { threshold } => + sp_trie::Layout::with_max_inline_value(threshold), + }; + delta_trie_root::, _, _, _, _, _>(&mut eph, root, delta, layout) + }; + + match res() { Ok(ret) => root = ret, Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e), } @@ -216,6 +233,10 @@ where let default_root = match child_info.child_type() { ChildType::ParentKeyId => empty_child_trie_root::>(), }; + let layout = match self.state_version { + StateVersion::V0 => sp_trie::Layout::default(), + StateVersion::V1 { threshold } => sp_trie::Layout::with_max_inline_value(threshold), + }; let mut write_overlay = S::Overlay::default(); let prefixed_storage_key = child_info.prefixed_storage_key(); @@ -237,6 +258,7 @@ where &mut eph, root, delta, + layout, ) { Ok(ret) => root = ret, Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e), @@ -261,20 +283,24 @@ where fn wipe(&self) -> Result<(), Self::Error> { Ok(()) } + + fn state_version(&self) -> StateVersion { + self.state_version.clone() + } } #[cfg(test)] pub mod tests { use super::*; use codec::Encode; - use sp_core::H256; + use sp_core::{storage::TEST_DEFAULT_INLINE_VALUE_THESHOLD as TRESHOLD, H256}; use sp_runtime::traits::BlakeTwo256; use sp_trie::{trie_types::TrieDBMut, KeySpacedDBMut, PrefixedMemoryDB, TrieMut}; use std::{collections::HashSet, iter}; const CHILD_KEY_1: &[u8] = b"sub1"; - fn test_db() -> (PrefixedMemoryDB, H256) { + pub(crate) fn test_db(hashed_value: bool) -> (PrefixedMemoryDB, H256) { let child_info = ChildInfo::new_default(CHILD_KEY_1); let mut root = H256::default(); let mut mdb = PrefixedMemoryDB::::default(); @@ -288,7 +314,13 @@ pub mod tests { { let mut sub_root = Vec::new(); root.encode_to(&mut sub_root); - let mut trie = TrieDBMut::new(&mut mdb, &mut root); + let mut trie = if hashed_value { + let layout = Layout::with_max_inline_value(TRESHOLD); + TrieDBMut::new_with_layout(&mut mdb, &mut root, layout) + } else { + TrieDBMut::new(&mut mdb, &mut root) + }; + trie.insert(child_info.prefixed_storage_key().as_slice(), &sub_root[..]) .expect("insert failed"); trie.insert(b"key", b"value").expect("insert failed"); @@ -302,19 +334,35 @@ pub mod tests { (mdb, root) } - pub(crate) fn test_trie() -> TrieBackend, BlakeTwo256> { - let (mdb, root) = test_db(); - TrieBackend::new(mdb, root) + pub(crate) fn test_trie( + hashed_value: bool, + ) -> TrieBackend, BlakeTwo256> { + let (mdb, root) = test_db(hashed_value); + let state_version = 
if hashed_value { + StateVersion::V1 { threshold: sp_core::storage::TEST_DEFAULT_INLINE_VALUE_THESHOLD } + } else { + StateVersion::V0 + }; + + TrieBackend::new(mdb, root, state_version) } #[test] fn read_from_storage_returns_some() { - assert_eq!(test_trie().storage(b"key").unwrap(), Some(b"value".to_vec())); + read_from_storage_returns_some_inner(false); + read_from_storage_returns_some_inner(true); + } + fn read_from_storage_returns_some_inner(flagged: bool) { + assert_eq!(test_trie(flagged).storage(b"key").unwrap(), Some(b"value".to_vec())); } #[test] fn read_from_child_storage_returns_some() { - let test_trie = test_trie(); + read_from_child_storage_returns_some_inner(false); + read_from_child_storage_returns_some_inner(true); + } + fn read_from_child_storage_returns_some_inner(flagged: bool) { + let test_trie = test_trie(flagged); assert_eq!( test_trie .child_storage(&ChildInfo::new_default(CHILD_KEY_1), b"value3") @@ -341,12 +389,20 @@ pub mod tests { #[test] fn read_from_storage_returns_none() { - assert_eq!(test_trie().storage(b"non-existing-key").unwrap(), None); + read_from_storage_returns_none_inner(false); + read_from_storage_returns_none_inner(true); + } + fn read_from_storage_returns_none_inner(flagged: bool) { + assert_eq!(test_trie(flagged).storage(b"non-existing-key").unwrap(), None); } #[test] fn pairs_are_not_empty_on_non_empty_storage() { - assert!(!test_trie().pairs().is_empty()); + pairs_are_not_empty_on_non_empty_storage_inner(false); + pairs_are_not_empty_on_non_empty_storage_inner(true); + } + fn pairs_are_not_empty_on_non_empty_storage_inner(flagged: bool) { + assert!(!test_trie(flagged).pairs().is_empty()); } #[test] @@ -354,6 +410,7 @@ pub mod tests { assert!(TrieBackend::, BlakeTwo256>::new( PrefixedMemoryDB::default(), Default::default(), + Default::default(), ) .pairs() .is_empty()); @@ -361,25 +418,32 @@ pub mod tests { #[test] fn storage_root_is_non_default() { - assert!(test_trie().storage_root(iter::empty()).0 != H256::repeat_byte(0)); + storage_root_is_non_default_inner(false); + storage_root_is_non_default_inner(true); } - - #[test] - fn storage_root_transaction_is_empty() { - assert!(test_trie().storage_root(iter::empty()).1.drain().is_empty()); + fn storage_root_is_non_default_inner(flagged: bool) { + assert!(test_trie(flagged).storage_root(iter::empty()).0 != H256::repeat_byte(0)); } #[test] fn storage_root_transaction_is_non_empty() { + storage_root_transaction_is_non_empty_inner(false); + storage_root_transaction_is_non_empty_inner(true); + } + fn storage_root_transaction_is_non_empty_inner(flagged: bool) { let (new_root, mut tx) = - test_trie().storage_root(iter::once((&b"new-key"[..], Some(&b"new-value"[..])))); + test_trie(flagged).storage_root(iter::once((&b"new-key"[..], Some(&b"new-value"[..])))); assert!(!tx.drain().is_empty()); - assert!(new_root != test_trie().storage_root(iter::empty()).0); + assert!(new_root != test_trie(false).storage_root(iter::empty()).0); } #[test] fn prefix_walking_works() { - let trie = test_trie(); + prefix_walking_works_inner(false); + prefix_walking_works_inner(true); + } + fn prefix_walking_works_inner(flagged: bool) { + let trie = test_trie(flagged); let mut seen = HashSet::new(); trie.for_keys_with_prefix(b"value", |key| { diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 557a098fbaf79..1dd8b80792d0f 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -20,15 
+20,15 @@ use crate::{backend::Consolidate, debug, warn, StorageKey, StorageValue}; use codec::Encode; -use hash_db::{self, Hasher, Prefix}; +use hash_db::{self, AsHashDB, HashDB, HashDBRef, Hasher, Prefix}; #[cfg(feature = "std")] use parking_lot::RwLock; use sp_core::storage::ChildInfo; use sp_std::{boxed::Box, ops::Deref, vec::Vec}; use sp_trie::{ empty_child_trie_root, read_child_trie_value, read_trie_value, - trie_types::{Layout, TrieDB, TrieError}, - DBValue, KeySpacedDB, MemoryDB, PrefixedMemoryDB, Trie, TrieDBIterator, + trie_types::{TrieDB, TrieError}, + DBValue, KeySpacedDB, Layout, PrefixedMemoryDB, Trie, TrieDBIterator, TrieDBKeyIterator, }; #[cfg(feature = "std")] use std::collections::HashMap; @@ -175,7 +175,7 @@ where child_info: Option<&ChildInfo>, key: &[u8], ) -> Result> { - let dyn_eph: &dyn hash_db::HashDBRef<_, _>; + let dyn_eph: &dyn HashDBRef<_, _>; let keyspace_eph; if let Some(child_info) = child_info.as_ref() { keyspace_eph = KeySpacedDB::new(self, child_info.keyspace()); @@ -186,7 +186,7 @@ where let trie = TrieDB::::new(dyn_eph, root).map_err(|e| format!("TrieDB creation error: {}", e))?; - let mut iter = trie.iter().map_err(|e| format!("TrieDB iteration error: {}", e))?; + let mut iter = trie.key_iter().map_err(|e| format!("TrieDB iteration error: {}", e))?; // The key just after the one given in input, basically `key++0`. // Note: We are sure this is the next key if: @@ -202,7 +202,7 @@ where let next_element = iter.next(); let next_key = if let Some(next_element) = next_element { - let (next_key, _) = + let next_key = next_element.map_err(|e| format!("TrieDB iterator next error: {}", e))?; Some(next_key) } else { @@ -288,17 +288,7 @@ where &self.root }; - let _ = self.trie_iter_inner( - root, - prefix, - |k, _v| { - f(&k); - true - }, - child_info, - None, - false, - ); + self.trie_iter_key_inner(root, prefix, |k| f(k), child_info) } /// Execute given closure for all keys starting with prefix. @@ -317,32 +307,70 @@ where }; let mut root = H::Out::default(); root.as_mut().copy_from_slice(&root_vec); - let _ = self.trie_iter_inner( + self.trie_iter_key_inner( &root, Some(prefix), - |k, _v| { - f(&k); + |k| { + f(k); true }, Some(child_info), - None, - false, - ); + ) } /// Execute given closure for all keys starting with prefix. pub fn for_keys_with_prefix(&self, prefix: &[u8], mut f: F) { - let _ = self.trie_iter_inner( + self.trie_iter_key_inner( &self.root, Some(prefix), - |k, _v| { - f(&k); + |k| { + f(k); true }, None, - None, - false, - ); + ) + } + + fn trie_iter_key_inner bool>( + &self, + root: &H::Out, + prefix: Option<&[u8]>, + mut f: F, + child_info: Option<&ChildInfo>, + ) { + let mut iter = move |db| -> sp_std::result::Result<(), Box>> { + let trie = TrieDB::::new(db, root)?; + let iter = if let Some(prefix) = prefix.as_ref() { + TrieDBKeyIterator::new_prefixed(&trie, prefix)? + } else { + TrieDBKeyIterator::new(&trie)? 
+ }; + + for x in iter { + let key = x?; + + debug_assert!(prefix + .as_ref() + .map(|prefix| key.starts_with(prefix)) + .unwrap_or(true)); + + if !f(&key) { + break + } + } + + Ok(()) + }; + + let result = if let Some(child_info) = child_info { + let db = KeySpacedDB::new(self, child_info.keyspace()); + iter(&db) + } else { + iter(self) + }; + if let Err(e) = result { + debug!(target: "trie", "Error while iterating by prefix: {}", e); + } } fn trie_iter_inner, Vec) -> bool>( @@ -411,13 +439,13 @@ pub(crate) struct Ephemeral<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { overlay: &'a mut S::Overlay, } -impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> hash_db::AsHashDB +impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> AsHashDB for Ephemeral<'a, S, H> { - fn as_hash_db<'b>(&'b self) -> &'b (dyn hash_db::HashDB + 'b) { + fn as_hash_db<'b>(&'b self) -> &'b (dyn HashDB + 'b) { self } - fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { + fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn HashDB + 'b) { self } } @@ -432,7 +460,7 @@ impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDB for Ephemeral<'a, S, H> { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - if let Some(val) = hash_db::HashDB::get(self.overlay, key, prefix) { + if let Some(val) = HashDB::get(self.overlay, key, prefix) { Some(val) } else { match self.storage.get(&key, prefix) { @@ -446,38 +474,37 @@ impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDB } fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - hash_db::HashDB::get(self, key, prefix).is_some() + HashDB::get(self, key, prefix).is_some() } fn insert(&mut self, prefix: Prefix, value: &[u8]) -> H::Out { - hash_db::HashDB::insert(self.overlay, prefix, value) + HashDB::insert(self.overlay, prefix, value) } fn emplace(&mut self, key: H::Out, prefix: Prefix, value: DBValue) { - hash_db::HashDB::emplace(self.overlay, key, prefix, value) + HashDB::emplace(self.overlay, key, prefix, value) } fn remove(&mut self, key: &H::Out, prefix: Prefix) { - hash_db::HashDB::remove(self.overlay, key, prefix) + HashDB::remove(self.overlay, key, prefix) } } -impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDBRef - for Ephemeral<'a, S, H> -{ +impl<'a, S: 'a + TrieBackendStorage, H: Hasher> HashDBRef for Ephemeral<'a, S, H> { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - hash_db::HashDB::get(self, key, prefix) + HashDB::get(self, key, prefix) } fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - hash_db::HashDB::contains(self, key, prefix) + HashDB::contains(self, key, prefix) } } /// Key-value pairs storage that is used by trie backend essence. pub trait TrieBackendStorage: Send + Sync { /// Type of in-memory overlay. - type Overlay: hash_db::HashDB + Default + Consolidate; + type Overlay: HashDB + Default + Consolidate; + /// Get the value stored at key. fn get(&self, key: &H::Out, prefix: Prefix) -> Result>; } @@ -492,35 +519,28 @@ impl TrieBackendStorage for Arc> { } } -// This implementation is used by test storage trie clients. 
-impl TrieBackendStorage for PrefixedMemoryDB { - type Overlay = PrefixedMemoryDB; - - fn get(&self, key: &H::Out, prefix: Prefix) -> Result> { - Ok(hash_db::HashDB::get(self, key, prefix)) - } -} - -impl TrieBackendStorage for MemoryDB { - type Overlay = MemoryDB; +impl TrieBackendStorage for sp_trie::GenericMemoryDB +where + H: Hasher, + KF: sp_trie::KeyFunction + Send + Sync, +{ + type Overlay = Self; fn get(&self, key: &H::Out, prefix: Prefix) -> Result> { Ok(hash_db::HashDB::get(self, key, prefix)) } } -impl, H: Hasher> hash_db::AsHashDB - for TrieBackendEssence -{ - fn as_hash_db<'b>(&'b self) -> &'b (dyn hash_db::HashDB + 'b) { +impl, H: Hasher> AsHashDB for TrieBackendEssence { + fn as_hash_db<'b>(&'b self) -> &'b (dyn HashDB + 'b) { self } - fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { + fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn HashDB + 'b) { self } } -impl, H: Hasher> hash_db::HashDB for TrieBackendEssence { +impl, H: Hasher> HashDB for TrieBackendEssence { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { if *key == self.empty { return Some([0u8].to_vec()) @@ -535,7 +555,7 @@ impl, H: Hasher> hash_db::HashDB for TrieBa } fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - hash_db::HashDB::get(self, key, prefix).is_some() + HashDB::get(self, key, prefix).is_some() } fn insert(&mut self, _prefix: Prefix, _value: &[u8]) -> H::Out { @@ -551,15 +571,13 @@ impl, H: Hasher> hash_db::HashDB for TrieBa } } -impl, H: Hasher> hash_db::HashDBRef - for TrieBackendEssence -{ +impl, H: Hasher> HashDBRef for TrieBackendEssence { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - hash_db::HashDB::get(self, key, prefix) + HashDB::get(self, key, prefix) } fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - hash_db::HashDB::contains(self, key, prefix) + HashDB::contains(self, key, prefix) } } diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 45474a44693ab..3b20797d6dc0b 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -232,6 +232,20 @@ pub mod well_known_keys { } } +/// Configuration value for a given threshold. +pub fn trie_threshold_encode(threshold: u32) -> Vec { + codec::Compact(threshold).encode() +} + +/// Configuration threshold from encoded, invalid encoded +/// is same as no threshold. +pub fn trie_threshold_decode(mut encoded: &[u8]) -> Option { + codec::Compact::::decode(&mut encoded).ok().map(|compact| compact.0) +} + +/// Default value to use as a threshold for testing. +pub const TEST_DEFAULT_INLINE_VALUE_THESHOLD: u32 = 33; + /// Information related to a child state. #[derive(Debug, Clone)] #[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))] diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 8ba13284d379f..473cbc2337492 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -36,8 +36,8 @@ pub use memory_db::prefixed_key; pub use memory_db::KeyFunction; /// The Substrate format implementation of `NodeCodec`. pub use node_codec::NodeCodec; -use sp_std::{borrow::Borrow, boxed::Box, marker::PhantomData, vec::Vec}; -pub use storage_proof::{CompactProof, StorageProof}; +use sp_std::{borrow::Borrow, boxed::Box, fmt, marker::PhantomData, vec::Vec}; +pub use storage_proof::{CompactProof, StorageProof, state_version_encoded_size}; /// Trie codec reexport, mainly child trie support /// for trie compact proof. 
pub use trie_codec::{decode_compact, encode_compact, Error as CompactProofError}; @@ -45,40 +45,80 @@ pub use trie_db::proof::VerifyError; use trie_db::proof::{generate_proof, verify_proof}; /// Various re-exports from the `trie-db` crate. pub use trie_db::{ - nibble_ops, CError, DBValue, Query, Recorder, Trie, TrieConfiguration, TrieDBIterator, + nibble_ops, + node::{NodePlan, ValuePlan}, + CError, DBValue, Query, Recorder, Trie, TrieConfiguration, TrieDBIterator, TrieDBKeyIterator, TrieLayout, TrieMut, }; /// The Substrate format implementation of `TrieStream`. pub use trie_stream::TrieStream; -#[derive(Default)] /// substrate trie layout -pub struct Layout(sp_std::marker::PhantomData); +pub struct Layout(Option, sp_std::marker::PhantomData); -impl TrieLayout for Layout { +impl fmt::Debug for Layout { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Layout").finish() + } +} + +impl Clone for Layout { + fn clone(&self) -> Self { + Layout(self.0, sp_std::marker::PhantomData) + } +} + +impl Default for Layout { + fn default() -> Self { + Layout(None, sp_std::marker::PhantomData) + } +} + +impl Layout { + /// Layout with inner hash value size limit active. + pub fn with_max_inline_value(threshold: u32) -> Self { + Layout(Some(threshold), sp_std::marker::PhantomData) + } +} + +impl TrieLayout for Layout +where + H: Hasher, +{ const USE_EXTENSION: bool = false; const ALLOW_EMPTY: bool = true; + type Hash = H; type Codec = NodeCodec; + + fn max_inline_value(&self) -> Option { + self.0 + } } -impl TrieConfiguration for Layout { - fn trie_root(input: I) -> ::Out +impl TrieConfiguration for Layout +where + H: Hasher, +{ + fn trie_root(&self, input: I) -> ::Out where I: IntoIterator, A: AsRef<[u8]> + Ord, B: AsRef<[u8]>, { - trie_root::trie_root_no_extension::(input) + trie_root::trie_root_no_extension::(input, self.max_inline_value()) } - fn trie_root_unhashed(input: I) -> Vec + fn trie_root_unhashed(&self, input: I) -> Vec where I: IntoIterator, A: AsRef<[u8]> + Ord, B: AsRef<[u8]>, { - trie_root::unhashed_trie_no_extension::(input) + trie_root::unhashed_trie_no_extension::( + input, + self.max_inline_value(), + ) } fn encode_index(input: u32) -> Vec { @@ -118,7 +158,6 @@ pub type TrieDBMut<'a, L> = trie_db::TrieDBMut<'a, L>; pub type Lookup<'a, L, Q> = trie_db::Lookup<'a, L, Q>; /// Hash type for a trie layout. pub type TrieHash = <::Hash as Hasher>::Out; - /// This module is for non generic definition of trie type. /// Only the `Hasher` trait is generic in this case. pub mod trie_types { @@ -141,16 +180,18 @@ pub mod trie_types { /// For a key `K` that is included in the `db` a proof of inclusion is generated. /// For a key `K` that is not included in the `db` a proof of non-inclusion is generated. /// These can be later checked in `verify_trie_proof`. -pub fn generate_trie_proof<'a, L: TrieConfiguration, I, K, DB>( +pub fn generate_trie_proof<'a, L, I, K, DB>( db: &DB, root: TrieHash, keys: I, ) -> Result>, Box>> where + L: TrieConfiguration, I: IntoIterator, K: 'a + AsRef<[u8]>, DB: hash_db::HashDBRef, { + // Can use default layout (read only). let trie = TrieDB::::new(db, &root)?; generate_proof(&trie, keys) } @@ -163,12 +204,13 @@ where /// checked for inclusion in the proof. /// If the value is omitted (`(key, None)`), this key will be checked for non-inclusion in the /// proof. 
-pub fn verify_trie_proof<'a, L: TrieConfiguration, I, K, V>( +pub fn verify_trie_proof<'a, L, I, K, V>( root: &TrieHash, proof: &[Vec], items: I, ) -> Result<(), VerifyError, error::Error>> where + L: TrieConfiguration, I: IntoIterator)>, K: 'a + AsRef<[u8]>, V: 'a + AsRef<[u8]>, @@ -181,6 +223,7 @@ pub fn delta_trie_root( db: &mut DB, mut root: TrieHash, delta: I, + layout: L, ) -> Result, Box>> where I: IntoIterator, @@ -190,7 +233,7 @@ where DB: hash_db::HashDB, { { - let mut trie = TrieDBMut::::from_existing(db, &mut root)?; + let mut trie = TrieDBMut::::from_existing_with_layout(db, &mut root, layout)?; let mut delta = delta.into_iter().collect::>(); delta.sort_by(|l, r| l.0.borrow().cmp(r.0.borrow())); @@ -207,49 +250,57 @@ where } /// Read a value from the trie. -pub fn read_trie_value>( +pub fn read_trie_value( db: &DB, root: &TrieHash, key: &[u8], -) -> Result>, Box>> { - TrieDB::::new(&*db, root)?.get(key).map(|x| x.map(|val| val.to_vec())) +) -> Result>, Box>> +where + L: TrieConfiguration, + DB: hash_db::HashDBRef, +{ + Ok(TrieDB::::new(&*db, root)?.get(key).map(|x| x.map(|val| val.to_vec()))?) } /// Read a value from the trie with given Query. -pub fn read_trie_value_with< - L: TrieConfiguration, - Q: Query, - DB: hash_db::HashDBRef, ->( +pub fn read_trie_value_with( db: &DB, root: &TrieHash, key: &[u8], query: Q, -) -> Result>, Box>> { - TrieDB::::new(&*db, root)? +) -> Result>, Box>> +where + L: TrieConfiguration, + Q: Query, + DB: hash_db::HashDBRef, +{ + Ok(TrieDB::::new(&*db, root)? .get_with(key, query) - .map(|x| x.map(|val| val.to_vec())) + .map(|x| x.map(|val| val.to_vec()))?) } /// Determine the empty trie root. pub fn empty_trie_root() -> ::Out { - L::trie_root::<_, Vec, Vec>(core::iter::empty()) + L::default().trie_root::<_, Vec, Vec>(core::iter::empty()) } /// Determine the empty child trie root. pub fn empty_child_trie_root() -> ::Out { - L::trie_root::<_, Vec, Vec>(core::iter::empty()) + L::default().trie_root::<_, Vec, Vec>(core::iter::empty()) } /// Determine a child trie root given its ordered contents, closed form. H is the default hasher, /// but a generic implementation may ignore this type parameter and use other hashers. -pub fn child_trie_root(input: I) -> ::Out +pub fn child_trie_root( + layout: &L, + input: I, +) -> ::Out where I: IntoIterator, A: AsRef<[u8]> + Ord, B: AsRef<[u8]>, { - L::trie_root(input) + layout.trie_root(input) } /// Determine a child trie root given a hash DB and delta values. H is the default hasher, @@ -259,6 +310,7 @@ pub fn child_delta_trie_root( db: &mut DB, root_data: RD, delta: I, + layout: L, ) -> Result<::Out, Box>> where I: IntoIterator, @@ -273,7 +325,7 @@ where root.as_mut().copy_from_slice(root_data.as_ref()); let mut db = KeySpacedDBMut::new(&mut *db, keyspace); - delta_trie_root::(&mut db, root, delta) + delta_trie_root::(&mut db, root, delta, layout) } /// Record all keys for a given root. @@ -319,7 +371,7 @@ where } /// Read a value from the child trie with given query. -pub fn read_child_trie_value_with, DB>( +pub fn read_child_trie_value_with( keyspace: &[u8], db: &DB, root_slice: &[u8], @@ -327,6 +379,8 @@ pub fn read_child_trie_value_with Result>, Box>> where + L: TrieConfiguration, + Q: Query, DB: hash_db::HashDBRef, { let mut root = TrieHash::::default(); @@ -444,11 +498,15 @@ where /// Constants used into trie simplification codec. 
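// A usage sketch for the layout-aware `delta_trie_root` above (test-only, reusing the
// `Blake2Hasher` and the 33-byte threshold already used by this crate's tests): the
// caller now picks the layout, and with it whether oversized values are hashed.
#[cfg(test)]
#[allow(dead_code)]
fn delta_with_layout_sketch(
    db: &mut MemoryDB<sp_core::Blake2Hasher>,
    root: TrieHash<Layout<sp_core::Blake2Hasher>>,
) -> TrieHash<Layout<sp_core::Blake2Hasher>> {
    let delta = vec![(b"key".to_vec(), Some(b"value".to_vec()))];
    let layout = Layout::<sp_core::Blake2Hasher>::with_max_inline_value(33);
    delta_trie_root::<Layout<sp_core::Blake2Hasher>, _, _, _, _, _>(db, root, delta, layout)
        .expect("trie write failed")
}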
mod trie_constants { - pub const EMPTY_TRIE: u8 = 0; - pub const NIBBLE_SIZE_BOUND: usize = u16::MAX as usize; + const FIRST_PREFIX: u8 = 0b_00 << 6; + pub const NIBBLE_SIZE_BOUND: usize = u16::max_value() as usize; pub const LEAF_PREFIX_MASK: u8 = 0b_01 << 6; pub const BRANCH_WITHOUT_MASK: u8 = 0b_10 << 6; pub const BRANCH_WITH_MASK: u8 = 0b_11 << 6; + pub const EMPTY_TRIE: u8 = FIRST_PREFIX | (0b_00 << 4); + pub const ALT_HASHING_LEAF_PREFIX_MASK: u8 = FIRST_PREFIX | (0b_1 << 5); + pub const ALT_HASHING_BRANCH_WITH_MASK: u8 = FIRST_PREFIX | (0b_01 << 4); + pub const ESCAPE_COMPACT_HEADER: u8 = EMPTY_TRIE | 0b_00_01; } #[cfg(test)] @@ -457,25 +515,28 @@ mod tests { use codec::{Compact, Decode, Encode}; use hash_db::{HashDB, Hasher}; use hex_literal::hex; - use sp_core::Blake2Hasher; + use sp_core::{storage::TEST_DEFAULT_INLINE_VALUE_THESHOLD as TRESHOLD, Blake2Hasher}; use trie_db::{DBValue, NodeCodec as NodeCodecT, Trie, TrieMut}; use trie_standardmap::{Alphabet, StandardMap, ValueMode}; type Layout = super::Layout; + type MemoryDBMeta = + memory_db::MemoryDB, trie_db::DBValue, MemTracker>; + fn hashed_null_node() -> TrieHash { ::hashed_null_node() } - fn check_equivalent(input: &Vec<(&[u8], &[u8])>) { + fn check_equivalent(input: &Vec<(&[u8], &[u8])>, layout: T) { { - let closed_form = T::trie_root(input.clone()); - let d = T::trie_root_unhashed(input.clone()); + let closed_form = layout.trie_root(input.clone()); + let d = layout.trie_root_unhashed(input.clone()); println!("Data: {:#x?}, {:#x?}", d, Blake2Hasher::hash(&d[..])); let persistent = { - let mut memdb = MemoryDB::default(); + let mut memdb = MemoryDBMeta::default(); let mut root = Default::default(); - let mut t = TrieDBMut::::new(&mut memdb, &mut root); + let mut t = TrieDBMut::::new_with_layout(&mut memdb, &mut root, layout); for (x, y) in input.iter().rev() { t.insert(x, y).unwrap(); } @@ -485,17 +546,17 @@ mod tests { } } - fn check_iteration(input: &Vec<(&[u8], &[u8])>) { - let mut memdb = MemoryDB::default(); + fn check_iteration(input: &Vec<(&[u8], &[u8])>, layout: T) { + let mut memdb = MemoryDBMeta::default(); let mut root = Default::default(); { - let mut t = TrieDBMut::::new(&mut memdb, &mut root); + let mut t = TrieDBMut::::new_with_layout(&mut memdb, &mut root, layout.clone()); for (x, y) in input.clone() { t.insert(x, y).unwrap(); } } { - let t = TrieDB::::new(&mut memdb, &root).unwrap(); + let t = TrieDB::::new_with_layout(&mut memdb, &root, layout).unwrap(); assert_eq!( input.iter().map(|(i, j)| (i.to_vec(), j.to_vec())).collect::>(), t.iter() @@ -506,6 +567,15 @@ mod tests { } } + fn check_input(input: &Vec<(&[u8], &[u8])>) { + let layout = Layout::default(); + check_equivalent::(input, layout.clone()); + check_iteration::(input, layout); + let layout = Layout::with_max_inline_value(TRESHOLD); + check_equivalent::(input, layout.clone()); + check_iteration::(input, layout); + } + #[test] fn default_trie_root() { let mut db = MemoryDB::default(); @@ -513,7 +583,8 @@ mod tests { let mut empty = TrieDBMut::::new(&mut db, &mut root); empty.commit(); let root1 = empty.root().as_ref().to_vec(); - let root2: Vec = Layout::trie_root::<_, Vec, Vec>(std::iter::empty()) + let root2: Vec = Layout::default() + .trie_root::<_, Vec, Vec>(std::iter::empty()) .as_ref() .iter() .cloned() @@ -525,31 +596,27 @@ mod tests { #[test] fn empty_is_equivalent() { let input: Vec<(&[u8], &[u8])> = vec![]; - check_equivalent::(&input); - check_iteration::(&input); + check_input(&input); } #[test] fn leaf_is_equivalent() { let input: 
Vec<(&[u8], &[u8])> = vec![(&[0xaa][..], &[0xbb][..])]; - check_equivalent::(&input); - check_iteration::(&input); + check_input(&input); } #[test] fn branch_is_equivalent() { let input: Vec<(&[u8], &[u8])> = vec![(&[0xaa][..], &[0x10][..]), (&[0xba][..], &[0x11][..])]; - check_equivalent::(&input); - check_iteration::(&input); + check_input(&input); } #[test] fn extension_and_branch_is_equivalent() { let input: Vec<(&[u8], &[u8])> = vec![(&[0xaa][..], &[0x10][..]), (&[0xab][..], &[0x11][..])]; - check_equivalent::(&input); - check_iteration::(&input); + check_input(&input); } #[test] @@ -564,8 +631,7 @@ mod tests { let mut d = st.make(); d.sort_by(|&(ref a, _), &(ref b, _)| a.cmp(b)); let dr = d.iter().map(|v| (&v.0[..], &v.1[..])).collect(); - check_equivalent::(&dr); - check_iteration::(&dr); + check_input(&dr); } #[test] @@ -575,8 +641,7 @@ mod tests { (&[0xaa, 0xaa][..], &[0xaa][..]), (&[0xaa, 0xbb][..], &[0xab][..]), ]; - check_equivalent::(&input); - check_iteration::(&input); + check_input(&input); } #[test] @@ -589,8 +654,7 @@ mod tests { (&[0xbb, 0xbb][..], &[0xbb][..]), (&[0xbb, 0xcc][..], &[0xbc][..]), ]; - check_equivalent::(&input); - check_iteration::(&input); + check_input(&input); } #[test] @@ -602,8 +666,7 @@ mod tests { ), (&[0xba][..], &[0x11][..]), ]; - check_equivalent::(&input); - check_iteration::(&input); + check_input(&input); } #[test] @@ -618,16 +681,19 @@ mod tests { &b"ABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABC"[..], ), ]; - check_equivalent::(&input); - check_iteration::(&input); + check_input(&input); } - fn populate_trie<'db, T: TrieConfiguration>( + fn populate_trie<'db, T>( db: &'db mut dyn HashDB, root: &'db mut TrieHash, v: &[(Vec, Vec)], - ) -> TrieDBMut<'db, T> { - let mut t = TrieDBMut::::new(db, root); + layout: T, + ) -> TrieDBMut<'db, T> + where + T: TrieConfiguration, + { + let mut t = TrieDBMut::::new_with_layout(db, root, layout); for i in 0..v.len() { let key: &[u8] = &v[i].0; let val: &[u8] = &v[i].1; @@ -648,8 +714,12 @@ mod tests { #[test] fn random_should_work() { + random_should_work_inner(true); + random_should_work_inner(false); + } + fn random_should_work_inner(limit_inline_value: bool) { let mut seed = ::Out::zero(); - for test_i in 0..10000 { + for test_i in 0..10_000 { if test_i % 50 == 0 { println!("{:?} of 10000 stress tests done", test_i); } @@ -662,10 +732,16 @@ mod tests { } .make_with(seed.as_fixed_bytes_mut()); - let real = Layout::trie_root(x.clone()); + let layout = if limit_inline_value { + Layout::with_max_inline_value(TRESHOLD) + } else { + Layout::default() + }; + let real = layout.trie_root(x.clone()); let mut memdb = MemoryDB::default(); let mut root = Default::default(); - let mut memtrie = populate_trie::(&mut memdb, &mut root, &x); + + let mut memtrie = populate_trie::(&mut memdb, &mut root, &x, layout.clone()); memtrie.commit(); if *memtrie.root() != real { @@ -698,16 +774,18 @@ mod tests { #[test] fn codec_trie_empty() { + let layout = Layout::default(); let input: Vec<(&[u8], &[u8])> = vec![]; - let trie = Layout::trie_root_unhashed::<_, _, _>(input); + let trie = layout.trie_root_unhashed(input); println!("trie: {:#x?}", trie); assert_eq!(trie, vec![0x0]); } #[test] fn codec_trie_single_tuple() { + let layout = Layout::default(); let input = vec![(vec![0xaa], vec![0xbb])]; - let trie = Layout::trie_root_unhashed::<_, _, _>(input); + let trie = layout.trie_root_unhashed(input); println!("trie: {:#x?}", trie); assert_eq!( trie, @@ -722,8 +800,9 @@ mod tests { #[test] fn 
codec_trie_two_tuples_disjoint_keys() { + let layout = Layout::default(); let input = vec![(&[0x48, 0x19], &[0xfe]), (&[0x13, 0x14], &[0xff])]; - let trie = Layout::trie_root_unhashed::<_, _, _>(input); + let trie = layout.trie_root_unhashed(input); println!("trie: {:#x?}", trie); let mut ex = Vec::::new(); ex.push(0x80); // branch, no value (0b_10..) no nibble @@ -747,6 +826,16 @@ mod tests { #[test] fn iterator_works() { + iterator_works_inner(true); + iterator_works_inner(false); + } + fn iterator_works_inner(limit_inline_value: bool) { + let layout = if limit_inline_value { + Layout::with_max_inline_value(TRESHOLD) + } else { + Layout::default() + }; + let pairs = vec![ (hex!("0103000000000000000464").to_vec(), hex!("0400000000").to_vec()), (hex!("0103000000000000000469").to_vec(), hex!("0401000000").to_vec()), @@ -754,9 +843,9 @@ mod tests { let mut mdb = MemoryDB::default(); let mut root = Default::default(); - let _ = populate_trie::(&mut mdb, &mut root, &pairs); + let _ = populate_trie::(&mut mdb, &mut root, &pairs, layout.clone()); - let trie = TrieDB::::new(&mdb, &root).unwrap(); + let trie = TrieDB::::new_with_layout(&mdb, &root, layout).unwrap(); let iter = trie.iter().unwrap(); let mut iter_pairs = Vec::new(); @@ -777,7 +866,8 @@ mod tests { let mut memdb = MemoryDB::default(); let mut root = Default::default(); - populate_trie::(&mut memdb, &mut root, &pairs); + let layout = Layout::default(); + populate_trie::(&mut memdb, &mut root, &pairs, layout); let non_included_key: Vec = hex!("0909").to_vec(); let proof = @@ -810,7 +900,8 @@ mod tests { let mut memdb = MemoryDB::default(); let mut root = Default::default(); - populate_trie::(&mut memdb, &mut root, &pairs); + let layout = Layout::default(); + populate_trie::(&mut memdb, &mut root, &pairs, layout); let proof = generate_trie_proof::(&memdb, root, &[pairs[0].0.clone()]).unwrap(); @@ -869,12 +960,14 @@ mod tests { &mut proof_db.clone(), storage_root, valid_delta, + Default::default(), ) .unwrap(); let second_storage_root = delta_trie_root::( &mut proof_db.clone(), storage_root, invalid_delta, + Default::default(), ) .unwrap(); diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index d5ffb3219cf68..e630f3222de1e 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -24,7 +24,7 @@ use hash_db::Hasher; use sp_std::{borrow::Borrow, marker::PhantomData, ops::Range, vec::Vec}; use trie_db::{ self, nibble_ops, - node::{NibbleSlicePlan, NodeHandlePlan, NodePlan}, + node::{NibbleSlicePlan, NodeHandlePlan, NodePlan, Value, ValuePlan}, ChildReference, NodeCodec as NodeCodecT, Partial, }; @@ -80,19 +80,23 @@ impl<'a> Input for ByteSliceInput<'a> { #[derive(Default, Clone)] pub struct NodeCodec(PhantomData); -impl NodeCodecT for NodeCodec { - type Error = Error; - type HashOut = H::Out; +impl NodeCodec { + fn decode_plan_inner_hashed(data: &[u8]) -> Result { + let mut input = ByteSliceInput::new(data); - fn hashed_null_node() -> ::Out { - H::hash(::empty_node()) - } + let header = NodeHeader::decode(&mut input)?; + let contains_hash = header.contains_hash_of_value(); - fn decode_plan(data: &[u8]) -> sp_std::result::Result { - let mut input = ByteSliceInput::new(data); - match NodeHeader::decode(&mut input)? 
{ + let branch_has_value = if let NodeHeader::Branch(has_value, _) = &header { + *has_value + } else { + // hashed_value_branch + true + }; + + match header { NodeHeader::Null => Ok(NodePlan::Empty), - NodeHeader::Branch(has_value, nibble_count) => { + NodeHeader::HashedValueBranch(nibble_count) | NodeHeader::Branch(_, nibble_count) => { let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE != 0; // check that the padding is valid (if any) if padding && nibble_ops::pad_left(data[input.offset]) != 0 { @@ -105,11 +109,15 @@ impl NodeCodecT for NodeCodec { let partial_padding = nibble_ops::number_padding(nibble_count); let bitmap_range = input.take(BITMAP_LENGTH)?; let bitmap = Bitmap::decode(&data[bitmap_range])?; - let value = if has_value { - let count = >::decode(&mut input)?.0 as usize; - Some(input.take(count)?) + let value = if branch_has_value { + if contains_hash { + ValuePlan::HashedValue(input.take(H::LENGTH)?) + } else { + let count = >::decode(&mut input)?.0 as usize; + ValuePlan::Value(input.take(count)?) + } } else { - None + ValuePlan::NoValue }; let mut children = [ None, None, None, None, None, None, None, None, None, None, None, None, None, @@ -132,7 +140,7 @@ impl NodeCodecT for NodeCodec { children, }) }, - NodeHeader::Leaf(nibble_count) => { + NodeHeader::HashedValueLeaf(nibble_count) | NodeHeader::Leaf(nibble_count) => { let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE != 0; // check that the padding is valid (if any) if padding && nibble_ops::pad_left(data[input.offset]) != 0 { @@ -143,14 +151,37 @@ impl NodeCodecT for NodeCodec { nibble_ops::NIBBLE_PER_BYTE, )?; let partial_padding = nibble_ops::number_padding(nibble_count); - let count = >::decode(&mut input)?.0 as usize; + let value = if contains_hash { + ValuePlan::HashedValue(input.take(H::LENGTH)?) + } else { + let count = >::decode(&mut input)?.0 as usize; + ValuePlan::Value(input.take(count)?) 
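The decode path above derives everything from the header: a node flagged as carrying a hashed value stores exactly one hash, while an inline value keeps the legacy length-prefixed form. A standalone sketch of that rule, with 32 standing in for `H::LENGTH` and the SCALE compact length simplified to a single byte:

use std::ops::Range;

enum ValueRepr {
	NoValue,
	Inline(Range<usize>),
	Hashed(Range<usize>),
}

fn value_repr(data: &[u8], offset: usize, has_value: bool, contains_hash: bool) -> ValueRepr {
	// Stands in for `H::LENGTH`; BlakeTwo256 and Keccak256 both give 32.
	const HASH_LEN: usize = 32;
	if !has_value {
		return ValueRepr::NoValue
	}
	if contains_hash {
		// A hashed value carries no length prefix: it is exactly one hash wide.
		ValueRepr::Hashed(offset..offset + HASH_LEN)
	} else {
		// An inline value keeps the legacy form: length prefix, then the bytes
		// (the real codec reads a SCALE compact here, not a single byte).
		let len = data[offset] as usize;
		ValueRepr::Inline(offset + 1..offset + 1 + len)
	}
}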
+ }; + Ok(NodePlan::Leaf { partial: NibbleSlicePlan::new(partial, partial_padding), - value: input.take(count)?, + value, }) }, } } +} + +impl NodeCodecT for NodeCodec +where + H: Hasher, +{ + const ESCAPE_HEADER: Option<&'static [u8]> = Some(&[trie_constants::ESCAPE_COMPACT_HEADER]); + type Error = Error; + type HashOut = H::Out; + + fn hashed_null_node() -> ::Out { + H::hash(::empty_node()) + } + + fn decode_plan(data: &[u8]) -> Result { + Self::decode_plan_inner_hashed(data) + } fn is_empty_node(data: &[u8]) -> bool { data == ::empty_node() @@ -160,9 +191,24 @@ impl NodeCodecT for NodeCodec { &[trie_constants::EMPTY_TRIE] } - fn leaf_node(partial: Partial, value: &[u8]) -> Vec { - let mut output = partial_encode(partial, NodeKind::Leaf); - value.encode_to(&mut output); + fn leaf_node(partial: Partial, value: Value) -> Vec { + let contains_hash = matches!(&value, Value::HashedValue(..)); + let mut output = if contains_hash { + partial_encode(partial, NodeKind::HashedValueLeaf) + } else { + partial_encode(partial, NodeKind::Leaf) + }; + match value { + Value::Value(value) => { + Compact(value.len() as u32).encode_to(&mut output); + output.extend_from_slice(value); + }, + Value::HashedValue(hash, _) => { + debug_assert!(hash.len() == H::LENGTH); + output.extend_from_slice(hash); + }, + Value::NoValue => unreachable!("Leaf node always with value."), + } output } @@ -171,33 +217,46 @@ impl NodeCodecT for NodeCodec { _nbnibble: usize, _child: ChildReference<::Out>, ) -> Vec { - unreachable!() + unreachable!("No extension codec.") } fn branch_node( _children: impl Iterator::Out>>>>, - _maybe_value: Option<&[u8]>, + _maybe_value: Value, ) -> Vec { - unreachable!() + unreachable!("No extension codec.") } fn branch_node_nibbled( partial: impl Iterator, number_nibble: usize, children: impl Iterator::Out>>>>, - maybe_value: Option<&[u8]>, + value: Value, ) -> Vec { - let mut output = if maybe_value.is_some() { - partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchWithValue) - } else { - partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchNoValue) + let contains_hash = matches!(&value, Value::HashedValue(..)); + let mut output = match (&value, contains_hash) { + (&Value::NoValue, _) => + partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchNoValue), + (_, false) => + partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchWithValue), + (_, true) => + partial_from_iterator_encode(partial, number_nibble, NodeKind::HashedValueBranch), }; + let bitmap_index = output.len(); let mut bitmap: [u8; BITMAP_LENGTH] = [0; BITMAP_LENGTH]; (0..BITMAP_LENGTH).for_each(|_| output.push(0)); - if let Some(value) = maybe_value { - value.encode_to(&mut output); - }; + match value { + Value::Value(value) => { + Compact(value.len() as u32).encode_to(&mut output); + output.extend_from_slice(value); + }, + Value::HashedValue(hash, _) => { + debug_assert!(hash.len() == H::LENGTH); + output.extend_from_slice(hash); + }, + Value::NoValue => (), + } Bitmap::encode( children.map(|maybe_child| match maybe_child.borrow() { Some(ChildReference::Hash(h)) => { @@ -229,11 +288,15 @@ fn partial_from_iterator_encode>( ) -> Vec { let nibble_count = sp_std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, nibble_count); - let mut output = Vec::with_capacity(3 + (nibble_count / nibble_ops::NIBBLE_PER_BYTE)); + let mut output = Vec::with_capacity(4 + (nibble_count / nibble_ops::NIBBLE_PER_BYTE)); match node_kind { NodeKind::Leaf => NodeHeader::Leaf(nibble_count).encode_to(&mut 
output),
 		NodeKind::BranchWithValue => NodeHeader::Branch(true, nibble_count).encode_to(&mut output),
 		NodeKind::BranchNoValue => NodeHeader::Branch(false, nibble_count).encode_to(&mut output),
+		NodeKind::HashedValueLeaf =>
+			NodeHeader::HashedValueLeaf(nibble_count).encode_to(&mut output),
+		NodeKind::HashedValueBranch =>
+			NodeHeader::HashedValueBranch(nibble_count).encode_to(&mut output),
 	};
 	output.extend(partial);
 	output
@@ -247,11 +310,15 @@ fn partial_encode(partial: Partial, node_kind: NodeKind) -> Vec<u8> {
 
 	let nibble_count = sp_std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, nibble_count);
 
-	let mut output = Vec::with_capacity(3 + partial.1.len());
+	let mut output = Vec::with_capacity(4 + partial.1.len());
 	match node_kind {
 		NodeKind::Leaf => NodeHeader::Leaf(nibble_count).encode_to(&mut output),
 		NodeKind::BranchWithValue => NodeHeader::Branch(true, nibble_count).encode_to(&mut output),
 		NodeKind::BranchNoValue => NodeHeader::Branch(false, nibble_count).encode_to(&mut output),
+		NodeKind::HashedValueLeaf =>
+			NodeHeader::HashedValueLeaf(nibble_count).encode_to(&mut output),
+		NodeKind::HashedValueBranch =>
+			NodeHeader::HashedValueBranch(nibble_count).encode_to(&mut output),
 	};
 	if number_nibble_encoded > 0 {
 		output.push(nibble_ops::pad_right((partial.0).1));
diff --git a/primitives/trie/src/node_header.rs b/primitives/trie/src/node_header.rs
index 9f05113a35935..839fffb87058f 100644
--- a/primitives/trie/src/node_header.rs
+++ b/primitives/trie/src/node_header.rs
@@ -25,8 +25,23 @@ use sp_std::iter::once;
 #[derive(Copy, Clone, PartialEq, Eq, sp_core::RuntimeDebug)]
 pub(crate) enum NodeHeader {
 	Null,
+	// contains whether there is a value, and the nibble count
 	Branch(bool, usize),
+	// contains the nibble count
 	Leaf(usize),
+	// contains the nibble count
+	HashedValueBranch(usize),
+	// contains the nibble count
+ HashedValueLeaf(usize), +} + +impl NodeHeader { + pub(crate) fn contains_hash_of_value(&self) -> bool { + match self { + NodeHeader::HashedValueBranch(_) | NodeHeader::HashedValueLeaf(_) => true, + _ => false, + } + } } /// NodeHeader without content @@ -34,6 +49,8 @@ pub(crate) enum NodeKind { Leaf, BranchNoValue, BranchWithValue, + HashedValueLeaf, + HashedValueBranch, } impl Encode for NodeHeader { @@ -41,11 +58,27 @@ impl Encode for NodeHeader { match self { NodeHeader::Null => output.push_byte(trie_constants::EMPTY_TRIE), NodeHeader::Branch(true, nibble_count) => - encode_size_and_prefix(*nibble_count, trie_constants::BRANCH_WITH_MASK, output), - NodeHeader::Branch(false, nibble_count) => - encode_size_and_prefix(*nibble_count, trie_constants::BRANCH_WITHOUT_MASK, output), + encode_size_and_prefix(*nibble_count, trie_constants::BRANCH_WITH_MASK, 2, output), + NodeHeader::Branch(false, nibble_count) => encode_size_and_prefix( + *nibble_count, + trie_constants::BRANCH_WITHOUT_MASK, + 2, + output, + ), NodeHeader::Leaf(nibble_count) => - encode_size_and_prefix(*nibble_count, trie_constants::LEAF_PREFIX_MASK, output), + encode_size_and_prefix(*nibble_count, trie_constants::LEAF_PREFIX_MASK, 2, output), + NodeHeader::HashedValueBranch(nibble_count) => encode_size_and_prefix( + *nibble_count, + trie_constants::ALT_HASHING_BRANCH_WITH_MASK, + 4, + output, + ), + NodeHeader::HashedValueLeaf(nibble_count) => encode_size_and_prefix( + *nibble_count, + trie_constants::ALT_HASHING_LEAF_PREFIX_MASK, + 3, + output, + ), } } } @@ -59,13 +92,22 @@ impl Decode for NodeHeader { return Ok(NodeHeader::Null) } match i & (0b11 << 6) { - trie_constants::LEAF_PREFIX_MASK => Ok(NodeHeader::Leaf(decode_size(i, input)?)), - trie_constants::BRANCH_WITHOUT_MASK => - Ok(NodeHeader::Branch(false, decode_size(i, input)?)), + trie_constants::LEAF_PREFIX_MASK => Ok(NodeHeader::Leaf(decode_size(i, input, 2)?)), trie_constants::BRANCH_WITH_MASK => - Ok(NodeHeader::Branch(true, decode_size(i, input)?)), - // do not allow any special encoding - _ => Err("Unallowed encoding".into()), + Ok(NodeHeader::Branch(true, decode_size(i, input, 2)?)), + trie_constants::BRANCH_WITHOUT_MASK => + Ok(NodeHeader::Branch(false, decode_size(i, input, 2)?)), + trie_constants::EMPTY_TRIE => { + if i & (0b111 << 5) == trie_constants::ALT_HASHING_LEAF_PREFIX_MASK { + Ok(NodeHeader::HashedValueLeaf(decode_size(i, input, 3)?)) + } else if i & (0b1111 << 4) == trie_constants::ALT_HASHING_BRANCH_WITH_MASK { + Ok(NodeHeader::HashedValueBranch(decode_size(i, input, 4)?)) + } else { + // do not allow any special encoding + Err("Unallowed encoding".into()) + } + }, + _ => unreachable!(), } } } @@ -73,12 +115,20 @@ impl Decode for NodeHeader { /// Returns an iterator over encoded bytes for node header and size. /// Size encoding allows unlimited, length inefficient, representation, but /// is bounded to 16 bit maximum value to avoid possible DOS. 
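In `leaf_node` and `branch_node_nibbled` above, the value is appended in one of two shapes: `Compact(len)` followed by the bytes when inline, or the bare hash (whose width is fixed by the hasher) when hashed. A minimal sketch of that append step using the `codec` (parity-scale-codec) crate the file already depends on:

use codec::{Compact, Encode};

/// Append a node value in the shape used by the codec above.
fn encode_value(value: &[u8], hashed: bool, hash_len: usize) -> Vec<u8> {
	let mut output = Vec::new();
	if hashed {
		// The hash replaces the value; its length is implied by the hasher.
		debug_assert_eq!(value.len(), hash_len);
		output.extend_from_slice(value);
	} else {
		// Inline values keep the pre-existing `Compact` length prefix.
		Compact(value.len() as u32).encode_to(&mut output);
		output.extend_from_slice(value);
	}
	output
}

fn main() {
	// A 3-byte inline value: one compact length byte (3 << 2 = 12), then the bytes.
	assert_eq!(encode_value(&[1, 2, 3], false, 32), vec![12, 1, 2, 3]);
	// A hashed value: the 32 hash bytes only, no length prefix.
	assert_eq!(encode_value(&[7u8; 32], true, 32).len(), 32);
}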
-pub(crate) fn size_and_prefix_iterator(size: usize, prefix: u8) -> impl Iterator { +pub(crate) fn size_and_prefix_iterator( + size: usize, + prefix: u8, + prefix_mask: usize, +) -> impl Iterator { let size = sp_std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, size); - let l1 = sp_std::cmp::min(62, size); - let (first_byte, mut rem) = - if size == l1 { (once(prefix + l1 as u8), 0) } else { (once(prefix + 63), size - l1) }; + let max_value = 255u8 >> prefix_mask; + let l1 = sp_std::cmp::min(max_value as usize - 1, size); + let (first_byte, mut rem) = if size == l1 { + (once(prefix + l1 as u8), 0) + } else { + (once(prefix + max_value as u8), size - l1) + }; let next_bytes = move || { if rem > 0 { if rem < 256 { @@ -96,17 +146,25 @@ pub(crate) fn size_and_prefix_iterator(size: usize, prefix: u8) -> impl Iterator first_byte.chain(sp_std::iter::from_fn(next_bytes)) } -/// Encodes size and prefix to a stream output. -fn encode_size_and_prefix(size: usize, prefix: u8, out: &mut W) { - for b in size_and_prefix_iterator(size, prefix) { +/// Encodes size and prefix to a stream output (prefix on 2 first bit only). +fn encode_size_and_prefix(size: usize, prefix: u8, prefix_mask: usize, out: &mut W) +where + W: Output + ?Sized, +{ + for b in size_and_prefix_iterator(size, prefix, prefix_mask) { out.push_byte(b) } } /// Decode size only from stream input and header byte. -fn decode_size(first: u8, input: &mut impl Input) -> Result { - let mut result = (first & 255u8 >> 2) as usize; - if result < 63 { +fn decode_size( + first: u8, + input: &mut impl Input, + prefix_mask: usize, +) -> Result { + let max_value = 255u8 >> prefix_mask; + let mut result = (first & max_value) as usize; + if result < max_value as usize { return Ok(result) } result -= 1; diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index 410ad44e75a63..ea7af59aecb0e 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -15,8 +15,11 @@ // See the License for the specific language governing permissions and // limitations under the License. +use crate::Layout; use codec::{Decode, Encode}; +pub use decode_encode_impl::state_version_encoded_size; use hash_db::{HashDB, Hasher}; +use sp_core::state_version::StateVersion; use sp_std::vec::Vec; /// A proof that some set of key-value pairs are included in the storage trie. The proof contains @@ -26,21 +29,129 @@ use sp_std::vec::Vec; /// The proof consists of the set of serialized nodes in the storage trie accessed when looking up /// the keys covered by the proof. Verifying the proof requires constructing the partial trie from /// the serialized nodes and performing the key lookups. -#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] +#[derive(Debug, PartialEq, Eq, Clone)] pub struct StorageProof { - trie_nodes: Vec>, + pub(crate) state_version: StateVersion, + pub(crate) trie_nodes: Vec>, } /// Storage proof in compact form. -#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] +#[derive(Debug, PartialEq, Eq, Clone)] pub struct CompactProof { + pub state_version: StateVersion, pub encoded_nodes: Vec>, } +mod decode_encode_impl { + use super::*; + use codec::{Compact, Error, Input, Output}; + // This is compact size encoding first byte for + // length > u128::MAX. In case such big proof + // get needed state version could not be omitted. + // Use to indicate state V0. + const STATE_V0: u8 = 251; + + // First byte encoding for state V1. + const STATE_V1: u8 = 247; + + /// Prefix another input with a byte. 
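The new node kinds sit in the byte range whose top two bits previously only ever meant an empty trie, so the header now reserves three bits (hashed-value leaf) or four bits (hashed-value branch) for the kind; that is why `size_and_prefix_iterator`, `encode_size_and_prefix` and `decode_size` all gained a prefix-width argument. A standalone sketch of both directions of that size encoding; the concrete prefix values are assumptions read off the decode arms above rather than quotations of `trie_constants`, and the real decoder additionally caps the count at `NIBBLE_SIZE_BOUND`:

/// First header byte for a node whose kind prefix occupies `prefix_bits` bits.
/// Returns the byte plus the part of the nibble count that spills over into
/// the (unchanged) continuation bytes.
fn first_header_byte(prefix: u8, prefix_bits: u32, nibble_count: usize) -> (u8, usize) {
	let max_value = (255u8 >> prefix_bits) as usize;
	if nibble_count < max_value {
		// The whole count fits next to the prefix.
		(prefix | nibble_count as u8, 0)
	} else {
		// `max_value` is a sentinel: "count continues in the next bytes",
		// with `max_value - 1` of it already accounted for.
		(prefix | max_value as u8, nibble_count - (max_value - 1))
	}
}

/// Read the count back: follow-up bytes add 255 each until a smaller byte
/// closes the sum.
fn decode_header_size(
	first: u8,
	rest: &mut impl Iterator<Item = u8>,
	prefix_bits: u32,
) -> Option<usize> {
	let max_value = (255u8 >> prefix_bits) as usize;
	let mut result = first as usize & max_value;
	if result < max_value {
		return Some(result)
	}
	result -= 1;
	loop {
		match rest.next()? {
			n if n < 255 => return Some(result + n as usize + 1),
			_ => result += 255,
		}
	}
}

fn main() {
	// Legacy leaf (2 reserved bits): up to 62 nibbles fit in the first byte.
	assert_eq!(first_header_byte(0b01 << 6, 2, 30), (0b01 << 6 | 30, 0));
	// Hashed-value leaf (3 reserved bits): only up to 30 nibbles fit inline.
	let (first, rem) = first_header_byte(0b001 << 5, 3, 64);
	assert_eq!((first, rem), (0b001 << 5 | 31, 34));
	// The follow-up byte carries `rem - 1`, as `size_and_prefix_iterator` emits it.
	assert_eq!(decode_header_size(first, &mut [(rem - 1) as u8].into_iter(), 3), Some(64));
}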
+ struct PrefixInput<'a, T> { + prefix: Option, + input: &'a mut T, + } + + struct StateVersionInProof(StateVersion); + + impl<'a, T: 'a + Input> Input for PrefixInput<'a, T> { + fn remaining_len(&mut self) -> Result, Error> { + let len = if let Some(len) = self.input.remaining_len()? { + Some(len.saturating_add(self.prefix.iter().count())) + } else { + None + }; + Ok(len) + } + + fn read(&mut self, buffer: &mut [u8]) -> Result<(), Error> { + if buffer.is_empty() { + return Ok(()); + } + match self.prefix.take() { + Some(v) => { + buffer[0] = v; + self.input.read(&mut buffer[1..]) + } + _ => self.input.read(buffer), + } + } + } + + fn decode_inner(input: &mut I) -> Result<(StateVersion, Vec>), Error> { + let prefix = input.read_byte()?; + let (state_version, mut input) = if prefix == STATE_V0 { + (StateVersion::V0, PrefixInput { prefix: None, input }) + } else if prefix == STATE_V1 { + let threshold = Compact::::decode(input)?.0; + (StateVersion::V1 { threshold }, PrefixInput { prefix: None, input }) + } else { + (Default::default(), PrefixInput { prefix: Some(prefix), input }) + }; + let trie_nodes: Vec> = Decode::decode(&mut input)?; + Ok((state_version, trie_nodes)) + } + + impl Encode for StateVersionInProof { + fn encode_to(&self, dest: &mut T) { + if self.0 != Default::default() { + match self.0 { + StateVersion::V0 => dest.push_byte(STATE_V0), + StateVersion::V1 { threshold } => { + dest.push_byte(STATE_V1); + Compact(threshold).encode_to(dest); + } + } + } + } + } + + impl Decode for StorageProof { + fn decode(input: &mut I) -> Result { + let (state_version, trie_nodes) = decode_inner(input)?; + Ok(StorageProof { trie_nodes, state_version }) + } + } + + impl Decode for CompactProof { + fn decode(input: &mut I) -> Result { + let (state_version, encoded_nodes) = decode_inner(input)?; + Ok(CompactProof { encoded_nodes, state_version }) + } + } + + impl Encode for StorageProof { + fn encode_to(&self, dest: &mut T) { + StateVersionInProof(self.state_version).encode_to(dest); + self.trie_nodes.encode_to(dest); + } + } + + impl Encode for CompactProof { + fn encode_to(&self, dest: &mut T) { + StateVersionInProof(self.state_version).encode_to(dest); + self.encoded_nodes.encode_to(dest); + } + } + + /// Utility to get state version size encoded in proof. + pub fn state_version_encoded_size(state_version: StateVersion) -> usize { + StateVersionInProof(state_version).encoded_size() + } +} + impl StorageProof { /// Constructs a storage proof from a subset of encoded trie nodes in a storage backend. - pub fn new(trie_nodes: Vec>) -> Self { - StorageProof { trie_nodes } + pub fn new(trie_nodes: Vec>, state_version: StateVersion) -> Self { + StorageProof { trie_nodes, state_version } } /// Returns a new empty proof. @@ -48,7 +159,7 @@ impl StorageProof { /// An empty proof is capable of only proving trivial statements (ie. that an empty set of /// key-value pairs exist in storage). pub fn empty() -> Self { - StorageProof { trie_nodes: Vec::new() } + StorageProof { trie_nodes: Vec::new(), state_version: StateVersion::default() } } /// Returns whether this is an empty proof. @@ -56,6 +167,11 @@ impl StorageProof { self.trie_nodes.is_empty() } + /// State version used in the proof. + pub fn state_version(&self) -> StateVersion { + self.state_version + } + /// Create an iterator over trie nodes constructed from the proof. The nodes are not guaranteed /// to be traversed in any particular order. 
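The comment at the top of `decode_encode_impl` is the key to backward compatibility: an untagged proof starts with the SCALE `Compact` length of its node list, and `251`/`247` can only appear there as the first byte of a compact integer far larger than `u128::MAX`, so they are free to serve as tags for the non-default state versions (V1 additionally carrying its threshold). A writer-side sketch under those assumptions; `SketchStateVersion` only mimics how `StateVersion` is used here, and the `u32` threshold type is an assumption:

use codec::{Compact, Encode};

enum SketchStateVersion {
	V0,
	V1 { threshold: u32 },
}

/// Encode trie nodes with an optional leading state-version tag.
fn encode_tagged_proof(nodes: &[Vec<u8>], version: Option<SketchStateVersion>) -> Vec<u8> {
	let mut out = Vec::new();
	match version {
		// Default version: byte-for-byte identical to the old, untagged encoding.
		None => (),
		Some(SketchStateVersion::V0) => out.push(251),
		Some(SketchStateVersion::V1 { threshold }) => {
			out.push(247);
			Compact(threshold).encode_to(&mut out);
		},
	}
	// The legacy `Vec<Vec<u8>>` encoding of the nodes follows in every case.
	nodes.encode_to(&mut out);
	out
}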
pub fn iter_nodes(self) -> StorageProofNodeIterator { @@ -78,14 +194,23 @@ impl StorageProof { where I: IntoIterator, { + let mut state_version = StateVersion::default(); + let state_version = &mut state_version; let trie_nodes = proofs .into_iter() - .flat_map(|proof| proof.iter_nodes()) + .flat_map(|proof| { + debug_assert!( + state_version == &StateVersion::default() + || state_version == &proof.state_version + ); + *state_version = proof.state_version; + proof.iter_nodes() + }) .collect::>() .into_iter() .collect(); - Self { trie_nodes } + Self { trie_nodes, state_version: *state_version } } /// Encode as a compact proof with default @@ -93,8 +218,8 @@ impl StorageProof { pub fn into_compact_proof( self, root: H::Out, - ) -> Result>> { - crate::encode_compact::>(self, root) + ) -> Result>> { + crate::encode_compact::>(self, root) } /// Returns the estimated encoded size of the compact proof. @@ -121,9 +246,10 @@ impl CompactProof { pub fn to_storage_proof( &self, expected_root: Option<&H::Out>, - ) -> Result<(StorageProof, H::Out), crate::CompactProofError>> { + ) -> Result<(StorageProof, H::Out), crate::CompactProofError>> { + let state_version = self.state_version; let mut db = crate::MemoryDB::::new(&[]); - let root = crate::decode_compact::, _, _>( + let root = crate::decode_compact::, _, _>( &mut db, self.iter_compact_encoded_nodes(), expected_root, @@ -134,6 +260,7 @@ impl CompactProof { .into_iter() .filter_map(|kv| if (kv.1).1 > 0 { Some((kv.1).0) } else { None }) .collect(), + state_version, ), root, )) diff --git a/primitives/trie/src/trie_codec.rs b/primitives/trie/src/trie_codec.rs index 1596229f2b5de..ae809b56f384e 100644 --- a/primitives/trie/src/trie_codec.rs +++ b/primitives/trie/src/trie_codec.rs @@ -112,8 +112,9 @@ where I: IntoIterator, { let mut nodes_iter = encoded.into_iter(); - let (top_root, _nb_used) = - trie_db::decode_compact_from_iter::(db, &mut nodes_iter)?; + // Layout does not change trie reading. + let layout = L::default(); + let (top_root, _nb_used) = trie_db::decode_compact_from_iter::(db, &mut nodes_iter)?; // Only check root if expected root is passed as argument. if let Some(expected_root) = expected_root { @@ -125,7 +126,7 @@ where let mut child_tries = Vec::new(); { // fetch child trie roots - let trie = crate::TrieDB::::new(db, &top_root)?; + let trie = crate::TrieDB::::new_with_layout(db, &top_root, layout.clone())?; let mut iter = trie.iter()?; @@ -164,8 +165,7 @@ where let mut nodes_iter = nodes_iter.peekable(); for child_root in child_tries.into_iter() { if previous_extracted_child_trie.is_none() && nodes_iter.peek().is_some() { - let (top_root, _) = - trie_db::decode_compact_from_iter::(db, &mut nodes_iter)?; + let (top_root, _) = trie_db::decode_compact_from_iter::(db, &mut nodes_iter)?; previous_extracted_child_trie = Some(top_root); } @@ -201,9 +201,13 @@ pub fn encode_compact(proof: StorageProof, root: TrieHash) -> Result::new(&partial_db, &root)?; let mut iter = trie.iter()?; @@ -248,5 +252,5 @@ where compact_proof.extend(child_proof); } - Ok(CompactProof { encoded_nodes: compact_proof }) + Ok(CompactProof { encoded_nodes: compact_proof, state_version }) } diff --git a/primitives/trie/src/trie_stream.rs b/primitives/trie/src/trie_stream.rs index e0e26fea67c2e..20cc35c6b8708 100644 --- a/primitives/trie/src/trie_stream.rs +++ b/primitives/trie/src/trie_stream.rs @@ -18,21 +18,18 @@ //! `TrieStream` implementation for Substrate's trie format. 
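Both proof forms now carry the version, so converting back and forth is expected to preserve it alongside the root check. A sketch of that property (not part of the patch), assuming the `sp_trie` storage-proof API exactly as modified above and a `BlakeTwo256`-hashed, non-compact starting proof:

use sp_core::H256;
use sp_runtime::traits::BlakeTwo256;
use sp_trie::StorageProof;

fn roundtrip_preserves_version(proof: StorageProof, root: H256) {
	let version = proof.state_version();
	// StorageProof -> CompactProof keeps the version next to the nodes.
	let compact = proof.into_compact_proof::<BlakeTwo256>(root).expect("proof matches root");
	assert!(compact.state_version == version);
	// CompactProof -> StorageProof restores it on the reconstructed proof.
	let (restored, restored_root) =
		compact.to_storage_proof::<BlakeTwo256>(Some(&root)).expect("compact proof is valid");
	assert!(restored.state_version() == version);
	assert_eq!(restored_root, root);
}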
use crate::{ - node_codec::Bitmap, node_header::{size_and_prefix_iterator, NodeKind}, trie_constants, }; -use codec::Encode; +use codec::{Compact, Encode}; use hash_db::Hasher; use sp_std::vec::Vec; use trie_root; -const BRANCH_NODE_NO_VALUE: u8 = 254; -const BRANCH_NODE_WITH_VALUE: u8 = 255; - #[derive(Default, Clone)] /// Codec-flavored TrieStream. pub struct TrieStream { + /// Current node buffer. buffer: Vec, } @@ -60,51 +57,78 @@ fn fuse_nibbles_node<'a>(nibbles: &'a [u8], kind: NodeKind) -> impl Iterator size_and_prefix_iterator(size, trie_constants::LEAF_PREFIX_MASK), + NodeKind::Leaf => size_and_prefix_iterator(size, trie_constants::LEAF_PREFIX_MASK, 2), NodeKind::BranchNoValue => - size_and_prefix_iterator(size, trie_constants::BRANCH_WITHOUT_MASK), + size_and_prefix_iterator(size, trie_constants::BRANCH_WITHOUT_MASK, 2), NodeKind::BranchWithValue => - size_and_prefix_iterator(size, trie_constants::BRANCH_WITH_MASK), + size_and_prefix_iterator(size, trie_constants::BRANCH_WITH_MASK, 2), + NodeKind::HashedValueLeaf => + size_and_prefix_iterator(size, trie_constants::ALT_HASHING_LEAF_PREFIX_MASK, 3), + NodeKind::HashedValueBranch => + size_and_prefix_iterator(size, trie_constants::ALT_HASHING_BRANCH_WITH_MASK, 4), }; iter_start .chain(if nibbles.len() % 2 == 1 { Some(nibbles[0]) } else { None }) .chain(nibbles[nibbles.len() % 2..].chunks(2).map(|ch| ch[0] << 4 | ch[1])) } +use trie_root::Value as TrieStreamValue; impl trie_root::TrieStream for TrieStream { fn new() -> Self { - TrieStream { buffer: Vec::new() } + Self { buffer: Vec::new() } } fn append_empty_data(&mut self) { self.buffer.push(trie_constants::EMPTY_TRIE); } - fn append_leaf(&mut self, key: &[u8], value: &[u8]) { - self.buffer.extend(fuse_nibbles_node(key, NodeKind::Leaf)); - value.encode_to(&mut self.buffer); + fn append_leaf(&mut self, key: &[u8], value: TrieStreamValue) { + let kind = match &value { + TrieStreamValue::NoValue => unreachable!(), + TrieStreamValue::Value(..) => NodeKind::Leaf, + TrieStreamValue::HashedValue(..) => NodeKind::HashedValueLeaf, + }; + self.buffer.extend(fuse_nibbles_node(key, kind)); + match &value { + TrieStreamValue::NoValue => unreachable!(), + TrieStreamValue::Value(value) => { + Compact(value.len() as u32).encode_to(&mut self.buffer); + self.buffer.extend_from_slice(value); + }, + TrieStreamValue::HashedValue(hash) => { + self.buffer.extend_from_slice(hash.as_slice()); + }, + }; } fn begin_branch( &mut self, maybe_partial: Option<&[u8]>, - maybe_value: Option<&[u8]>, + maybe_value: TrieStreamValue, has_children: impl Iterator, ) { if let Some(partial) = maybe_partial { - if maybe_value.is_some() { - self.buffer.extend(fuse_nibbles_node(partial, NodeKind::BranchWithValue)); - } else { - self.buffer.extend(fuse_nibbles_node(partial, NodeKind::BranchNoValue)); - } + let kind = match &maybe_value { + TrieStreamValue::NoValue => NodeKind::BranchNoValue, + TrieStreamValue::Value(..) => NodeKind::BranchWithValue, + TrieStreamValue::HashedValue(..) 
=> NodeKind::HashedValueBranch, + }; + + self.buffer.extend(fuse_nibbles_node(partial, kind)); let bm = branch_node_bit_mask(has_children); self.buffer.extend([bm.0, bm.1].iter()); } else { - debug_assert!(false, "trie stream codec only for no extension trie"); - self.buffer.extend(&branch_node(maybe_value.is_some(), has_children)); + unreachable!("trie stream codec only for no extension trie"); } - if let Some(value) = maybe_value { - value.encode_to(&mut self.buffer); + match maybe_value { + TrieStreamValue::NoValue => (), + TrieStreamValue::Value(value) => { + Compact(value.len() as u32).encode_to(&mut self.buffer); + self.buffer.extend_from_slice(value); + }, + TrieStreamValue::HashedValue(hash) => { + self.buffer.extend_from_slice(hash.as_slice()); + }, } } @@ -124,18 +148,3 @@ impl trie_root::TrieStream for TrieStream { self.buffer } } - -fn branch_node(has_value: bool, has_children: impl Iterator) -> [u8; 3] { - let mut result = [0, 0, 0]; - branch_node_buffered(has_value, has_children, &mut result[..]); - result -} - -fn branch_node_buffered(has_value: bool, has_children: I, output: &mut [u8]) -where - I: Iterator, -{ - let first = if has_value { BRANCH_NODE_WITH_VALUE } else { BRANCH_NODE_NO_VALUE }; - output[0] = first; - Bitmap::encode(has_children, &mut output[1..]); -} diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index 9bc411af5d3ed..704d29828ff22 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -34,7 +34,7 @@ pub use sp_keyring::{ ed25519::Keyring as Ed25519Keyring, sr25519::Keyring as Sr25519Keyring, AccountKeyring, }; pub use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; -pub use sp_runtime::{Storage, StorageChild}; +pub use sp_runtime::{StateVersions, Storage, StorageChild}; pub use sp_state_machine::ExecutionStrategy; use futures::{ @@ -45,11 +45,7 @@ use sc_client_api::BlockchainEvents; use sc_service::client::{ClientConfig, LocalCallExecutor}; use serde::Deserialize; use sp_core::storage::ChildInfo; -use sp_runtime::{ - codec::Encode, - traits::{BlakeTwo256, Block as BlockT}, - OpaqueExtrinsic, -}; +use sp_runtime::{codec::Encode, traits::Block as BlockT, OpaqueExtrinsic}; use std::{ collections::{HashMap, HashSet}, pin::Pin, @@ -57,8 +53,7 @@ use std::{ }; /// Test client light database backend. -pub type LightBackend = - sc_light::Backend, BlakeTwo256>; +pub type LightBackend = sc_light::Backend, Block>; /// A genesis storage initialization trait. pub trait GenesisInit: Default { @@ -85,6 +80,7 @@ pub struct TestClientBuilder, bad_blocks: BadBlocks, enable_offchain_indexing_api: bool, + state_versions: StateVersions, no_genesis: bool, } @@ -105,20 +101,48 @@ impl Self::with_backend(backend) } + /// Create new `TestClientBuilder` with default backend and state versions. 
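The `TrieStream` changes above only touch the header kind and the value encoding; the nibble packing performed by `fuse_nibbles_node` after the header is unchanged. For reference, a standalone sketch of that packing rule (an odd leading nibble sits alone in its byte, the rest go two per byte, high nibble first):

/// Pack a nibble slice the way `fuse_nibbles_node` does after the header bytes.
fn pack_nibbles(nibbles: &[u8]) -> Vec<u8> {
	let odd = nibbles.len() % 2;
	let mut out = Vec::with_capacity(odd + nibbles.len() / 2);
	if odd == 1 {
		// An odd leading nibble occupies the low half of its own byte.
		out.push(nibbles[0]);
	}
	// The remaining nibbles are packed two per byte, high nibble first.
	out.extend(nibbles[odd..].chunks(2).map(|ch| ch[0] << 4 | ch[1]));
	out
}

fn main() {
	assert_eq!(pack_nibbles(&[0xa, 0xb, 0xc]), vec![0x0a, 0xbc]);
	assert_eq!(pack_nibbles(&[0x1, 0x2, 0x3, 0x4]), vec![0x12, 0x34]);
}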
+ pub fn with_default_backend_and_state_versions( + state_versions: Option>, + ) -> Self { + let state_versions = state_versions.unwrap_or_default(); + let backend = Arc::new(Backend::new_test_with_tx_storage_and_state_versions( + std::u32::MAX, + std::u64::MAX, + sc_client_db::TransactionStorageMode::BlockBody, + state_versions.clone(), + )); + Self::with_backend_and_state_versions(backend, state_versions) + } + /// Create new `TestClientBuilder` with default backend and pruning window size - pub fn with_pruning_window(keep_blocks: u32) -> Self { - let backend = Arc::new(Backend::new_test(keep_blocks, 0)); - Self::with_backend(backend) + pub fn with_pruning_window( + keep_blocks: u32, + state_versions: Option>, + ) -> Self { + let state_versions = state_versions.unwrap_or_default(); + let backend = Arc::new(Backend::new_test_with_tx_storage_and_state_versions( + keep_blocks, + 0, + sc_client_db::TransactionStorageMode::BlockBody, + state_versions.clone(), + )); + Self::with_backend_and_state_versions(backend, state_versions) } /// Create new `TestClientBuilder` with default backend and storage chain mode - pub fn with_tx_storage(keep_blocks: u32) -> Self { - let backend = Arc::new(Backend::new_test_with_tx_storage( + pub fn with_tx_storage( + keep_blocks: u32, + state_versions: Option>, + ) -> Self { + let state_versions = state_versions.unwrap_or_default(); + let backend = Arc::new(Backend::new_test_with_tx_storage_and_state_versions( keep_blocks, 0, sc_client_db::TransactionStorageMode::StorageChain, + state_versions.clone(), )); - Self::with_backend(backend) + Self::with_backend_and_state_versions(backend, state_versions) } } @@ -127,6 +151,14 @@ impl { /// Create a new instance of the test client builder. pub fn with_backend(backend: Arc) -> Self { + Self::with_backend_and_state_versions(backend, Default::default()) + } + + /// Create a new instance of the test client builder with specific state versions. + pub fn with_backend_and_state_versions( + backend: Arc, + state_versions: StateVersions, + ) -> Self { TestClientBuilder { backend, execution_strategies: ExecutionStrategies::default(), @@ -138,6 +170,7 @@ impl bad_blocks: None, enable_offchain_indexing_api: false, no_genesis: false, + state_versions, } } @@ -222,7 +255,6 @@ impl { let storage = { let mut storage = self.genesis_init.genesis_storage(); - // Add some child storage keys. 
for (key, child_content) in self.child_storage_extension { storage.children_default.insert( @@ -234,7 +266,7 @@ impl ); } - storage + (storage, sp_runtime::StateVersion::default()) }; let client = client::Client::new( @@ -253,6 +285,7 @@ impl ClientConfig { offchain_indexing_api: self.enable_offchain_indexing_api, no_genesis: self.no_genesis, + state_versions: self.state_versions.clone(), ..Default::default() }, ) @@ -293,11 +326,13 @@ impl let executor = executor.into().unwrap_or_else(|| { NativeElseWasmExecutor::new(WasmExecutionMethod::Interpreted, None, 8) }); + let mut client_config = ClientConfig::default(); + client_config.state_versions = self.state_versions.clone(); let executor = LocalCallExecutor::new( self.backend.clone(), executor, Box::new(sp_core::testing::TaskExecutor::new()), - Default::default(), + client_config, ) .expect("Creates LocalCallExecutor"); diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index dc5ccadc4574f..18eeb52203cf7 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -39,7 +39,7 @@ use sp_core::{ storage::{ChildInfo, Storage, StorageChild}, ChangesTrieConfiguration, }; -use sp_runtime::traits::{Block as BlockT, Hash as HashT, HashFor, Header as HeaderT, NumberFor}; +use sp_runtime::traits::{Block as BlockT, Hash as HashT, Header as HeaderT, NumberFor}; use substrate_test_runtime::genesismap::{additional_storage_with_genesis, GenesisConfig}; /// A prelude to import in tests. @@ -94,7 +94,7 @@ pub type LightExecutor = sc_light::GenesisCallExecutor< substrate_test_runtime::Block, sc_light::Backend< sc_client_db::light::LightStorage, - HashFor, + substrate_test_runtime::Block, >, NativeElseWasmExecutor, >, @@ -399,6 +399,16 @@ pub fn new() -> Client { TestClientBuilder::new().build() } +/// Creates new client instance used for tests, using +/// optionally inner state hashing. +pub fn new_with_state(inner_hashing: bool) -> Client { + let mut state_versions = StateVersions::default(); + if !inner_hashing { + state_versions.add((0, sp_core::state_version::StateVersion::V0)); + } + TestClientBuilder::with_default_backend_and_state_versions(Some(state_versions)).build() +} + /// Creates new light client instance used for tests. 
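A usage sketch for the new test-client plumbing (not part of the patch): `new_with_state` above pins the whole test chain to the legacy value format by registering `StateVersion::V0` from block 0, while the default `StateVersions` is assumed to keep the hashed-value behaviour, as its `inner_hashing` flag suggests.

use substrate_test_runtime_client::new_with_state;

fn clients_for_both_state_versions() {
	// Default behaviour: the hashed-value (V1-style) state format.
	let _hashed = new_with_state(true);
	// Legacy behaviour: `StateVersion::V0` registered from block 0, so genesis
	// and all test blocks keep the unhashed value format.
	let _legacy = new_with_state(false);
}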
pub fn new_light() -> ( client::Client< @@ -411,7 +421,8 @@ pub fn new_light() -> ( ) { let storage = sc_client_db::light::LightStorage::new_test(); let blockchain = Arc::new(sc_light::Blockchain::new(storage)); - let backend = Arc::new(LightBackend::new(blockchain)); + let state_versions = Default::default(); + let backend = Arc::new(LightBackend::new(blockchain, state_versions)); let executor = new_native_executor(); let local_call_executor = client::LocalCallExecutor::new( backend.clone(), diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index bdb8724120813..75cf7aac1870a 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -1225,8 +1225,9 @@ fn test_read_child_storage() { fn test_witness(proof: StorageProof, root: crate::Hash) { use sp_externalities::Externalities; + let state_version = proof.state_version(); let db: sp_trie::MemoryDB = proof.into_memory_db(); - let backend = sp_state_machine::TrieBackend::<_, crate::Hashing>::new(db, root); + let backend = sp_state_machine::TrieBackend::<_, crate::Hashing>::new(db, root, state_version); let mut overlay = sp_state_machine::OverlayedChanges::default(); let mut cache = sp_state_machine::StorageTransactionCache::<_, _, BlockNumber>::default(); let mut ext = sp_state_machine::Ext::new( @@ -1315,7 +1316,8 @@ mod tests { #[test] fn witness_backend_works() { let (db, root) = witness_backend(); - let backend = sp_state_machine::TrieBackend::<_, crate::Hashing>::new(db, root); + let backend = + sp_state_machine::TrieBackend::<_, crate::Hashing>::new(db, root, Default::default()); let proof = sp_state_machine::prove_read(backend, vec![b"value3"]).unwrap(); let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::Both).build(); diff --git a/utils/frame/try-runtime/cli/src/lib.rs b/utils/frame/try-runtime/cli/src/lib.rs index c92c3959535e9..901dbd8484222 100644 --- a/utils/frame/try-runtime/cli/src/lib.rs +++ b/utils/frame/try-runtime/cli/src/lib.rs @@ -216,7 +216,7 @@ where })), }; - let (code_key, code) = extract_code(config.chain_spec)?; + let (code_key, code) = extract_code::(config.chain_spec)?; builder .inject_key_value(&[(code_key, code)]) .inject_hashed_key(&[twox_128(b"System"), twox_128(b"LastRuntimeUpgrade")].concat()) @@ -302,7 +302,7 @@ where .mode(mode) .inject_hashed_key(&[twox_128(b"System"), twox_128(b"LastRuntimeUpgrade")].concat()); let mut ext = if shared.overwrite_code { - let (code_key, code) = extract_code(config.chain_spec)?; + let (code_key, code) = extract_code::(config.chain_spec)?; builder.inject_key_value(&[(code_key, code)]).build().await? } else { builder.inject_hashed_key(well_known_keys::CODE).build().await? @@ -394,7 +394,7 @@ where .mode(mode) .inject_hashed_key(&[twox_128(b"System"), twox_128(b"LastRuntimeUpgrade")].concat()); let mut ext = if shared.overwrite_code { - let (code_key, code) = extract_code(config.chain_spec)?; + let (code_key, code) = extract_code::(config.chain_spec)?; builder.inject_key_value(&[(code_key, code)]).build().await? } else { builder.inject_hashed_key(well_known_keys::CODE).build().await? @@ -477,7 +477,9 @@ impl CliConfiguration for TryRuntimeCmd { /// Extract `:code` from the given chain spec and return as `StorageData` along with the /// corresponding `StorageKey`. -fn extract_code(spec: Box) -> sc_cli::Result<(StorageKey, StorageData)> { +fn extract_code( + spec: Box, +) -> sc_cli::Result<(StorageKey, StorageData)> { let genesis_storage = spec.build_storage()?; let code = StorageData( genesis_storage
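Finally, the witness path: a proof now tells the verifier which value layout its state was built with, and the reconstructed backend takes that version explicitly. A sketch mirroring `test_witness` above, assuming the three-argument `TrieBackend::new` introduced by this patch and the test runtime's `BlakeTwo256` hashing:

use sp_runtime::traits::BlakeTwo256;
use sp_state_machine::TrieBackend;
use sp_trie::{MemoryDB, StorageProof};

fn backend_from_witness(
	proof: StorageProof,
	root: sp_core::H256,
) -> TrieBackend<MemoryDB<BlakeTwo256>, BlakeTwo256> {
	// The proof carries the layout its state was built with; the backend now
	// takes that version explicitly instead of assuming the legacy format.
	let state_version = proof.state_version();
	let db = proof.into_memory_db::<BlakeTwo256>();
	TrieBackend::new(db, root, state_version)
}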