diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 6cdc822f96dd1..4451ce63e0d6d 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -326,7 +326,6 @@ check-web-wasm: - time cargo build --target=wasm32-unknown-unknown -p sp-io - time cargo build --target=wasm32-unknown-unknown -p sp-runtime - time cargo build --target=wasm32-unknown-unknown -p sp-std - - time cargo build --target=wasm32-unknown-unknown -p sc-client - time cargo build --target=wasm32-unknown-unknown -p sc-consensus-aura - time cargo build --target=wasm32-unknown-unknown -p sc-consensus-babe - time cargo build --target=wasm32-unknown-unknown -p sp-consensus diff --git a/Cargo.lock b/Cargo.lock index 9cf78bfcfbc08..a6eb5d037b515 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -126,15 +126,15 @@ dependencies = [ [[package]] name = "arbitrary" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75153c95fdedd7db9732dfbfc3702324a1627eec91ba56e37cd0ac78314ab2ed" +checksum = "1148c9b25d393a07c4cc3ef5dd30f82a40a1c261018c4a670611ed8e76cad3ea" [[package]] name = "arc-swap" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d663a8e9a99154b5fb793032533f6328da35e23aac63d5c152279aa8ba356825" +checksum = "b585a98a234c46fc563103e9278c9391fde1f4e6850334da895d27edb9580f62" [[package]] name = "arrayref" @@ -280,9 +280,9 @@ dependencies = [ [[package]] name = "backtrace-sys" -version = "0.1.35" +version = "0.1.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7de8aba10a69c8e8d7622c5710229485ec32e9d55fdad160ea559c086fdcd118" +checksum = "78848718ee1255a2485d1309ad9cdecfc2e7d0362dd11c6829364c6b35ae1bc7" dependencies = [ "cc", "libc", @@ -446,9 +446,9 @@ dependencies = [ [[package]] name = "bs58" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b170cd256a3f9fa6b9edae3e44a7dfdfc77e8124dbc3e2612d75f9c3e2396dae" +checksum = "476e9cd489f9e121e02ffa6014a8ef220ecb15c05ed23fc34cca13925dc283fb" [[package]] name = "bstr" @@ -547,9 +547,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.50" +version = "1.0.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd" +checksum = "9c9384ca4b90c0ea47e19a5c996d6643a3e73dedf9b89c65efb67587e34da1bb" dependencies = [ "jobserver", ] @@ -1140,18 +1140,18 @@ dependencies = [ [[package]] name = "enumflags2" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a80e524ebf194285b57e5e7944018721c7fffc673253f5183f7accd88a2a3b0c" +checksum = "83c8d82922337cd23a15f88b70d8e4ef5f11da38dd7cdb55e84dd5de99695da0" dependencies = [ "enumflags2_derive", ] [[package]] name = "enumflags2_derive" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ed9afacaea0301eefb738c9deea725e6d53938004597cdc518a8cf9a7aa2f03" +checksum = "946ee94e3dbf58fdd324f9ce245c7b238d46a66f00e86a020b71996349e46cce" dependencies = [ "proc-macro2", "quote 1.0.3", @@ -1426,7 +1426,6 @@ dependencies = [ "frame-benchmarking", "parity-scale-codec", "sc-cli", - "sc-client", "sc-client-db", "sc-executor", "sc-service", @@ -1980,9 +1979,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"725cf19794cf90aa94e65050cb4191ff5d8fa87a498383774c47b332e3af952e" +checksum = "8a0d737e0f947a1864e93d33fdef4af8445a00d1ed8dc0c8ddb73139ea6abf15" dependencies = [ "libc", ] @@ -2135,9 +2134,9 @@ dependencies = [ [[package]] name = "hyper" -version = "0.13.4" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed6081100e960d9d74734659ffc9cc91daf1c0fc7aceb8eaa94ee1a3f5046f2e" +checksum = "96816e1d921eca64d208a85aab4f7798455a8e34229ee5a88c935bdee1b78b14" dependencies = [ "bytes 0.5.4", "futures-channel", @@ -2166,7 +2165,7 @@ dependencies = [ "bytes 0.5.4", "ct-logs", "futures-util", - "hyper 0.13.4", + "hyper 0.13.5", "log", "rustls", "rustls-native-certs", @@ -3431,9 +3430,9 @@ dependencies = [ "sc-basic-authorship", "sc-chain-spec", "sc-cli", - "sc-client", "sc-client-api", "sc-client-db", + "sc-consensus", "sc-consensus-babe", "sc-consensus-epochs", "sc-finality-grandpa", @@ -3538,7 +3537,7 @@ dependencies = [ "node-runtime", "pallet-contracts-rpc", "pallet-transaction-payment-rpc", - "sc-client", + "sc-client-api", "sc-consensus-babe", "sc-consensus-babe-rpc", "sc-consensus-epochs", @@ -3638,8 +3637,8 @@ dependencies = [ "node-template-runtime", "sc-basic-authorship", "sc-cli", - "sc-client", "sc-client-api", + "sc-consensus", "sc-consensus-aura", "sc-executor", "sc-finality-grandpa", @@ -3715,7 +3714,6 @@ dependencies = [ "parity-scale-codec", "sc-block-builder", "sc-cli", - "sc-client", "sc-client-api", "sc-client-db", "sc-executor", @@ -3744,7 +3742,6 @@ dependencies = [ "parity-scale-codec", "sc-block-builder", "sc-cli", - "sc-client", "sc-client-api", "sc-service", "sp-api", @@ -3850,9 +3847,9 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46203554f085ff89c235cd12f7075f3233af9b11ed7c9e16dfe2560d03313ce6" +checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" dependencies = [ "hermit-abi", "libc", @@ -4854,7 +4851,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3a704eb390aafdc107b0e392f56a82b668e3a71366993b5340f5833fd62505e" dependencies = [ "lock_api", - "parking_lot_core 0.7.1", + "parking_lot_core 0.7.2", ] [[package]] @@ -4874,9 +4871,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e136c1904604defe99ce5fd71a28d473fa60a12255d511aa78a9ddf11237aeb" +checksum = "d58c7c768d4ba344e3e8d72518ac13e259d7c7ade24167003b8488e10b6740a3" dependencies = [ "cfg-if", "cloudabi", @@ -5547,18 +5544,18 @@ dependencies = [ [[package]] name = "ref-cast" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "077f197a31bfe7e4169145f9eca08d32705c6c6126c139c26793acdf163ac3ef" +checksum = "0a214c7875e1b63fc1618db7c80efc0954f6156c9ff07699fd9039e255accdd1" dependencies = [ "ref-cast-impl", ] [[package]] name = "ref-cast-impl" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c36eb52b69b87c9e3a07387f476c88fd0dba9a1713b38e56617ed66b45392c1f" +checksum = "602eb59cda66fcb9aec25841fb76bc01d2b34282dcdd705028da297db6f3eec8" dependencies = [ "proc-macro2", "quote 1.0.3", @@ -5567,9 +5564,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.3.6" +version = "1.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "7f6946991529684867e47d86474e3a6d0c0ab9b82d5821e314b1ede31fa3a4b3" +checksum = "a6020f034922e3194c711b82a627453881bc4682166cabb07134a10c26ba7692" dependencies = [ "aho-corasick", "memchr", @@ -5931,47 +5928,6 @@ dependencies = [ "tokio 0.2.18", ] -[[package]] -name = "sc-client" -version = "0.8.0-dev" -dependencies = [ - "derive_more", - "env_logger 0.7.1", - "fnv", - "futures 0.3.4", - "hash-db", - "hex-literal", - "kvdb", - "kvdb-memorydb", - "log", - "parity-scale-codec", - "parking_lot 0.10.2", - "rand 0.7.3", - "sc-block-builder", - "sc-client-api", - "sc-executor", - "sc-telemetry", - "sp-api", - "sp-blockchain", - "sp-consensus", - "sp-core", - "sp-database", - "sp-externalities", - "sp-inherents", - "sp-keyring", - "sp-panic-handler", - "sp-runtime", - "sp-state-machine", - "sp-std", - "sp-trie", - "sp-utils", - "sp-version", - "substrate-prometheus-endpoint", - "substrate-test-runtime-client", - "tempfile", - "tracing", -] - [[package]] name = "sc-client-api" version = "2.0.0-dev" @@ -5982,6 +5938,7 @@ dependencies = [ "hash-db", "hex-literal", "kvdb", + "kvdb-memorydb", "lazy_static", "log", "parity-scale-codec", @@ -5992,6 +5949,7 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-core", + "sp-database", "sp-externalities", "sp-inherents", "sp-keyring", @@ -6005,6 +5963,7 @@ dependencies = [ "sp-utils", "sp-version", "substrate-prometheus-endpoint", + "substrate-test-runtime", ] [[package]] @@ -6024,7 +5983,6 @@ dependencies = [ "parity-util-mem", "parking_lot 0.10.2", "quickcheck", - "sc-client", "sc-client-api", "sc-executor", "sc-state-db", @@ -6041,6 +5999,16 @@ dependencies = [ "tempfile", ] +[[package]] +name = "sc-consensus" +version = "0.8.0-dev" +dependencies = [ + "sc-client-api", + "sp-blockchain", + "sp-consensus", + "sp-runtime", +] + [[package]] name = "sc-consensus-aura" version = "0.8.0-dev" @@ -6053,7 +6021,6 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.10.2", "sc-block-builder", - "sc-client", "sc-client-api", "sc-consensus-slots", "sc-executor", @@ -6098,7 +6065,6 @@ dependencies = [ "pdqselect", "rand 0.7.3", "sc-block-builder", - "sc-client", "sc-client-api", "sc-consensus-epochs", "sc-consensus-slots", @@ -6180,7 +6146,6 @@ dependencies = [ "log", "parking_lot 0.10.2", "sc-basic-authorship", - "sc-client", "sc-client-api", "sc-transaction-pool", "serde", @@ -6351,8 +6316,8 @@ dependencies = [ "pin-project", "rand 0.7.3", "sc-block-builder", - "sc-client", "sc-client-api", + "sc-consensus", "sc-keystore", "sc-network", "sc-network-gossip", @@ -6442,7 +6407,6 @@ dependencies = [ "quickcheck", "rand 0.7.3", "sc-block-builder", - "sc-client", "sc-client-api", "sc-peerset", "serde", @@ -6499,9 +6463,10 @@ dependencies = [ "parking_lot 0.10.2", "rand 0.7.3", "sc-block-builder", - "sc-client", "sc-client-api", + "sc-consensus", "sc-network", + "sc-service", "sp-blockchain", "sp-consensus", "sp-consensus-babe", @@ -6522,7 +6487,7 @@ dependencies = [ "fnv", "futures 0.3.4", "futures-timer 3.0.2", - "hyper 0.13.4", + "hyper 0.13.5", "hyper-rustls", "log", "num_cpus", @@ -6572,7 +6537,6 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.10.2", "sc-block-builder", - "sc-client", "sc-client-api", "sc-executor", "sc-keystore", @@ -6656,6 +6620,7 @@ dependencies = [ "futures 0.1.29", "futures 0.3.4", "futures-timer 3.0.2", + "hash-db", "lazy_static", "log", "netstat2", @@ -6665,8 +6630,9 @@ dependencies = [ "parking_lot 0.10.2", "pin-project", "procfs", + "rand 0.7.3", + "sc-block-builder", "sc-chain-spec", - "sc-client", 
"sc-client-api", "sc-client-db", "sc-executor", @@ -6684,16 +6650,21 @@ dependencies = [ "slog", "sp-api", "sp-application-crypto", + "sp-block-builder", "sp-blockchain", "sp-consensus", "sp-consensus-babe", "sp-core", + "sp-externalities", "sp-finality-grandpa", "sp-io", "sp-runtime", "sp-session", + "sp-state-machine", "sp-transaction-pool", + "sp-trie", "sp-utils", + "sp-version", "substrate-prometheus-endpoint", "substrate-test-runtime-client", "sysinfo", @@ -6709,14 +6680,29 @@ dependencies = [ "fdlimit", "futures 0.1.29", "futures 0.3.4", + "hex-literal", "log", - "sc-client", + "parity-scale-codec", + "parking_lot 0.10.2", + "sc-block-builder", + "sc-client-api", + "sc-client-db", + "sc-executor", "sc-network", "sc-service", + "sp-api", + "sp-blockchain", "sp-consensus", "sp-core", + "sp-externalities", + "sp-panic-handler", "sp-runtime", + "sp-state-machine", + "sp-storage", "sp-transaction-pool", + "sp-trie", + "substrate-test-runtime", + "substrate-test-runtime-client", "tempfile", "tokio 0.1.22", ] @@ -7913,9 +7899,9 @@ checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" [[package]] name = "structopt" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff6da2e8d107dfd7b74df5ef4d205c6aebee0706c647f6bc6a2d5789905c00fb" +checksum = "863246aaf5ddd0d6928dfeb1a9ca65f505599e4e1b399935ef7e75107516b4ef" dependencies = [ "clap", "lazy_static", @@ -7924,9 +7910,9 @@ dependencies = [ [[package]] name = "structopt-derive" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a489c87c08fbaf12e386665109dd13470dcc9c4583ea3e10dd2b4523e5ebd9ac" +checksum = "d239ca4b13aee7a2142e6795cbd69e457665ff8037aed33b3effdc430d2f927a" dependencies = [ "heck", "proc-macro-error", @@ -8058,7 +8044,7 @@ dependencies = [ "jsonrpc-derive", "log", "parity-scale-codec", - "sc-client", + "sc-client-api", "sc-transaction-pool", "serde", "sp-api", @@ -8076,7 +8062,7 @@ dependencies = [ "async-std", "derive_more", "futures-util", - "hyper 0.13.4", + "hyper 0.13.5", "log", "prometheus", "tokio 0.2.18", @@ -8089,10 +8075,11 @@ dependencies = [ "futures 0.3.4", "hash-db", "parity-scale-codec", - "sc-client", "sc-client-api", "sc-client-db", + "sc-consensus", "sc-executor", + "sc-service", "sp-blockchain", "sp-consensus", "sp-core", @@ -8117,8 +8104,8 @@ dependencies = [ "parity-scale-codec", "parity-util-mem", "sc-block-builder", - "sc-client", "sc-executor", + "sc-service", "serde", "sp-api", "sp-application-crypto", @@ -8150,10 +8137,12 @@ dependencies = [ "futures 0.3.4", "parity-scale-codec", "sc-block-builder", - "sc-client", "sc-client-api", + "sc-consensus", + "sc-service", "sp-api", "sp-blockchain", + "sp-consensus", "sp-core", "sp-runtime", "substrate-test-client", @@ -8335,9 +8324,9 @@ dependencies = [ [[package]] name = "sysinfo" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a0338198966bde7feb14b011a33d404a62a6e03b843352c71512a2a002634b7" +checksum = "1cac193374347e7c263c5f547524f36ff8ec6702d56c8799c8331d26dffe8c1e" dependencies = [ "cfg-if", "doc-comment", @@ -8445,12 +8434,11 @@ dependencies = [ [[package]] name = "time" -version = "0.1.42" +version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db8dcfca086c1143c9270ac42a2bbd8a7ee477b78ac8e45b19abfb0cbede4b6f" +checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" 
dependencies = [ "libc", - "redox_syscall", "winapi 0.3.8", ] @@ -9333,18 +9321,18 @@ dependencies = [ [[package]] name = "wast" -version = "13.0.0" +version = "14.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b20abd8b4a26f7e0d4dd5e357e90a3d555ec190e94472c9b2b27c5b9777f9ae" +checksum = "47b11c94c63d5365a76ea287f8e6e5b6050233fae4b2423aea2a1e126a385e17" dependencies = [ "leb128", ] [[package]] name = "wat" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51a615830ee3e7200b505c441fec09aac2f114deae69df52f215cb828ba112c4" +checksum = "03db18bc33cff3859c296efbefdcc00763a644539feeadca3415a1cee8a2835d" dependencies = [ "wast", ] @@ -9426,9 +9414,9 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa515c5163a99cc82bab70fd3bfdd36d827be85de63737b40fcef2ce084a436e" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" dependencies = [ "winapi 0.3.8", ] diff --git a/Cargo.toml b/Cargo.toml index abb0cca39c49c..1493865af6499 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,7 +15,6 @@ members = [ "bin/node/transaction-factory", "bin/utils/subkey", "bin/utils/chain-spec-builder", - "client", "client/api", "client/authority-discovery", "client/basic-authorship", @@ -26,6 +25,7 @@ members = [ "client/consensus/aura", "client/consensus/babe", "client/consensus/babe/rpc", + "client/consensus/common", "client/consensus/manual-seal", "client/consensus/pow", "client/consensus/uncles", diff --git a/bin/node-template/node/Cargo.toml b/bin/node-template/node/Cargo.toml index c53710eab4db7..53b6da219f9a8 100644 --- a/bin/node-template/node/Cargo.toml +++ b/bin/node-template/node/Cargo.toml @@ -23,7 +23,7 @@ structopt = "0.3.8" sc-cli = { version = "0.8.0-dev", path = "../../../client/cli" } sp-core = { version = "2.0.0-dev", path = "../../../primitives/core" } sc-executor = { version = "0.8.0-dev", path = "../../../client/executor" } -sc-service = { version = "0.8.0-dev", path = "../../../client/service" } +sc-service = { version = "0.8.0-dev", default-features = false, path = "../../../client/service" } sp-inherents = { version = "2.0.0-dev", path = "../../../primitives/inherents" } sc-transaction-pool = { version = "2.0.0-dev", path = "../../../client/transaction-pool" } sp-transaction-pool = { version = "2.0.0-dev", path = "../../../primitives/transaction-pool" } @@ -31,9 +31,9 @@ sc-network = { version = "0.8.0-dev", path = "../../../client/network" } sc-consensus-aura = { version = "0.8.0-dev", path = "../../../client/consensus/aura" } sp-consensus-aura = { version = "0.8.0-dev", path = "../../../primitives/consensus/aura" } sp-consensus = { version = "0.8.0-dev", path = "../../../primitives/consensus/common" } +sc-consensus = { version = "0.8.0-dev", path = "../../../client/consensus/common" } sc-finality-grandpa = { version = "0.8.0-dev", path = "../../../client/finality-grandpa" } sp-finality-grandpa = { version = "2.0.0-dev", path = "../../../primitives/finality-grandpa" } -sc-client = { version = "0.8.0-dev", path = "../../../client/" } sc-client-api = { version = "2.0.0-dev", path = "../../../client/api" } sp-runtime = { version = "2.0.0-dev", path = "../../../primitives/runtime" } sc-basic-authorship = { path = "../../../client/basic-authorship", version = "0.8.0-dev"} diff --git 
a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 62177608a2e73..16e5271cce943 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -2,8 +2,8 @@ use std::sync::Arc; use std::time::Duration; -use sc_client::LongestChain; use sc_client_api::ExecutorProvider; +use sc_consensus::LongestChain; use node_template_runtime::{self, opaque::Block, RuntimeApi}; use sc_service::{error::{Error as ServiceError}, AbstractService, Configuration, ServiceBuilder}; use sp_inherents::InherentDataProviders; @@ -35,7 +35,7 @@ macro_rules! new_full_start { node_template_runtime::opaque::Block, node_template_runtime::RuntimeApi, crate::service::Executor >($config)? .with_select_chain(|_config, backend| { - Ok(sc_client::LongestChain::new(backend.clone())) + Ok(sc_consensus::LongestChain::new(backend.clone())) })? .with_transaction_pool(|config, client, _fetcher, prometheus_registry| { let pool_api = sc_transaction_pool::FullChainApi::new(client.clone()); @@ -71,9 +71,7 @@ macro_rules! new_full_start { } /// Builds a new service for a full client. -pub fn new_full(config: Configuration) - -> Result -{ +pub fn new_full(config: Configuration) -> Result { let role = config.role.clone(); let force_authoring = config.force_authoring; let name = config.network.node_name.clone(); @@ -176,9 +174,7 @@ pub fn new_full(config: Configuration) } /// Builds a new service for a light client. -pub fn new_light(config: Configuration) - -> Result -{ +pub fn new_light(config: Configuration) -> Result { let inherent_data_providers = InherentDataProviders::new(); ServiceBuilder::new_light::(config)? diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 1f22e85ab6e63..af71de7570a30 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -60,8 +60,8 @@ sp-transaction-pool = { version = "2.0.0-dev", path = "../../../primitives/trans # client dependencies sc-client-api = { version = "2.0.0-dev", path = "../../../client/api" } -sc-client = { version = "0.8.0-dev", path = "../../../client/" } sc-chain-spec = { version = "2.0.0-dev", path = "../../../client/chain-spec" } +sc-consensus = { version = "0.8.0-dev", path = "../../../client/consensus/common" } sc-transaction-pool = { version = "2.0.0-dev", path = "../../../client/transaction-pool" } sc-network = { version = "0.8.0-dev", path = "../../../client/network" } sc-consensus-babe = { version = "0.8.0-dev", path = "../../../client/consensus/babe" } @@ -111,6 +111,7 @@ sc-service = { version = "0.8.0-dev", default-features = false, path = "../../.. [dev-dependencies] sc-keystore = { version = "2.0.0-dev", path = "../../../client/keystore" } +sc-consensus = { version = "0.8.0-dev", path = "../../../client/consensus/common" } sc-consensus-babe = { version = "0.8.0-dev", features = ["test-helpers"], path = "../../../client/consensus/babe" } sc-consensus-epochs = { version = "0.8.0-dev", path = "../../../client/consensus/epochs" } sc-service-test = { version = "2.0.0-dev", path = "../../../client/service/test" } diff --git a/bin/node/cli/src/chain_spec.rs b/bin/node/cli/src/chain_spec.rs index 461474038b8b8..ea3999fa37718 100644 --- a/bin/node/cli/src/chain_spec.rs +++ b/bin/node/cli/src/chain_spec.rs @@ -51,9 +51,9 @@ const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/"; #[serde(rename_all = "camelCase")] pub struct Extensions { /// Block numbers with known hashes. 
- pub fork_blocks: sc_client::ForkBlocks, + pub fork_blocks: sc_client_api::ForkBlocks, /// Known bad block hashes. - pub bad_blocks: sc_client::BadBlocks, + pub bad_blocks: sc_client_api::BadBlocks, } /// Specialized `ChainSpec`. diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 1ccc6e5ec4850..c8b0e50c4ff27 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -21,7 +21,6 @@ use std::sync::Arc; use sc_consensus_babe; -use sc_client::{self, LongestChain}; use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider, StorageAndProofProvider}; use node_executor; use node_primitives::Block; @@ -30,14 +29,7 @@ use sc_service::{ AbstractService, ServiceBuilder, config::Configuration, error::{Error as ServiceError}, }; use sp_inherents::InherentDataProviders; - -use sc_service::{Service, NetworkStatus}; -use sc_client::{Client, LocalCallExecutor}; -use sc_client_db::Backend; -use sp_runtime::traits::Block as BlockT; -use node_executor::NativeExecutor; -use sc_network::NetworkService; -use sc_offchain::OffchainWorkers; +use sc_consensus::LongestChain; /// Starts a `ServiceBuilder` for a full service. /// @@ -54,7 +46,7 @@ macro_rules! new_full_start { node_primitives::Block, node_runtime::RuntimeApi, node_executor::Executor >($config)? .with_select_chain(|_config, backend| { - Ok(sc_client::LongestChain::new(backend.clone())) + Ok(sc_consensus::LongestChain::new(backend.clone())) })? .with_transaction_pool(|config, client, _fetcher, prometheus_registry| { let pool_api = sc_transaction_pool::FullChainApi::new(client.clone()); @@ -266,38 +258,9 @@ macro_rules! new_full { }} } -type ConcreteBlock = node_primitives::Block; -type ConcreteClient = - Client< - Backend, - LocalCallExecutor, NativeExecutor>, - ConcreteBlock, - node_runtime::RuntimeApi - >; -type ConcreteBackend = Backend; -type ConcreteTransactionPool = sc_transaction_pool::BasicPool< - sc_transaction_pool::FullChainApi, - ConcreteBlock ->; - /// Builds a new service for a full client. pub fn new_full(config: Configuration) --> Result< - Service< - ConcreteBlock, - ConcreteClient, - LongestChain, - NetworkStatus, - NetworkService::Hash>, - ConcreteTransactionPool, - OffchainWorkers< - ConcreteClient, - >::OffchainStorage, - ConcreteBlock, - > - >, - ServiceError, -> +-> Result { new_full!(config).map(|(service, _)| service) } @@ -416,7 +379,7 @@ mod tests { use sp_core::ed25519::Pair; use {service_test, Factory}; - use sc_client::{BlockImportParams, BlockOrigin}; + use sp_consensus::{BlockImportParams, BlockOrigin}; let alice: Arc = Arc::new(Keyring::Alice.into()); let bob: Arc = Arc::new(Keyring::Bob.into()); diff --git a/bin/node/rpc/Cargo.toml b/bin/node/rpc/Cargo.toml index d6db49ebf01a6..76d9998831cc5 100644 --- a/bin/node/rpc/Cargo.toml +++ b/bin/node/rpc/Cargo.toml @@ -11,7 +11,7 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-client = { version = "0.8.0-dev", path = "../../../client/" } +sc-client-api = { version = "2.0.0-dev", path = "../../../client/api" } jsonrpc-core = "14.0.3" node-primitives = { version = "2.0.0-dev", path = "../primitives" } node-runtime = { version = "2.0.0-dev", path = "../runtime" } diff --git a/bin/node/rpc/src/lib.rs b/bin/node/rpc/src/lib.rs index 4e1cfa56733a7..297dc129aeada 100644 --- a/bin/node/rpc/src/lib.rs +++ b/bin/node/rpc/src/lib.rs @@ -50,7 +50,7 @@ pub struct LightDeps { /// Transaction pool instance. pub pool: Arc

, /// Remote access to the blockchain (async). - pub remote_blockchain: Arc>, + pub remote_blockchain: Arc>, /// Fetcher instance. pub fetcher: Arc, } @@ -135,9 +135,9 @@ pub fn create_full( pub fn create_light( deps: LightDeps, ) -> jsonrpc_core::IoHandler where - C: sc_client::blockchain::HeaderBackend, + C: sp_blockchain::HeaderBackend, C: Send + Sync + 'static, - F: sc_client::light::fetcher::Fetcher + 'static, + F: sc_client_api::light::Fetcher + 'static, P: TransactionPool + 'static, M: jsonrpc_core::Metadata + Default, { diff --git a/bin/node/testing/Cargo.toml b/bin/node/testing/Cargo.toml index 3d8ab42621a1d..af375c774de42 100644 --- a/bin/node/testing/Cargo.toml +++ b/bin/node/testing/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] pallet-balances = { version = "2.0.0-dev", path = "../../../frame/balances" } -sc-client = { version = "0.8.0-dev", path = "../../../client/" } +sc-service = { version = "0.8.0-dev", features = ["test-helpers", "db"], path = "../../../client/service" } sc-client-db = { version = "0.8.0-dev", path = "../../../client/db/", features = ["kvdb-rocksdb", "parity-db"] } sc-client-api = { version = "2.0.0-dev", path = "../../../client/api/" } codec = { package = "parity-scale-codec", version = "1.3.0" } @@ -55,4 +55,3 @@ futures = "0.3.1" [dev-dependencies] criterion = "0.3.0" sc-cli = { version = "0.8.0-dev", path = "../../../client/cli" } -sc-service = { version = "0.8.0-dev", path = "../../../client/service", features = ["db"] } diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs index 91d0cfb55a0e8..d05bb9a0950ea 100644 --- a/bin/node/testing/src/bench.rs +++ b/bin/node/testing/src/bench.rs @@ -221,7 +221,7 @@ impl BenchDb { }, }; - let (client, backend) = sc_client_db::new_client( + let (client, backend) = sc_service::new_client( db_config, NativeExecutor::new(WasmExecutionMethod::Compiled, None, 8), &keyring.generate_genesis(), diff --git a/bin/node/testing/src/client.rs b/bin/node/testing/src/client.rs index 963bac7041b7d..69583e37dc90f 100644 --- a/bin/node/testing/src/client.rs +++ b/bin/node/testing/src/client.rs @@ -17,7 +17,7 @@ //! Utilities to build a `TestClient` for `node-runtime`. use sp_runtime::BuildStorage; - +use sc_service::client; /// Re-export test-client utilities. pub use substrate_test_client::*; @@ -28,9 +28,9 @@ pub type Executor = sc_executor::NativeExecutor; pub type Backend = sc_client_db::Backend; /// Test client type. 
-pub type Client = sc_client::Client< +pub type Client = client::Client< Backend, - sc_client::LocalCallExecutor, + client::LocalCallExecutor, node_primitives::Block, node_runtime::RuntimeApi, >; @@ -61,7 +61,7 @@ pub trait TestClientBuilderExt: Sized { impl TestClientBuilderExt for substrate_test_client::TestClientBuilder< node_primitives::Block, - sc_client::LocalCallExecutor, + client::LocalCallExecutor, Backend, GenesisParameters, > { diff --git a/bin/node/transaction-factory/Cargo.toml b/bin/node/transaction-factory/Cargo.toml index 273d1163ac9de..5aa803a7e18ee 100644 --- a/bin/node/transaction-factory/Cargo.toml +++ b/bin/node/transaction-factory/Cargo.toml @@ -15,7 +15,6 @@ sp-block-builder = { version = "2.0.0-dev", path = "../../../primitives/block-bu sc-cli = { version = "0.8.0-dev", path = "../../../client/cli" } sc-client-api = { version = "2.0.0-dev", path = "../../../client/api" } sc-block-builder = { version = "0.8.0-dev", path = "../../../client/block-builder" } -sc-client = { version = "0.8.0-dev", path = "../../../client" } codec = { package = "parity-scale-codec", version = "1.3.0", features = ["derive"] } sp-consensus = { version = "0.8.0-dev", path = "../../../primitives/consensus/common" } log = "0.4.8" diff --git a/client/Cargo.toml b/client/Cargo.toml deleted file mode 100644 index a28418b9a2813..0000000000000 --- a/client/Cargo.toml +++ /dev/null @@ -1,51 +0,0 @@ -[package] -name = "sc-client" -version = "0.8.0-dev" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0" -homepage = "https://substrate.dev" -repository = "https://github.com/paritytech/substrate/" -description = "Substrate Client and associated logic." - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -sc-block-builder = { version = "0.8.0-dev", path = "block-builder" } -sc-client-api = { version = "2.0.0-dev", path = "api" } -codec = { package = "parity-scale-codec", version = "1.3.0", features = ["derive"] } -sp-consensus = { version = "0.8.0-dev", path = "../primitives/consensus/common" } -derive_more = { version = "0.99.2" } -sc-executor = { version = "0.8.0-dev", path = "executor" } -sp-externalities = { version = "0.8.0-dev", path = "../primitives/externalities" } -fnv = { version = "1.0.6" } -futures = { version = "0.3.1", features = ["compat"] } -hash-db = { version = "0.15.2" } -hex-literal = { version = "0.2.1" } -sp-inherents = { version = "2.0.0-dev", path = "../primitives/inherents" } -sp-keyring = { version = "2.0.0-dev", path = "../primitives/keyring" } -kvdb = "0.5.0" -log = { version = "0.4.8" } -parking_lot = "0.10.0" -rand = "0.7.3" -sp-core = { version = "2.0.0-dev", path = "../primitives/core" } -sp-std = { version = "2.0.0-dev", path = "../primitives/std" } -sp-version = { version = "2.0.0-dev", path = "../primitives/version" } -sp-api = { version = "2.0.0-dev", path = "../primitives/api" } -sp-runtime = { version = "2.0.0-dev", path = "../primitives/runtime" } -sp-utils = { version = "2.0.0-dev", path = "../primitives/utils" } -sp-blockchain = { version = "2.0.0-dev", path = "../primitives/blockchain" } -sp-state-machine = { version = "0.8.0-dev", path = "../primitives/state-machine" } -sc-telemetry = { version = "2.0.0-dev", path = "telemetry" } -sp-trie = { version = "2.0.0-dev", path = "../primitives/trie" } -sp-database = { version = "2.0.0-dev", path = "../primitives/database" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0-dev", path = "../utils/prometheus" } -tracing = 
"0.1.10" - -[dev-dependencies] -env_logger = "0.7.0" -tempfile = "3.1.0" -substrate-test-runtime-client = { version = "2.0.0-dev", path = "../test-utils/runtime/client" } -kvdb-memorydb = "0.5.0" -sp-panic-handler = { version = "2.0.0-dev", path = "../primitives/panic-handler" } diff --git a/client/api/Cargo.toml b/client/api/Cargo.toml index c745dc3bee747..3e9cbc1a9db2b 100644 --- a/client/api/Cargo.toml +++ b/client/api/Cargo.toml @@ -9,10 +9,6 @@ repository = "https://github.com/paritytech/substrate/" description = "Substrate client interfaces." documentation = "https://docs.rs/sc-client-api" -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - - [dependencies] codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } sp-consensus = { version = "0.8.0-dev", path = "../../primitives/consensus/common" } @@ -30,6 +26,7 @@ kvdb = "0.5.0" log = { version = "0.4.8" } parking_lot = "0.10.0" lazy_static = "1.4.0" +sp-database = { version = "2.0.0-dev", path = "../../primitives/database" } sp-core = { version = "2.0.0-dev", default-features = false, path = "../../primitives/core" } sp-std = { version = "2.0.0-dev", default-features = false, path = "../../primitives/std" } sp-version = { version = "2.0.0-dev", default-features = false, path = "../../primitives/version" } @@ -44,4 +41,9 @@ sp-transaction-pool = { version = "2.0.0-dev", path = "../../primitives/transact prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0-dev", path = "../../utils/prometheus" } [dev-dependencies] +kvdb-memorydb = "0.5.0" sp-test-primitives = { version = "2.0.0-dev", path = "../../primitives/test-primitives" } +substrate-test-runtime = { version = "2.0.0-dev", path = "../../test-utils/runtime" } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index ad697a35932ab..8aaeb94483390 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -79,6 +79,25 @@ pub struct ClientImportOperation> { pub notify_finalized: Vec, } +/// Helper function to apply auxiliary data insertion into an operation. +pub fn apply_aux<'a, 'b: 'a, 'c: 'a, B, Block, D, I>( + operation: &mut ClientImportOperation, + insert: I, + delete: D, +) -> sp_blockchain::Result<()> + where + Block: BlockT, + B: Backend, + I: IntoIterator, + D: IntoIterator, +{ + operation.op.insert_aux( + insert.into_iter() + .map(|(k, v)| (k.to_vec(), Some(v.to_vec()))) + .chain(delete.into_iter().map(|k| (k.to_vec(), None))) + ) +} + /// State of a new block. 
#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum NewBlockState { diff --git a/client/src/cht.rs b/client/api/src/cht.rs similarity index 99% rename from client/src/cht.rs rename to client/api/src/cht.rs index de67280632302..3eba63e7026a3 100644 --- a/client/src/cht.rs +++ b/client/api/src/cht.rs @@ -331,9 +331,10 @@ pub fn decode_cht_value(value: &[u8]) -> Option { #[cfg(test)] mod tests { - use substrate_test_runtime_client::runtime::Header; - use sp_runtime::traits::BlakeTwo256; use super::*; + use sp_runtime::{generic, traits::BlakeTwo256}; + + type Header = generic::Header; #[test] fn is_build_required_works() { diff --git a/client/src/in_mem.rs b/client/api/src/in_mem.rs similarity index 92% rename from client/src/in_mem.rs rename to client/api/src/in_mem.rs index a63ea91e26e97..d5b4800f4e421 100644 --- a/client/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -32,14 +32,15 @@ use sp_state_machine::{ }; use sp_blockchain::{CachedHeaderMetadata, HeaderMetadata}; -use sc_client_api::{ +use crate::{ backend::{self, NewBlockState}, blockchain::{ self, BlockStatus, HeaderBackend, well_known_cache_keys::Id as CacheKeyId }, UsageInfo, + light, + leaves::LeafSet, }; -use crate::leaves::LeafSet; struct PendingBlock { block: StoredBlock, @@ -400,7 +401,7 @@ impl backend::AuxStore for Blockchain { } } -impl sc_client_api::light::Storage for Blockchain +impl light::Storage for Blockchain where Block::Hash: From<[u8; 32]>, { @@ -454,7 +455,7 @@ impl sc_client_api::light::Storage for Blockchain None } - fn usage_info(&self) -> Option { + fn usage_info(&self) -> Option { None } } @@ -719,7 +720,7 @@ impl backend::RemoteBackend for Backend where Block .unwrap_or(false) } - fn remote_blockchain(&self) -> Arc> { + fn remote_blockchain(&self) -> Arc> { unimplemented!() } } @@ -737,46 +738,3 @@ pub fn check_genesis_storage(storage: &Storage) -> sp_blockchain::Result<()> { Ok(()) } - -#[cfg(test)] -mod tests { - use sp_core::offchain::{OffchainStorage, storage::InMemOffchainStorage}; - use std::sync::Arc; - - type TestBackend = substrate_test_runtime_client::sc_client::in_mem::Backend; - - #[test] - fn test_leaves_with_complex_block_tree() { - let backend = Arc::new(TestBackend::new()); - - substrate_test_runtime_client::trait_tests::test_leaves_for_backend(backend); - } - - #[test] - fn test_blockchain_query_by_number_gets_canonical() { - let backend = Arc::new(TestBackend::new()); - - substrate_test_runtime_client::trait_tests::test_blockchain_query_by_number_gets_canonical(backend); - } - - #[test] - fn in_memory_offchain_storage() { - - let mut storage = InMemOffchainStorage::default(); - assert_eq!(storage.get(b"A", b"B"), None); - assert_eq!(storage.get(b"B", b"A"), None); - - storage.set(b"A", b"B", b"C"); - assert_eq!(storage.get(b"A", b"B"), Some(b"C".to_vec())); - assert_eq!(storage.get(b"B", b"A"), None); - - storage.compare_and_set(b"A", b"B", Some(b"X"), b"D"); - assert_eq!(storage.get(b"A", b"B"), Some(b"C".to_vec())); - storage.compare_and_set(b"A", b"B", Some(b"C"), b"D"); - assert_eq!(storage.get(b"A", b"B"), Some(b"D".to_vec())); - - assert!(!storage.compare_and_set(b"B", b"A", Some(b""), b"Y")); - assert!(storage.compare_and_set(b"B", b"A", None, b"X")); - assert_eq!(storage.get(b"B", b"A"), Some(b"X".to_vec())); - } -} diff --git a/client/src/leaves.rs b/client/api/src/leaves.rs similarity index 100% rename from client/src/leaves.rs rename to client/api/src/leaves.rs diff --git a/client/api/src/lib.rs b/client/api/src/lib.rs index e4080323c188e..bad61f7687a63 100644 --- 
a/client/api/src/lib.rs
+++ b/client/api/src/lib.rs
@@ -20,8 +20,11 @@
 pub mod backend;
 pub mod call_executor;
 pub mod client;
+pub mod cht;
 pub mod execution_extensions;
+pub mod in_mem;
 pub mod light;
+pub mod leaves;
 pub mod notifications;
 pub mod proof_provider;
 
@@ -36,6 +39,13 @@ pub use proof_provider::*;
 
 pub use sp_state_machine::{StorageProof, ExecutionStrategy, CloneableSpawn};
 
+/// Usage Information Provider interface
+///
+pub trait UsageProvider<Block: sp_runtime::traits::Block> {
+	/// Get usage info about current client.
+	fn usage_info(&self) -> ClientInfo<Block>;
+}
+
 /// Utility methods for the client.
 pub mod utils {
 	use sp_blockchain::{HeaderBackend, HeaderMetadata, Error};
diff --git a/client/api/src/light.rs b/client/api/src/light.rs
index 30e6d14d557f1..b359c1149eea6 100644
--- a/client/api/src/light.rs
+++ b/client/api/src/light.rs
@@ -296,7 +296,25 @@ pub trait RemoteBlockchain: Send + Sync {
 	>>;
 }
 
-
+/// Returns future that resolves header either locally, or remotely.
+pub fn future_header<Block: BlockT, F: Fetcher<Block>>(
+	blockchain: &dyn RemoteBlockchain<Block>,
+	fetcher: &F,
+	id: BlockId<Block>,
+) -> impl Future<Output = Result<Option<Block::Header>, ClientError>> {
+	use futures::future::{ready, Either, FutureExt};
+
+	match blockchain.header(id) {
+		Ok(LocalOrRemote::Remote(request)) => Either::Left(
+			fetcher
+				.remote_header(request)
+				.then(|header| ready(header.map(Some)))
+		),
+		Ok(LocalOrRemote::Unknown) => Either::Right(ready(Ok(None))),
+		Ok(LocalOrRemote::Local(local_header)) => Either::Right(ready(Ok(Some(local_header)))),
+		Err(err) => Either::Right(ready(Err(err))),
+	}
+}
 
 #[cfg(test)]
 pub mod tests {
diff --git a/client/cli/src/commands/mod.rs b/client/cli/src/commands/mod.rs
index 143ff491aef95..1e470756e600e 100644
--- a/client/cli/src/commands/mod.rs
+++ b/client/cli/src/commands/mod.rs
@@ -267,7 +267,7 @@ macro_rules! substrate_cli_subcommands {
 		}
 
 		fn execution_strategies(&self, is_dev: bool)
-		-> $crate::Result<::sc_service::config::ExecutionStrategies> {
+		-> $crate::Result<::sc_client_api::execution_extensions::ExecutionStrategies> {
 			match self {
 				$($enum::$variant(cmd) => cmd.execution_strategies(is_dev)),*
 			}
diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs
index 1d07355f0ac93..4f9ccec6f6c61 100644
--- a/client/cli/src/config.rs
+++ b/client/cli/src/config.rs
@@ -25,10 +25,11 @@ use crate::arg_enums::Database;
 use app_dirs::{AppDataType, AppInfo};
 use names::{Generator, Name};
 use sc_service::config::{
-	Configuration, DatabaseConfig, ExecutionStrategies, ExtTransport, KeystoreConfig,
-	NetworkConfiguration, NodeKeyConfig, OffchainWorkerConfig, PrometheusConfig, PruningMode,
-	Role, TelemetryEndpoints, TransactionPoolOptions, WasmExecutionMethod,
+	WasmExecutionMethod, Role, OffchainWorkerConfig,
+	Configuration, DatabaseConfig, ExtTransport, KeystoreConfig, NetworkConfiguration,
+	NodeKeyConfig, PrometheusConfig, PruningMode, TelemetryEndpoints, TransactionPoolOptions,
 };
+use sc_client_api::execution_extensions::ExecutionStrategies;
 use sc_service::{ChainSpec, TracingReceiver};
 use std::future::Future;
 use std::net::SocketAddr;
diff --git a/client/consensus/aura/Cargo.toml b/client/consensus/aura/Cargo.toml
index 40f505f85918a..82196feac0396 100644
--- a/client/consensus/aura/Cargo.toml
+++ b/client/consensus/aura/Cargo.toml
@@ -16,7 +16,6 @@ sp-application-crypto = { version = "2.0.0-dev", path = "../../../primitives/app
 sp-consensus-aura = { version = "0.8.0-dev", path = "../../../primitives/consensus/aura" }
 sp-block-builder = { version = "2.0.0-dev", path = "../../../primitives/block-builder" }
 sc-block-builder = { version = "0.8.0-dev",
path = "../../../client/block-builder" } -sc-client = { version = "0.8.0-dev", path = "../../" } sc-client-api = { version = "2.0.0-dev", path = "../../api" } codec = { package = "parity-scale-codec", version = "1.3.0" } sp-consensus = { version = "0.8.0-dev", path = "../../../primitives/consensus/common" } @@ -42,7 +41,7 @@ sp-keyring = { version = "2.0.0-dev", path = "../../../primitives/keyring" } sc-executor = { version = "0.8.0-dev", path = "../../executor" } sc-network = { version = "0.8.0-dev", path = "../../network" } sc-network-test = { version = "0.8.0-dev", path = "../../network/test" } -sc-service = { version = "0.8.0-dev", path = "../../service" } +sc-service = { version = "0.8.0-dev", default-features = false, path = "../../service" } substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../../test-utils/runtime/client" } env_logger = "0.7.0" tempfile = "3.1.0" diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 038e9e458cf24..daa181abba5a5 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -46,8 +46,7 @@ use sp_consensus::{ use sp_consensus::import_queue::{ Verifier, BasicQueue, BoxJustificationImport, BoxFinalityProofImport, }; -use sc_client_api::backend::AuxStore; -use sc_client::BlockOf; +use sc_client_api::{backend::AuxStore, BlockOf}; use sp_blockchain::{ self, Result as CResult, well_known_cache_keys::{self, Id as CacheKeyId}, ProvideCache, HeaderBackend, @@ -831,14 +830,14 @@ mod tests { use sc_network::config::ProtocolConfig; use parking_lot::Mutex; use sp_keyring::sr25519::Keyring; - use sc_client::BlockchainEvents; + use sc_client_api::BlockchainEvents; use sp_consensus_aura::sr25519::AuthorityPair; use std::task::Poll; use sc_block_builder::BlockBuilderProvider; type Error = sp_blockchain::Error; - type TestClient = sc_client::Client< + type TestClient = substrate_test_runtime_client::client::Client< substrate_test_runtime_client::Backend, substrate_test_runtime_client::Executor, TestBlock, diff --git a/client/consensus/babe/Cargo.toml b/client/consensus/babe/Cargo.toml index 7ac3330b6ac51..ddae8f84b7e3a 100644 --- a/client/consensus/babe/Cargo.toml +++ b/client/consensus/babe/Cargo.toml @@ -28,7 +28,6 @@ sp-timestamp = { version = "2.0.0-dev", path = "../../../primitives/timestamp" } sc-telemetry = { version = "2.0.0-dev", path = "../../telemetry" } sc-keystore = { version = "2.0.0-dev", path = "../../keystore" } sc-client-api = { version = "2.0.0-dev", path = "../../api" } -sc-client = { version = "0.8.0-dev", path = "../../" } sc-consensus-epochs = { version = "0.8.0-dev", path = "../epochs" } sp-api = { version = "2.0.0-dev", path = "../../../primitives/api" } sp-block-builder = { version = "2.0.0-dev", path = "../../../primitives/block-builder" } @@ -54,7 +53,7 @@ sp-keyring = { version = "2.0.0-dev", path = "../../../primitives/keyring" } sc-executor = { version = "0.8.0-dev", path = "../../executor" } sc-network = { version = "0.8.0-dev", path = "../../network" } sc-network-test = { version = "0.8.0-dev", path = "../../network/test" } -sc-service = { version = "0.8.0-dev", path = "../../service" } +sc-service = { version = "0.8.0-dev", default-features = false, path = "../../service" } substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../../test-utils/runtime/client" } sc-block-builder = { version = "0.8.0-dev", path = "../../block-builder" } env_logger = "0.7.0" diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 
5e20c8b5e99f6..89514906be413 100644
--- a/client/consensus/babe/src/tests.rs
+++ b/client/consensus/babe/src/tests.rs
@@ -40,7 +40,7 @@ type Item = DigestItem;
 
 type Error = sp_blockchain::Error;
 
-type TestClient = sc_client::Client<
+type TestClient = substrate_test_runtime_client::client::Client<
 	substrate_test_runtime_client::Backend,
 	substrate_test_runtime_client::Executor,
 	TestBlock,
diff --git a/client/consensus/common/Cargo.toml b/client/consensus/common/Cargo.toml
new file mode 100644
index 0000000000000..e8854faa078c0
--- /dev/null
+++ b/client/consensus/common/Cargo.toml
@@ -0,0 +1,17 @@
+[package]
+name = "sc-consensus"
+version = "0.8.0-dev"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2018"
+license = "GPL-3.0"
+homepage = "https://substrate.dev"
+repository = "https://github.com/paritytech/substrate/"
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
+
+[dependencies]
+sc-client-api = { version = "2.0.0-dev", path = "../../api" }
+sp-blockchain = { version = "2.0.0-dev", path = "../../../primitives/blockchain" }
+sp-runtime = { version = "2.0.0-dev", path = "../../../primitives/runtime" }
+sp-consensus = { version = "0.8.0-dev", path = "../../../primitives/consensus/common" }
diff --git a/client/consensus/common/src/lib.rs b/client/consensus/common/src/lib.rs
new file mode 100644
index 0000000000000..9bfe7e56d4f34
--- /dev/null
+++ b/client/consensus/common/src/lib.rs
@@ -0,0 +1,19 @@
+// Copyright 2019-2020 Parity Technologies (UK) Ltd.
+// This file is part of Substrate.
+
+// Substrate is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Substrate is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
+//! Collection of consensus specific implementations
+mod longest_chain;
+
+pub use longest_chain::LongestChain;
\ No newline at end of file
diff --git a/client/consensus/common/src/longest_chain.rs b/client/consensus/common/src/longest_chain.rs
new file mode 100644
index 0000000000000..981dbad0f6070
--- /dev/null
+++ b/client/consensus/common/src/longest_chain.rs
@@ -0,0 +1,101 @@
+// Copyright 2019-2020 Parity Technologies (UK) Ltd.
+// This file is part of Substrate.
+
+// Substrate is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Substrate is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
+//! Longest chain implementation
+
+use std::sync::Arc;
+use std::marker::PhantomData;
+use sc_client_api::backend;
+use sp_consensus::{SelectChain, Error as ConsensusError};
+use sp_blockchain::{Backend, HeaderBackend};
+use sp_runtime::{
+	traits::{NumberFor, Block as BlockT},
+	generic::BlockId,
+};
+
+/// Implement Longest Chain Select implementation
+/// where 'longest' is defined as the highest number of blocks
+pub struct LongestChain<B, Block> {
+	backend: Arc<B>,
+	_phantom: PhantomData<Block>
+}
+
+impl<B, Block> Clone for LongestChain<B, Block> {
+	fn clone(&self) -> Self {
+		let backend = self.backend.clone();
+		LongestChain {
+			backend,
+			_phantom: Default::default()
+		}
+	}
+}
+
+impl<B, Block> LongestChain<B, Block>
+	where
+		B: backend::Backend<Block>,
+		Block: BlockT,
+{
+	/// Instantiate a new LongestChain for Backend B
+	pub fn new(backend: Arc<B>) -> Self {
+		LongestChain {
+			backend,
+			_phantom: Default::default()
+		}
+	}
+
+	fn best_block_header(&self) -> sp_blockchain::Result<<Block as BlockT>::Header> {
+		let info = self.backend.blockchain().info();
+		let import_lock = self.backend.get_import_lock();
+		let best_hash = self.backend
+			.blockchain()
+			.best_containing(info.best_hash, None, import_lock)?
+			.unwrap_or(info.best_hash);
+
+		Ok(self.backend.blockchain().header(BlockId::Hash(best_hash))?
+			.expect("given block hash was fetched from block in db; qed"))
+	}
+
+	fn leaves(&self) -> Result<Vec<<Block as BlockT>::Hash>, sp_blockchain::Error> {
+		self.backend.blockchain().leaves()
+	}
+}
+
+impl<B, Block> SelectChain<Block> for LongestChain<B, Block>
+	where
+		B: backend::Backend<Block>,
+		Block: BlockT,
+{
+
+	fn leaves(&self) -> Result<Vec<<Block as BlockT>::Hash>, ConsensusError> {
+		LongestChain::leaves(self)
+			.map_err(|e| ConsensusError::ChainLookup(e.to_string()).into())
+	}
+
+	fn best_chain(&self) -> Result<<Block as BlockT>::Header, ConsensusError>
+	{
+		LongestChain::best_block_header(&self)
+			.map_err(|e| ConsensusError::ChainLookup(e.to_string()).into())
+	}
+
+	fn finality_target(
+		&self,
+		target_hash: Block::Hash,
+		maybe_max_number: Option<NumberFor<Block>>
+	) -> Result<Option<Block::Hash>, ConsensusError> {
+		let import_lock = self.backend.get_import_lock();
+		self.backend.blockchain().best_containing(target_hash, maybe_max_number, import_lock)
+			.map_err(|e| ConsensusError::ChainLookup(e.to_string()).into())
+	}
+}
\ No newline at end of file
diff --git a/client/consensus/manual-seal/Cargo.toml b/client/consensus/manual-seal/Cargo.toml
index 8d67ef30ad487..807c370edf332 100644
--- a/client/consensus/manual-seal/Cargo.toml
+++ b/client/consensus/manual-seal/Cargo.toml
@@ -22,7 +22,6 @@ parking_lot = "0.10.0"
 serde = { version = "1.0", features=["derive"] }
 assert_matches = "1.3.0"
 
-sc-client = { path = "../../../client" , version = "0.8.0-dev"}
 sc-client-api = { path = "../../../client/api" , version = "2.0.0-dev"}
 sc-transaction-pool = { path = "../../transaction-pool" , version = "2.0.0-dev"}
 sp-blockchain = { path = "../../../primitives/blockchain" , version = "2.0.0-dev"}
diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml
index f636b03f30fe8..2df1836850cd7 100644
--- a/client/db/Cargo.toml
+++ b/client/db/Cargo.toml
@@ -26,7 +26,6 @@ blake2-rfc = "0.2.18"
 sc-client-api = { version = "2.0.0-dev", path = "../api" }
 sp-core = { version = "2.0.0-dev", path = "../../primitives/core" }
 sp-runtime = { version = "2.0.0-dev", path = "../../primitives/runtime" }
-sc-client = { version = "0.8.0-dev", path = "../" }
 sp-state-machine = { version = "0.8.0-dev", path = "../../primitives/state-machine" }
 sc-executor = { version = "0.8.0-dev", path = "../executor" }
 sc-state-db = { version = "0.8.0-dev", path = "../state-db" }
diff --git a/client/db/src/lib.rs
b/client/db/src/lib.rs index f3e2b0ea1f0de..b9ca63b11715e 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -52,9 +52,9 @@ use std::collections::HashMap; use sc_client_api::{ - ForkBlocks, UsageInfo, MemoryInfo, BadBlocks, IoInfo, MemorySize, CloneableSpawn, - execution_extensions::ExecutionExtensions, + UsageInfo, MemoryInfo, IoInfo, MemorySize, backend::{NewBlockState, PrunableStateChangesTrieStorage}, + leaves::{LeafSet, FinalizationDisplaced}, }; use sp_blockchain::{ Result as ClientResult, Error as ClientError, @@ -65,17 +65,13 @@ use hash_db::Prefix; use sp_trie::{MemoryDB, PrefixedMemoryDB, prefixed_key}; use sp_database::Transaction; use parking_lot::RwLock; -use sp_core::{ChangesTrieConfiguration, traits::CodeExecutor}; -use sp_core::offchain::storage::{OffchainOverlayedChange,OffchainOverlayedChanges}; +use sp_core::ChangesTrieConfiguration; +use sp_core::offchain::storage::{OffchainOverlayedChange, OffchainOverlayedChanges}; use sp_core::storage::{well_known_keys, ChildInfo}; -use sp_runtime::{ - generic::BlockId, Justification, Storage, - BuildStorage, -}; +use sp_runtime::{generic::BlockId, Justification, Storage}; use sp_runtime::traits::{ Block as BlockT, Header as HeaderT, NumberFor, Zero, One, SaturatedConversion, HashFor, }; -use sc_executor::RuntimeInfo; use sp_state_machine::{ DBValue, ChangesTrieTransaction, ChangesTrieCacheAction, UsageInfo as StateUsageInfo, StorageCollection, ChildStorageCollection, @@ -83,13 +79,11 @@ use sp_state_machine::{ }; use crate::utils::{DatabaseType, Meta, meta_keys, read_db, read_meta}; use crate::changes_tries_storage::{DbChangesTrieStorage, DbChangesTrieStorageTransaction}; -use sc_client::leaves::{LeafSet, FinalizationDisplaced}; use sc_state_db::StateDb; use sp_blockchain::{CachedHeaderMetadata, HeaderMetadata, HeaderMetadataCache}; use crate::storage_cache::{CachingState, SyncingCachingState, SharedCache, new_shared_cache}; use crate::stats::StateUsageStats; use log::{trace, debug, warn}; -use prometheus_endpoint::Registry; // Re-export the Database trait so that one can pass an implementation of it. pub use sp_database::Database; @@ -98,7 +92,6 @@ pub use sc_state_db::PruningMode; #[cfg(any(feature = "kvdb-rocksdb", test))] pub use bench::BenchmarkingState; -const CANONICALIZATION_DELAY: u64 = 4096; const MIN_BLOCKS_TO_KEEP_CHANGES_TRIES_FOR: u32 = 32768; /// Default value for storage cache child ratio. @@ -324,49 +317,6 @@ impl DatabaseSettingsSrc { } } -/// Create an instance of db-backed client. 
-pub fn new_client( - settings: DatabaseSettings, - executor: E, - genesis_storage: &dyn BuildStorage, - fork_blocks: ForkBlocks, - bad_blocks: BadBlocks, - execution_extensions: ExecutionExtensions, - spawn_handle: Box, - prometheus_registry: Option, - config: sc_client::ClientConfig, -) -> Result<( - sc_client::Client< - Backend, - sc_client::LocalCallExecutor, E>, - Block, - RA, - >, - Arc>, - ), - sp_blockchain::Error, -> - where - Block: BlockT, - E: CodeExecutor + RuntimeInfo, -{ - let backend = Arc::new(Backend::new(settings, CANONICALIZATION_DELAY)?); - let executor = sc_client::LocalCallExecutor::new(backend.clone(), executor, spawn_handle, config.clone()); - Ok(( - sc_client::Client::new( - backend.clone(), - executor, - genesis_storage, - fork_blocks, - bad_blocks, - execution_extensions, - prometheus_registry, - config, - )?, - backend, - )) -} - pub(crate) mod columns { pub const META: u32 = crate::utils::COLUMN_META; pub const STATE: u32 = 1; @@ -446,14 +396,14 @@ impl BlockchainDb { } } -impl sc_client::blockchain::HeaderBackend for BlockchainDb { +impl sc_client_api::blockchain::HeaderBackend for BlockchainDb { fn header(&self, id: BlockId) -> ClientResult> { utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id) } - fn info(&self) -> sc_client::blockchain::Info { + fn info(&self) -> sc_client_api::blockchain::Info { let meta = self.meta.read(); - sc_client::blockchain::Info { + sc_client_api::blockchain::Info { best_hash: meta.best_hash, best_number: meta.best_number, genesis_hash: meta.genesis_hash, @@ -463,7 +413,7 @@ impl sc_client::blockchain::HeaderBackend for BlockchainDb } } - fn status(&self, id: BlockId) -> ClientResult { + fn status(&self, id: BlockId) -> ClientResult { let exists = match id { BlockId::Hash(_) => read_db( &*self.db, @@ -474,8 +424,8 @@ impl sc_client::blockchain::HeaderBackend for BlockchainDb BlockId::Number(n) => n <= self.meta.read().best_number, }; match exists { - true => Ok(sc_client::blockchain::BlockStatus::InChain), - false => Ok(sc_client::blockchain::BlockStatus::Unknown), + true => Ok(sc_client_api::blockchain::BlockStatus::InChain), + false => Ok(sc_client_api::blockchain::BlockStatus::Unknown), } } @@ -491,7 +441,7 @@ impl sc_client::blockchain::HeaderBackend for BlockchainDb } } -impl sc_client::blockchain::Backend for BlockchainDb { +impl sc_client_api::blockchain::Backend for BlockchainDb { fn body(&self, id: BlockId) -> ClientResult>> { match read_db(&*self.db, columns::KEY_LOOKUP, columns::BODY, id)? { Some(body) => match Decode::decode(&mut &body[..]) { @@ -520,7 +470,7 @@ impl sc_client::blockchain::Backend for BlockchainDb Option>> { + fn cache(&self) -> Option>> { None } @@ -533,8 +483,8 @@ impl sc_client::blockchain::Backend for BlockchainDb sc_client::blockchain::ProvideCache for BlockchainDb { - fn cache(&self) -> Option>> { +impl sc_client_api::blockchain::ProvideCache for BlockchainDb { + fn cache(&self) -> Option>> { None } } @@ -1035,7 +985,7 @@ impl Backend { let hash = if new_canonical == number_u64 { hash } else { - ::sc_client::blockchain::HeaderBackend::hash(&self.blockchain, new_canonical.saturated_into())? + ::sc_client_api::blockchain::HeaderBackend::hash(&self.blockchain, new_canonical.saturated_into())? 
.expect("existence of block with number `new_canonical` \ implies existence of blocks with all numbers before it; qed") }; @@ -1259,7 +1209,7 @@ impl Backend { }; let cache_update = if let Some(set_head) = operation.set_head { - if let Some(header) = sc_client::blockchain::HeaderBackend::header(&self.blockchain, set_head)? { + if let Some(header) = sc_client_api::blockchain::HeaderBackend::header(&self.blockchain, set_head)? { let number = header.number(); let hash = header.hash(); @@ -1604,7 +1554,7 @@ impl sc_client_api::backend::Backend for Backend { } fn state_at(&self, block: BlockId) -> ClientResult { - use sc_client::blockchain::HeaderBackend as BcHeaderBackend; + use sc_client_api::blockchain::HeaderBackend as BcHeaderBackend; // special case for genesis initialization match block { @@ -1705,7 +1655,7 @@ pub(crate) mod tests { use crate::columns; use sp_core::H256; use sc_client_api::backend::{Backend as BTrait, BlockImportOperation as Op}; - use sc_client::blockchain::Backend as BLBTrait; + use sc_client_api::blockchain::Backend as BLBTrait; use sp_runtime::testing::{Header, Block as RawBlock, ExtrinsicWrapper}; use sp_runtime::traits::{Hash, BlakeTwo256}; use sp_runtime::generic::DigestItem; @@ -2284,7 +2234,7 @@ pub(crate) mod tests { #[test] fn test_finalize_block_with_justification() { - use sc_client::blockchain::{Backend as BlockChainBackend}; + use sc_client_api::blockchain::{Backend as BlockChainBackend}; let backend = Backend::::new_test(10, 10); diff --git a/client/db/src/light.rs b/client/db/src/light.rs index c87388a9546fc..edc7f8fc552dd 100644 --- a/client/db/src/light.rs +++ b/client/db/src/light.rs @@ -20,11 +20,13 @@ use std::{sync::Arc, collections::HashMap}; use std::convert::TryInto; use parking_lot::RwLock; -use sc_client_api::{backend::{AuxStore, NewBlockState}, UsageInfo}; -use sc_client::blockchain::{ - BlockStatus, Cache as BlockchainCache,Info as BlockchainInfo, +use sc_client_api::{ + cht, backend::{AuxStore, NewBlockState}, UsageInfo, + blockchain::{ + BlockStatus, Cache as BlockchainCache, Info as BlockchainInfo, + }, + Storage }; -use sc_client::cht; use sp_blockchain::{ CachedHeaderMetadata, HeaderMetadata, HeaderMetadataCache, Error as ClientError, Result as ClientResult, @@ -32,7 +34,6 @@ use sp_blockchain::{ well_known_cache_keys, }; use sp_database::{Database, Transaction}; -use sc_client::light::blockchain::Storage as LightBlockchainStorage; use codec::{Decode, Encode}; use sp_runtime::generic::{DigestItem, BlockId}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Zero, One, NumberFor, HashFor}; @@ -406,7 +407,7 @@ impl AuxStore for LightStorage } } -impl LightBlockchainStorage for LightStorage +impl Storage for LightStorage where Block: BlockT, { fn import_header( @@ -614,7 +615,7 @@ fn cht_key>(cht_type: u8, block: N) -> ClientResult<[u8; 5]> { #[cfg(test)] pub(crate) mod tests { - use sc_client::cht; + use sc_client_api::cht; use sp_core::ChangesTrieConfiguration; use sp_runtime::generic::{DigestItem, ChangesTrieSignal}; use sp_runtime::testing::{H256 as Hash, Header, Block as RawBlock, ExtrinsicWrapper}; diff --git a/client/finality-grandpa/Cargo.toml b/client/finality-grandpa/Cargo.toml index 7c0b95a6f0711..a634595ed3ed6 100644 --- a/client/finality-grandpa/Cargo.toml +++ b/client/finality-grandpa/Cargo.toml @@ -26,13 +26,13 @@ sp-arithmetic = { version = "2.0.0-dev", path = "../../primitives/arithmetic" } sp-runtime = { version = "2.0.0-dev", path = "../../primitives/runtime" } sp-utils = { version = "2.0.0-dev", path = 
"../../primitives/utils" } sp-consensus = { version = "0.8.0-dev", path = "../../primitives/consensus/common" } +sc-consensus = { version = "0.8.0-dev", path = "../../client/consensus/common" } sp-core = { version = "2.0.0-dev", path = "../../primitives/core" } sp-api = { version = "2.0.0-dev", path = "../../primitives/api" } sc-telemetry = { version = "2.0.0-dev", path = "../telemetry" } sc-keystore = { version = "2.0.0-dev", path = "../keystore" } serde_json = "1.0.41" sc-client-api = { version = "2.0.0-dev", path = "../api" } -sc-client = { version = "0.8.0-dev", path = "../" } sp-inherents = { version = "2.0.0-dev", path = "../../primitives/inherents" } sp-blockchain = { version = "2.0.0-dev", path = "../../primitives/blockchain" } sc-network = { version = "0.8.0-dev", path = "../network" } diff --git a/client/finality-grandpa/src/environment.rs b/client/finality-grandpa/src/environment.rs index d3bbc1adb3cb9..cab212333c73e 100644 --- a/client/finality-grandpa/src/environment.rs +++ b/client/finality-grandpa/src/environment.rs @@ -28,8 +28,7 @@ use parking_lot::RwLock; use sp_blockchain::{HeaderBackend, Error as ClientError, HeaderMetadata}; use std::marker::PhantomData; -use sc_client_api::{backend::Backend, utils::is_descendent_of}; -use sc_client::apply_aux; +use sc_client_api::{backend::{Backend, apply_aux}, utils::is_descendent_of}; use finality_grandpa::{ BlockNumberOps, Equivocation, Error as GrandpaError, round::State as RoundState, voter, voter_set::VoterSet, diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index 2c85839b5e364..4035854a380b0 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -596,7 +596,7 @@ impl ProvableJustification for GrandpaJustificatio pub(crate) mod tests { use substrate_test_runtime_client::runtime::{Block, Header, H256}; use sc_client_api::NewBlockState; - use substrate_test_runtime_client::sc_client::in_mem::Blockchain as InMemoryBlockchain; + use sc_client_api::in_mem::Blockchain as InMemoryBlockchain; use super::*; use sp_core::crypto::Public; diff --git a/client/finality-grandpa/src/light_import.rs b/client/finality-grandpa/src/light_import.rs index 276f5d0f28d7a..dd80dd82743c0 100644 --- a/client/finality-grandpa/src/light_import.rs +++ b/client/finality-grandpa/src/light_import.rs @@ -567,7 +567,7 @@ pub mod tests { use sp_consensus::{ForkChoiceStrategy, BlockImport}; use sp_finality_grandpa::AuthorityId; use sp_core::{H256, crypto::Public}; - use substrate_test_runtime_client::sc_client::in_mem::Blockchain as InMemoryAuxStore; + use sc_client_api::in_mem::Blockchain as InMemoryAuxStore; use substrate_test_runtime_client::runtime::{Block, Header}; use crate::tests::TestApi; use crate::finality_proof::tests::TestJustification; diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index d7d1d1e48d3a0..2821737c4d471 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -27,7 +27,6 @@ use parking_lot::Mutex; use futures_timer::Delay; use tokio::runtime::{Runtime, Handle}; use sp_keyring::Ed25519Keyring; -use sc_client::LongestChain; use sc_client_api::backend::TransactionFor; use sp_blockchain::Result; use sp_api::{ApiRef, StorageProof, ProvideRuntimeApi}; @@ -50,6 +49,7 @@ use finality_proof::{ }; use consensus_changes::ConsensusChanges; use sc_block_builder::BlockBuilderProvider; +use sc_consensus::LongestChain; type PeerData = Mutex< diff --git 
a/client/informant/src/lib.rs b/client/informant/src/lib.rs index 66d5ed41fb5d4..090282a982092 100644 --- a/client/informant/src/lib.rs +++ b/client/informant/src/lib.rs @@ -17,7 +17,7 @@ //! Console informant. Prints sync progress and block events. Runs on the calling thread. use ansi_term::Colour; -use sc_client_api::BlockchainEvents; +use sc_client_api::{BlockchainEvents, UsageProvider}; use futures::prelude::*; use log::{info, warn, trace}; use sp_runtime::traits::Header; diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 9e5aa51a99184..56a4bda2b2388 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -42,7 +42,6 @@ prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0. prost = "0.6.1" rand = "0.7.2" sc-block-builder = { version = "0.8.0-dev", path = "../block-builder" } -sc-client = { version = "0.8.0-dev", path = "../" } sc-client-api = { version = "2.0.0-dev", path = "../api" } sc-peerset = { version = "2.0.0-dev", path = "../peerset" } serde = { version = "1.0.101", features = ["derive"] } diff --git a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index f4e877d675e79..2de6f56e2bb90 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -54,8 +54,13 @@ use libp2p::{ }; use nohash_hasher::IntMap; use prost::Message; -use sc_client::light::fetcher; -use sc_client_api::StorageProof; +use sc_client_api::{ + StorageProof, + light::{ + self, RemoteReadRequest, RemoteBodyRequest, ChangesProof, + RemoteCallRequest, RemoteChangesRequest, RemoteHeaderRequest, + } +}; use sc_peerset::ReputationChange; use sp_core::{ storage::{ChildInfo, ChildType,StorageKey, PrefixedStorageKey}, @@ -193,27 +198,27 @@ pub enum Error { #[derive(Debug)] pub enum Request { Body { - request: fetcher::RemoteBodyRequest, + request: RemoteBodyRequest, sender: oneshot::Sender, ClientError>> }, Header { - request: fetcher::RemoteHeaderRequest, + request: light::RemoteHeaderRequest, sender: oneshot::Sender> }, Read { - request: fetcher::RemoteReadRequest, + request: light::RemoteReadRequest, sender: oneshot::Sender, Option>>, ClientError>> }, ReadChild { - request: fetcher::RemoteReadChildRequest, + request: light::RemoteReadChildRequest, sender: oneshot::Sender, Option>>, ClientError>> }, Call { - request: fetcher::RemoteCallRequest, + request: light::RemoteCallRequest, sender: oneshot::Sender, ClientError>> }, Changes { - request: fetcher::RemoteChangesRequest, + request: light::RemoteChangesRequest, sender: oneshot::Sender, u32)>, ClientError>> } } @@ -283,7 +288,7 @@ pub struct LightClientHandler { /// Blockchain client. chain: Arc>, /// Verifies that received responses are correct. - checker: Arc>, + checker: Arc>, /// Peer information (addresses, their best block, etc.) peers: HashMap>, /// Futures sending back response to remote clients. 
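Each `Request` variant above carries a request payload together with a oneshot sender on which the handler eventually answers; the concrete payload types simply move from `sc_client::light::fetcher` to `sc_client_api::light`. A self-contained sketch of that request/response pattern, using only `futures` primitives and simplified types (none of these names are the handler's real API):

use futures::channel::oneshot;
use futures::executor::block_on;

// Simplified stand-in for the handler's Request enum: the reply travels back
// over the oneshot sender embedded in the request.
enum Request {
    Call { method: String, sender: oneshot::Sender<Result<Vec<u8>, String>> },
}

// The handler end: answer the request on its embedded sender.
fn handle(request: Request) {
    match request {
        Request::Call { method, sender } => {
            let _ = sender.send(Ok(format!("result of {}", method).into_bytes()));
        }
    }
}

fn main() {
    let (tx, rx) = oneshot::channel();
    handle(Request::Call { method: "state_call".into(), sender: tx });
    let reply = block_on(rx).expect("handler keeps the sender alive");
    assert!(reply.is_ok());
}
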
@@ -306,7 +311,7 @@ where pub fn new( cfg: Config, chain: Arc>, - checker: Arc>, + checker: Arc>, peerset: sc_peerset::PeersetHandle, ) -> Self { LightClientHandler { @@ -471,7 +476,7 @@ where } r }; - let reply = self.checker.check_changes_proof(&request, fetcher::ChangesProof { + let reply = self.checker.check_changes_proof(&request, light::ChangesProof { max_block, proof: response.proof, roots, @@ -712,7 +717,7 @@ where request.last, error); - fetcher::ChangesProof:: { + light::ChangesProof:: { max_block: Zero::zero(), proof: Vec::new(), roots: BTreeMap::new(), @@ -1289,6 +1294,7 @@ fn fmt_keys(first: Option<&Vec>, last: Option<&Vec>) -> String { #[cfg(test)] mod tests { + use super::*; use async_std::task; use assert_matches::assert_matches; use codec::Encode; @@ -1313,8 +1319,7 @@ mod tests { swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}, yamux }; - use sc_client_api::StorageProof; - use sc_client::light::fetcher; + use sc_client_api::{StorageProof, RemoteReadChildRequest, FetchChecker}; use sp_blockchain::{Error as ClientError}; use sp_core::storage::ChildInfo; use std::{ @@ -1358,12 +1363,12 @@ mod tests { _mark: std::marker::PhantomData } - impl fetcher::FetchChecker for DummyFetchChecker { + impl light::FetchChecker for DummyFetchChecker { fn check_header_proof( &self, - _request: &fetcher::RemoteHeaderRequest, + _request: &RemoteHeaderRequest, header: Option, - _remote_proof: fetcher::StorageProof, + _remote_proof: StorageProof, ) -> Result { match self.ok { true if header.is_some() => Ok(header.unwrap()), @@ -1373,8 +1378,8 @@ mod tests { fn check_read_proof( &self, - request: &fetcher::RemoteReadRequest, - _: fetcher::StorageProof, + request: &RemoteReadRequest, + _: StorageProof, ) -> Result, Option>>, ClientError> { match self.ok { true => Ok(request.keys @@ -1389,8 +1394,8 @@ mod tests { fn check_read_child_proof( &self, - request: &fetcher::RemoteReadChildRequest, - _: fetcher::StorageProof, + request: &RemoteReadChildRequest, + _: StorageProof, ) -> Result, Option>>, ClientError> { match self.ok { true => Ok(request.keys @@ -1405,8 +1410,8 @@ mod tests { fn check_execution_proof( &self, - _: &fetcher::RemoteCallRequest, - _: fetcher::StorageProof, + _: &RemoteCallRequest, + _: StorageProof, ) -> Result, ClientError> { match self.ok { true => Ok(vec![42]), @@ -1416,8 +1421,8 @@ mod tests { fn check_changes_proof( &self, - _: &fetcher::RemoteChangesRequest, - _: fetcher::ChangesProof + _: &RemoteChangesRequest, + _: ChangesProof ) -> Result, u32)>, ClientError> { match self.ok { true => Ok(vec![(100.into(), 2)]), @@ -1427,7 +1432,7 @@ mod tests { fn check_body_proof( &self, - _: &fetcher::RemoteBodyRequest, + _: &RemoteBodyRequest, body: Vec ) -> Result, ClientError> { match self.ok { @@ -1545,7 +1550,7 @@ mod tests { // Issue our first request! 
let chan = oneshot::channel(); - let request = fetcher::RemoteCallRequest { + let request = light::RemoteCallRequest { block: Default::default(), header: dummy_header(), method: "test".into(), @@ -1602,7 +1607,7 @@ mod tests { assert_eq!(1, behaviour.peers.len()); let chan = oneshot::channel(); - let request = fetcher::RemoteCallRequest { + let request = light::RemoteCallRequest { block: Default::default(), header: dummy_header(), method: "test".into(), @@ -1677,7 +1682,7 @@ mod tests { assert_eq!(1, behaviour.peers.len()); let chan = oneshot::channel(); - let request = fetcher::RemoteCallRequest { + let request = light::RemoteCallRequest { block: Default::default(), header: dummy_header(), method: "test".into(), @@ -1736,7 +1741,7 @@ mod tests { assert_eq!(4, behaviour.peers.len()); let mut chan = oneshot::channel(); - let request = fetcher::RemoteCallRequest { + let request = light::RemoteCallRequest { block: Default::default(), header: dummy_header(), method: "test".into(), @@ -1852,7 +1857,7 @@ mod tests { #[test] fn receives_remote_call_response() { let mut chan = oneshot::channel(); - let request = fetcher::RemoteCallRequest { + let request = light::RemoteCallRequest { block: Default::default(), header: dummy_header(), method: "test".into(), @@ -1866,7 +1871,7 @@ mod tests { #[test] fn receives_remote_read_response() { let mut chan = oneshot::channel(); - let request = fetcher::RemoteReadRequest { + let request = light::RemoteReadRequest { header: dummy_header(), block: Default::default(), keys: vec![b":key".to_vec()], @@ -1880,7 +1885,7 @@ mod tests { fn receives_remote_read_child_response() { let mut chan = oneshot::channel(); let child_info = ChildInfo::new_default(&b":child_storage:default:sub"[..]); - let request = fetcher::RemoteReadChildRequest { + let request = light::RemoteReadChildRequest { header: dummy_header(), block: Default::default(), storage_key: child_info.prefixed_storage_key(), @@ -1894,7 +1899,7 @@ mod tests { #[test] fn receives_remote_header_response() { let mut chan = oneshot::channel(); - let request = fetcher::RemoteHeaderRequest { + let request = light::RemoteHeaderRequest { cht_root: Default::default(), block: 1, retry_count: None, @@ -1906,7 +1911,7 @@ mod tests { #[test] fn receives_remote_changes_response() { let mut chan = oneshot::channel(); - let request = fetcher::RemoteChangesRequest { + let request = light::RemoteChangesRequest { changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { zero: (0, Default::default()), end: None, @@ -1951,7 +1956,7 @@ mod tests { #[test] fn send_receive_call() { let chan = oneshot::channel(); - let request = fetcher::RemoteCallRequest { + let request = light::RemoteCallRequest { block: Default::default(), header: dummy_header(), method: "test".into(), @@ -1966,7 +1971,7 @@ mod tests { #[test] fn send_receive_read() { let chan = oneshot::channel(); - let request = fetcher::RemoteReadRequest { + let request = light::RemoteReadRequest { header: dummy_header(), block: Default::default(), keys: vec![b":key".to_vec()], @@ -1981,7 +1986,7 @@ mod tests { fn send_receive_read_child() { let chan = oneshot::channel(); let child_info = ChildInfo::new_default(&b":child_storage:default:sub"[..]); - let request = fetcher::RemoteReadChildRequest { + let request = light::RemoteReadChildRequest { header: dummy_header(), block: Default::default(), storage_key: child_info.prefixed_storage_key(), @@ -1997,7 +2002,7 @@ mod tests { fn send_receive_header() { let _ = env_logger::try_init(); let chan = oneshot::channel(); - let 
request = fetcher::RemoteHeaderRequest { + let request = light::RemoteHeaderRequest { cht_root: Default::default(), block: 1, retry_count: None, @@ -2010,7 +2015,7 @@ mod tests { #[test] fn send_receive_changes() { let chan = oneshot::channel(); - let request = fetcher::RemoteChangesRequest { + let request = light::RemoteChangesRequest { changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { zero: (0, Default::default()), end: None, diff --git a/client/network/test/Cargo.toml b/client/network/test/Cargo.toml index 08d0e90871d78..154694c692adb 100644 --- a/client/network/test/Cargo.toml +++ b/client/network/test/Cargo.toml @@ -21,7 +21,7 @@ futures-timer = "3.0.1" rand = "0.7.2" libp2p = { version = "0.18.1", default-features = false, features = ["libp2p-websocket"] } sp-consensus = { version = "0.8.0-dev", path = "../../../primitives/consensus/common" } -sc-client = { version = "0.8.0-dev", path = "../../" } +sc-consensus = { version = "0.8.0-dev", path = "../../../client/consensus/common" } sc-client-api = { version = "2.0.0-dev", path = "../../api" } sp-blockchain = { version = "2.0.0-dev", path = "../../../primitives/blockchain" } sp-runtime = { version = "2.0.0-dev", path = "../../../primitives/runtime" } @@ -32,3 +32,4 @@ env_logger = "0.7.0" substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../../test-utils/runtime/client" } substrate-test-runtime = { version = "2.0.0-dev", path = "../../../test-utils/runtime" } tempfile = "3.1.0" +sc-service = { version = "0.8.0-dev", default-features = false, features = ["test-helpers"], path = "../../service" } diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 40b373163b5ff..5be5de9078ef1 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -27,12 +27,16 @@ use libp2p::build_multiaddr; use log::trace; use sc_network::config::FinalityProofProvider; use sp_blockchain::{ - Result as ClientResult, well_known_cache_keys::{self, Id as CacheKeyId}, Info as BlockchainInfo, + HeaderBackend, Result as ClientResult, + well_known_cache_keys::{self, Id as CacheKeyId}, + Info as BlockchainInfo, }; -use sc_client_api::{BlockchainEvents, BlockImportNotification, FinalityNotifications, ImportNotifications, FinalityNotification, backend::{TransactionFor, AuxStore, Backend, Finalizer}, BlockBackend}; +use sc_client_api::{ + BlockchainEvents, BlockImportNotification, FinalityNotifications, ImportNotifications, FinalityNotification, + backend::{TransactionFor, AuxStore, Backend, Finalizer}, BlockBackend, +}; +use sc_consensus::LongestChain; use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; -use sc_client::LongestChain; -use sc_client::blockchain::HeaderBackend; use sc_network::config::Role; use sp_consensus::block_validation::DefaultBlockAnnounceValidator; use sp_consensus::import_queue::{ @@ -52,7 +56,7 @@ use sp_runtime::generic::{BlockId, OpaqueDigestItemId}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; use sp_runtime::Justification; use substrate_test_runtime_client::{self, AccountKeyring}; - +use sc_service::client::Client; pub use sc_network::config::EmptyTransactionPool; pub use substrate_test_runtime_client::runtime::{Block, Extrinsic, Hash, Transfer}; pub use substrate_test_runtime_client::{TestClient, TestClientBuilder, TestClientBuilderExt}; @@ -88,10 +92,18 @@ impl Verifier for PassThroughVerifier { } } -pub type PeersFullClient = - sc_client::Client; -pub type PeersLightClient = - sc_client::Client; +pub type PeersFullClient 
= Client< + substrate_test_runtime_client::Backend, + substrate_test_runtime_client::Executor, + Block, + substrate_test_runtime_client::runtime::RuntimeApi +>; +pub type PeersLightClient = Client< + substrate_test_runtime_client::LightBackend, + substrate_test_runtime_client::LightExecutor, + Block, + substrate_test_runtime_client::runtime::RuntimeApi +>; #[derive(Clone)] pub enum PeersClient { diff --git a/client/rpc/Cargo.toml b/client/rpc/Cargo.toml index e6aff8a5f8f24..66f7cb50e6d12 100644 --- a/client/rpc/Cargo.toml +++ b/client/rpc/Cargo.toml @@ -14,7 +14,6 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sc-rpc-api = { version = "0.8.0-dev", path = "../rpc-api" } sc-client-api = { version = "2.0.0-dev", path = "../api" } -sc-client = { version = "0.8.0-dev", path = "../" } sp-api = { version = "2.0.0-dev", path = "../../primitives/api" } codec = { package = "parity-scale-codec", version = "1.3.0" } futures = { version = "0.3.1", features = ["compat"] } diff --git a/client/rpc/src/chain/chain_light.rs b/client/rpc/src/chain/chain_light.rs index b258c8dd3bc25..059233089d05d 100644 --- a/client/rpc/src/chain/chain_light.rs +++ b/client/rpc/src/chain/chain_light.rs @@ -21,9 +21,7 @@ use futures::{future::ready, FutureExt, TryFutureExt}; use rpc::futures::future::{result, Future, Either}; use sc_rpc_api::Subscriptions; -use sc_client::{ - light::{fetcher::{Fetcher, RemoteBodyRequest}, blockchain::RemoteBlockchain}, -}; +use sc_client_api::light::{Fetcher, RemoteBodyRequest, RemoteBlockchain}; use sp_runtime::{ generic::{BlockId, SignedBlock}, traits::{Block as BlockT}, @@ -80,7 +78,7 @@ impl ChainBackend for LightChain + Send>>; @@ -107,7 +108,7 @@ pub type TFullClient = Client< pub type TFullBackend = sc_client_db::Backend; /// Full client call executor type. -pub type TFullCallExecutor = sc_client::LocalCallExecutor< +pub type TFullCallExecutor = crate::client::LocalCallExecutor< sc_client_db::Backend, NativeExecutor, >; @@ -121,19 +122,19 @@ pub type TLightClient = Client< >; /// Light client backend type. -pub type TLightBackend = sc_client::light::backend::Backend< +pub type TLightBackend = crate::client::light::backend::Backend< sc_client_db::light::LightStorage, HashFor, >; /// Light call executor type. -pub type TLightCallExecutor = sc_client::light::call_executor::GenesisCallExecutor< - sc_client::light::backend::Backend< +pub type TLightCallExecutor = crate::client::light::call_executor::GenesisCallExecutor< + crate::client::light::backend::Backend< sc_client_db::light::LightStorage, HashFor >, - sc_client::LocalCallExecutor< - sc_client::light::backend::Backend< + crate::client::LocalCallExecutor< + crate::client::light::backend::Backend< sc_client_db::light::LightStorage, HashFor >, @@ -184,11 +185,11 @@ fn new_full_parts( ); let chain_spec = &config.chain_spec; - let fork_blocks = get_extension::>(chain_spec.extensions()) + let fork_blocks = get_extension::>(chain_spec.extensions()) .cloned() .unwrap_or_default(); - let bad_blocks = get_extension::>(chain_spec.extensions()) + let bad_blocks = get_extension::>(chain_spec.extensions()) .cloned() .unwrap_or_default(); @@ -206,7 +207,7 @@ fn new_full_parts( Some(keystore.clone()), ); - sc_client_db::new_client( + new_client( db_config, executor, chain_spec.as_storage_builder(), @@ -225,6 +226,52 @@ fn new_full_parts( Ok((client, backend, keystore, task_manager)) } + +/// Create an instance of db-backed client. 
+pub fn new_client( + settings: DatabaseSettings, + executor: E, + genesis_storage: &dyn BuildStorage, + fork_blocks: ForkBlocks, + bad_blocks: BadBlocks, + execution_extensions: ExecutionExtensions, + spawn_handle: Box, + prometheus_registry: Option, + config: ClientConfig, +) -> Result<( + crate::client::Client< + Backend, + crate::client::LocalCallExecutor, E>, + Block, + RA, + >, + Arc>, +), + sp_blockchain::Error, +> + where + Block: BlockT, + E: CodeExecutor + RuntimeInfo, +{ + const CANONICALIZATION_DELAY: u64 = 4096; + + let backend = Arc::new(Backend::new(settings, CANONICALIZATION_DELAY)?); + let executor = crate::client::LocalCallExecutor::new(backend.clone(), executor, spawn_handle, config.clone()); + Ok(( + crate::client::Client::new( + backend.clone(), + executor, + genesis_storage, + fork_blocks, + bad_blocks, + execution_extensions, + prometheus_registry, + config, + )?, + backend, + )) +} + impl ServiceBuilder<(), (), (), (), (), (), (), (), (), (), ()> { /// Start the service builder with a configuration. pub fn new_full( @@ -310,18 +357,18 @@ impl ServiceBuilder<(), (), (), (), (), (), (), (), (), (), ()> { }; sc_client_db::light::LightStorage::new(db_settings)? }; - let light_blockchain = sc_client::light::new_light_blockchain(db_storage); + let light_blockchain = crate::client::light::new_light_blockchain(db_storage); let fetch_checker = Arc::new( - sc_client::light::new_fetch_checker::<_, TBl, _>( + crate::client::light::new_fetch_checker::<_, TBl, _>( light_blockchain.clone(), executor.clone(), Box::new(task_manager.spawn_handle()), ), ); let fetcher = Arc::new(sc_network::config::OnDemand::new(fetch_checker)); - let backend = sc_client::light::new_light_backend(light_blockchain); + let backend = crate::client::light::new_light_backend(light_blockchain); let remote_blockchain = backend.remote_blockchain(); - let client = Arc::new(sc_client::light::new_light( + let client = Arc::new(crate::client::light::new_light( backend.clone(), config.chain_spec.as_storage_builder(), executor, @@ -591,7 +638,7 @@ impl sc_transaction_pool::txpool::Options, Arc, Option, - Option<&PrometheusRegistry>, + Option<&Registry>, ) -> Result<(UExPool, Option), Error> ) -> Result, Error> @@ -716,7 +763,7 @@ ServiceBuilder< TBl: BlockT, TRtApi: 'static + Send + Sync, TBackend: 'static + sc_client_api::backend::Backend + Send, - TExec: 'static + sc_client::CallExecutor + Send + Sync + Clone, + TExec: 'static + CallExecutor + Send + Sync + Clone, TSc: Clone, TImpQu: 'static + ImportQueue, TExPool: MaintainedTransactionPool::Hash> + MallocSizeOfWasm + 'static, diff --git a/client/service/src/chain_ops.rs b/client/service/src/chain_ops.rs index 12fae3224108a..59dbc8302c2e5 100644 --- a/client/service/src/chain_ops.rs +++ b/client/service/src/chain_ops.rs @@ -27,7 +27,7 @@ use sp_runtime::traits::{ }; use sp_runtime::generic::{BlockId, SignedBlock}; use codec::{Decode, Encode, IoReader}; -use sc_client::{Client, LocalCallExecutor}; +use crate::client::{Client, LocalCallExecutor}; use sp_consensus::{ BlockOrigin, import_queue::{IncomingBlock, Link, BlockImportError, BlockImportResult, ImportQueue}, diff --git a/client/src/block_rules.rs b/client/service/src/client/block_rules.rs similarity index 100% rename from client/src/block_rules.rs rename to client/service/src/client/block_rules.rs diff --git a/client/src/call_executor.rs b/client/service/src/client/call_executor.rs similarity index 99% rename from client/src/call_executor.rs rename to client/service/src/client/call_executor.rs index 
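Note that the angle-bracketed generic parameters of the re-added `new_client` above (and of several other signatures in this diff) are not visible in this rendering. A best-effort reading of the signature, with the lost parameters re-inserted as assumptions based on the removed sc-client-db version and the call site, is:

// Assumed reconstruction: the marked generic parameters are not shown in the hunk above
// and are filled in here as an educated guess.
pub fn new_client<E, Block, RA>(
    settings: DatabaseSettings,
    executor: E,
    genesis_storage: &dyn BuildStorage,
    fork_blocks: ForkBlocks<Block>,                     // assumed `<Block>`
    bad_blocks: BadBlocks<Block>,                       // assumed `<Block>`
    execution_extensions: ExecutionExtensions<Block>,   // assumed `<Block>`
    spawn_handle: Box<dyn CloneableSpawn>,              // assumed trait object
    prometheus_registry: Option<Registry>,              // assumed `Registry`
    config: ClientConfig,
) -> Result<
    (
        crate::client::Client<
            Backend<Block>,
            crate::client::LocalCallExecutor<Backend<Block>, E>,
            Block,
            RA,
        >,
        Arc<Backend<Block>>,
    ),
    sp_blockchain::Error,
>
where
    Block: BlockT,
    E: CodeExecutor + RuntimeInfo,
{
    // Body as in the hunk above: build the sc-client-db backend with the fixed
    // canonicalization delay, wrap it in a LocalCallExecutor, and return the client
    // together with the backend.
    const CANONICALIZATION_DELAY: u64 = 4096;

    let backend = Arc::new(Backend::new(settings, CANONICALIZATION_DELAY)?);
    let executor = crate::client::LocalCallExecutor::new(
        backend.clone(), executor, spawn_handle, config.clone(),
    );
    Ok((
        crate::client::Client::new(
            backend.clone(),
            executor,
            genesis_storage,
            fork_blocks,
            bad_blocks,
            execution_extensions,
            prometheus_registry,
            config,
        )?,
        backend,
    ))
}
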
88c5e204aa53e..229e7478e939f 100644 --- a/client/src/call_executor.rs +++ b/client/service/src/client/call_executor.rs @@ -28,7 +28,7 @@ use sp_externalities::Extensions; use sp_core::{NativeOrEncoded, NeverNativeValue, traits::CodeExecutor, offchain::storage::OffchainOverlayedChanges}; use sp_api::{ProofRecorder, InitializeBlock, StorageTransactionCache}; use sc_client_api::{backend, call_executor::CallExecutor, CloneableSpawn}; -use crate::client::ClientConfig; +use super::client::ClientConfig; /// Call executor that executes methods locally, querying all required /// data from local backend. diff --git a/client/src/client.rs b/client/service/src/client/client.rs similarity index 51% rename from client/src/client.rs rename to client/service/src/client/client.rs index f01c9176b8af5..9758bbe01e7ed 100644 --- a/client/src/client.rs +++ b/client/service/src/client/client.rs @@ -17,8 +17,9 @@ //! Substrate Client use std::{ - marker::PhantomData, collections::{HashSet, BTreeMap, HashMap}, sync::Arc, panic::UnwindSafe, - result, + marker::PhantomData, + collections::{HashSet, BTreeMap, HashMap}, + sync::Arc, panic::UnwindSafe, result, }; use log::{info, trace, warn}; use parking_lot::{Mutex, RwLock}; @@ -33,8 +34,8 @@ use sp_runtime::{ Justification, BuildStorage, generic::{BlockId, SignedBlock, DigestItem}, traits::{ - Block as BlockT, Header as HeaderT, Zero, NumberFor, HashFor, SaturatedConversion, One, - DigestFor, + Block as BlockT, Header as HeaderT, Zero, NumberFor, + HashFor, SaturatedConversion, One, DigestFor, }, }; use sp_state_machine::{ @@ -44,57 +45,51 @@ use sp_state_machine::{ }; use sc_executor::{RuntimeVersion, RuntimeInfo}; use sp_consensus::{ - Error as ConsensusError, BlockStatus, BlockImportParams, BlockCheckParams, ImportResult, - BlockOrigin, ForkChoiceStrategy, SelectChain, RecordProof, + Error as ConsensusError, BlockStatus, BlockImportParams, BlockCheckParams, + ImportResult, BlockOrigin, ForkChoiceStrategy, RecordProof, }; -use sp_blockchain::{self as blockchain, +use sp_blockchain::{ + self as blockchain, Backend as ChainBackend, HeaderBackend as ChainHeaderBackend, ProvideCache, Cache, well_known_cache_keys::Id as CacheKeyId, HeaderMetadata, CachedHeaderMetadata, }; use sp_trie::StorageProof; - use sp_api::{ CallApiAt, ConstructRuntimeApi, Core as CoreApi, ApiExt, ApiRef, ProvideRuntimeApi, CallApiAtParams, }; use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider}; - -pub use sc_client_api::{ - backend::{ - self, BlockImportOperation, PrunableStateChangesTrieStorage, - ClientImportOperation, Finalizer, ImportSummary, NewBlockState, - changes_tries_state_at_block, StorageProvider, - LockImportRun, - }, - client::{ - ImportNotifications, FinalityNotification, FinalityNotifications, BlockImportNotification, - ClientInfo, BlockchainEvents, BlockBackend, ProvideUncles, BadBlocks, ForkBlocks, - BlockOf, - }, - execution_extensions::{ExecutionExtensions, ExecutionStrategies}, - notifications::{StorageNotifications, StorageEventStream}, - CallExecutor, ExecutorProvider, ProofProvider, CloneableSpawn, -}; -use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; +use sc_client_api::{backend::{ + self, BlockImportOperation, PrunableStateChangesTrieStorage, + ClientImportOperation, Finalizer, ImportSummary, NewBlockState, + changes_tries_state_at_block, StorageProvider, + LockImportRun, apply_aux, +}, client::{ + ImportNotifications, FinalityNotification, FinalityNotifications, BlockImportNotification, + ClientInfo, BlockchainEvents, BlockBackend, 
ProvideUncles, BadBlocks, ForkBlocks, + BlockOf, +}, execution_extensions::ExecutionExtensions, notifications::{StorageNotifications, StorageEventStream}, KeyIterator, CallExecutor, ExecutorProvider, ProofProvider, CloneableSpawn, cht, in_mem, UsageProvider}; +use sp_utils::mpsc::tracing_unbounded; use sp_blockchain::Error; use prometheus_endpoint::Registry; - -use crate::{ - call_executor::LocalCallExecutor, +use super::{ + genesis, call_executor::LocalCallExecutor, light::{call_executor::prove_execution, fetcher::ChangesProof}, - in_mem, genesis, cht, block_rules::{BlockRules, LookupResult as BlockLookupResult}, + block_rules::{BlockRules, LookupResult as BlockLookupResult}, }; -use crate::client::backend::KeyIterator; +use futures::channel::mpsc; + +type NotificationSinks = Mutex>>; /// Substrate Client pub struct Client where Block: BlockT { backend: Arc, executor: E, storage_notifications: Mutex>, - import_notification_sinks: Mutex>>>, - finality_notification_sinks: Mutex>>>, + import_notification_sinks: NotificationSinks>, + finality_notification_sinks: NotificationSinks>, // holds the block hash currently being imported. TODO: replace this with block queue importing_block: RwLock>, block_rules: BlockRules, @@ -310,6 +305,18 @@ impl Client where }) } + /// returns a reference to the block import notification sinks + /// useful for test environments. + pub fn import_notification_sinks(&self) -> &NotificationSinks> { + &self.import_notification_sinks + } + + /// returns a reference to the finality notification sinks + /// useful for test environments. + pub fn finality_notification_sinks(&self) -> &NotificationSinks> { + &self.finality_notification_sinks + } + /// Get a reference to the state at a given block. pub fn state_at(&self, block: &BlockId) -> sp_blockchain::Result { self.backend.state_at(*block) @@ -1039,14 +1046,6 @@ impl Client where Ok(self.backend.revert(n, true)?) } - /// Get usage info about current client. - pub fn usage_info(&self) -> ClientInfo { - ClientInfo { - chain: self.chain_info(), - usage: self.backend.usage_info(), - } - } - /// Get blockchain info. pub fn chain_info(&self) -> blockchain::Info { self.backend.blockchain().info() @@ -1130,6 +1129,20 @@ impl Client where } } +impl UsageProvider for Client where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, +{ + /// Get usage info about current client. + fn usage_info(&self) -> ClientInfo { + ClientInfo { + chain: self.chain_info(), + usage: self.backend.usage_info(), + } + } +} + impl ProofProvider for Client where B: backend::Backend, E: CallExecutor, @@ -1827,82 +1840,6 @@ where } } -/// Implement Longest Chain Select implementation -/// where 'longest' is defined as the highest number of blocks -pub struct LongestChain { - backend: Arc, - _phantom: PhantomData -} - -impl Clone for LongestChain { - fn clone(&self) -> Self { - let backend = self.backend.clone(); - LongestChain { - backend, - _phantom: Default::default() - } - } -} - -impl LongestChain -where - B: backend::Backend, - Block: BlockT, -{ - /// Instantiate a new LongestChain for Backend B - pub fn new(backend: Arc) -> Self { - LongestChain { - backend, - _phantom: Default::default() - } - } - - fn best_block_header(&self) -> sp_blockchain::Result<::Header> { - let info = self.backend.blockchain().info(); - let import_lock = self.backend.get_import_lock(); - let best_hash = self.backend - .blockchain() - .best_containing(info.best_hash, None, import_lock)? 
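Two smaller API shifts happen in the hunks above: the notification sinks get a `NotificationSinks` alias (its generic parameter is lost in this rendering) plus public accessors intended for test environments, and `usage_info()` moves from an inherent method onto the `UsageProvider` trait, which is why the informant earlier in this diff imports `UsageProvider` alongside `BlockchainEvents`. A self-contained sketch of the sinks pattern with std types (a simplification; the client uses its own unbounded channel type):

use std::sync::Mutex;
use std::sync::mpsc::{channel, Sender};

// A lockable list of senders that import/finality events fan out to.
type NotificationSinks<T> = Mutex<Vec<Sender<T>>>;

fn notify<T: Clone>(sinks: &NotificationSinks<T>, event: T) {
    // Prune senders whose receivers have been dropped, mirroring the usual fan-out clean-up.
    sinks.lock().unwrap().retain(|sink| sink.send(event.clone()).is_ok());
}

fn main() {
    let sinks: NotificationSinks<u32> = Mutex::new(Vec::new());
    let (tx, rx) = channel();
    sinks.lock().unwrap().push(tx);
    notify(&sinks, 7);
    assert_eq!(rx.recv().unwrap(), 7);
}

And a hedged sketch of a downstream consumer of the trait-based `usage_info()` (the helper is illustrative, not part of this diff):

use sc_client_api::UsageProvider;
use sp_runtime::traits::Block as BlockT;

// Anything generic over UsageProvider keeps calling usage_info() as before.
fn report_best<Block: BlockT, C: UsageProvider<Block>>(client: &C) {
    let info = client.usage_info();
    log::info!("best block: #{:?}", info.chain.best_number);
}
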
- .unwrap_or(info.best_hash); - - Ok(self.backend.blockchain().header(BlockId::Hash(best_hash))? - .expect("given block hash was fetched from block in db; qed")) - } - - fn leaves(&self) -> Result::Hash>, sp_blockchain::Error> { - self.backend.blockchain().leaves() - } -} - -impl SelectChain for LongestChain -where - B: backend::Backend, - Block: BlockT, -{ - - fn leaves(&self) -> Result::Hash>, ConsensusError> { - LongestChain::leaves(self) - .map_err(|e| ConsensusError::ChainLookup(e.to_string()).into()) - } - - fn best_chain(&self) - -> Result<::Header, ConsensusError> - { - LongestChain::best_block_header(&self) - .map_err(|e| ConsensusError::ChainLookup(e.to_string()).into()) - } - - fn finality_target( - &self, - target_hash: Block::Hash, - maybe_max_number: Option> - ) -> Result, ConsensusError> { - let import_lock = self.backend.get_import_lock(); - self.backend.blockchain().best_containing(target_hash, maybe_max_number, import_lock) - .map_err(|e| ConsensusError::ChainLookup(e.to_string()).into()) - } -} - impl BlockBackend for Client where B: backend::Backend, @@ -2006,26 +1943,6 @@ impl backend::AuxStore for &Client } } - -/// Helper function to apply auxiliary data insertion into an operation. -pub fn apply_aux<'a, 'b: 'a, 'c: 'a, B, Block, D, I>( - operation: &mut ClientImportOperation, - insert: I, - delete: D, -) -> sp_blockchain::Result<()> -where - Block: BlockT, - B: backend::Backend, - I: IntoIterator, - D: IntoIterator, -{ - operation.op.insert_aux( - insert.into_iter() - .map(|(k, v)| (k.to_vec(), Some(v.to_vec()))) - .chain(delete.into_iter().map(|k| (k.to_vec(), None))) - ) -} - impl sp_consensus::block_validation::Chain for Client where BE: backend::Backend, E: CallExecutor, @@ -2038,1556 +1955,3 @@ impl sp_consensus::block_validation::Chain for Client) } } - -#[cfg(test)] -pub(crate) mod tests { - use std::collections::HashMap; - use super::*; - use sp_core::{blake2_256, H256}; - use sp_runtime::DigestItem; - use sp_consensus::{BlockOrigin, SelectChain, BlockImport}; - use substrate_test_runtime_client::{ - prelude::*, - client_ext::ClientExt, - sc_client_db::{Backend, DatabaseSettings, DatabaseSettingsSrc, PruningMode}, - runtime::{self, Block, Transfer, RuntimeApi, TestAPI}, - }; - use hex_literal::hex; - - /// Returns tuple, consisting of: - /// 1) test client pre-filled with blocks changing balances; - /// 2) roots of changes tries for these blocks - /// 3) test cases in form (begin, end, key, vec![(block, extrinsic)]) that are required to pass - pub fn prepare_client_with_key_changes() -> ( - substrate_test_runtime_client::sc_client::Client, - Vec, - Vec<(u64, u64, Vec, Vec<(u64, u32)>)>, - ) { - // prepare block structure - let blocks_transfers = vec![ - vec![(AccountKeyring::Alice, AccountKeyring::Dave), (AccountKeyring::Bob, AccountKeyring::Dave)], - vec![(AccountKeyring::Charlie, AccountKeyring::Eve)], - vec![], - vec![(AccountKeyring::Alice, AccountKeyring::Dave)], - ]; - - // prepare client ang import blocks - let mut local_roots = Vec::new(); - let config = Some(ChangesTrieConfiguration::new(4, 2)); - let mut remote_client = TestClientBuilder::new().changes_trie_config(config).build(); - let mut nonces: HashMap<_, u64> = Default::default(); - for (i, block_transfers) in blocks_transfers.into_iter().enumerate() { - let mut builder = remote_client.new_block(Default::default()).unwrap(); - for (from, to) in block_transfers { - builder.push_transfer(Transfer { - from: from.into(), - to: to.into(), - amount: 1, - nonce: *nonces.entry(from).and_modify(|n| { 
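`LongestChain` itself is moved, not rewritten: the code deleted here is what now lives in `sc-consensus` and is imported as `sc_consensus::LongestChain` elsewhere in this diff. A small sketch of the post-move construction site (the wrapper function is illustrative only):

use std::sync::Arc;
use sc_consensus::LongestChain;
use sp_runtime::traits::Block as BlockT;

// Illustrative wrapper: build the longest-chain SelectChain from an existing backend,
// importing it from sc-consensus rather than the removed sc-client.
fn longest_chain<B, Block>(backend: Arc<B>) -> LongestChain<B, Block>
where
    B: sc_client_api::backend::Backend<Block>,
    Block: BlockT,
{
    LongestChain::new(backend)
}
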
*n = *n + 1 }).or_default(), - }).unwrap(); - } - let block = builder.build().unwrap().block; - remote_client.import(BlockOrigin::Own, block).unwrap(); - - let header = remote_client.header(&BlockId::Number(i as u64 + 1)).unwrap().unwrap(); - let trie_root = header.digest().log(DigestItem::as_changes_trie_root) - .map(|root| H256::from_slice(root.as_ref())) - .unwrap(); - local_roots.push(trie_root); - } - - // prepare test cases - let alice = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Alice.into())).to_vec(); - let bob = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Bob.into())).to_vec(); - let charlie = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Charlie.into())).to_vec(); - let dave = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Dave.into())).to_vec(); - let eve = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Eve.into())).to_vec(); - let ferdie = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Ferdie.into())).to_vec(); - let test_cases = vec![ - (1, 4, alice.clone(), vec![(4, 0), (1, 0)]), - (1, 3, alice.clone(), vec![(1, 0)]), - (2, 4, alice.clone(), vec![(4, 0)]), - (2, 3, alice.clone(), vec![]), - - (1, 4, bob.clone(), vec![(1, 1)]), - (1, 1, bob.clone(), vec![(1, 1)]), - (2, 4, bob.clone(), vec![]), - - (1, 4, charlie.clone(), vec![(2, 0)]), - - (1, 4, dave.clone(), vec![(4, 0), (1, 1), (1, 0)]), - (1, 1, dave.clone(), vec![(1, 1), (1, 0)]), - (3, 4, dave.clone(), vec![(4, 0)]), - - (1, 4, eve.clone(), vec![(2, 0)]), - (1, 1, eve.clone(), vec![]), - (3, 4, eve.clone(), vec![]), - - (1, 4, ferdie.clone(), vec![]), - ]; - - (remote_client, local_roots, test_cases) - } - - #[test] - fn client_initializes_from_genesis_ok() { - let client = substrate_test_runtime_client::new(); - - assert_eq!( - client.runtime_api().balance_of( - &BlockId::Number(client.chain_info().best_number), - AccountKeyring::Alice.into() - ).unwrap(), - 1000 - ); - assert_eq!( - client.runtime_api().balance_of( - &BlockId::Number(client.chain_info().best_number), - AccountKeyring::Ferdie.into() - ).unwrap(), - 0 - ); - } - - #[test] - fn block_builder_works_with_no_transactions() { - let mut client = substrate_test_runtime_client::new(); - - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - - client.import(BlockOrigin::Own, block).unwrap(); - - assert_eq!(client.chain_info().best_number, 1); - } - - #[test] - fn block_builder_works_with_transactions() { - let mut client = substrate_test_runtime_client::new(); - - let mut builder = client.new_block(Default::default()).unwrap(); - - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 42, - nonce: 0, - }).unwrap(); - - let block = builder.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); - - assert_eq!(client.chain_info().best_number, 1); - assert_ne!( - client.state_at(&BlockId::Number(1)).unwrap().pairs(), - client.state_at(&BlockId::Number(0)).unwrap().pairs() - ); - assert_eq!( - client.runtime_api().balance_of( - &BlockId::Number(client.chain_info().best_number), - AccountKeyring::Alice.into() - ).unwrap(), - 958 - ); - assert_eq!( - client.runtime_api().balance_of( - &BlockId::Number(client.chain_info().best_number), - AccountKeyring::Ferdie.into() - ).unwrap(), - 42 - ); - } - - #[test] - fn block_builder_does_not_include_invalid() { - let mut client = substrate_test_runtime_client::new(); - - let mut builder = 
client.new_block(Default::default()).unwrap(); - - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 42, - nonce: 0, - }).unwrap(); - - assert!( - builder.push_transfer(Transfer { - from: AccountKeyring::Eve.into(), - to: AccountKeyring::Alice.into(), - amount: 42, - nonce: 0, - }).is_err() - ); - - let block = builder.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); - - assert_eq!(client.chain_info().best_number, 1); - assert_ne!( - client.state_at(&BlockId::Number(1)).unwrap().pairs(), - client.state_at(&BlockId::Number(0)).unwrap().pairs() - ); - assert_eq!(client.body(&BlockId::Number(1)).unwrap().unwrap().len(), 1) - } - - #[test] - fn best_containing_with_genesis_block() { - // block tree: - // G - - let (client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); - - let genesis_hash = client.chain_info().genesis_hash; - - assert_eq!( - genesis_hash.clone(), - longest_chain_select.finality_target(genesis_hash.clone(), None).unwrap().unwrap() - ); - } - - #[test] - fn best_containing_with_hash_not_found() { - // block tree: - // G - - let (client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); - - let uninserted_block = client.new_block(Default::default()).unwrap().build().unwrap().block; - - assert_eq!( - None, - longest_chain_select.finality_target(uninserted_block.hash().clone(), None).unwrap() - ); - } - - #[test] - fn uncles_with_only_ancestors() { - // block tree: - // G -> A1 -> A2 - let mut client = substrate_test_runtime_client::new(); - - // G -> A1 - let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); - - // A1 -> A2 - let a2 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); - let v: Vec = Vec::new(); - assert_eq!(v, client.uncles(a2.hash(), 3).unwrap()); - } - - #[test] - fn uncles_with_multiple_forks() { - // block tree: - // G -> A1 -> A2 -> A3 -> A4 -> A5 - // A1 -> B2 -> B3 -> B4 - // B2 -> C3 - // A1 -> D2 - let mut client = substrate_test_runtime_client::new(); - - // G -> A1 - let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); - - // A1 -> A2 - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); - - // A2 -> A3 - let a3 = client.new_block_at( - &BlockId::Hash(a2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a3.clone()).unwrap(); - - // A3 -> A4 - let a4 = client.new_block_at( - &BlockId::Hash(a3.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a4.clone()).unwrap(); - - // A4 -> A5 - let a5 = client.new_block_at( - &BlockId::Hash(a4.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a5.clone()).unwrap(); - - // A1 -> B2 - let mut builder = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); - // this push is required as otherwise B2 has the same hash as A2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 41, - nonce: 0, - 
}).unwrap(); - let b2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, b2.clone()).unwrap(); - - // B2 -> B3 - let b3 = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b3.clone()).unwrap(); - - // B3 -> B4 - let b4 = client.new_block_at( - &BlockId::Hash(b3.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b4.clone()).unwrap(); - - // // B2 -> C3 - let mut builder = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap(); - // this push is required as otherwise C3 has the same hash as B3 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 1, - }).unwrap(); - let c3 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, c3.clone()).unwrap(); - - // A1 -> D2 - let mut builder = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); - // this push is required as otherwise D2 has the same hash as B2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 0, - }).unwrap(); - let d2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, d2.clone()).unwrap(); - - let genesis_hash = client.chain_info().genesis_hash; - - let uncles1 = client.uncles(a4.hash(), 10).unwrap(); - assert_eq!(vec![b2.hash(), d2.hash()], uncles1); - - let uncles2 = client.uncles(a4.hash(), 0).unwrap(); - assert_eq!(0, uncles2.len()); - - let uncles3 = client.uncles(a1.hash(), 10).unwrap(); - assert_eq!(0, uncles3.len()); - - let uncles4 = client.uncles(genesis_hash, 10).unwrap(); - assert_eq!(0, uncles4.len()); - - let uncles5 = client.uncles(d2.hash(), 10).unwrap(); - assert_eq!(vec![a2.hash(), b2.hash()], uncles5); - - let uncles6 = client.uncles(b3.hash(), 1).unwrap(); - assert_eq!(vec![c3.hash()], uncles6); - } - - #[test] - fn best_containing_on_longest_chain_with_single_chain_3_blocks() { - // block tree: - // G -> A1 -> A2 - - let (mut client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); - - // G -> A1 - let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); - - // A1 -> A2 - let a2 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); - - let genesis_hash = client.chain_info().genesis_hash; - - assert_eq!(a2.hash(), longest_chain_select.finality_target(genesis_hash, None).unwrap().unwrap()); - assert_eq!(a2.hash(), longest_chain_select.finality_target(a1.hash(), None).unwrap().unwrap()); - assert_eq!(a2.hash(), longest_chain_select.finality_target(a2.hash(), None).unwrap().unwrap()); - } - - #[test] - fn best_containing_on_longest_chain_with_multiple_forks() { - // block tree: - // G -> A1 -> A2 -> A3 -> A4 -> A5 - // A1 -> B2 -> B3 -> B4 - // B2 -> C3 - // A1 -> D2 - let (mut client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); - - // G -> A1 - let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); - - // A1 -> A2 - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; 
- client.import(BlockOrigin::Own, a2.clone()).unwrap(); - - // A2 -> A3 - let a3 = client.new_block_at( - &BlockId::Hash(a2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a3.clone()).unwrap(); - - // A3 -> A4 - let a4 = client.new_block_at( - &BlockId::Hash(a3.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a4.clone()).unwrap(); - - // A4 -> A5 - let a5 = client.new_block_at( - &BlockId::Hash(a4.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a5.clone()).unwrap(); - - // A1 -> B2 - let mut builder = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); - // this push is required as otherwise B2 has the same hash as A2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 41, - nonce: 0, - }).unwrap(); - let b2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, b2.clone()).unwrap(); - - // B2 -> B3 - let b3 = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b3.clone()).unwrap(); - - // B3 -> B4 - let b4 = client.new_block_at( - &BlockId::Hash(b3.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b4.clone()).unwrap(); - - // // B2 -> C3 - let mut builder = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap(); - // this push is required as otherwise C3 has the same hash as B3 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 1, - }).unwrap(); - let c3 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, c3.clone()).unwrap(); - - // A1 -> D2 - let mut builder = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); - // this push is required as otherwise D2 has the same hash as B2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 0, - }).unwrap(); - let d2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, d2.clone()).unwrap(); - - assert_eq!(client.chain_info().best_hash, a5.hash()); - - let genesis_hash = client.chain_info().genesis_hash; - let leaves = longest_chain_select.leaves().unwrap(); - - assert!(leaves.contains(&a5.hash())); - assert!(leaves.contains(&b4.hash())); - assert!(leaves.contains(&c3.hash())); - assert!(leaves.contains(&d2.hash())); - assert_eq!(leaves.len(), 4); - - // search without restriction - - assert_eq!(a5.hash(), longest_chain_select.finality_target( - genesis_hash, None).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a1.hash(), None).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a2.hash(), None).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a3.hash(), None).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a4.hash(), None).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a5.hash(), None).unwrap().unwrap()); - - assert_eq!(b4.hash(), longest_chain_select.finality_target( - 
b2.hash(), None).unwrap().unwrap()); - assert_eq!(b4.hash(), longest_chain_select.finality_target( - b3.hash(), None).unwrap().unwrap()); - assert_eq!(b4.hash(), longest_chain_select.finality_target( - b4.hash(), None).unwrap().unwrap()); - - assert_eq!(c3.hash(), longest_chain_select.finality_target( - c3.hash(), None).unwrap().unwrap()); - - assert_eq!(d2.hash(), longest_chain_select.finality_target( - d2.hash(), None).unwrap().unwrap()); - - - // search only blocks with number <= 5. equivalent to without restriction for this scenario - - assert_eq!(a5.hash(), longest_chain_select.finality_target( - genesis_hash, Some(5)).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a1.hash(), Some(5)).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a2.hash(), Some(5)).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a3.hash(), Some(5)).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a4.hash(), Some(5)).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a5.hash(), Some(5)).unwrap().unwrap()); - - assert_eq!(b4.hash(), longest_chain_select.finality_target( - b2.hash(), Some(5)).unwrap().unwrap()); - assert_eq!(b4.hash(), longest_chain_select.finality_target( - b3.hash(), Some(5)).unwrap().unwrap()); - assert_eq!(b4.hash(), longest_chain_select.finality_target( - b4.hash(), Some(5)).unwrap().unwrap()); - - assert_eq!(c3.hash(), longest_chain_select.finality_target( - c3.hash(), Some(5)).unwrap().unwrap()); - - assert_eq!(d2.hash(), longest_chain_select.finality_target( - d2.hash(), Some(5)).unwrap().unwrap()); - - - // search only blocks with number <= 4 - - assert_eq!(a4.hash(), longest_chain_select.finality_target( - genesis_hash, Some(4)).unwrap().unwrap()); - assert_eq!(a4.hash(), longest_chain_select.finality_target( - a1.hash(), Some(4)).unwrap().unwrap()); - assert_eq!(a4.hash(), longest_chain_select.finality_target( - a2.hash(), Some(4)).unwrap().unwrap()); - assert_eq!(a4.hash(), longest_chain_select.finality_target( - a3.hash(), Some(4)).unwrap().unwrap()); - assert_eq!(a4.hash(), longest_chain_select.finality_target( - a4.hash(), Some(4)).unwrap().unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a5.hash(), Some(4)).unwrap()); - - assert_eq!(b4.hash(), longest_chain_select.finality_target( - b2.hash(), Some(4)).unwrap().unwrap()); - assert_eq!(b4.hash(), longest_chain_select.finality_target( - b3.hash(), Some(4)).unwrap().unwrap()); - assert_eq!(b4.hash(), longest_chain_select.finality_target( - b4.hash(), Some(4)).unwrap().unwrap()); - - assert_eq!(c3.hash(), longest_chain_select.finality_target( - c3.hash(), Some(4)).unwrap().unwrap()); - - assert_eq!(d2.hash(), longest_chain_select.finality_target( - d2.hash(), Some(4)).unwrap().unwrap()); - - - // search only blocks with number <= 3 - - assert_eq!(a3.hash(), longest_chain_select.finality_target( - genesis_hash, Some(3)).unwrap().unwrap()); - assert_eq!(a3.hash(), longest_chain_select.finality_target( - a1.hash(), Some(3)).unwrap().unwrap()); - assert_eq!(a3.hash(), longest_chain_select.finality_target( - a2.hash(), Some(3)).unwrap().unwrap()); - assert_eq!(a3.hash(), longest_chain_select.finality_target( - a3.hash(), Some(3)).unwrap().unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a4.hash(), Some(3)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a5.hash(), Some(3)).unwrap()); - - 
assert_eq!(b3.hash(), longest_chain_select.finality_target( - b2.hash(), Some(3)).unwrap().unwrap()); - assert_eq!(b3.hash(), longest_chain_select.finality_target( - b3.hash(), Some(3)).unwrap().unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - b4.hash(), Some(3)).unwrap()); - - assert_eq!(c3.hash(), longest_chain_select.finality_target( - c3.hash(), Some(3)).unwrap().unwrap()); - - assert_eq!(d2.hash(), longest_chain_select.finality_target( - d2.hash(), Some(3)).unwrap().unwrap()); - - - // search only blocks with number <= 2 - - assert_eq!(a2.hash(), longest_chain_select.finality_target( - genesis_hash, Some(2)).unwrap().unwrap()); - assert_eq!(a2.hash(), longest_chain_select.finality_target( - a1.hash(), Some(2)).unwrap().unwrap()); - assert_eq!(a2.hash(), longest_chain_select.finality_target( - a2.hash(), Some(2)).unwrap().unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a3.hash(), Some(2)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a4.hash(), Some(2)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a5.hash(), Some(2)).unwrap()); - - assert_eq!(b2.hash(), longest_chain_select.finality_target( - b2.hash(), Some(2)).unwrap().unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - b3.hash(), Some(2)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - b4.hash(), Some(2)).unwrap()); - - assert_eq!(None, longest_chain_select.finality_target( - c3.hash(), Some(2)).unwrap()); - - assert_eq!(d2.hash(), longest_chain_select.finality_target( - d2.hash(), Some(2)).unwrap().unwrap()); - - - // search only blocks with number <= 1 - - assert_eq!(a1.hash(), longest_chain_select.finality_target( - genesis_hash, Some(1)).unwrap().unwrap()); - assert_eq!(a1.hash(), longest_chain_select.finality_target( - a1.hash(), Some(1)).unwrap().unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a2.hash(), Some(1)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a3.hash(), Some(1)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a4.hash(), Some(1)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a5.hash(), Some(1)).unwrap()); - - assert_eq!(None, longest_chain_select.finality_target( - b2.hash(), Some(1)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - b3.hash(), Some(1)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - b4.hash(), Some(1)).unwrap()); - - assert_eq!(None, longest_chain_select.finality_target( - c3.hash(), Some(1)).unwrap()); - - assert_eq!(None, longest_chain_select.finality_target( - d2.hash(), Some(1)).unwrap()); - - // search only blocks with number <= 0 - - assert_eq!(genesis_hash, longest_chain_select.finality_target( - genesis_hash, Some(0)).unwrap().unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a1.hash(), Some(0)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a2.hash(), Some(0)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a3.hash(), Some(0)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a4.hash(), Some(0)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a5.hash(), Some(0)).unwrap()); - - assert_eq!(None, longest_chain_select.finality_target( - b2.hash(), Some(0)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - b3.hash(), Some(0)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - b4.hash(), 
Some(0)).unwrap()); - - assert_eq!(None, longest_chain_select.finality_target( - c3.hash().clone(), Some(0)).unwrap()); - - assert_eq!(None, longest_chain_select.finality_target( - d2.hash().clone(), Some(0)).unwrap()); - } - - #[test] - fn best_containing_on_longest_chain_with_max_depth_higher_than_best() { - // block tree: - // G -> A1 -> A2 - - let (mut client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); - - // G -> A1 - let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); - - // A1 -> A2 - let a2 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); - - let genesis_hash = client.chain_info().genesis_hash; - - assert_eq!(a2.hash(), longest_chain_select.finality_target(genesis_hash, Some(10)).unwrap().unwrap()); - } - - #[test] - fn key_changes_works() { - let (client, _, test_cases) = prepare_client_with_key_changes(); - - for (index, (begin, end, key, expected_result)) in test_cases.into_iter().enumerate() { - let end = client.block_hash(end).unwrap().unwrap(); - let actual_result = client.key_changes( - begin, - BlockId::Hash(end), - None, - &StorageKey(key), - ).unwrap(); - match actual_result == expected_result { - true => (), - false => panic!(format!("Failed test {}: actual = {:?}, expected = {:?}", - index, actual_result, expected_result)), - } - } - } - - #[test] - fn import_with_justification() { - let mut client = substrate_test_runtime_client::new(); - - // G -> A1 - let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); - - // A1 -> A2 - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); - - // A2 -> A3 - let justification = vec![1, 2, 3]; - let a3 = client.new_block_at( - &BlockId::Hash(a2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import_justified(BlockOrigin::Own, a3.clone(), justification.clone()).unwrap(); - - assert_eq!( - client.chain_info().finalized_hash, - a3.hash(), - ); - - assert_eq!( - client.justification(&BlockId::Hash(a3.hash())).unwrap(), - Some(justification), - ); - - assert_eq!( - client.justification(&BlockId::Hash(a1.hash())).unwrap(), - None, - ); - - assert_eq!( - client.justification(&BlockId::Hash(a2.hash())).unwrap(), - None, - ); - } - - #[test] - fn importing_diverged_finalized_block_should_trigger_reorg() { - let mut client = substrate_test_runtime_client::new(); - - // G -> A1 -> A2 - // \ - // -> B1 - let a1 = client.new_block_at( - &BlockId::Number(0), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); - - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); - - let mut b1 = client.new_block_at( - &BlockId::Number(0), - Default::default(), - false, - ).unwrap(); - // needed to make sure B1 gets a different hash from A1 - b1.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 0, - }).unwrap(); - // create but don't import B1 just yet - let b1 = b1.build().unwrap().block; - - // A2 is the current best since it's the longest chain - 
assert_eq!( - client.chain_info().best_hash, - a2.hash(), - ); - - // importing B1 as finalized should trigger a re-org and set it as new best - let justification = vec![1, 2, 3]; - client.import_justified(BlockOrigin::Own, b1.clone(), justification).unwrap(); - - assert_eq!( - client.chain_info().best_hash, - b1.hash(), - ); - - assert_eq!( - client.chain_info().finalized_hash, - b1.hash(), - ); - } - - #[test] - fn finalizing_diverged_block_should_trigger_reorg() { - - let (mut client, select_chain) = TestClientBuilder::new().build_with_longest_chain(); - - // G -> A1 -> A2 - // \ - // -> B1 -> B2 - let a1 = client.new_block_at( - &BlockId::Number(0), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); - - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); - - let mut b1 = client.new_block_at( - &BlockId::Number(0), - Default::default(), - false, - ).unwrap(); - // needed to make sure B1 gets a different hash from A1 - b1.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 0, - }).unwrap(); - let b1 = b1.build().unwrap().block; - client.import(BlockOrigin::Own, b1.clone()).unwrap(); - - let b2 = client.new_block_at( - &BlockId::Hash(b1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b2.clone()).unwrap(); - - // A2 is the current best since it's the longest chain - assert_eq!( - client.chain_info().best_hash, - a2.hash(), - ); - - // we finalize block B1 which is on a different branch from current best - // which should trigger a re-org. 
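// Finalizing B1 forces the head back onto the finalized fork: A2 can never be
// finalized once B1 is, so the client retracts the A-branch and B1 becomes both
// the finalized and the best block. `finalize_block` itself only knows about B1;
// it is the `SelectChain` instance queried further down that reports the longer
// B2 branch as the best chain again.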
- ClientExt::finalize_block(&client, BlockId::Hash(b1.hash()), None).unwrap(); - - // B1 should now be the latest finalized - assert_eq!( - client.chain_info().finalized_hash, - b1.hash(), - ); - - // and B1 should be the new best block (`finalize_block` as no way of - // knowing about B2) - assert_eq!( - client.chain_info().best_hash, - b1.hash(), - ); - - // `SelectChain` should report B2 as best block though - assert_eq!( - select_chain.best_chain().unwrap().hash(), - b2.hash(), - ); - - // after we build B3 on top of B2 and import it - // it should be the new best block, - let b3 = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b3.clone()).unwrap(); - - assert_eq!( - client.chain_info().best_hash, - b3.hash(), - ); - } - - #[test] - fn get_header_by_block_number_doesnt_panic() { - let client = substrate_test_runtime_client::new(); - - // backend uses u32 for block numbers, make sure we don't panic when - // trying to convert - let id = BlockId::::Number(72340207214430721); - client.header(&id).expect_err("invalid block number overflows u32"); - } - - #[test] - fn state_reverted_on_reorg() { - let _ = env_logger::try_init(); - let mut client = substrate_test_runtime_client::new(); - - let current_balance = |client: &substrate_test_runtime_client::TestClient| - client.runtime_api().balance_of( - &BlockId::number(client.chain_info().best_number), AccountKeyring::Alice.into() - ).unwrap(); - - // G -> A1 -> A2 - // \ - // -> B1 - let mut a1 = client.new_block_at( - &BlockId::Number(0), - Default::default(), - false, - ).unwrap(); - a1.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Bob.into(), - amount: 10, - nonce: 0, - }).unwrap(); - let a1 = a1.build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); - - let mut b1 = client.new_block_at( - &BlockId::Number(0), - Default::default(), - false, - ).unwrap(); - b1.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 50, - nonce: 0, - }).unwrap(); - let b1 = b1.build().unwrap().block; - // Reorg to B1 - client.import_as_best(BlockOrigin::Own, b1.clone()).unwrap(); - - assert_eq!(950, current_balance(&client)); - let mut a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); - a2.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Charlie.into(), - amount: 10, - nonce: 1, - }).unwrap(); - let a2 = a2.build().unwrap().block; - // Re-org to A2 - client.import_as_best(BlockOrigin::Own, a2).unwrap(); - assert_eq!(980, current_balance(&client)); - } - - #[test] - fn doesnt_import_blocks_that_revert_finality() { - let _ = env_logger::try_init(); - let tmp = tempfile::tempdir().unwrap(); - - // we need to run with archive pruning to avoid pruning non-canonical - // states - let backend = Arc::new(Backend::new( - DatabaseSettings { - state_cache_size: 1 << 20, - state_cache_child_ratio: None, - pruning: PruningMode::ArchiveAll, - source: DatabaseSettingsSrc::RocksDb { - path: tmp.path().into(), - cache_size: 128, - } - }, - u64::max_value(), - ).unwrap()); - - let mut client = TestClientBuilder::with_backend(backend).build(); - - // -> C1 - // / - // G -> A1 -> A2 - // \ - // -> B1 -> B2 -> B3 - - let a1 = client.new_block_at( - &BlockId::Number(0), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, 
a1.clone()).unwrap(); - - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); - - let mut b1 = client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); - - // needed to make sure B1 gets a different hash from A1 - b1.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 0, - }).unwrap(); - let b1 = b1.build().unwrap().block; - client.import(BlockOrigin::Own, b1.clone()).unwrap(); - - let b2 = client.new_block_at(&BlockId::Hash(b1.hash()), Default::default(), false) - .unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b2.clone()).unwrap(); - - // prepare B3 before we finalize A2, because otherwise we won't be able to - // read changes trie configuration after A2 is finalized - let b3 = client.new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) - .unwrap().build().unwrap().block; - - // we will finalize A2 which should make it impossible to import a new - // B3 at the same height but that doesn't include it - ClientExt::finalize_block(&client, BlockId::Hash(a2.hash()), None).unwrap(); - - let import_err = client.import(BlockOrigin::Own, b3).err().unwrap(); - let expected_err = ConsensusError::ClientImport( - sp_blockchain::Error::NotInFinalizedChain.to_string() - ); - - assert_eq!( - import_err.to_string(), - expected_err.to_string(), - ); - - // adding a C1 block which is lower than the last finalized should also - // fail (with a cheaper check that doesn't require checking ancestry). - let mut c1 = client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); - - // needed to make sure C1 gets a different hash from A1 and B1 - c1.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 2, - nonce: 0, - }).unwrap(); - let c1 = c1.build().unwrap().block; - - let import_err = client.import(BlockOrigin::Own, c1).err().unwrap(); - let expected_err = ConsensusError::ClientImport( - sp_blockchain::Error::NotInFinalizedChain.to_string() - ); - - assert_eq!( - import_err.to_string(), - expected_err.to_string(), - ); - } - - - #[test] - fn respects_block_rules() { - - fn run_test( - record_only: bool, - known_bad: &mut HashSet, - fork_rules: &mut Vec<(u64, H256)>, - ) { - let mut client = if record_only { - TestClientBuilder::new().build() - } else { - TestClientBuilder::new() - .set_block_rules( - Some(fork_rules.clone()), - Some(known_bad.clone()), - ) - .build() - }; - - let block_ok = client.new_block_at(&BlockId::Number(0), Default::default(), false) - .unwrap().build().unwrap().block; - - let params = BlockCheckParams { - hash: block_ok.hash().clone(), - number: 0, - parent_hash: block_ok.header().parent_hash().clone(), - allow_missing_state: false, - import_existing: false, - }; - assert_eq!(client.check_block(params).unwrap(), ImportResult::imported(false)); - - // this is 0x0d6d6612a10485370d9e085aeea7ec427fb3f34d961c6a816cdbe5cde2278864 - let mut block_not_ok = client.new_block_at(&BlockId::Number(0), Default::default(), false) - .unwrap(); - block_not_ok.push_storage_change(vec![0], Some(vec![1])).unwrap(); - let block_not_ok = block_not_ok.build().unwrap().block; - - let params = BlockCheckParams { - hash: block_not_ok.hash().clone(), - number: 0, - parent_hash: block_not_ok.header().parent_hash().clone(), - allow_missing_state: false, - import_existing: false, - }; - if 
record_only { - known_bad.insert(block_not_ok.hash()); - } else { - assert_eq!(client.check_block(params).unwrap(), ImportResult::KnownBad); - } - - // Now going to the fork - client.import_as_final(BlockOrigin::Own, block_ok).unwrap(); - - // And check good fork - let mut block_ok = client.new_block_at(&BlockId::Number(1), Default::default(), false) - .unwrap(); - block_ok.push_storage_change(vec![0], Some(vec![2])).unwrap(); - let block_ok = block_ok.build().unwrap().block; - - let params = BlockCheckParams { - hash: block_ok.hash().clone(), - number: 1, - parent_hash: block_ok.header().parent_hash().clone(), - allow_missing_state: false, - import_existing: false, - }; - if record_only { - fork_rules.push((1, block_ok.hash().clone())); - } - assert_eq!(client.check_block(params).unwrap(), ImportResult::imported(false)); - - // And now try bad fork - let mut block_not_ok = client.new_block_at(&BlockId::Number(1), Default::default(), false) - .unwrap(); - block_not_ok.push_storage_change(vec![0], Some(vec![3])).unwrap(); - let block_not_ok = block_not_ok.build().unwrap().block; - - let params = BlockCheckParams { - hash: block_not_ok.hash().clone(), - number: 1, - parent_hash: block_not_ok.header().parent_hash().clone(), - allow_missing_state: false, - import_existing: false, - }; - - if !record_only { - assert_eq!(client.check_block(params).unwrap(), ImportResult::KnownBad); - } - } - - let mut known_bad = HashSet::new(); - let mut fork_rules = Vec::new(); - - // records what bad_blocks and fork_blocks hashes should be - run_test(true, &mut known_bad, &mut fork_rules); - - // enforces rules and actually makes assertions - run_test(false, &mut known_bad, &mut fork_rules); - } - - #[test] - fn returns_status_for_pruned_blocks() { - let _ = env_logger::try_init(); - let tmp = tempfile::tempdir().unwrap(); - - // set to prune after 1 block - // states - let backend = Arc::new(Backend::new( - DatabaseSettings { - state_cache_size: 1 << 20, - state_cache_child_ratio: None, - pruning: PruningMode::keep_blocks(1), - source: DatabaseSettingsSrc::RocksDb { - path: tmp.path().into(), - cache_size: 128, - } - }, - u64::max_value(), - ).unwrap()); - - let mut client = TestClientBuilder::with_backend(backend).build(); - - let a1 = client.new_block_at(&BlockId::Number(0), Default::default(), false) - .unwrap().build().unwrap().block; - - let mut b1 = client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); - - // b1 is created, but not imported - b1.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 0, - }).unwrap(); - let b1 = b1.build().unwrap().block; - - let check_block_a1 = BlockCheckParams { - hash: a1.hash().clone(), - number: 0, - parent_hash: a1.header().parent_hash().clone(), - allow_missing_state: false, - import_existing: false, - }; - - assert_eq!(client.check_block(check_block_a1.clone()).unwrap(), ImportResult::imported(false)); - assert_eq!(client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), BlockStatus::Unknown); - - client.import_as_final(BlockOrigin::Own, a1.clone()).unwrap(); - - assert_eq!(client.check_block(check_block_a1.clone()).unwrap(), ImportResult::AlreadyInChain); - assert_eq!(client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), BlockStatus::InChainWithState); - - let a2 = client.new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) - .unwrap().build().unwrap().block; - client.import_as_final(BlockOrigin::Own, a2.clone()).unwrap(); - - let 
check_block_a2 = BlockCheckParams { - hash: a2.hash().clone(), - number: 1, - parent_hash: a1.header().parent_hash().clone(), - allow_missing_state: false, - import_existing: false, - }; - - assert_eq!(client.check_block(check_block_a1.clone()).unwrap(), ImportResult::AlreadyInChain); - assert_eq!(client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), BlockStatus::InChainPruned); - assert_eq!(client.check_block(check_block_a2.clone()).unwrap(), ImportResult::AlreadyInChain); - assert_eq!(client.block_status(&BlockId::hash(check_block_a2.hash)).unwrap(), BlockStatus::InChainWithState); - - let a3 = client.new_block_at(&BlockId::Hash(a2.hash()), Default::default(), false) - .unwrap().build().unwrap().block; - - client.import_as_final(BlockOrigin::Own, a3.clone()).unwrap(); - let check_block_a3 = BlockCheckParams { - hash: a3.hash().clone(), - number: 2, - parent_hash: a2.header().parent_hash().clone(), - allow_missing_state: false, - import_existing: false, - }; - - // a1 and a2 are both pruned at this point - assert_eq!(client.check_block(check_block_a1.clone()).unwrap(), ImportResult::AlreadyInChain); - assert_eq!(client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), BlockStatus::InChainPruned); - assert_eq!(client.check_block(check_block_a2.clone()).unwrap(), ImportResult::AlreadyInChain); - assert_eq!(client.block_status(&BlockId::hash(check_block_a2.hash)).unwrap(), BlockStatus::InChainPruned); - assert_eq!(client.check_block(check_block_a3.clone()).unwrap(), ImportResult::AlreadyInChain); - assert_eq!(client.block_status(&BlockId::hash(check_block_a3.hash)).unwrap(), BlockStatus::InChainWithState); - - let mut check_block_b1 = BlockCheckParams { - hash: b1.hash().clone(), - number: 0, - parent_hash: b1.header().parent_hash().clone(), - allow_missing_state: false, - import_existing: false, - }; - assert_eq!(client.check_block(check_block_b1.clone()).unwrap(), ImportResult::MissingState); - check_block_b1.allow_missing_state = true; - assert_eq!(client.check_block(check_block_b1.clone()).unwrap(), ImportResult::imported(false)); - check_block_b1.parent_hash = H256::random(); - assert_eq!(client.check_block(check_block_b1.clone()).unwrap(), ImportResult::UnknownParent); - } - - #[test] - fn imports_blocks_with_changes_tries_config_change() { - // create client with initial 4^2 configuration - let mut client = TestClientBuilder::with_default_backend() - .changes_trie_config(Some(ChangesTrieConfiguration { - digest_interval: 4, - digest_levels: 2, - })).build(); - - // =================================================================== - // blocks 1,2,3,4,5,6,7,8,9,10 are empty - // block 11 changes the key - // block 12 is the L1 digest that covers this change - // blocks 13,14,15,16,17,18,19,20,21,22 are empty - // block 23 changes the configuration to 5^1 AND is skewed digest - // =================================================================== - // blocks 24,25 are changing the key - // block 26 is empty - // block 27 changes the key - // block 28 is the L1 digest (NOT SKEWED!!!) 
that covers changes AND changes configuration to 3^1 - // =================================================================== - // block 29 is empty - // block 30 changes the key - // block 31 is L1 digest that covers this change - // =================================================================== - (1..11).for_each(|number| { - let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) - .unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); - }); - (11..12).for_each(|number| { - let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); - block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); - let block = block.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); - }); - (12..23).for_each(|number| { - let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) - .unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); - }); - (23..24).for_each(|number| { - let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); - block.push_changes_trie_configuration_update(Some(ChangesTrieConfiguration { - digest_interval: 5, - digest_levels: 1, - })).unwrap(); - let block = block.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); - }); - (24..26).for_each(|number| { - let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); - block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); - let block = block.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); - }); - (26..27).for_each(|number| { - let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) - .unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); - }); - (27..28).for_each(|number| { - let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); - block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); - let block = block.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); - }); - (28..29).for_each(|number| { - let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); - block.push_changes_trie_configuration_update(Some(ChangesTrieConfiguration { - digest_interval: 3, - digest_levels: 1, - })).unwrap(); - let block = block.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); - }); - (29..30).for_each(|number| { - let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) - .unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); - }); - (30..31).for_each(|number| { - let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); - block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); - let block = block.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); - }); - (31..32).for_each(|number| { - let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) - .unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); - }); - - // now check that configuration cache works - assert_eq!( - client.key_changes(1, BlockId::Number(31), None, 
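// For orientation, a sketch of the digest schedule the block-by-block comments
// above rely on (ignoring the offset introduced when a configuration is
// activated mid-chain): with `digest_interval = 4` and `digest_levels = 2`,
// every 4th block carries an L1 digest of the preceding blocks and every 16th
// block an L2 digest, which is why the change made at block 11 is expected to be
// reachable through the digest built at block 12.
fn digest_level(block: u64, digest_interval: u64, digest_levels: u32) -> u32 {
	(1..=digest_levels)
		.rev()
		.find(|level| block % digest_interval.pow(*level) == 0)
		.unwrap_or(0)
}
// digest_level(12, 4, 2) == 1, digest_level(16, 4, 2) == 2, digest_level(11, 4, 2) == 0.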
&StorageKey(vec![42])).unwrap(), - vec![(30, 0), (27, 0), (25, 0), (24, 0), (11, 0)] - ); - } - - #[test] - fn storage_keys_iter_prefix_and_start_key_works() { - let client = substrate_test_runtime_client::new(); - - let prefix = StorageKey(hex!("3a").to_vec()); - - let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), None) - .unwrap() - .map(|x| x.0) - .collect(); - assert_eq!(res, [hex!("3a636f6465").to_vec(), hex!("3a686561707061676573").to_vec()]); - - let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), Some(&StorageKey(hex!("3a636f6465").to_vec()))) - .unwrap() - .map(|x| x.0) - .collect(); - assert_eq!(res, [hex!("3a686561707061676573").to_vec()]); - - let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), Some(&StorageKey(hex!("3a686561707061676573").to_vec()))) - .unwrap() - .map(|x| x.0) - .collect(); - assert_eq!(res, Vec::>::new()); - } - - #[test] - fn storage_keys_iter_works() { - let client = substrate_test_runtime_client::new(); - - let prefix = StorageKey(hex!("").to_vec()); - - let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), None) - .unwrap() - .take(2) - .map(|x| x.0) - .collect(); - assert_eq!(res, [hex!("0befda6e1ca4ef40219d588a727f1271").to_vec(), hex!("3a636f6465").to_vec()]); - - let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), Some(&StorageKey(hex!("3a636f6465").to_vec()))) - .unwrap() - .take(3) - .map(|x| x.0) - .collect(); - assert_eq!(res, [ - hex!("3a686561707061676573").to_vec(), - hex!("6644b9b8bc315888ac8e41a7968dc2b4141a5403c58acdf70b7e8f7e07bf5081").to_vec(), - hex!("79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d").to_vec(), - ]); - - let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), Some(&StorageKey(hex!("79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d").to_vec()))) - .unwrap() - .take(1) - .map(|x| x.0) - .collect(); - assert_eq!(res, [hex!("cf722c0832b5231d35e29f319ff27389f5032bfc7bfc3ba5ed7839f2042fb99f").to_vec()]); - } - - #[test] - fn cleans_up_closed_notification_sinks_on_block_import() { - use substrate_test_runtime_client::GenesisInit; - - // NOTE: we need to build the client here instead of using the client - // provided by test_runtime_client otherwise we can't access the private - // `import_notification_sinks` and `finality_notification_sinks` fields. 
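// What the test below checks can be reduced to this sketch (illustrative names,
// not the actual client internals): on every block import the client drops
// notification senders whose receiving half has been closed.
use futures::channel::mpsc::{unbounded, UnboundedSender};

fn prune_closed_sinks<T>(sinks: &mut Vec<UnboundedSender<T>>) {
	sinks.retain(|sink| !sink.is_closed());
}

fn main() {
	let (keep, _alive_rx) = unbounded::<u32>();
	let (drop_me, dead_rx) = unbounded::<u32>();
	let mut sinks = vec![keep, drop_me];
	drop(dead_rx);                    // one subscriber went away
	prune_closed_sinks(&mut sinks);   // what block import is expected to do
	assert_eq!(sinks.len(), 1);
}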
- let mut client = - new_in_mem::< - _, - substrate_test_runtime_client::runtime::Block, - _, - substrate_test_runtime_client::runtime::RuntimeApi - >( - substrate_test_runtime_client::new_native_executor(), - &substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), - None, - None, - sp_core::tasks::executor(), - Default::default(), - ) - .unwrap(); - - type TestClient = Client< - in_mem::Backend, - LocalCallExecutor, sc_executor::NativeExecutor>, - substrate_test_runtime_client::runtime::Block, - substrate_test_runtime_client::runtime::RuntimeApi, - >; - - let import_notif1 = client.import_notification_stream(); - let import_notif2 = client.import_notification_stream(); - let finality_notif1 = client.finality_notification_stream(); - let finality_notif2 = client.finality_notification_stream(); - - // for some reason I can't seem to use `ClientBlockImportExt` - let bake_and_import_block = |client: &mut TestClient, origin| { - let block = client - .new_block(Default::default()) - .unwrap() - .build() - .unwrap() - .block; - - let (header, extrinsics) = block.deconstruct(); - let mut import = BlockImportParams::new(origin, header); - import.body = Some(extrinsics); - import.fork_choice = Some(ForkChoiceStrategy::LongestChain); - client.import_block(import, Default::default()).unwrap(); - }; - - // after importing a block we should still have 4 notification sinks - // (2 import + 2 finality) - bake_and_import_block(&mut client, BlockOrigin::Own); - assert_eq!(client.import_notification_sinks.lock().len(), 2); - assert_eq!(client.finality_notification_sinks.lock().len(), 2); - - // if we drop one import notification receiver and one finality - // notification receiver - drop(import_notif2); - drop(finality_notif2); - - // the sinks should be cleaned up after block import - bake_and_import_block(&mut client, BlockOrigin::Own); - assert_eq!(client.import_notification_sinks.lock().len(), 1); - assert_eq!(client.finality_notification_sinks.lock().len(), 1); - - // the same thing should happen if block import happens during initial - // sync - drop(import_notif1); - drop(finality_notif1); - - bake_and_import_block(&mut client, BlockOrigin::NetworkInitialSync); - assert_eq!(client.import_notification_sinks.lock().len(), 0); - assert_eq!(client.finality_notification_sinks.lock().len(), 0); - } -} diff --git a/client/service/src/client/genesis.rs b/client/service/src/client/genesis.rs new file mode 100644 index 0000000000000..41dbccc517390 --- /dev/null +++ b/client/service/src/client/genesis.rs @@ -0,0 +1,41 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Tool for creating the genesis block. + +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Hash as HashT, Zero}; + +/// Create a genesis block, given the initial storage. 
+pub fn construct_genesis_block< + Block: BlockT +> ( + state_root: Block::Hash +) -> Block { + let extrinsics_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( + Vec::new(), + ); + + Block::new( + <::Header as HeaderT>::new( + Zero::zero(), + extrinsics_root, + state_root, + Default::default(), + Default::default() + ), + Default::default() + ) +} diff --git a/client/src/light/backend.rs b/client/service/src/client/light/backend.rs similarity index 88% rename from client/src/light/backend.rs rename to client/service/src/client/light/backend.rs index 01e9854864062..78f3938aaa828 100644 --- a/client/src/light/backend.rs +++ b/client/service/src/client/light/backend.rs @@ -32,7 +32,6 @@ use sp_state_machine::{ }; use sp_runtime::{generic::BlockId, Justification, Storage}; use sp_runtime::traits::{Block as BlockT, NumberFor, Zero, Header, HashFor}; -use crate::in_mem::check_genesis_storage; use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sc_client_api::{ backend::{ @@ -43,9 +42,10 @@ use sc_client_api::{ HeaderBackend as BlockchainHeaderBackend, well_known_cache_keys, }, light::Storage as BlockchainStorage, + in_mem::check_genesis_storage, UsageInfo, }; -use crate::light::blockchain::Blockchain; +use super::blockchain::Blockchain; use hash_db::Hasher; const IN_MEMORY_EXPECT_PROOF: &str = "InMemory state backend has Void error type and always succeeds; qed"; @@ -251,7 +251,7 @@ where .unwrap_or(false) } - fn remote_blockchain(&self) -> Arc> { + fn remote_blockchain(&self) -> Arc> { self.blockchain.clone() } } @@ -513,53 +513,3 @@ impl StateBackend for GenesisOrUnavailableState } } } - -#[cfg(test)] -mod tests { - use substrate_test_runtime_client::{self, runtime::Block}; - use sc_client_api::backend::NewBlockState; - use crate::light::blockchain::tests::{DummyBlockchain, DummyStorage}; - use sp_runtime::traits::BlakeTwo256; - use super::*; - - #[test] - fn local_state_is_created_when_genesis_state_is_available() { - let def = Default::default(); - let header0 = substrate_test_runtime_client::runtime::Header::new(0, def, def, def, Default::default()); - - let backend: Backend<_, BlakeTwo256> = Backend::new( - Arc::new(DummyBlockchain::new(DummyStorage::new())), - ); - let mut op = backend.begin_operation().unwrap(); - op.set_block_data(header0, None, None, NewBlockState::Final).unwrap(); - op.reset_storage(Default::default()).unwrap(); - backend.commit_operation(op).unwrap(); - - match backend.state_at(BlockId::Number(0)).unwrap() { - GenesisOrUnavailableState::Genesis(_) => (), - _ => panic!("unexpected state"), - } - } - - #[test] - fn unavailable_state_is_created_when_genesis_state_is_unavailable() { - let backend: Backend<_, BlakeTwo256> = Backend::new( - Arc::new(DummyBlockchain::new(DummyStorage::new())), - ); - - match backend.state_at(BlockId::Number(0)).unwrap() { - GenesisOrUnavailableState::Unavailable => (), - _ => panic!("unexpected state"), - } - } - - #[test] - fn light_aux_store_is_updated_via_non_importing_op() { - let backend = Backend::new(Arc::new(DummyBlockchain::new(DummyStorage::new()))); - let mut op = ClientBackend::::begin_operation(&backend).unwrap(); - BlockImportOperation::::insert_aux(&mut op, vec![(vec![1], Some(vec![2]))]).unwrap(); - ClientBackend::::commit_operation(&backend, op).unwrap(); - - assert_eq!(AuxStore::get_aux(&backend, &[1]).unwrap(), Some(vec![2])); - } -} diff --git a/client/src/light/blockchain.rs b/client/service/src/client/light/blockchain.rs similarity index 54% rename from client/src/light/blockchain.rs 
rename to client/service/src/client/light/blockchain.rs index 756147c941b39..b6ccb4744b559 100644 --- a/client/src/light/blockchain.rs +++ b/client/service/src/client/light/blockchain.rs @@ -17,7 +17,6 @@ //! Light client blockchain backend. Only stores headers and justifications of recent //! blocks. CHT roots are stored for headers of ancient blocks. -use std::future::Future; use std::sync::Arc; use sp_runtime::{Justification, generic::BlockId}; @@ -38,10 +37,10 @@ pub use sc_client_api::{ }, light::{ RemoteBlockchain, LocalOrRemote, Storage - } + }, + cht, }; -use crate::cht; -use crate::light::fetcher::{Fetcher, RemoteHeaderRequest}; +use super::fetcher::RemoteHeaderRequest; /// Light client blockchain. pub struct Blockchain { @@ -173,154 +172,3 @@ impl RemoteBlockchain for Blockchain })) } } - -/// Returns future that resolves header either locally, or remotely. -pub fn future_header>( - blockchain: &dyn RemoteBlockchain, - fetcher: &F, - id: BlockId, -) -> impl Future, ClientError>> { - use futures::future::{ready, Either, FutureExt}; - - match blockchain.header(id) { - Ok(LocalOrRemote::Remote(request)) => Either::Left( - fetcher - .remote_header(request) - .then(|header| ready(header.map(Some))) - ), - Ok(LocalOrRemote::Unknown) => Either::Right(ready(Ok(None))), - Ok(LocalOrRemote::Local(local_header)) => Either::Right(ready(Ok(Some(local_header)))), - Err(err) => Either::Right(ready(Err(err))), - } -} - -#[cfg(test)] -pub mod tests { - use std::collections::HashMap; - use parking_lot::Mutex; - use substrate_test_runtime_client::runtime::{Hash, Block, Header}; - use sc_client_api::blockchain::Info; - use super::*; - - pub type DummyBlockchain = Blockchain; - - pub struct DummyStorage { - pub changes_tries_cht_roots: HashMap, - pub aux_store: Mutex, Vec>>, - } - - impl DummyStorage { - pub fn new() -> Self { - DummyStorage { - changes_tries_cht_roots: HashMap::new(), - aux_store: Mutex::new(HashMap::new()), - } - } - } - - impl BlockchainHeaderBackend for DummyStorage { - fn header(&self, _id: BlockId) -> ClientResult> { - Err(ClientError::Backend("Test error".into())) - } - - fn info(&self) -> Info { - panic!("Test error") - } - - fn status(&self, _id: BlockId) -> ClientResult { - Err(ClientError::Backend("Test error".into())) - } - - fn number(&self, hash: Hash) -> ClientResult>> { - if hash == Default::default() { - Ok(Some(Default::default())) - } else { - Err(ClientError::Backend("Test error".into())) - } - } - - fn hash(&self, number: u64) -> ClientResult> { - if number == 0 { - Ok(Some(Default::default())) - } else { - Err(ClientError::Backend("Test error".into())) - } - } - } - - impl HeaderMetadata for DummyStorage { - type Error = ClientError; - - fn header_metadata(&self, hash: Hash) -> Result, Self::Error> { - self.header(BlockId::hash(hash))?.map(|header| CachedHeaderMetadata::from(&header)) - .ok_or(ClientError::UnknownBlock("header not found".to_owned())) - } - fn insert_header_metadata(&self, _hash: Hash, _metadata: CachedHeaderMetadata) {} - fn remove_header_metadata(&self, _hash: Hash) {} - } - - impl AuxStore for DummyStorage { - fn insert_aux< - 'a, - 'b: 'a, - 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, _delete: D) -> ClientResult<()> { - for (k, v) in insert.into_iter() { - self.aux_store.lock().insert(k.to_vec(), v.to_vec()); - } - Ok(()) - } - - fn get_aux(&self, key: &[u8]) -> ClientResult>> { - Ok(self.aux_store.lock().get(key).cloned()) - } - } - - impl Storage for DummyStorage { - fn import_header( - &self, - _header: Header, - 
_cache: HashMap>, - _state: NewBlockState, - _aux_ops: Vec<(Vec, Option>)>, - ) -> ClientResult<()> { - Ok(()) - } - - fn set_head(&self, _block: BlockId) -> ClientResult<()> { - Err(ClientError::Backend("Test error".into())) - } - - fn finalize_header(&self, _block: BlockId) -> ClientResult<()> { - Err(ClientError::Backend("Test error".into())) - } - - fn last_finalized(&self) -> ClientResult { - Err(ClientError::Backend("Test error".into())) - } - - fn header_cht_root(&self, _cht_size: u64, _block: u64) -> ClientResult> { - Err(ClientError::Backend("Test error".into())) - } - - fn changes_trie_cht_root(&self, cht_size: u64, block: u64) -> ClientResult> { - cht::block_to_cht_number(cht_size, block) - .and_then(|cht_num| self.changes_tries_cht_roots.get(&cht_num)) - .cloned() - .ok_or_else(|| ClientError::Backend( - format!("Test error: CHT for block #{} not found", block) - ).into()) - .map(Some) - } - - fn cache(&self) -> Option>> { - None - } - - fn usage_info(&self) -> Option { - None - } - } -} diff --git a/client/src/light/call_executor.rs b/client/service/src/client/light/call_executor.rs similarity index 53% rename from client/src/light/call_executor.rs rename to client/service/src/client/light/call_executor.rs index 3a91f2dc23dac..54fcf8e8f7f4c 100644 --- a/client/src/light/call_executor.rs +++ b/client/service/src/client/light/call_executor.rs @@ -243,7 +243,11 @@ pub fn check_execution_proof( ) } -fn check_execution_proof_with_make_header Header>( +/// Check remote contextual execution proof using given backend and header factory. +/// +/// Method is executed using passed header as environment' current block. +/// Proof should include both environment preparation proof and method execution proof. +pub fn check_execution_proof_with_make_header( executor: &E, spawn_handle: Box, request: &RemoteCallRequest
<Header::Hash>
, @@ -251,10 +255,11 @@ fn check_execution_proof_with_make_header ClientResult> where - Header: HeaderT, E: CodeExecutor + Clone + 'static, H: Hasher, + Header: HeaderT, H::Out: Ord + codec::Codec + 'static, + MakeNextHeader: Fn(&Header) -> Header, { let local_state_root = request.header.state_root(); let root: H::Out = convert_hash(&local_state_root); @@ -290,232 +295,3 @@ fn check_execution_proof_with_make_header for DummyCallExecutor { - type Error = ClientError; - - type Backend = substrate_test_runtime_client::Backend; - - fn call( - &self, - _id: &BlockId, - _method: &str, - _call_data: &[u8], - _strategy: ExecutionStrategy, - _extensions: Option, - ) -> Result, ClientError> { - Ok(vec![42]) - } - - fn contextual_call< - 'a, - IB: Fn() -> ClientResult<()>, - EM: Fn( - Result, Self::Error>, - Result, Self::Error> - ) -> Result, Self::Error>, - R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - >( - &self, - _initialize_block_fn: IB, - _at: &BlockId, - _method: &str, - _call_data: &[u8], - _changes: &RefCell, - _offchain_changes: &RefCell, - _storage_transaction_cache: Option<&RefCell< - StorageTransactionCache< - Block, - >::State, - > - >>, - _initialize_block: InitializeBlock<'a, Block>, - _execution_manager: ExecutionManager, - _native_call: Option, - _proof_recorder: &Option>, - _extensions: Option, - ) -> ClientResult> where ExecutionManager: Clone { - unreachable!() - } - - fn runtime_version(&self, _id: &BlockId) -> Result { - unreachable!() - } - - fn prove_at_trie_state>>( - &self, - _trie_state: &sp_state_machine::TrieBackend>, - _overlay: &mut OverlayedChanges, - _method: &str, - _call_data: &[u8] - ) -> Result<(Vec, StorageProof), ClientError> { - unreachable!() - } - - fn native_runtime_version(&self) -> Option<&NativeVersion> { - unreachable!() - } - } - - fn local_executor() -> NativeExecutor { - NativeExecutor::new(WasmExecutionMethod::Interpreted, None, 8) - } - - #[test] - fn execution_proof_is_generated_and_checked() { - fn execute(remote_client: &TestClient, at: u64, method: &'static str) -> (Vec, Vec) { - let remote_block_id = BlockId::Number(at); - let remote_header = remote_client.header(&remote_block_id).unwrap().unwrap(); - - // 'fetch' execution proof from remote node - let (remote_result, remote_execution_proof) = remote_client.execution_proof( - &remote_block_id, - method, - &[] - ).unwrap(); - - // check remote execution proof locally - let local_result = check_execution_proof::<_, _, BlakeTwo256>( - &local_executor(), - tasks_executor(), - &RemoteCallRequest { - block: substrate_test_runtime_client::runtime::Hash::default(), - header: remote_header, - method: method.into(), - call_data: vec![], - retry_count: None, - }, - remote_execution_proof, - ).unwrap(); - - (remote_result, local_result) - } - - fn execute_with_proof_failure(remote_client: &TestClient, at: u64, method: &'static str) { - let remote_block_id = BlockId::Number(at); - let remote_header = remote_client.header(&remote_block_id).unwrap().unwrap(); - - // 'fetch' execution proof from remote node - let (_, remote_execution_proof) = remote_client.execution_proof( - &remote_block_id, - method, - &[] - ).unwrap(); - - // check remote execution proof locally - let execution_result = check_execution_proof_with_make_header::<_, _, BlakeTwo256, _>( - &local_executor(), - tasks_executor(), - &RemoteCallRequest { - block: substrate_test_runtime_client::runtime::Hash::default(), - header: remote_header, - method: method.into(), - call_data: vec![], - retry_count: None, - 
}, - remote_execution_proof, - |header|
::new( - at + 1, - Default::default(), - Default::default(), - header.hash(), - header.digest().clone(), // this makes next header wrong - ), - ); - match execution_result { - Err(sp_blockchain::Error::Execution(_)) => (), - _ => panic!("Unexpected execution result: {:?}", execution_result), - } - } - - // prepare remote client - let mut remote_client = substrate_test_runtime_client::new(); - for i in 1u32..3u32 { - let mut digest = Digest::default(); - digest.push(sp_runtime::generic::DigestItem::Other::(i.to_le_bytes().to_vec())); - remote_client.import_justified( - BlockOrigin::Own, - remote_client.new_block(digest).unwrap().build().unwrap().block, - Default::default(), - ).unwrap(); - } - - // check method that doesn't requires environment - let (remote, local) = execute(&remote_client, 0, "Core_version"); - assert_eq!(remote, local); - - let (remote, local) = execute(&remote_client, 2, "Core_version"); - assert_eq!(remote, local); - - // check method that requires environment - let (_, block) = execute(&remote_client, 0, "BlockBuilder_finalize_block"); - let local_block: Header = Decode::decode(&mut &block[..]).unwrap(); - assert_eq!(local_block.number, 1); - - let (_, block) = execute(&remote_client, 2, "BlockBuilder_finalize_block"); - let local_block: Header = Decode::decode(&mut &block[..]).unwrap(); - assert_eq!(local_block.number, 3); - - // check that proof check doesn't panic even if proof is incorrect AND no panic handler is set - execute_with_proof_failure(&remote_client, 2, "Core_version"); - - // check that proof check doesn't panic even if proof is incorrect AND panic handler is set - sp_panic_handler::set("TEST", "1.2.3"); - execute_with_proof_failure(&remote_client, 2, "Core_version"); - } - - #[test] - fn code_is_executed_at_genesis_only() { - let backend = Arc::new(InMemBackend::::new()); - let def = H256::default(); - let header0 = substrate_test_runtime_client::runtime::Header::new(0, def, def, def, Default::default()); - let hash0 = header0.hash(); - let header1 = substrate_test_runtime_client::runtime::Header::new(1, def, def, hash0, Default::default()); - let hash1 = header1.hash(); - backend.blockchain().insert(hash0, header0, None, None, NewBlockState::Final).unwrap(); - backend.blockchain().insert(hash1, header1, None, None, NewBlockState::Final).unwrap(); - - let genesis_executor = GenesisCallExecutor::new(backend, DummyCallExecutor); - assert_eq!( - genesis_executor.call( - &BlockId::Number(0), - "test_method", - &[], - ExecutionStrategy::NativeElseWasm, - None, - ).unwrap(), - vec![42], - ); - - let call_on_unavailable = genesis_executor.call( - &BlockId::Number(1), - "test_method", - &[], - ExecutionStrategy::NativeElseWasm, - None, - ); - - match call_on_unavailable { - Err(ClientError::NotAvailableOnLightClient) => (), - _ => unreachable!("unexpected result: {:?}", call_on_unavailable), - } - } -} diff --git a/client/service/src/client/light/fetcher.rs b/client/service/src/client/light/fetcher.rs new file mode 100644 index 0000000000000..ae64565a504d1 --- /dev/null +++ b/client/service/src/client/light/fetcher.rs @@ -0,0 +1,341 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Light client data fetcher. Fetches requested data from remote full nodes. + +use std::sync::Arc; +use std::collections::{BTreeMap, HashMap}; +use std::marker::PhantomData; + +use hash_db::{HashDB, Hasher, EMPTY_PREFIX}; +use codec::{Decode, Encode}; +use sp_core::{convert_hash, traits::CodeExecutor}; +use sp_core::storage::{ChildInfo, ChildType}; +use sp_runtime::traits::{ + Block as BlockT, Header as HeaderT, Hash, HashFor, NumberFor, + AtLeast32Bit, CheckedConversion, +}; +use sp_state_machine::{ + ChangesTrieRootsStorage, ChangesTrieAnchorBlockId, ChangesTrieConfigurationRange, + InMemoryChangesTrieStorage, TrieBackend, read_proof_check, key_changes_proof_check_with_db, + read_child_proof_check, CloneableSpawn, +}; +pub use sp_state_machine::StorageProof; +use sp_blockchain::{Error as ClientError, Result as ClientResult}; + +pub use sc_client_api::{ + light::{ + RemoteCallRequest, RemoteHeaderRequest, RemoteReadRequest, RemoteReadChildRequest, + RemoteChangesRequest, ChangesProof, RemoteBodyRequest, Fetcher, FetchChecker, + Storage as BlockchainStorage, + }, + cht, +}; +use super::blockchain::{Blockchain}; +use super::call_executor::check_execution_proof; + +/// Remote data checker. +pub struct LightDataChecker> { + blockchain: Arc>, + executor: E, + spawn_handle: Box, + _hasher: PhantomData<(B, H)>, +} + +impl> LightDataChecker { + /// Create new light data checker. + pub fn new(blockchain: Arc>, executor: E, spawn_handle: Box) -> Self { + Self { + blockchain, executor, spawn_handle, _hasher: PhantomData + } + } + + /// Check remote changes query proof assuming that CHT-s are of given size. + pub fn check_changes_proof_with_cht_size( + &self, + request: &RemoteChangesRequest, + remote_proof: ChangesProof, + cht_size: NumberFor, + ) -> ClientResult, u32)>> + where + H: Hasher, + H::Out: Ord + codec::Codec, + { + // since we need roots of all changes tries for the range begin..max + // => remote node can't use max block greater that one that we have passed + if remote_proof.max_block > request.max_block.0 || remote_proof.max_block < request.last_block.0 { + return Err(ClientError::ChangesTrieAccessFailed(format!( + "Invalid max_block used by the remote node: {}. Local: {}..{}..{}", + remote_proof.max_block, request.first_block.0, request.last_block.0, request.max_block.0, + )).into()); + } + + // check if remote node has responded with extra changes trie roots proofs + // all changes tries roots must be in range [request.first_block.0; request.tries_roots.0) + let is_extra_first_root = remote_proof.roots.keys().next() + .map(|first_root| *first_root < request.first_block.0 + || *first_root >= request.tries_roots.0) + .unwrap_or(false); + let is_extra_last_root = remote_proof.roots.keys().next_back() + .map(|last_root| *last_root >= request.tries_roots.0) + .unwrap_or(false); + if is_extra_first_root || is_extra_last_root { + return Err(ClientError::ChangesTrieAccessFailed(format!( + "Extra changes tries roots proofs provided by the remote node: [{:?}..{:?}]. 
Expected in range: [{}; {})", + remote_proof.roots.keys().next(), remote_proof.roots.keys().next_back(), + request.first_block.0, request.tries_roots.0, + )).into()); + } + + // if request has been composed when some required headers were already pruned + // => remote node has sent us CHT-based proof of required changes tries roots + // => check that this proof is correct before proceeding with changes proof + let remote_max_block = remote_proof.max_block; + let remote_roots = remote_proof.roots; + let remote_roots_proof = remote_proof.roots_proof; + let remote_proof = remote_proof.proof; + if !remote_roots.is_empty() { + self.check_changes_tries_proof( + cht_size, + &remote_roots, + remote_roots_proof, + )?; + } + + // and now check the key changes proof + get the changes + let mut result = Vec::new(); + let proof_storage = InMemoryChangesTrieStorage::with_proof(remote_proof); + for config_range in &request.changes_trie_configs { + let result_range = key_changes_proof_check_with_db::( + ChangesTrieConfigurationRange { + config: config_range.config.as_ref().ok_or(ClientError::ChangesTriesNotSupported)?, + zero: config_range.zero.0, + end: config_range.end.map(|(n, _)| n), + }, + &RootsStorage { + roots: (request.tries_roots.0, &request.tries_roots.2), + prev_roots: &remote_roots, + }, + &proof_storage, + request.first_block.0, + &ChangesTrieAnchorBlockId { + hash: convert_hash(&request.last_block.1), + number: request.last_block.0, + }, + remote_max_block, + request.storage_key.as_ref(), + &request.key) + .map_err(|err| ClientError::ChangesTrieAccessFailed(err))?; + result.extend(result_range); + } + + Ok(result) + } + + /// Check CHT-based proof for changes tries roots. + pub fn check_changes_tries_proof( + &self, + cht_size: NumberFor, + remote_roots: &BTreeMap, B::Hash>, + remote_roots_proof: StorageProof, + ) -> ClientResult<()> + where + H: Hasher, + H::Out: Ord + codec::Codec, + { + // all the checks are sharing the same storage + let storage = remote_roots_proof.into_memory_db(); + + // remote_roots.keys() are sorted => we can use this to group changes tries roots + // that are belongs to the same CHT + let blocks = remote_roots.keys().cloned(); + cht::for_each_cht_group::(cht_size, blocks, |mut storage, _, cht_blocks| { + // get local changes trie CHT root for given CHT + // it should be there, because it is never pruned AND request has been composed + // when required header has been pruned (=> replaced with CHT) + let first_block = cht_blocks.first().cloned() + .expect("for_each_cht_group never calls callback with empty groups"); + let local_cht_root = self.blockchain.storage().changes_trie_cht_root(cht_size, first_block)? 
+ .ok_or(ClientError::InvalidCHTProof)?; + + // check changes trie root for every block within CHT range + for block in cht_blocks { + // check if the proofs storage contains the root + // normally this happens in when the proving backend is created, but since + // we share the storage for multiple checks, do it here + let mut cht_root = H::Out::default(); + cht_root.as_mut().copy_from_slice(local_cht_root.as_ref()); + if !storage.contains(&cht_root, EMPTY_PREFIX) { + return Err(ClientError::InvalidCHTProof.into()); + } + + // check proof for single changes trie root + let proving_backend = TrieBackend::new(storage, cht_root); + let remote_changes_trie_root = remote_roots[&block]; + cht::check_proof_on_proving_backend::( + local_cht_root, + block, + remote_changes_trie_root, + &proving_backend, + )?; + + // and return the storage to use in following checks + storage = proving_backend.into_storage(); + } + + Ok(storage) + }, storage) + } +} + +impl FetchChecker for LightDataChecker + where + Block: BlockT, + E: CodeExecutor + Clone + 'static, + H: Hasher, + H::Out: Ord + codec::Codec + 'static, + S: BlockchainStorage, +{ + fn check_header_proof( + &self, + request: &RemoteHeaderRequest, + remote_header: Option, + remote_proof: StorageProof, + ) -> ClientResult { + let remote_header = remote_header.ok_or_else(|| + ClientError::from(ClientError::InvalidCHTProof))?; + let remote_header_hash = remote_header.hash(); + cht::check_proof::( + request.cht_root, + request.block, + remote_header_hash, + remote_proof, + ).map(|_| remote_header) + } + + fn check_read_proof( + &self, + request: &RemoteReadRequest, + remote_proof: StorageProof, + ) -> ClientResult, Option>>> { + read_proof_check::( + convert_hash(request.header.state_root()), + remote_proof, + request.keys.iter(), + ).map_err(Into::into) + } + + fn check_read_child_proof( + &self, + request: &RemoteReadChildRequest, + remote_proof: StorageProof, + ) -> ClientResult, Option>>> { + let child_info = match ChildType::from_prefixed_key(&request.storage_key) { + Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), + None => return Err("Invalid child type".into()), + }; + read_child_proof_check::( + convert_hash(request.header.state_root()), + remote_proof, + &child_info, + request.keys.iter(), + ).map_err(Into::into) + } + + fn check_execution_proof( + &self, + request: &RemoteCallRequest, + remote_proof: StorageProof, + ) -> ClientResult> { + check_execution_proof::<_, _, H>( + &self.executor, + self.spawn_handle.clone(), + request, + remote_proof, + ) + } + + fn check_changes_proof( + &self, + request: &RemoteChangesRequest, + remote_proof: ChangesProof + ) -> ClientResult, u32)>> { + self.check_changes_proof_with_cht_size(request, remote_proof, cht::size()) + } + + fn check_body_proof( + &self, + request: &RemoteBodyRequest, + body: Vec + ) -> ClientResult> { + // TODO: #2621 + let extrinsics_root = HashFor::::ordered_trie_root( + body.iter().map(Encode::encode).collect(), + ); + if *request.header.extrinsics_root() == extrinsics_root { + Ok(body) + } else { + Err(format!("RemoteBodyRequest: invalid extrinsics root expected: {} but got {}", + *request.header.extrinsics_root(), + extrinsics_root, + ).into()) + } + + } +} + +/// A view of BTreeMap as a changes trie roots storage. 
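// Two sources back this view: `prev_roots` holds the CHT-proven roots for blocks
// older than the first locally known root, while `roots.1` holds the roots that
// accompanied the request, indexed relative to the starting block `roots.0`;
// `root()` below chooses between them based on the requested block number.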
+struct RootsStorage<'a, Number: AtLeast32Bit, Hash: 'a> { + roots: (Number, &'a [Hash]), + prev_roots: &'a BTreeMap, +} + +impl<'a, H, Number, Hash> ChangesTrieRootsStorage for RootsStorage<'a, Number, Hash> + where + H: Hasher, + Number: std::fmt::Display + std::hash::Hash + Clone + AtLeast32Bit + Encode + Decode + Send + Sync + 'static, + Hash: 'a + Send + Sync + Clone + AsRef<[u8]>, +{ + fn build_anchor( + &self, + _hash: H::Out, + ) -> Result, String> { + Err("build_anchor is only called when building block".into()) + } + + fn root( + &self, + _anchor: &ChangesTrieAnchorBlockId, + block: Number, + ) -> Result, String> { + // we can't ask for roots from parallel forks here => ignore anchor + let root = if block < self.roots.0 { + self.prev_roots.get(&Number::unique_saturated_from(block)).cloned() + } else { + let index: Option = block.checked_sub(&self.roots.0).and_then(|index| index.checked_into()); + match index { + Some(index) => self.roots.1.get(index as usize).cloned(), + None => None, + } + }; + + Ok(root.map(|root| { + let mut hasher_root: H::Out = Default::default(); + hasher_root.as_mut().copy_from_slice(root.as_ref()); + hasher_root + })) + } +} diff --git a/client/src/light/mod.rs b/client/service/src/client/light/mod.rs similarity index 91% rename from client/src/light/mod.rs rename to client/service/src/client/light/mod.rs index 60bb87394f95a..9b3c3f5b29042 100644 --- a/client/src/light/mod.rs +++ b/client/service/src/client/light/mod.rs @@ -30,15 +30,15 @@ use sp_runtime::traits::{Block as BlockT, HashFor}; use sp_blockchain::Result as ClientResult; use prometheus_endpoint::Registry; -use crate::call_executor::LocalCallExecutor; -use crate::client::{Client,ClientConfig}; +use super::call_executor::LocalCallExecutor; +use super::client::{Client,ClientConfig}; use sc_client_api::{ light::Storage as BlockchainStorage, CloneableSpawn, }; -use crate::light::backend::Backend; -use crate::light::blockchain::Blockchain; -use crate::light::call_executor::GenesisCallExecutor; -use crate::light::fetcher::LightDataChecker; +use self::backend::Backend; +use self::blockchain::Blockchain; +use self::call_executor::GenesisCallExecutor; +use self::fetcher::LightDataChecker; /// Create an instance of light client blockchain backend. pub fn new_light_blockchain>(storage: S) -> Arc> { diff --git a/client/src/lib.rs b/client/service/src/client/mod.rs similarity index 52% rename from client/src/lib.rs rename to client/service/src/client/mod.rs index 97cd7df78c7a6..079b9d243f982 100644 --- a/client/src/lib.rs +++ b/client/service/src/client/mod.rs @@ -41,70 +41,14 @@ //! Additionally, the fourth generic parameter of the `Client` is a marker type representing //! the ways in which the runtime can interface with the outside. Any code that builds a `Client` //! is responsible for putting the right marker. -//! -//! ## Example -//! -//! ``` -//! use std::sync::Arc; -//! use sc_client::{Client, in_mem::Backend, LocalCallExecutor}; -//! use sp_runtime::Storage; -//! use sc_executor::{NativeExecutor, WasmExecutionMethod}; -//! -//! // In this example, we're using the `Block` and `RuntimeApi` types from the -//! // `substrate-test-runtime-client` crate. These types are automatically generated when -//! // compiling a runtime. In a typical use-case, these types would have been to be generated -//! // from your runtime. -//! use substrate_test_runtime_client::{LocalExecutor, runtime::Block, runtime::RuntimeApi}; -//! -//! let backend = Arc::new(Backend::::new()); -//! 
let client = Client::<_, _, _, RuntimeApi>::new( -//! backend.clone(), -//! LocalCallExecutor::new( -//! backend.clone(), -//! NativeExecutor::::new(WasmExecutionMethod::Interpreted, None, 8), -//! sp_core::tasks::executor(), -//! Default::default(), -//! ), -//! // This parameter provides the storage for the chain genesis. -//! &::default(), -//! Default::default(), -//! Default::default(), -//! Default::default(), -//! None, -//! Default::default(), -//! ); -//! ``` -//! - -#![warn(missing_docs)] -#![recursion_limit="128"] -pub mod cht; -pub mod in_mem; pub mod genesis; pub mod light; -pub mod leaves; mod call_executor; mod client; mod block_rules; -pub use sc_client_api::{ - blockchain, - blockchain::well_known_cache_keys, - blockchain::Info as ChainInfo, - notifications::{StorageEventStream, StorageChangeSet}, - call_executor::CallExecutor, - utils, -}; -pub use crate::{ +pub use self::{ call_executor::LocalCallExecutor, - client::{ - new_with_backend, - new_in_mem, - ImportNotifications, FinalityNotifications, BlockchainEvents, LockImportRun, - BlockImportNotification, Client, ClientConfig, ClientInfo, ExecutionStrategies, FinalityNotification, - LongestChain, BlockOf, ProvideUncles, BadBlocks, ForkBlocks, apply_aux, - }, - leaves::LeafSet, + client::{new_with_backend, new_in_mem, Client, ClientConfig}, }; -pub use sp_state_machine::{ExecutionStrategy, StorageProof, StateMachine}; diff --git a/client/service/src/config.rs b/client/service/src/config.rs index 4ebe7553949ab..4654158cb3616 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -16,11 +16,11 @@ //! Service configuration. -pub use sc_client::ExecutionStrategies; pub use sc_client_db::{Database, PruningMode, DatabaseSettingsSrc as DatabaseConfig}; pub use sc_network::Multiaddr; pub use sc_network::config::{ExtTransport, MultiaddrWithPeerId, NetworkConfiguration, Role, NodeKeyConfig}; pub use sc_executor::WasmExecutionMethod; +use sc_client_api::execution_extensions::ExecutionStrategies; use std::{future::Future, path::{PathBuf, Path}, pin::Pin, net::SocketAddr, sync::Arc}; pub use sc_transaction_pool::txpool::Options as TransactionPoolOptions; diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 56fee6b6d7423..8e8c9e1e37bb3 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -18,6 +18,7 @@ //! Manages communication between them. 
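// With this move, the construction shown in the doc example removed above is
// reached through `sc_service::client` instead of `sc_client` (the module is
// public only when `sc-service` is built with the `test-helpers` feature, per
// the cfg gate added below). A rough sketch mirroring the in-memory construction
// already used by the notification-sinks test earlier in this diff; all helper
// names come from `substrate-test-runtime-client`:
use sc_service::client::new_in_mem;
use substrate_test_runtime_client::{
	runtime::{Block, RuntimeApi},
	GenesisInit, GenesisParameters, new_native_executor,
};

fn build_in_mem_client() {
	let _client = new_in_mem::<_, Block, _, RuntimeApi>(
		new_native_executor(),
		&GenesisParameters::default().genesis_storage(),
		None,
		None,
		sp_core::tasks::executor(),
		Default::default(),
	).expect("constructing an in-memory test client should not fail");
}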
#![warn(missing_docs)] +#![recursion_limit="128"] pub mod config; #[macro_use] @@ -26,6 +27,10 @@ pub mod error; mod metrics; mod builder; +#[cfg(feature = "test-helpers")] +pub mod client; +#[cfg(not(feature = "test-helpers"))] +mod client; mod status_sinks; mod task_manager; @@ -38,7 +43,7 @@ use wasm_timer::Instant; use std::task::{Poll, Context}; use parking_lot::Mutex; -use sc_client::Client; +use client::Client; use futures::{ Future, FutureExt, Stream, StreamExt, compat::*, @@ -49,13 +54,13 @@ use sc_network::{NetworkService, network_state::NetworkState, PeerId, ReportHand use log::{log, warn, debug, error, Level}; use codec::{Encode, Decode}; use sp_runtime::generic::BlockId; -use sp_runtime::traits::{NumberFor, Block as BlockT}; +use sp_runtime::traits::{NumberFor, Block as BlockT, BlockIdTo}; use parity_util_mem::MallocSizeOf; use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; pub use self::error::Error; pub use self::builder::{ - new_full_client, + new_full_client, new_client, ServiceBuilder, ServiceBuilderCommand, TFullClient, TLightClient, TFullBackend, TLightBackend, TFullCallExecutor, TLightCallExecutor, }; @@ -66,7 +71,6 @@ pub use sc_chain_spec::{ }; pub use sp_transaction_pool::{TransactionPool, InPoolTransaction, error::IntoPoolError}; pub use sc_transaction_pool::txpool::Options as TransactionPoolOptions; -pub use sc_client::FinalityNotifications; pub use sc_rpc::Metadata as RpcMetadata; pub use sc_executor::NativeExecutionDispatch; #[doc(hidden)] @@ -76,6 +80,17 @@ pub use sc_network::config::{FinalityProofProvider, OnDemand, BoxFinalityProofRe pub use sc_tracing::TracingReceiver; pub use task_manager::SpawnTaskHandle; use task_manager::TaskManager; +use sp_blockchain::{HeaderBackend, HeaderMetadata, ProvideCache}; +use sp_api::{ProvideRuntimeApi, CallApiAt, ApiExt, ConstructRuntimeApi, ApiErrorExt}; +use sc_client_api::{ + LockImportRun, Backend as BackendT, ProofProvider, ProvideUncles, + StorageProvider, ExecutorProvider, Finalizer, AuxStore, Backend, + BlockBackend, BlockchainEvents, CallExecutor, TransactionFor, + UsageProvider, +}; +use sc_block_builder::BlockBuilderProvider; +use sp_consensus::{block_validation::Chain, BlockImport}; +use sp_block_builder::BlockBuilder; const DEFAULT_PROTOCOL_ID: &str = "sup"; @@ -116,21 +131,104 @@ pub struct Service { impl Unpin for Service {} +/// Client super trait, use this instead of the concrete Client type. 
+pub trait ClientProvider< + Block: BlockT, + Backend: BackendT, + Executor: CallExecutor, + Runtime: ConstructRuntimeApi, +>: + HeaderBackend + + ProvideRuntimeApi< + Block, + Api = >::RuntimeApi + > + + LockImportRun + + ProofProvider + + BlockBuilderProvider + + ProvideUncles + + StorageProvider + + Chain + + HeaderMetadata + + ExecutorProvider + + ProvideCache + + BlockIdTo + + CallApiAt< + Block, + Error = sp_blockchain::Error, + StateBackend = >::State + > + + BlockImport< + Block, + Error = sp_consensus::Error, + Transaction = TransactionFor + > + + Finalizer + + BlockchainEvents + + BlockBackend + + UsageProvider + + AuxStore +{} + +impl ClientProvider + for + Client + where + Block: BlockT, + Backend: BackendT, + Executor: CallExecutor, + Runtime: ConstructRuntimeApi, + Self: HeaderBackend + + ProvideRuntimeApi< + Block, + Api = >::RuntimeApi + > + + LockImportRun + + ProofProvider + + BlockBuilderProvider + + ProvideUncles + + StorageProvider + + Chain + + HeaderMetadata + + ExecutorProvider + + ProvideCache + + BlockIdTo + + CallApiAt< + Block, + Error = sp_blockchain::Error, + StateBackend = >::State + > + + BlockImport< + Block, + Error = sp_consensus::Error, + Transaction = TransactionFor + > + + Finalizer + + BlockchainEvents + + BlockBackend + + UsageProvider + + AuxStore +{} + /// Abstraction over a Substrate service. -pub trait AbstractService: 'static + Future> + - Spawn + Send + Unpin { +pub trait AbstractService: Future> + Send + Unpin + Spawn + 'static { /// Type of block of this chain. type Block: BlockT; /// Backend storage for the client. - type Backend: 'static + sc_client_api::backend::Backend; + type Backend: 'static + BackendT; /// How to execute calls towards the runtime. - type CallExecutor: 'static + sc_client::CallExecutor + Send + Sync + Clone; + type CallExecutor: 'static + CallExecutor + Send + Sync + Clone; /// API that the runtime provides. type RuntimeApi: Send + Sync; /// Chain selection algorithm. type SelectChain: sp_consensus::SelectChain; /// Transaction pool. type TransactionPool: TransactionPool + MallocSizeOfWasm; + /// The generic Client type, the bounds here are the ones specifically required by + /// internal crates like sc_informant. + type Client: + HeaderMetadata + UsageProvider + + BlockchainEvents + HeaderBackend + Send + Sync; /// Get event stream for telemetry connection established events. fn telemetry_on_connect_stream(&self) -> TracingUnboundedReceiver<()>; @@ -170,7 +268,7 @@ pub trait AbstractService: 'static + Future> + fn rpc_query(&self, mem: &RpcSession, request: &str) -> Pin> + Send>>; /// Get shared client instance. - fn client(&self) -> Arc>; + fn client(&self) -> Arc; /// Get clone of select chain. 
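// Editor's note (illustrative sketch, not part of this patch): the new
// `ClientProvider` super trait and the `AbstractService::Client` associated
// type mean downstream helpers no longer need to spell out the concrete
// `Client<Backend, Executor, Block, Runtime>` type. A status printer in the
// style of `sc-informant` only needs the bounds the associated type promises;
// the function below is hypothetical but uses only those traits.
use sc_client_api::{BlockchainEvents, UsageProvider};
use sp_blockchain::HeaderBackend;
use sp_runtime::traits::Block as BlockT;
use std::sync::Arc;

fn log_best_block<B, C>(client: &Arc<C>)
where
    B: BlockT,
    C: HeaderBackend<B> + UsageProvider<B> + BlockchainEvents<B> + Send + Sync,
{
    // `HeaderBackend::info()` is enough for a "best block" line; the extra
    // bounds mirror the associated type, so `service.client()` satisfies them.
    let info = client.info();
    log::info!("best block: #{:?} ({:?})", info.best_number, info.best_hash);
}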
fn select_chain(&self) -> Option; @@ -198,9 +296,14 @@ impl AbstractService for NetworkService, TExPool, TOc> where TBl: BlockT, - TBackend: 'static + sc_client_api::backend::Backend, - TExec: 'static + sc_client::CallExecutor + Send + Sync + Clone, - TRtApi: 'static + Send + Sync, + TBackend: 'static + Backend, + TExec: 'static + CallExecutor + Send + Sync + Clone, + TRtApi: 'static + Send + Sync + ConstructRuntimeApi>, + >>::RuntimeApi: + sp_api::Core + + ApiExt + + ApiErrorExt + + BlockBuilder, TSc: sp_consensus::SelectChain + 'static + Clone + Send + Unpin, TExPool: 'static + TransactionPool + MallocSizeOfWasm, TOc: 'static + Send + Sync, @@ -211,6 +314,7 @@ where type RuntimeApi = TRtApi; type SelectChain = TSc; type TransactionPool = TExPool; + type Client = Client; fn telemetry_on_connect_stream(&self) -> TracingUnboundedReceiver<()> { let (sink, stream) = tracing_unbounded("mpsc_telemetry_on_connect"); @@ -254,7 +358,7 @@ where ) } - fn client(&self) -> Arc> { + fn client(&self) -> Arc { self.client.clone() } @@ -326,7 +430,7 @@ impl Spawn for /// The `status_sink` contain a list of senders to send a periodic network status to. fn build_network_future< B: BlockT, - C: sc_client::BlockchainEvents, + C: BlockchainEvents, H: sc_network::ExHashT > ( role: Role, diff --git a/client/service/src/metrics.rs b/client/service/src/metrics.rs index 3e4abced89c74..6456f9b1ee0be 100644 --- a/client/service/src/metrics.rs +++ b/client/service/src/metrics.rs @@ -18,11 +18,11 @@ use std::convert::TryFrom; use crate::NetworkStatus; use prometheus_endpoint::{register, Gauge, U64, F64, Registry, PrometheusError, Opts, GaugeVec}; -use sc_client::ClientInfo; use sc_telemetry::{telemetry, SUBSTRATE_INFO}; use sp_runtime::traits::{NumberFor, Block, SaturatedConversion, UniqueSaturatedInto}; use sp_transaction_pool::PoolStatus; use sp_utils::metrics::register_globals; +use sc_client_api::ClientInfo; use sysinfo::{self, ProcessExt, SystemExt}; diff --git a/client/service/test/Cargo.toml b/client/service/test/Cargo.toml index 957c3327b772a..0a270a8eac557 100644 --- a/client/service/test/Cargo.toml +++ b/client/service/test/Cargo.toml @@ -8,21 +8,36 @@ publish = false homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - [dependencies] +hex-literal = "0.2.1" tempfile = "3.1.0" tokio = "0.1.22" futures01 = { package = "futures", version = "0.1.29" } log = "0.4.8" env_logger = "0.7.0" fdlimit = "0.1.4" +parking_lot = "0.10.0" +sp-blockchain = { version = "2.0.0-dev", path = "../../../primitives/blockchain" } +sp-api = { version = "2.0.0-dev", path = "../../../primitives/api" } +sp-state-machine = { version = "0.8.0-dev", path = "../../../primitives/state-machine" } +sp-externalities = { version = "0.8.0-dev", path = "../../../primitives/externalities" } +sp-trie = { version = "2.0.0-dev", path = "../../../primitives/trie" } +sp-storage = { version = "2.0.0-dev", path = "../../../primitives/storage" } +sc-client-db = { version = "0.8.0-dev", default-features = false, path = "../../db" } futures = { version = "0.3.1", features = ["compat"] } -sc-service = { version = "0.8.0-dev", default-features = false, path = "../../service" } +sc-service = { version = "0.8.0-dev", default-features = false, features = ["test-helpers"], path = "../../service" } sc-network = { version = "0.8.0-dev", path = "../../network" } sp-consensus = { version = "0.8.0-dev", path = "../../../primitives/consensus/common" } -sc-client = 
{ version = "0.8.0-dev", path = "../../" } sp-runtime = { version = "2.0.0-dev", path = "../../../primitives/runtime" } sp-core = { version = "2.0.0-dev", path = "../../../primitives/core" } sp-transaction-pool = { version = "2.0.0-dev", path = "../../../primitives/transaction-pool" } +substrate-test-runtime = { version = "2.0.0-dev", path = "../../../test-utils/runtime" } +substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../../test-utils/runtime/client" } +sc-client-api = { version = "2.0.0-dev", path = "../../api" } +sc-block-builder = { version = "0.8.0-dev", path = "../../block-builder" } +sc-executor = { version = "0.8.0-dev", path = "../../executor" } +sp-panic-handler = { version = "2.0.0-dev", path = "../../../primitives/panic-handler" } +parity-scale-codec = "1.3.0" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/service/test/src/client/db.rs b/client/service/test/src/client/db.rs new file mode 100644 index 0000000000000..bc175652c9f79 --- /dev/null +++ b/client/service/test/src/client/db.rs @@ -0,0 +1,55 @@ +// Copyright 2018-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use sp_core::offchain::{OffchainStorage, storage::InMemOffchainStorage}; +use std::sync::Arc; + +type TestBackend = sc_client_api::in_mem::Backend; + +#[test] +fn test_leaves_with_complex_block_tree() { + let backend = Arc::new(TestBackend::new()); + + substrate_test_runtime_client::trait_tests::test_leaves_for_backend(backend); +} + +#[test] +fn test_blockchain_query_by_number_gets_canonical() { + let backend = Arc::new(TestBackend::new()); + + substrate_test_runtime_client::trait_tests::test_blockchain_query_by_number_gets_canonical(backend); +} + +#[test] +fn in_memory_offchain_storage() { + + let mut storage = InMemOffchainStorage::default(); + assert_eq!(storage.get(b"A", b"B"), None); + assert_eq!(storage.get(b"B", b"A"), None); + + storage.set(b"A", b"B", b"C"); + assert_eq!(storage.get(b"A", b"B"), Some(b"C".to_vec())); + assert_eq!(storage.get(b"B", b"A"), None); + + storage.compare_and_set(b"A", b"B", Some(b"X"), b"D"); + assert_eq!(storage.get(b"A", b"B"), Some(b"C".to_vec())); + storage.compare_and_set(b"A", b"B", Some(b"C"), b"D"); + assert_eq!(storage.get(b"A", b"B"), Some(b"D".to_vec())); + + assert!(!storage.compare_and_set(b"B", b"A", Some(b""), b"Y")); + assert!(storage.compare_and_set(b"B", b"A", None, b"X")); + assert_eq!(storage.get(b"B", b"A"), Some(b"X".to_vec())); +} \ No newline at end of file diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs new file mode 100644 index 0000000000000..76e48828ee429 --- /dev/null +++ b/client/service/test/src/client/light.rs @@ -0,0 +1,896 @@ +// Copyright 2018-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
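// Editor's note (illustrative sketch, not part of this patch): the
// `in_memory_offchain_storage` test above pins down the `compare_and_set`
// contract of `InMemOffchainStorage`: the write happens, and `true` is
// returned, only when the supplied old value matches the currently stored
// one, with `None` standing for "no value stored yet".
fn cas_contract_sketch() {
    use sp_core::offchain::{storage::InMemOffchainStorage, OffchainStorage};

    let mut storage = InMemOffchainStorage::default();
    // Empty slot plus `None` expectation: the swap succeeds.
    assert!(storage.compare_and_set(b"prefix", b"key", None, b"first"));
    // A stale expectation is rejected and leaves the stored value untouched.
    assert!(!storage.compare_and_set(b"prefix", b"key", Some(b"stale"), b"second"));
    assert_eq!(storage.get(b"prefix", b"key"), Some(b"first".to_vec()));
    // Matching expectation: the swap goes through.
    assert!(storage.compare_and_set(b"prefix", b"key", Some(b"first"), b"second"));
    assert_eq!(storage.get(b"prefix", b"key"), Some(b"second".to_vec()));
}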
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . +use sc_service::client::light::{ + call_executor::{ + GenesisCallExecutor, + check_execution_proof, + check_execution_proof_with_make_header, + }, + fetcher::LightDataChecker, + blockchain::{BlockchainCache, Blockchain}, + backend::{Backend, GenesisOrUnavailableState}, +}; +use std::sync::Arc; +use sp_runtime::{ + traits::{BlakeTwo256, HashFor, NumberFor}, + generic::BlockId, traits::{Block as _, Header as HeaderT}, Digest, +}; +use std::collections::HashMap; +use parking_lot::Mutex; +use substrate_test_runtime_client::{ + runtime::{Hash, Block, Header}, TestClient, ClientBlockImportExt, +}; +use sp_api::{InitializeBlock, StorageTransactionCache, ProofRecorder, OffchainOverlayedChanges}; +use sp_consensus::{BlockOrigin}; +use sc_executor::{NativeExecutor, WasmExecutionMethod, RuntimeVersion, NativeVersion}; +use sp_core::{H256, tasks::executor as tasks_executor, NativeOrEncoded}; +use sc_client_api::{blockchain::Info, backend::NewBlockState, Backend as ClientBackend, ProofProvider, in_mem::{Backend as InMemBackend, Blockchain as InMemoryBlockchain}, AuxStore, Storage, CallExecutor, cht, ExecutionStrategy, StorageProof, BlockImportOperation, RemoteCallRequest, StorageProvider, ChangesProof, RemoteBodyRequest, RemoteReadRequest, RemoteChangesRequest, FetchChecker, RemoteReadChildRequest, RemoteHeaderRequest}; +use sp_externalities::Extensions; +use sc_block_builder::BlockBuilderProvider; +use sp_blockchain::{ + BlockStatus, Result as ClientResult, Error as ClientError, CachedHeaderMetadata, + HeaderBackend, well_known_cache_keys +}; +use std::panic::UnwindSafe; +use std::cell::RefCell; +use sp_state_machine::{OverlayedChanges, ExecutionManager}; +use parity_scale_codec::{Decode, Encode}; +use super::prepare_client_with_key_changes; +use substrate_test_runtime_client::{ + AccountKeyring, runtime::{self, Extrinsic}, +}; + +use sp_core::{blake2_256, ChangesTrieConfiguration, storage::{well_known_keys, StorageKey, ChildInfo}}; +use sp_state_machine::Backend as _; + +pub type DummyBlockchain = Blockchain; + +pub struct DummyStorage { + pub changes_tries_cht_roots: HashMap, + pub aux_store: Mutex, Vec>>, +} + +impl DummyStorage { + pub fn new() -> Self { + DummyStorage { + changes_tries_cht_roots: HashMap::new(), + aux_store: Mutex::new(HashMap::new()), + } + } +} + +impl sp_blockchain::HeaderBackend for DummyStorage { + fn header(&self, _id: BlockId) -> ClientResult> { + Err(ClientError::Backend("Test error".into())) + } + + fn info(&self) -> Info { + panic!("Test error") + } + + fn status(&self, _id: BlockId) -> ClientResult { + Err(ClientError::Backend("Test error".into())) + } + + fn number(&self, hash: Hash) -> ClientResult>> { + if hash == Default::default() { + Ok(Some(Default::default())) + } else { + Err(ClientError::Backend("Test error".into())) + } + } + + fn hash(&self, number: u64) -> ClientResult> { + if number == 0 { + Ok(Some(Default::default())) + } else { + 
Err(ClientError::Backend("Test error".into())) + } + } +} + +impl sp_blockchain::HeaderMetadata for DummyStorage { + type Error = ClientError; + + fn header_metadata(&self, hash: Hash) -> Result, Self::Error> { + self.header(BlockId::hash(hash))?.map(|header| CachedHeaderMetadata::from(&header)) + .ok_or(ClientError::UnknownBlock("header not found".to_owned())) + } + fn insert_header_metadata(&self, _hash: Hash, _metadata: CachedHeaderMetadata) {} + fn remove_header_metadata(&self, _hash: Hash) {} +} + +impl AuxStore for DummyStorage { + fn insert_aux< + 'a, + 'b: 'a, + 'c: 'a, + I: IntoIterator, + D: IntoIterator, + >(&self, insert: I, _delete: D) -> ClientResult<()> { + for (k, v) in insert.into_iter() { + self.aux_store.lock().insert(k.to_vec(), v.to_vec()); + } + Ok(()) + } + + fn get_aux(&self, key: &[u8]) -> ClientResult>> { + Ok(self.aux_store.lock().get(key).cloned()) + } +} + +impl Storage for DummyStorage { + fn import_header( + &self, + _header: Header, + _cache: HashMap>, + _state: NewBlockState, + _aux_ops: Vec<(Vec, Option>)>, + ) -> ClientResult<()> { + Ok(()) + } + + fn set_head(&self, _block: BlockId) -> ClientResult<()> { + Err(ClientError::Backend("Test error".into())) + } + + fn finalize_header(&self, _block: BlockId) -> ClientResult<()> { + Err(ClientError::Backend("Test error".into())) + } + + fn last_finalized(&self) -> ClientResult { + Err(ClientError::Backend("Test error".into())) + } + + fn header_cht_root(&self, _cht_size: u64, _block: u64) -> ClientResult> { + Err(ClientError::Backend("Test error".into())) + } + + fn changes_trie_cht_root(&self, cht_size: u64, block: u64) -> ClientResult> { + cht::block_to_cht_number(cht_size, block) + .and_then(|cht_num| self.changes_tries_cht_roots.get(&cht_num)) + .cloned() + .ok_or_else(|| ClientError::Backend( + format!("Test error: CHT for block #{} not found", block) + ).into()) + .map(Some) + } + + fn cache(&self) -> Option>> { + None + } + + fn usage_info(&self) -> Option { + None + } +} + +struct DummyCallExecutor; + +impl CallExecutor for DummyCallExecutor { + type Error = ClientError; + + type Backend = substrate_test_runtime_client::Backend; + + fn call( + &self, + _id: &BlockId, + _method: &str, + _call_data: &[u8], + _strategy: ExecutionStrategy, + _extensions: Option, + ) -> Result, ClientError> { + Ok(vec![42]) + } + + fn contextual_call< + 'a, + IB: Fn() -> ClientResult<()>, + EM: Fn( + Result, Self::Error>, + Result, Self::Error> + ) -> Result, Self::Error>, + R: Encode + Decode + PartialEq, + NC: FnOnce() -> Result + UnwindSafe, + >( + &self, + _initialize_block_fn: IB, + _at: &BlockId, + _method: &str, + _call_data: &[u8], + _changes: &RefCell, + _offchain_changes: &RefCell, + _storage_transaction_cache: Option<&RefCell< + StorageTransactionCache< + Block, + >::State, + > + >>, + _initialize_block: InitializeBlock<'a, Block>, + _execution_manager: ExecutionManager, + _native_call: Option, + _proof_recorder: &Option>, + _extensions: Option, + ) -> ClientResult> where ExecutionManager: Clone { + unreachable!() + } + + fn runtime_version(&self, _id: &BlockId) -> Result { + unreachable!() + } + + fn prove_at_trie_state>>( + &self, + _trie_state: &sp_state_machine::TrieBackend>, + _overlay: &mut OverlayedChanges, + _method: &str, + _call_data: &[u8] + ) -> Result<(Vec, StorageProof), ClientError> { + unreachable!() + } + + fn native_runtime_version(&self) -> Option<&NativeVersion> { + unreachable!() + } +} + +fn local_executor() -> NativeExecutor { + NativeExecutor::new(WasmExecutionMethod::Interpreted, None, 8) 
+} + +#[test] +fn local_state_is_created_when_genesis_state_is_available() { + let def = Default::default(); + let header0 = substrate_test_runtime_client::runtime::Header::new(0, def, def, def, Default::default()); + + let backend: Backend<_, BlakeTwo256> = Backend::new( + Arc::new(DummyBlockchain::new(DummyStorage::new())), + ); + let mut op = backend.begin_operation().unwrap(); + op.set_block_data(header0, None, None, NewBlockState::Final).unwrap(); + op.reset_storage(Default::default()).unwrap(); + backend.commit_operation(op).unwrap(); + + match backend.state_at(BlockId::Number(0)).unwrap() { + GenesisOrUnavailableState::Genesis(_) => (), + _ => panic!("unexpected state"), + } +} + +#[test] +fn unavailable_state_is_created_when_genesis_state_is_unavailable() { + let backend: Backend<_, BlakeTwo256> = Backend::new( + Arc::new(DummyBlockchain::new(DummyStorage::new())), + ); + + match backend.state_at(BlockId::Number(0)).unwrap() { + GenesisOrUnavailableState::Unavailable => (), + _ => panic!("unexpected state"), + } +} + +#[test] +fn light_aux_store_is_updated_via_non_importing_op() { + let backend = Backend::new(Arc::new(DummyBlockchain::new(DummyStorage::new()))); + let mut op = ClientBackend::::begin_operation(&backend).unwrap(); + BlockImportOperation::::insert_aux(&mut op, vec![(vec![1], Some(vec![2]))]).unwrap(); + ClientBackend::::commit_operation(&backend, op).unwrap(); + + assert_eq!(AuxStore::get_aux(&backend, &[1]).unwrap(), Some(vec![2])); +} + +#[test] +fn execution_proof_is_generated_and_checked() { + fn execute(remote_client: &TestClient, at: u64, method: &'static str) -> (Vec, Vec) { + let remote_block_id = BlockId::Number(at); + let remote_header = remote_client.header(&remote_block_id).unwrap().unwrap(); + + // 'fetch' execution proof from remote node + let (remote_result, remote_execution_proof) = remote_client.execution_proof( + &remote_block_id, + method, + &[] + ).unwrap(); + + // check remote execution proof locally + let local_result = check_execution_proof::<_, _, BlakeTwo256>( + &local_executor(), + tasks_executor(), + &RemoteCallRequest { + block: substrate_test_runtime_client::runtime::Hash::default(), + header: remote_header, + method: method.into(), + call_data: vec![], + retry_count: None, + }, + remote_execution_proof, + ).unwrap(); + + (remote_result, local_result) + } + + fn execute_with_proof_failure(remote_client: &TestClient, at: u64, method: &'static str) { + let remote_block_id = BlockId::Number(at); + let remote_header = remote_client.header(&remote_block_id).unwrap().unwrap(); + + // 'fetch' execution proof from remote node + let (_, remote_execution_proof) = remote_client.execution_proof( + &remote_block_id, + method, + &[] + ).unwrap(); + + // check remote execution proof locally + let execution_result = check_execution_proof_with_make_header::<_, _, BlakeTwo256, _>( + &local_executor(), + tasks_executor(), + &RemoteCallRequest { + block: substrate_test_runtime_client::runtime::Hash::default(), + header: remote_header, + method: method.into(), + call_data: vec![], + retry_count: None, + }, + remote_execution_proof, + |header|
::new( + at + 1, + Default::default(), + Default::default(), + header.hash(), + header.digest().clone(), // this makes next header wrong + ), + ); + match execution_result { + Err(sp_blockchain::Error::Execution(_)) => (), + _ => panic!("Unexpected execution result: {:?}", execution_result), + } + } + + // prepare remote client + let mut remote_client = substrate_test_runtime_client::new(); + for i in 1u32..3u32 { + let mut digest = Digest::default(); + digest.push(sp_runtime::generic::DigestItem::Other::(i.to_le_bytes().to_vec())); + remote_client.import_justified( + BlockOrigin::Own, + remote_client.new_block(digest).unwrap().build().unwrap().block, + Default::default(), + ).unwrap(); + } + + // check method that doesn't requires environment + let (remote, local) = execute(&remote_client, 0, "Core_version"); + assert_eq!(remote, local); + + let (remote, local) = execute(&remote_client, 2, "Core_version"); + assert_eq!(remote, local); + + // check method that requires environment + let (_, block) = execute(&remote_client, 0, "BlockBuilder_finalize_block"); + let local_block: Header = Decode::decode(&mut &block[..]).unwrap(); + assert_eq!(local_block.number, 1); + + let (_, block) = execute(&remote_client, 2, "BlockBuilder_finalize_block"); + let local_block: Header = Decode::decode(&mut &block[..]).unwrap(); + assert_eq!(local_block.number, 3); + + // check that proof check doesn't panic even if proof is incorrect AND no panic handler is set + execute_with_proof_failure(&remote_client, 2, "Core_version"); + + // check that proof check doesn't panic even if proof is incorrect AND panic handler is set + sp_panic_handler::set("TEST", "1.2.3"); + execute_with_proof_failure(&remote_client, 2, "Core_version"); +} + +#[test] +fn code_is_executed_at_genesis_only() { + let backend = Arc::new(InMemBackend::::new()); + let def = H256::default(); + let header0 = substrate_test_runtime_client::runtime::Header::new(0, def, def, def, Default::default()); + let hash0 = header0.hash(); + let header1 = substrate_test_runtime_client::runtime::Header::new(1, def, def, hash0, Default::default()); + let hash1 = header1.hash(); + backend.blockchain().insert(hash0, header0, None, None, NewBlockState::Final).unwrap(); + backend.blockchain().insert(hash1, header1, None, None, NewBlockState::Final).unwrap(); + + let genesis_executor = GenesisCallExecutor::new(backend, DummyCallExecutor); + assert_eq!( + genesis_executor.call( + &BlockId::Number(0), + "test_method", + &[], + ExecutionStrategy::NativeElseWasm, + None, + ).unwrap(), + vec![42], + ); + + let call_on_unavailable = genesis_executor.call( + &BlockId::Number(1), + "test_method", + &[], + ExecutionStrategy::NativeElseWasm, + None, + ); + + match call_on_unavailable { + Err(ClientError::NotAvailableOnLightClient) => (), + _ => unreachable!("unexpected result: {:?}", call_on_unavailable), + } +} + + +type TestChecker = LightDataChecker< + NativeExecutor, + BlakeTwo256, + Block, + DummyStorage, +>; + +fn prepare_for_read_proof_check() -> (TestChecker, Header, StorageProof, u32) { + // prepare remote client + let remote_client = substrate_test_runtime_client::new(); + let remote_block_id = BlockId::Number(0); + let remote_block_hash = remote_client.block_hash(0).unwrap().unwrap(); + let mut remote_block_header = remote_client.header(&remote_block_id).unwrap().unwrap(); + remote_block_header.state_root = remote_client.state_at(&remote_block_id).unwrap() + .storage_root(::std::iter::empty()).0.into(); + + // 'fetch' read proof from remote node + let heap_pages = 
remote_client.storage(&remote_block_id, &StorageKey(well_known_keys::HEAP_PAGES.to_vec())) + .unwrap() + .and_then(|v| Decode::decode(&mut &v.0[..]).ok()).unwrap(); + let remote_read_proof = remote_client.read_proof( + &remote_block_id, + &mut std::iter::once(well_known_keys::HEAP_PAGES), + ).unwrap(); + + // check remote read proof locally + let local_storage = InMemoryBlockchain::::new(); + local_storage.insert( + remote_block_hash, + remote_block_header.clone(), + None, + None, + NewBlockState::Final, + ).unwrap(); + let local_checker = LightDataChecker::new( + Arc::new(DummyBlockchain::new(DummyStorage::new())), + local_executor(), + tasks_executor(), + ); + (local_checker, remote_block_header, remote_read_proof, heap_pages) +} + +fn prepare_for_read_child_proof_check() -> (TestChecker, Header, StorageProof, Vec) { + use substrate_test_runtime_client::DefaultTestClientBuilderExt; + use substrate_test_runtime_client::TestClientBuilderExt; + let child_info = ChildInfo::new_default(b"child1"); + let child_info = &child_info; + // prepare remote client + let remote_client = substrate_test_runtime_client::TestClientBuilder::new() + .add_extra_child_storage( + child_info, + b"key1".to_vec(), + b"value1".to_vec(), + ).build(); + let remote_block_id = BlockId::Number(0); + let remote_block_hash = remote_client.block_hash(0).unwrap().unwrap(); + let mut remote_block_header = remote_client.header(&remote_block_id).unwrap().unwrap(); + remote_block_header.state_root = remote_client.state_at(&remote_block_id).unwrap() + .storage_root(::std::iter::empty()).0.into(); + + // 'fetch' child read proof from remote node + let child_value = remote_client.child_storage( + &remote_block_id, + child_info, + &StorageKey(b"key1".to_vec()), + ).unwrap().unwrap().0; + assert_eq!(b"value1"[..], child_value[..]); + let remote_read_proof = remote_client.read_child_proof( + &remote_block_id, + child_info, + &mut std::iter::once("key1".as_bytes()), + ).unwrap(); + + // check locally + let local_storage = InMemoryBlockchain::::new(); + local_storage.insert( + remote_block_hash, + remote_block_header.clone(), + None, + None, + NewBlockState::Final, + ).unwrap(); + let local_checker = LightDataChecker::new( + Arc::new(DummyBlockchain::new(DummyStorage::new())), + local_executor(), + tasks_executor(), + ); + (local_checker, remote_block_header, remote_read_proof, child_value) +} + +fn prepare_for_header_proof_check(insert_cht: bool) -> (TestChecker, Hash, Header, StorageProof) { + // prepare remote client + let mut remote_client = substrate_test_runtime_client::new(); + let mut local_headers_hashes = Vec::new(); + for i in 0..4 { + let block = remote_client.new_block(Default::default()).unwrap().build().unwrap().block; + remote_client.import(BlockOrigin::Own, block).unwrap(); + local_headers_hashes.push( + remote_client.block_hash(i + 1) + .map_err(|_| ClientError::Backend("TestError".into())) + ); + } + + // 'fetch' header proof from remote node + let remote_block_id = BlockId::Number(1); + let (remote_block_header, remote_header_proof) = remote_client.header_proof_with_cht_size(&remote_block_id, 4).unwrap(); + + // check remote read proof locally + let local_storage = InMemoryBlockchain::::new(); + let local_cht_root = cht::compute_root::(4, 0, local_headers_hashes).unwrap(); + if insert_cht { + local_storage.insert_cht_root(1, local_cht_root); + } + let local_checker = LightDataChecker::new( + Arc::new(DummyBlockchain::new(DummyStorage::new())), + local_executor(), + tasks_executor(), + ); + (local_checker, 
local_cht_root, remote_block_header, remote_header_proof) +} + +fn header_with_computed_extrinsics_root(extrinsics: Vec) -> Header { + use sp_trie::{TrieConfiguration, trie_types::Layout}; + let iter = extrinsics.iter().map(Encode::encode); + let extrinsics_root = Layout::::ordered_trie_root(iter); + + // only care about `extrinsics_root` + Header::new(0, extrinsics_root, H256::zero(), H256::zero(), Default::default()) +} + +#[test] +fn storage_read_proof_is_generated_and_checked() { + let (local_checker, remote_block_header, remote_read_proof, heap_pages) = prepare_for_read_proof_check(); + assert_eq!((&local_checker as &dyn FetchChecker).check_read_proof(&RemoteReadRequest::
{ + block: remote_block_header.hash(), + header: remote_block_header, + keys: vec![well_known_keys::HEAP_PAGES.to_vec()], + retry_count: None, + }, remote_read_proof).unwrap().remove(well_known_keys::HEAP_PAGES).unwrap().unwrap()[0], heap_pages as u8); +} + +#[test] +fn storage_child_read_proof_is_generated_and_checked() { + let child_info = ChildInfo::new_default(&b"child1"[..]); + let ( + local_checker, + remote_block_header, + remote_read_proof, + result, + ) = prepare_for_read_child_proof_check(); + assert_eq!((&local_checker as &dyn FetchChecker).check_read_child_proof( + &RemoteReadChildRequest::
{ + block: remote_block_header.hash(), + header: remote_block_header, + storage_key: child_info.prefixed_storage_key(), + keys: vec![b"key1".to_vec()], + retry_count: None, + }, + remote_read_proof + ).unwrap().remove(b"key1".as_ref()).unwrap().unwrap(), result); +} + +#[test] +fn header_proof_is_generated_and_checked() { + let (local_checker, local_cht_root, remote_block_header, remote_header_proof) = prepare_for_header_proof_check(true); + assert_eq!((&local_checker as &dyn FetchChecker).check_header_proof(&RemoteHeaderRequest::
{ + cht_root: local_cht_root, + block: 1, + retry_count: None, + }, Some(remote_block_header.clone()), remote_header_proof).unwrap(), remote_block_header); +} + +#[test] +fn check_header_proof_fails_if_cht_root_is_invalid() { + let (local_checker, _, mut remote_block_header, remote_header_proof) = prepare_for_header_proof_check(true); + remote_block_header.number = 100; + assert!((&local_checker as &dyn FetchChecker).check_header_proof(&RemoteHeaderRequest::
{ + cht_root: Default::default(), + block: 1, + retry_count: None, + }, Some(remote_block_header.clone()), remote_header_proof).is_err()); +} + +#[test] +fn check_header_proof_fails_if_invalid_header_provided() { + let (local_checker, local_cht_root, mut remote_block_header, remote_header_proof) = prepare_for_header_proof_check(true); + remote_block_header.number = 100; + assert!((&local_checker as &dyn FetchChecker).check_header_proof(&RemoteHeaderRequest::
{ + cht_root: local_cht_root, + block: 1, + retry_count: None, + }, Some(remote_block_header.clone()), remote_header_proof).is_err()); +} + +#[test] +fn changes_proof_is_generated_and_checked_when_headers_are_not_pruned() { + let (remote_client, local_roots, test_cases) = prepare_client_with_key_changes(); + let local_checker = TestChecker::new( + Arc::new(DummyBlockchain::new(DummyStorage::new())), + local_executor(), + tasks_executor(), + ); + let local_checker = &local_checker as &dyn FetchChecker; + let max = remote_client.chain_info().best_number; + let max_hash = remote_client.chain_info().best_hash; + + for (index, (begin, end, key, expected_result)) in test_cases.into_iter().enumerate() { + let begin_hash = remote_client.block_hash(begin).unwrap().unwrap(); + let end_hash = remote_client.block_hash(end).unwrap().unwrap(); + + // 'fetch' changes proof from remote node + let key = StorageKey(key); + let remote_proof = remote_client.key_changes_proof( + begin_hash, end_hash, begin_hash, max_hash, None, &key + ).unwrap(); + + // check proof on local client + let local_roots_range = local_roots.clone()[(begin - 1) as usize..].to_vec(); + let config = ChangesTrieConfiguration::new(4, 2); + let request = RemoteChangesRequest::
{ + changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { + zero: (0, Default::default()), + end: None, + config: Some(config), + }], + first_block: (begin, begin_hash), + last_block: (end, end_hash), + max_block: (max, max_hash), + tries_roots: (begin, begin_hash, local_roots_range), + key: key.0, + storage_key: None, + retry_count: None, + }; + let local_result = local_checker.check_changes_proof(&request, ChangesProof { + max_block: remote_proof.max_block, + proof: remote_proof.proof, + roots: remote_proof.roots, + roots_proof: remote_proof.roots_proof, + }).unwrap(); + + // ..and ensure that result is the same as on remote node + match local_result == expected_result { + true => (), + false => panic!(format!("Failed test {}: local = {:?}, expected = {:?}", + index, local_result, expected_result)), + } + } +} + +#[test] +fn changes_proof_is_generated_and_checked_when_headers_are_pruned() { + // we're testing this test case here: + // (1, 4, dave.clone(), vec![(4, 0), (1, 1), (1, 0)]), + let (remote_client, remote_roots, _) = prepare_client_with_key_changes(); + let dave = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Dave.into())).to_vec(); + let dave = StorageKey(dave); + + // 'fetch' changes proof from remote node: + // we're fetching changes for range b1..b4 + // we do not know changes trie roots before b3 (i.e. we only know b3+b4) + // but we have changes trie CHT root for b1...b4 + let b1 = remote_client.block_hash_from_id(&BlockId::Number(1)).unwrap().unwrap(); + let b3 = remote_client.block_hash_from_id(&BlockId::Number(3)).unwrap().unwrap(); + let b4 = remote_client.block_hash_from_id(&BlockId::Number(4)).unwrap().unwrap(); + let remote_proof = remote_client.key_changes_proof_with_cht_size( + b1, b4, b3, b4, None, &dave, 4 + ).unwrap(); + + // prepare local checker, having a root of changes trie CHT#0 + let local_cht_root = cht::compute_root::(4, 0, remote_roots.iter().cloned().map(|ct| Ok(Some(ct)))).unwrap(); + let mut local_storage = DummyStorage::new(); + local_storage.changes_tries_cht_roots.insert(0, local_cht_root); + let local_checker = TestChecker::new( + Arc::new(DummyBlockchain::new(local_storage)), + local_executor(), + tasks_executor(), + ); + + // check proof on local client + let config = ChangesTrieConfiguration::new(4, 2); + let request = RemoteChangesRequest::
{ + changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { + zero: (0, Default::default()), + end: None, + config: Some(config), + }], + first_block: (1, b1), + last_block: (4, b4), + max_block: (4, b4), + tries_roots: (3, b3, vec![remote_roots[2].clone(), remote_roots[3].clone()]), + storage_key: None, + key: dave.0, + retry_count: None, + }; + let local_result = local_checker.check_changes_proof_with_cht_size(&request, ChangesProof { + max_block: remote_proof.max_block, + proof: remote_proof.proof, + roots: remote_proof.roots, + roots_proof: remote_proof.roots_proof, + }, 4).unwrap(); + + assert_eq!(local_result, vec![(4, 0), (1, 1), (1, 0)]); +} + +#[test] +fn check_changes_proof_fails_if_proof_is_wrong() { + let (remote_client, local_roots, test_cases) = prepare_client_with_key_changes(); + let local_checker = TestChecker::new( + Arc::new(DummyBlockchain::new(DummyStorage::new())), + local_executor(), + tasks_executor(), + ); + let local_checker = &local_checker as &dyn FetchChecker; + let max = remote_client.chain_info().best_number; + let max_hash = remote_client.chain_info().best_hash; + + let (begin, end, key, _) = test_cases[0].clone(); + let begin_hash = remote_client.block_hash(begin).unwrap().unwrap(); + let end_hash = remote_client.block_hash(end).unwrap().unwrap(); + + // 'fetch' changes proof from remote node + let key = StorageKey(key); + let remote_proof = remote_client.key_changes_proof( + begin_hash, end_hash, begin_hash, max_hash, None, &key).unwrap(); + + let local_roots_range = local_roots.clone()[(begin - 1) as usize..].to_vec(); + let config = ChangesTrieConfiguration::new(4, 2); + let request = RemoteChangesRequest::
{ + changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { + zero: (0, Default::default()), + end: None, + config: Some(config), + }], + first_block: (begin, begin_hash), + last_block: (end, end_hash), + max_block: (max, max_hash), + tries_roots: (begin, begin_hash, local_roots_range.clone()), + storage_key: None, + key: key.0, + retry_count: None, + }; + + // check proof on local client using max from the future + assert!(local_checker.check_changes_proof(&request, ChangesProof { + max_block: remote_proof.max_block + 1, + proof: remote_proof.proof.clone(), + roots: remote_proof.roots.clone(), + roots_proof: remote_proof.roots_proof.clone(), + }).is_err()); + + // check proof on local client using broken proof + assert!(local_checker.check_changes_proof(&request, ChangesProof { + max_block: remote_proof.max_block, + proof: local_roots_range.clone().into_iter().map(|v| v.as_ref().to_vec()).collect(), + roots: remote_proof.roots, + roots_proof: remote_proof.roots_proof, + }).is_err()); + + // extra roots proofs are provided + assert!(local_checker.check_changes_proof(&request, ChangesProof { + max_block: remote_proof.max_block, + proof: remote_proof.proof.clone(), + roots: vec![(begin - 1, Default::default())].into_iter().collect(), + roots_proof: StorageProof::empty(), + }).is_err()); + assert!(local_checker.check_changes_proof(&request, ChangesProof { + max_block: remote_proof.max_block, + proof: remote_proof.proof.clone(), + roots: vec![(end + 1, Default::default())].into_iter().collect(), + roots_proof: StorageProof::empty(), + }).is_err()); +} + +#[test] +fn check_changes_tries_proof_fails_if_proof_is_wrong() { + // we're testing this test case here: + // (1, 4, dave.clone(), vec![(4, 0), (1, 1), (1, 0)]), + let (remote_client, remote_roots, _) = prepare_client_with_key_changes(); + let local_cht_root = cht::compute_root::( + 4, 0, remote_roots.iter().cloned().map(|ct| Ok(Some(ct)))).unwrap(); + let dave = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Dave.into())).to_vec(); + let dave = StorageKey(dave); + + // 'fetch' changes proof from remote node: + // we're fetching changes for range b1..b4 + // we do not know changes trie roots before b3 (i.e. 
we only know b3+b4) + // but we have changes trie CHT root for b1...b4 + let b1 = remote_client.block_hash_from_id(&BlockId::Number(1)).unwrap().unwrap(); + let b3 = remote_client.block_hash_from_id(&BlockId::Number(3)).unwrap().unwrap(); + let b4 = remote_client.block_hash_from_id(&BlockId::Number(4)).unwrap().unwrap(); + let remote_proof = remote_client.key_changes_proof_with_cht_size( + b1, b4, b3, b4, None, &dave, 4 + ).unwrap(); + + // fails when changes trie CHT is missing from the local db + let local_checker = TestChecker::new( + Arc::new(DummyBlockchain::new(DummyStorage::new())), + local_executor(), + tasks_executor(), + ); + assert!(local_checker.check_changes_tries_proof(4, &remote_proof.roots, + remote_proof.roots_proof.clone()).is_err()); + + // fails when proof is broken + let mut local_storage = DummyStorage::new(); + local_storage.changes_tries_cht_roots.insert(0, local_cht_root); + let local_checker = TestChecker::new( + Arc::new(DummyBlockchain::new(local_storage)), + local_executor(), + tasks_executor(), + ); + let result = local_checker.check_changes_tries_proof( + 4, &remote_proof.roots, StorageProof::empty() + ); + assert!(result.is_err()); +} + +#[test] +fn check_body_proof_faulty() { + let header = header_with_computed_extrinsics_root( + vec![Extrinsic::IncludeData(vec![1, 2, 3, 4])] + ); + let block = Block::new(header.clone(), Vec::new()); + + let local_checker = TestChecker::new( + Arc::new(DummyBlockchain::new(DummyStorage::new())), + local_executor(), + tasks_executor(), + ); + + let body_request = RemoteBodyRequest { + header: header.clone(), + retry_count: None, + }; + + assert!( + local_checker.check_body_proof(&body_request, block.extrinsics).is_err(), + "vec![1, 2, 3, 4] != vec![]" + ); +} + +#[test] +fn check_body_proof_of_same_data_should_succeed() { + let extrinsics = vec![Extrinsic::IncludeData(vec![1, 2, 3, 4, 5, 6, 7, 8, 255])]; + + let header = header_with_computed_extrinsics_root(extrinsics.clone()); + let block = Block::new(header.clone(), extrinsics); + + let local_checker = TestChecker::new( + Arc::new(DummyBlockchain::new(DummyStorage::new())), + local_executor(), + tasks_executor(), + ); + + let body_request = RemoteBodyRequest { + header: header.clone(), + retry_count: None, + }; + + assert!(local_checker.check_body_proof(&body_request, block.extrinsics).is_ok()); +} diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs new file mode 100644 index 0000000000000..e81d1ebb5364b --- /dev/null +++ b/client/service/test/src/client/mod.rs @@ -0,0 +1,1802 @@ +// Copyright 2018-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . 
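// Editor's note (illustrative sketch, not part of this patch): this module
// hosts the integration tests that used to live in `sc-client`, now exercised
// through `sc_service::client` via the `test-helpers` feature enabled in the
// test crate's Cargo.toml hunk above. The `prepare_client_with_key_changes()`
// helper defined below is the shared fixture for the key-changes-proof tests
// in `light.rs`; this sketch shows the shape of what it returns.
fn example_fixture_usage() {
    // Four blocks are built, so there is one changes-trie root per block and
    // the best block number ends up at 4.
    let (remote_client, changes_trie_roots, test_cases) = prepare_client_with_key_changes();
    assert_eq!(changes_trie_roots.len(), 4);
    assert_eq!(remote_client.chain_info().best_number, 4);
    // Each case is (first block, last block, storage key, expected (block, extrinsic index) pairs).
    assert!(!test_cases.is_empty());
}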
+ +use parity_scale_codec::{Encode, Decode, Joiner}; +use sc_executor::native_executor_instance; +use sp_state_machine::{StateMachine, OverlayedChanges, ExecutionStrategy, InMemoryBackend}; +use substrate_test_runtime_client::{ + prelude::*, + runtime::{ + self, genesismap::{GenesisConfig, insert_genesis_block}, + Hash, Transfer, Block, BlockNumber, Header, Digest, RuntimeApi, + }, + AccountKeyring, Sr25519Keyring, TestClientBuilder, ClientBlockImportExt, + BlockBuilderExt, DefaultTestClientBuilderExt, TestClientBuilderExt, ClientExt, +}; +use sc_client_api::{ + StorageProvider, BlockBackend, in_mem, BlockchainEvents, +}; +use sc_client_db::{Backend, DatabaseSettings, DatabaseSettingsSrc, PruningMode}; +use sc_block_builder::BlockBuilderProvider; +use sc_service::client::{self, Client, LocalCallExecutor, new_in_mem}; +use sp_runtime::traits::{ + BlakeTwo256, Block as BlockT, Header as HeaderT, +}; +use substrate_test_runtime::TestAPI; +use sp_state_machine::backend::Backend as _; +use sp_api::{ProvideRuntimeApi, OffchainOverlayedChanges}; +use sp_core::tasks::executor as tasks_executor; +use sp_core::{H256, ChangesTrieConfiguration, blake2_256}; +use std::collections::{HashMap, HashSet}; +use std::sync::Arc; +use sp_consensus::{ + BlockOrigin, SelectChain, BlockImport, Error as ConsensusError, BlockCheckParams, ImportResult, + BlockStatus, BlockImportParams, ForkChoiceStrategy, +}; +use sp_storage::StorageKey; +use sp_trie::{TrieConfiguration, trie_types::Layout}; +use sp_runtime::{generic::BlockId, DigestItem}; +use hex_literal::hex; + +mod light; +mod db; + +native_executor_instance!( + Executor, + substrate_test_runtime_client::runtime::api::dispatch, + substrate_test_runtime_client::runtime::native_version, +); + +fn executor() -> sc_executor::NativeExecutor { + sc_executor::NativeExecutor::new( + sc_executor::WasmExecutionMethod::Interpreted, + None, + 8, + ) +} + +pub fn prepare_client_with_key_changes() -> ( + client::Client< + substrate_test_runtime_client::Backend, + substrate_test_runtime_client::Executor, + Block, + RuntimeApi + >, + Vec, + Vec<(u64, u64, Vec, Vec<(u64, u32)>)>, +) { + // prepare block structure + let blocks_transfers = vec![ + vec![(AccountKeyring::Alice, AccountKeyring::Dave), (AccountKeyring::Bob, AccountKeyring::Dave)], + vec![(AccountKeyring::Charlie, AccountKeyring::Eve)], + vec![], + vec![(AccountKeyring::Alice, AccountKeyring::Dave)], + ]; + + // prepare client ang import blocks + let mut local_roots = Vec::new(); + let config = Some(ChangesTrieConfiguration::new(4, 2)); + let mut remote_client = TestClientBuilder::new().changes_trie_config(config).build(); + let mut nonces: HashMap<_, u64> = Default::default(); + for (i, block_transfers) in blocks_transfers.into_iter().enumerate() { + let mut builder = remote_client.new_block(Default::default()).unwrap(); + for (from, to) in block_transfers { + builder.push_transfer(Transfer { + from: from.into(), + to: to.into(), + amount: 1, + nonce: *nonces.entry(from).and_modify(|n| { *n = *n + 1 }).or_default(), + }).unwrap(); + } + let block = builder.build().unwrap().block; + remote_client.import(BlockOrigin::Own, block).unwrap(); + + let header = remote_client.header(&BlockId::Number(i as u64 + 1)).unwrap().unwrap(); + let trie_root = header.digest().log(DigestItem::as_changes_trie_root) + .map(|root| H256::from_slice(root.as_ref())) + .unwrap(); + local_roots.push(trie_root); + } + + // prepare test cases + let alice = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Alice.into())).to_vec(); + let 
bob = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Bob.into())).to_vec(); + let charlie = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Charlie.into())).to_vec(); + let dave = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Dave.into())).to_vec(); + let eve = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Eve.into())).to_vec(); + let ferdie = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Ferdie.into())).to_vec(); + let test_cases = vec![ + (1, 4, alice.clone(), vec![(4, 0), (1, 0)]), + (1, 3, alice.clone(), vec![(1, 0)]), + (2, 4, alice.clone(), vec![(4, 0)]), + (2, 3, alice.clone(), vec![]), + (1, 4, bob.clone(), vec![(1, 1)]), + (1, 1, bob.clone(), vec![(1, 1)]), + (2, 4, bob.clone(), vec![]), + (1, 4, charlie.clone(), vec![(2, 0)]), + (1, 4, dave.clone(), vec![(4, 0), (1, 1), (1, 0)]), + (1, 1, dave.clone(), vec![(1, 1), (1, 0)]), + (3, 4, dave.clone(), vec![(4, 0)]), + (1, 4, eve.clone(), vec![(2, 0)]), + (1, 1, eve.clone(), vec![]), + (3, 4, eve.clone(), vec![]), + (1, 4, ferdie.clone(), vec![]), + ]; + + (remote_client, local_roots, test_cases) +} + +fn construct_block( + backend: &InMemoryBackend, + number: BlockNumber, + parent_hash: Hash, + state_root: Hash, + txs: Vec, +) -> (Vec, Hash) { + let transactions = txs.into_iter().map(|tx| tx.into_signed_tx()).collect::>(); + + let iter = transactions.iter().map(Encode::encode); + let extrinsics_root = Layout::::ordered_trie_root(iter).into(); + + let mut header = Header { + parent_hash, + number, + state_root, + extrinsics_root, + digest: Digest { logs: vec![] }, + }; + let hash = header.hash(); + let mut overlay = OverlayedChanges::default(); + let mut offchain_overlay = OffchainOverlayedChanges::default(); + let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&backend); + let runtime_code = backend_runtime_code.runtime_code().expect("Code is part of the backend"); + + StateMachine::new( + backend, + sp_state_machine::disabled_changes_trie_state::<_, u64>(), + &mut overlay, + &mut offchain_overlay, + &executor(), + "Core_initialize_block", + &header.encode(), + Default::default(), + &runtime_code, + tasks_executor(), + ).execute( + ExecutionStrategy::NativeElseWasm, + ).unwrap(); + + for tx in transactions.iter() { + StateMachine::new( + backend, + sp_state_machine::disabled_changes_trie_state::<_, u64>(), + &mut overlay, + &mut offchain_overlay, + &executor(), + "BlockBuilder_apply_extrinsic", + &tx.encode(), + Default::default(), + &runtime_code, + tasks_executor(), + ).execute( + ExecutionStrategy::NativeElseWasm, + ).unwrap(); + } + + let ret_data = StateMachine::new( + backend, + sp_state_machine::disabled_changes_trie_state::<_, u64>(), + &mut overlay, + &mut offchain_overlay, + &executor(), + "BlockBuilder_finalize_block", + &[], + Default::default(), + &runtime_code, + tasks_executor(), + ).execute( + ExecutionStrategy::NativeElseWasm, + ).unwrap(); + header = Header::decode(&mut &ret_data[..]).unwrap(); + + (vec![].and(&Block { header, extrinsics: transactions }), hash) +} + +fn block1(genesis_hash: Hash, backend: &InMemoryBackend) -> (Vec, Hash) { + construct_block( + backend, + 1, + genesis_hash, + hex!("25e5b37074063ab75c889326246640729b40d0c86932edc527bc80db0e04fe5c").into(), + vec![Transfer { + from: AccountKeyring::One.into(), + to: AccountKeyring::Two.into(), + amount: 69, + nonce: 0, + }], + ) +} + +#[test] +fn construct_genesis_should_work_with_native() { + let mut storage = GenesisConfig::new( + None, + 
vec![Sr25519Keyring::One.public().into(), Sr25519Keyring::Two.public().into()], + vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], + 1000, + None, + Default::default(), + ).genesis_map(); + let genesis_hash = insert_genesis_block(&mut storage); + + let backend = InMemoryBackend::from(storage); + let (b1data, _b1hash) = block1(genesis_hash, &backend); + let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&backend); + let runtime_code = backend_runtime_code.runtime_code().expect("Code is part of the backend"); + + let mut overlay = OverlayedChanges::default(); + let mut offchain_overlay = OffchainOverlayedChanges::default(); + + let _ = StateMachine::new( + &backend, + sp_state_machine::disabled_changes_trie_state::<_, u64>(), + &mut overlay, + &mut offchain_overlay, + &executor(), + "Core_execute_block", + &b1data, + Default::default(), + &runtime_code, + tasks_executor(), + ).execute( + ExecutionStrategy::NativeElseWasm, + ).unwrap(); +} + +#[test] +fn construct_genesis_should_work_with_wasm() { + let mut storage = GenesisConfig::new( + None, + vec![Sr25519Keyring::One.public().into(), Sr25519Keyring::Two.public().into()], + vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], + 1000, + None, + Default::default(), + ).genesis_map(); + let genesis_hash = insert_genesis_block(&mut storage); + + let backend = InMemoryBackend::from(storage); + let (b1data, _b1hash) = block1(genesis_hash, &backend); + let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&backend); + let runtime_code = backend_runtime_code.runtime_code().expect("Code is part of the backend"); + + let mut overlay = OverlayedChanges::default(); + let mut offchain_overlay = OffchainOverlayedChanges::default(); + + let _ = StateMachine::new( + &backend, + sp_state_machine::disabled_changes_trie_state::<_, u64>(), + &mut overlay, + &mut offchain_overlay, + &executor(), + "Core_execute_block", + &b1data, + Default::default(), + &runtime_code, + tasks_executor(), + ).execute( + ExecutionStrategy::AlwaysWasm, + ).unwrap(); +} + +#[test] +fn construct_genesis_with_bad_transaction_should_panic() { + let mut storage = GenesisConfig::new( + None, + vec![Sr25519Keyring::One.public().into(), Sr25519Keyring::Two.public().into()], + vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], + 68, + None, + Default::default(), + ).genesis_map(); + let genesis_hash = insert_genesis_block(&mut storage); + + let backend = InMemoryBackend::from(storage); + let (b1data, _b1hash) = block1(genesis_hash, &backend); + let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&backend); + let runtime_code = backend_runtime_code.runtime_code().expect("Code is part of the backend"); + + let mut overlay = OverlayedChanges::default(); + let mut offchain_overlay = OffchainOverlayedChanges::default(); + + let r = StateMachine::new( + &backend, + sp_state_machine::disabled_changes_trie_state::<_, u64>(), + &mut overlay, + &mut offchain_overlay, + &executor(), + "Core_execute_block", + &b1data, + Default::default(), + &runtime_code, + tasks_executor(), + ).execute( + ExecutionStrategy::NativeElseWasm, + ); + assert!(r.is_err()); +} + + +#[test] +fn client_initializes_from_genesis_ok() { + let client = substrate_test_runtime_client::new(); + + assert_eq!( + client.runtime_api().balance_of( + &BlockId::Number(client.chain_info().best_number), + AccountKeyring::Alice.into(), + ).unwrap(), + 1000 + ); + assert_eq!( + client.runtime_api().balance_of( + 
&BlockId::Number(client.chain_info().best_number), + AccountKeyring::Ferdie.into(), + ).unwrap(), + 0 + ); +} + +#[test] +fn block_builder_works_with_no_transactions() { + let mut client = substrate_test_runtime_client::new(); + + let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + + client.import(BlockOrigin::Own, block).unwrap(); + + assert_eq!(client.chain_info().best_number, 1); +} + +#[test] +fn block_builder_works_with_transactions() { + let mut client = substrate_test_runtime_client::new(); + + let mut builder = client.new_block(Default::default()).unwrap(); + + builder.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 42, + nonce: 0, + }).unwrap(); + + let block = builder.build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + + assert_eq!(client.chain_info().best_number, 1); + assert_ne!( + client.state_at(&BlockId::Number(1)).unwrap().pairs(), + client.state_at(&BlockId::Number(0)).unwrap().pairs() + ); + assert_eq!( + client.runtime_api().balance_of( + &BlockId::Number(client.chain_info().best_number), + AccountKeyring::Alice.into(), + ).unwrap(), + 958 + ); + assert_eq!( + client.runtime_api().balance_of( + &BlockId::Number(client.chain_info().best_number), + AccountKeyring::Ferdie.into(), + ).unwrap(), + 42 + ); +} + +#[test] +fn block_builder_does_not_include_invalid() { + let mut client = substrate_test_runtime_client::new(); + + let mut builder = client.new_block(Default::default()).unwrap(); + + builder.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 42, + nonce: 0, + }).unwrap(); + + assert!( + builder.push_transfer(Transfer { + from: AccountKeyring::Eve.into(), + to: AccountKeyring::Alice.into(), + amount: 42, + nonce: 0, + }).is_err() + ); + + let block = builder.build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + + assert_eq!(client.chain_info().best_number, 1); + assert_ne!( + client.state_at(&BlockId::Number(1)).unwrap().pairs(), + client.state_at(&BlockId::Number(0)).unwrap().pairs() + ); + assert_eq!(client.body(&BlockId::Number(1)).unwrap().unwrap().len(), 1) +} + +#[test] +fn best_containing_with_genesis_block() { + // block tree: + // G + + let (client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); + + let genesis_hash = client.chain_info().genesis_hash; + + assert_eq!( + genesis_hash.clone(), + longest_chain_select.finality_target(genesis_hash.clone(), None).unwrap().unwrap() + ); +} + +#[test] +fn best_containing_with_hash_not_found() { + // block tree: + // G + + let (client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); + + let uninserted_block = client.new_block(Default::default()).unwrap().build().unwrap().block; + + assert_eq!( + None, + longest_chain_select.finality_target(uninserted_block.hash().clone(), None).unwrap() + ); +} + +#[test] +fn uncles_with_only_ancestors() { + // block tree: + // G -> A1 -> A2 + let mut client = substrate_test_runtime_client::new(); + + // G -> A1 + let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + // A1 -> A2 + let a2 = client.new_block(Default::default()).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + let v: Vec = Vec::new(); + assert_eq!(v, client.uncles(a2.hash(), 3).unwrap()); +} + +#[test] +fn uncles_with_multiple_forks() { + // 
block tree: + // G -> A1 -> A2 -> A3 -> A4 -> A5 + // A1 -> B2 -> B3 -> B4 + // B2 -> C3 + // A1 -> D2 + let mut client = substrate_test_runtime_client::new(); + + // G -> A1 + let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + // A1 -> A2 + let a2 = client.new_block_at( + &BlockId::Hash(a1.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + + // A2 -> A3 + let a3 = client.new_block_at( + &BlockId::Hash(a2.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a3.clone()).unwrap(); + + // A3 -> A4 + let a4 = client.new_block_at( + &BlockId::Hash(a3.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a4.clone()).unwrap(); + + // A4 -> A5 + let a5 = client.new_block_at( + &BlockId::Hash(a4.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a5.clone()).unwrap(); + + // A1 -> B2 + let mut builder = client.new_block_at( + &BlockId::Hash(a1.hash()), + Default::default(), + false, + ).unwrap(); + // this push is required as otherwise B2 has the same hash as A2 and won't get imported + builder.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 41, + nonce: 0, + }).unwrap(); + let b2 = builder.build().unwrap().block; + client.import(BlockOrigin::Own, b2.clone()).unwrap(); + + // B2 -> B3 + let b3 = client.new_block_at( + &BlockId::Hash(b2.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, b3.clone()).unwrap(); + + // B3 -> B4 + let b4 = client.new_block_at( + &BlockId::Hash(b3.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, b4.clone()).unwrap(); + + // // B2 -> C3 + let mut builder = client.new_block_at( + &BlockId::Hash(b2.hash()), + Default::default(), + false, + ).unwrap(); + // this push is required as otherwise C3 has the same hash as B3 and won't get imported + builder.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 1, + }).unwrap(); + let c3 = builder.build().unwrap().block; + client.import(BlockOrigin::Own, c3.clone()).unwrap(); + + // A1 -> D2 + let mut builder = client.new_block_at( + &BlockId::Hash(a1.hash()), + Default::default(), + false, + ).unwrap(); + // this push is required as otherwise D2 has the same hash as B2 and won't get imported + builder.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }).unwrap(); + let d2 = builder.build().unwrap().block; + client.import(BlockOrigin::Own, d2.clone()).unwrap(); + + let genesis_hash = client.chain_info().genesis_hash; + + let uncles1 = client.uncles(a4.hash(), 10).unwrap(); + assert_eq!(vec![b2.hash(), d2.hash()], uncles1); + + let uncles2 = client.uncles(a4.hash(), 0).unwrap(); + assert_eq!(0, uncles2.len()); + + let uncles3 = client.uncles(a1.hash(), 10).unwrap(); + assert_eq!(0, uncles3.len()); + + let uncles4 = client.uncles(genesis_hash, 10).unwrap(); + assert_eq!(0, uncles4.len()); + + let uncles5 = client.uncles(d2.hash(), 10).unwrap(); + assert_eq!(vec![a2.hash(), b2.hash()], uncles5); + + let uncles6 = client.uncles(b3.hash(), 1).unwrap(); + 
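// Editor's note (illustrative sketch, not part of this patch): every fork
// block above (B2, C3, D2) pushes a transfer only so that its hash differs
// from the sibling built on the same parent; with an identical hash the block
// would not be imported. A hypothetical helper capturing that pattern, using
// only calls that already appear in this test, could look like this:
fn build_fork_block(
    client: &mut substrate_test_runtime_client::TestClient,
    parent: Hash,
    nonce: u64,
    amount: u64,
) -> Block {
    let mut builder = client.new_block_at(
        &BlockId::Hash(parent),
        Default::default(),
        false,
    ).unwrap();
    // The transfer's only job is to disambiguate the block hash from its sibling.
    builder.push_transfer(Transfer {
        from: AccountKeyring::Alice.into(),
        to: AccountKeyring::Ferdie.into(),
        amount,
        nonce,
    }).unwrap();
    builder.build().unwrap().block
}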
assert_eq!(vec![c3.hash()], uncles6); +} + +#[test] +fn best_containing_on_longest_chain_with_single_chain_3_blocks() { + // block tree: + // G -> A1 -> A2 + + let (mut client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); + + // G -> A1 + let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + // A1 -> A2 + let a2 = client.new_block(Default::default()).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + + let genesis_hash = client.chain_info().genesis_hash; + + assert_eq!(a2.hash(), longest_chain_select.finality_target(genesis_hash, None).unwrap().unwrap()); + assert_eq!(a2.hash(), longest_chain_select.finality_target(a1.hash(), None).unwrap().unwrap()); + assert_eq!(a2.hash(), longest_chain_select.finality_target(a2.hash(), None).unwrap().unwrap()); +} + +#[test] +fn best_containing_on_longest_chain_with_multiple_forks() { + // block tree: + // G -> A1 -> A2 -> A3 -> A4 -> A5 + // A1 -> B2 -> B3 -> B4 + // B2 -> C3 + // A1 -> D2 + let (mut client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); + + // G -> A1 + let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + // A1 -> A2 + let a2 = client.new_block_at( + &BlockId::Hash(a1.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + + // A2 -> A3 + let a3 = client.new_block_at( + &BlockId::Hash(a2.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a3.clone()).unwrap(); + + // A3 -> A4 + let a4 = client.new_block_at( + &BlockId::Hash(a3.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a4.clone()).unwrap(); + + // A4 -> A5 + let a5 = client.new_block_at( + &BlockId::Hash(a4.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a5.clone()).unwrap(); + + // A1 -> B2 + let mut builder = client.new_block_at( + &BlockId::Hash(a1.hash()), + Default::default(), + false, + ).unwrap(); + // this push is required as otherwise B2 has the same hash as A2 and won't get imported + builder.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 41, + nonce: 0, + }).unwrap(); + let b2 = builder.build().unwrap().block; + client.import(BlockOrigin::Own, b2.clone()).unwrap(); + + // B2 -> B3 + let b3 = client.new_block_at( + &BlockId::Hash(b2.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, b3.clone()).unwrap(); + + // B3 -> B4 + let b4 = client.new_block_at( + &BlockId::Hash(b3.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, b4.clone()).unwrap(); + + // // B2 -> C3 + let mut builder = client.new_block_at( + &BlockId::Hash(b2.hash()), + Default::default(), + false, + ).unwrap(); + // this push is required as otherwise C3 has the same hash as B3 and won't get imported + builder.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 1, + }).unwrap(); + let c3 = builder.build().unwrap().block; + client.import(BlockOrigin::Own, c3.clone()).unwrap(); + + // A1 -> D2 + let mut builder = client.new_block_at( + 
&BlockId::Hash(a1.hash()), + Default::default(), + false, + ).unwrap(); + // this push is required as otherwise D2 has the same hash as B2 and won't get imported + builder.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }).unwrap(); + let d2 = builder.build().unwrap().block; + client.import(BlockOrigin::Own, d2.clone()).unwrap(); + + assert_eq!(client.chain_info().best_hash, a5.hash()); + + let genesis_hash = client.chain_info().genesis_hash; + let leaves = longest_chain_select.leaves().unwrap(); + + assert!(leaves.contains(&a5.hash())); + assert!(leaves.contains(&b4.hash())); + assert!(leaves.contains(&c3.hash())); + assert!(leaves.contains(&d2.hash())); + assert_eq!(leaves.len(), 4); + + // search without restriction + + assert_eq!(a5.hash(), longest_chain_select.finality_target( + genesis_hash, None).unwrap().unwrap()); + assert_eq!(a5.hash(), longest_chain_select.finality_target( + a1.hash(), None).unwrap().unwrap()); + assert_eq!(a5.hash(), longest_chain_select.finality_target( + a2.hash(), None).unwrap().unwrap()); + assert_eq!(a5.hash(), longest_chain_select.finality_target( + a3.hash(), None).unwrap().unwrap()); + assert_eq!(a5.hash(), longest_chain_select.finality_target( + a4.hash(), None).unwrap().unwrap()); + assert_eq!(a5.hash(), longest_chain_select.finality_target( + a5.hash(), None).unwrap().unwrap()); + + assert_eq!(b4.hash(), longest_chain_select.finality_target( + b2.hash(), None).unwrap().unwrap()); + assert_eq!(b4.hash(), longest_chain_select.finality_target( + b3.hash(), None).unwrap().unwrap()); + assert_eq!(b4.hash(), longest_chain_select.finality_target( + b4.hash(), None).unwrap().unwrap()); + + assert_eq!(c3.hash(), longest_chain_select.finality_target( + c3.hash(), None).unwrap().unwrap()); + + assert_eq!(d2.hash(), longest_chain_select.finality_target( + d2.hash(), None).unwrap().unwrap()); + + + // search only blocks with number <= 5. 
equivalent to without restriction for this scenario + + assert_eq!(a5.hash(), longest_chain_select.finality_target( + genesis_hash, Some(5)).unwrap().unwrap()); + assert_eq!(a5.hash(), longest_chain_select.finality_target( + a1.hash(), Some(5)).unwrap().unwrap()); + assert_eq!(a5.hash(), longest_chain_select.finality_target( + a2.hash(), Some(5)).unwrap().unwrap()); + assert_eq!(a5.hash(), longest_chain_select.finality_target( + a3.hash(), Some(5)).unwrap().unwrap()); + assert_eq!(a5.hash(), longest_chain_select.finality_target( + a4.hash(), Some(5)).unwrap().unwrap()); + assert_eq!(a5.hash(), longest_chain_select.finality_target( + a5.hash(), Some(5)).unwrap().unwrap()); + + assert_eq!(b4.hash(), longest_chain_select.finality_target( + b2.hash(), Some(5)).unwrap().unwrap()); + assert_eq!(b4.hash(), longest_chain_select.finality_target( + b3.hash(), Some(5)).unwrap().unwrap()); + assert_eq!(b4.hash(), longest_chain_select.finality_target( + b4.hash(), Some(5)).unwrap().unwrap()); + + assert_eq!(c3.hash(), longest_chain_select.finality_target( + c3.hash(), Some(5)).unwrap().unwrap()); + + assert_eq!(d2.hash(), longest_chain_select.finality_target( + d2.hash(), Some(5)).unwrap().unwrap()); + + + // search only blocks with number <= 4 + + assert_eq!(a4.hash(), longest_chain_select.finality_target( + genesis_hash, Some(4)).unwrap().unwrap()); + assert_eq!(a4.hash(), longest_chain_select.finality_target( + a1.hash(), Some(4)).unwrap().unwrap()); + assert_eq!(a4.hash(), longest_chain_select.finality_target( + a2.hash(), Some(4)).unwrap().unwrap()); + assert_eq!(a4.hash(), longest_chain_select.finality_target( + a3.hash(), Some(4)).unwrap().unwrap()); + assert_eq!(a4.hash(), longest_chain_select.finality_target( + a4.hash(), Some(4)).unwrap().unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + a5.hash(), Some(4)).unwrap()); + + assert_eq!(b4.hash(), longest_chain_select.finality_target( + b2.hash(), Some(4)).unwrap().unwrap()); + assert_eq!(b4.hash(), longest_chain_select.finality_target( + b3.hash(), Some(4)).unwrap().unwrap()); + assert_eq!(b4.hash(), longest_chain_select.finality_target( + b4.hash(), Some(4)).unwrap().unwrap()); + + assert_eq!(c3.hash(), longest_chain_select.finality_target( + c3.hash(), Some(4)).unwrap().unwrap()); + + assert_eq!(d2.hash(), longest_chain_select.finality_target( + d2.hash(), Some(4)).unwrap().unwrap()); + + + // search only blocks with number <= 3 + + assert_eq!(a3.hash(), longest_chain_select.finality_target( + genesis_hash, Some(3)).unwrap().unwrap()); + assert_eq!(a3.hash(), longest_chain_select.finality_target( + a1.hash(), Some(3)).unwrap().unwrap()); + assert_eq!(a3.hash(), longest_chain_select.finality_target( + a2.hash(), Some(3)).unwrap().unwrap()); + assert_eq!(a3.hash(), longest_chain_select.finality_target( + a3.hash(), Some(3)).unwrap().unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + a4.hash(), Some(3)).unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + a5.hash(), Some(3)).unwrap()); + + assert_eq!(b3.hash(), longest_chain_select.finality_target( + b2.hash(), Some(3)).unwrap().unwrap()); + assert_eq!(b3.hash(), longest_chain_select.finality_target( + b3.hash(), Some(3)).unwrap().unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + b4.hash(), Some(3)).unwrap()); + + assert_eq!(c3.hash(), longest_chain_select.finality_target( + c3.hash(), Some(3)).unwrap().unwrap()); + + assert_eq!(d2.hash(), longest_chain_select.finality_target( + d2.hash(), Some(3)).unwrap().unwrap()); + + 
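+ // note: once the number limit falls below a block's own height there is no valid target on its chain, which is why the searches below expect `None` for those blocks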
+ // search only blocks with number <= 2 + + assert_eq!(a2.hash(), longest_chain_select.finality_target( + genesis_hash, Some(2)).unwrap().unwrap()); + assert_eq!(a2.hash(), longest_chain_select.finality_target( + a1.hash(), Some(2)).unwrap().unwrap()); + assert_eq!(a2.hash(), longest_chain_select.finality_target( + a2.hash(), Some(2)).unwrap().unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + a3.hash(), Some(2)).unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + a4.hash(), Some(2)).unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + a5.hash(), Some(2)).unwrap()); + + assert_eq!(b2.hash(), longest_chain_select.finality_target( + b2.hash(), Some(2)).unwrap().unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + b3.hash(), Some(2)).unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + b4.hash(), Some(2)).unwrap()); + + assert_eq!(None, longest_chain_select.finality_target( + c3.hash(), Some(2)).unwrap()); + + assert_eq!(d2.hash(), longest_chain_select.finality_target( + d2.hash(), Some(2)).unwrap().unwrap()); + + + // search only blocks with number <= 1 + + assert_eq!(a1.hash(), longest_chain_select.finality_target( + genesis_hash, Some(1)).unwrap().unwrap()); + assert_eq!(a1.hash(), longest_chain_select.finality_target( + a1.hash(), Some(1)).unwrap().unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + a2.hash(), Some(1)).unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + a3.hash(), Some(1)).unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + a4.hash(), Some(1)).unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + a5.hash(), Some(1)).unwrap()); + + assert_eq!(None, longest_chain_select.finality_target( + b2.hash(), Some(1)).unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + b3.hash(), Some(1)).unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + b4.hash(), Some(1)).unwrap()); + + assert_eq!(None, longest_chain_select.finality_target( + c3.hash(), Some(1)).unwrap()); + + assert_eq!(None, longest_chain_select.finality_target( + d2.hash(), Some(1)).unwrap()); + + // search only blocks with number <= 0 + + assert_eq!(genesis_hash, longest_chain_select.finality_target( + genesis_hash, Some(0)).unwrap().unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + a1.hash(), Some(0)).unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + a2.hash(), Some(0)).unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + a3.hash(), Some(0)).unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + a4.hash(), Some(0)).unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + a5.hash(), Some(0)).unwrap()); + + assert_eq!(None, longest_chain_select.finality_target( + b2.hash(), Some(0)).unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + b3.hash(), Some(0)).unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + b4.hash(), Some(0)).unwrap()); + + assert_eq!(None, longest_chain_select.finality_target( + c3.hash().clone(), Some(0)).unwrap()); + + assert_eq!(None, longest_chain_select.finality_target( + d2.hash().clone(), Some(0)).unwrap()); +} + +#[test] +fn best_containing_on_longest_chain_with_max_depth_higher_than_best() { + // block tree: + // G -> A1 -> A2 + + let (mut client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); + + // G -> A1 + let a1 = 
client.new_block(Default::default()).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + // A1 -> A2 + let a2 = client.new_block(Default::default()).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + + let genesis_hash = client.chain_info().genesis_hash; + + assert_eq!(a2.hash(), longest_chain_select.finality_target(genesis_hash, Some(10)).unwrap().unwrap()); +} + +#[test] +fn key_changes_works() { + let (client, _, test_cases) = prepare_client_with_key_changes(); + + for (index, (begin, end, key, expected_result)) in test_cases.into_iter().enumerate() { + let end = client.block_hash(end).unwrap().unwrap(); + let actual_result = client.key_changes( + begin, + BlockId::Hash(end), + None, + &StorageKey(key), + ).unwrap(); + match actual_result == expected_result { + true => (), + false => panic!(format!("Failed test {}: actual = {:?}, expected = {:?}", + index, actual_result, expected_result)), + } + } +} + +#[test] +fn import_with_justification() { + let mut client = substrate_test_runtime_client::new(); + + // G -> A1 + let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + // A1 -> A2 + let a2 = client.new_block_at( + &BlockId::Hash(a1.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + + // A2 -> A3 + let justification = vec![1, 2, 3]; + let a3 = client.new_block_at( + &BlockId::Hash(a2.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import_justified(BlockOrigin::Own, a3.clone(), justification.clone()).unwrap(); + + assert_eq!( + client.chain_info().finalized_hash, + a3.hash(), + ); + + assert_eq!( + client.justification(&BlockId::Hash(a3.hash())).unwrap(), + Some(justification), + ); + + assert_eq!( + client.justification(&BlockId::Hash(a1.hash())).unwrap(), + None, + ); + + assert_eq!( + client.justification(&BlockId::Hash(a2.hash())).unwrap(), + None, + ); +} + +#[test] +fn importing_diverged_finalized_block_should_trigger_reorg() { + let mut client = substrate_test_runtime_client::new(); + + // G -> A1 -> A2 + // \ + // -> B1 + let a1 = client.new_block_at( + &BlockId::Number(0), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + let a2 = client.new_block_at( + &BlockId::Hash(a1.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + + let mut b1 = client.new_block_at( + &BlockId::Number(0), + Default::default(), + false, + ).unwrap(); + // needed to make sure B1 gets a different hash from A1 + b1.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }).unwrap(); + // create but don't import B1 just yet + let b1 = b1.build().unwrap().block; + + // A2 is the current best since it's the longest chain + assert_eq!( + client.chain_info().best_hash, + a2.hash(), + ); + + // importing B1 as finalized should trigger a re-org and set it as new best + let justification = vec![1, 2, 3]; + client.import_justified(BlockOrigin::Own, b1.clone(), justification).unwrap(); + + assert_eq!( + client.chain_info().best_hash, + b1.hash(), + ); + + assert_eq!( + client.chain_info().finalized_hash, + b1.hash(), + ); +} + +#[test] +fn finalizing_diverged_block_should_trigger_reorg() { + let (mut client, 
select_chain) = TestClientBuilder::new().build_with_longest_chain(); + + // G -> A1 -> A2 + // \ + // -> B1 -> B2 + let a1 = client.new_block_at( + &BlockId::Number(0), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + let a2 = client.new_block_at( + &BlockId::Hash(a1.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + + let mut b1 = client.new_block_at( + &BlockId::Number(0), + Default::default(), + false, + ).unwrap(); + // needed to make sure B1 gets a different hash from A1 + b1.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }).unwrap(); + let b1 = b1.build().unwrap().block; + client.import(BlockOrigin::Own, b1.clone()).unwrap(); + + let b2 = client.new_block_at( + &BlockId::Hash(b1.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, b2.clone()).unwrap(); + + // A2 is the current best since it's the longest chain + assert_eq!( + client.chain_info().best_hash, + a2.hash(), + ); + + // we finalize block B1 which is on a different branch from current best + // which should trigger a re-org. + ClientExt::finalize_block(&client, BlockId::Hash(b1.hash()), None).unwrap(); + + // B1 should now be the latest finalized + assert_eq!( + client.chain_info().finalized_hash, + b1.hash(), + ); + + // and B1 should be the new best block (`finalize_block` has no way of + // knowing about B2) + assert_eq!( + client.chain_info().best_hash, + b1.hash(), + ); + + // `SelectChain` should report B2 as best block though + assert_eq!( + select_chain.best_chain().unwrap().hash(), + b2.hash(), + ); + + // after we build B3 on top of B2 and import it + // it should be the new best block, + let b3 = client.new_block_at( + &BlockId::Hash(b2.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, b3.clone()).unwrap(); + + assert_eq!( + client.chain_info().best_hash, + b3.hash(), + ); +} + +#[test] +fn get_header_by_block_number_doesnt_panic() { + let client = substrate_test_runtime_client::new(); + + // backend uses u32 for block numbers, make sure we don't panic when + // trying to convert + let id = BlockId::<Block>::Number(72340207214430721); + client.header(&id).expect_err("invalid block number overflows u32"); +} + +#[test] +fn state_reverted_on_reorg() { + let _ = env_logger::try_init(); + let mut client = substrate_test_runtime_client::new(); + + let current_balance = |client: &substrate_test_runtime_client::TestClient| + client.runtime_api().balance_of( + &BlockId::number(client.chain_info().best_number), AccountKeyring::Alice.into(), + ).unwrap(); + + // G -> A1 -> A2 + // \ + // -> B1 + let mut a1 = client.new_block_at( + &BlockId::Number(0), + Default::default(), + false, + ).unwrap(); + a1.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Bob.into(), + amount: 10, + nonce: 0, + }).unwrap(); + let a1 = a1.build().unwrap().block; + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + let mut b1 = client.new_block_at( + &BlockId::Number(0), + Default::default(), + false, + ).unwrap(); + b1.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 50, + nonce: 0, + }).unwrap(); + let b1 = b1.build().unwrap().block; + // Reorg to B1 + client.import_as_best(BlockOrigin::Own, 
b1.clone()).unwrap(); + + assert_eq!(950, current_balance(&client)); + let mut a2 = client.new_block_at( + &BlockId::Hash(a1.hash()), + Default::default(), + false, + ).unwrap(); + a2.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Charlie.into(), + amount: 10, + nonce: 1, + }).unwrap(); + let a2 = a2.build().unwrap().block; + // Re-org to A2 + client.import_as_best(BlockOrigin::Own, a2).unwrap(); + assert_eq!(980, current_balance(&client)); +} + +#[test] +fn doesnt_import_blocks_that_revert_finality() { + let _ = env_logger::try_init(); + let tmp = tempfile::tempdir().unwrap(); + + // we need to run with archive pruning to avoid pruning non-canonical + // states + let backend = Arc::new(Backend::new( + DatabaseSettings { + state_cache_size: 1 << 20, + state_cache_child_ratio: None, + pruning: PruningMode::ArchiveAll, + source: DatabaseSettingsSrc::RocksDb { + path: tmp.path().into(), + cache_size: 1024, + }, + }, + u64::max_value(), + ).unwrap()); + + let mut client = TestClientBuilder::with_backend(backend).build(); + + // -> C1 + // / + // G -> A1 -> A2 + // \ + // -> B1 -> B2 -> B3 + + let a1 = client.new_block_at( + &BlockId::Number(0), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + let a2 = client.new_block_at( + &BlockId::Hash(a1.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + + let mut b1 = client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); + + // needed to make sure B1 gets a different hash from A1 + b1.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }).unwrap(); + let b1 = b1.build().unwrap().block; + client.import(BlockOrigin::Own, b1.clone()).unwrap(); + + let b2 = client.new_block_at(&BlockId::Hash(b1.hash()), Default::default(), false) + .unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, b2.clone()).unwrap(); + + // prepare B3 before we finalize A2, because otherwise we won't be able to + // read changes trie configuration after A2 is finalized + let b3 = client.new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap().build().unwrap().block; + + // we will finalize A2 which should make it impossible to import a new + // B3 at the same height but that doesn't include it + ClientExt::finalize_block(&client, BlockId::Hash(a2.hash()), None).unwrap(); + + let import_err = client.import(BlockOrigin::Own, b3).err().unwrap(); + let expected_err = ConsensusError::ClientImport( + sp_blockchain::Error::NotInFinalizedChain.to_string() + ); + + assert_eq!( + import_err.to_string(), + expected_err.to_string(), + ); + + // adding a C1 block which is lower than the last finalized should also + // fail (with a cheaper check that doesn't require checking ancestry). 
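+ // (C1 would sit at height 1 while block number 2 is already finalized, so the height comparison alone is enough to reject it)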
+ let mut c1 = client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); + + // needed to make sure C1 gets a different hash from A1 and B1 + c1.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 2, + nonce: 0, + }).unwrap(); + let c1 = c1.build().unwrap().block; + + let import_err = client.import(BlockOrigin::Own, c1).err().unwrap(); + let expected_err = ConsensusError::ClientImport( + sp_blockchain::Error::NotInFinalizedChain.to_string() + ); + + assert_eq!( + import_err.to_string(), + expected_err.to_string(), + ); +} + + +#[test] +fn respects_block_rules() { + fn run_test( + record_only: bool, + known_bad: &mut HashSet<H256>, + fork_rules: &mut Vec<(u64, H256)>, + ) { + let mut client = if record_only { + TestClientBuilder::new().build() + } else { + TestClientBuilder::new() + .set_block_rules( + Some(fork_rules.clone()), + Some(known_bad.clone()), + ) + .build() + }; + + let block_ok = client.new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap().build().unwrap().block; + + let params = BlockCheckParams { + hash: block_ok.hash().clone(), + number: 0, + parent_hash: block_ok.header().parent_hash().clone(), + allow_missing_state: false, + import_existing: false, + }; + assert_eq!(client.check_block(params).unwrap(), ImportResult::imported(false)); + + // this is 0x0d6d6612a10485370d9e085aeea7ec427fb3f34d961c6a816cdbe5cde2278864 + let mut block_not_ok = client.new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap(); + block_not_ok.push_storage_change(vec![0], Some(vec![1])).unwrap(); + let block_not_ok = block_not_ok.build().unwrap().block; + + let params = BlockCheckParams { + hash: block_not_ok.hash().clone(), + number: 0, + parent_hash: block_not_ok.header().parent_hash().clone(), + allow_missing_state: false, + import_existing: false, + }; + if record_only { + known_bad.insert(block_not_ok.hash()); + } else { + assert_eq!(client.check_block(params).unwrap(), ImportResult::KnownBad); + } + + // Now going to the fork + client.import_as_final(BlockOrigin::Own, block_ok).unwrap(); + + // And check good fork + let mut block_ok = client.new_block_at(&BlockId::Number(1), Default::default(), false) + .unwrap(); + block_ok.push_storage_change(vec![0], Some(vec![2])).unwrap(); + let block_ok = block_ok.build().unwrap().block; + + let params = BlockCheckParams { + hash: block_ok.hash().clone(), + number: 1, + parent_hash: block_ok.header().parent_hash().clone(), + allow_missing_state: false, + import_existing: false, + }; + if record_only { + fork_rules.push((1, block_ok.hash().clone())); + } + assert_eq!(client.check_block(params).unwrap(), ImportResult::imported(false)); + + // And now try bad fork + let mut block_not_ok = client.new_block_at(&BlockId::Number(1), Default::default(), false) + .unwrap(); + block_not_ok.push_storage_change(vec![0], Some(vec![3])).unwrap(); + let block_not_ok = block_not_ok.build().unwrap().block; + + let params = BlockCheckParams { + hash: block_not_ok.hash().clone(), + number: 1, + parent_hash: block_not_ok.header().parent_hash().clone(), + allow_missing_state: false, + import_existing: false, + }; + + if !record_only { + assert_eq!(client.check_block(params).unwrap(), ImportResult::KnownBad); + } + } + + let mut known_bad = HashSet::new(); + let mut fork_rules = Vec::new(); + + // records what bad_blocks and fork_blocks hashes should be + run_test(true, &mut known_bad, &mut fork_rules); + + // enforces rules and actually makes assertions + 
run_test(false, &mut known_bad, &mut fork_rules); +} + +#[test] +fn returns_status_for_pruned_blocks() { + let _ = env_logger::try_init(); + let tmp = tempfile::tempdir().unwrap(); + + // set to prune after 1 block + // states + let backend = Arc::new(Backend::new( + DatabaseSettings { + state_cache_size: 1 << 20, + state_cache_child_ratio: None, + pruning: PruningMode::keep_blocks(1), + source: DatabaseSettingsSrc::RocksDb { + path: tmp.path().into(), + cache_size: 1024, + }, + }, + u64::max_value(), + ).unwrap()); + + let mut client = TestClientBuilder::with_backend(backend).build(); + + let a1 = client.new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap().build().unwrap().block; + + let mut b1 = client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); + + // b1 is created, but not imported + b1.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }).unwrap(); + let b1 = b1.build().unwrap().block; + + let check_block_a1 = BlockCheckParams { + hash: a1.hash().clone(), + number: 0, + parent_hash: a1.header().parent_hash().clone(), + allow_missing_state: false, + import_existing: false, + }; + + assert_eq!(client.check_block(check_block_a1.clone()).unwrap(), ImportResult::imported(false)); + assert_eq!(client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), BlockStatus::Unknown); + + client.import_as_final(BlockOrigin::Own, a1.clone()).unwrap(); + + assert_eq!(client.check_block(check_block_a1.clone()).unwrap(), ImportResult::AlreadyInChain); + assert_eq!(client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), BlockStatus::InChainWithState); + + let a2 = client.new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap().build().unwrap().block; + client.import_as_final(BlockOrigin::Own, a2.clone()).unwrap(); + + let check_block_a2 = BlockCheckParams { + hash: a2.hash().clone(), + number: 1, + parent_hash: a1.header().parent_hash().clone(), + allow_missing_state: false, + import_existing: false, + }; + + assert_eq!(client.check_block(check_block_a1.clone()).unwrap(), ImportResult::AlreadyInChain); + assert_eq!(client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), BlockStatus::InChainPruned); + assert_eq!(client.check_block(check_block_a2.clone()).unwrap(), ImportResult::AlreadyInChain); + assert_eq!(client.block_status(&BlockId::hash(check_block_a2.hash)).unwrap(), BlockStatus::InChainWithState); + + let a3 = client.new_block_at(&BlockId::Hash(a2.hash()), Default::default(), false) + .unwrap().build().unwrap().block; + + client.import_as_final(BlockOrigin::Own, a3.clone()).unwrap(); + let check_block_a3 = BlockCheckParams { + hash: a3.hash().clone(), + number: 2, + parent_hash: a2.header().parent_hash().clone(), + allow_missing_state: false, + import_existing: false, + }; + + // a1 and a2 are both pruned at this point + assert_eq!(client.check_block(check_block_a1.clone()).unwrap(), ImportResult::AlreadyInChain); + assert_eq!(client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), BlockStatus::InChainPruned); + assert_eq!(client.check_block(check_block_a2.clone()).unwrap(), ImportResult::AlreadyInChain); + assert_eq!(client.block_status(&BlockId::hash(check_block_a2.hash)).unwrap(), BlockStatus::InChainPruned); + assert_eq!(client.check_block(check_block_a3.clone()).unwrap(), ImportResult::AlreadyInChain); + assert_eq!(client.block_status(&BlockId::hash(check_block_a3.hash)).unwrap(), BlockStatus::InChainWithState); 
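+ // b1 was never imported and its parent state (genesis) has been pruned by now, so the checks below should report missing state unless it is explicitly allowed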
+ + let mut check_block_b1 = BlockCheckParams { + hash: b1.hash().clone(), + number: 0, + parent_hash: b1.header().parent_hash().clone(), + allow_missing_state: false, + import_existing: false, + }; + assert_eq!(client.check_block(check_block_b1.clone()).unwrap(), ImportResult::MissingState); + check_block_b1.allow_missing_state = true; + assert_eq!(client.check_block(check_block_b1.clone()).unwrap(), ImportResult::imported(false)); + check_block_b1.parent_hash = H256::random(); + assert_eq!(client.check_block(check_block_b1.clone()).unwrap(), ImportResult::UnknownParent); +} + +#[test] +fn imports_blocks_with_changes_tries_config_change() { + // create client with initial 4^2 configuration + let mut client = TestClientBuilder::with_default_backend() + .changes_trie_config(Some(ChangesTrieConfiguration { + digest_interval: 4, + digest_levels: 2, + })).build(); + + // =================================================================== + // blocks 1,2,3,4,5,6,7,8,9,10 are empty + // block 11 changes the key + // block 12 is the L1 digest that covers this change + // blocks 13,14,15,16,17,18,19,20,21,22 are empty + // block 23 changes the configuration to 5^1 AND is skewed digest + // =================================================================== + // blocks 24,25 are changing the key + // block 26 is empty + // block 27 changes the key + // block 28 is the L1 digest (NOT SKEWED!!!) that covers changes AND changes configuration to 3^1 + // =================================================================== + // block 29 is empty + // block 30 changes the key + // block 31 is L1 digest that covers this change + // =================================================================== + (1..11).for_each(|number| { + let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (11..12).for_each(|number| { + let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); + block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); + let block = block.build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (12..23).for_each(|number| { + let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (23..24).for_each(|number| { + let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); + block.push_changes_trie_configuration_update(Some(ChangesTrieConfiguration { + digest_interval: 5, + digest_levels: 1, + })).unwrap(); + let block = block.build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (24..26).for_each(|number| { + let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); + block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); + let block = block.build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (26..27).for_each(|number| { + let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (27..28).for_each(|number| { + let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); + block.push_storage_change(vec![42], 
Some(number.to_le_bytes().to_vec())).unwrap(); + let block = block.build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (28..29).for_each(|number| { + let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); + block.push_changes_trie_configuration_update(Some(ChangesTrieConfiguration { + digest_interval: 3, + digest_levels: 1, + })).unwrap(); + let block = block.build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (29..30).for_each(|number| { + let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (30..31).for_each(|number| { + let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); + block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); + let block = block.build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (31..32).for_each(|number| { + let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + + // now check that configuration cache works + assert_eq!( + client.key_changes(1, BlockId::Number(31), None, &StorageKey(vec![42])).unwrap(), + vec![(30, 0), (27, 0), (25, 0), (24, 0), (11, 0)] + ); +} + +#[test] +fn storage_keys_iter_prefix_and_start_key_works() { + let client = substrate_test_runtime_client::new(); + + let prefix = StorageKey(hex!("3a").to_vec()); + + let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), None) + .unwrap() + .map(|x| x.0) + .collect(); + assert_eq!(res, [hex!("3a636f6465").to_vec(), hex!("3a686561707061676573").to_vec()]); + + let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), Some(&StorageKey(hex!("3a636f6465").to_vec()))) + .unwrap() + .map(|x| x.0) + .collect(); + assert_eq!(res, [hex!("3a686561707061676573").to_vec()]); + + let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), Some(&StorageKey(hex!("3a686561707061676573").to_vec()))) + .unwrap() + .map(|x| x.0) + .collect(); + assert_eq!(res, Vec::<Vec<u8>>::new()); +} + +#[test] +fn storage_keys_iter_works() { + let client = substrate_test_runtime_client::new(); + + let prefix = StorageKey(hex!("").to_vec()); + + let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), None) + .unwrap() + .take(2) + .map(|x| x.0) + .collect(); + assert_eq!(res, [hex!("0befda6e1ca4ef40219d588a727f1271").to_vec(), hex!("3a636f6465").to_vec()]); + + let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), Some(&StorageKey(hex!("3a636f6465").to_vec()))) + .unwrap() + .take(3) + .map(|x| x.0) + .collect(); + assert_eq!(res, [ + hex!("3a686561707061676573").to_vec(), + hex!("6644b9b8bc315888ac8e41a7968dc2b4141a5403c58acdf70b7e8f7e07bf5081").to_vec(), + hex!("79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d").to_vec(), + ]); + + let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), Some(&StorageKey(hex!("79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d").to_vec()))) + .unwrap() + .take(1) + .map(|x| x.0) + .collect(); + assert_eq!(res, [hex!("cf722c0832b5231d35e29f319ff27389f5032bfc7bfc3ba5ed7839f2042fb99f").to_vec()]); +} + +#[test] +fn cleans_up_closed_notification_sinks_on_block_import() { + use 
substrate_test_runtime_client::GenesisInit; + + // NOTE: we need to build the client here instead of using the client + // provided by test_runtime_client otherwise we can't access the private + // `import_notification_sinks` and `finality_notification_sinks` fields. + let mut client = + new_in_mem::< + _, + substrate_test_runtime_client::runtime::Block, + _, + substrate_test_runtime_client::runtime::RuntimeApi + >( + substrate_test_runtime_client::new_native_executor(), + &substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), + None, + None, + sp_core::tasks::executor(), + Default::default(), + ) + .unwrap(); + + type TestClient = Client< + in_mem::Backend<Block>, + LocalCallExecutor<in_mem::Backend<Block>, sc_executor::NativeExecutor<substrate_test_runtime_client::LocalExecutor>>, + substrate_test_runtime_client::runtime::Block, + substrate_test_runtime_client::runtime::RuntimeApi, + >; + + let import_notif1 = client.import_notification_stream(); + let import_notif2 = client.import_notification_stream(); + let finality_notif1 = client.finality_notification_stream(); + let finality_notif2 = client.finality_notification_stream(); + + // for some reason I can't seem to use `ClientBlockImportExt` + let bake_and_import_block = |client: &mut TestClient, origin| { + let block = client + .new_block(Default::default()) + .unwrap() + .build() + .unwrap() + .block; + + let (header, extrinsics) = block.deconstruct(); + let mut import = BlockImportParams::new(origin, header); + import.body = Some(extrinsics); + import.fork_choice = Some(ForkChoiceStrategy::LongestChain); + client.import_block(import, Default::default()).unwrap(); + }; + + // after importing a block we should still have 4 notification sinks + // (2 import + 2 finality) + bake_and_import_block(&mut client, BlockOrigin::Own); + assert_eq!(client.import_notification_sinks().lock().len(), 2); + assert_eq!(client.finality_notification_sinks().lock().len(), 2); + + // if we drop one import notification receiver and one finality + // notification receiver + drop(import_notif2); + drop(finality_notif2); + + // the sinks should be cleaned up after block import + bake_and_import_block(&mut client, BlockOrigin::Own); + assert_eq!(client.import_notification_sinks().lock().len(), 1); + assert_eq!(client.finality_notification_sinks().lock().len(), 1); + + // the same thing should happen if block import happens during initial + // sync + drop(import_notif1); + drop(finality_notif1); + + bake_and_import_block(&mut client, BlockOrigin::NetworkInitialSync); + assert_eq!(client.import_notification_sinks().lock().len(), 0); + assert_eq!(client.finality_notification_sinks().lock().len(), 0); +} + diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index b7d01c32c0f12..fd451ffc8d55f 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -37,11 +37,15 @@ use sc_service::{ Role, Error, }; +use sp_blockchain::HeaderBackend; use sc_network::{multiaddr, Multiaddr}; use sc_network::config::{NetworkConfiguration, TransportConfig}; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; use sp_transaction_pool::TransactionPool; +#[cfg(test)] +mod client; + /// Maximum duration of single wait call. 
const MAX_WAIT_TIME: Duration = Duration::from_secs(60 * 3); @@ -462,15 +466,15 @@ pub fn sync( } network.run_until_all_full( |_index, service| - service.get().client().chain_info().best_number == (NUM_BLOCKS as u32).into(), + service.get().client().info().best_number == (NUM_BLOCKS as u32).into(), |_index, service| - service.get().client().chain_info().best_number == (NUM_BLOCKS as u32).into(), + service.get().client().info().best_number == (NUM_BLOCKS as u32).into(), ); info!("Checking extrinsic propagation"); let first_service = network.full_nodes[0].1.clone(); let first_user_data = &network.full_nodes[0].2; - let best_block = BlockId::number(first_service.get().client().chain_info().best_number); + let best_block = BlockId::number(first_service.get().client().info().best_number); let extrinsic = extrinsic_factory(&first_service.get(), first_user_data); let source = sp_transaction_pool::TransactionSource::External; @@ -523,9 +527,9 @@ pub fn consensus( } network.run_until_all_full( |_index, service| - service.get().client().chain_info().finalized_number >= (NUM_BLOCKS as u32 / 2).into(), + service.get().client().info().finalized_number >= (NUM_BLOCKS as u32 / 2).into(), |_index, service| - service.get().client().chain_info().best_number >= (NUM_BLOCKS as u32 / 2).into(), + service.get().client().info().best_number >= (NUM_BLOCKS as u32 / 2).into(), ); info!("Adding more peers"); @@ -545,8 +549,8 @@ pub fn consensus( } network.run_until_all_full( |_index, service| - service.get().client().chain_info().finalized_number >= (NUM_BLOCKS as u32).into(), + service.get().client().info().finalized_number >= (NUM_BLOCKS as u32).into(), |_index, service| - service.get().client().chain_info().best_number >= (NUM_BLOCKS as u32).into(), + service.get().client().info().best_number >= (NUM_BLOCKS as u32).into(), ); } diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs deleted file mode 100644 index ef6a062cf3c07..0000000000000 --- a/client/src/light/fetcher.rs +++ /dev/null @@ -1,833 +0,0 @@ -// Copyright 2017-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! Light client data fetcher. Fetches requested data from remote full nodes. 
- -use std::sync::Arc; -use std::collections::{BTreeMap, HashMap}; -use std::marker::PhantomData; - -use hash_db::{HashDB, Hasher, EMPTY_PREFIX}; -use codec::{Decode, Encode}; -use sp_core::{convert_hash, traits::CodeExecutor}; -use sp_core::storage::{ChildInfo, ChildType}; -use sp_runtime::traits::{ - Block as BlockT, Header as HeaderT, Hash, HashFor, NumberFor, - AtLeast32Bit, CheckedConversion, -}; -use sp_state_machine::{ - ChangesTrieRootsStorage, ChangesTrieAnchorBlockId, ChangesTrieConfigurationRange, - InMemoryChangesTrieStorage, TrieBackend, read_proof_check, key_changes_proof_check_with_db, - read_child_proof_check, CloneableSpawn, -}; -pub use sp_state_machine::StorageProof; -use sp_blockchain::{Error as ClientError, Result as ClientResult}; - -use crate::cht; -pub use sc_client_api::{ - light::{ - RemoteCallRequest, RemoteHeaderRequest, RemoteReadRequest, RemoteReadChildRequest, - RemoteChangesRequest, ChangesProof, RemoteBodyRequest, Fetcher, FetchChecker, - Storage as BlockchainStorage, - }, -}; -use crate::light::blockchain::{Blockchain}; -use crate::light::call_executor::check_execution_proof; - -/// Remote data checker. -pub struct LightDataChecker> { - blockchain: Arc>, - executor: E, - spawn_handle: Box, - _hasher: PhantomData<(B, H)>, -} - -impl> LightDataChecker { - /// Create new light data checker. - pub fn new(blockchain: Arc>, executor: E, spawn_handle: Box) -> Self { - Self { - blockchain, executor, spawn_handle, _hasher: PhantomData - } - } - - /// Check remote changes query proof assuming that CHT-s are of given size. - fn check_changes_proof_with_cht_size( - &self, - request: &RemoteChangesRequest, - remote_proof: ChangesProof, - cht_size: NumberFor, - ) -> ClientResult, u32)>> - where - H: Hasher, - H::Out: Ord + codec::Codec, - { - // since we need roots of all changes tries for the range begin..max - // => remote node can't use max block greater that one that we have passed - if remote_proof.max_block > request.max_block.0 || remote_proof.max_block < request.last_block.0 { - return Err(ClientError::ChangesTrieAccessFailed(format!( - "Invalid max_block used by the remote node: {}. Local: {}..{}..{}", - remote_proof.max_block, request.first_block.0, request.last_block.0, request.max_block.0, - )).into()); - } - - // check if remote node has responded with extra changes trie roots proofs - // all changes tries roots must be in range [request.first_block.0; request.tries_roots.0) - let is_extra_first_root = remote_proof.roots.keys().next() - .map(|first_root| *first_root < request.first_block.0 - || *first_root >= request.tries_roots.0) - .unwrap_or(false); - let is_extra_last_root = remote_proof.roots.keys().next_back() - .map(|last_root| *last_root >= request.tries_roots.0) - .unwrap_or(false); - if is_extra_first_root || is_extra_last_root { - return Err(ClientError::ChangesTrieAccessFailed(format!( - "Extra changes tries roots proofs provided by the remote node: [{:?}..{:?}]. 
Expected in range: [{}; {})", - remote_proof.roots.keys().next(), remote_proof.roots.keys().next_back(), - request.first_block.0, request.tries_roots.0, - )).into()); - } - - // if request has been composed when some required headers were already pruned - // => remote node has sent us CHT-based proof of required changes tries roots - // => check that this proof is correct before proceeding with changes proof - let remote_max_block = remote_proof.max_block; - let remote_roots = remote_proof.roots; - let remote_roots_proof = remote_proof.roots_proof; - let remote_proof = remote_proof.proof; - if !remote_roots.is_empty() { - self.check_changes_tries_proof( - cht_size, - &remote_roots, - remote_roots_proof, - )?; - } - - // and now check the key changes proof + get the changes - let mut result = Vec::new(); - let proof_storage = InMemoryChangesTrieStorage::with_proof(remote_proof); - for config_range in &request.changes_trie_configs { - let result_range = key_changes_proof_check_with_db::( - ChangesTrieConfigurationRange { - config: config_range.config.as_ref().ok_or(ClientError::ChangesTriesNotSupported)?, - zero: config_range.zero.0, - end: config_range.end.map(|(n, _)| n), - }, - &RootsStorage { - roots: (request.tries_roots.0, &request.tries_roots.2), - prev_roots: &remote_roots, - }, - &proof_storage, - request.first_block.0, - &ChangesTrieAnchorBlockId { - hash: convert_hash(&request.last_block.1), - number: request.last_block.0, - }, - remote_max_block, - request.storage_key.as_ref(), - &request.key) - .map_err(|err| ClientError::ChangesTrieAccessFailed(err))?; - result.extend(result_range); - } - - Ok(result) - } - - /// Check CHT-based proof for changes tries roots. - fn check_changes_tries_proof( - &self, - cht_size: NumberFor, - remote_roots: &BTreeMap, B::Hash>, - remote_roots_proof: StorageProof, - ) -> ClientResult<()> - where - H: Hasher, - H::Out: Ord + codec::Codec, - { - // all the checks are sharing the same storage - let storage = remote_roots_proof.into_memory_db(); - - // remote_roots.keys() are sorted => we can use this to group changes tries roots - // that are belongs to the same CHT - let blocks = remote_roots.keys().cloned(); - cht::for_each_cht_group::(cht_size, blocks, |mut storage, _, cht_blocks| { - // get local changes trie CHT root for given CHT - // it should be there, because it is never pruned AND request has been composed - // when required header has been pruned (=> replaced with CHT) - let first_block = cht_blocks.first().cloned() - .expect("for_each_cht_group never calls callback with empty groups"); - let local_cht_root = self.blockchain.storage().changes_trie_cht_root(cht_size, first_block)? 
- .ok_or(ClientError::InvalidCHTProof)?; - - // check changes trie root for every block within CHT range - for block in cht_blocks { - // check if the proofs storage contains the root - // normally this happens in when the proving backend is created, but since - // we share the storage for multiple checks, do it here - let mut cht_root = H::Out::default(); - cht_root.as_mut().copy_from_slice(local_cht_root.as_ref()); - if !storage.contains(&cht_root, EMPTY_PREFIX) { - return Err(ClientError::InvalidCHTProof.into()); - } - - // check proof for single changes trie root - let proving_backend = TrieBackend::new(storage, cht_root); - let remote_changes_trie_root = remote_roots[&block]; - cht::check_proof_on_proving_backend::( - local_cht_root, - block, - remote_changes_trie_root, - &proving_backend, - )?; - - // and return the storage to use in following checks - storage = proving_backend.into_storage(); - } - - Ok(storage) - }, storage) - } -} - -impl FetchChecker for LightDataChecker - where - Block: BlockT, - E: CodeExecutor + Clone + 'static, - H: Hasher, - H::Out: Ord + codec::Codec + 'static, - S: BlockchainStorage, -{ - fn check_header_proof( - &self, - request: &RemoteHeaderRequest, - remote_header: Option, - remote_proof: StorageProof, - ) -> ClientResult { - let remote_header = remote_header.ok_or_else(|| - ClientError::from(ClientError::InvalidCHTProof))?; - let remote_header_hash = remote_header.hash(); - cht::check_proof::( - request.cht_root, - request.block, - remote_header_hash, - remote_proof, - ).map(|_| remote_header) - } - - fn check_read_proof( - &self, - request: &RemoteReadRequest, - remote_proof: StorageProof, - ) -> ClientResult, Option>>> { - read_proof_check::( - convert_hash(request.header.state_root()), - remote_proof, - request.keys.iter(), - ).map_err(Into::into) - } - - fn check_read_child_proof( - &self, - request: &RemoteReadChildRequest, - remote_proof: StorageProof, - ) -> ClientResult, Option>>> { - let child_info = match ChildType::from_prefixed_key(&request.storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), - None => return Err("Invalid child type".into()), - }; - read_child_proof_check::( - convert_hash(request.header.state_root()), - remote_proof, - &child_info, - request.keys.iter(), - ).map_err(Into::into) - } - - fn check_execution_proof( - &self, - request: &RemoteCallRequest, - remote_proof: StorageProof, - ) -> ClientResult> { - check_execution_proof::<_, _, H>( - &self.executor, - self.spawn_handle.clone(), - request, - remote_proof, - ) - } - - fn check_changes_proof( - &self, - request: &RemoteChangesRequest, - remote_proof: ChangesProof - ) -> ClientResult, u32)>> { - self.check_changes_proof_with_cht_size(request, remote_proof, cht::size()) - } - - fn check_body_proof( - &self, - request: &RemoteBodyRequest, - body: Vec - ) -> ClientResult> { - // TODO: #2621 - let extrinsics_root = HashFor::::ordered_trie_root( - body.iter().map(Encode::encode).collect(), - ); - if *request.header.extrinsics_root() == extrinsics_root { - Ok(body) - } else { - Err(format!("RemoteBodyRequest: invalid extrinsics root expected: {} but got {}", - *request.header.extrinsics_root(), - extrinsics_root, - ).into()) - } - - } -} - -/// A view of BTreeMap as a changes trie roots storage. 
-struct RootsStorage<'a, Number: AtLeast32Bit, Hash: 'a> { - roots: (Number, &'a [Hash]), - prev_roots: &'a BTreeMap, -} - -impl<'a, H, Number, Hash> ChangesTrieRootsStorage for RootsStorage<'a, Number, Hash> - where - H: Hasher, - Number: std::fmt::Display + std::hash::Hash + Clone + AtLeast32Bit + Encode + Decode + Send + Sync + 'static, - Hash: 'a + Send + Sync + Clone + AsRef<[u8]>, -{ - fn build_anchor( - &self, - _hash: H::Out, - ) -> Result, String> { - Err("build_anchor is only called when building block".into()) - } - - fn root( - &self, - _anchor: &ChangesTrieAnchorBlockId, - block: Number, - ) -> Result, String> { - // we can't ask for roots from parallel forks here => ignore anchor - let root = if block < self.roots.0 { - self.prev_roots.get(&Number::unique_saturated_from(block)).cloned() - } else { - let index: Option = block.checked_sub(&self.roots.0).and_then(|index| index.checked_into()); - match index { - Some(index) => self.roots.1.get(index as usize).cloned(), - None => None, - } - }; - - Ok(root.map(|root| { - let mut hasher_root: H::Out = Default::default(); - hasher_root.as_mut().copy_from_slice(root.as_ref()); - hasher_root - })) - } -} - -#[cfg(test)] -pub mod tests { - use codec::Decode; - use crate::client::tests::prepare_client_with_key_changes; - use sc_executor::{NativeExecutor, WasmExecutionMethod}; - use sp_blockchain::Error as ClientError; - use sc_client_api::backend::NewBlockState; - use substrate_test_runtime_client::{ - blockchain::HeaderBackend, AccountKeyring, ClientBlockImportExt, - runtime::{self, Hash, Block, Header, Extrinsic}, - tasks_executor, - }; - use sp_consensus::BlockOrigin; - - use crate::in_mem::Blockchain as InMemoryBlockchain; - use crate::light::fetcher::{FetchChecker, LightDataChecker, RemoteHeaderRequest}; - use crate::light::blockchain::tests::{DummyStorage, DummyBlockchain}; - use sp_core::{blake2_256, ChangesTrieConfiguration, H256}; - use sp_core::storage::{well_known_keys, StorageKey, ChildInfo}; - use sp_runtime::{generic::BlockId, traits::BlakeTwo256}; - use sp_state_machine::Backend; - use super::*; - use sc_client_api::{StorageProvider, ProofProvider}; - use sc_block_builder::BlockBuilderProvider; - - type TestChecker = LightDataChecker< - NativeExecutor, - BlakeTwo256, - Block, - DummyStorage, - >; - - fn local_executor() -> NativeExecutor { - NativeExecutor::new(WasmExecutionMethod::Interpreted, None, 8) - } - - fn prepare_for_read_proof_check() -> (TestChecker, Header, StorageProof, u32) { - // prepare remote client - let remote_client = substrate_test_runtime_client::new(); - let remote_block_id = BlockId::Number(0); - let remote_block_hash = remote_client.block_hash(0).unwrap().unwrap(); - let mut remote_block_header = remote_client.header(&remote_block_id).unwrap().unwrap(); - remote_block_header.state_root = remote_client.state_at(&remote_block_id).unwrap() - .storage_root(::std::iter::empty()).0.into(); - - // 'fetch' read proof from remote node - let heap_pages = remote_client.storage(&remote_block_id, &StorageKey(well_known_keys::HEAP_PAGES.to_vec())) - .unwrap() - .and_then(|v| Decode::decode(&mut &v.0[..]).ok()).unwrap(); - let remote_read_proof = remote_client.read_proof( - &remote_block_id, - &mut std::iter::once(well_known_keys::HEAP_PAGES), - ).unwrap(); - - // check remote read proof locally - let local_storage = InMemoryBlockchain::::new(); - local_storage.insert( - remote_block_hash, - remote_block_header.clone(), - None, - None, - NewBlockState::Final, - ).unwrap(); - let local_checker = 
LightDataChecker::new( - Arc::new(DummyBlockchain::new(DummyStorage::new())), - local_executor(), - tasks_executor(), - ); - (local_checker, remote_block_header, remote_read_proof, heap_pages) - } - - fn prepare_for_read_child_proof_check() -> (TestChecker, Header, StorageProof, Vec) { - use substrate_test_runtime_client::DefaultTestClientBuilderExt; - use substrate_test_runtime_client::TestClientBuilderExt; - let child_info = ChildInfo::new_default(b"child1"); - let child_info = &child_info; - // prepare remote client - let remote_client = substrate_test_runtime_client::TestClientBuilder::new() - .add_extra_child_storage( - child_info, - b"key1".to_vec(), - b"value1".to_vec(), - ).build(); - let remote_block_id = BlockId::Number(0); - let remote_block_hash = remote_client.block_hash(0).unwrap().unwrap(); - let mut remote_block_header = remote_client.header(&remote_block_id).unwrap().unwrap(); - remote_block_header.state_root = remote_client.state_at(&remote_block_id).unwrap() - .storage_root(::std::iter::empty()).0.into(); - - // 'fetch' child read proof from remote node - let child_value = remote_client.child_storage( - &remote_block_id, - child_info, - &StorageKey(b"key1".to_vec()), - ).unwrap().unwrap().0; - assert_eq!(b"value1"[..], child_value[..]); - let remote_read_proof = remote_client.read_child_proof( - &remote_block_id, - child_info, - &mut std::iter::once("key1".as_bytes()), - ).unwrap(); - - // check locally - let local_storage = InMemoryBlockchain::::new(); - local_storage.insert( - remote_block_hash, - remote_block_header.clone(), - None, - None, - NewBlockState::Final, - ).unwrap(); - let local_checker = LightDataChecker::new( - Arc::new(DummyBlockchain::new(DummyStorage::new())), - local_executor(), - tasks_executor(), - ); - (local_checker, remote_block_header, remote_read_proof, child_value) - } - - fn prepare_for_header_proof_check(insert_cht: bool) -> (TestChecker, Hash, Header, StorageProof) { - // prepare remote client - let mut remote_client = substrate_test_runtime_client::new(); - let mut local_headers_hashes = Vec::new(); - for i in 0..4 { - let block = remote_client.new_block(Default::default()).unwrap().build().unwrap().block; - remote_client.import(BlockOrigin::Own, block).unwrap(); - local_headers_hashes.push( - remote_client.block_hash(i + 1) - .map_err(|_| ClientError::Backend("TestError".into())) - ); - } - - // 'fetch' header proof from remote node - let remote_block_id = BlockId::Number(1); - let (remote_block_header, remote_header_proof) = remote_client.header_proof_with_cht_size(&remote_block_id, 4).unwrap(); - - // check remote read proof locally - let local_storage = InMemoryBlockchain::::new(); - let local_cht_root = cht::compute_root::(4, 0, local_headers_hashes).unwrap(); - if insert_cht { - local_storage.insert_cht_root(1, local_cht_root); - } - let local_checker = LightDataChecker::new( - Arc::new(DummyBlockchain::new(DummyStorage::new())), - local_executor(), - tasks_executor(), - ); - (local_checker, local_cht_root, remote_block_header, remote_header_proof) - } - - fn header_with_computed_extrinsics_root(extrinsics: Vec) -> Header { - use sp_trie::{TrieConfiguration, trie_types::Layout}; - let iter = extrinsics.iter().map(Encode::encode); - let extrinsics_root = Layout::::ordered_trie_root(iter); - - // only care about `extrinsics_root` - Header::new(0, extrinsics_root, H256::zero(), H256::zero(), Default::default()) - } - - #[test] - fn storage_read_proof_is_generated_and_checked() { - let (local_checker, remote_block_header, 
remote_read_proof, heap_pages) = prepare_for_read_proof_check(); - assert_eq!((&local_checker as &dyn FetchChecker<Block>).check_read_proof(&RemoteReadRequest::<Header>
{ - block: remote_block_header.hash(), - header: remote_block_header, - keys: vec![well_known_keys::HEAP_PAGES.to_vec()], - retry_count: None, - }, remote_read_proof).unwrap().remove(well_known_keys::HEAP_PAGES).unwrap().unwrap()[0], heap_pages as u8); - } - - #[test] - fn storage_child_read_proof_is_generated_and_checked() { - let child_info = ChildInfo::new_default(&b"child1"[..]); - let ( - local_checker, - remote_block_header, - remote_read_proof, - result, - ) = prepare_for_read_child_proof_check(); - assert_eq!((&local_checker as &dyn FetchChecker<Block>).check_read_child_proof( - &RemoteReadChildRequest::<Header>
{ - block: remote_block_header.hash(), - header: remote_block_header, - storage_key: child_info.prefixed_storage_key(), - keys: vec![b"key1".to_vec()], - retry_count: None, - }, - remote_read_proof - ).unwrap().remove(b"key1".as_ref()).unwrap().unwrap(), result); - } - - #[test] - fn header_proof_is_generated_and_checked() { - let (local_checker, local_cht_root, remote_block_header, remote_header_proof) = prepare_for_header_proof_check(true); - assert_eq!((&local_checker as &dyn FetchChecker<Block>).check_header_proof(&RemoteHeaderRequest::<Header>
{ - cht_root: local_cht_root, - block: 1, - retry_count: None, - }, Some(remote_block_header.clone()), remote_header_proof).unwrap(), remote_block_header); - } - - #[test] - fn check_header_proof_fails_if_cht_root_is_invalid() { - let (local_checker, _, mut remote_block_header, remote_header_proof) = prepare_for_header_proof_check(true); - remote_block_header.number = 100; - assert!((&local_checker as &dyn FetchChecker<Block>).check_header_proof(&RemoteHeaderRequest::<Header>
{ - cht_root: Default::default(), - block: 1, - retry_count: None, - }, Some(remote_block_header.clone()), remote_header_proof).is_err()); - } - - #[test] - fn check_header_proof_fails_if_invalid_header_provided() { - let (local_checker, local_cht_root, mut remote_block_header, remote_header_proof) = prepare_for_header_proof_check(true); - remote_block_header.number = 100; - assert!((&local_checker as &dyn FetchChecker<Block>).check_header_proof(&RemoteHeaderRequest::<Header>
{ - cht_root: local_cht_root, - block: 1, - retry_count: None, - }, Some(remote_block_header.clone()), remote_header_proof).is_err()); - } - - #[test] - fn changes_proof_is_generated_and_checked_when_headers_are_not_pruned() { - let (remote_client, local_roots, test_cases) = prepare_client_with_key_changes(); - let local_checker = TestChecker::new( - Arc::new(DummyBlockchain::new(DummyStorage::new())), - local_executor(), - tasks_executor(), - ); - let local_checker = &local_checker as &dyn FetchChecker<Block>; - let max = remote_client.chain_info().best_number; - let max_hash = remote_client.chain_info().best_hash; - - for (index, (begin, end, key, expected_result)) in test_cases.into_iter().enumerate() { - let begin_hash = remote_client.block_hash(begin).unwrap().unwrap(); - let end_hash = remote_client.block_hash(end).unwrap().unwrap(); - - // 'fetch' changes proof from remote node - let key = StorageKey(key); - let remote_proof = remote_client.key_changes_proof( - begin_hash, end_hash, begin_hash, max_hash, None, &key - ).unwrap(); - - // check proof on local client - let local_roots_range = local_roots.clone()[(begin - 1) as usize..].to_vec(); - let config = ChangesTrieConfiguration::new(4, 2); - let request = RemoteChangesRequest::<Header>
{ - changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { - zero: (0, Default::default()), - end: None, - config: Some(config), - }], - first_block: (begin, begin_hash), - last_block: (end, end_hash), - max_block: (max, max_hash), - tries_roots: (begin, begin_hash, local_roots_range), - key: key.0, - storage_key: None, - retry_count: None, - }; - let local_result = local_checker.check_changes_proof(&request, ChangesProof { - max_block: remote_proof.max_block, - proof: remote_proof.proof, - roots: remote_proof.roots, - roots_proof: remote_proof.roots_proof, - }).unwrap(); - - // ..and ensure that result is the same as on remote node - match local_result == expected_result { - true => (), - false => panic!(format!("Failed test {}: local = {:?}, expected = {:?}", - index, local_result, expected_result)), - } - } - } - - #[test] - fn changes_proof_is_generated_and_checked_when_headers_are_pruned() { - // we're testing this test case here: - // (1, 4, dave.clone(), vec![(4, 0), (1, 1), (1, 0)]), - let (remote_client, remote_roots, _) = prepare_client_with_key_changes(); - let dave = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Dave.into())).to_vec(); - let dave = StorageKey(dave); - - // 'fetch' changes proof from remote node: - // we're fetching changes for range b1..b4 - // we do not know changes trie roots before b3 (i.e. we only know b3+b4) - // but we have changes trie CHT root for b1...b4 - let b1 = remote_client.block_hash_from_id(&BlockId::Number(1)).unwrap().unwrap(); - let b3 = remote_client.block_hash_from_id(&BlockId::Number(3)).unwrap().unwrap(); - let b4 = remote_client.block_hash_from_id(&BlockId::Number(4)).unwrap().unwrap(); - let remote_proof = remote_client.key_changes_proof_with_cht_size( - b1, b4, b3, b4, None, &dave, 4 - ).unwrap(); - - // prepare local checker, having a root of changes trie CHT#0 - let local_cht_root = cht::compute_root::<Header, BlakeTwo256, _>(4, 0, remote_roots.iter().cloned().map(|ct| Ok(Some(ct)))).unwrap(); - let mut local_storage = DummyStorage::new(); - local_storage.changes_tries_cht_roots.insert(0, local_cht_root); - let local_checker = TestChecker::new( - Arc::new(DummyBlockchain::new(local_storage)), - local_executor(), - tasks_executor(), - ); - - // check proof on local client - let config = ChangesTrieConfiguration::new(4, 2); - let request = RemoteChangesRequest::<Header>
{ - changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { - zero: (0, Default::default()), - end: None, - config: Some(config), - }], - first_block: (1, b1), - last_block: (4, b4), - max_block: (4, b4), - tries_roots: (3, b3, vec![remote_roots[2].clone(), remote_roots[3].clone()]), - storage_key: None, - key: dave.0, - retry_count: None, - }; - let local_result = local_checker.check_changes_proof_with_cht_size(&request, ChangesProof { - max_block: remote_proof.max_block, - proof: remote_proof.proof, - roots: remote_proof.roots, - roots_proof: remote_proof.roots_proof, - }, 4).unwrap(); - - assert_eq!(local_result, vec![(4, 0), (1, 1), (1, 0)]); - } - - #[test] - fn check_changes_proof_fails_if_proof_is_wrong() { - let (remote_client, local_roots, test_cases) = prepare_client_with_key_changes(); - let local_checker = TestChecker::new( - Arc::new(DummyBlockchain::new(DummyStorage::new())), - local_executor(), - tasks_executor(), - ); - let local_checker = &local_checker as &dyn FetchChecker<Block>; - let max = remote_client.chain_info().best_number; - let max_hash = remote_client.chain_info().best_hash; - - let (begin, end, key, _) = test_cases[0].clone(); - let begin_hash = remote_client.block_hash(begin).unwrap().unwrap(); - let end_hash = remote_client.block_hash(end).unwrap().unwrap(); - - // 'fetch' changes proof from remote node - let key = StorageKey(key); - let remote_proof = remote_client.key_changes_proof( - begin_hash, end_hash, begin_hash, max_hash, None, &key).unwrap(); - - let local_roots_range = local_roots.clone()[(begin - 1) as usize..].to_vec(); - let config = ChangesTrieConfiguration::new(4, 2); - let request = RemoteChangesRequest::<Header>
{ - changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { - zero: (0, Default::default()), - end: None, - config: Some(config), - }], - first_block: (begin, begin_hash), - last_block: (end, end_hash), - max_block: (max, max_hash), - tries_roots: (begin, begin_hash, local_roots_range.clone()), - storage_key: None, - key: key.0, - retry_count: None, - }; - - // check proof on local client using max from the future - assert!(local_checker.check_changes_proof(&request, ChangesProof { - max_block: remote_proof.max_block + 1, - proof: remote_proof.proof.clone(), - roots: remote_proof.roots.clone(), - roots_proof: remote_proof.roots_proof.clone(), - }).is_err()); - - // check proof on local client using broken proof - assert!(local_checker.check_changes_proof(&request, ChangesProof { - max_block: remote_proof.max_block, - proof: local_roots_range.clone().into_iter().map(|v| v.as_ref().to_vec()).collect(), - roots: remote_proof.roots, - roots_proof: remote_proof.roots_proof, - }).is_err()); - - // extra roots proofs are provided - assert!(local_checker.check_changes_proof(&request, ChangesProof { - max_block: remote_proof.max_block, - proof: remote_proof.proof.clone(), - roots: vec![(begin - 1, Default::default())].into_iter().collect(), - roots_proof: StorageProof::empty(), - }).is_err()); - assert!(local_checker.check_changes_proof(&request, ChangesProof { - max_block: remote_proof.max_block, - proof: remote_proof.proof.clone(), - roots: vec![(end + 1, Default::default())].into_iter().collect(), - roots_proof: StorageProof::empty(), - }).is_err()); - } - - #[test] - fn check_changes_tries_proof_fails_if_proof_is_wrong() { - // we're testing this test case here: - // (1, 4, dave.clone(), vec![(4, 0), (1, 1), (1, 0)]), - let (remote_client, remote_roots, _) = prepare_client_with_key_changes(); - let local_cht_root = cht::compute_root::<Header, BlakeTwo256, _>( - 4, 0, remote_roots.iter().cloned().map(|ct| Ok(Some(ct)))).unwrap(); - let dave = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Dave.into())).to_vec(); - let dave = StorageKey(dave); - - // 'fetch' changes proof from remote node: - // we're fetching changes for range b1..b4 - // we do not know changes trie roots before b3 (i.e. 
we only know b3+b4) - // but we have changes trie CHT root for b1...b4 - let b1 = remote_client.block_hash_from_id(&BlockId::Number(1)).unwrap().unwrap(); - let b3 = remote_client.block_hash_from_id(&BlockId::Number(3)).unwrap().unwrap(); - let b4 = remote_client.block_hash_from_id(&BlockId::Number(4)).unwrap().unwrap(); - let remote_proof = remote_client.key_changes_proof_with_cht_size( - b1, b4, b3, b4, None, &dave, 4 - ).unwrap(); - - // fails when changes trie CHT is missing from the local db - let local_checker = TestChecker::new( - Arc::new(DummyBlockchain::new(DummyStorage::new())), - local_executor(), - tasks_executor(), - ); - assert!(local_checker.check_changes_tries_proof(4, &remote_proof.roots, - remote_proof.roots_proof.clone()).is_err()); - - // fails when proof is broken - let mut local_storage = DummyStorage::new(); - local_storage.changes_tries_cht_roots.insert(0, local_cht_root); - let local_checker = TestChecker::new( - Arc::new(DummyBlockchain::new(local_storage)), - local_executor(), - tasks_executor(), - ); - let result = local_checker.check_changes_tries_proof( - 4, &remote_proof.roots, StorageProof::empty() - ); - assert!(result.is_err()); - } - - #[test] - fn check_body_proof_faulty() { - let header = header_with_computed_extrinsics_root( - vec![Extrinsic::IncludeData(vec![1, 2, 3, 4])] - ); - let block = Block::new(header.clone(), Vec::new()); - - let local_checker = TestChecker::new( - Arc::new(DummyBlockchain::new(DummyStorage::new())), - local_executor(), - tasks_executor(), - ); - - let body_request = RemoteBodyRequest { - header: header.clone(), - retry_count: None, - }; - - assert!( - local_checker.check_body_proof(&body_request, block.extrinsics).is_err(), - "vec![1, 2, 3, 4] != vec![]" - ); - } - - #[test] - fn check_body_proof_of_same_data_should_succeed() { - let extrinsics = vec![Extrinsic::IncludeData(vec![1, 2, 3, 4, 5, 6, 7, 8, 255])]; - - let header = header_with_computed_extrinsics_root(extrinsics.clone()); - let block = Block::new(header.clone(), extrinsics); - - let local_checker = TestChecker::new( - Arc::new(DummyBlockchain::new(DummyStorage::new())), - local_executor(), - tasks_executor(), - ); - - let body_request = RemoteBodyRequest { - header: header.clone(), - retry_count: None, - }; - - assert!(local_checker.check_body_proof(&body_request, block.extrinsics).is_ok()); - } -} diff --git a/primitives/api/test/tests/decl_and_impl.rs b/primitives/api/test/tests/decl_and_impl.rs index a09bd0412c8af..4a8c8cd6628c8 100644 --- a/primitives/api/test/tests/decl_and_impl.rs +++ b/primitives/api/test/tests/decl_and_impl.rs @@ -112,7 +112,7 @@ mock_impl_runtime_apis! 
{ } } -type TestClient = substrate_test_runtime_client::sc_client::Client< +type TestClient = substrate_test_runtime_client::client::Client< substrate_test_runtime_client::Backend, substrate_test_runtime_client::Executor, Block, diff --git a/primitives/api/test/tests/ui/mock_only_one_error_type.stderr b/primitives/api/test/tests/ui/mock_only_one_error_type.stderr index 8f026838c96b8..7cec5246ca825 100644 --- a/primitives/api/test/tests/ui/mock_only_one_error_type.stderr +++ b/primitives/api/test/tests/ui/mock_only_one_error_type.stderr @@ -23,5 +23,5 @@ error[E0277]: the trait bound `u32: std::convert::From` is > > > - > - and 16 others + > + and 18 others diff --git a/test-utils/client/Cargo.toml b/test-utils/client/Cargo.toml index 6e9381bdafdec..37c99292ec910 100644 --- a/test-utils/client/Cargo.toml +++ b/test-utils/client/Cargo.toml @@ -13,10 +13,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sc-client-api = { version = "2.0.0-dev", path = "../../client/api" } -sc-client = { version = "0.8.0-dev", path = "../../client/" } sc-client-db = { version = "0.8.0-dev", features = ["test-helpers"], path = "../../client/db" } sp-consensus = { version = "0.8.0-dev", path = "../../primitives/consensus/common" } sc-executor = { version = "0.8.0-dev", path = "../../client/executor" } +sc-consensus = { version = "0.8.0-dev", path = "../../client/consensus/common" } +sc-service = { version = "0.8.0-dev", default-features = false, features = ["test-helpers"], path = "../../client/service" } futures = "0.3.4" hash-db = "0.15.2" sp-keyring = { version = "2.0.0-dev", path = "../../primitives/keyring" } diff --git a/test-utils/client/src/client_ext.rs b/test-utils/client/src/client_ext.rs index 6d6b539483e39..d663dda7a9323 100644 --- a/test-utils/client/src/client_ext.rs +++ b/test-utils/client/src/client_ext.rs @@ -16,7 +16,7 @@ //! Client extension for tests. -use sc_client::{self, Client}; +use sc_service::client::Client; use sc_client_api::backend::Finalizer; use sp_consensus::{ BlockImportParams, BlockImport, BlockOrigin, Error as ConsensusError, @@ -64,7 +64,7 @@ pub trait ClientBlockImportExt: Sized { impl ClientExt for Client where B: sc_client_api::backend::Backend, - E: sc_client::CallExecutor + 'static, + E: sc_client_api::CallExecutor + 'static, Self: BlockImport, Block: BlockT, { diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index bf3f8c6878212..22173ca04edb0 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -20,7 +20,6 @@ pub mod client_ext; -pub use sc_client::{blockchain, self}; pub use sc_client_api::{ execution_extensions::{ExecutionStrategies, ExecutionExtensions}, ForkBlocks, BadBlocks, CloneableSpawn, @@ -36,16 +35,17 @@ pub use sp_keyring::{ pub use sp_core::{traits::BareCryptoStorePtr, tasks::executor as tasks_executor}; pub use sp_runtime::{Storage, StorageChild}; pub use sp_state_machine::ExecutionStrategy; +pub use sc_service::client; pub use self::client_ext::{ClientExt, ClientBlockImportExt}; use std::sync::Arc; use std::collections::HashMap; use sp_core::storage::ChildInfo; use sp_runtime::traits::{Block as BlockT, BlakeTwo256}; -use sc_client::LocalCallExecutor; +use sc_service::client::{LocalCallExecutor, ClientConfig}; /// Test client light database backend. 
-pub type LightBackend = sc_client::light::backend::Backend< +pub type LightBackend = client::light::backend::Backend< sc_client_db::light::LightStorage, BlakeTwo256, >; @@ -175,15 +175,15 @@ impl TestClientBuilder ( - sc_client::Client< + client::Client< Backend, Executor, Block, RuntimeApi, >, - sc_client::LongestChain, + sc_consensus::LongestChain, ) where - Executor: sc_client::CallExecutor + 'static, + Executor: sc_client_api::CallExecutor + 'static, Backend: sc_client_api::backend::Backend, { let storage = { @@ -203,7 +203,7 @@ impl TestClientBuilder TestClientBuilder TestClientBuilder TestClientBuilder< Block, - sc_client::LocalCallExecutor>, + client::LocalCallExecutor>, Backend, G, > { @@ -234,13 +234,13 @@ impl TestClientBuilder< self, executor: I, ) -> ( - sc_client::Client< + client::Client< Backend, - sc_client::LocalCallExecutor>, + client::LocalCallExecutor>, Block, RuntimeApi >, - sc_client::LongestChain, + sc_consensus::LongestChain, ) where I: Into>>, E: sc_executor::NativeExecutionDispatch + 'static, diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index 96daca8bedcb7..f4582d0b70918 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -17,12 +17,10 @@ sp-application-crypto = { version = "2.0.0-dev", default-features = false, path sp-consensus-aura = { version = "0.8.0-dev", default-features = false, path = "../../primitives/consensus/aura" } sp-consensus-babe = { version = "0.8.0-dev", default-features = false, path = "../../primitives/consensus/babe" } sp-block-builder = { version = "2.0.0-dev", default-features = false, path = "../../primitives/block-builder" } -cfg-if = "0.1.10" codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } frame-executive = { version = "2.0.0-dev", default-features = false, path = "../../frame/executive" } sp-inherents = { version = "2.0.0-dev", default-features = false, path = "../../primitives/inherents" } sp-keyring = { version = "2.0.0-dev", optional = true, path = "../../primitives/keyring" } -log = { version = "0.4.8", optional = true } memory-db = { version = "0.20.0", default-features = false } sp-offchain = { path = "../../primitives/offchain", default-features = false, version = "2.0.0-dev"} sp-core = { version = "2.0.0-dev", default-features = false, path = "../../primitives/core" } @@ -31,7 +29,6 @@ sp-runtime-interface = { path = "../../primitives/runtime-interface", default-fe sp-io = { version = "2.0.0-dev", default-features = false, path = "../../primitives/io" } frame-support = { version = "2.0.0-dev", default-features = false, path = "../../frame/support" } sp-version = { version = "2.0.0-dev", default-features = false, path = "../../primitives/version" } -serde = { version = "1.0.101", optional = true, features = ["derive"] } sp-session = { version = "2.0.0-dev", default-features = false, path = "../../primitives/session" } sp-api = { version = "2.0.0-dev", default-features = false, path = "../../primitives/api" } sp-runtime = { version = "2.0.0-dev", default-features = false, path = "../../primitives/runtime" } @@ -39,11 +36,16 @@ pallet-babe = { version = "2.0.0-dev", default-features = false, path = "../../f frame-system = { version = "2.0.0-dev", default-features = false, path = "../../frame/system" } frame-system-rpc-runtime-api = { version = "2.0.0-dev", default-features = false, path = "../../frame/system/rpc/runtime-api" } pallet-timestamp = { version = "2.0.0-dev", default-features = false, path = 
"../../frame/timestamp" } -sc-client = { version = "0.8.0-dev", optional = true, path = "../../client" } sp-trie = { version = "2.0.0-dev", default-features = false, path = "../../primitives/trie" } sp-transaction-pool = { version = "2.0.0-dev", default-features = false, path = "../../primitives/transaction-pool" } trie-db = { version = "0.20.1", default-features = false } parity-util-mem = { version = "0.6.1", default-features = false, features = ["primitive-types"] } +sc-service = { version = "0.8.0-dev", default-features = false, optional = true, features = ["test-helpers"], path = "../../client/service" } + +# 3rd party +cfg-if = "0.1.10" +log = { version = "0.4.8", optional = true } +serde = { version = "1.0.101", optional = true, features = ["derive"] } [dev-dependencies] sc-block-builder = { version = "0.8.0-dev", path = "../../client/block-builder" } @@ -85,7 +87,7 @@ std = [ "frame-system-rpc-runtime-api/std", "frame-system/std", "pallet-timestamp/std", - "sc-client", + "sc-service", "sp-trie/std", "sp-transaction-pool/std", "trie-db/std", diff --git a/test-utils/runtime/client/Cargo.toml b/test-utils/runtime/client/Cargo.toml index 06d4f3fcf5cf6..f622878404f44 100644 --- a/test-utils/runtime/client/Cargo.toml +++ b/test-utils/runtime/client/Cargo.toml @@ -12,6 +12,7 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] +sp-consensus = { version = "0.8.0-dev", path = "../../../primitives/consensus/common" } sc-block-builder = { version = "0.8.0-dev", path = "../../../client/block-builder" } substrate-test-client = { version = "2.0.0-dev", path = "../../client" } sp-core = { version = "2.0.0-dev", path = "../../../primitives/core" } @@ -21,5 +22,6 @@ sp-api = { version = "2.0.0-dev", path = "../../../primitives/api" } sp-blockchain = { version = "2.0.0-dev", path = "../../../primitives/blockchain" } codec = { package = "parity-scale-codec", version = "1.3.0" } sc-client-api = { version = "2.0.0-dev", path = "../../../client/api" } -sc-client = { version = "0.8.0-dev", path = "../../../client/" } +sc-consensus = { version = "0.8.0-dev", path = "../../../client/consensus/common" } +sc-service = { version = "0.8.0-dev", default-features = false, path = "../../../client/service" } futures = "0.3.4" diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index a2b38a342bc6d..7b51d88e069b0 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -26,7 +26,7 @@ use std::sync::Arc; use std::collections::HashMap; pub use substrate_test_client::*; pub use substrate_test_runtime as runtime; -pub use sc_client::LongestChain; +pub use sc_consensus::LongestChain; pub use self::block_builder_ext::BlockBuilderExt; @@ -34,7 +34,7 @@ use sp_core::{sr25519, ChangesTrieConfiguration}; use sp_core::storage::{ChildInfo, Storage, StorageChild}; use substrate_test_runtime::genesismap::{GenesisConfig, additional_storage_with_genesis}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Hash as HashT, NumberFor, HashFor}; -use sc_client::{ +use sc_service::client::{ light::fetcher::{ Fetcher, RemoteHeaderRequest, RemoteReadRequest, RemoteReadChildRequest, @@ -68,7 +68,7 @@ sc_executor::native_executor_instance! { pub type Backend = substrate_test_client::Backend; /// Test client executor. 
-pub type Executor = sc_client::LocalCallExecutor< +pub type Executor = client::LocalCallExecutor< Backend, NativeExecutor, >; @@ -77,10 +77,10 @@ pub type Executor = sc_client::LocalCallExecutor< pub type LightBackend = substrate_test_client::LightBackend; /// Test client light executor. -pub type LightExecutor = sc_client::light::call_executor::GenesisCallExecutor< +pub type LightExecutor = client::light::call_executor::GenesisCallExecutor< LightBackend, - sc_client::LocalCallExecutor< - sc_client::light::backend::Backend< + client::LocalCallExecutor< + client::light::backend::Backend< sc_client_db::light::LightStorage, HashFor >, @@ -133,7 +133,7 @@ impl substrate_test_client::GenesisInit for GenesisParameters { let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( storage.top.clone().into_iter().chain(child_roots).collect() ); - let block: runtime::Block = sc_client::genesis::construct_genesis_block(state_root); + let block: runtime::Block = client::genesis::construct_genesis_block(state_root); storage.top.extend(additional_storage_with_genesis(&block)); storage @@ -149,9 +149,9 @@ pub type TestClientBuilder = substrate_test_client::TestClientBuilder< >; /// Test client type with `LocalExecutor` and generic Backend. -pub type Client = sc_client::Client< +pub type Client = client::Client< B, - sc_client::LocalCallExecutor>, + client::LocalCallExecutor>, substrate_test_runtime::Block, substrate_test_runtime::RuntimeApi, >; @@ -230,14 +230,14 @@ pub trait TestClientBuilderExt: Sized { } /// Build the test client and longest chain selector. - fn build_with_longest_chain(self) -> (Client, sc_client::LongestChain); + fn build_with_longest_chain(self) -> (Client, sc_consensus::LongestChain); /// Build the test client and the backend. fn build_with_backend(self) -> (Client, Arc); } impl TestClientBuilderExt for TestClientBuilder< - sc_client::LocalCallExecutor>, + client::LocalCallExecutor>, B > where B: sc_client_api::backend::Backend + 'static, @@ -249,7 +249,7 @@ impl TestClientBuilderExt for TestClientBuilder< Self::genesis_init_mut(self) } - fn build_with_longest_chain(self) -> (Client, sc_client::LongestChain) { + fn build_with_longest_chain(self) -> (Client, sc_consensus::LongestChain) { self.build_with_native_executor(None) } @@ -344,15 +344,15 @@ pub fn new() -> Client { /// Creates new light client instance used for tests. 
pub fn new_light() -> ( - sc_client::Client, + client::Client, Arc, ) { let storage = sc_client_db::light::LightStorage::new_test(); - let blockchain = Arc::new(sc_client::light::blockchain::Blockchain::new(storage)); + let blockchain = Arc::new(client::light::blockchain::Blockchain::new(storage)); let backend = Arc::new(LightBackend::new(blockchain.clone())); let executor = new_native_executor(); - let local_call_executor = sc_client::LocalCallExecutor::new(backend.clone(), executor, sp_core::tasks::executor(), Default::default()); + let local_call_executor = client::LocalCallExecutor::new(backend.clone(), executor, sp_core::tasks::executor(), Default::default()); let call_executor = LightExecutor::new( backend.clone(), local_call_executor, diff --git a/test-utils/runtime/client/src/trait_tests.rs b/test-utils/runtime/client/src/trait_tests.rs index 4af8aa37b640a..2a377fabba129 100644 --- a/test-utils/runtime/client/src/trait_tests.rs +++ b/test-utils/runtime/client/src/trait_tests.rs @@ -26,7 +26,7 @@ use crate::{ }; use sc_client_api::backend; use sc_client_api::blockchain::{Backend as BlockChainBackendT, HeaderBackend}; -use substrate_test_client::sp_consensus::BlockOrigin; +use sp_consensus::BlockOrigin; use substrate_test_runtime::{self, Transfer}; use sp_runtime::generic::BlockId; use sp_runtime::traits::{Block as BlockT, HashFor}; diff --git a/test-utils/runtime/src/genesismap.rs b/test-utils/runtime/src/genesismap.rs index b9de3ab3f4cb0..9426cd6433cec 100644 --- a/test-utils/runtime/src/genesismap.rs +++ b/test-utils/runtime/src/genesismap.rs @@ -23,6 +23,7 @@ use codec::{Encode, KeyedVec, Joiner}; use sp_core::{ChangesTrieConfiguration, map}; use sp_core::storage::{well_known_keys, Storage}; use sp_runtime::traits::{Block as BlockT, Hash as HashT, Header as HeaderT}; +use sc_service::client::genesis; /// Configuration of a general Substrate test genesis block. 
pub struct GenesisConfig { @@ -96,7 +97,7 @@ pub fn insert_genesis_block( let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( storage.top.clone().into_iter().collect() ); - let block: crate::Block = sc_client::genesis::construct_genesis_block(state_root); + let block: crate::Block = genesis::construct_genesis_block(state_root); let genesis_hash = block.header.hash(); storage.top.extend(additional_storage_with_genesis(&block)); genesis_hash diff --git a/utils/frame/benchmarking-cli/Cargo.toml b/utils/frame/benchmarking-cli/Cargo.toml index 86eb20faf8075..b02f42d7593fa 100644 --- a/utils/frame/benchmarking-cli/Cargo.toml +++ b/utils/frame/benchmarking-cli/Cargo.toml @@ -16,7 +16,6 @@ frame-benchmarking = { version = "2.0.0-dev", path = "../../../frame/benchmarkin sp-core = { version = "2.0.0-dev", path = "../../../primitives/core" } sc-service = { version = "0.8.0-dev", default-features = false, path = "../../../client/service" } sc-cli = { version = "0.8.0-dev", path = "../../../client/cli" } -sc-client = { version = "0.8.0-dev", path = "../../../client" } sc-client-db = { version = "0.8.0-dev", path = "../../../client/db" } sc-executor = { version = "0.8.0-dev", path = "../../../client/executor" } sp-externalities = { version = "0.8.0-dev", path = "../../../primitives/externalities" } diff --git a/utils/frame/benchmarking-cli/src/command.rs b/utils/frame/benchmarking-cli/src/command.rs index 17205bc76e5e1..ebca380baff2d 100644 --- a/utils/frame/benchmarking-cli/src/command.rs +++ b/utils/frame/benchmarking-cli/src/command.rs @@ -18,9 +18,9 @@ use crate::BenchmarkCmd; use codec::{Decode, Encode}; use frame_benchmarking::{Analysis, BenchmarkBatch}; use sc_cli::{SharedParams, CliConfiguration, ExecutionStrategy, Result}; -use sc_client::StateMachine; use sc_client_db::BenchmarkingState; use sc_executor::NativeExecutor; +use sp_state_machine::StateMachine; use sp_externalities::Extensions; use sc_service::{Configuration, NativeExecutionDispatch}; use sp_runtime::{ diff --git a/utils/frame/rpc/system/Cargo.toml b/utils/frame/rpc/system/Cargo.toml index 20524a460a4c3..a33a9dfd73163 100644 --- a/utils/frame/rpc/system/Cargo.toml +++ b/utils/frame/rpc/system/Cargo.toml @@ -12,7 +12,7 @@ description = "FRAME's system exposed over Substrate RPC" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-client = { version = "0.8.0-dev", path = "../../../../client/" } +sc-client-api = { version = "2.0.0-dev", path = "../../../../client/api" } codec = { package = "parity-scale-codec", version = "1.3.0" } futures = "0.3.4" jsonrpc-core = "14.0.3" diff --git a/utils/frame/rpc/system/src/lib.rs b/utils/frame/rpc/system/src/lib.rs index 4838e8e8f436d..8dff3a641d1ed 100644 --- a/utils/frame/rpc/system/src/lib.rs +++ b/utils/frame/rpc/system/src/lib.rs @@ -19,10 +19,7 @@ use std::sync::Arc; use codec::{self, Codec, Decode, Encode}; -use sc_client::{ - light::blockchain::{future_header, RemoteBlockchain}, - light::fetcher::{Fetcher, RemoteCallRequest}, -}; +use sc_client_api::light::{future_header, RemoteBlockchain, Fetcher, RemoteCallRequest}; use jsonrpc_core::{ Error, ErrorCode, futures::future::{result, Future},
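Taken together, the manifest and import changes above retire the standalone `sc-client` crate: the light-client fetcher types move to `sc-client-api`, `StateMachine` is taken from `sp-state-machine`, `LongestChain` from `sc-consensus`, and the concrete `Client` and `LocalCallExecutor` types are re-exported through `sc-service` (behind its `test-helpers` feature for test crates). As an illustrative sketch only, not part of the patch, downstream code would swap its imports roughly as follows, using only paths that appear in the hunks above:
// Before (paths removed together with the `sc-client` crate):
// use sc_client::light::blockchain::{future_header, RemoteBlockchain};
// use sc_client::light::fetcher::{Fetcher, RemoteCallRequest};
// use sc_client::{Client, LocalCallExecutor, LongestChain, StateMachine};

// After: the same items are pulled from the crates this change introduces.
use sc_client_api::light::{future_header, Fetcher, RemoteBlockchain, RemoteCallRequest};
use sc_consensus::LongestChain;
use sc_service::client::{Client, LocalCallExecutor};
use sp_state_machine::StateMachine;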