From 06d5240ef575fccd73e14741e3a5ad03103a46fb Mon Sep 17 00:00:00 2001
From: jasl
Date: Wed, 18 Jan 2023 23:08:06 +0800
Subject: [PATCH] Upgrade to polkadot-v0.9.36

---
 Cargo.lock | 1481 ++++---
 Cargo.toml | 7 -
 crates/phala-mq/Cargo.toml | 2 +-
 crates/phala-node-rpc-ext/Cargo.toml | 14 +-
 crates/phala-pallet-common/Cargo.toml | 24 +-
 crates/phala-serde-more/Cargo.toml | 2 +-
 crates/phala-trie-storage/Cargo.toml | 11 +-
 crates/phala-trie-storage/src/lib.rs | 4 -
 crates/phala-trie-storage/src/memdb.rs | 499 +--
 crates/phala-trie-storage/src/ser.rs | 1 -
 crates/phala-types/Cargo.toml | 2 +-
 node/Cargo.toml | 120 +-
 node/src/command.rs | 40 +-
 node/src/service/mod.rs | 47 +-
 node/src/service/phala.rs | 15 +-
 node/src/service/shell.rs | 15 +-
 pallets/assets-registry/Cargo.toml | 76 +-
 pallets/assets-registry/src/lib.rs | 10 +
 pallets/parachain-info/Cargo.toml | 6 +-
 pallets/phala-world/Cargo.toml | 32 +-
 pallets/phala-world/src/incubation.rs | 7 +
 pallets/phala-world/src/lib.rs | 3 +-
 pallets/phala-world/src/nft_sale.rs | 20 +
 pallets/phala/Cargo.toml | 42 +-
 pallets/phala/mq-runtime-api/Cargo.toml | 2 +-
 pallets/phala/src/compute/base_pool.rs | 6 +
 pallets/phala/src/compute/computation.rs | 7 +
 pallets/phala/src/compute/stake_pool_v2.rs | 18 +
 pallets/phala/src/compute/vault.rs | 7 +
 pallets/phala/src/compute/wrapped_balances.rs | 5 +
 pallets/phala/src/fat.rs | 7 +
 pallets/phala/src/fat_tokenomic.rs | 1 +
 pallets/phala/src/mock.rs | 4 +
 pallets/phala/src/mq.rs | 3 +
 pallets/phala/src/ott.rs | 1 +
 pallets/phala/src/registry.rs | 15 +
 pallets/subbridge/Cargo.toml | 74 +-
 pallets/subbridge/src/chainbridge.rs | 9 +
 pallets/subbridge/src/xtransfer.rs | 2 +
 parachains-common/Cargo.toml | 36 +-
 parachains-common/src/lib.rs | 9 +-
 polkadot/node/service/Cargo.toml | 194 +-
 polkadot/node/service/chain-specs/kusama.json | 6 +-
 .../node/service/chain-specs/polkadot.json | 6 +-
 .../node/service/chain-specs/westend.json | 8 +-
 polkadot/node/service/src/chain_spec.rs | 8 +-
 polkadot/node/service/src/lib.rs | 44 +-
 polkadot/node/service/src/overseer.rs | 14 +-
 .../node/service/src/relay_chain_selection.rs | 5 +-
 runtime/khala/Cargo.toml | 134 +-
 runtime/khala/src/constants.rs | 4 +-
 runtime/khala/src/lib.rs | 33 +-
 runtime/phala/Cargo.toml | 122 +-
 runtime/phala/src/constants.rs | 4 +-
 runtime/phala/src/lib.rs | 35 +-
 runtime/rhala/Cargo.toml | 136 +-
 runtime/rhala/src/constants.rs | 4 +-
 runtime/rhala/src/lib.rs | 33 +-
 runtime/shell/Cargo.toml | 52 +-
 runtime/shell/src/lib.rs | 7 +-
 runtime/thala/Cargo.toml | 136 +-
 runtime/thala/src/constants.rs | 4 +-
 runtime/thala/src/lib.rs | 33 +-
 substrate/client/db/Cargo.toml | 58 -
 substrate/client/db/README.md | 11 -
 substrate/client/db/benches/state_access.rs | 311 --
 substrate/client/db/src/bench.rs | 673 ---
 substrate/client/db/src/children.rs | 123 -
 substrate/client/db/src/lib.rs | 3797 -----------------
 substrate/client/db/src/offchain.rs | 150 -
 substrate/client/db/src/parity_db.rs | 162 -
 substrate/client/db/src/record_stats_state.rs | 230 -
 substrate/client/db/src/stats.rs | 145 -
 substrate/client/db/src/storage_cache.rs | 1979 ---------
 substrate/client/db/src/upgrade.rs | 256 --
 substrate/client/db/src/utils.rs | 824 ----
 substrate/client/state-db/Cargo.toml | 22 -
 substrate/client/state-db/README.md | 16 -
 substrate/client/state-db/src/lib.rs | 954 -----
 substrate/client/state-db/src/noncanonical.rs | 1106 -----
 substrate/client/state-db/src/pruning.rs | 853 ----
 substrate/client/state-db/src/test.rs | 98 -
 82 files changed, 1841 insertions(+), 13635
deletions(-) delete mode 100644 substrate/client/db/Cargo.toml delete mode 100644 substrate/client/db/README.md delete mode 100644 substrate/client/db/benches/state_access.rs delete mode 100644 substrate/client/db/src/bench.rs delete mode 100644 substrate/client/db/src/children.rs delete mode 100644 substrate/client/db/src/lib.rs delete mode 100644 substrate/client/db/src/offchain.rs delete mode 100644 substrate/client/db/src/parity_db.rs delete mode 100644 substrate/client/db/src/record_stats_state.rs delete mode 100644 substrate/client/db/src/stats.rs delete mode 100644 substrate/client/db/src/storage_cache.rs delete mode 100644 substrate/client/db/src/upgrade.rs delete mode 100644 substrate/client/db/src/utils.rs delete mode 100644 substrate/client/state-db/Cargo.toml delete mode 100644 substrate/client/state-db/README.md delete mode 100644 substrate/client/state-db/src/lib.rs delete mode 100644 substrate/client/state-db/src/noncanonical.rs delete mode 100644 substrate/client/state-db/src/pruning.rs delete mode 100644 substrate/client/state-db/src/test.rs diff --git a/Cargo.lock b/Cargo.lock index 2ee2ef31..bcdea0d0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -18,7 +18,16 @@ version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b" dependencies = [ - "gimli", + "gimli 0.26.2", +] + +[[package]] +name = "addr2line" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97" +dependencies = [ + "gimli 0.27.0", ] [[package]] @@ -127,6 +136,12 @@ version = "4.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f52f63c5c1316a16a4b35eaac8b76a98248961a533f061684cb2a7cb0eafb6c6" +[[package]] +name = "array-bytes" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22f72e9d6fac4bc80778ea470b20197b88d28c292bb7d60c3fb099280003cd19" + [[package]] name = "arrayref" version = "0.3.6" @@ -306,7 +321,6 @@ dependencies = [ "async-global-executor", "async-io", "async-lock", - "async-process", "crossbeam-utils", "futures-channel", "futures-core", @@ -323,21 +337,6 @@ dependencies = [ "wasm-bindgen-futures", ] -[[package]] -name = "async-std-resolver" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ba50e24d9ee0a8950d3d03fc6d0dd10aa14b5de3b101949b4e160f7fee7c723" -dependencies = [ - "async-std", - "async-trait", - "futures-io", - "futures-util", - "pin-utils", - "socket2", - "trust-dns-resolver", -] - [[package]] name = "async-task" version = "4.3.0" @@ -346,9 +345,9 @@ checksum = "7a40729d2133846d9ed0ea60a8b9541bccddab49cd30f0715a1da672fe9a2524" [[package]] name = "async-trait" -version = "0.1.58" +version = "0.1.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e805d94e6b5001b651426cf4cd446b1ab5f319d27bab5c644f61de0a804360c" +checksum = "705339e0e4a9690e2908d2b3d049d85682cf19fbd5782494498fbf7003a6a282" dependencies = [ "proc-macro2", "quote", @@ -380,7 +379,7 @@ version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ - "hermit-abi", + "hermit-abi 0.1.19", "libc", "winapi", ] @@ -397,32 +396,18 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"7b7e4c2464d97fe331d41de9d5db0def0a96f4d823b8b32a2efd503578988973" -[[package]] -name = "backoff" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1" -dependencies = [ - "futures-core", - "getrandom 0.2.8", - "instant", - "pin-project-lite 0.2.9", - "rand 0.8.5", - "tokio", -] - [[package]] name = "backtrace" -version = "0.3.66" +version = "0.3.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cab84319d616cfb654d03394f38ab7e6f0919e181b1b57e1fd15e7fb4077d9a7" +checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca" dependencies = [ - "addr2line", + "addr2line 0.19.0", "cc", "cfg-if", "libc", - "miniz_oxide", - "object", + "miniz_oxide 0.6.2", + "object 0.30.2", "rustc-demangle", ] @@ -468,11 +453,10 @@ dependencies = [ [[package]] name = "beefy-gadget" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ - "array-bytes", + "array-bytes 4.2.0", "async-trait", - "beefy-primitives", "fnv", "futures", "futures-timer", @@ -491,6 +475,7 @@ dependencies = [ "sp-api", "sp-application-crypto", "sp-arithmetic", + "sp-beefy", "sp-blockchain", "sp-consensus", "sp-core", @@ -505,10 +490,9 @@ dependencies = [ [[package]] name = "beefy-gadget-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "beefy-gadget", - "beefy-primitives", "futures", "jsonrpsee", "log", @@ -517,6 +501,7 @@ dependencies = [ "sc-rpc", "sc-utils", "serde", + "sp-beefy", "sp-core", "sp-runtime", "thiserror", @@ -525,30 +510,13 @@ dependencies = [ [[package]] name = "beefy-merkle-tree" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ - "beefy-primitives", "sp-api", + "sp-beefy", "sp-runtime", ] -[[package]] -name = "beefy-primitives" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" -dependencies = [ - "parity-scale-codec", - "scale-info", - "serde", - "sp-api", - "sp-application-crypto", - "sp-core", - "sp-io", - "sp-mmr-primitives", - "sp-runtime", - "sp-std", -] - [[package]] name = "bincode" version = "1.3.3" @@ -767,9 +735,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" +checksum = "dfb24e866b15a1af2a1b663f10c6b6b8f397a84aadb828f12e5b289ec23a3a3c" [[package]] name = "bzip2-sys" @@ -942,14 +910,14 @@ dependencies = [ [[package]] name = "clap" -version = "4.0.26" +version = "4.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2148adefda54e14492fb9bddcc600b4344c5d1a3123bd666dcb939c6f0e0e57e" +checksum = 
"4ec7a4128863c188deefe750ac1d1dfe66c236909f845af04beed823638dc1b2" dependencies = [ - "atty", "bitflags", "clap_derive", "clap_lex", + "is-terminal", "once_cell", "strsim", "termcolor", @@ -957,9 +925,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.0.21" +version = "4.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0177313f9f02afc995627906bbd8967e2be069f5261954222dac78290c2b9014" +checksum = "684a277d672e91966334af371f1a7b5833f9aa00b07c84e92fbce95e00208ce8" dependencies = [ "heck", "proc-macro-error", @@ -1093,6 +1061,16 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "cpu-time" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9e393a7668fe1fad3075085b86c781883000b4ede868f43627b34a87c8b7ded" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "cpufeatures" version = "0.2.5" @@ -1124,7 +1102,7 @@ dependencies = [ "cranelift-codegen-shared", "cranelift-entity", "cranelift-isle", - "gimli", + "gimli 0.26.2", "log", "regalloc2", "smallvec", @@ -1332,7 +1310,7 @@ dependencies = [ [[package]] name = "cumulus-client-cli" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.33#27721d794ee63aae42317a7eeda21595dd3200d9" +source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.36#afe528af891f464b318293f183f6d3eefbc979b0" dependencies = [ "clap", "parity-scale-codec", @@ -1347,7 +1325,7 @@ dependencies = [ [[package]] name = "cumulus-client-collator" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.33#27721d794ee63aae42317a7eeda21595dd3200d9" +source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.36#afe528af891f464b318293f183f6d3eefbc979b0" dependencies = [ "cumulus-client-consensus-common", "cumulus-client-network", @@ -1370,7 +1348,7 @@ dependencies = [ [[package]] name = "cumulus-client-consensus-aura" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.33#27721d794ee63aae42317a7eeda21595dd3200d9" +source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.36#afe528af891f464b318293f183f6d3eefbc979b0" dependencies = [ "async-trait", "cumulus-client-consensus-common", @@ -1399,12 +1377,15 @@ dependencies = [ [[package]] name = "cumulus-client-consensus-common" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.33#27721d794ee63aae42317a7eeda21595dd3200d9" +source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.36#afe528af891f464b318293f183f6d3eefbc979b0" dependencies = [ "async-trait", + "cumulus-client-pov-recovery", + "cumulus-primitives-core", "cumulus-relay-chain-interface", "dyn-clone", "futures", + "log", "parity-scale-codec", "polkadot-primitives", "sc-client-api", @@ -1419,7 +1400,7 @@ dependencies = [ [[package]] name = "cumulus-client-consensus-relay-chain" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.33#27721d794ee63aae42317a7eeda21595dd3200d9" +source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.36#afe528af891f464b318293f183f6d3eefbc979b0" dependencies = [ "async-trait", "cumulus-client-consensus-common", @@ -1442,7 +1423,7 @@ dependencies = [ [[package]] name = "cumulus-client-network" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.33#27721d794ee63aae42317a7eeda21595dd3200d9" +source = 
"git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.36#afe528af891f464b318293f183f6d3eefbc979b0" dependencies = [ "async-trait", "cumulus-relay-chain-interface", @@ -1465,7 +1446,7 @@ dependencies = [ [[package]] name = "cumulus-client-pov-recovery" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.33#27721d794ee63aae42317a7eeda21595dd3200d9" +source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.36#afe528af891f464b318293f183f6d3eefbc979b0" dependencies = [ "cumulus-primitives-core", "cumulus-relay-chain-interface", @@ -1488,19 +1469,24 @@ dependencies = [ [[package]] name = "cumulus-client-service" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.33#27721d794ee63aae42317a7eeda21595dd3200d9" +source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.36#afe528af891f464b318293f183f6d3eefbc979b0" dependencies = [ "cumulus-client-cli", "cumulus-client-collator", "cumulus-client-consensus-common", "cumulus-client-pov-recovery", "cumulus-primitives-core", + "cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", + "cumulus-relay-chain-minimal-node", + "futures", "parking_lot 0.12.1", "polkadot-primitives", "sc-client-api", "sc-consensus", "sc-service", + "sc-sysinfo", + "sc-telemetry", "sp-api", "sp-blockchain", "sp-consensus", @@ -1511,7 +1497,7 @@ dependencies = [ [[package]] name = "cumulus-pallet-aura-ext" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.33#27721d794ee63aae42317a7eeda21595dd3200d9" +source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.36#afe528af891f464b318293f183f6d3eefbc979b0" dependencies = [ "frame-support", "frame-system", @@ -1527,7 +1513,7 @@ dependencies = [ [[package]] name = "cumulus-pallet-dmp-queue" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.33#27721d794ee63aae42317a7eeda21595dd3200d9" +source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.36#afe528af891f464b318293f183f6d3eefbc979b0" dependencies = [ "cumulus-primitives-core", "frame-support", @@ -1544,7 +1530,7 @@ dependencies = [ [[package]] name = "cumulus-pallet-parachain-system" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.33#27721d794ee63aae42317a7eeda21595dd3200d9" +source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.36#afe528af891f464b318293f183f6d3eefbc979b0" dependencies = [ "bytes", "cumulus-pallet-parachain-system-proc-macro", @@ -1572,7 +1558,7 @@ dependencies = [ [[package]] name = "cumulus-pallet-parachain-system-proc-macro" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.33#27721d794ee63aae42317a7eeda21595dd3200d9" +source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.36#afe528af891f464b318293f183f6d3eefbc979b0" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1583,7 +1569,7 @@ dependencies = [ [[package]] name = "cumulus-pallet-session-benchmarking" version = "3.0.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.33#27721d794ee63aae42317a7eeda21595dd3200d9" +source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.36#afe528af891f464b318293f183f6d3eefbc979b0" dependencies = [ "frame-benchmarking", "frame-support", @@ -1597,7 +1583,7 @@ dependencies = [ [[package]] name = "cumulus-pallet-xcm" version = "0.1.0" -source = 
"git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.33#27721d794ee63aae42317a7eeda21595dd3200d9" +source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.36#afe528af891f464b318293f183f6d3eefbc979b0" dependencies = [ "cumulus-primitives-core", "frame-support", @@ -1613,7 +1599,7 @@ dependencies = [ [[package]] name = "cumulus-pallet-xcmp-queue" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.33#27721d794ee63aae42317a7eeda21595dd3200d9" +source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.36#afe528af891f464b318293f183f6d3eefbc979b0" dependencies = [ "cumulus-primitives-core", "frame-support", @@ -1631,7 +1617,7 @@ dependencies = [ [[package]] name = "cumulus-primitives-core" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.33#27721d794ee63aae42317a7eeda21595dd3200d9" +source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.36#afe528af891f464b318293f183f6d3eefbc979b0" dependencies = [ "parity-scale-codec", "polkadot-core-primitives", @@ -1646,7 +1632,7 @@ dependencies = [ [[package]] name = "cumulus-primitives-parachain-inherent" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.33#27721d794ee63aae42317a7eeda21595dd3200d9" +source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.36#afe528af891f464b318293f183f6d3eefbc979b0" dependencies = [ "async-trait", "cumulus-primitives-core", @@ -1669,7 +1655,7 @@ dependencies = [ [[package]] name = "cumulus-primitives-timestamp" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.33#27721d794ee63aae42317a7eeda21595dd3200d9" +source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.36#afe528af891f464b318293f183f6d3eefbc979b0" dependencies = [ "cumulus-primitives-core", "futures", @@ -1682,7 +1668,7 @@ dependencies = [ [[package]] name = "cumulus-primitives-utility" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.33#27721d794ee63aae42317a7eeda21595dd3200d9" +source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.36#afe528af891f464b318293f183f6d3eefbc979b0" dependencies = [ "cumulus-primitives-core", "frame-support", @@ -1698,7 +1684,7 @@ dependencies = [ [[package]] name = "cumulus-relay-chain-inprocess-interface" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.33#27721d794ee63aae42317a7eeda21595dd3200d9" +source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.36#afe528af891f464b318293f183f6d3eefbc979b0" dependencies = [ "async-trait", "cumulus-primitives-core", @@ -1723,7 +1709,7 @@ dependencies = [ [[package]] name = "cumulus-relay-chain-interface" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.33#27721d794ee63aae42317a7eeda21595dd3200d9" +source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.36#afe528af891f464b318293f183f6d3eefbc979b0" dependencies = [ "async-trait", "cumulus-primitives-core", @@ -1737,24 +1723,23 @@ dependencies = [ "sp-blockchain", "sp-state-machine", "thiserror", + "tokio", ] [[package]] name = "cumulus-relay-chain-minimal-node" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.33#27721d794ee63aae42317a7eeda21595dd3200d9" +source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.36#afe528af891f464b318293f183f6d3eefbc979b0" 
dependencies = [ - "array-bytes", + "array-bytes 6.0.0", "async-trait", "cumulus-primitives-core", "cumulus-relay-chain-interface", "cumulus-relay-chain-rpc-interface", "futures", "lru", - "polkadot-availability-distribution", "polkadot-core-primitives", "polkadot-network-bridge", - "polkadot-node-core-av-store", "polkadot-node-network-protocol", "polkadot-node-subsystem-util", "polkadot-overseer", @@ -1766,8 +1751,6 @@ dependencies = [ "sc-keystore", "sc-network", "sc-network-common", - "sc-network-light", - "sc-network-sync", "sc-service", "sc-telemetry", "sc-tracing", @@ -1786,24 +1769,25 @@ dependencies = [ [[package]] name = "cumulus-relay-chain-rpc-interface" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.33#27721d794ee63aae42317a7eeda21595dd3200d9" +source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.36#afe528af891f464b318293f183f6d3eefbc979b0" dependencies = [ "async-trait", - "backoff", "cumulus-primitives-core", "cumulus-relay-chain-interface", "futures", "futures-timer", "jsonrpsee", + "lru", "parity-scale-codec", "polkadot-service", "sc-client-api", "sc-rpc-api", + "serde", + "serde_json", "sp-api", "sp-authority-discovery", "sp-consensus-babe", "sp-core", - "sp-runtime", "sp-state-machine", "sp-storage", "tokio", @@ -1814,7 +1798,7 @@ dependencies = [ [[package]] name = "cumulus-test-relay-sproof-builder" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.33#27721d794ee63aae42317a7eeda21595dd3200d9" +source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.36#afe528af891f464b318293f183f6d3eefbc979b0" dependencies = [ "cumulus-primitives-core", "parity-scale-codec", @@ -2106,9 +2090,9 @@ dependencies = [ [[package]] name = "dyn-clone" -version = "1.0.9" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f94fa09c2aeea5b8839e414b7b841bf429fd25b9c522116ac97ee87856d88b2" +checksum = "c9b0705efd4599c15a38151f4721f7bc388306f61084d3bfd50bd07fbca5cb60" [[package]] name = "ecdsa" @@ -2248,9 +2232,9 @@ dependencies = [ [[package]] name = "environmental" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b91989ae21441195d7d9b9993a2f9295c7e1a8c96255d8b729accddc124797" +checksum = "e48c92028aaa870e83d51c64e5d4e0b6981b360c522198c23959f219a4e1b15b" [[package]] name = "errno" @@ -2273,33 +2257,6 @@ dependencies = [ "libc", ] -[[package]] -name = "ethbloom" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c22d4b5885b6aa2fe5e8b9329fb8d232bf739e434e6b87347c63bdd00c120f60" -dependencies = [ - "crunchy", - "fixed-hash", - "impl-rlp", - "impl-serde", - "tiny-keccak", -] - -[[package]] -name = "ethereum-types" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81224dc661606574f5a0f28c9947d0ee1d93ff11c5f1c4e7272f52e8c0b5483c" -dependencies = [ - "ethbloom", - "fixed-hash", - "impl-rlp", - "impl-serde", - "primitive-types", - "uint", -] - [[package]] name = "event-listener" version = "2.5.3" @@ -2429,9 +2386,9 @@ dependencies = [ [[package]] name = "finality-grandpa" -version = "0.16.0" +version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b22349c6a11563a202d95772a68e0fcf56119e74ea8a2a19cf2301460fcd0df5" +checksum = "e24e6c429951433ccb7c87fd528c60084834dcd14763182c1f83291bcde24c34" dependencies = [ "either", "futures", @@ -2527,7 
+2484,7 @@ checksum = "f82b0f4c27ad9f8bfd1f3208d882da2b09c301bc1c828fd3a00d0216d2fbbff6" dependencies = [ "crc32fast", "libz-sys", - "miniz_oxide", + "miniz_oxide 0.5.4", ] [[package]] @@ -2548,7 +2505,7 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "fork-tree" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "parity-scale-codec", ] @@ -2571,7 +2528,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "frame-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-support", "frame-system", @@ -2594,10 +2551,10 @@ dependencies = [ [[package]] name = "frame-benchmarking-cli" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "Inflector", - "array-bytes", + "array-bytes 4.2.0", "chrono", "clap", "comfy-table", @@ -2646,7 +2603,7 @@ dependencies = [ [[package]] name = "frame-election-provider-solution-type" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2657,7 +2614,7 @@ dependencies = [ [[package]] name = "frame-election-provider-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-election-provider-solution-type", "frame-support", @@ -2665,6 +2622,7 @@ dependencies = [ "parity-scale-codec", "scale-info", "sp-arithmetic", + "sp-core", "sp-npos-elections", "sp-runtime", "sp-std", @@ -2673,7 +2631,7 @@ dependencies = [ [[package]] name = "frame-executive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-support", "frame-system", @@ -2699,10 +2657,29 @@ dependencies = [ "serde", ] +[[package]] +name = "frame-remote-externalities" +version = "0.10.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" +dependencies = [ + "env_logger", + "futures", + "log", + "parity-scale-codec", + "serde", + "serde_json", + "sp-core", + "sp-io", + "sp-runtime", + "sp-version", + "substrate-rpc-client", + "tokio", +] + [[package]] name = "frame-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "bitflags", "frame-metadata", @@ -2734,7 +2711,7 @@ dependencies = [ [[package]] name = "frame-support-procedural" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "Inflector", "cfg-expr", @@ -2748,7 +2725,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate", @@ -2760,7 +2737,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "proc-macro2", "quote", @@ -2770,7 +2747,7 @@ dependencies = [ [[package]] name = "frame-support-test" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-support", "frame-support-test-pallet", @@ -2793,7 +2770,7 @@ dependencies = [ [[package]] name = "frame-support-test-pallet" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-support", "frame-system", @@ -2804,7 +2781,7 @@ dependencies = [ [[package]] name = "frame-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-support", "log", @@ -2822,7 +2799,7 @@ dependencies = [ [[package]] name = "frame-system-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", "frame-support", @@ -2837,7 +2814,7 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "parity-scale-codec", "sp-api", @@ -2846,7 +2823,7 @@ dependencies = [ [[package]] name = "frame-try-runtime" version = "0.10.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-support", "parity-scale-codec", @@ -3088,6 +3065,12 @@ dependencies = [ "stable_deref_trait", ] +[[package]] +name = "gimli" +version = "0.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dec7af912d60cdbd3677c1af9352ebae6fb8394d165568a2234df0fa00f87793" + [[package]] name = "glob" version = "0.3.0" @@ -3211,6 +3194,15 @@ dependencies = [ "libc", ] +[[package]] +name = "hermit-abi" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" +dependencies = [ + "libc", +] + [[package]] name = "hex" version = "0.4.3" @@ -3296,6 +3288,12 @@ dependencies = [ "pin-project-lite 0.2.9", ] +[[package]] +name = "http-range-header" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" + [[package]] name = "httparse" version = "1.8.0" @@ -3450,15 +3448,6 @@ dependencies = [ "parity-scale-codec", ] -[[package]] -name = "impl-rlp" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f28220f89297a075ddc7245cd538076ee98b01f2a9c23a53a4f1105d5a322808" -dependencies = [ - "rlp", -] - [[package]] name = "impl-serde" version = "0.4.0" @@ -3567,6 +3556,18 @@ version = "2.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f88c5561171189e69df9d98bcf18fd5f9558300f7ea7b801eb8a0fd748bd8745" +[[package]] +name = "is-terminal" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28dfb6c8100ccc63462345b67d1bbc3679177c75ee4bf59bf29c8b1d110b8189" +dependencies = [ + "hermit-abi 0.2.6", + "io-lifetimes 1.0.1", + "rustix 0.36.6", + "windows-sys 0.42.0", +] + [[package]] name = "itertools" version = "0.10.5" @@ -3602,24 +3603,23 @@ dependencies = [ [[package]] name = "jsonrpsee" -version = "0.15.1" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bd0d559d5e679b1ab2f869b486a11182923863b1b3ee8b421763cdd707b783a" +checksum = "7d291e3a5818a2384645fd9756362e6d89cf0541b0b916fa7702ea4a9833608e" dependencies = [ "jsonrpsee-core", - "jsonrpsee-http-server", "jsonrpsee-proc-macros", + "jsonrpsee-server", "jsonrpsee-types", "jsonrpsee-ws-client", - "jsonrpsee-ws-server", "tracing", ] [[package]] name = "jsonrpsee-client-transport" -version = "0.15.1" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8752740ecd374bcbf8b69f3e80b0327942df76f793f8d4e60d3355650c31fb74" +checksum = "965de52763f2004bc91ac5bcec504192440f0b568a5d621c59d9dbd6f886c3fb" dependencies = [ "futures-util", "http", @@ -3638,9 +3638,9 @@ dependencies = [ [[package]] name = "jsonrpsee-core" -version = "0.15.1" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3dc3e9cf2ba50b7b1d7d76a667619f82846caa39e8e8daa8a4962d74acaddca" +checksum = "a4e70b4439a751a5de7dd5ed55eacff78ebf4ffe0fc009cb1ebb11417f5b536b" dependencies = [ "anyhow", "arrayvec 0.7.2", @@ -3651,10 +3651,8 @@ dependencies = [ "futures-timer", "futures-util", "globset", - "http", "hyper", "jsonrpsee-types", - "lazy_static", "parking_lot 0.12.1", "rand 
0.8.5", "rustc-hash", @@ -3664,45 +3662,48 @@ dependencies = [ "thiserror", "tokio", "tracing", - "tracing-futures", - "unicase", ] [[package]] -name = "jsonrpsee-http-server" -version = "0.15.1" +name = "jsonrpsee-proc-macros" +version = "0.16.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baa6da1e4199c10d7b1d0a6e5e8bd8e55f351163b6f4b3cbb044672a69bd4c1c" +dependencies = [ + "heck", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "jsonrpsee-server" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03802f0373a38c2420c70b5144742d800b509e2937edc4afb116434f07120117" +checksum = "1fb69dad85df79527c019659a992498d03f8495390496da2f07e6c24c2b356fc" dependencies = [ "futures-channel", "futures-util", + "http", "hyper", "jsonrpsee-core", "jsonrpsee-types", "serde", "serde_json", + "soketto", "tokio", + "tokio-stream", + "tokio-util", + "tower", "tracing", - "tracing-futures", -] - -[[package]] -name = "jsonrpsee-proc-macros" -version = "0.15.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd67957d4280217247588ac86614ead007b301ca2fa9f19c19f880a536f029e3" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", ] [[package]] name = "jsonrpsee-types" -version = "0.15.1" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e290bba767401b646812f608c099b922d8142603c9e73a50fb192d3ac86f4a0d" +checksum = "5bd522fe1ce3702fd94812965d7bb7a3364b1c9aba743944c5a00529aae80f8c" dependencies = [ "anyhow", "beef", @@ -3714,9 +3715,9 @@ dependencies = [ [[package]] name = "jsonrpsee-ws-client" -version = "0.15.1" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ee5feddd5188e62ac08fcf0e56478138e581509d4730f3f7be9b57dd402a4ff" +checksum = "0b83daeecfc6517cfe210df24e570fb06213533dfb990318fae781f4c7119dd9" dependencies = [ "http", "jsonrpsee-client-transport", @@ -3724,26 +3725,6 @@ dependencies = [ "jsonrpsee-types", ] -[[package]] -name = "jsonrpsee-ws-server" -version = "0.15.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d488ba74fb369e5ab68926feb75a483458b88e768d44319f37e4ecad283c7325" -dependencies = [ - "futures-channel", - "futures-util", - "http", - "jsonrpsee-core", - "jsonrpsee-types", - "serde_json", - "soketto", - "tokio", - "tokio-stream", - "tokio-util", - "tracing", - "tracing-futures", -] - [[package]] name = "k256" version = "0.11.6" @@ -3790,10 +3771,7 @@ dependencies = [ "cumulus-client-service", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", - "cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", - "cumulus-relay-chain-minimal-node", - "cumulus-relay-chain-rpc-interface", "frame-benchmarking", "frame-benchmarking-cli", "futures", @@ -3950,10 +3928,9 @@ dependencies = [ [[package]] name = "kusama-runtime" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ - "beefy-primitives", "bitvec", "frame-benchmarking", "frame-election-provider-support", @@ -3980,13 +3957,13 @@ dependencies = [ "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", "pallet-fast-unstake", - "pallet-gilt", "pallet-grandpa", 
"pallet-identity", "pallet-im-online", "pallet-indices", "pallet-membership", "pallet-multisig", + "pallet-nis", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", @@ -4024,6 +4001,7 @@ dependencies = [ "sp-api", "sp-arithmetic", "sp-authority-discovery", + "sp-beefy", "sp-block-builder", "sp-consensus-babe", "sp-core", @@ -4047,8 +4025,8 @@ dependencies = [ [[package]] name = "kusama-runtime-constants" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "frame-support", "polkadot-primitives", @@ -4070,35 +4048,31 @@ dependencies = [ [[package]] name = "kvdb" -version = "0.12.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "585089ceadba0197ffe9af6740ab350b325e3c1f5fccfbc3522e0250c750409b" +checksum = "e7d770dcb02bf6835887c3a979b5107a04ff4bbde97a5f0928d27404a155add9" dependencies = [ - "parity-util-mem", "smallvec", ] [[package]] name = "kvdb-memorydb" -version = "0.12.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40d109c87bfb7759edd2a49b2649c1afe25af785d930ad6a38479b4dc70dd873" +checksum = "bf7a85fe66f9ff9cd74e169fdd2c94c6e1e74c412c99a73b4df3200b5d3760b2" dependencies = [ "kvdb", - "parity-util-mem", "parking_lot 0.12.1", ] [[package]] name = "kvdb-rocksdb" -version = "0.16.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c076cc2cdbac89b9910c853a36c957d3862a779f31c2661174222cefb49ee597" +checksum = "2182b8219fee6bd83aacaab7344e840179ae079d5216aa4e249b4d704646a844" dependencies = [ "kvdb", - "log", "num_cpus", - "parity-util-mem", "parking_lot 0.12.1", "regex", "rocksdb", @@ -4212,7 +4186,6 @@ version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2322c9fb40d99101def6a01612ee30500c89abbbecb6297b3cd252903a4c1720" dependencies = [ - "async-std-resolver", "futures", "libp2p-core", "log", @@ -4276,7 +4249,6 @@ version = "0.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "761704e727f7d68d58d7bc2231eafae5fc1b9814de24290f126df09d4bd37a15" dependencies = [ - "async-io", "data-encoding", "dns-parser", "futures", @@ -4287,6 +4259,7 @@ dependencies = [ "rand 0.8.5", "smallvec", "socket2", + "tokio", "void", ] @@ -4415,7 +4388,6 @@ version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9839d96761491c6d3e238e70554b856956fca0ab60feb9de2cd08eed4473fa92" dependencies = [ - "async-io", "futures", "futures-timer", "if-watch", @@ -4423,6 +4395,7 @@ dependencies = [ "libp2p-core", "log", "socket2", + "tokio", ] [[package]] @@ -4701,7 +4674,7 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b20a59d985586e4a5aef64564ac77299f8586d8be6cf9106a5a40207e8908efb" dependencies = [ - "rustix 0.36.2", + "rustix 0.36.6", ] [[package]] @@ -4724,22 +4697,12 @@ dependencies = [ [[package]] name = "memory-db" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34ac11bb793c28fa095b7554466f53b3a60a2cd002afdac01bcf135cbd73a269" +checksum = "5e0c7cba9ce19ac7ffd2053ac9f49843bbd3f4318feedfd74e85c19d5fb0ba66" dependencies = [ "hash-db", "hashbrown", - 
"parity-util-mem", -] - -[[package]] -name = "memory-lru" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce95ae042940bad7e312857b929ee3d11b8f799a80cb7b9c7ec5125516906395" -dependencies = [ - "lru", ] [[package]] @@ -4786,6 +4749,15 @@ dependencies = [ "adler", ] +[[package]] +name = "miniz_oxide" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" +dependencies = [ + "adler", +] + [[package]] name = "mio" version = "0.8.5" @@ -4798,6 +4770,42 @@ dependencies = [ "windows-sys 0.42.0", ] +[[package]] +name = "mmr-gadget" +version = "4.0.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" +dependencies = [ + "futures", + "log", + "parity-scale-codec", + "sc-client-api", + "sc-offchain", + "sp-api", + "sp-beefy", + "sp-blockchain", + "sp-consensus", + "sp-core", + "sp-io", + "sp-mmr-primitives", + "sp-runtime", +] + +[[package]] +name = "mmr-rpc" +version = "4.0.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" +dependencies = [ + "anyhow", + "jsonrpsee", + "parity-scale-codec", + "serde", + "sp-api", + "sp-blockchain", + "sp-core", + "sp-mmr-primitives", + "sp-runtime", +] + [[package]] name = "mockall" version = "0.11.3" @@ -4893,9 +4901,9 @@ checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] name = "multistream-select" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bc41247ec209813e2fd414d6e16b9d94297dacf3cd613fa6ef09cd4d9755c10" +checksum = "c8552ab875c1313b97b8d20cb857b9fd63e2d1d6a0a1b53ce9821e575405f27a" dependencies = [ "bytes", "futures", @@ -5116,7 +5124,7 @@ version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6058e64324c71e02bc2b150e4f3bc8286db6c83092132ffa3f6b1eab0f9def5" dependencies = [ - "hermit-abi", + "hermit-abi 0.1.19", "libc", ] @@ -5132,6 +5140,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "object" +version = "0.30.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b8c786513eb403643f2a88c244c2aaa270ef2153f55094587d0c48a3cf22a83" +dependencies = [ + "memchr", +] + [[package]] name = "once_cell" version = "1.16.0" @@ -5215,13 +5232,14 @@ dependencies = [ [[package]] name = "pallet-assets" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", "parity-scale-codec", "scale-info", + "sp-core", "sp-runtime", "sp-std", ] @@ -5229,7 +5247,7 @@ dependencies = [ [[package]] name = "pallet-aura" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-support", "frame-system", @@ -5245,7 +5263,7 @@ dependencies = [ [[package]] name = "pallet-authority-discovery" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-support", "frame-system", @@ -5261,7 +5279,7 @@ dependencies = [ [[package]] name = "pallet-authorship" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-support", "frame-system", @@ -5276,7 +5294,7 @@ dependencies = [ [[package]] name = "pallet-babe" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", "frame-support", @@ -5300,7 +5318,7 @@ dependencies = [ [[package]] name = "pallet-bags-list" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5320,7 +5338,7 @@ dependencies = [ [[package]] name = "pallet-balances" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", "frame-support", @@ -5335,15 +5353,15 @@ dependencies = [ [[package]] name = "pallet-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ - "beefy-primitives", "frame-support", "frame-system", "pallet-session", "parity-scale-codec", "scale-info", "serde", + "sp-beefy", "sp-runtime", "sp-std", ] @@ -5351,11 +5369,10 @@ dependencies = [ [[package]] name = "pallet-beefy-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ - "array-bytes", + "array-bytes 4.2.0", "beefy-merkle-tree", - "beefy-primitives", "frame-support", "frame-system", "log", @@ -5365,6 +5382,7 @@ dependencies = [ "parity-scale-codec", "scale-info", "serde", + "sp-beefy", "sp-core", "sp-io", "sp-runtime", @@ -5374,7 +5392,7 @@ dependencies = [ [[package]] name = "pallet-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", "frame-support", @@ -5392,7 +5410,7 @@ dependencies = [ [[package]] name = "pallet-child-bounties" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", "frame-support", @@ -5411,7 +5429,7 @@ dependencies = [ [[package]] name = "pallet-collator-selection" version = "3.0.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.33#27721d794ee63aae42317a7eeda21595dd3200d9" +source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.36#afe528af891f464b318293f183f6d3eefbc979b0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5430,7 +5448,7 @@ dependencies = [ [[package]] name = "pallet-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", "frame-support", @@ -5447,7 +5465,7 @@ dependencies = [ [[package]] name = "pallet-conviction-voting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "assert_matches", "frame-benchmarking", @@ -5464,7 +5482,7 @@ dependencies = [ [[package]] name = "pallet-democracy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", "frame-support", @@ -5482,7 +5500,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-multi-phase" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5506,7 +5524,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-support-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5519,7 +5537,7 @@ dependencies = [ [[package]] name = "pallet-elections-phragmen" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", "frame-support", @@ -5537,7 +5555,7 @@ dependencies = [ [[package]] name = "pallet-fast-unstake" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", 
"frame-election-provider-support", @@ -5552,25 +5570,10 @@ dependencies = [ "sp-std", ] -[[package]] -name = "pallet-gilt" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" -dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", - "parity-scale-codec", - "scale-info", - "sp-arithmetic", - "sp-runtime", - "sp-std", -] - [[package]] name = "pallet-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", "frame-support", @@ -5593,7 +5596,7 @@ dependencies = [ [[package]] name = "pallet-identity" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "enumflags2", "frame-benchmarking", @@ -5609,7 +5612,7 @@ dependencies = [ [[package]] name = "pallet-im-online" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", "frame-support", @@ -5629,7 +5632,7 @@ dependencies = [ [[package]] name = "pallet-indices" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", "frame-support", @@ -5646,7 +5649,7 @@ dependencies = [ [[package]] name = "pallet-lottery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", "frame-support", @@ -5660,7 +5663,7 @@ dependencies = [ [[package]] name = "pallet-membership" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", "frame-support", @@ -5677,9 +5680,8 @@ dependencies = [ [[package]] name = "pallet-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ - "ckb-merkle-mountain-range", "frame-benchmarking", "frame-support", "frame-system", @@ -5692,22 +5694,6 @@ dependencies = [ "sp-std", ] -[[package]] -name = "pallet-mmr-rpc" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" -dependencies = [ - "anyhow", - "jsonrpsee", - "parity-scale-codec", - "serde", - "sp-api", - "sp-blockchain", - 
"sp-core", - "sp-mmr-primitives", - "sp-runtime", -] - [[package]] name = "pallet-mq-runtime-api" version = "0.1.0" @@ -5719,7 +5705,7 @@ dependencies = [ [[package]] name = "pallet-multisig" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", "frame-support", @@ -5732,10 +5718,26 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-nis" +version = "4.0.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "parity-scale-codec", + "scale-info", + "sp-arithmetic", + "sp-core", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-nomination-pools" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-support", "frame-system", @@ -5752,7 +5754,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-benchmarking" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5772,7 +5774,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-runtime-api" version = "1.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "parity-scale-codec", "sp-api", @@ -5782,7 +5784,7 @@ dependencies = [ [[package]] name = "pallet-offences" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-support", "frame-system", @@ -5799,7 +5801,7 @@ dependencies = [ [[package]] name = "pallet-offences-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5856,7 +5858,7 @@ dependencies = [ [[package]] name = "pallet-preimage" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", "frame-support", @@ -5873,7 +5875,7 @@ dependencies = [ [[package]] name = "pallet-proxy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" 
+source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", "frame-support", @@ -5888,7 +5890,7 @@ dependencies = [ [[package]] name = "pallet-randomness-collective-flip" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-support", "frame-system", @@ -5902,7 +5904,7 @@ dependencies = [ [[package]] name = "pallet-ranked-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", "frame-support", @@ -5920,7 +5922,7 @@ dependencies = [ [[package]] name = "pallet-recovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", "frame-support", @@ -5935,12 +5937,13 @@ dependencies = [ [[package]] name = "pallet-referenda" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "assert_matches", "frame-benchmarking", "frame-support", "frame-system", + "log", "parity-scale-codec", "scale-info", "serde", @@ -5953,7 +5956,7 @@ dependencies = [ [[package]] name = "pallet-rmrk-core" version = "0.0.1" -source = "git+https://github.com/Phala-Network/rmrk-substrate?branch=polkadot-v0.9.33#dca6475f9dfbeaa61e310820cbf701358099defd" +source = "git+https://github.com/Phala-Network/rmrk-substrate?branch=polkadot-v0.9.36#b1badc2422193b239b42a328e72c03c7eb6a097d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5971,7 +5974,7 @@ dependencies = [ [[package]] name = "pallet-rmrk-equip" version = "0.0.1" -source = "git+https://github.com/Phala-Network/rmrk-substrate?branch=polkadot-v0.9.33#dca6475f9dfbeaa61e310820cbf701358099defd" +source = "git+https://github.com/Phala-Network/rmrk-substrate?branch=polkadot-v0.9.36#b1badc2422193b239b42a328e72c03c7eb6a097d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5990,7 +5993,7 @@ dependencies = [ [[package]] name = "pallet-rmrk-market" version = "0.0.1" -source = "git+https://github.com/Phala-Network/rmrk-substrate?branch=polkadot-v0.9.33#dca6475f9dfbeaa61e310820cbf701358099defd" +source = "git+https://github.com/Phala-Network/rmrk-substrate?branch=polkadot-v0.9.36#b1badc2422193b239b42a328e72c03c7eb6a097d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6009,7 +6012,7 @@ dependencies = [ [[package]] name = "pallet-rmrk-rpc" version = "0.0.1" -source = "git+https://github.com/Phala-Network/rmrk-substrate?branch=polkadot-v0.9.33#dca6475f9dfbeaa61e310820cbf701358099defd" +source = "git+https://github.com/Phala-Network/rmrk-substrate?branch=polkadot-v0.9.36#b1badc2422193b239b42a328e72c03c7eb6a097d" dependencies = [ "jsonrpsee", "pallet-rmrk-rpc-runtime-api", @@ -6026,7 +6029,7 @@ dependencies = [ 
[[package]] name = "pallet-rmrk-rpc-runtime-api" version = "0.0.1" -source = "git+https://github.com/Phala-Network/rmrk-substrate?branch=polkadot-v0.9.33#dca6475f9dfbeaa61e310820cbf701358099defd" +source = "git+https://github.com/Phala-Network/rmrk-substrate?branch=polkadot-v0.9.36#b1badc2422193b239b42a328e72c03c7eb6a097d" dependencies = [ "parity-scale-codec", "rmrk-traits", @@ -6041,7 +6044,7 @@ dependencies = [ [[package]] name = "pallet-scheduler" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", "frame-support", @@ -6052,12 +6055,13 @@ dependencies = [ "sp-io", "sp-runtime", "sp-std", + "sp-weights", ] [[package]] name = "pallet-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-support", "frame-system", @@ -6078,7 +6082,7 @@ dependencies = [ [[package]] name = "pallet-session-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", "frame-support", @@ -6094,7 +6098,7 @@ dependencies = [ [[package]] name = "pallet-society" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-support", "frame-system", @@ -6108,7 +6112,7 @@ dependencies = [ [[package]] name = "pallet-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -6131,7 +6135,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-curve" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -6142,7 +6146,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-fn" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "log", "sp-arithmetic", @@ -6151,7 +6155,7 @@ dependencies = [ [[package]] name = "pallet-state-trie-migration" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", "frame-support", @@ -6168,7 +6172,7 @@ dependencies = [ [[package]] name = "pallet-sudo" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-support", "frame-system", @@ -6182,7 +6186,7 @@ dependencies = [ [[package]] name = "pallet-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", "frame-support", @@ -6200,7 +6204,7 @@ dependencies = [ [[package]] name = "pallet-tips" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", "frame-support", @@ -6219,7 +6223,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-support", "frame-system", @@ -6235,7 +6239,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "jsonrpsee", "pallet-transaction-payment-rpc-runtime-api", @@ -6251,7 +6255,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "pallet-transaction-payment", "parity-scale-codec", @@ -6263,7 +6267,7 @@ dependencies = [ [[package]] name = "pallet-treasury" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", "frame-support", @@ -6280,7 +6284,7 @@ dependencies = [ [[package]] name = "pallet-uniques" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", "frame-support", @@ -6295,7 +6299,7 @@ dependencies = [ [[package]] name = "pallet-utility" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", "frame-support", @@ -6311,7 +6315,7 @@ dependencies = [ [[package]] name = "pallet-vesting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", "frame-support", @@ -6326,7 +6330,7 @@ dependencies = [ [[package]] name = "pallet-whitelist" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-benchmarking", "frame-support", @@ -6340,8 +6344,8 @@ dependencies = [ [[package]] name = "pallet-xcm" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "frame-support", "frame-system", @@ -6358,8 +6362,8 @@ dependencies = [ [[package]] name = "pallet-xcm-benchmarks" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "frame-benchmarking", "frame-support", @@ -6376,7 +6380,7 @@ dependencies = [ [[package]] name = "parachain-info" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.33#27721d794ee63aae42317a7eeda21595dd3200d9" +source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.36#afe528af891f464b318293f183f6d3eefbc979b0" dependencies = [ "cumulus-primitives-core", "frame-support", @@ -6463,35 +6467,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa9777aa91b8ad9dd5aaa04a9b6bcb02c7f1deb952fca5a66034d5e63afc5c6f" -[[package]] -name = "parity-util-mem" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d32c34f4f5ca7f9196001c0aba5a1f9a5a12382c8944b8b0f90233282d1e8f8" -dependencies = [ - "cfg-if", - "ethereum-types", - "hashbrown", - "impl-trait-for-tuples", - "lru", - "parity-util-mem-derive", - "parking_lot 0.12.1", - "primitive-types", - "smallvec", - "winapi", -] - -[[package]] -name = "parity-util-mem-derive" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" -dependencies = [ - "proc-macro2", - "syn", - "synstructure", -] - [[package]] name = "parity-wasm" version = "0.45.0" @@ -6850,7 +6825,6 @@ dependencies = [ "impl-serde", "keccak-hasher", "parity-scale-codec", - "parity-util-mem", "scale-info", "serde", "serde_json", @@ -6937,8 +6911,8 @@ checksum = "e8d0eef3571242013a0d5dc84861c3ae4a652e56e12adf8bdc26ff5f8cb34c94" [[package]] name = "polkadot-approval-distribution" -version = "0.9.33" -source = 
"git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "futures", "polkadot-node-network-protocol", @@ -6952,8 +6926,8 @@ dependencies = [ [[package]] name = "polkadot-availability-bitfield-distribution" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "futures", "polkadot-node-network-protocol", @@ -6966,8 +6940,8 @@ dependencies = [ [[package]] name = "polkadot-availability-distribution" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "derive_more", "fatality", @@ -6989,8 +6963,8 @@ dependencies = [ [[package]] name = "polkadot-availability-recovery" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "fatality", "futures", @@ -7010,8 +6984,8 @@ dependencies = [ [[package]] name = "polkadot-cli" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "clap", "frame-benchmarking-cli", @@ -7023,12 +6997,13 @@ dependencies = [ "polkadot-performance-test", "polkadot-service", "sc-cli", + "sc-executor", "sc-service", "sc-sysinfo", "sc-tracing", "sp-core", + "sp-io", "sp-keyring", - "sp-trie", "substrate-build-script-utils", "thiserror", "try-runtime-cli", @@ -7036,14 +7011,15 @@ dependencies = [ [[package]] name = "polkadot-client" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ - "beefy-primitives", + "async-trait", "frame-benchmarking", "frame-benchmarking-cli", "frame-system", "frame-system-rpc-runtime-api", + "futures", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "polkadot-core-primitives", @@ -7057,6 +7033,7 @@ dependencies = [ "sc-service", "sp-api", "sp-authority-discovery", + "sp-beefy", "sp-block-builder", "sp-blockchain", "sp-consensus", @@ -7076,8 +7053,8 @@ dependencies = [ [[package]] name = "polkadot-collator-protocol" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "always-assert", "bitvec", @@ -7098,11 +7075,10 @@ dependencies = [ [[package]] name = "polkadot-core-primitives" -version = "0.9.33" 
-source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "parity-scale-codec", - "parity-util-mem", "scale-info", "sp-core", "sp-runtime", @@ -7111,8 +7087,8 @@ dependencies = [ [[package]] name = "polkadot-dispute-distribution" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "derive_more", "fatality", @@ -7136,8 +7112,8 @@ dependencies = [ [[package]] name = "polkadot-erasure-coding" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "parity-scale-codec", "polkadot-node-primitives", @@ -7150,8 +7126,8 @@ dependencies = [ [[package]] name = "polkadot-gossip-support" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "futures", "futures-timer", @@ -7170,8 +7146,8 @@ dependencies = [ [[package]] name = "polkadot-network-bridge" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "always-assert", "async-trait", @@ -7194,8 +7170,8 @@ dependencies = [ [[package]] name = "polkadot-node-collation-generation" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "futures", "parity-scale-codec", @@ -7212,8 +7188,8 @@ dependencies = [ [[package]] name = "polkadot-node-core-approval-voting" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "bitvec", "derive_more", @@ -7241,8 +7217,8 @@ dependencies = [ [[package]] name = "polkadot-node-core-av-store" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "bitvec", "futures", @@ -7261,8 +7237,8 @@ dependencies = [ [[package]] name = "polkadot-node-core-backing" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = 
"git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "bitvec", "fatality", @@ -7280,8 +7256,8 @@ dependencies = [ [[package]] name = "polkadot-node-core-bitfield-signing" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "futures", "polkadot-node-subsystem", @@ -7295,8 +7271,8 @@ dependencies = [ [[package]] name = "polkadot-node-core-candidate-validation" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "async-trait", "futures", @@ -7314,8 +7290,8 @@ dependencies = [ [[package]] name = "polkadot-node-core-chain-api" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "futures", "polkadot-node-subsystem", @@ -7329,8 +7305,8 @@ dependencies = [ [[package]] name = "polkadot-node-core-chain-selection" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "futures", "futures-timer", @@ -7346,8 +7322,8 @@ dependencies = [ [[package]] name = "polkadot-node-core-dispute-coordinator" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "fatality", "futures", @@ -7365,13 +7341,14 @@ dependencies = [ [[package]] name = "polkadot-node-core-parachains-inherent" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "async-trait", "futures", "futures-timer", "polkadot-node-subsystem", + "polkadot-overseer", "polkadot-primitives", "sp-blockchain", "sp-inherents", @@ -7382,8 +7359,8 @@ dependencies = [ [[package]] name = "polkadot-node-core-provisioner" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "bitvec", "fatality", @@ -7400,13 +7377,14 @@ dependencies = [ [[package]] name = "polkadot-node-core-pvf" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = 
"git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "always-assert", "assert_matches", "async-process", "async-std", + "cpu-time", "futures", "futures-timer", "parity-scale-codec", @@ -7432,8 +7410,8 @@ dependencies = [ [[package]] name = "polkadot-node-core-pvf-checker" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "futures", "polkadot-node-primitives", @@ -7448,12 +7426,11 @@ dependencies = [ [[package]] name = "polkadot-node-core-runtime-api" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "futures", - "memory-lru", - "parity-util-mem", + "lru", "polkadot-node-subsystem", "polkadot-node-subsystem-types", "polkadot-node-subsystem-util", @@ -7464,10 +7441,9 @@ dependencies = [ [[package]] name = "polkadot-node-jaeger" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ - "async-std", "lazy_static", "log", "mick-jaeger", @@ -7478,12 +7454,13 @@ dependencies = [ "sc-network", "sp-core", "thiserror", + "tokio", ] [[package]] name = "polkadot-node-metrics" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "bs58", "futures", @@ -7501,8 +7478,8 @@ dependencies = [ [[package]] name = "polkadot-node-network-protocol" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "async-trait", "derive_more", @@ -7524,8 +7501,8 @@ dependencies = [ [[package]] name = "polkadot-node-primitives" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "bounded-vec", "futures", @@ -7546,8 +7523,8 @@ dependencies = [ [[package]] name = "polkadot-node-subsystem" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "polkadot-node-jaeger", "polkadot-node-subsystem-types", @@ -7556,8 +7533,8 @@ dependencies = [ [[package]] name = "polkadot-node-subsystem-types" -version = "0.9.33" -source = 
"git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "async-trait", "derive_more", @@ -7579,8 +7556,8 @@ dependencies = [ [[package]] name = "polkadot-node-subsystem-util" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "async-trait", "derive_more", @@ -7591,7 +7568,6 @@ dependencies = [ "lru", "parity-db", "parity-scale-codec", - "parity-util-mem", "parking_lot 0.11.2", "pin-project", "polkadot-node-jaeger", @@ -7612,15 +7588,14 @@ dependencies = [ [[package]] name = "polkadot-overseer" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "async-trait", "futures", "futures-timer", "lru", "orchestra", - "parity-util-mem", "parking_lot 0.12.1", "polkadot-node-metrics", "polkadot-node-network-protocol", @@ -7630,18 +7605,18 @@ dependencies = [ "sc-client-api", "sp-api", "sp-core", + "tikv-jemalloc-ctl", "tracing-gum", ] [[package]] name = "polkadot-parachain" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "derive_more", "frame-support", "parity-scale-codec", - "parity-util-mem", "polkadot-core-primitives", "scale-info", "serde", @@ -7652,8 +7627,8 @@ dependencies = [ [[package]] name = "polkadot-performance-test" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "env_logger", "kusama-runtime", @@ -7667,13 +7642,12 @@ dependencies = [ [[package]] name = "polkadot-primitives" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "bitvec", "hex-literal", "parity-scale-codec", - "parity-util-mem", "polkadot-core-primitives", "polkadot-parachain", "scale-info", @@ -7694,13 +7668,13 @@ dependencies = [ [[package]] name = "polkadot-rpc" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "beefy-gadget", "beefy-gadget-rpc", "jsonrpsee", - "pallet-mmr-rpc", + "mmr-rpc", "pallet-transaction-payment-rpc", "polkadot-primitives", "sc-chain-spec", @@ -7726,10 +7700,9 @@ dependencies = [ [[package]] name = "polkadot-runtime" -version = "0.9.33" -source = 
"git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ - "beefy-primitives", "bitvec", "frame-benchmarking", "frame-election-provider-support", @@ -7792,6 +7765,7 @@ dependencies = [ "smallvec", "sp-api", "sp-authority-discovery", + "sp-beefy", "sp-block-builder", "sp-consensus-babe", "sp-core", @@ -7815,10 +7789,9 @@ dependencies = [ [[package]] name = "polkadot-runtime-common" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ - "beefy-primitives", "bitvec", "frame-benchmarking", "frame-election-provider-support", @@ -7849,6 +7822,7 @@ dependencies = [ "serde_derive", "slot-range-helper", "sp-api", + "sp-beefy", "sp-core", "sp-inherents", "sp-io", @@ -7863,8 +7837,8 @@ dependencies = [ [[package]] name = "polkadot-runtime-constants" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "frame-support", "polkadot-primitives", @@ -7877,8 +7851,8 @@ dependencies = [ [[package]] name = "polkadot-runtime-metrics" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "bs58", "parity-scale-codec", @@ -7889,8 +7863,8 @@ dependencies = [ [[package]] name = "polkadot-runtime-parachains" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "bitflags", "bitvec", @@ -7932,11 +7906,10 @@ dependencies = [ [[package]] name = "polkadot-service" -version = "0.9.33" +version = "0.9.36" dependencies = [ "async-trait", "beefy-gadget", - "beefy-primitives", "frame-support", "frame-system-rpc-runtime-api", "futures", @@ -7945,6 +7918,7 @@ dependencies = [ "kvdb", "kvdb-rocksdb", "lru", + "mmr-gadget", "pallet-babe", "pallet-im-online", "pallet-staking", @@ -8010,6 +7984,7 @@ dependencies = [ "serde_json", "sp-api", "sp-authority-discovery", + "sp-beefy", "sp-block-builder", "sp-blockchain", "sp-consensus", @@ -8019,6 +7994,7 @@ dependencies = [ "sp-inherents", "sp-io", "sp-keystore", + "sp-mmr-primitives", "sp-offchain", "sp-runtime", "sp-session", @@ -8035,8 +8011,8 @@ dependencies = [ [[package]] name = "polkadot-statement-distribution" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "arrayvec 0.5.2", "fatality", @@ -8056,8 +8032,8 @@ dependencies = [ [[package]] name = "polkadot-statement-table" -version = 
"0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "parity-scale-codec", "polkadot-primitives", @@ -8167,7 +8143,6 @@ checksum = "9f3486ccba82358b11a77516035647c34ba167dfa53312630de83b12bd4f3d66" dependencies = [ "fixed-hash", "impl-codec", - "impl-rlp", "impl-serde", "scale-info", "uint", @@ -8226,9 +8201,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.47" +version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725" +checksum = "6ef7d57beacfaf2d8aee5937dab7b7f28de3cb8b1828479bb5de2a7106f2bae2" dependencies = [ "unicode-ident", ] @@ -8377,9 +8352,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.21" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179" +checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b" dependencies = [ "proc-macro2", ] @@ -8619,23 +8594,6 @@ version = "0.6.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" -[[package]] -name = "remote-externalities" -version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" -dependencies = [ - "env_logger", - "log", - "parity-scale-codec", - "serde", - "serde_json", - "sp-core", - "sp-io", - "sp-runtime", - "sp-version", - "substrate-rpc-client", -] - [[package]] name = "remove_dir_all" version = "0.5.3" @@ -8781,20 +8739,10 @@ dependencies = [ "winapi", ] -[[package]] -name = "rlp" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" -dependencies = [ - "bytes", - "rustc-hex", -] - [[package]] name = "rmrk-traits" version = "0.0.1" -source = "git+https://github.com/Phala-Network/rmrk-substrate?branch=polkadot-v0.9.33#dca6475f9dfbeaa61e310820cbf701358099defd" +source = "git+https://github.com/Phala-Network/rmrk-substrate?branch=polkadot-v0.9.36#b1badc2422193b239b42a328e72c03c7eb6a097d" dependencies = [ "frame-benchmarking", "frame-support", @@ -8819,11 +8767,10 @@ dependencies = [ [[package]] name = "rococo-runtime" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "beefy-merkle-tree", - "beefy-primitives", "frame-benchmarking", "frame-executive", "frame-support", @@ -8843,7 +8790,6 @@ dependencies = [ "pallet-collective", "pallet-democracy", "pallet-elections-phragmen", - "pallet-gilt", "pallet-grandpa", "pallet-identity", "pallet-im-online", @@ -8851,6 +8797,7 @@ dependencies = [ "pallet-membership", "pallet-mmr", "pallet-multisig", + "pallet-nis", "pallet-offences", "pallet-preimage", "pallet-proxy", @@ -8882,6 +8829,7 @@ dependencies = [ "smallvec", "sp-api", "sp-authority-discovery", + "sp-beefy", "sp-block-builder", "sp-consensus-babe", "sp-core", @@ -8904,8 +8852,8 @@ dependencies = [ [[package]] name = 
"rococo-runtime-constants" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "frame-support", "polkadot-primitives", @@ -8993,9 +8941,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.36.2" +version = "0.36.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "203974af07ea769452490ee8de3e5947971efc3a090dca8a779dd432d3fa46a7" +checksum = "4feacf7db682c6c329c4ede12649cd36ecab0f3be5b7d74e6a20304725db4549" dependencies = [ "bitflags", "errno", @@ -9082,7 +9030,7 @@ dependencies = [ [[package]] name = "sc-allocator" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "log", "sp-core", @@ -9093,7 +9041,7 @@ dependencies = [ [[package]] name = "sc-authority-discovery" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "async-trait", "futures", @@ -9120,7 +9068,7 @@ dependencies = [ [[package]] name = "sc-basic-authorship" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "futures", "futures-timer", @@ -9143,7 +9091,7 @@ dependencies = [ [[package]] name = "sc-block-builder" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "parity-scale-codec", "sc-client-api", @@ -9159,7 +9107,7 @@ dependencies = [ [[package]] name = "sc-chain-spec" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "impl-trait-for-tuples", "memmap2", @@ -9176,7 +9124,7 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -9187,9 +9135,9 @@ dependencies = [ [[package]] name = "sc-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ - "array-bytes", + "array-bytes 4.2.0", "chrono", "clap", "fdlimit", @@ -9227,7 +9175,7 @@ dependencies = [ [[package]] name = 
"sc-client-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "fnv", "futures", @@ -9255,6 +9203,7 @@ dependencies = [ [[package]] name = "sc-client-db" version = "0.10.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "hash-db", "kvdb", @@ -9279,13 +9228,14 @@ dependencies = [ [[package]] name = "sc-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "async-trait", "futures", "futures-timer", "libp2p", "log", + "mockall", "parking_lot 0.12.1", "sc-client-api", "sc-utils", @@ -9303,7 +9253,7 @@ dependencies = [ [[package]] name = "sc-consensus-aura" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "async-trait", "futures", @@ -9332,7 +9282,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "async-trait", "fork-tree", @@ -9373,7 +9323,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "futures", "jsonrpsee", @@ -9395,7 +9345,7 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "fork-tree", "parity-scale-codec", @@ -9408,7 +9358,7 @@ dependencies = [ [[package]] name = "sc-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "async-trait", "futures", @@ -9432,9 +9382,8 @@ dependencies = [ [[package]] name = "sc-executor" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ - "lazy_static", "lru", "parity-scale-codec", "parking_lot 0.12.1", @@ -9443,7 +9392,6 @@ dependencies = [ "sc-executor-wasmtime", "sp-api", "sp-core", - "sp-core-hashing-proc-macro", "sp-externalities", 
"sp-io", "sp-panic-handler", @@ -9458,13 +9406,10 @@ dependencies = [ [[package]] name = "sc-executor-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ - "environmental", - "parity-scale-codec", "sc-allocator", "sp-maybe-compressed-blob", - "sp-sandbox", "sp-wasm-interface", "thiserror", "wasm-instrument", @@ -9474,14 +9419,12 @@ dependencies = [ [[package]] name = "sc-executor-wasmi" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "log", - "parity-scale-codec", "sc-allocator", "sc-executor-common", "sp-runtime-interface", - "sp-sandbox", "sp-wasm-interface", "wasmi", ] @@ -9489,19 +9432,16 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "cfg-if", "libc", "log", "once_cell", - "parity-scale-codec", - "parity-wasm", "rustix 0.35.13", "sc-allocator", "sc-executor-common", "sp-runtime-interface", - "sp-sandbox", "sp-wasm-interface", "wasmtime", ] @@ -9509,10 +9449,10 @@ dependencies = [ [[package]] name = "sc-finality-grandpa" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "ahash", - "array-bytes", + "array-bytes 4.2.0", "async-trait", "dyn-clone", "finality-grandpa", @@ -9550,7 +9490,7 @@ dependencies = [ [[package]] name = "sc-finality-grandpa-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "finality-grandpa", "futures", @@ -9571,13 +9511,12 @@ dependencies = [ [[package]] name = "sc-informant" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "ansi_term", "futures", "futures-timer", "log", - "parity-util-mem", "sc-client-api", "sc-network-common", "sc-transaction-pool-api", @@ -9588,9 +9527,9 @@ dependencies = [ [[package]] name = "sc-keystore" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ - "array-bytes", + "array-bytes 4.2.0", "async-trait", "parking_lot 0.12.1", "serde_json", @@ -9603,9 +9542,9 @@ dependencies = [ [[package]] name = "sc-network" version = "0.10.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ - "array-bytes", + "array-bytes 4.2.0", "async-trait", "asynchronous-codec", "bitflags", @@ -9650,7 +9589,7 @@ dependencies = [ [[package]] name = "sc-network-bitswap" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "cid", "futures", @@ -9670,7 +9609,7 @@ dependencies = [ [[package]] name = "sc-network-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "async-trait", "bitflags", @@ -9696,7 +9635,7 @@ dependencies = [ [[package]] name = "sc-network-gossip" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "ahash", "futures", @@ -9714,9 +9653,9 @@ dependencies = [ [[package]] name = "sc-network-light" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ - "array-bytes", + "array-bytes 4.2.0", "futures", "libp2p", "log", @@ -9735,9 +9674,10 @@ dependencies = [ [[package]] name = "sc-network-sync" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ - "array-bytes", + "array-bytes 4.2.0", + "async-trait", "fork-tree", "futures", "libp2p", @@ -9759,15 +9699,16 @@ dependencies = [ "sp-core", "sp-finality-grandpa", "sp-runtime", + "substrate-prometheus-endpoint", "thiserror", ] [[package]] name = "sc-network-transactions" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ - "array-bytes", + "array-bytes 4.2.0", "futures", "hex", "libp2p", @@ -9784,9 +9725,9 @@ dependencies = [ [[package]] name = "sc-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ - "array-bytes", + "array-bytes 4.2.0", "bytes", "fnv", "futures", @@ -9814,7 +9755,7 @@ dependencies = [ [[package]] name = "sc-peerset" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "futures", "libp2p", @@ -9827,7 +9768,7 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -9836,7 +9777,7 @@ dependencies = [ [[package]] name = "sc-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "futures", "hash-db", @@ -9866,7 +9807,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "futures", "jsonrpsee", @@ -9889,20 +9830,23 @@ dependencies = [ [[package]] name = "sc-rpc-server" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "futures", + "http", "jsonrpsee", "log", "serde_json", "substrate-prometheus-endpoint", "tokio", + "tower", + "tower-http", ] [[package]] name = "sc-rpc-spec-v2" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "futures", "hex", @@ -9921,7 +9865,7 @@ dependencies = [ [[package]] name = "sc-service" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "async-trait", "directories", @@ -9932,7 +9876,6 @@ dependencies = [ "jsonrpsee", "log", "parity-scale-codec", - "parity-util-mem", "parking_lot 0.12.1", "pin-project", "rand 0.7.3", @@ -9992,11 +9935,10 @@ dependencies = [ [[package]] name = "sc-state-db" version = "0.10.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "log", "parity-scale-codec", - "parity-util-mem", - "parity-util-mem-derive", "parking_lot 0.12.1", "sc-client-api", "sp-core", @@ -10005,7 +9947,7 @@ dependencies = [ [[package]] name = "sc-sync-state-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -10024,7 +9966,7 @@ dependencies = [ [[package]] name = "sc-sysinfo" version = "6.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "futures", "libc", @@ -10043,7 +9985,7 @@ dependencies = [ [[package]] name = "sc-telemetry" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "chrono", "futures", @@ -10061,7 +10003,7 @@ dependencies = [ [[package]] name = "sc-tracing" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "ansi_term", "atty", @@ -10092,7 +10034,7 @@ dependencies = [ [[package]] name = "sc-tracing-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -10103,7 +10045,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "async-trait", "futures", @@ -10111,7 +10053,6 @@ dependencies = [ "linked-hash-map", "log", "parity-scale-codec", - "parity-util-mem", "parking_lot 0.12.1", "sc-client-api", "sc-transaction-pool-api", @@ -10130,7 +10071,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "async-trait", "futures", @@ -10144,7 +10085,7 @@ dependencies = [ [[package]] name = "sc-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "futures", "futures-timer", @@ -10156,9 +10097,9 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d8a765117b237ef233705cc2cc4c6a27fccd46eea6ef0c8c6dae5f3ef407f8" +checksum = "001cf62ece89779fd16105b5f515ad0e5cedcd5440d3dd806bb067978e7c3608" dependencies = [ "bitvec", "cfg-if", @@ -10170,9 +10111,9 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdcd47b380d8c4541044e341dcd9475f55ba37ddc50c908d945fc036a8642496" +checksum = "303959cf613a6f6efd19ed4b4ad5bf79966a13352716299ad532cfb115f4205c" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -10329,18 +10270,18 @@ 
checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.147" +version = "1.0.152" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965" +checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.147" +version = "1.0.152" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852" +checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e" dependencies = [ "proc-macro2", "quote", @@ -10562,8 +10503,8 @@ checksum = "03b634d87b960ab1a38c4fe143b508576f075e7c978bfad18217645ebfdfa2ec" [[package]] name = "slot-range-helper" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "enumn", "parity-scale-codec", @@ -10630,6 +10571,7 @@ dependencies = [ "bytes", "flate2", "futures", + "http", "httparse", "log", "rand 0.8.5", @@ -10639,7 +10581,7 @@ dependencies = [ [[package]] name = "sp-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "hash-db", "log", @@ -10657,7 +10599,7 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "blake2", "proc-macro-crate", @@ -10668,8 +10610,8 @@ dependencies = [ [[package]] name = "sp-application-crypto" -version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +version = "7.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "parity-scale-codec", "scale-info", @@ -10681,8 +10623,8 @@ dependencies = [ [[package]] name = "sp-arithmetic" -version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +version = "6.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "integer-sqrt", "num-traits", @@ -10697,7 +10639,7 @@ dependencies = [ [[package]] name = "sp-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "parity-scale-codec", "scale-info", @@ -10710,7 +10652,7 @@ dependencies = [ [[package]] name = "sp-authorship" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "async-trait", "parity-scale-codec", @@ -10719,10 +10661,27 @@ dependencies = [ "sp-std", ] +[[package]] +name = "sp-beefy" +version = "4.0.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" +dependencies = [ + "parity-scale-codec", + "scale-info", + "serde", + "sp-api", + "sp-application-crypto", + "sp-core", + "sp-io", + "sp-mmr-primitives", + "sp-runtime", + "sp-std", +] + [[package]] name = "sp-block-builder" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "parity-scale-codec", "sp-api", @@ -10734,7 +10693,7 @@ dependencies = [ [[package]] name = "sp-blockchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "futures", "log", @@ -10752,7 +10711,7 @@ dependencies = [ [[package]] name = "sp-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "async-trait", "futures", @@ -10771,7 +10730,7 @@ dependencies = [ [[package]] name = "sp-consensus-aura" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "async-trait", "parity-scale-codec", @@ -10789,7 +10748,7 @@ dependencies = [ [[package]] name = "sp-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "async-trait", "merlin", @@ -10812,7 +10771,7 @@ dependencies = [ [[package]] name = "sp-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "parity-scale-codec", "scale-info", @@ -10826,7 +10785,7 @@ dependencies = [ [[package]] name = "sp-consensus-vrf" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "parity-scale-codec", "scale-info", @@ -10838,10 +10797,10 @@ dependencies = [ [[package]] name = "sp-core" -version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +version = "7.0.0" +source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ - "array-bytes", + "array-bytes 4.2.0", "base58", "bitflags", "blake2", @@ -10883,8 +10842,8 @@ dependencies = [ [[package]] name = "sp-core-hashing" -version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +version = "5.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "blake2", "byteorder", @@ -10898,7 +10857,7 @@ dependencies = [ [[package]] name = "sp-core-hashing-proc-macro" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "proc-macro2", "quote", @@ -10909,7 +10868,7 @@ dependencies = [ [[package]] name = "sp-database" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "kvdb", "parking_lot 0.12.1", @@ -10917,8 +10876,8 @@ dependencies = [ [[package]] name = "sp-debug-derive" -version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +version = "5.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "proc-macro2", "quote", @@ -10927,8 +10886,8 @@ dependencies = [ [[package]] name = "sp-externalities" -version = "0.12.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +version = "0.13.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "environmental", "parity-scale-codec", @@ -10939,7 +10898,7 @@ dependencies = [ [[package]] name = "sp-finality-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "finality-grandpa", "log", @@ -10957,7 +10916,7 @@ dependencies = [ [[package]] name = "sp-inherents" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "async-trait", "impl-trait-for-tuples", @@ -10970,10 +10929,11 @@ dependencies = [ [[package]] name = "sp-io" -version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +version = "7.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "bytes", + "ed25519-dalek", "futures", "hash-db", "libsecp256k1", @@ -10996,8 +10956,8 @@ dependencies = [ [[package]] name = "sp-keyring" -version = "6.0.0" -source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +version = "7.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "lazy_static", "sp-core", @@ -11007,8 +10967,8 @@ dependencies = [ [[package]] name = "sp-keystore" -version = "0.12.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +version = "0.13.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "async-trait", "futures", @@ -11025,7 +10985,7 @@ dependencies = [ [[package]] name = "sp-maybe-compressed-blob" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "thiserror", "zstd", @@ -11034,8 +10994,9 @@ dependencies = [ [[package]] name = "sp-mmr-primitives" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ + "ckb-merkle-mountain-range", "log", "parity-scale-codec", "scale-info", @@ -11051,7 +11012,7 @@ dependencies = [ [[package]] name = "sp-npos-elections" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "parity-scale-codec", "scale-info", @@ -11065,7 +11026,7 @@ dependencies = [ [[package]] name = "sp-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "sp-api", "sp-core", @@ -11074,8 +11035,8 @@ dependencies = [ [[package]] name = "sp-panic-handler" -version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +version = "5.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "backtrace", "lazy_static", @@ -11085,7 +11046,7 @@ dependencies = [ [[package]] name = "sp-rpc" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "rustc-hash", "serde", @@ -11094,15 +11055,14 @@ dependencies = [ [[package]] name = "sp-runtime" -version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +version = "7.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "either", "hash256-std-hasher", "impl-trait-for-tuples", "log", "parity-scale-codec", - "parity-util-mem", "paste", 
"rand 0.7.3", "scale-info", @@ -11117,8 +11077,8 @@ dependencies = [ [[package]] name = "sp-runtime-interface" -version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +version = "7.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -11135,8 +11095,8 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" -version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +version = "6.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "Inflector", "proc-macro-crate", @@ -11145,24 +11105,10 @@ dependencies = [ "syn", ] -[[package]] -name = "sp-sandbox" -version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" -dependencies = [ - "log", - "parity-scale-codec", - "sp-core", - "sp-io", - "sp-std", - "sp-wasm-interface", - "wasmi", -] - [[package]] name = "sp-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "parity-scale-codec", "scale-info", @@ -11176,18 +11122,19 @@ dependencies = [ [[package]] name = "sp-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "parity-scale-codec", "scale-info", + "sp-core", "sp-runtime", "sp-std", ] [[package]] name = "sp-state-machine" -version = "0.12.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +version = "0.13.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "hash-db", "log", @@ -11208,13 +11155,13 @@ dependencies = [ [[package]] name = "sp-std" -version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +version = "5.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" [[package]] name = "sp-storage" -version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +version = "7.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "impl-serde", "parity-scale-codec", @@ -11227,7 +11174,7 @@ dependencies = [ [[package]] name = "sp-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "async-trait", "futures-timer", @@ -11242,8 +11189,8 @@ dependencies = [ [[package]] name = "sp-tracing" -version = "5.0.0" 
-source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +version = "6.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "parity-scale-codec", "sp-std", @@ -11255,7 +11202,7 @@ dependencies = [ [[package]] name = "sp-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "sp-api", "sp-runtime", @@ -11264,7 +11211,7 @@ dependencies = [ [[package]] name = "sp-transaction-storage-proof" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "async-trait", "log", @@ -11279,8 +11226,8 @@ dependencies = [ [[package]] name = "sp-trie" -version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +version = "7.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "ahash", "hash-db", @@ -11303,7 +11250,7 @@ dependencies = [ [[package]] name = "sp-version" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "impl-serde", "parity-scale-codec", @@ -11320,7 +11267,7 @@ dependencies = [ [[package]] name = "sp-version-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "parity-scale-codec", "proc-macro2", @@ -11330,8 +11277,8 @@ dependencies = [ [[package]] name = "sp-wasm-interface" -version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +version = "7.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "impl-trait-for-tuples", "log", @@ -11344,7 +11291,7 @@ dependencies = [ [[package]] name = "sp-weights" version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -11557,7 +11504,7 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "platforms", ] @@ -11565,7 +11512,7 @@ dependencies = [ [[package]] name = "substrate-frame-rpc-system" version = 
"4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "frame-system-rpc-runtime-api", "futures", @@ -11586,7 +11533,7 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "futures-util", "hyper", @@ -11599,7 +11546,7 @@ dependencies = [ [[package]] name = "substrate-rpc-client" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "async-trait", "jsonrpsee", @@ -11612,7 +11559,7 @@ dependencies = [ [[package]] name = "substrate-state-trie-migration-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "jsonrpsee", "log", @@ -11633,7 +11580,7 @@ dependencies = [ [[package]] name = "substrate-wasm-builder" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "ansi_term", "build-helper", @@ -11655,9 +11602,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.103" +version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a864042229133ada95abf3b54fdc62ef5ccabe9515b64717bcb9a1919e59445d" +checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5" dependencies = [ "proc-macro2", "quote", @@ -11838,18 +11785,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10deb33631e3c9018b9baf9dcbbc4f737320d2b576bac10f6aefa048fa407e3e" +checksum = "6a9cd18aa97d5c45c6603caea1da6628790b37f7a34b6ca89522331c5180fed0" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "982d17546b47146b28f7c22e3d08465f6b8903d0ea13c1660d9d84a6e7adcdbb" +checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f" dependencies = [ "proc-macro2", "quote", @@ -11893,6 +11840,17 @@ dependencies = [ "threadpool", ] +[[package]] +name = "tikv-jemalloc-ctl" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e37706572f4b151dff7a0146e040804e9c26fe3a3118591112f05cf12a4216c1" +dependencies = [ + "libc", + "paste", + "tikv-jemalloc-sys", +] + [[package]] name = "tikv-jemalloc-sys" version = "0.5.2+5.3.0-patched" @@ -11960,9 +11918,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" 
[[package]] name = "tokio" -version = "1.22.0" +version = "1.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d76ce4a75fb488c605c54bf610f221cea8b0dafb53333c1a67e8ee199dcd2ae3" +checksum = "1d9f76183f91ecfb55e1d7d5602bd1d979e38a3a522fe900241cf195624d67ae" dependencies = [ "autocfg", "bytes", @@ -11975,7 +11933,7 @@ dependencies = [ "signal-hook-registry", "socket2", "tokio-macros", - "winapi", + "windows-sys 0.42.0", ] [[package]] @@ -12035,6 +11993,41 @@ dependencies = [ "serde", ] +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-http" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f873044bf02dd1e8239e9c1293ea39dad76dc594ec16185d0a1bf31d8dc8d858" +dependencies = [ + "bitflags", + "bytes", + "futures-core", + "futures-util", + "http", + "http-body", + "http-range-header", + "pin-project-lite 0.2.9", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" + [[package]] name = "tower-service" version = "0.3.2" @@ -12048,6 +12041,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ "cfg-if", + "log", "pin-project-lite 0.2.9", "tracing-attributes", "tracing-core", @@ -12086,8 +12080,8 @@ dependencies = [ [[package]] name = "tracing-gum" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "polkadot-node-jaeger", "polkadot-primitives", @@ -12097,8 +12091,8 @@ dependencies = [ [[package]] name = "tracing-gum-proc-macro" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "expander 0.0.6", "proc-macro-crate", @@ -12193,6 +12187,7 @@ dependencies = [ "smallvec", "thiserror", "tinyvec", + "tokio", "tracing", "url", ] @@ -12212,6 +12207,7 @@ dependencies = [ "resolv-conf", "smallvec", "thiserror", + "tokio", "tracing", "trust-dns-proto", ] @@ -12225,22 +12221,26 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "try-runtime-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.33#2dff067e9f7f6f3cc4dbfdaaa97753eccc407689" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.36#cb4f2491b00af7d7817f3a54209c26b20faa1f51" dependencies = [ "clap", + "frame-remote-externalities", "frame-try-runtime", + "hex", "log", "parity-scale-codec", - "remote-externalities", "sc-chain-spec", "sc-cli", "sc-executor", "sc-service", "serde", + "sp-api", "sp-core", + "sp-debug-derive", "sp-externalities", "sp-io", "sp-keystore", + "sp-rpc", "sp-runtime", "sp-state-machine", "sp-version", @@ -12279,7 +12279,7 @@ 
checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ "cfg-if", "digest 0.10.6", - "rand 0.8.5", + "rand 0.7.3", "static_assertions", ] @@ -12307,15 +12307,6 @@ dependencies = [ "static_assertions", ] -[[package]] -name = "unicase" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" -dependencies = [ - "version_check", -] - [[package]] name = "unicode-bidi" version = "0.3.8" @@ -12658,7 +12649,7 @@ dependencies = [ "indexmap", "libc", "log", - "object", + "object 0.29.0", "once_cell", "paste", "psm", @@ -12715,9 +12706,9 @@ dependencies = [ "cranelift-frontend", "cranelift-native", "cranelift-wasm", - "gimli", + "gimli 0.26.2", "log", - "object", + "object 0.29.0", "target-lexicon", "thiserror", "wasmparser", @@ -12732,10 +12723,10 @@ checksum = "ebb881c61f4f627b5d45c54e629724974f8a8890d455bcbe634330cc27309644" dependencies = [ "anyhow", "cranelift-entity", - "gimli", + "gimli 0.26.2", "indexmap", "log", - "object", + "object 0.29.0", "serde", "target-lexicon", "thiserror", @@ -12749,14 +12740,14 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1985c628011fe26adf5e23a5301bdc79b245e0e338f14bb58b39e4e25e4d8681" dependencies = [ - "addr2line", + "addr2line 0.17.0", "anyhow", "bincode", "cfg-if", "cpp_demangle", - "gimli", + "gimli 0.26.2", "log", - "object", + "object 0.29.0", "rustc-demangle", "rustix 0.35.13", "serde", @@ -12774,7 +12765,7 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f671b588486f5ccec8c5a3dba6b4c07eac2e66ab8c60e6f4e53717c77f709731" dependencies = [ - "object", + "object 0.29.0", "once_cell", "rustix 0.35.13", ] @@ -12865,10 +12856,9 @@ dependencies = [ [[package]] name = "westend-runtime" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ - "beefy-primitives", "bitvec", "frame-benchmarking", "frame-election-provider-support", @@ -12932,6 +12922,7 @@ dependencies = [ "smallvec", "sp-api", "sp-authority-discovery", + "sp-beefy", "sp-block-builder", "sp-consensus-babe", "sp-core", @@ -12955,8 +12946,8 @@ dependencies = [ [[package]] name = "westend-runtime-constants" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "frame-support", "polkadot-primitives", @@ -13189,8 +13180,8 @@ dependencies = [ [[package]] name = "xcm" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "derivative", "impl-trait-for-tuples", @@ -13203,8 +13194,8 @@ dependencies = [ [[package]] name = "xcm-builder" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = 
"git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "frame-support", "frame-system", @@ -13223,8 +13214,8 @@ dependencies = [ [[package]] name = "xcm-executor" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "frame-benchmarking", "frame-support", @@ -13241,8 +13232,8 @@ dependencies = [ [[package]] name = "xcm-procedural" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "Inflector", "proc-macro2", @@ -13252,8 +13243,8 @@ dependencies = [ [[package]] name = "xcm-simulator" -version = "0.9.33" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.33#c7d6c21242fc654f6f069e12c00951484dff334d" +version = "0.9.36" +source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.36#dc25abc712e42b9b51d87ad1168e453a42b5f0bc" dependencies = [ "frame-support", "parity-scale-codec", diff --git a/Cargo.toml b/Cargo.toml index de868f5b..f09415be 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,8 +25,6 @@ exclude = [ "vendor/webpki", "vendor/ring", "polkadot/node/service", - "substrate/client/db", - "substrate/client/state-db", ] members = [ @@ -55,8 +53,3 @@ members = [ [patch."https://github.com/paritytech/polkadot"] polkadot-service = { path = "polkadot/node/service" } - -# Remove after we upgrade to polkadot-v0.9.36 -[patch."https://github.com/paritytech/substrate"] -sc-state-db = { path = "substrate/client/state-db" } -sc-client-db = { path = "substrate/client/db" } diff --git a/crates/phala-mq/Cargo.toml b/crates/phala-mq/Cargo.toml index bcfda487..ace6fa0e 100644 --- a/crates/phala-mq/Cargo.toml +++ b/crates/phala-mq/Cargo.toml @@ -11,7 +11,7 @@ hex = { version = "0.4.3", default-features = false, features = ['alloc'] } derive_more = { version = "0.99", default-features = false, features = ["display"] } parity-scale-codec = { version = "3.0", default-features = false, features = ["derive"] } scale-info = { version = "2.0", default-features = false, features = ["derive"] } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } serde = { version = "1.0", default-features = false, features = ["derive"] } spin = { version = "0.9", default-features = false, features = ["mutex", "use_ticket_mutex"], optional = true } diff --git a/crates/phala-node-rpc-ext/Cargo.toml b/crates/phala-node-rpc-ext/Cargo.toml index 9f6c5225..b0afd196 100644 --- a/crates/phala-node-rpc-ext/Cargo.toml +++ b/crates/phala-node-rpc-ext/Cargo.toml @@ -11,7 +11,7 @@ repository = "https://github.com/Phala-Network/phala-blockchain" # third-party dependencies serde = { version = "1.0.102", features = ["derive"] } thiserror = "1.0" -jsonrpsee = { version = "0.15.1", features = ["server"] } +jsonrpsee = { version = "0.16.2", features = ["server"] } impl-serde = "0.4.0" log = { version = "0.4.14", default-features = false } hex = { version = "0.4.3", default-features = false } 
@@ -19,14 +19,14 @@ codec = { package = "parity-scale-codec", version = "3.1" } scale-info = { version = "2.1", default-features = false } # primitives -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } # client dependencies -sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sc-transaction-pool-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sc-rpc = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } +sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sc-transaction-pool-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sc-rpc = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } phala-mq = { path = "../../crates/phala-mq" } phala-pallets = { path = "../../pallets/phala" } diff --git a/crates/phala-pallet-common/Cargo.toml b/crates/phala-pallet-common/Cargo.toml index d99f12ad..f4d00ce2 100644 --- a/crates/phala-pallet-common/Cargo.toml +++ b/crates/phala-pallet-common/Cargo.toml @@ -13,20 +13,20 @@ codec = { package = "parity-scale-codec", version = "3.0", default-features = fa scale-info = { version = "2.0", default-features = false, features = ["derive", "serde", "decode"] } # Substrate -sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } [dev-dependencies] -sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-std = { git = 
"https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } [features] default = ["std"] diff --git a/crates/phala-serde-more/Cargo.toml b/crates/phala-serde-more/Cargo.toml index 126e9a0e..2e5f03f2 100644 --- a/crates/phala-serde-more/Cargo.toml +++ b/crates/phala-serde-more/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" [dependencies] serde = { version = "1.0.130", default-features = false, features = ["derive", "alloc"] } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } scale = { package = "parity-scale-codec", version = "3.1", default-features = false } hex = { version = "0.4.3", default-features = false, features = ["alloc"] } diff --git a/crates/phala-trie-storage/Cargo.toml b/crates/phala-trie-storage/Cargo.toml index 27815e4f..989ce5d3 100644 --- a/crates/phala-trie-storage/Cargo.toml +++ b/crates/phala-trie-storage/Cargo.toml @@ -10,19 +10,18 @@ repository = "https://github.com/Phala-Network/phala-blockchain" [dependencies] parity-scale-codec = { version = "3.0", default-features = false } scale-info = { version = "2.0", default-features = false, features = ["derive"] } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", features = ["full_crypto"] } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", features = ["full_crypto"] } +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } serde = { version = "1.0", default-features = false, features = ["derive", "alloc"], optional = true } hash-db = "0.15.2" trie-db = "0.24.0" im = { version = "15", features = ["serde"] } -parity-util-mem = "0.12.0" [dev-dependencies] -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", features = ["full_crypto"] } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", features = ["full_crypto"] } hash256-std-hasher = { version = 
"0.15", default-features = false } hex = "0.4" serde_json = "1.0" diff --git a/crates/phala-trie-storage/src/lib.rs b/crates/phala-trie-storage/src/lib.rs index 856ca727..42b3f0af 100644 --- a/crates/phala-trie-storage/src/lib.rs +++ b/crates/phala-trie-storage/src/lib.rs @@ -1,5 +1,3 @@ -extern crate alloc; - #[cfg(feature = "serde")] pub mod ser; @@ -10,8 +8,6 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer}; use core::iter::FromIterator; -use alloc::vec::Vec; - use parity_scale_codec::Codec; use sp_core::storage::ChildInfo; use sp_core::Hasher; diff --git a/crates/phala-trie-storage/src/memdb.rs b/crates/phala-trie-storage/src/memdb.rs index 80688653..d9fad75f 100644 --- a/crates/phala-trie-storage/src/memdb.rs +++ b/crates/phala-trie-storage/src/memdb.rs @@ -1,10 +1,27 @@ +// Copyright 2017-2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + //! Reference-counted memory-based `HashDB` implementation. + use hash_db::{ AsHashDB, AsPlainDB, HashDB, HashDBRef, Hasher as KeyHasher, PlainDB, PlainDBRef, Prefix, }; use im::{hashmap::Entry, HashMap}; -use parity_util_mem::{malloc_size, MallocSizeOf, MallocSizeOfOps}; -use std::{borrow::Borrow, cmp::Eq, hash, marker::PhantomData, mem}; +use std::{ + borrow::Borrow, cmp::Eq, hash, + marker::PhantomData, mem, +}; use sp_state_machine::{backend::Consolidate, DefaultError, TrieBackendStorage}; use trie_db::DBValue; @@ -12,39 +29,88 @@ use trie_db::DBValue; pub trait MaybeDebug: std::fmt::Debug {} impl MaybeDebug for T {} -pub type DefaultMemTracker = MemCounter; +pub type GenericMemoryDB = MemoryDB, trie_db::DBValue>; + +impl Consolidate for GenericMemoryDB { + fn consolidate(&mut self, other: Self) { + MemoryDB::consolidate(self, other) + } +} + +impl TrieBackendStorage for GenericMemoryDB { + type Overlay = Self; + + fn get( + &self, + key: &::Out, + prefix: Prefix, + ) -> Result, DefaultError> { + Ok(hash_db::HashDB::get(self, key, prefix)) + } +} -pub struct MemoryDB> -where - H: KeyHasher, - KF: KeyFunction, - M: MemTracker, +/// Reference-counted memory-based `HashDB` implementation. +/// +/// Use `new()` to create a new database. Insert items with `insert()`, remove items +/// with `remove()`, check for existence with `contains()` and lookup a hash to derive +/// the data with `get()`. Clear with `clear()` and purge the portions of the data +/// that have no references with `purge()`. 
+/// +pub struct MemoryDB + where + H: KeyHasher, + KF: KeyFunction, { data: HashMap, - malloc_tracker: M, hashed_null_node: H::Out, null_node_data: T, _kf: PhantomData, } -impl Clone for MemoryDB -where - H: KeyHasher, - KF: KeyFunction, - T: Clone, - M: MemTracker + Copy, +impl Clone for MemoryDB + where + H: KeyHasher, + KF: KeyFunction, + T: Clone, { fn clone(&self) -> Self { Self { data: self.data.clone(), hashed_null_node: self.hashed_null_node, null_node_data: self.null_node_data.clone(), - malloc_tracker: self.malloc_tracker, _kf: Default::default(), } } } +impl PartialEq> for MemoryDB + where + H: KeyHasher, + KF: KeyFunction, + >::Key: Eq + MaybeDebug, + T: Eq + MaybeDebug, +{ + fn eq(&self, other: &MemoryDB) -> bool { + for a in self.data.iter() { + match other.data.get(&a.0) { + Some(v) if v != a.1 => return false, + None => return false, + _ => (), + } + } + true + } +} + +impl Eq for MemoryDB + where + H: KeyHasher, + KF: KeyFunction, + >::Key: Eq + MaybeDebug, + T: Eq + MaybeDebug, +{ +} + pub trait KeyFunction { type Key: Send + Sync + Clone + hash::Hash + Eq; @@ -113,12 +179,11 @@ pub fn prefixed_key(key: &H::Out, prefix: Prefix) -> Vec { prefixed_key } -impl Default for MemoryDB -where - H: KeyHasher, - T: for<'a> From<&'a [u8]> + Clone, - KF: KeyFunction, - M: MemTracker + Default, +impl Default for MemoryDB + where + H: KeyHasher, + T: for<'a> From<&'a [u8]> + Clone, + KF: KeyFunction, { fn default() -> Self { Self::from_null_node(&[0u8][..], [0u8][..].into()) @@ -126,53 +191,50 @@ where } /// Create a new `MemoryDB` from a given null key/data -impl MemoryDB -where - H: KeyHasher, - T: Default + Clone, - KF: KeyFunction, - M: MemTracker, +impl MemoryDB + where + H: KeyHasher, + T: Default + Clone, + KF: KeyFunction, { /// Remove an element and delete it from storage if reference count reaches zero. /// If the value was purged, return the old value. pub fn remove_and_purge(&mut self, key: &::Out, prefix: Prefix) -> Option { if key == &self.hashed_null_node { - return None; + return None } let key = KF::key(key, prefix); match self.data.entry(key) { - Entry::Occupied(mut entry) => { + Entry::Occupied(mut entry) => if entry.get().1 == 1 { let (value, _) = entry.remove(); - self.malloc_tracker.on_remove(&value); Some(value) } else { entry.get_mut().1 -= 1; None - } - } + }, Entry::Vacant(entry) => { let value = T::default(); - self.malloc_tracker.on_insert(&value); entry.insert((value, -1)); None - } + }, } } - /// Shrinks the capacity of the map as much as possible. It will drop - /// down as much as possible while maintaining the internal rules - /// and possibly leaving some space in accordance with the resize policy. - #[inline] - pub fn shrink_to_fit(&mut self) {} + // /// Shrinks the capacity of the map as much as possible. It will drop + // /// down as much as possible while maintaining the internal rules + // /// and possibly leaving some space in accordance with the resize policy. 
+ // #[inline] + // pub fn shrink_to_fit(&mut self) { + // self.data.shrink_to_fit(); + // } } -impl MemoryDB -where - H: KeyHasher, - T: for<'a> From<&'a [u8]> + Clone, - KF: KeyFunction, - M: MemTracker + Default, +impl MemoryDB + where + H: KeyHasher, + T: for<'a> From<&'a [u8]> + Clone, + KF: KeyFunction, { /// Create a new `MemoryDB` from a given null key/data pub fn from_null_node(null_key: &[u8], null_node_data: T) -> Self { @@ -180,7 +242,6 @@ where data: HashMap::default(), hashed_null_node: H::hash(null_key), null_node_data, - malloc_tracker: M::default(), _kf: Default::default(), } } @@ -207,26 +268,21 @@ where } /// Clear all data from the database. + /// pub fn clear(&mut self) { - self.malloc_tracker.on_clear(); self.data.clear(); } /// Purge all zero-referenced data from the database. pub fn purge(&mut self) { - let malloc_tracker = &mut self.malloc_tracker; - self.data.retain(|_, (v, rc)| { + self.data.retain(|_, (_, rc)| { let keep = *rc != 0; - if !keep { - malloc_tracker.on_remove(v); - } keep }); } /// Return the internal key-value HashMap, clearing the current state. pub fn drain(&mut self) -> HashMap { - self.malloc_tracker.on_clear(); mem::take(&mut self.data) } @@ -237,38 +293,25 @@ where /// when the refs > 0. pub fn raw(&self, key: &::Out, prefix: Prefix) -> Option<(&T, i32)> { if key == &self.hashed_null_node { - return Some((&self.null_node_data, 1)); + return Some((&self.null_node_data, 1)) } - self.data - .get(&KF::key(key, prefix)) - .map(|(value, count)| (value, *count)) + self.data.get(&KF::key(key, prefix)).map(|(value, count)| (value, *count)) } /// Consolidate all the entries of `other` into `self`. pub fn consolidate(&mut self, mut other: Self) { for (key, (value, rc)) in other.drain() { - if rc == 0 { - continue; - } match self.data.entry(key) { Entry::Occupied(mut entry) => { if entry.get().1 < 0 { - self.malloc_tracker.on_insert(&value); - self.malloc_tracker.on_remove(&entry.get().0); entry.get_mut().0 = value; } entry.get_mut().1 += rc; - - if entry.get().1 == 0 { - let (value, _) = entry.remove(); - self.malloc_tracker.on_remove(&value); - } - } + }, Entry::Vacant(entry) => { - self.malloc_tracker.on_insert(&value); entry.insert((value, rc)); - } + }, } } } @@ -277,41 +320,17 @@ where pub fn keys(&self) -> HashMap { self.data .iter() - .filter_map(|(k, v)| { - if v.1 != 0 { - Some((k.clone(), v.1)) - } else { - None - } - }) + .filter_map(|(k, v)| if v.1 != 0 { Some((k.clone(), v.1)) } else { None }) .collect() } } -impl MallocSizeOf for MemoryDB -where - H: KeyHasher, - H::Out: MallocSizeOf, - T: MallocSizeOf, - KF: KeyFunction, - KF::Key: MallocSizeOf, - M: MemTracker, -{ - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - shallow_size_of_hashmap(&self.data, ops) - + self.malloc_tracker.get_size() - + self.null_node_data.size_of(ops) - + self.hashed_null_node.size_of(ops) - } -} - -impl PlainDB for MemoryDB -where - H: KeyHasher, - T: Default + PartialEq + for<'a> From<&'a [u8]> + Clone + Send + Sync, - KF: Send + Sync + KeyFunction, - KF::Key: Borrow<[u8]> + for<'a> From<&'a [u8]>, - M: MemTracker + Send + Sync, +impl PlainDB for MemoryDB + where + H: KeyHasher, + T: Default + PartialEq + for<'a> From<&'a [u8]> + Clone + Send + Sync, + KF: Send + Sync + KeyFunction, + KF::Key: Borrow<[u8]> + for<'a> From<&'a [u8]>, { fn get(&self, key: &H::Out) -> Option { match self.data.get(key.as_ref()) { @@ -321,7 +340,10 @@ where } fn contains(&self, key: &H::Out) -> bool { - matches!(self.data.get(key.as_ref()), Some(&(_, x)) if x > 0) + match 
self.data.get(key.as_ref()) { + Some(&(_, x)) if x > 0 => true, + _ => false, + } } fn emplace(&mut self, key: H::Out, value: T) { @@ -329,16 +351,13 @@ where Entry::Occupied(mut entry) => { let &mut (ref mut old_value, ref mut rc) = entry.get_mut(); if *rc <= 0 { - self.malloc_tracker.on_insert(&value); - self.malloc_tracker.on_remove(old_value); *old_value = value; } *rc += 1; - } + }, Entry::Vacant(entry) => { - self.malloc_tracker.on_insert(&value); entry.insert((value, 1)); - } + }, } } @@ -347,27 +366,21 @@ where Entry::Occupied(mut entry) => { let &mut (_, ref mut rc) = entry.get_mut(); *rc -= 1; - if *rc == 0 { - let (value, _) = entry.remove(); - self.malloc_tracker.on_remove(&value); - } - } + }, Entry::Vacant(entry) => { let value = T::default(); - self.malloc_tracker.on_insert(&value); entry.insert((value, -1)); - } + }, } } } -impl PlainDBRef for MemoryDB -where - H: KeyHasher, - T: Default + PartialEq + for<'a> From<&'a [u8]> + Clone + Send + Sync, - KF: Send + Sync + KeyFunction, - KF::Key: Borrow<[u8]> + for<'a> From<&'a [u8]>, - M: MemTracker + Send + Sync, +impl PlainDBRef for MemoryDB + where + H: KeyHasher, + T: Default + PartialEq + for<'a> From<&'a [u8]> + Clone + Send + Sync, + KF: Send + Sync + KeyFunction, + KF::Key: Borrow<[u8]> + for<'a> From<&'a [u8]>, { fn get(&self, key: &H::Out) -> Option { PlainDB::get(self, key) @@ -377,16 +390,15 @@ where } } -impl HashDB for MemoryDB -where - H: KeyHasher, - T: Default + PartialEq + AsRef<[u8]> + for<'a> From<&'a [u8]> + Clone + Send + Sync, - KF: KeyFunction + Send + Sync, - M: MemTracker + Send + Sync, +impl HashDB for MemoryDB + where + H: KeyHasher, + T: Default + PartialEq + AsRef<[u8]> + for<'a> From<&'a [u8]> + Clone + Send + Sync, + KF: KeyFunction + Send + Sync, { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { if key == &self.hashed_null_node { - return Some(self.null_node_data.clone()); + return Some(self.null_node_data.clone()) } let key = KF::key(key, prefix); @@ -398,16 +410,19 @@ where fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { if key == &self.hashed_null_node { - return true; + return true } let key = KF::key(key, prefix); - matches!(self.data.get(&key), Some(&(_, x)) if x > 0) + match self.data.get(&key) { + Some(&(_, x)) if x > 0 => true, + _ => false, + } } fn emplace(&mut self, key: H::Out, prefix: Prefix, value: T) { if value == self.null_node_data { - return; + return } let key = KF::key(&key, prefix); @@ -415,22 +430,19 @@ where Entry::Occupied(mut entry) => { let &mut (ref mut old_value, ref mut rc) = entry.get_mut(); if *rc <= 0 { - self.malloc_tracker.on_insert(&value); - self.malloc_tracker.on_remove(old_value); *old_value = value; } *rc += 1; - } + }, Entry::Vacant(entry) => { - self.malloc_tracker.on_insert(&value); entry.insert((value, 1)); - } + }, } } fn insert(&mut self, prefix: Prefix, value: &[u8]) -> H::Out { if T::from(value) == self.null_node_data { - return self.hashed_null_node; + return self.hashed_null_node } let key = H::hash(value); @@ -440,7 +452,7 @@ where fn remove(&mut self, key: &H::Out, prefix: Prefix) { if key == &self.hashed_null_node { - return; + return } let key = KF::key(key, prefix); @@ -448,26 +460,20 @@ where Entry::Occupied(mut entry) => { let &mut (_, ref mut rc) = entry.get_mut(); *rc -= 1; - if *rc == 0 { - let (value, _) = entry.remove(); - self.malloc_tracker.on_remove(&value); - } - } + }, Entry::Vacant(entry) => { let value = T::default(); - self.malloc_tracker.on_insert(&value); entry.insert((value, -1)); - } + }, } } } -impl 
HashDBRef for MemoryDB -where - H: KeyHasher, - T: Default + PartialEq + AsRef<[u8]> + for<'a> From<&'a [u8]> + Clone + Send + Sync, - KF: KeyFunction + Send + Sync, - M: MemTracker + Send + Sync, +impl HashDBRef for MemoryDB + where + H: KeyHasher, + T: Default + PartialEq + AsRef<[u8]> + for<'a> From<&'a [u8]> + Clone + Send + Sync, + KF: KeyFunction + Send + Sync, { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { HashDB::get(self, key, prefix) @@ -477,13 +483,12 @@ where } } -impl AsPlainDB for MemoryDB -where - H: KeyHasher, - T: Default + PartialEq + for<'a> From<&'a [u8]> + Clone + Send + Sync, - KF: KeyFunction + Send + Sync, - KF::Key: Borrow<[u8]> + for<'a> From<&'a [u8]>, - M: MemTracker + Send + Sync, +impl AsPlainDB for MemoryDB + where + H: KeyHasher, + T: Default + PartialEq + for<'a> From<&'a [u8]> + Clone + Send + Sync, + KF: KeyFunction + Send + Sync, + KF::Key: Borrow<[u8]> + for<'a> From<&'a [u8]>, { fn as_plain_db(&self) -> &dyn PlainDB { self @@ -493,12 +498,11 @@ where } } -impl AsHashDB for MemoryDB -where - H: KeyHasher, - T: Default + PartialEq + AsRef<[u8]> + for<'a> From<&'a [u8]> + Clone + Send + Sync, - KF: KeyFunction + Send + Sync, - M: MemTracker + Send + Sync, +impl AsHashDB for MemoryDB + where + H: KeyHasher, + T: Default + PartialEq + AsRef<[u8]> + for<'a> From<&'a [u8]> + Clone + Send + Sync, + KF: KeyFunction + Send + Sync, { fn as_hash_db(&self) -> &dyn HashDB { self @@ -508,146 +512,11 @@ where } } -/// Used to implement incremental evaluation of `MallocSizeOf` for a collection. -pub trait MemTracker { - /// Update `malloc_size_of` when a value is removed. - fn on_remove(&mut self, _value: &T) {} - /// Update `malloc_size_of` when a value is inserted. - fn on_insert(&mut self, _value: &T) {} - /// Reset `malloc_size_of` to zero. - fn on_clear(&mut self) {} - /// Get the allocated size of the values. - fn get_size(&self) -> usize { - 0 - } -} - -/// `MemTracker` implementation for types -/// which implement `MallocSizeOf`. -#[derive(Eq, PartialEq)] -pub struct MemCounter { - malloc_size_of_values: usize, - _phantom: PhantomData, -} - -impl MemCounter { - // Create a new instance of MemCounter. - pub fn new() -> Self { - Self { - malloc_size_of_values: 0, - _phantom: PhantomData, - } - } -} - -impl Default for MemCounter { - fn default() -> Self { - Self::new() - } -} - -impl Clone for MemCounter { - fn clone(&self) -> Self { - Self { - malloc_size_of_values: self.malloc_size_of_values, - _phantom: PhantomData, - } - } -} - -impl Copy for MemCounter {} - -impl MemTracker for MemCounter { - fn on_remove(&mut self, value: &T) { - self.malloc_size_of_values -= malloc_size(value); - } - fn on_insert(&mut self, value: &T) { - self.malloc_size_of_values += malloc_size(value); - } - fn on_clear(&mut self) { - self.malloc_size_of_values = 0; - } - fn get_size(&self) -> usize { - self.malloc_size_of_values - } -} - -/// No-op `MemTracker` implementation for when we want to -/// construct a `MemoryDB` instance that does not track memory usage. -#[derive(PartialEq, Eq)] -pub struct NoopTracker(PhantomData); - -impl Default for NoopTracker { - fn default() -> Self { - Self(PhantomData) - } -} - -impl Clone for NoopTracker { - fn clone(&self) -> Self { - Self::default() - } -} - -impl Copy for NoopTracker {} - -impl MemTracker for NoopTracker {} - -fn shallow_size_of_hashmap(map: &HashMap, ops: &mut MallocSizeOfOps) -> usize { - // See the implementation for std::collections::HashSet for details. 
- if ops.has_malloc_enclosing_size_of() { - map.values() - .next() - .map_or(0, |v| unsafe { ops.malloc_enclosing_size_of(v) }) - } else { - map.len() * (mem::size_of::() + mem::size_of::() + mem::size_of::()) - } -} - -#[cfg(test)] -fn size_of_hash_map(map: &HashMap) -> usize -where - K: MallocSizeOf, - V: MallocSizeOf, -{ - let ops = &mut parity_util_mem::allocators::new_malloc_size_ops(); - let mut n = shallow_size_of_hashmap(map, ops); - if let (Some(k), Some(v)) = (K::constant_size(), V::constant_size()) { - n += map.len() * (k + v) - } else { - n = map - .iter() - .fold(n, |acc, (k, v)| acc + k.size_of(ops) + v.size_of(ops)) - } - n -} - -pub type GenericMemoryDB = MemoryDB, DBValue, NoopTracker>; - -impl Consolidate for GenericMemoryDB { - fn consolidate(&mut self, other: Self) { - MemoryDB::consolidate(self, other) - } -} - -impl TrieBackendStorage for GenericMemoryDB { - type Overlay = Self; - - fn get( - &self, - key: &::Out, - prefix: Prefix, - ) -> Result, DefaultError> { - Ok(hash_db::HashDB::get(self, key, prefix)) - } -} - #[cfg(test)] mod tests { use super::{HashDB, HashKey, KeyHasher, MemoryDB}; use hash_db::EMPTY_PREFIX; use keccak_hasher::KeccakHasher; - use parity_util_mem::malloc_size; #[test] fn memorydb_remove_and_purge() { @@ -670,10 +539,7 @@ mod tests { m.insert(EMPTY_PREFIX, hello_bytes); m.insert(EMPTY_PREFIX, hello_bytes); assert_eq!(m.raw(&hello_key, EMPTY_PREFIX).unwrap().1, 1); - assert_eq!( - &*m.remove_and_purge(&hello_key, EMPTY_PREFIX).unwrap(), - hello_bytes - ); + assert_eq!(&*m.remove_and_purge(&hello_key, EMPTY_PREFIX).unwrap(), hello_bytes); assert_eq!(m.raw(&hello_key, EMPTY_PREFIX), None); assert!(m.remove_and_purge(&hello_key, EMPTY_PREFIX).is_none()); } @@ -695,14 +561,11 @@ mod tests { main.consolidate(other); - assert_eq!(main.raw(&remove_key, EMPTY_PREFIX), None); - assert_eq!( - main.raw(&insert_key, EMPTY_PREFIX).unwrap(), - (&"arf".as_bytes().to_vec(), 2) - ); + assert_eq!(main.raw(&remove_key, EMPTY_PREFIX).unwrap(), (&"doggo".as_bytes().to_vec(), 0)); + assert_eq!(main.raw(&insert_key, EMPTY_PREFIX).unwrap(), (&"arf".as_bytes().to_vec(), 2)); assert_eq!( main.raw(&negative_remove_key, EMPTY_PREFIX).unwrap(), - (&"".as_bytes().to_vec(), -2), + (&"negative".as_bytes().to_vec(), -2), ); } @@ -716,20 +579,4 @@ mod tests { assert!(db2.contains(&root, EMPTY_PREFIX)); assert!(db.contains(&root, EMPTY_PREFIX)); } - - #[test] - fn malloc_size_of() { - let mut db = MemoryDB::, Vec>::default(); - for i in 0u32..1024 { - let bytes = i.to_be_bytes(); - let prefix = (bytes.as_ref(), None); - db.insert(prefix, &bytes); - } - assert_eq!( - malloc_size(&db), - super::size_of_hash_map(&db.data) - + malloc_size(&db.null_node_data) - + malloc_size(&db.hashed_null_node) - ); - } } diff --git a/crates/phala-trie-storage/src/ser.rs b/crates/phala-trie-storage/src/ser.rs index 29d0c4d7..3ece47ae 100644 --- a/crates/phala-trie-storage/src/ser.rs +++ b/crates/phala-trie-storage/src/ser.rs @@ -1,4 +1,3 @@ -use alloc::vec::Vec; use serde::{Deserialize, Serialize}; use parity_scale_codec::{Encode, Decode}; use scale_info::TypeInfo; diff --git a/crates/phala-types/Cargo.toml b/crates/phala-types/Cargo.toml index 2e382fb2..f405224b 100644 --- a/crates/phala-types/Cargo.toml +++ b/crates/phala-types/Cargo.toml @@ -9,7 +9,7 @@ hex = { version = "0.4", default-features = false, features = ["alloc"] } serde = { version = "1.0.101", default-features = false, optional = true } codec = { package = "parity-scale-codec", version = "3.1", default-features = false, features = 
["full"] } scale-info = { version = "2.1", default-features = false, features = ["derive"] } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } phala-mq = { path = "../../crates/phala-mq", default-features = false } prpc = { path = "../../crates/prpc", default-features = false } diff --git a/node/Cargo.toml b/node/Cargo.toml index 4ce2d2d5..c84051fe 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -27,7 +27,7 @@ serde = { version = "1.0.144", features = ["derive"] } serde_json = { version = "1.0" } # RPC related dependencies -jsonrpsee = { version = "0.15.1", features = ["server"] } +jsonrpsee = { version = "0.16.2", features = ["server"] } parachains-common = { path = "../parachains-common" } phala-parachain-runtime = { path = "../runtime/phala", optional = true } @@ -39,80 +39,77 @@ shell-parachain-runtime = { path = "../runtime/shell", package = "shell-runtime" pallet-mq-runtime-api = { path = "../pallets/phala/mq-runtime-api" } phala-node-rpc-ext = { path = "../crates/phala-node-rpc-ext" } -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -try-runtime-cli = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", optional = true } +frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +try-runtime-cli = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", optional = true } -pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -frame-rpc-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", package = "substrate-frame-rpc-system" } +pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +frame-rpc-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", package = "substrate-frame-rpc-system" } # RMRK dependencies -pallet-rmrk-core = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.33" } -pallet-rmrk-equip = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.33" } -rmrk-traits = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.33" } -pallet-rmrk-rpc-runtime-api = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.33" } -pallet-rmrk-rpc = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.33" } +pallet-rmrk-core = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.36" } +pallet-rmrk-equip = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.36" } +rmrk-traits = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.36" } +pallet-rmrk-rpc-runtime-api = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.36" } +pallet-rmrk-rpc = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.36" } -substrate-prometheus-endpoint = { git = 
"https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } +substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } # Substrate Client Dependencies -sc-basic-authorship = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sc-chain-spec = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sc-cli = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", features = ["wasmtime"] } -sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sc-sysinfo = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sc-executor = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", features = ["wasmtime"] } -sc-network = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sc-network-common = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sc-rpc = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sc-rpc-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sc-service = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", features = ["wasmtime"] } -sc-telemetry = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sc-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sc-transaction-pool-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sc-tracing = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } +sc-basic-authorship = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sc-chain-spec = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sc-cli = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sc-sysinfo = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sc-executor = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sc-network = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sc-network-common = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sc-rpc = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sc-rpc-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sc-service = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sc-telemetry = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sc-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sc-transaction-pool-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sc-tracing = { git = 
"https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } # Substrate Primitive Dependencies -sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-timestamp = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-timestamp = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } # Cumulus dependencies -cumulus-client-cli = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33" } -cumulus-client-consensus-aura = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33" } -cumulus-client-consensus-relay-chain = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33" } -cumulus-client-consensus-common = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33" } -cumulus-client-network = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33" } -cumulus-client-service = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33" } -cumulus-primitives-core = { git = 
"https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33" } -cumulus-primitives-parachain-inherent = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33" } -cumulus-relay-chain-interface = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33" } -cumulus-relay-chain-inprocess-interface = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33" } -cumulus-relay-chain-rpc-interface = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33" } -cumulus-relay-chain-minimal-node = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33" } +cumulus-client-cli = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36" } +cumulus-client-consensus-aura = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36" } +cumulus-client-consensus-relay-chain = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36" } +cumulus-client-consensus-common = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36" } +cumulus-client-network = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36" } +cumulus-client-service = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36" } +cumulus-primitives-core = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36" } +cumulus-primitives-parachain-inherent = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36" } +cumulus-relay-chain-interface = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36" } # Polkadot dependencies -polkadot-cli = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33" } -polkadot-parachain = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33" } -polkadot-primitives = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33" } -polkadot-service = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33" } -xcm = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33" } +polkadot-cli = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36" } +polkadot-parachain = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36" } +polkadot-primitives = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36" } +polkadot-service = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36" } +xcm = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36" } [build-dependencies] -substrate-build-script-utils = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } +substrate-build-script-utils = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } [features] default = ["all-runtimes"] @@ -137,5 +134,4 @@ try-runtime = [ "khala-parachain-runtime/try-runtime", "rhala-parachain-runtime/try-runtime", "thala-parachain-runtime/try-runtime", - "shell-parachain-runtime/try-runtime" ] diff --git a/node/src/command.rs b/node/src/command.rs index cfbebc2d..824ba60e 100644 --- a/node/src/command.rs +++ b/node/src/command.rs @@ -640,39 +640,55 @@ pub fn run() -> Result<()> { sc_service::TaskManager::new(runner.config().tokio_handle.clone(), *registry) .map_err(|e| format!("Error: {:?}", e))?; + use sc_executor::{sp_wasm_interface::ExtendedHostFunctions, NativeExecutionDispatch}; + type HostFunctionsOf = 
ExtendedHostFunctions< + sp_io::SubstrateHostFunctions, + ::ExtendHostFunctions, + >; + #[cfg(feature = "phala-native")] if runner.config().chain_spec.is_phala() { - return runner.async_run(|config| { - Ok((cmd.run::(config), task_manager)) + return runner.async_run(|_config| { + Ok(( + cmd.run::>(), + task_manager, + )) }) } #[cfg(feature = "khala-native")] if runner.config().chain_spec.is_khala() { - return runner.async_run(|config| { - Ok((cmd.run::(config), task_manager)) + return runner.async_run(|_config| { + Ok(( + cmd.run::>(), + task_manager, + )) }) } #[cfg(feature = "rhala-native")] if runner.config().chain_spec.is_rhala() { - return runner.async_run(|config| { - Ok((cmd.run::(config), task_manager)) + return runner.async_run(|_config| { + Ok(( + cmd.run::>(), + task_manager, + )) }) } #[cfg(feature = "thala-native")] if runner.config().chain_spec.is_thala() { - return runner.async_run(|config| { - Ok((cmd.run::(config), task_manager)) + return runner.async_run(|_config| { + Ok(( + cmd.run::>(), + task_manager, + )) }) } #[cfg(feature = "shell-native")] if runner.config().chain_spec.is_shell() { - return runner.async_run(|config| { - Ok((cmd.run::(config), task_manager)) - }) + return Err("Shell runtime doesn't support try-runtime".into()) } Err("Can't determine runtime from chain_spec".into()) @@ -731,7 +747,7 @@ pub fn run() -> Result<()> { "no" } ); - if collator_options.relay_chain_rpc_url.is_some() && cli.relaychain_args.len() > 0 { + if !collator_options.relay_chain_rpc_urls.is_empty() && cli.relaychain_args.len() > 0 { warn!("Detected relay chain node arguments together with --relay-chain-rpc-url. This command starts a minimal Polkadot node that only uses a network-related subset of all relay chain CLI options."); } diff --git a/node/src/service/mod.rs b/node/src/service/mod.rs index 67a29982..0c10dabc 100644 --- a/node/src/service/mod.rs +++ b/node/src/service/mod.rs @@ -21,16 +21,14 @@ use cumulus_client_consensus_common::{ }; use cumulus_client_network::BlockAnnounceValidator; use cumulus_client_service::{ - prepare_node_config, start_collator, start_full_node, StartCollatorParams, StartFullNodeParams, + build_relay_chain_interface, prepare_node_config, start_collator, start_full_node, + StartCollatorParams, StartFullNodeParams, }; +use cumulus_relay_chain_interface::{RelayChainError, RelayChainInterface}; use cumulus_primitives_core::ParaId; -use cumulus_relay_chain_inprocess_interface::build_inprocess_relay_chain; -use cumulus_relay_chain_interface::{RelayChainError, RelayChainInterface, RelayChainResult}; -use cumulus_relay_chain_minimal_node::build_minimal_relay_chain_node; -use polkadot_service::CollatorPair; use sc_executor::WasmExecutor; - +use sc_consensus::ImportQueue; use sc_network::NetworkService; use sc_network_common::service::NetworkBlock; use sc_service::{ @@ -74,31 +72,7 @@ type HostFunctions = ( pub(crate) type ParachainClient = TFullClient>; pub(crate) type ParachainBackend = TFullBackend; -pub(crate) type ParachainBlockImport = TParachainBlockImport>>; - -async fn build_relay_chain_interface( - polkadot_config: Configuration, - parachain_config: &Configuration, - telemetry_worker_handle: Option, - task_manager: &mut TaskManager, - collator_options: CollatorOptions, - hwbench: Option, -) -> RelayChainResult<( - Arc<(dyn RelayChainInterface + 'static)>, - Option, -)> { - match collator_options.relay_chain_rpc_url { - Some(relay_chain_url) => - build_minimal_relay_chain_node(polkadot_config, task_manager, relay_chain_url).await, - None => 
build_inprocess_relay_chain( - polkadot_config, - parachain_config, - telemetry_worker_handle, - task_manager, - hwbench, - ), - } -} +pub(crate) type ParachainBlockImport = TParachainBlockImport>, ParachainBackend>; /// Starts a `ServiceBuilder` for a full service. /// @@ -185,7 +159,7 @@ where client.clone(), ); - let block_import = ParachainBlockImport::new(client.clone()); + let block_import = ParachainBlockImport::new(client.clone(), backend.clone()); let import_queue = build_import_queue( client.clone(), @@ -320,14 +294,15 @@ where let validator = parachain_config.role.is_authority(); let prometheus_registry = parachain_config.prometheus_registry().cloned(); let transaction_pool = params.transaction_pool.clone(); - let import_queue = cumulus_client_service::SharedImportQueue::new(params.import_queue); + let import_queue_service = params.import_queue.service(); + let (network, system_rpc_tx, tx_handler_controller, start_network) = sc_service::build_network(sc_service::BuildNetworkParams { config: ¶chain_config, client: client.clone(), transaction_pool: transaction_pool.clone(), spawn_handle: task_manager.spawn_handle(), - import_queue: import_queue.clone(), + import_queue: params.import_queue, block_announce_validator_builder: Some(Box::new(|_| { Box::new(block_announce_validator) })), @@ -418,7 +393,7 @@ where relay_chain_interface: relay_chain_interface.clone(), spawner, parachain_consensus, - import_queue, + import_queue: import_queue_service, collator_key: collator_key.expect("Command line arguments do not allow this. qed"), relay_chain_slot_duration, }; @@ -432,7 +407,7 @@ where para_id, relay_chain_interface, relay_chain_slot_duration, - import_queue, + import_queue: import_queue_service, }; start_full_node(params)?; diff --git a/node/src/service/phala.rs b/node/src/service/phala.rs index a5777a4f..d756ce3f 100644 --- a/node/src/service/phala.rs +++ b/node/src/service/phala.rs @@ -7,13 +7,15 @@ use cumulus_client_consensus_aura::{ use cumulus_client_network::BlockAnnounceValidator; use cumulus_client_consensus_common::ParachainConsensus; use cumulus_client_service::{ - prepare_node_config, start_collator, start_full_node, StartCollatorParams, StartFullNodeParams, + build_relay_chain_interface, prepare_node_config, start_collator, start_full_node, + StartCollatorParams, StartFullNodeParams, }; use cumulus_primitives_core::ParaId; use cumulus_relay_chain_interface::{RelayChainError, RelayChainInterface}; pub use parachains_common::{AccountId, Balance, Block, Hash, Header, Index as Nonce}; +use sc_consensus::ImportQueue; use sc_network::NetworkService; use sc_network_common::service::NetworkBlock; use sc_service::{ @@ -238,7 +240,7 @@ async fn start_node_impl( let backend = params.backend.clone(); let mut task_manager = params.task_manager; - let (relay_chain_interface, collator_key) = crate::service::build_relay_chain_interface( + let (relay_chain_interface, collator_key) = build_relay_chain_interface( polkadot_config, ¶chain_config, telemetry_worker_handle, @@ -259,14 +261,15 @@ async fn start_node_impl( let validator = parachain_config.role.is_authority(); let prometheus_registry = parachain_config.prometheus_registry().cloned(); let transaction_pool = params.transaction_pool.clone(); - let import_queue = cumulus_client_service::SharedImportQueue::new(params.import_queue); + let import_queue_service = params.import_queue.service(); + let (network, system_rpc_tx, tx_handler_controller, start_network) = sc_service::build_network(sc_service::BuildNetworkParams { config: 
¶chain_config, client: client.clone(), transaction_pool: transaction_pool.clone(), spawn_handle: task_manager.spawn_handle(), - import_queue: import_queue.clone(), + import_queue: params.import_queue, block_announce_validator_builder: Some(Box::new(|_| { Box::new(block_announce_validator) })), @@ -357,7 +360,7 @@ async fn start_node_impl( relay_chain_interface: relay_chain_interface.clone(), spawner, parachain_consensus, - import_queue, + import_queue: import_queue_service, collator_key: collator_key.expect("Command line arguments do not allow this. qed"), relay_chain_slot_duration, }; @@ -371,7 +374,7 @@ async fn start_node_impl( para_id, relay_chain_interface, relay_chain_slot_duration, - import_queue, + import_queue: import_queue_service, }; start_full_node(params)?; diff --git a/node/src/service/shell.rs b/node/src/service/shell.rs index 134a12f0..72ef61e6 100644 --- a/node/src/service/shell.rs +++ b/node/src/service/shell.rs @@ -4,13 +4,15 @@ use cumulus_client_cli::CollatorOptions; use cumulus_client_network::BlockAnnounceValidator; use cumulus_client_consensus_common::ParachainConsensus; use cumulus_client_service::{ - prepare_node_config, start_collator, start_full_node, StartCollatorParams, StartFullNodeParams, + build_relay_chain_interface, prepare_node_config, start_collator, start_full_node, + StartCollatorParams, StartFullNodeParams, }; use cumulus_primitives_core::ParaId; use cumulus_relay_chain_interface::{RelayChainError, RelayChainInterface}; pub use parachains_common::{AccountId, Balance, Block, Hash, Header, Index as Nonce}; +use sc_consensus::ImportQueue; use sc_network::NetworkService; use sc_network_common::service::NetworkBlock; use sc_service::{ @@ -133,7 +135,7 @@ async fn start_node_impl( let mut task_manager = params.task_manager; - let (relay_chain_interface, collator_key) = crate::service::build_relay_chain_interface( + let (relay_chain_interface, collator_key) = build_relay_chain_interface( polkadot_config, ¶chain_config, telemetry_worker_handle, @@ -154,14 +156,15 @@ async fn start_node_impl( let validator = parachain_config.role.is_authority(); let prometheus_registry = parachain_config.prometheus_registry().cloned(); let transaction_pool = params.transaction_pool.clone(); - let import_queue = cumulus_client_service::SharedImportQueue::new(params.import_queue); + let import_queue_service = params.import_queue.service(); + let (network, system_rpc_tx, tx_handler_controller, start_network) = sc_service::build_network(sc_service::BuildNetworkParams { config: ¶chain_config, client: client.clone(), transaction_pool: transaction_pool.clone(), spawn_handle: task_manager.spawn_handle(), - import_queue: import_queue.clone(), + import_queue: params.import_queue, block_announce_validator_builder: Some(Box::new(|_| { Box::new(block_announce_validator) })), @@ -230,7 +233,7 @@ async fn start_node_impl( relay_chain_interface, spawner, parachain_consensus, - import_queue, + import_queue: import_queue_service, collator_key: collator_key.expect("Command line arguments do not allow this. 
qed"), relay_chain_slot_duration, }; @@ -244,7 +247,7 @@ async fn start_node_impl( para_id, relay_chain_interface, relay_chain_slot_duration, - import_queue, + import_queue: import_queue_service, }; start_full_node(params)?; diff --git a/pallets/assets-registry/Cargo.toml b/pallets/assets-registry/Cargo.toml index 15b7739b..9ad0ac31 100644 --- a/pallets/assets-registry/Cargo.toml +++ b/pallets/assets-registry/Cargo.toml @@ -13,27 +13,27 @@ log = { version = "0.4.14", default-features = false } hex-literal = "0.3.1" # Substrate -sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false, optional = true } -sp-arithmetic = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-assets = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-uniques = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false, optional = true } +sp-arithmetic = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-assets = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-uniques = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } # Polkadot -xcm = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", default-features = false } -xcm-executor = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", default-features = false } -xcm-builder = { git = 
"https://github.com/paritytech/polkadot", branch = "release-v0.9.33", default-features = false } +xcm = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", default-features = false } +xcm-executor = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", default-features = false } +xcm-builder = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", default-features = false } # Cumulus -cumulus-primitives-core = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-pallet-xcm = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-primitives-utility = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } +cumulus-primitives-core = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-pallet-xcm = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-primitives-utility = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } # Local parachains-common = { path = "../../parachains-common", default-features = false } @@ -44,31 +44,31 @@ assert_matches = "1.4.0" hex-literal = "0.3" # Substrate -sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } # Polkadot -xcm = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33" } -xcm-executor = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33" } -xcm-builder = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33" } -polkadot-parachain = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33" } -pallet-xcm = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33" } -polkadot-runtime-parachains = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33" } -xcm-simulator = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33" } -polkadot-runtime-common = { git = 
"https://github.com/paritytech/polkadot", branch = "release-v0.9.33" } +xcm = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36" } +xcm-executor = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36" } +xcm-builder = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36" } +polkadot-parachain = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36" } +pallet-xcm = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36" } +polkadot-runtime-parachains = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36" } +xcm-simulator = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36" } +polkadot-runtime-common = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36" } # Cumulus -cumulus-primitives-core = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33" } -cumulus-pallet-parachain-system = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33" } -cumulus-pallet-dmp-queue = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33" } -cumulus-pallet-xcmp-queue = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33" } -cumulus-pallet-xcm = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33" } -cumulus-primitives-utility = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33" } +cumulus-primitives-core = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36" } +cumulus-pallet-parachain-system = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36" } +cumulus-pallet-dmp-queue = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36" } +cumulus-pallet-xcmp-queue = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36" } +cumulus-pallet-xcm = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36" } +cumulus-primitives-utility = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36" } # Local parachains-common = { path = "../../parachains-common" } diff --git a/pallets/assets-registry/src/lib.rs b/pallets/assets-registry/src/lib.rs index d4209d5a..13e53c60 100644 --- a/pallets/assets-registry/src/lib.rs +++ b/pallets/assets-registry/src/lib.rs @@ -362,6 +362,7 @@ pub mod pallet { { /// Force withdraw some amount of assets from ASSETS_REGISTRY_ID, if the given asset_id is None, /// would performance withdraw PHA from this account + #[pallet::call_index(0)] #[pallet::weight(195_000_000)] #[transactional] pub fn force_withdraw_fund( @@ -392,6 +393,7 @@ pub mod pallet { Ok(()) } + #[pallet::call_index(1)] #[pallet::weight(195_000_000)] #[transactional] pub fn force_register_asset( @@ -455,6 +457,7 @@ pub mod pallet { /// By cleaning them in current pallet, xcm and bridge transfering on this asset /// will not success anymore, we should call pallet_assets::destory() manually /// if we want to delete this asset from our chain + #[pallet::call_index(2)] #[pallet::weight(195_000_000)] #[transactional] pub fn force_unregister_asset( @@ -493,6 +496,7 @@ pub mod pallet { Ok(()) } + #[pallet::call_index(3)] #[pallet::weight(195_000_000)] #[transactional] pub fn force_set_metadata( @@ -517,6 +521,7 @@ pub mod pallet { Ok(()) } + #[pallet::call_index(4)] #[pallet::weight(195_000_000)] #[transactional] pub fn force_mint( @@ -540,6 +545,7 @@ pub mod pallet { Ok(()) } + 
#[pallet::call_index(5)] #[pallet::weight(195_000_000)] #[transactional] pub fn force_burn( @@ -561,6 +567,7 @@ pub mod pallet { Ok(()) } + #[pallet::call_index(6)] #[pallet::weight(195_000_000)] #[transactional] pub fn force_set_price( @@ -577,6 +584,7 @@ pub mod pallet { Ok(()) } + #[pallet::call_index(7)] #[pallet::weight(195_000_000)] #[transactional] pub fn force_set_location( @@ -632,6 +640,7 @@ pub mod pallet { Ok(()) } + #[pallet::call_index(8)] #[pallet::weight(195_000_000)] #[transactional] pub fn force_enable_chainbridge( @@ -682,6 +691,7 @@ pub mod pallet { Ok(()) } + #[pallet::call_index(9)] #[pallet::weight(195_000_000)] #[transactional] pub fn force_disable_chainbridge( diff --git a/pallets/parachain-info/Cargo.toml b/pallets/parachain-info/Cargo.toml index 111aec76..188daef2 100644 --- a/pallets/parachain-info/Cargo.toml +++ b/pallets/parachain-info/Cargo.toml @@ -8,10 +8,10 @@ version = "0.1.0" codec = { package = "parity-scale-codec", version = "3.0", default-features = false, features = ["derive"] } scale-info = { version = "2.0", default-features = false, features = ["derive"] } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } -cumulus-primitives-core = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } +cumulus-primitives-core = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } [features] default = ["std"] diff --git a/pallets/phala-world/Cargo.toml b/pallets/phala-world/Cargo.toml index eef46157..eb862078 100644 --- a/pallets/phala-world/Cargo.toml +++ b/pallets/phala-world/Cargo.toml @@ -16,28 +16,28 @@ serde = { version = "1.0.111", default-features = false, features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.0", default-features = false, features = ["derive"] } scale-info = { version = "2.0", default-features = false, features = ["derive"] } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", 
default-features = false } -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false, optional = true } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false, optional = true } -pallet-uniques = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } +pallet-uniques = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } # RMRK dependencies -pallet-rmrk-core = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-rmrk-equip = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-rmrk-market = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.33", default-features = false } -rmrk-traits = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.33", default-features = false } +pallet-rmrk-core = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-rmrk-equip = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-rmrk-market = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.36", default-features = false } +rmrk-traits = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.36", default-features = false } [dev-dependencies] -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } [features] default = ["std"] diff --git a/pallets/phala-world/src/incubation.rs b/pallets/phala-world/src/incubation.rs index a5103b23..f69a00b7 100644 --- a/pallets/phala-world/src/incubation.rs +++ b/pallets/phala-world/src/incubation.rs @@ -215,6 +215,7 @@ pub mod pallet { /// - origin: The origin of the extrinsic starting the incubation process /// - collection_id: The collection id of the Origin of Shell RMRK NFT /// - nft_id: The NFT id of the Origin of Shell RMRK NFT + #[pallet::call_index(0)] #[pallet::weight(Weight::from_ref_time(10_000) + T::DbWeight::get().reads_writes(1,1))] #[transactional] pub fn start_incubation( @@ 
-270,6 +271,7 @@ pub mod pallet { /// - origin: The origin of the extrinsic feeding the target Origin of Shell. /// - collection_id: The collection id of the Origin of Shell. /// - nft_id: The NFT id of the Origin of Shell. + #[pallet::call_index(1)] #[pallet::weight(Weight::from_ref_time(10_000) + T::DbWeight::get().reads_writes(1,1))] #[transactional] pub fn feed_origin_of_shell( @@ -370,6 +372,7 @@ pub mod pallet { /// - `nft_id`: The NFT id of the Origin of Shell RMRK NFT /// - `default_shell_metadata`: File resource URI in decentralized storage for Shell NFT /// parts that render the Shell NFT + #[pallet::call_index(2)] #[pallet::weight(Weight::from_ref_time(10_000) + T::DbWeight::get().reads_writes(1,1))] #[transactional] pub fn hatch_origin_of_shell( @@ -553,6 +556,7 @@ pub mod pallet { /// Parameters: /// `origin`: Expected to be the `Overlord` account /// `status`: `bool` value to set for the status in storage + #[pallet::call_index(3)] #[pallet::weight(0)] pub fn set_can_start_incubation_status( origin: OriginFor, @@ -584,6 +588,7 @@ pub mod pallet { /// Parameters: /// - `origin` - Expected Overlord admin account to set the Shell Collection ID /// - `collection_id` - Collection ID of the Shell Collection + #[pallet::call_index(4)] #[pallet::weight(0)] pub fn set_shell_collection_id( origin: OriginFor, @@ -609,6 +614,7 @@ pub mod pallet { /// Parameters: /// - `origin` - Expected Overlord admin account to set the Shell Parts Collection ID /// - `collection_id` - Collection ID of the Shell Parts Collection + #[pallet::call_index(5)] #[pallet::weight(0)] pub fn set_shell_parts_collection_id( origin: OriginFor, @@ -636,6 +642,7 @@ pub mod pallet { /// - `collection_id` - Collection ID of Origin of Shell /// - `nft_id` - NFT ID of the Origin of Shell /// - `chosen_parts` - Shell parts to be stored in Storage + #[pallet::call_index(6)] #[pallet::weight(0)] pub fn set_origin_of_shell_chosen_parts( origin: OriginFor, diff --git a/pallets/phala-world/src/lib.rs b/pallets/phala-world/src/lib.rs index 97e7f6ee..f858e1dc 100644 --- a/pallets/phala-world/src/lib.rs +++ b/pallets/phala-world/src/lib.rs @@ -8,9 +8,10 @@ mod tests; mod traits; pub mod incubation; -pub mod migration; pub mod nft_sale; +pub mod migration; + // Alias pub use incubation as pallet_pw_incubation; pub use nft_sale as pallet_pw_nft_sale; diff --git a/pallets/phala-world/src/nft_sale.rs b/pallets/phala-world/src/nft_sale.rs index e8c8738b..022b9e35 100644 --- a/pallets/phala-world/src/nft_sale.rs +++ b/pallets/phala-world/src/nft_sale.rs @@ -451,6 +451,7 @@ pub mod pallet { /// /// Parameters: /// - origin: The origin of the extrinsic. + #[pallet::call_index(0)] #[pallet::weight(Weight::from_ref_time(10_000) + T::DbWeight::get().reads_writes(1,1))] #[transactional] pub fn claim_spirit(origin: OriginFor) -> DispatchResult { @@ -480,6 +481,7 @@ pub mod pallet { /// /// Parameters: /// - origin: The origin of the extrinsic. + #[pallet::call_index(1)] #[pallet::weight(Weight::from_ref_time(10_000) + T::DbWeight::get().reads_writes(1,1))] #[transactional] pub fn redeem_spirit( @@ -514,6 +516,7 @@ pub mod pallet { /// - race: The race of the origin_of_shell chosen by the user. 
/// - career: The career of the origin_of_shell chosen by the user or auto-generated based /// on metadata + #[pallet::call_index(2)] #[pallet::weight(Weight::from_ref_time(10_000) + T::DbWeight::get().reads_writes(1,1))] #[transactional] pub fn buy_rare_origin_of_shell( @@ -559,6 +562,7 @@ pub mod pallet { /// - signature: The signature of the account that is claiming the spirit. /// - race: The race that the user has chosen (limited # of races) /// - career: The career that the user has chosen (unlimited careers) + #[pallet::call_index(3)] #[pallet::weight(Weight::from_ref_time(10_000) + T::DbWeight::get().reads_writes(1,1))] #[transactional] pub fn buy_prime_origin_of_shell( @@ -610,6 +614,7 @@ pub mod pallet { /// - origin: The origin of the extrinsic preordering the origin_of_shell /// - race: The race that the user has chosen (limited # of races) /// - career: The career that the user has chosen (limited # of careers) + #[pallet::call_index(4)] #[pallet::weight(Weight::from_ref_time(10_000) + T::DbWeight::get().reads_writes(1,1))] #[transactional] pub fn preorder_origin_of_shell( @@ -684,6 +689,7 @@ pub mod pallet { /// Parameters: /// `origin`: Expected to come from Overlord admin account /// `preorders`: Vec of Preorder IDs that were `Chosen` + #[pallet::call_index(5)] #[pallet::weight(Weight::from_ref_time(10_000) + T::DbWeight::get().reads_writes(1,1))] #[transactional] pub fn mint_chosen_preorders( @@ -752,6 +758,7 @@ pub mod pallet { /// Parameters: /// `origin`: Expected to come from Overlord admin account /// `preorders`: Preorder ids of the not chosen preorders + #[pallet::call_index(6)] #[pallet::weight(Weight::from_ref_time(10_000) + T::DbWeight::get().reads_writes(1,1))] #[transactional] pub fn refund_not_chosen_preorders( @@ -809,6 +816,7 @@ pub mod pallet { /// - `career`: The career of the origin_of_shell chosen by the user or auto-generated based /// on metadata /// - `nft_sale_type`: Either a `NftSaleType::Giveaway` or `NftSaleType::Reserved` + #[pallet::call_index(7)] #[pallet::weight(Weight::from_ref_time(10_000) + T::DbWeight::get().reads_writes(1,1))] #[transactional] pub fn mint_gift_origin_of_shell( @@ -853,6 +861,7 @@ pub mod pallet { /// Parameters: /// - origin: Expected to be called by `GovernanceOrigin` /// - new_overlord: T::AccountId + #[pallet::call_index(8)] #[pallet::weight(0)] pub fn set_overlord( origin: OriginFor, @@ -876,6 +885,7 @@ pub mod pallet { /// /// Parameters: /// `origin`: Expected to be called by `Overlord` admin account + #[pallet::call_index(9)] #[pallet::weight(0)] pub fn initialize_world_clock(origin: OriginFor) -> DispatchResultWithPostInfo { // Ensure Overlord account makes call @@ -903,6 +913,7 @@ pub mod pallet { /// - `origin` - Expected Overlord admin account to set the status /// - `status` - `bool` to set the status to /// - `status_type` - `StatusType` to set the status for + #[pallet::call_index(10)] #[pallet::weight(0)] pub fn set_status_type( origin: OriginFor, @@ -936,6 +947,7 @@ pub mod pallet { /// /// Parameters: /// - `origin` - Expected Overlord admin account + #[pallet::call_index(11)] #[pallet::weight(0)] pub fn init_rarity_type_counts(origin: OriginFor) -> DispatchResult { // Ensure Overlord account makes call @@ -958,6 +970,7 @@ pub mod pallet { /// - `for_sale_count` - Number of Origin of Shells for sale /// - `giveaway_count` - Number of Origin of Shells for giveaways /// - `reserve_count` - Number of Origin of Shells to be reserved + #[pallet::call_index(12)] #[pallet::weight(0)] pub fn 
update_rarity_type_counts( origin: OriginFor, @@ -1009,6 +1022,7 @@ pub mod pallet { /// Parameters: /// - `origin` - Expected Overlord admin account to set the Spirit Collection ID /// - `collection_id` - Collection ID of the Spirit Collection + #[pallet::call_index(13)] #[pallet::weight(0)] pub fn set_spirit_collection_id( origin: OriginFor, @@ -1034,6 +1048,7 @@ pub mod pallet { /// Parameters: /// - `origin` - Expected Overlord admin account to set the Origin of Shell Collection ID /// - `collection_id` - Collection ID of the Origin of Shell Collection + #[pallet::call_index(14)] #[pallet::weight(0)] pub fn set_origin_of_shell_collection_id( origin: OriginFor, @@ -1064,6 +1079,7 @@ pub mod pallet { /// - `metadata`: Metadata pertaining to the collection /// - `max`: Optional max u32 for the size of the collection /// - `symbol`: BoundedString of the collection's symbol i.e 'OVRLD' + #[pallet::call_index(15)] #[pallet::weight(Weight::from_ref_time(10_000) + T::DbWeight::get().reads_writes(1,1))] #[transactional] pub fn pw_create_collection( @@ -1094,6 +1110,7 @@ pub mod pallet { /// Parameters: /// - `origin`: Expected to be called from the Overlord account /// - `spirits_metadata`: `BoundedVec` to be added in storage + #[pallet::call_index(16)] #[pallet::weight(0)] pub fn set_spirits_metadata( origin: OriginFor, @@ -1117,6 +1134,7 @@ pub mod pallet { /// Parameters: /// - `origin`: Expected to be called from the Overlord account /// - `origin_of_shells_metadata`: A Vec of `((RaceType, CareerType), BoundedVec>)` to be added in storage + #[pallet::call_index(17)] #[pallet::weight(0)] pub fn set_origin_of_shells_metadata( origin: OriginFor, @@ -1143,6 +1161,7 @@ pub mod pallet { /// Parameters: /// - origin: Expected to be called by `Overlord`. /// - new_payee: T::AccountId of the Payee account. + #[pallet::call_index(18)] #[pallet::weight(0)] pub fn set_payee( origin: OriginFor, @@ -1167,6 +1186,7 @@ pub mod pallet { /// Parameters: /// - origin: Expected to be called by `Overlord`. /// - new_signer: T::AccountId of the Signer. 
+ #[pallet::call_index(19)] #[pallet::weight(0)] pub fn set_signer( origin: OriginFor, diff --git a/pallets/phala/Cargo.toml b/pallets/phala/Cargo.toml index 351a25be..bfb11383 100644 --- a/pallets/phala/Cargo.toml +++ b/pallets/phala/Cargo.toml @@ -14,28 +14,28 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.1", default-features = false, features = ["derive"] } scale-info = { version = "2.1", default-features = false, features = ["derive"] } -primitive-types = { version = "0.12.0", default-features = false, features = ["codec", "byteorder"] } -pallet-assets = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-democracy = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-uniques = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-preimage = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } +primitive-types = { version = "0.12.1", default-features = false, features = ["codec", "byteorder"] } +pallet-assets = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-democracy = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-uniques = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-preimage = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } # RMRK dependencies -pallet-rmrk-core = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.33", default-features = false } -rmrk-traits = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.33", default-features = false } +pallet-rmrk-core = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.36", default-features = false } +rmrk-traits = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.36", default-features = false } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false, optional = true } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +frame-system = { git = 
"https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false, optional = true } log = { version = "0.4.14", default-features = false } -pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } +pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } phala-types = { path = "../../crates/phala-types", default-features = false } chrono = { version = "0.4.22", default-features = false } @@ -55,13 +55,13 @@ webpki = { version = "0.22", default-features = false, features = ["alloc"] } webpki_wasm = { package = "webpki", path = "../../vendor/webpki", default-features = false, features = ["alloc"] } [dev-dependencies] -frame-support-test = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } +frame-support-test = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } assert_matches = "1.4.0" -pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } +pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } rand = "0.8.5" insta = "1" -pallet-scheduler = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-collective = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } +pallet-scheduler = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-collective = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } [features] default = ["std"] diff --git a/pallets/phala/mq-runtime-api/Cargo.toml b/pallets/phala/mq-runtime-api/Cargo.toml index 306c16b1..549cee17 100644 --- a/pallets/phala/mq-runtime-api/Cargo.toml +++ b/pallets/phala/mq-runtime-api/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" edition = "2021" [dependencies] -sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } phala-mq = { path = "../../../crates/phala-mq", default-features = false } [features] diff --git a/pallets/phala/src/compute/base_pool.rs b/pallets/phala/src/compute/base_pool.rs index 28786f94..78cdc467 100644 --- a/pallets/phala/src/compute/base_pool.rs +++ b/pallets/phala/src/compute/base_pool.rs @@ -439,6 +439,7 @@ pub mod pallet { /// The caller must be the 
owner of the pool. /// If a pool hasn't registed in the wihtelist map, any staker could contribute as what they use to do. /// The whitelist has a lmit len of 100 stakers. + #[pallet::call_index(0)] #[pallet::weight(0)] pub fn add_staker_to_whitelist( origin: OriginFor, @@ -477,6 +478,7 @@ pub mod pallet { /// Adds a description to the pool /// /// The caller must be the owner of the pool. + #[pallet::call_index(1)] #[pallet::weight(0)] pub fn set_pool_description( origin: OriginFor, @@ -496,6 +498,7 @@ pub mod pallet { Ok(()) } + #[pallet::call_index(2)] #[pallet::weight(0)] #[frame_support::transactional] pub fn reset_lock_iter_pos(origin: OriginFor) -> DispatchResult { @@ -505,6 +508,7 @@ pub mod pallet { Ok(()) } + #[pallet::call_index(3)] #[pallet::weight(0)] pub fn remove_unused_lock(origin: OriginFor, max_iterations: u32) -> DispatchResult { let who = ensure_signed(origin)?; @@ -546,6 +550,7 @@ pub mod pallet { /// /// The caller must be the owner of the pool. /// If the last staker in the whitelist is removed, the pool will return back to a normal pool that allow anyone to contribute. + #[pallet::call_index(4)] #[pallet::weight(0)] pub fn remove_staker_from_whitelist( origin: OriginFor, @@ -585,6 +590,7 @@ pub mod pallet { Ok(()) } + #[pallet::call_index(5)] #[pallet::weight(0)] #[frame_support::transactional] pub fn backfill_transfer_shares( diff --git a/pallets/phala/src/compute/computation.rs b/pallets/phala/src/compute/computation.rs index fbc35700..dd8c164b 100644 --- a/pallets/phala/src/compute/computation.rs +++ b/pallets/phala/src/compute/computation.rs @@ -430,6 +430,7 @@ pub mod pallet { /// Sets the cool down expiration time in seconds. /// /// Can only be called by root. + #[pallet::call_index(0)] #[pallet::weight(0)] pub fn set_cool_down_expiration(origin: OriginFor, period: u64) -> DispatchResult { ensure_root(origin)?; @@ -443,6 +444,7 @@ pub mod pallet { /// /// It will trigger a force stop of computing if the worker is still in computing state. Anyone /// can call it. + #[pallet::call_index(1)] #[pallet::weight(0)] pub fn unbind(origin: OriginFor, session: T::AccountId) -> DispatchResult { let who = ensure_signed(origin)?; @@ -458,6 +460,7 @@ pub mod pallet { /// Triggers a force heartbeat request to all workers by sending a MAX pow target /// /// Only for integration test. + #[pallet::call_index(2)] #[pallet::weight(1)] pub fn force_heartbeat(origin: OriginFor) -> DispatchResult { ensure_root(origin)?; @@ -471,6 +474,7 @@ pub mod pallet { /// Start computing /// /// Only for integration test. + #[pallet::call_index(3)] #[pallet::weight(1)] pub fn force_start_computing( origin: OriginFor, @@ -485,6 +489,7 @@ pub mod pallet { /// Stop computing /// /// Only for integration test. + #[pallet::call_index(4)] #[pallet::weight(1)] pub fn force_stop_computing(origin: OriginFor, session: T::AccountId) -> DispatchResult { ensure_root(origin)?; @@ -495,6 +500,7 @@ pub mod pallet { /// Updates the tokenomic parameters at the end of this block. /// /// Can only be called by the tokenomic admin. + #[pallet::call_index(5)] #[pallet::weight(1)] pub fn update_tokenomic( origin: OriginFor, @@ -512,6 +518,7 @@ pub mod pallet { /// but never be paid out until the heartbeat is resumed. /// /// Can only be called by root. 
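[Editor's note] Several computation calls here (e.g. `set_heartbeat_paused` just below) are gated on a configurable origin via `T::GovernanceOrigin::ensure_origin(..)` instead of a hard-coded root check. A hedged sketch of that pattern under assumed names, not copied from the repo:

#[frame_support::pallet]
pub mod pallet {
    use frame_support::pallet_prelude::*;
    use frame_support::traits::EnsureOrigin;
    use frame_system::pallet_prelude::*;

    #[pallet::config]
    pub trait Config: frame_system::Config {
        /// Who may flip the flag; the runtime can bind this to root, a council
        /// collective, or any other origin check. (Assumed name.)
        type GovernanceOrigin: EnsureOrigin<Self::RuntimeOrigin>;
    }

    #[pallet::pallet]
    pub struct Pallet<T>(_);

    #[pallet::storage]
    pub type Paused<T> = StorageValue<_, bool, ValueQuery>;

    #[pallet::call]
    impl<T: Config> Pallet<T> {
        #[pallet::call_index(0)]
        #[pallet::weight(1)]
        pub fn set_paused(origin: OriginFor<T>, paused: bool) -> DispatchResult {
            // Fails with BadOrigin unless the configured check accepts `origin`.
            T::GovernanceOrigin::ensure_origin(origin)?;
            Paused::<T>::put(paused);
            Ok(())
        }
    }
}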
+ #[pallet::call_index(6)] #[pallet::weight(1)] pub fn set_heartbeat_paused(origin: OriginFor, paused: bool) -> DispatchResult { T::GovernanceOrigin::ensure_origin(origin)?; diff --git a/pallets/phala/src/compute/stake_pool_v2.rs b/pallets/phala/src/compute/stake_pool_v2.rs index 8f1f3d14..02a0d76f 100644 --- a/pallets/phala/src/compute/stake_pool_v2.rs +++ b/pallets/phala/src/compute/stake_pool_v2.rs @@ -325,6 +325,7 @@ pub mod pallet { T: Config + vault::Config, { /// Creates a new stake pool + #[pallet::call_index(0)] #[pallet::weight(0)] #[frame_support::transactional] pub fn create(origin: OriginFor) -> DispatchResult { @@ -389,6 +390,7 @@ pub mod pallet { /// Requires: /// 1. The worker is registered and benchmarked /// 2. The worker is not bound a pool + #[pallet::call_index(1)] #[pallet::weight(0)] pub fn add_worker( origin: OriginFor, @@ -457,6 +459,7 @@ pub mod pallet { /// 1. The worker is registered /// 2. The worker is associated with a pool /// 3. The worker is removable (not in computing) + #[pallet::call_index(2)] #[pallet::weight(0)] pub fn remove_worker( origin: OriginFor, @@ -496,6 +499,7 @@ pub mod pallet { /// Note: a smaller cap than current total_value if not allowed. /// Requires: /// 1. The sender is the owner + #[pallet::call_index(3)] #[pallet::weight(0)] pub fn set_cap(origin: OriginFor, pid: u64, cap: BalanceOf) -> DispatchResult { let owner = ensure_signed(origin)?; @@ -523,6 +527,7 @@ pub mod pallet { /// /// Requires: /// 1. The sender is the owner + #[pallet::call_index(4)] #[pallet::weight(0)] pub fn set_payout_pref( origin: OriginFor, @@ -548,6 +553,7 @@ pub mod pallet { Ok(()) } + #[pallet::call_index(5)] #[pallet::weight(0)] #[frame_support::transactional] pub fn claim_legacy_rewards( @@ -562,6 +568,7 @@ pub mod pallet { Ok(()) } + #[pallet::call_index(6)] #[pallet::weight(0)] pub fn backfill_add_missing_reward( origin: OriginFor, @@ -582,6 +589,7 @@ pub mod pallet { /// /// Requires: /// 1. The sender is a pool owner + #[pallet::call_index(7)] #[pallet::weight(0)] pub fn claim_owner_rewards( origin: OriginFor, @@ -618,6 +626,7 @@ pub mod pallet { /// If the shutdown condition is met, all workers in the pool will be forced shutdown. /// Note: This function doesn't guarantee no-op when there's error. /// TODO(mingxuan): add more detail comment later. + #[pallet::call_index(8)] #[pallet::weight(0)] #[frame_support::transactional] pub fn check_and_maybe_force_withdraw(origin: OriginFor, pid: u64) -> DispatchResult { @@ -666,6 +675,7 @@ pub mod pallet { /// Requires: /// 1. The pool exists /// 2. After the deposit, the pool doesn't reach the cap + #[pallet::call_index(9)] #[pallet::weight(0)] #[frame_support::transactional] pub fn contribute( @@ -763,6 +773,7 @@ pub mod pallet { /// Once a withdraw request is proceeded successfully, The withdrawal would be queued and waiting to be dealed. /// Afer the withdrawal is queued, The withdraw queue will be automaticly consumed util there are not enough free stakes to fullfill withdrawals. /// Everytime the free stakes in the pools increases (except for rewards distributing), the withdraw queue will be consumed as it describes above. 
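[Editor's note] The `contribute`/`withdraw` doc comments above describe a withdraw queue that is drained every time free stake grows, until the free stake can no longer cover the next request. A plain-Rust sketch of that bookkeeping; it is illustrative only (the pallet's own data structures differ, and it may partially fill requests):

use std::collections::VecDeque;

/// One pending withdrawal: (staker id, amount still owed).
type Request = (u64, u128);

/// Pay queued withdrawals FIFO out of `free`, stopping when the oldest request
/// can no longer be fully served; returns the remaining free stake.
fn drain_withdraw_queue(mut free: u128, queue: &mut VecDeque<Request>) -> u128 {
    while let Some(&(who, amount)) = queue.front() {
        if amount > free {
            break; // not enough free stake for the oldest request; stop here
        }
        free -= amount;
        queue.pop_front();
        println!("paid {amount} to staker {who}");
    }
    free
}

fn main() {
    let mut queue = VecDeque::from([(1, 50), (2, 80), (3, 10)]);
    // 100 units of free stake: pays staker 1 in full, then stops at staker 2.
    let left = drain_withdraw_queue(100, &mut queue);
    assert_eq!(left, 50);
    assert_eq!(queue.len(), 2);
}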
+ #[pallet::call_index(10)] #[pallet::weight(0)] #[frame_support::transactional] pub fn withdraw( @@ -838,6 +849,7 @@ pub mod pallet { Ok(()) } + #[pallet::call_index(11)] #[pallet::weight(0)] #[frame_support::transactional] pub fn reset_iter_pos(origin: OriginFor) -> DispatchResult { @@ -847,6 +859,7 @@ pub mod pallet { Ok(()) } + #[pallet::call_index(12)] #[pallet::weight(0)] #[frame_support::transactional] pub fn fix_missing_worker_lock( @@ -910,6 +923,7 @@ pub mod pallet { /// Requires: /// 1. The worker is bound to the pool and is in Ready state /// 2. The remaining stake in the pool can cover the minimal stake required + #[pallet::call_index(13)] #[pallet::weight(0)] pub fn start_computing( origin: OriginFor, @@ -926,6 +940,7 @@ pub mod pallet { /// /// Requires: /// 1. There worker is bound to the pool and is in a stoppable state + #[pallet::call_index(14)] #[pallet::weight(0)] pub fn stop_computing( origin: OriginFor, @@ -937,6 +952,7 @@ pub mod pallet { } /// Reclaims the releasing stake of a worker in a pool. + #[pallet::call_index(15)] #[pallet::weight(0)] pub fn reclaim_pool_worker( origin: OriginFor, @@ -950,6 +966,7 @@ pub mod pallet { } /// Enables or disables computing. Must be called with the council or root permission. + #[pallet::call_index(16)] #[pallet::weight(0)] pub fn set_working_enabled(origin: OriginFor, enable: bool) -> DispatchResult { T::ComputingSwitchOrigin::ensure_origin(origin)?; @@ -958,6 +975,7 @@ pub mod pallet { } /// Restarts the worker with a higher stake + #[pallet::call_index(17)] #[pallet::weight(195_000_000)] #[frame_support::transactional] pub fn restart_computing( diff --git a/pallets/phala/src/compute/vault.rs b/pallets/phala/src/compute/vault.rs index fe397226..1e916418 100644 --- a/pallets/phala/src/compute/vault.rs +++ b/pallets/phala/src/compute/vault.rs @@ -146,6 +146,7 @@ pub mod pallet { T: pallet_assets::Config>, { /// Creates a new vault + #[pallet::call_index(0)] #[pallet::weight(0)] #[frame_support::transactional] pub fn create(origin: OriginFor) -> DispatchResult { @@ -202,6 +203,7 @@ pub mod pallet { /// /// Requires: /// 1. The sender is the owner + #[pallet::call_index(1)] #[pallet::weight(0)] pub fn set_payout_pref( origin: OriginFor, @@ -232,6 +234,7 @@ pub mod pallet { /// /// Requires: /// 1. The sender is the owner + #[pallet::call_index(2)] #[pallet::weight(0)] pub fn claim_owner_shares( origin: OriginFor, @@ -280,6 +283,7 @@ pub mod pallet { /// /// Requires: /// 1. The sender is the owner + #[pallet::call_index(3)] #[pallet::weight(0)] pub fn maybe_gain_owner_shares(origin: OriginFor, vault_pid: u64) -> DispatchResult { let who = ensure_signed(origin)?; @@ -325,6 +329,7 @@ pub mod pallet { /// If the shutdown condition is met, all shares owned by the vault will be forced withdraw. /// Note: This function doesn't guarantee no-op when there's error. /// TODO(mingxuan): add more detail comment later. + #[pallet::call_index(4)] #[pallet::weight(0)] #[frame_support::transactional] pub fn check_and_maybe_force_withdraw( @@ -418,6 +423,7 @@ pub mod pallet { /// Requires: /// 1. The pool exists /// 2. After the deposit, the pool doesn't reach the cap + #[pallet::call_index(5)] #[pallet::weight(0)] #[frame_support::transactional] pub fn contribute(origin: OriginFor, pid: u64, amount: BalanceOf) -> DispatchResult { @@ -476,6 +482,7 @@ pub mod pallet { /// Once a withdraw request is proceeded successfully, The withdrawal would be queued and waiting to be dealed. 
/// Afer the withdrawal is queued, The withdraw queue will be automaticly consumed util there are not enough free stakes to fullfill withdrawals. /// Everytime the free stakes in the pools increases, the withdraw queue will be consumed as it describes above. + #[pallet::call_index(6)] #[pallet::weight(0)] #[frame_support::transactional] pub fn withdraw(origin: OriginFor, pid: u64, shares: BalanceOf) -> DispatchResult { diff --git a/pallets/phala/src/compute/wrapped_balances.rs b/pallets/phala/src/compute/wrapped_balances.rs index ade90b57..f9c5b4b6 100644 --- a/pallets/phala/src/compute/wrapped_balances.rs +++ b/pallets/phala/src/compute/wrapped_balances.rs @@ -208,6 +208,7 @@ pub mod pallet { /// Wraps some pha and gain equal amount of W-PHA /// /// The wrapped pha is stored in `WrappedBalancesAccountId`'s wallet and can not be taken away + #[pallet::call_index(0)] #[pallet::weight(0)] #[frame_support::transactional] pub fn wrap(origin: OriginFor, amount: BalanceOf) -> DispatchResult { @@ -233,6 +234,7 @@ pub mod pallet { /// Burns the amount of all free W-PHA and unwraps equal amount of pha /// /// The unwrapped pha is transfered from `WrappedBalancesAccountId` to the user's wallet + #[pallet::call_index(1)] #[pallet::weight(0)] #[frame_support::transactional] pub fn unwrap_all(origin: OriginFor) -> DispatchResult { @@ -257,6 +259,7 @@ pub mod pallet { /// Unwraps some pha by burning equal amount of W-PHA /// /// The unwrapped pha is transfered from `WrappedBalancesAccountId` to the user's wallet + #[pallet::call_index(2)] #[pallet::weight(0)] #[frame_support::transactional] pub fn unwrap(origin: OriginFor, amount: BalanceOf) -> DispatchResult { @@ -290,6 +293,7 @@ pub mod pallet { /// /// Can both approve and oppose a vote at the same time /// The W-PHA used in vote will be locked until the vote is finished or canceled + #[pallet::call_index(3)] #[pallet::weight(0)] #[frame_support::transactional] pub fn vote( @@ -325,6 +329,7 @@ pub mod pallet { /// Tries to unlock W-PHAs used in vote after the vote finished or canceled /// /// Must assign the max iterations to avoid computing complexity overwhelm + #[pallet::call_index(4)] #[pallet::weight(0)] #[frame_support::transactional] pub fn unlock( diff --git a/pallets/phala/src/fat.rs b/pallets/phala/src/fat.rs index 0efe596f..3dcc5f1f 100644 --- a/pallets/phala/src/fat.rs +++ b/pallets/phala/src/fat.rs @@ -201,6 +201,7 @@ pub mod pallet { /// - `deposit_per_item` - Price for contract storage per item. /// - `deposit_per_byte` - Price for contract storage per byte. /// - `treasury_account` - The treasury account used to collect the gas and storage fee. + #[pallet::call_index(0)] #[pallet::weight(0)] pub fn add_cluster( origin: OriginFor, @@ -282,6 +283,7 @@ pub mod pallet { Ok(()) } + #[pallet::call_index(1)] #[pallet::weight(0)] pub fn cluster_upload_resource( origin: OriginFor, @@ -316,6 +318,7 @@ pub mod pallet { } /// Transfer `amount` of on-chain token to the `dest_account` in the cluster of id `cluster_id`. 
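[Editor's note] The wrapped_balances calls above keep a 1:1 peg: wrapping parks PHA in the pallet-owned `WrappedBalancesAccountId` and credits the same amount of W-PHA, while unwrapping burns W-PHA and releases the matching PHA. A toy sketch of that invariant in plain Rust; names and structure are assumed, not the pallet's code:

/// Toy model of the 1:1 wrap/unwrap accounting described above.
#[derive(Default, Debug)]
struct WrappedLedger {
    /// PHA held by the pallet's pooled account.
    pooled_pha: u128,
    /// W-PHA issued against that pool.
    wpha_issued: u128,
}

impl WrappedLedger {
    /// Lock `amount` PHA and issue the same amount of W-PHA.
    fn wrap(&mut self, amount: u128) {
        self.pooled_pha += amount;
        self.wpha_issued += amount;
    }

    /// Burn `amount` W-PHA and release the same amount of PHA, failing if more
    /// is requested than was ever issued.
    fn unwrap(&mut self, amount: u128) -> Result<u128, &'static str> {
        if amount > self.wpha_issued {
            return Err("insufficient W-PHA");
        }
        self.wpha_issued -= amount;
        self.pooled_pha -= amount;
        Ok(amount)
    }
}

fn main() {
    let mut ledger = WrappedLedger::default();
    ledger.wrap(100);
    assert_eq!(ledger.unwrap(40), Ok(40));
    // The peg holds: issued W-PHA always equals the PHA still pooled.
    assert_eq!(ledger.pooled_pha, ledger.wpha_issued);
}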
+ #[pallet::call_index(2)] #[pallet::weight(0)] pub fn transfer_to_cluster( origin: OriginFor, @@ -344,6 +347,7 @@ pub mod pallet { } // Push message to contract with some deposit into the cluster to pay the gas fee + #[pallet::call_index(3)] #[pallet::weight(Weight::from_ref_time(10_000u64))] pub fn push_contract_message( origin: OriginFor, @@ -360,6 +364,7 @@ pub mod pallet { PalletMq::::push_message(origin, command_topic(contract_id), payload) } + #[pallet::call_index(4)] #[pallet::weight(0)] pub fn instantiate_contract( origin: OriginFor, @@ -418,6 +423,7 @@ pub mod pallet { Ok(()) } + #[pallet::call_index(5)] #[pallet::weight(0)] pub fn cluster_destroy(origin: OriginFor, cluster: ContractClusterId) -> DispatchResult { ensure_root(origin)?; @@ -428,6 +434,7 @@ pub mod pallet { Ok(()) } + #[pallet::call_index(6)] #[pallet::weight(0)] pub fn set_pink_system_code( origin: OriginFor, diff --git a/pallets/phala/src/fat_tokenomic.rs b/pallets/phala/src/fat_tokenomic.rs index 8f6782ee..47500d6e 100644 --- a/pallets/phala/src/fat_tokenomic.rs +++ b/pallets/phala/src/fat_tokenomic.rs @@ -94,6 +94,7 @@ pub mod pallet { /// If users stake on a contract doesn't deployed yet. The deposit would send to the cluster /// even if the contract is deployed later. User can re-stake with or without changing the amount /// to sync the depoit the the cluster after the contract is actually deployed. + #[pallet::call_index(0)] #[pallet::weight(0)] pub fn adjust_stake( origin: OriginFor, diff --git a/pallets/phala/src/mock.rs b/pallets/phala/src/mock.rs index 4096043a..fc3c94b1 100644 --- a/pallets/phala/src/mock.rs +++ b/pallets/phala/src/mock.rs @@ -342,6 +342,7 @@ impl pallet_assets::Config for Test { type RuntimeEvent = RuntimeEvent; type Balance = Balance; type AssetId = u32; + type AssetIdParameter = codec::Compact; type Currency = Balances; type ForceOrigin = frame_system::EnsureRoot; type AssetDeposit = AssetDeposit; @@ -353,6 +354,9 @@ impl pallet_assets::Config for Test { type Freezer = (); type Extra = (); type WeightInfo = (); + type RemoveItemsLimit = ConstU32<1000>; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = (); type CreateOrigin = AsEnsureOriginWithArg>; } diff --git a/pallets/phala/src/mq.rs b/pallets/phala/src/mq.rs index 85e12d12..51f41021 100644 --- a/pallets/phala/src/mq.rs +++ b/pallets/phala/src/mq.rs @@ -62,6 +62,7 @@ pub mod pallet { T::AccountId: IntoH256, { /// Syncs an unverified offchain message to the message queue + #[pallet::call_index(0)] #[pallet::weight(Weight::from_ref_time(10_000u64) + T::DbWeight::get().writes(1u64))] pub fn sync_offchain_message( origin: OriginFor, @@ -96,6 +97,7 @@ pub mod pallet { // Messaging API for end user. // TODO.kevin: confirm the weight + #[pallet::call_index(1)] #[pallet::weight(Weight::from_ref_time(10_000u64) + T::DbWeight::get().writes(1u64))] pub fn push_message( origin: OriginFor, @@ -110,6 +112,7 @@ pub mod pallet { } // Force push a from-pallet message. + #[pallet::call_index(2)] #[pallet::weight(Weight::from_ref_time(10_000u64) + T::DbWeight::get().writes(1u64))] pub fn force_push_pallet_message( origin: OriginFor, diff --git a/pallets/phala/src/ott.rs b/pallets/phala/src/ott.rs index 42223448..bfc8923f 100644 --- a/pallets/phala/src/ott.rs +++ b/pallets/phala/src/ott.rs @@ -62,6 +62,7 @@ pub mod pallet { impl Pallet { /// Distributes some amounts to each specified accounts and mark the sender and destination /// accounts as blacklisted. 
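[Editor's note] The mock-runtime hunk above adds the associated types pallet-assets now requires (`AssetIdParameter`, `RemoveItemsLimit`, and a benchmarks-only `BenchmarkHelper`). With `AssetId = u32`, the parameter type is presumably `codec::Compact<u32>`; the point of the Compact wrapper is cheaper call encoding, as this runnable snippet (using parity-scale-codec, the repo's `codec` crate) shows:

use codec::{Compact, Encode};

fn main() {
    let id: u32 = 300;
    // A plain u32 always costs 4 bytes in call data...
    assert_eq!(id.encode().len(), 4);
    // ...while the Compact wrapper used for `AssetIdParameter` scales with the
    // value, so small asset ids cost as little as one byte on the wire.
    assert_eq!(Compact(id).encode().len(), 2);
    assert_eq!(Compact(42u32).encode().len(), 1);
}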
+ #[pallet::call_index(0)] #[pallet::weight(0)] #[transactional] pub fn distribute( diff --git a/pallets/phala/src/registry.rs b/pallets/phala/src/registry.rs index 0dc8c14b..7168b630 100644 --- a/pallets/phala/src/registry.rs +++ b/pallets/phala/src/registry.rs @@ -290,6 +290,7 @@ pub mod pallet { /// Sets [`BenchmarkDuration`] /// /// Can only be called by `GovernanceOrigin`. + #[pallet::call_index(0)] #[pallet::weight(Weight::from_ref_time(10_000u64) + T::DbWeight::get().writes(1u64))] pub fn force_set_benchmark_duration(origin: OriginFor, value: u32) -> DispatchResult { T::GovernanceOrigin::ensure_origin(origin)?; @@ -300,6 +301,7 @@ pub mod pallet { /// Force register a worker with the given pubkey with sudo permission /// /// For test only. + #[pallet::call_index(1)] #[pallet::weight(Weight::from_ref_time(10_000u64) + T::DbWeight::get().writes(1u64))] pub fn force_register_worker( origin: OriginFor, @@ -339,6 +341,7 @@ pub mod pallet { /// Force register a topic pubkey /// /// For test only. + #[pallet::call_index(2)] #[pallet::weight(Weight::from_ref_time(10_000u64) + T::DbWeight::get().writes(1u64))] pub fn force_register_topic_pubkey( origin: OriginFor, @@ -353,6 +356,7 @@ pub mod pallet { /// Register a gatekeeper. /// /// Can only be called by `GovernanceOrigin`. + #[pallet::call_index(3)] #[pallet::weight(Weight::from_ref_time(10_000u64) + T::DbWeight::get().writes(1u64))] pub fn register_gatekeeper( origin: OriginFor, @@ -398,6 +402,7 @@ pub mod pallet { /// Unregister a gatekeeper /// /// At least one gatekeeper should be available + #[pallet::call_index(4)] #[pallet::weight(Weight::from_ref_time(10_000u64) + T::DbWeight::get().writes(1u64))] pub fn unregister_gatekeeper( origin: OriginFor, @@ -426,6 +431,7 @@ pub mod pallet { } /// Rotate the master key + #[pallet::call_index(5)] #[pallet::weight(Weight::from_ref_time(10_000u64) + T::DbWeight::get().writes(1u64))] pub fn rotate_master_key(origin: OriginFor) -> DispatchResult { T::GovernanceOrigin::ensure_origin(origin)?; @@ -463,6 +469,7 @@ pub mod pallet { /// /// Usually called by a bridging relayer program (`pherry` and `prb`). Can be called by /// anyone on behalf of a worker. + #[pallet::call_index(6)] #[pallet::weight(0)] pub fn register_worker( origin: OriginFor, @@ -560,6 +567,7 @@ pub mod pallet { /// /// Usually called by a bridging relayer program (`pherry` and `prb`). Can be called by /// anyone on behalf of a worker. + #[pallet::call_index(7)] #[pallet::weight(0)] pub fn register_worker_v2( origin: OriginFor, @@ -671,6 +679,7 @@ pub mod pallet { Ok(()) } + #[pallet::call_index(8)] #[pallet::weight(0)] pub fn update_worker_endpoint( origin: OriginFor, @@ -717,6 +726,7 @@ pub mod pallet { /// Registers a pruntime binary to [`PRuntimeAllowList`] /// /// Can only be called by `GovernanceOrigin`. + #[pallet::call_index(9)] #[pallet::weight(0)] pub fn add_pruntime(origin: OriginFor, pruntime_hash: Vec) -> DispatchResult { T::GovernanceOrigin::ensure_origin(origin)?; @@ -739,6 +749,7 @@ pub mod pallet { /// Removes a pruntime binary from [`PRuntimeAllowList`] /// /// Can only be called by `GovernanceOrigin`. + #[pallet::call_index(10)] #[pallet::weight(0)] pub fn remove_pruntime(origin: OriginFor, pruntime_hash: Vec) -> DispatchResult { T::GovernanceOrigin::ensure_origin(origin)?; @@ -760,6 +771,7 @@ pub mod pallet { /// Adds an entry in [`RelaychainGenesisBlockHashAllowList`] /// /// Can only be called by `GovernanceOrigin`. 
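[Editor's note] The registry calls above build their weights as a fixed ref-time term plus the configured DB write cost, and the parachains-common hunk further down in this patch switches the block limit to the two-dimensional `Weight::from_parts(ref_time, proof_size)` form. A small standalone sketch of both constructors, substituting `RocksDbWeight` for the runtime's `T::DbWeight` and a 5 MiB proof-size budget as an assumed placeholder:

use frame_support::weights::{
    constants::{RocksDbWeight, WEIGHT_REF_TIME_PER_SECOND},
    Weight,
};

fn main() {
    // Fixed compute cost plus one database write, the shape used by the
    // registry dispatchables above (RocksDbWeight stands in for T::DbWeight).
    let call_weight = Weight::from_ref_time(10_000u64) + RocksDbWeight::get().writes(1);
    println!("call weight: {:?}", call_weight);

    // Two-dimensional weight: ref-time for half a second of compute plus an
    // explicit proof-size budget, mirroring the new MAXIMUM_BLOCK_WEIGHT shape.
    let max_block = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND / 2, 5 * 1024 * 1024);
    assert!(call_weight.ref_time() < max_block.ref_time());
}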
+ #[pallet::call_index(11)] #[pallet::weight(0)] pub fn add_relaychain_genesis_block_hash( origin: OriginFor, @@ -782,6 +794,7 @@ pub mod pallet { /// Deletes an entry in [`RelaychainGenesisBlockHashAllowList`] /// /// Can only be called by `GovernanceOrigin`. + #[pallet::call_index(12)] #[pallet::weight(0)] pub fn remove_relaychain_genesis_block_hash( origin: OriginFor, @@ -804,6 +817,7 @@ pub mod pallet { /// Set minimum pRuntime version. Versions less than MinimumPRuntimeVersion would be forced to quit. /// /// Can only be called by `GovernanceOrigin`. + #[pallet::call_index(13)] #[pallet::weight(0)] pub fn set_minimum_pruntime_version( origin: OriginFor, @@ -823,6 +837,7 @@ pub mod pallet { /// the current consensus version. /// /// Can only be called by `GovernanceOrigin`. + #[pallet::call_index(14)] #[pallet::weight(0)] pub fn set_pruntime_consensus_version( origin: OriginFor, diff --git a/pallets/subbridge/Cargo.toml b/pallets/subbridge/Cargo.toml index 26d94bdb..1b473d74 100644 --- a/pallets/subbridge/Cargo.toml +++ b/pallets/subbridge/Cargo.toml @@ -14,26 +14,26 @@ impl-trait-for-tuples = "0.2.2" log = { version = "0.4.14", default-features = false } # Substrate -sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false, optional = true } -sp-arithmetic = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-assets = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false, optional = true } +sp-arithmetic = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-assets = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } 
+pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } # Polkadot -xcm = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", default-features = false } -xcm-executor = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", default-features = false } -xcm-builder = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", default-features = false } +xcm = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", default-features = false } +xcm-executor = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", default-features = false } +xcm-builder = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", default-features = false } # Cumulus -cumulus-primitives-core = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-pallet-xcm = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-primitives-utility = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } +cumulus-primitives-core = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-pallet-xcm = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-primitives-utility = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } # Local assets-registry = { path = "../assets-registry", default-features = false } @@ -45,31 +45,31 @@ assert_matches = "1.4.0" hex-literal = "0.3" # Substrate -sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } # Polkadot -xcm = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33" } -xcm-executor = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33" } -xcm-builder = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33" } -polkadot-parachain = { git = 
"https://github.com/paritytech/polkadot", branch = "release-v0.9.33" } -pallet-xcm = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33" } -polkadot-runtime-parachains = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33" } -xcm-simulator = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33" } -polkadot-runtime-common = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33" } +xcm = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36" } +xcm-executor = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36" } +xcm-builder = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36" } +polkadot-parachain = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36" } +pallet-xcm = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36" } +polkadot-runtime-parachains = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36" } +xcm-simulator = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36" } +polkadot-runtime-common = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36" } # Cumulus -cumulus-primitives-core = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33" } -cumulus-pallet-parachain-system = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33" } -cumulus-pallet-dmp-queue = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33" } -cumulus-pallet-xcmp-queue = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33" } -cumulus-pallet-xcm = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33" } -cumulus-primitives-utility = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33" } +cumulus-primitives-core = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36" } +cumulus-pallet-parachain-system = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36" } +cumulus-pallet-dmp-queue = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36" } +cumulus-pallet-xcmp-queue = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36" } +cumulus-pallet-xcm = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36" } +cumulus-primitives-utility = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36" } # Local assets-registry = { path = "../assets-registry" } diff --git a/pallets/subbridge/src/chainbridge.rs b/pallets/subbridge/src/chainbridge.rs index fa1c9ab4..1d47508f 100644 --- a/pallets/subbridge/src/chainbridge.rs +++ b/pallets/subbridge/src/chainbridge.rs @@ -337,6 +337,7 @@ pub mod pallet { /// # /// - O(1) lookup and insert /// # + #[pallet::call_index(0)] #[pallet::weight(195_000_000)] pub fn set_threshold(origin: OriginFor, threshold: u32) -> DispatchResult { T::BridgeCommitteeOrigin::ensure_origin(origin)?; @@ -348,6 +349,7 @@ pub mod pallet { /// # /// - O(1) lookup and insert /// # + #[pallet::call_index(1)] #[pallet::weight(195_000_000)] pub fn whitelist_chain(origin: OriginFor, id: BridgeChainId) -> DispatchResult { T::BridgeCommitteeOrigin::ensure_origin(origin)?; @@ -359,6 +361,7 @@ pub mod pallet { /// # /// - O(1) lookup and insert /// # + #[pallet::call_index(2)] #[pallet::weight(195_000_000)] pub fn add_relayer(origin: OriginFor, v: T::AccountId) -> 
DispatchResult { T::BridgeCommitteeOrigin::ensure_origin(origin)?; @@ -370,6 +373,7 @@ pub mod pallet { /// # /// - O(1) lookup and removal /// # + #[pallet::call_index(3)] #[pallet::weight(195_000_000)] pub fn remove_relayer(origin: OriginFor, v: T::AccountId) -> DispatchResult { T::BridgeCommitteeOrigin::ensure_origin(origin)?; @@ -381,6 +385,7 @@ pub mod pallet { /// # /// - O(1) lookup and insert /// # + #[pallet::call_index(4)] #[pallet::weight(195_000_000)] pub fn update_fee( origin: OriginFor, @@ -405,6 +410,7 @@ pub mod pallet { /// # /// - weight of proposed call, regardless of whether execution is performed /// # + #[pallet::call_index(5)] #[pallet::weight({ let dispatch_info = call.get_dispatch_info(); (dispatch_info.weight.saturating_add(Weight::from_ref_time(195_000_000)), dispatch_info.class, Pays::Yes) @@ -432,6 +438,7 @@ pub mod pallet { /// # /// - Fixed, since execution of proposal should not be included /// # + #[pallet::call_index(6)] #[pallet::weight(195_000_000)] #[transactional] pub fn reject_proposal( @@ -459,6 +466,7 @@ pub mod pallet { /// # /// - weight of proposed call, regardless of whether execution is performed /// # + #[pallet::call_index(7)] #[pallet::weight({ let dispatch_info = prop.get_dispatch_info(); (dispatch_info.weight.saturating_add(Weight::from_ref_time(195_000_000)), dispatch_info.class, Pays::Yes) @@ -476,6 +484,7 @@ pub mod pallet { } /// Triggered by a initial transfer on source chain, executed by relayer when proposal was resolved. + #[pallet::call_index(8)] #[pallet::weight(195_000_000)] #[transactional] pub fn handle_fungible_transfer( diff --git a/pallets/subbridge/src/xtransfer.rs b/pallets/subbridge/src/xtransfer.rs index ec66643e..e970dd79 100644 --- a/pallets/subbridge/src/xtransfer.rs +++ b/pallets/subbridge/src/xtransfer.rs @@ -61,6 +61,7 @@ pub mod pallet { where T::AccountId: Into<[u8; 32]> + From<[u8; 32]>, { + #[pallet::call_index(0)] #[pallet::weight(195_000_000)] #[transactional] pub fn transfer( @@ -74,6 +75,7 @@ pub mod pallet { Ok(()) } + #[pallet::call_index(1)] #[pallet::weight(195_000_000)] #[transactional] pub fn transfer_generic( diff --git a/parachains-common/Cargo.toml b/parachains-common/Cargo.toml index 33b0ecbf..0d17f8e0 100644 --- a/parachains-common/Cargo.toml +++ b/parachains-common/Cargo.toml @@ -14,33 +14,33 @@ codec = { package = "parity-scale-codec", version = "3.0", default-features = fa scale-info = { version = "2.0", default-features = false, features = ["derive"] } # Substrate dependencies -sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -frame-executive = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-core = { git = 
"https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } +sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +frame-executive = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } # Polkadot dependencies -polkadot-runtime-common = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", default-features = false } -polkadot-primitives = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", default-features = false } -xcm = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", default-features = false } -xcm-executor = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", default-features = false } +polkadot-runtime-common = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", default-features = false } +polkadot-primitives = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", default-features = false } +xcm = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", default-features = false } +xcm-executor = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", default-features = false } # Cumulus dependencies -pallet-collator-selection = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-primitives-core = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } +pallet-collator-selection = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-primitives-core = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } [dev-dependencies] serde = { version = "1.0.119" } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-authorship = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-authorship = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } [build-dependencies] -substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } +substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } [features] 
default = ["std"] diff --git a/parachains-common/src/lib.rs b/parachains-common/src/lib.rs index e29f23d4..d8fe8a05 100644 --- a/parachains-common/src/lib.rs +++ b/parachains-common/src/lib.rs @@ -82,7 +82,7 @@ mod types { /// Common constants of parachains. mod constants { use super::types::BlockNumber; - use frame_support::weights::{constants::WEIGHT_PER_SECOND, Weight}; + use frame_support::weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}; use sp_runtime::Perbill; /// This determines the average expected block time that we are targeting. Blocks will be @@ -108,9 +108,10 @@ mod constants { pub const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); /// We allow for 0.5 seconds of compute with a 6 second average block time. - pub const MAXIMUM_BLOCK_WEIGHT: Weight = WEIGHT_PER_SECOND - .saturating_div(2) - .set_proof_size(cumulus_primitives_core::relay_chain::v2::MAX_POV_SIZE as u64); + pub const MAXIMUM_BLOCK_WEIGHT: Weight = Weight::from_parts( + WEIGHT_REF_TIME_PER_SECOND.saturating_div(2), + polkadot_primitives::v2::MAX_POV_SIZE as u64, + ); } /// Opaque types. These are used by the CLI to instantiate machinery that don't need to know diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index 308a6537..a229ecde 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -1,133 +1,135 @@ [package] name = "polkadot-service" -version = "0.9.33" +rust-version = "1.60" +version = "0.9.36" authors = ["Parity Technologies "] edition = "2021" -rust-version = "1.60" [dependencies] # Substrate Client -sc-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -babe = { package = "sc-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -beefy-gadget = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -grandpa = { package = "sc-finality-grandpa", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sc-block-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sc-chain-spec = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sc-client-db = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sc-consensus-slots = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sc-executor = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sc-network = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sc-network-common = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sc-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sc-sync-state-rpc = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sc-basic-authorship = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sc-offchain = { 
git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sc-sysinfo = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -service = { package = "sc-service", git = "https://github.com/paritytech/substrate", default-features = false , branch = "polkadot-v0.9.33" } -telemetry = { package = "sc-telemetry", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } +sc-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +babe = { package = "sc-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +beefy-primitives = { git = "https://github.com/paritytech/substrate", package = "sp-beefy" , branch = "polkadot-v0.9.36" } +beefy-gadget = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +grandpa = { package = "sc-finality-grandpa", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +mmr-gadget = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-mmr-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sc-block-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sc-chain-spec = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sc-client-db = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sc-consensus-slots = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sc-executor = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sc-network = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sc-network-common = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sc-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sc-sync-state-rpc = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sc-basic-authorship = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sc-offchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sc-sysinfo = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +service = { package = "sc-service", git = "https://github.com/paritytech/substrate", default-features = false , branch = "polkadot-v0.9.36" } +telemetry = { package = "sc-telemetry", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } # Substrate Primitives -sp-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -consensus_common = { package = "sp-consensus", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -grandpa_primitives = { package = "sp-finality-grandpa", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -inherents = { package = "sp-inherents", git = "https://github.com/paritytech/substrate", branch = 
"polkadot-v0.9.33" } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-offchain = { package = "sp-offchain", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-storage = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-timestamp = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-consensus-babe = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } +sp-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +consensus_common = { package = "sp-consensus", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +grandpa_primitives = { package = "sp-finality-grandpa", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +inherents = { package = "sp-inherents", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-offchain = { package = "sp-offchain", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-storage = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-timestamp = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-consensus-babe = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } # Substrate Pallets -pallet-babe = { git = "https://github.com/paritytech/substrate", branch = 
"polkadot-v0.9.33" } -pallet-im-online = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -pallet-staking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } +pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +pallet-im-online = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +pallet-staking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } # Substrate Other -frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } +frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36" } # External Crates futures = "0.3.21" hex-literal = "0.3.4" -gum = { package = "tracing-gum", git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33" } +gum = { package = "tracing-gum", git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36" } serde = { version = "1.0.137", features = ["derive"] } serde_json = "1.0.81" thiserror = "1.0.31" -kvdb = "0.12.0" -kvdb-rocksdb = { version = "0.16.0", optional = true } +kvdb = "0.13.0" +kvdb-rocksdb = { version = "0.17.0", optional = true } parity-db = { version = "0.4.2", optional = true } async-trait = "0.1.57" lru = "0.8" # Polkadot -polkadot-node-core-parachains-inherent = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33" } -polkadot-overseer = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33" } -polkadot-client = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", default-features = false, optional = true } -polkadot-parachain = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33" } -polkadot-primitives = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33" } -polkadot-node-primitives = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33" } -polkadot-rpc = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33" } -polkadot-node-subsystem = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33" } -polkadot-node-subsystem-util = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33" } -polkadot-node-subsystem-types = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33" } -polkadot-runtime-parachains = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33" } -polkadot-node-network-protocol = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33" } +polkadot-node-core-parachains-inherent = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36" } +polkadot-overseer = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36" } +polkadot-client = { git = "https://github.com/paritytech/polkadot", branch = 
"release-v0.9.36", default-features = false, optional = true } +polkadot-parachain = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36" } +polkadot-primitives = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36" } +polkadot-node-primitives = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36" } +polkadot-rpc = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36" } +polkadot-node-subsystem = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36" } +polkadot-node-subsystem-util = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36" } +polkadot-node-subsystem-types = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36" } +polkadot-runtime-parachains = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36" } +polkadot-node-network-protocol = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36" } # Polkadot Runtime Constants -polkadot-runtime-constants = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", optional = true } -kusama-runtime-constants = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", optional = true } -rococo-runtime-constants = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", optional = true } -westend-runtime-constants = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", optional = true } +polkadot-runtime-constants = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", optional = true } +kusama-runtime-constants = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", optional = true } +rococo-runtime-constants = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", optional = true } +westend-runtime-constants = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", optional = true } # Polkadot Runtimes -polkadot-runtime = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", optional = true } -kusama-runtime = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", optional = true } -westend-runtime = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", optional = true } -rococo-runtime = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", optional = true } +polkadot-runtime = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", optional = true } +kusama-runtime = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", optional = true } +westend-runtime = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", optional = true } +rococo-runtime = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", optional = true } # Polkadot Subsystems -polkadot-approval-distribution = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", optional = true } -polkadot-availability-bitfield-distribution = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", optional = true } -polkadot-availability-distribution = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", optional = true } -polkadot-availability-recovery = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", optional = true } 
-polkadot-collator-protocol = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", optional = true } -polkadot-dispute-distribution = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", optional = true } -polkadot-gossip-support = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", optional = true } -polkadot-network-bridge = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", optional = true } -polkadot-node-collation-generation = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", optional = true } -polkadot-node-core-approval-voting = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", optional = true } -polkadot-node-core-av-store = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", optional = true } -polkadot-node-core-backing = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", optional = true } -polkadot-node-core-bitfield-signing = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", optional = true } -polkadot-node-core-candidate-validation = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", optional = true } -polkadot-node-core-chain-api = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", optional = true } -polkadot-node-core-chain-selection = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", optional = true } -polkadot-node-core-dispute-coordinator = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", optional = true } -polkadot-node-core-provisioner = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", optional = true } -polkadot-node-core-pvf-checker = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", optional = true } -polkadot-node-core-runtime-api = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", optional = true } -polkadot-statement-distribution = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", optional = true } +polkadot-approval-distribution = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", optional = true } +polkadot-availability-bitfield-distribution = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", optional = true } +polkadot-availability-distribution = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", optional = true } +polkadot-availability-recovery = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", optional = true } +polkadot-collator-protocol = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", optional = true } +polkadot-dispute-distribution = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", optional = true } +polkadot-gossip-support = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", optional = true } +polkadot-network-bridge = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", optional = true } +polkadot-node-collation-generation = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", optional = true } +polkadot-node-core-approval-voting = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", optional = true } +polkadot-node-core-av-store = 
{ git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", optional = true } +polkadot-node-core-backing = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", optional = true } +polkadot-node-core-bitfield-signing = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", optional = true } +polkadot-node-core-candidate-validation = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", optional = true } +polkadot-node-core-chain-api = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", optional = true } +polkadot-node-core-chain-selection = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", optional = true } +polkadot-node-core-dispute-coordinator = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", optional = true } +polkadot-node-core-provisioner = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", optional = true } +polkadot-node-core-pvf-checker = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", optional = true } +polkadot-node-core-runtime-api = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", optional = true } +polkadot-statement-distribution = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", optional = true } [dev-dependencies] -polkadot-test-client = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33" } -polkadot-node-subsystem-test-helpers = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33" } +polkadot-test-client = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36" } +polkadot-node-subsystem-test-helpers = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36" } env_logger = "0.9.0" log = "0.4.17" assert_matches = "1.5.0" diff --git a/polkadot/node/service/chain-specs/kusama.json b/polkadot/node/service/chain-specs/kusama.json index 5d369a72..581f40af 100644 --- a/polkadot/node/service/chain-specs/kusama.json +++ b/polkadot/node/service/chain-specs/kusama.json @@ -21,7 +21,11 @@ "/dns/kusama-bootnode-1.paritytech.net/tcp/30333/p2p/12D3KooWQKqane1SqWJNWMQkbia9qiMWXkcHtAdfW5eVF8hbwEDw", "/dns/kusama-bootnode.dwellir.com/tcp/30333/ws/p2p/12D3KooWFj2ndawdYyk2spc42Y2arYwb2TUoHLHFAsKuHRzWXwoJ", "/dns/boot.stake.plus/tcp/31333/p2p/12D3KooWLa1UyG5xLPds2GbiRBCTJjpsVwRWHWN7Dff14yiNJRpR", - "/dns/boot.stake.plus/tcp/31334/wss/p2p/12D3KooWLa1UyG5xLPds2GbiRBCTJjpsVwRWHWN7Dff14yiNJRpR" + "/dns/boot.stake.plus/tcp/31334/wss/p2p/12D3KooWLa1UyG5xLPds2GbiRBCTJjpsVwRWHWN7Dff14yiNJRpR", + "/dns/boot-node.helikon.io/tcp/7060/p2p/12D3KooWL4KPqfAsPE2aY1g5Zo1CxsDwcdJ7mmAghK7cg6M2fdbD", + "/dns/boot-node.helikon.io/tcp/7062/wss/p2p/12D3KooWL4KPqfAsPE2aY1g5Zo1CxsDwcdJ7mmAghK7cg6M2fdbD", + "/dns/kusama.bootnode.amforc.com/tcp/30333/p2p/12D3KooWLx6nsj6Fpd8biP1VDyuCUjazvRiGWyBam8PsqRJkbUb9", + "/dns/kusama.bootnode.amforc.com/tcp/30334/wss/p2p/12D3KooWLx6nsj6Fpd8biP1VDyuCUjazvRiGWyBam8PsqRJkbUb9" ], "telemetryEndpoints": [ [ diff --git a/polkadot/node/service/chain-specs/polkadot.json b/polkadot/node/service/chain-specs/polkadot.json index 1a5ef949..8fc8ac63 100644 --- a/polkadot/node/service/chain-specs/polkadot.json +++ b/polkadot/node/service/chain-specs/polkadot.json @@ -21,7 +21,11 @@ "/dns/cc1-1.parity.tech/tcp/30333/p2p/12D3KooWFN2mhgpkJsDBuNuE5427AcDrsib8EoqGMZmkxWwx3Md4", 
"/dns/polkadot-bootnode.dwellir.com/tcp/30333/ws/p2p/12D3KooWKvdDyRKqUfSAaUCbYiLwKY8uK3wDWpCuy2FiDLbkPTDJ", "/dns/boot.stake.plus/tcp/30333/p2p/12D3KooWKT4ZHNxXH4icMjdrv7EwWBkfbz5duxE5sdJKKeWFYi5n", - "/dns/boot.stake.plus/tcp/30334/wss/p2p/12D3KooWKT4ZHNxXH4icMjdrv7EwWBkfbz5duxE5sdJKKeWFYi5n" + "/dns/boot.stake.plus/tcp/30334/wss/p2p/12D3KooWKT4ZHNxXH4icMjdrv7EwWBkfbz5duxE5sdJKKeWFYi5n", + "/dns/boot-node.helikon.io/tcp/7070/p2p/12D3KooWS9ZcvRxyzrSf6p63QfTCWs12nLoNKhGux865crgxVA4H", + "/dns/boot-node.helikon.io/tcp/7072/wss/p2p/12D3KooWS9ZcvRxyzrSf6p63QfTCWs12nLoNKhGux865crgxVA4H", + "/dns/polkadot.bootnode.amforc.com/tcp/30333/p2p/12D3KooWAsuCEVCzUVUrtib8W82Yne3jgVGhQZN3hizko5FTnDg3", + "/dns/polkadot.bootnode.amforc.com/tcp/30334/wss/p2p/12D3KooWAsuCEVCzUVUrtib8W82Yne3jgVGhQZN3hizko5FTnDg3" ], "telemetryEndpoints": [ [ diff --git a/polkadot/node/service/chain-specs/westend.json b/polkadot/node/service/chain-specs/westend.json index 03f1f5ec..6527d810 100644 --- a/polkadot/node/service/chain-specs/westend.json +++ b/polkadot/node/service/chain-specs/westend.json @@ -13,7 +13,11 @@ "/dns/westend-connect-0.polkadot.io/tcp/443/wss/p2p/12D3KooWNg8iUqhux7X7voNU9Nty5pzehrFJwkQwg1CJnqN3CTzE", "/dns/westend-connect-1.polkadot.io/tcp/443/wss/p2p/12D3KooWAq2A7UNFS6725XFatD5QW7iYBezTLdAUx1SmRkxN79Ne", "/dns/boot.stake.plus/tcp/32333/p2p/12D3KooWK8fjVoSvMq5copQYMsdYreSGPGgcMbGMgbMDPfpf3sm7", - "/dns/boot.stake.plus/tcp/32334/wss/p2p/12D3KooWK8fjVoSvMq5copQYMsdYreSGPGgcMbGMgbMDPfpf3sm7" + "/dns/boot.stake.plus/tcp/32334/wss/p2p/12D3KooWK8fjVoSvMq5copQYMsdYreSGPGgcMbGMgbMDPfpf3sm7", + "/dns/boot-node.helikon.io/tcp/7080/p2p/12D3KooWRFDPyT8vA8mLzh6dJoyujn4QNjeqi6Ch79eSMz9beKXC", + "/dns/boot-node.helikon.io/tcp/7082/wss/p2p/12D3KooWRFDPyT8vA8mLzh6dJoyujn4QNjeqi6Ch79eSMz9beKXC", + "/dns/westend.bootnode.amforc.com/tcp/30333/p2p/12D3KooWJ5y9ZgVepBQNW4aabrxgmnrApdVnscqgKWiUu4BNJbC8", + "/dns/westend.bootnode.amforc.com/tcp/30334/wss/p2p/12D3KooWJ5y9ZgVepBQNW4aabrxgmnrApdVnscqgKWiUu4BNJbC8" ], "telemetryEndpoints": [ [ @@ -133,4 +137,4 @@ "childrenDefault": {} } } -} +} \ No newline at end of file diff --git a/polkadot/node/service/src/chain_spec.rs b/polkadot/node/service/src/chain_spec.rs index e2c0a7bf..8650499d 100644 --- a/polkadot/node/service/src/chain_spec.rs +++ b/polkadot/node/service/src/chain_spec.rs @@ -767,10 +767,10 @@ fn kusama_staging_testnet_config_genesis(wasm_binary: &[u8]) -> kusama::GenesisC configuration: kusama::ConfigurationConfig { config: default_parachains_host_configuration(), }, - gilt: Default::default(), paras: Default::default(), xcm_pallet: Default::default(), nomination_pools: Default::default(), + nis_counterpart_balances: Default::default(), } } @@ -1073,11 +1073,11 @@ fn rococo_staging_testnet_config_genesis(wasm_binary: &[u8]) -> rococo_runtime:: configuration: rococo_runtime::ConfigurationConfig { config: default_parachains_host_configuration(), }, - gilt: Default::default(), registrar: rococo_runtime::RegistrarConfig { next_free_para_id: polkadot_primitives::v2::LOWEST_PUBLIC_ID, }, xcm_pallet: Default::default(), + nis_counterpart_balances: Default::default(), } } @@ -1466,10 +1466,10 @@ pub fn kusama_testnet_genesis( configuration: kusama::ConfigurationConfig { config: default_parachains_host_configuration(), }, - gilt: Default::default(), paras: Default::default(), xcm_pallet: Default::default(), nomination_pools: Default::default(), + nis_counterpart_balances: Default::default(), } } @@ -1629,12 +1629,12 @@ pub fn rococo_testnet_genesis( 
..default_parachains_host_configuration() }, }, - gilt: Default::default(), paras: rococo_runtime::ParasConfig { paras: vec![] }, registrar: rococo_runtime::RegistrarConfig { next_free_para_id: polkadot_primitives::v2::LOWEST_PUBLIC_ID, }, xcm_pallet: Default::default(), + nis_counterpart_balances: Default::default(), } } diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index ca7ae151..4252474a 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -95,6 +95,7 @@ pub use polkadot_client::PolkadotExecutorDispatch; pub use chain_spec::{KusamaChainSpec, PolkadotChainSpec, RococoChainSpec, WestendChainSpec}; pub use consensus_common::{block_validation::Chain, Proposal, SelectChain}; +use mmr_gadget::MmrGadget; #[cfg(feature = "full-node")] pub use polkadot_client::{ AbstractClient, Client, ClientHandle, ExecuteWithClient, FullBackend, FullClient, @@ -758,6 +759,7 @@ where { use polkadot_node_network_protocol::request_response::IncomingRequest; + let is_offchain_indexing_enabled = config.offchain_worker.indexing_enabled; let role = config.role.clone(); let force_authoring = config.force_authoring; let backoff_authoring_blocks = { @@ -1152,11 +1154,12 @@ where let overseer_handle = overseer_handle.clone(); async move { - let parachain = polkadot_node_core_parachains_inherent::ParachainsInherentDataProvider::create( - &*client_clone, - overseer_handle, - parent, - ).await.map_err(|e| Box::new(e))?; + let parachain = + polkadot_node_core_parachains_inherent::ParachainsInherentDataProvider::new( + client_clone, + overseer_handle, + parent, + ); let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); @@ -1219,25 +1222,26 @@ where } else { task_manager.spawn_handle().spawn_blocking("beefy-gadget", None, gadget); } - } - // Reduce grandpa load on Kusama and test networks. This will slow down finality by - // approximately one slot duration, but will reduce load. We would like to see the impact on - // Kusama, see: https://github.com/paritytech/polkadot/issues/5464 - let gossip_duration = if chain_spec.is_versi() || - chain_spec.is_wococo() || - chain_spec.is_rococo() || - chain_spec.is_kusama() - { - Duration::from_millis(2000) - } else { - Duration::from_millis(1000) - }; + if is_offchain_indexing_enabled { + task_manager.spawn_handle().spawn_blocking( + "mmr-gadget", + None, + MmrGadget::start( + client.clone(), + backend.clone(), + sp_mmr_primitives::INDEXING_PREFIX.to_vec(), + ), + ); + } + } let config = grandpa::Config { // FIXME substrate#1578 make this available through chainspec - gossip_duration, - justification_period: 1, + // Grandpa performance can be improved a bit by tuning this parameter, see: + // https://github.com/paritytech/polkadot/issues/5464 + gossip_duration: Duration::from_millis(1000), + justification_period: 512, name: Some(name), observer_enabled: false, keystore: keystore_opt, diff --git a/polkadot/node/service/src/overseer.rs b/polkadot/node/service/src/overseer.rs index a8ce3e5e..7dff8669 100644 --- a/polkadot/node/service/src/overseer.rs +++ b/polkadot/node/service/src/overseer.rs @@ -129,7 +129,7 @@ where /// Obtain a prepared `OverseerBuilder`, that is initialized /// with all default values. 
-pub fn prepared_overseer_builder<'a, Spawner, RuntimeClient>( +pub fn prepared_overseer_builder( OverseerGenArgs { leaves, keystore, @@ -155,7 +155,7 @@ pub fn prepared_overseer_builder<'a, Spawner, RuntimeClient>( overseer_message_channel_capacity_override, req_protocol_names, peerset_protocol_names, - }: OverseerGenArgs<'a, Spawner, RuntimeClient>, + }: OverseerGenArgs, ) -> Result< InitializedOverseerBuilder< SpawnGlue, @@ -257,7 +257,7 @@ where .collator_protocol({ let side = match is_collator { IsCollator::Yes(collator_pair) => ProtocolSide::Collator( - network_service.local_peer_id().clone(), + network_service.local_peer_id(), collator_pair, collation_req_receiver, Metrics::register(registry)?, @@ -334,10 +334,10 @@ where /// would do. pub trait OverseerGen { /// Overwrite the full generation of the overseer, including the subsystems. - fn generate<'a, Spawner, RuntimeClient>( + fn generate( &self, connector: OverseerConnector, - args: OverseerGenArgs<'a, Spawner, RuntimeClient>, + args: OverseerGenArgs, ) -> Result<(Overseer, Arc>, OverseerHandle), Error> where RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, @@ -358,10 +358,10 @@ use polkadot_overseer::KNOWN_LEAVES_CACHE_SIZE; pub struct RealOverseerGen; impl OverseerGen for RealOverseerGen { - fn generate<'a, Spawner, RuntimeClient>( + fn generate( &self, connector: OverseerConnector, - args: OverseerGenArgs<'a, Spawner, RuntimeClient>, + args: OverseerGenArgs, ) -> Result<(Overseer, Arc>, OverseerHandle), Error> where RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, diff --git a/polkadot/node/service/src/relay_chain_selection.rs b/polkadot/node/service/src/relay_chain_selection.rs index df3e68cc..890e4c16 100644 --- a/polkadot/node/service/src/relay_chain_selection.rs +++ b/polkadot/node/service/src/relay_chain_selection.rs @@ -343,12 +343,11 @@ where // The Chain Selection subsystem is supposed to treat the finalized // block as the best leaf in the case that there are no viable // leaves, so this should not happen in practice. - let best_leaf = self + let best_leaf = *self .leaves() .await? .first() - .ok_or_else(|| ConsensusError::Other(Box::new(Error::EmptyLeaves)))? 
- .clone(); + .ok_or_else(|| ConsensusError::Other(Box::new(Error::EmptyLeaves)))?; gum::trace!(target: LOG_TARGET, ?best_leaf, "Best chain"); diff --git a/runtime/khala/Cargo.toml b/runtime/khala/Cargo.toml index 03706b5e..2a8cb362 100644 --- a/runtime/khala/Cargo.toml +++ b/runtime/khala/Cargo.toml @@ -17,80 +17,80 @@ phala-types = { path = "../../crates/phala-types", default-features = false } parachains-common = { path = "../../parachains-common", default-features = false } # Substrate dependencies -sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-version = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-version = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } 
-frame-executive = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -frame-system-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", optional = true, default-features = false } -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", optional = true, default-features = false } -frame-try-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false, optional = true } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +frame-executive = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +frame-system-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", optional = true, default-features = false } +frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", optional = true, default-features = false } +frame-try-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false, optional = true } -pallet-aura = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-authorship = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-utility = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-multisig = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-proxy = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-scheduler = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-collective = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-membership = { git = "https://github.com/paritytech/substrate", 
branch = "polkadot-v0.9.33", default-features = false } -pallet-treasury = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-bounties = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-child-bounties = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-lottery = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-vesting = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-identity = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-democracy = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-elections-phragmen = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-tips = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-preimage = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-assets = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-uniques = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } +pallet-aura = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-authorship = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-utility = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-multisig = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-proxy = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-scheduler = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-collective = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-membership = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-treasury = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-bounties = { git = 
"https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-child-bounties = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-lottery = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-vesting = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-identity = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-democracy = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-elections-phragmen = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-tips = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-preimage = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-assets = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-uniques = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } # Cumulus dependencies -pallet-collator-selection = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-pallet-aura-ext = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-pallet-parachain-system = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-pallet-session-benchmarking = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-primitives-core = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-primitives-timestamp = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-primitives-utility = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-pallet-dmp-queue = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-pallet-xcmp-queue = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-pallet-xcm = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } +pallet-collator-selection = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-pallet-aura-ext = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-pallet-parachain-system = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-pallet-session-benchmarking = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-primitives-core = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-primitives-timestamp = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false 
} +cumulus-primitives-utility = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-pallet-dmp-queue = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-pallet-xcmp-queue = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-pallet-xcm = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } # Polkadot dependencies -polkadot-parachain = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", default-features = false } -polkadot-runtime-common = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", default-features = false } -xcm = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", default-features = false } -xcm-builder = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", default-features = false } -xcm-executor = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", default-features = false } -pallet-xcm = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", default-features = false } +polkadot-parachain = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", default-features = false } +polkadot-runtime-common = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", default-features = false } +xcm = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", default-features = false } +xcm-builder = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", default-features = false } +xcm-executor = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", default-features = false } +pallet-xcm = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", default-features = false } # RMRK dependencies -pallet-rmrk-core = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-rmrk-equip = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-rmrk-market = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.33", default-features = false } -rmrk-traits = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-rmrk-rpc-runtime-api = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.33", default-features = false } +pallet-rmrk-core = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-rmrk-equip = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-rmrk-market = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.36", default-features = false } +rmrk-traits = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-rmrk-rpc-runtime-api = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.36", default-features = false } # Local dependencies assets-registry = { path = "../../pallets/assets-registry", default-features = false } @@ -101,7 +101,7 @@ subbridge-pallets = { 
path = "../../pallets/subbridge", default-features = false pallet-phala-world = { path = "../../pallets/phala-world", default-features = false } [build-dependencies] -substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", optional = true } +substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", optional = true } [features] default = ["std", "include-wasm"] diff --git a/runtime/khala/src/constants.rs b/runtime/khala/src/constants.rs index 84ffaf41..60478dfb 100644 --- a/runtime/khala/src/constants.rs +++ b/runtime/khala/src/constants.rs @@ -34,7 +34,7 @@ pub mod currency { /// Fee-related. pub mod fee { use frame_support::weights::{ - constants::{ExtrinsicBaseWeight, WEIGHT_PER_SECOND}, + constants::{ExtrinsicBaseWeight, WEIGHT_REF_TIME_PER_SECOND}, WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial, }; use parachains_common::Balance; @@ -74,7 +74,7 @@ pub mod fee { // one cent cost per tx let base_tx_fee = super::currency::CENTS; let base_weight = Balance::from(ExtrinsicBaseWeight::get().ref_time()); - let tx_per_second = (WEIGHT_PER_SECOND.ref_time() as u128) / base_weight; + let tx_per_second = (WEIGHT_REF_TIME_PER_SECOND as u128) / base_weight; base_tx_fee * tx_per_second } } diff --git a/runtime/khala/src/lib.rs b/runtime/khala/src/lib.rs index 9e9d97b7..6f4d5d05 100644 --- a/runtime/khala/src/lib.rs +++ b/runtime/khala/src/lib.rs @@ -81,7 +81,7 @@ pub use frame_support::{ WithdrawReasons, }, weights::{ - constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND}, + constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_REF_TIME_PER_SECOND}, ConstantMultiplier, IdentityFee, Weight, }, BoundedVec, PalletId, RuntimeDebug, StorageValue, @@ -728,6 +728,7 @@ impl pallet_assets::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Balance = Balance; type AssetId = u32; + type AssetIdParameter = codec::Compact; type Currency = Balances; type CreateOrigin = AsEnsureOriginWithArg>; type ForceOrigin = EnsureRoot; @@ -737,9 +738,12 @@ impl pallet_assets::Config for Runtime { type MetadataDepositPerByte = MetadataDepositPerByte; type ApprovalDeposit = ApprovalDeposit; type StringLimit = AssetsStringLimit; + type RemoveItemsLimit = ConstU32<1000>; type Freezer = (); type Extra = (); type WeightInfo = pallet_assets::weights::SubstrateWeight; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = (); } parameter_types! { @@ -1065,7 +1069,7 @@ pub type FungiblesTransactor = FungiblesAdapter< parameter_types! { pub NativeExecutionPrice: u128 = pha_per_second(); - pub WeightPerSecond: XCMWeight = WEIGHT_PER_SECOND.ref_time(); + pub WeightPerSecond: XCMWeight = WEIGHT_REF_TIME_PER_SECOND; } pub struct XcmConfig; @@ -1909,22 +1913,21 @@ impl_runtime_apis! 
{ #[cfg(feature = "try-runtime")] impl frame_try_runtime::TryRuntime for Runtime { - fn on_runtime_upgrade() -> (Weight, Weight) { - log::info!("try-runtime::on_runtime_upgrade statemine."); - let weight = Executive::try_runtime_upgrade().unwrap(); + fn on_runtime_upgrade(checks: bool) -> (Weight, Weight) { + let weight = Executive::try_runtime_upgrade(checks).unwrap(); (weight, RuntimeBlockWeights::get().max_block) } - fn execute_block(block: Block, state_root_check: bool, select: frame_try_runtime::TryStateSelect) -> Weight { - log::info!( - target: "runtime::statemine", "try-runtime: executing block #{} ({:?}) / root checks: {:?} / sanity-checks: {:?}", - block.header.number, - block.header.hash(), - state_root_check, - select, - ); - Executive::try_execute_block(block, state_root_check, select).expect("try_execute_block failed") - } + fn execute_block( + block: Block, + state_root_check: bool, + signature_check: bool, + select: frame_try_runtime::TryStateSelect, + ) -> Weight { + // NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to + // have a backtrace here. + Executive::try_execute_block(block, state_root_check, signature_check, select).unwrap() + } } #[cfg(feature = "runtime-benchmarks")] diff --git a/runtime/phala/Cargo.toml b/runtime/phala/Cargo.toml index cb3261f1..574f4ee1 100644 --- a/runtime/phala/Cargo.toml +++ b/runtime/phala/Cargo.toml @@ -15,72 +15,72 @@ smallvec = "1.6.1" parachains-common = { path = "../../parachains-common", default-features = false } # Substrate dependencies -sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-version = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-version = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", 
branch = "polkadot-v0.9.36", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -frame-executive = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -frame-system-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", optional = true, default-features = false } -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", optional = true, default-features = false } -frame-try-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false, optional = true } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +frame-executive = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +frame-system-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", optional = true, default-features = false } +frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", optional = true, default-features = false } +frame-try-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false, optional = true } -pallet-aura = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-authorship = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-timestamp = { git = "https://github.com/paritytech/substrate", 
branch = "polkadot-v0.9.33", default-features = false } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-utility = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-multisig = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-proxy = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-scheduler = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-collective = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-membership = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-treasury = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-bounties = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-child-bounties = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-lottery = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-vesting = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-identity = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-democracy = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-elections-phragmen = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-tips = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-preimage = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-assets = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } +pallet-aura = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-authorship = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = 
false } +pallet-utility = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-multisig = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-proxy = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-scheduler = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-collective = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-membership = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-treasury = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-bounties = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-child-bounties = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-lottery = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-vesting = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-identity = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-democracy = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-elections-phragmen = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-tips = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-preimage = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-assets = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } # Cumulus dependencies -pallet-collator-selection = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-pallet-aura-ext = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-pallet-parachain-system = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-pallet-session-benchmarking = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-primitives-core = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-primitives-timestamp = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-primitives-utility = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-pallet-dmp-queue = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-pallet-xcmp-queue = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-pallet-xcm = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } 
+pallet-collator-selection = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-pallet-aura-ext = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-pallet-parachain-system = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-pallet-session-benchmarking = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-primitives-core = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-primitives-timestamp = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-primitives-utility = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-pallet-dmp-queue = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-pallet-xcmp-queue = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-pallet-xcm = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } # Polkadot dependencies -polkadot-parachain = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", default-features = false } -polkadot-runtime-common = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", default-features = false } -xcm = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", default-features = false } -xcm-builder = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", default-features = false } -xcm-executor = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", default-features = false } -pallet-xcm = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", default-features = false } +polkadot-parachain = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", default-features = false } +polkadot-runtime-common = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", default-features = false } +xcm = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", default-features = false } +xcm-builder = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", default-features = false } +xcm-executor = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", default-features = false } +pallet-xcm = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", default-features = false } # Local dependencies pallet-parachain-info = { path = "../../pallets/parachain-info", default-features = false } @@ -88,7 +88,7 @@ assets-registry = { path = "../../pallets/assets-registry", default-features = f subbridge-pallets = { path = "../../pallets/subbridge", default-features = false } [build-dependencies] -substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", optional = true } +substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", optional = true } [features] default = ["std", "include-wasm"] diff --git a/runtime/phala/src/constants.rs b/runtime/phala/src/constants.rs index 84ffaf41..60478dfb 100644 
--- a/runtime/phala/src/constants.rs
+++ b/runtime/phala/src/constants.rs
@@ -34,7 +34,7 @@ pub mod currency {
 /// Fee-related.
 pub mod fee {
     use frame_support::weights::{
-        constants::{ExtrinsicBaseWeight, WEIGHT_PER_SECOND},
+        constants::{ExtrinsicBaseWeight, WEIGHT_REF_TIME_PER_SECOND},
         WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial,
     };
     use parachains_common::Balance;
@@ -74,7 +74,7 @@ pub mod fee {
         // one cent cost per tx
         let base_tx_fee = super::currency::CENTS;
         let base_weight = Balance::from(ExtrinsicBaseWeight::get().ref_time());
-        let tx_per_second = (WEIGHT_PER_SECOND.ref_time() as u128) / base_weight;
+        let tx_per_second = (WEIGHT_REF_TIME_PER_SECOND as u128) / base_weight;
         base_tx_fee * tx_per_second
     }
 }
diff --git a/runtime/phala/src/lib.rs b/runtime/phala/src/lib.rs
index 56872d63..6616a25b 100644
--- a/runtime/phala/src/lib.rs
+++ b/runtime/phala/src/lib.rs
@@ -70,10 +70,10 @@ pub use frame_support::{
     traits::{
         Contains, Currency, EitherOfDiverse, EqualPrivilegeOnly, Everything, Imbalance,
         InstanceFilter, IsInVec, KeyOwnerProofSystem, LockIdentifier, Nothing, OnUnbalanced,
-        Randomness, U128CurrencyToVote, WithdrawReasons,
+        Randomness, U128CurrencyToVote, WithdrawReasons, ConstU32,
     },
     weights::{
-        constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND},
+        constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_REF_TIME_PER_SECOND},
         ConstantMultiplier, IdentityFee, Weight,
     },
     PalletId, RuntimeDebug, StorageValue,
@@ -622,6 +622,7 @@ impl pallet_assets::Config for Runtime {
     type RuntimeEvent = RuntimeEvent;
     type Balance = Balance;
     type AssetId = u32;
+    type AssetIdParameter = codec::Compact<u32>;
     type Currency = Balances;
     type CreateOrigin = AsEnsureOriginWithArg>;
     type ForceOrigin = EnsureRoot<AccountId>;
@@ -631,9 +632,12 @@ impl pallet_assets::Config for Runtime {
     type MetadataDepositPerByte = MetadataDepositPerByte;
     type ApprovalDeposit = ApprovalDeposit;
     type StringLimit = AssetsStringLimit;
+    type RemoveItemsLimit = ConstU32<1000>;
     type Freezer = ();
     type Extra = ();
     type WeightInfo = pallet_assets::weights::SubstrateWeight<Runtime>;
+    #[cfg(feature = "runtime-benchmarks")]
+    type BenchmarkHelper = ();
 }
 parameter_types! {
@@ -1170,7 +1174,7 @@ pub type FungiblesTransactor = FungiblesAdapter<
 parameter_types! {
     pub NativeExecutionPrice: u128 = pha_per_second();
-    pub WeightPerSecond: XCMWeight = WEIGHT_PER_SECOND.ref_time();
+    pub WeightPerSecond: XCMWeight = WEIGHT_REF_TIME_PER_SECOND;
 }
 pub struct XcmConfig;
@@ -1465,22 +1469,21 @@ impl_runtime_apis! {
     #[cfg(feature = "try-runtime")]
     impl frame_try_runtime::TryRuntime<Block> for Runtime {
-        fn on_runtime_upgrade() -> (Weight, Weight) {
-            log::info!("try-runtime::on_runtime_upgrade statemine.");
-            let weight = Executive::try_runtime_upgrade().unwrap();
+        fn on_runtime_upgrade(checks: bool) -> (Weight, Weight) {
+            let weight = Executive::try_runtime_upgrade(checks).unwrap();
             (weight, RuntimeBlockWeights::get().max_block)
         }
-        fn execute_block(block: Block, state_root_check: bool, select: frame_try_runtime::TryStateSelect) -> Weight {
-            log::info!(
-                target: "runtime::statemine", "try-runtime: executing block #{} ({:?}) / root checks: {:?} / sanity-checks: {:?}",
-                block.header.number,
-                block.header.hash(),
-                state_root_check,
-                select,
-            );
-            Executive::try_execute_block(block, state_root_check, select).expect("try_execute_block failed")
-        }
+        fn execute_block(
+            block: Block,
+            state_root_check: bool,
+            signature_check: bool,
+            select: frame_try_runtime::TryStateSelect,
+        ) -> Weight {
+            // NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to
+            // have a backtrace here.
+            Executive::try_execute_block(block, state_root_check, signature_check, select).unwrap()
+        }
     }
 #[cfg(feature = "runtime-benchmarks")]
diff --git a/runtime/rhala/Cargo.toml b/runtime/rhala/Cargo.toml
index fd097f00..f3c7ec97 100644
--- a/runtime/rhala/Cargo.toml
+++ b/runtime/rhala/Cargo.toml
@@ -16,81 +16,81 @@ phala-types = { path = "../../crates/phala-types", default-features = false }
 parachains-common = { path = "../../parachains-common", default-features = false }
 # Substrate dependencies
-sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false }
-sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false }
-sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false }
-sp-version = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false }
-sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false }
-sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false }
-sp-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false }
-sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false }
-sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false }
-sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false }
-sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false }
-sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false }
+sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false }
+sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false }
+sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false }
+sp-version = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false }
+sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -frame-executive = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -frame-system-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", optional = true, default-features = false } -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", optional = true, default-features = false } -frame-try-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false, optional = true } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +frame-executive = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +frame-system-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", optional = true, default-features = false } +frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", optional = true, default-features = false } +frame-try-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false, optional = true } -pallet-aura = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-authorship = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } 
-pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-sudo = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-utility = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-multisig = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-proxy = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-scheduler = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-collective = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-membership = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-treasury = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-bounties = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-child-bounties = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-lottery = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-vesting = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-identity = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-democracy = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-elections-phragmen = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-tips = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-preimage = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-assets = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-uniques = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } +pallet-aura = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-authorship = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = 
"polkadot-v0.9.36", default-features = false } +pallet-sudo = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-utility = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-multisig = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-proxy = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-scheduler = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-collective = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-membership = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-treasury = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-bounties = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-child-bounties = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-lottery = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-vesting = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-identity = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-democracy = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-elections-phragmen = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-tips = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-preimage = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-assets = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-uniques = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } # Cumulus dependencies -pallet-collator-selection = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-pallet-aura-ext = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-pallet-parachain-system = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-pallet-session-benchmarking = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-primitives-core = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-primitives-timestamp = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", 
default-features = false } -cumulus-primitives-utility = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-pallet-dmp-queue = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-pallet-xcmp-queue = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-pallet-xcm = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } +pallet-collator-selection = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-pallet-aura-ext = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-pallet-parachain-system = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-pallet-session-benchmarking = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-primitives-core = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-primitives-timestamp = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-primitives-utility = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-pallet-dmp-queue = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-pallet-xcmp-queue = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-pallet-xcm = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } # Polkadot dependencies -polkadot-parachain = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", default-features = false } -polkadot-runtime-common = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", default-features = false } -xcm = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", default-features = false } -xcm-builder = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", default-features = false } -xcm-executor = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", default-features = false } -pallet-xcm = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", default-features = false } +polkadot-parachain = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", default-features = false } +polkadot-runtime-common = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", default-features = false } +xcm = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", default-features = false } +xcm-builder = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", default-features = false } +xcm-executor = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", default-features = false } +pallet-xcm = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", default-features = false } # RMRK dependencies -pallet-rmrk-core = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.33", default-features = false } 
-pallet-rmrk-equip = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-rmrk-market = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.33", default-features = false } -rmrk-traits = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-rmrk-rpc-runtime-api = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.33", default-features = false } +pallet-rmrk-core = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-rmrk-equip = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-rmrk-market = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.36", default-features = false } +rmrk-traits = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-rmrk-rpc-runtime-api = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.36", default-features = false } # Local dependencies assets-registry = { path = "../../pallets/assets-registry", default-features = false } @@ -101,7 +101,7 @@ subbridge-pallets = { path = "../../pallets/subbridge", default-features = false pallet-phala-world = { path = "../../pallets/phala-world", default-features = false } [build-dependencies] -substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", optional = true } +substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", optional = true } [features] default = ["std", "include-wasm"] diff --git a/runtime/rhala/src/constants.rs b/runtime/rhala/src/constants.rs index 84ffaf41..60478dfb 100644 --- a/runtime/rhala/src/constants.rs +++ b/runtime/rhala/src/constants.rs @@ -34,7 +34,7 @@ pub mod currency { /// Fee-related. 
pub mod fee { use frame_support::weights::{ - constants::{ExtrinsicBaseWeight, WEIGHT_PER_SECOND}, + constants::{ExtrinsicBaseWeight, WEIGHT_REF_TIME_PER_SECOND}, WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial, }; use parachains_common::Balance; @@ -74,7 +74,7 @@ pub mod fee { // one cent cost per tx let base_tx_fee = super::currency::CENTS; let base_weight = Balance::from(ExtrinsicBaseWeight::get().ref_time()); - let tx_per_second = (WEIGHT_PER_SECOND.ref_time() as u128) / base_weight; + let tx_per_second = (WEIGHT_REF_TIME_PER_SECOND as u128) / base_weight; base_tx_fee * tx_per_second } } diff --git a/runtime/rhala/src/lib.rs b/runtime/rhala/src/lib.rs index efe36549..18a473e0 100644 --- a/runtime/rhala/src/lib.rs +++ b/runtime/rhala/src/lib.rs @@ -81,7 +81,7 @@ pub use frame_support::{ WithdrawReasons, }, weights::{ - constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND}, + constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_REF_TIME_PER_SECOND}, ConstantMultiplier, IdentityFee, Weight, }, BoundedVec, PalletId, RuntimeDebug, StorageValue, @@ -723,6 +723,7 @@ impl pallet_assets::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Balance = Balance; type AssetId = u32; + type AssetIdParameter = codec::Compact; type Currency = Balances; type CreateOrigin = AsEnsureOriginWithArg>; type ForceOrigin = EnsureRoot; @@ -732,9 +733,12 @@ impl pallet_assets::Config for Runtime { type MetadataDepositPerByte = MetadataDepositPerByte; type ApprovalDeposit = ApprovalDeposit; type StringLimit = AssetsStringLimit; + type RemoveItemsLimit = ConstU32<1000>; type Freezer = (); type Extra = (); type WeightInfo = pallet_assets::weights::SubstrateWeight; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = (); } parameter_types! { @@ -1065,7 +1069,7 @@ pub type FungiblesTransactor = FungiblesAdapter< parameter_types! { pub NativeExecutionPrice: u128 = pha_per_second(); - pub WeightPerSecond: XCMWeight = WEIGHT_PER_SECOND.ref_time(); + pub WeightPerSecond: XCMWeight = WEIGHT_REF_TIME_PER_SECOND; } pub struct XcmConfig; @@ -1908,22 +1912,21 @@ impl_runtime_apis! { #[cfg(feature = "try-runtime")] impl frame_try_runtime::TryRuntime for Runtime { - fn on_runtime_upgrade() -> (Weight, Weight) { - log::info!("try-runtime::on_runtime_upgrade statemine."); - let weight = Executive::try_runtime_upgrade().unwrap(); + fn on_runtime_upgrade(checks: bool) -> (Weight, Weight) { + let weight = Executive::try_runtime_upgrade(checks).unwrap(); (weight, RuntimeBlockWeights::get().max_block) } - fn execute_block(block: Block, state_root_check: bool, select: frame_try_runtime::TryStateSelect) -> Weight { - log::info!( - target: "runtime::statemine", "try-runtime: executing block #{} ({:?}) / root checks: {:?} / sanity-checks: {:?}", - block.header.number, - block.header.hash(), - state_root_check, - select, - ); - Executive::try_execute_block(block, state_root_check, select).expect("try_execute_block failed") - } + fn execute_block( + block: Block, + state_root_check: bool, + signature_check: bool, + select: frame_try_runtime::TryStateSelect, + ) -> Weight { + // NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to + // have a backtrace here. 
+ Executive::try_execute_block(block, state_root_check, signature_check, select).unwrap() + } } #[cfg(feature = "runtime-benchmarks")] diff --git a/runtime/shell/Cargo.toml b/runtime/shell/Cargo.toml index 49c3df95..28854bee 100644 --- a/runtime/shell/Cargo.toml +++ b/runtime/shell/Cargo.toml @@ -11,43 +11,43 @@ serde = { version = "1.0.132", optional = true, features = ["derive"] } log = { version = "0.4.14", default-features = false } # Substrate dependencies -sp-std = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.33" } -sp-api = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.33" } -sp-io = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.33" } -sp-version = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.33" } -sp-runtime = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.33" } -sp-core = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.33" } -sp-session = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.33" } -sp-offchain = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.33" } -sp-block-builder = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.33" } -sp-transaction-pool = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.33" } -sp-inherents = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.33" } +sp-std = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.36" } +sp-api = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.36" } +sp-io = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.36" } +sp-version = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.36" } +sp-runtime = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.36" } +sp-core = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.36" } +sp-session = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.36" } +sp-offchain = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.36" } +sp-block-builder = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.36" } +sp-transaction-pool = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.36" } +sp-inherents = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.36" } -frame-support = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.33" } -frame-executive = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.33" } -frame-system = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.33" } +frame-support = { git = 
"https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.36" } +frame-executive = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.36" } +frame-system = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.36" } # try-runtime stuff. -frame-try-runtime = { git = "https://github.com/paritytech/substrate", default-features = false, optional = true , branch = "polkadot-v0.9.33" } +frame-try-runtime = { git = "https://github.com/paritytech/substrate", default-features = false, optional = true , branch = "polkadot-v0.9.36" } # Cumulus dependencies -cumulus-pallet-parachain-system = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-primitives-core = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-primitives-utility = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-pallet-dmp-queue = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-pallet-xcm = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -parachain-info = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } +cumulus-pallet-parachain-system = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-primitives-core = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-primitives-utility = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-pallet-dmp-queue = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-pallet-xcm = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +parachain-info = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } -xcm = { git = "https://github.com/paritytech/polkadot", default-features = false, branch = "release-v0.9.33" } -xcm-builder = { git = "https://github.com/paritytech/polkadot", default-features = false, branch = "release-v0.9.33" } -xcm-executor = { git = "https://github.com/paritytech/polkadot", default-features = false, branch = "release-v0.9.33" } +xcm = { git = "https://github.com/paritytech/polkadot", default-features = false, branch = "release-v0.9.36" } +xcm-builder = { git = "https://github.com/paritytech/polkadot", default-features = false, branch = "release-v0.9.36" } +xcm-executor = { git = "https://github.com/paritytech/polkadot", default-features = false, branch = "release-v0.9.36" } # Pallets -pallet-sudo = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } +pallet-sudo = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } [build-dependencies] -substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", optional = true } +substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", optional = true } [features] default = ["std"] diff --git 
a/runtime/shell/src/lib.rs b/runtime/shell/src/lib.rs index 01586009..751b7b81 100644 --- a/runtime/shell/src/lib.rs +++ b/runtime/shell/src/lib.rs @@ -46,7 +46,7 @@ pub use frame_support::{ construct_runtime, match_types, parameter_types, traits::{Everything, IsInVec, Randomness}, weights::{ - constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND}, + constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_REF_TIME_PER_SECOND}, IdentityFee, Weight, }, dispatch::DispatchClass, @@ -86,7 +86,10 @@ const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); /// by Operational extrinsics. const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); /// We allow for .5 seconds of compute with a 12 second average block time. -const MAXIMUM_BLOCK_WEIGHT: Weight = WEIGHT_PER_SECOND.saturating_div(2).set_proof_size(u64::MAX); +pub const MAXIMUM_BLOCK_WEIGHT: Weight = Weight::from_parts( + WEIGHT_REF_TIME_PER_SECOND.saturating_div(2), + 5 * 1024 * 1024 as u64, +); parameter_types! { pub const BlockHashCount: BlockNumber = 250; diff --git a/runtime/thala/Cargo.toml b/runtime/thala/Cargo.toml index 365fb452..de72b6ac 100644 --- a/runtime/thala/Cargo.toml +++ b/runtime/thala/Cargo.toml @@ -16,81 +16,81 @@ phala-types = { path = "../../crates/phala-types", default-features = false } parachains-common = { path = "../../parachains-common", default-features = false } # Substrate dependencies -sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-version = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-version = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-core = { git = 
"https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -frame-executive = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -frame-system-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", optional = true, default-features = false } -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", optional = true, default-features = false } -frame-try-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false, optional = true } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +frame-executive = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +frame-system-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", optional = true, default-features = false } +frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", optional = true, default-features = false } +frame-try-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false, optional = true } -pallet-aura = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-authorship = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-sudo = { git 
= "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-utility = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-multisig = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-proxy = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-scheduler = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-collective = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-membership = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-treasury = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-bounties = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-child-bounties = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-lottery = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-vesting = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-identity = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-democracy = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-elections-phragmen = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-tips = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-preimage = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-assets = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-uniques = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", default-features = false } +pallet-aura = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-authorship = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-sudo = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = 
false } +pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-utility = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-multisig = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-proxy = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-scheduler = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-collective = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-membership = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-treasury = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-bounties = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-child-bounties = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-lottery = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-vesting = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-identity = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-democracy = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-elections-phragmen = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-tips = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-preimage = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-assets = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-uniques = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", default-features = false } # Cumulus dependencies -pallet-collator-selection = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-pallet-aura-ext = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-pallet-parachain-system = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-pallet-session-benchmarking = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-primitives-core = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-primitives-timestamp = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-primitives-utility = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } 
-cumulus-pallet-dmp-queue = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-pallet-xcmp-queue = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } -cumulus-pallet-xcm = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.33", default-features = false } +pallet-collator-selection = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-pallet-aura-ext = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-pallet-parachain-system = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-pallet-session-benchmarking = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-primitives-core = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-primitives-timestamp = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-primitives-utility = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-pallet-dmp-queue = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-pallet-xcmp-queue = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } +cumulus-pallet-xcm = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.36", default-features = false } # Polkadot dependencies -polkadot-parachain = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", default-features = false } -polkadot-runtime-common = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", default-features = false } -xcm = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", default-features = false } -xcm-builder = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", default-features = false } -xcm-executor = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", default-features = false } -pallet-xcm = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.33", default-features = false } +polkadot-parachain = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", default-features = false } +polkadot-runtime-common = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", default-features = false } +xcm = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", default-features = false } +xcm-builder = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", default-features = false } +xcm-executor = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", default-features = false } +pallet-xcm = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.36", default-features = false } # RMRK dependencies -pallet-rmrk-core = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-rmrk-equip = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-rmrk-market = { git = 
"https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.33", default-features = false } -rmrk-traits = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.33", default-features = false } -pallet-rmrk-rpc-runtime-api = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.33", default-features = false } +pallet-rmrk-core = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-rmrk-equip = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-rmrk-market = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.36", default-features = false } +rmrk-traits = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.36", default-features = false } +pallet-rmrk-rpc-runtime-api = { git = "https://github.com/Phala-Network/rmrk-substrate", branch = "polkadot-v0.9.36", default-features = false } # Local dependencies assets-registry = { path = "../../pallets/assets-registry", default-features = false } @@ -101,7 +101,7 @@ subbridge-pallets = { path = "../../pallets/subbridge", default-features = false pallet-phala-world = { path = "../../pallets/phala-world", default-features = false } [build-dependencies] -substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33", optional = true } +substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.36", optional = true } [features] default = ["std", "include-wasm"] diff --git a/runtime/thala/src/constants.rs b/runtime/thala/src/constants.rs index 84ffaf41..60478dfb 100644 --- a/runtime/thala/src/constants.rs +++ b/runtime/thala/src/constants.rs @@ -34,7 +34,7 @@ pub mod currency { /// Fee-related. 
pub mod fee { use frame_support::weights::{ - constants::{ExtrinsicBaseWeight, WEIGHT_PER_SECOND}, + constants::{ExtrinsicBaseWeight, WEIGHT_REF_TIME_PER_SECOND}, WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial, }; use parachains_common::Balance; @@ -74,7 +74,7 @@ pub mod fee { // one cent cost per tx let base_tx_fee = super::currency::CENTS; let base_weight = Balance::from(ExtrinsicBaseWeight::get().ref_time()); - let tx_per_second = (WEIGHT_PER_SECOND.ref_time() as u128) / base_weight; + let tx_per_second = (WEIGHT_REF_TIME_PER_SECOND as u128) / base_weight; base_tx_fee * tx_per_second } } diff --git a/runtime/thala/src/lib.rs b/runtime/thala/src/lib.rs index ddcd9980..e859dbe5 100644 --- a/runtime/thala/src/lib.rs +++ b/runtime/thala/src/lib.rs @@ -78,7 +78,7 @@ pub use frame_support::{ WithdrawReasons, }, weights::{ - constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND}, + constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_REF_TIME_PER_SECOND}, ConstantMultiplier, IdentityFee, Weight, }, pallet_prelude::Get, @@ -707,6 +707,7 @@ impl pallet_assets::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Balance = Balance; type AssetId = u32; + type AssetIdParameter = codec::Compact; type Currency = Balances; type CreateOrigin = AsEnsureOriginWithArg>; type ForceOrigin = EnsureRoot; @@ -716,9 +717,12 @@ impl pallet_assets::Config for Runtime { type MetadataDepositPerByte = MetadataDepositPerByte; type ApprovalDeposit = ApprovalDeposit; type StringLimit = AssetsStringLimit; + type RemoveItemsLimit = ConstU32<1000>; type Freezer = (); type Extra = (); type WeightInfo = pallet_assets::weights::SubstrateWeight; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = (); } parameter_types! { @@ -1055,7 +1059,7 @@ pub type FungiblesTransactor = FungiblesAdapter< parameter_types! { pub NativeExecutionPrice: u128 = pha_per_second(); - pub WeightPerSecond: XCMWeight = WEIGHT_PER_SECOND.ref_time(); + pub WeightPerSecond: XCMWeight = WEIGHT_REF_TIME_PER_SECOND; } pub struct XcmConfig; @@ -1899,22 +1903,21 @@ impl_runtime_apis! { #[cfg(feature = "try-runtime")] impl frame_try_runtime::TryRuntime for Runtime { - fn on_runtime_upgrade() -> (Weight, Weight) { - log::info!("try-runtime::on_runtime_upgrade statemine."); - let weight = Executive::try_runtime_upgrade().unwrap(); + fn on_runtime_upgrade(checks: bool) -> (Weight, Weight) { + let weight = Executive::try_runtime_upgrade(checks).unwrap(); (weight, RuntimeBlockWeights::get().max_block) } - fn execute_block(block: Block, state_root_check: bool, select: frame_try_runtime::TryStateSelect) -> Weight { - log::info!( - target: "runtime::statemine", "try-runtime: executing block #{} ({:?}) / root checks: {:?} / sanity-checks: {:?}", - block.header.number, - block.header.hash(), - state_root_check, - select, - ); - Executive::try_execute_block(block, state_root_check, select).expect("try_execute_block failed") - } + fn execute_block( + block: Block, + state_root_check: bool, + signature_check: bool, + select: frame_try_runtime::TryStateSelect, + ) -> Weight { + // NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to + // have a backtrace here. 
+ Executive::try_execute_block(block, state_root_check, signature_check, select).unwrap() + } } #[cfg(feature = "runtime-benchmarks")] diff --git a/substrate/client/db/Cargo.toml b/substrate/client/db/Cargo.toml deleted file mode 100644 index 967fe79b..00000000 --- a/substrate/client/db/Cargo.toml +++ /dev/null @@ -1,58 +0,0 @@ -[package] -name = "sc-client-db" -version = "0.10.0-dev" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.io" -repository = "https://github.com/paritytech/substrate/" -description = "Client backend that uses RocksDB database as storage." -readme = "README.md" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", features = [ - "derive", -] } -hash-db = "0.15.2" -kvdb = "0.12.0" -kvdb-memorydb = "0.12.0" -kvdb-rocksdb = { version = "0.16.0", optional = true } -linked-hash-map = "0.5.4" -log = "0.4.17" -parity-db = "0.4.2" -parking_lot = "0.12.1" -sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sc-state-db = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-arithmetic = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-database = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } - -[dev-dependencies] -criterion = "0.3.3" -kvdb-rocksdb = "0.16.0" -rand = "0.8.4" -tempfile = "3.1.0" -quickcheck = { version = "1.0.3", default-features = false } -kitchensink-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -sp-tracing = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } -substrate-test-runtime-client = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" } - -[features] -default = [] -test-helpers = [] -runtime-benchmarks = [] -rocksdb = ["kvdb-rocksdb"] - -[[bench]] -name = "state_access" -harness = false - -[lib] -bench = false diff --git a/substrate/client/db/README.md b/substrate/client/db/README.md deleted file mode 100644 index e5fb3fce..00000000 --- a/substrate/client/db/README.md +++ /dev/null @@ -1,11 +0,0 @@ -Client backend that is backed by a database. - -# Canonicality vs. Finality - -Finality indicates that a block will not be reverted, according to the consensus algorithm, -while canonicality indicates that the block may be reverted, but we will be unable to do so, -having discarded heavy state that will allow a chain reorganization. - -Finality implies canonicality but not vice-versa. - -License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/substrate/client/db/benches/state_access.rs b/substrate/client/db/benches/state_access.rs deleted file mode 100644 index bab79fe7..00000000 --- a/substrate/client/db/benches/state_access.rs +++ /dev/null @@ -1,311 +0,0 @@ -// This file is part of Substrate. 
- -// Copyright (C) 2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; -use rand::{distributions::Uniform, rngs::StdRng, Rng, SeedableRng}; -use sc_client_api::{Backend as _, BlockImportOperation, NewBlockState, StateBackend}; -use sc_client_db::{Backend, BlocksPruning, DatabaseSettings, DatabaseSource, PruningMode}; -use sp_core::H256; -use sp_runtime::{ - testing::{Block as RawBlock, ExtrinsicWrapper, Header}, - StateVersion, Storage, -}; -use tempfile::TempDir; - -pub(crate) type Block = RawBlock>; - -fn insert_blocks(db: &Backend, storage: Vec<(Vec, Vec)>) -> H256 { - let mut op = db.begin_operation().unwrap(); - let mut header = Header { - number: 0, - parent_hash: Default::default(), - state_root: Default::default(), - digest: Default::default(), - extrinsics_root: Default::default(), - }; - - header.state_root = op - .set_genesis_state( - Storage { - top: vec![( - sp_core::storage::well_known_keys::CODE.to_vec(), - kitchensink_runtime::wasm_binary_unwrap().to_vec(), - )] - .into_iter() - .collect(), - children_default: Default::default(), - }, - true, - StateVersion::V1, - ) - .unwrap(); - - op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best) - .unwrap(); - - db.commit_operation(op).unwrap(); - - let mut number = 1; - let mut parent_hash = header.hash(); - - for i in 0..10 { - let mut op = db.begin_operation().unwrap(); - - db.begin_state_operation(&mut op, parent_hash).unwrap(); - - let mut header = Header { - number, - parent_hash, - state_root: Default::default(), - digest: Default::default(), - extrinsics_root: Default::default(), - }; - - let changes = storage - .iter() - .skip(i * 100_000) - .take(100_000) - .map(|(k, v)| (k.clone(), Some(v.clone()))) - .collect::>(); - - let (state_root, tx) = db.state_at(parent_hash).unwrap().storage_root( - changes.iter().map(|(k, v)| (k.as_slice(), v.as_deref())), - StateVersion::V1, - ); - header.state_root = state_root; - - op.update_db_storage(tx).unwrap(); - op.update_storage(changes.clone(), Default::default()).unwrap(); - - op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best) - .unwrap(); - - db.commit_operation(op).unwrap(); - - number += 1; - parent_hash = header.hash(); - } - - parent_hash -} - -enum BenchmarkConfig { - NoCache, - TrieNodeCache, -} - -fn create_backend(config: BenchmarkConfig, temp_dir: &TempDir) -> Backend { - let path = temp_dir.path().to_owned(); - - let trie_cache_maximum_size = match config { - BenchmarkConfig::NoCache => None, - BenchmarkConfig::TrieNodeCache => Some(2 * 1024 * 1024 * 1024), - }; - - let settings = DatabaseSettings { - trie_cache_maximum_size, - state_pruning: Some(PruningMode::ArchiveAll), - source: DatabaseSource::ParityDb { path }, - blocks_pruning: BlocksPruning::KeepAll, - }; - - 
Backend::new(settings, 100).expect("Creates backend") -} - -/// Generate the storage that will be used for the benchmark -/// -/// Returns the `Vec` and the `Vec<(key, value)>` -fn generate_storage() -> (Vec>, Vec<(Vec, Vec)>) { - let mut rng = StdRng::seed_from_u64(353893213); - - let mut storage = Vec::new(); - let mut keys = Vec::new(); - - for _ in 0..1_000_000 { - let key_len: usize = rng.gen_range(32..128); - let key = (&mut rng) - .sample_iter(Uniform::new_inclusive(0, 255)) - .take(key_len) - .collect::>(); - - let value_len: usize = rng.gen_range(20..60); - let value = (&mut rng) - .sample_iter(Uniform::new_inclusive(0, 255)) - .take(value_len) - .collect::>(); - - keys.push(key.clone()); - storage.push((key, value)); - } - - (keys, storage) -} - -fn state_access_benchmarks(c: &mut Criterion) { - sp_tracing::try_init_simple(); - - let (keys, storage) = generate_storage(); - let path = TempDir::new().expect("Creates temporary directory"); - - let block_hash = { - let backend = create_backend(BenchmarkConfig::NoCache, &path); - insert_blocks(&backend, storage.clone()) - }; - - let mut group = c.benchmark_group("Reading entire state"); - group.sample_size(20); - - let mut bench_multiple_values = |config, desc, multiplier| { - let backend = create_backend(config, &path); - - group.bench_function(desc, |b| { - b.iter_batched( - || backend.state_at(block_hash).expect("Creates state"), - |state| { - for key in keys.iter().cycle().take(keys.len() * multiplier) { - let _ = state.storage(&key).expect("Doesn't fail").unwrap(); - } - }, - BatchSize::SmallInput, - ) - }); - }; - - bench_multiple_values( - BenchmarkConfig::TrieNodeCache, - "with trie node cache and reading each key once", - 1, - ); - bench_multiple_values(BenchmarkConfig::NoCache, "no cache and reading each key once", 1); - - bench_multiple_values( - BenchmarkConfig::TrieNodeCache, - "with trie node cache and reading 4 times each key in a row", - 4, - ); - bench_multiple_values( - BenchmarkConfig::NoCache, - "no cache and reading 4 times each key in a row", - 4, - ); - - group.finish(); - - let mut group = c.benchmark_group("Reading a single value"); - - let mut bench_single_value = |config, desc, multiplier| { - let backend = create_backend(config, &path); - - group.bench_function(desc, |b| { - b.iter_batched( - || backend.state_at(block_hash).expect("Creates state"), - |state| { - for key in keys.iter().take(1).cycle().take(multiplier) { - let _ = state.storage(&key).expect("Doesn't fail").unwrap(); - } - }, - BatchSize::SmallInput, - ) - }); - }; - - bench_single_value( - BenchmarkConfig::TrieNodeCache, - "with trie node cache and reading the key once", - 1, - ); - bench_single_value(BenchmarkConfig::NoCache, "no cache and reading the key once", 1); - - bench_single_value( - BenchmarkConfig::TrieNodeCache, - "with trie node cache and reading 4 times each key in a row", - 4, - ); - bench_single_value( - BenchmarkConfig::NoCache, - "no cache and reading 4 times each key in a row", - 4, - ); - - group.finish(); - - let mut group = c.benchmark_group("Hashing a value"); - - let mut bench_single_value = |config, desc, multiplier| { - let backend = create_backend(config, &path); - - group.bench_function(desc, |b| { - b.iter_batched( - || backend.state_at(block_hash).expect("Creates state"), - |state| { - for key in keys.iter().take(1).cycle().take(multiplier) { - let _ = state.storage_hash(&key).expect("Doesn't fail").unwrap(); - } - }, - BatchSize::SmallInput, - ) - }); - }; - - bench_single_value( - 
BenchmarkConfig::TrieNodeCache, - "with trie node cache and hashing the key once", - 1, - ); - bench_single_value(BenchmarkConfig::NoCache, "no cache and hashing the key once", 1); - - bench_single_value( - BenchmarkConfig::TrieNodeCache, - "with trie node cache and hashing 4 times each key in a row", - 4, - ); - bench_single_value( - BenchmarkConfig::NoCache, - "no cache and hashing 4 times each key in a row", - 4, - ); - - group.finish(); - - let mut group = c.benchmark_group("Hashing `:code`"); - - let mut bench_single_value = |config, desc| { - let backend = create_backend(config, &path); - - group.bench_function(desc, |b| { - b.iter_batched( - || backend.state_at(block_hash).expect("Creates state"), - |state| { - let _ = state - .storage_hash(sp_core::storage::well_known_keys::CODE) - .expect("Doesn't fail") - .unwrap(); - }, - BatchSize::SmallInput, - ) - }); - }; - - bench_single_value(BenchmarkConfig::TrieNodeCache, "with trie node cache"); - bench_single_value(BenchmarkConfig::NoCache, "no cache"); - - group.finish(); -} - -criterion_group!(benches, state_access_benchmarks); -criterion_main!(benches); diff --git a/substrate/client/db/src/bench.rs b/substrate/client/db/src/bench.rs deleted file mode 100644 index 13d91fff..00000000 --- a/substrate/client/db/src/bench.rs +++ /dev/null @@ -1,673 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! State backend that's useful for benchmarking - -use crate::{DbState, DbStateBuilder}; -use hash_db::{Hasher, Prefix}; -use kvdb::{DBTransaction, KeyValueDB}; -use linked_hash_map::LinkedHashMap; -use sp_core::{ - hexdisplay::HexDisplay, - storage::{ChildInfo, TrackedStorageKey}, -}; -use sp_runtime::{ - traits::{Block as BlockT, HashFor}, - StateVersion, Storage, -}; -use sp_state_machine::{ - backend::Backend as StateBackend, ChildStorageCollection, DBValue, StorageCollection, -}; -use sp_trie::{ - cache::{CacheSize, SharedTrieCache}, - prefixed_key, MemoryDB, -}; -use std::{ - cell::{Cell, RefCell}, - collections::HashMap, - sync::Arc, -}; - -type State = DbState; - -struct StorageDb { - db: Arc, - _block: std::marker::PhantomData, -} - -impl sp_state_machine::Storage> for StorageDb { - fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { - let prefixed_key = prefixed_key::>(key, prefix); - self.db - .get(0, &prefixed_key) - .map_err(|e| format!("Database backend error: {:?}", e)) - } -} - -/// State that manages the backend database reference. Allows runtime to control the database. -pub struct BenchmarkingState { - root: Cell, - genesis_root: B::Hash, - state: RefCell>>, - db: Cell>>, - genesis: HashMap, (Vec, i32)>, - record: Cell>>, - /// Key tracker for keys in the main trie. 
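// Illustrative sketch, not part of the deleted sources in this patch: the removed
// `state_access.rs` bench above follows the standard Criterion pattern of doing the
// expensive setup in `iter_batched`'s setup closure so that only the read path is
// measured. A minimal, self-contained version of that pattern (the `build_data` and
// `sum_all` names are invented for this example) looks like this:
use criterion::{criterion_group, criterion_main, BatchSize, Criterion};

fn build_data() -> Vec<u64> {
    (0..10_000u64).collect()
}

fn sum_all(data: Vec<u64>) -> u64 {
    data.iter().copied().sum()
}

fn state_read_pattern(c: &mut Criterion) {
    let mut group = c.benchmark_group("Reading entire state");
    group.sample_size(20);
    group.bench_function("sum all values", |b| {
        // The setup closure runs outside the measurement; only `sum_all` is timed.
        b.iter_batched(build_data, sum_all, BatchSize::SmallInput)
    });
    group.finish();
}

criterion_group!(benches, state_read_pattern);
criterion_main!(benches);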
- /// We track the total number of reads and writes to these keys, - /// not de-duplicated for repeats. - main_key_tracker: RefCell, TrackedStorageKey>>, - /// Key tracker for keys in a child trie. - /// Child trie are identified by their storage key (i.e. `ChildInfo::storage_key()`) - /// We track the total number of reads and writes to these keys, - /// not de-duplicated for repeats. - child_key_tracker: RefCell, LinkedHashMap, TrackedStorageKey>>>, - whitelist: RefCell>, - proof_recorder: Option>>, - proof_recorder_root: Cell, - enable_tracking: bool, - shared_trie_cache: SharedTrieCache>, -} - -impl BenchmarkingState { - /// Create a new instance that creates a database in a temporary dir. - pub fn new( - genesis: Storage, - _cache_size_mb: Option, - record_proof: bool, - enable_tracking: bool, - ) -> Result { - let state_version = sp_runtime::StateVersion::default(); - let mut root = B::Hash::default(); - let mut mdb = MemoryDB::>::default(); - sp_trie::trie_types::TrieDBMutBuilderV1::>::new(&mut mdb, &mut root).build(); - - let mut state = BenchmarkingState { - state: RefCell::new(None), - db: Cell::new(None), - root: Cell::new(root), - genesis: Default::default(), - genesis_root: Default::default(), - record: Default::default(), - main_key_tracker: Default::default(), - child_key_tracker: Default::default(), - whitelist: Default::default(), - proof_recorder: record_proof.then(Default::default), - proof_recorder_root: Cell::new(root), - enable_tracking, - // Enable the cache, but do not sync anything to the shared state. - shared_trie_cache: SharedTrieCache::new(CacheSize::Maximum(0)), - }; - - state.add_whitelist_to_tracker(); - - state.reopen()?; - let child_delta = genesis.children_default.values().map(|child_content| { - ( - &child_content.child_info, - child_content.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), - ) - }); - let (root, transaction): (B::Hash, _) = - state.state.borrow_mut().as_mut().unwrap().full_storage_root( - genesis.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), - child_delta, - state_version, - ); - state.genesis = transaction.clone().drain(); - state.genesis_root = root; - state.commit(root, transaction, Vec::new(), Vec::new())?; - state.record.take(); - Ok(state) - } - - fn reopen(&self) -> Result<(), String> { - *self.state.borrow_mut() = None; - let db = match self.db.take() { - Some(db) => db, - None => Arc::new(kvdb_memorydb::create(1)), - }; - self.db.set(Some(db.clone())); - if let Some(recorder) = &self.proof_recorder { - recorder.reset(); - self.proof_recorder_root.set(self.root.get()); - } - let storage_db = Arc::new(StorageDb:: { db, _block: Default::default() }); - *self.state.borrow_mut() = Some( - DbStateBuilder::::new(storage_db, self.root.get()) - .with_optional_recorder(self.proof_recorder.clone()) - .with_cache(self.shared_trie_cache.local_cache()) - .build(), - ); - Ok(()) - } - - fn add_whitelist_to_tracker(&self) { - let mut main_key_tracker = self.main_key_tracker.borrow_mut(); - - let whitelist = self.whitelist.borrow(); - - whitelist.iter().for_each(|key| { - let mut whitelisted = TrackedStorageKey::new(key.key.clone()); - whitelisted.whitelist(); - main_key_tracker.insert(key.key.clone(), whitelisted); - }); - } - - fn wipe_tracker(&self) { - *self.main_key_tracker.borrow_mut() = LinkedHashMap::new(); - *self.child_key_tracker.borrow_mut() = LinkedHashMap::new(); - self.add_whitelist_to_tracker(); - } - - // Childtrie is identified by its storage key (i.e. 
`ChildInfo::storage_key`) - fn add_read_key(&self, childtrie: Option<&[u8]>, key: &[u8]) { - if !self.enable_tracking { - return - } - - let mut child_key_tracker = self.child_key_tracker.borrow_mut(); - let mut main_key_tracker = self.main_key_tracker.borrow_mut(); - - let key_tracker = if let Some(childtrie) = childtrie { - child_key_tracker.entry(childtrie.to_vec()).or_insert_with(LinkedHashMap::new) - } else { - &mut main_key_tracker - }; - - let should_log = match key_tracker.get_mut(key) { - None => { - let mut has_been_read = TrackedStorageKey::new(key.to_vec()); - has_been_read.add_read(); - key_tracker.insert(key.to_vec(), has_been_read); - true - }, - Some(tracker) => { - let should_log = !tracker.has_been_read(); - tracker.add_read(); - should_log - }, - }; - - if should_log { - if let Some(childtrie) = childtrie { - log::trace!( - target: "benchmark", - "Childtrie Read: {} {}", HexDisplay::from(&childtrie), HexDisplay::from(&key) - ); - } else { - log::trace!(target: "benchmark", "Read: {}", HexDisplay::from(&key)); - } - } - } - - // Childtrie is identified by its storage key (i.e. `ChildInfo::storage_key`) - fn add_write_key(&self, childtrie: Option<&[u8]>, key: &[u8]) { - if !self.enable_tracking { - return - } - - let mut child_key_tracker = self.child_key_tracker.borrow_mut(); - let mut main_key_tracker = self.main_key_tracker.borrow_mut(); - - let key_tracker = if let Some(childtrie) = childtrie { - child_key_tracker.entry(childtrie.to_vec()).or_insert_with(LinkedHashMap::new) - } else { - &mut main_key_tracker - }; - - // If we have written to the key, we also consider that we have read from it. - let should_log = match key_tracker.get_mut(key) { - None => { - let mut has_been_written = TrackedStorageKey::new(key.to_vec()); - has_been_written.add_write(); - key_tracker.insert(key.to_vec(), has_been_written); - true - }, - Some(tracker) => { - let should_log = !tracker.has_been_written(); - tracker.add_write(); - should_log - }, - }; - - if should_log { - if let Some(childtrie) = childtrie { - log::trace!( - target: "benchmark", - "Childtrie Write: {} {}", HexDisplay::from(&childtrie), HexDisplay::from(&key) - ); - } else { - log::trace!(target: "benchmark", "Write: {}", HexDisplay::from(&key)); - } - } - } - - // Return all the tracked storage keys among main and child trie. - fn all_trackers(&self) -> Vec { - let mut all_trackers = Vec::new(); - - self.main_key_tracker.borrow().iter().for_each(|(_, tracker)| { - all_trackers.push(tracker.clone()); - }); - - self.child_key_tracker.borrow().iter().for_each(|(_, child_tracker)| { - child_tracker.iter().for_each(|(_, tracker)| { - all_trackers.push(tracker.clone()); - }); - }); - - all_trackers - } -} - -fn state_err() -> String { - "State is not open".into() -} - -impl StateBackend> for BenchmarkingState { - type Error = as StateBackend>>::Error; - type Transaction = as StateBackend>>::Transaction; - type TrieBackendStorage = as StateBackend>>::TrieBackendStorage; - - fn storage(&self, key: &[u8]) -> Result>, Self::Error> { - self.add_read_key(None, key); - self.state.borrow().as_ref().ok_or_else(state_err)?.storage(key) - } - - fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { - self.add_read_key(None, key); - self.state.borrow().as_ref().ok_or_else(state_err)?.storage_hash(key) - } - - fn child_storage( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result>, Self::Error> { - self.add_read_key(Some(child_info.storage_key()), key); - self.state - .borrow() - .as_ref() - .ok_or_else(state_err)? 
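// Illustrative, std-only sketch of the tracking done by `add_read_key` / `add_write_key`
// above: `BenchmarkingState` keeps one counter pair per storage key, and only the first
// access of a key is reported while later accesses count as repeats. `KeyTracker` is a
// simplified stand-in for the `LinkedHashMap<Vec<u8>, TrackedStorageKey>` used there.
use std::collections::HashMap;

#[derive(Default)]
struct KeyTracker {
    // key -> (reads, writes)
    counters: HashMap<Vec<u8>, (u32, u32)>,
}

impl KeyTracker {
    /// Record a read; returns true only for the first read of the key,
    /// mirroring the `should_log` flag in the deleted code.
    fn add_read(&mut self, key: &[u8]) -> bool {
        let entry = self.counters.entry(key.to_vec()).or_insert((0, 0));
        entry.0 += 1;
        entry.0 == 1
    }

    /// Record a write; returns true only for the first write of the key.
    fn add_write(&mut self, key: &[u8]) -> bool {
        let entry = self.counters.entry(key.to_vec()).or_insert((0, 0));
        entry.1 += 1;
        entry.1 == 1
    }
}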
- .child_storage(child_info, key) - } - - fn child_storage_hash( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result, Self::Error> { - self.add_read_key(Some(child_info.storage_key()), key); - self.state - .borrow() - .as_ref() - .ok_or_else(state_err)? - .child_storage_hash(child_info, key) - } - - fn exists_storage(&self, key: &[u8]) -> Result { - self.add_read_key(None, key); - self.state.borrow().as_ref().ok_or_else(state_err)?.exists_storage(key) - } - - fn exists_child_storage( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result { - self.add_read_key(Some(child_info.storage_key()), key); - self.state - .borrow() - .as_ref() - .ok_or_else(state_err)? - .exists_child_storage(child_info, key) - } - - fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { - self.add_read_key(None, key); - self.state.borrow().as_ref().ok_or_else(state_err)?.next_storage_key(key) - } - - fn next_child_storage_key( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result>, Self::Error> { - self.add_read_key(Some(child_info.storage_key()), key); - self.state - .borrow() - .as_ref() - .ok_or_else(state_err)? - .next_child_storage_key(child_info, key) - } - - fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { - if let Some(ref state) = *self.state.borrow() { - state.for_keys_with_prefix(prefix, f) - } - } - - fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { - if let Some(ref state) = *self.state.borrow() { - state.for_key_values_with_prefix(prefix, f) - } - } - - fn apply_to_key_values_while, Vec) -> bool>( - &self, - child_info: Option<&ChildInfo>, - prefix: Option<&[u8]>, - start_at: Option<&[u8]>, - f: F, - allow_missing: bool, - ) -> Result { - self.state.borrow().as_ref().ok_or_else(state_err)?.apply_to_key_values_while( - child_info, - prefix, - start_at, - f, - allow_missing, - ) - } - - fn apply_to_keys_while bool>( - &self, - child_info: Option<&ChildInfo>, - prefix: Option<&[u8]>, - start_at: Option<&[u8]>, - f: F, - ) { - if let Some(ref state) = *self.state.borrow() { - state.apply_to_keys_while(child_info, prefix, start_at, f) - } - } - - fn for_child_keys_with_prefix( - &self, - child_info: &ChildInfo, - prefix: &[u8], - f: F, - ) { - if let Some(ref state) = *self.state.borrow() { - state.for_child_keys_with_prefix(child_info, prefix, f) - } - } - - fn storage_root<'a>( - &self, - delta: impl Iterator)>, - state_version: StateVersion, - ) -> (B::Hash, Self::Transaction) - where - B::Hash: Ord, - { - self.state - .borrow() - .as_ref() - .map_or(Default::default(), |s| s.storage_root(delta, state_version)) - } - - fn child_storage_root<'a>( - &self, - child_info: &ChildInfo, - delta: impl Iterator)>, - state_version: StateVersion, - ) -> (B::Hash, bool, Self::Transaction) - where - B::Hash: Ord, - { - self.state - .borrow() - .as_ref() - .map_or(Default::default(), |s| s.child_storage_root(child_info, delta, state_version)) - } - - fn pairs(&self) -> Vec<(Vec, Vec)> { - self.state.borrow().as_ref().map_or(Default::default(), |s| s.pairs()) - } - - fn keys(&self, prefix: &[u8]) -> Vec> { - self.state.borrow().as_ref().map_or(Default::default(), |s| s.keys(prefix)) - } - - fn child_keys(&self, child_info: &ChildInfo, prefix: &[u8]) -> Vec> { - self.state - .borrow() - .as_ref() - .map_or(Default::default(), |s| s.child_keys(child_info, prefix)) - } - - fn commit( - &self, - storage_root: as Hasher>::Out, - mut transaction: Self::Transaction, - main_storage_changes: StorageCollection, - child_storage_changes: ChildStorageCollection, - ) -> 
Result<(), Self::Error> { - if let Some(db) = self.db.take() { - let mut db_transaction = DBTransaction::new(); - let changes = transaction.drain(); - let mut keys = Vec::with_capacity(changes.len()); - for (key, (val, rc)) in changes { - if rc > 0 { - db_transaction.put(0, &key, &val); - } else if rc < 0 { - db_transaction.delete(0, &key); - } - keys.push(key); - } - let mut record = self.record.take(); - record.extend(keys); - self.record.set(record); - db.write(db_transaction) - .map_err(|_| String::from("Error committing transaction"))?; - self.root.set(storage_root); - self.db.set(Some(db)); - - // Track DB Writes - main_storage_changes.iter().for_each(|(key, _)| { - self.add_write_key(None, key); - }); - child_storage_changes.iter().for_each(|(child_storage_key, storage_changes)| { - storage_changes.iter().for_each(|(key, _)| { - self.add_write_key(Some(child_storage_key), key); - }) - }); - } else { - return Err("Trying to commit to a closed db".into()) - } - self.reopen() - } - - fn wipe(&self) -> Result<(), Self::Error> { - // Restore to genesis - let record = self.record.take(); - if let Some(db) = self.db.take() { - let mut db_transaction = DBTransaction::new(); - for key in record { - match self.genesis.get(&key) { - Some((v, _)) => db_transaction.put(0, &key, v), - None => db_transaction.delete(0, &key), - } - } - db.write(db_transaction) - .map_err(|_| String::from("Error committing transaction"))?; - self.db.set(Some(db)); - } - - self.root.set(self.genesis_root); - self.reopen()?; - self.wipe_tracker(); - Ok(()) - } - - /// Get the key tracking information for the state db. - /// 1. `reads` - Total number of DB reads. - /// 2. `repeat_reads` - Total number of in-memory reads. - /// 3. `writes` - Total number of DB writes. - /// 4. `repeat_writes` - Total number of in-memory writes. - fn read_write_count(&self) -> (u32, u32, u32, u32) { - let mut reads = 0; - let mut repeat_reads = 0; - let mut writes = 0; - let mut repeat_writes = 0; - - self.all_trackers().iter().for_each(|tracker| { - if !tracker.whitelisted { - if tracker.reads > 0 { - reads += 1; - repeat_reads += tracker.reads - 1; - } - - if tracker.writes > 0 { - writes += 1; - repeat_writes += tracker.writes - 1; - } - } - }); - (reads, repeat_reads, writes, repeat_writes) - } - - /// Reset the key tracking information for the state db. - fn reset_read_write_count(&self) { - self.wipe_tracker() - } - - fn get_whitelist(&self) -> Vec { - self.whitelist.borrow().to_vec() - } - - fn set_whitelist(&self, new: Vec) { - *self.whitelist.borrow_mut() = new; - } - - fn get_read_and_written_keys(&self) -> Vec<(Vec, u32, u32, bool)> { - // We only track at the level of a key-prefix and not whitelisted for now for memory size. - // TODO: Refactor to enable full storage key transparency, where we can remove the - // `prefix_key_tracker`. - let mut prefix_key_tracker = LinkedHashMap::, (u32, u32, bool)>::new(); - self.all_trackers().iter().for_each(|tracker| { - if !tracker.whitelisted { - let prefix_length = tracker.key.len().min(32); - let prefix = tracker.key[0..prefix_length].to_vec(); - // each read / write of a specific key is counted at most one time, since - // additional reads / writes happen in the memory overlay. 
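// Illustrative sketch of the accounting in `read_write_count` above: for every tracked,
// non-whitelisted key the first access is counted as a database read/write and every
// further access as an in-memory repeat. Written here over plain (reads, writes) pairs
// so it stands alone from the deleted types.
fn read_write_count(per_key_counters: &[(u32, u32)]) -> (u32, u32, u32, u32) {
    let (mut reads, mut repeat_reads, mut writes, mut repeat_writes) = (0, 0, 0, 0);
    for &(key_reads, key_writes) in per_key_counters {
        if key_reads > 0 {
            reads += 1;
            repeat_reads += key_reads - 1;
        }
        if key_writes > 0 {
            writes += 1;
            repeat_writes += key_writes - 1;
        }
    }
    (reads, repeat_reads, writes, repeat_writes)
}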
- let reads = tracker.reads.min(1); - let writes = tracker.writes.min(1); - if let Some(prefix_tracker) = prefix_key_tracker.get_mut(&prefix) { - prefix_tracker.0 += reads; - prefix_tracker.1 += writes; - } else { - prefix_key_tracker.insert(prefix, (reads, writes, tracker.whitelisted)); - } - } - }); - - prefix_key_tracker - .iter() - .map(|(key, tracker)| -> (Vec, u32, u32, bool) { - (key.to_vec(), tracker.0, tracker.1, tracker.2) - }) - .collect::>() - } - - fn register_overlay_stats(&self, stats: &sp_state_machine::StateMachineStats) { - self.state.borrow_mut().as_mut().map(|s| s.register_overlay_stats(stats)); - } - - fn usage_info(&self) -> sp_state_machine::UsageInfo { - self.state - .borrow() - .as_ref() - .map_or(sp_state_machine::UsageInfo::empty(), |s| s.usage_info()) - } - - fn proof_size(&self) -> Option { - self.proof_recorder.as_ref().map(|recorder| { - let proof_size = recorder.estimate_encoded_size() as u32; - - let proof = recorder.to_storage_proof(); - - let proof_recorder_root = self.proof_recorder_root.get(); - if proof_recorder_root == Default::default() || proof_size == 1 { - // empty trie - proof_size - } else { - if let Some(size) = proof.encoded_compact_size::>(proof_recorder_root) { - size as u32 - } else { - panic!( - "proof rec root {:?}, root {:?}, genesis {:?}, rec_len {:?}", - self.proof_recorder_root.get(), - self.root.get(), - self.genesis_root, - proof_size, - ); - } - } - }) - } -} - -impl std::fmt::Debug for BenchmarkingState { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "Bench DB") - } -} - -#[cfg(test)] -mod test { - use crate::bench::BenchmarkingState; - use sp_state_machine::backend::Backend as _; - - #[test] - fn read_to_main_and_child_tries() { - let bench_state = - BenchmarkingState::::new(Default::default(), None, false, true) - .unwrap(); - - for _ in 0..2 { - let child1 = sp_core::storage::ChildInfo::new_default(b"child1"); - let child2 = sp_core::storage::ChildInfo::new_default(b"child2"); - - bench_state.storage(b"foo").unwrap(); - bench_state.child_storage(&child1, b"foo").unwrap(); - bench_state.child_storage(&child2, b"foo").unwrap(); - - bench_state.storage(b"bar").unwrap(); - bench_state.child_storage(&child1, b"bar").unwrap(); - bench_state.child_storage(&child2, b"bar").unwrap(); - - bench_state - .commit( - Default::default(), - Default::default(), - vec![("foo".as_bytes().to_vec(), None)], - vec![("child1".as_bytes().to_vec(), vec![("foo".as_bytes().to_vec(), None)])], - ) - .unwrap(); - - let rw_tracker = bench_state.read_write_count(); - assert_eq!(rw_tracker.0, 6); - assert_eq!(rw_tracker.1, 0); - assert_eq!(rw_tracker.2, 2); - assert_eq!(rw_tracker.3, 0); - bench_state.wipe().unwrap(); - } - } -} diff --git a/substrate/client/db/src/children.rs b/substrate/client/db/src/children.rs deleted file mode 100644 index 538e5185..00000000 --- a/substrate/client/db/src/children.rs +++ /dev/null @@ -1,123 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Functionality for reading and storing children hashes from db. - -use crate::DbHash; -use codec::{Decode, Encode}; -use sp_blockchain; -use sp_database::{Database, Transaction}; -use std::hash::Hash; - -/// Returns the hashes of the children blocks of the block with `parent_hash`. -pub fn read_children< - K: Eq + Hash + Clone + Encode + Decode, - V: Eq + Hash + Clone + Encode + Decode, ->( - db: &dyn Database, - column: u32, - prefix: &[u8], - parent_hash: K, -) -> sp_blockchain::Result> { - let mut buf = prefix.to_vec(); - parent_hash.using_encoded(|s| buf.extend(s)); - - let raw_val_opt = db.get(column, &buf[..]); - - let raw_val = match raw_val_opt { - Some(val) => val, - None => return Ok(Vec::new()), - }; - - let children: Vec = match Decode::decode(&mut &raw_val[..]) { - Ok(children) => children, - Err(_) => return Err(sp_blockchain::Error::Backend("Error decoding children".into())), - }; - - Ok(children) -} - -/// Insert the key-value pair (`parent_hash`, `children_hashes`) in the transaction. -/// Any existing value is overwritten upon write. -pub fn write_children< - K: Eq + Hash + Clone + Encode + Decode, - V: Eq + Hash + Clone + Encode + Decode, ->( - tx: &mut Transaction, - column: u32, - prefix: &[u8], - parent_hash: K, - children_hashes: V, -) { - let mut key = prefix.to_vec(); - parent_hash.using_encoded(|s| key.extend(s)); - tx.set_from_vec(column, &key[..], children_hashes.encode()); -} - -/// Prepare transaction to remove the children of `parent_hash`. -pub fn remove_children( - tx: &mut Transaction, - column: u32, - prefix: &[u8], - parent_hash: K, -) { - let mut key = prefix.to_vec(); - parent_hash.using_encoded(|s| key.extend(s)); - tx.remove(column, &key); -} - -#[cfg(test)] -mod tests { - use super::*; - use std::sync::Arc; - - #[test] - fn children_write_read_remove() { - const PREFIX: &[u8] = b"children"; - let db = Arc::new(sp_database::MemDb::default()); - - let mut tx = Transaction::new(); - - let mut children1 = Vec::new(); - children1.push(1_3); - children1.push(1_5); - write_children(&mut tx, 0, PREFIX, 1_1, children1); - - let mut children2 = Vec::new(); - children2.push(1_4); - children2.push(1_6); - write_children(&mut tx, 0, PREFIX, 1_2, children2); - - db.commit(tx.clone()).unwrap(); - - let r1: Vec = read_children(&*db, 0, PREFIX, 1_1).expect("(1) Getting r1 failed"); - let r2: Vec = read_children(&*db, 0, PREFIX, 1_2).expect("(1) Getting r2 failed"); - - assert_eq!(r1, vec![1_3, 1_5]); - assert_eq!(r2, vec![1_4, 1_6]); - - remove_children(&mut tx, 0, PREFIX, 1_2); - db.commit(tx).unwrap(); - - let r1: Vec = read_children(&*db, 0, PREFIX, 1_1).expect("(2) Getting r1 failed"); - let r2: Vec = read_children(&*db, 0, PREFIX, 1_2).expect("(2) Getting r2 failed"); - - assert_eq!(r1, vec![1_3, 1_5]); - assert_eq!(r2.len(), 0); - } -} diff --git a/substrate/client/db/src/lib.rs b/substrate/client/db/src/lib.rs deleted file mode 100644 index 8625f1fd..00000000 --- a/substrate/client/db/src/lib.rs +++ /dev/null @@ -1,3797 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. 
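// Illustrative sketch of the lookup-key scheme used by `read_children` / `write_children`
// in the deleted `children.rs` above: the database key is the fixed prefix followed by the
// SCALE-encoded parent hash. A `u32` stands in for the hash type so the example stays
// std-only; SCALE encodes a fixed-width integer as its little-endian bytes, which is what
// `parent_hash.using_encoded(..)` appends in the real code.
fn children_lookup_key(prefix: &[u8], parent: u32) -> Vec<u8> {
    let mut key = prefix.to_vec();
    key.extend_from_slice(&parent.to_le_bytes());
    key
}

fn children_key_demo() {
    let key = children_lookup_key(b"children", 11);
    assert_eq!(&key[..8], b"children");
    assert_eq!(&key[8..], &11u32.to_le_bytes()[..]);
}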
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Client backend that is backed by a database. -//! -//! # Canonicality vs. Finality -//! -//! Finality indicates that a block will not be reverted, according to the consensus algorithm, -//! while canonicality indicates that the block may be reverted, but we will be unable to do so, -//! having discarded heavy state that will allow a chain reorganization. -//! -//! Finality implies canonicality but not vice-versa. - -#![warn(missing_docs)] - -pub mod offchain; - -pub mod bench; - -mod children; -mod parity_db; -mod record_stats_state; -mod stats; -#[cfg(any(feature = "rocksdb", test))] -mod upgrade; -mod utils; - -use linked_hash_map::LinkedHashMap; -use log::{debug, trace, warn}; -use parking_lot::{Mutex, RwLock}; -use std::{ - collections::{HashMap, HashSet}, - io, - path::{Path, PathBuf}, - sync::Arc, -}; - -use crate::{ - record_stats_state::RecordStatsState, - stats::StateUsageStats, - utils::{meta_keys, read_db, read_meta, DatabaseType, Meta}, -}; -use codec::{Decode, Encode}; -use hash_db::Prefix; -use sc_client_api::{ - backend::NewBlockState, - leaves::{FinalizationOutcome, LeafSet}, - utils::is_descendent_of, - IoInfo, MemoryInfo, MemorySize, UsageInfo, -}; -use sc_state_db::{IsPruned, StateDb}; -use sp_arithmetic::traits::Saturating; -use sp_blockchain::{ - well_known_cache_keys, Backend as _, CachedHeaderMetadata, Error as ClientError, HeaderBackend, - HeaderMetadata, HeaderMetadataCache, Result as ClientResult, -}; -use sp_core::{ - offchain::OffchainOverlayedChange, - storage::{well_known_keys, ChildInfo}, -}; -use sp_database::Transaction; -use sp_runtime::{ - generic::BlockId, - traits::{ - Block as BlockT, Hash, HashFor, Header as HeaderT, NumberFor, One, SaturatedConversion, - Zero, - }, - Justification, Justifications, StateVersion, Storage, -}; -use sp_state_machine::{ - backend::{AsTrieBackend, Backend as StateBackend}, - ChildStorageCollection, DBValue, IndexOperation, OffchainChangesCollection, StateMachineStats, - StorageCollection, UsageInfo as StateUsageInfo, -}; -use sp_trie::{cache::SharedTrieCache, prefixed_key, MemoryDB, PrefixedMemoryDB}; - -// Re-export the Database trait so that one can pass an implementation of it. -pub use sc_state_db::PruningMode; -pub use sp_database::Database; - -pub use bench::BenchmarkingState; - -const CACHE_HEADERS: usize = 8; - -/// DB-backed patricia trie state, transaction type is an overlay of changes to commit. -pub type DbState = - sp_state_machine::TrieBackend>>, HashFor>; - -/// Builder for [`DbState`]. -pub type DbStateBuilder = sp_state_machine::TrieBackendBuilder< - Arc>>, - HashFor, ->; - -/// Length of a [`DbHash`]. -const DB_HASH_LEN: usize = 32; - -/// Hash type that this backend uses for the database. -pub type DbHash = sp_core::H256; - -/// An extrinsic entry in the database. 
-#[derive(Debug, Encode, Decode)] -enum DbExtrinsic { - /// Extrinsic that contains indexed data. - Indexed { - /// Hash of the indexed part. - hash: DbHash, - /// Extrinsic header. - header: Vec, - }, - /// Complete extrinsic data. - Full(B::Extrinsic), -} - -/// A reference tracking state. -/// -/// It makes sure that the hash we are using stays pinned in storage -/// until this structure is dropped. -pub struct RefTrackingState { - state: DbState, - storage: Arc>, - parent_hash: Option, -} - -impl RefTrackingState { - fn new(state: DbState, storage: Arc>, parent_hash: Option) -> Self { - RefTrackingState { state, parent_hash, storage } - } -} - -impl Drop for RefTrackingState { - fn drop(&mut self) { - if let Some(hash) = &self.parent_hash { - self.storage.state_db.unpin(hash); - } - } -} - -impl std::fmt::Debug for RefTrackingState { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "Block {:?}", self.parent_hash) - } -} - -impl StateBackend> for RefTrackingState { - type Error = as StateBackend>>::Error; - type Transaction = as StateBackend>>::Transaction; - type TrieBackendStorage = as StateBackend>>::TrieBackendStorage; - - fn storage(&self, key: &[u8]) -> Result>, Self::Error> { - self.state.storage(key) - } - - fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { - self.state.storage_hash(key) - } - - fn child_storage( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result>, Self::Error> { - self.state.child_storage(child_info, key) - } - - fn child_storage_hash( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result, Self::Error> { - self.state.child_storage_hash(child_info, key) - } - - fn exists_storage(&self, key: &[u8]) -> Result { - self.state.exists_storage(key) - } - - fn exists_child_storage( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result { - self.state.exists_child_storage(child_info, key) - } - - fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { - self.state.next_storage_key(key) - } - - fn next_child_storage_key( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result>, Self::Error> { - self.state.next_child_storage_key(child_info, key) - } - - fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { - self.state.for_keys_with_prefix(prefix, f) - } - - fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { - self.state.for_key_values_with_prefix(prefix, f) - } - - fn apply_to_key_values_while, Vec) -> bool>( - &self, - child_info: Option<&ChildInfo>, - prefix: Option<&[u8]>, - start_at: Option<&[u8]>, - f: F, - allow_missing: bool, - ) -> Result { - self.state - .apply_to_key_values_while(child_info, prefix, start_at, f, allow_missing) - } - - fn apply_to_keys_while bool>( - &self, - child_info: Option<&ChildInfo>, - prefix: Option<&[u8]>, - start_at: Option<&[u8]>, - f: F, - ) { - self.state.apply_to_keys_while(child_info, prefix, start_at, f) - } - - fn for_child_keys_with_prefix( - &self, - child_info: &ChildInfo, - prefix: &[u8], - f: F, - ) { - self.state.for_child_keys_with_prefix(child_info, prefix, f) - } - - fn storage_root<'a>( - &self, - delta: impl Iterator)>, - state_version: StateVersion, - ) -> (B::Hash, Self::Transaction) - where - B::Hash: Ord, - { - self.state.storage_root(delta, state_version) - } - - fn child_storage_root<'a>( - &self, - child_info: &ChildInfo, - delta: impl Iterator)>, - state_version: StateVersion, - ) -> (B::Hash, bool, Self::Transaction) - where - B::Hash: Ord, - { - self.state.child_storage_root(child_info, delta, state_version) - 
} - - fn pairs(&self) -> Vec<(Vec, Vec)> { - self.state.pairs() - } - - fn keys(&self, prefix: &[u8]) -> Vec> { - self.state.keys(prefix) - } - - fn child_keys(&self, child_info: &ChildInfo, prefix: &[u8]) -> Vec> { - self.state.child_keys(child_info, prefix) - } - - fn register_overlay_stats(&self, stats: &StateMachineStats) { - self.state.register_overlay_stats(stats); - } - - fn usage_info(&self) -> StateUsageInfo { - self.state.usage_info() - } -} - -impl AsTrieBackend> for RefTrackingState { - type TrieBackendStorage = as StateBackend>>::TrieBackendStorage; - - fn as_trie_backend( - &self, - ) -> &sp_state_machine::TrieBackend> { - &self.state.as_trie_backend() - } -} - -/// Database settings. -pub struct DatabaseSettings { - /// The maximum trie cache size in bytes. - /// - /// If `None` is given, the cache is disabled. - pub trie_cache_maximum_size: Option, - /// Requested state pruning mode. - pub state_pruning: Option, - /// Where to find the database. - pub source: DatabaseSource, - /// Block pruning mode. - /// - /// NOTE: only finalized blocks are subject for removal! - pub blocks_pruning: BlocksPruning, -} - -/// Block pruning settings. -#[derive(Debug, Clone, Copy, PartialEq)] -pub enum BlocksPruning { - /// Keep full block history, of every block that was ever imported. - KeepAll, - /// Keep full finalized block history. - KeepFinalized, - /// Keep N recent finalized blocks. - Some(u32), -} - -/// Where to find the database.. -#[derive(Debug, Clone)] -pub enum DatabaseSource { - /// Check given path, and see if there is an existing database there. If it's either `RocksDb` - /// or `ParityDb`, use it. If there is none, create a new instance of `ParityDb`. - Auto { - /// Path to the paritydb database. - paritydb_path: PathBuf, - /// Path to the rocksdb database. - rocksdb_path: PathBuf, - /// Cache size in MiB. Used only by `RocksDb` variant of `DatabaseSource`. - cache_size: usize, - }, - /// Load a RocksDB database from a given path. Recommended for most uses. - #[cfg(feature = "rocksdb")] - RocksDb { - /// Path to the database. - path: PathBuf, - /// Cache size in MiB. - cache_size: usize, - }, - - /// Load a ParityDb database from a given path. - ParityDb { - /// Path to the database. - path: PathBuf, - }, - - /// Use a custom already-open database. - Custom { - /// the handle to the custom storage - db: Arc>, - - /// if set, the `create` flag will be required to open such datasource - require_create_flag: bool, - }, -} - -impl DatabaseSource { - /// Return path for databases that are stored on disk. - pub fn path(&self) -> Option<&Path> { - match self { - // as per https://github.com/paritytech/substrate/pull/9500#discussion_r684312550 - // - // IIUC this is needed for polkadot to create its own dbs, so until it can use parity db - // I would think rocksdb, but later parity-db. - DatabaseSource::Auto { paritydb_path, .. } => Some(paritydb_path), - #[cfg(feature = "rocksdb")] - DatabaseSource::RocksDb { path, .. } => Some(path), - DatabaseSource::ParityDb { path } => Some(path), - DatabaseSource::Custom { .. } => None, - } - } - - /// Set path for databases that are stored on disk. - pub fn set_path(&mut self, p: &Path) -> bool { - match self { - DatabaseSource::Auto { ref mut paritydb_path, .. } => { - *paritydb_path = p.into(); - true - }, - #[cfg(feature = "rocksdb")] - DatabaseSource::RocksDb { ref mut path, .. } => { - *path = p.into(); - true - }, - DatabaseSource::ParityDb { ref mut path } => { - *path = p.into(); - true - }, - DatabaseSource::Custom { .. 
} => false, - } - } -} - -impl std::fmt::Display for DatabaseSource { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let name = match self { - DatabaseSource::Auto { .. } => "Auto", - #[cfg(feature = "rocksdb")] - DatabaseSource::RocksDb { .. } => "RocksDb", - DatabaseSource::ParityDb { .. } => "ParityDb", - DatabaseSource::Custom { .. } => "Custom", - }; - write!(f, "{}", name) - } -} - -pub(crate) mod columns { - pub const META: u32 = crate::utils::COLUMN_META; - pub const STATE: u32 = 1; - pub const STATE_META: u32 = 2; - /// maps hashes to lookup keys and numbers to canon hashes. - pub const KEY_LOOKUP: u32 = 3; - pub const HEADER: u32 = 4; - pub const BODY: u32 = 5; - pub const JUSTIFICATIONS: u32 = 6; - pub const AUX: u32 = 8; - /// Offchain workers local storage - pub const OFFCHAIN: u32 = 9; - /// Transactions - pub const TRANSACTION: u32 = 11; - pub const BODY_INDEX: u32 = 12; -} - -struct PendingBlock { - header: Block::Header, - justifications: Option, - body: Option>, - indexed_body: Option>>, - leaf_state: NewBlockState, -} - -// wrapper that implements trait required for state_db -#[derive(Clone)] -struct StateMetaDb(Arc>); - -impl sc_state_db::MetaDb for StateMetaDb { - type Error = sp_database::error::DatabaseError; - - fn get_meta(&self, key: &[u8]) -> Result>, Self::Error> { - Ok(self.0.get(columns::STATE_META, key)) - } -} - -struct MetaUpdate { - pub hash: Block::Hash, - pub number: NumberFor, - pub is_best: bool, - pub is_finalized: bool, - pub with_state: bool, -} - -fn cache_header( - cache: &mut LinkedHashMap>, - hash: Hash, - header: Option
, -) { - cache.insert(hash, header); - while cache.len() > CACHE_HEADERS { - cache.pop_front(); - } -} - -/// Block database -pub struct BlockchainDb { - db: Arc>, - meta: Arc, Block::Hash>>>, - leaves: RwLock>>, - header_metadata_cache: Arc>, - header_cache: Mutex>>, -} - -impl BlockchainDb { - fn new(db: Arc>) -> ClientResult { - let meta = read_meta::(&*db, columns::HEADER)?; - let leaves = LeafSet::read_from_db(&*db, columns::META, meta_keys::LEAF_PREFIX)?; - Ok(BlockchainDb { - db, - leaves: RwLock::new(leaves), - meta: Arc::new(RwLock::new(meta)), - header_metadata_cache: Arc::new(HeaderMetadataCache::default()), - header_cache: Default::default(), - }) - } - - fn update_meta(&self, update: MetaUpdate) { - let MetaUpdate { hash, number, is_best, is_finalized, with_state } = update; - let mut meta = self.meta.write(); - if number.is_zero() { - meta.genesis_hash = hash; - } - - if is_best { - meta.best_number = number; - meta.best_hash = hash; - } - - if is_finalized { - if with_state { - meta.finalized_state = Some((hash, number)); - } - meta.finalized_number = number; - meta.finalized_hash = hash; - } - } - - fn update_block_gap(&self, gap: Option<(NumberFor, NumberFor)>) { - let mut meta = self.meta.write(); - meta.block_gap = gap; - } -} - -impl sc_client_api::blockchain::HeaderBackend for BlockchainDb { - fn header(&self, id: BlockId) -> ClientResult> { - match &id { - BlockId::Hash(h) => { - let mut cache = self.header_cache.lock(); - if let Some(result) = cache.get_refresh(h) { - return Ok(result.clone()) - } - let header = - utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id)?; - cache_header(&mut cache, *h, header.clone()); - Ok(header) - }, - BlockId::Number(_) => - utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id), - } - } - - fn info(&self) -> sc_client_api::blockchain::Info { - let meta = self.meta.read(); - sc_client_api::blockchain::Info { - best_hash: meta.best_hash, - best_number: meta.best_number, - genesis_hash: meta.genesis_hash, - finalized_hash: meta.finalized_hash, - finalized_number: meta.finalized_number, - finalized_state: meta.finalized_state, - number_leaves: self.leaves.read().count(), - block_gap: meta.block_gap, - } - } - - fn status(&self, id: BlockId) -> ClientResult { - let exists = match id { - BlockId::Hash(_) => self.header(id)?.is_some(), - BlockId::Number(n) => n <= self.meta.read().best_number, - }; - match exists { - true => Ok(sc_client_api::blockchain::BlockStatus::InChain), - false => Ok(sc_client_api::blockchain::BlockStatus::Unknown), - } - } - - fn number(&self, hash: Block::Hash) -> ClientResult>> { - Ok(self.header_metadata(hash).ok().map(|header_metadata| header_metadata.number)) - } - - fn hash(&self, number: NumberFor) -> ClientResult> { - self.header(BlockId::Number(number)) - .map(|maybe_header| maybe_header.map(|header| header.hash())) - } -} - -impl sc_client_api::blockchain::Backend for BlockchainDb { - fn body(&self, hash: Block::Hash) -> ClientResult>> { - if let Some(body) = - read_db(&*self.db, columns::KEY_LOOKUP, columns::BODY, BlockId::Hash::(hash))? - { - // Plain body - match Decode::decode(&mut &body[..]) { - Ok(body) => return Ok(Some(body)), - Err(err) => - return Err(sp_blockchain::Error::Backend(format!( - "Error decoding body: {}", - err - ))), - } - } - - if let Some(index) = read_db( - &*self.db, - columns::KEY_LOOKUP, - columns::BODY_INDEX, - BlockId::Hash::(hash), - )? 
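// Illustrative, std-only approximation of `cache_header` above: the blockchain DB keeps
// only the last `CACHE_HEADERS` (8) headers in an insertion-ordered map and evicts the
// oldest entry once it grows past that bound. `BoundedCache` and the `u64` key (standing
// in for `Block::Hash`) are invented for this sketch; the real code uses `LinkedHashMap`.
use std::collections::{HashMap, VecDeque};

struct BoundedCache<V> {
    capacity: usize,
    order: VecDeque<u64>,
    entries: HashMap<u64, V>,
}

impl<V> BoundedCache<V> {
    fn new(capacity: usize) -> Self {
        Self { capacity, order: VecDeque::new(), entries: HashMap::new() }
    }

    fn insert(&mut self, hash: u64, value: V) {
        // Only record insertion order for keys we have not seen before.
        if self.entries.insert(hash, value).is_none() {
            self.order.push_back(hash);
        }
        // Evict the oldest entries until we are back under the bound.
        while self.order.len() > self.capacity {
            if let Some(oldest) = self.order.pop_front() {
                self.entries.remove(&oldest);
            }
        }
    }

    fn get(&self, hash: &u64) -> Option<&V> {
        self.entries.get(hash)
    }
}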
{ - match Vec::>::decode(&mut &index[..]) { - Ok(index) => { - let mut body = Vec::new(); - for ex in index { - match ex { - DbExtrinsic::Indexed { hash, header } => { - match self.db.get(columns::TRANSACTION, hash.as_ref()) { - Some(t) => { - let mut input = - utils::join_input(header.as_ref(), t.as_ref()); - let ex = Block::Extrinsic::decode(&mut input).map_err( - |err| { - sp_blockchain::Error::Backend(format!( - "Error decoding indexed extrinsic: {}", - err - )) - }, - )?; - body.push(ex); - }, - None => - return Err(sp_blockchain::Error::Backend(format!( - "Missing indexed transaction {:?}", - hash - ))), - }; - }, - DbExtrinsic::Full(ex) => { - body.push(ex); - }, - } - } - return Ok(Some(body)) - }, - Err(err) => - return Err(sp_blockchain::Error::Backend(format!( - "Error decoding body list: {}", - err - ))), - } - } - Ok(None) - } - - fn justifications(&self, hash: Block::Hash) -> ClientResult> { - match read_db( - &*self.db, - columns::KEY_LOOKUP, - columns::JUSTIFICATIONS, - BlockId::::Hash(hash), - )? { - Some(justifications) => match Decode::decode(&mut &justifications[..]) { - Ok(justifications) => Ok(Some(justifications)), - Err(err) => - return Err(sp_blockchain::Error::Backend(format!( - "Error decoding justifications: {}", - err - ))), - }, - None => Ok(None), - } - } - - fn last_finalized(&self) -> ClientResult { - Ok(self.meta.read().finalized_hash) - } - - fn leaves(&self) -> ClientResult> { - Ok(self.leaves.read().hashes()) - } - - fn displaced_leaves_after_finalizing( - &self, - block_number: NumberFor, - ) -> ClientResult> { - Ok(self - .leaves - .read() - .displaced_by_finalize_height(block_number) - .leaves() - .cloned() - .collect::>()) - } - - fn children(&self, parent_hash: Block::Hash) -> ClientResult> { - children::read_children(&*self.db, columns::META, meta_keys::CHILDREN_PREFIX, parent_hash) - } - - fn indexed_transaction(&self, hash: Block::Hash) -> ClientResult>> { - Ok(self.db.get(columns::TRANSACTION, hash.as_ref())) - } - - fn has_indexed_transaction(&self, hash: Block::Hash) -> ClientResult { - Ok(self.db.contains(columns::TRANSACTION, hash.as_ref())) - } - - fn block_indexed_body(&self, hash: Block::Hash) -> ClientResult>>> { - let body = match read_db( - &*self.db, - columns::KEY_LOOKUP, - columns::BODY_INDEX, - BlockId::::Hash(hash), - )? { - Some(body) => body, - None => return Ok(None), - }; - match Vec::>::decode(&mut &body[..]) { - Ok(index) => { - let mut transactions = Vec::new(); - for ex in index.into_iter() { - if let DbExtrinsic::Indexed { hash, .. } = ex { - match self.db.get(columns::TRANSACTION, hash.as_ref()) { - Some(t) => transactions.push(t), - None => - return Err(sp_blockchain::Error::Backend(format!( - "Missing indexed transaction {:?}", - hash - ))), - } - } - } - Ok(Some(transactions)) - }, - Err(err) => - Err(sp_blockchain::Error::Backend(format!("Error decoding body list: {}", err))), - } - } -} - -impl HeaderMetadata for BlockchainDb { - type Error = sp_blockchain::Error; - - fn header_metadata( - &self, - hash: Block::Hash, - ) -> Result, Self::Error> { - self.header_metadata_cache.header_metadata(hash).map_or_else( - || { - self.header(BlockId::hash(hash))? 
- .map(|header| { - let header_metadata = CachedHeaderMetadata::from(&header); - self.header_metadata_cache - .insert_header_metadata(header_metadata.hash, header_metadata.clone()); - header_metadata - }) - .ok_or_else(|| { - ClientError::UnknownBlock(format!( - "Header was not found in the database: {:?}", - hash - )) - }) - }, - Ok, - ) - } - - fn insert_header_metadata(&self, hash: Block::Hash, metadata: CachedHeaderMetadata) { - self.header_metadata_cache.insert_header_metadata(hash, metadata) - } - - fn remove_header_metadata(&self, hash: Block::Hash) { - self.header_cache.lock().remove(&hash); - self.header_metadata_cache.remove_header_metadata(hash); - } -} - -/// Database transaction -pub struct BlockImportOperation { - old_state: RecordStatsState, Block>, - db_updates: PrefixedMemoryDB>, - storage_updates: StorageCollection, - child_storage_updates: ChildStorageCollection, - offchain_storage_updates: OffchainChangesCollection, - pending_block: Option>, - aux_ops: Vec<(Vec, Option>)>, - finalized_blocks: Vec<(Block::Hash, Option)>, - set_head: Option, - commit_state: bool, - index_ops: Vec, -} - -impl BlockImportOperation { - fn apply_offchain(&mut self, transaction: &mut Transaction) { - let mut count = 0; - for ((prefix, key), value_operation) in self.offchain_storage_updates.drain(..) { - count += 1; - let key = crate::offchain::concatenate_prefix_and_key(&prefix, &key); - match value_operation { - OffchainOverlayedChange::SetValue(val) => - transaction.set_from_vec(columns::OFFCHAIN, &key, val), - OffchainOverlayedChange::Remove => transaction.remove(columns::OFFCHAIN, &key), - } - } - - if count > 0 { - log::debug!(target: "sc_offchain", "Applied {} offchain indexing changes.", count); - } - } - - fn apply_aux(&mut self, transaction: &mut Transaction) { - for (key, maybe_val) in self.aux_ops.drain(..) { - match maybe_val { - Some(val) => transaction.set_from_vec(columns::AUX, &key, val), - None => transaction.remove(columns::AUX, &key), - } - } - } - - fn apply_new_state( - &mut self, - storage: Storage, - state_version: StateVersion, - ) -> ClientResult { - if storage.top.keys().any(|k| well_known_keys::is_child_storage_key(k)) { - return Err(sp_blockchain::Error::InvalidState) - } - - let child_delta = storage.children_default.values().map(|child_content| { - ( - &child_content.child_info, - child_content.data.iter().map(|(k, v)| (&k[..], Some(&v[..]))), - ) - }); - - let (root, transaction) = self.old_state.full_storage_root( - storage.top.iter().map(|(k, v)| (&k[..], Some(&v[..]))), - child_delta, - state_version, - ); - - self.db_updates = transaction; - Ok(root) - } -} - -impl sc_client_api::backend::BlockImportOperation - for BlockImportOperation -{ - type State = RecordStatsState, Block>; - - fn state(&self) -> ClientResult> { - Ok(Some(&self.old_state)) - } - - fn set_block_data( - &mut self, - header: Block::Header, - body: Option>, - indexed_body: Option>>, - justifications: Option, - leaf_state: NewBlockState, - ) -> ClientResult<()> { - assert!(self.pending_block.is_none(), "Only one block per operation is allowed"); - self.pending_block = - Some(PendingBlock { header, body, indexed_body, justifications, leaf_state }); - Ok(()) - } - - fn update_cache(&mut self, _cache: HashMap>) { - // Currently cache isn't implemented on full nodes. 
- } - - fn update_db_storage(&mut self, update: PrefixedMemoryDB>) -> ClientResult<()> { - self.db_updates = update; - Ok(()) - } - - fn reset_storage( - &mut self, - storage: Storage, - state_version: StateVersion, - ) -> ClientResult { - let root = self.apply_new_state(storage, state_version)?; - self.commit_state = true; - Ok(root) - } - - fn set_genesis_state( - &mut self, - storage: Storage, - commit: bool, - state_version: StateVersion, - ) -> ClientResult { - let root = self.apply_new_state(storage, state_version)?; - self.commit_state = commit; - Ok(root) - } - - fn insert_aux(&mut self, ops: I) -> ClientResult<()> - where - I: IntoIterator, Option>)>, - { - self.aux_ops.append(&mut ops.into_iter().collect()); - Ok(()) - } - - fn update_storage( - &mut self, - update: StorageCollection, - child_update: ChildStorageCollection, - ) -> ClientResult<()> { - self.storage_updates = update; - self.child_storage_updates = child_update; - Ok(()) - } - - fn update_offchain_storage( - &mut self, - offchain_update: OffchainChangesCollection, - ) -> ClientResult<()> { - self.offchain_storage_updates = offchain_update; - Ok(()) - } - - fn mark_finalized( - &mut self, - block: Block::Hash, - justification: Option, - ) -> ClientResult<()> { - self.finalized_blocks.push((block, justification)); - Ok(()) - } - - fn mark_head(&mut self, hash: Block::Hash) -> ClientResult<()> { - assert!(self.set_head.is_none(), "Only one set head per operation is allowed"); - self.set_head = Some(hash); - Ok(()) - } - - fn update_transaction_index(&mut self, index_ops: Vec) -> ClientResult<()> { - self.index_ops = index_ops; - Ok(()) - } -} - -struct StorageDb { - pub db: Arc>, - pub state_db: StateDb, StateMetaDb>, - prefix_keys: bool, -} - -impl sp_state_machine::Storage> for StorageDb { - fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { - if self.prefix_keys { - let key = prefixed_key::>(key, prefix); - self.state_db.get(&key, self) - } else { - self.state_db.get(key.as_ref(), self) - } - .map_err(|e| format!("Database backend error: {:?}", e)) - } -} - -impl sc_state_db::NodeDb for StorageDb { - type Error = io::Error; - type Key = [u8]; - - fn get(&self, key: &[u8]) -> Result>, Self::Error> { - Ok(self.db.get(columns::STATE, key)) - } -} - -struct DbGenesisStorage { - root: Block::Hash, - storage: PrefixedMemoryDB>, -} - -impl DbGenesisStorage { - pub fn new(root: Block::Hash, storage: PrefixedMemoryDB>) -> Self { - DbGenesisStorage { root, storage } - } -} - -impl sp_state_machine::Storage> for DbGenesisStorage { - fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { - use hash_db::HashDB; - Ok(self.storage.get(key, prefix)) - } -} - -struct EmptyStorage(pub Block::Hash); - -impl EmptyStorage { - pub fn new() -> Self { - let mut root = Block::Hash::default(); - let mut mdb = MemoryDB::>::default(); - // both triedbmut are the same on empty storage. - sp_trie::trie_types::TrieDBMutBuilderV1::>::new(&mut mdb, &mut root).build(); - EmptyStorage(root) - } -} - -impl sp_state_machine::Storage> for EmptyStorage { - fn get(&self, _key: &Block::Hash, _prefix: Prefix) -> Result, String> { - Ok(None) - } -} - -/// Frozen `value` at time `at`. -/// -/// Used as inner structure under lock in `FrozenForDuration`. -struct Frozen { - at: std::time::Instant, - value: Option, -} - -/// Some value frozen for period of time. -/// -/// If time `duration` not passed since the value was instantiated, -/// current frozen value is returned. 
Otherwise, you have to provide -/// a new value which will be again frozen for `duration`. -pub(crate) struct FrozenForDuration { - duration: std::time::Duration, - value: parking_lot::Mutex>, -} - -impl FrozenForDuration { - fn new(duration: std::time::Duration) -> Self { - Self { duration, value: Frozen { at: std::time::Instant::now(), value: None }.into() } - } - - fn take_or_else(&self, f: F) -> T - where - F: FnOnce() -> T, - { - let mut lock = self.value.lock(); - let now = std::time::Instant::now(); - if now.saturating_duration_since(lock.at) > self.duration || lock.value.is_none() { - let new_value = f(); - lock.at = now; - lock.value = Some(new_value.clone()); - new_value - } else { - lock.value.as_ref().expect("Checked with in branch above; qed").clone() - } - } -} - -/// Disk backend. -/// -/// Disk backend keeps data in a key-value store. In archive mode, trie nodes are kept from all -/// blocks. Otherwise, trie nodes are kept only from some recent blocks. -pub struct Backend { - storage: Arc>, - offchain_storage: offchain::LocalStorage, - blockchain: BlockchainDb, - canonicalization_delay: u64, - import_lock: Arc>, - is_archive: bool, - blocks_pruning: BlocksPruning, - io_stats: FrozenForDuration<(kvdb::IoStats, StateUsageInfo)>, - state_usage: Arc, - genesis_state: RwLock>>>, - shared_trie_cache: Option>>, -} - -impl Backend { - /// Create a new instance of database backend. - /// - /// The pruning window is how old a block must be before the state is pruned. - pub fn new(db_config: DatabaseSettings, canonicalization_delay: u64) -> ClientResult { - use utils::OpenDbError; - - let db_source = &db_config.source; - - let (needs_init, db) = - match crate::utils::open_database::(db_source, DatabaseType::Full, false) { - Ok(db) => (false, db), - Err(OpenDbError::DoesNotExist) => { - let db = - crate::utils::open_database::(db_source, DatabaseType::Full, true)?; - (true, db) - }, - Err(as_is) => return Err(as_is.into()), - }; - - Self::from_database(db as Arc<_>, canonicalization_delay, &db_config, needs_init) - } - - /// Create new memory-backed client backend for tests. - #[cfg(any(test, feature = "test-helpers"))] - pub fn new_test(blocks_pruning: u32, canonicalization_delay: u64) -> Self { - Self::new_test_with_tx_storage(BlocksPruning::Some(blocks_pruning), canonicalization_delay) - } - - /// Create new memory-backed client backend for tests. - #[cfg(any(test, feature = "test-helpers"))] - pub fn new_test_with_tx_storage( - blocks_pruning: BlocksPruning, - canonicalization_delay: u64, - ) -> Self { - let db = kvdb_memorydb::create(crate::utils::NUM_COLUMNS); - let db = sp_database::as_database(db); - let state_pruning = match blocks_pruning { - BlocksPruning::KeepAll => PruningMode::ArchiveAll, - BlocksPruning::KeepFinalized => PruningMode::ArchiveCanonical, - BlocksPruning::Some(n) => PruningMode::blocks_pruning(n), - }; - let db_setting = DatabaseSettings { - trie_cache_maximum_size: Some(16 * 1024 * 1024), - state_pruning: Some(state_pruning), - source: DatabaseSource::Custom { db, require_create_flag: true }, - blocks_pruning, - }; - - Self::new(db_setting, canonicalization_delay).expect("failed to create test-db") - } - - /// Expose the Database that is used by this backend. - /// The second argument is the Column that stores the State. - /// - /// Should only be needed for benchmarking. 
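// Illustrative, std-only sketch of the `FrozenForDuration` pattern above: a value is
// recomputed at most once per `duration`, and callers that arrive inside the window get
// the cached copy. The backend uses this to rate-limit expensive I/O-statistics queries.
use std::sync::Mutex;
use std::time::{Duration, Instant};

struct FrozenValue<T: Clone> {
    duration: Duration,
    inner: Mutex<(Instant, Option<T>)>,
}

impl<T: Clone> FrozenValue<T> {
    fn new(duration: Duration) -> Self {
        Self { duration, inner: Mutex::new((Instant::now(), None)) }
    }

    /// Return the cached value, or compute a fresh one with `f` if the window elapsed.
    fn take_or_else<F: FnOnce() -> T>(&self, f: F) -> T {
        let mut guard = self.inner.lock().expect("lock not poisoned");
        let now = Instant::now();
        if guard.1.is_none() || now.saturating_duration_since(guard.0) > self.duration {
            let value = f();
            *guard = (now, Some(value.clone()));
            value
        } else {
            guard.1.clone().expect("checked to be Some above")
        }
    }
}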
- #[cfg(any(feature = "runtime-benchmarks"))] - pub fn expose_db(&self) -> (Arc>, sp_database::ColumnId) { - (self.storage.db.clone(), columns::STATE) - } - - /// Expose the Storage that is used by this backend. - /// - /// Should only be needed for benchmarking. - #[cfg(any(feature = "runtime-benchmarks"))] - pub fn expose_storage(&self) -> Arc>> { - self.storage.clone() - } - - fn from_database( - db: Arc>, - canonicalization_delay: u64, - config: &DatabaseSettings, - should_init: bool, - ) -> ClientResult { - let mut db_init_transaction = Transaction::new(); - - let requested_state_pruning = config.state_pruning.clone(); - let state_meta_db = StateMetaDb(db.clone()); - let map_e = sp_blockchain::Error::from_state_db; - - let (state_db_init_commit_set, state_db) = StateDb::open( - state_meta_db, - requested_state_pruning, - !db.supports_ref_counting(), - should_init, - ) - .map_err(map_e)?; - - apply_state_commit(&mut db_init_transaction, state_db_init_commit_set); - - let state_pruning_used = state_db.pruning_mode(); - let is_archive_pruning = state_pruning_used.is_archive(); - let blockchain = BlockchainDb::new(db.clone())?; - - let storage_db = - StorageDb { db: db.clone(), state_db, prefix_keys: !db.supports_ref_counting() }; - - let offchain_storage = offchain::LocalStorage::new(db.clone()); - - let backend = Backend { - storage: Arc::new(storage_db), - offchain_storage, - blockchain, - canonicalization_delay, - import_lock: Default::default(), - is_archive: is_archive_pruning, - io_stats: FrozenForDuration::new(std::time::Duration::from_secs(1)), - state_usage: Arc::new(StateUsageStats::new()), - blocks_pruning: config.blocks_pruning, - genesis_state: RwLock::new(None), - shared_trie_cache: config.trie_cache_maximum_size.map(|maximum_size| { - SharedTrieCache::new(sp_trie::cache::CacheSize::Maximum(maximum_size)) - }), - }; - - // Older DB versions have no last state key. Check if the state is available and set it. - let info = backend.blockchain.info(); - if info.finalized_state.is_none() && - info.finalized_hash != Default::default() && - sc_client_api::Backend::have_state_at( - &backend, - info.finalized_hash, - info.finalized_number, - ) { - backend.blockchain.update_meta(MetaUpdate { - hash: info.finalized_hash, - number: info.finalized_number, - is_best: info.finalized_hash == info.best_hash, - is_finalized: true, - with_state: true, - }); - } - - db.commit(db_init_transaction)?; - - Ok(backend) - } - - /// Handle setting head within a transaction. `route_to` should be the last - /// block that existed in the database. `best_to` should be the best block - /// to be set. - /// - /// In the case where the new best block is a block to be imported, `route_to` - /// should be the parent of `best_to`. In the case where we set an existing block - /// to be best, `route_to` should equal to `best_to`. - fn set_head_with_transaction( - &self, - transaction: &mut Transaction, - route_to: Block::Hash, - best_to: (NumberFor, Block::Hash), - ) -> ClientResult<(Vec, Vec)> { - let mut enacted = Vec::default(); - let mut retracted = Vec::default(); - - let (best_number, best_hash) = best_to; - - let meta = self.blockchain.meta.read(); - - if meta.best_number > best_number && - (meta.best_number - best_number).saturated_into::() > - self.canonicalization_delay - { - return Err(sp_blockchain::Error::SetHeadTooOld) - } - - let parent_exists = - self.blockchain.status(BlockId::Hash(route_to))? 
== sp_blockchain::BlockStatus::InChain; - - // Cannot find tree route with empty DB or when imported a detached block. - if meta.best_hash != Default::default() && parent_exists { - let tree_route = sp_blockchain::tree_route(&self.blockchain, meta.best_hash, route_to)?; - - // uncanonicalize: check safety violations and ensure the numbers no longer - // point to these block hashes in the key mapping. - for r in tree_route.retracted() { - if r.hash == meta.finalized_hash { - warn!( - "Potential safety failure: reverting finalized block {:?}", - (&r.number, &r.hash) - ); - - return Err(sp_blockchain::Error::NotInFinalizedChain) - } - - retracted.push(r.hash); - utils::remove_number_to_key_mapping(transaction, columns::KEY_LOOKUP, r.number)?; - } - - // canonicalize: set the number lookup to map to this block's hash. - for e in tree_route.enacted() { - enacted.push(e.hash); - utils::insert_number_to_key_mapping( - transaction, - columns::KEY_LOOKUP, - e.number, - e.hash, - )?; - } - } - - let lookup_key = utils::number_and_hash_to_lookup_key(best_number, &best_hash)?; - transaction.set_from_vec(columns::META, meta_keys::BEST_BLOCK, lookup_key); - utils::insert_number_to_key_mapping( - transaction, - columns::KEY_LOOKUP, - best_number, - best_hash, - )?; - - Ok((enacted, retracted)) - } - - fn ensure_sequential_finalization( - &self, - header: &Block::Header, - last_finalized: Option, - ) -> ClientResult<()> { - let last_finalized = - last_finalized.unwrap_or_else(|| self.blockchain.meta.read().finalized_hash); - if last_finalized != self.blockchain.meta.read().genesis_hash && - *header.parent_hash() != last_finalized - { - return Err(sp_blockchain::Error::NonSequentialFinalization(format!( - "Last finalized {:?} not parent of {:?}", - last_finalized, - header.hash() - ))) - } - Ok(()) - } - - fn finalize_block_with_transaction( - &self, - transaction: &mut Transaction, - hash: Block::Hash, - header: &Block::Header, - last_finalized: Option, - justification: Option, - finalization_displaced: &mut Option>>, - ) -> ClientResult> { - // TODO: ensure best chain contains this block. - let number = *header.number(); - self.ensure_sequential_finalization(header, last_finalized)?; - let with_state = sc_client_api::Backend::have_state_at(self, hash, number); - - self.note_finalized(transaction, header, hash, finalization_displaced, with_state)?; - - if let Some(justification) = justification { - transaction.set_from_vec( - columns::JUSTIFICATIONS, - &utils::number_and_hash_to_lookup_key(number, hash)?, - Justifications::from(justification).encode(), - ); - } - Ok(MetaUpdate { hash, number, is_best: false, is_finalized: true, with_state }) - } - - // performs forced canonicalization with a delay after importing a non-finalized block. - fn force_delayed_canonicalize( - &self, - transaction: &mut Transaction, - hash: Block::Hash, - number: NumberFor, - ) -> ClientResult<()> { - let number_u64 = number.saturated_into::(); - if number_u64 > self.canonicalization_delay { - let new_canonical = number_u64 - self.canonicalization_delay; - - if new_canonical <= self.storage.state_db.best_canonical().unwrap_or(0) { - return Ok(()) - } - let hash = if new_canonical == number_u64 { - hash - } else { - sc_client_api::blockchain::HeaderBackend::hash( - &self.blockchain, - new_canonical.saturated_into(), - )? - .ok_or_else(|| { - sp_blockchain::Error::Backend(format!( - "Can't canonicalize missing block number #{} when importing {:?} (#{})", - new_canonical, hash, number, - )) - })? 
- }; - if !sc_client_api::Backend::have_state_at(self, hash, new_canonical.saturated_into()) { - return Ok(()) - } - - trace!(target: "db", "Canonicalize block #{} ({:?})", new_canonical, hash); - let commit = self.storage.state_db.canonicalize_block(&hash).map_err( - sp_blockchain::Error::from_state_db::< - sc_state_db::Error, - >, - )?; - apply_state_commit(transaction, commit); - } - Ok(()) - } - - fn try_commit_operation(&self, mut operation: BlockImportOperation) -> ClientResult<()> { - let mut transaction = Transaction::new(); - let mut finalization_displaced_leaves = None; - - operation.apply_aux(&mut transaction); - operation.apply_offchain(&mut transaction); - - let mut meta_updates = Vec::with_capacity(operation.finalized_blocks.len()); - let (best_num, mut last_finalized_hash, mut last_finalized_num, mut block_gap) = { - let meta = self.blockchain.meta.read(); - (meta.best_number, meta.finalized_hash, meta.finalized_number, meta.block_gap) - }; - - for (block_hash, justification) in operation.finalized_blocks { - let block_header = self.blockchain.expect_header(BlockId::Hash(block_hash))?; - meta_updates.push(self.finalize_block_with_transaction( - &mut transaction, - block_hash, - &block_header, - Some(last_finalized_hash), - justification, - &mut finalization_displaced_leaves, - )?); - last_finalized_hash = block_hash; - last_finalized_num = *block_header.number(); - } - - let imported = if let Some(pending_block) = operation.pending_block { - let hash = pending_block.header.hash(); - - let parent_hash = *pending_block.header.parent_hash(); - let number = *pending_block.header.number(); - let highest_leaf = self - .blockchain - .leaves - .read() - .highest_leaf() - .map(|(n, _)| n) - .unwrap_or(Zero::zero()); - let existing_header = - number <= highest_leaf && self.blockchain.header(BlockId::hash(hash))?.is_some(); - - // blocks are keyed by number + hash. - let lookup_key = utils::number_and_hash_to_lookup_key(number, hash)?; - - if pending_block.leaf_state.is_best() { - self.set_head_with_transaction(&mut transaction, parent_hash, (number, hash))?; - }; - - utils::insert_hash_to_key_mapping(&mut transaction, columns::KEY_LOOKUP, number, hash)?; - - transaction.set_from_vec(columns::HEADER, &lookup_key, pending_block.header.encode()); - if let Some(body) = pending_block.body { - // If we have any index operations we save block in the new format with indexed - // extrinsic headers Otherwise we save the body as a single blob. - if operation.index_ops.is_empty() { - transaction.set_from_vec(columns::BODY, &lookup_key, body.encode()); - } else { - let body = - apply_index_ops::(&mut transaction, body, operation.index_ops); - transaction.set_from_vec(columns::BODY_INDEX, &lookup_key, body); - } - } - if let Some(body) = pending_block.indexed_body { - apply_indexed_body::(&mut transaction, body); - } - if let Some(justifications) = pending_block.justifications { - transaction.set_from_vec( - columns::JUSTIFICATIONS, - &lookup_key, - justifications.encode(), - ); - } - - if number.is_zero() { - transaction.set(columns::META, meta_keys::GENESIS_HASH, hash.as_ref()); - - if operation.commit_state { - transaction.set_from_vec(columns::META, meta_keys::FINALIZED_STATE, lookup_key); - } else { - // When we don't want to commit the genesis state, we still preserve it in - // memory to bootstrap consensus. It is queried for an initial list of - // authorities, etc. 
- *self.genesis_state.write() = Some(Arc::new(DbGenesisStorage::new( - *pending_block.header.state_root(), - operation.db_updates.clone(), - ))); - } - } - - let finalized = if operation.commit_state { - let mut changeset: sc_state_db::ChangeSet> = - sc_state_db::ChangeSet::default(); - let mut ops: u64 = 0; - let mut bytes: u64 = 0; - let mut removal: u64 = 0; - let mut bytes_removal: u64 = 0; - for (mut key, (val, rc)) in operation.db_updates.drain() { - self.storage.db.sanitize_key(&mut key); - if rc > 0 { - ops += 1; - bytes += key.len() as u64 + val.len() as u64; - if rc == 1 { - changeset.inserted.push((key, val.to_vec())); - } else { - changeset.inserted.push((key.clone(), val.to_vec())); - for _ in 0..rc - 1 { - changeset.inserted.push((key.clone(), Default::default())); - } - } - } else if rc < 0 { - removal += 1; - bytes_removal += key.len() as u64; - if rc == -1 { - changeset.deleted.push(key); - } else { - for _ in 0..-rc { - changeset.deleted.push(key.clone()); - } - } - } - } - self.state_usage.tally_writes_nodes(ops, bytes); - self.state_usage.tally_removed_nodes(removal, bytes_removal); - - let mut ops: u64 = 0; - let mut bytes: u64 = 0; - for (key, value) in operation - .storage_updates - .iter() - .chain(operation.child_storage_updates.iter().flat_map(|(_, s)| s.iter())) - { - ops += 1; - bytes += key.len() as u64; - if let Some(v) = value.as_ref() { - bytes += v.len() as u64; - } - } - self.state_usage.tally_writes(ops, bytes); - let number_u64 = number.saturated_into::(); - let commit = self - .storage - .state_db - .insert_block(&hash, number_u64, pending_block.header.parent_hash(), changeset) - .map_err(|e: sc_state_db::Error| { - sp_blockchain::Error::from_state_db(e) - })?; - apply_state_commit(&mut transaction, commit); - if number <= last_finalized_num { - // Canonicalize in the db when re-importing existing blocks with state. - let commit = self.storage.state_db.canonicalize_block(&hash).map_err( - sp_blockchain::Error::from_state_db::< - sc_state_db::Error, - >, - )?; - apply_state_commit(&mut transaction, commit); - meta_updates.push(MetaUpdate { - hash, - number, - is_best: false, - is_finalized: true, - with_state: true, - }); - } - - // Check if need to finalize. Genesis is always finalized instantly. - let finalized = number_u64 == 0 || pending_block.leaf_state.is_final(); - finalized - } else { - (number.is_zero() && last_finalized_num.is_zero()) || - pending_block.leaf_state.is_final() - }; - - let header = &pending_block.header; - let is_best = pending_block.leaf_state.is_best(); - debug!( - target: "db", - "DB Commit {:?} ({}), best={}, state={}, existing={}, finalized={}", - hash, - number, - is_best, - operation.commit_state, - existing_header, - finalized, - ); - - self.state_usage.merge_sm(operation.old_state.usage_info()); - - // release state reference so that it can be finalized - // VERY IMPORTANT - drop(operation.old_state); - - if finalized { - // TODO: ensure best chain contains this block. - self.ensure_sequential_finalization(header, Some(last_finalized_hash))?; - self.note_finalized( - &mut transaction, - header, - hash, - &mut finalization_displaced_leaves, - operation.commit_state, - )?; - } else { - // canonicalize blocks which are old enough, regardless of finality. - self.force_delayed_canonicalize(&mut transaction, hash, *header.number())? - } - - if !existing_header { - // Add a new leaf if the block has the potential to be finalized. 
- if number > last_finalized_num || last_finalized_num.is_zero() { - let mut leaves = self.blockchain.leaves.write(); - leaves.import(hash, number, parent_hash); - leaves.prepare_transaction( - &mut transaction, - columns::META, - meta_keys::LEAF_PREFIX, - ); - } - - let mut children = children::read_children( - &*self.storage.db, - columns::META, - meta_keys::CHILDREN_PREFIX, - parent_hash, - )?; - if !children.contains(&hash) { - children.push(hash); - children::write_children( - &mut transaction, - columns::META, - meta_keys::CHILDREN_PREFIX, - parent_hash, - children, - ); - } - - if let Some((mut start, end)) = block_gap { - if number == start { - start += One::one(); - utils::insert_number_to_key_mapping( - &mut transaction, - columns::KEY_LOOKUP, - number, - hash, - )?; - } - if start > end { - transaction.remove(columns::META, meta_keys::BLOCK_GAP); - block_gap = None; - debug!(target: "db", "Removed block gap."); - } else { - block_gap = Some((start, end)); - debug!(target: "db", "Update block gap. {:?}", block_gap); - transaction.set( - columns::META, - meta_keys::BLOCK_GAP, - &(start, end).encode(), - ); - } - } else if number > best_num + One::one() && - number > One::one() && self - .blockchain - .header(BlockId::hash(parent_hash))? - .is_none() - { - let gap = (best_num + One::one(), number - One::one()); - transaction.set(columns::META, meta_keys::BLOCK_GAP, &gap.encode()); - block_gap = Some(gap); - debug!(target: "db", "Detected block gap {:?}", block_gap); - } - } - - meta_updates.push(MetaUpdate { - hash, - number, - is_best: pending_block.leaf_state.is_best(), - is_finalized: finalized, - with_state: operation.commit_state, - }); - Some((pending_block.header, hash)) - } else { - None - }; - - if let Some(set_head) = operation.set_head { - if let Some(header) = sc_client_api::blockchain::HeaderBackend::header( - &self.blockchain, - BlockId::Hash(set_head), - )? { - let number = header.number(); - let hash = header.hash(); - - self.set_head_with_transaction(&mut transaction, hash, (*number, hash))?; - - meta_updates.push(MetaUpdate { - hash, - number: *number, - is_best: true, - is_finalized: false, - with_state: false, - }); - } else { - return Err(sp_blockchain::Error::UnknownBlock(format!( - "Cannot set head {:?}", - set_head - ))) - } - } - - self.storage.db.commit(transaction)?; - - // Apply all in-memory state changes. - // Code beyond this point can't fail. - - if let Some((header, hash)) = imported { - trace!(target: "db", "DB Commit done {:?}", hash); - let header_metadata = CachedHeaderMetadata::from(&header); - self.blockchain.insert_header_metadata(header_metadata.hash, header_metadata); - cache_header(&mut self.blockchain.header_cache.lock(), hash, Some(header)); - } - - for m in meta_updates { - self.blockchain.update_meta(m); - } - self.blockchain.update_block_gap(block_gap); - - Ok(()) - } - - // write stuff to a transaction after a new block is finalized. - // this canonicalizes finalized blocks. Fails if called with a block which - // was not a child of the last finalized block. 
- fn note_finalized( - &self, - transaction: &mut Transaction, - f_header: &Block::Header, - f_hash: Block::Hash, - displaced: &mut Option>>, - with_state: bool, - ) -> ClientResult<()> { - let f_num = *f_header.number(); - - let lookup_key = utils::number_and_hash_to_lookup_key(f_num, f_hash)?; - if with_state { - transaction.set_from_vec(columns::META, meta_keys::FINALIZED_STATE, lookup_key.clone()); - } - transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key); - - if sc_client_api::Backend::have_state_at(self, f_hash, f_num) && - self.storage - .state_db - .best_canonical() - .map(|c| f_num.saturated_into::() > c) - .unwrap_or(true) - { - let commit = self.storage.state_db.canonicalize_block(&f_hash).map_err( - sp_blockchain::Error::from_state_db::< - sc_state_db::Error, - >, - )?; - apply_state_commit(transaction, commit); - } - - let new_displaced = self.blockchain.leaves.write().finalize_height(f_num); - self.prune_blocks(transaction, f_num, &new_displaced)?; - match displaced { - x @ &mut None => *x = Some(new_displaced), - &mut Some(ref mut displaced) => displaced.merge(new_displaced), - } - - Ok(()) - } - - fn prune_blocks( - &self, - transaction: &mut Transaction, - finalized: NumberFor, - displaced: &FinalizationOutcome>, - ) -> ClientResult<()> { - match self.blocks_pruning { - BlocksPruning::KeepAll => {}, - BlocksPruning::Some(blocks_pruning) => { - // Always keep the last finalized block - let keep = std::cmp::max(blocks_pruning, 1); - if finalized >= keep.into() { - let number = finalized.saturating_sub(keep.into()); - self.prune_block(transaction, BlockId::::number(number))?; - } - self.prune_displaced_branches(transaction, finalized, displaced)?; - }, - BlocksPruning::KeepFinalized => { - self.prune_displaced_branches(transaction, finalized, displaced)?; - }, - } - Ok(()) - } - - fn prune_displaced_branches( - &self, - transaction: &mut Transaction, - finalized: NumberFor, - displaced: &FinalizationOutcome>, - ) -> ClientResult<()> { - // Discard all blocks from displaced branches - for h in displaced.leaves() { - let mut number = finalized; - let mut hash = *h; - // Follow displaced chains back until we reach a finalized block. - // Since leaves are discarded due to finality, they can't have parents - // that are canonical, but not yet finalized. So we stop deleting as soon as - // we reach canonical chain. - while self.blockchain.hash(number)? != Some(hash) { - let id = BlockId::::hash(hash); - match self.blockchain.header(id)? { - Some(header) => { - self.prune_block(transaction, id)?; - number = header.number().saturating_sub(One::one()); - hash = *header.parent_hash(); - }, - None => break, - } - } - } - Ok(()) - } - - fn prune_block( - &self, - transaction: &mut Transaction, - id: BlockId, - ) -> ClientResult<()> { - debug!(target: "db", "Removing block #{}", id); - utils::remove_from_db( - transaction, - &*self.storage.db, - columns::KEY_LOOKUP, - columns::BODY, - id, - )?; - utils::remove_from_db( - transaction, - &*self.storage.db, - columns::KEY_LOOKUP, - columns::JUSTIFICATIONS, - id, - )?; - if let Some(index) = - read_db(&*self.storage.db, columns::KEY_LOOKUP, columns::BODY_INDEX, id)? - { - utils::remove_from_db( - transaction, - &*self.storage.db, - columns::KEY_LOOKUP, - columns::BODY_INDEX, - id, - )?; - match Vec::>::decode(&mut &index[..]) { - Ok(index) => - for ex in index { - if let DbExtrinsic::Indexed { hash, .. 
} = ex { - transaction.release(columns::TRANSACTION, hash); - } - }, - Err(err) => - return Err(sp_blockchain::Error::Backend(format!( - "Error decoding body list: {}", - err - ))), - } - } - Ok(()) - } - - fn empty_state(&self) -> ClientResult, Block>> { - let root = EmptyStorage::::new().0; // Empty trie - let db_state = DbStateBuilder::::new(self.storage.clone(), root) - .with_optional_cache(self.shared_trie_cache.as_ref().map(|c| c.local_cache())) - .build(); - let state = RefTrackingState::new(db_state, self.storage.clone(), None); - Ok(RecordStatsState::new(state, None, self.state_usage.clone())) - } -} - -fn apply_state_commit( - transaction: &mut Transaction, - commit: sc_state_db::CommitSet>, -) { - for (key, val) in commit.data.inserted.into_iter() { - transaction.set_from_vec(columns::STATE, &key[..], val); - } - for key in commit.data.deleted.into_iter() { - transaction.remove(columns::STATE, &key[..]); - } - for (key, val) in commit.meta.inserted.into_iter() { - transaction.set_from_vec(columns::STATE_META, &key[..], val); - } - for key in commit.meta.deleted.into_iter() { - transaction.remove(columns::STATE_META, &key[..]); - } -} - -fn apply_index_ops( - transaction: &mut Transaction, - body: Vec, - ops: Vec, -) -> Vec { - let mut extrinsic_index: Vec> = Vec::with_capacity(body.len()); - let mut index_map = HashMap::new(); - let mut renewed_map = HashMap::new(); - for op in ops { - match op { - IndexOperation::Insert { extrinsic, hash, size } => { - index_map.insert(extrinsic, (hash, size)); - }, - IndexOperation::Renew { extrinsic, hash } => { - renewed_map.insert(extrinsic, DbHash::from_slice(hash.as_ref())); - }, - } - } - for (index, extrinsic) in body.into_iter().enumerate() { - let db_extrinsic = if let Some(hash) = renewed_map.get(&(index as u32)) { - // Bump ref counter - let extrinsic = extrinsic.encode(); - transaction.reference(columns::TRANSACTION, DbHash::from_slice(hash.as_ref())); - DbExtrinsic::Indexed { hash: *hash, header: extrinsic } - } else { - match index_map.get(&(index as u32)) { - Some((hash, size)) => { - let encoded = extrinsic.encode(); - if *size as usize <= encoded.len() { - let offset = encoded.len() - *size as usize; - transaction.store( - columns::TRANSACTION, - DbHash::from_slice(hash.as_ref()), - encoded[offset..].to_vec(), - ); - DbExtrinsic::Indexed { - hash: DbHash::from_slice(hash.as_ref()), - header: encoded[..offset].to_vec(), - } - } else { - // Invalid indexed slice. Just store full data and don't index anything. 
- DbExtrinsic::Full(extrinsic) - } - }, - _ => DbExtrinsic::Full(extrinsic), - } - }; - extrinsic_index.push(db_extrinsic); - } - debug!( - target: "db", - "DB transaction index: {} inserted, {} renewed, {} full", - index_map.len(), - renewed_map.len(), - extrinsic_index.len() - index_map.len() - renewed_map.len(), - ); - extrinsic_index.encode() -} - -fn apply_indexed_body(transaction: &mut Transaction, body: Vec>) { - for extrinsic in body { - let hash = sp_runtime::traits::BlakeTwo256::hash(&extrinsic); - transaction.store(columns::TRANSACTION, DbHash::from_slice(hash.as_ref()), extrinsic); - } -} - -impl sc_client_api::backend::AuxStore for Backend -where - Block: BlockT, -{ - fn insert_aux< - 'a, - 'b: 'a, - 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >( - &self, - insert: I, - delete: D, - ) -> ClientResult<()> { - let mut transaction = Transaction::new(); - for (k, v) in insert { - transaction.set(columns::AUX, k, v); - } - for k in delete { - transaction.remove(columns::AUX, k); - } - self.storage.db.commit(transaction)?; - Ok(()) - } - - fn get_aux(&self, key: &[u8]) -> ClientResult>> { - Ok(self.storage.db.get(columns::AUX, key)) - } -} - -impl sc_client_api::backend::Backend for Backend { - type BlockImportOperation = BlockImportOperation; - type Blockchain = BlockchainDb; - type State = RecordStatsState, Block>; - type OffchainStorage = offchain::LocalStorage; - - fn begin_operation(&self) -> ClientResult { - Ok(BlockImportOperation { - pending_block: None, - old_state: self.empty_state()?, - db_updates: PrefixedMemoryDB::default(), - storage_updates: Default::default(), - child_storage_updates: Default::default(), - offchain_storage_updates: Default::default(), - aux_ops: Vec::new(), - finalized_blocks: Vec::new(), - set_head: None, - commit_state: false, - index_ops: Default::default(), - }) - } - - fn begin_state_operation( - &self, - operation: &mut Self::BlockImportOperation, - block: Block::Hash, - ) -> ClientResult<()> { - if block == Default::default() { - operation.old_state = self.empty_state()?; - } else { - operation.old_state = self.state_at(block)?; - } - - operation.commit_state = true; - Ok(()) - } - - fn commit_operation(&self, operation: Self::BlockImportOperation) -> ClientResult<()> { - let usage = operation.old_state.usage_info(); - self.state_usage.merge_sm(usage); - - if let Err(e) = self.try_commit_operation(operation) { - let state_meta_db = StateMetaDb(self.storage.db.clone()); - self.storage - .state_db - .reset(state_meta_db) - .map_err(sp_blockchain::Error::from_state_db)?; - Err(e) - } else { - self.storage.state_db.sync(); - Ok(()) - } - } - - fn finalize_block( - &self, - hash: Block::Hash, - justification: Option, - ) -> ClientResult<()> { - let mut transaction = Transaction::new(); - let header = self.blockchain.expect_header(BlockId::Hash(hash))?; - let mut displaced = None; - - let m = self.finalize_block_with_transaction( - &mut transaction, - hash, - &header, - None, - justification, - &mut displaced, - )?; - self.storage.db.commit(transaction)?; - self.blockchain.update_meta(m); - Ok(()) - } - - fn append_justification( - &self, - hash: Block::Hash, - justification: Justification, - ) -> ClientResult<()> { - let mut transaction: Transaction = Transaction::new(); - let header = self.blockchain.expect_header(BlockId::Hash(hash))?; - let number = *header.number(); - - // Check if the block is finalized first. 
- let is_descendent_of = is_descendent_of(&self.blockchain, None); - let last_finalized = self.blockchain.last_finalized()?; - - // We can do a quick check first, before doing a proper but more expensive check - if number > self.blockchain.info().finalized_number || - (hash != last_finalized && !is_descendent_of(&hash, &last_finalized)?) - { - return Err(ClientError::NotInFinalizedChain) - } - - let justifications = if let Some(mut stored_justifications) = - self.blockchain.justifications(hash)? - { - if !stored_justifications.append(justification) { - return Err(ClientError::BadJustification("Duplicate consensus engine ID".into())) - } - stored_justifications - } else { - Justifications::from(justification) - }; - - transaction.set_from_vec( - columns::JUSTIFICATIONS, - &utils::number_and_hash_to_lookup_key(number, hash)?, - justifications.encode(), - ); - - self.storage.db.commit(transaction)?; - - Ok(()) - } - - fn offchain_storage(&self) -> Option { - Some(self.offchain_storage.clone()) - } - - fn usage_info(&self) -> Option { - let (io_stats, state_stats) = self.io_stats.take_or_else(|| { - ( - // TODO: implement DB stats and cache size retrieval - kvdb::IoStats::empty(), - self.state_usage.take(), - ) - }); - let database_cache = MemorySize::from_bytes(0); - let state_cache = MemorySize::from_bytes( - self.shared_trie_cache.as_ref().map_or(0, |c| c.used_memory_size()), - ); - let state_db = self.storage.state_db.memory_info(); - - Some(UsageInfo { - memory: MemoryInfo { state_cache, database_cache, state_db }, - io: IoInfo { - transactions: io_stats.transactions, - bytes_read: io_stats.bytes_read, - bytes_written: io_stats.bytes_written, - writes: io_stats.writes, - reads: io_stats.reads, - average_transaction_size: io_stats.avg_transaction_size() as u64, - state_reads: state_stats.reads.ops, - state_writes: state_stats.writes.ops, - state_writes_cache: state_stats.overlay_writes.ops, - state_reads_cache: state_stats.cache_reads.ops, - state_writes_nodes: state_stats.nodes_writes.ops, - }, - }) - } - - fn revert( - &self, - n: NumberFor, - revert_finalized: bool, - ) -> ClientResult<(NumberFor, HashSet)> { - let mut reverted_finalized = HashSet::new(); - - let info = self.blockchain.info(); - - let highest_leaf = self - .blockchain - .leaves - .read() - .highest_leaf() - .and_then(|(n, h)| h.last().map(|h| (n, *h))); - - let best_number = info.best_number; - let best_hash = info.best_hash; - - let finalized = info.finalized_number; - - let revertible = best_number - finalized; - let n = if !revert_finalized && revertible < n { revertible } else { n }; - - let (n, mut number_to_revert, mut hash_to_revert) = match highest_leaf { - Some((l_n, l_h)) => (n + (l_n - best_number), l_n, l_h), - None => (n, best_number, best_hash), - }; - - let mut revert_blocks = || -> ClientResult> { - for c in 0..n.saturated_into::() { - if number_to_revert.is_zero() { - return Ok(c.saturated_into::>()) - } - let mut transaction = Transaction::new(); - let removed = - self.blockchain.header(BlockId::Hash(hash_to_revert))?.ok_or_else(|| { - sp_blockchain::Error::UnknownBlock(format!( - "Error reverting to {}. 
Block header not found.", - hash_to_revert, - )) - })?; - let removed_hash = removed.hash(); - - let prev_number = number_to_revert.saturating_sub(One::one()); - let prev_hash = - if prev_number == best_number { best_hash } else { *removed.parent_hash() }; - - if !self.have_state_at(prev_hash, prev_number) { - return Ok(c.saturated_into::>()) - } - - match self.storage.state_db.revert_one() { - Some(commit) => { - apply_state_commit(&mut transaction, commit); - - number_to_revert = prev_number; - hash_to_revert = prev_hash; - - let update_finalized = number_to_revert < finalized; - - let key = utils::number_and_hash_to_lookup_key( - number_to_revert, - &hash_to_revert, - )?; - if update_finalized { - transaction.set_from_vec( - columns::META, - meta_keys::FINALIZED_BLOCK, - key.clone(), - ); - - reverted_finalized.insert(removed_hash); - if let Some((hash, _)) = self.blockchain.info().finalized_state { - if hash == hash_to_revert { - if !number_to_revert.is_zero() && - self.have_state_at( - prev_hash, - number_to_revert - One::one(), - ) { - let lookup_key = utils::number_and_hash_to_lookup_key( - number_to_revert - One::one(), - prev_hash, - )?; - transaction.set_from_vec( - columns::META, - meta_keys::FINALIZED_STATE, - lookup_key, - ); - } else { - transaction - .remove(columns::META, meta_keys::FINALIZED_STATE); - } - } - } - } - transaction.set_from_vec(columns::META, meta_keys::BEST_BLOCK, key); - transaction.remove(columns::KEY_LOOKUP, removed.hash().as_ref()); - children::remove_children( - &mut transaction, - columns::META, - meta_keys::CHILDREN_PREFIX, - hash_to_revert, - ); - self.storage.db.commit(transaction)?; - - let is_best = number_to_revert < best_number; - - self.blockchain.update_meta(MetaUpdate { - hash: hash_to_revert, - number: number_to_revert, - is_best, - is_finalized: update_finalized, - with_state: false, - }); - }, - None => return Ok(c.saturated_into::>()), - } - } - - Ok(n) - }; - - let reverted = revert_blocks()?; - - let revert_leaves = || -> ClientResult<()> { - let mut transaction = Transaction::new(); - let mut leaves = self.blockchain.leaves.write(); - - leaves.revert(hash_to_revert, number_to_revert); - leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX); - self.storage.db.commit(transaction)?; - - Ok(()) - }; - - revert_leaves()?; - - Ok((reverted, reverted_finalized)) - } - - fn remove_leaf_block(&self, hash: Block::Hash) -> ClientResult<()> { - let best_hash = self.blockchain.info().best_hash; - - if best_hash == hash { - return Err(sp_blockchain::Error::Backend(format!("Can't remove best block {:?}", hash))) - } - - let hdr = self.blockchain.header_metadata(hash)?; - if !self.have_state_at(hash, hdr.number) { - return Err(sp_blockchain::Error::UnknownBlock(format!( - "State already discarded for {:?}", - hash - ))) - } - - let mut leaves = self.blockchain.leaves.write(); - if !leaves.contains(hdr.number, hash) { - return Err(sp_blockchain::Error::Backend(format!( - "Can't remove non-leaf block {:?}", - hash - ))) - } - - let mut transaction = Transaction::new(); - if let Some(commit) = self.storage.state_db.remove(&hash) { - apply_state_commit(&mut transaction, commit); - } - transaction.remove(columns::KEY_LOOKUP, hash.as_ref()); - - let children: Vec<_> = self - .blockchain() - .children(hdr.parent)? 
- .into_iter() - .filter(|child_hash| *child_hash != hash) - .collect(); - let parent_leaf = if children.is_empty() { - children::remove_children( - &mut transaction, - columns::META, - meta_keys::CHILDREN_PREFIX, - hdr.parent, - ); - Some(hdr.parent) - } else { - children::write_children( - &mut transaction, - columns::META, - meta_keys::CHILDREN_PREFIX, - hdr.parent, - children, - ); - None - }; - - let remove_outcome = leaves.remove(hash, hdr.number, parent_leaf); - leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX); - if let Err(e) = self.storage.db.commit(transaction) { - if let Some(outcome) = remove_outcome { - leaves.undo().undo_remove(outcome); - } - return Err(e.into()) - } - self.blockchain().remove_header_metadata(hash); - Ok(()) - } - - fn blockchain(&self) -> &BlockchainDb { - &self.blockchain - } - - fn state_at(&self, hash: Block::Hash) -> ClientResult { - if hash == self.blockchain.meta.read().genesis_hash { - if let Some(genesis_state) = &*self.genesis_state.read() { - let root = genesis_state.root; - let db_state = DbStateBuilder::::new(genesis_state.clone(), root) - .with_optional_cache(self.shared_trie_cache.as_ref().map(|c| c.local_cache())) - .build(); - - let state = RefTrackingState::new(db_state, self.storage.clone(), None); - return Ok(RecordStatsState::new(state, None, self.state_usage.clone())) - } - } - - match self.blockchain.header_metadata(hash) { - Ok(ref hdr) => { - let hint = || { - sc_state_db::NodeDb::get(self.storage.as_ref(), hdr.state_root.as_ref()) - .unwrap_or(None) - .is_some() - }; - if let Ok(()) = - self.storage.state_db.pin(&hash, hdr.number.saturated_into::(), hint) - { - let root = hdr.state_root; - let db_state = DbStateBuilder::::new(self.storage.clone(), root) - .with_optional_cache( - self.shared_trie_cache.as_ref().map(|c| c.local_cache()), - ) - .build(); - let state = RefTrackingState::new(db_state, self.storage.clone(), Some(hash)); - Ok(RecordStatsState::new(state, Some(hash), self.state_usage.clone())) - } else { - Err(sp_blockchain::Error::UnknownBlock(format!( - "State already discarded for {:?}", - hash - ))) - } - }, - Err(e) => Err(e), - } - } - - fn have_state_at(&self, hash: Block::Hash, number: NumberFor) -> bool { - if self.is_archive { - match self.blockchain.header_metadata(hash) { - Ok(header) => sp_state_machine::Storage::get( - self.storage.as_ref(), - &header.state_root, - (&[], None), - ) - .unwrap_or(None) - .is_some(), - _ => false, - } - } else { - match self.storage.state_db.is_pruned(&hash, number.saturated_into::()) { - IsPruned::Pruned => false, - IsPruned::NotPruned => true, - IsPruned::MaybePruned => match self.blockchain.header_metadata(hash) { - Ok(header) => sp_state_machine::Storage::get( - self.storage.as_ref(), - &header.state_root, - (&[], None), - ) - .unwrap_or(None) - .is_some(), - _ => false, - }, - } - } - } - - fn get_import_lock(&self) -> &RwLock<()> { - &self.import_lock - } - - fn requires_full_sync(&self) -> bool { - matches!( - self.storage.state_db.pruning_mode(), - PruningMode::ArchiveAll | PruningMode::ArchiveCanonical - ) - } -} - -impl sc_client_api::backend::LocalBackend for Backend {} - -#[cfg(test)] -pub(crate) mod tests { - use super::*; - use crate::columns; - use hash_db::{HashDB, EMPTY_PREFIX}; - use sc_client_api::{ - backend::{Backend as BTrait, BlockImportOperation as Op}, - blockchain::Backend as BLBTrait, - }; - use sp_blockchain::{lowest_common_ancestor, tree_route}; - use sp_core::H256; - use sp_runtime::{ - testing::{Block as RawBlock, 
ExtrinsicWrapper, Header}, - traits::{BlakeTwo256, Hash}, - ConsensusEngineId, StateVersion, - }; - - const CONS0_ENGINE_ID: ConsensusEngineId = *b"CON0"; - const CONS1_ENGINE_ID: ConsensusEngineId = *b"CON1"; - - pub(crate) type Block = RawBlock>; - - pub fn insert_header( - backend: &Backend, - number: u64, - parent_hash: H256, - changes: Option, Vec)>>, - extrinsics_root: H256, - ) -> H256 { - insert_block(backend, number, parent_hash, changes, extrinsics_root, Vec::new(), None) - .unwrap() - } - - pub fn insert_block( - backend: &Backend, - number: u64, - parent_hash: H256, - _changes: Option, Vec)>>, - extrinsics_root: H256, - body: Vec>, - transaction_index: Option>, - ) -> Result { - use sp_runtime::testing::Digest; - - let digest = Digest::default(); - let header = Header { - number, - parent_hash, - state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1), - digest, - extrinsics_root, - }; - let header_hash = header.hash(); - - let block_hash = if number == 0 { Default::default() } else { parent_hash }; - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, block_hash).unwrap(); - op.set_block_data(header, Some(body), None, None, NewBlockState::Best).unwrap(); - if let Some(index) = transaction_index { - op.update_transaction_index(index).unwrap(); - } - backend.commit_operation(op)?; - - Ok(header_hash) - } - - pub fn insert_header_no_head( - backend: &Backend, - number: u64, - parent_hash: H256, - extrinsics_root: H256, - ) -> H256 { - use sp_runtime::testing::Digest; - - let digest = Digest::default(); - let header = Header { - number, - parent_hash, - state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1), - digest, - extrinsics_root, - }; - let header_hash = header.hash(); - let mut op = backend.begin_operation().unwrap(); - op.set_block_data(header, None, None, None, NewBlockState::Normal).unwrap(); - backend.commit_operation(op).unwrap(); - header_hash - } - - #[test] - fn block_hash_inserted_correctly() { - let backing = { - let db = Backend::::new_test(1, 0); - for i in 0..10 { - assert!(db.blockchain().hash(i).unwrap().is_none()); - - { - let hash = if i == 0 { - Default::default() - } else { - db.blockchain.hash(i - 1).unwrap().unwrap() - }; - - let mut op = db.begin_operation().unwrap(); - db.begin_state_operation(&mut op, hash).unwrap(); - let header = Header { - number: i, - parent_hash: hash, - state_root: Default::default(), - digest: Default::default(), - extrinsics_root: Default::default(), - }; - - op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best) - .unwrap(); - db.commit_operation(op).unwrap(); - } - - assert!(db.blockchain().hash(i).unwrap().is_some()) - } - db.storage.db.clone() - }; - - let backend = Backend::::new( - DatabaseSettings { - trie_cache_maximum_size: Some(16 * 1024 * 1024), - state_pruning: Some(PruningMode::blocks_pruning(1)), - source: DatabaseSource::Custom { db: backing, require_create_flag: false }, - blocks_pruning: BlocksPruning::KeepFinalized, - }, - 0, - ) - .unwrap(); - assert_eq!(backend.blockchain().info().best_number, 9); - for i in 0..10 { - assert!(backend.blockchain().hash(i).unwrap().is_some()) - } - } - - #[test] - fn set_state_data() { - set_state_data_inner(StateVersion::V0); - set_state_data_inner(StateVersion::V1); - } - fn set_state_data_inner(state_version: StateVersion) { - let db = Backend::::new_test(2, 0); - let hash = { - let mut op = db.begin_operation().unwrap(); - let mut header = Header { - number: 0, - parent_hash: Default::default(), 
- state_root: Default::default(), - digest: Default::default(), - extrinsics_root: Default::default(), - }; - - let storage = vec![(vec![1, 3, 5], vec![2, 4, 6]), (vec![1, 2, 3], vec![9, 9, 9])]; - - header.state_root = op - .old_state - .storage_root(storage.iter().map(|(x, y)| (&x[..], Some(&y[..]))), state_version) - .0 - .into(); - let hash = header.hash(); - - op.reset_storage( - Storage { - top: storage.into_iter().collect(), - children_default: Default::default(), - }, - state_version, - ) - .unwrap(); - op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best) - .unwrap(); - - db.commit_operation(op).unwrap(); - - let state = db.state_at(hash).unwrap(); - - assert_eq!(state.storage(&[1, 3, 5]).unwrap(), Some(vec![2, 4, 6])); - assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9])); - assert_eq!(state.storage(&[5, 5, 5]).unwrap(), None); - - hash - }; - - { - let mut op = db.begin_operation().unwrap(); - db.begin_state_operation(&mut op, hash).unwrap(); - let mut header = Header { - number: 1, - parent_hash: hash, - state_root: Default::default(), - digest: Default::default(), - extrinsics_root: Default::default(), - }; - - let storage = vec![(vec![1, 3, 5], None), (vec![5, 5, 5], Some(vec![4, 5, 6]))]; - - let (root, overlay) = op.old_state.storage_root( - storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))), - state_version, - ); - op.update_db_storage(overlay).unwrap(); - header.state_root = root.into(); - - op.update_storage(storage, Vec::new()).unwrap(); - op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best) - .unwrap(); - - db.commit_operation(op).unwrap(); - - let state = db.state_at(header.hash()).unwrap(); - - assert_eq!(state.storage(&[1, 3, 5]).unwrap(), None); - assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9])); - assert_eq!(state.storage(&[5, 5, 5]).unwrap(), Some(vec![4, 5, 6])); - } - } - - #[test] - fn delete_only_when_negative_rc() { - sp_tracing::try_init_simple(); - let state_version = StateVersion::default(); - let key; - let backend = Backend::::new_test(1, 0); - - let hash = { - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, Default::default()).unwrap(); - let mut header = Header { - number: 0, - parent_hash: Default::default(), - state_root: Default::default(), - digest: Default::default(), - extrinsics_root: Default::default(), - }; - - header.state_root = - op.old_state.storage_root(std::iter::empty(), state_version).0.into(); - let hash = header.hash(); - - op.reset_storage( - Storage { top: Default::default(), children_default: Default::default() }, - state_version, - ) - .unwrap(); - - key = op.db_updates.insert(EMPTY_PREFIX, b"hello"); - op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best) - .unwrap(); - - backend.commit_operation(op).unwrap(); - assert_eq!( - backend - .storage - .db - .get(columns::STATE, &sp_trie::prefixed_key::(&key, EMPTY_PREFIX)) - .unwrap(), - &b"hello"[..] 
- ); - hash - }; - - let hashof1 = { - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, hash).unwrap(); - let mut header = Header { - number: 1, - parent_hash: hash, - state_root: Default::default(), - digest: Default::default(), - extrinsics_root: Default::default(), - }; - - let storage: Vec<(_, _)> = vec![]; - - header.state_root = op - .old_state - .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y))), state_version) - .0 - .into(); - let hash = header.hash(); - - op.db_updates.insert(EMPTY_PREFIX, b"hello"); - op.db_updates.remove(&key, EMPTY_PREFIX); - op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best) - .unwrap(); - - backend.commit_operation(op).unwrap(); - assert_eq!( - backend - .storage - .db - .get(columns::STATE, &sp_trie::prefixed_key::(&key, EMPTY_PREFIX)) - .unwrap(), - &b"hello"[..] - ); - hash - }; - - let hashof2 = { - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, hashof1).unwrap(); - let mut header = Header { - number: 2, - parent_hash: hashof1, - state_root: Default::default(), - digest: Default::default(), - extrinsics_root: Default::default(), - }; - - let storage: Vec<(_, _)> = vec![]; - - header.state_root = op - .old_state - .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y))), state_version) - .0 - .into(); - let hash = header.hash(); - - op.db_updates.remove(&key, EMPTY_PREFIX); - op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best) - .unwrap(); - - backend.commit_operation(op).unwrap(); - - assert!(backend - .storage - .db - .get(columns::STATE, &sp_trie::prefixed_key::(&key, EMPTY_PREFIX)) - .is_some()); - hash - }; - - let hashof3 = { - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, hashof2).unwrap(); - let mut header = Header { - number: 3, - parent_hash: hashof2, - state_root: Default::default(), - digest: Default::default(), - extrinsics_root: Default::default(), - }; - - let storage: Vec<(_, _)> = vec![]; - - header.state_root = op - .old_state - .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y))), state_version) - .0 - .into(); - let hash = header.hash(); - - op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best) - .unwrap(); - - backend.commit_operation(op).unwrap(); - assert!(backend - .storage - .db - .get(columns::STATE, &sp_trie::prefixed_key::(&key, EMPTY_PREFIX)) - .is_none()); - hash - }; - - backend.finalize_block(hashof1, None).unwrap(); - backend.finalize_block(hashof2, None).unwrap(); - backend.finalize_block(hashof3, None).unwrap(); - assert!(backend - .storage - .db - .get(columns::STATE, &sp_trie::prefixed_key::(&key, EMPTY_PREFIX)) - .is_none()); - } - - #[test] - fn tree_route_works() { - let backend = Backend::::new_test(1000, 100); - let blockchain = backend.blockchain(); - let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); - - // fork from genesis: 3 prong. - let a1 = insert_header(&backend, 1, block0, None, Default::default()); - let a2 = insert_header(&backend, 2, a1, None, Default::default()); - let a3 = insert_header(&backend, 3, a2, None, Default::default()); - - // fork from genesis: 2 prong. 
- let b1 = insert_header(&backend, 1, block0, None, H256::from([1; 32])); - let b2 = insert_header(&backend, 2, b1, None, Default::default()); - - { - let tree_route = tree_route(blockchain, a3, b2).unwrap(); - - assert_eq!(tree_route.common_block().hash, block0); - assert_eq!( - tree_route.retracted().iter().map(|r| r.hash).collect::>(), - vec![a3, a2, a1] - ); - assert_eq!( - tree_route.enacted().iter().map(|r| r.hash).collect::>(), - vec![b1, b2] - ); - } - - { - let tree_route = tree_route(blockchain, a1, a3).unwrap(); - - assert_eq!(tree_route.common_block().hash, a1); - assert!(tree_route.retracted().is_empty()); - assert_eq!( - tree_route.enacted().iter().map(|r| r.hash).collect::>(), - vec![a2, a3] - ); - } - - { - let tree_route = tree_route(blockchain, a3, a1).unwrap(); - - assert_eq!(tree_route.common_block().hash, a1); - assert_eq!( - tree_route.retracted().iter().map(|r| r.hash).collect::>(), - vec![a3, a2] - ); - assert!(tree_route.enacted().is_empty()); - } - - { - let tree_route = tree_route(blockchain, a2, a2).unwrap(); - - assert_eq!(tree_route.common_block().hash, a2); - assert!(tree_route.retracted().is_empty()); - assert!(tree_route.enacted().is_empty()); - } - } - - #[test] - fn tree_route_child() { - let backend = Backend::::new_test(1000, 100); - let blockchain = backend.blockchain(); - - let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); - let block1 = insert_header(&backend, 1, block0, None, Default::default()); - - { - let tree_route = tree_route(blockchain, block0, block1).unwrap(); - - assert_eq!(tree_route.common_block().hash, block0); - assert!(tree_route.retracted().is_empty()); - assert_eq!( - tree_route.enacted().iter().map(|r| r.hash).collect::>(), - vec![block1] - ); - } - } - - #[test] - fn lowest_common_ancestor_works() { - let backend = Backend::::new_test(1000, 100); - let blockchain = backend.blockchain(); - let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); - - // fork from genesis: 3 prong. - let a1 = insert_header(&backend, 1, block0, None, Default::default()); - let a2 = insert_header(&backend, 2, a1, None, Default::default()); - let a3 = insert_header(&backend, 3, a2, None, Default::default()); - - // fork from genesis: 2 prong. - let b1 = insert_header(&backend, 1, block0, None, H256::from([1; 32])); - let b2 = insert_header(&backend, 2, b1, None, Default::default()); - - { - let lca = lowest_common_ancestor(blockchain, a3, b2).unwrap(); - - assert_eq!(lca.hash, block0); - assert_eq!(lca.number, 0); - } - - { - let lca = lowest_common_ancestor(blockchain, a1, a3).unwrap(); - - assert_eq!(lca.hash, a1); - assert_eq!(lca.number, 1); - } - - { - let lca = lowest_common_ancestor(blockchain, a3, a1).unwrap(); - - assert_eq!(lca.hash, a1); - assert_eq!(lca.number, 1); - } - - { - let lca = lowest_common_ancestor(blockchain, a2, a3).unwrap(); - - assert_eq!(lca.hash, a2); - assert_eq!(lca.number, 2); - } - - { - let lca = lowest_common_ancestor(blockchain, a2, a1).unwrap(); - - assert_eq!(lca.hash, a1); - assert_eq!(lca.number, 1); - } - - { - let lca = lowest_common_ancestor(blockchain, a2, a2).unwrap(); - - assert_eq!(lca.hash, a2); - assert_eq!(lca.number, 2); - } - } - - #[test] - fn test_tree_route_regression() { - // NOTE: this is a test for a regression introduced in #3665, the result - // of tree_route would be erroneously computed, since it was taking into - // account the `ancestor` in `CachedHeaderMetadata` for the comparison. 
- // in this test we simulate the same behavior with the side-effect - // triggering the issue being eviction of a previously fetched record - // from the cache, therefore this test is dependent on the LRU cache - // size for header metadata, which is currently set to 5000 elements. - let backend = Backend::::new_test(10000, 10000); - let blockchain = backend.blockchain(); - - let genesis = insert_header(&backend, 0, Default::default(), None, Default::default()); - - let block100 = (1..=100).fold(genesis, |parent, n| { - insert_header(&backend, n, parent, None, Default::default()) - }); - - let block7000 = (101..=7000).fold(block100, |parent, n| { - insert_header(&backend, n, parent, None, Default::default()) - }); - - // This will cause the ancestor of `block100` to be set to `genesis` as a side-effect. - lowest_common_ancestor(blockchain, genesis, block100).unwrap(); - - // While traversing the tree we will have to do 6900 calls to - // `header_metadata`, which will make sure we will exhaust our cache - // which only takes 5000 elements. In particular, the `CachedHeaderMetadata` struct for - // block #100 will be evicted and will get a new value (with ancestor set to its parent). - let tree_route = tree_route(blockchain, block100, block7000).unwrap(); - - assert!(tree_route.retracted().is_empty()); - } - - #[test] - fn test_leaves_with_complex_block_tree() { - let backend: Arc> = - Arc::new(Backend::new_test(20, 20)); - substrate_test_runtime_client::trait_tests::test_leaves_for_backend(backend); - } - - #[test] - fn test_children_with_complex_block_tree() { - let backend: Arc> = - Arc::new(Backend::new_test(20, 20)); - substrate_test_runtime_client::trait_tests::test_children_for_backend(backend); - } - - #[test] - fn test_blockchain_query_by_number_gets_canonical() { - let backend: Arc> = - Arc::new(Backend::new_test(20, 20)); - substrate_test_runtime_client::trait_tests::test_blockchain_query_by_number_gets_canonical( - backend, - ); - } - - #[test] - fn test_leaves_pruned_on_finality() { - let backend: Backend = Backend::new_test(10, 10); - let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); - - let block1_a = insert_header(&backend, 1, block0, None, Default::default()); - let block1_b = insert_header(&backend, 1, block0, None, [1; 32].into()); - let block1_c = insert_header(&backend, 1, block0, None, [2; 32].into()); - - assert_eq!(backend.blockchain().leaves().unwrap(), vec![block1_a, block1_b, block1_c]); - - let block2_a = insert_header(&backend, 2, block1_a, None, Default::default()); - let block2_b = insert_header(&backend, 2, block1_b, None, Default::default()); - let block2_c = insert_header(&backend, 2, block1_b, None, [1; 32].into()); - - assert_eq!( - backend.blockchain().leaves().unwrap(), - vec![block2_a, block2_b, block2_c, block1_c] - ); - - backend.finalize_block(block1_a, None).unwrap(); - backend.finalize_block(block2_a, None).unwrap(); - - // leaves at same height stay. Leaves at lower heights pruned. 
- assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a, block2_b, block2_c]); - } - - #[test] - fn test_aux() { - let backend: Backend = - Backend::new_test(0, 0); - assert!(backend.get_aux(b"test").unwrap().is_none()); - backend.insert_aux(&[(&b"test"[..], &b"hello"[..])], &[]).unwrap(); - assert_eq!(b"hello", &backend.get_aux(b"test").unwrap().unwrap()[..]); - backend.insert_aux(&[], &[&b"test"[..]]).unwrap(); - assert!(backend.get_aux(b"test").unwrap().is_none()); - } - - #[test] - fn test_finalize_block_with_justification() { - use sc_client_api::blockchain::Backend as BlockChainBackend; - - let backend = Backend::::new_test(10, 10); - - let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); - let block1 = insert_header(&backend, 1, block0, None, Default::default()); - - let justification = Some((CONS0_ENGINE_ID, vec![1, 2, 3])); - backend.finalize_block(block1, justification.clone()).unwrap(); - - assert_eq!( - backend.blockchain().justifications(block1).unwrap(), - justification.map(Justifications::from), - ); - } - - #[test] - fn test_append_justification_to_finalized_block() { - use sc_client_api::blockchain::Backend as BlockChainBackend; - - let backend = Backend::::new_test(10, 10); - - let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); - let block1 = insert_header(&backend, 1, block0, None, Default::default()); - - let just0 = (CONS0_ENGINE_ID, vec![1, 2, 3]); - backend.finalize_block(block1, Some(just0.clone().into())).unwrap(); - - let just1 = (CONS1_ENGINE_ID, vec![4, 5]); - backend.append_justification(block1, just1.clone()).unwrap(); - - let just2 = (CONS1_ENGINE_ID, vec![6, 7]); - assert!(matches!( - backend.append_justification(block1, just2), - Err(ClientError::BadJustification(_)) - )); - - let justifications = { - let mut just = Justifications::from(just0); - just.append(just1); - just - }; - assert_eq!(backend.blockchain().justifications(block1).unwrap(), Some(justifications),); - } - - #[test] - fn test_finalize_multiple_blocks_in_single_op() { - let backend = Backend::::new_test(10, 10); - - let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); - let block1 = insert_header(&backend, 1, block0, None, Default::default()); - let block2 = insert_header(&backend, 2, block1, None, Default::default()); - let block3 = insert_header(&backend, 3, block2, None, Default::default()); - let block4 = insert_header(&backend, 4, block3, None, Default::default()); - { - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, block0).unwrap(); - op.mark_finalized(block1, None).unwrap(); - op.mark_finalized(block2, None).unwrap(); - backend.commit_operation(op).unwrap(); - } - { - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, block2).unwrap(); - op.mark_finalized(block3, None).unwrap(); - op.mark_finalized(block4, None).unwrap(); - backend.commit_operation(op).unwrap(); - } - } - - #[test] - fn storage_hash_is_cached_correctly() { - let state_version = StateVersion::default(); - let backend = Backend::::new_test(10, 10); - - let hash0 = { - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, Default::default()).unwrap(); - let mut header = Header { - number: 0, - parent_hash: Default::default(), - state_root: Default::default(), - digest: Default::default(), - extrinsics_root: Default::default(), - }; - - let storage = vec![(b"test".to_vec(), 
b"test".to_vec())]; - - header.state_root = op - .old_state - .storage_root(storage.iter().map(|(x, y)| (&x[..], Some(&y[..]))), state_version) - .0 - .into(); - let hash = header.hash(); - - op.reset_storage( - Storage { - top: storage.into_iter().collect(), - children_default: Default::default(), - }, - state_version, - ) - .unwrap(); - op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best) - .unwrap(); - - backend.commit_operation(op).unwrap(); - - hash - }; - - let block0_hash = backend.state_at(hash0).unwrap().storage_hash(&b"test"[..]).unwrap(); - - let hash1 = { - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, hash0).unwrap(); - let mut header = Header { - number: 1, - parent_hash: hash0, - state_root: Default::default(), - digest: Default::default(), - extrinsics_root: Default::default(), - }; - - let storage = vec![(b"test".to_vec(), Some(b"test2".to_vec()))]; - - let (root, overlay) = op.old_state.storage_root( - storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))), - state_version, - ); - op.update_db_storage(overlay).unwrap(); - header.state_root = root.into(); - let hash = header.hash(); - - op.update_storage(storage, Vec::new()).unwrap(); - op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Normal) - .unwrap(); - - backend.commit_operation(op).unwrap(); - - hash - }; - - { - let header = backend.blockchain().header(BlockId::Hash(hash1)).unwrap().unwrap(); - let mut op = backend.begin_operation().unwrap(); - op.set_block_data(header, None, None, None, NewBlockState::Best).unwrap(); - backend.commit_operation(op).unwrap(); - } - - let block1_hash = backend.state_at(hash1).unwrap().storage_hash(&b"test"[..]).unwrap(); - - assert_ne!(block0_hash, block1_hash); - } - - #[test] - fn test_finalize_non_sequential() { - let backend = Backend::::new_test(10, 10); - - let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); - let block1 = insert_header(&backend, 1, block0, None, Default::default()); - let block2 = insert_header(&backend, 2, block1, None, Default::default()); - { - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, block0).unwrap(); - op.mark_finalized(block2, None).unwrap(); - backend.commit_operation(op).unwrap_err(); - } - } - - #[test] - fn prune_blocks_on_finalize() { - let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(2), 0); - let mut blocks = Vec::new(); - let mut prev_hash = Default::default(); - for i in 0..5 { - let hash = insert_block( - &backend, - i, - prev_hash, - None, - Default::default(), - vec![i.into()], - None, - ) - .unwrap(); - blocks.push(hash); - prev_hash = hash; - } - - { - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, blocks[4]).unwrap(); - for i in 1..5 { - op.mark_finalized(blocks[i], None).unwrap(); - } - backend.commit_operation(op).unwrap(); - } - let bc = backend.blockchain(); - assert_eq!(None, bc.body(blocks[0]).unwrap()); - assert_eq!(None, bc.body(blocks[1]).unwrap()); - assert_eq!(None, bc.body(blocks[2]).unwrap()); - assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); - } - - #[test] - fn prune_blocks_on_finalize_in_keep_all() { - let backend = Backend::::new_test_with_tx_storage(BlocksPruning::KeepAll, 0); - let mut blocks = Vec::new(); - let mut prev_hash = Default::default(); - for i in 0..5 { - let hash = insert_block( - 
&backend, - i, - prev_hash, - None, - Default::default(), - vec![i.into()], - None, - ) - .unwrap(); - blocks.push(hash); - prev_hash = hash; - } - - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, blocks[4]).unwrap(); - for i in 1..3 { - op.mark_finalized(blocks[i], None).unwrap(); - } - backend.commit_operation(op).unwrap(); - - let bc = backend.blockchain(); - assert_eq!(Some(vec![0.into()]), bc.body(blocks[0]).unwrap()); - assert_eq!(Some(vec![1.into()]), bc.body(blocks[1]).unwrap()); - assert_eq!(Some(vec![2.into()]), bc.body(blocks[2]).unwrap()); - assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); - } - - #[test] - fn prune_blocks_on_finalize_with_fork_in_keep_all() { - let backend = Backend::::new_test_with_tx_storage(BlocksPruning::KeepAll, 10); - let mut blocks = Vec::new(); - let mut prev_hash = Default::default(); - for i in 0..5 { - let hash = insert_block( - &backend, - i, - prev_hash, - None, - Default::default(), - vec![i.into()], - None, - ) - .unwrap(); - blocks.push(hash); - prev_hash = hash; - } - - // insert a fork at block 2 - let fork_hash_root = insert_block( - &backend, - 2, - blocks[1], - None, - sp_core::H256::random(), - vec![2.into()], - None, - ) - .unwrap(); - insert_block( - &backend, - 3, - fork_hash_root, - None, - H256::random(), - vec![3.into(), 11.into()], - None, - ) - .unwrap(); - - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, blocks[4]).unwrap(); - op.mark_head(blocks[4]).unwrap(); - backend.commit_operation(op).unwrap(); - - let bc = backend.blockchain(); - assert_eq!(Some(vec![2.into()]), bc.body(fork_hash_root).unwrap()); - - for i in 1..5 { - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, blocks[i]).unwrap(); - op.mark_finalized(blocks[i], None).unwrap(); - backend.commit_operation(op).unwrap(); - } - - assert_eq!(Some(vec![0.into()]), bc.body(blocks[0]).unwrap()); - assert_eq!(Some(vec![1.into()]), bc.body(blocks[1]).unwrap()); - assert_eq!(Some(vec![2.into()]), bc.body(blocks[2]).unwrap()); - assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); - - assert_eq!(Some(vec![2.into()]), bc.body(fork_hash_root).unwrap()); - assert_eq!(bc.info().best_number, 4); - for i in 0..5 { - assert!(bc.hash(i).unwrap().is_some()); - } - } - - #[test] - fn prune_blocks_on_finalize_with_fork() { - let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(2), 10); - let mut blocks = Vec::new(); - let mut prev_hash = Default::default(); - for i in 0..5 { - let hash = insert_block( - &backend, - i, - prev_hash, - None, - Default::default(), - vec![i.into()], - None, - ) - .unwrap(); - blocks.push(hash); - prev_hash = hash; - } - - // insert a fork at block 2 - let fork_hash_root = insert_block( - &backend, - 2, - blocks[1], - None, - sp_core::H256::random(), - vec![2.into()], - None, - ) - .unwrap(); - insert_block( - &backend, - 3, - fork_hash_root, - None, - H256::random(), - vec![3.into(), 11.into()], - None, - ) - .unwrap(); - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, blocks[4]).unwrap(); - op.mark_head(blocks[4]).unwrap(); - backend.commit_operation(op).unwrap(); - - for i in 1..5 { - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, blocks[4]).unwrap(); - op.mark_finalized(blocks[i], 
None).unwrap(); - backend.commit_operation(op).unwrap(); - } - - let bc = backend.blockchain(); - assert_eq!(None, bc.body(blocks[0]).unwrap()); - assert_eq!(None, bc.body(blocks[1]).unwrap()); - assert_eq!(None, bc.body(blocks[2]).unwrap()); - assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); - } - - #[test] - fn indexed_data_block_body() { - let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(1), 10); - - let x0 = ExtrinsicWrapper::from(0u64).encode(); - let x1 = ExtrinsicWrapper::from(1u64).encode(); - let x0_hash = as sp_core::Hasher>::hash(&x0[1..]); - let x1_hash = as sp_core::Hasher>::hash(&x1[1..]); - let index = vec![ - IndexOperation::Insert { - extrinsic: 0, - hash: x0_hash.as_ref().to_vec(), - size: (x0.len() - 1) as u32, - }, - IndexOperation::Insert { - extrinsic: 1, - hash: x1_hash.as_ref().to_vec(), - size: (x1.len() - 1) as u32, - }, - ]; - let hash = insert_block( - &backend, - 0, - Default::default(), - None, - Default::default(), - vec![0u64.into(), 1u64.into()], - Some(index), - ) - .unwrap(); - let bc = backend.blockchain(); - assert_eq!(bc.indexed_transaction(x0_hash).unwrap().unwrap(), &x0[1..]); - assert_eq!(bc.indexed_transaction(x1_hash).unwrap().unwrap(), &x1[1..]); - - let hashof0 = bc.info().genesis_hash; - // Push one more blocks and make sure block is pruned and transaction index is cleared. - let block1 = - insert_block(&backend, 1, hash, None, Default::default(), vec![], None).unwrap(); - backend.finalize_block(block1, None).unwrap(); - assert_eq!(bc.body(hashof0).unwrap(), None); - assert_eq!(bc.indexed_transaction(x0_hash).unwrap(), None); - assert_eq!(bc.indexed_transaction(x1_hash).unwrap(), None); - } - - #[test] - fn index_invalid_size() { - let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(1), 10); - - let x0 = ExtrinsicWrapper::from(0u64).encode(); - let x1 = ExtrinsicWrapper::from(1u64).encode(); - let x0_hash = as sp_core::Hasher>::hash(&x0[..]); - let x1_hash = as sp_core::Hasher>::hash(&x1[..]); - let index = vec![ - IndexOperation::Insert { - extrinsic: 0, - hash: x0_hash.as_ref().to_vec(), - size: (x0.len()) as u32, - }, - IndexOperation::Insert { - extrinsic: 1, - hash: x1_hash.as_ref().to_vec(), - size: (x1.len() + 1) as u32, - }, - ]; - insert_block( - &backend, - 0, - Default::default(), - None, - Default::default(), - vec![0u64.into(), 1u64.into()], - Some(index), - ) - .unwrap(); - let bc = backend.blockchain(); - assert_eq!(bc.indexed_transaction(x0_hash).unwrap().unwrap(), &x0[..]); - assert_eq!(bc.indexed_transaction(x1_hash).unwrap(), None); - } - - #[test] - fn renew_transaction_storage() { - let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(2), 10); - let mut blocks = Vec::new(); - let mut prev_hash = Default::default(); - let x1 = ExtrinsicWrapper::from(0u64).encode(); - let x1_hash = as sp_core::Hasher>::hash(&x1[1..]); - for i in 0..10 { - let mut index = Vec::new(); - if i == 0 { - index.push(IndexOperation::Insert { - extrinsic: 0, - hash: x1_hash.as_ref().to_vec(), - size: (x1.len() - 1) as u32, - }); - } else if i < 5 { - // keep renewing 1st - index.push(IndexOperation::Renew { extrinsic: 0, hash: x1_hash.as_ref().to_vec() }); - } // else stop renewing - let hash = insert_block( - &backend, - i, - prev_hash, - None, - Default::default(), - vec![i.into()], - Some(index), - ) - .unwrap(); - blocks.push(hash); - prev_hash = hash; - } - - for i in 1..10 { - let mut op = 
backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, blocks[4]).unwrap(); - op.mark_finalized(blocks[i], None).unwrap(); - backend.commit_operation(op).unwrap(); - let bc = backend.blockchain(); - if i < 6 { - assert!(bc.indexed_transaction(x1_hash).unwrap().is_some()); - } else { - assert!(bc.indexed_transaction(x1_hash).unwrap().is_none()); - } - } - } - - #[test] - fn remove_leaf_block_works() { - let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(2), 10); - let mut blocks = Vec::new(); - let mut prev_hash = Default::default(); - for i in 0..2 { - let hash = insert_block( - &backend, - i, - prev_hash, - None, - Default::default(), - vec![i.into()], - None, - ) - .unwrap(); - blocks.push(hash); - prev_hash = hash; - } - - for i in 0..2 { - let hash = insert_block( - &backend, - 2, - blocks[1], - None, - sp_core::H256::random(), - vec![i.into()], - None, - ) - .unwrap(); - blocks.push(hash); - } - - // insert a fork at block 1, which becomes best block - let best_hash = insert_block( - &backend, - 1, - blocks[0], - None, - sp_core::H256::random(), - vec![42.into()], - None, - ) - .unwrap(); - - assert_eq!(backend.blockchain().info().best_hash, best_hash); - assert!(backend.remove_leaf_block(best_hash).is_err()); - - assert_eq!(backend.blockchain().leaves().unwrap(), vec![blocks[2], blocks[3], best_hash]); - assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![blocks[2], blocks[3]]); - - assert!(backend.have_state_at(blocks[3], 2)); - assert!(backend.blockchain().header(BlockId::hash(blocks[3])).unwrap().is_some()); - backend.remove_leaf_block(blocks[3]).unwrap(); - assert!(!backend.have_state_at(blocks[3], 2)); - assert!(backend.blockchain().header(BlockId::hash(blocks[3])).unwrap().is_none()); - assert_eq!(backend.blockchain().leaves().unwrap(), vec![blocks[2], best_hash]); - assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![blocks[2]]); - - assert!(backend.have_state_at(blocks[2], 2)); - assert!(backend.blockchain().header(BlockId::hash(blocks[2])).unwrap().is_some()); - backend.remove_leaf_block(blocks[2]).unwrap(); - assert!(!backend.have_state_at(blocks[2], 2)); - assert!(backend.blockchain().header(BlockId::hash(blocks[2])).unwrap().is_none()); - assert_eq!(backend.blockchain().leaves().unwrap(), vec![best_hash, blocks[1]]); - assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![]); - - assert!(backend.have_state_at(blocks[1], 1)); - assert!(backend.blockchain().header(BlockId::hash(blocks[1])).unwrap().is_some()); - backend.remove_leaf_block(blocks[1]).unwrap(); - assert!(!backend.have_state_at(blocks[1], 1)); - assert!(backend.blockchain().header(BlockId::hash(blocks[1])).unwrap().is_none()); - assert_eq!(backend.blockchain().leaves().unwrap(), vec![best_hash]); - assert_eq!(backend.blockchain().children(blocks[0]).unwrap(), vec![best_hash]); - } - - #[test] - fn test_import_existing_block_as_new_head() { - let backend: Backend = Backend::new_test(10, 3); - let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); - let block1 = insert_header(&backend, 1, block0, None, Default::default()); - let block2 = insert_header(&backend, 2, block1, None, Default::default()); - let block3 = insert_header(&backend, 3, block2, None, Default::default()); - let block4 = insert_header(&backend, 4, block3, None, Default::default()); - let block5 = insert_header(&backend, 5, block4, None, Default::default()); - assert_eq!(backend.blockchain().info().best_hash, block5); - - // Insert 
1 as best again. This should fail because canonicalization_delay == 3 and best == - // 5 - let header = Header { - number: 1, - parent_hash: block0, - state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1), - digest: Default::default(), - extrinsics_root: Default::default(), - }; - let mut op = backend.begin_operation().unwrap(); - op.set_block_data(header, None, None, None, NewBlockState::Best).unwrap(); - assert!(matches!(backend.commit_operation(op), Err(sp_blockchain::Error::SetHeadTooOld))); - - // Insert 2 as best again. - let header = Header { - number: 2, - parent_hash: block1, - state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1), - digest: Default::default(), - extrinsics_root: Default::default(), - }; - let mut op = backend.begin_operation().unwrap(); - op.set_block_data(header, None, None, None, NewBlockState::Best).unwrap(); - backend.commit_operation(op).unwrap(); - assert_eq!(backend.blockchain().info().best_hash, block2); - } - - #[test] - fn test_import_existing_block_as_final() { - let backend: Backend = Backend::new_test(10, 10); - let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); - let block1 = insert_header(&backend, 1, block0, None, Default::default()); - let _block2 = insert_header(&backend, 2, block1, None, Default::default()); - // Genesis is auto finalized, the rest are not. - assert_eq!(backend.blockchain().info().finalized_hash, block0); - - // Insert 1 as final again. - let header = Header { - number: 1, - parent_hash: block0, - state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1), - digest: Default::default(), - extrinsics_root: Default::default(), - }; - - let mut op = backend.begin_operation().unwrap(); - op.set_block_data(header, None, None, None, NewBlockState::Final).unwrap(); - backend.commit_operation(op).unwrap(); - - assert_eq!(backend.blockchain().info().finalized_hash, block1); - } - - #[test] - fn test_import_existing_state_fails() { - let backend: Backend = Backend::new_test(10, 10); - let genesis = - insert_block(&backend, 0, Default::default(), None, Default::default(), vec![], None) - .unwrap(); - - insert_block(&backend, 1, genesis, None, Default::default(), vec![], None).unwrap(); - let err = insert_block(&backend, 1, genesis, None, Default::default(), vec![], None) - .err() - .unwrap(); - match err { - sp_blockchain::Error::StateDatabase(m) if m == "Block already exists" => (), - e @ _ => panic!("Unexpected error {:?}", e), - } - } - - #[test] - fn test_leaves_not_created_for_ancient_blocks() { - let backend: Backend = Backend::new_test(10, 10); - let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); - - let block1_a = insert_header(&backend, 1, block0, None, Default::default()); - let block2_a = insert_header(&backend, 2, block1_a, None, Default::default()); - backend.finalize_block(block1_a, None).unwrap(); - assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a]); - - // Insert a fork prior to finalization point. Leave should not be created. 
- insert_header_no_head(&backend, 1, block0, [1; 32].into()); - assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a]); - } - - #[test] - fn revert_non_best_blocks() { - let backend = Backend::::new_test(10, 10); - - let genesis = - insert_block(&backend, 0, Default::default(), None, Default::default(), vec![], None) - .unwrap(); - - let block1 = - insert_block(&backend, 1, genesis, None, Default::default(), vec![], None).unwrap(); - - let block2 = - insert_block(&backend, 2, block1, None, Default::default(), vec![], None).unwrap(); - - let block3 = { - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, block1).unwrap(); - let header = Header { - number: 3, - parent_hash: block2, - state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1), - digest: Default::default(), - extrinsics_root: Default::default(), - }; - - op.set_block_data(header.clone(), Some(Vec::new()), None, None, NewBlockState::Normal) - .unwrap(); - - backend.commit_operation(op).unwrap(); - - header.hash() - }; - - let block4 = { - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, block2).unwrap(); - let header = Header { - number: 4, - parent_hash: block3, - state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1), - digest: Default::default(), - extrinsics_root: Default::default(), - }; - - op.set_block_data(header.clone(), Some(Vec::new()), None, None, NewBlockState::Normal) - .unwrap(); - - backend.commit_operation(op).unwrap(); - - header.hash() - }; - - let block3_fork = { - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, block2).unwrap(); - let header = Header { - number: 3, - parent_hash: block2, - state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1), - digest: Default::default(), - extrinsics_root: H256::from_low_u64_le(42), - }; - - op.set_block_data(header.clone(), Some(Vec::new()), None, None, NewBlockState::Normal) - .unwrap(); - - backend.commit_operation(op).unwrap(); - - header.hash() - }; - - assert!(backend.have_state_at(block1, 1)); - assert!(backend.have_state_at(block2, 2)); - assert!(backend.have_state_at(block3, 3)); - assert!(backend.have_state_at(block4, 4)); - assert!(backend.have_state_at(block3_fork, 3)); - - assert_eq!(backend.blockchain.leaves().unwrap(), vec![block4, block3_fork]); - assert_eq!(4, backend.blockchain.leaves.read().highest_leaf().unwrap().0); - - assert_eq!(3, backend.revert(1, false).unwrap().0); - - assert!(backend.have_state_at(block1, 1)); - assert!(!backend.have_state_at(block2, 2)); - assert!(!backend.have_state_at(block3, 3)); - assert!(!backend.have_state_at(block4, 4)); - assert!(!backend.have_state_at(block3_fork, 3)); - - assert_eq!(backend.blockchain.leaves().unwrap(), vec![block1]); - assert_eq!(1, backend.blockchain.leaves.read().highest_leaf().unwrap().0); - } - - #[test] - fn test_no_duplicated_leaves_allowed() { - let backend: Backend = Backend::new_test(10, 10); - let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); - let block1 = insert_header(&backend, 1, block0, None, Default::default()); - // Add block 2 not as the best block - let block2 = insert_header_no_head(&backend, 2, block1, Default::default()); - assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2]); - assert_eq!(backend.blockchain().info().best_hash, block1); - - // Add block 2 as the best block - let block2 = insert_header(&backend, 2, block1, None, Default::default()); - 
assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2]); - assert_eq!(backend.blockchain().info().best_hash, block2); - } -} diff --git a/substrate/client/db/src/offchain.rs b/substrate/client/db/src/offchain.rs deleted file mode 100644 index 030a4109..00000000 --- a/substrate/client/db/src/offchain.rs +++ /dev/null @@ -1,150 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! RocksDB-based offchain workers local storage. - -use std::{collections::HashMap, sync::Arc}; - -use crate::{columns, Database, DbHash, Transaction}; -use log::error; -use parking_lot::Mutex; - -/// Offchain local storage -#[derive(Clone)] -pub struct LocalStorage { - db: Arc>, - locks: Arc, Arc>>>>, -} - -impl std::fmt::Debug for LocalStorage { - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - fmt.debug_struct("LocalStorage").finish() - } -} - -impl LocalStorage { - /// Create new offchain storage for tests (backed by memorydb) - #[cfg(any(feature = "test-helpers", test))] - pub fn new_test() -> Self { - let db = kvdb_memorydb::create(crate::utils::NUM_COLUMNS); - let db = sp_database::as_database(db); - Self::new(db as _) - } - - /// Create offchain local storage with given `KeyValueDB` backend. 
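// Editor's sketch (not part of the patch, all names hypothetical): the
// `compare_and_set` implementation below guards each key with its own lock taken
// from a shared map, so the read/compare/write sequence is atomic per key while
// unrelated keys stay uncontended, and the map entry is dropped again once no
// other thread still holds the per-key lock. The same idea with std types only:
use std::collections::HashMap;
use std::sync::{Arc, Mutex};

struct CasStore {
    values: Mutex<HashMap<Vec<u8>, Vec<u8>>>,
    locks: Mutex<HashMap<Vec<u8>, Arc<Mutex<()>>>>,
}

impl CasStore {
    fn compare_and_set(&self, key: &[u8], old: Option<&[u8]>, new: &[u8]) -> bool {
        // Take (or create) the lock dedicated to this key.
        let key_lock = {
            let mut locks = self.locks.lock().unwrap();
            locks.entry(key.to_vec()).or_default().clone()
        };
        let is_set = {
            let _guard = key_lock.lock().unwrap();
            let mut values = self.values.lock().unwrap();
            if values.get(key).map(|v| v.as_slice()) == old {
                values.insert(key.to_vec(), new.to_vec());
                true
            } else {
                false
            }
        };
        // Prune the lock map if we are the only holder, mirroring the clean-up
        // done by `LocalStorage::compare_and_set`.
        let mut locks = self.locks.lock().unwrap();
        drop(key_lock);
        if locks.get_mut(key).and_then(Arc::get_mut).is_some() {
            locks.remove(key);
        }
        is_set
    }
}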
- pub fn new(db: Arc>) -> Self { - Self { db, locks: Default::default() } - } -} - -impl sp_core::offchain::OffchainStorage for LocalStorage { - fn set(&mut self, prefix: &[u8], key: &[u8], value: &[u8]) { - let mut tx = Transaction::new(); - tx.set(columns::OFFCHAIN, &concatenate_prefix_and_key(prefix, key), value); - - if let Err(err) = self.db.commit(tx) { - error!("Error setting on local storage: {}", err) - } - } - - fn remove(&mut self, prefix: &[u8], key: &[u8]) { - let mut tx = Transaction::new(); - tx.remove(columns::OFFCHAIN, &concatenate_prefix_and_key(prefix, key)); - - if let Err(err) = self.db.commit(tx) { - error!("Error removing on local storage: {}", err) - } - } - - fn get(&self, prefix: &[u8], key: &[u8]) -> Option> { - self.db.get(columns::OFFCHAIN, &concatenate_prefix_and_key(prefix, key)) - } - - fn compare_and_set( - &mut self, - prefix: &[u8], - item_key: &[u8], - old_value: Option<&[u8]>, - new_value: &[u8], - ) -> bool { - let key = concatenate_prefix_and_key(prefix, item_key); - let key_lock = { - let mut locks = self.locks.lock(); - locks.entry(key.clone()).or_default().clone() - }; - - let is_set; - { - let _key_guard = key_lock.lock(); - let val = self.db.get(columns::OFFCHAIN, &key); - is_set = val.as_deref() == old_value; - - if is_set { - self.set(prefix, item_key, new_value) - } - } - - // clean the lock map if we're the only entry - let mut locks = self.locks.lock(); - { - drop(key_lock); - let key_lock = locks.get_mut(&key); - if key_lock.and_then(Arc::get_mut).is_some() { - locks.remove(&key); - } - } - is_set - } -} - -/// Concatenate the prefix and key to create an offchain key in the db. -pub(crate) fn concatenate_prefix_and_key(prefix: &[u8], key: &[u8]) -> Vec { - prefix.iter().chain(key.iter()).cloned().collect() -} - -#[cfg(test)] -mod tests { - use super::*; - use sp_core::offchain::OffchainStorage; - - #[test] - fn should_compare_and_set_and_clear_the_locks_map() { - let mut storage = LocalStorage::new_test(); - let prefix = b"prefix"; - let key = b"key"; - let value = b"value"; - - storage.set(prefix, key, value); - assert_eq!(storage.get(prefix, key), Some(value.to_vec())); - - assert_eq!(storage.compare_and_set(prefix, key, Some(value), b"asd"), true); - assert_eq!(storage.get(prefix, key), Some(b"asd".to_vec())); - assert!(storage.locks.lock().is_empty(), "Locks map should be empty!"); - } - - #[test] - fn should_compare_and_set_on_empty_field() { - let mut storage = LocalStorage::new_test(); - let prefix = b"prefix"; - let key = b"key"; - - assert_eq!(storage.compare_and_set(prefix, key, None, b"asd"), true); - assert_eq!(storage.get(prefix, key), Some(b"asd".to_vec())); - assert!(storage.locks.lock().is_empty(), "Locks map should be empty!"); - } -} diff --git a/substrate/client/db/src/parity_db.rs b/substrate/client/db/src/parity_db.rs deleted file mode 100644 index 4adacbf6..00000000 --- a/substrate/client/db/src/parity_db.rs +++ /dev/null @@ -1,162 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . -use crate::{ - columns, - utils::{DatabaseType, NUM_COLUMNS}, -}; -/// A `Database` adapter for parity-db. -use sp_database::{error::DatabaseError, Change, ColumnId, Database, Transaction}; - -struct DbAdapter(parity_db::Db); - -fn handle_err(result: parity_db::Result) -> T { - match result { - Ok(r) => r, - Err(e) => { - panic!("Critical database error: {:?}", e); - }, - } -} - -/// Wrap parity-db database into a trait object that implements `sp_database::Database` -pub fn open>( - path: &std::path::Path, - db_type: DatabaseType, - create: bool, - upgrade: bool, -) -> parity_db::Result>> { - let mut config = parity_db::Options::with_columns(path, NUM_COLUMNS as u8); - - match db_type { - DatabaseType::Full => { - let compressed = [ - columns::STATE, - columns::HEADER, - columns::BODY, - columns::BODY_INDEX, - columns::TRANSACTION, - columns::JUSTIFICATIONS, - ]; - - for i in compressed { - let mut column = &mut config.columns[i as usize]; - column.compression = parity_db::CompressionType::Lz4; - } - - let mut state_col = &mut config.columns[columns::STATE as usize]; - state_col.ref_counted = true; - state_col.preimage = true; - state_col.uniform = true; - - let mut tx_col = &mut config.columns[columns::TRANSACTION as usize]; - tx_col.ref_counted = true; - tx_col.preimage = true; - tx_col.uniform = true; - }, - } - - if upgrade { - log::info!("Upgrading database metadata."); - if let Some(meta) = parity_db::Options::load_metadata(path)? { - config.write_metadata_with_version(path, &meta.salt, Some(meta.version))?; - } - } - - let db = if create { - parity_db::Db::open_or_create(&config)? - } else { - parity_db::Db::open(&config)? - }; - - Ok(std::sync::Arc::new(DbAdapter(db))) -} - -fn ref_counted_column(col: u32) -> bool { - col == columns::TRANSACTION || col == columns::STATE -} - -impl> Database for DbAdapter { - fn commit(&self, transaction: Transaction) -> Result<(), DatabaseError> { - let mut not_ref_counted_column = Vec::new(); - let result = self.0.commit(transaction.0.into_iter().filter_map(|change| { - Some(match change { - Change::Set(col, key, value) => (col as u8, key, Some(value)), - Change::Remove(col, key) => (col as u8, key, None), - Change::Store(col, key, value) => - if ref_counted_column(col) { - (col as u8, key.as_ref().to_vec(), Some(value)) - } else { - if !not_ref_counted_column.contains(&col) { - not_ref_counted_column.push(col); - } - return None - }, - Change::Reference(col, key) => { - if ref_counted_column(col) { - // FIXME accessing value is not strictly needed, optimize this in parity-db. 
- let value = >::get(self, col, key.as_ref()); - (col as u8, key.as_ref().to_vec(), value) - } else { - if !not_ref_counted_column.contains(&col) { - not_ref_counted_column.push(col); - } - return None - } - }, - Change::Release(col, key) => - if ref_counted_column(col) { - (col as u8, key.as_ref().to_vec(), None) - } else { - if !not_ref_counted_column.contains(&col) { - not_ref_counted_column.push(col); - } - return None - }, - }) - })); - - if not_ref_counted_column.len() > 0 { - return Err(DatabaseError(Box::new(parity_db::Error::InvalidInput(format!( - "Ref counted operation on non ref counted columns {:?}", - not_ref_counted_column - ))))) - } - - result.map_err(|e| DatabaseError(Box::new(e))) - } - - fn get(&self, col: ColumnId, key: &[u8]) -> Option> { - handle_err(self.0.get(col as u8, key)) - } - - fn contains(&self, col: ColumnId, key: &[u8]) -> bool { - handle_err(self.0.get_size(col as u8, key)).is_some() - } - - fn value_size(&self, col: ColumnId, key: &[u8]) -> Option { - handle_err(self.0.get_size(col as u8, key)).map(|s| s as usize) - } - - fn supports_ref_counting(&self) -> bool { - true - } - - fn sanitize_key(&self, key: &mut Vec) { - let _prefix = key.drain(0..key.len() - crate::DB_HASH_LEN); - } -} diff --git a/substrate/client/db/src/record_stats_state.rs b/substrate/client/db/src/record_stats_state.rs deleted file mode 100644 index 0b51d3fe..00000000 --- a/substrate/client/db/src/record_stats_state.rs +++ /dev/null @@ -1,230 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Provides [`RecordStatsState`] for recording stats about state access. - -use crate::stats::StateUsageStats; -use sp_core::storage::ChildInfo; -use sp_runtime::{ - traits::{Block as BlockT, HashFor}, - StateVersion, -}; -use sp_state_machine::{ - backend::{AsTrieBackend, Backend as StateBackend}, - TrieBackend, -}; -use std::sync::Arc; - -/// State abstraction for recording stats about state access. -pub struct RecordStatsState { - /// Usage statistics - usage: StateUsageStats, - /// State machine registered stats - overlay_stats: sp_state_machine::StateMachineStats, - /// Backing state. - state: S, - /// The hash of the block is state belongs to. - block_hash: Option, - /// The usage statistics of the backend. These will be updated on drop. - state_usage: Arc, -} - -impl std::fmt::Debug for RecordStatsState { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "Block {:?}", self.block_hash) - } -} - -impl Drop for RecordStatsState { - fn drop(&mut self) { - self.state_usage.merge_sm(self.usage.take()); - } -} - -impl>, B: BlockT> RecordStatsState { - /// Create a new instance wrapping generic State and shared cache. 
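// Editor's sketch (not part of the patch): `RecordStatsState` is a decorator that
// forwards every query to the wrapped state backend, tallies the reads into its
// local `StateUsageStats`, and merges that tally into the shared backend-wide
// statistics when it is dropped (see the `Drop` impl above). The same pattern,
// reduced to a single counter and hypothetical names, looks like this:
use std::sync::{
    atomic::{AtomicU64, Ordering},
    Arc,
};

trait StateRead {
    fn storage(&self, key: &[u8]) -> Option<Vec<u8>>;
}

struct RecordingState<S> {
    state: S,
    reads: AtomicU64,
    shared_reads: Arc<AtomicU64>,
}

impl<S: StateRead> StateRead for RecordingState<S> {
    fn storage(&self, key: &[u8]) -> Option<Vec<u8>> {
        // Tally the read locally, then delegate to the wrapped backend.
        self.reads.fetch_add(1, Ordering::Relaxed);
        self.state.storage(key)
    }
}

impl<S> Drop for RecordingState<S> {
    fn drop(&mut self) {
        // Flush the local tally into the shared statistics on drop.
        self.shared_reads
            .fetch_add(self.reads.swap(0, Ordering::Relaxed), Ordering::Relaxed);
    }
}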
- pub(crate) fn new( - state: S, - block_hash: Option, - state_usage: Arc, - ) -> Self { - RecordStatsState { - usage: StateUsageStats::new(), - overlay_stats: sp_state_machine::StateMachineStats::default(), - state, - block_hash, - state_usage, - } - } -} - -impl>, B: BlockT> StateBackend> for RecordStatsState { - type Error = S::Error; - type Transaction = S::Transaction; - type TrieBackendStorage = S::TrieBackendStorage; - - fn storage(&self, key: &[u8]) -> Result>, Self::Error> { - let value = self.state.storage(key)?; - self.usage.tally_key_read(key, value.as_ref(), false); - Ok(value) - } - - fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { - self.state.storage_hash(key) - } - - fn child_storage( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result>, Self::Error> { - let key = (child_info.storage_key().to_vec(), key.to_vec()); - let value = self.state.child_storage(child_info, &key.1)?; - - // just pass it through the usage counter - let value = self.usage.tally_child_key_read(&key, value, false); - - Ok(value) - } - - fn child_storage_hash( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result, Self::Error> { - self.state.child_storage_hash(child_info, key) - } - - fn exists_storage(&self, key: &[u8]) -> Result { - self.state.exists_storage(key) - } - - fn exists_child_storage( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result { - self.state.exists_child_storage(child_info, key) - } - - fn apply_to_key_values_while, Vec) -> bool>( - &self, - child_info: Option<&ChildInfo>, - prefix: Option<&[u8]>, - start_at: Option<&[u8]>, - f: F, - allow_missing: bool, - ) -> Result { - self.state - .apply_to_key_values_while(child_info, prefix, start_at, f, allow_missing) - } - - fn apply_to_keys_while bool>( - &self, - child_info: Option<&ChildInfo>, - prefix: Option<&[u8]>, - start_at: Option<&[u8]>, - f: F, - ) { - self.state.apply_to_keys_while(child_info, prefix, start_at, f) - } - - fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { - self.state.next_storage_key(key) - } - - fn next_child_storage_key( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result>, Self::Error> { - self.state.next_child_storage_key(child_info, key) - } - - fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { - self.state.for_keys_with_prefix(prefix, f) - } - - fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { - self.state.for_key_values_with_prefix(prefix, f) - } - - fn for_child_keys_with_prefix( - &self, - child_info: &ChildInfo, - prefix: &[u8], - f: F, - ) { - self.state.for_child_keys_with_prefix(child_info, prefix, f) - } - - fn storage_root<'a>( - &self, - delta: impl Iterator)>, - state_version: StateVersion, - ) -> (B::Hash, Self::Transaction) - where - B::Hash: Ord, - { - self.state.storage_root(delta, state_version) - } - - fn child_storage_root<'a>( - &self, - child_info: &ChildInfo, - delta: impl Iterator)>, - state_version: StateVersion, - ) -> (B::Hash, bool, Self::Transaction) - where - B::Hash: Ord, - { - self.state.child_storage_root(child_info, delta, state_version) - } - - fn pairs(&self) -> Vec<(Vec, Vec)> { - self.state.pairs() - } - - fn keys(&self, prefix: &[u8]) -> Vec> { - self.state.keys(prefix) - } - - fn child_keys(&self, child_info: &ChildInfo, prefix: &[u8]) -> Vec> { - self.state.child_keys(child_info, prefix) - } - - fn register_overlay_stats(&self, stats: &sp_state_machine::StateMachineStats) { - self.overlay_stats.add(stats); - } - - fn usage_info(&self) -> sp_state_machine::UsageInfo { - let mut info = 
self.usage.take(); - info.include_state_machine_states(&self.overlay_stats); - info - } -} - -impl> + AsTrieBackend>, B: BlockT> AsTrieBackend> - for RecordStatsState -{ - type TrieBackendStorage = >>::TrieBackendStorage; - - fn as_trie_backend(&self) -> &TrieBackend> { - self.state.as_trie_backend() - } -} diff --git a/substrate/client/db/src/stats.rs b/substrate/client/db/src/stats.rs deleted file mode 100644 index f6c14568..00000000 --- a/substrate/client/db/src/stats.rs +++ /dev/null @@ -1,145 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Database usage statistics - -use std::sync::atomic::{AtomicU64, Ordering as AtomicOrdering}; - -/// Accumulated usage statistics for state queries. -pub struct StateUsageStats { - started: std::time::Instant, - reads: AtomicU64, - bytes_read: AtomicU64, - writes: AtomicU64, - bytes_written: AtomicU64, - writes_nodes: AtomicU64, - bytes_written_nodes: AtomicU64, - removed_nodes: AtomicU64, - bytes_removed_nodes: AtomicU64, - reads_cache: AtomicU64, - bytes_read_cache: AtomicU64, -} - -impl StateUsageStats { - /// New empty usage stats. - pub fn new() -> Self { - Self { - started: std::time::Instant::now(), - reads: 0.into(), - bytes_read: 0.into(), - writes: 0.into(), - bytes_written: 0.into(), - writes_nodes: 0.into(), - bytes_written_nodes: 0.into(), - removed_nodes: 0.into(), - bytes_removed_nodes: 0.into(), - reads_cache: 0.into(), - bytes_read_cache: 0.into(), - } - } - - /// Tally one read operation, of some length. - pub fn tally_read(&self, data_bytes: u64, cache: bool) { - self.reads.fetch_add(1, AtomicOrdering::Relaxed); - self.bytes_read.fetch_add(data_bytes, AtomicOrdering::Relaxed); - if cache { - self.reads_cache.fetch_add(1, AtomicOrdering::Relaxed); - self.bytes_read_cache.fetch_add(data_bytes, AtomicOrdering::Relaxed); - } - } - - /// Tally one key read. - pub fn tally_key_read(&self, key: &[u8], val: Option<&Vec>, cache: bool) { - self.tally_read( - key.len() as u64 + val.as_ref().map(|x| x.len() as u64).unwrap_or(0), - cache, - ); - } - - /// Tally one child key read. - pub fn tally_child_key_read( - &self, - key: &(Vec, Vec), - val: Option>, - cache: bool, - ) -> Option> { - let bytes = key.0.len() + key.1.len() + val.as_ref().map(|x| x.len()).unwrap_or(0); - self.tally_read(bytes as u64, cache); - val - } - - /// Tally some write trie nodes operations, including their byte count. - pub fn tally_writes_nodes(&self, ops: u64, data_bytes: u64) { - self.writes_nodes.fetch_add(ops, AtomicOrdering::Relaxed); - self.bytes_written_nodes.fetch_add(data_bytes, AtomicOrdering::Relaxed); - } - - /// Tally some removed trie nodes operations, including their byte count. 
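// Editor's note (illustrative, not part of the patch): every counter in
// `StateUsageStats` is an `AtomicU64`, so tallying only needs `&self` and can
// happen concurrently from many call sites, while `take()` reads and resets each
// counter in one step via `swap(0, Relaxed)`. Reduced to one ops/bytes pair:
use std::sync::atomic::{AtomicU64, Ordering};

#[derive(Default)]
struct UsageCounter {
    ops: AtomicU64,
    bytes: AtomicU64,
}

impl UsageCounter {
    /// Record one operation of `n` bytes through a shared reference.
    fn tally(&self, n: u64) {
        self.ops.fetch_add(1, Ordering::Relaxed);
        self.bytes.fetch_add(n, Ordering::Relaxed);
    }

    /// Return the accumulated totals and reset the counters, the same
    /// swap-to-zero pattern used by `StateUsageStats::take`.
    fn take(&self) -> (u64, u64) {
        (
            self.ops.swap(0, Ordering::Relaxed),
            self.bytes.swap(0, Ordering::Relaxed),
        )
    }
}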
- pub fn tally_removed_nodes(&self, ops: u64, data_bytes: u64) { - self.removed_nodes.fetch_add(ops, AtomicOrdering::Relaxed); - self.bytes_removed_nodes.fetch_add(data_bytes, AtomicOrdering::Relaxed); - } - - /// Tally some write trie nodes operations, including their byte count. - pub fn tally_writes(&self, ops: u64, data_bytes: u64) { - self.writes.fetch_add(ops, AtomicOrdering::Relaxed); - self.bytes_written.fetch_add(data_bytes, AtomicOrdering::Relaxed); - } - - /// Merge state machine usage info. - pub fn merge_sm(&self, info: sp_state_machine::UsageInfo) { - self.reads.fetch_add(info.reads.ops, AtomicOrdering::Relaxed); - self.bytes_read.fetch_add(info.reads.bytes, AtomicOrdering::Relaxed); - self.writes_nodes.fetch_add(info.nodes_writes.ops, AtomicOrdering::Relaxed); - self.bytes_written_nodes - .fetch_add(info.nodes_writes.bytes, AtomicOrdering::Relaxed); - self.removed_nodes.fetch_add(info.removed_nodes.ops, AtomicOrdering::Relaxed); - self.bytes_removed_nodes - .fetch_add(info.removed_nodes.bytes, AtomicOrdering::Relaxed); - self.reads_cache.fetch_add(info.cache_reads.ops, AtomicOrdering::Relaxed); - self.bytes_read_cache.fetch_add(info.cache_reads.bytes, AtomicOrdering::Relaxed); - } - - /// Returns the collected `UsageInfo` and resets the internal state. - pub fn take(&self) -> sp_state_machine::UsageInfo { - use sp_state_machine::UsageUnit; - - fn unit(ops: &AtomicU64, bytes: &AtomicU64) -> UsageUnit { - UsageUnit { - ops: ops.swap(0, AtomicOrdering::Relaxed), - bytes: bytes.swap(0, AtomicOrdering::Relaxed), - } - } - - sp_state_machine::UsageInfo { - reads: unit(&self.reads, &self.bytes_read), - writes: unit(&self.writes, &self.bytes_written), - nodes_writes: unit(&self.writes_nodes, &self.bytes_written_nodes), - removed_nodes: unit(&self.removed_nodes, &self.bytes_removed_nodes), - cache_reads: unit(&self.reads_cache, &self.bytes_read_cache), - modified_reads: Default::default(), - overlay_writes: Default::default(), - // TODO: Proper tracking state of memory footprint here requires - // imposing `MallocSizeOf` requirement on half of the codebase, - // so it is an open question how to do it better - memory: 0, - started: self.started, - span: self.started.elapsed(), - } - } -} diff --git a/substrate/client/db/src/storage_cache.rs b/substrate/client/db/src/storage_cache.rs deleted file mode 100644 index d9253fe0..00000000 --- a/substrate/client/db/src/storage_cache.rs +++ /dev/null @@ -1,1979 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Global state cache. Maintains recently queried/committed state values -//! Tracks changes over the span of a few recent blocks and handles forks -//! by tracking/removing cache entries for conflicting changes. 
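// Editor's sketch (not part of the patch, names hypothetical): the cache below
// pairs LRU maps of storage values with a short history of which keys each recent
// block modified. On a re-org the entries touched by the re-enacted/retracted
// blocks are evicted, and if a block is missing from the history the whole cache
// is wiped because stale entries can no longer be identified. A minimal std-only
// model of that invalidation rule (the per-block canonical flag is omitted here):
use std::collections::{HashMap, HashSet, VecDeque};

struct ForkAwareCache {
    values: HashMap<Vec<u8>, Vec<u8>>,
    // Most recent blocks first: (block hash, keys modified in that block).
    modifications: VecDeque<(u64, HashSet<Vec<u8>>)>,
}

impl ForkAwareCache {
    fn sync(&mut self, reorged_blocks: &[u64]) {
        for block in reorged_blocks {
            let touched = self
                .modifications
                .iter()
                .find(|(hash, _)| hash == block)
                .map(|(_, keys)| keys.clone());
            match touched {
                // Known block: drop exactly the entries it modified.
                Some(keys) => {
                    for key in &keys {
                        self.values.remove(key);
                    }
                }
                // Unknown block: we cannot tell which entries are stale, so wipe
                // everything, just like `Cache::sync` does below.
                None => {
                    self.values.clear();
                    self.modifications.clear();
                    return;
                }
            }
        }
    }
}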
- -use crate::{stats::StateUsageStats, utils::Meta}; -use hash_db::Hasher; -use linked_hash_map::{Entry, LinkedHashMap}; -use log::trace; -use parking_lot::{RwLock, RwLockUpgradableReadGuard}; -use sp_core::{hexdisplay::HexDisplay, storage::ChildInfo}; -use sp_runtime::{ - traits::{Block as BlockT, HashFor, Header, NumberFor}, - StateVersion, -}; -use sp_state_machine::{ - backend::Backend as StateBackend, ChildStorageCollection, StorageCollection, StorageKey, - StorageValue, TrieBackend, -}; -use std::{ - collections::{HashMap, HashSet, VecDeque}, - hash::Hash as StdHash, - sync::Arc, -}; - -const STATE_CACHE_BLOCKS: usize = 12; - -type ChildStorageKey = (Vec, Vec); - -/// Shared canonical state cache. -pub struct Cache { - /// Storage cache. `None` indicates that key is known to be missing. - lru_storage: LRUMap>, - /// Storage hashes cache. `None` indicates that key is known to be missing. - lru_hashes: LRUMap>, - /// Storage cache for child trie. `None` indicates that key is known to be missing. - lru_child_storage: LRUMap>, - /// Information on the modifications in recently committed blocks; specifically which keys - /// changed in which block. Ordered by block number. - modifications: VecDeque>, -} - -struct LRUMap(LinkedHashMap, usize, usize); - -/// Internal trait similar to `heapsize` but using -/// a simply estimation. -/// -/// This should not be made public, it is implementation -/// detail trait. If it need to become public please -/// consider using `malloc_size_of`. -trait EstimateSize { - /// Return a size estimation of additional size needed - /// to cache this struct (in bytes). - fn estimate_size(&self) -> usize; -} - -impl EstimateSize for Vec { - fn estimate_size(&self) -> usize { - self.capacity() - } -} - -impl EstimateSize for Option> { - fn estimate_size(&self) -> usize { - self.as_ref().map(|v| v.capacity()).unwrap_or(0) - } -} - -struct OptionHOut>(Option); - -impl> EstimateSize for OptionHOut { - fn estimate_size(&self) -> usize { - // capacity would be better - self.0.as_ref().map(|v| v.as_ref().len()).unwrap_or(0) - } -} - -impl EstimateSize for (T, T) { - fn estimate_size(&self) -> usize { - self.0.estimate_size() + self.1.estimate_size() - } -} - -impl LRUMap { - fn remove(&mut self, k: &K) { - let map = &mut self.0; - let storage_used_size = &mut self.1; - if let Some(v) = map.remove(k) { - *storage_used_size -= k.estimate_size(); - *storage_used_size -= v.estimate_size(); - } - } - - fn add(&mut self, k: K, v: V) { - let lmap = &mut self.0; - let storage_used_size = &mut self.1; - let limit = self.2; - let klen = k.estimate_size(); - *storage_used_size += v.estimate_size(); - // TODO assert k v size fit into limit?? to avoid insert remove? 
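// Editor's note (added comments, not part of the original source): the entry
// match below first relies on the new value's estimated size having been charged
// just above; an occupied slot then refunds the old value's size, while a vacant
// slot additionally charges the key's size. The loop afterwards pops entries from
// the front of the `LinkedHashMap` (its least recently used end) until the byte
// estimate is back under the configured limit, or the map runs empty.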
- match lmap.entry(k) { - Entry::Occupied(mut entry) => { - // note that in this case we are not running pure lru as - // it would require to remove first - *storage_used_size -= entry.get().estimate_size(); - entry.insert(v); - }, - Entry::Vacant(entry) => { - *storage_used_size += klen; - entry.insert(v); - }, - }; - - while *storage_used_size > limit { - if let Some((k, v)) = lmap.pop_front() { - *storage_used_size -= k.estimate_size(); - *storage_used_size -= v.estimate_size(); - } else { - // can happen fairly often as we get value from multiple lru - // and only remove from a single lru - break - } - } - } - - fn get(&mut self, k: &Q) -> Option<&mut V> - where - K: std::borrow::Borrow, - Q: StdHash + Eq, - { - self.0.get_refresh(k) - } - - fn used_size(&self) -> usize { - self.1 - } - fn clear(&mut self) { - self.0.clear(); - self.1 = 0; - } -} - -impl Cache { - /// Returns the used memory size of the storage cache in bytes. - pub fn used_storage_cache_size(&self) -> usize { - self.lru_storage.used_size() + self.lru_child_storage.used_size() - // ignore small hashes storage and self.lru_hashes.used_size() - } - - /// Synchronize the shared cache with the best block state. - /// - /// This function updates the shared cache by removing entries - /// that are invalidated by chain reorganization. It should be called - /// externally when chain reorg happens without importing a new block. - pub fn sync(&mut self, enacted: &[B::Hash], retracted: &[B::Hash]) { - trace!("Syncing shared cache, enacted = {:?}, retracted = {:?}", enacted, retracted); - - // Purge changes from re-enacted and retracted blocks. - let mut clear = false; - for block in enacted { - clear = clear || { - if let Some(m) = self.modifications.iter_mut().find(|m| &m.hash == block) { - trace!("Reverting enacted block {:?}", block); - m.is_canon = true; - for a in &m.storage { - trace!("Reverting enacted key {:?}", HexDisplay::from(a)); - self.lru_storage.remove(a); - self.lru_hashes.remove(a); - } - for a in &m.child_storage { - trace!("Reverting enacted child key {:?}", a); - self.lru_child_storage.remove(a); - } - false - } else { - true - } - }; - } - - for block in retracted { - clear = clear || { - if let Some(m) = self.modifications.iter_mut().find(|m| &m.hash == block) { - trace!("Retracting block {:?}", block); - m.is_canon = false; - for a in &m.storage { - trace!("Retracted key {:?}", HexDisplay::from(a)); - self.lru_storage.remove(a); - self.lru_hashes.remove(a); - } - for a in &m.child_storage { - trace!("Retracted child key {:?}", a); - self.lru_child_storage.remove(a); - } - false - } else { - true - } - }; - } - if clear { - // We don't know anything about the block; clear everything - trace!("Wiping cache"); - self.lru_storage.clear(); - self.lru_child_storage.clear(); - self.lru_hashes.clear(); - self.modifications.clear(); - } - } -} - -pub type SharedCache = Arc>>; - -/// Fix lru storage size for hash (small 64ko). -const FIX_LRU_HASH_SIZE: usize = 65_536; - -/// Create a new shared cache instance with given max memory usage. 
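// Editor's sketch (not part of the patch): in `new_shared_cache` below the
// `child_ratio` numerator/denominator pair is the fraction of the byte budget
// reserved for child-trie entries, the remainder goes to top-trie values, and
// hashes get a fixed 64 KiB (`FIX_LRU_HASH_SIZE`). For example, a 1 MiB budget
// with a (1, 4) ratio yields 768 KiB for top storage and 256 KiB for child
// storage. The split, written as a hypothetical standalone helper:
fn split_cache_budget(total: usize, child_ratio: (usize, usize)) -> (usize, usize) {
    // Share of the denominator that is left for the top trie.
    let top = child_ratio.1.saturating_sub(child_ratio.0);
    let top_budget = total * top / child_ratio.1;
    let child_budget = total * child_ratio.0 / child_ratio.1;
    (top_budget, child_budget)
}

#[test]
fn budget_split_example() {
    assert_eq!(split_cache_budget(1024 * 1024, (1, 4)), (786_432, 262_144));
}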
-pub fn new_shared_cache( - shared_cache_size: usize, - child_ratio: (usize, usize), -) -> SharedCache { - let top = child_ratio.1.saturating_sub(child_ratio.0); - Arc::new(RwLock::new(Cache { - lru_storage: LRUMap(LinkedHashMap::new(), 0, shared_cache_size * top / child_ratio.1), - lru_hashes: LRUMap(LinkedHashMap::new(), 0, FIX_LRU_HASH_SIZE), - lru_child_storage: LRUMap( - LinkedHashMap::new(), - 0, - shared_cache_size * child_ratio.0 / child_ratio.1, - ), - modifications: VecDeque::new(), - })) -} - -#[derive(Debug)] -/// Accumulates a list of storage changed in a block. -struct BlockChanges { - /// Block number. - number: B::Number, - /// Block hash. - hash: B::Hash, - /// Parent block hash. - parent: B::Hash, - /// A set of modified storage keys. - storage: HashSet, - /// A set of modified child storage keys. - child_storage: HashSet, - /// Block is part of the canonical chain. - is_canon: bool, -} - -/// Cached values specific to a state. -struct LocalCache { - /// Storage cache. - /// - /// `None` indicates that key is known to be missing. - storage: HashMap>, - /// Storage hashes cache. - /// - /// `None` indicates that key is known to be missing. - hashes: HashMap>, - /// Child storage cache. - /// - /// `None` indicates that key is known to be missing. - child_storage: HashMap>, -} - -/// Cache changes. -pub struct CacheChanges { - /// Shared canonical state cache. - shared_cache: SharedCache, - /// Local cache of values for this state. - local_cache: RwLock>>, - /// Hash of the block on top of which this instance was created or - /// `None` if cache is disabled - pub parent_hash: Option, -} - -/// State cache abstraction. -/// -/// Manages shared global state cache which reflects the canonical -/// state as it is on the disk. -/// -/// A instance of `CachingState` may be created as canonical or not. -/// For canonical instances local cache is accumulated and applied -/// in `sync_cache` along with the change overlay. -/// For non-canonical clones local cache and changes are dropped. -pub struct CachingState { - /// Usage statistics - usage: StateUsageStats, - /// State machine registered stats - overlay_stats: sp_state_machine::StateMachineStats, - /// Backing state. - state: S, - /// Cache data. - cache: CacheChanges, -} - -impl std::fmt::Debug for CachingState { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "Block {:?}", self.cache.parent_hash) - } -} - -impl CacheChanges { - /// Propagate local cache into the shared cache and synchronize - /// the shared cache with the best block state. - /// - /// This function updates the shared cache by removing entries - /// that are invalidated by chain reorganization. `sync_cache` - /// should be called after the block has been committed and the - /// blockchain route has been calculated. - pub fn sync_cache( - &mut self, - enacted: &[B::Hash], - retracted: &[B::Hash], - changes: StorageCollection, - child_changes: ChildStorageCollection, - commit_hash: Option, - commit_number: Option>, - is_best: bool, - ) { - let mut cache = self.shared_cache.write(); - trace!( - "Syncing cache, id = (#{:?}, {:?}), parent={:?}, best={}", - commit_number, - commit_hash, - self.parent_hash, - is_best, - ); - let cache = &mut *cache; - // Filter out committing block if any. 
- let mut enacted: Vec<_> = enacted - .iter() - .filter(|h| commit_hash.as_ref().map_or(true, |p| *h != p)) - .cloned() - .collect(); - - let mut retracted = std::borrow::Cow::Borrowed(retracted); - if let Some(commit_hash) = &commit_hash { - if let Some(m) = cache.modifications.iter_mut().find(|m| &m.hash == commit_hash) { - if m.is_canon != is_best { - // Same block comitted twice with different state changes. - // Treat it as reenacted/retracted. - if is_best { - enacted.push(*commit_hash); - } else { - retracted.to_mut().push(*commit_hash); - } - } - } - } - cache.sync(&enacted, &retracted); - // Propagate cache only if committing on top of the latest canonical state - // blocks are ordered by number and only one block with a given number is marked as - // canonical (contributed to canonical state cache) - if self.parent_hash.is_some() { - let mut local_cache = self.local_cache.write(); - if is_best { - trace!( - "Committing {} local, {} hashes, {} modified root entries, {} modified child entries", - local_cache.storage.len(), - local_cache.hashes.len(), - changes.len(), - child_changes.iter().map(|v|v.1.len()).sum::(), - ); - for (k, v) in local_cache.storage.drain() { - cache.lru_storage.add(k, v); - } - for (k, v) in local_cache.child_storage.drain() { - cache.lru_child_storage.add(k, v); - } - for (k, v) in local_cache.hashes.drain() { - cache.lru_hashes.add(k, OptionHOut(v)); - } - } - } - - if let (Some(ref number), Some(ref hash), Some(ref parent)) = - (commit_number, commit_hash, self.parent_hash) - { - if cache.modifications.len() == STATE_CACHE_BLOCKS { - cache.modifications.pop_back(); - } - let mut modifications = HashSet::new(); - let mut child_modifications = HashSet::new(); - child_changes.into_iter().for_each(|(sk, changes)| { - for (k, v) in changes.into_iter() { - let k = (sk.clone(), k); - if is_best { - cache.lru_child_storage.add(k.clone(), v); - } - child_modifications.insert(k); - } - }); - for (k, v) in changes.into_iter() { - if is_best { - cache.lru_hashes.remove(&k); - cache.lru_storage.add(k.clone(), v); - } - modifications.insert(k); - } - - // Save modified storage. These are ordered by the block number in reverse. - let block_changes = BlockChanges { - storage: modifications, - child_storage: child_modifications, - number: *number, - hash: *hash, - is_canon: is_best, - parent: *parent, - }; - let insert_at = cache - .modifications - .iter() - .enumerate() - .find(|(_, m)| m.number < *number) - .map(|(i, _)| i); - trace!("Inserting modifications at {:?}", insert_at); - if let Some(insert_at) = insert_at { - cache.modifications.insert(insert_at, block_changes); - } else { - cache.modifications.push_back(block_changes); - } - } - } -} - -impl>, B: BlockT> CachingState { - /// Create a new instance wrapping generic State and shared cache. - pub(crate) fn new( - state: S, - shared_cache: SharedCache, - parent_hash: Option, - ) -> Self { - CachingState { - usage: StateUsageStats::new(), - overlay_stats: sp_state_machine::StateMachineStats::default(), - state, - cache: CacheChanges { - shared_cache, - local_cache: RwLock::new(LocalCache { - storage: Default::default(), - hashes: Default::default(), - child_storage: Default::default(), - }), - parent_hash, - }, - } - } - - /// Check if the key can be returned from cache by matching current block parent hash against - /// canonical state and filtering out entries modified in later blocks. 
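// Editor's sketch (not part of the patch): `is_allowed` below scans the recent
// `modifications` list (newest block first), starting from the querying state's
// parent and following parent links until it hits a canonical entry. If any
// entry inspected along the way modified the key, the shared cache must not
// answer; if the parent is not in the history at all, the lookup is refused as
// well. A simplified model of that walk with hypothetical types:
use std::collections::HashSet;

struct BlockChangesModel {
    hash: u64,
    parent: u64,
    is_canon: bool,
    modified: HashSet<Vec<u8>>,
}

/// `modifications` is ordered from the newest block to the oldest.
fn cache_lookup_allowed(
    key: &[u8],
    parent_hash: u64,
    modifications: &[BlockChangesModel],
) -> bool {
    let mut parent = parent_hash;
    for m in modifications {
        if m.hash == parent {
            if m.is_canon {
                // Reached the canonical chain without a conflicting write.
                return true;
            }
            parent = m.parent;
        }
        if m.modified.contains(key) {
            // The key changed in a block newer than our parent (or on a sibling
            // fork scanned in between): the cached value may be stale.
            return false;
        }
    }
    // Parent unknown to the recent history: the cache cannot vouch for the value.
    false
}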
- fn is_allowed( - key: Option<&[u8]>, - child_key: Option<&ChildStorageKey>, - parent_hash: &Option, - modifications: &VecDeque>, - ) -> bool { - let mut parent = match *parent_hash { - None => { - trace!( - "Cache lookup skipped for {:?}: no parent hash", - key.as_ref().map(HexDisplay::from) - ); - return false - }, - Some(ref parent) => parent, - }; - // Ignore all storage entries modified in later blocks. - // Modifications contains block ordered by the number - // We search for our parent in that list first and then for - // all its parents until we hit the canonical block, - // checking against all the intermediate modifications. - for m in modifications { - if &m.hash == parent { - if m.is_canon { - return true - } - parent = &m.parent; - } - if let Some(key) = key { - if m.storage.contains(key) { - trace!( - "Cache lookup skipped for {:?}: modified in a later block", - HexDisplay::from(&key) - ); - return false - } - } - if let Some(child_key) = child_key { - if m.child_storage.contains(child_key) { - trace!("Cache lookup skipped for {:?}: modified in a later block", child_key); - return false - } - } - } - trace!( - "Cache lookup skipped for {:?}: parent hash is unknown", - key.as_ref().map(HexDisplay::from), - ); - false - } -} - -impl>, B: BlockT> StateBackend> for CachingState { - type Error = S::Error; - type Transaction = S::Transaction; - type TrieBackendStorage = S::TrieBackendStorage; - - fn storage(&self, key: &[u8]) -> Result>, Self::Error> { - let local_cache = self.cache.local_cache.upgradable_read(); - // Note that local cache makes that lru is not refreshed - if let Some(entry) = local_cache.storage.get(key).cloned() { - trace!("Found in local cache: {:?}", HexDisplay::from(&key)); - self.usage.tally_key_read(key, entry.as_ref(), true); - - return Ok(entry) - } - { - let cache = self.cache.shared_cache.upgradable_read(); - if Self::is_allowed(Some(key), None, &self.cache.parent_hash, &cache.modifications) { - let mut cache = RwLockUpgradableReadGuard::upgrade(cache); - if let Some(entry) = cache.lru_storage.get(key).map(|a| a.clone()) { - trace!("Found in shared cache: {:?}", HexDisplay::from(&key)); - self.usage.tally_key_read(key, entry.as_ref(), true); - return Ok(entry) - } - } - } - trace!("Cache miss: {:?}", HexDisplay::from(&key)); - let value = self.state.storage(key)?; - RwLockUpgradableReadGuard::upgrade(local_cache) - .storage - .insert(key.to_vec(), value.clone()); - self.usage.tally_key_read(key, value.as_ref(), false); - Ok(value) - } - - fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { - let local_cache = self.cache.local_cache.upgradable_read(); - if let Some(entry) = local_cache.hashes.get(key).cloned() { - trace!("Found hash in local cache: {:?}", HexDisplay::from(&key)); - return Ok(entry) - } - { - let cache = self.cache.shared_cache.upgradable_read(); - if Self::is_allowed(Some(key), None, &self.cache.parent_hash, &cache.modifications) { - let mut cache = RwLockUpgradableReadGuard::upgrade(cache); - if let Some(entry) = cache.lru_hashes.get(key).map(|a| a.0) { - trace!("Found hash in shared cache: {:?}", HexDisplay::from(&key)); - return Ok(entry) - } - } - } - trace!("Cache hash miss: {:?}", HexDisplay::from(&key)); - let hash = self.state.storage_hash(key)?; - RwLockUpgradableReadGuard::upgrade(local_cache) - .hashes - .insert(key.to_vec(), hash); - Ok(hash) - } - - fn child_storage( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result>, Self::Error> { - let key = (child_info.storage_key().to_vec(), key.to_vec()); - let 
local_cache = self.cache.local_cache.upgradable_read(); - if let Some(entry) = local_cache.child_storage.get(&key).cloned() { - trace!("Found in local cache: {:?}", key); - return Ok(self.usage.tally_child_key_read(&key, entry, true)) - } - { - let cache = self.cache.shared_cache.upgradable_read(); - if Self::is_allowed(None, Some(&key), &self.cache.parent_hash, &cache.modifications) { - let mut cache = RwLockUpgradableReadGuard::upgrade(cache); - if let Some(entry) = cache.lru_child_storage.get(&key).map(|a| a.clone()) { - trace!("Found in shared cache: {:?}", key); - return Ok(self.usage.tally_child_key_read(&key, entry, true)) - } - } - } - trace!("Cache miss: {:?}", key); - let value = self.state.child_storage(child_info, &key.1[..])?; - - // just pass it through the usage counter - let value = self.usage.tally_child_key_read(&key, value, false); - - RwLockUpgradableReadGuard::upgrade(local_cache) - .child_storage - .insert(key, value.clone()); - Ok(value) - } - - fn exists_storage(&self, key: &[u8]) -> Result { - Ok(self.storage(key)?.is_some()) - } - - fn exists_child_storage( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result { - self.state.exists_child_storage(child_info, key) - } - - fn apply_to_key_values_while, Vec) -> bool>( - &self, - child_info: Option<&ChildInfo>, - prefix: Option<&[u8]>, - start_at: Option<&[u8]>, - f: F, - allow_missing: bool, - ) -> Result { - self.state - .apply_to_key_values_while(child_info, prefix, start_at, f, allow_missing) - } - - fn apply_to_keys_while bool>( - &self, - child_info: Option<&ChildInfo>, - prefix: Option<&[u8]>, - start_at: Option<&[u8]>, - f: F, - ) { - self.state.apply_to_keys_while(child_info, prefix, start_at, f) - } - - fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { - self.state.next_storage_key(key) - } - - fn next_child_storage_key( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result>, Self::Error> { - self.state.next_child_storage_key(child_info, key) - } - - fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { - self.state.for_keys_with_prefix(prefix, f) - } - - fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { - self.state.for_key_values_with_prefix(prefix, f) - } - - fn for_child_keys_with_prefix( - &self, - child_info: &ChildInfo, - prefix: &[u8], - f: F, - ) { - self.state.for_child_keys_with_prefix(child_info, prefix, f) - } - - fn storage_root<'a>( - &self, - delta: impl Iterator)>, - state_version: StateVersion, - ) -> (B::Hash, Self::Transaction) - where - B::Hash: Ord, - { - self.state.storage_root(delta, state_version) - } - - fn child_storage_root<'a>( - &self, - child_info: &ChildInfo, - delta: impl Iterator)>, - state_version: StateVersion, - ) -> (B::Hash, bool, Self::Transaction) - where - B::Hash: Ord, - { - self.state.child_storage_root(child_info, delta, state_version) - } - - fn pairs(&self) -> Vec<(Vec, Vec)> { - self.state.pairs() - } - - fn keys(&self, prefix: &[u8]) -> Vec> { - self.state.keys(prefix) - } - - fn child_keys(&self, child_info: &ChildInfo, prefix: &[u8]) -> Vec> { - self.state.child_keys(child_info, prefix) - } - - fn as_trie_backend(&self) -> Option<&TrieBackend>> { - self.state.as_trie_backend() - } - - fn register_overlay_stats(&self, stats: &sp_state_machine::StateMachineStats) { - self.overlay_stats.add(stats); - } - - fn usage_info(&self) -> sp_state_machine::UsageInfo { - let mut info = self.usage.take(); - info.include_state_machine_states(&self.overlay_stats); - info - } -} - -/// Extended [`CachingState`] that will sync the 
caches on drop. -pub struct SyncingCachingState { - /// The usage statistics of the backend. These will be updated on drop. - state_usage: Arc, - /// Reference to the meta db. - meta: Arc, Block::Hash>>>, - /// Mutex to lock get exlusive access to the backend. - lock: Arc>, - /// The wrapped caching state. - /// - /// This is required to be a `Option`, because sometimes we want to extract - /// the cache changes and Rust does not allow to move fields from types that - /// implement `Drop`. - caching_state: Option>, - /// Disable syncing of the cache. This is by default always `false`. However, - /// we need to disable syncing when this is a state in a - /// [`BlockImportOperation`](crate::BlockImportOperation). The import operation - /// takes care to sync the cache and more importantly we want to prevent a dead - /// lock. - disable_syncing: bool, -} - -impl SyncingCachingState { - /// Create new automatic syncing state. - pub fn new( - caching_state: CachingState, - state_usage: Arc, - meta: Arc, B::Hash>>>, - lock: Arc>, - ) -> Self { - Self { caching_state: Some(caching_state), state_usage, meta, lock, disable_syncing: false } - } - - /// Returns the reference to the internal [`CachingState`]. - fn caching_state(&self) -> &CachingState { - self.caching_state - .as_ref() - .expect("`caching_state` is always valid for the lifetime of the object; qed") - } - - /// Convert `Self` into the cache changes. - pub fn into_cache_changes(mut self) -> CacheChanges { - self.caching_state - .take() - .expect("`caching_state` is always valid for the lifetime of the object; qed") - .cache - } - - /// Disable syncing the cache on drop. - pub fn disable_syncing(&mut self) { - self.disable_syncing = true; - } -} - -impl std::fmt::Debug for SyncingCachingState { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - self.caching_state().fmt(f) - } -} - -impl>, B: BlockT> StateBackend> - for SyncingCachingState -{ - type Error = S::Error; - type Transaction = S::Transaction; - type TrieBackendStorage = S::TrieBackendStorage; - - fn storage(&self, key: &[u8]) -> Result>, Self::Error> { - self.caching_state().storage(key) - } - - fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { - self.caching_state().storage_hash(key) - } - - fn child_storage( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result>, Self::Error> { - self.caching_state().child_storage(child_info, key) - } - - fn exists_storage(&self, key: &[u8]) -> Result { - self.caching_state().exists_storage(key) - } - - fn exists_child_storage( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result { - self.caching_state().exists_child_storage(child_info, key) - } - - fn apply_to_key_values_while, Vec) -> bool>( - &self, - child_info: Option<&ChildInfo>, - prefix: Option<&[u8]>, - start_at: Option<&[u8]>, - f: F, - allow_missing: bool, - ) -> Result { - self.caching_state().apply_to_key_values_while( - child_info, - prefix, - start_at, - f, - allow_missing, - ) - } - - fn apply_to_keys_while bool>( - &self, - child_info: Option<&ChildInfo>, - prefix: Option<&[u8]>, - start_at: Option<&[u8]>, - f: F, - ) { - self.caching_state().apply_to_keys_while(child_info, prefix, start_at, f) - } - - fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { - self.caching_state().next_storage_key(key) - } - - fn next_child_storage_key( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result>, Self::Error> { - self.caching_state().next_child_storage_key(child_info, key) - } - - fn for_keys_with_prefix(&self, 
prefix: &[u8], f: F) { - self.caching_state().for_keys_with_prefix(prefix, f) - } - - fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { - self.caching_state().for_key_values_with_prefix(prefix, f) - } - - fn for_child_keys_with_prefix( - &self, - child_info: &ChildInfo, - prefix: &[u8], - f: F, - ) { - self.caching_state().for_child_keys_with_prefix(child_info, prefix, f) - } - - fn storage_root<'a>( - &self, - delta: impl Iterator)>, - state_version: StateVersion, - ) -> (B::Hash, Self::Transaction) - where - B::Hash: Ord, - { - self.caching_state().storage_root(delta, state_version) - } - - fn child_storage_root<'a>( - &self, - child_info: &ChildInfo, - delta: impl Iterator)>, - state_version: StateVersion, - ) -> (B::Hash, bool, Self::Transaction) - where - B::Hash: Ord, - { - self.caching_state().child_storage_root(child_info, delta, state_version) - } - - fn pairs(&self) -> Vec<(Vec, Vec)> { - self.caching_state().pairs() - } - - fn keys(&self, prefix: &[u8]) -> Vec> { - self.caching_state().keys(prefix) - } - - fn child_keys(&self, child_info: &ChildInfo, prefix: &[u8]) -> Vec> { - self.caching_state().child_keys(child_info, prefix) - } - - fn as_trie_backend(&self) -> Option<&TrieBackend>> { - self.caching_state - .as_ref() - .expect("`caching_state` is valid for the lifetime of the object; qed") - .as_trie_backend() - } - - fn register_overlay_stats(&self, stats: &sp_state_machine::StateMachineStats) { - self.caching_state().register_overlay_stats(stats); - } - - fn usage_info(&self) -> sp_state_machine::UsageInfo { - self.caching_state().usage_info() - } -} - -impl Drop for SyncingCachingState { - fn drop(&mut self) { - if self.disable_syncing { - return - } - - if let Some(mut caching_state) = self.caching_state.take() { - let _lock = self.lock.read(); - - self.state_usage.merge_sm(caching_state.usage.take()); - if let Some(hash) = caching_state.cache.parent_hash { - let is_best = self.meta.read().best_hash == hash; - caching_state.cache.sync_cache(&[], &[], vec![], vec![], None, None, is_best); - } - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use sp_runtime::{ - testing::{Block as RawBlock, ExtrinsicWrapper, H256}, - traits::BlakeTwo256, - }; - use sp_state_machine::InMemoryBackend; - - type Block = RawBlock>; - - #[test] - fn smoke() { - // init_log(); - let root_parent = H256::random(); - let key = H256::random()[..].to_vec(); - let h0 = H256::random(); - let h1a = H256::random(); - let h1b = H256::random(); - let h2a = H256::random(); - let h2b = H256::random(); - let h3a = H256::random(); - let h3b = H256::random(); - - let shared = new_shared_cache::(256 * 1024, (0, 1)); - - // blocks [ 3a(c) 2a(c) 2b 1b 1a(c) 0 ] - // state [ 5 5 4 3 2 2 ] - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(root_parent), - ); - s.cache.sync_cache( - &[], - &[], - vec![(key.clone(), Some(vec![2]))], - vec![], - Some(h0), - Some(0), - true, - ); - - let mut s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h0)); - s.cache.sync_cache(&[], &[], vec![], vec![], Some(h1a), Some(1), true); - - let mut s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h0)); - s.cache.sync_cache( - &[], - &[], - vec![(key.clone(), Some(vec![3]))], - vec![], - Some(h1b), - Some(1), - false, - ); - - let mut s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1b)); - s.cache.sync_cache( - &[], - &[], - vec![(key.clone(), Some(vec![4]))], - vec![], - Some(h2b), - Some(2), - false, - ); 
- - let mut s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1a)); - s.cache.sync_cache( - &[], - &[], - vec![(key.clone(), Some(vec![5]))], - vec![], - Some(h2a), - Some(2), - true, - ); - - let mut s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h2a)); - s.cache.sync_cache(&[], &[], vec![], vec![], Some(h3a), Some(3), true); - - let s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h3a)); - assert_eq!(s.storage(&key).unwrap().unwrap(), vec![5]); - - let s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1a)); - assert!(s.storage(&key).unwrap().is_none()); - - let s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h2b)); - assert!(s.storage(&key).unwrap().is_none()); - - let s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1b)); - assert!(s.storage(&key).unwrap().is_none()); - - // reorg to 3b - // blocks [ 3b(c) 3a 2a 2b(c) 1b 1a 0 ] - let mut s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h2b)); - s.cache.sync_cache( - &[h1b, h2b, h3b], - &[h1a, h2a, h3a], - vec![], - vec![], - Some(h3b), - Some(3), - true, - ); - let s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h3a)); - assert!(s.storage(&key).unwrap().is_none()); - } - - #[test] - fn simple_fork() { - sp_tracing::try_init_simple(); - - let root_parent = H256::random(); - let key = H256::random()[..].to_vec(); - let h1 = H256::random(); - let h2a = H256::random(); - let h2b = H256::random(); - let h3b = H256::random(); - - let shared = new_shared_cache::(256 * 1024, (0, 1)); - - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(root_parent), - ); - s.cache.sync_cache( - &[], - &[], - vec![(key.clone(), Some(vec![2]))], - vec![], - Some(h1), - Some(1), - true, - ); - - let mut s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1)); - s.cache.sync_cache(&[], &[], vec![], vec![], Some(h2a), Some(2), true); - - let mut s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1)); - s.cache.sync_cache( - &[], - &[], - vec![(key.clone(), Some(vec![3]))], - vec![], - Some(h2b), - Some(2), - false, - ); - - let mut s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h2b)); - s.cache.sync_cache( - &[], - &[], - vec![(key.clone(), Some(vec![3]))], - vec![], - Some(h3b), - Some(2), - false, - ); - - let s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h2a)); - assert_eq!(s.storage(&key).unwrap().unwrap(), vec![2]); - } - - #[test] - fn double_fork() { - let root_parent = H256::random(); - let key = H256::random()[..].to_vec(); - let h1 = H256::random(); - let h2a = H256::random(); - let h2b = H256::random(); - let h3a = H256::random(); - let h3b = H256::random(); - - let shared = new_shared_cache::(256 * 1024, (0, 1)); - - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(root_parent), - ); - s.cache.sync_cache(&[], &[], vec![], vec![], Some(h1), Some(1), true); - - let mut s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1)); - s.cache.sync_cache(&[], &[], vec![], vec![], Some(h2a), Some(2), true); - - let mut s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h2a)); - s.cache.sync_cache( - &[], - &[], - vec![(key.clone(), Some(vec![2]))], - vec![], - Some(h3a), - Some(3), - true, - ); - - let mut s = - 
CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1)); - s.cache.sync_cache(&[], &[], vec![], vec![], Some(h2b), Some(2), false); - - let mut s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h2b)); - s.cache.sync_cache( - &[], - &[], - vec![(key.clone(), Some(vec![3]))], - vec![], - Some(h3b), - Some(3), - false, - ); - - let s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h3a)); - assert_eq!(s.storage(&key).unwrap().unwrap(), vec![2]); - } - - #[test] - fn reverts_storage_hash() { - let root_parent = H256::random(); - let key = H256::random()[..].to_vec(); - let h1a = H256::random(); - let h1b = H256::random(); - - let shared = new_shared_cache::(256 * 1024, (0, 1)); - let mut backend = InMemoryBackend::::default(); - backend.insert( - std::iter::once((None, vec![(key.clone(), Some(vec![1]))])), - Default::default(), - ); - - let mut s = CachingState::new(backend.clone(), shared.clone(), Some(root_parent)); - s.cache.sync_cache( - &[], - &[], - vec![(key.clone(), Some(vec![2]))], - vec![], - Some(h1a), - Some(1), - true, - ); - - let mut s = CachingState::new(backend.clone(), shared.clone(), Some(root_parent)); - s.cache.sync_cache(&[], &[h1a], vec![], vec![], Some(h1b), Some(1), true); - - let s = CachingState::new(backend.clone(), shared.clone(), Some(h1b)); - assert_eq!(s.storage_hash(&key).unwrap().unwrap(), BlakeTwo256::hash(&vec![1])); - } - - #[test] - fn should_track_used_size_correctly() { - let root_parent = H256::random(); - let shared = new_shared_cache::(109, ((109 - 36), 109)); - let h0 = H256::random(); - - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(root_parent), - ); - - let key = H256::random()[..].to_vec(); - let s_key = H256::random()[..].to_vec(); - s.cache.sync_cache( - &[], - &[], - vec![(key.clone(), Some(vec![1, 2, 3]))], - vec![], - Some(h0), - Some(0), - true, - ); - // 32 key, 3 byte size - assert_eq!(shared.read().used_storage_cache_size(), 35 /* bytes */); - - let key = H256::random()[..].to_vec(); - s.cache.sync_cache( - &[], - &[], - vec![], - vec![(s_key.clone(), vec![(key.clone(), Some(vec![1, 2]))])], - Some(h0), - Some(0), - true, - ); - // 35 + (2 * 32) key, 2 byte size - assert_eq!(shared.read().used_storage_cache_size(), 101 /* bytes */); - } - - #[test] - fn should_remove_lru_items_based_on_tracking_used_size() { - let root_parent = H256::random(); - let shared = new_shared_cache::(36 * 3, (2, 3)); - let h0 = H256::random(); - - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(root_parent), - ); - - let key = H256::random()[..].to_vec(); - s.cache.sync_cache( - &[], - &[], - vec![(key.clone(), Some(vec![1, 2, 3, 4]))], - vec![], - Some(h0), - Some(0), - true, - ); - // 32 key, 4 byte size - assert_eq!(shared.read().used_storage_cache_size(), 36 /* bytes */); - - let key = H256::random()[..].to_vec(); - s.cache.sync_cache( - &[], - &[], - vec![(key.clone(), Some(vec![1, 2]))], - vec![], - Some(h0), - Some(0), - true, - ); - // 32 key, 2 byte size - assert_eq!(shared.read().used_storage_cache_size(), 34 /* bytes */); - } - - #[test] - fn fix_storage_mismatch_issue() { - sp_tracing::try_init_simple(); - let root_parent = H256::random(); - - let key = H256::random()[..].to_vec(); - - let h0 = H256::random(); - let h1 = H256::random(); - - let shared = new_shared_cache::(256 * 1024, (0, 1)); - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(root_parent), - ); - 
s.cache.sync_cache( - &[], - &[], - vec![(key.clone(), Some(vec![2]))], - vec![], - Some(h0), - Some(0), - true, - ); - - let mut s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h0)); - s.cache.sync_cache( - &[], - &[], - vec![(key.clone(), Some(vec![3]))], - vec![], - Some(h1), - Some(1), - true, - ); - - let mut s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1)); - assert_eq!(s.storage(&key).unwrap(), Some(vec![3])); - - // Restart (or unknown block?), clear caches. - { - let mut cache = s.cache.shared_cache.write(); - let cache = &mut *cache; - cache.lru_storage.clear(); - cache.lru_hashes.clear(); - cache.lru_child_storage.clear(); - cache.modifications.clear(); - } - - // New value is written because of cache miss. - s.cache.local_cache.write().storage.insert(key.clone(), Some(vec![42])); - - // New value is propagated. - s.cache.sync_cache(&[], &[], vec![], vec![], None, None, true); - - let s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1)); - assert_eq!(s.storage(&key).unwrap(), None); - } - - #[test] - fn same_block_no_changes() { - sp_tracing::try_init_simple(); - - let root_parent = H256::random(); - let key = H256::random()[..].to_vec(); - let h1 = H256::random(); - let h2 = H256::random(); - - let shared = new_shared_cache::(256 * 1024, (0, 1)); - - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(root_parent), - ); - s.cache.sync_cache( - &[], - &[], - vec![(key.clone(), Some(vec![1]))], - vec![], - Some(h1), - Some(1), - true, - ); - assert_eq!(shared.write().lru_storage.get(&key).unwrap(), &Some(vec![1])); - - let mut s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1)); - - // commit as non-best - s.cache.sync_cache( - &[], - &[], - vec![(key.clone(), Some(vec![2]))], - vec![], - Some(h2), - Some(2), - false, - ); - - assert_eq!(shared.write().lru_storage.get(&key).unwrap(), &Some(vec![1])); - - let mut s = - CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1)); - - // commit again as best with no changes - s.cache.sync_cache(&[], &[], vec![], vec![], Some(h2), Some(2), true); - assert_eq!(s.storage(&key).unwrap(), None); - } -} - -#[cfg(test)] -mod qc { - use std::collections::{hash_map::Entry, HashMap}; - - use quickcheck::{quickcheck, Arbitrary, TestResult}; - - use super::*; - use sp_runtime::{ - testing::{Block as RawBlock, ExtrinsicWrapper, H256}, - traits::BlakeTwo256, - }; - use sp_state_machine::InMemoryBackend; - - type Block = RawBlock>; - - type KeySet = Vec<(Vec, Option>)>; - - type KeyMap = HashMap, Option>>; - - #[derive(Debug, Clone)] - struct Node { - hash: H256, - #[allow(unused)] - parent: H256, - state: KeyMap, - changes: KeySet, - } - - impl Node { - fn new_next(&self, hash: H256, changes: KeySet) -> Self { - let mut state = self.state.clone(); - - for (k, v) in self.state.iter() { - state.insert(k.clone(), v.clone()); - } - for (k, v) in changes.clone().into_iter() { - state.insert(k, v); - } - - Self { hash, parent: self.hash, changes, state } - } - - fn new(hash: H256, parent: H256, changes: KeySet) -> Self { - let mut state = KeyMap::new(); - - for (k, v) in changes.clone().into_iter() { - state.insert(k, v); - } - - Self { hash, parent, state, changes } - } - - fn purge(&mut self, other_changes: &KeySet) { - for (k, _) in other_changes.iter() { - self.state.remove(k); - } - } - } - - #[derive(Debug, Clone)] - enum Action { - Next { hash: H256, changes: KeySet }, - Fork { depth: 
usize, hash: H256, changes: KeySet }, - ReorgWithImport { depth: usize, hash: H256 }, - FinalizationReorg { fork_depth: usize, depth: usize }, - } - - impl Arbitrary for Action { - fn arbitrary(gen: &mut quickcheck::Gen) -> Self { - let path = u8::arbitrary(gen); - let buf = (0..32).map(|_| u8::arbitrary(gen)).collect::>(); - - match path { - 0..=175 => Action::Next { - hash: H256::from_slice(&buf[..]), - changes: { - let mut set = Vec::new(); - for _ in 0..::arbitrary(gen) / (64 * 256 * 256 * 256) { - set.push((vec![u8::arbitrary(gen)], Some(vec![u8::arbitrary(gen)]))); - } - set - }, - }, - 176..=220 => Action::Fork { - hash: H256::from_slice(&buf[..]), - depth: ((u8::arbitrary(gen)) / 32) as usize, - changes: { - let mut set = Vec::new(); - for _ in 0..::arbitrary(gen) / (64 * 256 * 256 * 256) { - set.push((vec![u8::arbitrary(gen)], Some(vec![u8::arbitrary(gen)]))); - } - set - }, - }, - 221..=240 => { - Action::ReorgWithImport { - hash: H256::from_slice(&buf[..]), - depth: ((u8::arbitrary(gen)) / 32) as usize, // 0-7 - } - }, - _ => { - Action::FinalizationReorg { - fork_depth: ((u8::arbitrary(gen)) / 32) as usize, // 0-7 - depth: ((u8::arbitrary(gen)) / 64) as usize, // 0-3 - } - }, - } - } - } - - struct Mutator { - shared: SharedCache, - canon: Vec, - forks: HashMap>, - } - - impl Mutator { - fn new_empty() -> Self { - let shared = new_shared_cache::(256 * 1024, (0, 1)); - - Self { shared, canon: vec![], forks: HashMap::new() } - } - - fn head_state(&self, hash: H256) -> CachingState, Block> { - CachingState::new( - InMemoryBackend::::default(), - self.shared.clone(), - Some(hash), - ) - } - - fn canon_head_state(&self) -> CachingState, Block> { - self.head_state(self.canon.last().expect("Expected to be one commit").hash) - } - - fn mutate_static( - &mut self, - action: Action, - ) -> CachingState, Block> { - self.mutate(action) - .expect("Expected to provide only valid actions to the mutate_static") - } - - fn canon_len(&self) -> usize { - return self.canon.len() - } - - fn head_storage_ref(&self) -> &KeyMap { - &self.canon.last().expect("Expected to be one commit").state - } - - fn key_permutations() -> Vec> { - (0u8..255).map(|x| vec![x]).collect() - } - - fn mutate( - &mut self, - action: Action, - ) -> Result, Block>, ()> { - let state = match action { - Action::Fork { depth, hash, changes } => { - let pos = self.canon.len() as isize - depth as isize; - if pos < 0 || self.canon.len() == 0 || pos >= (self.canon.len() - 1) as isize - // no fork on top also, thus len-1 - { - return Err(()) - } - - let pos = pos as usize; - - let fork_at = self.canon[pos].hash; - - let (total_h, parent) = match self.forks.entry(fork_at) { - Entry::Occupied(occupied) => { - let chain = occupied.into_mut(); - let parent = - chain.last().expect("No empty forks are ever created").clone(); - let mut node = parent.new_next(hash, changes.clone()); - - for earlier in chain.iter() { - node.purge(&earlier.changes.clone()); - } - - chain.push(node); - - (pos + chain.len(), parent.hash) - }, - Entry::Vacant(vacant) => { - let canon_parent = &self.canon[pos]; - vacant.insert(vec![canon_parent.new_next(hash, changes.clone())]); - - (pos + 1, fork_at) - }, - }; - - let mut state = CachingState::new( - InMemoryBackend::::default(), - self.shared.clone(), - Some(parent), - ); - - state.cache.sync_cache( - &[], - &[], - changes, - vec![], - Some(hash), - Some(total_h as u64), - false, - ); - - state - }, - Action::Next { hash, changes } => { - let (next, parent_hash) = match self.canon.last() { - None => { - let 
parent_hash = H256::from(&[0u8; 32]); - (Node::new(hash, parent_hash, changes.clone()), parent_hash) - }, - Some(parent) => (parent.new_next(hash, changes.clone()), parent.hash), - }; - - // delete cache entries for earlier - for node in self.canon.iter_mut() { - node.purge(&next.changes); - if let Some(fork) = self.forks.get_mut(&node.hash) { - for node in fork.iter_mut() { - node.purge(&next.changes); - } - } - } - - let mut state = CachingState::new( - InMemoryBackend::::default(), - self.shared.clone(), - Some(parent_hash), - ); - - state.cache.sync_cache( - &[], - &[], - next.changes.clone(), - vec![], - Some(hash), - Some(self.canon.len() as u64 + 1), - true, - ); - - self.canon.push(next); - - state - }, - Action::ReorgWithImport { depth, hash } => { - let pos = self.canon.len() as isize - depth as isize; - if pos < 0 || pos + 1 >= self.canon.len() as isize { - return Err(()) - } - let fork_at = self.canon[pos as usize].hash; - let pos = pos as usize; - - match self.forks.get_mut(&fork_at) { - Some(chain) => { - let mut new_fork = self.canon.drain(pos + 1..).collect::>(); - - let retracted: Vec = - new_fork.iter().map(|node| node.hash).collect(); - let enacted: Vec = chain.iter().map(|node| node.hash).collect(); - - std::mem::swap(chain, &mut new_fork); - - let mut node = new_fork - .last() - .map(|node| node.new_next(hash, vec![])) - .expect("No empty fork ever created!"); - - for invalidators in chain.iter().chain(new_fork.iter()) { - node.purge(&invalidators.changes); - } - - self.canon.extend(new_fork.into_iter()); - - self.canon.push(node); - - let mut state = CachingState::new( - InMemoryBackend::::default(), - self.shared.clone(), - Some(fork_at), - ); - - let height = pos as u64 + enacted.len() as u64 + 2; - state.cache.sync_cache( - &enacted[..], - &retracted[..], - vec![], - vec![], - Some(hash), - Some(height), - true, - ); - - state - }, - None => { - return Err(()) // no reorg without a fork atm! - }, - } - }, - Action::FinalizationReorg { fork_depth, depth } => { - let pos = self.canon.len() as isize - fork_depth as isize; - if pos < 0 || pos + 1 >= self.canon.len() as isize { - return Err(()) - } - let fork_at = self.canon[pos as usize].hash; - let pos = pos as usize; - - match self.forks.get_mut(&fork_at) { - Some(fork_chain) => { - let sync_pos = fork_chain.len() as isize - - fork_chain.len() as isize - depth as isize; - if sync_pos < 0 || sync_pos >= fork_chain.len() as isize { - return Err(()) - } - let sync_pos = sync_pos as usize; - - let mut new_fork = self.canon.drain(pos + 1..).collect::>(); - - let retracted: Vec = - new_fork.iter().map(|node| node.hash).collect(); - let enacted: Vec = fork_chain - .iter() - .take(sync_pos + 1) - .map(|node| node.hash) - .collect(); - - std::mem::swap(fork_chain, &mut new_fork); - - self.shared.write().sync(&retracted, &enacted); - - self.head_state( - self.canon - .last() - .expect("wasn't forking to emptiness so there should be one!") - .hash, - ) - }, - None => { - return Err(()) // no reorg to nothing pls! 
- }, - } - }, - }; - - Ok(state) - } - } - - #[test] - fn smoke() { - let key = H256::random()[..].to_vec(); - let h0 = H256::random(); - let h1a = H256::random(); - let h1b = H256::random(); - let h2a = H256::random(); - let h2b = H256::random(); - let h3a = H256::random(); - let h3b = H256::random(); - - let mut mutator = Mutator::new_empty(); - mutator - .mutate_static(Action::Next { hash: h0, changes: vec![(key.clone(), Some(vec![2]))] }); - mutator.mutate_static(Action::Next { hash: h1a, changes: vec![] }); - mutator.mutate_static(Action::Fork { - depth: 2, - hash: h1b, - changes: vec![(key.clone(), Some(vec![3]))], - }); - mutator.mutate_static(Action::Fork { - depth: 2, - hash: h2b, - changes: vec![(key.clone(), Some(vec![4]))], - }); - mutator - .mutate_static(Action::Next { hash: h2a, changes: vec![(key.clone(), Some(vec![5]))] }); - mutator.mutate_static(Action::Next { hash: h3a, changes: vec![] }); - - assert_eq!( - mutator.head_state(h3a).storage(&key).unwrap().expect("there should be a value"), - vec![5] - ); - assert!(mutator.head_state(h1a).storage(&key).unwrap().is_none()); - assert!(mutator.head_state(h2b).storage(&key).unwrap().is_none()); - assert!(mutator.head_state(h1b).storage(&key).unwrap().is_none()); - - mutator.mutate_static(Action::ReorgWithImport { depth: 4, hash: h3b }); - assert!(mutator.head_state(h3a).storage(&key).unwrap().is_none()); - } - - fn is_head_match(mutator: &Mutator) -> bool { - let head_state = mutator.canon_head_state(); - - for key in Mutator::key_permutations() { - match (head_state.storage(&key).unwrap(), mutator.head_storage_ref().get(&key)) { - (Some(x), Some(y)) => - if Some(&x) != y.as_ref() { - eprintln!("{:?} != {:?}", x, y); - return false - }, - (None, Some(_y)) => { - // TODO: cache miss is not tracked atm - }, - (Some(x), None) => { - eprintln!("{:?} != ", x); - return false - }, - _ => continue, - } - } - true - } - - fn is_canon_match(mutator: &Mutator) -> bool { - for node in mutator.canon.iter() { - let head_state = mutator.head_state(node.hash); - for key in Mutator::key_permutations() { - match (head_state.storage(&key).unwrap(), node.state.get(&key)) { - (Some(x), Some(y)) => - if Some(&x) != y.as_ref() { - eprintln!("at [{}]: {:?} != {:?}", node.hash, x, y); - return false - }, - (None, Some(_y)) => { - // cache miss is not tracked atm - }, - (Some(x), None) => { - eprintln!("at [{}]: {:?} != ", node.hash, x); - return false - }, - _ => continue, - } - } - } - true - } - - #[test] - fn reorg() { - let key = H256::random()[..].to_vec(); - let h0 = H256::random(); - let h1 = H256::random(); - let h2 = H256::random(); - let h1b = H256::random(); - let h2b = H256::random(); - - let mut mutator = Mutator::new_empty(); - mutator.mutate_static(Action::Next { hash: h0, changes: vec![] }); - mutator.mutate_static(Action::Next { hash: h1, changes: vec![] }); - mutator - .mutate_static(Action::Next { hash: h2, changes: vec![(key.clone(), Some(vec![2]))] }); - mutator.mutate_static(Action::Fork { - depth: 2, - hash: h1b, - changes: vec![(key.clone(), Some(vec![3]))], - }); - mutator.mutate_static(Action::ReorgWithImport { depth: 2, hash: h2b }); - - assert!(is_head_match(&mutator)) - } - - fn key(k: u8) -> Vec { - vec![k] - } - fn val(v: u8) -> Option> { - Some(vec![v]) - } - fn keyval(k: u8, v: u8) -> KeySet { - vec![(key(k), val(v))] - } - - #[test] - fn reorg2() { - let h0 = H256::random(); - let h1a = H256::random(); - let h1b = H256::random(); - let h2b = H256::random(); - let h2a = H256::random(); - let h3a = H256::random(); - - 
let mut mutator = Mutator::new_empty(); - mutator.mutate_static(Action::Next { hash: h0, changes: keyval(1, 1) }); - mutator.mutate_static(Action::Next { hash: h1a, changes: keyval(1, 1) }); - mutator.mutate_static(Action::Fork { depth: 2, hash: h1b, changes: keyval(2, 2) }); - - mutator.mutate_static(Action::Next { hash: h2a, changes: keyval(3, 3) }); - mutator.mutate_static(Action::Next { hash: h3a, changes: keyval(4, 4) }); - mutator.mutate_static(Action::ReorgWithImport { depth: 4, hash: h2b }); - - assert!(is_head_match(&mutator)) - } - - #[test] - fn fork2() { - let h1 = H256::random(); - let h2a = H256::random(); - let h2b = H256::random(); - let h3a = H256::random(); - let h3b = H256::random(); - - let mut mutator = Mutator::new_empty(); - mutator.mutate_static(Action::Next { hash: h1, changes: vec![] }); - mutator.mutate_static(Action::Next { hash: h2a, changes: vec![] }); - mutator.mutate_static(Action::Next { hash: h3a, changes: keyval(1, 1) }); - - mutator.mutate_static(Action::Fork { depth: 2, hash: h2b, changes: vec![] }); - mutator.mutate_static(Action::Fork { depth: 2, hash: h3b, changes: keyval(1, 2) }); - - assert!(is_head_match(&mutator)) - } - - #[test] - fn fork3() { - let h1 = H256::random(); - let h2a = H256::random(); - let h2b = H256::random(); - let h3a = H256::random(); - - let mut mutator = Mutator::new_empty(); - mutator.mutate_static(Action::Next { hash: h1, changes: keyval(1, 1) }); - mutator.mutate_static(Action::Next { hash: h2a, changes: keyval(2, 2) }); - mutator.mutate_static(Action::Next { hash: h3a, changes: keyval(3, 3) }); - - mutator.mutate_static(Action::Fork { depth: 2, hash: h2b, changes: keyval(1, 3) }); - - assert!(is_canon_match(&mutator)) - } - - quickcheck! { - fn head_complete(actions: Vec) -> TestResult { - let mut mutator = Mutator::new_empty(); - - for action in actions.into_iter() { - if let Err(_) = mutator.mutate(action) { - return TestResult::discard(); - } - } - - if mutator.canon_len() == 0 { - return TestResult::discard(); - } - - TestResult::from_bool(is_head_match(&mutator)) - } - - fn canon_complete(actions: Vec) -> TestResult { - let mut mutator = Mutator::new_empty(); - - for action in actions.into_iter() { - if let Err(_) = mutator.mutate(action) { - return TestResult::discard(); - } - } - - if mutator.canon_len() == 0 { - return TestResult::discard(); - } - - TestResult::from_bool(is_canon_match(&mutator)) - } - } -} diff --git a/substrate/client/db/src/upgrade.rs b/substrate/client/db/src/upgrade.rs deleted file mode 100644 index 51750bf6..00000000 --- a/substrate/client/db/src/upgrade.rs +++ /dev/null @@ -1,256 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Database upgrade logic. 
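For orientation while reading the removed `upgrade.rs` below: it keeps the schema version in a small `db_version` file next to the database and applies one migration per version step until the schema is current. A minimal sketch of that version-stepping pattern, with illustrative file handling and placeholder migration bodies rather than the crate's actual columns, error type, or RocksDB handling:

use std::{
    fs,
    io::{self, ErrorKind},
    path::Path,
};

const CURRENT_VERSION: u32 = 4;

/// Read the on-disk schema version; `None` means the version file is missing
/// (a freshly created database).
fn read_version(dir: &Path) -> io::Result<Option<u32>> {
    match fs::read_to_string(dir.join("db_version")) {
        Ok(s) => s
            .trim()
            .parse::<u32>()
            .map(Some)
            .map_err(|_| io::Error::new(ErrorKind::InvalidData, "unreadable db_version")),
        Err(e) if e.kind() == ErrorKind::NotFound => Ok(None),
        Err(e) => Err(e),
    }
}

/// Step the schema forward one version at a time until it is current.
fn upgrade(dir: &Path) -> io::Result<()> {
    // A fresh database starts at the current version and needs no migration.
    let mut version = read_version(dir)?.unwrap_or(CURRENT_VERSION);
    if version > CURRENT_VERSION {
        return Err(io::Error::new(ErrorKind::InvalidData, "database is from a newer client"));
    }
    while version < CURRENT_VERSION {
        match version {
            1 => { /* v1 -> v2: e.g. add a new column */ }
            2 => { /* v2 -> v3: e.g. re-encode stored justifications */ }
            3 => { /* v3 -> v4: e.g. add a body-index column */ }
            other => {
                return Err(io::Error::new(
                    ErrorKind::InvalidData,
                    format!("unsupported database version {other}"),
                ))
            }
        }
        version += 1;
    }
    fs::write(dir.join("db_version"), CURRENT_VERSION.to_string())
}

The removed implementation performs the same stepping inside `upgrade_db`, but against the real RocksDB columns and with dedicated `UpgradeError` variants instead of plain `io::Error`.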
- -use std::{ - fmt, fs, - io::{self, ErrorKind, Read, Write}, - path::{Path, PathBuf}, -}; - -use crate::{columns, utils::DatabaseType}; -use codec::{Decode, Encode}; -use kvdb_rocksdb::{Database, DatabaseConfig}; -use sp_runtime::traits::Block as BlockT; - -/// Version file name. -const VERSION_FILE_NAME: &str = "db_version"; - -/// Current db version. -const CURRENT_VERSION: u32 = 4; - -/// Number of columns in v1. -const V1_NUM_COLUMNS: u32 = 11; -const V2_NUM_COLUMNS: u32 = 12; -const V3_NUM_COLUMNS: u32 = 12; - -/// Database upgrade errors. -#[derive(Debug)] -pub enum UpgradeError { - /// Database version cannot be read from existing db_version file. - UnknownDatabaseVersion, - /// Missing database version file. - MissingDatabaseVersionFile, - /// Database version no longer supported. - UnsupportedVersion(u32), - /// Database version comes from future version of the client. - FutureDatabaseVersion(u32), - /// Invalid justification block. - DecodingJustificationBlock, - /// Common io error. - Io(io::Error), -} - -pub type UpgradeResult = Result; - -impl From for UpgradeError { - fn from(err: io::Error) -> Self { - UpgradeError::Io(err) - } -} - -impl fmt::Display for UpgradeError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - UpgradeError::UnknownDatabaseVersion => { - write!(f, "Database version cannot be read from existing db_version file") - }, - UpgradeError::MissingDatabaseVersionFile => write!(f, "Missing database version file"), - UpgradeError::UnsupportedVersion(version) => { - write!(f, "Database version no longer supported: {}", version) - }, - UpgradeError::FutureDatabaseVersion(version) => { - write!(f, "Database version comes from future version of the client: {}", version) - }, - UpgradeError::DecodingJustificationBlock => { - write!(f, "Decodoning justification block failed") - }, - UpgradeError::Io(err) => write!(f, "Io error: {}", err), - } - } -} - -/// Upgrade database to current version. -pub fn upgrade_db(db_path: &Path, db_type: DatabaseType) -> UpgradeResult<()> { - let db_version = current_version(db_path)?; - match db_version { - 0 => return Err(UpgradeError::UnsupportedVersion(db_version)), - 1 => { - migrate_1_to_2::(db_path, db_type)?; - migrate_2_to_3::(db_path, db_type)?; - migrate_3_to_4::(db_path, db_type)?; - }, - 2 => { - migrate_2_to_3::(db_path, db_type)?; - migrate_3_to_4::(db_path, db_type)?; - }, - 3 => { - migrate_3_to_4::(db_path, db_type)?; - }, - CURRENT_VERSION => (), - _ => return Err(UpgradeError::FutureDatabaseVersion(db_version)), - } - update_version(db_path)?; - Ok(()) -} - -/// Migration from version1 to version2: -/// 1) the number of columns has changed from 11 to 12; -/// 2) transactions column is added; -fn migrate_1_to_2(db_path: &Path, _db_type: DatabaseType) -> UpgradeResult<()> { - let db_cfg = DatabaseConfig::with_columns(V1_NUM_COLUMNS); - let mut db = Database::open(&db_cfg, db_path)?; - db.add_column().map_err(Into::into) -} - -/// Migration from version2 to version3: -/// - The format of the stored Justification changed to support multiple Justifications. 
-fn migrate_2_to_3(db_path: &Path, _db_type: DatabaseType) -> UpgradeResult<()> { - let db_cfg = DatabaseConfig::with_columns(V2_NUM_COLUMNS); - let db = Database::open(&db_cfg, db_path)?; - - // Get all the keys we need to update - let keys: Vec<_> = db - .iter(columns::JUSTIFICATIONS) - .map(|r| r.map(|e| e.0)) - .collect::>()?; - - // Read and update each entry - let mut transaction = db.transaction(); - for key in keys { - if let Some(justification) = db.get(columns::JUSTIFICATIONS, &key)? { - // Tag each justification with the hardcoded ID for GRANDPA to avoid the dependency on - // the GRANDPA crate. - // NOTE: when storing justifications the previous API would get a `Vec` and still - // call encode on it. - let justification = Vec::::decode(&mut &justification[..]) - .map_err(|_| UpgradeError::DecodingJustificationBlock)?; - let justifications = sp_runtime::Justifications::from((*b"FRNK", justification)); - transaction.put_vec(columns::JUSTIFICATIONS, &key, justifications.encode()); - } - } - db.write(transaction)?; - - Ok(()) -} - -/// Migration from version3 to version4: -/// 1) the number of columns has changed from 12 to 13; -/// 2) BODY_INDEX column is added; -fn migrate_3_to_4(db_path: &Path, _db_type: DatabaseType) -> UpgradeResult<()> { - let db_cfg = DatabaseConfig::with_columns(V3_NUM_COLUMNS); - let mut db = Database::open(&db_cfg, db_path)?; - db.add_column().map_err(Into::into) -} - -/// Reads current database version from the file at given path. -/// If the file does not exist returns 0. -fn current_version(path: &Path) -> UpgradeResult { - match fs::File::open(version_file_path(path)) { - Err(ref err) if err.kind() == ErrorKind::NotFound => - Err(UpgradeError::MissingDatabaseVersionFile), - Err(_) => Err(UpgradeError::UnknownDatabaseVersion), - Ok(mut file) => { - let mut s = String::new(); - file.read_to_string(&mut s).map_err(|_| UpgradeError::UnknownDatabaseVersion)?; - u32::from_str_radix(&s, 10).map_err(|_| UpgradeError::UnknownDatabaseVersion) - }, - } -} - -/// Writes current database version to the file. -/// Creates a new file if the version file does not exist yet. -pub fn update_version(path: &Path) -> io::Result<()> { - fs::create_dir_all(path)?; - let mut file = fs::File::create(version_file_path(path))?; - file.write_all(format!("{}", CURRENT_VERSION).as_bytes())?; - Ok(()) -} - -/// Returns the version file path. 
-fn version_file_path(path: &Path) -> PathBuf { - let mut file_path = path.to_owned(); - file_path.push(VERSION_FILE_NAME); - file_path -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{tests::Block, DatabaseSource}; - - fn create_db(db_path: &Path, version: Option) { - if let Some(version) = version { - fs::create_dir_all(db_path).unwrap(); - let mut file = fs::File::create(version_file_path(db_path)).unwrap(); - file.write_all(format!("{}", version).as_bytes()).unwrap(); - } - } - - fn open_database(db_path: &Path, db_type: DatabaseType) -> sp_blockchain::Result<()> { - crate::utils::open_database::( - &DatabaseSource::RocksDb { path: db_path.to_owned(), cache_size: 128 }, - db_type, - true, - ) - .map(|_| ()) - .map_err(|e| sp_blockchain::Error::Backend(e.to_string())) - } - - #[test] - fn downgrade_never_happens() { - let db_dir = tempfile::TempDir::new().unwrap(); - create_db(db_dir.path(), Some(CURRENT_VERSION + 1)); - assert!(open_database(db_dir.path(), DatabaseType::Full).is_err()); - } - - #[test] - fn open_empty_database_works() { - let db_type = DatabaseType::Full; - let db_dir = tempfile::TempDir::new().unwrap(); - let db_dir = db_dir.path().join(db_type.as_str()); - open_database(&db_dir, db_type).unwrap(); - open_database(&db_dir, db_type).unwrap(); - assert_eq!(current_version(&db_dir).unwrap(), CURRENT_VERSION); - } - - #[test] - fn upgrade_to_3_works() { - let db_type = DatabaseType::Full; - for version_from_file in &[None, Some(1), Some(2)] { - let db_dir = tempfile::TempDir::new().unwrap(); - let db_path = db_dir.path().join(db_type.as_str()); - create_db(&db_path, *version_from_file); - open_database(&db_path, db_type).unwrap(); - assert_eq!(current_version(&db_path).unwrap(), CURRENT_VERSION); - } - } - - #[test] - fn upgrade_to_4_works() { - let db_type = DatabaseType::Full; - for version_from_file in &[None, Some(1), Some(2), Some(3)] { - let db_dir = tempfile::TempDir::new().unwrap(); - let db_path = db_dir.path().join(db_type.as_str()); - create_db(&db_path, *version_from_file); - open_database(&db_path, db_type).unwrap(); - assert_eq!(current_version(&db_path).unwrap(), CURRENT_VERSION); - } - } -} diff --git a/substrate/client/db/src/utils.rs b/substrate/client/db/src/utils.rs deleted file mode 100644 index 567950d0..00000000 --- a/substrate/client/db/src/utils.rs +++ /dev/null @@ -1,824 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Db-based backend utility structures and functions, used by both -//! full and light storages. 
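The removed `utils.rs` that follows derives its block-lookup keys from the block number and hash: a 4-byte big-endian number prefix for canonical blocks, and number-plus-hash for everything else, so keys sort by height. A standalone illustration of that key layout, using plain `u32` and byte slices instead of the crate's generic number and hash parameters:

/// Short key for canonical blocks: the block number as 4 big-endian bytes,
/// so keys in the lookup column sort in block-number order.
fn number_index_key(n: u32) -> [u8; 4] {
    [(n >> 24) as u8, (n >> 16) as u8, (n >> 8) as u8, n as u8]
}

/// Long key for blocks that may sit on a fork: the number prefix followed by
/// the block hash, so siblings at the same height stay distinct.
fn number_and_hash_to_lookup_key(n: u32, hash: &[u8]) -> Vec<u8> {
    let mut key = number_index_key(n).to_vec();
    key.extend_from_slice(hash);
    key
}

fn main() {
    assert_eq!(number_index_key(0x0102_0304), [1, 2, 3, 4]);
    let hash = [0xaa_u8; 32];
    let long = number_and_hash_to_lookup_key(5, &hash);
    assert_eq!(long.len(), 4 + 32);
    assert_eq!(&long[..4], &[0, 0, 0, 5]);
}

In the removed code, `block_id_to_lookup_key` resolves both `BlockId::Number` and `BlockId::Hash` through this scheme before reading headers, bodies, or justifications.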
- -use std::{fmt, fs, io, path::Path, sync::Arc}; - -use log::{debug, info}; - -use crate::{Database, DatabaseSource, DbHash}; -use codec::Decode; -use sp_database::Transaction; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, Header as HeaderT, UniqueSaturatedFrom, UniqueSaturatedInto, Zero}, -}; -use sp_trie::DBValue; - -/// Number of columns in the db. Must be the same for both full && light dbs. -/// Otherwise RocksDb will fail to open database && check its type. -pub const NUM_COLUMNS: u32 = 13; -/// Meta column. The set of keys in the column is shared by full && light storages. -pub const COLUMN_META: u32 = 0; - -/// Keys of entries in COLUMN_META. -pub mod meta_keys { - /// Type of storage (full or light). - pub const TYPE: &[u8; 4] = b"type"; - /// Best block key. - pub const BEST_BLOCK: &[u8; 4] = b"best"; - /// Last finalized block key. - pub const FINALIZED_BLOCK: &[u8; 5] = b"final"; - /// Last finalized state key. - pub const FINALIZED_STATE: &[u8; 6] = b"fstate"; - /// Block gap. - pub const BLOCK_GAP: &[u8; 3] = b"gap"; - /// Genesis block hash. - pub const GENESIS_HASH: &[u8; 3] = b"gen"; - /// Leaves prefix list key. - pub const LEAF_PREFIX: &[u8; 4] = b"leaf"; - /// Children prefix list key. - pub const CHILDREN_PREFIX: &[u8; 8] = b"children"; -} - -/// Database metadata. -#[derive(Debug)] -pub struct Meta { - /// Hash of the best known block. - pub best_hash: H, - /// Number of the best known block. - pub best_number: N, - /// Hash of the best finalized block. - pub finalized_hash: H, - /// Number of the best finalized block. - pub finalized_number: N, - /// Hash of the genesis block. - pub genesis_hash: H, - /// Finalized state, if any - pub finalized_state: Option<(H, N)>, - /// Block gap, start and end inclusive, if any. - pub block_gap: Option<(N, N)>, -} - -/// A block lookup key: used for canonical lookup from block number to hash -pub type NumberIndexKey = [u8; 4]; - -/// Database type. -#[derive(Clone, Copy, Debug, PartialEq)] -pub enum DatabaseType { - /// Full node database. - Full, -} - -/// Convert block number into short lookup key (LE representation) for -/// blocks that are in the canonical chain. -/// -/// In the current database schema, this kind of key is only used for -/// lookups into an index, NOT for storing header data or others. -pub fn number_index_key>(n: N) -> sp_blockchain::Result { - let n = n.try_into().map_err(|_| { - sp_blockchain::Error::Backend("Block number cannot be converted to u32".into()) - })?; - - Ok([(n >> 24) as u8, ((n >> 16) & 0xff) as u8, ((n >> 8) & 0xff) as u8, (n & 0xff) as u8]) -} - -/// Convert number and hash into long lookup key for blocks that are -/// not in the canonical chain. -pub fn number_and_hash_to_lookup_key(number: N, hash: H) -> sp_blockchain::Result> -where - N: TryInto, - H: AsRef<[u8]>, -{ - let mut lookup_key = number_index_key(number)?.to_vec(); - lookup_key.extend_from_slice(hash.as_ref()); - Ok(lookup_key) -} - -/// Delete number to hash mapping in DB transaction. -pub fn remove_number_to_key_mapping>( - transaction: &mut Transaction, - key_lookup_col: u32, - number: N, -) -> sp_blockchain::Result<()> { - transaction.remove(key_lookup_col, number_index_key(number)?.as_ref()); - Ok(()) -} - -/// Place a number mapping into the database. This maps number to current perceived -/// block hash at that position. 
-pub fn insert_number_to_key_mapping + Clone, H: AsRef<[u8]>>( - transaction: &mut Transaction, - key_lookup_col: u32, - number: N, - hash: H, -) -> sp_blockchain::Result<()> { - transaction.set_from_vec( - key_lookup_col, - number_index_key(number.clone())?.as_ref(), - number_and_hash_to_lookup_key(number, hash)?, - ); - Ok(()) -} - -/// Insert a hash to key mapping in the database. -pub fn insert_hash_to_key_mapping, H: AsRef<[u8]> + Clone>( - transaction: &mut Transaction, - key_lookup_col: u32, - number: N, - hash: H, -) -> sp_blockchain::Result<()> { - transaction.set_from_vec( - key_lookup_col, - hash.as_ref(), - number_and_hash_to_lookup_key(number, hash.clone())?, - ); - Ok(()) -} - -/// Convert block id to block lookup key. -/// block lookup key is the DB-key header, block and justification are stored under. -/// looks up lookup key by hash from DB as necessary. -pub fn block_id_to_lookup_key( - db: &dyn Database, - key_lookup_col: u32, - id: BlockId, -) -> Result>, sp_blockchain::Error> -where - Block: BlockT, - ::sp_runtime::traits::NumberFor: UniqueSaturatedFrom + UniqueSaturatedInto, -{ - Ok(match id { - BlockId::Number(n) => db.get(key_lookup_col, number_index_key(n)?.as_ref()), - BlockId::Hash(h) => db.get(key_lookup_col, h.as_ref()), - }) -} - -/// Opens the configured database. -pub fn open_database( - db_source: &DatabaseSource, - db_type: DatabaseType, - create: bool, -) -> OpenDbResult { - // Maybe migrate (copy) the database to a type specific subdirectory to make it - // possible that light and full databases coexist - // NOTE: This function can be removed in a few releases - maybe_migrate_to_type_subdir::(db_source, db_type)?; - - open_database_at::(db_source, db_type, create) -} - -fn open_database_at( - db_source: &DatabaseSource, - db_type: DatabaseType, - create: bool, -) -> OpenDbResult { - let db: Arc> = match &db_source { - DatabaseSource::ParityDb { path } => open_parity_db::(path, db_type, create)?, - #[cfg(feature = "rocksdb")] - DatabaseSource::RocksDb { path, cache_size } => - open_kvdb_rocksdb::(path, db_type, create, *cache_size)?, - DatabaseSource::Custom { db, require_create_flag } => { - if *require_create_flag && !create { - return Err(OpenDbError::DoesNotExist) - } - db.clone() - }, - DatabaseSource::Auto { paritydb_path, rocksdb_path, cache_size } => { - // check if rocksdb exists first, if not, open paritydb - match open_kvdb_rocksdb::(rocksdb_path, db_type, false, *cache_size) { - Ok(db) => db, - Err(OpenDbError::NotEnabled(_)) | Err(OpenDbError::DoesNotExist) => - open_parity_db::(paritydb_path, db_type, create)?, - Err(as_is) => return Err(as_is), - } - }, - }; - - check_database_type(&*db, db_type)?; - Ok(db) -} - -#[derive(Debug)] -pub enum OpenDbError { - // constructed only when rocksdb and paritydb are disabled - #[allow(dead_code)] - NotEnabled(&'static str), - DoesNotExist, - Internal(String), - DatabaseError(sp_database::error::DatabaseError), - UnexpectedDbType { - expected: DatabaseType, - found: Vec, - }, -} - -type OpenDbResult = Result>, OpenDbError>; - -impl fmt::Display for OpenDbError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - OpenDbError::Internal(e) => write!(f, "{}", e), - OpenDbError::DoesNotExist => write!(f, "Database does not exist at given location"), - OpenDbError::NotEnabled(feat) => { - write!(f, "`{}` feature not enabled, database can not be opened", feat) - }, - OpenDbError::DatabaseError(db_error) => { - write!(f, "Database Error: {}", db_error) - }, - OpenDbError::UnexpectedDbType 
{ expected, found } => { - write!( - f, - "Unexpected DB-Type. Expected: {:?}, Found: {:?}", - expected.as_str().as_bytes(), - found - ) - }, - } - } -} - -impl From for sp_blockchain::Error { - fn from(err: OpenDbError) -> Self { - sp_blockchain::Error::Backend(err.to_string()) - } -} - -impl From for OpenDbError { - fn from(err: parity_db::Error) -> Self { - if matches!(err, parity_db::Error::DatabaseNotFound) { - OpenDbError::DoesNotExist - } else { - OpenDbError::Internal(err.to_string()) - } - } -} - -impl From for OpenDbError { - fn from(err: io::Error) -> Self { - if err.to_string().contains("create_if_missing is false") { - OpenDbError::DoesNotExist - } else { - OpenDbError::Internal(err.to_string()) - } - } -} - -fn open_parity_db(path: &Path, db_type: DatabaseType, create: bool) -> OpenDbResult { - match crate::parity_db::open(path, db_type, create, false) { - Ok(db) => Ok(db), - Err(parity_db::Error::InvalidConfiguration(_)) => { - log::warn!("Invalid parity db configuration, attempting database metadata update."); - // Try to update the database with the new config - Ok(crate::parity_db::open(path, db_type, create, true)?) - }, - Err(e) => Err(e.into()), - } -} - -#[cfg(any(feature = "rocksdb", test))] -fn open_kvdb_rocksdb( - path: &Path, - db_type: DatabaseType, - create: bool, - cache_size: usize, -) -> OpenDbResult { - // first upgrade database to required version - match crate::upgrade::upgrade_db::(path, db_type) { - // in case of missing version file, assume that database simply does not exist at given - // location - Ok(_) | Err(crate::upgrade::UpgradeError::MissingDatabaseVersionFile) => (), - Err(err) => return Err(io::Error::new(io::ErrorKind::Other, err.to_string()).into()), - } - - // and now open database assuming that it has the latest version - let mut db_config = kvdb_rocksdb::DatabaseConfig::with_columns(NUM_COLUMNS); - db_config.create_if_missing = create; - - let mut memory_budget = std::collections::HashMap::new(); - match db_type { - DatabaseType::Full => { - let state_col_budget = (cache_size as f64 * 0.9) as usize; - let other_col_budget = (cache_size - state_col_budget) / (NUM_COLUMNS as usize - 1); - - for i in 0..NUM_COLUMNS { - if i == crate::columns::STATE { - memory_budget.insert(i, state_col_budget); - } else { - memory_budget.insert(i, other_col_budget); - } - } - log::trace!( - target: "db", - "Open RocksDB database at {:?}, state column budget: {} MiB, others({}) column cache: {} MiB", - path, - state_col_budget, - NUM_COLUMNS, - other_col_budget, - ); - }, - } - db_config.memory_budget = memory_budget; - - let db = kvdb_rocksdb::Database::open(&db_config, path)?; - // write database version only after the database is succesfully opened - crate::upgrade::update_version(path)?; - Ok(sp_database::as_database(db)) -} - -#[cfg(not(any(feature = "rocksdb", test)))] -fn open_kvdb_rocksdb( - _path: &Path, - _db_type: DatabaseType, - _create: bool, - _cache_size: usize, -) -> OpenDbResult { - Err(OpenDbError::NotEnabled("with-kvdb-rocksdb")) -} - -/// Check database type. 
-pub fn check_database_type( - db: &dyn Database, - db_type: DatabaseType, -) -> Result<(), OpenDbError> { - match db.get(COLUMN_META, meta_keys::TYPE) { - Some(stored_type) => - if db_type.as_str().as_bytes() != &*stored_type { - return Err(OpenDbError::UnexpectedDbType { - expected: db_type, - found: stored_type.to_owned(), - }) - }, - None => { - let mut transaction = Transaction::new(); - transaction.set(COLUMN_META, meta_keys::TYPE, db_type.as_str().as_bytes()); - db.commit(transaction).map_err(OpenDbError::DatabaseError)?; - }, - } - - Ok(()) -} - -fn maybe_migrate_to_type_subdir( - source: &DatabaseSource, - db_type: DatabaseType, -) -> Result<(), OpenDbError> { - if let Some(p) = source.path() { - let mut basedir = p.to_path_buf(); - basedir.pop(); - - // Do we have to migrate to a database-type-based subdirectory layout: - // See if there's a file identifying a rocksdb or paritydb folder in the parent dir and - // the target path ends in a role specific directory - if (basedir.join("db_version").exists() || basedir.join("metadata").exists()) && - (p.ends_with(DatabaseType::Full.as_str())) - { - // Try to open the database to check if the current `DatabaseType` matches the type of - // database stored in the target directory and close the database on success. - let mut old_source = source.clone(); - old_source.set_path(&basedir); - open_database_at::(&old_source, db_type, false)?; - - info!( - "Migrating database to a database-type-based subdirectory: '{:?}' -> '{:?}'", - basedir, - basedir.join(db_type.as_str()) - ); - - let mut tmp_dir = basedir.clone(); - tmp_dir.pop(); - tmp_dir.push("tmp"); - - fs::rename(&basedir, &tmp_dir)?; - fs::create_dir_all(&p)?; - fs::rename(tmp_dir, &p)?; - } - } - - Ok(()) -} - -/// Read database column entry for the given block. -pub fn read_db( - db: &dyn Database, - col_index: u32, - col: u32, - id: BlockId, -) -> sp_blockchain::Result> -where - Block: BlockT, -{ - block_id_to_lookup_key(db, col_index, id).map(|key| match key { - Some(key) => db.get(col, key.as_ref()), - None => None, - }) -} - -/// Remove database column entry for the given block. -pub fn remove_from_db( - transaction: &mut Transaction, - db: &dyn Database, - col_index: u32, - col: u32, - id: BlockId, -) -> sp_blockchain::Result<()> -where - Block: BlockT, -{ - block_id_to_lookup_key(db, col_index, id).map(|key| { - if let Some(key) = key { - transaction.remove(col, key.as_ref()); - } - }) -} - -/// Read a header from the database. -pub fn read_header( - db: &dyn Database, - col_index: u32, - col: u32, - id: BlockId, -) -> sp_blockchain::Result> { - match read_db(db, col_index, col, id)? { - Some(header) => match Block::Header::decode(&mut &header[..]) { - Ok(header) => Ok(Some(header)), - Err(_) => Err(sp_blockchain::Error::Backend("Error decoding header".into())), - }, - None => Ok(None), - } -} - -/// Read meta from the database. -pub fn read_meta( - db: &dyn Database, - col_header: u32, -) -> Result::Header as HeaderT>::Number, Block::Hash>, sp_blockchain::Error> -where - Block: BlockT, -{ - let genesis_hash: Block::Hash = match read_genesis_hash(db)? 
{ - Some(genesis_hash) => genesis_hash, - None => - return Ok(Meta { - best_hash: Default::default(), - best_number: Zero::zero(), - finalized_hash: Default::default(), - finalized_number: Zero::zero(), - genesis_hash: Default::default(), - finalized_state: None, - block_gap: None, - }), - }; - - let load_meta_block = |desc, key| -> Result<_, sp_blockchain::Error> { - if let Some(Some(header)) = db - .get(COLUMN_META, key) - .and_then(|id| db.get(col_header, &id).map(|b| Block::Header::decode(&mut &b[..]).ok())) - { - let hash = header.hash(); - debug!( - target: "db", - "Opened blockchain db, fetched {} = {:?} ({})", - desc, - hash, - header.number(), - ); - Ok((hash, *header.number())) - } else { - Ok((Default::default(), Zero::zero())) - } - }; - - let (best_hash, best_number) = load_meta_block("best", meta_keys::BEST_BLOCK)?; - let (finalized_hash, finalized_number) = load_meta_block("final", meta_keys::FINALIZED_BLOCK)?; - let (finalized_state_hash, finalized_state_number) = - load_meta_block("final_state", meta_keys::FINALIZED_STATE)?; - let finalized_state = if finalized_state_hash != Default::default() { - Some((finalized_state_hash, finalized_state_number)) - } else { - None - }; - let block_gap = db - .get(COLUMN_META, meta_keys::BLOCK_GAP) - .and_then(|d| Decode::decode(&mut d.as_slice()).ok()); - debug!(target: "db", "block_gap={:?}", block_gap); - - Ok(Meta { - best_hash, - best_number, - finalized_hash, - finalized_number, - genesis_hash, - finalized_state, - block_gap, - }) -} - -/// Read genesis hash from database. -pub fn read_genesis_hash( - db: &dyn Database, -) -> sp_blockchain::Result> { - match db.get(COLUMN_META, meta_keys::GENESIS_HASH) { - Some(h) => match Decode::decode(&mut &h[..]) { - Ok(h) => Ok(Some(h)), - Err(err) => - Err(sp_blockchain::Error::Backend(format!("Error decoding genesis hash: {}", err))), - }, - None => Ok(None), - } -} - -impl DatabaseType { - /// Returns str representation of the type. 
- pub fn as_str(&self) -> &'static str { - match *self { - DatabaseType::Full => "full", - } - } -} - -pub(crate) struct JoinInput<'a, 'b>(&'a [u8], &'b [u8]); - -pub(crate) fn join_input<'a, 'b>(i1: &'a [u8], i2: &'b [u8]) -> JoinInput<'a, 'b> { - JoinInput(i1, i2) -} - -impl<'a, 'b> codec::Input for JoinInput<'a, 'b> { - fn remaining_len(&mut self) -> Result, codec::Error> { - Ok(Some(self.0.len() + self.1.len())) - } - - fn read(&mut self, into: &mut [u8]) -> Result<(), codec::Error> { - let mut read = 0; - if self.0.len() > 0 { - read = std::cmp::min(self.0.len(), into.len()); - self.0.read(&mut into[..read])?; - } - if read < into.len() { - self.1.read(&mut into[read..])?; - } - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use codec::Input; - use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper}; - use std::path::PathBuf; - type Block = RawBlock>; - - #[cfg(any(feature = "rocksdb", test))] - #[test] - fn database_type_subdir_migration() { - type Block = RawBlock>; - - fn check_dir_for_db_type( - db_type: DatabaseType, - mut source: DatabaseSource, - db_check_file: &str, - ) { - let base_path = tempfile::TempDir::new().unwrap(); - let old_db_path = base_path.path().join("chains/dev/db"); - - source.set_path(&old_db_path); - - { - let db_res = open_database::(&source, db_type, true); - assert!(db_res.is_ok(), "New database should be created."); - assert!(old_db_path.join(db_check_file).exists()); - assert!(!old_db_path.join(db_type.as_str()).join("db_version").exists()); - } - - source.set_path(&old_db_path.join(db_type.as_str())); - - let db_res = open_database::(&source, db_type, true); - assert!(db_res.is_ok(), "Reopening the db with the same role should work"); - // check if the database dir had been migrated - assert!(!old_db_path.join(db_check_file).exists()); - assert!(old_db_path.join(db_type.as_str()).join(db_check_file).exists()); - } - - check_dir_for_db_type( - DatabaseType::Full, - DatabaseSource::RocksDb { path: PathBuf::new(), cache_size: 128 }, - "db_version", - ); - - check_dir_for_db_type( - DatabaseType::Full, - DatabaseSource::ParityDb { path: PathBuf::new() }, - "metadata", - ); - - // check failure on reopening with wrong role - { - let base_path = tempfile::TempDir::new().unwrap(); - let old_db_path = base_path.path().join("chains/dev/db"); - - let source = DatabaseSource::RocksDb { path: old_db_path.clone(), cache_size: 128 }; - { - let db_res = open_database::(&source, DatabaseType::Full, true); - assert!(db_res.is_ok(), "New database should be created."); - - // check if the database dir had been migrated - assert!(old_db_path.join("db_version").exists()); - assert!(!old_db_path.join("light/db_version").exists()); - assert!(!old_db_path.join("full/db_version").exists()); - } - // assert nothing was changed - assert!(old_db_path.join("db_version").exists()); - assert!(!old_db_path.join("full/db_version").exists()); - } - } - - #[test] - fn number_index_key_doesnt_panic() { - let id = BlockId::::Number(72340207214430721); - match id { - BlockId::Number(n) => number_index_key(n).expect_err("number should overflow u32"), - _ => unreachable!(), - }; - } - - #[test] - fn database_type_as_str_works() { - assert_eq!(DatabaseType::Full.as_str(), "full"); - } - - #[test] - fn join_input_works() { - let buf1 = [1, 2, 3, 4]; - let buf2 = [5, 6, 7, 8]; - let mut test = [0, 0, 0]; - let mut joined = join_input(buf1.as_ref(), buf2.as_ref()); - assert_eq!(joined.remaining_len().unwrap(), Some(8)); - - joined.read(&mut test).unwrap(); - assert_eq!(test, 
[1, 2, 3]); - assert_eq!(joined.remaining_len().unwrap(), Some(5)); - - joined.read(&mut test).unwrap(); - assert_eq!(test, [4, 5, 6]); - assert_eq!(joined.remaining_len().unwrap(), Some(2)); - - joined.read(&mut test[0..2]).unwrap(); - assert_eq!(test, [7, 8, 6]); - assert_eq!(joined.remaining_len().unwrap(), Some(0)); - } - - #[test] - fn test_open_database_auto_new() { - let db_dir = tempfile::TempDir::new().unwrap(); - let db_path = db_dir.path().to_owned(); - let paritydb_path = db_path.join("paritydb"); - let rocksdb_path = db_path.join("rocksdb_path"); - let source = DatabaseSource::Auto { - paritydb_path: paritydb_path.clone(), - rocksdb_path: rocksdb_path.clone(), - cache_size: 128, - }; - - // it should create new auto (paritydb) database - { - let db_res = open_database::(&source, DatabaseType::Full, true); - assert!(db_res.is_ok(), "New database should be created."); - } - - // it should reopen existing auto (pairtydb) database - { - let db_res = open_database::(&source, DatabaseType::Full, true); - assert!(db_res.is_ok(), "Existing parity database should be reopened"); - } - - // it should fail to open existing auto (pairtydb) database - { - let db_res = open_database::( - &DatabaseSource::RocksDb { path: rocksdb_path, cache_size: 128 }, - DatabaseType::Full, - true, - ); - assert!(db_res.is_ok(), "New database should be opened."); - } - - // it should reopen existing auto (pairtydb) database - { - let db_res = open_database::( - &DatabaseSource::ParityDb { path: paritydb_path }, - DatabaseType::Full, - true, - ); - assert!(db_res.is_ok(), "Existing parity database should be reopened"); - } - } - - #[test] - fn test_open_database_rocksdb_new() { - let db_dir = tempfile::TempDir::new().unwrap(); - let db_path = db_dir.path().to_owned(); - let paritydb_path = db_path.join("paritydb"); - let rocksdb_path = db_path.join("rocksdb_path"); - - let source = DatabaseSource::RocksDb { path: rocksdb_path.clone(), cache_size: 128 }; - - // it should create new rocksdb database - { - let db_res = open_database::(&source, DatabaseType::Full, true); - assert!(db_res.is_ok(), "New rocksdb database should be created"); - } - - // it should reopen existing auto (rocksdb) database - { - let db_res = open_database::( - &DatabaseSource::Auto { - paritydb_path: paritydb_path.clone(), - rocksdb_path: rocksdb_path.clone(), - cache_size: 128, - }, - DatabaseType::Full, - true, - ); - assert!(db_res.is_ok(), "Existing rocksdb database should be reopened"); - } - - // it should fail to open existing auto (rocksdb) database - { - let db_res = open_database::( - &DatabaseSource::ParityDb { path: paritydb_path }, - DatabaseType::Full, - true, - ); - assert!(db_res.is_ok(), "New paritydb database should be created"); - } - - // it should reopen existing auto (pairtydb) database - { - let db_res = open_database::( - &DatabaseSource::RocksDb { path: rocksdb_path, cache_size: 128 }, - DatabaseType::Full, - true, - ); - assert!(db_res.is_ok(), "Existing rocksdb database should be reopened"); - } - } - - #[test] - fn test_open_database_paritydb_new() { - let db_dir = tempfile::TempDir::new().unwrap(); - let db_path = db_dir.path().to_owned(); - let paritydb_path = db_path.join("paritydb"); - let rocksdb_path = db_path.join("rocksdb_path"); - - let source = DatabaseSource::ParityDb { path: paritydb_path.clone() }; - - // it should create new paritydb database - { - let db_res = open_database::(&source, DatabaseType::Full, true); - assert!(db_res.is_ok(), "New database should be created."); - } - - // it should 
reopen existing pairtydb database
-		{
-			let db_res = open_database::<Block>(&source, DatabaseType::Full, true);
-			assert!(db_res.is_ok(), "Existing parity database should be reopened");
-		}
-
-		// it should fail to open existing pairtydb database
-		{
-			let db_res = open_database::<Block>(
-				&DatabaseSource::RocksDb { path: rocksdb_path.clone(), cache_size: 128 },
-				DatabaseType::Full,
-				true,
-			);
-			assert!(db_res.is_ok(), "New rocksdb database should be created");
-		}
-
-		// it should reopen existing auto (pairtydb) database
-		{
-			let db_res = open_database::<Block>(
-				&DatabaseSource::Auto { paritydb_path, rocksdb_path, cache_size: 128 },
-				DatabaseType::Full,
-				true,
-			);
-			assert!(db_res.is_ok(), "Existing parity database should be reopened");
-		}
-	}
-}
diff --git a/substrate/client/state-db/Cargo.toml b/substrate/client/state-db/Cargo.toml
deleted file mode 100644
index ddce7737..00000000
--- a/substrate/client/state-db/Cargo.toml
+++ /dev/null
@@ -1,22 +0,0 @@
-[package]
-name = "sc-state-db"
-version = "0.10.0-dev"
-authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2021"
-license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
-homepage = "https://substrate.io"
-repository = "https://github.com/paritytech/substrate/"
-description = "State database maintenance. Handles canonicalization and pruning in the database."
-readme = "README.md"
-
-[package.metadata.docs.rs]
-targets = ["x86_64-unknown-linux-gnu"]
-
-[dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] }
-log = "0.4.17"
-parity-util-mem = { version = "0.12.0", default-features = false, features = ["primitive-types"] }
-parity-util-mem-derive = "0.1.0"
-parking_lot = "0.12.1"
-sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" }
-sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.33" }
diff --git a/substrate/client/state-db/README.md b/substrate/client/state-db/README.md
deleted file mode 100644
index a02b3929..00000000
--- a/substrate/client/state-db/README.md
+++ /dev/null
@@ -1,16 +0,0 @@
-State database maintenance. Handles canonicalization and pruning in the database. The input to
-this module is a `ChangeSet` which is basically a list of key-value pairs (trie nodes) that
-were added or deleted during block execution.
-
-# Canonicalization.
-Canonicalization window tracks a tree of blocks identified by header hash. The in-memory
-overlay allows to get any node that was inserted in any of the blocks within the window.
-The tree is journaled to the backing database and rebuilt on startup.
-Canonicalization function selects one root from the top of the tree and discards all other roots and
-their subtrees.
-
-# Pruning.
-See `RefWindow` for pruning algorithm details. `StateDb` prunes on each canonicalization until pruning
-constraints are satisfied.
-
-License: GPL-3.0-or-later WITH Classpath-exception-2.0
\ No newline at end of file
diff --git a/substrate/client/state-db/src/lib.rs b/substrate/client/state-db/src/lib.rs
deleted file mode 100644
index b8d319ff..00000000
--- a/substrate/client/state-db/src/lib.rs
+++ /dev/null
@@ -1,954 +0,0 @@
-// This file is part of Substrate.
-
-// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-//! State database maintenance. Handles canonicalization and pruning in the database.
-//!
-//! # Canonicalization.
-//! Canonicalization window tracks a tree of blocks identified by header hash. The in-memory
-//! overlay allows to get any trie node that was inserted in any of the blocks within the window.
-//! The overlay is journaled to the backing database and rebuilt on startup.
-//! There's a limit of 32 blocks that may have the same block number in the canonicalization window.
-//!
-//! Canonicalization function selects one root from the top of the tree and discards all other roots
-//! and their subtrees. Upon canonicalization all trie nodes that were inserted in the block are
-//! added to the backing DB and block tracking is moved to the pruning window, where no forks are
-//! allowed.
-//!
-//! # Canonicalization vs Finality
-//! Database engine uses a notion of canonicality, rather then finality. A canonical block may not
-//! be yet finalized from the perspective of the consensus engine, but it still can't be reverted in
-//! the database. Most of the time during normal operation last canonical block is the same as last
-//! finalized. However if finality stall for a long duration for some reason, there's only a certain
-//! number of blocks that can fit in the non-canonical overlay, so canonicalization of an
-//! unfinalized block may be forced.
-//!
-//! # Pruning.
-//! See `RefWindow` for pruning algorithm details. `StateDb` prunes on each canonicalization until
-//! pruning constraints are satisfied.
-
-mod noncanonical;
-mod pruning;
-#[cfg(test)]
-mod test;
-
-use codec::Codec;
-use log::trace;
-use noncanonical::NonCanonicalOverlay;
-use parity_util_mem::{malloc_size, MallocSizeOf};
-use parking_lot::RwLock;
-use pruning::{HaveBlock, RefWindow};
-use sc_client_api::{MemorySize, StateDbMemoryInfo};
-use std::{
-	collections::{hash_map::Entry, HashMap},
-	fmt,
-};
-
-const PRUNING_MODE: &[u8] = b"mode";
-const PRUNING_MODE_ARCHIVE: &[u8] = b"archive";
-const PRUNING_MODE_ARCHIVE_CANON: &[u8] = b"archive_canonical";
-const PRUNING_MODE_CONSTRAINED: &[u8] = b"constrained";
-pub(crate) const DEFAULT_MAX_BLOCK_CONSTRAINT: u32 = 256;
-
-/// Database value type.
-pub type DBValue = Vec<u8>;
-
-/// Basic set of requirements for the Block hash and node key types.
-pub trait Hash:
-	Send
-	+ Sync
-	+ Sized
-	+ Eq
-	+ PartialEq
-	+ Clone
-	+ Default
-	+ fmt::Debug
-	+ Codec
-	+ std::hash::Hash
-	+ 'static
-{
-}
-impl<
-		T: Send
-			+ Sync
-			+ Sized
-			+ Eq
-			+ PartialEq
-			+ Clone
-			+ Default
-			+ fmt::Debug
-			+ Codec
-			+ std::hash::Hash
-			+ 'static,
-	> Hash for T
-{
-}
-
-/// Backend database trait. Read-only.
-pub trait MetaDb {
-	type Error: fmt::Debug;
-
-	/// Get meta value, such as the journal.
-	fn get_meta(&self, key: &[u8]) -> Result<Option<DBValue>, Self::Error>;
-}
-
-/// Backend database trait. Read-only.
-pub trait NodeDb { - type Key: ?Sized; - type Error: fmt::Debug; - - /// Get state trie node. - fn get(&self, key: &Self::Key) -> Result, Self::Error>; -} - -/// Error type. -#[derive(Eq, PartialEq)] -pub enum Error { - /// Database backend error. - Db(E), - StateDb(StateDbError), -} - -#[derive(Eq, PartialEq)] -pub enum StateDbError { - /// `Codec` decoding error. - Decoding(codec::Error), - /// Trying to canonicalize invalid block. - InvalidBlock, - /// Trying to insert block with invalid number. - InvalidBlockNumber, - /// Trying to insert block with unknown parent. - InvalidParent, - /// Invalid pruning mode specified. Contains expected mode. - IncompatiblePruningModes { stored: PruningMode, requested: PruningMode }, - /// Too many unfinalized sibling blocks inserted. - TooManySiblingBlocks, - /// Trying to insert existing block. - BlockAlreadyExists, - /// Invalid metadata - Metadata(String), - /// Trying to get a block record from db while it is not commit to db yet - BlockUnavailable, - /// Block record is missing from the pruning window - BlockMissing, -} - -impl From for Error { - fn from(inner: StateDbError) -> Self { - Self::StateDb(inner) - } -} - -/// Pinning error type. -pub enum PinError { - /// Trying to pin invalid block. - InvalidBlock, -} - -impl From for Error { - fn from(x: codec::Error) -> Self { - StateDbError::Decoding(x).into() - } -} - -impl fmt::Debug for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Self::Db(e) => e.fmt(f), - Self::StateDb(e) => e.fmt(f), - } - } -} - -impl fmt::Debug for StateDbError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Self::Decoding(e) => write!(f, "Error decoding sliceable value: {}", e), - Self::InvalidBlock => write!(f, "Trying to canonicalize invalid block"), - Self::InvalidBlockNumber => write!(f, "Trying to insert block with invalid number"), - Self::InvalidParent => write!(f, "Trying to insert block with unknown parent"), - Self::IncompatiblePruningModes { stored, requested } => write!( - f, - "Incompatible pruning modes [stored: {:?}; requested: {:?}]", - stored, requested - ), - Self::TooManySiblingBlocks => write!(f, "Too many sibling blocks inserted"), - Self::BlockAlreadyExists => write!(f, "Block already exists"), - Self::Metadata(message) => write!(f, "Invalid metadata: {}", message), - Self::BlockUnavailable => - write!(f, "Trying to get a block record from db while it is not commit to db yet"), - Self::BlockMissing => write!(f, "Block record is missing from the pruning window"), - } - } -} - -/// A set of state node changes. -#[derive(Default, Debug, Clone)] -pub struct ChangeSet { - /// Inserted nodes. - pub inserted: Vec<(H, DBValue)>, - /// Deleted nodes. - pub deleted: Vec, -} - -/// A set of changes to the backing database. -#[derive(Default, Debug, Clone)] -pub struct CommitSet { - /// State node changes. - pub data: ChangeSet, - /// Metadata changes. - pub meta: ChangeSet>, -} - -/// Pruning constraints. If none are specified pruning is -#[derive(Debug, Clone, Eq, PartialEq)] -pub struct Constraints { - /// Maximum blocks. Defaults to 0 when unspecified, effectively keeping only non-canonical - /// states. - pub max_blocks: Option, - /// Maximum memory in the pruning overlay. - pub max_mem: Option, -} - -/// Pruning mode. -#[derive(Debug, Clone, Eq, PartialEq)] -pub enum PruningMode { - /// Maintain a pruning window. - Constrained(Constraints), - /// No pruning. Canonicalization is a no-op. 
- ArchiveAll, - /// Canonicalization discards non-canonical nodes. All the canonical nodes are kept in the DB. - ArchiveCanonical, -} - -impl PruningMode { - /// Create a mode that keeps given number of blocks. - pub fn blocks_pruning(n: u32) -> PruningMode { - PruningMode::Constrained(Constraints { max_blocks: Some(n), max_mem: None }) - } - - /// Is this an archive (either ArchiveAll or ArchiveCanonical) pruning mode? - pub fn is_archive(&self) -> bool { - match *self { - PruningMode::ArchiveAll | PruningMode::ArchiveCanonical => true, - PruningMode::Constrained(_) => false, - } - } - - /// Returns the pruning mode - pub fn id(&self) -> &[u8] { - match self { - PruningMode::ArchiveAll => PRUNING_MODE_ARCHIVE, - PruningMode::ArchiveCanonical => PRUNING_MODE_ARCHIVE_CANON, - PruningMode::Constrained(_) => PRUNING_MODE_CONSTRAINED, - } - } - - pub fn from_id(id: &[u8]) -> Option { - match id { - PRUNING_MODE_ARCHIVE => Some(Self::ArchiveAll), - PRUNING_MODE_ARCHIVE_CANON => Some(Self::ArchiveCanonical), - PRUNING_MODE_CONSTRAINED => Some(Self::Constrained(Default::default())), - _ => None, - } - } -} - -impl Default for PruningMode { - fn default() -> Self { - PruningMode::Constrained(Default::default()) - } -} - -impl Default for Constraints { - fn default() -> Self { - Self { max_blocks: Some(DEFAULT_MAX_BLOCK_CONSTRAINT), max_mem: None } - } -} - -fn to_meta_key(suffix: &[u8], data: &S) -> Vec { - let mut buffer = data.encode(); - buffer.extend(suffix); - buffer -} - -pub struct StateDbSync { - mode: PruningMode, - non_canonical: NonCanonicalOverlay, - pruning: Option>, - pinned: HashMap, - ref_counting: bool, -} - -impl - StateDbSync -{ - fn new( - mode: PruningMode, - ref_counting: bool, - db: D, - ) -> Result, Error> { - trace!(target: "state-db", "StateDb settings: {:?}. Ref-counting: {}", mode, ref_counting); - - let non_canonical: NonCanonicalOverlay = NonCanonicalOverlay::new(&db)?; - let pruning: Option> = match mode { - PruningMode::Constrained(Constraints { max_mem: Some(_), .. }) => unimplemented!(), - PruningMode::Constrained(Constraints { max_blocks, .. 
}) => - Some(RefWindow::new(db, max_blocks.unwrap_or(0), ref_counting)?), - PruningMode::ArchiveAll | PruningMode::ArchiveCanonical => None, - }; - - Ok(StateDbSync { mode, non_canonical, pruning, pinned: Default::default(), ref_counting }) - } - - fn insert_block( - &mut self, - hash: &BlockHash, - number: u64, - parent_hash: &BlockHash, - mut changeset: ChangeSet, - ) -> Result, Error> { - match self.mode { - PruningMode::ArchiveAll => { - changeset.deleted.clear(); - // write changes immediately - Ok(CommitSet { data: changeset, meta: Default::default() }) - }, - PruningMode::Constrained(_) | PruningMode::ArchiveCanonical => self - .non_canonical - .insert(hash, number, parent_hash, changeset) - .map_err(Into::into), - } - } - - fn canonicalize_block(&mut self, hash: &BlockHash) -> Result, Error> { - // NOTE: it is important that the change to `LAST_CANONICAL` (emit from - // `non_canonical.canonicalize`) and the insert of the new pruning journal (emit from - // `pruning.note_canonical`) are collected into the same `CommitSet` and are committed to - // the database atomically to keep their consistency when restarting the node - let mut commit = CommitSet::default(); - if self.mode == PruningMode::ArchiveAll { - return Ok(commit) - } - let number = self.non_canonical.canonicalize(hash, &mut commit)?; - if self.mode == PruningMode::ArchiveCanonical { - commit.data.deleted.clear(); - } - if let Some(ref mut pruning) = self.pruning { - pruning.note_canonical(hash, number, &mut commit)?; - } - self.prune(&mut commit)?; - Ok(commit) - } - - fn best_canonical(&self) -> Option { - self.non_canonical.last_canonicalized_block_number() - } - - fn is_pruned(&self, hash: &BlockHash, number: u64) -> IsPruned { - match self.mode { - PruningMode::ArchiveAll => IsPruned::NotPruned, - PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => { - if self.best_canonical().map(|c| number > c).unwrap_or(true) { - if self.non_canonical.have_block(hash) { - IsPruned::NotPruned - } else { - IsPruned::Pruned - } - } else { - match self.pruning.as_ref() { - None => IsPruned::NotPruned, - Some(pruning) => match pruning.have_block(hash, number) { - HaveBlock::No => IsPruned::Pruned, - HaveBlock::Yes => IsPruned::NotPruned, - HaveBlock::Maybe => IsPruned::MaybePruned, - }, - } - } - }, - } - } - - fn prune(&mut self, commit: &mut CommitSet) -> Result<(), Error> { - if let (&mut Some(ref mut pruning), &PruningMode::Constrained(ref constraints)) = - (&mut self.pruning, &self.mode) - { - loop { - if pruning.window_size() <= constraints.max_blocks.unwrap_or(0) as u64 { - break - } - - if constraints.max_mem.map_or(false, |m| pruning.mem_used() > m) { - break - } - - let pinned = &self.pinned; - match pruning.next_hash() { - // the block record is temporary unavailable, break and try next time - Err(Error::StateDb(StateDbError::BlockUnavailable)) => break, - res => - if res?.map_or(false, |h| pinned.contains_key(&h)) { - break - }, - } - match pruning.prune_one(commit) { - // this branch should not reach as previous `next_hash` don't return error - // keeping it for robustness - Err(Error::StateDb(StateDbError::BlockUnavailable)) => break, - res => res?, - } - } - } - Ok(()) - } - - /// Revert all non-canonical blocks with the best block number. - /// Returns a database commit or `None` if not possible. - /// For archive an empty commit set is returned. 
- fn revert_one(&mut self) -> Option> { - match self.mode { - PruningMode::ArchiveAll => Some(CommitSet::default()), - PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => - self.non_canonical.revert_one(), - } - } - - fn remove(&mut self, hash: &BlockHash) -> Option> { - match self.mode { - PruningMode::ArchiveAll => Some(CommitSet::default()), - PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => - self.non_canonical.remove(hash), - } - } - - fn pin(&mut self, hash: &BlockHash, number: u64, hint: F) -> Result<(), PinError> - where - F: Fn() -> bool, - { - match self.mode { - PruningMode::ArchiveAll => Ok(()), - PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => { - let have_block = self.non_canonical.have_block(hash) || - self.pruning.as_ref().map_or(false, |pruning| { - match pruning.have_block(hash, number) { - HaveBlock::No => false, - HaveBlock::Yes => true, - HaveBlock::Maybe => hint(), - } - }); - if have_block { - let refs = self.pinned.entry(hash.clone()).or_default(); - if *refs == 0 { - trace!(target: "state-db-pin", "Pinned block: {:?}", hash); - self.non_canonical.pin(hash); - } - *refs += 1; - Ok(()) - } else { - Err(PinError::InvalidBlock) - } - }, - } - } - - fn unpin(&mut self, hash: &BlockHash) { - match self.pinned.entry(hash.clone()) { - Entry::Occupied(mut entry) => { - *entry.get_mut() -= 1; - if *entry.get() == 0 { - trace!(target: "state-db-pin", "Unpinned block: {:?}", hash); - entry.remove(); - self.non_canonical.unpin(hash); - } else { - trace!(target: "state-db-pin", "Releasing reference for {:?}", hash); - } - }, - Entry::Vacant(_) => {}, - } - } - - fn sync(&mut self) { - self.non_canonical.sync(); - } - - pub fn get( - &self, - key: &Q, - db: &DB, - ) -> Result, Error> - where - Q: AsRef, - Key: std::borrow::Borrow, - Q: std::hash::Hash + Eq, - { - if let Some(value) = self.non_canonical.get(key) { - return Ok(Some(value)) - } - db.get(key.as_ref()).map_err(Error::Db) - } - - fn memory_info(&self) -> StateDbMemoryInfo { - StateDbMemoryInfo { - non_canonical: MemorySize::from_bytes(malloc_size(&self.non_canonical)), - pruning: self.pruning.as_ref().map(|p| MemorySize::from_bytes(malloc_size(&p))), - pinned: MemorySize::from_bytes(malloc_size(&self.pinned)), - } - } -} - -/// State DB maintenance. See module description. -/// Can be shared across threads. -pub struct StateDb { - db: RwLock>, -} - -impl - StateDb -{ - /// Create an instance of [`StateDb`]. - pub fn open( - db: D, - requested_mode: Option, - ref_counting: bool, - should_init: bool, - ) -> Result<(CommitSet, StateDb), Error> { - let stored_mode = fetch_stored_pruning_mode(&db)?; - - let selected_mode = match (should_init, stored_mode, requested_mode) { - (true, stored_mode, requested_mode) => { - assert!(stored_mode.is_none(), "The storage has just been initialized. 
No meta-data is expected to be found in it."); - requested_mode.unwrap_or_default() - }, - - (false, None, _) => - return Err(StateDbError::Metadata( - "An existing StateDb does not have PRUNING_MODE stored in its meta-data".into(), - ) - .into()), - - (false, Some(stored), None) => stored, - - (false, Some(stored), Some(requested)) => choose_pruning_mode(stored, requested)?, - }; - - let db_init_commit_set = if should_init { - let mut cs: CommitSet = Default::default(); - - let key = to_meta_key(PRUNING_MODE, &()); - let value = selected_mode.id().to_owned(); - - cs.meta.inserted.push((key, value)); - - cs - } else { - Default::default() - }; - - let state_db = - StateDb { db: RwLock::new(StateDbSync::new(selected_mode, ref_counting, db)?) }; - - Ok((db_init_commit_set, state_db)) - } - - pub fn pruning_mode(&self) -> PruningMode { - self.db.read().mode.clone() - } - - /// Add a new non-canonical block. - pub fn insert_block( - &self, - hash: &BlockHash, - number: u64, - parent_hash: &BlockHash, - changeset: ChangeSet, - ) -> Result, Error> { - self.db.write().insert_block(hash, number, parent_hash, changeset) - } - - /// Finalize a previously inserted block. - pub fn canonicalize_block(&self, hash: &BlockHash) -> Result, Error> { - self.db.write().canonicalize_block(hash) - } - - /// Prevents pruning of specified block and its descendants. - /// `hint` used for futher checking if the given block exists - pub fn pin(&self, hash: &BlockHash, number: u64, hint: F) -> Result<(), PinError> - where - F: Fn() -> bool, - { - self.db.write().pin(hash, number, hint) - } - - /// Allows pruning of specified block. - pub fn unpin(&self, hash: &BlockHash) { - self.db.write().unpin(hash) - } - - /// Confirm that all changes made to commit sets are on disk. Allows for temporarily pinned - /// blocks to be released. - pub fn sync(&self) { - self.db.write().sync() - } - - /// Get a value from non-canonical/pruning overlay or the backing DB. - pub fn get( - &self, - key: &Q, - db: &DB, - ) -> Result, Error> - where - Q: AsRef, - Key: std::borrow::Borrow, - Q: std::hash::Hash + Eq, - { - self.db.read().get(key, db) - } - - /// Revert all non-canonical blocks with the best block number. - /// Returns a database commit or `None` if not possible. - /// For archive an empty commit set is returned. - pub fn revert_one(&self) -> Option> { - self.db.write().revert_one() - } - - /// Remove specified non-canonical block. - /// Returns a database commit or `None` if not possible. - pub fn remove(&self, hash: &BlockHash) -> Option> { - self.db.write().remove(hash) - } - - /// Returns last finalized block number. - pub fn best_canonical(&self) -> Option { - return self.db.read().best_canonical() - } - - /// Check if block is pruned away. - pub fn is_pruned(&self, hash: &BlockHash, number: u64) -> IsPruned { - return self.db.read().is_pruned(hash, number) - } - - /// Reset in-memory changes to the last disk-backed state. - pub fn reset(&self, db: D) -> Result<(), Error> { - let mut state_db = self.db.write(); - *state_db = StateDbSync::new(state_db.mode.clone(), state_db.ref_counting, db)?; - Ok(()) - } - - /// Returns the current memory statistics of this instance. 
- pub fn memory_info(&self) -> StateDbMemoryInfo { - self.db.read().memory_info() - } -} - -/// The result return by `StateDb::is_pruned` -#[derive(Debug, PartialEq, Eq)] -pub enum IsPruned { - /// Definitely pruned - Pruned, - /// Definitely not pruned - NotPruned, - /// May or may not pruned, need futher checking - MaybePruned, -} - -fn fetch_stored_pruning_mode(db: &D) -> Result, Error> { - let meta_key_mode = to_meta_key(PRUNING_MODE, &()); - if let Some(stored_mode) = db.get_meta(&meta_key_mode).map_err(Error::Db)? { - if let Some(mode) = PruningMode::from_id(&stored_mode) { - Ok(Some(mode)) - } else { - Err(StateDbError::Metadata(format!( - "Invalid value stored for PRUNING_MODE: {:02x?}", - stored_mode - )) - .into()) - } - } else { - Ok(None) - } -} - -fn choose_pruning_mode( - stored: PruningMode, - requested: PruningMode, -) -> Result { - match (stored, requested) { - (PruningMode::ArchiveAll, PruningMode::ArchiveAll) => Ok(PruningMode::ArchiveAll), - (PruningMode::ArchiveCanonical, PruningMode::ArchiveCanonical) => - Ok(PruningMode::ArchiveCanonical), - (PruningMode::Constrained(_), PruningMode::Constrained(requested)) => - Ok(PruningMode::Constrained(requested)), - (stored, requested) => Err(StateDbError::IncompatiblePruningModes { requested, stored }), - } -} - -#[cfg(test)] -mod tests { - use crate::{ - test::{make_changeset, make_db, TestDb}, - Constraints, Error, IsPruned, PruningMode, StateDb, StateDbError, - }; - use sp_core::H256; - - fn make_test_db(settings: PruningMode) -> (TestDb, StateDb) { - let mut db = make_db(&[91, 921, 922, 93, 94]); - let (state_db_init, state_db) = - StateDb::open(db.clone(), Some(settings), false, true).unwrap(); - db.commit(&state_db_init); - - db.commit( - &state_db - .insert_block( - &H256::from_low_u64_be(1), - 1, - &H256::from_low_u64_be(0), - make_changeset(&[1], &[91]), - ) - .unwrap(), - ); - db.commit( - &state_db - .insert_block( - &H256::from_low_u64_be(21), - 2, - &H256::from_low_u64_be(1), - make_changeset(&[21], &[921, 1]), - ) - .unwrap(), - ); - db.commit( - &state_db - .insert_block( - &H256::from_low_u64_be(22), - 2, - &H256::from_low_u64_be(1), - make_changeset(&[22], &[922]), - ) - .unwrap(), - ); - db.commit( - &state_db - .insert_block( - &H256::from_low_u64_be(3), - 3, - &H256::from_low_u64_be(21), - make_changeset(&[3], &[93]), - ) - .unwrap(), - ); - db.commit(&state_db.canonicalize_block(&H256::from_low_u64_be(1)).unwrap()); - db.commit( - &state_db - .insert_block( - &H256::from_low_u64_be(4), - 4, - &H256::from_low_u64_be(3), - make_changeset(&[4], &[94]), - ) - .unwrap(), - ); - db.commit(&state_db.canonicalize_block(&H256::from_low_u64_be(21)).unwrap()); - db.commit(&state_db.canonicalize_block(&H256::from_low_u64_be(3)).unwrap()); - - (db, state_db) - } - - #[test] - fn full_archive_keeps_everything() { - let (db, sdb) = make_test_db(PruningMode::ArchiveAll); - assert!(db.data_eq(&make_db(&[1, 21, 22, 3, 4, 91, 921, 922, 93, 94]))); - assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(0), 0), IsPruned::NotPruned); - } - - #[test] - fn canonical_archive_keeps_canonical() { - let (db, _) = make_test_db(PruningMode::ArchiveCanonical); - assert!(db.data_eq(&make_db(&[1, 21, 3, 91, 921, 922, 93, 94]))); - } - - #[test] - fn block_record_unavailable() { - let (mut db, state_db) = make_test_db(PruningMode::Constrained(Constraints { - max_blocks: Some(1), - max_mem: None, - })); - // import 2 blocks - for i in &[5, 6] { - db.commit( - &state_db - .insert_block( - &H256::from_low_u64_be(*i), - *i, - 
&H256::from_low_u64_be(*i - 1), - make_changeset(&[], &[]), - ) - .unwrap(), - ); - } - // canonicalize block 4 but not commit it to db - let c1 = state_db.canonicalize_block(&H256::from_low_u64_be(4)).unwrap(); - assert_eq!(state_db.is_pruned(&H256::from_low_u64_be(3), 3), IsPruned::Pruned); - - // canonicalize block 5 but not commit it to db, block 4 is not pruned due to it is not - // commit to db yet (unavailable), return `MaybePruned` here because `apply_pending` is not - // called and block 3 is still in cache - let c2 = state_db.canonicalize_block(&H256::from_low_u64_be(5)).unwrap(); - assert_eq!(state_db.is_pruned(&H256::from_low_u64_be(4), 4), IsPruned::MaybePruned); - - // commit block 4 and 5 to db, and import a new block will prune both block 4 and 5 - db.commit(&c1); - db.commit(&c2); - db.commit(&state_db.canonicalize_block(&H256::from_low_u64_be(6)).unwrap()); - assert_eq!(state_db.is_pruned(&H256::from_low_u64_be(4), 4), IsPruned::Pruned); - assert_eq!(state_db.is_pruned(&H256::from_low_u64_be(5), 5), IsPruned::Pruned); - } - - #[test] - fn prune_window_0() { - let (db, _) = make_test_db(PruningMode::Constrained(Constraints { - max_blocks: Some(0), - max_mem: None, - })); - assert!(db.data_eq(&make_db(&[21, 3, 922, 94]))); - } - - #[test] - fn prune_window_1() { - let (db, sdb) = make_test_db(PruningMode::Constrained(Constraints { - max_blocks: Some(1), - max_mem: None, - })); - assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(0), 0), IsPruned::Pruned); - assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(1), 1), IsPruned::Pruned); - assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(21), 2), IsPruned::Pruned); - assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(22), 2), IsPruned::Pruned); - assert!(db.data_eq(&make_db(&[21, 3, 922, 93, 94]))); - } - - #[test] - fn prune_window_2() { - let (db, sdb) = make_test_db(PruningMode::Constrained(Constraints { - max_blocks: Some(2), - max_mem: None, - })); - assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(0), 0), IsPruned::Pruned); - assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(1), 1), IsPruned::Pruned); - assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(21), 2), IsPruned::NotPruned); - assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(22), 2), IsPruned::Pruned); - assert!(db.data_eq(&make_db(&[1, 21, 3, 921, 922, 93, 94]))); - } - - #[test] - fn detects_incompatible_mode() { - let mut db = make_db(&[]); - let (state_db_init, state_db) = - StateDb::open(db.clone(), Some(PruningMode::ArchiveAll), false, true).unwrap(); - db.commit(&state_db_init); - db.commit( - &state_db - .insert_block( - &H256::from_low_u64_be(0), - 0, - &H256::from_low_u64_be(0), - make_changeset(&[], &[]), - ) - .unwrap(), - ); - let new_mode = PruningMode::Constrained(Constraints { max_blocks: Some(2), max_mem: None }); - let state_db_open_result: Result<(_, StateDb), _> = - StateDb::open(db.clone(), Some(new_mode), false, false); - assert!(state_db_open_result.is_err()); - } - - fn check_stored_and_requested_mode_compatibility( - mode_when_created: Option, - mode_when_reopened: Option, - expected_effective_mode_when_reopenned: Result, - ) { - let mut db = make_db(&[]); - let (state_db_init, state_db) = - StateDb::::open(db.clone(), mode_when_created, false, true) - .unwrap(); - db.commit(&state_db_init); - std::mem::drop(state_db); - - let state_db_reopen_result = - StateDb::::open(db.clone(), mode_when_reopened, false, false); - if let Ok(expected_mode) = expected_effective_mode_when_reopenned { - let (state_db_init, state_db_reopened) = 
state_db_reopen_result.unwrap(); - db.commit(&state_db_init); - assert_eq!(state_db_reopened.pruning_mode(), expected_mode,) - } else { - assert!(matches!( - state_db_reopen_result, - Err(Error::StateDb(StateDbError::IncompatiblePruningModes { .. })) - )); - } - } - - #[test] - fn pruning_mode_compatibility() { - for (created, reopened, expected) in [ - (None, None, Ok(PruningMode::blocks_pruning(256))), - (None, Some(PruningMode::blocks_pruning(256)), Ok(PruningMode::blocks_pruning(256))), - (None, Some(PruningMode::blocks_pruning(128)), Ok(PruningMode::blocks_pruning(128))), - (None, Some(PruningMode::blocks_pruning(512)), Ok(PruningMode::blocks_pruning(512))), - (None, Some(PruningMode::ArchiveAll), Err(())), - (None, Some(PruningMode::ArchiveCanonical), Err(())), - (Some(PruningMode::blocks_pruning(256)), None, Ok(PruningMode::blocks_pruning(256))), - ( - Some(PruningMode::blocks_pruning(256)), - Some(PruningMode::blocks_pruning(256)), - Ok(PruningMode::blocks_pruning(256)), - ), - ( - Some(PruningMode::blocks_pruning(256)), - Some(PruningMode::blocks_pruning(128)), - Ok(PruningMode::blocks_pruning(128)), - ), - ( - Some(PruningMode::blocks_pruning(256)), - Some(PruningMode::blocks_pruning(512)), - Ok(PruningMode::blocks_pruning(512)), - ), - (Some(PruningMode::blocks_pruning(256)), Some(PruningMode::ArchiveAll), Err(())), - (Some(PruningMode::blocks_pruning(256)), Some(PruningMode::ArchiveCanonical), Err(())), - (Some(PruningMode::ArchiveAll), None, Ok(PruningMode::ArchiveAll)), - (Some(PruningMode::ArchiveAll), Some(PruningMode::blocks_pruning(256)), Err(())), - (Some(PruningMode::ArchiveAll), Some(PruningMode::blocks_pruning(128)), Err(())), - (Some(PruningMode::ArchiveAll), Some(PruningMode::blocks_pruning(512)), Err(())), - ( - Some(PruningMode::ArchiveAll), - Some(PruningMode::ArchiveAll), - Ok(PruningMode::ArchiveAll), - ), - (Some(PruningMode::ArchiveAll), Some(PruningMode::ArchiveCanonical), Err(())), - (Some(PruningMode::ArchiveCanonical), None, Ok(PruningMode::ArchiveCanonical)), - (Some(PruningMode::ArchiveCanonical), Some(PruningMode::blocks_pruning(256)), Err(())), - (Some(PruningMode::ArchiveCanonical), Some(PruningMode::blocks_pruning(128)), Err(())), - (Some(PruningMode::ArchiveCanonical), Some(PruningMode::blocks_pruning(512)), Err(())), - (Some(PruningMode::ArchiveCanonical), Some(PruningMode::ArchiveAll), Err(())), - ( - Some(PruningMode::ArchiveCanonical), - Some(PruningMode::ArchiveCanonical), - Ok(PruningMode::ArchiveCanonical), - ), - ] { - check_stored_and_requested_mode_compatibility(created, reopened, expected); - } - } -} diff --git a/substrate/client/state-db/src/noncanonical.rs b/substrate/client/state-db/src/noncanonical.rs deleted file mode 100644 index e36164a8..00000000 --- a/substrate/client/state-db/src/noncanonical.rs +++ /dev/null @@ -1,1106 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Canonicalization window. -//! Maintains trees of block overlays and allows discarding trees/roots -//! The overlays are added in `insert` and removed in `canonicalize`. - -use super::{to_meta_key, ChangeSet, CommitSet, DBValue, Error, Hash, MetaDb, StateDbError}; -use codec::{Decode, Encode}; -use log::trace; -use std::collections::{hash_map::Entry, HashMap, VecDeque}; - -const NON_CANONICAL_JOURNAL: &[u8] = b"noncanonical_journal"; -pub(crate) const LAST_CANONICAL: &[u8] = b"last_canonical"; -const MAX_BLOCKS_PER_LEVEL: u64 = 32; - -/// See module documentation. -#[derive(parity_util_mem_derive::MallocSizeOf)] -pub struct NonCanonicalOverlay { - last_canonicalized: Option<(BlockHash, u64)>, - levels: VecDeque>, - parents: HashMap, - values: HashMap, // ref counted - // would be deleted but kept around because block is pinned, ref counted. - pinned: HashMap, - pinned_insertions: HashMap, u32)>, - pinned_canonincalized: Vec, -} - -#[derive(parity_util_mem_derive::MallocSizeOf)] -#[cfg_attr(test, derive(PartialEq, Debug))] -struct OverlayLevel { - blocks: Vec>, - used_indicies: u64, // Bitmask of available journal indicies. -} - -impl OverlayLevel { - fn push(&mut self, overlay: BlockOverlay) { - self.used_indicies |= 1 << overlay.journal_index; - self.blocks.push(overlay) - } - - fn available_index(&self) -> u64 { - self.used_indicies.trailing_ones() as u64 - } - - fn remove(&mut self, index: usize) -> BlockOverlay { - self.used_indicies &= !(1 << self.blocks[index].journal_index); - self.blocks.remove(index) - } - - fn new() -> OverlayLevel { - OverlayLevel { blocks: Vec::new(), used_indicies: 0 } - } -} - -#[derive(Encode, Decode)] -struct JournalRecord { - hash: BlockHash, - parent_hash: BlockHash, - inserted: Vec<(Key, DBValue)>, - deleted: Vec, -} - -fn to_journal_key(block: u64, index: u64) -> Vec { - to_meta_key(NON_CANONICAL_JOURNAL, &(block, index)) -} - -#[cfg_attr(test, derive(PartialEq, Debug))] -#[derive(parity_util_mem_derive::MallocSizeOf)] -struct BlockOverlay { - hash: BlockHash, - journal_index: u64, - journal_key: Vec, - inserted: Vec, - deleted: Vec, -} - -fn insert_values( - values: &mut HashMap, - inserted: Vec<(Key, DBValue)>, -) { - for (k, v) in inserted { - debug_assert!(values.get(&k).map_or(true, |(_, value)| *value == v)); - let (ref mut counter, _) = values.entry(k).or_insert_with(|| (0, v)); - *counter += 1; - } -} - -fn discard_values(values: &mut HashMap, inserted: Vec) { - for k in inserted { - match values.entry(k) { - Entry::Occupied(mut e) => { - let (ref mut counter, _) = e.get_mut(); - *counter -= 1; - if *counter == 0 { - e.remove_entry(); - } - }, - Entry::Vacant(_) => { - debug_assert!(false, "Trying to discard missing value"); - }, - } - } -} - -fn discard_descendants( - levels: &mut (&mut [OverlayLevel], &mut [OverlayLevel]), - values: &mut HashMap, - parents: &mut HashMap, - pinned: &HashMap, - pinned_insertions: &mut HashMap, u32)>, - hash: &BlockHash, -) -> u32 { - let (first, mut remainder) = if let Some((first, rest)) = levels.0.split_first_mut() { - (Some(first), (rest, &mut *levels.1)) - } else if let Some((first, rest)) = levels.1.split_first_mut() { - (Some(first), (&mut *levels.0, rest)) - } else { - (None, (&mut *levels.0, &mut *levels.1)) - }; - let mut pinned_children = 0; - if let Some(level) = first { - while let Some(i) = level.blocks.iter().position(|overlay| { - parents - .get(&overlay.hash) - .expect("there 
is a parent entry for each entry in levels; qed") == - hash - }) { - let overlay = level.remove(i); - let mut num_pinned = discard_descendants( - &mut remainder, - values, - parents, - pinned, - pinned_insertions, - &overlay.hash, - ); - if pinned.contains_key(&overlay.hash) { - num_pinned += 1; - } - if num_pinned != 0 { - // save to be discarded later. - pinned_insertions.insert(overlay.hash.clone(), (overlay.inserted, num_pinned)); - pinned_children += num_pinned; - } else { - // discard immediately. - parents.remove(&overlay.hash); - discard_values(values, overlay.inserted); - } - } - } - pinned_children -} - -impl NonCanonicalOverlay { - /// Creates a new instance. Does not expect any metadata to be present in the DB. - pub fn new(db: &D) -> Result, Error> { - let last_canonicalized = - db.get_meta(&to_meta_key(LAST_CANONICAL, &())).map_err(Error::Db)?; - let last_canonicalized = last_canonicalized - .map(|buffer| <(BlockHash, u64)>::decode(&mut buffer.as_slice())) - .transpose()?; - let mut levels = VecDeque::new(); - let mut parents = HashMap::new(); - let mut values = HashMap::new(); - if let Some((ref hash, mut block)) = last_canonicalized { - // read the journal - trace!(target: "state-db", "Reading uncanonicalized journal. Last canonicalized #{} ({:?})", block, hash); - let mut total: u64 = 0; - block += 1; - loop { - let mut level = OverlayLevel::new(); - for index in 0..MAX_BLOCKS_PER_LEVEL { - let journal_key = to_journal_key(block, index); - if let Some(record) = db.get_meta(&journal_key).map_err(Error::Db)? { - let record: JournalRecord = - Decode::decode(&mut record.as_slice())?; - let inserted = record.inserted.iter().map(|(k, _)| k.clone()).collect(); - let overlay = BlockOverlay { - hash: record.hash.clone(), - journal_index: index, - journal_key, - inserted, - deleted: record.deleted, - }; - insert_values(&mut values, record.inserted); - trace!( - target: "state-db", - "Uncanonicalized journal entry {}.{} ({:?}) ({} inserted, {} deleted)", - block, - index, - record.hash, - overlay.inserted.len(), - overlay.deleted.len() - ); - level.push(overlay); - parents.insert(record.hash, record.parent_hash); - total += 1; - } - } - if level.blocks.is_empty() { - break - } - levels.push_back(level); - block += 1; - } - trace!(target: "state-db", "Finished reading uncanonicalized journal, {} entries", total); - } - Ok(NonCanonicalOverlay { - last_canonicalized, - levels, - parents, - pinned: Default::default(), - pinned_insertions: Default::default(), - values, - pinned_canonincalized: Default::default(), - }) - } - - /// Insert a new block into the overlay. If inserted on the second level or lover expects parent - /// to be present in the window. - pub fn insert( - &mut self, - hash: &BlockHash, - number: u64, - parent_hash: &BlockHash, - changeset: ChangeSet, - ) -> Result, StateDbError> { - let mut commit = CommitSet::default(); - let front_block_number = self.front_block_number(); - if self.levels.is_empty() && self.last_canonicalized.is_none() && number > 0 { - // assume that parent was canonicalized - let last_canonicalized = (parent_hash.clone(), number - 1); - commit - .meta - .inserted - .push((to_meta_key(LAST_CANONICAL, &()), last_canonicalized.encode())); - self.last_canonicalized = Some(last_canonicalized); - } else if self.last_canonicalized.is_some() { - if number < front_block_number || number > front_block_number + self.levels.len() as u64 - { - trace!(target: "state-db", "Failed to insert block {}, current is {} .. 
{})", - number, - front_block_number, - front_block_number + self.levels.len() as u64, - ); - return Err(StateDbError::InvalidBlockNumber) - } - // check for valid parent if inserting on second level or higher - if number == front_block_number { - if !self - .last_canonicalized - .as_ref() - .map_or(false, |&(ref h, n)| h == parent_hash && n == number - 1) - { - return Err(StateDbError::InvalidParent) - } - } else if !self.parents.contains_key(parent_hash) { - return Err(StateDbError::InvalidParent) - } - } - let level = if self.levels.is_empty() || - number == front_block_number + self.levels.len() as u64 - { - self.levels.push_back(OverlayLevel::new()); - self.levels.back_mut().expect("can't be empty after insertion; qed") - } else { - self.levels.get_mut((number - front_block_number) as usize) - .expect("number is [front_block_number .. front_block_number + levels.len()) is asserted in precondition; qed") - }; - - if level.blocks.len() >= MAX_BLOCKS_PER_LEVEL as usize { - return Err(StateDbError::TooManySiblingBlocks) - } - if level.blocks.iter().any(|b| b.hash == *hash) { - return Err(StateDbError::BlockAlreadyExists) - } - - let index = level.available_index(); - let journal_key = to_journal_key(number, index); - - let inserted = changeset.inserted.iter().map(|(k, _)| k.clone()).collect(); - let overlay = BlockOverlay { - hash: hash.clone(), - journal_index: index, - journal_key: journal_key.clone(), - inserted, - deleted: changeset.deleted.clone(), - }; - level.push(overlay); - self.parents.insert(hash.clone(), parent_hash.clone()); - let journal_record = JournalRecord { - hash: hash.clone(), - parent_hash: parent_hash.clone(), - inserted: changeset.inserted, - deleted: changeset.deleted, - }; - commit.meta.inserted.push((journal_key, journal_record.encode())); - trace!(target: "state-db", "Inserted uncanonicalized changeset {}.{} {:?} ({} inserted, {} deleted)", number, index, hash, journal_record.inserted.len(), journal_record.deleted.len()); - insert_values(&mut self.values, journal_record.inserted); - Ok(commit) - } - - fn discard_journals( - &self, - level_index: usize, - discarded_journals: &mut Vec>, - discarded_blocks: &mut Vec, - hash: &BlockHash, - ) { - if let Some(level) = self.levels.get(level_index) { - level.blocks.iter().for_each(|overlay| { - let parent = self - .parents - .get(&overlay.hash) - .expect("there is a parent entry for each entry in levels; qed") - .clone(); - if parent == *hash { - discarded_journals.push(overlay.journal_key.clone()); - discarded_blocks.push(overlay.hash.clone()); - self.discard_journals( - level_index + 1, - discarded_journals, - discarded_blocks, - &overlay.hash, - ); - } - }); - } - } - - fn front_block_number(&self) -> u64 { - self.last_canonicalized.as_ref().map(|&(_, n)| n + 1).unwrap_or(0) - } - - pub fn last_canonicalized_block_number(&self) -> Option { - self.last_canonicalized.as_ref().map(|&(_, n)| n) - } - - /// Confirm that all changes made to commit sets are on disk. Allows for temporarily pinned - /// blocks to be released. - pub fn sync(&mut self) { - let mut pinned = std::mem::take(&mut self.pinned_canonincalized); - for hash in pinned.iter() { - self.unpin(hash) - } - pinned.clear(); - // Reuse the same memory buffer - self.pinned_canonincalized = pinned; - } - - /// Select a top-level root and canonicalized it. Discards all sibling subtrees and the root. 
- /// Add a set of changes of the canonicalized block to `CommitSet` - /// Return the block number of the canonicalized block - pub fn canonicalize( - &mut self, - hash: &BlockHash, - commit: &mut CommitSet, - ) -> Result { - trace!(target: "state-db", "Canonicalizing {:?}", hash); - let level = match self.levels.pop_front() { - Some(level) => level, - None => return Err(StateDbError::InvalidBlock), - }; - let index = level - .blocks - .iter() - .position(|overlay| overlay.hash == *hash) - .ok_or(StateDbError::InvalidBlock)?; - - // No failures are possible beyond this point. - - // Force pin canonicalized block so that it is no discarded immediately - self.pin(hash); - self.pinned_canonincalized.push(hash.clone()); - - let mut discarded_journals = Vec::new(); - let mut discarded_blocks = Vec::new(); - for (i, overlay) in level.blocks.into_iter().enumerate() { - let mut pinned_children = 0; - // That's the one we need to canonicalize - if i == index { - commit.data.inserted.extend(overlay.inserted.iter().map(|k| { - ( - k.clone(), - self.values - .get(k) - .expect("For each key in overlays there's a value in values") - .1 - .clone(), - ) - })); - commit.data.deleted.extend(overlay.deleted.clone()); - } else { - // Discard this overlay - self.discard_journals( - 0, - &mut discarded_journals, - &mut discarded_blocks, - &overlay.hash, - ); - pinned_children = discard_descendants( - &mut self.levels.as_mut_slices(), - &mut self.values, - &mut self.parents, - &self.pinned, - &mut self.pinned_insertions, - &overlay.hash, - ); - } - if self.pinned.contains_key(&overlay.hash) { - pinned_children += 1; - } - if pinned_children != 0 { - self.pinned_insertions - .insert(overlay.hash.clone(), (overlay.inserted, pinned_children)); - } else { - self.parents.remove(&overlay.hash); - discard_values(&mut self.values, overlay.inserted); - } - discarded_journals.push(overlay.journal_key.clone()); - discarded_blocks.push(overlay.hash.clone()); - } - commit.meta.deleted.append(&mut discarded_journals); - - let canonicalized = (hash.clone(), self.front_block_number()); - commit - .meta - .inserted - .push((to_meta_key(LAST_CANONICAL, &()), canonicalized.encode())); - trace!(target: "state-db", "Discarding {} records", commit.meta.deleted.len()); - - let num = canonicalized.1; - self.last_canonicalized = Some(canonicalized); - Ok(num) - } - - /// Get a value from the node overlay. This searches in every existing changeset. - pub fn get(&self, key: &Q) -> Option - where - Key: std::borrow::Borrow, - Q: std::hash::Hash + Eq, - { - self.values.get(key).map(|v| v.1.clone()) - } - - /// Check if the block is in the canonicalization queue. - pub fn have_block(&self, hash: &BlockHash) -> bool { - self.parents.contains_key(hash) - } - - /// Revert a single level. Returns commit set that deletes the journal or `None` if not - /// possible. - pub fn revert_one(&mut self) -> Option> { - self.levels.pop_back().map(|level| { - let mut commit = CommitSet::default(); - for overlay in level.blocks.into_iter() { - commit.meta.deleted.push(overlay.journal_key); - self.parents.remove(&overlay.hash); - discard_values(&mut self.values, overlay.inserted); - } - commit - }) - } - - /// Revert a single block. Returns commit set that deletes the journal or `None` if not - /// possible. 
- pub fn remove(&mut self, hash: &BlockHash) -> Option> { - let mut commit = CommitSet::default(); - let level_count = self.levels.len(); - for (level_index, level) in self.levels.iter_mut().enumerate().rev() { - let index = match level.blocks.iter().position(|overlay| &overlay.hash == hash) { - Some(index) => index, - None => continue, - }; - // Check that it does not have any children - if (level_index != level_count - 1) && self.parents.values().any(|h| h == hash) { - log::debug!(target: "state-db", "Trying to remove block {:?} with children", hash); - return None - } - let overlay = level.remove(index); - commit.meta.deleted.push(overlay.journal_key); - self.parents.remove(&overlay.hash); - discard_values(&mut self.values, overlay.inserted); - break - } - if self.levels.back().map_or(false, |l| l.blocks.is_empty()) { - self.levels.pop_back(); - } - if !commit.meta.deleted.is_empty() { - Some(commit) - } else { - None - } - } - - /// Pin state values in memory - pub fn pin(&mut self, hash: &BlockHash) { - let refs = self.pinned.entry(hash.clone()).or_default(); - if *refs == 0 { - trace!(target: "state-db-pin", "Pinned non-canon block: {:?}", hash); - } - *refs += 1; - } - - /// Discard pinned state - pub fn unpin(&mut self, hash: &BlockHash) { - let removed = match self.pinned.entry(hash.clone()) { - Entry::Occupied(mut entry) => { - *entry.get_mut() -= 1; - if *entry.get() == 0 { - entry.remove(); - true - } else { - false - } - }, - Entry::Vacant(_) => false, - }; - - if removed { - let mut parent = Some(hash.clone()); - while let Some(hash) = parent { - parent = self.parents.get(&hash).cloned(); - match self.pinned_insertions.entry(hash.clone()) { - Entry::Occupied(mut entry) => { - entry.get_mut().1 -= 1; - if entry.get().1 == 0 { - let (inserted, _) = entry.remove(); - trace!(target: "state-db-pin", "Discarding unpinned non-canon block: {:?}", hash); - discard_values(&mut self.values, inserted); - self.parents.remove(&hash); - true - } else { - false - } - }, - Entry::Vacant(_) => break, - }; - } - } - } -} - -#[cfg(test)] -mod tests { - use super::{to_journal_key, NonCanonicalOverlay}; - use crate::{ - test::{make_changeset, make_db}, - ChangeSet, CommitSet, MetaDb, StateDbError, - }; - use sp_core::H256; - - fn contains(overlay: &NonCanonicalOverlay, key: u64) -> bool { - overlay.get(&H256::from_low_u64_be(key)) == - Some(H256::from_low_u64_be(key).as_bytes().to_vec()) - } - - #[test] - fn created_from_empty_db() { - let db = make_db(&[]); - let overlay: NonCanonicalOverlay = NonCanonicalOverlay::new(&db).unwrap(); - assert_eq!(overlay.last_canonicalized, None); - assert!(overlay.levels.is_empty()); - assert!(overlay.parents.is_empty()); - } - - #[test] - #[should_panic] - fn canonicalize_empty_panics() { - let db = make_db(&[]); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - let mut commit = CommitSet::default(); - overlay.canonicalize(&H256::default(), &mut commit).unwrap(); - } - - #[test] - #[should_panic] - fn insert_ahead_panics() { - let db = make_db(&[]); - let h1 = H256::random(); - let h2 = H256::random(); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert(&h1, 2, &H256::default(), ChangeSet::default()).unwrap(); - overlay.insert(&h2, 1, &h1, ChangeSet::default()).unwrap(); - } - - #[test] - #[should_panic] - fn insert_behind_panics() { - let h1 = H256::random(); - let h2 = H256::random(); - let db = make_db(&[]); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert(&h1, 1, &H256::default(), 
ChangeSet::default()).unwrap(); - overlay.insert(&h2, 3, &h1, ChangeSet::default()).unwrap(); - } - - #[test] - #[should_panic] - fn insert_unknown_parent_panics() { - let db = make_db(&[]); - let h1 = H256::random(); - let h2 = H256::random(); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert(&h1, 1, &H256::default(), ChangeSet::default()).unwrap(); - overlay.insert(&h2, 2, &H256::default(), ChangeSet::default()).unwrap(); - } - - #[test] - fn insert_existing_fails() { - let db = make_db(&[]); - let h1 = H256::random(); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert(&h1, 2, &H256::default(), ChangeSet::default()).unwrap(); - assert!(matches!( - overlay.insert(&h1, 2, &H256::default(), ChangeSet::default()), - Err(StateDbError::BlockAlreadyExists) - )); - } - - #[test] - #[should_panic] - fn canonicalize_unknown_panics() { - let h1 = H256::random(); - let h2 = H256::random(); - let db = make_db(&[]); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert(&h1, 1, &H256::default(), ChangeSet::default()).unwrap(); - let mut commit = CommitSet::default(); - overlay.canonicalize(&h2, &mut commit).unwrap(); - } - - #[test] - fn insert_canonicalize_one() { - let h1 = H256::random(); - let mut db = make_db(&[1, 2]); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - let changeset = make_changeset(&[3, 4], &[2]); - let insertion = overlay.insert(&h1, 1, &H256::default(), changeset.clone()).unwrap(); - assert_eq!(insertion.data.inserted.len(), 0); - assert_eq!(insertion.data.deleted.len(), 0); - assert_eq!(insertion.meta.inserted.len(), 2); - assert_eq!(insertion.meta.deleted.len(), 0); - db.commit(&insertion); - let mut finalization = CommitSet::default(); - overlay.canonicalize(&h1, &mut finalization).unwrap(); - assert_eq!(finalization.data.inserted.len(), changeset.inserted.len()); - assert_eq!(finalization.data.deleted.len(), changeset.deleted.len()); - assert_eq!(finalization.meta.inserted.len(), 1); - assert_eq!(finalization.meta.deleted.len(), 1); - db.commit(&finalization); - assert!(db.data_eq(&make_db(&[1, 3, 4]))); - } - - #[test] - fn restore_from_journal() { - let h1 = H256::random(); - let h2 = H256::random(); - let mut db = make_db(&[1, 2]); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit( - &overlay - .insert(&h1, 10, &H256::default(), make_changeset(&[3, 4], &[2])) - .unwrap(), - ); - db.commit(&overlay.insert(&h2, 11, &h1, make_changeset(&[5], &[3])).unwrap()); - assert_eq!(db.meta_len(), 3); - - let overlay2 = NonCanonicalOverlay::::new(&db).unwrap(); - assert_eq!(overlay.levels, overlay2.levels); - assert_eq!(overlay.parents, overlay2.parents); - assert_eq!(overlay.last_canonicalized, overlay2.last_canonicalized); - } - - #[test] - fn restore_from_journal_after_canonicalize() { - let h1 = H256::random(); - let h2 = H256::random(); - let mut db = make_db(&[1, 2]); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit( - &overlay - .insert(&h1, 10, &H256::default(), make_changeset(&[3, 4], &[2])) - .unwrap(), - ); - db.commit(&overlay.insert(&h2, 11, &h1, make_changeset(&[5], &[3])).unwrap()); - let mut commit = CommitSet::default(); - overlay.canonicalize(&h1, &mut commit).unwrap(); - overlay.unpin(&h1); - db.commit(&commit); - assert_eq!(overlay.levels.len(), 1); - - let overlay2 = NonCanonicalOverlay::::new(&db).unwrap(); - assert_eq!(overlay.levels, overlay2.levels); - assert_eq!(overlay.parents, overlay2.parents); - 
assert_eq!(overlay.last_canonicalized, overlay2.last_canonicalized); - } - - #[test] - fn insert_canonicalize_two() { - let h1 = H256::random(); - let h2 = H256::random(); - let mut db = make_db(&[1, 2, 3, 4]); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - let changeset1 = make_changeset(&[5, 6], &[2]); - let changeset2 = make_changeset(&[7, 8], &[5, 3]); - db.commit(&overlay.insert(&h1, 1, &H256::default(), changeset1).unwrap()); - assert!(contains(&overlay, 5)); - db.commit(&overlay.insert(&h2, 2, &h1, changeset2).unwrap()); - assert!(contains(&overlay, 7)); - assert!(contains(&overlay, 5)); - assert_eq!(overlay.levels.len(), 2); - assert_eq!(overlay.parents.len(), 2); - let mut commit = CommitSet::default(); - overlay.canonicalize(&h1, &mut commit).unwrap(); - db.commit(&commit); - overlay.sync(); - assert!(!contains(&overlay, 5)); - assert!(contains(&overlay, 7)); - assert_eq!(overlay.levels.len(), 1); - assert_eq!(overlay.parents.len(), 1); - let mut commit = CommitSet::default(); - overlay.canonicalize(&h2, &mut commit).unwrap(); - db.commit(&commit); - overlay.sync(); - assert_eq!(overlay.levels.len(), 0); - assert_eq!(overlay.parents.len(), 0); - assert!(db.data_eq(&make_db(&[1, 4, 6, 7, 8]))); - } - - #[test] - fn insert_same_key() { - let mut db = make_db(&[]); - let (h_1, c_1) = (H256::random(), make_changeset(&[1], &[])); - let (h_2, c_2) = (H256::random(), make_changeset(&[1], &[])); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert(&h_1, 1, &H256::default(), c_1).unwrap()); - db.commit(&overlay.insert(&h_2, 1, &H256::default(), c_2).unwrap()); - assert!(contains(&overlay, 1)); - let mut commit = CommitSet::default(); - overlay.canonicalize(&h_1, &mut commit).unwrap(); - db.commit(&commit); - overlay.sync(); - assert!(!contains(&overlay, 1)); - } - - #[test] - fn insert_and_canonicalize() { - let h1 = H256::random(); - let h2 = H256::random(); - let h3 = H256::random(); - let mut db = make_db(&[]); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - let changeset = make_changeset(&[], &[]); - db.commit(&overlay.insert(&h1, 1, &H256::default(), changeset.clone()).unwrap()); - db.commit(&overlay.insert(&h2, 2, &h1, changeset.clone()).unwrap()); - let mut commit = CommitSet::default(); - overlay.canonicalize(&h1, &mut commit).unwrap(); - overlay.canonicalize(&h2, &mut commit).unwrap(); - db.commit(&commit); - db.commit(&overlay.insert(&h3, 3, &h2, changeset.clone()).unwrap()); - assert_eq!(overlay.levels.len(), 1); - } - - #[test] - fn complex_tree() { - let mut db = make_db(&[]); - #[rustfmt::skip] - // - 1 - 1_1 - 1_1_1 - // \ 1_2 - 1_2_1 - // \ 1_2_2 - // \ 1_2_3 - // - // - 2 - 2_1 - 2_1_1 - // \ 2_2 - // - // 1_2_2 is the winner - let (h_1, c_1) = (H256::random(), make_changeset(&[1], &[])); - let (h_2, c_2) = (H256::random(), make_changeset(&[2], &[])); - let (h_1_1, c_1_1) = (H256::random(), make_changeset(&[11], &[])); - let (h_1_2, c_1_2) = (H256::random(), make_changeset(&[12], &[])); - let (h_2_1, c_2_1) = (H256::random(), make_changeset(&[21], &[])); - let (h_2_2, c_2_2) = (H256::random(), make_changeset(&[22], &[])); - let (h_1_1_1, c_1_1_1) = (H256::random(), make_changeset(&[111], &[])); - let (h_1_2_1, c_1_2_1) = (H256::random(), make_changeset(&[121], &[])); - let (h_1_2_2, c_1_2_2) = (H256::random(), make_changeset(&[122], &[])); - let (h_1_2_3, c_1_2_3) = (H256::random(), make_changeset(&[123], &[])); - let (h_2_1_1, c_2_1_1) = (H256::random(), make_changeset(&[211], &[])); - let mut overlay 
= NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert(&h_1, 1, &H256::default(), c_1).unwrap()); - db.commit(&overlay.insert(&h_1_1, 2, &h_1, c_1_1).unwrap()); - db.commit(&overlay.insert(&h_1_2, 2, &h_1, c_1_2).unwrap()); - db.commit(&overlay.insert(&h_2, 1, &H256::default(), c_2).unwrap()); - db.commit(&overlay.insert(&h_2_1, 2, &h_2, c_2_1).unwrap()); - db.commit(&overlay.insert(&h_2_2, 2, &h_2, c_2_2).unwrap()); - db.commit(&overlay.insert(&h_1_1_1, 3, &h_1_1, c_1_1_1).unwrap()); - db.commit(&overlay.insert(&h_1_2_1, 3, &h_1_2, c_1_2_1).unwrap()); - db.commit(&overlay.insert(&h_1_2_2, 3, &h_1_2, c_1_2_2).unwrap()); - db.commit(&overlay.insert(&h_1_2_3, 3, &h_1_2, c_1_2_3).unwrap()); - db.commit(&overlay.insert(&h_2_1_1, 3, &h_2_1, c_2_1_1).unwrap()); - assert!(contains(&overlay, 2)); - assert!(contains(&overlay, 11)); - assert!(contains(&overlay, 21)); - assert!(contains(&overlay, 111)); - assert!(contains(&overlay, 122)); - assert!(contains(&overlay, 211)); - assert_eq!(overlay.levels.len(), 3); - assert_eq!(overlay.parents.len(), 11); - assert_eq!(overlay.last_canonicalized, Some((H256::default(), 0))); - // check if restoration from journal results in the same tree - let overlay2 = NonCanonicalOverlay::::new(&db).unwrap(); - assert_eq!(overlay.levels, overlay2.levels); - assert_eq!(overlay.parents, overlay2.parents); - assert_eq!(overlay.last_canonicalized, overlay2.last_canonicalized); - // canonicalize 1. 2 and all its children should be discarded - let mut commit = CommitSet::default(); - overlay.canonicalize(&h_1, &mut commit).unwrap(); - db.commit(&commit); - overlay.sync(); - assert_eq!(overlay.levels.len(), 2); - assert_eq!(overlay.parents.len(), 6); - assert!(!contains(&overlay, 1)); - assert!(!contains(&overlay, 2)); - assert!(!contains(&overlay, 21)); - assert!(!contains(&overlay, 22)); - assert!(!contains(&overlay, 211)); - assert!(contains(&overlay, 111)); - assert!(!contains(&overlay, 211)); - // check that journals are deleted - assert!(db.get_meta(&to_journal_key(1, 0)).unwrap().is_none()); - assert!(db.get_meta(&to_journal_key(1, 1)).unwrap().is_none()); - assert!(db.get_meta(&to_journal_key(2, 1)).unwrap().is_some()); - assert!(db.get_meta(&to_journal_key(2, 2)).unwrap().is_none()); - assert!(db.get_meta(&to_journal_key(2, 3)).unwrap().is_none()); - // canonicalize 1_2. 
1_1 and all its children should be discarded - let mut commit = CommitSet::default(); - overlay.canonicalize(&h_1_2, &mut commit).unwrap(); - db.commit(&commit); - overlay.sync(); - assert_eq!(overlay.levels.len(), 1); - assert_eq!(overlay.parents.len(), 3); - assert!(!contains(&overlay, 11)); - assert!(!contains(&overlay, 111)); - assert!(contains(&overlay, 121)); - assert!(contains(&overlay, 122)); - assert!(contains(&overlay, 123)); - assert!(overlay.have_block(&h_1_2_1)); - assert!(!overlay.have_block(&h_1_2)); - assert!(!overlay.have_block(&h_1_1)); - assert!(!overlay.have_block(&h_1_1_1)); - // canonicalize 1_2_2 - let mut commit = CommitSet::default(); - overlay.canonicalize(&h_1_2_2, &mut commit).unwrap(); - db.commit(&commit); - overlay.sync(); - assert_eq!(overlay.levels.len(), 0); - assert_eq!(overlay.parents.len(), 0); - assert!(db.data_eq(&make_db(&[1, 12, 122]))); - assert_eq!(overlay.last_canonicalized, Some((h_1_2_2, 3))); - } - - #[test] - fn insert_revert() { - let h1 = H256::random(); - let h2 = H256::random(); - let mut db = make_db(&[1, 2, 3, 4]); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - assert!(overlay.revert_one().is_none()); - let changeset1 = make_changeset(&[5, 6], &[2]); - let changeset2 = make_changeset(&[7, 8], &[5, 3]); - db.commit(&overlay.insert(&h1, 1, &H256::default(), changeset1).unwrap()); - db.commit(&overlay.insert(&h2, 2, &h1, changeset2).unwrap()); - assert!(contains(&overlay, 7)); - db.commit(&overlay.revert_one().unwrap()); - assert_eq!(overlay.parents.len(), 1); - assert!(contains(&overlay, 5)); - assert!(!contains(&overlay, 7)); - db.commit(&overlay.revert_one().unwrap()); - assert_eq!(overlay.levels.len(), 0); - assert_eq!(overlay.parents.len(), 0); - assert!(overlay.revert_one().is_none()); - } - - #[test] - fn keeps_pinned() { - let mut db = make_db(&[]); - - #[rustfmt::skip] - // - 0 - 1_1 - // \ 1_2 - - let (h_1, c_1) = (H256::random(), make_changeset(&[1], &[])); - let (h_2, c_2) = (H256::random(), make_changeset(&[2], &[])); - - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert(&h_1, 1, &H256::default(), c_1).unwrap()); - db.commit(&overlay.insert(&h_2, 1, &H256::default(), c_2).unwrap()); - - overlay.pin(&h_1); - - let mut commit = CommitSet::default(); - overlay.canonicalize(&h_2, &mut commit).unwrap(); - db.commit(&commit); - assert!(contains(&overlay, 1)); - overlay.unpin(&h_1); - assert!(!contains(&overlay, 1)); - } - - #[test] - fn keeps_pinned_ref_count() { - let mut db = make_db(&[]); - - #[rustfmt::skip] - // - 0 - 1_1 - // \ 1_2 - // \ 1_3 - - // 1_1 and 1_2 both make the same change - let (h_1, c_1) = (H256::random(), make_changeset(&[1], &[])); - let (h_2, c_2) = (H256::random(), make_changeset(&[1], &[])); - let (h_3, c_3) = (H256::random(), make_changeset(&[], &[])); - - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert(&h_1, 1, &H256::default(), c_1).unwrap()); - db.commit(&overlay.insert(&h_2, 1, &H256::default(), c_2).unwrap()); - db.commit(&overlay.insert(&h_3, 1, &H256::default(), c_3).unwrap()); - - overlay.pin(&h_1); - - let mut commit = CommitSet::default(); - overlay.canonicalize(&h_3, &mut commit).unwrap(); - db.commit(&commit); - - assert!(contains(&overlay, 1)); - overlay.unpin(&h_1); - assert!(!contains(&overlay, 1)); - } - - #[test] - fn pins_canonicalized() { - let mut db = make_db(&[]); - - let (h_1, c_1) = (H256::random(), make_changeset(&[1], &[])); - let (h_2, c_2) = (H256::random(), make_changeset(&[2], 
&[])); - - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert(&h_1, 1, &H256::default(), c_1).unwrap()); - db.commit(&overlay.insert(&h_2, 2, &h_1, c_2).unwrap()); - - let mut commit = CommitSet::default(); - overlay.canonicalize(&h_1, &mut commit).unwrap(); - overlay.canonicalize(&h_2, &mut commit).unwrap(); - assert!(contains(&overlay, 1)); - assert!(contains(&overlay, 2)); - db.commit(&commit); - overlay.sync(); - assert!(!contains(&overlay, 1)); - assert!(!contains(&overlay, 2)); - } - - #[test] - fn pin_keeps_parent() { - let mut db = make_db(&[]); - - #[rustfmt::skip] - // - 0 - 1_1 - 2_1 - // \ 1_2 - - let (h_11, c_11) = (H256::random(), make_changeset(&[1], &[])); - let (h_12, c_12) = (H256::random(), make_changeset(&[], &[])); - let (h_21, c_21) = (H256::random(), make_changeset(&[], &[])); - - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert(&h_11, 1, &H256::default(), c_11).unwrap()); - db.commit(&overlay.insert(&h_12, 1, &H256::default(), c_12).unwrap()); - db.commit(&overlay.insert(&h_21, 2, &h_11, c_21).unwrap()); - - overlay.pin(&h_21); - - let mut commit = CommitSet::default(); - overlay.canonicalize(&h_12, &mut commit).unwrap(); - db.commit(&commit); - - assert!(contains(&overlay, 1)); - overlay.unpin(&h_21); - assert!(!contains(&overlay, 1)); - overlay.unpin(&h_12); - assert!(overlay.pinned.is_empty()); - } - - #[test] - fn restore_from_journal_after_canonicalize_no_first() { - // This test discards a branch that is journaled under a non-zero index on level 1, - // making sure all journals are loaded for each level even if some of them are missing. - let root = H256::random(); - let h1 = H256::random(); - let h2 = H256::random(); - let h11 = H256::random(); - let h21 = H256::random(); - let mut db = make_db(&[]); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert(&root, 10, &H256::default(), make_changeset(&[], &[])).unwrap()); - db.commit(&overlay.insert(&h1, 11, &root, make_changeset(&[1], &[])).unwrap()); - db.commit(&overlay.insert(&h2, 11, &root, make_changeset(&[2], &[])).unwrap()); - db.commit(&overlay.insert(&h11, 12, &h1, make_changeset(&[11], &[])).unwrap()); - db.commit(&overlay.insert(&h21, 12, &h2, make_changeset(&[21], &[])).unwrap()); - let mut commit = CommitSet::default(); - overlay.canonicalize(&root, &mut commit).unwrap(); - overlay.canonicalize(&h2, &mut commit).unwrap(); // h11 should stay in the DB - db.commit(&commit); - assert_eq!(overlay.levels.len(), 1); - assert!(contains(&overlay, 21)); - assert!(!contains(&overlay, 11)); - assert!(db.get_meta(&to_journal_key(12, 1)).unwrap().is_some()); - - // Restore into a new overlay and check that journaled value exists. - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - assert!(contains(&overlay, 21)); - - let mut commit = CommitSet::default(); - overlay.canonicalize(&h21, &mut commit).unwrap(); // h11 should stay in the DB - db.commit(&commit); - overlay.sync(); - assert!(!contains(&overlay, 21)); - } - - #[test] - fn index_reuse() { - // This test discards a branch that is journaled under a non-zero index on level 1, - // making sure all journals are loaded for each level even if some of them are missing. 
- let root = H256::random(); - let h1 = H256::random(); - let h2 = H256::random(); - let h11 = H256::random(); - let h21 = H256::random(); - let mut db = make_db(&[]); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert(&root, 10, &H256::default(), make_changeset(&[], &[])).unwrap()); - db.commit(&overlay.insert(&h1, 11, &root, make_changeset(&[1], &[])).unwrap()); - db.commit(&overlay.insert(&h2, 11, &root, make_changeset(&[2], &[])).unwrap()); - db.commit(&overlay.insert(&h11, 12, &h1, make_changeset(&[11], &[])).unwrap()); - db.commit(&overlay.insert(&h21, 12, &h2, make_changeset(&[21], &[])).unwrap()); - let mut commit = CommitSet::default(); - overlay.canonicalize(&root, &mut commit).unwrap(); - overlay.canonicalize(&h2, &mut commit).unwrap(); // h11 should stay in the DB - db.commit(&commit); - - // add another block at top level. It should reuse journal index 0 of previously discarded - // block - let h22 = H256::random(); - db.commit(&overlay.insert(&h22, 12, &h2, make_changeset(&[22], &[])).unwrap()); - assert_eq!(overlay.levels[0].blocks[0].journal_index, 1); - assert_eq!(overlay.levels[0].blocks[1].journal_index, 0); - - // Restore into a new overlay and check that journaled value exists. - let overlay = NonCanonicalOverlay::::new(&db).unwrap(); - assert_eq!(overlay.parents.len(), 2); - assert!(contains(&overlay, 21)); - assert!(contains(&overlay, 22)); - } - - #[test] - fn remove_works() { - let root = H256::random(); - let h1 = H256::random(); - let h2 = H256::random(); - let h11 = H256::random(); - let h21 = H256::random(); - let mut db = make_db(&[]); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert(&root, 10, &H256::default(), make_changeset(&[], &[])).unwrap()); - db.commit(&overlay.insert(&h1, 11, &root, make_changeset(&[1], &[])).unwrap()); - db.commit(&overlay.insert(&h2, 11, &root, make_changeset(&[2], &[])).unwrap()); - db.commit(&overlay.insert(&h11, 12, &h1, make_changeset(&[11], &[])).unwrap()); - db.commit(&overlay.insert(&h21, 12, &h2, make_changeset(&[21], &[])).unwrap()); - assert!(overlay.remove(&h1).is_none()); - assert!(overlay.remove(&h2).is_none()); - assert_eq!(overlay.levels.len(), 3); - - db.commit(&overlay.remove(&h11).unwrap()); - assert!(!contains(&overlay, 11)); - - db.commit(&overlay.remove(&h21).unwrap()); - assert_eq!(overlay.levels.len(), 2); - - db.commit(&overlay.remove(&h2).unwrap()); - assert!(!contains(&overlay, 2)); - } -} diff --git a/substrate/client/state-db/src/pruning.rs b/substrate/client/state-db/src/pruning.rs deleted file mode 100644 index 458522b8..00000000 --- a/substrate/client/state-db/src/pruning.rs +++ /dev/null @@ -1,853 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Pruning window. -//! -//! 
For each block we maintain a list of nodes pending deletion. -//! There is also a global index of node key to block number. -//! If a node is re-inserted into the window it gets removed from -//! the death list. -//! The changes are journaled in the DB. - -use crate::{ - noncanonical::LAST_CANONICAL, to_meta_key, CommitSet, Error, Hash, MetaDb, StateDbError, - DEFAULT_MAX_BLOCK_CONSTRAINT, -}; -use codec::{Decode, Encode}; -use log::trace; -use std::collections::{HashMap, HashSet, VecDeque}; - -pub(crate) const LAST_PRUNED: &[u8] = b"last_pruned"; -const PRUNING_JOURNAL: &[u8] = b"pruning_journal"; - -/// See module documentation. -#[derive(parity_util_mem_derive::MallocSizeOf)] -pub struct RefWindow { - /// A queue of blocks, tracking the keys that should be deleted for each block in the - /// pruning window. - queue: DeathRowQueue, - /// Block number that is next to be pruned. - base: u64, -} - -/// `DeathRowQueue` is used to keep track of blocks in the pruning window; there are two flavors: -/// - `Mem`, used when the backend database does not support reference counting; keeps all -/// blocks in memory and tracks re-inserted keys so they are not deleted when pruning -/// - `DbBacked`, used when the backend database supports reference counting; keeps only -/// a few blocks in memory and loads more blocks on demand -#[derive(parity_util_mem_derive::MallocSizeOf)] -enum DeathRowQueue { - Mem { - /// A queue of keys that should be deleted for each block in the pruning window. - death_rows: VecDeque>, - /// An index that maps each key from `death_rows` to block number. - death_index: HashMap, - }, - DbBacked { - // The backend database - #[ignore_malloc_size_of = "Shared data"] - db: D, - /// A queue of keys that should be deleted for each block in the pruning window. - /// Only the first few blocks of the pruning window are cached; blocks inside are - /// consecutive and ordered by block number - cache: VecDeque>, - /// A soft limit of the cache's size - cache_capacity: usize, - /// Last block number added to the window - last: Option, - }, -} - -impl DeathRowQueue { - /// Return a `DeathRowQueue` that keeps all blocks in memory - fn new_mem(db: &D, base: u64) -> Result, Error> { - let mut block = base; - let mut queue = DeathRowQueue::::Mem { - death_rows: VecDeque::new(), - death_index: HashMap::new(), - }; - // read the journal - trace!(target: "state-db", "Reading pruning journal for the memory queue. Pending #{}", base); - loop { - let journal_key = to_journal_key(block); - match db.get_meta(&journal_key).map_err(Error::Db)? { - Some(record) => { - let record: JournalRecord = - Decode::decode(&mut record.as_slice())?; - trace!(target: "state-db", "Pruning journal entry {} ({} inserted, {} deleted)", block, record.inserted.len(), record.deleted.len()); - queue.import(base, block, record); - }, - None => break, - } - block += 1; - } - Ok(queue) - } - - /// Return a `DeathRowQueue` backed by a database, keeping only a few - /// blocks in memory - fn new_db_backed( - db: D, - base: u64, - last: Option, - window_size: u32, - ) -> Result, Error> { - // limit the cache capacity from 1 to `DEFAULT_MAX_BLOCK_CONSTRAINT` - let cache_capacity = window_size.clamp(1, DEFAULT_MAX_BLOCK_CONSTRAINT) as usize; - let mut cache = VecDeque::with_capacity(cache_capacity); - trace!(target: "state-db", "Reading pruning journal for the database-backed queue. 
Pending #{}", base); - DeathRowQueue::load_batch_from_db(&db, &mut cache, base, cache_capacity)?; - Ok(DeathRowQueue::DbBacked { db, cache, cache_capacity, last }) - } - - /// import a new block to the back of the queue - fn import(&mut self, base: u64, num: u64, journal_record: JournalRecord) { - let JournalRecord { hash, inserted, deleted } = journal_record; - trace!(target: "state-db", "Importing {}, base={}", num, base); - match self { - DeathRowQueue::DbBacked { cache, cache_capacity, last, .. } => { - // If the new block continues cached range and there is space, load it directly into - // cache. - if num == base + cache.len() as u64 && cache.len() < *cache_capacity { - trace!(target: "state-db", "Adding to DB backed cache {:?} (#{})", hash, num); - cache.push_back(DeathRow { hash, deleted: deleted.into_iter().collect() }); - } - *last = Some(num); - }, - DeathRowQueue::Mem { death_rows, death_index } => { - // remove all re-inserted keys from death rows - for k in inserted { - if let Some(block) = death_index.remove(&k) { - death_rows[(block - base) as usize].deleted.remove(&k); - } - } - // add new keys - let imported_block = base + death_rows.len() as u64; - for k in deleted.iter() { - death_index.insert(k.clone(), imported_block); - } - death_rows.push_back(DeathRow { hash, deleted: deleted.into_iter().collect() }); - }, - } - } - - /// Pop out one block from the front of the queue, `base` is the block number - /// of the first block of the queue - fn pop_front( - &mut self, - base: u64, - ) -> Result>, Error> { - match self { - DeathRowQueue::DbBacked { db, cache, cache_capacity, .. } => { - if cache.is_empty() { - DeathRowQueue::load_batch_from_db(db, cache, base, *cache_capacity)?; - } - Ok(cache.pop_front()) - }, - DeathRowQueue::Mem { death_rows, death_index } => match death_rows.pop_front() { - Some(row) => { - for k in row.deleted.iter() { - death_index.remove(k); - } - Ok(Some(row)) - }, - None => Ok(None), - }, - } - } - - /// Load a batch of blocks from the backend database into `cache`, starting from `base` and up - /// to `base + cache_capacity` - fn load_batch_from_db( - db: &D, - cache: &mut VecDeque>, - base: u64, - cache_capacity: usize, - ) -> Result<(), Error> { - let start = base + cache.len() as u64; - let batch_size = cache_capacity; - for i in 0..batch_size as u64 { - match load_death_row_from_db::(db, start + i)? { - Some(row) => { - cache.push_back(row); - }, - None => break, - } - } - Ok(()) - } - - /// Check if the block at the given `index` of the queue exist - /// it is the caller's responsibility to ensure `index` won't be out of bounds - fn have_block(&self, hash: &BlockHash, index: usize) -> HaveBlock { - match self { - DeathRowQueue::DbBacked { cache, .. } => { - if cache.len() > index { - (cache[index].hash == *hash).into() - } else { - // The block is not in the cache but it still may exist on disk. - HaveBlock::Maybe - } - }, - DeathRowQueue::Mem { death_rows, .. } => (death_rows[index].hash == *hash).into(), - } - } - - /// Return the number of block in the pruning window - fn len(&self, base: u64) -> u64 { - match self { - DeathRowQueue::DbBacked { last, .. } => last.map_or(0, |l| l + 1 - base), - DeathRowQueue::Mem { death_rows, .. } => death_rows.len() as u64, - } - } - - #[cfg(test)] - fn get_mem_queue_state( - &self, - ) -> Option<(&VecDeque>, &HashMap)> { - match self { - DeathRowQueue::DbBacked { .. 
} => None, - DeathRowQueue::Mem { death_rows, death_index } => Some((death_rows, death_index)), - } - } - - #[cfg(test)] - fn get_db_backed_queue_state( - &self, - ) -> Option<(&VecDeque>, Option)> { - match self { - DeathRowQueue::DbBacked { cache, last, .. } => Some((cache, *last)), - DeathRowQueue::Mem { .. } => None, - } - } -} - -fn load_death_row_from_db( - db: &D, - block: u64, -) -> Result>, Error> { - let journal_key = to_journal_key(block); - match db.get_meta(&journal_key).map_err(Error::Db)? { - Some(record) => { - let JournalRecord { hash, deleted, .. } = Decode::decode(&mut record.as_slice())?; - Ok(Some(DeathRow { hash, deleted: deleted.into_iter().collect() })) - }, - None => Ok(None), - } -} - -#[derive(Clone, Debug, PartialEq, Eq, parity_util_mem_derive::MallocSizeOf)] -struct DeathRow { - hash: BlockHash, - deleted: HashSet, -} - -#[derive(Encode, Decode, Default)] -struct JournalRecord { - hash: BlockHash, - inserted: Vec, - deleted: Vec, -} - -fn to_journal_key(block: u64) -> Vec { - to_meta_key(PRUNING_JOURNAL, &block) -} - -/// The result return by `RefWindow::have_block` -#[derive(Debug, PartialEq, Eq)] -pub enum HaveBlock { - /// Definitely don't have this block. - No, - /// May or may not have this block, need further checking - Maybe, - /// Definitely has this block - Yes, -} - -impl From for HaveBlock { - fn from(have: bool) -> Self { - if have { - HaveBlock::Yes - } else { - HaveBlock::No - } - } -} - -impl RefWindow { - pub fn new( - db: D, - window_size: u32, - count_insertions: bool, - ) -> Result, Error> { - // the block number of the first block in the queue or the next block number if the queue is - // empty - let base = match db.get_meta(&to_meta_key(LAST_PRUNED, &())).map_err(Error::Db)? { - Some(buffer) => u64::decode(&mut buffer.as_slice())? + 1, - None => 0, - }; - // the block number of the last block in the queue - let last_canonicalized_number = - match db.get_meta(&to_meta_key(LAST_CANONICAL, &())).map_err(Error::Db)? { - Some(buffer) => Some(<(BlockHash, u64)>::decode(&mut buffer.as_slice())?.1), - None => None, - }; - - let queue = if count_insertions { - DeathRowQueue::new_mem(&db, base)? - } else { - let last = match last_canonicalized_number { - Some(last_canonicalized_number) => { - debug_assert!(last_canonicalized_number + 1 >= base); - Some(last_canonicalized_number) - }, - // None means `LAST_CANONICAL` is never been wrote, since the pruning journals are - // in the same `CommitSet` as `LAST_CANONICAL`, it means no pruning journal have - // ever been committed to the db, thus set `unload` to zero - None => None, - }; - DeathRowQueue::new_db_backed(db, base, last, window_size)? - }; - - Ok(RefWindow { queue, base }) - } - - pub fn window_size(&self) -> u64 { - self.queue.len(self.base) as u64 - } - - /// Get the hash of the next pruning block - pub fn next_hash(&mut self) -> Result, Error> { - let res = match &mut self.queue { - DeathRowQueue::DbBacked { db, cache, cache_capacity, .. } => { - if cache.is_empty() { - DeathRowQueue::load_batch_from_db(db, cache, self.base, *cache_capacity)?; - } - cache.front().map(|r| r.hash.clone()) - }, - DeathRowQueue::Mem { death_rows, .. 
} => death_rows.front().map(|r| r.hash.clone()), - }; - Ok(res) - } - - pub fn mem_used(&self) -> usize { - 0 - } - - fn is_empty(&self) -> bool { - self.window_size() == 0 - } - - // Check if a block is in the pruning window and not be pruned yet - pub fn have_block(&self, hash: &BlockHash, number: u64) -> HaveBlock { - // if the queue is empty or the block number exceed the pruning window, we definitely - // do not have this block - if self.is_empty() || number < self.base || number >= self.base + self.window_size() { - return HaveBlock::No - } - self.queue.have_block(hash, (number - self.base) as usize) - } - - /// Prune next block. Expects at least one block in the window. Adds changes to `commit`. - pub fn prune_one(&mut self, commit: &mut CommitSet) -> Result<(), Error> { - if let Some(pruned) = self.queue.pop_front(self.base)? { - trace!(target: "state-db", "Pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); - let index = self.base; - commit.data.deleted.extend(pruned.deleted.into_iter()); - commit.meta.inserted.push((to_meta_key(LAST_PRUNED, &()), index.encode())); - commit.meta.deleted.push(to_journal_key(self.base)); - self.base += 1; - Ok(()) - } else { - trace!(target: "state-db", "Trying to prune when there's nothing to prune"); - Err(Error::StateDb(StateDbError::BlockUnavailable)) - } - } - - /// Add a change set to the window. Creates a journal record and pushes it to `commit` - pub fn note_canonical( - &mut self, - hash: &BlockHash, - number: u64, - commit: &mut CommitSet, - ) -> Result<(), Error> { - if self.base == 0 && self.is_empty() && number > 0 { - // assume that parent was canonicalized - self.base = number; - } else if (self.base + self.window_size()) != number { - return Err(Error::StateDb(StateDbError::InvalidBlockNumber)) - } - trace!(target: "state-db", "Adding to pruning window: {:?} ({} inserted, {} deleted)", hash, commit.data.inserted.len(), commit.data.deleted.len()); - let inserted = if matches!(self.queue, DeathRowQueue::Mem { .. }) { - commit.data.inserted.iter().map(|(k, _)| k.clone()).collect() - } else { - Default::default() - }; - let deleted = std::mem::take(&mut commit.data.deleted); - let journal_record = JournalRecord { hash: hash.clone(), inserted, deleted }; - commit.meta.inserted.push((to_journal_key(number), journal_record.encode())); - self.queue.import(self.base, number, journal_record); - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::{to_journal_key, DeathRowQueue, HaveBlock, JournalRecord, RefWindow, LAST_PRUNED}; - use crate::{ - noncanonical::LAST_CANONICAL, - test::{make_commit, make_db, TestDb}, - to_meta_key, CommitSet, Error, Hash, StateDbError, DEFAULT_MAX_BLOCK_CONSTRAINT, - }; - use codec::Encode; - use sp_core::H256; - - fn check_journal(pruning: &RefWindow, db: &TestDb) { - let count_insertions = matches!(pruning.queue, DeathRowQueue::Mem { .. 
}); - let restored: RefWindow = - RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, count_insertions).unwrap(); - assert_eq!(pruning.base, restored.base); - assert_eq!(pruning.queue.get_mem_queue_state(), restored.queue.get_mem_queue_state()); - } - - #[test] - fn created_from_empty_db() { - let db = make_db(&[]); - let pruning: RefWindow = - RefWindow::new(db, DEFAULT_MAX_BLOCK_CONSTRAINT, true).unwrap(); - assert_eq!(pruning.base, 0); - let (death_rows, death_index) = pruning.queue.get_mem_queue_state().unwrap(); - assert!(death_rows.is_empty()); - assert!(death_index.is_empty()); - } - - #[test] - fn prune_empty() { - let db = make_db(&[]); - let mut pruning: RefWindow = - RefWindow::new(db, DEFAULT_MAX_BLOCK_CONSTRAINT, true).unwrap(); - let mut commit = CommitSet::default(); - assert_eq!( - Err(Error::StateDb(StateDbError::BlockUnavailable)), - pruning.prune_one(&mut commit) - ); - assert_eq!(pruning.base, 0); - let (death_rows, death_index) = pruning.queue.get_mem_queue_state().unwrap(); - assert!(death_rows.is_empty()); - assert!(death_index.is_empty()); - } - - #[test] - fn prune_one() { - let mut db = make_db(&[1, 2, 3]); - let mut pruning: RefWindow = - RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, true).unwrap(); - let mut commit = make_commit(&[4, 5], &[1, 3]); - let hash = H256::random(); - pruning.note_canonical(&hash, 0, &mut commit).unwrap(); - db.commit(&commit); - assert_eq!(pruning.have_block(&hash, 0), HaveBlock::Yes); - assert_eq!(pruning.have_block(&hash, 0), HaveBlock::Yes); - assert!(commit.data.deleted.is_empty()); - let (death_rows, death_index) = pruning.queue.get_mem_queue_state().unwrap(); - assert_eq!(death_rows.len(), 1); - assert_eq!(death_index.len(), 2); - assert!(db.data_eq(&make_db(&[1, 2, 3, 4, 5]))); - check_journal(&pruning, &db); - - let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit).unwrap(); - assert_eq!(pruning.have_block(&hash, 0), HaveBlock::No); - db.commit(&commit); - assert_eq!(pruning.have_block(&hash, 0), HaveBlock::No); - assert!(db.data_eq(&make_db(&[2, 4, 5]))); - let (death_rows, death_index) = pruning.queue.get_mem_queue_state().unwrap(); - assert!(death_rows.is_empty()); - assert!(death_index.is_empty()); - assert_eq!(pruning.base, 1); - } - - #[test] - fn prune_two() { - let mut db = make_db(&[1, 2, 3]); - let mut pruning: RefWindow = - RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, true).unwrap(); - let mut commit = make_commit(&[4], &[1]); - pruning.note_canonical(&H256::random(), 0, &mut commit).unwrap(); - db.commit(&commit); - let mut commit = make_commit(&[5], &[2]); - pruning.note_canonical(&H256::random(), 1, &mut commit).unwrap(); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[1, 2, 3, 4, 5]))); - - check_journal(&pruning, &db); - - let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit).unwrap(); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[2, 3, 4, 5]))); - let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit).unwrap(); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[3, 4, 5]))); - assert_eq!(pruning.base, 2); - } - - #[test] - fn prune_two_pending() { - let mut db = make_db(&[1, 2, 3]); - let mut pruning: RefWindow = - RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, true).unwrap(); - let mut commit = make_commit(&[4], &[1]); - pruning.note_canonical(&H256::random(), 0, &mut commit).unwrap(); - db.commit(&commit); - let mut commit = make_commit(&[5], &[2]); - pruning.note_canonical(&H256::random(), 1, &mut 
commit).unwrap(); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[1, 2, 3, 4, 5]))); - let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit).unwrap(); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[2, 3, 4, 5]))); - let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit).unwrap(); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[3, 4, 5]))); - assert_eq!(pruning.base, 2); - } - - #[test] - fn reinserted_survives() { - let mut db = make_db(&[1, 2, 3]); - let mut pruning: RefWindow = - RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, true).unwrap(); - let mut commit = make_commit(&[], &[2]); - pruning.note_canonical(&H256::random(), 0, &mut commit).unwrap(); - db.commit(&commit); - let mut commit = make_commit(&[2], &[]); - pruning.note_canonical(&H256::random(), 1, &mut commit).unwrap(); - db.commit(&commit); - let mut commit = make_commit(&[], &[2]); - pruning.note_canonical(&H256::random(), 2, &mut commit).unwrap(); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[1, 2, 3]))); - - check_journal(&pruning, &db); - - let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit).unwrap(); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[1, 2, 3]))); - let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit).unwrap(); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[1, 2, 3]))); - pruning.prune_one(&mut commit).unwrap(); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[1, 3]))); - assert_eq!(pruning.base, 3); - } - - #[test] - fn reinserted_survive_pending() { - let mut db = make_db(&[1, 2, 3]); - let mut pruning: RefWindow = - RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, true).unwrap(); - let mut commit = make_commit(&[], &[2]); - pruning.note_canonical(&H256::random(), 0, &mut commit).unwrap(); - db.commit(&commit); - let mut commit = make_commit(&[2], &[]); - pruning.note_canonical(&H256::random(), 1, &mut commit).unwrap(); - db.commit(&commit); - let mut commit = make_commit(&[], &[2]); - pruning.note_canonical(&H256::random(), 2, &mut commit).unwrap(); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[1, 2, 3]))); - - let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit).unwrap(); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[1, 2, 3]))); - let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit).unwrap(); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[1, 2, 3]))); - pruning.prune_one(&mut commit).unwrap(); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[1, 3]))); - assert_eq!(pruning.base, 3); - } - - #[test] - fn reinserted_ignores() { - let mut db = make_db(&[1, 2, 3]); - let mut pruning: RefWindow = - RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, false).unwrap(); - let mut commit = make_commit(&[], &[2]); - pruning.note_canonical(&H256::random(), 0, &mut commit).unwrap(); - db.commit(&commit); - let mut commit = make_commit(&[2], &[]); - pruning.note_canonical(&H256::random(), 1, &mut commit).unwrap(); - db.commit(&commit); - let mut commit = make_commit(&[], &[2]); - pruning.note_canonical(&H256::random(), 2, &mut commit).unwrap(); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[1, 2, 3]))); - - check_journal(&pruning, &db); - - let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit).unwrap(); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[1, 3]))); - } - - fn push_last_canonicalized(block: u64, commit: &mut CommitSet) { - commit - .meta - .inserted - 
.push((to_meta_key(LAST_CANONICAL, &()), (block, block).encode())); - } - - fn push_last_pruned(block: u64, commit: &mut CommitSet) { - commit.meta.inserted.push((to_meta_key(LAST_PRUNED, &()), block.encode())); - } - - #[test] - fn init_db_backed_queue() { - let mut db = make_db(&[]); - let mut commit = CommitSet::default(); - - fn load_pruning_from_db(db: TestDb) -> (usize, u64) { - let pruning: RefWindow = - RefWindow::new(db, DEFAULT_MAX_BLOCK_CONSTRAINT, false).unwrap(); - let (cache, _) = pruning.queue.get_db_backed_queue_state().unwrap(); - (cache.len(), pruning.base) - } - - fn push_record(block: u64, commit: &mut CommitSet) { - commit - .meta - .inserted - .push((to_journal_key(block), JournalRecord::::default().encode())); - } - - // empty database - let (loaded_blocks, base) = load_pruning_from_db(db.clone()); - assert_eq!(loaded_blocks, 0); - assert_eq!(base, 0); - - // canonicalized the genesis block but no pruning - push_last_canonicalized(0, &mut commit); - push_record(0, &mut commit); - db.commit(&commit); - let (loaded_blocks, base) = load_pruning_from_db(db.clone()); - assert_eq!(loaded_blocks, 1); - assert_eq!(base, 0); - - // pruned the genesis block - push_last_pruned(0, &mut commit); - db.commit(&commit); - let (loaded_blocks, base) = load_pruning_from_db(db.clone()); - assert_eq!(loaded_blocks, 0); - assert_eq!(base, 1); - - // canonicalize more blocks - push_last_canonicalized(10, &mut commit); - for i in 1..=10 { - push_record(i, &mut commit); - } - db.commit(&commit); - let (loaded_blocks, base) = load_pruning_from_db(db.clone()); - assert_eq!(loaded_blocks, 10); - assert_eq!(base, 1); - - // pruned all blocks - push_last_pruned(10, &mut commit); - db.commit(&commit); - let (loaded_blocks, base) = load_pruning_from_db(db.clone()); - assert_eq!(loaded_blocks, 0); - assert_eq!(base, 11); - } - - #[test] - fn db_backed_queue() { - let mut db = make_db(&[]); - let mut pruning: RefWindow = - RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, false).unwrap(); - let cache_capacity = DEFAULT_MAX_BLOCK_CONSTRAINT as usize; - - // start as an empty queue - let (cache, last) = pruning.queue.get_db_backed_queue_state().unwrap(); - assert_eq!(cache.len(), 0); - assert_eq!(last, None); - - // import blocks - // queue size and content should match - for i in 0..(cache_capacity + 10) { - let mut commit = make_commit(&[], &[]); - pruning.note_canonical(&(i as u64), i as u64, &mut commit).unwrap(); - push_last_canonicalized(i as u64, &mut commit); - db.commit(&commit); - // blocks will fill the cache first - let (cache, last) = pruning.queue.get_db_backed_queue_state().unwrap(); - if i < cache_capacity { - assert_eq!(cache.len(), i + 1); - } else { - assert_eq!(cache.len(), cache_capacity); - } - assert_eq!(last, Some(i as u64)); - } - assert_eq!(pruning.window_size(), cache_capacity as u64 + 10); - let (cache, last) = pruning.queue.get_db_backed_queue_state().unwrap(); - assert_eq!(cache.len(), cache_capacity); - assert_eq!(last, Some(cache_capacity as u64 + 10 - 1)); - for i in 0..cache_capacity { - assert_eq!(cache[i].hash, i as u64); - } - - // import a new block to the end of the queue - // won't keep the new block in memory - let mut commit = CommitSet::default(); - pruning - .note_canonical(&(cache_capacity as u64 + 10), cache_capacity as u64 + 10, &mut commit) - .unwrap(); - assert_eq!(pruning.window_size(), cache_capacity as u64 + 11); - let (cache, _) = pruning.queue.get_db_backed_queue_state().unwrap(); - assert_eq!(cache.len(), cache_capacity); - - // revert the 
last add that no apply yet - // NOTE: do not commit the previous `CommitSet` to db - pruning = RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, false).unwrap(); - let cache_capacity = DEFAULT_MAX_BLOCK_CONSTRAINT as usize; - assert_eq!(pruning.window_size(), cache_capacity as u64 + 10); - let (cache, _) = pruning.queue.get_db_backed_queue_state().unwrap(); - assert_eq!(cache.len(), cache_capacity); - - // remove one block from the start of the queue - // block is removed from the head of cache - let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit).unwrap(); - db.commit(&commit); - assert_eq!(pruning.window_size(), cache_capacity as u64 + 9); - let (cache, _) = pruning.queue.get_db_backed_queue_state().unwrap(); - assert_eq!(cache.len(), cache_capacity - 1); - for i in 0..(cache_capacity - 1) { - assert_eq!(cache[i].hash, (i + 1) as u64); - } - - // load a new queue from db - // `cache` is full again but the content of the queue should be the same - let pruning: RefWindow = - RefWindow::new(db, DEFAULT_MAX_BLOCK_CONSTRAINT, false).unwrap(); - assert_eq!(pruning.window_size(), cache_capacity as u64 + 9); - let (cache, _) = pruning.queue.get_db_backed_queue_state().unwrap(); - assert_eq!(cache.len(), cache_capacity); - for i in 0..cache_capacity { - assert_eq!(cache[i].hash, (i + 1) as u64); - } - } - - #[test] - fn load_block_from_db() { - let mut db = make_db(&[]); - let mut pruning: RefWindow = - RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, false).unwrap(); - let cache_capacity = DEFAULT_MAX_BLOCK_CONSTRAINT as usize; - - // import blocks - for i in 0..(cache_capacity as u64 * 2 + 10) { - let mut commit = make_commit(&[], &[]); - pruning.note_canonical(&i, i, &mut commit).unwrap(); - push_last_canonicalized(i as u64, &mut commit); - db.commit(&commit); - } - - // the following operations won't trigger loading block from db: - // - getting block in cache - // - getting block not in the queue - assert_eq!(pruning.next_hash().unwrap().unwrap(), 0); - let (cache, last) = pruning.queue.get_db_backed_queue_state().unwrap(); - assert_eq!(cache.len(), cache_capacity); - assert_eq!(last, Some(cache_capacity as u64 * 2 + 10 - 1)); - - // clear all block loaded in cache - for _ in 0..cache_capacity * 2 { - let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit).unwrap(); - db.commit(&commit); - } - let (cache, _) = pruning.queue.get_db_backed_queue_state().unwrap(); - assert!(cache.is_empty()); - - // getting the hash of block that not in cache will also trigger loading - // the remaining blocks from db - assert_eq!(pruning.next_hash().unwrap().unwrap(), (cache_capacity * 2) as u64); - let (cache, _) = pruning.queue.get_db_backed_queue_state().unwrap(); - assert_eq!(cache.len(), 10); - - // load a new queue from db - // `cache` should be the same - let pruning: RefWindow = - RefWindow::new(db, DEFAULT_MAX_BLOCK_CONSTRAINT, false).unwrap(); - assert_eq!(pruning.window_size(), 10); - let (cache, _) = pruning.queue.get_db_backed_queue_state().unwrap(); - assert_eq!(cache.len(), 10); - for i in 0..10 { - assert_eq!(cache[i].hash, (cache_capacity * 2 + i) as u64); - } - } - - #[test] - fn get_block_from_queue() { - let mut db = make_db(&[]); - let mut pruning: RefWindow = - RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, false).unwrap(); - let cache_capacity = DEFAULT_MAX_BLOCK_CONSTRAINT as u64; - - // import blocks and commit to db - let mut commit = make_commit(&[], &[]); - for i in 0..(cache_capacity + 10) { - pruning.note_canonical(&i, 
i, &mut commit).unwrap(); - } - db.commit(&commit); - - // import a block but not commit to db yet - let mut pending_commit = make_commit(&[], &[]); - let index = cache_capacity + 10; - pruning.note_canonical(&index, index, &mut pending_commit).unwrap(); - - let mut commit = make_commit(&[], &[]); - // prune blocks that had committed to db - for i in 0..(cache_capacity + 10) { - assert_eq!(pruning.next_hash().unwrap(), Some(i)); - pruning.prune_one(&mut commit).unwrap(); - } - // return `None` for block that did not commit to db - assert_eq!(pruning.next_hash().unwrap(), None); - assert_eq!( - pruning.prune_one(&mut commit).unwrap_err(), - Error::StateDb(StateDbError::BlockUnavailable) - ); - // commit block to db and no error return - db.commit(&pending_commit); - assert_eq!(pruning.next_hash().unwrap(), Some(index)); - pruning.prune_one(&mut commit).unwrap(); - db.commit(&commit); - } -} diff --git a/substrate/client/state-db/src/test.rs b/substrate/client/state-db/src/test.rs deleted file mode 100644 index 314ec290..00000000 --- a/substrate/client/state-db/src/test.rs +++ /dev/null @@ -1,98 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! 
Test utils - -use crate::{ChangeSet, CommitSet, DBValue, MetaDb, NodeDb}; -use sp_core::H256; -use std::{ - collections::HashMap, - sync::{Arc, RwLock}, -}; - -#[derive(Default, Debug, Clone)] -pub struct TestDb(Arc>); - -#[derive(Default, Debug, Clone, PartialEq, Eq)] -struct TestDbInner { - pub data: HashMap, - pub meta: HashMap, DBValue>, -} - -impl MetaDb for TestDb { - type Error = (); - - fn get_meta(&self, key: &[u8]) -> Result, ()> { - Ok(self.0.read().unwrap().meta.get(key).cloned()) - } -} - -impl NodeDb for TestDb { - type Error = (); - type Key = H256; - - fn get(&self, key: &H256) -> Result, ()> { - Ok(self.0.read().unwrap().data.get(key).cloned()) - } -} - -impl TestDb { - pub fn commit(&mut self, commit: &CommitSet) { - self.0.write().unwrap().data.extend(commit.data.inserted.iter().cloned()); - self.0.write().unwrap().meta.extend(commit.meta.inserted.iter().cloned()); - for k in commit.data.deleted.iter() { - self.0.write().unwrap().data.remove(k); - } - self.0.write().unwrap().meta.extend(commit.meta.inserted.iter().cloned()); - for k in commit.meta.deleted.iter() { - self.0.write().unwrap().meta.remove(k); - } - } - - pub fn data_eq(&self, other: &TestDb) -> bool { - self.0.read().unwrap().data == other.0.read().unwrap().data - } - - pub fn meta_len(&self) -> usize { - self.0.read().unwrap().meta.len() - } -} - -pub fn make_changeset(inserted: &[u64], deleted: &[u64]) -> ChangeSet { - ChangeSet { - inserted: inserted - .iter() - .map(|v| (H256::from_low_u64_be(*v), H256::from_low_u64_be(*v).as_bytes().to_vec())) - .collect(), - deleted: deleted.iter().map(|v| H256::from_low_u64_be(*v)).collect(), - } -} - -pub fn make_commit(inserted: &[u64], deleted: &[u64]) -> CommitSet { - CommitSet { data: make_changeset(inserted, deleted), meta: ChangeSet::default() } -} - -pub fn make_db(inserted: &[u64]) -> TestDb { - TestDb(Arc::new(RwLock::new(TestDbInner { - data: inserted - .iter() - .map(|v| (H256::from_low_u64_be(*v), H256::from_low_u64_be(*v).as_bytes().to_vec())) - .collect(), - meta: Default::default(), - }))) -}
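
For reference, a minimal sketch of the NonCanonicalOverlay lifecycle that the deleted noncanonical.rs tests above exercise. It is illustrative only, not part of the patch: it assumes it would sit inside that test module (so the contains, make_db and make_changeset helpers shown above are in scope via `use super::*;`), and it assumes `<H256, H256>` type parameters, which the mangled hunks above elide.

use super::*;
use sp_core::H256;

// Illustrative sketch only (hypothetical test name); mirrors the deleted tests above.
#[test]
fn noncanonical_overlay_lifecycle_sketch() {
    let mut db = make_db(&[1, 2]);
    // Assumed type parameters: H256 block hashes and H256 node keys, as in the deleted tests.
    let mut overlay = NonCanonicalOverlay::<H256, H256>::new(&db).unwrap();

    // Insert two blocks on one chain; their changes are visible through the overlay immediately.
    let (h1, h2) = (H256::random(), H256::random());
    db.commit(&overlay.insert(&h1, 1, &H256::default(), make_changeset(&[3], &[])).unwrap());
    db.commit(&overlay.insert(&h2, 2, &h1, make_changeset(&[4], &[1])).unwrap());
    assert!(contains(&overlay, 3));
    assert!(contains(&overlay, 4));

    // Canonicalize h1: its changes go into the commit (any competing level-1 forks would be
    // discarded); sync() then drops the canonicalized level from the in-memory overlay.
    let mut commit = CommitSet::default();
    overlay.canonicalize(&h1, &mut commit).unwrap();
    db.commit(&commit);
    overlay.sync();
    assert!(!contains(&overlay, 3)); // now lives only in the backing DB
    assert!(contains(&overlay, 4)); // h2 is still non-canonical
}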
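
Likewise, a minimal sketch of how the deleted RefWindow pruning window is driven, in the style of the removed pruning.rs tests. It assumes it would sit in that test module, so that RefWindow, HaveBlock, CommitSet, DEFAULT_MAX_BLOCK_CONSTRAINT and the make_db/make_commit helpers from the test.rs shown above resolve; the test name is hypothetical.

use super::{HaveBlock, RefWindow};
use crate::{test::{make_commit, make_db}, CommitSet, DEFAULT_MAX_BLOCK_CONSTRAINT};
use sp_core::H256;

// Illustrative sketch only; follows the prune_one test deleted above.
#[test]
fn pruning_window_walkthrough_sketch() {
    let mut db = make_db(&[1, 2, 3]);
    // `true` selects the in-memory queue flavour (backend without reference counting).
    let mut pruning = RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, true).unwrap();

    // Canonicalizing block 0 journals its inserted/deleted keys and adds it to the window;
    // note_canonical() takes the deletions out of the commit, so nothing is deleted yet.
    let mut commit = make_commit(&[4], &[1]);
    let hash = H256::random();
    pruning.note_canonical(&hash, 0, &mut commit).unwrap();
    db.commit(&commit);
    assert_eq!(pruning.have_block(&hash, 0), HaveBlock::Yes);

    // prune_one() pops the oldest block and schedules its deleted keys in the CommitSet;
    // only after this commit is key 1 actually removed from the backing database.
    let mut commit = CommitSet::default();
    pruning.prune_one(&mut commit).unwrap();
    db.commit(&commit);
    assert_eq!(pruning.have_block(&hash, 0), HaveBlock::No);
}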