diff --git a/Cargo.lock b/Cargo.lock index 46c3c00de13c..f01b5b365f99 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -42,7 +42,7 @@ version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cipher", "cpufeatures", "opaque-debug 0.3.0", @@ -130,15 +130,6 @@ version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" -[[package]] -name = "arrayvec" -version = "0.4.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9" -dependencies = [ - "nodrop", -] - [[package]] name = "arrayvec" version = "0.5.2" @@ -273,7 +264,7 @@ checksum = "83137067e3a2a6a06d67168e49e68a0957d215410473a740cea95a2425c0b7c6" dependencies = [ "async-io", "blocking", - "cfg-if 1.0.0", + "cfg-if", "event-listener", "futures-lite", "libc", @@ -386,7 +377,7 @@ checksum = "5e121dee8023ce33ab248d9ce1493df03c3b38a659b240096fcbd7048ff9c31f" dependencies = [ "addr2line", "cc", - "cfg-if 1.0.0", + "cfg-if", "libc", "miniz_oxide", "object 0.27.1", @@ -435,7 +426,7 @@ dependencies = [ [[package]] name = "beefy-gadget" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "array-bytes", "async-trait", @@ -472,7 +463,7 @@ dependencies = [ [[package]] name = "beefy-gadget-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "beefy-gadget", "beefy-primitives", @@ -492,7 +483,7 @@ dependencies = [ [[package]] name = "beefy-merkle-tree" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "beefy-primitives", "sp-api", @@ -502,7 +493,7 @@ dependencies = [ [[package]] name = "beefy-primitives" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "parity-scale-codec", "scale-info", @@ -571,16 +562,6 @@ dependencies = [ "digest 0.10.3", ] -[[package]] -name = "blake2-rfc" -version = "0.2.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400" -dependencies = [ - "arrayvec 0.4.12", - "constant_time_eq", -] - [[package]] name = "blake2b_simd" version = "1.0.0" @@ -612,7 +593,7 @@ dependencies = [ "arrayref", "arrayvec 0.7.2", "cc", - "cfg-if 1.0.0", + "cfg-if", "constant_time_eq", "digest 0.10.3", ] @@ -810,12 +791,6 @@ dependencies = [ "smallvec", ] -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - [[package]] name = "cfg-if" version = "1.0.0" @@ -834,7 
+809,7 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c80e5460aa66fe3b91d40bcbdab953a597b60053e34d684ac6903f863b680a6" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cipher", "cpufeatures", "zeroize", @@ -890,11 +865,11 @@ dependencies = [ [[package]] name = "ckb-merkle-mountain-range" -version = "0.3.2" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f061f97d64fd1822664bdfb722f7ae5469a97b77567390f7442be5b5dc82a5b" +checksum = "56ccb671c5921be8a84686e6212ca184cb1d7c51cadcdbfcbd1cc3f042f5dfb8" dependencies = [ - "cfg-if 0.1.10", + "cfg-if", ] [[package]] @@ -1049,7 +1024,7 @@ version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "931ab2a3e6330a07900b8e7ca4e106cdcbb93f2b9a52df55e54ee53d8305b55d" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -1165,7 +1140,7 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "738c290dfaea84fc1ca15ad9c168d083b05a714e1efddd8edaab678dc28d2836" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -1174,7 +1149,7 @@ version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c02a4d71819009c192cf4872265391563fd6a84c81ff2c0f2a7026ca4c1d85c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crossbeam-utils", ] @@ -1184,7 +1159,7 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crossbeam-epoch", "crossbeam-utils", ] @@ -1195,7 +1170,7 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ec02e091aa634e2c3ada4a392989e7c3116673ef0ac5b72232439094d73b7fd" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crossbeam-utils", "lazy_static", "memoffset", @@ -1208,7 +1183,7 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f25d8400f4a7a5778f0e4e52384a48cbd9b5c495d110786187fc750075277a2" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crossbeam-utils", ] @@ -1218,7 +1193,7 @@ version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ff1f980957787286a554052d03c7aee98d99cc32e09f6d45f0a814133c87978" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "once_cell", ] @@ -1497,7 +1472,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "339ee130d97a610ea5a5872d2bbb130fdf68884ff09d3028b81bec8a1ac23bbc" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "dirs-sys-next", ] @@ -1679,7 +1654,7 @@ version = "0.8.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7896dc8abb250ffdda33912550faa54c88ec8b998dec0b2c55ab224921ce11df" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -1904,7 +1879,7 @@ version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc04871e5ae3aa2952d552dae6b291b3099723bf779a8054281c1366a54613ef" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "log", "serde", @@ -1940,7 +1915,7 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e94a7bbaa59354bc20dd75b67f23e2797b4490e9d6928203fb105c79e448c86c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "redox_syscall", "windows-sys 0.36.1", @@ -1998,7 +1973,7 @@ version = "1.0.22" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "1e6988e897c1c9c485f43b47a529cef42fde0547f9d8d41a7062518f1d8fc53f" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crc32fast", "libc", "libz-sys", @@ -2038,7 +2013,7 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "fork-tree" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "parity-scale-codec", ] @@ -2062,7 +2037,7 @@ checksum = "85dcb89d2b10c5f6133de2efd8c11959ce9dbb46a2f7a4cab208c4eeda6ce1ab" [[package]] name = "frame-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-support", "frame-system", @@ -2085,7 +2060,7 @@ dependencies = [ [[package]] name = "frame-benchmarking-cli" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "Inflector", "array-bytes", @@ -2137,7 +2112,7 @@ dependencies = [ [[package]] name = "frame-election-provider-solution-type" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2148,7 +2123,7 @@ dependencies = [ [[package]] name = "frame-election-provider-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-election-provider-solution-type", "frame-support", @@ -2164,7 +2139,7 @@ dependencies = [ [[package]] name = "frame-executive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-support", "frame-system", @@ -2184,7 +2159,7 @@ version = "15.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df6bb8542ef006ef0de09a5c4420787d79823c0ed7924225822362fd2bf2ff2d" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "parity-scale-codec", "scale-info", "serde", @@ -2193,7 +2168,7 @@ dependencies = [ [[package]] name = "frame-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "bitflags", "frame-metadata", @@ -2225,7 +2200,7 @@ dependencies = [ [[package]] name = "frame-support-procedural" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = 
"git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "Inflector", "cfg-expr", @@ -2239,7 +2214,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate", @@ -2251,7 +2226,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "proc-macro2", "quote", @@ -2261,7 +2236,7 @@ dependencies = [ [[package]] name = "frame-support-test" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-support", "frame-support-test-pallet", @@ -2284,7 +2259,7 @@ dependencies = [ [[package]] name = "frame-support-test-pallet" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-support", "frame-system", @@ -2295,7 +2270,7 @@ dependencies = [ [[package]] name = "frame-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-support", "log", @@ -2313,7 +2288,7 @@ dependencies = [ [[package]] name = "frame-system-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-benchmarking", "frame-support", @@ -2328,7 +2303,7 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "parity-scale-codec", "sp-api", @@ -2337,7 +2312,7 @@ dependencies = [ [[package]] name = "frame-try-runtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-support", "parity-scale-codec", @@ -2508,7 +2483,7 @@ dependencies = [ [[package]] name = "generate-bags" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "chrono", 
"frame-election-provider-support", @@ -2555,7 +2530,7 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "libc", "wasi 0.9.0+wasi-snapshot-preview1", @@ -2568,7 +2543,7 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "wasi 0.10.0+wasi-snapshot-preview1", ] @@ -2782,7 +2757,7 @@ checksum = "848e9c511092e0daa0a35a63e8e6e475a3e8f870741448b9f6028d69b142f18e" dependencies = [ "arbitrary", "lazy_static", - "memmap2 0.5.0", + "memmap2", "rustc_version", ] @@ -2805,7 +2780,7 @@ checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" dependencies = [ "bytes", "fnv", - "itoa 1.0.1", + "itoa", ] [[package]] @@ -2861,7 +2836,7 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa 1.0.1", + "itoa", "pin-project-lite 0.2.7", "socket2", "tokio", @@ -2989,7 +2964,7 @@ version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -3048,15 +3023,9 @@ dependencies = [ [[package]] name = "itoa" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" - -[[package]] -name = "itoa" -version = "1.0.1" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" +checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc" [[package]] name = "jobserver" @@ -3226,7 +3195,7 @@ version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72c1e0b51e7ec0a97369623508396067a486bd0cbed95a2659a4b863d28cfc8b" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "ecdsa", "elliptic-curve", "sha2 0.10.2", @@ -3291,7 +3260,6 @@ dependencies = [ "pallet-session-benchmarking", "pallet-society", "pallet-staking", - "pallet-staking-reward-fn", "pallet-timestamp", "pallet-tips", "pallet-transaction-payment", @@ -3445,7 +3413,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "afe203d669ec979b7128619bae5a63b7b42e9203c1b29146079ee05e2f604b52" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "winapi", ] @@ -3549,7 +3517,7 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log", - "lru 0.8.0", + "lru", "prost", "prost-build", "prost-codec", @@ -3918,25 +3886,16 @@ version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "serde", "value-bag", ] [[package]] name = "lru" -version = "0.7.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999beba7b6e8345721bd280141ed958096a2e4abdf74f67ff4ce49b4b54e47a" -dependencies = [ - "hashbrown", -] - -[[package]] -name = "lru" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "936d98d2ddd79c18641c6709e7bb09981449694e402d1a0f0f657ea8d61f4a51" +checksum = "b6e8aaa3f231bb4bd57b84b2d5dc3ae7f350265df8aa96492e0bc394a1571909" dependencies 
= [ "hashbrown", ] @@ -3952,9 +3911,9 @@ dependencies = [ [[package]] name = "lz4" -version = "1.23.2" +version = "1.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aac20ed6991e01bf6a2e68cc73df2b389707403662a8ba89f68511fb340f724c" +checksum = "7e9e2dd86df36ce760a60f6ff6ad526f7ba1f14ba0356f8254fb6905e6494df1" dependencies = [ "libc", "lz4-sys", @@ -3962,9 +3921,9 @@ dependencies = [ [[package]] name = "lz4-sys" -version = "1.9.2" +version = "1.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dca79aa95d8b3226213ad454d328369853be3a1382d89532a854f4d69640acae" +checksum = "57d27b317e207b10f69f5e75494119e391a96f48861ae870d1da6edac98ca900" dependencies = [ "cc", "libc", @@ -4039,15 +3998,6 @@ dependencies = [ "rustix", ] -[[package]] -name = "memmap2" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "723e3ebdcdc5c023db1df315364573789f8857c11b631a2fdfad7c00f5c046b4" -dependencies = [ - "libc", -] - [[package]] name = "memmap2" version = "0.5.0" @@ -4083,7 +4033,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce95ae042940bad7e312857b929ee3d11b8f799a80cb7b9c7ec5125516906395" dependencies = [ - "lru 0.8.0", + "lru", ] [[package]] @@ -4166,7 +4116,7 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2be9a9090bc1cac2930688fa9478092a64c6a92ddc6ae0692d46b37d9cab709" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "downcast", "fragile", "lazy_static", @@ -4181,7 +4131,7 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "86d702a0530a0141cf4ed147cf5ec7be6f2c187d4e37fcbefc39cf34116bfe8f" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "proc-macro2", "quote", "syn", @@ -4277,7 +4227,7 @@ dependencies = [ "matrixmultiply", "nalgebra-macros", "num-complex", - "num-rational 0.4.0", + "num-rational", "num-traits", "rand 0.8.5", "rand_distr", @@ -4403,7 +4353,7 @@ checksum = "9f866317acbd3a240710c63f065ffb1e4fd466259045ccb504130b7f668f35c6" dependencies = [ "bitflags", "cc", - "cfg-if 1.0.0", + "cfg-if", "libc", "memoffset", ] @@ -4415,17 +4365,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f17df307904acd05aa8e32e97bb20f2a0df1728bbc2d771ae8f9a90463441e9" dependencies = [ "bitflags", - "cfg-if 1.0.0", + "cfg-if", "libc", "memoffset", ] -[[package]] -name = "nodrop" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" - [[package]] name = "nohash-hasher" version = "0.2.0" @@ -4458,17 +4402,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "num-bigint" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" -dependencies = [ - "autocfg", - "num-integer", - "num-traits", -] - [[package]] name = "num-bigint" version = "0.4.3" @@ -4491,12 +4424,12 @@ dependencies = [ [[package]] name = "num-format" -version = "0.4.0" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bafe4179722c2894288ee77a9f044f02811c86af699344c498b0840c698a2465" +checksum = "54b862ff8df690cf089058c98b183676a7ed0f974cc08b426800093227cbff3b" dependencies = [ - "arrayvec 0.4.12", - "itoa 0.4.8", + "arrayvec 0.7.2", + "itoa", ] [[package]] @@ -4511,24 +4444,12 @@ dependencies = [ [[package]] 
name = "num-rational" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" -dependencies = [ - "autocfg", - "num-bigint 0.2.6", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-rational" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d41702bd167c2df5520b384281bc111a4b5efcf7fbc4c9c222c815b07e0a6a6a" +checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0" dependencies = [ "autocfg", - "num-bigint 0.4.3", + "num-bigint", "num-integer", "num-traits", ] @@ -4599,7 +4520,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7ae222234c30df141154f159066c5093ff73b63204dcda7121eb082fc56a95" dependencies = [ "bitflags", - "cfg-if 1.0.0", + "cfg-if", "foreign-types", "libc", "once_cell", @@ -4690,7 +4611,7 @@ checksum = "20448fd678ec04e6ea15bbe0476874af65e98a01515d667aa49f1434dc44ebf4" [[package]] name = "pallet-assets" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-benchmarking", "frame-support", @@ -4704,7 +4625,7 @@ dependencies = [ [[package]] name = "pallet-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-support", "frame-system", @@ -4720,7 +4641,7 @@ dependencies = [ [[package]] name = "pallet-authorship" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-support", "frame-system", @@ -4735,7 +4656,7 @@ dependencies = [ [[package]] name = "pallet-babe" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-benchmarking", "frame-support", @@ -4759,7 +4680,7 @@ dependencies = [ [[package]] name = "pallet-bags-list" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -4779,7 +4700,7 @@ dependencies = [ [[package]] name = "pallet-bags-list-remote-tests" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-election-provider-support", "frame-support", @@ -4798,7 +4719,7 @@ dependencies = [ [[package]] name = "pallet-balances" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = 
"git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-benchmarking", "frame-support", @@ -4813,7 +4734,7 @@ dependencies = [ [[package]] name = "pallet-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "beefy-primitives", "frame-support", @@ -4829,7 +4750,7 @@ dependencies = [ [[package]] name = "pallet-beefy-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "array-bytes", "beefy-merkle-tree", @@ -4852,7 +4773,7 @@ dependencies = [ [[package]] name = "pallet-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-benchmarking", "frame-support", @@ -4870,7 +4791,7 @@ dependencies = [ [[package]] name = "pallet-child-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-benchmarking", "frame-support", @@ -4889,7 +4810,7 @@ dependencies = [ [[package]] name = "pallet-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-benchmarking", "frame-support", @@ -4906,7 +4827,7 @@ dependencies = [ [[package]] name = "pallet-conviction-voting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "assert_matches", "frame-benchmarking", @@ -4923,7 +4844,7 @@ dependencies = [ [[package]] name = "pallet-democracy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-benchmarking", "frame-support", @@ -4941,7 +4862,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-multi-phase" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -4965,7 +4886,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-support-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = 
"git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -4978,7 +4899,7 @@ dependencies = [ [[package]] name = "pallet-elections-phragmen" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-benchmarking", "frame-support", @@ -4996,7 +4917,7 @@ dependencies = [ [[package]] name = "pallet-fast-unstake" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5014,7 +4935,7 @@ dependencies = [ [[package]] name = "pallet-gilt" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5029,7 +4950,7 @@ dependencies = [ [[package]] name = "pallet-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5052,7 +4973,7 @@ dependencies = [ [[package]] name = "pallet-identity" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "enumflags2", "frame-benchmarking", @@ -5068,7 +4989,7 @@ dependencies = [ [[package]] name = "pallet-im-online" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5088,7 +5009,7 @@ dependencies = [ [[package]] name = "pallet-indices" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5105,7 +5026,7 @@ dependencies = [ [[package]] name = "pallet-membership" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5122,7 +5043,7 @@ dependencies = [ [[package]] name = "pallet-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ 
"ckb-merkle-mountain-range", "frame-benchmarking", @@ -5140,8 +5061,9 @@ dependencies = [ [[package]] name = "pallet-mmr-rpc" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ + "anyhow", "jsonrpsee", "parity-scale-codec", "serde", @@ -5155,7 +5077,7 @@ dependencies = [ [[package]] name = "pallet-multisig" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5171,7 +5093,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-support", "frame-system", @@ -5188,7 +5110,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-benchmarking" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5208,7 +5130,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-runtime-api" version = "1.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "parity-scale-codec", "sp-api", @@ -5218,7 +5140,7 @@ dependencies = [ [[package]] name = "pallet-offences" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-support", "frame-system", @@ -5235,7 +5157,7 @@ dependencies = [ [[package]] name = "pallet-offences-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5258,7 +5180,7 @@ dependencies = [ [[package]] name = "pallet-preimage" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5275,7 +5197,7 @@ dependencies = [ [[package]] name = "pallet-proxy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5290,7 +5212,7 @@ dependencies = [ 
[[package]] name = "pallet-ranked-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5308,7 +5230,7 @@ dependencies = [ [[package]] name = "pallet-recovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5323,7 +5245,7 @@ dependencies = [ [[package]] name = "pallet-referenda" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "assert_matches", "frame-benchmarking", @@ -5341,7 +5263,7 @@ dependencies = [ [[package]] name = "pallet-scheduler" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5357,7 +5279,7 @@ dependencies = [ [[package]] name = "pallet-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-support", "frame-system", @@ -5378,7 +5300,7 @@ dependencies = [ [[package]] name = "pallet-session-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5394,7 +5316,7 @@ dependencies = [ [[package]] name = "pallet-society" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-support", "frame-system", @@ -5408,7 +5330,7 @@ dependencies = [ [[package]] name = "pallet-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5431,7 +5353,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-curve" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -5442,7 +5364,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-fn" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "log", "sp-arithmetic", @@ -5451,7 +5373,7 @@ dependencies = [ [[package]] name = "pallet-sudo" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-support", "frame-system", @@ -5465,7 +5387,7 @@ dependencies = [ [[package]] name = "pallet-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5483,7 +5405,7 @@ dependencies = [ [[package]] name = "pallet-tips" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5502,7 +5424,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-support", "frame-system", @@ -5518,7 +5440,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "jsonrpsee", "pallet-transaction-payment-rpc-runtime-api", @@ -5528,23 +5450,25 @@ dependencies = [ "sp-core", "sp-rpc", "sp-runtime", + "sp-weights", ] [[package]] name = "pallet-transaction-payment-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "pallet-transaction-payment", "parity-scale-codec", "sp-api", "sp-runtime", + "sp-weights", ] [[package]] name = "pallet-treasury" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5561,7 +5485,7 @@ dependencies = [ [[package]] name = "pallet-utility" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5577,7 +5501,7 @@ dependencies = [ [[package]] name = "pallet-vesting" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5592,7 +5516,7 @@ dependencies = [ [[package]] name = "pallet-whitelist" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5653,19 +5577,19 @@ dependencies = [ [[package]] name = "parity-db" -version = "0.3.16" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bb474d0ed0836e185cb998a6b140ed1073d1fbf27d690ecf9ede8030289382c" +checksum = "3a7511a0bec4a336b5929999d02b560d2439c993cccf98c26481484e811adc43" dependencies = [ - "blake2-rfc", + "blake2", "crc32fast", "fs2", "hex", "libc", "log", "lz4", - "memmap2 0.2.3", - "parking_lot 0.11.2", + "memmap2", + "parking_lot 0.12.1", "rand 0.8.5", "snap", ] @@ -5709,7 +5633,7 @@ version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d32c34f4f5ca7f9196001c0aba5a1f9a5a12382c8944b8b0f90233282d1e8f8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "hashbrown", "impl-trait-for-tuples", "parity-util-mem-derive", @@ -5771,7 +5695,7 @@ version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "instant", "libc", "redox_syscall", @@ -5785,7 +5709,7 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28141e0cc4143da2443301914478dc976a61ffdb3f043058310c70df2fed8954" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "redox_syscall", "smallvec", @@ -6014,7 +5938,7 @@ dependencies = [ "fatality", "futures", "futures-timer", - "lru 0.8.0", + "lru", "parity-scale-codec", "polkadot-erasure-coding", "polkadot-node-network-protocol", @@ -6044,7 +5968,7 @@ dependencies = [ "futures", "futures-timer", "log", - "lru 0.8.0", + "lru", "parity-scale-codec", "polkadot-erasure-coding", "polkadot-node-network-protocol", @@ -6184,7 +6108,7 @@ dependencies = [ "futures-timer", "indexmap", "lazy_static", - "lru 0.8.0", + "lru", "parity-scale-codec", "polkadot-erasure-coding", "polkadot-node-network-protocol", @@ -6304,7 +6228,7 @@ dependencies = [ "futures-timer", "kvdb", "kvdb-memorydb", - "lru 0.8.0", + "lru", "merlin", "parity-scale-codec", "parking_lot 0.12.1", @@ -6408,6 +6332,7 @@ dependencies = [ "assert_matches", "async-trait", "futures", + "futures-timer", "parity-scale-codec", "polkadot-node-core-pvf", "polkadot-node-primitives", @@ -6473,7 +6398,7 @@ dependencies = [ "futures-timer", "kvdb", "kvdb-memorydb", - "lru 0.8.0", + "lru", "parity-scale-codec", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -6757,10 +6682,11 @@ dependencies = [ "futures", "itertools", "kvdb", + "kvdb-memorydb", "kvdb-shared-tests", "lazy_static", "log", - "lru 0.8.0", + "lru", "parity-db", "parity-scale-codec", "parity-util-mem", @@ -6794,7 +6720,7 @@ dependencies = [ "femme", "futures", "futures-timer", - "lru 0.8.0", + "lru", "orchestra", "parity-util-mem", "parking_lot 0.12.1", @@ -7029,6 +6955,7 @@ dependencies = [ "pallet-election-provider-multi-phase", "pallet-session", 
"pallet-staking", + "pallet-staking-reward-fn", "pallet-timestamp", "pallet-transaction-payment", "pallet-treasury", @@ -7149,7 +7076,7 @@ dependencies = [ "kvdb", "kvdb-rocksdb", "log", - "lru 0.8.0", + "lru", "pallet-babe", "pallet-im-online", "pallet-staking", @@ -7476,7 +7403,7 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "685404d509889fade3e86fe3a5803bca2ec09b0c0778d5ada6ec8bf7a8de5259" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "log", "wepoll-ffi", @@ -7500,7 +7427,7 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8419d2b623c7c0896ff2d5d96e2cb4ede590fed28fcc34934f4c33c036e620a1" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "opaque-debug 0.3.0", "universal-hash", @@ -7513,7 +7440,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55f35f865aa964be21fcde114cbd1cfbd9bf8a471460ed965b0f84f96c711401" dependencies = [ "backtrace", - "cfg-if 1.0.0", + "cfg-if", "findshlibs", "lazy_static", "libc", @@ -7652,7 +7579,7 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7f64969ffd5dd8f39bd57a68ac53c163a095ed9d0fb707146da1b27025a3504" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "fnv", "lazy_static", "memchr", @@ -7667,7 +7594,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83cd1b99916654a69008fd66b4f9397fbe08e6e51dfe23d4417acf5d3b8cb87c" dependencies = [ "dtoa", - "itoa 1.0.1", + "itoa", "parking_lot 0.12.1", "prometheus-client-derive-text-encode", ] @@ -8063,7 +7990,7 @@ dependencies = [ [[package]] name = "remote-externalities" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "env_logger 0.9.0", "log", @@ -8402,7 +8329,7 @@ dependencies = [ [[package]] name = "sc-allocator" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "log", "sp-core", @@ -8413,7 +8340,7 @@ dependencies = [ [[package]] name = "sc-authority-discovery" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "async-trait", "futures", @@ -8440,7 +8367,7 @@ dependencies = [ [[package]] name = "sc-basic-authorship" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "futures", "futures-timer", @@ -8463,7 +8390,7 @@ dependencies = [ [[package]] name = "sc-block-builder" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "parity-scale-codec", "sc-client-api", @@ -8479,10 +8406,10 @@ dependencies = [ [[package]] name = "sc-chain-spec" version = "4.0.0-dev" 
-source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "impl-trait-for-tuples", - "memmap2 0.5.0", + "memmap2", "parity-scale-codec", "sc-chain-spec-derive", "sc-network-common", @@ -8496,7 +8423,7 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -8507,7 +8434,7 @@ dependencies = [ [[package]] name = "sc-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "array-bytes", "chrono", @@ -8547,7 +8474,7 @@ dependencies = [ [[package]] name = "sc-client-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "fnv", "futures", @@ -8575,7 +8502,7 @@ dependencies = [ [[package]] name = "sc-client-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "hash-db", "kvdb", @@ -8600,7 +8527,7 @@ dependencies = [ [[package]] name = "sc-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "async-trait", "futures", @@ -8624,19 +8551,18 @@ dependencies = [ [[package]] name = "sc-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "async-trait", "fork-tree", "futures", "log", "merlin", - "num-bigint 0.2.6", - "num-rational 0.2.4", + "num-bigint", + "num-rational", "num-traits", "parity-scale-codec", "parking_lot 0.12.1", - "rand 0.7.3", "sc-client-api", "sc-consensus", "sc-consensus-epochs", @@ -8666,7 +8592,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "futures", "jsonrpsee", @@ -8688,7 +8614,7 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "fork-tree", "parity-scale-codec", @@ -8701,7 +8627,7 @@ dependencies = [ 
[[package]] name = "sc-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "async-trait", "futures", @@ -8725,10 +8651,10 @@ dependencies = [ [[package]] name = "sc-executor" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "lazy_static", - "lru 0.7.8", + "lru", "parity-scale-codec", "parking_lot 0.12.1", "sc-executor-common", @@ -8752,7 +8678,7 @@ dependencies = [ [[package]] name = "sc-executor-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "environmental", "parity-scale-codec", @@ -8768,7 +8694,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmi" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "log", "parity-scale-codec", @@ -8783,9 +8709,9 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "log", "once_cell", @@ -8803,7 +8729,7 @@ dependencies = [ [[package]] name = "sc-finality-grandpa" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "ahash", "array-bytes", @@ -8844,7 +8770,7 @@ dependencies = [ [[package]] name = "sc-finality-grandpa-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "finality-grandpa", "futures", @@ -8865,7 +8791,7 @@ dependencies = [ [[package]] name = "sc-informant" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "ansi_term", "futures", @@ -8882,7 +8808,7 @@ dependencies = [ [[package]] name = "sc-keystore" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "array-bytes", "async-trait", @@ -8897,7 +8823,7 @@ dependencies = [ [[package]] name = "sc-network" version = "0.10.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "array-bytes", "async-trait", @@ -8915,7 +8841,7 @@ dependencies = [ "linked-hash-map", "linked_hash_set", "log", - "lru 0.7.8", + "lru", "parity-scale-codec", "parking_lot 0.12.1", "pin-project", @@ -8944,7 +8870,7 @@ dependencies = [ [[package]] name = "sc-network-bitswap" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "cid", "futures", @@ -8964,7 +8890,7 @@ dependencies = [ [[package]] name = "sc-network-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "async-trait", "bitflags", @@ -8990,14 +8916,14 @@ dependencies = [ [[package]] name = "sc-network-gossip" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "ahash", "futures", "futures-timer", "libp2p", "log", - "lru 0.7.8", + "lru", "sc-network-common", "sc-peerset", "sp-runtime", @@ -9008,7 +8934,7 @@ dependencies = [ [[package]] name = "sc-network-light" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "array-bytes", "futures", @@ -9029,14 +8955,14 @@ dependencies = [ [[package]] name = "sc-network-sync" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "array-bytes", "fork-tree", "futures", "libp2p", "log", - "lru 0.7.8", + "lru", "mockall", "parity-scale-codec", "prost", @@ -9059,7 +8985,7 @@ dependencies = [ [[package]] name = "sc-network-transactions" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "array-bytes", "futures", @@ -9078,7 +9004,7 @@ dependencies = [ [[package]] name = "sc-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "array-bytes", "bytes", @@ -9108,7 +9034,7 @@ dependencies = [ [[package]] name = "sc-peerset" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "futures", "libp2p", @@ -9121,7 +9047,7 @@ 
dependencies = [ [[package]] name = "sc-proposer-metrics" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -9130,7 +9056,7 @@ dependencies = [ [[package]] name = "sc-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "futures", "hash-db", @@ -9160,7 +9086,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "futures", "jsonrpsee", @@ -9183,7 +9109,7 @@ dependencies = [ [[package]] name = "sc-rpc-server" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "futures", "jsonrpsee", @@ -9196,7 +9122,7 @@ dependencies = [ [[package]] name = "sc-rpc-spec-v2" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "futures", "hex", @@ -9215,7 +9141,7 @@ dependencies = [ [[package]] name = "sc-service" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "async-trait", "directories", @@ -9286,7 +9212,7 @@ dependencies = [ [[package]] name = "sc-state-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "log", "parity-scale-codec", @@ -9300,7 +9226,7 @@ dependencies = [ [[package]] name = "sc-sync-state-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -9319,7 +9245,7 @@ dependencies = [ [[package]] name = "sc-sysinfo" version = "6.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "futures", "libc", @@ -9338,7 +9264,7 @@ dependencies = [ [[package]] name = "sc-telemetry" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" 
dependencies = [ "chrono", "futures", @@ -9356,7 +9282,7 @@ dependencies = [ [[package]] name = "sc-tracing" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "ansi_term", "atty", @@ -9387,7 +9313,7 @@ dependencies = [ [[package]] name = "sc-tracing-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -9398,7 +9324,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "async-trait", "futures", @@ -9425,7 +9351,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "async-trait", "futures", @@ -9439,7 +9365,7 @@ dependencies = [ [[package]] name = "sc-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "futures", "futures-timer", @@ -9456,7 +9382,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c46be926081c9f4dd5dd9b6f1d3e3229f2360bc6502dd8836f84a93b7c75e99a" dependencies = [ "bitvec", - "cfg-if 1.0.0", + "cfg-if", "derive_more", "parity-scale-codec", "scale-info-derive", @@ -9654,7 +9580,7 @@ version = "1.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e55a28e3aaef9d5ce0506d0a14dbba8054ddc7e499ef522dd8b26859ec9d4a44" dependencies = [ - "itoa 1.0.1", + "itoa", "ryu", "serde", ] @@ -9675,7 +9601,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", - "itoa 1.0.1", + "itoa", "ryu", "serde", ] @@ -9699,7 +9625,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" dependencies = [ "block-buffer 0.9.0", - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.9.0", "opaque-debug 0.3.0", @@ -9711,7 +9637,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.10.3", ] @@ -9735,7 +9661,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b69f9a4c9740d74c5baa3fd2e547f9525fa8088a8a958e0ca2409a514e33f5fa" dependencies = [ "block-buffer 0.9.0", - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.9.0", "opaque-debug 0.3.0", @@ -9747,7 +9673,7 @@ version = "0.10.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "55deaec60f81eefe3cce0dc50bda92d6d8e88f2a27df7c5033b42afeb1ed2676" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.10.3", ] @@ -9920,7 +9846,7 @@ dependencies = [ [[package]] name = "sp-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "hash-db", "log", @@ -9938,7 +9864,7 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "blake2", "proc-macro-crate", @@ -9950,7 +9876,7 @@ dependencies = [ [[package]] name = "sp-application-crypto" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "parity-scale-codec", "scale-info", @@ -9963,7 +9889,7 @@ dependencies = [ [[package]] name = "sp-arithmetic" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "integer-sqrt", "num-traits", @@ -9978,7 +9904,7 @@ dependencies = [ [[package]] name = "sp-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "parity-scale-codec", "scale-info", @@ -9991,7 +9917,7 @@ dependencies = [ [[package]] name = "sp-authorship" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "async-trait", "parity-scale-codec", @@ -10003,7 +9929,7 @@ dependencies = [ [[package]] name = "sp-block-builder" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "parity-scale-codec", "sp-api", @@ -10015,11 +9941,11 @@ dependencies = [ [[package]] name = "sp-blockchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "futures", "log", - "lru 0.7.8", + "lru", "parity-scale-codec", "parking_lot 0.12.1", "sp-api", @@ -10033,7 +9959,7 @@ dependencies = [ [[package]] name = "sp-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies 
= [ "async-trait", "futures", @@ -10052,7 +9978,7 @@ dependencies = [ [[package]] name = "sp-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "async-trait", "merlin", @@ -10075,7 +10001,7 @@ dependencies = [ [[package]] name = "sp-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "parity-scale-codec", "scale-info", @@ -10089,7 +10015,7 @@ dependencies = [ [[package]] name = "sp-consensus-vrf" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "parity-scale-codec", "scale-info", @@ -10102,7 +10028,7 @@ dependencies = [ [[package]] name = "sp-core" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "array-bytes", "base58", @@ -10148,7 +10074,7 @@ dependencies = [ [[package]] name = "sp-core-hashing" version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "blake2", "byteorder", @@ -10162,7 +10088,7 @@ dependencies = [ [[package]] name = "sp-core-hashing-proc-macro" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "proc-macro2", "quote", @@ -10173,7 +10099,7 @@ dependencies = [ [[package]] name = "sp-database" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "kvdb", "parking_lot 0.12.1", @@ -10182,7 +10108,7 @@ dependencies = [ [[package]] name = "sp-debug-derive" version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "proc-macro2", "quote", @@ -10192,7 +10118,7 @@ dependencies = [ [[package]] name = "sp-externalities" version = "0.12.0" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "environmental", "parity-scale-codec", @@ -10203,7 +10129,7 @@ dependencies = [ [[package]] name = "sp-finality-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source 
= "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "finality-grandpa", "log", @@ -10221,7 +10147,7 @@ dependencies = [ [[package]] name = "sp-inherents" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "async-trait", "impl-trait-for-tuples", @@ -10235,7 +10161,7 @@ dependencies = [ [[package]] name = "sp-io" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "bytes", "futures", @@ -10261,7 +10187,7 @@ dependencies = [ [[package]] name = "sp-keyring" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "lazy_static", "sp-core", @@ -10272,7 +10198,7 @@ dependencies = [ [[package]] name = "sp-keystore" version = "0.12.0" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "async-trait", "futures", @@ -10289,7 +10215,7 @@ dependencies = [ [[package]] name = "sp-maybe-compressed-blob" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "thiserror", "zstd", @@ -10298,7 +10224,7 @@ dependencies = [ [[package]] name = "sp-mmr-primitives" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "log", "parity-scale-codec", @@ -10309,12 +10235,13 @@ dependencies = [ "sp-debug-derive", "sp-runtime", "sp-std", + "thiserror", ] [[package]] name = "sp-npos-elections" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "parity-scale-codec", "scale-info", @@ -10328,7 +10255,7 @@ dependencies = [ [[package]] name = "sp-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "sp-api", "sp-core", @@ -10338,7 +10265,7 @@ dependencies = [ [[package]] name = "sp-panic-handler" version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "backtrace", "lazy_static", @@ -10348,7 +10275,7 @@ dependencies = [ [[package]] name = "sp-rpc" 
version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "rustc-hash", "serde", @@ -10358,7 +10285,7 @@ dependencies = [ [[package]] name = "sp-runtime" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "either", "hash256-std-hasher", @@ -10381,7 +10308,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -10399,7 +10326,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "Inflector", "proc-macro-crate", @@ -10411,7 +10338,7 @@ dependencies = [ [[package]] name = "sp-sandbox" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "log", "parity-scale-codec", @@ -10425,7 +10352,7 @@ dependencies = [ [[package]] name = "sp-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "parity-scale-codec", "scale-info", @@ -10439,7 +10366,7 @@ dependencies = [ [[package]] name = "sp-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "parity-scale-codec", "scale-info", @@ -10450,7 +10377,7 @@ dependencies = [ [[package]] name = "sp-state-machine" version = "0.12.0" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "hash-db", "log", @@ -10472,12 +10399,12 @@ dependencies = [ [[package]] name = "sp-std" version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" [[package]] name = "sp-storage" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "impl-serde", "parity-scale-codec", @@ -10490,7 +10417,7 @@ dependencies = [ [[package]] name = 
"sp-tasks" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "log", "sp-core", @@ -10503,7 +10430,7 @@ dependencies = [ [[package]] name = "sp-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "async-trait", "futures-timer", @@ -10519,7 +10446,7 @@ dependencies = [ [[package]] name = "sp-tracing" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "parity-scale-codec", "sp-std", @@ -10531,7 +10458,7 @@ dependencies = [ [[package]] name = "sp-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "sp-api", "sp-runtime", @@ -10540,7 +10467,7 @@ dependencies = [ [[package]] name = "sp-transaction-storage-proof" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "async-trait", "log", @@ -10556,13 +10483,13 @@ dependencies = [ [[package]] name = "sp-trie" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "ahash", "hash-db", "hashbrown", "lazy_static", - "lru 0.7.8", + "lru", "memory-db", "nohash-hasher", "parity-scale-codec", @@ -10579,7 +10506,7 @@ dependencies = [ [[package]] name = "sp-version" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "impl-serde", "parity-scale-codec", @@ -10596,7 +10523,7 @@ dependencies = [ [[package]] name = "sp-version-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "parity-scale-codec", "proc-macro2", @@ -10607,7 +10534,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "impl-trait-for-tuples", "log", @@ -10620,7 +10547,7 @@ dependencies = [ [[package]] name = "sp-weights" version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = 
"git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -10835,7 +10762,7 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "platforms", ] @@ -10843,7 +10770,7 @@ dependencies = [ [[package]] name = "substrate-frame-rpc-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "frame-system-rpc-runtime-api", "futures", @@ -10864,7 +10791,7 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "futures-util", "hyper", @@ -10877,7 +10804,7 @@ dependencies = [ [[package]] name = "substrate-rpc-client" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "async-trait", "jsonrpsee", @@ -10890,7 +10817,7 @@ dependencies = [ [[package]] name = "substrate-state-trie-migration-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "jsonrpsee", "log", @@ -10911,7 +10838,7 @@ dependencies = [ [[package]] name = "substrate-test-client" version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "array-bytes", "async-trait", @@ -10937,7 +10864,7 @@ dependencies = [ [[package]] name = "substrate-test-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "futures", "substrate-test-utils-derive", @@ -10947,7 +10874,7 @@ dependencies = [ [[package]] name = "substrate-test-utils-derive" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -10958,7 +10885,7 @@ dependencies = [ [[package]] name = "substrate-wasm-builder" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "ansi_term", 
"build-helper", @@ -10994,7 +10921,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e92a52f07eed9afba3d6f883652cde7cd75fcf327dd44e84f210958379158737" dependencies = [ "debugid", - "memmap2 0.5.0", + "memmap2", "stable_deref_trait", "uuid", ] @@ -11072,7 +10999,7 @@ version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "fastrand", "libc", "redox_syscall", @@ -11470,7 +11397,7 @@ version = "0.1.35" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a400e31aa60b9d44a52a8ee0343b5b18566b03a8321e0d321f695cf56e940160" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "pin-project-lite 0.2.7", "tracing-attributes", "tracing-core", @@ -11620,7 +11547,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4f7f83d1e4a0e4358ac54c5c3681e5d7da5efc5a7a632c90bb6d6669ddd9bc26" dependencies = [ "async-trait", - "cfg-if 1.0.0", + "cfg-if", "data-encoding", "enum-as-inner", "futures-channel", @@ -11643,7 +11570,7 @@ version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aff21aa4dcefb0a1afbfac26deb0adc93888c7d295fb63ab273ef276ba2b7cfe" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "futures-util", "ipconfig", "lazy_static", @@ -11665,7 +11592,7 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "try-runtime-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#3cd6db7907bd0efc9e4299b168196d97651dfc25" +source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" dependencies = [ "clap", "frame-try-runtime", @@ -11736,7 +11663,7 @@ version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "digest 0.10.3", "rand 0.8.5", "static_assertions", @@ -11958,7 +11885,7 @@ version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "632f73e236b219150ea279196e54e610f5dbafa5d61786303d4da54f84e47fce" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "serde", "serde_json", "wasm-bindgen-macro", @@ -11985,7 +11912,7 @@ version = "0.4.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e8d7523cb1f2a4c96c1317ca690031b714a51cc14e05f712446691f413f5d39" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "wasm-bindgen", "web-sys", @@ -12114,7 +12041,7 @@ dependencies = [ "downcast-rs", "libm", "memory_units", - "num-rational 0.4.0", + "num-rational", "num-traits", ] @@ -12135,7 +12062,7 @@ checksum = "8a10dc9784d8c3a33c970e3939180424955f08af2e7f20368ec02685a0e8f065" dependencies = [ "anyhow", "bincode", - "cfg-if 1.0.0", + "cfg-if", "indexmap", "libc", "log", @@ -12161,7 +12088,7 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee4dbdc6daf68528cad1275ac91e3f51848ce9824385facc94c759f529decdf8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -12233,7 +12160,7 @@ dependencies = [ "addr2line", "anyhow", "bincode", - "cfg-if 1.0.0", + "cfg-if", "cpp_demangle", "gimli", "log", @@ -12268,7 +12195,7 @@ checksum = "ae79e0515160bd5abee5df50a16c4eb8db9f71b530fc988ae1d9ce34dcb8dd01" dependencies = [ "anyhow", "cc", - "cfg-if 1.0.0", + "cfg-if", 
"indexmap", "libc", "log", diff --git a/node/client/src/lib.rs b/node/client/src/lib.rs index ae5d6b4d05db..d2c119ba04a8 100644 --- a/node/client/src/lib.rs +++ b/node/client/src/lib.rs @@ -327,7 +327,7 @@ impl UsageProvider for Client { impl sc_client_api::BlockBackend for Client { fn block_body( &self, - hash: &::Hash, + hash: ::Hash, ) -> sp_blockchain::Result::Extrinsic>>> { with_client! { self, @@ -360,7 +360,7 @@ impl sc_client_api::BlockBackend for Client { fn justifications( &self, - hash: &::Hash, + hash: ::Hash, ) -> sp_blockchain::Result> { with_client! { self, @@ -386,7 +386,7 @@ impl sc_client_api::BlockBackend for Client { fn indexed_transaction( &self, - id: &::Hash, + id: ::Hash, ) -> sp_blockchain::Result>> { with_client! { self, @@ -399,7 +399,7 @@ impl sc_client_api::BlockBackend for Client { fn block_indexed_body( &self, - id: &::Hash, + id: ::Hash, ) -> sp_blockchain::Result>>> { with_client! { self, @@ -424,7 +424,7 @@ impl sc_client_api::BlockBackend for Client { impl sc_client_api::StorageProvider for Client { fn storage( &self, - hash: &::Hash, + hash: ::Hash, key: &StorageKey, ) -> sp_blockchain::Result> { with_client! { @@ -438,7 +438,7 @@ impl sc_client_api::StorageProvider for Client { fn storage_keys( &self, - hash: &::Hash, + hash: ::Hash, key_prefix: &StorageKey, ) -> sp_blockchain::Result> { with_client! { @@ -452,7 +452,7 @@ impl sc_client_api::StorageProvider for Client { fn storage_hash( &self, - hash: &::Hash, + hash: ::Hash, key: &StorageKey, ) -> sp_blockchain::Result::Hash>> { with_client! { @@ -466,7 +466,7 @@ impl sc_client_api::StorageProvider for Client { fn storage_pairs( &self, - hash: &::Hash, + hash: ::Hash, key_prefix: &StorageKey, ) -> sp_blockchain::Result> { with_client! { @@ -480,7 +480,7 @@ impl sc_client_api::StorageProvider for Client { fn storage_keys_iter<'a>( &self, - hash: &::Hash, + hash: ::Hash, prefix: Option<&'a StorageKey>, start_key: Option<&StorageKey>, ) -> sp_blockchain::Result< @@ -497,7 +497,7 @@ impl sc_client_api::StorageProvider for Client { fn child_storage( &self, - hash: &::Hash, + hash: ::Hash, child_info: &ChildInfo, key: &StorageKey, ) -> sp_blockchain::Result> { @@ -512,7 +512,7 @@ impl sc_client_api::StorageProvider for Client { fn child_storage_keys( &self, - hash: &::Hash, + hash: ::Hash, child_info: &ChildInfo, key_prefix: &StorageKey, ) -> sp_blockchain::Result> { @@ -527,7 +527,7 @@ impl sc_client_api::StorageProvider for Client { fn child_storage_keys_iter<'a>( &self, - hash: &::Hash, + hash: ::Hash, child_info: ChildInfo, prefix: Option<&'a StorageKey>, start_key: Option<&StorageKey>, @@ -545,7 +545,7 @@ impl sc_client_api::StorageProvider for Client { fn child_storage_hash( &self, - hash: &::Hash, + hash: ::Hash, child_info: &ChildInfo, key: &StorageKey, ) -> sp_blockchain::Result::Hash>> { diff --git a/node/core/approval-voting/src/approval_db/v1/mod.rs b/node/core/approval-voting/src/approval_db/v1/mod.rs index 03b7aa68f134..858bcb8c36fe 100644 --- a/node/core/approval-voting/src/approval_db/v1/mod.rs +++ b/node/core/approval-voting/src/approval_db/v1/mod.rs @@ -90,41 +90,45 @@ impl Backend for DbBackend { match op { BackendWriteOp::WriteStoredBlockRange(stored_block_range) => { tx.put_vec( - self.config.col_data, + self.config.col_approval_data, &STORED_BLOCKS_KEY, stored_block_range.encode(), ); }, BackendWriteOp::DeleteStoredBlockRange => { - tx.delete(self.config.col_data, &STORED_BLOCKS_KEY); + tx.delete(self.config.col_approval_data, &STORED_BLOCKS_KEY); }, 
BackendWriteOp::WriteBlocksAtHeight(h, blocks) => { - tx.put_vec(self.config.col_data, &blocks_at_height_key(h), blocks.encode()); + tx.put_vec( + self.config.col_approval_data, + &blocks_at_height_key(h), + blocks.encode(), + ); }, BackendWriteOp::DeleteBlocksAtHeight(h) => { - tx.delete(self.config.col_data, &blocks_at_height_key(h)); + tx.delete(self.config.col_approval_data, &blocks_at_height_key(h)); }, BackendWriteOp::WriteBlockEntry(block_entry) => { let block_entry: BlockEntry = block_entry.into(); tx.put_vec( - self.config.col_data, + self.config.col_approval_data, &block_entry_key(&block_entry.block_hash), block_entry.encode(), ); }, BackendWriteOp::DeleteBlockEntry(hash) => { - tx.delete(self.config.col_data, &block_entry_key(&hash)); + tx.delete(self.config.col_approval_data, &block_entry_key(&hash)); }, BackendWriteOp::WriteCandidateEntry(candidate_entry) => { let candidate_entry: CandidateEntry = candidate_entry.into(); tx.put_vec( - self.config.col_data, + self.config.col_approval_data, &candidate_entry_key(&candidate_entry.candidate.hash()), candidate_entry.encode(), ); }, BackendWriteOp::DeleteCandidateEntry(candidate_hash) => { - tx.delete(self.config.col_data, &candidate_entry_key(&candidate_hash)); + tx.delete(self.config.col_approval_data, &candidate_entry_key(&candidate_hash)); }, } } @@ -149,7 +153,9 @@ pub type Bitfield = BitVec; #[derive(Debug, Clone, Copy)] pub struct Config { /// The column family in the database where data is stored. - pub col_data: u32, + pub col_approval_data: u32, + /// The column of the database where rolling session window data is stored. + pub col_session_data: u32, } /// Details pertaining to our assignment on a block. @@ -243,10 +249,10 @@ pub type Result = std::result::Result; pub(crate) fn load_decode( store: &dyn Database, - col_data: u32, + col_approval_data: u32, key: &[u8], ) -> Result> { - match store.get(col_data, key)? { + match store.get(col_approval_data, key)? 
{ None => Ok(None), Some(raw) => D::decode(&mut &raw[..]).map(Some).map_err(Into::into), } @@ -303,7 +309,7 @@ pub fn load_stored_blocks( store: &dyn Database, config: &Config, ) -> SubsystemResult> { - load_decode(store, config.col_data, STORED_BLOCKS_KEY) + load_decode(store, config.col_approval_data, STORED_BLOCKS_KEY) .map_err(|e| SubsystemError::with_origin("approval-voting", e)) } @@ -313,7 +319,7 @@ pub fn load_blocks_at_height( config: &Config, block_number: &BlockNumber, ) -> SubsystemResult> { - load_decode(store, config.col_data, &blocks_at_height_key(*block_number)) + load_decode(store, config.col_approval_data, &blocks_at_height_key(*block_number)) .map(|x| x.unwrap_or_default()) .map_err(|e| SubsystemError::with_origin("approval-voting", e)) } @@ -324,7 +330,7 @@ pub fn load_block_entry( config: &Config, block_hash: &Hash, ) -> SubsystemResult> { - load_decode(store, config.col_data, &block_entry_key(block_hash)) + load_decode(store, config.col_approval_data, &block_entry_key(block_hash)) .map(|u: Option| u.map(|v| v.into())) .map_err(|e| SubsystemError::with_origin("approval-voting", e)) } @@ -335,7 +341,7 @@ pub fn load_candidate_entry( config: &Config, candidate_hash: &CandidateHash, ) -> SubsystemResult> { - load_decode(store, config.col_data, &candidate_entry_key(candidate_hash)) + load_decode(store, config.col_approval_data, &candidate_entry_key(candidate_hash)) .map(|u: Option| u.map(|v| v.into())) .map_err(|e| SubsystemError::with_origin("approval-voting", e)) } diff --git a/node/core/approval-voting/src/approval_db/v1/tests.rs b/node/core/approval-voting/src/approval_db/v1/tests.rs index 548c64bcef03..06923c6a539f 100644 --- a/node/core/approval-voting/src/approval_db/v1/tests.rs +++ b/node/core/approval-voting/src/approval_db/v1/tests.rs @@ -28,9 +28,12 @@ use std::{collections::HashMap, sync::Arc}; use ::test_helpers::{dummy_candidate_receipt, dummy_candidate_receipt_bad_sig, dummy_hash}; const DATA_COL: u32 = 0; -const NUM_COLUMNS: u32 = 1; +const SESSION_DATA_COL: u32 = 1; -const TEST_CONFIG: Config = Config { col_data: DATA_COL }; +const NUM_COLUMNS: u32 = 2; + +const TEST_CONFIG: Config = + Config { col_approval_data: DATA_COL, col_session_data: SESSION_DATA_COL }; fn make_db() -> (DbBackend, Arc) { let db = kvdb_memorydb::create(NUM_COLUMNS); diff --git a/node/core/approval-voting/src/import.rs b/node/core/approval-voting/src/import.rs index df713143750f..20629dd022d4 100644 --- a/node/core/approval-voting/src/import.rs +++ b/node/core/approval-voting/src/import.rs @@ -632,14 +632,15 @@ pub(crate) mod tests { pub(crate) use sp_runtime::{Digest, DigestItem}; use std::{pin::Pin, sync::Arc}; - use crate::{ - approval_db::v1::Config as DatabaseConfig, criteria, BlockEntry, APPROVAL_SESSIONS, - }; + use crate::{approval_db::v1::Config as DatabaseConfig, criteria, BlockEntry}; const DATA_COL: u32 = 0; - const NUM_COLUMNS: u32 = 1; + const SESSION_DATA_COL: u32 = 1; + + const NUM_COLUMNS: u32 = 2; - const TEST_CONFIG: DatabaseConfig = DatabaseConfig { col_data: DATA_COL }; + const TEST_CONFIG: DatabaseConfig = + DatabaseConfig { col_approval_data: DATA_COL, col_session_data: SESSION_DATA_COL }; #[derive(Default)] struct MockClock; @@ -654,22 +655,23 @@ pub(crate) mod tests { } fn blank_state() -> State { + let db = kvdb_memorydb::create(NUM_COLUMNS); + let db = polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter::new(db, &[]); + let db: Arc = Arc::new(db); State { session_window: None, keystore: Arc::new(LocalKeystore::in_memory()), slot_duration_millis: 
6_000, clock: Box::new(MockClock::default()), assignment_criteria: Box::new(MockAssignmentCriteria), + db, + db_config: TEST_CONFIG, + } } fn single_session_state(index: SessionIndex, info: SessionInfo) -> State { State { - session_window: Some(RollingSessionWindow::with_session_info( - APPROVAL_SESSIONS, - index, - vec![info], - )), + session_window: Some(RollingSessionWindow::with_session_info(index, vec![info])), ..blank_state() } } @@ -782,11 +784,8 @@ pub(crate) mod tests { .map(|(r, c, g)| (r.hash(), r.clone(), *c, *g)) .collect::>(); - let session_window = RollingSessionWindow::with_session_info( - APPROVAL_SESSIONS, - session, - vec![session_info], - ); + let session_window = + RollingSessionWindow::with_session_info(session, vec![session_info]); let header = header.clone(); Box::pin(async move { @@ -891,11 +890,8 @@ pub(crate) mod tests { .collect::>(); let test_fut = { - let session_window = RollingSessionWindow::with_session_info( - APPROVAL_SESSIONS, - session, - vec![session_info], - ); + let session_window = + RollingSessionWindow::with_session_info(session, vec![session_info]); let header = header.clone(); Box::pin(async move { @@ -1089,11 +1085,8 @@ pub(crate) mod tests { .map(|(r, c, g)| (r.hash(), r.clone(), *c, *g)) .collect::>(); - let session_window = Some(RollingSessionWindow::with_session_info( - APPROVAL_SESSIONS, - session, - vec![session_info], - )); + let session_window = + Some(RollingSessionWindow::with_session_info(session, vec![session_info])); let header = header.clone(); Box::pin(async move { @@ -1304,38 +1297,6 @@ pub(crate) mod tests { } ); - // Caching of sesssions needs sessoion of first unfinalied block. - assert_matches!( - handle.recv().await, - AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber( - s_tx, - )) => { - let _ = s_tx.send(Ok(header.number)); - } - ); - - assert_matches!( - handle.recv().await, - AllMessages::ChainApi(ChainApiMessage::FinalizedBlockHash( - block_number, - s_tx, - )) => { - assert_eq!(block_number, header.number); - let _ = s_tx.send(Ok(Some(header.hash()))); - } - ); - - assert_matches!( - handle.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - h, - RuntimeApiRequest::SessionIndexForChild(s_tx), - )) => { - assert_eq!(h, header.hash()); - let _ = s_tx.send(Ok(session)); - } - ); - // determine_new_blocks exits early as the parent_hash is in the DB assert_matches!( diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index e9757071f15e..b96992df2c88 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -44,8 +44,7 @@ use polkadot_node_subsystem_util::{ database::Database, metrics::{self, prometheus}, rolling_session_window::{ - new_session_window_size, RollingSessionWindow, SessionWindowSize, SessionWindowUpdate, - SessionsUnavailable, + DatabaseParams, RollingSessionWindow, SessionWindowUpdate, SessionsUnavailable, }, TimeoutExt, }; @@ -97,8 +96,6 @@ use crate::{ #[cfg(test)] mod tests; -pub const APPROVAL_SESSIONS: SessionWindowSize = new_session_window_size!(6); - const APPROVAL_CHECKING_TIMEOUT: Duration = Duration::from_secs(120); /// How long are we willing to wait for approval signatures? /// @@ -118,7 +115,9 @@ const LOG_TARGET: &str = "parachain::approval-voting"; #[derive(Debug, Clone)] pub struct Config { /// The column family in the DB where approval-voting data is stored. - pub col_data: u32, + pub col_approval_data: u32, + /// The column of the DB where rolling session info is stored. 
+ pub col_session_data: u32, /// The slot duration of the consensus algorithm, in milliseconds. Should be evenly /// divisible by 500. pub slot_duration_millis: u64, @@ -358,7 +357,10 @@ impl ApprovalVotingSubsystem { keystore, slot_duration_millis: config.slot_duration_millis, db, - db_config: DatabaseConfig { col_data: config.col_data }, + db_config: DatabaseConfig { + col_approval_data: config.col_approval_data, + col_session_data: config.col_session_data, + }, mode: Mode::Syncing(sync_oracle), metrics, } @@ -367,7 +369,10 @@ impl ApprovalVotingSubsystem { /// Revert to the block corresponding to the specified `hash`. /// The operation is not allowed for blocks older than the last finalized one. pub fn revert_to(&self, hash: Hash) -> Result<(), SubsystemError> { - let config = approval_db::v1::Config { col_data: self.db_config.col_data }; + let config = approval_db::v1::Config { + col_approval_data: self.db_config.col_approval_data, + col_session_data: self.db_config.col_session_data, + }; let mut backend = approval_db::v1::DbBackend::new(self.db.clone(), config); let mut overlay = OverlayedBackend::new(&backend); @@ -634,6 +639,9 @@ struct State { slot_duration_millis: u64, clock: Box, assignment_criteria: Box, + // Required for `RollingSessionWindow`. + db_config: DatabaseConfig, + db: Arc, } #[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)] @@ -655,8 +663,17 @@ impl State { match session_window { None => { let sender = ctx.sender().clone(); - self.session_window = - Some(RollingSessionWindow::new(sender, APPROVAL_SESSIONS, head).await?); + self.session_window = Some( + RollingSessionWindow::new( + sender, + head, + DatabaseParams { + db: self.db.clone(), + db_column: self.db_config.col_session_data, + }, + ) + .await?, + ); Ok(None) }, Some(mut session_window) => { @@ -751,7 +768,7 @@ async fn run( where B: Backend, { - if let Err(err) = db_sanity_check(subsystem.db, subsystem.db_config) { + if let Err(err) = db_sanity_check(subsystem.db.clone(), subsystem.db_config.clone()) { gum::warn!(target: LOG_TARGET, ?err, "Could not run approval vote DB sanity check"); } @@ -761,6 +778,8 @@ where slot_duration_millis: subsystem.slot_duration_millis, clock, assignment_criteria, + db_config: subsystem.db_config, + db: subsystem.db, }; let mut wakeups = Wakeups::default(); diff --git a/node/core/approval-voting/src/tests.rs b/node/core/approval-voting/src/tests.rs index d5c8d3c01da4..b9063c8ade25 100644 --- a/node/core/approval-voting/src/tests.rs +++ b/node/core/approval-voting/src/tests.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see <http://www.gnu.org/licenses/>. 
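The approval-voting changes above split the subsystem's storage into two database columns, one for approval data proper and one for the rolling session window cache, and `RollingSessionWindow::new` now receives the database handle and column through `DatabaseParams`. A minimal sketch of how an embedding node service might wire the new three-field `Config`; the crate path `polkadot_node_core_approval_voting` and the concrete column indices are assumptions for illustration, not taken from this diff:

use polkadot_node_core_approval_voting::Config as ApprovalVotingConfig;

// Column indices are chosen by the embedding node service; the values below are
// illustrative only and must match the column layout of the database it opens.
const COL_APPROVAL_DATA: u32 = 1;
const COL_SESSION_DATA: u32 = 2;

fn approval_voting_config(slot_duration_millis: u64) -> ApprovalVotingConfig {
    ApprovalVotingConfig {
        // Approval-voting entries (stored block range, block and candidate entries).
        col_approval_data: COL_APPROVAL_DATA,
        // New in this change: the rolling session window persists its cache here.
        col_session_data: COL_SESSION_DATA,
        slot_duration_millis,
    }
}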
+use crate::tests::test_constants::TEST_CONFIG; + use super::*; use polkadot_node_primitives::{ approval::{ @@ -111,9 +113,12 @@ fn make_sync_oracle(val: bool) -> (Box, TestSyncOracleHan pub mod test_constants { use crate::approval_db::v1::Config as DatabaseConfig; const DATA_COL: u32 = 0; - pub(crate) const NUM_COLUMNS: u32 = 1; + const SESSION_DATA_COL: u32 = 1; + + pub(crate) const NUM_COLUMNS: u32 = 2; - pub(crate) const TEST_CONFIG: DatabaseConfig = DatabaseConfig { col_data: DATA_COL }; + pub(crate) const TEST_CONFIG: DatabaseConfig = + DatabaseConfig { col_approval_data: DATA_COL, col_session_data: SESSION_DATA_COL }; } struct MockSupportsParachains; @@ -487,8 +492,9 @@ fn test_harness>( context, ApprovalVotingSubsystem::with_config( Config { - col_data: test_constants::TEST_CONFIG.col_data, + col_approval_data: test_constants::TEST_CONFIG.col_approval_data, slot_duration_millis: SLOT_DURATION_MILLIS, + col_session_data: TEST_CONFIG.col_session_data, }, Arc::new(db), Arc::new(keystore), @@ -810,38 +816,38 @@ async fn import_block( } ); - assert_matches!( - overseer_recv(overseer).await, - AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber( - s_tx, - )) => { - let _ = s_tx.send(Ok(number)); - } - ); + if !fork { + assert_matches!( + overseer_recv(overseer).await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber( + s_tx, + )) => { + let _ = s_tx.send(Ok(number)); + } + ); - assert_matches!( - overseer_recv(overseer).await, - AllMessages::ChainApi(ChainApiMessage::FinalizedBlockHash( - block_number, - s_tx, - )) => { - assert_eq!(block_number, number); - let _ = s_tx.send(Ok(Some(hashes[number as usize].0))); - } - ); + assert_matches!( + overseer_recv(overseer).await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockHash( + block_number, + s_tx, + )) => { + assert_eq!(block_number, number); + let _ = s_tx.send(Ok(Some(hashes[number as usize].0))); + } + ); - assert_matches!( - overseer_recv(overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - h, - RuntimeApiRequest::SessionIndexForChild(s_tx), - )) => { - assert_eq!(h, hashes[number as usize].0); - let _ = s_tx.send(Ok(number.into())); - } - ); + assert_matches!( + overseer_recv(overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + h, + RuntimeApiRequest::SessionIndexForChild(s_tx), + )) => { + assert_eq!(h, hashes[number as usize].0); + let _ = s_tx.send(Ok(number.into())); + } + ); - if !fork { assert_matches!( overseer_recv(overseer).await, AllMessages::RuntimeApi( diff --git a/node/core/candidate-validation/Cargo.toml b/node/core/candidate-validation/Cargo.toml index 8634cfe5a75e..105d7c1a21dc 100644 --- a/node/core/candidate-validation/Cargo.toml +++ b/node/core/candidate-validation/Cargo.toml @@ -7,6 +7,7 @@ edition = "2021" [dependencies] async-trait = "0.1.57" futures = "0.3.21" +futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../../gum" } sp-maybe-compressed-blob = { package = "sp-maybe-compressed-blob", git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/node/core/candidate-validation/src/lib.rs b/node/core/candidate-validation/src/lib.rs index c3775ba1c453..a82a0feb78a0 100644 --- a/node/core/candidate-validation/src/lib.rs +++ b/node/core/candidate-validation/src/lib.rs @@ -60,6 +60,12 @@ mod tests; const LOG_TARGET: &'static str = "parachain::candidate-validation"; +/// The amount of time to wait before retrying after an AmbiguousWorkerDeath validation error. 
+#[cfg(not(test))] +const PVF_EXECUTION_RETRY_DELAY: Duration = Duration::from_secs(3); +#[cfg(test)] +const PVF_EXECUTION_RETRY_DELAY: Duration = Duration::from_millis(200); + /// Configuration for the candidate validation subsystem #[derive(Clone)] pub struct Config { @@ -490,7 +496,7 @@ where } async fn validate_candidate_exhaustive( - mut validation_backend: impl ValidationBackend, + mut validation_backend: impl ValidationBackend + Send, persisted_validation_data: PersistedValidationData, validation_code: ValidationCode, candidate_receipt: CandidateReceipt, @@ -551,7 +557,7 @@ async fn validate_candidate_exhaustive( }; let result = validation_backend - .validate_candidate(raw_validation_code.to_vec(), timeout, params) + .validate_candidate_with_retry(raw_validation_code.to_vec(), timeout, params) .await; if let Err(ref error) = result { @@ -604,45 +610,63 @@ async fn validate_candidate_exhaustive( #[async_trait] trait ValidationBackend { async fn validate_candidate( + &mut self, + pvf: Pvf, + timeout: Duration, + encoded_params: Vec, + ) -> Result; + + async fn validate_candidate_with_retry( &mut self, raw_validation_code: Vec, timeout: Duration, params: ValidationParams, - ) -> Result; + ) -> Result { + // Construct the PVF a single time, since it is an expensive operation. Cloning it is cheap. + let pvf = Pvf::from_code(raw_validation_code); + + let validation_result = + self.validate_candidate(pvf.clone(), timeout, params.encode()).await; + + // If we get an AmbiguousWorkerDeath error, retry once after a brief delay, on the + // assumption that the conditions that caused this error may have been transient. + if let Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousWorkerDeath)) = + validation_result + { + // Wait a brief delay before retrying. + futures_timer::Delay::new(PVF_EXECUTION_RETRY_DELAY).await; + // Encode the params again when re-trying. We expect the retry case to be relatively + // rare, and we want to avoid unconditionally cloning data. + self.validate_candidate(pvf, timeout, params.encode()).await + } else { + validation_result + } + } async fn precheck_pvf(&mut self, pvf: Pvf) -> Result<(), PrepareError>; } #[async_trait] impl ValidationBackend for ValidationHost { + /// Tries executing a PVF a single time (no retries). async fn validate_candidate( &mut self, - raw_validation_code: Vec, + pvf: Pvf, timeout: Duration, - params: ValidationParams, + encoded_params: Vec, ) -> Result { + let priority = polkadot_node_core_pvf::Priority::Normal; + let (tx, rx) = oneshot::channel(); - if let Err(err) = self - .execute_pvf( - Pvf::from_code(raw_validation_code), - timeout, - params.encode(), - polkadot_node_core_pvf::Priority::Normal, - tx, - ) - .await - { + if let Err(err) = self.execute_pvf(pvf, timeout, encoded_params, priority, tx).await { return Err(ValidationError::InternalError(format!( "cannot send pvf to the validation host: {:?}", err ))) } - let validation_result = rx - .await - .map_err(|_| ValidationError::InternalError("validation was cancelled".into()))?; - - validation_result + rx.await + .map_err(|_| ValidationError::InternalError("validation was cancelled".into()))? 
} async fn precheck_pvf(&mut self, pvf: Pvf) -> Result<(), PrepareError> { diff --git a/node/core/candidate-validation/src/tests.rs b/node/core/candidate-validation/src/tests.rs index ecac13d1440d..cf467cd5c057 100644 --- a/node/core/candidate-validation/src/tests.rs +++ b/node/core/candidate-validation/src/tests.rs @@ -345,12 +345,19 @@ fn check_does_not_match() { } struct MockValidateCandidateBackend { - result: Result, + result_list: Vec>, + num_times_called: usize, } impl MockValidateCandidateBackend { fn with_hardcoded_result(result: Result) -> Self { - Self { result } + Self { result_list: vec![result], num_times_called: 0 } + } + + fn with_hardcoded_result_list( + result_list: Vec>, + ) -> Self { + Self { result_list, num_times_called: 0 } } } @@ -358,11 +365,16 @@ impl MockValidateCandidateBackend { impl ValidationBackend for MockValidateCandidateBackend { async fn validate_candidate( &mut self, - _raw_validation_code: Vec, + _pvf: Pvf, _timeout: Duration, - _params: ValidationParams, + _encoded_params: Vec, ) -> Result { - self.result.clone() + // This is expected to panic if called more times than expected, indicating an error in the + // test. + let result = self.result_list[self.num_times_called].clone(); + self.num_times_called += 1; + + result } async fn precheck_pvf(&mut self, _pvf: Pvf) -> Result<(), PrepareError> { @@ -468,7 +480,7 @@ fn candidate_validation_bad_return_is_invalid() { let v = executor::block_on(validate_candidate_exhaustive( MockValidateCandidateBackend::with_hardcoded_result(Err( - ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousWorkerDeath), + ValidationError::InvalidCandidate(WasmInvalidCandidate::HardTimeout), )), validation_data, validation_code, @@ -479,6 +491,122 @@ fn candidate_validation_bad_return_is_invalid() { )) .unwrap(); + assert_matches!(v, ValidationResult::Invalid(InvalidCandidate::Timeout)); +} + +#[test] +fn candidate_validation_one_ambiguous_error_is_valid() { + let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() }; + + let pov = PoV { block_data: BlockData(vec![1; 32]) }; + let head_data = HeadData(vec![1, 1, 1]); + let validation_code = ValidationCode(vec![2; 16]); + + let descriptor = make_valid_candidate_descriptor( + ParaId::from(1_u32), + dummy_hash(), + validation_data.hash(), + pov.hash(), + validation_code.hash(), + head_data.hash(), + dummy_hash(), + Sr25519Keyring::Alice, + ); + + let check = perform_basic_checks( + &descriptor, + validation_data.max_pov_size, + &pov, + &validation_code.hash(), + ); + assert!(check.is_ok()); + + let validation_result = WasmValidationResult { + head_data, + new_validation_code: Some(vec![2, 2, 2].into()), + upward_messages: Vec::new(), + horizontal_messages: Vec::new(), + processed_downward_messages: 0, + hrmp_watermark: 0, + }; + + let commitments = CandidateCommitments { + head_data: validation_result.head_data.clone(), + upward_messages: validation_result.upward_messages.clone(), + horizontal_messages: validation_result.horizontal_messages.clone(), + new_validation_code: validation_result.new_validation_code.clone(), + processed_downward_messages: validation_result.processed_downward_messages, + hrmp_watermark: validation_result.hrmp_watermark, + }; + + let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: commitments.hash() }; + + let v = executor::block_on(validate_candidate_exhaustive( + MockValidateCandidateBackend::with_hardcoded_result_list(vec![ + 
Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousWorkerDeath)), + Ok(validation_result), + ]), + validation_data.clone(), + validation_code, + candidate_receipt, + Arc::new(pov), + Duration::from_secs(0), + &Default::default(), + )) + .unwrap(); + + assert_matches!(v, ValidationResult::Valid(outputs, used_validation_data) => { + assert_eq!(outputs.head_data, HeadData(vec![1, 1, 1])); + assert_eq!(outputs.upward_messages, Vec::::new()); + assert_eq!(outputs.horizontal_messages, Vec::new()); + assert_eq!(outputs.new_validation_code, Some(vec![2, 2, 2].into())); + assert_eq!(outputs.hrmp_watermark, 0); + assert_eq!(used_validation_data, validation_data); + }); +} + +#[test] +fn candidate_validation_multiple_ambiguous_errors_is_invalid() { + let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() }; + + let pov = PoV { block_data: BlockData(vec![1; 32]) }; + let validation_code = ValidationCode(vec![2; 16]); + + let descriptor = make_valid_candidate_descriptor( + ParaId::from(1_u32), + dummy_hash(), + validation_data.hash(), + pov.hash(), + validation_code.hash(), + dummy_hash(), + dummy_hash(), + Sr25519Keyring::Alice, + ); + + let check = perform_basic_checks( + &descriptor, + validation_data.max_pov_size, + &pov, + &validation_code.hash(), + ); + assert!(check.is_ok()); + + let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; + + let v = executor::block_on(validate_candidate_exhaustive( + MockValidateCandidateBackend::with_hardcoded_result_list(vec![ + Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousWorkerDeath)), + Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousWorkerDeath)), + ]), + validation_data, + validation_code, + candidate_receipt, + Arc::new(pov), + Duration::from_secs(0), + &Default::default(), + )) + .unwrap(); + assert_matches!(v, ValidationResult::Invalid(InvalidCandidate::ExecutionError(_))); } @@ -779,9 +907,9 @@ impl MockPreCheckBackend { impl ValidationBackend for MockPreCheckBackend { async fn validate_candidate( &mut self, - _raw_validation_code: Vec, + _pvf: Pvf, _timeout: Duration, - _params: ValidationParams, + _encoded_params: Vec, ) -> Result { unreachable!() } diff --git a/node/core/dispute-coordinator/src/db/v1.rs b/node/core/dispute-coordinator/src/db/v1.rs index 2c643d341de2..bb1456a59745 100644 --- a/node/core/dispute-coordinator/src/db/v1.rs +++ b/node/core/dispute-coordinator/src/db/v1.rs @@ -99,10 +99,10 @@ impl DbBackend { encoded = ?candidate_votes_session_prefix(index), "Cleaning votes for session index" ); - tx.delete_prefix(self.config.col_data, &candidate_votes_session_prefix(index)); + tx.delete_prefix(self.config.col_dispute_data, &candidate_votes_session_prefix(index)); } // New watermark: - tx.put_vec(self.config.col_data, CLEANED_VOTES_WATERMARK_KEY, clean_until.encode()); + tx.put_vec(self.config.col_dispute_data, CLEANED_VOTES_WATERMARK_KEY, clean_until.encode()); Ok(()) } } @@ -148,21 +148,32 @@ impl Backend for DbBackend { self.add_vote_cleanup_tx(&mut tx, session)?; // Actually write the earliest session. 
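Illustrative sketch, not part of this diff: the `put_vec`/`load_decode` pattern this file uses for SCALE-encoded values, written against plain `kvdb` types. The helper names are hypothetical, and the real `load_decode` surfaces errors as subsystem errors instead of discarding them.

use kvdb::{DBTransaction, KeyValueDB};
use parity_scale_codec::{Decode, Encode};

// Stage a SCALE-encoded value for a given column and key.
fn put_encoded<V: Encode>(tx: &mut DBTransaction, col: u32, key: &[u8], value: &V) {
    tx.put_vec(col, key, value.encode());
}

// Read a value back; a missing key or a decode failure both yield `None` in this sketch.
fn get_decoded<V: Decode>(db: &dyn KeyValueDB, col: u32, key: &[u8]) -> Option<V> {
    db.get(col, key).ok().flatten().and_then(|raw| V::decode(&mut &raw[..]).ok())
}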
- tx.put_vec(self.config.col_data, EARLIEST_SESSION_KEY, session.encode()); + tx.put_vec( + self.config.col_dispute_data, + EARLIEST_SESSION_KEY, + session.encode(), + ); }, BackendWriteOp::WriteRecentDisputes(recent_disputes) => { - tx.put_vec(self.config.col_data, RECENT_DISPUTES_KEY, recent_disputes.encode()); + tx.put_vec( + self.config.col_dispute_data, + RECENT_DISPUTES_KEY, + recent_disputes.encode(), + ); }, BackendWriteOp::WriteCandidateVotes(session, candidate_hash, votes) => { gum::trace!(target: LOG_TARGET, ?session, "Writing candidate votes"); tx.put_vec( - self.config.col_data, + self.config.col_dispute_data, &candidate_votes_key(session, &candidate_hash), votes.encode(), ); }, BackendWriteOp::DeleteCandidateVotes(session, candidate_hash) => { - tx.delete(self.config.col_data, &candidate_votes_key(session, &candidate_hash)); + tx.delete( + self.config.col_dispute_data, + &candidate_votes_key(session, &candidate_hash), + ); }, } } @@ -195,7 +206,9 @@ fn candidate_votes_session_prefix(session: SessionIndex) -> [u8; 15 + 4] { #[derive(Debug, Clone)] pub struct ColumnConfiguration { /// The column in the key-value DB where data is stored. - pub col_data: u32, + pub col_dispute_data: u32, + /// The column in the key-value DB where session data is stored. + pub col_session_data: u32, } /// Tracked votes on candidates, for the purposes of dispute resolution. @@ -257,8 +270,12 @@ impl From for crate::error::Error { /// Result alias for DB errors. pub type Result = std::result::Result; -fn load_decode(db: &dyn Database, col_data: u32, key: &[u8]) -> Result> { - match db.get(col_data, key)? { +fn load_decode( + db: &dyn Database, + col_dispute_data: u32, + key: &[u8], +) -> Result> { + match db.get(col_dispute_data, key)? { None => Ok(None), Some(raw) => D::decode(&mut &raw[..]).map(Some).map_err(Into::into), } @@ -271,7 +288,7 @@ pub(crate) fn load_candidate_votes( session: SessionIndex, candidate_hash: &CandidateHash, ) -> SubsystemResult> { - load_decode(db, config.col_data, &candidate_votes_key(session, candidate_hash)) + load_decode(db, config.col_dispute_data, &candidate_votes_key(session, candidate_hash)) .map_err(|e| SubsystemError::with_origin("dispute-coordinator", e)) } @@ -280,7 +297,7 @@ pub(crate) fn load_earliest_session( db: &dyn Database, config: &ColumnConfiguration, ) -> SubsystemResult> { - load_decode(db, config.col_data, EARLIEST_SESSION_KEY) + load_decode(db, config.col_dispute_data, EARLIEST_SESSION_KEY) .map_err(|e| SubsystemError::with_origin("dispute-coordinator", e)) } @@ -289,7 +306,7 @@ pub(crate) fn load_recent_disputes( db: &dyn Database, config: &ColumnConfiguration, ) -> SubsystemResult> { - load_decode(db, config.col_data, RECENT_DISPUTES_KEY) + load_decode(db, config.col_dispute_data, RECENT_DISPUTES_KEY) .map_err(|e| SubsystemError::with_origin("dispute-coordinator", e)) } @@ -347,7 +364,7 @@ fn load_cleaned_votes_watermark( db: &dyn Database, config: &ColumnConfiguration, ) -> FatalResult> { - load_decode(db, config.col_data, CLEANED_VOTES_WATERMARK_KEY) + load_decode(db, config.col_dispute_data, CLEANED_VOTES_WATERMARK_KEY) .map_err(|e| FatalError::DbReadFailed(e)) } @@ -362,7 +379,7 @@ mod tests { let db = kvdb_memorydb::create(1); let db = polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter::new(db, &[0]); let store = Arc::new(db); - let config = ColumnConfiguration { col_data: 0 }; + let config = ColumnConfiguration { col_dispute_data: 0, col_session_data: 1 }; DbBackend::new(store, config, Metrics::default()) } diff --git 
a/node/core/dispute-coordinator/src/lib.rs b/node/core/dispute-coordinator/src/lib.rs index 6289eb2f11a2..03abd8f59d60 100644 --- a/node/core/dispute-coordinator/src/lib.rs +++ b/node/core/dispute-coordinator/src/lib.rs @@ -35,7 +35,8 @@ use polkadot_node_subsystem::{ overseer, ActivatedLeaf, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, }; use polkadot_node_subsystem_util::{ - database::Database, rolling_session_window::RollingSessionWindow, + database::Database, + rolling_session_window::{DatabaseParams, RollingSessionWindow}, }; use polkadot_primitives::v2::{ScrapedOnChainVotes, ValidatorIndex, ValidatorPair}; @@ -117,12 +118,17 @@ pub struct DisputeCoordinatorSubsystem { #[derive(Debug, Clone, Copy)] pub struct Config { /// The data column in the store to use for dispute data. - pub col_data: u32, + pub col_dispute_data: u32, + /// The data column in the store to use for session data. + pub col_session_data: u32, } impl Config { fn column_config(&self) -> db::v1::ColumnConfiguration { - db::v1::ColumnConfiguration { col_data: self.col_data } + db::v1::ColumnConfiguration { + col_dispute_data: self.col_dispute_data, + col_session_data: self.col_session_data, + } } } @@ -199,17 +205,21 @@ impl DisputeCoordinatorSubsystem { B: Backend + 'static, { loop { - let (first_leaf, rolling_session_window) = match get_rolling_session_window(ctx).await { - Ok(Some(update)) => update, - Ok(None) => { - gum::info!(target: LOG_TARGET, "received `Conclude` signal, exiting"); - return Ok(None) - }, - Err(e) => { - e.split()?.log(); - continue - }, - }; + let db_params = + DatabaseParams { db: self.store.clone(), db_column: self.config.col_session_data }; + + let (first_leaf, rolling_session_window) = + match get_rolling_session_window(ctx, db_params).await { + Ok(Some(update)) => update, + Ok(None) => { + gum::info!(target: LOG_TARGET, "received `Conclude` signal, exiting"); + return Ok(None) + }, + Err(e) => { + e.split()?.log(); + continue + }, + }; let mut overlay_db = OverlayedBackend::new(&mut backend); let (participations, votes, spam_slots, ordering_provider) = match self @@ -352,12 +362,13 @@ impl DisputeCoordinatorSubsystem { #[overseer::contextbounds(DisputeCoordinator, prefix = self::overseer)] async fn get_rolling_session_window( ctx: &mut Context, + db_params: DatabaseParams, ) -> Result> { if let Some(leaf) = { wait_for_first_leaf(ctx) }.await? 
{ let sender = ctx.sender().clone(); Ok(Some(( leaf.clone(), - RollingSessionWindow::new(sender, DISPUTE_WINDOW, leaf.hash) + RollingSessionWindow::new(sender, leaf.hash, db_params) .await .map_err(JfyiError::RollingSessionWindow)?, ))) diff --git a/node/core/dispute-coordinator/src/tests.rs b/node/core/dispute-coordinator/src/tests.rs index c6fe328d9537..a1ad315d2ea0 100644 --- a/node/core/dispute-coordinator/src/tests.rs +++ b/node/core/dispute-coordinator/src/tests.rs @@ -176,7 +176,7 @@ impl Default for TestState { let db = kvdb_memorydb::create(1); let db = polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter::new(db, &[]); let db = Arc::new(db); - let config = Config { col_data: 0 }; + let config = Config { col_dispute_data: 0, col_session_data: 1 }; let genesis_header = Header { parent_hash: Hash::zero(), @@ -251,6 +251,7 @@ impl TestState { session: SessionIndex, ) { // Order of messages is not fixed (different on initializing): + #[derive(Debug)] struct FinishedSteps { got_session_information: bool, got_scraping_information: bool, @@ -268,7 +269,8 @@ impl TestState { let mut finished_steps = FinishedSteps::new(); while !finished_steps.is_done() { - match overseer_recv(virtual_overseer).await { + let recv = overseer_recv(virtual_overseer).await; + match recv { AllMessages::RuntimeApi(RuntimeApiMessage::Request( h, RuntimeApiRequest::SessionIndexForChild(tx), @@ -282,36 +284,38 @@ impl TestState { let _ = tx.send(Ok(session)); // Queries for fetching earliest unfinalized block session. See `RollingSessionWindow`. - assert_matches!( - overseer_recv(virtual_overseer).await, - AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber( - s_tx, - )) => { - let _ = s_tx.send(Ok(block_number)); - } - ); + if self.known_session.is_none() { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber( + s_tx, + )) => { + let _ = s_tx.send(Ok(block_number)); + } + ); - assert_matches!( - overseer_recv(virtual_overseer).await, - AllMessages::ChainApi(ChainApiMessage::FinalizedBlockHash( - number, - s_tx, - )) => { - assert_eq!(block_number, number); - let _ = s_tx.send(Ok(Some(block_hash))); - } - ); + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockHash( + number, + s_tx, + )) => { + assert_eq!(block_number, number); + let _ = s_tx.send(Ok(Some(block_hash))); + } + ); - assert_matches!( - overseer_recv(virtual_overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - h, - RuntimeApiRequest::SessionIndexForChild(s_tx), - )) => { - assert_eq!(h, block_hash); - let _ = s_tx.send(Ok(session)); - } - ); + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + h, + RuntimeApiRequest::SessionIndexForChild(s_tx), + )) => { + assert_eq!(h, block_hash); + let _ = s_tx.send(Ok(session)); + } + ); + } // No queries, if subsystem knows about this session already. 
if self.known_session == Some(session) { @@ -754,6 +758,7 @@ fn approval_vote_import_works() { let approval_votes = [(ValidatorIndex(4), approval_vote.into_validator_signature())] .into_iter() .collect(); + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash1, approval_votes) .await; @@ -2255,6 +2260,7 @@ fn resume_dispute_with_local_statement() { }, }) .await; + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new()) .await; @@ -2469,6 +2475,7 @@ fn resume_dispute_with_local_statement_without_local_key() { test_state }) }); + // No keys: test_state.subsystem_keystore = make_keystore(vec![Sr25519Keyring::Two.to_seed()].into_iter()).into(); diff --git a/node/core/pvf/src/artifacts.rs b/node/core/pvf/src/artifacts.rs index 32f487cfc062..038d8e803299 100644 --- a/node/core/pvf/src/artifacts.rs +++ b/node/core/pvf/src/artifacts.rs @@ -103,10 +103,22 @@ pub enum ArtifactState { last_time_needed: SystemTime, }, /// A task to prepare this artifact is scheduled. - Preparing { waiting_for_response: Vec }, + Preparing { + /// List of result senders that are waiting for a response. + waiting_for_response: Vec, + /// The number of times this artifact has failed to prepare. + num_failures: u32, + }, /// The code couldn't be compiled due to an error. Such artifacts /// never reach the executor and stay in the host's memory. - FailedToProcess(PrepareError), + FailedToProcess { + /// Keep track of the last time that processing this artifact failed. + last_time_failed: SystemTime, + /// The number of times this artifact has failed to prepare. + num_failures: u32, + /// The last error encountered for preparation. + error: PrepareError, + }, } /// A container of all known artifact ids and their states. @@ -150,7 +162,7 @@ impl Artifacts { // See the precondition. always!(self .artifacts - .insert(artifact_id, ArtifactState::Preparing { waiting_for_response }) + .insert(artifact_id, ArtifactState::Preparing { waiting_for_response, num_failures: 0 }) .is_none()); } diff --git a/node/core/pvf/src/execute/queue.rs b/node/core/pvf/src/execute/queue.rs index 9b240e02df17..b4c6a66b7719 100644 --- a/node/core/pvf/src/execute/queue.rs +++ b/node/core/pvf/src/execute/queue.rs @@ -252,8 +252,8 @@ fn handle_job_finish( "execute worker concluded", ); - // First we send the result. It may fail due the other end of the channel being dropped, that's - // legitimate and we don't treat that as an error. + // First we send the result. It may fail due to the other end of the channel being dropped, + // that's legitimate and we don't treat that as an error. let _ = result_tx.send(result); // Then, we should deal with the worker: @@ -305,7 +305,7 @@ async fn spawn_worker_task(program_path: PathBuf, spawn_timeout: Duration) -> Qu Err(err) => { gum::warn!(target: LOG_TARGET, "failed to spawn an execute worker: {:?}", err); - // Assume that the failure intermittent and retry after a delay. + // Assume that the failure is intermittent and retry after a delay. 
Delay::new(Duration::from_secs(3)).await; }, } diff --git a/node/core/pvf/src/host.rs b/node/core/pvf/src/host.rs index 69f2e07b56cc..5c29072da1c3 100644 --- a/node/core/pvf/src/host.rs +++ b/node/core/pvf/src/host.rs @@ -22,6 +22,7 @@ use crate::{ artifacts::{ArtifactId, ArtifactPathId, ArtifactState, Artifacts}, + error::PrepareError, execute, metrics::Metrics, prepare, PrepareResult, Priority, Pvf, ValidationError, LOG_TARGET, @@ -49,6 +50,16 @@ pub const PRECHECK_PREPARATION_TIMEOUT: Duration = Duration::from_secs(60); // NOTE: If you change this make sure to fix the buckets of `pvf_preparation_time` metric. pub const LENIENT_PREPARATION_TIMEOUT: Duration = Duration::from_secs(360); +/// The time period after which a failed preparation artifact is considered ready to be retried. +/// Note that we will only retry if another request comes in after this cooldown has passed. +#[cfg(not(test))] +pub const PREPARE_FAILURE_COOLDOWN: Duration = Duration::from_secs(15 * 60); +#[cfg(test)] +pub const PREPARE_FAILURE_COOLDOWN: Duration = Duration::from_millis(200); + +/// The amount of times we will retry failed prepare jobs. +pub const NUM_PREPARE_RETRIES: u32 = 5; + /// An alias to not spell the type for the oneshot sender for the PVF execution result. pub(crate) type ResultSender = oneshot::Sender>; @@ -97,7 +108,13 @@ impl ValidationHost { result_tx: ResultSender, ) -> Result<(), String> { self.to_host_tx - .send(ToHost::ExecutePvf { pvf, execution_timeout, params, priority, result_tx }) + .send(ToHost::ExecutePvf(ExecutePvfInputs { + pvf, + execution_timeout, + params, + priority, + result_tx, + })) .await .map_err(|_| "the inner loop hung up".to_string()) } @@ -117,20 +134,17 @@ impl ValidationHost { } enum ToHost { - PrecheckPvf { - pvf: Pvf, - result_tx: PrepareResultSender, - }, - ExecutePvf { - pvf: Pvf, - execution_timeout: Duration, - params: Vec, - priority: Priority, - result_tx: ResultSender, - }, - HeadsUp { - active_pvfs: Vec, - }, + PrecheckPvf { pvf: Pvf, result_tx: PrepareResultSender }, + ExecutePvf(ExecutePvfInputs), + HeadsUp { active_pvfs: Vec }, +} + +struct ExecutePvfInputs { + pvf: Pvf, + execution_timeout: Duration, + params: Vec, + priority: Priority, + result_tx: ResultSender, } /// Configuration for the validation host. @@ -361,6 +375,8 @@ async fn run( Some(to_host) => to_host, }; + // If the artifact failed before, it could be re-scheduled for preparation here if + // the preparation failure cooldown has elapsed. break_if_fatal!(handle_to_host( &cache_path, &mut artifacts, @@ -377,9 +393,9 @@ async fn run( // Note that preparation always succeeds. // // That's because the error conditions are written into the artifact and will be - // reported at the time of the execution. It potentially, but not necessarily, - // can be scheduled as a result of this function call, in case there are pending - // executions. + // reported at the time of the execution. It potentially, but not necessarily, can + // be scheduled for execution as a result of this function call, in case there are + // pending executions. // // We could be eager in terms of reporting and plumb the result from the preparation // worker but we don't for the sake of simplicity. 
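Illustrative sketch, not part of this diff: the gate that `PREPARE_FAILURE_COOLDOWN` and `NUM_PREPARE_RETRIES` feed, reduced to its two numeric conditions. The real check, `can_retry_prepare_after_failure` later in this diff, additionally refuses to retry deterministic `Prevalidation`/`Preparation` errors; the function name here is hypothetical.

use std::time::{Duration, SystemTime};

const PREPARE_FAILURE_COOLDOWN: Duration = Duration::from_secs(15 * 60);
const NUM_PREPARE_RETRIES: u32 = 5;

// Retry only once the cooldown since the last failure has elapsed and the retry
// budget has not been exhausted.
fn cooldown_elapsed_and_budget_left(last_time_failed: SystemTime, num_failures: u32) -> bool {
    SystemTime::now() >= last_time_failed + PREPARE_FAILURE_COOLDOWN &&
        num_failures <= NUM_PREPARE_RETRIES
}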
@@ -407,24 +423,19 @@ async fn handle_to_host( ToHost::PrecheckPvf { pvf, result_tx } => { handle_precheck_pvf(artifacts, prepare_queue, pvf, result_tx).await?; }, - ToHost::ExecutePvf { pvf, execution_timeout, params, priority, result_tx } => { + ToHost::ExecutePvf(inputs) => { handle_execute_pvf( cache_path, artifacts, prepare_queue, execute_queue, awaiting_prepare, - pvf, - execution_timeout, - params, - priority, - result_tx, + inputs, ) .await?; }, - ToHost::HeadsUp { active_pvfs } => { - handle_heads_up(artifacts, prepare_queue, active_pvfs).await?; - }, + ToHost::HeadsUp { active_pvfs } => + handle_heads_up(artifacts, prepare_queue, active_pvfs).await?, } Ok(()) @@ -432,8 +443,9 @@ async fn handle_to_host( /// Handles PVF prechecking requests. /// -/// This tries to prepare the PVF by compiling the WASM blob within a given timeout -/// ([`PRECHECK_PREPARATION_TIMEOUT`]). +/// This tries to prepare the PVF by compiling the WASM blob within a given timeout ([`PRECHECK_PREPARATION_TIMEOUT`]). +/// +/// If the prepare job failed previously, we may retry it under certain conditions. async fn handle_precheck_pvf( artifacts: &mut Artifacts, prepare_queue: &mut mpsc::Sender, @@ -448,10 +460,12 @@ async fn handle_precheck_pvf( *last_time_needed = SystemTime::now(); let _ = result_sender.send(Ok(())); }, - ArtifactState::Preparing { waiting_for_response } => + ArtifactState::Preparing { waiting_for_response, num_failures: _ } => waiting_for_response.push(result_sender), - ArtifactState::FailedToProcess(result) => { - let _ = result_sender.send(PrepareResult::Err(result.clone())); + ArtifactState::FailedToProcess { error, .. } => { + // Do not retry failed preparation if another pre-check request comes in. We do not retry pre-checking + // anyway. + let _ = result_sender.send(PrepareResult::Err(error.clone())); }, } } else { @@ -471,22 +485,22 @@ async fn handle_precheck_pvf( /// Handles PVF execution. /// -/// This will first try to prepare the PVF, if a prepared artifact does not already exist. If there -/// is already a preparation job, we coalesce the two preparation jobs. When preparing for -/// execution, we use a more lenient timeout ([`LENIENT_PREPARATION_TIMEOUT`]) than when -/// prechecking. +/// This will try to prepare the PVF, if a prepared artifact does not already exist. If there is already a +/// preparation job, we coalesce the two preparation jobs. +/// +/// If the prepare job failed previously, we may retry it under certain conditions. +/// +/// When preparing for execution, we use a more lenient timeout ([`LENIENT_PREPARATION_TIMEOUT`]) +/// than when prechecking. async fn handle_execute_pvf( cache_path: &Path, artifacts: &mut Artifacts, prepare_queue: &mut mpsc::Sender, execute_queue: &mut mpsc::Sender, awaiting_prepare: &mut AwaitingPrepare, - pvf: Pvf, - execution_timeout: Duration, - params: Vec, - priority: Priority, - result_tx: ResultSender, + inputs: ExecutePvfInputs, ) -> Result<(), Fatal> { + let ExecutePvfInputs { pvf, execution_timeout, params, priority, result_tx } = inputs; let artifact_id = pvf.as_artifact_id(); if let Some(state) = artifacts.artifact_state_mut(&artifact_id) { @@ -494,6 +508,7 @@ async fn handle_execute_pvf( ArtifactState::Prepared { last_time_needed } => { *last_time_needed = SystemTime::now(); + // This artifact has already been prepared; send it to the execute queue. 
send_execute( execute_queue, execute::ToQueue::Enqueue { @@ -505,11 +520,29 @@ async fn handle_execute_pvf( ) .await?; }, - ArtifactState::Preparing { waiting_for_response: _ } => { + ArtifactState::Preparing { .. } => { awaiting_prepare.add(artifact_id, execution_timeout, params, result_tx); }, - ArtifactState::FailedToProcess(error) => { - let _ = result_tx.send(Err(ValidationError::from(error.clone()))); + ArtifactState::FailedToProcess { last_time_failed, num_failures, error } => { + if can_retry_prepare_after_failure(*last_time_failed, *num_failures, error) { + // If we are allowed to retry the failed prepare job, change the state to + // Preparing and re-queue this job. + *state = ArtifactState::Preparing { + waiting_for_response: Vec::new(), + num_failures: *num_failures, + }; + send_prepare( + prepare_queue, + prepare::ToQueue::Enqueue { + priority, + pvf, + preparation_timeout: LENIENT_PREPARATION_TIMEOUT, + }, + ) + .await?; + } else { + let _ = result_tx.send(Err(ValidationError::from(error.clone()))); + } }, } } else { @@ -526,6 +559,7 @@ async fn handle_execute_pvf( ) .await?; + // Add an execution request that will wait to run after this prepare job has finished. awaiting_prepare.add(artifact_id, execution_timeout, params, result_tx); } @@ -546,10 +580,28 @@ async fn handle_heads_up( ArtifactState::Prepared { last_time_needed, .. } => { *last_time_needed = now; }, - ArtifactState::Preparing { waiting_for_response: _ } => { + ArtifactState::Preparing { .. } => { // The artifact is already being prepared, so we don't need to do anything. }, - ArtifactState::FailedToProcess(_) => {}, + ArtifactState::FailedToProcess { last_time_failed, num_failures, error } => { + if can_retry_prepare_after_failure(*last_time_failed, *num_failures, error) { + // If we are allowed to retry the failed prepare job, change the state to + // Preparing and re-queue this job. + *state = ArtifactState::Preparing { + waiting_for_response: vec![], + num_failures: *num_failures, + }; + send_prepare( + prepare_queue, + prepare::ToQueue::Enqueue { + priority: Priority::Normal, + pvf: active_pvf, + preparation_timeout: LENIENT_PREPARATION_TIMEOUT, + }, + ) + .await?; + } + }, } } else { // It's not in the artifacts, so we need to enqueue a job to prepare it. @@ -599,20 +651,26 @@ async fn handle_prepare_done( never!("the artifact is already prepared: {:?}", artifact_id); return Ok(()) }, - Some(ArtifactState::FailedToProcess(_)) => { + Some(ArtifactState::FailedToProcess { .. }) => { // The reasoning is similar to the above, the artifact cannot be // processed at this point. never!("the artifact is already processed unsuccessfully: {:?}", artifact_id); return Ok(()) }, - Some(state @ ArtifactState::Preparing { waiting_for_response: _ }) => state, + Some(state @ ArtifactState::Preparing { .. }) => state, }; - if let ArtifactState::Preparing { waiting_for_response } = state { + let num_failures = if let ArtifactState::Preparing { waiting_for_response, num_failures } = + state + { for result_sender in waiting_for_response.drain(..) { let _ = result_sender.send(result.clone()); } - } + num_failures + } else { + never!("The reasoning is similar to the above, the artifact can only be preparing at this point; qed"); + return Ok(()) + }; // It's finally time to dispatch all the execution requests that were waiting for this artifact // to be prepared. 
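Illustrative sketch, not part of this diff: the "drain and notify" step that `handle_prepare_done` performs for pre-check waiters, isolated into a hypothetical helper. Send errors are ignored because a dropped receiver only means the requester went away.

use futures::channel::oneshot;

// Hand every waiting sender its own clone of the (cheaply clonable) prepare result.
fn notify_waiters<R: Clone>(waiting: &mut Vec<oneshot::Sender<R>>, result: &R) {
    for tx in waiting.drain(..) {
        let _ = tx.send(result.clone());
    }
}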
@@ -644,7 +702,11 @@ async fn handle_prepare_done( *state = match result { Ok(()) => ArtifactState::Prepared { last_time_needed: SystemTime::now() }, - Err(error) => ArtifactState::FailedToProcess(error.clone()), + Err(error) => ArtifactState::FailedToProcess { + last_time_failed: SystemTime::now(), + num_failures: *num_failures + 1, + error: error.clone(), + }, }; Ok(()) @@ -707,6 +769,24 @@ async fn sweeper_task(mut sweeper_rx: mpsc::Receiver) { } } +/// Check if the conditions to retry a prepare job have been met. +fn can_retry_prepare_after_failure( + last_time_failed: SystemTime, + num_failures: u32, + error: &PrepareError, +) -> bool { + use PrepareError::*; + match error { + // Gracefully returned an error, so it will probably be reproducible. Don't retry. + Prevalidation(_) | Preparation(_) => false, + // Retry if the retry cooldown has elapsed and if we have already retried less than + // `NUM_PREPARE_RETRIES` times. + Panic(_) | TimedOut | DidNotMakeIt => + SystemTime::now() >= last_time_failed + PREPARE_FAILURE_COOLDOWN && + num_failures <= NUM_PREPARE_RETRIES, + } +} + /// A stream that yields a pulse continuously at a given interval. fn pulse_every(interval: std::time::Duration) -> impl futures::Stream { futures::stream::unfold(interval, { @@ -834,6 +914,25 @@ mod tests { .await } + async fn poll_ensure_to_prepare_queue_is_empty(&mut self) { + use futures_timer::Delay; + + let to_prepare_queue_rx = &mut self.to_prepare_queue_rx; + run_until( + &mut self.run, + async { + futures::select! { + _ = Delay::new(Duration::from_millis(500)).fuse() => (), + _ = to_prepare_queue_rx.next().fuse() => { + panic!("the prepare queue is supposed to be empty") + } + } + } + .boxed(), + ) + .await + } + async fn poll_ensure_to_execute_queue_is_empty(&mut self) { use futures_timer::Delay; @@ -844,7 +943,7 @@ mod tests { futures::select! { _ = Delay::new(Duration::from_millis(500)).fuse() => (), _ = to_execute_queue_rx.next().fuse() => { - panic!("the execute queue supposed to be empty") + panic!("the execute queue is supposed to be empty") } } } @@ -1168,6 +1267,228 @@ mod tests { } } + // Test that multiple prechecking requests do not trigger preparation retries if the first one + // failed. + #[async_std::test] + async fn test_precheck_prepare_retry() { + let mut test = Builder::default().build(); + let mut host = test.host_handle(); + + // Submit a precheck request that fails. + let (result_tx, _result_rx) = oneshot::channel(); + host.precheck_pvf(Pvf::from_discriminator(1), result_tx).await.unwrap(); + + // The queue received the prepare request. + assert_matches!( + test.poll_and_recv_to_prepare_queue().await, + prepare::ToQueue::Enqueue { .. } + ); + // Send a PrepareError. + test.from_prepare_queue_tx + .send(prepare::FromQueue { + artifact_id: artifact_id(1), + result: Err(PrepareError::TimedOut), + }) + .await + .unwrap(); + + // Submit another precheck request. + let (result_tx_2, _result_rx_2) = oneshot::channel(); + host.precheck_pvf(Pvf::from_discriminator(1), result_tx_2).await.unwrap(); + + // Assert the prepare queue is empty. + test.poll_ensure_to_prepare_queue_is_empty().await; + + // Pause for enough time to reset the cooldown for this failed prepare request. + futures_timer::Delay::new(PREPARE_FAILURE_COOLDOWN).await; + + // Submit another precheck request. + let (result_tx_3, _result_rx_3) = oneshot::channel(); + host.precheck_pvf(Pvf::from_discriminator(1), result_tx_3).await.unwrap(); + + // Assert the prepare queue is empty - we do not retry for precheck requests. 
+ test.poll_ensure_to_prepare_queue_is_empty().await; + } + + // Test that multiple execution requests trigger preparation retries if the first one failed due + // to a potentially non-reproducible error. + #[async_std::test] + async fn test_execute_prepare_retry() { + let mut test = Builder::default().build(); + let mut host = test.host_handle(); + + // Submit a execute request that fails. + let (result_tx, _result_rx) = oneshot::channel(); + host.execute_pvf( + Pvf::from_discriminator(1), + TEST_EXECUTION_TIMEOUT, + b"pvf".to_vec(), + Priority::Critical, + result_tx, + ) + .await + .unwrap(); + + // The queue received the prepare request. + assert_matches!( + test.poll_and_recv_to_prepare_queue().await, + prepare::ToQueue::Enqueue { .. } + ); + // Send a PrepareError. + test.from_prepare_queue_tx + .send(prepare::FromQueue { + artifact_id: artifact_id(1), + result: Err(PrepareError::TimedOut), + }) + .await + .unwrap(); + + // Submit another execute request. + let (result_tx_2, _result_rx_2) = oneshot::channel(); + host.execute_pvf( + Pvf::from_discriminator(1), + TEST_EXECUTION_TIMEOUT, + b"pvf".to_vec(), + Priority::Critical, + result_tx_2, + ) + .await + .unwrap(); + + // Assert the prepare queue is empty. + test.poll_ensure_to_prepare_queue_is_empty().await; + + // Pause for enough time to reset the cooldown for this failed prepare request. + futures_timer::Delay::new(PREPARE_FAILURE_COOLDOWN).await; + + // Submit another execute request. + let (result_tx_3, _result_rx_3) = oneshot::channel(); + host.execute_pvf( + Pvf::from_discriminator(1), + TEST_EXECUTION_TIMEOUT, + b"pvf".to_vec(), + Priority::Critical, + result_tx_3, + ) + .await + .unwrap(); + + // Assert the prepare queue contains the request. + assert_matches!( + test.poll_and_recv_to_prepare_queue().await, + prepare::ToQueue::Enqueue { .. } + ); + } + + // Test that multiple execution requests don't trigger preparation retries if the first one + // failed due to reproducible error (e.g. Prevalidation). + #[async_std::test] + async fn test_execute_prepare_no_retry() { + let mut test = Builder::default().build(); + let mut host = test.host_handle(); + + // Submit a execute request that fails. + let (result_tx, _result_rx) = oneshot::channel(); + host.execute_pvf( + Pvf::from_discriminator(1), + TEST_EXECUTION_TIMEOUT, + b"pvf".to_vec(), + Priority::Critical, + result_tx, + ) + .await + .unwrap(); + + // The queue received the prepare request. + assert_matches!( + test.poll_and_recv_to_prepare_queue().await, + prepare::ToQueue::Enqueue { .. } + ); + // Send a PrepareError. + test.from_prepare_queue_tx + .send(prepare::FromQueue { + artifact_id: artifact_id(1), + result: Err(PrepareError::Prevalidation("reproducible error".into())), + }) + .await + .unwrap(); + + // Submit another execute request. + let (result_tx_2, _result_rx_2) = oneshot::channel(); + host.execute_pvf( + Pvf::from_discriminator(1), + TEST_EXECUTION_TIMEOUT, + b"pvf".to_vec(), + Priority::Critical, + result_tx_2, + ) + .await + .unwrap(); + + // Assert the prepare queue is empty. + test.poll_ensure_to_prepare_queue_is_empty().await; + + // Pause for enough time to reset the cooldown for this failed prepare request. + futures_timer::Delay::new(PREPARE_FAILURE_COOLDOWN).await; + + // Submit another execute request. 
+ let (result_tx_3, _result_rx_3) = oneshot::channel(); + host.execute_pvf( + Pvf::from_discriminator(1), + TEST_EXECUTION_TIMEOUT, + b"pvf".to_vec(), + Priority::Critical, + result_tx_3, + ) + .await + .unwrap(); + + // Assert the prepare queue is empty - we do not retry for prevalidation errors. + test.poll_ensure_to_prepare_queue_is_empty().await; + } + + // Test that multiple heads-up requests trigger preparation retries if the first one failed. + #[async_std::test] + async fn test_heads_up_prepare_retry() { + let mut test = Builder::default().build(); + let mut host = test.host_handle(); + + // Submit a heads-up request that fails. + host.heads_up(vec![Pvf::from_discriminator(1)]).await.unwrap(); + + // The queue received the prepare request. + assert_matches!( + test.poll_and_recv_to_prepare_queue().await, + prepare::ToQueue::Enqueue { .. } + ); + // Send a PrepareError. + test.from_prepare_queue_tx + .send(prepare::FromQueue { + artifact_id: artifact_id(1), + result: Err(PrepareError::TimedOut), + }) + .await + .unwrap(); + + // Submit another heads-up request. + host.heads_up(vec![Pvf::from_discriminator(1)]).await.unwrap(); + + // Assert the prepare queue is empty. + test.poll_ensure_to_prepare_queue_is_empty().await; + + // Pause for enough time to reset the cooldown for this failed prepare request. + futures_timer::Delay::new(PREPARE_FAILURE_COOLDOWN).await; + + // Submit another heads-up request. + host.heads_up(vec![Pvf::from_discriminator(1)]).await.unwrap(); + + // Assert the prepare queue contains the request. + assert_matches!( + test.poll_and_recv_to_prepare_queue().await, + prepare::ToQueue::Enqueue { .. } + ); + } + #[async_std::test] async fn cancellation() { let mut test = Builder::default().build(); diff --git a/node/service/Cargo.toml b/node/service/Cargo.toml index 82109f2e6ce4..e6e073546a13 100644 --- a/node/service/Cargo.toml +++ b/node/service/Cargo.toml @@ -71,7 +71,8 @@ serde_json = "1.0.81" thiserror = "1.0.31" kvdb = "0.12.0" kvdb-rocksdb = { version = "0.16.0", optional = true } -parity-db = { version = "0.3.16", optional = true } +parity-db = { version = "0.4.2", optional = true } + async-trait = "0.1.57" lru = "0.8" diff --git a/node/service/src/lib.rs b/node/service/src/lib.rs index 3619d05c7592..18218a8aba8e 100644 --- a/node/service/src/lib.rs +++ b/node/service/src/lib.rs @@ -943,7 +943,8 @@ where let parachains_db = open_database(&config.database)?; let approval_voting_config = ApprovalVotingConfig { - col_data: parachains_db::REAL_COLUMNS.col_approval_data, + col_approval_data: parachains_db::REAL_COLUMNS.col_approval_data, + col_session_data: parachains_db::REAL_COLUMNS.col_session_window_data, slot_duration_millis: slot_duration.as_millis() as u64, }; @@ -966,7 +967,8 @@ where }; let dispute_coordinator_config = DisputeCoordinatorConfig { - col_data: parachains_db::REAL_COLUMNS.col_dispute_coordinator_data, + col_dispute_data: parachains_db::REAL_COLUMNS.col_dispute_coordinator_data, + col_session_data: parachains_db::REAL_COLUMNS.col_session_window_data, }; let rpc_handlers = service::spawn_tasks(service::SpawnTasksParams { @@ -1537,7 +1539,8 @@ fn revert_chain_selection(db: Arc, hash: Hash) -> sp_blockchain::R fn revert_approval_voting(db: Arc, hash: Hash) -> sp_blockchain::Result<()> { let config = approval_voting_subsystem::Config { - col_data: parachains_db::REAL_COLUMNS.col_approval_data, + col_approval_data: parachains_db::REAL_COLUMNS.col_approval_data, + col_session_data: parachains_db::REAL_COLUMNS.col_session_window_data, 
slot_duration_millis: Default::default(), }; diff --git a/node/service/src/parachains_db/mod.rs b/node/service/src/parachains_db/mod.rs index de12a8ac1a32..74e7e13dc657 100644 --- a/node/service/src/parachains_db/mod.rs +++ b/node/service/src/parachains_db/mod.rs @@ -23,6 +23,7 @@ mod upgrade; const LOG_TARGET: &str = "parachain::db"; +/// Column configuration per version. #[cfg(any(test, feature = "full-node"))] pub(crate) mod columns { pub mod v0 { @@ -31,12 +32,17 @@ pub(crate) mod columns { pub mod v1 { pub const NUM_COLUMNS: u32 = 5; + } + pub mod v2 { + pub const NUM_COLUMNS: u32 = 6; pub const COL_AVAILABILITY_DATA: u32 = 0; pub const COL_AVAILABILITY_META: u32 = 1; pub const COL_APPROVAL_DATA: u32 = 2; pub const COL_CHAIN_SELECTION_DATA: u32 = 3; pub const COL_DISPUTE_COORDINATOR_DATA: u32 = 4; + pub const COL_SESSION_WINDOW_DATA: u32 = 5; + pub const ORDERED_COL: &[u32] = &[COL_AVAILABILITY_META, COL_CHAIN_SELECTION_DATA, COL_DISPUTE_COORDINATOR_DATA]; } @@ -56,16 +62,19 @@ pub struct ColumnsConfig { pub col_chain_selection_data: u32, /// The column used by dispute coordinator for data. pub col_dispute_coordinator_data: u32, + /// The column used for session window data. + pub col_session_window_data: u32, } /// The real columns used by the parachains DB. #[cfg(any(test, feature = "full-node"))] pub const REAL_COLUMNS: ColumnsConfig = ColumnsConfig { - col_availability_data: columns::v1::COL_AVAILABILITY_DATA, - col_availability_meta: columns::v1::COL_AVAILABILITY_META, - col_approval_data: columns::v1::COL_APPROVAL_DATA, - col_chain_selection_data: columns::v1::COL_CHAIN_SELECTION_DATA, - col_dispute_coordinator_data: columns::v1::COL_DISPUTE_COORDINATOR_DATA, + col_availability_data: columns::v2::COL_AVAILABILITY_DATA, + col_availability_meta: columns::v2::COL_AVAILABILITY_META, + col_approval_data: columns::v2::COL_APPROVAL_DATA, + col_chain_selection_data: columns::v2::COL_CHAIN_SELECTION_DATA, + col_dispute_coordinator_data: columns::v2::COL_DISPUTE_COORDINATOR_DATA, + col_session_window_data: columns::v2::COL_SESSION_WINDOW_DATA, }; #[derive(PartialEq)] @@ -83,11 +92,18 @@ pub struct CacheSizes { pub availability_meta: usize, /// Cache used by approval data. 
pub approval_data: usize, + /// Cache used by session window data + pub session_data: usize, } impl Default for CacheSizes { fn default() -> Self { - CacheSizes { availability_data: 25, availability_meta: 1, approval_data: 5 } + CacheSizes { + availability_data: 25, + availability_meta: 1, + approval_data: 5, + session_data: 1, + } } } @@ -106,17 +122,20 @@ pub fn open_creating_rocksdb( let path = root.join("parachains").join("db"); - let mut db_config = DatabaseConfig::with_columns(columns::v1::NUM_COLUMNS); + let mut db_config = DatabaseConfig::with_columns(columns::v2::NUM_COLUMNS); let _ = db_config .memory_budget - .insert(columns::v1::COL_AVAILABILITY_DATA, cache_sizes.availability_data); + .insert(columns::v2::COL_AVAILABILITY_DATA, cache_sizes.availability_data); + let _ = db_config + .memory_budget + .insert(columns::v2::COL_AVAILABILITY_META, cache_sizes.availability_meta); let _ = db_config .memory_budget - .insert(columns::v1::COL_AVAILABILITY_META, cache_sizes.availability_meta); + .insert(columns::v2::COL_APPROVAL_DATA, cache_sizes.approval_data); let _ = db_config .memory_budget - .insert(columns::v1::COL_APPROVAL_DATA, cache_sizes.approval_data); + .insert(columns::v2::COL_SESSION_WINDOW_DATA, cache_sizes.session_data); let path_str = path .to_str() @@ -127,7 +146,7 @@ pub fn open_creating_rocksdb( let db = Database::open(&db_config, &path_str)?; let db = polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter::new( db, - columns::v1::ORDERED_COL, + columns::v2::ORDERED_COL, ); Ok(Arc::new(db)) @@ -147,12 +166,12 @@ pub fn open_creating_paritydb( std::fs::create_dir_all(&path_str)?; upgrade::try_upgrade_db(&path, DatabaseKind::ParityDB)?; - let db = parity_db::Db::open_or_create(&upgrade::paritydb_version_1_config(&path)) + let db = parity_db::Db::open_or_create(&upgrade::paritydb_version_2_config(&path)) .map_err(|err| io::Error::new(io::ErrorKind::Other, format!("{:?}", err)))?; let db = polkadot_node_subsystem_util::database::paritydb_impl::DbAdapter::new( db, - columns::v1::ORDERED_COL, + columns::v2::ORDERED_COL, ); Ok(Arc::new(db)) } diff --git a/node/service/src/parachains_db/upgrade.rs b/node/service/src/parachains_db/upgrade.rs index 73321ae04c09..01d4fb62f7f6 100644 --- a/node/service/src/parachains_db/upgrade.rs +++ b/node/service/src/parachains_db/upgrade.rs @@ -28,7 +28,7 @@ type Version = u32; const VERSION_FILE_NAME: &'static str = "parachain_db_version"; /// Current db version. -const CURRENT_VERSION: Version = 1; +const CURRENT_VERSION: Version = 2; #[derive(thiserror::Error, Debug)] pub enum Error { @@ -36,7 +36,7 @@ pub enum Error { Io(#[from] io::Error), #[error("The version file format is incorrect")] CorruptedVersionFile, - #[error("Future version (expected {current:?}, found {got:?})")] + #[error("Parachains DB has a future version (expected {current:?}, found {got:?})")] FutureVersion { current: Version, got: Version }, } @@ -56,6 +56,8 @@ pub(crate) fn try_upgrade_db(db_path: &Path, db_kind: DatabaseKind) -> Result<() match get_db_version(db_path)? { // 0 -> 1 migration Some(0) => migrate_from_version_0_to_1(db_path, db_kind)?, + // 1 -> 2 migration + Some(1) => migrate_from_version_1_to_2(db_path, db_kind)?, // Already at current version, do nothing. Some(CURRENT_VERSION) => (), // This is an arbitrary future version, we don't handle it. 
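Illustrative sketch, not part of this diff: the version-dispatch shape that `try_upgrade_db` follows after this change, with the migration bodies stubbed out. The `None` arm for a fresh database, the error type and the function name are assumptions made for a self-contained example; the version numbers match the diff.

type Version = u32;
const CURRENT_VERSION: Version = 2;

#[derive(Debug)]
enum UpgradeError {
    FutureVersion { current: Version, got: Version },
}

fn dispatch_upgrade(db_version: Option<Version>) -> Result<(), UpgradeError> {
    match db_version {
        // Assumed: a fresh database has no version file and needs no migration.
        None => (),
        // 0 -> 1 migration.
        Some(0) => { /* migrate_from_version_0_to_1(...) */ },
        // 1 -> 2 migration (adds the session window column).
        Some(1) => { /* migrate_from_version_1_to_2(...) */ },
        // Already at the current version, nothing to do.
        Some(CURRENT_VERSION) => (),
        // An arbitrary future version we do not handle.
        Some(got) => return Err(UpgradeError::FutureVersion { current: CURRENT_VERSION, got }),
    }
    Ok(())
}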
@@ -112,6 +114,19 @@ fn migrate_from_version_0_to_1(path: &Path, db_kind: DatabaseKind) -> Result<(), }) } +fn migrate_from_version_1_to_2(path: &Path, db_kind: DatabaseKind) -> Result<(), Error> { + gum::info!(target: LOG_TARGET, "Migrating parachains db from version 1 to version 2 ..."); + + match db_kind { + DatabaseKind::ParityDB => paritydb_migrate_from_version_1_to_2(path), + DatabaseKind::RocksDB => rocksdb_migrate_from_version_1_to_2(path), + } + .and_then(|result| { + gum::info!(target: LOG_TARGET, "Migration complete! "); + Ok(result) + }) +} + /// Migration from version 0 to version 1: /// * the number of columns has changed from 3 to 5; fn rocksdb_migrate_from_version_0_to_1(path: &Path) -> Result<(), Error> { @@ -129,6 +144,22 @@ fn rocksdb_migrate_from_version_0_to_1(path: &Path) -> Result<(), Error> { Ok(()) } +/// Migration from version 1 to version 2: +/// * the number of columns has changed from 5 to 6; +fn rocksdb_migrate_from_version_1_to_2(path: &Path) -> Result<(), Error> { + use kvdb_rocksdb::{Database, DatabaseConfig}; + + let db_path = path + .to_str() + .ok_or_else(|| super::other_io_error("Invalid database path".into()))?; + let db_cfg = DatabaseConfig::with_columns(super::columns::v1::NUM_COLUMNS); + let mut db = Database::open(&db_cfg, db_path)?; + + db.add_column()?; + + Ok(()) +} + // This currently clears columns which had their configs altered between versions. // The columns to be changed are constrained by the `allowed_columns` vector. fn paritydb_fix_columns( @@ -190,7 +221,18 @@ fn paritydb_fix_columns( pub(crate) fn paritydb_version_1_config(path: &Path) -> parity_db::Options { let mut options = parity_db::Options::with_columns(&path, super::columns::v1::NUM_COLUMNS as u8); - for i in columns::v1::ORDERED_COL { + for i in columns::v2::ORDERED_COL { + options.columns[*i as usize].btree_index = true; + } + + options +} + +/// Database configuration for version 2. +pub(crate) fn paritydb_version_2_config(path: &Path) -> parity_db::Options { + let mut options = + parity_db::Options::with_columns(&path, super::columns::v2::NUM_COLUMNS as u8); + for i in columns::v2::ORDERED_COL { options.columns[*i as usize].btree_index = true; } @@ -202,8 +244,8 @@ pub(crate) fn paritydb_version_1_config(path: &Path) -> parity_db::Options { pub(crate) fn paritydb_version_0_config(path: &Path) -> parity_db::Options { let mut options = parity_db::Options::with_columns(&path, super::columns::v1::NUM_COLUMNS as u8); - options.columns[super::columns::v1::COL_AVAILABILITY_META as usize].btree_index = true; - options.columns[super::columns::v1::COL_CHAIN_SELECTION_DATA as usize].btree_index = true; + options.columns[super::columns::v2::COL_AVAILABILITY_META as usize].btree_index = true; + options.columns[super::columns::v2::COL_CHAIN_SELECTION_DATA as usize].btree_index = true; options } @@ -218,17 +260,30 @@ fn paritydb_migrate_from_version_0_to_1(path: &Path) -> Result<(), Error> { paritydb_fix_columns( path, paritydb_version_1_config(path), - vec![super::columns::v1::COL_DISPUTE_COORDINATOR_DATA], + vec![super::columns::v2::COL_DISPUTE_COORDINATOR_DATA], )?; Ok(()) } +/// Migration from version 1 to version 2: +/// - add a new column for session information storage +fn paritydb_migrate_from_version_1_to_2(path: &Path) -> Result<(), Error> { + let mut options = paritydb_version_1_config(path); + + // Adds the session info column. 
+ parity_db::Db::add_column(&mut options, Default::default()) + .map_err(|e| other_io_error(format!("Error adding column {:?}", e)))?; + + Ok(()) +} + #[cfg(test)] mod tests { + use super::{columns::v2::*, *}; + #[test] - fn test_paritydb_migrate_0_1() { - use super::{columns::v1::*, *}; + fn test_paritydb_migrate_0_to_1() { use parity_db::Db; let db_dir = tempfile::tempdir().unwrap(); @@ -246,13 +301,119 @@ mod tests { try_upgrade_db(&path, DatabaseKind::ParityDB).unwrap(); let db = Db::open(&paritydb_version_1_config(&path)).unwrap(); + assert_eq!(db.get(COL_DISPUTE_COORDINATOR_DATA as u8, b"1234").unwrap(), None); assert_eq!( - db.get(super::columns::v1::COL_DISPUTE_COORDINATOR_DATA as u8, b"1234").unwrap(), - None + db.get(COL_AVAILABILITY_META as u8, b"5678").unwrap(), + Some("somevalue".as_bytes().to_vec()) ); + } + + #[test] + fn test_paritydb_migrate_1_to_2() { + use parity_db::Db; + + let db_dir = tempfile::tempdir().unwrap(); + let path = db_dir.path(); + + // We need to properly set db version for upgrade to work. + fs::write(version_file_path(path), "1").expect("Failed to write DB version"); + + { + let db = Db::open_or_create(&paritydb_version_1_config(&path)).unwrap(); + + // Write some dummy data + db.commit(vec![( + COL_DISPUTE_COORDINATOR_DATA as u8, + b"1234".to_vec(), + Some(b"somevalue".to_vec()), + )]) + .unwrap(); + + assert_eq!(db.num_columns(), columns::v1::NUM_COLUMNS as u8); + } + + try_upgrade_db(&path, DatabaseKind::ParityDB).unwrap(); + + let db = Db::open(&paritydb_version_2_config(&path)).unwrap(); + + assert_eq!(db.num_columns(), columns::v2::NUM_COLUMNS as u8); + assert_eq!( - db.get(super::columns::v1::COL_AVAILABILITY_META as u8, b"5678").unwrap(), + db.get(COL_DISPUTE_COORDINATOR_DATA as u8, b"1234").unwrap(), Some("somevalue".as_bytes().to_vec()) ); + + // Test we can write the new column. + db.commit(vec![( + COL_SESSION_WINDOW_DATA as u8, + b"1337".to_vec(), + Some(b"0xdeadb00b".to_vec()), + )]) + .unwrap(); + + // Read back data from new column. + assert_eq!( + db.get(COL_SESSION_WINDOW_DATA as u8, b"1337").unwrap(), + Some("0xdeadb00b".as_bytes().to_vec()) + ); + } + + #[test] + fn test_rocksdb_migrate_1_to_2() { + use kvdb::{DBKey, DBOp}; + use kvdb_rocksdb::{Database, DatabaseConfig}; + use polkadot_node_subsystem_util::database::{ + kvdb_impl::DbAdapter, DBTransaction, KeyValueDB, + }; + + let db_dir = tempfile::tempdir().unwrap(); + let db_path = db_dir.path().to_str().unwrap(); + let db_cfg = DatabaseConfig::with_columns(super::columns::v1::NUM_COLUMNS); + let db = Database::open(&db_cfg, db_path).unwrap(); + assert_eq!(db.num_columns(), super::columns::v1::NUM_COLUMNS as u32); + + // We need to properly set db version for upgrade to work. 
+ fs::write(version_file_path(db_dir.path()), "1").expect("Failed to write DB version"); + { + let db = DbAdapter::new(db, columns::v2::ORDERED_COL); + db.write(DBTransaction { + ops: vec![DBOp::Insert { + col: COL_DISPUTE_COORDINATOR_DATA, + key: DBKey::from_slice(b"1234"), + value: b"0xdeadb00b".to_vec(), + }], + }) + .unwrap(); + } + + try_upgrade_db(&db_dir.path(), DatabaseKind::RocksDB).unwrap(); + + let db_cfg = DatabaseConfig::with_columns(super::columns::v2::NUM_COLUMNS); + let db = Database::open(&db_cfg, db_path).unwrap(); + + assert_eq!(db.num_columns(), super::columns::v2::NUM_COLUMNS); + + let db = DbAdapter::new(db, columns::v2::ORDERED_COL); + + assert_eq!( + db.get(COL_DISPUTE_COORDINATOR_DATA, b"1234").unwrap(), + Some("0xdeadb00b".as_bytes().to_vec()) + ); + + // Test we can write the new column. + db.write(DBTransaction { + ops: vec![DBOp::Insert { + col: COL_SESSION_WINDOW_DATA, + key: DBKey::from_slice(b"1337"), + value: b"0xdeadb00b".to_vec(), + }], + }) + .unwrap(); + + // Read back data from new column. + assert_eq!( + db.get(COL_SESSION_WINDOW_DATA, b"1337").unwrap(), + Some("0xdeadb00b".as_bytes().to_vec()) + ); } } diff --git a/node/subsystem-util/Cargo.toml b/node/subsystem-util/Cargo.toml index ab886c0c4078..d390fd2b42cc 100644 --- a/node/subsystem-util/Cargo.toml +++ b/node/subsystem-util/Cargo.toml @@ -34,7 +34,7 @@ sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "maste kvdb = "0.12.0" parity-util-mem = { version = "0.12.0", default-features = false } -parity-db = { version = "0.3.13" } +parity-db = { version = "0.4.2"} [dev-dependencies] assert_matches = "1.4.0" @@ -46,3 +46,4 @@ lazy_static = "1.4.0" polkadot-primitives-test-helpers = { path = "../../primitives/test-helpers" } kvdb-shared-tests = "0.10.0" tempfile = "3.1.0" +kvdb-memorydb = "0.12.0" diff --git a/node/subsystem-util/src/rolling_session_window.rs b/node/subsystem-util/src/rolling_session_window.rs index 700feb2ccff8..beac31292b7d 100644 --- a/node/subsystem-util/src/rolling_session_window.rs +++ b/node/subsystem-util/src/rolling_session_window.rs @@ -19,8 +19,13 @@ //! This is useful for consensus components which need to stay up-to-date about recent sessions but don't //! care about the state of particular blocks. +use super::database::{DBTransaction, Database}; +use kvdb::{DBKey, DBOp}; + +use parity_scale_codec::{Decode, Encode}; pub use polkadot_node_primitives::{new_session_window_size, SessionWindowSize}; use polkadot_primitives::v2::{BlockNumber, Hash, SessionIndex, SessionInfo}; +use std::sync::Arc; use futures::channel::oneshot; use polkadot_node_subsystem::{ @@ -29,7 +34,11 @@ use polkadot_node_subsystem::{ overseer, }; +// The window size is equal to the `approval-voting` and `dispute-coordinator` constants that +// have been obsoleted. +const SESSION_WINDOW_SIZE: SessionWindowSize = new_session_window_size!(6); const LOG_TARGET: &str = "parachain::rolling-session-window"; +const STORED_ROLLING_SESSION_WINDOW: &[u8] = b"Rolling_session_window"; /// Sessions unavailable in state to cache. #[derive(Debug, Clone, thiserror::Error)] @@ -94,55 +103,176 @@ pub enum SessionWindowUpdate { Unchanged, } +/// A structure to store rolling session database parameters. +#[derive(Clone)] +pub struct DatabaseParams { + /// Database reference. + pub db: Arc, + /// The column which stores the rolling session info. + pub db_column: u32, +} /// A rolling window of sessions and cached session info. 
pub struct RollingSessionWindow { earliest_session: SessionIndex, session_info: Vec, window_size: SessionWindowSize, + // The option is just to enable some approval-voting tests to force feed sessions + // in the window without dealing with the DB. + db_params: Option, +} + +/// The rolling session data we persist in the database. +#[derive(Encode, Decode, Default)] +struct StoredWindow { + earliest_session: SessionIndex, + session_info: Vec, } impl RollingSessionWindow { /// Initialize a new session info cache with the given window size. + /// Invariant: The database always contains the earliest session. Then, + /// we can always extend the session info vector using chain state. pub async fn new( mut sender: Sender, - window_size: SessionWindowSize, block_hash: Hash, + db_params: DatabaseParams, ) -> Result where Sender: overseer::SubsystemSender + overseer::SubsystemSender, { + // At first, determine session window start using the chain state. let session_index = get_session_index_for_child(&mut sender, block_hash).await?; let earliest_non_finalized_block_session = Self::earliest_non_finalized_block_session(&mut sender).await?; // This will increase the session window to cover the full unfinalized chain. - let window_start = std::cmp::min( - session_index.saturating_sub(window_size.get() - 1), + let on_chain_window_start = std::cmp::min( + session_index.saturating_sub(SESSION_WINDOW_SIZE.get() - 1), earliest_non_finalized_block_session, ); - match load_all_sessions(&mut sender, block_hash, window_start, session_index).await { - Err(kind) => Err(SessionsUnavailable { - kind, - info: Some(SessionsUnavailableInfo { - window_start, - window_end: session_index, - block_hash, + // Fetch session information from DB. + let maybe_stored_window = Self::db_load(db_params.clone()); + + // Get the DB stored sessions and recompute window start based on DB data. + let (mut window_start, stored_sessions) = + if let Some(mut stored_window) = maybe_stored_window { + // Check if DB is ancient. + if earliest_non_finalized_block_session > + stored_window.earliest_session + stored_window.session_info.len() as u32 + { + // If ancient, we scrap it and fetch from chain state. + stored_window.session_info.clear(); + } + + // The session window might extend beyond the last finalized block, but that's fine as we'll prune it at + // next update. + let window_start = if stored_window.session_info.len() > 0 { + // If there is at least one entry in db, we always take the DB as source of truth. + stored_window.earliest_session + } else { + on_chain_window_start + }; + + (window_start, stored_window.session_info) + } else { + (on_chain_window_start, Vec::new()) + }; + + // Compute the amount of sessions missing from the window that will be fetched from chain state. + let sessions_missing_count = session_index + .saturating_sub(window_start) + .saturating_add(1) + .saturating_sub(stored_sessions.len() as u32); + + // Extend from chain state. + let sessions = if sessions_missing_count > 0 { + match extend_sessions_from_chain_state( + stored_sessions, + &mut sender, + block_hash, + &mut window_start, + session_index, + ) + .await + { + Err(kind) => Err(SessionsUnavailable { + kind, + info: Some(SessionsUnavailableInfo { + window_start, + window_end: session_index, + block_hash, + }), }), - }), - Ok(s) => Ok(Self { earliest_session: window_start, session_info: s, window_size }), + Ok(sessions) => Ok(sessions), + }? + } else { + // There are no new sessions to be fetched from chain state. 
+ Vec::new() + }; + + Ok(Self { + earliest_session: window_start, + session_info: sessions, + window_size: SESSION_WINDOW_SIZE, + db_params: Some(db_params), + }) + } + + // Load session information from the parachains db. + fn db_load(db_params: DatabaseParams) -> Option { + match db_params.db.get(db_params.db_column, STORED_ROLLING_SESSION_WINDOW).ok()? { + None => None, + Some(raw) => { + let maybe_decoded = StoredWindow::decode(&mut &raw[..]).map(Some); + match maybe_decoded { + Ok(decoded) => decoded, + Err(err) => { + gum::warn!( + target: LOG_TARGET, + ?err, + "Failed decoding db entry; will start with onchain session infos and self-heal DB entry on next update." + ); + None + }, + } + }, + } + } + + // Saves/Updates all sessions in the database. + // TODO: https://github.com/paritytech/polkadot/issues/6144 + fn db_save(&mut self, stored_window: StoredWindow) { + if let Some(db_params) = self.db_params.as_ref() { + match db_params.db.write(DBTransaction { + ops: vec![DBOp::Insert { + col: db_params.db_column, + key: DBKey::from_slice(STORED_ROLLING_SESSION_WINDOW), + value: stored_window.encode(), + }], + }) { + Ok(_) => {}, + Err(err) => { + gum::warn!(target: LOG_TARGET, ?err, "Failed writing db entry"); + }, + } } } /// Initialize a new session info cache with the given window size and /// initial data. + /// This is only used in `approval voting` tests. pub fn with_session_info( - window_size: SessionWindowSize, earliest_session: SessionIndex, session_info: Vec, ) -> Self { - RollingSessionWindow { earliest_session, session_info, window_size } + RollingSessionWindow { + earliest_session, + session_info, + window_size: SESSION_WINDOW_SIZE, + db_params: None, + } } /// Access the session info for the given session index, if stored within the window. @@ -262,11 +392,6 @@ impl RollingSessionWindow { + overseer::SubsystemSender, { let session_index = get_session_index_for_child(sender, block_hash).await?; - let earliest_non_finalized_block_session = - Self::earliest_non_finalized_block_session(sender).await?; - - let old_window_start = self.earliest_session; - let latest = self.latest_session(); // Either cached or ancient. @@ -274,6 +399,10 @@ impl RollingSessionWindow { return Ok(SessionWindowUpdate::Unchanged) } + let earliest_non_finalized_block_session = + Self::earliest_non_finalized_block_session(sender).await?; + + let old_window_start = self.earliest_session; let old_window_end = latest; // Ensure we keep sessions up to last finalized block by adjusting the window start. @@ -283,16 +412,34 @@ impl RollingSessionWindow { earliest_non_finalized_block_session, ); - // keep some of the old window, if applicable. - let overlap_start = window_start.saturating_sub(old_window_start); + // Never look back past earliest session, since if sessions beyond were not needed or available + // in the past remains valid for the future (window only advances forward). + let mut window_start = std::cmp::max(window_start, self.earliest_session); + + let mut sessions = self.session_info.clone(); + let sessions_out_of_window = window_start.saturating_sub(old_window_start) as usize; - let fresh_start = if latest < window_start { window_start } else { latest + 1 }; + let sessions = if sessions_out_of_window < sessions.len() { + // Drop sessions based on how much the window advanced. + sessions.split_off((window_start as usize).saturating_sub(old_window_start as usize)) + } else { + // Window has jumped such that we need to fetch all sessions from on chain. 
+ Vec::new() + }; - match load_all_sessions(sender, block_hash, fresh_start, session_index).await { + match extend_sessions_from_chain_state( + sessions, + sender, + block_hash, + &mut window_start, + session_index, + ) + .await + { Err(kind) => Err(SessionsUnavailable { kind, info: Some(SessionsUnavailableInfo { - window_start: fresh_start, + window_start, window_end: session_index, block_hash, }), @@ -305,15 +452,19 @@ impl RollingSessionWindow { new_window_end: session_index, }; - let outdated = std::cmp::min(overlap_start as usize, self.session_info.len()); - self.session_info.drain(..outdated); - self.session_info.extend(s); + self.session_info = s; + // we need to account for this case: // window_start ................................... session_index // old_window_start ........... latest let new_earliest = std::cmp::max(window_start, old_window_start); self.earliest_session = new_earliest; + // Update current window in DB. + self.db_save(StoredWindow { + earliest_session: self.earliest_session, + session_info: self.session_info.clone(), + }); Ok(update) }, } @@ -354,13 +505,23 @@ async fn get_session_index_for_child( } } -async fn load_all_sessions( +/// Attempts to extend db stored sessions with sessions missing between `start` and up to `end_inclusive`. +/// Runtime session info fetching errors are ignored if that doesn't create a gap in the window. +async fn extend_sessions_from_chain_state( + stored_sessions: Vec, sender: &mut impl overseer::SubsystemSender, block_hash: Hash, - start: SessionIndex, + window_start: &mut SessionIndex, end_inclusive: SessionIndex, ) -> Result, SessionsUnavailableReason> { - let mut v = Vec::new(); + // Start from the db sessions. + let mut sessions = stored_sessions; + // We allow session fetch failures only if we won't create a gap in the window by doing so. + // If `allow_failure` is set to true here, fetching errors are ignored until we get a first session. + let mut allow_failure = sessions.is_empty(); + + let start = *window_start + sessions.len() as u32; + for i in start..=end_inclusive { let (tx, rx) = oneshot::channel(); sender @@ -370,22 +531,58 @@ async fn load_all_sessions( )) .await; - let session_info = match rx.await { - Ok(Ok(Some(s))) => s, - Ok(Ok(None)) => return Err(SessionsUnavailableReason::Missing(i)), - Ok(Err(e)) => return Err(SessionsUnavailableReason::RuntimeApi(e)), - Err(canceled) => return Err(SessionsUnavailableReason::RuntimeApiUnavailable(canceled)), + match rx.await { + Ok(Ok(Some(session_info))) => { + // We do not allow failure anymore after having at least 1 session in window. + allow_failure = false; + sessions.push(session_info); + }, + Ok(Ok(None)) if !allow_failure => return Err(SessionsUnavailableReason::Missing(i)), + Ok(Ok(None)) => { + // Handle `allow_failure` true. + // If we didn't get the session, we advance window start. + *window_start += 1; + gum::debug!( + target: LOG_TARGET, + session = ?i, + "Session info missing from runtime." + ); + }, + Ok(Err(e)) if !allow_failure => return Err(SessionsUnavailableReason::RuntimeApi(e)), + Err(canceled) if !allow_failure => + return Err(SessionsUnavailableReason::RuntimeApiUnavailable(canceled)), + Ok(Err(err)) => { + // Handle `allow_failure` true. + // If we didn't get the session, we advance window start. + *window_start += 1; + gum::debug!( + target: LOG_TARGET, + session = ?i, + ?err, + "Error while fetching session information." + ); + }, + Err(err) => { + // Handle `allow_failure` true. + // If we didn't get the session, we advance window start. 
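`extend_sessions_from_chain_state` only tolerates fetch failures while the window is still empty: leading misses shift `window_start` forward, but any miss after the first successful fetch would leave a gap in the window and is treated as an error. A pure-logic sketch of that rule, operating on pre-fetched `Option`s instead of live runtime-API calls:

```rust
// Illustrative only: the allow_failure rule from extend_sessions_from_chain_state,
// with u32s standing in for SessionInfo and an index returned as the error.
fn extend_window(
    mut sessions: Vec<u32>,  // sessions already in the window (DB or cache)
    window_start: &mut u32,
    fetched: &[Option<u32>], // one entry per session index being fetched
) -> Result<Vec<u32>, u32> {
    let mut allow_failure = sessions.is_empty();
    let start = *window_start + sessions.len() as u32;

    for (offset, fetch) in fetched.iter().enumerate() {
        let index = start + offset as u32;
        match fetch {
            Some(info) => {
                allow_failure = false;
                sessions.push(*info);
            },
            None if allow_failure => *window_start += 1, // skip leading misses
            None => return Err(index),                   // a gap inside the window is fatal
        }
    }
    Ok(sessions)
}

fn main() {
    let mut start = 0;
    // Sessions 0 and 1 are unavailable, 2..=4 are present: the window shrinks to 2..=4.
    let got = extend_window(Vec::new(), &mut start, &[None, None, Some(2), Some(3), Some(4)]);
    assert_eq!((start, got), (2, Ok(vec![2, 3, 4])));

    // With a non-empty window, any miss is an error for the offending index.
    let mut start = 2;
    let got = extend_window(vec![2, 3], &mut start, &[None]);
    assert_eq!(got, Err(4));
}
```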
+ *window_start += 1; + gum::debug!( + target: LOG_TARGET, + session = ?i, + ?err, + "Channel error while fetching session information." + ); + }, }; - - v.push(session_info); } - Ok(v) + Ok(sessions) } #[cfg(test)] mod tests { use super::*; + use crate::database::kvdb_impl::DbAdapter; use assert_matches::assert_matches; use polkadot_node_subsystem::{ messages::{AllMessages, AvailabilityRecoveryMessage}, @@ -395,7 +592,16 @@ mod tests { use polkadot_primitives::v2::Header; use sp_core::testing::TaskExecutor; - pub const TEST_WINDOW_SIZE: SessionWindowSize = new_session_window_size!(6); + const SESSION_DATA_COL: u32 = 0; + + const NUM_COLUMNS: u32 = 1; + + fn dummy_db_params() -> DatabaseParams { + let db = kvdb_memorydb::create(NUM_COLUMNS); + let db = DbAdapter::new(db, &[]); + let db: Arc = Arc::new(db); + DatabaseParams { db, db_column: SESSION_DATA_COL } + } fn dummy_session_info(index: SessionIndex) -> SessionInfo { SessionInfo { @@ -420,7 +626,10 @@ mod tests { session: SessionIndex, window: Option, expect_requests_from: SessionIndex, - ) { + db_params: Option, + ) -> RollingSessionWindow { + let db_params = db_params.unwrap_or(dummy_db_params()); + let header = Header { digest: Default::default(), extrinsics_root: Default::default(), @@ -448,9 +657,8 @@ mod tests { let test_fut = { Box::pin(async move { let window = match window { - None => RollingSessionWindow::new(sender.clone(), TEST_WINDOW_SIZE, hash) - .await - .unwrap(), + None => + RollingSessionWindow::new(sender.clone(), hash, db_params).await.unwrap(), Some(mut window) => { window.cache_session_info_for_head(sender, hash).await.unwrap(); window @@ -461,6 +669,8 @@ mod tests { window.session_info, (expected_start_session..=session).map(dummy_session_info).collect::>(), ); + + window }) }; @@ -522,12 +732,43 @@ mod tests { } }); - futures::executor::block_on(futures::future::join(test_fut, aux_fut)); + let (window, _) = futures::executor::block_on(futures::future::join(test_fut, aux_fut)); + window + } + + #[test] + fn cache_session_info_start_empty_db() { + let db_params = dummy_db_params(); + + let window = cache_session_info_test( + (10 as SessionIndex).saturating_sub(SESSION_WINDOW_SIZE.get() - 1), + 10, + None, + (10 as SessionIndex).saturating_sub(SESSION_WINDOW_SIZE.get() - 1), + Some(db_params.clone()), + ); + + let window = cache_session_info_test( + (11 as SessionIndex).saturating_sub(SESSION_WINDOW_SIZE.get() - 1), + 11, + Some(window), + 11, + None, + ); + assert_eq!(window.session_info.len(), SESSION_WINDOW_SIZE.get() as usize); + + cache_session_info_test( + (11 as SessionIndex).saturating_sub(SESSION_WINDOW_SIZE.get() - 1), + 12, + None, + 12, + Some(db_params), + ); } #[test] fn cache_session_info_first_early() { - cache_session_info_test(0, 1, None, 0); + cache_session_info_test(0, 1, None, 0, None); } #[test] @@ -535,19 +776,21 @@ mod tests { let window = RollingSessionWindow { earliest_session: 1, session_info: vec![dummy_session_info(1)], - window_size: TEST_WINDOW_SIZE, + window_size: SESSION_WINDOW_SIZE, + db_params: Some(dummy_db_params()), }; - cache_session_info_test(1, 2, Some(window), 2); + cache_session_info_test(1, 2, Some(window), 2, None); } #[test] fn cache_session_info_first_late() { cache_session_info_test( - (100 as SessionIndex).saturating_sub(TEST_WINDOW_SIZE.get() - 1), + (100 as SessionIndex).saturating_sub(SESSION_WINDOW_SIZE.get() - 1), 100, None, - (100 as SessionIndex).saturating_sub(TEST_WINDOW_SIZE.get() - 1), + (100 as SessionIndex).saturating_sub(SESSION_WINDOW_SIZE.get() - 1), 
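`dummy_db_params` backs the tests with an in-memory kvdb wrapped in `DbAdapter`. As a rough illustration of the underlying storage pattern — one column, one fixed key — here is a sketch using the `kvdb`/`kvdb-memorydb` crates directly; the key name and payload are made up, and the real code goes through Polkadot's `Database` abstraction and stores a SCALE-encoded `StoredWindow`:

```rust
// Illustrative only: the storage idea behind dummy_db_params, without DbAdapter.
use kvdb::KeyValueDB;

const SESSION_DATA_COL: u32 = 0;
const KEY: &[u8] = b"rolling_session_window"; // hypothetical key for this sketch

fn main() -> std::io::Result<()> {
    // One column is enough for the single session-window entry.
    let db = kvdb_memorydb::create(1);

    // Write a (fake) encoded window under a fixed key, as db_save does.
    let mut tx = db.transaction();
    tx.put(SESSION_DATA_COL, KEY, b"encoded-window-bytes");
    db.write(tx)?;

    // Read it back, as db_load does before decoding.
    let raw = db.get(SESSION_DATA_COL, KEY)?;
    assert_eq!(raw.as_deref(), Some(&b"encoded-window-bytes"[..]));
    Ok(())
}
```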
+ None, ); } @@ -560,48 +803,88 @@ mod tests { dummy_session_info(51), dummy_session_info(52), ], - window_size: TEST_WINDOW_SIZE, + window_size: SESSION_WINDOW_SIZE, + db_params: Some(dummy_db_params()), }; cache_session_info_test( - (100 as SessionIndex).saturating_sub(TEST_WINDOW_SIZE.get() - 1), + (100 as SessionIndex).saturating_sub(SESSION_WINDOW_SIZE.get() - 1), 100, Some(window), - (100 as SessionIndex).saturating_sub(TEST_WINDOW_SIZE.get() - 1), + (100 as SessionIndex).saturating_sub(SESSION_WINDOW_SIZE.get() - 1), + None, ); } #[test] fn cache_session_info_roll_full() { - let start = 99 - (TEST_WINDOW_SIZE.get() - 1); + let start = 99 - (SESSION_WINDOW_SIZE.get() - 1); let window = RollingSessionWindow { earliest_session: start, session_info: (start..=99).map(dummy_session_info).collect(), - window_size: TEST_WINDOW_SIZE, + window_size: SESSION_WINDOW_SIZE, + db_params: Some(dummy_db_params()), }; cache_session_info_test( - (100 as SessionIndex).saturating_sub(TEST_WINDOW_SIZE.get() - 1), + (100 as SessionIndex).saturating_sub(SESSION_WINDOW_SIZE.get() - 1), 100, Some(window), 100, // should only make one request. + None, + ); + } + + #[test] + fn cache_session_info_roll_many_full_db() { + let db_params = dummy_db_params(); + let start = 97 - (SESSION_WINDOW_SIZE.get() - 1); + let window = RollingSessionWindow { + earliest_session: start, + session_info: (start..=97).map(dummy_session_info).collect(), + window_size: SESSION_WINDOW_SIZE, + db_params: Some(db_params.clone()), + }; + + cache_session_info_test( + (100 as SessionIndex).saturating_sub(SESSION_WINDOW_SIZE.get() - 1), + 100, + Some(window), + 98, + None, + ); + + // We expect the session to be populated from DB, and only fetch 101 from on chain. + cache_session_info_test( + (100 as SessionIndex).saturating_sub(SESSION_WINDOW_SIZE.get() - 1), + 101, + None, + 101, + Some(db_params.clone()), ); + + // Session warps in the future. + let window = cache_session_info_test(195, 200, None, 195, Some(db_params)); + + assert_eq!(window.session_info.len(), SESSION_WINDOW_SIZE.get() as usize); } #[test] fn cache_session_info_roll_many_full() { - let start = 97 - (TEST_WINDOW_SIZE.get() - 1); + let start = 97 - (SESSION_WINDOW_SIZE.get() - 1); let window = RollingSessionWindow { earliest_session: start, session_info: (start..=97).map(dummy_session_info).collect(), - window_size: TEST_WINDOW_SIZE, + window_size: SESSION_WINDOW_SIZE, + db_params: Some(dummy_db_params()), }; cache_session_info_test( - (100 as SessionIndex).saturating_sub(TEST_WINDOW_SIZE.get() - 1), + (100 as SessionIndex).saturating_sub(SESSION_WINDOW_SIZE.get() - 1), 100, Some(window), 98, + None, ); } @@ -611,7 +894,8 @@ mod tests { let window = RollingSessionWindow { earliest_session: start, session_info: (0..=1).map(dummy_session_info).collect(), - window_size: TEST_WINDOW_SIZE, + window_size: SESSION_WINDOW_SIZE, + db_params: Some(dummy_db_params()), }; cache_session_info_test( @@ -619,6 +903,7 @@ mod tests { 2, Some(window), 2, // should only make one request. 
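The expected window start in these tests is `session.saturating_sub(SESSION_WINDOW_SIZE.get() - 1)`, which clamps at session 0 while the chain is younger than a full window. A tiny worked example, assuming the window size of 6 that the removed `TEST_WINDOW_SIZE` used:

```rust
// Illustrative only: how the expected window start behaves near genesis for a 6-session window.
fn expected_start(session: u32, window_size: u32) -> u32 {
    session.saturating_sub(window_size - 1)
}

fn main() {
    assert_eq!(expected_start(1, 6), 0);    // young chain: start is clamped at session 0
    assert_eq!(expected_start(5, 6), 0);    // still shorter than a full window
    assert_eq!(expected_start(100, 6), 95); // steady state: exactly sessions 95..=100
}
```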
+ None, ); } @@ -628,14 +913,17 @@ mod tests { let window = RollingSessionWindow { earliest_session: start, session_info: (0..=1).map(dummy_session_info).collect(), - window_size: TEST_WINDOW_SIZE, + window_size: SESSION_WINDOW_SIZE, + db_params: Some(dummy_db_params()), }; - cache_session_info_test(0, 3, Some(window), 2); + let actual_window_size = window.session_info.len() as u32; + + cache_session_info_test(0, 3, Some(window), actual_window_size, None); } #[test] - fn any_session_stretch_for_unfinalized_chain() { + fn cache_session_fails_for_gap_in_window() { // Session index of the tip of our fake test chain. let session: SessionIndex = 100; let genesis_session: SessionIndex = 0; @@ -664,7 +952,8 @@ mod tests { let test_fut = { let sender = ctx.sender().clone(); Box::pin(async move { - let res = RollingSessionWindow::new(sender, TEST_WINDOW_SIZE, hash).await; + let res = RollingSessionWindow::new(sender, hash, dummy_db_params()).await; + assert!(res.is_err()); }) }; @@ -713,6 +1002,135 @@ mod tests { ); // Unfinalized chain starts at geneisis block, so session 0 is how far we stretch. + // First 50 sessions are missing. + for i in genesis_session..=50 { + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + h, + RuntimeApiRequest::SessionInfo(j, s_tx), + )) => { + assert_eq!(h, hash); + assert_eq!(i, j); + let _ = s_tx.send(Ok(None)); + } + ); + } + // next 10 sessions are present + for i in 51..=60 { + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + h, + RuntimeApiRequest::SessionInfo(j, s_tx), + )) => { + assert_eq!(h, hash); + assert_eq!(i, j); + let _ = s_tx.send(Ok(Some(dummy_session_info(i)))); + } + ); + } + // gap of 1 session + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + h, + RuntimeApiRequest::SessionInfo(j, s_tx), + )) => { + assert_eq!(h, hash); + assert_eq!(61, j); + let _ = s_tx.send(Ok(None)); + } + ); + }); + + futures::executor::block_on(futures::future::join(test_fut, aux_fut)); + } + + #[test] + fn any_session_stretch_with_failure_allowed_for_unfinalized_chain() { + // Session index of the tip of our fake test chain. + let session: SessionIndex = 100; + let genesis_session: SessionIndex = 0; + + let header = Header { + digest: Default::default(), + extrinsics_root: Default::default(), + number: 5, + state_root: Default::default(), + parent_hash: Default::default(), + }; + + let finalized_header = Header { + digest: Default::default(), + extrinsics_root: Default::default(), + number: 0, + state_root: Default::default(), + parent_hash: Default::default(), + }; + + let pool = TaskExecutor::new(); + let (mut ctx, mut handle) = make_subsystem_context::<(), _>(pool.clone()); + + let hash = header.hash(); + + let test_fut = { + let sender = ctx.sender().clone(); + Box::pin(async move { + let res = RollingSessionWindow::new(sender, hash, dummy_db_params()).await; + assert!(res.is_ok()); + let rsw = res.unwrap(); + // Since first 50 sessions are missing the earliest should be 50. 
+ assert_eq!(rsw.earliest_session, 50); + assert_eq!(rsw.session_info.len(), 51); + }) + }; + + let aux_fut = Box::pin(async move { + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + h, + RuntimeApiRequest::SessionIndexForChild(s_tx), + )) => { + assert_eq!(h, hash); + let _ = s_tx.send(Ok(session)); + } + ); + + assert_matches!( + handle.recv().await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber( + s_tx, + )) => { + let _ = s_tx.send(Ok(finalized_header.number)); + } + ); + + assert_matches!( + handle.recv().await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockHash( + block_number, + s_tx, + )) => { + assert_eq!(block_number, finalized_header.number); + let _ = s_tx.send(Ok(Some(finalized_header.hash()))); + } + ); + + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + h, + RuntimeApiRequest::SessionIndexForChild(s_tx), + )) => { + assert_eq!(h, finalized_header.hash()); + let _ = s_tx.send(Ok(0)); + } + ); + + // Unfinalized chain starts at geneisis block, so session 0 is how far we stretch. + // We also test if failure is allowed for 50 first missing sessions. for i in genesis_session..=session { assert_matches!( handle.recv().await, @@ -723,7 +1141,7 @@ mod tests { assert_eq!(h, hash); assert_eq!(i, j); - let _ = s_tx.send(Ok(if i == session { + let _ = s_tx.send(Ok(if i < 50 { None } else { Some(dummy_session_info(i)) @@ -739,7 +1157,7 @@ mod tests { #[test] fn any_session_unavailable_for_caching_means_no_change() { let session: SessionIndex = 6; - let start_session = session.saturating_sub(TEST_WINDOW_SIZE.get() - 1); + let start_session = session.saturating_sub(SESSION_WINDOW_SIZE.get() - 1); let header = Header { digest: Default::default(), @@ -765,7 +1183,7 @@ mod tests { let test_fut = { let sender = ctx.sender().clone(); Box::pin(async move { - let res = RollingSessionWindow::new(sender, TEST_WINDOW_SIZE, hash).await; + let res = RollingSessionWindow::new(sender, hash, dummy_db_params()).await; assert!(res.is_err()); }) }; @@ -857,7 +1275,7 @@ mod tests { Box::pin(async move { let sender = ctx.sender().clone(); let window = - RollingSessionWindow::new(sender, TEST_WINDOW_SIZE, hash).await.unwrap(); + RollingSessionWindow::new(sender, hash, dummy_db_params()).await.unwrap(); assert_eq!(window.earliest_session, session); assert_eq!(window.session_info, vec![dummy_session_info(session)]); diff --git a/roadmap/implementers-guide/README.md b/roadmap/implementers-guide/README.md index 1775fb44d876..7f3f8cef7e63 100644 --- a/roadmap/implementers-guide/README.md +++ b/roadmap/implementers-guide/README.md @@ -4,13 +4,14 @@ The implementers' guide is compiled from several source files with [`mdBook`](ht ## Hosted build -This is avalible at https://paritytech.github.io/polkadot/book/ +This is available [here](https://paritytech.github.io/polkadot/book/). 
## Local build To view it locally from the repo root: Ensure graphviz is installed: + ```sh brew install graphviz # for macOS sudo apt-get install graphviz # for Ubuntu/Debian @@ -19,11 +20,11 @@ sudo apt-get install graphviz # for Ubuntu/Debian Then install and build the book: ```sh -cargo install mdbook mdbook-linkcheck mdbook-graphviz mdbook-mermaid +cargo install mdbook mdbook-linkcheck mdbook-graphviz mdbook-mermaid mdbook-last-changed mdbook serve roadmap/implementers-guide open http://localhost:3000 ``` ## Specification -See also the Polkadot specificaton [hosted](https://spec.polkadot.network/), and it's [source](https://github.com/w3f/polkadot-spec)). +See also the Polkadot specification [hosted](https://spec.polkadot.network/), and its [source](https://github.com/w3f/polkadot-spec). diff --git a/roadmap/implementers-guide/book.toml b/roadmap/implementers-guide/book.toml index 8805ca4c38c3..0ced0e26f9a0 100644 --- a/roadmap/implementers-guide/book.toml +++ b/roadmap/implementers-guide/book.toml @@ -9,8 +9,14 @@ title = "The Polkadot Parachain Host Implementers' Guide" command = "mdbook-graphviz" [preprocessor.mermaid] command = "mdbook-mermaid" +[preprocessor.last-changed] +command = "mdbook-last-changed" +renderer = ["html"] [output.html] +additional-css = ["last-changed.css"] additional-js = ["mermaid.min.js", "mermaid-init.js"] +# Repository URL used in the last-changed link. +git-repository-url = "https://github.com/paritytech/polkadot" [output.linkcheck] diff --git a/roadmap/implementers-guide/last-changed.css b/roadmap/implementers-guide/last-changed.css new file mode 100644 index 000000000000..744dc6efc7ec --- /dev/null +++ b/roadmap/implementers-guide/last-changed.css @@ -0,0 +1,7 @@ +footer { + font-size: 0.8em; + text-align: center; + margin-top: 50px; + border-top: 1px solid black; + padding: 5px 0; +} diff --git a/runtime/common/Cargo.toml b/runtime/common/Cargo.toml index 9ee5de41188d..96f29eae9257 100644 --- a/runtime/common/Cargo.toml +++ b/runtime/common/Cargo.toml @@ -31,6 +31,7 @@ pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "m pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-staking-reward-fn = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-vesting = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } @@ -84,6 +85,7 @@ std = [ "pallet-beefy-mmr/std", "pallet-session/std", "pallet-staking/std", + "pallet-staking-reward-fn/std", "pallet-timestamp/std", "pallet-vesting/std", "pallet-transaction-payment/std", diff --git a/runtime/common/src/impls.rs b/runtime/common/src/impls.rs index 939b753092f5..2b10c79cfdff 100644 --- a/runtime/common/src/impls.rs +++ b/runtime/common/src/impls.rs @@ -18,6 +18,8 @@ use crate::NegativeImbalance; use frame_support::traits::{Currency, Imbalance, OnUnbalanced}; +use primitives::v2::Balance; +use sp_runtime::Perquintill; /// Logic for the author to get a portion of fees. 
pub struct ToAuthor(sp_std::marker::PhantomData); @@ -57,6 +59,45 @@ where } } +pub fn era_payout( + total_staked: Balance, + non_gilt_issuance: Balance, + max_annual_inflation: Perquintill, + period_fraction: Perquintill, + auctioned_slots: u64, +) -> (Balance, Balance) { + use pallet_staking_reward_fn::compute_inflation; + use sp_runtime::traits::Saturating; + + let min_annual_inflation = Perquintill::from_rational(25u64, 1000u64); + let delta_annual_inflation = max_annual_inflation.saturating_sub(min_annual_inflation); + + // 30% reserved for up to 60 slots. + let auction_proportion = Perquintill::from_rational(auctioned_slots.min(60), 200u64); + + // Therefore the ideal amount at stake (as a percentage of total issuance) is 75% less the + // amount that we expect to be taken up with auctions. + let ideal_stake = Perquintill::from_percent(75).saturating_sub(auction_proportion); + + let stake = Perquintill::from_rational(total_staked, non_gilt_issuance); + let falloff = Perquintill::from_percent(5); + let adjustment = compute_inflation(stake, ideal_stake, falloff); + let staking_inflation = + min_annual_inflation.saturating_add(delta_annual_inflation * adjustment); + + let max_payout = period_fraction * max_annual_inflation * non_gilt_issuance; + let staking_payout = (period_fraction * staking_inflation) * non_gilt_issuance; + let rest = max_payout.saturating_sub(staking_payout); + + let other_issuance = non_gilt_issuance.saturating_sub(total_staked); + if total_staked > other_issuance { + let _cap_rest = Perquintill::from_rational(other_issuance, total_staked) * staking_payout; + // We don't do anything with this, but if we wanted to, we could introduce a cap on the + // treasury amount with: `rest = rest.min(cap_rest);` + } + (staking_payout, rest) +} + #[cfg(test)] mod tests { use super::*; @@ -209,4 +250,44 @@ mod tests { assert_eq!(Balances::free_balance(Treasury::account_id()), 8); }); } + + #[test] + fn compute_inflation_should_give_sensible_results() { + assert_eq!( + pallet_staking_reward_fn::compute_inflation( + Perquintill::from_percent(75), + Perquintill::from_percent(75), + Perquintill::from_percent(5), + ), + Perquintill::one() + ); + assert_eq!( + pallet_staking_reward_fn::compute_inflation( + Perquintill::from_percent(50), + Perquintill::from_percent(75), + Perquintill::from_percent(5), + ), + Perquintill::from_rational(2u64, 3u64) + ); + assert_eq!( + pallet_staking_reward_fn::compute_inflation( + Perquintill::from_percent(80), + Perquintill::from_percent(75), + Perquintill::from_percent(5), + ), + Perquintill::from_rational(1u64, 2u64) + ); + } + + #[test] + fn era_payout_should_give_sensible_results() { + assert_eq!( + era_payout(75, 100, Perquintill::from_percent(10), Perquintill::one(), 0,), + (10, 0) + ); + assert_eq!( + era_payout(80, 100, Perquintill::from_percent(10), Perquintill::one(), 0,), + (6, 4) + ); + } } diff --git a/runtime/kusama/Cargo.toml b/runtime/kusama/Cargo.toml index 72500677c534..ec3de40ec5e6 100644 --- a/runtime/kusama/Cargo.toml +++ b/runtime/kusama/Cargo.toml @@ -72,7 +72,6 @@ pallet-session = { git = "https://github.com/paritytech/substrate", branch = "ma pallet-society = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-staking-reward-fn = { git = 
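The relocated `era_payout` combines `compute_inflation`'s I_NPoS curve with an ideal stake reduced by auctioned slots. As a rough check of the second test case above — `era_payout(80, 100, 10%, 1, 0) == (6, 4)` — here is the same arithmetic in plain floating point; the runtime uses `Perquintill` fixed-point, so the rounding here is only an approximation:

```rust
// Illustrative floating-point walk-through of era_payout(80, 100, 10%, 1, 0) == (6, 4).
fn main() {
    let (total_staked, issuance) = (80.0_f64, 100.0_f64);
    let (min_infl, max_infl, falloff) = (0.025_f64, 0.10_f64, 0.05_f64);

    // No auctioned public slots, so the ideal stake stays at 75% of issuance.
    let ideal_stake = 0.75_f64;
    let stake = total_staked / issuance; // 0.8, i.e. above the ideal

    // Above the ideal, the curve decays: 2^((ideal - stake) / falloff) = 2^-1 = 0.5.
    let adjustment = 2_f64.powf((ideal_stake - stake) / falloff);
    let staking_inflation = min_infl + (max_infl - min_infl) * adjustment; // 0.0625

    let max_payout = max_infl * issuance;                        // 10
    let staking_payout = (staking_inflation * issuance).floor(); // 6.25 truncated to 6
    let rest = max_payout - staking_payout;                      // remainder goes to treasury

    assert_eq!((staking_payout as u64, rest.round() as u64), (6, 4));
}
```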
"https://github.com/paritytech/substrate", branch = "master", default-features = false } frame-system = {git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } @@ -170,7 +169,6 @@ std = [ "pallet-session/std", "pallet-society/std", "pallet-staking/std", - "pallet-staking-reward-fn/std", "pallet-timestamp/std", "pallet-tips/std", "pallet-treasury/std", diff --git a/runtime/kusama/src/lib.rs b/runtime/kusama/src/lib.rs index 0eb8cc0b4944..3f77ece6c86a 100644 --- a/runtime/kusama/src/lib.rs +++ b/runtime/kusama/src/lib.rs @@ -26,6 +26,7 @@ use primitives::v2::{ CoreState, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, Moment, Nonce, OccupiedCoreAssumption, PersistedValidationData, ScrapedOnChainVotes, SessionInfo, Signature, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, + LOWEST_PUBLIC_ID, }; use runtime_common::{ auctions, claims, crowdloan, impl_runtime_weights, impls::DealWithFees, paras_registrar, @@ -410,6 +411,9 @@ parameter_types! { pub const MaxElectableTargets: u16 = u16::MAX; pub NposSolutionPriority: TransactionPriority = Perbill::from_percent(90) * TransactionPriority::max_value(); + /// Setup election pallet to support maximum winners upto 2000. This will mean Staking Pallet + /// cannot have active validators higher than this count. + pub const MaxActiveValidators: u32 = 2000; } generate_solution_type!( @@ -428,6 +432,9 @@ impl onchain::Config for OnChainSeqPhragmen { type Solver = SequentialPhragmen; type DataProvider = Staking; type WeightInfo = weights::frame_election_provider_support::WeightInfo; + type MaxWinners = MaxActiveValidators; + type VotersBound = MaxElectingVoters; + type TargetsBound = MaxElectableTargets; } impl pallet_election_provider_multi_phase::MinerConfig for Runtime { @@ -475,10 +482,15 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type MinerTxPriority = NposSolutionPriority; type DataProvider = Staking; #[cfg(feature = "fast-runtime")] - type Fallback = onchain::UnboundedExecution; + type Fallback = onchain::OnChainExecution; #[cfg(not(feature = "fast-runtime"))] - type Fallback = pallet_election_provider_multi_phase::NoFallback; - type GovernanceFallback = onchain::UnboundedExecution; + type Fallback = frame_election_provider_support::NoElection<( + AccountId, + BlockNumber, + Staking, + MaxActiveValidators, + )>; + type GovernanceFallback = onchain::OnChainExecution; type Solver = SequentialPhragmen< AccountId, pallet_election_provider_multi_phase::SolutionAccuracyOf, @@ -489,6 +501,7 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type WeightInfo = weights::pallet_election_provider_multi_phase::WeightInfo; type MaxElectingVoters = MaxElectingVoters; type MaxElectableTargets = MaxElectableTargets; + type MaxWinners = MaxActiveValidators; } parameter_types! 
{ @@ -504,45 +517,6 @@ impl pallet_bags_list::Config for Runtime { type Score = sp_npos_elections::VoteWeight; } -fn era_payout( - total_staked: Balance, - non_gilt_issuance: Balance, - max_annual_inflation: Perquintill, - period_fraction: Perquintill, - auctioned_slots: u64, -) -> (Balance, Balance) { - use pallet_staking_reward_fn::compute_inflation; - use sp_arithmetic::traits::Saturating; - - let min_annual_inflation = Perquintill::from_rational(25u64, 1000u64); - let delta_annual_inflation = max_annual_inflation.saturating_sub(min_annual_inflation); - - // 30% reserved for up to 60 slots. - let auction_proportion = Perquintill::from_rational(auctioned_slots.min(60), 200u64); - - // Therefore the ideal amount at stake (as a percentage of total issuance) is 75% less the amount that we expect - // to be taken up with auctions. - let ideal_stake = Perquintill::from_percent(75).saturating_sub(auction_proportion); - - let stake = Perquintill::from_rational(total_staked, non_gilt_issuance); - let falloff = Perquintill::from_percent(5); - let adjustment = compute_inflation(stake, ideal_stake, falloff); - let staking_inflation = - min_annual_inflation.saturating_add(delta_annual_inflation * adjustment); - - let max_payout = period_fraction * max_annual_inflation * non_gilt_issuance; - let staking_payout = (period_fraction * staking_inflation) * non_gilt_issuance; - let rest = max_payout.saturating_sub(staking_payout); - - let other_issuance = non_gilt_issuance.saturating_sub(total_staked); - if total_staked > other_issuance { - let _cap_rest = Perquintill::from_rational(other_issuance, total_staked) * staking_payout; - // We don't do anything with this, but if we wanted to, we could introduce a cap on the treasury amount - // with: `rest = rest.min(cap_rest);` - } - (staking_payout, rest) -} - pub struct EraPayout; impl pallet_staking::EraPayout for EraPayout { fn era_payout( @@ -550,13 +524,18 @@ impl pallet_staking::EraPayout for EraPayout { _total_issuance: Balance, era_duration_millis: u64, ) -> (Balance, Balance) { - // TODO: #3011 Update with proper auctioned slots tracking. - // This should be fine for the first year of parachains. - let auctioned_slots: u64 = auctions::Pallet::::auction_counter().into(); + // all para-ids that are currently active. + let auctioned_slots = Paras::parachains() + .into_iter() + // all active para-ids that do not belong to a system or common good chain is the number + // of parachains that we should take into account for inflation. + .filter(|i| *i >= LOWEST_PUBLIC_ID) + .count() as u64; + const MAX_ANNUAL_INFLATION: Perquintill = Perquintill::from_percent(10); const MILLISECONDS_PER_YEAR: u64 = 1000 * 3600 * 24 * 36525 / 100; - era_payout( + runtime_common::impls::era_payout( total_staked, Gilt::issuance().non_gilt, MAX_ANNUAL_INFLATION, @@ -573,7 +552,7 @@ parameter_types! { pub const BondingDuration: sp_staking::EraIndex = 28; // 27 eras in which slashes can be cancelled (slightly less than 7 days). 
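Instead of reading the auction counter, the new `EraPayout` counts live para-ids at or above `LOWEST_PUBLIC_ID`, excluding system and common-good chains from the inflation adjustment. A sketch of that filter over plain integers, with 2000 standing in for `LOWEST_PUBLIC_ID`:

```rust
// Illustrative only: counting "auctioned" slots the way the new EraPayout does,
// with u32 para-ids and 2_000 standing in for LOWEST_PUBLIC_ID.
fn auctioned_slots(active_para_ids: &[u32], lowest_public_id: u32) -> u64 {
    active_para_ids.iter().filter(|id| **id >= lowest_public_id).count() as u64
}

fn main() {
    // Two system chains (1000, 1001) plus three public lease-holding parachains.
    let active = [1_000, 1_001, 2_000, 2_004, 2_006];
    assert_eq!(auctioned_slots(&active, 2_000), 3);
}
```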
pub const SlashDeferDuration: sp_staking::EraIndex = 27; - pub const MaxNominatorRewardedPerValidator: u32 = 256; + pub const MaxNominatorRewardedPerValidator: u32 = 512; pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); // 24 pub const MaxNominations: u32 = ::LIMIT as u32; @@ -586,7 +565,7 @@ impl pallet_staking::Config for Runtime { type UnixTime = Timestamp; type CurrencyToVote = CurrencyToVote; type ElectionProvider = ElectionProviderMultiPhase; - type GenesisElectionProvider = onchain::UnboundedExecution; + type GenesisElectionProvider = onchain::OnChainExecution; type RewardRemainder = Treasury; type RuntimeEvent = RuntimeEvent; type Slash = Treasury; @@ -613,6 +592,7 @@ impl pallet_staking::Config for Runtime { impl pallet_fast_unstake::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Currency = Balances; + type BatchSize = frame_support::traits::ConstU32<64>; type Deposit = frame_support::traits::ConstU128<{ CENTS * 100 }>; type ControlOrigin = EitherOfDiverse< EnsureRoot, @@ -1473,6 +1453,7 @@ pub type Executive = frame_executive::Executive< // "Properly migrate weights to v2" parachains_configuration::migration::v3::MigrateToV3, pallet_election_provider_multi_phase::migrations::v1::MigrateToV1, + pallet_fast_unstake::migrations::v1::MigrateToV1, ), >; /// The payload being signed in the transactions. @@ -1710,53 +1691,27 @@ sp_api::impl_runtime_apis! { } impl mmr::MmrApi for Runtime { - fn generate_proof(_block_number: BlockNumber) - -> Result<(mmr::EncodableOpaqueLeaf, mmr::Proof), mmr::Error> - { - Err(mmr::Error::PalletNotIncluded) - } - - fn verify_proof(_leaf: mmr::EncodableOpaqueLeaf, _proof: mmr::Proof) - -> Result<(), mmr::Error> - { - Err(mmr::Error::PalletNotIncluded) - } - - fn verify_proof_stateless( - _root: Hash, - _leaf: mmr::EncodableOpaqueLeaf, - _proof: mmr::Proof - ) -> Result<(), mmr::Error> { - Err(mmr::Error::PalletNotIncluded) - } - fn mmr_root() -> Result { Err(mmr::Error::PalletNotIncluded) } - fn generate_batch_proof(_block_numbers: Vec) - -> Result<(Vec, mmr::BatchProof), mmr::Error> - { - Err(mmr::Error::PalletNotIncluded) - } - - fn generate_historical_batch_proof( + fn generate_proof( _block_numbers: Vec, - _best_known_block_number: BlockNumber, - ) -> Result<(Vec, mmr::BatchProof), mmr::Error> { + _best_known_block_number: Option, + ) -> Result<(Vec, mmr::Proof), mmr::Error> { Err(mmr::Error::PalletNotIncluded) } - fn verify_batch_proof(_leaves: Vec, _proof: mmr::BatchProof) + fn verify_proof(_leaves: Vec, _proof: mmr::Proof) -> Result<(), mmr::Error> { Err(mmr::Error::PalletNotIncluded) } - fn verify_batch_proof_stateless( + fn verify_proof_stateless( _root: Hash, _leaves: Vec, - _proof: mmr::BatchProof + _proof: mmr::Proof ) -> Result<(), mmr::Error> { Err(mmr::Error::PalletNotIncluded) } diff --git a/runtime/kusama/src/tests.rs b/runtime/kusama/src/tests.rs index 12d02da131f8..ef09a9dc6131 100644 --- a/runtime/kusama/src/tests.rs +++ b/runtime/kusama/src/tests.rs @@ -137,40 +137,6 @@ fn nominator_limit() { println!("can support {} nominators to yield a weight of {}", active, weight_with(active)); } -#[test] -fn compute_inflation_should_give_sensible_results() { - assert_eq!( - pallet_staking_reward_fn::compute_inflation( - Perquintill::from_percent(75), - Perquintill::from_percent(75), - Perquintill::from_percent(5), - ), - Perquintill::one() - ); - assert_eq!( - pallet_staking_reward_fn::compute_inflation( - Perquintill::from_percent(50), - Perquintill::from_percent(75), - Perquintill::from_percent(5), - ), 
- Perquintill::from_rational(2u64, 3u64) - ); - assert_eq!( - pallet_staking_reward_fn::compute_inflation( - Perquintill::from_percent(80), - Perquintill::from_percent(75), - Perquintill::from_percent(5), - ), - Perquintill::from_rational(1u64, 2u64) - ); -} - -#[test] -fn era_payout_should_give_sensible_results() { - assert_eq!(era_payout(75, 100, Perquintill::from_percent(10), Perquintill::one(), 0,), (10, 0)); - assert_eq!(era_payout(80, 100, Perquintill::from_percent(10), Perquintill::one(), 0,), (6, 4)); -} - #[test] fn call_size() { RuntimeCall::assert_size_under(230); diff --git a/runtime/polkadot/src/lib.rs b/runtime/polkadot/src/lib.rs index 5adf4e11e67f..0f71a37f425f 100644 --- a/runtime/polkadot/src/lib.rs +++ b/runtime/polkadot/src/lib.rs @@ -58,6 +58,7 @@ use primitives::v2::{ CoreState, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, Moment, Nonce, OccupiedCoreAssumption, PersistedValidationData, ScrapedOnChainVotes, SessionInfo, Signature, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, + LOWEST_PUBLIC_ID, }; use sp_core::OpaqueMetadata; use sp_mmr_primitives as mmr; @@ -405,6 +406,9 @@ parameter_types! { /// ... and all of the validators as electable targets. Whilst this is the case, we cannot and /// shall not increase the size of the validator intentions. pub const MaxElectableTargets: u16 = u16::MAX; + /// Setup election pallet to support maximum winners upto 1200. This will mean Staking Pallet + /// cannot have active validators higher than this count. + pub const MaxActiveValidators: u32 = 1200; } generate_solution_type!( @@ -423,6 +427,9 @@ impl onchain::Config for OnChainSeqPhragmen { type Solver = SequentialPhragmen; type DataProvider = Staking; type WeightInfo = weights::frame_election_provider_support::WeightInfo; + type MaxWinners = MaxActiveValidators; + type VotersBound = MaxElectingVoters; + type TargetsBound = MaxElectableTargets; } impl pallet_election_provider_multi_phase::MinerConfig for Runtime { @@ -470,10 +477,15 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type MinerTxPriority = NposSolutionPriority; type DataProvider = Staking; #[cfg(feature = "fast-runtime")] - type Fallback = onchain::UnboundedExecution; + type Fallback = onchain::OnChainExecution; #[cfg(not(feature = "fast-runtime"))] - type Fallback = pallet_election_provider_multi_phase::NoFallback; - type GovernanceFallback = onchain::UnboundedExecution; + type Fallback = frame_election_provider_support::NoElection<( + AccountId, + BlockNumber, + Staking, + MaxActiveValidators, + )>; + type GovernanceFallback = onchain::OnChainExecution; type Solver = SequentialPhragmen< AccountId, pallet_election_provider_multi_phase::SolutionAccuracyOf, @@ -487,6 +499,7 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type WeightInfo = weights::pallet_election_provider_multi_phase::WeightInfo; type MaxElectingVoters = MaxElectingVoters; type MaxElectableTargets = MaxElectableTargets; + type MaxWinners = MaxActiveValidators; } parameter_types! { @@ -525,7 +538,7 @@ parameter_types! 
{ pub const BondingDuration: sp_staking::EraIndex = 28; pub const SlashDeferDuration: sp_staking::EraIndex = 27; pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; - pub const MaxNominatorRewardedPerValidator: u32 = 256; + pub const MaxNominatorRewardedPerValidator: u32 = 512; pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); // 16 pub const MaxNominations: u32 = ::LIMIT as u32; @@ -536,6 +549,35 @@ type SlashCancelOrigin = EitherOfDiverse< pallet_collective::EnsureProportionAtLeast, >; +pub struct EraPayout; +impl pallet_staking::EraPayout for EraPayout { + fn era_payout( + total_staked: Balance, + total_issuance: Balance, + era_duration_millis: u64, + ) -> (Balance, Balance) { + // all para-ids that are not active. + let auctioned_slots = Paras::parachains() + .into_iter() + // all active para-ids that do not belong to a system or common good chain is the number + // of parachains that we should take into account for inflation. + .filter(|i| *i >= LOWEST_PUBLIC_ID) + .count() as u64; + + const MAX_ANNUAL_INFLATION: Perquintill = Perquintill::from_percent(10); + const MILLISECONDS_PER_YEAR: u64 = 1000 * 3600 * 24 * 36525 / 100; + + runtime_common::impls::era_payout( + total_staked, + // Polkadot has no notion of gilts, the entire issuance is non-guilt. + total_issuance, + MAX_ANNUAL_INFLATION, + Perquintill::from_rational(era_duration_millis, MILLISECONDS_PER_YEAR), + auctioned_slots, + ) + } +} + impl pallet_staking::Config for Runtime { type MaxNominations = MaxNominations; type Currency = Balances; @@ -552,12 +594,12 @@ impl pallet_staking::Config for Runtime { // A super-majority of the council can cancel the slash. type SlashCancelOrigin = SlashCancelOrigin; type SessionInterface = Self; - type EraPayout = pallet_staking::ConvertCurve; + type EraPayout = EraPayout; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type NextNewSession = Session; type ElectionProvider = ElectionProviderMultiPhase; - type GenesisElectionProvider = onchain::UnboundedExecution; + type GenesisElectionProvider = onchain::OnChainExecution; type VoterList = VoterList; type TargetList = UseValidatorsMap; type MaxUnlockingChunks = frame_support::traits::ConstU32<32>; @@ -570,6 +612,7 @@ impl pallet_staking::Config for Runtime { impl pallet_fast_unstake::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Currency = Balances; + type BatchSize = frame_support::traits::ConstU32<64>; type Deposit = frame_support::traits::ConstU128<{ UNITS }>; type ControlOrigin = EitherOfDiverse< EnsureRoot, @@ -1566,6 +1609,7 @@ pub type Executive = frame_executive::Executive< // "Properly migrate weights to v2" parachains_configuration::migration::v3::MigrateToV3, pallet_election_provider_multi_phase::migrations::v1::MigrateToV1, + pallet_fast_unstake::migrations::v1::MigrateToV1, ), >; @@ -1804,53 +1848,27 @@ sp_api::impl_runtime_apis! 
{ } impl mmr::MmrApi for Runtime { - fn generate_proof(_block_number: BlockNumber) - -> Result<(mmr::EncodableOpaqueLeaf, mmr::Proof), mmr::Error> - { - Err(mmr::Error::PalletNotIncluded) - } - - fn verify_proof(_leaf: mmr::EncodableOpaqueLeaf, _proof: mmr::Proof) - -> Result<(), mmr::Error> - { - Err(mmr::Error::PalletNotIncluded) - } - - fn verify_proof_stateless( - _root: Hash, - _leaf: mmr::EncodableOpaqueLeaf, - _proof: mmr::Proof - ) -> Result<(), mmr::Error> { - Err(mmr::Error::PalletNotIncluded) - } - fn mmr_root() -> Result { Err(mmr::Error::PalletNotIncluded) } - fn generate_batch_proof(_block_numbers: Vec) - -> Result<(Vec, mmr::BatchProof), mmr::Error> - { - Err(mmr::Error::PalletNotIncluded) - } - - fn generate_historical_batch_proof( + fn generate_proof( _block_numbers: Vec, - _best_known_block_number: BlockNumber, - ) -> Result<(Vec, mmr::BatchProof), mmr::Error> { + _best_known_block_number: Option, + ) -> Result<(Vec, mmr::Proof), mmr::Error> { Err(mmr::Error::PalletNotIncluded) } - fn verify_batch_proof(_leaves: Vec, _proof: mmr::BatchProof) + fn verify_proof(_leaves: Vec, _proof: mmr::Proof) -> Result<(), mmr::Error> { Err(mmr::Error::PalletNotIncluded) } - fn verify_batch_proof_stateless( + fn verify_proof_stateless( _root: Hash, _leaves: Vec, - _proof: mmr::BatchProof + _proof: mmr::Proof ) -> Result<(), mmr::Error> { Err(mmr::Error::PalletNotIncluded) } diff --git a/runtime/rococo/src/lib.rs b/runtime/rococo/src/lib.rs index b46d9a15469a..a0975d1ba6bc 100644 --- a/runtime/rococo/src/lib.rs +++ b/runtime/rococo/src/lib.rs @@ -1691,52 +1691,15 @@ sp_api::impl_runtime_apis! { } impl mmr::MmrApi for Runtime { - fn generate_proof(block_number: BlockNumber) - -> Result<(mmr::EncodableOpaqueLeaf, mmr::Proof), mmr::Error> - { - Mmr::generate_batch_proof(vec![block_number]) - .and_then(|(leaves, proof)| Ok(( - mmr::EncodableOpaqueLeaf::from_leaf(&leaves[0]), - mmr::BatchProof::into_single_leaf_proof(proof)? - ))) - } - - fn verify_proof(leaf: mmr::EncodableOpaqueLeaf, proof: mmr::Proof) - -> Result<(), mmr::Error> - { - pub type MmrLeaf = <::LeafData as mmr::LeafDataProvider>::LeafData; - let leaf: MmrLeaf = leaf - .into_opaque_leaf() - .try_decode() - .ok_or(mmr::Error::Verify)?; - Mmr::verify_leaves(vec![leaf], mmr::Proof::into_batch_proof(proof)) - } - - fn verify_proof_stateless( - root: Hash, - leaf: mmr::EncodableOpaqueLeaf, - proof: mmr::Proof - ) -> Result<(), mmr::Error> { - let node = mmr::DataOrHash::Data(leaf.into_opaque_leaf()); - pallet_mmr::verify_leaves_proof::(root, vec![node], mmr::Proof::into_batch_proof(proof)) - } - fn mmr_root() -> Result { Ok(Mmr::mmr_root()) } - fn generate_batch_proof(block_numbers: Vec) - -> Result<(Vec, mmr::BatchProof), mmr::Error> - { - Mmr::generate_batch_proof(block_numbers) - .map(|(leaves, proof)| (leaves.into_iter().map(|leaf| mmr::EncodableOpaqueLeaf::from_leaf(&leaf)).collect(), proof)) - } - - fn generate_historical_batch_proof( + fn generate_proof( block_numbers: Vec, - best_known_block_number: BlockNumber, - ) -> Result<(Vec, mmr::BatchProof), mmr::Error> { - Mmr::generate_historical_batch_proof(block_numbers, best_known_block_number).map( + best_known_block_number: Option, + ) -> Result<(Vec, mmr::Proof), mmr::Error> { + Mmr::generate_proof(block_numbers, best_known_block_number).map( |(leaves, proof)| { ( leaves @@ -1749,7 +1712,7 @@ sp_api::impl_runtime_apis! 
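Across all runtimes the MMR runtime API is consolidated: the separate single-leaf, batch and historical-batch calls become one `generate_proof(block_numbers, best_known_block_number)`. A shape-only sketch of how the old call sites map onto the new signature; all types here are simplified stand-ins, not the real `sp-mmr-primitives` types:

```rust
// Illustrative only: the consolidated MmrApi shape after this change.
type BlockNumber = u32;
struct OpaqueLeaf; // stand-in for mmr::EncodableOpaqueLeaf
struct Proof;      // stand-in for mmr::Proof

fn generate_proof(
    block_numbers: Vec<BlockNumber>,
    best_known_block_number: Option<BlockNumber>,
) -> (Vec<OpaqueLeaf>, Proof) {
    // A real runtime forwards to pallet-mmr; the stub runtimes above return PalletNotIncluded.
    let _ = (block_numbers, best_known_block_number);
    (Vec::new(), Proof)
}

fn main() {
    let _single = generate_proof(vec![42], None);        // old generate_proof(n)
    let _batch = generate_proof(vec![10, 20, 30], None); // old generate_batch_proof(ns)
    let _hist = generate_proof(vec![10, 20], Some(25));  // old generate_historical_batch_proof(ns, best)
}
```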
{ ) } - fn verify_batch_proof(leaves: Vec, proof: mmr::BatchProof) + fn verify_proof(leaves: Vec, proof: mmr::Proof) -> Result<(), mmr::Error> { pub type MmrLeaf = <::LeafData as mmr::LeafDataProvider>::LeafData; @@ -1760,10 +1723,10 @@ sp_api::impl_runtime_apis! { Mmr::verify_leaves(leaves, proof) } - fn verify_batch_proof_stateless( + fn verify_proof_stateless( root: Hash, leaves: Vec, - proof: mmr::BatchProof + proof: mmr::Proof ) -> Result<(), mmr::Error> { let nodes = leaves.into_iter().map(|leaf|mmr::DataOrHash::Data(leaf.into_opaque_leaf())).collect(); pallet_mmr::verify_leaves_proof::(root, nodes, proof) diff --git a/runtime/test-runtime/src/lib.rs b/runtime/test-runtime/src/lib.rs index 7fba0e20d9fe..0210f1ecd8b7 100644 --- a/runtime/test-runtime/src/lib.rs +++ b/runtime/test-runtime/src/lib.rs @@ -319,6 +319,9 @@ parameter_types! { pub storage MaxNominatorRewardedPerValidator: u32 = 64; pub storage OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); pub const MaxAuthorities: u32 = 100_000; + pub const OnChainMaxWinners: u32 = u32::MAX; + pub const MaxElectingVoters: u32 = u32::MAX; + pub const MaxElectableTargets: u16 = u16::MAX; } pub struct OnChainSeqPhragmen; @@ -327,6 +330,9 @@ impl onchain::Config for OnChainSeqPhragmen { type Solver = SequentialPhragmen; type DataProvider = Staking; type WeightInfo = (); + type MaxWinners = OnChainMaxWinners; + type VotersBound = MaxElectingVoters; + type TargetsBound = MaxElectableTargets; } impl pallet_staking::Config for Runtime { @@ -349,8 +355,8 @@ impl pallet_staking::Config for Runtime { type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type NextNewSession = Session; - type ElectionProvider = onchain::UnboundedExecution; - type GenesisElectionProvider = onchain::UnboundedExecution; + type ElectionProvider = onchain::OnChainExecution; + type GenesisElectionProvider = onchain::OnChainExecution; // Use the nominator map to iter voter AND no-ops for all SortedListProvider hooks. The migration // to bags-list is a no-op, but the storage version will be updated. type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; @@ -920,53 +926,27 @@ sp_api::impl_runtime_apis! 
{ } impl mmr::MmrApi for Runtime { - fn generate_proof(_block_number: BlockNumber) - -> Result<(mmr::EncodableOpaqueLeaf, mmr::Proof), mmr::Error> - { - Err(mmr::Error::PalletNotIncluded) - } - - fn verify_proof(_leaf: mmr::EncodableOpaqueLeaf, _proof: mmr::Proof) - -> Result<(), mmr::Error> - { - Err(mmr::Error::PalletNotIncluded) - } - - fn verify_proof_stateless( - _root: Hash, - _leaf: mmr::EncodableOpaqueLeaf, - _proof: mmr::Proof - ) -> Result<(), mmr::Error> { - Err(mmr::Error::PalletNotIncluded) - } - fn mmr_root() -> Result { Err(mmr::Error::PalletNotIncluded) } - fn generate_batch_proof(_block_numbers: Vec) - -> Result<(Vec, mmr::BatchProof), mmr::Error> - { - Err(mmr::Error::PalletNotIncluded) - } - - fn generate_historical_batch_proof( + fn generate_proof( _block_numbers: Vec, - _best_known_block_number: BlockNumber, - ) -> Result<(Vec, mmr::BatchProof), mmr::Error> { + _best_known_block_number: Option, + ) -> Result<(Vec, mmr::Proof), mmr::Error> { Err(mmr::Error::PalletNotIncluded) } - fn verify_batch_proof(_leaves: Vec, _proof: mmr::BatchProof) + fn verify_proof(_leaves: Vec, _proof: mmr::Proof) -> Result<(), mmr::Error> { Err(mmr::Error::PalletNotIncluded) } - fn verify_batch_proof_stateless( + fn verify_proof_stateless( _root: Hash, _leaves: Vec, - _proof: mmr::BatchProof + _proof: mmr::Proof ) -> Result<(), mmr::Error> { Err(mmr::Error::PalletNotIncluded) } diff --git a/runtime/westend/src/lib.rs b/runtime/westend/src/lib.rs index fd5e8a1c7f43..a7930ff06ea6 100644 --- a/runtime/westend/src/lib.rs +++ b/runtime/westend/src/lib.rs @@ -366,6 +366,9 @@ parameter_types! { /// ... and all of the validators as electable targets. Whilst this is the case, we cannot and /// shall not increase the size of the validator intentions. pub const MaxElectableTargets: u16 = u16::MAX; + // Maximum winners that can be chosen as active validators + pub const MaxActiveValidators: u32 = 1000; + } frame_election_provider_support::generate_solution_type!( @@ -384,6 +387,9 @@ impl onchain::Config for OnChainSeqPhragmen { type Solver = SequentialPhragmen; type DataProvider = Staking; type WeightInfo = weights::frame_election_provider_support::WeightInfo; + type MaxWinners = MaxActiveValidators; + type VotersBound = MaxElectingVoters; + type TargetsBound = MaxElectableTargets; } impl pallet_election_provider_multi_phase::MinerConfig for Runtime { @@ -431,10 +437,15 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type MinerTxPriority = NposSolutionPriority; type DataProvider = Staking; #[cfg(any(feature = "fast-runtime", feature = "runtime-benchmarks"))] - type Fallback = onchain::UnboundedExecution; + type Fallback = onchain::OnChainExecution; #[cfg(not(any(feature = "fast-runtime", feature = "runtime-benchmarks")))] - type Fallback = pallet_election_provider_multi_phase::NoFallback; - type GovernanceFallback = onchain::UnboundedExecution; + type Fallback = frame_election_provider_support::NoElection<( + AccountId, + BlockNumber, + Staking, + MaxActiveValidators, + )>; + type GovernanceFallback = onchain::OnChainExecution; type Solver = SequentialPhragmen< AccountId, pallet_election_provider_multi_phase::SolutionAccuracyOf, @@ -445,6 +456,7 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type WeightInfo = weights::pallet_election_provider_multi_phase::WeightInfo; type MaxElectingVoters = MaxElectingVoters; type MaxElectableTargets = MaxElectableTargets; + type MaxWinners = MaxActiveValidators; } parameter_types! 
{ @@ -505,7 +517,7 @@ impl pallet_staking::Config for Runtime { type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type NextNewSession = Session; type ElectionProvider = ElectionProviderMultiPhase; - type GenesisElectionProvider = onchain::UnboundedExecution; + type GenesisElectionProvider = onchain::OnChainExecution; type VoterList = VoterList; type TargetList = UseValidatorsMap; type MaxUnlockingChunks = frame_support::traits::ConstU32<32>; @@ -518,6 +530,7 @@ impl pallet_staking::Config for Runtime { impl pallet_fast_unstake::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Currency = Balances; + type BatchSize = frame_support::traits::ConstU32<64>; type Deposit = frame_support::traits::ConstU128<{ UNITS }>; type ControlOrigin = EnsureRoot; type WeightInfo = weights::pallet_fast_unstake::WeightInfo; @@ -1217,6 +1230,7 @@ pub type Executive = frame_executive::Executive< // "Properly migrate weights to v2" parachains_configuration::migration::v3::MigrateToV3, pallet_election_provider_multi_phase::migrations::v1::MigrateToV1, + pallet_fast_unstake::migrations::v1::MigrateToV1, ), >; /// The payload being signed in transactions. @@ -1444,58 +1458,29 @@ sp_api::impl_runtime_apis! { } impl mmr::MmrApi for Runtime { - fn generate_proof(_block_number: BlockNumber) - -> Result<(mmr::EncodableOpaqueLeaf, mmr::Proof), mmr::Error> - { - - Err(mmr::Error::PalletNotIncluded) - } - - fn verify_proof(_leaf: mmr::EncodableOpaqueLeaf, _proof: mmr::Proof) - -> Result<(), mmr::Error> - { - - Err(mmr::Error::PalletNotIncluded) - } - - fn verify_proof_stateless( - _root: Hash, - _leaf: mmr::EncodableOpaqueLeaf, - _proof: mmr::Proof - ) -> Result<(), mmr::Error> { - - Err(mmr::Error::PalletNotIncluded) - } - fn mmr_root() -> Result { Err(mmr::Error::PalletNotIncluded) } - fn generate_batch_proof(_block_numbers: Vec) - -> Result<(Vec, mmr::BatchProof), mmr::Error> - { - Err(mmr::Error::PalletNotIncluded) - } - - fn generate_historical_batch_proof( + fn generate_proof( _block_numbers: Vec, - _best_known_block_number: BlockNumber, - ) -> Result<(Vec, mmr::BatchProof), mmr::Error> { + _best_known_block_number: Option, + ) -> Result<(Vec, mmr::Proof), mmr::Error> { Err(mmr::Error::PalletNotIncluded) } - fn verify_batch_proof(_leaves: Vec, _proof: mmr::BatchProof) + fn verify_proof(_leaves: Vec, _proof: mmr::Proof) -> Result<(), mmr::Error> { Err(mmr::Error::PalletNotIncluded) } - fn verify_batch_proof_stateless( + fn verify_proof_stateless( _root: Hash, _leaves: Vec, - _proof: mmr::BatchProof + _proof: mmr::Proof ) -> Result<(), mmr::Error> { Err(mmr::Error::PalletNotIncluded) diff --git a/scripts/ci/gitlab/pipeline/build.yml b/scripts/ci/gitlab/pipeline/build.yml index 791b01c2b632..c6fe5916c3b6 100644 --- a/scripts/ci/gitlab/pipeline/build.yml +++ b/scripts/ci/gitlab/pipeline/build.yml @@ -170,7 +170,7 @@ build-implementers-guide: - .collect-artifacts-short script: - apt-get -y update; apt-get install -y graphviz - - cargo install mdbook mdbook-mermaid mdbook-linkcheck mdbook-graphviz + - cargo install mdbook mdbook-mermaid mdbook-linkcheck mdbook-graphviz mdbook-last-changed - mdbook build ./roadmap/implementers-guide - mkdir -p artifacts - mv roadmap/implementers-guide/book artifacts/ diff --git a/utils/staking-miner/src/emergency_solution.rs b/utils/staking-miner/src/emergency_solution.rs index cbf86af4c1d0..fdb3a47a270d 100644 --- a/utils/staking-miner/src/emergency_solution.rs +++ b/utils/staking-miner/src/emergency_solution.rs @@ -36,21 +36,23 @@ macro_rules! 
emergency_solution_cmd_for { ($runtime:ident) => { paste::paste! { log::info!(target: LOG_TARGET, "mined solution with {:?}", &raw_solution.score); - let mut ready_solution = EPM::Pallet::::feasibility_check(raw_solution, EPM::ElectionCompute::Signed)?; - + let ready_solution = EPM::Pallet::::feasibility_check(raw_solution, EPM::ElectionCompute::Signed)?; + let encoded_size = ready_solution.encoded_size(); + let score = ready_solution.score; + let mut supports = ready_solution.supports.into_inner(); // maybe truncate. if let Some(take) = config.take { - log::info!(target: LOG_TARGET, "truncating {} winners to {}", ready_solution.supports.len(), take); - ready_solution.supports.sort_unstable_by_key(|(_, s)| s.total); - ready_solution.supports.truncate(take); + log::info!(target: LOG_TARGET, "truncating {} winners to {}", supports.len(), take); + supports.sort_unstable_by_key(|(_, s)| s.total); + supports.truncate(take); } // write to file and stdout. - let encoded_support = ready_solution.supports.encode(); + let encoded_support = supports.encode(); let mut supports_file = std::fs::File::create("solution.supports.bin")?; supports_file.write_all(&encoded_support)?; - log::info!(target: LOG_TARGET, "ReadySolution: size {:?} / score = {:?}", ready_solution.encoded_size(), ready_solution.score); + log::info!(target: LOG_TARGET, "ReadySolution: size {:?} / score = {:?}", encoded_size, score); log::trace!(target: LOG_TARGET, "Supports: {}", sp_core::hexdisplay::HexDisplay::from(&encoded_support)); Ok(()) diff --git a/utils/staking-miner/src/main.rs b/utils/staking-miner/src/main.rs index 6e5b53423548..8c134874f297 100644 --- a/utils/staking-miner/src/main.rs +++ b/utils/staking-miner/src/main.rs @@ -253,7 +253,6 @@ enum Error { AlreadySubmitted, VersionMismatch, StrategyNotSatisfied, - QueueFull, Other(String), } diff --git a/utils/staking-miner/src/monitor.rs b/utils/staking-miner/src/monitor.rs index 0780ef881fce..bfc075668e66 100644 --- a/utils/staking-miner/src/monitor.rs +++ b/utils/staking-miner/src/monitor.rs @@ -120,9 +120,8 @@ async fn ensure_strategy_met( .map_err::, _>(Into::into)? .unwrap_or_default(); - // we check the queue here as well. Could be checked elsewhere. - if indices.len() as u32 >= max_submissions { - return Err(Error::::QueueFull) + if indices.len() >= max_submissions as usize { + log::debug!(target: LOG_TARGET, "The submissions queue is full"); } // default score is all zeros, any score is better than it. 
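The emergency-solution command now unwraps the bounded supports with `into_inner`, sorts them by total backing, optionally truncates to `--take` winners, and SCALE-encodes the result to `solution.supports.bin`. A sketch of that step with a simplified `Support` stand-in, assuming `parity-scale-codec` with its derive feature:

```rust
// Illustrative only: the sort / truncate / encode step from the emergency-solution command.
use parity_scale_codec::Encode;

#[derive(Encode)]
struct Support {
    total: u128,
    // the voters field is omitted for brevity
}

fn main() {
    // (target, support) pairs as produced by the feasibility check, after into_inner().
    let mut supports: Vec<(u8, Support)> = vec![
        (1, Support { total: 300 }),
        (2, Support { total: 100 }),
        (3, Support { total: 200 }),
    ];

    // "maybe truncate": sort ascending by total backing and keep the first `take`
    // entries, mirroring the --take handling above.
    let take = 2;
    supports.sort_unstable_by_key(|(_, s)| s.total);
    supports.truncate(take);
    assert_eq!(supports.iter().map(|(t, _)| *t).collect::<Vec<_>>(), vec![2, 3]);

    // The real command writes these SCALE bytes to `solution.supports.bin`.
    let encoded = supports.encode();
    assert!(!encoded.is_empty());
}
```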
diff --git a/xcm/xcm-executor/integration-tests/src/lib.rs b/xcm/xcm-executor/integration-tests/src/lib.rs index 821987531aa0..f038e45d3edc 100644 --- a/xcm/xcm-executor/integration-tests/src/lib.rs +++ b/xcm/xcm-executor/integration-tests/src/lib.rs @@ -60,7 +60,7 @@ fn basic_buy_fees_message_executes() { futures::executor::block_on(client.import(sp_consensus::BlockOrigin::Own, block)) .expect("imports the block"); - client.state_at(&block_hash).expect("state should exist").inspect_state(|| { + client.state_at(block_hash).expect("state should exist").inspect_state(|| { assert!(polkadot_test_runtime::System::events().iter().any(|r| matches!( r.event, polkadot_test_runtime::RuntimeEvent::Xcm(pallet_xcm::Event::Attempted( @@ -101,7 +101,7 @@ fn query_response_fires() { .expect("imports the block"); let mut query_id = None; - client.state_at(&block_hash).expect("state should exist").inspect_state(|| { + client.state_at(block_hash).expect("state should exist").inspect_state(|| { for r in polkadot_test_runtime::System::events().iter() { match r.event { TestNotifier(QueryPrepared(q)) => query_id = Some(q), @@ -136,7 +136,7 @@ fn query_response_fires() { futures::executor::block_on(client.import(sp_consensus::BlockOrigin::Own, block)) .expect("imports the block"); - client.state_at(&block_hash).expect("state should exist").inspect_state(|| { + client.state_at(block_hash).expect("state should exist").inspect_state(|| { assert!(polkadot_test_runtime::System::events().iter().any(|r| matches!( r.event, polkadot_test_runtime::RuntimeEvent::Xcm(pallet_xcm::Event::ResponseReady( @@ -184,7 +184,7 @@ fn query_response_elicits_handler() { .expect("imports the block"); let mut query_id = None; - client.state_at(&block_hash).expect("state should exist").inspect_state(|| { + client.state_at(block_hash).expect("state should exist").inspect_state(|| { for r in polkadot_test_runtime::System::events().iter() { match r.event { TestNotifier(NotifyQueryPrepared(q)) => query_id = Some(q), @@ -218,7 +218,7 @@ fn query_response_elicits_handler() { futures::executor::block_on(client.import(sp_consensus::BlockOrigin::Own, block)) .expect("imports the block"); - client.state_at(&block_hash).expect("state should exist").inspect_state(|| { + client.state_at(block_hash).expect("state should exist").inspect_state(|| { assert!(polkadot_test_runtime::System::events().iter().any(|r| matches!( r.event, TestNotifier(ResponseReceived(