From 029e2a994abc5c1664b47b811d568365053fd78c Mon Sep 17 00:00:00 2001 From: Guillaume Thiolliere Date: Mon, 14 Jun 2021 21:16:58 +0200 Subject: [PATCH 01/67] improve variable name (#9108) --- frame/staking/src/lib.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index 58ab459d1bf28..734afb0824615 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -2821,7 +2821,8 @@ impl frame_election_provider_support::ElectionDataProvider frame_election_provider_support::ElectionDataProvider::get() { Forcing::ForceNone => Bounded::max_value(), Forcing::ForceNew | Forcing::ForceAlways => Zero::zero(), - Forcing::NotForcing if era_length >= T::SessionsPerEra::get() => Zero::zero(), + Forcing::NotForcing if era_progress >= T::SessionsPerEra::get() => Zero::zero(), Forcing::NotForcing => T::SessionsPerEra::get() - .saturating_sub(era_length) + .saturating_sub(era_progress) // One session is computed in this_session_end. .saturating_sub(1) .into(), From b81332bc354c55a82882e8fa479479ed73849850 Mon Sep 17 00:00:00 2001 From: Guillaume Thiolliere Date: Mon, 14 Jun 2021 22:31:04 +0200 Subject: [PATCH 02/67] execute system integrity_test also (#9104) --- frame/support/procedural/src/construct_runtime/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/support/procedural/src/construct_runtime/mod.rs b/frame/support/procedural/src/construct_runtime/mod.rs index eb3550355aa40..87fce6e37cf0a 100644 --- a/frame/support/procedural/src/construct_runtime/mod.rs +++ b/frame/support/procedural/src/construct_runtime/mod.rs @@ -367,7 +367,7 @@ fn decl_integrity_test(scrate: &TokenStream2) -> TokenStream2 { #[test] pub fn runtime_integrity_tests() { - ::integrity_test(); + ::integrity_test(); } } ) From 05cac0ddb96e1ca03775e491540e450537548844 Mon Sep 17 00:00:00 2001 From: Sergei Shulepov Date: Mon, 14 Jun 2021 22:07:06 +0100 Subject: [PATCH 03/67] Decommit instance memory after a runtime call on Linux (#8998) * Decommit instance memory after a runtime call on Linux * Update documentation for the test * Remove unfinished comment * Use saturating_sub. Also update the doc comment. 
* Precise RSS tracking in the test Instead of tracking RSS for the whole process we just look at the particular mapping that is associated with the linear memory of the runtime instance * Remove unused import * Fix unused imports * Fix the unused imports error for good * Rollback an accidental change to benches * Fix the test * Remove now unneeded code --- Cargo.lock | 3 + client/executor/Cargo.toml | 1 + client/executor/common/src/wasm_runtime.rs | 9 ++ client/executor/runtime-test/src/lib.rs | 30 +++++++ .../executor/src/integration_tests/linux.rs | 73 +++++++++++++++++ .../src/integration_tests/linux/smaps.rs | 82 +++++++++++++++++++ client/executor/src/integration_tests/mod.rs | 3 + client/executor/wasmtime/Cargo.toml | 2 + .../executor/wasmtime/src/instance_wrapper.rs | 37 +++++++++ client/executor/wasmtime/src/runtime.rs | 21 ++++- 10 files changed, 260 insertions(+), 1 deletion(-) create mode 100644 client/executor/src/integration_tests/linux.rs create mode 100644 client/executor/src/integration_tests/linux/smaps.rs diff --git a/Cargo.lock b/Cargo.lock index 1abbfd3947077..84f487ceedc99 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7468,6 +7468,7 @@ dependencies = [ "parity-wasm 0.42.2", "parking_lot 0.11.1", "paste 1.0.4", + "regex", "sc-executor-common", "sc-executor-wasmi", "sc-executor-wasmtime", @@ -7529,6 +7530,8 @@ name = "sc-executor-wasmtime" version = "0.9.0" dependencies = [ "assert_matches", + "cfg-if 1.0.0", + "libc", "log", "parity-scale-codec", "parity-wasm 0.42.2", diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml index 7cb2e12fd3913..27e90ddcc85e6 100644 --- a/client/executor/Cargo.toml +++ b/client/executor/Cargo.toml @@ -50,6 +50,7 @@ sc-tracing = { version = "3.0.0", path = "../tracing" } tracing = "0.1.25" tracing-subscriber = "0.2.18" paste = "1.0" +regex = "1" [features] default = [ "std" ] diff --git a/client/executor/common/src/wasm_runtime.rs b/client/executor/common/src/wasm_runtime.rs index cca0d99c4b91c..12ff92a2c607f 100644 --- a/client/executor/common/src/wasm_runtime.rs +++ b/client/executor/common/src/wasm_runtime.rs @@ -93,4 +93,13 @@ pub trait WasmInstance: Send { /// /// This method is only suitable for getting immutable globals. fn get_global_const(&self, name: &str) -> Result, Error>; + + /// **Testing Only**. This function returns the base address of the linear memory. + /// + /// This is meant to be the starting address of the memory mapped area for the linear memory. + /// + /// This function is intended only for a specific test that measures physical memory consumption. + fn linear_memory_base_ptr(&self) -> Option<*const u8> { + None + } } diff --git a/client/executor/runtime-test/src/lib.rs b/client/executor/runtime-test/src/lib.rs index bfba4ef039395..115683bffa62d 100644 --- a/client/executor/runtime-test/src/lib.rs +++ b/client/executor/runtime-test/src/lib.rs @@ -69,6 +69,36 @@ sp_core::wasm_export_functions! { fn test_empty_return() {} + fn test_dirty_plenty_memory(heap_base: u32, heap_pages: u32) { + // This piece of code will dirty multiple pages of memory. The number of pages is given by + // the `heap_pages`. It's unit is a wasm page (64KiB). The first page to be cleared + // is a wasm page that that follows the one that holds the `heap_base` address. + // + // This function dirties the **host** pages. I.e. we dirty 4KiB at a time and it will take + // 16 writes to process a single wasm page. + + let mut heap_ptr = heap_base as usize; + + // Find the next wasm page boundary. 
+ let heap_ptr = round_up_to(heap_ptr, 65536); + + // Make it an actual pointer + let heap_ptr = heap_ptr as *mut u8; + + // Traverse the host pages and make each one dirty + let host_pages = heap_pages as usize * 16; + for i in 0..host_pages { + unsafe { + // technically this is an UB, but there is no way Rust can find this out. + heap_ptr.add(i * 4096).write(0); + } + } + + fn round_up_to(n: usize, divisor: usize) -> usize { + (n + divisor - 1) / divisor + } + } + fn test_exhaust_heap() -> Vec { Vec::with_capacity(16777216) } fn test_panic() { panic!("test panic") } diff --git a/client/executor/src/integration_tests/linux.rs b/client/executor/src/integration_tests/linux.rs new file mode 100644 index 0000000000000..057cc1332717b --- /dev/null +++ b/client/executor/src/integration_tests/linux.rs @@ -0,0 +1,73 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Tests that are only relevant for Linux. + +// Constrain this only to wasmtime for the time being. Without this rustc will complain on unused +// imports and items. The alternative is to plop `cfg(feature = wasmtime)` everywhere which seems +// borthersome. +#![cfg(feature = "wasmtime")] + +use crate::WasmExecutionMethod; +use super::mk_test_runtime; +use codec::Encode as _; + +mod smaps; + +use self::smaps::Smaps; + +#[test] +fn memory_consumption_compiled() { + // This aims to see if linear memory stays backed by the physical memory after a runtime call. + // + // For that we make a series of runtime calls, probing the RSS for the VMA matching the linear + // memory. After the call we expect RSS to be equal to 0. + + let runtime = mk_test_runtime(WasmExecutionMethod::Compiled, 1024); + + let instance = runtime.new_instance().unwrap(); + let heap_base = instance + .get_global_const("__heap_base") + .expect("`__heap_base` is valid") + .expect("`__heap_base` exists") + .as_i32() + .expect("`__heap_base` is an `i32`"); + + fn probe_rss(instance: &dyn sc_executor_common::wasm_runtime::WasmInstance) -> usize { + let base_addr = instance.linear_memory_base_ptr().unwrap() as usize; + Smaps::new().get_rss(base_addr).expect("failed to get rss") + } + + instance + .call_export( + "test_dirty_plenty_memory", + &(heap_base as u32, 1u32).encode(), + ) + .unwrap(); + let probe_1 = probe_rss(&*instance); + instance + .call_export( + "test_dirty_plenty_memory", + &(heap_base as u32, 1024u32).encode(), + ) + .unwrap(); + let probe_2 = probe_rss(&*instance); + + assert_eq!(probe_1, 0); + assert_eq!(probe_2, 0); +} diff --git a/client/executor/src/integration_tests/linux/smaps.rs b/client/executor/src/integration_tests/linux/smaps.rs new file mode 100644 index 0000000000000..8088a5a3ea952 --- /dev/null +++ b/client/executor/src/integration_tests/linux/smaps.rs @@ -0,0 +1,82 @@ +// This file is part of Substrate. 
+ +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! A tool for extracting information about the memory consumption of the current process from +//! the procfs. + +use std::ops::Range; +use std::collections::BTreeMap; + +/// An interface to the /proc/self/smaps +/// +/// See docs about [procfs on kernel.org][procfs] +/// +/// [procfs]: https://www.kernel.org/doc/html/latest/filesystems/proc.html +pub struct Smaps(Vec<(Range, BTreeMap)>); + +impl Smaps { + pub fn new() -> Self { + let regex_start = regex::RegexBuilder::new("^([0-9a-f]+)-([0-9a-f]+)") + .multi_line(true) + .build() + .unwrap(); + let regex_kv = regex::RegexBuilder::new(r#"^([^:]+):\s*(\d+) kB"#) + .multi_line(true) + .build() + .unwrap(); + let smaps = std::fs::read_to_string("/proc/self/smaps").unwrap(); + let boundaries: Vec<_> = regex_start + .find_iter(&smaps) + .map(|matched| matched.start()) + .chain(std::iter::once(smaps.len())) + .collect(); + + let mut output = Vec::new(); + for window in boundaries.windows(2) { + let chunk = &smaps[window[0]..window[1]]; + let caps = regex_start.captures(chunk).unwrap(); + let start = usize::from_str_radix(caps.get(1).unwrap().as_str(), 16).unwrap(); + let end = usize::from_str_radix(caps.get(2).unwrap().as_str(), 16).unwrap(); + + let values = regex_kv + .captures_iter(chunk) + .map(|cap| { + let key = cap.get(1).unwrap().as_str().to_owned(); + let value = cap.get(2).unwrap().as_str().parse().unwrap(); + (key, value) + }) + .collect(); + + output.push((start..end, values)); + } + + Self(output) + } + + fn get_map(&self, addr: usize) -> &BTreeMap { + &self.0 + .iter() + .find(|(range, _)| addr >= range.start && addr < range.end) + .unwrap() + .1 + } + + pub fn get_rss(&self, addr: usize) -> Option { + self.get_map(addr).get("Rss").cloned() + } +} diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index fb39429dfdb24..8c8674fc3ca95 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -15,6 +15,9 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+ +#[cfg(target_os = "linux")] +mod linux; mod sandbox; use std::sync::Arc; diff --git a/client/executor/wasmtime/Cargo.toml b/client/executor/wasmtime/Cargo.toml index 591565276a9d8..1e886d15beb18 100644 --- a/client/executor/wasmtime/Cargo.toml +++ b/client/executor/wasmtime/Cargo.toml @@ -13,6 +13,8 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] +libc = "0.2.90" +cfg-if = "1.0" log = "0.4.8" scoped-tls = "1.0" parity-wasm = "0.42.0" diff --git a/client/executor/wasmtime/src/instance_wrapper.rs b/client/executor/wasmtime/src/instance_wrapper.rs index 381ae993442a2..866dbfb2e2bfc 100644 --- a/client/executor/wasmtime/src/instance_wrapper.rs +++ b/client/executor/wasmtime/src/instance_wrapper.rs @@ -415,6 +415,43 @@ impl InstanceWrapper { slice::from_raw_parts_mut(ptr, len) } } + + /// Returns the pointer to the first byte of the linear memory for this instance. + pub fn base_ptr(&self) -> *const u8 { + self.memory.data_ptr() + } + + /// Removes physical backing from the allocated linear memory. This leads to returning the memory + /// back to the system. While the memory is zeroed this is considered as a side-effect and is not + /// relied upon. Thus this function acts as a hint. + pub fn decommit(&self) { + if self.memory.data_size() == 0 { + return; + } + + cfg_if::cfg_if! { + if #[cfg(target_os = "linux")] { + use std::sync::Once; + + unsafe { + let ptr = self.memory.data_ptr(); + let len = self.memory.data_size(); + + // Linux handles MADV_DONTNEED reliably. The result is that the given area + // is unmapped and will be zeroed on the next pagefault. + if libc::madvise(ptr as _, len, libc::MADV_DONTNEED) != 0 { + static LOGGED: Once = Once::new(); + LOGGED.call_once(|| { + log::warn!( + "madvise(MADV_DONTNEED) failed: {}", + std::io::Error::last_os_error(), + ); + }); + } + } + } + } + } } impl runtime_blob::InstanceGlobals for InstanceWrapper { diff --git a/client/executor/wasmtime/src/runtime.rs b/client/executor/wasmtime/src/runtime.rs index fc45345256d1d..5018b11264d71 100644 --- a/client/executor/wasmtime/src/runtime.rs +++ b/client/executor/wasmtime/src/runtime.rs @@ -150,7 +150,13 @@ impl WasmInstance for WasmtimeInstance { globals_snapshot.apply(&**instance_wrapper); let allocator = FreeingBumpHeapAllocator::new(*heap_base); - perform_call(data, Rc::clone(&instance_wrapper), entrypoint, allocator) + let result = perform_call(data, Rc::clone(&instance_wrapper), entrypoint, allocator); + + // Signal to the OS that we are done with the linear memory and that it can be + // reclaimed. + instance_wrapper.decommit(); + + result } Strategy::RecreateInstance(instance_creator) => { let instance_wrapper = instance_creator.instantiate()?; @@ -173,6 +179,19 @@ impl WasmInstance for WasmtimeInstance { } } } + + fn linear_memory_base_ptr(&self) -> Option<*const u8> { + match &self.strategy { + Strategy::RecreateInstance(_) => { + // We do not keep the wasm instance around, therefore there is no linear memory + // associated with it. + None + } + Strategy::FastInstanceReuse { + instance_wrapper, .. + } => Some(instance_wrapper.base_ptr()), + } + } } /// Prepare a directory structure and a config file to enable wasmtime caching. 
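The decommit added in this patch leans entirely on Linux's `MADV_DONTNEED` semantics: the virtual mapping stays valid, the kernel may reclaim the physical pages, and the next access faults in zero-filled pages. Below is a minimal standalone sketch of that behaviour — not part of the diff above — assuming only the `libc` crate (which this patch adds as a dependency) and a Linux target:

    fn main() {
        const LEN: usize = 64 * 1024; // roughly one wasm page worth of memory
        unsafe {
            let raw = libc::mmap(
                std::ptr::null_mut(),
                LEN,
                libc::PROT_READ | libc::PROT_WRITE,
                libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
                -1,
                0,
            );
            assert_ne!(raw, libc::MAP_FAILED, "mmap failed");
            let ptr = raw as *mut u8;

            // Touch every 4KiB host page so the range is backed by physical memory
            // (this is what shows up as Rss in /proc/self/smaps).
            for off in (0..LEN).step_by(4096) {
                ptr.add(off).write(0xAA);
            }

            // Hint the kernel that the contents are no longer needed. The virtual
            // mapping stays valid, but the backing pages can be dropped.
            assert_eq!(libc::madvise(raw, LEN, libc::MADV_DONTNEED), 0);

            // The next access faults in fresh zero pages, so the data reads as 0.
            assert_eq!(ptr.read(), 0);

            libc::munmap(raw, LEN);
        }
    }

This is why the new integration test can expect the probed Rss of the instance's linear-memory mapping to drop back to 0 after a call, and why the zeroing is treated as a side-effect rather than something the runtime relies upon.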
From f32aa2243f57f63a7f7c8c10783f1455bd0005a3 Mon Sep 17 00:00:00 2001 From: Sergei Shulepov Date: Tue, 15 Jun 2021 11:58:09 +0100 Subject: [PATCH 04/67] Test restoring zeroed data (#9011) * Test restoring zeroed data * Change to u64 --- client/executor/runtime-test/src/lib.rs | 15 +++++++++++++++ client/executor/src/integration_tests/mod.rs | 19 +++++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/client/executor/runtime-test/src/lib.rs b/client/executor/runtime-test/src/lib.rs index 115683bffa62d..0cfa06a94c610 100644 --- a/client/executor/runtime-test/src/lib.rs +++ b/client/executor/runtime-test/src/lib.rs @@ -39,6 +39,14 @@ extern "C" { /// the initialized value at the start of a runtime call. static mut MUTABLE_STATIC: u64 = 32; +#[cfg(not(feature = "std"))] +/// This is similar to `MUTABLE_STATIC`. The tests need `MUTABLE_STATIC` for testing that +/// non-null initialization data is properly restored during instance reusing. +/// +/// `MUTABLE_STATIC_BSS` on the other hand focuses on the zeroed data. This is important since there +/// may be differences in handling zeroed and non-zeroed data. +static mut MUTABLE_STATIC_BSS: u64 = 0; + sp_core::wasm_export_functions! { fn test_calling_missing_external() { unsafe { missing_external() } @@ -309,6 +317,13 @@ sp_core::wasm_export_functions! { } } + fn returns_mutable_static_bss() -> u64 { + unsafe { + MUTABLE_STATIC_BSS += 1; + MUTABLE_STATIC_BSS + } + } + fn allocates_huge_stack_array(trap: bool) -> Vec { // Allocate a stack frame that is approx. 75% of the stack (assuming it is 1MB). // This will just decrease (stacks in wasm32-u-u grow downwards) the stack diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index 8c8674fc3ca95..0762306309df4 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -588,6 +588,25 @@ fn returns_mutable_static(wasm_method: WasmExecutionMethod) { assert_eq!(33, u64::decode(&mut &res[..]).unwrap()); } +test_wasm_execution!(returns_mutable_static_bss); +fn returns_mutable_static_bss(wasm_method: WasmExecutionMethod) { + let runtime = mk_test_runtime(wasm_method, 1024); + + let instance = runtime.new_instance().unwrap(); + let res = instance + .call_export("returns_mutable_static_bss", &[0]) + .unwrap(); + assert_eq!(1, u64::decode(&mut &res[..]).unwrap()); + + // We expect that every invocation will need to return the initial + // value plus one. If the value increases more than that then it is + // a sign that the wasm runtime preserves the memory content. + let res = instance + .call_export("returns_mutable_static_bss", &[0]) + .unwrap(); + assert_eq!(1, u64::decode(&mut &res[..]).unwrap()); +} + // If we didn't restore the wasm instance properly, on a trap the stack pointer would not be // returned to its initial value and thus the stack space is going to be leaked. // From cdc55fe6b838410750c897f189ef73064c44396d Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 15 Jun 2021 15:23:58 +0200 Subject: [PATCH 05/67] Frame `remove_all` with size limit. (#9106) * remove prefixed content with limit. * test match * factor comment and factor ext limit removal. 
* fix benchmark Co-authored-by: Shawn Tabrizi --- client/db/src/bench.rs | 7 +- client/db/src/lib.rs | 7 +- client/db/src/storage_cache.rs | 14 ++- client/executor/runtime-test/src/lib.rs | 2 +- client/light/src/backend.rs | 7 +- frame/contracts/src/storage.rs | 6 +- frame/elections-phragmen/src/benchmarking.rs | 2 +- frame/im-online/src/lib.rs | 4 +- frame/society/src/lib.rs | 6 +- frame/staking/src/lib.rs | 6 +- frame/staking/src/slashing.rs | 4 +- frame/staking/src/testing_utils.rs | 4 +- frame/support/src/lib.rs | 5 +- frame/support/src/storage/child.rs | 4 +- .../src/storage/generator/double_map.rs | 5 +- frame/support/src/storage/generator/nmap.rs | 4 +- frame/support/src/storage/migration.rs | 2 +- frame/support/src/storage/mod.rs | 16 +-- frame/support/src/storage/types/double_map.rs | 15 +-- frame/support/src/storage/types/map.rs | 8 +- frame/support/src/storage/types/nmap.rs | 20 ++-- frame/support/src/storage/unhashed.rs | 4 +- frame/system/src/lib.rs | 6 +- frame/uniques/src/lib.rs | 4 +- primitives/externalities/src/lib.rs | 9 +- primitives/io/src/lib.rs | 110 ++++++++++-------- primitives/state-machine/src/backend.rs | 7 +- primitives/state-machine/src/basic.rs | 13 ++- primitives/state-machine/src/ext.rs | 102 +++++++++------- primitives/state-machine/src/lib.rs | 30 ++++- .../state-machine/src/proving_backend.rs | 7 +- primitives/state-machine/src/read_only.rs | 5 +- primitives/state-machine/src/trie_backend.rs | 7 +- .../state-machine/src/trie_backend_essence.rs | 65 ++++++----- primitives/tasks/src/async_externalities.rs | 5 +- primitives/trie/src/lib.rs | 29 ----- 36 files changed, 312 insertions(+), 239 deletions(-) diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index c198fb400408e..1f2f46af0079e 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -373,13 +373,14 @@ impl StateBackend> for BenchmarkingState { } } - fn apply_to_child_keys_while bool>( + fn apply_to_keys_while bool>( &self, - child_info: &ChildInfo, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, f: F, ) { if let Some(ref state) = *self.state.borrow() { - state.apply_to_child_keys_while(child_info, f) + state.apply_to_keys_while(child_info, prefix, f) } } diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index cda197ab0687a..38b9d7a7adff4 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -205,12 +205,13 @@ impl StateBackend> for RefTrackingState { self.state.for_key_values_with_prefix(prefix, f) } - fn apply_to_child_keys_while bool>( + fn apply_to_keys_while bool>( &self, - child_info: &ChildInfo, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, f: F, ) { - self.state.apply_to_child_keys_while(child_info, f) + self.state.apply_to_keys_while(child_info, prefix, f) } fn for_child_keys_with_prefix( diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index cb2ab1de1b6c9..788e011fb2f05 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -605,12 +605,13 @@ impl>, B: BlockT> StateBackend> for Cachin self.state.exists_child_storage(child_info, key) } - fn apply_to_child_keys_while bool>( + fn apply_to_keys_while bool>( &self, - child_info: &ChildInfo, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, f: F, ) { - self.state.apply_to_child_keys_while(child_info, f) + self.state.apply_to_keys_while(child_info, prefix, f) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { @@ -787,12 +788,13 @@ impl>, B: BlockT> StateBackend> for Syncin 
self.caching_state().exists_child_storage(child_info, key) } - fn apply_to_child_keys_while bool>( + fn apply_to_keys_while bool>( &self, - child_info: &ChildInfo, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, f: F, ) { - self.caching_state().apply_to_child_keys_while(child_info, f) + self.caching_state().apply_to_keys_while(child_info, prefix, f) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { diff --git a/client/executor/runtime-test/src/lib.rs b/client/executor/runtime-test/src/lib.rs index 0cfa06a94c610..439d4f66b1879 100644 --- a/client/executor/runtime-test/src/lib.rs +++ b/client/executor/runtime-test/src/lib.rs @@ -71,7 +71,7 @@ sp_core::wasm_export_functions! { } fn test_clear_prefix(input: Vec) -> Vec { - storage::clear_prefix(&input); + storage::clear_prefix(&input, None); b"all ok!".to_vec() } diff --git a/client/light/src/backend.rs b/client/light/src/backend.rs index 4c8ac3fe40f4d..a7f1b8e0c1696 100644 --- a/client/light/src/backend.rs +++ b/client/light/src/backend.rs @@ -461,14 +461,15 @@ impl StateBackend for GenesisOrUnavailableState } } - fn apply_to_child_keys_while bool>( + fn apply_to_keys_while bool>( &self, - child_info: &ChildInfo, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, action: A, ) { match *self { GenesisOrUnavailableState::Genesis(ref state) => - state.apply_to_child_keys_while(child_info, action), + state.apply_to_keys_while(child_info, prefix, action), GenesisOrUnavailableState::Unavailable => (), } } diff --git a/frame/contracts/src/storage.rs b/frame/contracts/src/storage.rs index 17486b274f2c9..15782d7d1e459 100644 --- a/frame/contracts/src/storage.rs +++ b/frame/contracts/src/storage.rs @@ -33,7 +33,7 @@ use sp_runtime::{ use sp_core::crypto::UncheckedFrom; use frame_support::{ dispatch::{DispatchError, DispatchResult}, - storage::child::{self, KillChildStorageResult, ChildInfo}, + storage::child::{self, KillStorageResult, ChildInfo}, traits::Get, weights::Weight, }; @@ -331,14 +331,14 @@ where let removed = queue.swap_remove(0); match outcome { // This should not happen as our budget was large enough to remove all keys. - KillChildStorageResult::SomeRemaining(_) => { + KillStorageResult::SomeRemaining(_) => { log::error!( target: "runtime::contracts", "After deletion keys are remaining in this child trie: {:?}", removed.trie_id, ); }, - KillChildStorageResult::AllRemoved(_) => (), + KillStorageResult::AllRemoved(_) => (), } } remaining_key_budget = remaining_key_budget diff --git a/frame/elections-phragmen/src/benchmarking.rs b/frame/elections-phragmen/src/benchmarking.rs index 3534a62ac3ce0..86a0116978067 100644 --- a/frame/elections-phragmen/src/benchmarking.rs +++ b/frame/elections-phragmen/src/benchmarking.rs @@ -142,7 +142,7 @@ fn clean() { >::kill(); >::kill(); >::kill(); - >::remove_all(); + >::remove_all(None); } benchmarks! { diff --git a/frame/im-online/src/lib.rs b/frame/im-online/src/lib.rs index e132f7f929a06..318e3d2de3ad2 100644 --- a/frame/im-online/src/lib.rs +++ b/frame/im-online/src/lib.rs @@ -809,8 +809,8 @@ impl OneSessionHandler for Pallet { // Remove all received heartbeats and number of authored blocks from the // current session, they have already been processed and won't be needed // anymore. 
- ReceivedHeartbeats::::remove_prefix(&T::ValidatorSet::session_index()); - AuthoredBlocks::::remove_prefix(&T::ValidatorSet::session_index()); + ReceivedHeartbeats::::remove_prefix(&T::ValidatorSet::session_index(), None); + AuthoredBlocks::::remove_prefix(&T::ValidatorSet::session_index(), None); if offenders.is_empty() { Self::deposit_event(Event::::AllGood); diff --git a/frame/society/src/lib.rs b/frame/society/src/lib.rs index 3b661386da23e..ff6cc0786dcb1 100644 --- a/frame/society/src/lib.rs +++ b/frame/society/src/lib.rs @@ -871,7 +871,7 @@ decl_module! { Founder::::kill(); Rules::::kill(); Candidates::::kill(); - SuspendedCandidates::::remove_all(); + SuspendedCandidates::::remove_all(None); Self::deposit_event(RawEvent::Unfounded(founder)); } @@ -1402,7 +1402,7 @@ impl, I: Instance> Module { }).collect::>(); // Clean up all votes. - >::remove_all(); + >::remove_all(None); // Reward one of the voters who voted the right way. if !total_slash.is_zero() { @@ -1570,7 +1570,7 @@ impl, I: Instance> Module { } // Clean up all votes. - >::remove_all(); + >::remove_all(None); } // Avoid challenging if there's only two members since we never challenge the Head or diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index 734afb0824615..ff7a1ae8a8820 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -2655,9 +2655,9 @@ impl Pallet { /// Clear all era information for given era. fn clear_era_information(era_index: EraIndex) { - >::remove_prefix(era_index); - >::remove_prefix(era_index); - >::remove_prefix(era_index); + >::remove_prefix(era_index, None); + >::remove_prefix(era_index, None); + >::remove_prefix(era_index, None); >::remove(era_index); >::remove(era_index); >::remove(era_index); diff --git a/frame/staking/src/slashing.rs b/frame/staking/src/slashing.rs index 50cab1103b95a..1e959e9341add 100644 --- a/frame/staking/src/slashing.rs +++ b/frame/staking/src/slashing.rs @@ -543,8 +543,8 @@ impl<'a, T: 'a + Config> Drop for InspectingSpans<'a, T> { /// Clear slashing metadata for an obsolete era. pub(crate) fn clear_era_metadata(obsolete_era: EraIndex) { - as Store>::ValidatorSlashInEra::remove_prefix(&obsolete_era); - as Store>::NominatorSlashInEra::remove_prefix(&obsolete_era); + as Store>::ValidatorSlashInEra::remove_prefix(&obsolete_era, None); + as Store>::NominatorSlashInEra::remove_prefix(&obsolete_era, None); } /// Clear slashing metadata for a dead account. diff --git a/frame/staking/src/testing_utils.rs b/frame/staking/src/testing_utils.rs index 185b96983ab94..f3af4ac0920d1 100644 --- a/frame/staking/src/testing_utils.rs +++ b/frame/staking/src/testing_utils.rs @@ -29,8 +29,8 @@ const SEED: u32 = 0; /// This function removes all validators and nominators from storage. pub fn clear_validators_and_nominators() { - Validators::::remove_all(); - Nominators::::remove_all(); + Validators::::remove_all(None); + Nominators::::remove_all(None); } /// Grab a funded user. 
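The call sites updated above all pass `None`, which keeps today's "delete everything" behaviour; the point of the new `limit` parameter is the `Some(..)` form. As a rough illustration only (a hypothetical helper, assuming `frame_support` and `sp_io` as changed by this patch), a large prefix can be deleted a budgeted chunk at a time and the cleanup re-scheduled until the backend reports it is empty:

    use frame_support::storage::unhashed;
    use sp_io::KillStorageResult;

    /// Delete at most `key_budget` backend keys under `prefix`.
    /// Returns `true` once nothing is left, so the caller can stop re-scheduling the cleanup.
    fn lazy_kill_prefix(prefix: &[u8], key_budget: u32) -> bool {
        match unhashed::kill_prefix(prefix, Some(key_budget)) {
            // Every key under the prefix has been removed.
            KillStorageResult::AllRemoved(_removed) => true,
            // The budget was exhausted; some keys remain in the backend for a later block.
            KillStorageResult::SomeRemaining(_removed) => false,
        }
    }

The `pallet-contracts` hunk earlier in this patch handles the child-trie flavour of the same result type in exactly this way.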
diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index 43891c158200e..49e61eea569bd 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -1005,7 +1005,10 @@ pub mod tests { DoubleMap::insert(&key1, &(key2 + 1), &4u64); DoubleMap::insert(&(key1 + 1), &key2, &4u64); DoubleMap::insert(&(key1 + 1), &(key2 + 1), &4u64); - DoubleMap::remove_prefix(&key1); + assert!(matches!( + DoubleMap::remove_prefix(&key1, None), + sp_io::KillStorageResult::AllRemoved(0), // all in overlay + )); assert_eq!(DoubleMap::get(&key1, &key2), 0u64); assert_eq!(DoubleMap::get(&key1, &(key2 + 1)), 0u64); assert_eq!(DoubleMap::get(&(key1 + 1), &key2), 4u64); diff --git a/frame/support/src/storage/child.rs b/frame/support/src/storage/child.rs index 6f99874743946..52830c8ac5dc8 100644 --- a/frame/support/src/storage/child.rs +++ b/frame/support/src/storage/child.rs @@ -24,7 +24,7 @@ use crate::sp_std::prelude::*; use codec::{Codec, Encode, Decode}; pub use sp_core::storage::{ChildInfo, ChildType}; -pub use crate::sp_io::KillChildStorageResult; +pub use crate::sp_io::KillStorageResult; /// Return the value of the item in storage under `key`, or `None` if there is no explicit entry. pub fn get( @@ -174,7 +174,7 @@ pub fn exists( pub fn kill_storage( child_info: &ChildInfo, limit: Option, -) -> KillChildStorageResult { +) -> KillStorageResult { match child_info.child_type() { ChildType::ParentKeyId => sp_io::default_child_storage::storage_kill( child_info.storage_key(), diff --git a/frame/support/src/storage/generator/double_map.rs b/frame/support/src/storage/generator/double_map.rs index c02ebe48290eb..836ae25bdbbcf 100644 --- a/frame/support/src/storage/generator/double_map.rs +++ b/frame/support/src/storage/generator/double_map.rs @@ -212,8 +212,9 @@ impl storage::StorageDoubleMap for G where unhashed::kill(&Self::storage_double_map_final_key(k1, k2)) } - fn remove_prefix(k1: KArg1) where KArg1: EncodeLike { - unhashed::kill_prefix(Self::storage_double_map_final_key1(k1).as_ref()) + fn remove_prefix(k1: KArg1, limit: Option) -> sp_io::KillStorageResult + where KArg1: EncodeLike { + unhashed::kill_prefix(Self::storage_double_map_final_key1(k1).as_ref(), limit) } fn iter_prefix_values(k1: KArg1) -> storage::PrefixIterator where diff --git a/frame/support/src/storage/generator/nmap.rs b/frame/support/src/storage/generator/nmap.rs index d1f00adda5e55..62f188a26db8d 100755 --- a/frame/support/src/storage/generator/nmap.rs +++ b/frame/support/src/storage/generator/nmap.rs @@ -196,11 +196,11 @@ where unhashed::kill(&Self::storage_n_map_final_key::(key)); } - fn remove_prefix(partial_key: KP) + fn remove_prefix(partial_key: KP, limit: Option) -> sp_io::KillStorageResult where K: HasKeyPrefix, { - unhashed::kill_prefix(&Self::storage_n_map_partial_key(partial_key)); + unhashed::kill_prefix(&Self::storage_n_map_partial_key(partial_key), limit) } fn iter_prefix_values(partial_key: KP) -> PrefixIterator diff --git a/frame/support/src/storage/migration.rs b/frame/support/src/storage/migration.rs index b4a1a9225dd1f..62db2eff839fb 100644 --- a/frame/support/src/storage/migration.rs +++ b/frame/support/src/storage/migration.rs @@ -244,7 +244,7 @@ pub fn remove_storage_prefix(module: &[u8], item: &[u8], hash: &[u8]) { key[0..16].copy_from_slice(&Twox128::hash(module)); key[16..32].copy_from_slice(&Twox128::hash(item)); key[32..].copy_from_slice(hash); - frame_support::storage::unhashed::kill_prefix(&key) + frame_support::storage::unhashed::kill_prefix(&key, None); } /// Get a particular value in storage 
by the `module`, the map's `item` name and the key `hash`. diff --git a/frame/support/src/storage/mod.rs b/frame/support/src/storage/mod.rs index 34d217f5c31b6..6a02c6572c7f2 100644 --- a/frame/support/src/storage/mod.rs +++ b/frame/support/src/storage/mod.rs @@ -464,7 +464,8 @@ pub trait StorageDoubleMap { KArg2: EncodeLike; /// Remove all values under the first key. - fn remove_prefix(k1: KArg1) where KArg1: ?Sized + EncodeLike; + fn remove_prefix(k1: KArg1, limit: Option) -> sp_io::KillStorageResult + where KArg1: ?Sized + EncodeLike; /// Iterate over values that share the first key. fn iter_prefix_values(k1: KArg1) -> PrefixIterator @@ -589,7 +590,8 @@ pub trait StorageNMap { fn remove + TupleToEncodedIter>(key: KArg); /// Remove all values under the partial prefix key. - fn remove_prefix(partial_key: KP) where K: HasKeyPrefix; + fn remove_prefix(partial_key: KP, limit: Option) -> sp_io::KillStorageResult + where K: HasKeyPrefix; /// Iterate over values that share the partial prefix key. fn iter_prefix_values(partial_key: KP) -> PrefixIterator where K: HasKeyPrefix; @@ -880,8 +882,8 @@ pub trait StoragePrefixedMap { } /// Remove all value of the storage. - fn remove_all() { - sp_io::storage::clear_prefix(&Self::final_prefix()) + fn remove_all(limit: Option) -> sp_io::KillStorageResult { + sp_io::storage::clear_prefix(&Self::final_prefix(), limit) } /// Iter over all value of the storage. @@ -1184,7 +1186,7 @@ mod test { assert_eq!(MyStorage::iter_values().collect::>(), vec![1, 2, 3, 4]); // test removal - MyStorage::remove_all(); + MyStorage::remove_all(None); assert!(MyStorage::iter_values().collect::>().is_empty()); // test migration @@ -1194,7 +1196,7 @@ mod test { assert!(MyStorage::iter_values().collect::>().is_empty()); MyStorage::translate_values(|v: u32| Some(v as u64)); assert_eq!(MyStorage::iter_values().collect::>(), vec![1, 2]); - MyStorage::remove_all(); + MyStorage::remove_all(None); // test migration 2 unhashed::put(&[&k[..], &vec![1][..]].concat(), &1u128); @@ -1206,7 +1208,7 @@ mod test { assert_eq!(MyStorage::iter_values().collect::>(), vec![1, 2, 3]); MyStorage::translate_values(|v: u128| Some(v as u64)); assert_eq!(MyStorage::iter_values().collect::>(), vec![1, 2, 3]); - MyStorage::remove_all(); + MyStorage::remove_all(None); // test that other values are not modified. assert_eq!(unhashed::get(&key_before[..]), Some(32u64)); diff --git a/frame/support/src/storage/types/double_map.rs b/frame/support/src/storage/types/double_map.rs index f0ed1999d912e..6f03e9b8b2dd0 100644 --- a/frame/support/src/storage/types/double_map.rs +++ b/frame/support/src/storage/types/double_map.rs @@ -205,8 +205,9 @@ where } /// Remove all values under the first key. - pub fn remove_prefix(k1: KArg1) where KArg1: ?Sized + EncodeLike { - >::remove_prefix(k1) + pub fn remove_prefix(k1: KArg1, limit: Option) -> sp_io::KillStorageResult + where KArg1: ?Sized + EncodeLike { + >::remove_prefix(k1, limit) } /// Iterate over values that share the first key. @@ -316,8 +317,8 @@ where } /// Remove all value of the storage. - pub fn remove_all() { - >::remove_all() + pub fn remove_all(limit: Option) -> sp_io::KillStorageResult { + >::remove_all(limit) } /// Iter over all value of the storage. 
@@ -615,7 +616,7 @@ mod test { A::insert(3, 30, 10); A::insert(4, 40, 10); - A::remove_all(); + A::remove_all(None); assert_eq!(A::contains_key(3, 30), false); assert_eq!(A::contains_key(4, 40), false); @@ -655,7 +656,7 @@ mod test { assert_eq!(AValueQueryWithAnOnEmpty::DEFAULT.0.default_byte(), 97u32.encode()); assert_eq!(A::DEFAULT.0.default_byte(), Option::::None.encode()); - WithLen::remove_all(); + WithLen::remove_all(None); assert_eq!(WithLen::decode_len(3, 30), None); WithLen::append(0, 100, 10); assert_eq!(WithLen::decode_len(0, 100), Some(1)); @@ -669,7 +670,7 @@ mod test { assert_eq!(A::iter_prefix_values(4).collect::>(), vec![13, 14]); assert_eq!(A::iter_prefix(4).collect::>(), vec![(40, 13), (41, 14)]); - A::remove_prefix(3); + A::remove_prefix(3, None); assert_eq!(A::iter_prefix(3).collect::>(), vec![]); assert_eq!(A::iter_prefix(4).collect::>(), vec![(40, 13), (41, 14)]); diff --git a/frame/support/src/storage/types/map.rs b/frame/support/src/storage/types/map.rs index 35062fbc61b25..db3a5e73c9cb1 100644 --- a/frame/support/src/storage/types/map.rs +++ b/frame/support/src/storage/types/map.rs @@ -231,8 +231,8 @@ where } /// Remove all value of the storage. - pub fn remove_all() { - >::remove_all() + pub fn remove_all(limit: Option) -> sp_io::KillStorageResult { + >::remove_all(limit) } /// Iter over all value of the storage. @@ -498,7 +498,7 @@ mod test { A::insert(3, 10); A::insert(4, 10); - A::remove_all(); + A::remove_all(None); assert_eq!(A::contains_key(3), false); assert_eq!(A::contains_key(4), false); @@ -533,7 +533,7 @@ mod test { assert_eq!(AValueQueryWithAnOnEmpty::DEFAULT.0.default_byte(), 97u32.encode()); assert_eq!(A::DEFAULT.0.default_byte(), Option::::None.encode()); - WithLen::remove_all(); + WithLen::remove_all(None); assert_eq!(WithLen::decode_len(3), None); WithLen::append(0, 10); assert_eq!(WithLen::decode_len(0), Some(1)); diff --git a/frame/support/src/storage/types/nmap.rs b/frame/support/src/storage/types/nmap.rs index e1f5feb956ef3..a9fc121d42d2e 100755 --- a/frame/support/src/storage/types/nmap.rs +++ b/frame/support/src/storage/types/nmap.rs @@ -166,11 +166,11 @@ where } /// Remove all values under the first key. - pub fn remove_prefix(partial_key: KP) + pub fn remove_prefix(partial_key: KP, limit: Option) -> sp_io::KillStorageResult where Key: HasKeyPrefix, { - >::remove_prefix(partial_key) + >::remove_prefix(partial_key, limit) } /// Iterate over values that share the first key. @@ -266,8 +266,8 @@ where } /// Remove all value of the storage. - pub fn remove_all() { - >::remove_all() + pub fn remove_all(limit: Option) -> sp_io::KillStorageResult { + >::remove_all(limit) } /// Iter over all value of the storage. 
@@ -546,7 +546,7 @@ mod test { A::insert((3,), 10); A::insert((4,), 10); - A::remove_all(); + A::remove_all(None); assert_eq!(A::contains_key((3,)), false); assert_eq!(A::contains_key((4,)), false); @@ -582,7 +582,7 @@ mod test { ); assert_eq!(A::DEFAULT.0.default_byte(), Option::::None.encode()); - WithLen::remove_all(); + WithLen::remove_all(None); assert_eq!(WithLen::decode_len((3,)), None); WithLen::append((0,), 10); assert_eq!(WithLen::decode_len((0,)), Some(1)); @@ -720,7 +720,7 @@ mod test { A::insert((3, 30), 10); A::insert((4, 40), 10); - A::remove_all(); + A::remove_all(None); assert_eq!(A::contains_key((3, 30)), false); assert_eq!(A::contains_key((4, 40)), false); @@ -768,7 +768,7 @@ mod test { ); assert_eq!(A::DEFAULT.0.default_byte(), Option::::None.encode()); - WithLen::remove_all(); + WithLen::remove_all(None); assert_eq!(WithLen::decode_len((3, 30)), None); WithLen::append((0, 100), 10); assert_eq!(WithLen::decode_len((0, 100)), Some(1)); @@ -953,7 +953,7 @@ mod test { A::insert((3, 30, 300), 10); A::insert((4, 40, 400), 10); - A::remove_all(); + A::remove_all(None); assert_eq!(A::contains_key((3, 30, 300)), false); assert_eq!(A::contains_key((4, 40, 400)), false); @@ -1003,7 +1003,7 @@ mod test { ); assert_eq!(A::DEFAULT.0.default_byte(), Option::::None.encode()); - WithLen::remove_all(); + WithLen::remove_all(None); assert_eq!(WithLen::decode_len((3, 30, 300)), None); WithLen::append((0, 100, 1000), 10); assert_eq!(WithLen::decode_len((0, 100, 1000)), Some(1)); diff --git a/frame/support/src/storage/unhashed.rs b/frame/support/src/storage/unhashed.rs index d3d54f3de5795..134b3debcd31b 100644 --- a/frame/support/src/storage/unhashed.rs +++ b/frame/support/src/storage/unhashed.rs @@ -92,8 +92,8 @@ pub fn kill(key: &[u8]) { } /// Ensure keys with the given `prefix` have no entries in storage. -pub fn kill_prefix(prefix: &[u8]) { - sp_io::storage::clear_prefix(prefix); +pub fn kill_prefix(prefix: &[u8], limit: Option) -> sp_io::KillStorageResult { + sp_io::storage::clear_prefix(prefix, limit) } /// Get a Vec of bytes from storage. diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index e3a110f2e7e2c..17ea3a71bec8c 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -463,7 +463,7 @@ pub mod pallet { _subkeys: u32, ) -> DispatchResultWithPostInfo { ensure_root(origin)?; - storage::unhashed::kill_prefix(&prefix); + storage::unhashed::kill_prefix(&prefix, None); Ok(().into()) } @@ -1334,7 +1334,7 @@ impl Pallet { if let InitKind::Full = kind { >::kill(); EventCount::::kill(); - >::remove_all(); + >::remove_all(None); } } @@ -1447,7 +1447,7 @@ impl Pallet { pub fn reset_events() { >::kill(); EventCount::::kill(); - >::remove_all(); + >::remove_all(None); } /// Assert the given `event` exists. 
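One subtlety worth keeping in mind, which the `frame_support` test above hints at (`AllRemoved(0), // all in overlay`) and the `sp-io` doc comments below spell out: keys written during the current block live in the overlay and are deleted without counting towards the `limit` or the returned count. A small sketch of that, assuming `sp_io::TestExternalities` and purely illustrative key names:

    #[test]
    fn overlay_keys_do_not_count_towards_limit() {
        sp_io::TestExternalities::default().execute_with(|| {
            frame_support::storage::unhashed::put(b"prefix:a", &1u32);
            frame_support::storage::unhashed::put(b"prefix:b", &2u32);
            // Both keys exist only in the overlay here, so the backend count is 0
            // even though the prefix is fully cleared.
            assert!(matches!(
                frame_support::storage::unhashed::kill_prefix(b"prefix:", Some(10)),
                sp_io::KillStorageResult::AllRemoved(0),
            ));
        });
    }

In other words, the returned count (and the limit) only describe backend keys, so budgeting deletions with `Some(limit)` mostly makes sense when the overlay for that prefix is empty.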
diff --git a/frame/uniques/src/lib.rs b/frame/uniques/src/lib.rs index 28518843c96fc..b98a038ecff36 100644 --- a/frame/uniques/src/lib.rs +++ b/frame/uniques/src/lib.rs @@ -417,9 +417,9 @@ pub mod pallet { for (instance, details) in Asset::::drain_prefix(&class) { Account::::remove((&details.owner, &class, &instance)); } - InstanceMetadataOf::::remove_prefix(&class); + InstanceMetadataOf::::remove_prefix(&class, None); ClassMetadataOf::::remove(&class); - Attribute::::remove_prefix((&class,)); + Attribute::::remove_prefix((&class,), None); T::Currency::unreserve(&class_details.owner, class_details.total_deposit); Self::deposit_event(Event::Destroyed(class)); diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index 14145e8798498..7a8771bd623ed 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -151,14 +151,19 @@ pub trait Externalities: ExtensionStore { fn kill_child_storage(&mut self, child_info: &ChildInfo, limit: Option) -> (bool, u32); /// Clear storage entries which keys are start with the given prefix. - fn clear_prefix(&mut self, prefix: &[u8]); + /// + /// `limit` and result works as for `kill_child_storage`. + fn clear_prefix(&mut self, prefix: &[u8], limit: Option) -> (bool, u32); /// Clear child storage entries which keys are start with the given prefix. + /// + /// `limit` and result works as for `kill_child_storage`. fn clear_child_prefix( &mut self, child_info: &ChildInfo, prefix: &[u8], - ); + limit: Option, + ) -> (bool, u32); /// Set or clear a storage entry (`key`) of current contract being called (effective immediately). fn place_storage(&mut self, key: Vec, value: Option>); diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index f0fcc4f1b0672..12cbf09e86507 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -86,7 +86,7 @@ pub enum EcdsaVerifyError { /// The outcome of calling `storage_kill`. Returned value is the number of storage items /// removed from the trie from making the `storage_kill` call. #[derive(PassByCodec, Encode, Decode)] -pub enum KillChildStorageResult { +pub enum KillStorageResult { /// No key remains in the child trie. AllRemoved(u32), /// At least one key still resides in the child trie due to the supplied limit. @@ -133,9 +133,44 @@ pub trait Storage { /// Clear the storage of each key-value pair where the key starts with the given `prefix`. fn clear_prefix(&mut self, prefix: &[u8]) { - Externalities::clear_prefix(*self, prefix) + let _ = Externalities::clear_prefix(*self, prefix, None); } + /// Clear the storage of each key-value pair where the key starts with the given `prefix`. + /// + /// # Limit + /// + /// Deletes all keys from the overlay and up to `limit` keys from the backend if + /// it is set to `Some`. No limit is applied when `limit` is set to `None`. + /// + /// The limit can be used to partially delete a prefix storage in case it is too large + /// to delete in one go (block). + /// + /// It returns a boolean false iff some keys are remaining in + /// the prefix after the functions returns. Also returns a `u32` with + /// the number of keys removed from the process. + /// + /// # Note + /// + /// Please note that keys that are residing in the overlay for that prefix when + /// issuing this call are all deleted without counting towards the `limit`. Only keys + /// written during the current block are part of the overlay. Deleting with a `limit` + /// mostly makes sense with an empty overlay for that prefix. 
+ /// + /// Calling this function multiple times per block for the same `prefix` does + /// not make much sense because it is not cumulative when called inside the same block. + /// Use this function to distribute the deletion of a single child trie across multiple + /// blocks. + #[version(2)] + fn clear_prefix(&mut self, prefix: &[u8], limit: Option) -> KillStorageResult { + let (all_removed, num_removed) = Externalities::clear_prefix(*self, prefix, limit); + match all_removed { + true => KillStorageResult::AllRemoved(num_removed), + false => KillStorageResult::SomeRemaining(num_removed), + } + } + + /// Append the encoded `value` to the storage item at `key`. /// /// The storage item needs to implement [`EncodeAppend`](codec::EncodeAppend). @@ -296,26 +331,7 @@ pub trait DefaultChildStorage { /// Clear a child storage key. /// - /// Deletes all keys from the overlay and up to `limit` keys from the backend if - /// it is set to `Some`. No limit is applied when `limit` is set to `None`. - /// - /// The limit can be used to partially delete a child trie in case it is too large - /// to delete in one go (block). - /// - /// It returns a boolean false iff some keys are remaining in - /// the child trie after the functions returns. - /// - /// # Note - /// - /// Please note that keys that are residing in the overlay for that child trie when - /// issuing this call are all deleted without counting towards the `limit`. Only keys - /// written during the current block are part of the overlay. Deleting with a `limit` - /// mostly makes sense with an empty overlay for that child trie. - /// - /// Calling this function multiple times per block for the same `storage_key` does - /// not make much sense because it is not cumulative when called inside the same block. - /// Use this function to distribute the deletion of a single child trie across multiple - /// blocks. + /// See `Storage` module `clear_prefix` documentation for `limit` usage. #[version(2)] fn storage_kill(&mut self, storage_key: &[u8], limit: Option) -> bool { let child_info = ChildInfo::new_default(storage_key); @@ -325,34 +341,14 @@ pub trait DefaultChildStorage { /// Clear a child storage key. /// - /// Deletes all keys from the overlay and up to `limit` keys from the backend if - /// it is set to `Some`. No limit is applied when `limit` is set to `None`. - /// - /// The limit can be used to partially delete a child trie in case it is too large - /// to delete in one go (block). - /// - /// It returns a boolean false iff some keys are remaining in - /// the child trie after the functions returns. Also returns a `u32` with - /// the number of keys removed from the process. - /// - /// # Note - /// - /// Please note that keys that are residing in the overlay for that child trie when - /// issuing this call are all deleted without counting towards the `limit`. Only keys - /// written during the current block are part of the overlay. Deleting with a `limit` - /// mostly makes sense with an empty overlay for that child trie. - /// - /// Calling this function multiple times per block for the same `storage_key` does - /// not make much sense because it is not cumulative when called inside the same block. - /// Use this function to distribute the deletion of a single child trie across multiple - /// blocks. + /// See `Storage` module `clear_prefix` documentation for `limit` usage. 
#[version(3)] - fn storage_kill(&mut self, storage_key: &[u8], limit: Option) -> KillChildStorageResult { + fn storage_kill(&mut self, storage_key: &[u8], limit: Option) -> KillStorageResult { let child_info = ChildInfo::new_default(storage_key); let (all_removed, num_removed) = self.kill_child_storage(&child_info, limit); match all_removed { - true => KillChildStorageResult::AllRemoved(num_removed), - false => KillChildStorageResult::SomeRemaining(num_removed), + true => KillStorageResult::AllRemoved(num_removed), + false => KillStorageResult::SomeRemaining(num_removed), } } @@ -377,7 +373,25 @@ pub trait DefaultChildStorage { prefix: &[u8], ) { let child_info = ChildInfo::new_default(storage_key); - self.clear_child_prefix(&child_info, prefix); + let _ = self.clear_child_prefix(&child_info, prefix, None); + } + + /// Clear the child storage of each key-value pair where the key starts with the given `prefix`. + /// + /// See `Storage` module `clear_prefix` documentation for `limit` usage. + #[version(2)] + fn clear_prefix( + &mut self, + storage_key: &[u8], + prefix: &[u8], + limit: Option, + ) -> KillStorageResult { + let child_info = ChildInfo::new_default(storage_key); + let (all_removed, num_removed) = self.clear_child_prefix(&child_info, prefix, limit); + match all_removed { + true => KillStorageResult::AllRemoved(num_removed), + false => KillStorageResult::SomeRemaining(num_removed), + } } /// Default child root calculation. @@ -1531,7 +1545,7 @@ mod tests { }); t.execute_with(|| { - storage::clear_prefix(b":abc"); + assert!(matches!(storage::clear_prefix(b":abc", None), KillStorageResult::AllRemoved(2))); assert!(storage::get(b":a").is_some()); assert!(storage::get(b":abdd").is_some()); diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 92b4c83314e72..18b89acbc6f13 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -93,11 +93,12 @@ pub trait Backend: sp_std::fmt::Debug { key: &[u8] ) -> Result, Self::Error>; - /// Retrieve all entries keys of child storage and call `f` for each of those keys. + /// Retrieve all entries keys of storage and call `f` for each of those keys. /// Aborts as soon as `f` returns false. 
- fn apply_to_child_keys_while bool>( + fn apply_to_keys_while bool>( &self, - child_info: &ChildInfo, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, f: F, ); diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index dda8f523b77f9..08849ebcc69ab 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -216,13 +216,13 @@ impl Externalities for BasicExternalities { (true, num_removed as u32) } - fn clear_prefix(&mut self, prefix: &[u8]) { + fn clear_prefix(&mut self, prefix: &[u8], _limit: Option) -> (bool, u32) { if is_child_storage_key(prefix) { warn!( target: "trie", "Refuse to clear prefix that is part of child storage key via main storage" ); - return; + return (false, 0); } let to_remove = self.inner.top.range::<[u8], _>((Bound::Included(prefix), Bound::Unbounded)) @@ -231,16 +231,19 @@ impl Externalities for BasicExternalities { .cloned() .collect::>(); + let num_removed = to_remove.len(); for key in to_remove { self.inner.top.remove(&key); } + (true, num_removed as u32) } fn clear_child_prefix( &mut self, child_info: &ChildInfo, prefix: &[u8], - ) { + _limit: Option, + ) -> (bool, u32) { if let Some(child) = self.inner.children_default.get_mut(child_info.storage_key()) { let to_remove = child.data.range::<[u8], _>((Bound::Included(prefix), Bound::Unbounded)) .map(|(k, _)| k) @@ -248,9 +251,13 @@ impl Externalities for BasicExternalities { .cloned() .collect::>(); + let num_removed = to_remove.len(); for key in to_remove { child.data.remove(&key); } + (true, num_removed as u32) + } else { + (true, 0) } } diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 8bcf1f28a0778..e66664647d9d8 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -460,36 +460,10 @@ where let _guard = guard(); self.mark_dirty(); self.overlay.clear_child_storage(child_info); - let mut num_deleted: u32 = 0; - - if let Some(limit) = limit { - let mut all_deleted = true; - self.backend.apply_to_child_keys_while(child_info, |key| { - if num_deleted == limit { - all_deleted = false; - return false; - } - if let Some(num) = num_deleted.checked_add(1) { - num_deleted = num; - } else { - all_deleted = false; - return false; - } - self.overlay.set_child_storage(child_info, key.to_vec(), None); - true - }); - (all_deleted, num_deleted) - } else { - self.backend.apply_to_child_keys_while(child_info, |key| { - num_deleted = num_deleted.saturating_add(1); - self.overlay.set_child_storage(child_info, key.to_vec(), None); - true - }); - (true, num_deleted) - } + self.limit_remove_from_backend(Some(child_info), None, limit) } - fn clear_prefix(&mut self, prefix: &[u8]) { + fn clear_prefix(&mut self, prefix: &[u8], limit: Option) -> (bool, u32) { trace!(target: "state", "{:04x}: ClearPrefix {}", self.id, HexDisplay::from(&prefix), @@ -498,21 +472,20 @@ where if sp_core::storage::well_known_keys::starts_with_child_storage_key(prefix) { warn!(target: "trie", "Refuse to directly clear prefix that is part or contains of child storage key"); - return; + return (false, 0); } self.mark_dirty(); self.overlay.clear_prefix(prefix); - self.backend.for_keys_with_prefix(prefix, |key| { - self.overlay.set_storage(key.to_vec(), None); - }); + self.limit_remove_from_backend(None, Some(prefix), limit) } fn clear_child_prefix( &mut self, child_info: &ChildInfo, prefix: &[u8], - ) { + limit: Option, + ) -> (bool, u32) { trace!(target: "state", "{:04x}: ClearChildPrefix({}) {}", 
self.id, HexDisplay::from(&child_info.storage_key()), @@ -522,9 +495,7 @@ where self.mark_dirty(); self.overlay.clear_child_prefix(child_info, prefix); - self.backend.for_child_keys_with_prefix(child_info, prefix, |key| { - self.overlay.set_child_storage(child_info, key.to_vec(), None); - }); + self.limit_remove_from_backend(Some(child_info), Some(prefix), limit) } fn storage_append( @@ -780,6 +751,57 @@ where } } +impl<'a, H, N, B> Ext<'a, H, N, B> +where + H: Hasher, + H::Out: Ord + 'static + codec::Codec, + B: Backend, + N: crate::changes_trie::BlockNumber, +{ + fn limit_remove_from_backend( + &mut self, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + limit: Option, + ) -> (bool, u32) { + let mut num_deleted: u32 = 0; + + if let Some(limit) = limit { + let mut all_deleted = true; + self.backend.apply_to_keys_while(child_info, prefix, |key| { + if num_deleted == limit { + all_deleted = false; + return false; + } + if let Some(num) = num_deleted.checked_add(1) { + num_deleted = num; + } else { + all_deleted = false; + return false; + } + if let Some(child_info) = child_info { + self.overlay.set_child_storage(child_info, key.to_vec(), None); + } else { + self.overlay.set_storage(key.to_vec(), None); + } + true + }); + (all_deleted, num_deleted) + } else { + self.backend.apply_to_keys_while(child_info, prefix, |key| { + num_deleted = num_deleted.saturating_add(1); + if let Some(child_info) = child_info { + self.overlay.set_child_storage(child_info, key.to_vec(), None); + } else { + self.overlay.set_storage(key.to_vec(), None); + } + true + }); + (true, num_deleted) + } + } +} + /// Implement `Encode` by forwarding the stored raw vec. struct EncodeOpaqueValue(Vec); @@ -1155,14 +1177,14 @@ mod tests { not_under_prefix.extend(b"path"); ext.set_storage(not_under_prefix.clone(), vec![10]); - ext.clear_prefix(&[]); - ext.clear_prefix(&well_known_keys::CHILD_STORAGE_KEY_PREFIX[..4]); + ext.clear_prefix(&[], None); + ext.clear_prefix(&well_known_keys::CHILD_STORAGE_KEY_PREFIX[..4], None); let mut under_prefix = well_known_keys::CHILD_STORAGE_KEY_PREFIX.to_vec(); under_prefix.extend(b"path"); - ext.clear_prefix(&well_known_keys::CHILD_STORAGE_KEY_PREFIX[..4]); + ext.clear_prefix(&well_known_keys::CHILD_STORAGE_KEY_PREFIX[..4], None); assert_eq!(ext.child_storage(child_info, &[30]), Some(vec![40])); assert_eq!(ext.storage(not_under_prefix.as_slice()), Some(vec![10])); - ext.clear_prefix(¬_under_prefix[..5]); + ext.clear_prefix(¬_under_prefix[..5], None); assert_eq!(ext.storage(not_under_prefix.as_slice()), None); } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 0508bfb780929..c4ba39e160160 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -1102,6 +1102,7 @@ mod tests { overlay.set_storage(b"abd".to_vec(), Some(b"69".to_vec())); overlay.set_storage(b"bbd".to_vec(), Some(b"42".to_vec())); + let overlay_limit = overlay.clone(); { let mut cache = StorageTransactionCache::default(); let mut ext = Ext::new( @@ -1111,7 +1112,7 @@ mod tests { changes_trie::disabled_state::<_, u64>(), None, ); - ext.clear_prefix(b"ab"); + ext.clear_prefix(b"ab", None); } overlay.commit_transaction().unwrap(); @@ -1128,6 +1129,33 @@ mod tests { b"bbd".to_vec() => Some(b"42".to_vec()).into() ], ); + + let mut overlay = overlay_limit; + { + let mut cache = StorageTransactionCache::default(); + let mut ext = Ext::new( + &mut overlay, + &mut cache, + backend, + changes_trie::disabled_state::<_, u64>(), + None, + ); + 
assert_eq!((false, 1), ext.clear_prefix(b"ab", Some(1))); + } + overlay.commit_transaction().unwrap(); + + assert_eq!( + overlay.changes().map(|(k, v)| (k.clone(), v.value().cloned())) + .collect::>(), + map![ + b"abb".to_vec() => None.into(), + b"aba".to_vec() => None.into(), + b"abd".to_vec() => None.into(), + + b"bab".to_vec() => Some(b"228".to_vec()).into(), + b"bbd".to_vec() => Some(b"42".to_vec()).into() + ], + ); } #[test] diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 963582a3cc35d..d68a87f9f56a5 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -260,12 +260,13 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> self.0.child_storage(child_info, key) } - fn apply_to_child_keys_while bool>( + fn apply_to_keys_while bool>( &self, - child_info: &ChildInfo, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, f: F, ) { - self.0.apply_to_child_keys_while(child_info, f) + self.0.apply_to_keys_while(child_info, prefix, f) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { diff --git a/primitives/state-machine/src/read_only.rs b/primitives/state-machine/src/read_only.rs index 296520900c952..7b67b61eea822 100644 --- a/primitives/state-machine/src/read_only.rs +++ b/primitives/state-machine/src/read_only.rs @@ -136,7 +136,7 @@ impl<'a, H: Hasher, B: 'a + Backend> Externalities for ReadOnlyExternalities< unimplemented!("kill_child_storage is not supported in ReadOnlyExternalities") } - fn clear_prefix(&mut self, _prefix: &[u8]) { + fn clear_prefix(&mut self, _prefix: &[u8], _limit: Option) -> (bool, u32) { unimplemented!("clear_prefix is not supported in ReadOnlyExternalities") } @@ -144,7 +144,8 @@ impl<'a, H: Hasher, B: 'a + Backend> Externalities for ReadOnlyExternalities< &mut self, _child_info: &ChildInfo, _prefix: &[u8], - ) { + _limit: Option, + ) -> (bool, u32) { unimplemented!("clear_child_prefix is not supported in ReadOnlyExternalities") } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 5dd8fb7562f72..98deca23a9570 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -113,12 +113,13 @@ impl, H: Hasher> Backend for TrieBackend where self.essence.for_key_values_with_prefix(prefix, f) } - fn apply_to_child_keys_while bool>( + fn apply_to_keys_while bool>( &self, - child_info: &ChildInfo, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, f: F, ) { - self.essence.apply_to_child_keys_while(child_info, f) + self.essence.apply_to_keys_while(child_info, prefix, f) } fn for_child_keys_with_prefix( diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index c085099da77d8..e0a24c08393c7 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -25,7 +25,7 @@ use crate::{warn, debug}; use hash_db::{self, Hasher, Prefix}; use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, empty_child_trie_root, read_trie_value, read_child_trie_value, - for_keys_in_child_trie, KeySpacedDB, TrieDBIterator}; + KeySpacedDB, TrieDBIterator}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use crate::{backend::Consolidate, StorageKey, StorageValue}; use sp_core::storage::ChildInfo; @@ -189,29 +189,30 @@ impl, H: Hasher> TrieBackendEssence where H::Out: .map_err(map_e) } - /// 
Retrieve all entries keys of child storage and call `f` for each of those keys. + /// Retrieve all entries keys of a storage and call `f` for each of those keys. /// Aborts as soon as `f` returns false. - pub fn apply_to_child_keys_while bool>( + pub fn apply_to_keys_while bool>( &self, - child_info: &ChildInfo, - f: F, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + mut f: F, ) { - let root = match self.child_root(child_info) { - Ok(v) => v.unwrap_or_else(|| empty_child_trie_root::>().encode()), - Err(e) => { - debug!(target: "trie", "Error while iterating child storage: {}", e); - return; - } + let mut child_root = H::Out::default(); + let root = if let Some(child_info) = child_info.as_ref() { + let root_vec = match self.child_root(child_info) { + Ok(v) => v.unwrap_or_else(|| empty_child_trie_root::>().encode()), + Err(e) => { + debug!(target: "trie", "Error while iterating child storage: {}", e); + return; + } + }; + child_root.as_mut().copy_from_slice(&root_vec); + &child_root + } else { + &self.root }; - if let Err(e) = for_keys_in_child_trie::, _, _>( - child_info.keyspace(), - self, - &root, - f, - ) { - debug!(target: "trie", "Error while iterating child storage: {}", e); - } + self.trie_iter_inner(root, prefix, |k, _v| f(k), child_info) } /// Execute given closure for all keys starting with prefix. @@ -230,30 +231,38 @@ impl, H: Hasher> TrieBackendEssence where H::Out: }; let mut root = H::Out::default(); root.as_mut().copy_from_slice(&root_vec); - self.keys_values_with_prefix_inner(&root, prefix, |k, _v| f(k), Some(child_info)) + self.trie_iter_inner(&root, Some(prefix), |k, _v| { f(k); true }, Some(child_info)) } /// Execute given closure for all keys starting with prefix. pub fn for_keys_with_prefix(&self, prefix: &[u8], mut f: F) { - self.keys_values_with_prefix_inner(&self.root, prefix, |k, _v| f(k), None) + self.trie_iter_inner(&self.root, Some(prefix), |k, _v| { f(k); true }, None) } - fn keys_values_with_prefix_inner( + fn trie_iter_inner bool>( &self, root: &H::Out, - prefix: &[u8], + prefix: Option<&[u8]>, mut f: F, child_info: Option<&ChildInfo>, ) { let mut iter = move |db| -> sp_std::result::Result<(), Box>> { let trie = TrieDB::::new(db, root)?; - for x in TrieDBIterator::new_prefixed(&trie, prefix)? { + let iter = if let Some(prefix) = prefix.as_ref() { + TrieDBIterator::new_prefixed(&trie, prefix)? + } else { + TrieDBIterator::new(&trie)? + }; + + for x in iter { let (key, value) = x?; - debug_assert!(key.starts_with(prefix)); + debug_assert!(prefix.as_ref().map(|prefix| key.starts_with(prefix)).unwrap_or(true)); - f(&key, &value); + if !f(&key, &value) { + break; + } } Ok(()) @@ -271,8 +280,8 @@ impl, H: Hasher> TrieBackendEssence where H::Out: } /// Execute given closure for all key and values starting with prefix. 
- pub fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { - self.keys_values_with_prefix_inner(&self.root, prefix, f, None) + pub fn for_key_values_with_prefix(&self, prefix: &[u8], mut f: F) { + self.trie_iter_inner(&self.root, Some(prefix), |k, v| { f(k, v); true }, None) } } diff --git a/primitives/tasks/src/async_externalities.rs b/primitives/tasks/src/async_externalities.rs index 5d99ca4368d0b..b646149912643 100644 --- a/primitives/tasks/src/async_externalities.rs +++ b/primitives/tasks/src/async_externalities.rs @@ -123,7 +123,7 @@ impl Externalities for AsyncExternalities { panic!("`kill_child_storage`: should not be used in async externalities!") } - fn clear_prefix(&mut self, _prefix: &[u8]) { + fn clear_prefix(&mut self, _prefix: &[u8], _limit: Option) -> (bool, u32) { panic!("`clear_prefix`: should not be used in async externalities!") } @@ -131,7 +131,8 @@ impl Externalities for AsyncExternalities { &mut self, _child_info: &ChildInfo, _prefix: &[u8], - ) { + _limit: Option, + ) -> (bool, u32) { panic!("`clear_child_prefix`: should not be used in async externalities!") } diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 89bef715ba99a..f815d2af44ad7 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -279,35 +279,6 @@ pub fn child_delta_trie_root( ) } -/// Call `f` for all keys in a child trie. -/// Aborts as soon as `f` returns false. -pub fn for_keys_in_child_trie bool, DB>( - keyspace: &[u8], - db: &DB, - root_slice: &[u8], - mut f: F -) -> Result<(), Box>> - where - DB: hash_db::HashDBRef -{ - let mut root = TrieHash::::default(); - // root is fetched from DB, not writable by runtime, so it's always valid. - root.as_mut().copy_from_slice(root_slice); - - let db = KeySpacedDB::new(&*db, keyspace); - let trie = TrieDB::::new(&db, &root)?; - let iter = trie.iter()?; - - for x in iter { - let (key, _) = x?; - if !f(&key) { - break; - } - } - - Ok(()) -} - /// Record all keys for a given root. 
pub fn record_all_keys( db: &DB, From 7f09a7619c863c964a93ceb915d6e0b9ddc39e2e Mon Sep 17 00:00:00 2001 From: Ethan Brierley Date: Tue, 15 Jun 2021 17:23:57 +0100 Subject: [PATCH 06/67] fix: CARGO_TARGET_DIR_freeze (#9114) --- utils/wasm-builder/src/prerequisites.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/utils/wasm-builder/src/prerequisites.rs b/utils/wasm-builder/src/prerequisites.rs index 5dedcc4641a72..dbbd9c0a56229 100644 --- a/utils/wasm-builder/src/prerequisites.rs +++ b/utils/wasm-builder/src/prerequisites.rs @@ -129,6 +129,10 @@ fn check_wasm_toolchain_installed( let mut run_cmd = cargo_command.command(); run_cmd.args(&["run", "--manifest-path", &manifest_path]); + // Unset the `CARGO_TARGET_DIR` to prevent a cargo deadlock + build_cmd.env_remove("CARGO_TARGET_DIR"); + run_cmd.env_remove("CARGO_TARGET_DIR"); + build_cmd .output() .map_err(|_| err_msg.clone()) From d30b6e373dd9f470e0f0738e34519ec65a98f9f9 Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Tue, 15 Jun 2021 20:44:22 -0700 Subject: [PATCH 07/67] Emit error when construct_runtime imports a non-existent pallet part (#8949) * Emit error when construct_runtime imports a non-existent Call part * Reword and display pallet name when emitting part not found error * Migrate decl_outer_dispatch to a proc macro * Rename calls.rs to call.rs * Create new construct_runtime_v2 macro * Add UI test for importing non-existent call part in construct_runtime * Emit error when construct_runtime imports a non-existent Config part * Emit error when construct_runtime imports a non-existent Event part * Migrate decl_outer_inherent to a proc macro * Emit error when construct_runtime imports a non-existent Inherent part * Migrate decl_outer_validate_unsigned to a proc macro * Emit error when construct_runtime imports a non-existent ValidateUnsigned part * impl for old macro * fix line width * add doc * hide macroes and use unique counter everywhere * Remove construct_runtime_v2 * Encapsulate pallet part check macros in a module * Fix macro definitions in dummy part checker * Tag ProvideInherent impl with #[pallet::inherent] properly for authorship pallet * Remove Call part from pallets that do not define it * Add Call part unit tests * Remove undefined Call part import from offences pallet * Add tests for expand_outer_inherent * Remove Call part from pallets that do not define them * Remove Call part imports from pallets that do not have it defined * Remove Call part import of the offences pallet from grandpa pallet mocks * Update frame/support/test/tests/pallet.rs Co-authored-by: Guillaume Thiolliere * Remove Call part imports for pallets that do not define them * Move inherent tests to inherent_expand * Add unit tests for expand_outer_validate_unsigned * Add newline at the end of file * fix ui test * Small prayer to RNGsus for fixing CI * Remove Call part from construct_runtime for randomness collective flip pallet * Remove Call part import for randomness collective flip pallet * Summon Laplace's demon instead of praying to RNGsus * Update test expectations * fix ui test and make sure it's flaky * Revert "fix ui test and make sure it's flaky" This reverts commit 362b6881389c911ef8d9ef85d71c9463f5694b20. 
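  (Illustration, not part of the patch: with these part checks in place, declaring a pallet part in
  `construct_runtime!` that the pallet does not actually define is rejected at compile time with a
  targeted message. Assuming a hypothetical pallet `pallet_template` that defines no
  `#[pallet::call]`, a declaration such as

      construct_runtime!(
          pub enum Runtime where
              Block = Block,
              NodeBlock = Block,
              UncheckedExtrinsic = UncheckedExtrinsic
          {
              System: frame_system::{Pallet, Call, Config, Storage, Event<T>},
              // `Call` is listed here although the pallet defines no `#[pallet::call]`:
              Template: pallet_template::{Pallet, Call, Storage},
          }
      );

  now fails with an error along the lines of "`Template` does not have #[pallet::call] defined,
  perhaps you should remove `Call` from construct_runtime?", instead of an unrelated resolution
  error from deep inside the generated code.)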
* Comment out test instead of putting it in conditional compilation * Update UI test expectations * Update UI test expectations * Emit error when construct_runtime imports a non-existent Origin part Co-authored-by: thiolliere Co-authored-by: Denis P --- bin/node-template/runtime/src/lib.rs | 2 +- bin/node/runtime/src/lib.rs | 6 +- frame/aura/src/mock.rs | 2 +- frame/authority-discovery/src/lib.rs | 2 +- frame/authorship/src/lib.rs | 124 +++++------ frame/babe/src/mock.rs | 2 +- frame/contracts/src/tests.rs | 2 +- frame/grandpa/src/mock.rs | 2 +- frame/offences/benchmarking/src/mock.rs | 2 +- frame/offences/src/mock.rs | 2 +- frame/randomness-collective-flip/src/lib.rs | 2 +- .../src/construct_runtime/expand/call.rs | 145 +++++++++++++ .../src/construct_runtime/expand/config.rs | 18 +- .../src/construct_runtime/expand/event.rs | 13 +- .../src/construct_runtime/expand/inherent.rs | 204 ++++++++++++++++++ .../src/construct_runtime/expand/mod.rs | 6 + .../src/construct_runtime/expand/origin.rs | 14 +- .../src/construct_runtime/expand/unsigned.rs | 72 +++++++ .../procedural/src/construct_runtime/mod.rs | 75 +------ .../procedural/src/dummy_part_checker.rs | 104 +++++++++ frame/support/procedural/src/lib.rs | 26 +++ .../procedural/src/pallet/expand/call.rs | 33 ++- .../procedural/src/pallet/expand/event.rs | 47 +++- .../src/pallet/expand/genesis_config.rs | 52 ++++- .../procedural/src/pallet/expand/inherent.rs | 56 +++++ .../procedural/src/pallet/expand/mod.rs | 9 + .../procedural/src/pallet/expand/origin.rs | 55 +++++ .../src/pallet/expand/validate_unsigned.rs | 56 +++++ frame/support/src/dispatch.rs | 2 + frame/support/src/lib.rs | 5 +- frame/support/test/tests/construct_runtime.rs | 158 ++++++++++++++ .../undefined_call_part.rs | 33 +++ .../undefined_call_part.stderr | 49 +++++ .../undefined_event_part.rs | 33 +++ .../undefined_event_part.stderr | 101 +++++++++ .../undefined_genesis_config_part.rs | 33 +++ .../undefined_genesis_config_part.stderr | 67 ++++++ .../undefined_inherent_part.rs | 33 +++ .../undefined_inherent_part.stderr | 49 +++++ .../undefined_origin_part.rs | 33 +++ .../undefined_origin_part.stderr | 87 ++++++++ .../undefined_validate_unsigned_part.rs | 33 +++ .../undefined_validate_unsigned_part.stderr | 49 +++++ frame/support/test/tests/pallet.rs | 180 +++++++++++++++- frame/support/test/tests/pallet_instance.rs | 4 +- .../storage_info_unsatisfied_nmap.rs | 37 ++-- .../storage_info_unsatisfied_nmap.stderr | 12 +- 47 files changed, 1934 insertions(+), 197 deletions(-) create mode 100644 frame/support/procedural/src/construct_runtime/expand/call.rs create mode 100644 frame/support/procedural/src/construct_runtime/expand/inherent.rs create mode 100644 frame/support/procedural/src/construct_runtime/expand/unsigned.rs create mode 100644 frame/support/procedural/src/dummy_part_checker.rs create mode 100644 frame/support/procedural/src/pallet/expand/inherent.rs create mode 100644 frame/support/procedural/src/pallet/expand/origin.rs create mode 100644 frame/support/procedural/src/pallet/expand/validate_unsigned.rs create mode 100644 frame/support/test/tests/construct_runtime_ui/undefined_call_part.rs create mode 100644 frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr create mode 100644 frame/support/test/tests/construct_runtime_ui/undefined_event_part.rs create mode 100644 frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr create mode 100644 frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.rs create mode 100644 
frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr create mode 100644 frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.rs create mode 100644 frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr create mode 100644 frame/support/test/tests/construct_runtime_ui/undefined_origin_part.rs create mode 100644 frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr create mode 100644 frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.rs create mode 100644 frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index f98517b91d24c..2ff4272747ee5 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -287,7 +287,7 @@ construct_runtime!( UncheckedExtrinsic = UncheckedExtrinsic { System: frame_system::{Pallet, Call, Config, Storage, Event}, - RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Pallet, Call, Storage}, + RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Pallet, Storage}, Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, Aura: pallet_aura::{Pallet, Config}, Grandpa: pallet_grandpa::{Pallet, Call, Storage, Config, Event}, diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 3e8053ac4f1bb..2e11ab54e4316 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1159,10 +1159,10 @@ construct_runtime!( Contracts: pallet_contracts::{Pallet, Call, Storage, Event}, Sudo: pallet_sudo::{Pallet, Call, Config, Storage, Event}, ImOnline: pallet_im_online::{Pallet, Call, Storage, Event, ValidateUnsigned, Config}, - AuthorityDiscovery: pallet_authority_discovery::{Pallet, Call, Config}, - Offences: pallet_offences::{Pallet, Call, Storage, Event}, + AuthorityDiscovery: pallet_authority_discovery::{Pallet, Config}, + Offences: pallet_offences::{Pallet, Storage, Event}, Historical: pallet_session_historical::{Pallet}, - RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Pallet, Call, Storage}, + RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Pallet, Storage}, Identity: pallet_identity::{Pallet, Call, Storage, Event}, Society: pallet_society::{Pallet, Call, Storage, Event, Config}, Recovery: pallet_recovery::{Pallet, Call, Storage, Event}, diff --git a/frame/aura/src/mock.rs b/frame/aura/src/mock.rs index 26d5a2754974f..443ac9890ac79 100644 --- a/frame/aura/src/mock.rs +++ b/frame/aura/src/mock.rs @@ -36,7 +36,7 @@ frame_support::construct_runtime!( { System: frame_system::{Pallet, Call, Config, Storage, Event}, Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, - Aura: pallet_aura::{Pallet, Call, Storage, Config}, + Aura: pallet_aura::{Pallet, Storage, Config}, } ); diff --git a/frame/authority-discovery/src/lib.rs b/frame/authority-discovery/src/lib.rs index 868fbfc605363..791fbda103820 100644 --- a/frame/authority-discovery/src/lib.rs +++ b/frame/authority-discovery/src/lib.rs @@ -184,7 +184,7 @@ mod tests { { System: frame_system::{Pallet, Call, Config, Storage, Event}, Session: pallet_session::{Pallet, Call, Storage, Event, Config}, - AuthorityDiscovery: pallet_authority_discovery::{Pallet, Call, Config}, + AuthorityDiscovery: pallet_authority_discovery::{Pallet, Config}, } ); diff --git a/frame/authorship/src/lib.rs b/frame/authorship/src/lib.rs index 98d20ec621406..d40fb93b901a0 100644 --- 
a/frame/authorship/src/lib.rs +++ b/frame/authorship/src/lib.rs @@ -24,7 +24,6 @@ use sp_std::{result, prelude::*, collections::btree_set::BTreeSet}; use frame_support::{ dispatch, traits::{FindAuthor, VerifySeal, Get}, - inherent::{InherentData, ProvideInherent, InherentIdentifier}, }; use codec::{Encode, Decode}; use sp_runtime::traits::{Header as HeaderT, One, Saturating}; @@ -238,6 +237,68 @@ pub mod pallet { Self::verify_and_import_uncles(new_uncles) } } + + #[pallet::inherent] + impl ProvideInherent for Pallet { + type Call = Call; + type Error = InherentError; + const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; + + fn create_inherent(data: &InherentData) -> Option { + let uncles = data.uncles().unwrap_or_default(); + let mut set_uncles = Vec::new(); + + if !uncles.is_empty() { + let prev_uncles = >::get(); + let mut existing_hashes: Vec<_> = prev_uncles.into_iter().filter_map(|entry| + match entry { + UncleEntryItem::InclusionHeight(_) => None, + UncleEntryItem::Uncle(h, _) => Some(h), + } + ).collect(); + + let mut acc: >::Accumulator = Default::default(); + + for uncle in uncles { + match Self::verify_uncle(&uncle, &existing_hashes, &mut acc) { + Ok(_) => { + let hash = uncle.hash(); + set_uncles.push(uncle); + existing_hashes.push(hash); + + if set_uncles.len() == MAX_UNCLES { + break + } + } + Err(_) => { + // skip this uncle + } + } + } + } + + if set_uncles.is_empty() { + None + } else { + Some(Call::set_uncles(set_uncles)) + } + } + + fn check_inherent(call: &Self::Call, _data: &InherentData) -> result::Result<(), Self::Error> { + match call { + Call::set_uncles(ref uncles) if uncles.len() > MAX_UNCLES => { + Err(InherentError::Uncles(Error::::TooManyUncles.as_str().into())) + }, + _ => { + Ok(()) + }, + } + } + + fn is_inherent(call: &Self::Call) -> bool { + matches!(call, Call::set_uncles(_)) + } + } } impl Pallet { @@ -348,67 +409,6 @@ impl Pallet { } } -impl ProvideInherent for Pallet { - type Call = Call; - type Error = InherentError; - const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; - - fn create_inherent(data: &InherentData) -> Option { - let uncles = data.uncles().unwrap_or_default(); - let mut set_uncles = Vec::new(); - - if !uncles.is_empty() { - let prev_uncles = >::get(); - let mut existing_hashes: Vec<_> = prev_uncles.into_iter().filter_map(|entry| - match entry { - UncleEntryItem::InclusionHeight(_) => None, - UncleEntryItem::Uncle(h, _) => Some(h), - } - ).collect(); - - let mut acc: >::Accumulator = Default::default(); - - for uncle in uncles { - match Self::verify_uncle(&uncle, &existing_hashes, &mut acc) { - Ok(_) => { - let hash = uncle.hash(); - set_uncles.push(uncle); - existing_hashes.push(hash); - - if set_uncles.len() == MAX_UNCLES { - break - } - } - Err(_) => { - // skip this uncle - } - } - } - } - - if set_uncles.is_empty() { - None - } else { - Some(Call::set_uncles(set_uncles)) - } - } - - fn check_inherent(call: &Self::Call, _data: &InherentData) -> result::Result<(), Self::Error> { - match call { - Call::set_uncles(ref uncles) if uncles.len() > MAX_UNCLES => { - Err(InherentError::Uncles(Error::::TooManyUncles.as_str().into())) - }, - _ => { - Ok(()) - }, - } - } - - fn is_inherent(call: &Self::Call) -> bool { - matches!(call, Call::set_uncles(_)) - } -} - #[cfg(test)] mod tests { use crate as pallet_authorship; diff --git a/frame/babe/src/mock.rs b/frame/babe/src/mock.rs index 770e20cb786e2..a8d0bba9632d8 100644 --- a/frame/babe/src/mock.rs +++ b/frame/babe/src/mock.rs @@ -54,7 +54,7 @@ 
frame_support::construct_runtime!( Authorship: pallet_authorship::{Pallet, Call, Storage, Inherent}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, Historical: pallet_session_historical::{Pallet}, - Offences: pallet_offences::{Pallet, Call, Storage, Event}, + Offences: pallet_offences::{Pallet, Storage, Event}, Babe: pallet_babe::{Pallet, Call, Storage, Config, ValidateUnsigned}, Staking: pallet_staking::{Pallet, Call, Storage, Config, Event}, Session: pallet_session::{Pallet, Call, Storage, Event, Config}, diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 3e687643cdc8a..619bd8eac9d35 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -62,7 +62,7 @@ frame_support::construct_runtime!( System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, - Randomness: pallet_randomness_collective_flip::{Pallet, Call, Storage}, + Randomness: pallet_randomness_collective_flip::{Pallet, Storage}, Contracts: pallet_contracts::{Pallet, Call, Storage, Event}, } ); diff --git a/frame/grandpa/src/mock.rs b/frame/grandpa/src/mock.rs index fe8a1bd4a3951..9206b3ff2dfaf 100644 --- a/frame/grandpa/src/mock.rs +++ b/frame/grandpa/src/mock.rs @@ -57,7 +57,7 @@ frame_support::construct_runtime!( Staking: pallet_staking::{Pallet, Call, Config, Storage, Event}, Session: pallet_session::{Pallet, Call, Storage, Event, Config}, Grandpa: pallet_grandpa::{Pallet, Call, Storage, Config, Event, ValidateUnsigned}, - Offences: pallet_offences::{Pallet, Call, Storage, Event}, + Offences: pallet_offences::{Pallet, Storage, Event}, Historical: pallet_session_historical::{Pallet}, } ); diff --git a/frame/offences/benchmarking/src/mock.rs b/frame/offences/benchmarking/src/mock.rs index b780662b92cd7..cd72780ec5ad2 100644 --- a/frame/offences/benchmarking/src/mock.rs +++ b/frame/offences/benchmarking/src/mock.rs @@ -219,7 +219,7 @@ frame_support::construct_runtime!( Staking: pallet_staking::{Pallet, Call, Config, Storage, Event}, Session: pallet_session::{Pallet, Call, Storage, Event, Config}, ImOnline: pallet_im_online::{Pallet, Call, Storage, Event, ValidateUnsigned, Config}, - Offences: pallet_offences::{Pallet, Call, Storage, Event}, + Offences: pallet_offences::{Pallet, Storage, Event}, Historical: pallet_session_historical::{Pallet}, } ); diff --git a/frame/offences/src/mock.rs b/frame/offences/src/mock.rs index a494ab02ebbd1..fff1973e334ea 100644 --- a/frame/offences/src/mock.rs +++ b/frame/offences/src/mock.rs @@ -75,7 +75,7 @@ frame_support::construct_runtime!( UncheckedExtrinsic = UncheckedExtrinsic, { System: frame_system::{Pallet, Call, Config, Storage, Event}, - Offences: offences::{Pallet, Call, Storage, Event}, + Offences: offences::{Pallet, Storage, Event}, } ); diff --git a/frame/randomness-collective-flip/src/lib.rs b/frame/randomness-collective-flip/src/lib.rs index 3285addc5bf48..eaefa9ac86c3b 100644 --- a/frame/randomness-collective-flip/src/lib.rs +++ b/frame/randomness-collective-flip/src/lib.rs @@ -182,7 +182,7 @@ mod tests { UncheckedExtrinsic = UncheckedExtrinsic, { System: frame_system::{Pallet, Call, Config, Storage, Event}, - CollectiveFlip: pallet_randomness_collective_flip::{Pallet, Call, Storage}, + CollectiveFlip: pallet_randomness_collective_flip::{Pallet, Storage}, } ); diff --git a/frame/support/procedural/src/construct_runtime/expand/call.rs 
b/frame/support/procedural/src/construct_runtime/expand/call.rs new file mode 100644 index 0000000000000..6a44468f25b2c --- /dev/null +++ b/frame/support/procedural/src/construct_runtime/expand/call.rs @@ -0,0 +1,145 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License + +use crate::construct_runtime::Pallet; +use proc_macro2::TokenStream; +use quote::quote; +use syn::Ident; + +pub fn expand_outer_dispatch( + runtime: &Ident, + pallet_decls: &[Pallet], + scrate: &TokenStream, +) -> TokenStream { + let mut variant_defs = TokenStream::new(); + let mut variant_patterns = Vec::new(); + let mut query_call_part_macros = Vec::new(); + let mut pallet_names = Vec::new(); + + let pallets_with_call = pallet_decls + .iter() + .filter(|decl| decl.exists_part("Call")); + + for pallet_declaration in pallets_with_call { + let name = &pallet_declaration.name; + let path = &pallet_declaration.path; + let index = pallet_declaration.index; + + variant_defs.extend(quote!(#[codec(index = #index)] #name( #scrate::dispatch::CallableCallFor<#name, #runtime> ),)); + variant_patterns.push(quote!(Call::#name(call))); + pallet_names.push(name); + query_call_part_macros.push(quote! { + #path::__substrate_call_check::is_call_part_defined!(#name); + }); + } + + quote! 
{ + #( #query_call_part_macros )* + + #[derive( + Clone, PartialEq, Eq, + #scrate::codec::Encode, + #scrate::codec::Decode, + #scrate::RuntimeDebug, + )] + pub enum Call { + #variant_defs + } + impl #scrate::dispatch::GetDispatchInfo for Call { + fn get_dispatch_info(&self) -> #scrate::dispatch::DispatchInfo { + match self { + #( #variant_patterns => call.get_dispatch_info(), )* + } + } + } + impl #scrate::dispatch::GetCallMetadata for Call { + fn get_call_metadata(&self) -> #scrate::dispatch::CallMetadata { + use #scrate::dispatch::GetCallName; + match self { + #( + #variant_patterns => { + let function_name = call.get_call_name(); + let pallet_name = stringify!(#pallet_names); + #scrate::dispatch::CallMetadata { function_name, pallet_name } + } + )* + } + } + + fn get_module_names() -> &'static [&'static str] { + &[#( + stringify!(#pallet_names), + )*] + } + + fn get_call_names(module: &str) -> &'static [&'static str] { + use #scrate::dispatch::{Callable, GetCallName}; + match module { + #( + stringify!(#pallet_names) => + <<#pallet_names as Callable<#runtime>>::Call + as GetCallName>::get_call_names(), + )* + _ => unreachable!(), + } + } + } + impl #scrate::dispatch::Dispatchable for Call { + type Origin = Origin; + type Config = Call; + type Info = #scrate::weights::DispatchInfo; + type PostInfo = #scrate::weights::PostDispatchInfo; + fn dispatch(self, origin: Origin) -> #scrate::dispatch::DispatchResultWithPostInfo { + if !::filter_call(&origin, &self) { + return #scrate::sp_std::result::Result::Err(#scrate::dispatch::DispatchError::BadOrigin.into()); + } + + #scrate::traits::UnfilteredDispatchable::dispatch_bypass_filter(self, origin) + } + } + impl #scrate::traits::UnfilteredDispatchable for Call { + type Origin = Origin; + fn dispatch_bypass_filter(self, origin: Origin) -> #scrate::dispatch::DispatchResultWithPostInfo { + match self { + #( + #variant_patterns => + #scrate::traits::UnfilteredDispatchable::dispatch_bypass_filter(call, origin), + )* + } + } + } + + #( + impl #scrate::traits::IsSubType<#scrate::dispatch::CallableCallFor<#pallet_names, #runtime>> for Call { + #[allow(unreachable_patterns)] + fn is_sub_type(&self) -> Option<&#scrate::dispatch::CallableCallFor<#pallet_names, #runtime>> { + match self { + #variant_patterns => Some(call), + // May be unreachable + _ => None, + } + } + } + + impl From<#scrate::dispatch::CallableCallFor<#pallet_names, #runtime>> for Call { + fn from(call: #scrate::dispatch::CallableCallFor<#pallet_names, #runtime>) -> Self { + #variant_patterns + } + } + )* + } +} diff --git a/frame/support/procedural/src/construct_runtime/expand/config.rs b/frame/support/procedural/src/construct_runtime/expand/config.rs index 0400bd52f433a..b87d3685beeaa 100644 --- a/frame/support/procedural/src/construct_runtime/expand/config.rs +++ b/frame/support/procedural/src/construct_runtime/expand/config.rs @@ -29,21 +29,31 @@ pub fn expand_outer_config( let mut types = TokenStream::new(); let mut fields = TokenStream::new(); let mut build_storage_calls = TokenStream::new(); + let mut query_genesis_config_part_macros = Vec::new(); for decl in pallet_decls { if let Some(pallet_entry) = decl.find_part("Config") { - let config = format_ident!("{}Config", decl.name); - let pallet_name = &decl.name.to_string().to_snake_case(); - let field_name = &Ident::new(pallet_name, decl.name.span()); + let path = &decl.path; + let pallet_name = &decl.name; + let config = format_ident!("{}Config", pallet_name); + let field_name = &Ident::new( + 
&pallet_name.to_string().to_snake_case(), + decl.name.span(), + ); let part_is_generic = !pallet_entry.generics.params.is_empty(); types.extend(expand_config_types(runtime, decl, &config, part_is_generic)); fields.extend(quote!(pub #field_name: #config,)); build_storage_calls.extend(expand_config_build_storage_call(scrate, runtime, decl, &field_name)); + query_genesis_config_part_macros.push(quote! { + #path::__substrate_genesis_config_check::is_genesis_config_defined!(#pallet_name); + }); } } - quote!{ + quote! { + #( #query_genesis_config_part_macros )* + #types #[cfg(any(feature = "std", test))] diff --git a/frame/support/procedural/src/construct_runtime/expand/event.rs b/frame/support/procedural/src/construct_runtime/expand/event.rs index afedb3ed92508..d304a30b7df01 100644 --- a/frame/support/procedural/src/construct_runtime/expand/event.rs +++ b/frame/support/procedural/src/construct_runtime/expand/event.rs @@ -27,10 +27,12 @@ pub fn expand_outer_event( ) -> syn::Result { let mut event_variants = TokenStream::new(); let mut event_conversions = TokenStream::new(); + let mut query_event_part_macros = Vec::new(); for pallet_decl in pallet_decls { if let Some(pallet_entry) = pallet_decl.find_part("Event") { let path = &pallet_decl.path; + let pallet_name = &pallet_decl.name; let index = pallet_decl.index; let instance = pallet_decl.instance.as_ref(); let generics = &pallet_entry.generics; @@ -39,9 +41,9 @@ pub fn expand_outer_event( let msg = format!( "Instantiable pallet with no generic `Event` cannot \ be constructed: pallet `{}` must have generic `Event`", - pallet_decl.name, + pallet_name, ); - return Err(syn::Error::new(pallet_decl.name.span(), msg)); + return Err(syn::Error::new(pallet_name.span(), msg)); } let part_is_generic = !generics.params.is_empty(); @@ -54,10 +56,15 @@ pub fn expand_outer_event( event_variants.extend(expand_event_variant(runtime, pallet_decl, index, instance, generics)); event_conversions.extend(expand_event_conversion(scrate, pallet_decl, &pallet_event)); + query_event_part_macros.push(quote! { + #path::__substrate_event_check::is_event_part_defined!(#pallet_name); + }); } } - Ok(quote!{ + Ok(quote! { + #( #query_event_part_macros )* + #[derive( Clone, PartialEq, Eq, #scrate::codec::Encode, diff --git a/frame/support/procedural/src/construct_runtime/expand/inherent.rs b/frame/support/procedural/src/construct_runtime/expand/inherent.rs new file mode 100644 index 0000000000000..fd30416782687 --- /dev/null +++ b/frame/support/procedural/src/construct_runtime/expand/inherent.rs @@ -0,0 +1,204 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License + +use crate::construct_runtime::Pallet; +use proc_macro2::TokenStream; +use quote::quote; +use syn::{Ident, TypePath}; + +pub fn expand_outer_inherent( + runtime: &Ident, + block: &TypePath, + unchecked_extrinsic: &TypePath, + pallet_decls: &[Pallet], + scrate: &TokenStream, +) -> TokenStream { + let mut pallet_names = Vec::new(); + let mut query_inherent_part_macros = Vec::new(); + + for pallet_decl in pallet_decls { + if pallet_decl.exists_part("Inherent") { + let name = &pallet_decl.name; + let path = &pallet_decl.path; + + pallet_names.push(name); + query_inherent_part_macros.push(quote! { + #path::__substrate_inherent_check::is_inherent_part_defined!(#name); + }); + } + } + + quote! { + #( #query_inherent_part_macros )* + + trait InherentDataExt { + fn create_extrinsics(&self) -> + #scrate::inherent::Vec<<#block as #scrate::inherent::BlockT>::Extrinsic>; + fn check_extrinsics(&self, block: &#block) -> #scrate::inherent::CheckInherentsResult; + } + + impl InherentDataExt for #scrate::inherent::InherentData { + fn create_extrinsics(&self) -> + #scrate::inherent::Vec<<#block as #scrate::inherent::BlockT>::Extrinsic> + { + use #scrate::inherent::ProvideInherent; + + let mut inherents = Vec::new(); + + #( + if let Some(inherent) = #pallet_names::create_inherent(self) { + let inherent = <#unchecked_extrinsic as #scrate::inherent::Extrinsic>::new( + inherent.into(), + None, + ).expect("Runtime UncheckedExtrinsic is not Opaque, so it has to return \ + `Some`; qed"); + + inherents.push(inherent); + } + )* + + inherents + } + + fn check_extrinsics(&self, block: &#block) -> #scrate::inherent::CheckInherentsResult { + use #scrate::inherent::{ProvideInherent, IsFatalError}; + use #scrate::traits::{IsSubType, ExtrinsicCall}; + use #scrate::sp_runtime::traits::Block as _; + + let mut result = #scrate::inherent::CheckInherentsResult::new(); + + for xt in block.extrinsics() { + // Inherents are before any other extrinsics. + // And signed extrinsics are not inherents. + if #scrate::inherent::Extrinsic::is_signed(xt).unwrap_or(false) { + break + } + + let mut is_inherent = false; + + #({ + let call = <#unchecked_extrinsic as ExtrinsicCall>::call(xt); + if let Some(call) = IsSubType::<_>::is_sub_type(call) { + if #pallet_names::is_inherent(call) { + is_inherent = true; + if let Err(e) = #pallet_names::check_inherent(call, self) { + result.put_error( + #pallet_names::INHERENT_IDENTIFIER, &e + ).expect("There is only one fatal error; qed"); + if e.is_fatal_error() { + return result; + } + } + } + } + })* + + // Inherents are before any other extrinsics. + // No module marked it as inherent thus it is not. + if !is_inherent { + break + } + } + + #( + match #pallet_names::is_inherent_required(self) { + Ok(Some(e)) => { + let found = block.extrinsics().iter().any(|xt| { + let is_signed = #scrate::inherent::Extrinsic::is_signed(xt) + .unwrap_or(false); + + if !is_signed { + let call = < + #unchecked_extrinsic as ExtrinsicCall + >::call(xt); + if let Some(call) = IsSubType::<_>::is_sub_type(call) { + #pallet_names::is_inherent(&call) + } else { + false + } + } else { + // Signed extrinsics are not inherents. 
+ false + } + }); + + if !found { + result.put_error( + #pallet_names::INHERENT_IDENTIFIER, &e + ).expect("There is only one fatal error; qed"); + if e.is_fatal_error() { + return result; + } + } + }, + Ok(None) => (), + Err(e) => { + result.put_error( + #pallet_names::INHERENT_IDENTIFIER, &e + ).expect("There is only one fatal error; qed"); + if e.is_fatal_error() { + return result; + } + }, + } + )* + + result + } + } + + impl #scrate::traits::EnsureInherentsAreFirst<#block> for #runtime { + fn ensure_inherents_are_first(block: &#block) -> Result<(), u32> { + use #scrate::inherent::ProvideInherent; + use #scrate::traits::{IsSubType, ExtrinsicCall}; + use #scrate::sp_runtime::traits::Block as _; + + let mut first_signed_observed = false; + + for (i, xt) in block.extrinsics().iter().enumerate() { + let is_signed = #scrate::inherent::Extrinsic::is_signed(xt).unwrap_or(false); + + let is_inherent = if is_signed { + // Signed extrinsics are not inherents. + false + } else { + let mut is_inherent = false; + #({ + let call = <#unchecked_extrinsic as ExtrinsicCall>::call(xt); + if let Some(call) = IsSubType::<_>::is_sub_type(call) { + if #pallet_names::is_inherent(&call) { + is_inherent = true; + } + } + })* + is_inherent + }; + + if !is_inherent { + first_signed_observed = true; + } + + if first_signed_observed && is_inherent { + return Err(i as u32) + } + } + + Ok(()) + } + } + } +} diff --git a/frame/support/procedural/src/construct_runtime/expand/mod.rs b/frame/support/procedural/src/construct_runtime/expand/mod.rs index ab2242ba0546e..cf8b5eef8d105 100644 --- a/frame/support/procedural/src/construct_runtime/expand/mod.rs +++ b/frame/support/procedural/src/construct_runtime/expand/mod.rs @@ -15,12 +15,18 @@ // See the License for the specific language governing permissions and // limitations under the License +mod call; mod config; mod event; +mod inherent; mod metadata; mod origin; +mod unsigned; +pub use call::expand_outer_dispatch; pub use config::expand_outer_config; pub use event::expand_outer_event; +pub use inherent::expand_outer_inherent; pub use metadata::expand_runtime_metadata; pub use origin::expand_outer_origin; +pub use unsigned::expand_outer_validate_unsigned; diff --git a/frame/support/procedural/src/construct_runtime/expand/origin.rs b/frame/support/procedural/src/construct_runtime/expand/origin.rs index 2d0cc8300cb76..962d258359409 100644 --- a/frame/support/procedural/src/construct_runtime/expand/origin.rs +++ b/frame/support/procedural/src/construct_runtime/expand/origin.rs @@ -36,20 +36,23 @@ pub fn expand_outer_origin( let mut caller_variants = TokenStream::new(); let mut pallet_conversions = TokenStream::new(); + let mut query_origin_part_macros = Vec::new(); for pallet_decl in pallets.iter().filter(|pallet| pallet.name != SYSTEM_PALLET_NAME) { if let Some(pallet_entry) = pallet_decl.find_part("Origin") { let instance = pallet_decl.instance.as_ref(); let index = pallet_decl.index; let generics = &pallet_entry.generics; + let name = &pallet_decl.name; + let path = &pallet_decl.path; if instance.is_some() && generics.params.is_empty() { let msg = format!( "Instantiable pallet with no generic `Origin` cannot \ be constructed: pallet `{}` must have generic `Origin`", - pallet_decl.name + name ); - return Err(syn::Error::new(pallet_decl.name.span(), msg)); + return Err(syn::Error::new(name.span(), msg)); } caller_variants.extend( @@ -58,13 +61,18 @@ pub fn expand_outer_origin( pallet_conversions.extend( expand_origin_pallet_conversions(scrate, runtime, pallet_decl, 
instance, generics), ); + query_origin_part_macros.push(quote! { + #path::__substrate_origin_check::is_origin_part_defined!(#name); + }); } } let system_path = &system_pallet.path; let system_index = system_pallet.index; - Ok(quote!{ + Ok(quote! { + #( #query_origin_part_macros )* + // WARNING: All instance must hold the filter `frame_system::Config::BaseCallFilter`, except // when caller is system Root. One can use `OriginTrait::reset_filter` to do so. #[derive(Clone)] diff --git a/frame/support/procedural/src/construct_runtime/expand/unsigned.rs b/frame/support/procedural/src/construct_runtime/expand/unsigned.rs new file mode 100644 index 0000000000000..d51792dd4a8d5 --- /dev/null +++ b/frame/support/procedural/src/construct_runtime/expand/unsigned.rs @@ -0,0 +1,72 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License + +use crate::construct_runtime::Pallet; +use proc_macro2::TokenStream; +use quote::quote; +use syn::Ident; + +pub fn expand_outer_validate_unsigned( + runtime: &Ident, + pallet_decls: &[Pallet], + scrate: &TokenStream, +) -> TokenStream { + let mut pallet_names = Vec::new(); + let mut query_validate_unsigned_part_macros = Vec::new(); + + for pallet_decl in pallet_decls { + if pallet_decl.exists_part("ValidateUnsigned") { + let name = &pallet_decl.name; + let path = &pallet_decl.path; + + pallet_names.push(name); + query_validate_unsigned_part_macros.push(quote! { + #path::__substrate_validate_unsigned_check::is_validate_unsigned_part_defined!(#name); + }); + } + } + + quote! { + #( #query_validate_unsigned_part_macros )* + + impl #scrate::unsigned::ValidateUnsigned for #runtime { + type Call = Call; + + fn pre_dispatch(call: &Self::Call) -> Result<(), #scrate::unsigned::TransactionValidityError> { + #[allow(unreachable_patterns)] + match call { + #( Call::#pallet_names(inner_call) => #pallet_names::pre_dispatch(inner_call), )* + // pre-dispatch should not stop inherent extrinsics, validation should prevent + // including arbitrary (non-inherent) extrinsics to blocks. 
+ _ => Ok(()), + } + } + + fn validate_unsigned( + #[allow(unused_variables)] + source: #scrate::unsigned::TransactionSource, + call: &Self::Call, + ) -> #scrate::unsigned::TransactionValidity { + #[allow(unreachable_patterns)] + match call { + #( Call::#pallet_names(inner_call) => #pallet_names::validate_unsigned(source, inner_call), )* + _ => #scrate::unsigned::UnknownTransaction::NoUnsignedValidator.into(), + } + } + } + } +} diff --git a/frame/support/procedural/src/construct_runtime/mod.rs b/frame/support/procedural/src/construct_runtime/mod.rs index 87fce6e37cf0a..6f8924a14bccb 100644 --- a/frame/support/procedural/src/construct_runtime/mod.rs +++ b/frame/support/procedural/src/construct_runtime/mod.rs @@ -145,17 +145,17 @@ fn construct_runtime_parsed(definition: RuntimeDefinition) -> Result Result( - runtime: &'a Ident, - pallet_declarations: impl Iterator, - scrate: &'a TokenStream2, -) -> TokenStream2 { - let pallets_tokens = pallet_declarations - .filter(|pallet_declaration| pallet_declaration.exists_part("ValidateUnsigned")) - .map(|pallet_declaration| &pallet_declaration.name); - quote!( - #scrate::impl_outer_validate_unsigned!( - impl ValidateUnsigned for #runtime { - #( #pallets_tokens )* - } - ); - ) -} - -fn decl_outer_inherent<'a>( - runtime: &'a Ident, - block: &'a syn::TypePath, - unchecked_extrinsic: &'a syn::TypePath, - pallet_declarations: impl Iterator, - scrate: &'a TokenStream2, -) -> TokenStream2 { - let pallets_tokens = pallet_declarations.filter_map(|pallet_declaration| { - let maybe_config_part = pallet_declaration.find_part("Inherent"); - maybe_config_part.map(|_| { - let name = &pallet_declaration.name; - quote!(#name,) - }) - }); - quote!( - #scrate::impl_outer_inherent!( - impl Inherents where - Block = #block, - UncheckedExtrinsic = #unchecked_extrinsic, - Runtime = #runtime, - { - #(#pallets_tokens)* - } - ); - ) -} - -fn decl_outer_dispatch<'a>( - runtime: &'a Ident, - pallet_declarations: impl Iterator, - scrate: &'a TokenStream2, -) -> TokenStream2 { - let pallets_tokens = pallet_declarations - .filter(|pallet_declaration| pallet_declaration.exists_part("Call")) - .map(|pallet_declaration| { - let pallet = &pallet_declaration.path.inner.segments.last().unwrap(); - let name = &pallet_declaration.name; - let index = pallet_declaration.index; - quote!(#[codec(index = #index)] #pallet::#name) - }); - - quote!( - #scrate::impl_outer_dispatch! 
{ - pub enum Call for #runtime where origin: Origin { - #(#pallets_tokens,)* - } - } - ) -} - fn decl_all_pallets<'a>( runtime: &'a Ident, pallet_declarations: impl Iterator, diff --git a/frame/support/procedural/src/dummy_part_checker.rs b/frame/support/procedural/src/dummy_part_checker.rs new file mode 100644 index 0000000000000..8bc893b3123fa --- /dev/null +++ b/frame/support/procedural/src/dummy_part_checker.rs @@ -0,0 +1,104 @@ +use proc_macro::TokenStream; +use crate::COUNTER; + +pub fn generate_dummy_part_checker(input: TokenStream) -> TokenStream { + if !input.is_empty() { + return syn::Error::new(proc_macro2::Span::call_site(), "No arguments expected") + .to_compile_error().into() + } + + let count = COUNTER.with(|counter| counter.borrow_mut().inc()); + + let genesis_config_macro_ident = syn::Ident::new( + &format!("__is_genesis_config_defined_{}", count), + proc_macro2::Span::call_site(), + ); + let event_macro_ident = syn::Ident::new( + &format!("__is_event_part_defined_{}", count), + proc_macro2::Span::call_site(), + ); + let inherent_macro_ident = syn::Ident::new( + &format!("__is_inherent_part_defined_{}", count), + proc_macro2::Span::call_site(), + ); + let validate_unsigned_macro_ident = syn::Ident::new( + &format!("__is_validate_unsigned_part_defined_{}", count), + proc_macro2::Span::call_site(), + ); + let call_macro_ident = syn::Ident::new( + &format!("__is_call_part_defined_{}", count), + proc_macro2::Span::call_site(), + ); + let origin_macro_ident = syn::Ident::new( + &format!("__is_origin_part_defined_{}", count), + proc_macro2::Span::call_site(), + ); + + quote::quote!( + #[doc(hidden)] + pub mod __substrate_genesis_config_check { + #[macro_export] + #[doc(hidden)] + macro_rules! #genesis_config_macro_ident { + ($pallet_name:ident) => {}; + } + #[doc(hidden)] + pub use #genesis_config_macro_ident as is_genesis_config_defined; + } + + #[doc(hidden)] + pub mod __substrate_event_check { + #[macro_export] + #[doc(hidden)] + macro_rules! #event_macro_ident { + ($pallet_name:ident) => {}; + } + #[doc(hidden)] + pub use #event_macro_ident as is_event_part_defined; + } + + #[doc(hidden)] + pub mod __substrate_inherent_check { + #[macro_export] + #[doc(hidden)] + macro_rules! #inherent_macro_ident { + ($pallet_name:ident) => {}; + } + #[doc(hidden)] + pub use #inherent_macro_ident as is_inherent_part_defined; + } + + #[doc(hidden)] + pub mod __substrate_validate_unsigned_check { + #[macro_export] + #[doc(hidden)] + macro_rules! #validate_unsigned_macro_ident { + ($pallet_name:ident) => {}; + } + #[doc(hidden)] + pub use #validate_unsigned_macro_ident as is_validate_unsigned_part_defined; + } + + #[doc(hidden)] + pub mod __substrate_call_check { + #[macro_export] + #[doc(hidden)] + macro_rules! #call_macro_ident { + ($pallet_name:ident) => {}; + } + #[doc(hidden)] + pub use #call_macro_ident as is_call_part_defined; + } + + #[doc(hidden)] + pub mod __substrate_origin_check { + #[macro_export] + #[doc(hidden)] + macro_rules! 
#origin_macro_ident { + ($pallet_name:ident) => {}; + } + #[doc(hidden)] + pub use #origin_macro_ident as is_origin_part_defined; + } + ).into() +} diff --git a/frame/support/procedural/src/lib.rs b/frame/support/procedural/src/lib.rs index d3ddd2360b31f..2768608cb6f5b 100644 --- a/frame/support/procedural/src/lib.rs +++ b/frame/support/procedural/src/lib.rs @@ -29,9 +29,29 @@ mod clone_no_bound; mod partial_eq_no_bound; mod default_no_bound; mod key_prefix; +mod dummy_part_checker; pub(crate) use storage::INHERENT_INSTANCE_NAME; use proc_macro::TokenStream; +use std::cell::RefCell; + +thread_local! { + /// A global counter, can be used to generate a relatively unique identifier. + static COUNTER: RefCell = RefCell::new(Counter(0)); +} + +/// Counter to generate a relatively unique identifier for macros querying for the existence of +/// pallet parts. This is necessary because declarative macros gets hoisted to the crate root, +/// which shares the namespace with other pallets containing the very same query macros. +struct Counter(u64); + +impl Counter { + fn inc(&mut self) -> u64 { + let ret = self.0; + self.0 += 1; + ret + } +} /// Declares strongly-typed wrappers around codec-compatible types in storage. /// @@ -453,3 +473,9 @@ pub(crate) const NUMBER_OF_INSTANCE: u8 = 16; pub fn impl_key_prefix_for_tuples(input: TokenStream) -> TokenStream { key_prefix::impl_key_prefix_for_tuples(input).unwrap_or_else(syn::Error::into_compile_error).into() } + +/// Internal macro use by frame_support to generate dummy part checker for old pallet declaration +#[proc_macro] +pub fn __generate_dummy_part_checker(input: TokenStream) -> TokenStream { + dummy_part_checker::generate_dummy_part_checker(input) +} diff --git a/frame/support/procedural/src/pallet/expand/call.rs b/frame/support/procedural/src/pallet/expand/call.rs index a3ac7ecc5f865..28280a5e89220 100644 --- a/frame/support/procedural/src/pallet/expand/call.rs +++ b/frame/support/procedural/src/pallet/expand/call.rs @@ -17,6 +17,7 @@ use crate::pallet::Def; use frame_support_procedural_tools::clean_type_string; +use crate::COUNTER; use syn::spanned::Spanned; /// * Generate enum call and implement various trait on it. @@ -31,7 +32,7 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { (span, where_clause, methods, docs) } - None => (def.pallet_struct.attr_span, None, Vec::new(), Vec::new()), + None => (def.item.span(), None, Vec::new(), Vec::new()), }; let frame_support = &def.frame_support; let frame_system = &def.frame_system; @@ -89,7 +90,37 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { &docs[..] }; + let maybe_compile_error = if def.call.is_none() { + quote::quote!{ + compile_error!(concat!( + "`", + stringify!($pallet_name), + "` does not have #[pallet::call] defined, perhaps you should remove `Call` from \ + construct_runtime?", + )); + } + } else { + proc_macro2::TokenStream::new() + }; + + let count = COUNTER.with(|counter| counter.borrow_mut().inc()); + let macro_ident = syn::Ident::new(&format!("__is_call_part_defined_{}", count), span); + quote::quote_spanned!(span => + #[doc(hidden)] + pub mod __substrate_call_check { + #[macro_export] + #[doc(hidden)] + macro_rules! 
#macro_ident { + ($pallet_name:ident) => { + #maybe_compile_error + }; + } + + #[doc(hidden)] + pub use #macro_ident as is_call_part_defined; + } + #( #[doc = #docs] )* #[derive( #frame_support::RuntimeDebugNoBound, diff --git a/frame/support/procedural/src/pallet/expand/event.rs b/frame/support/procedural/src/pallet/expand/event.rs index 204b5a23611cc..d932206be09f4 100644 --- a/frame/support/procedural/src/pallet/expand/event.rs +++ b/frame/support/procedural/src/pallet/expand/event.rs @@ -16,15 +16,44 @@ // limitations under the License. use crate::pallet::{Def, parse::helper::get_doc_literals}; +use crate::COUNTER; +use syn::{spanned::Spanned, Ident}; /// * Add __Ignore variant on Event /// * Impl various trait on Event including metadata /// * if deposit_event is defined, implement deposit_event on module. pub fn expand_event(def: &mut Def) -> proc_macro2::TokenStream { - let event = if let Some(event) = &def.event { - event + let count = COUNTER.with(|counter| counter.borrow_mut().inc()); + + let (event, macro_ident) = if let Some(event) = &def.event { + let ident = Ident::new(&format!("__is_event_part_defined_{}", count), event.attr_span); + (event, ident) } else { - return Default::default() + let macro_ident = Ident::new( + &format!("__is_event_part_defined_{}", count), + def.item.span(), + ); + + return quote::quote! { + #[doc(hidden)] + pub mod __substrate_event_check { + #[macro_export] + #[doc(hidden)] + macro_rules! #macro_ident { + ($pallet_name:ident) => { + compile_error!(concat!( + "`", + stringify!($pallet_name), + "` does not have #[pallet::event] defined, perhaps you should \ + remove `Event` from construct_runtime?", + )); + } + } + + #[doc(hidden)] + pub use #macro_ident as is_event_part_defined; + } + }; }; let event_where_clause = &event.where_clause; @@ -130,6 +159,18 @@ pub fn expand_event(def: &mut Def) -> proc_macro2::TokenStream { }; quote::quote_spanned!(event.attr_span => + #[doc(hidden)] + pub mod __substrate_event_check { + #[macro_export] + #[doc(hidden)] + macro_rules! #macro_ident { + ($pallet_name:ident) => {}; + } + + #[doc(hidden)] + pub use #macro_ident as is_event_part_defined; + } + #deposit_event impl<#event_impl_gen> From<#event_ident<#event_use_gen>> for () #event_where_clause { diff --git a/frame/support/procedural/src/pallet/expand/genesis_config.rs b/frame/support/procedural/src/pallet/expand/genesis_config.rs index 23ccdfa5ddc9a..ac0bdacefc772 100644 --- a/frame/support/procedural/src/pallet/expand/genesis_config.rs +++ b/frame/support/procedural/src/pallet/expand/genesis_config.rs @@ -16,13 +16,45 @@ // limitations under the License. use crate::pallet::{Def, parse::helper::get_doc_literals}; +use crate::COUNTER; +use syn::{Ident, spanned::Spanned}; /// * add various derive trait on GenesisConfig struct. pub fn expand_genesis_config(def: &mut Def) -> proc_macro2::TokenStream { - let genesis_config = if let Some(genesis_config) = &def.genesis_config { - genesis_config + let count = COUNTER.with(|counter| counter.borrow_mut().inc()); + + let (genesis_config, macro_ident) = if let Some(genesis_config) = &def.genesis_config { + let ident = Ident::new( + &format!("__is_genesis_config_defined_{}", count), + genesis_config.genesis_config.span(), + ); + (genesis_config, ident) } else { - return Default::default() + let macro_ident = Ident::new( + &format!("__is_genesis_config_defined_{}", count), + def.item.span(), + ); + + return quote::quote! 
{ + #[doc(hidden)] + pub mod __substrate_genesis_config_check { + #[macro_export] + #[doc(hidden)] + macro_rules! #macro_ident { + ($pallet_name:ident) => { + compile_error!(concat!( + "`", + stringify!($pallet_name), + "` does not have #[pallet::genesis_config] defined, perhaps you should \ + remove `Config` from construct_runtime?", + )); + } + } + + #[doc(hidden)] + pub use #macro_ident as is_genesis_config_defined; + } + }; }; let frame_support = &def.frame_support; @@ -57,5 +89,17 @@ pub fn expand_genesis_config(def: &mut Def) -> proc_macro2::TokenStream { _ => unreachable!("Checked by genesis_config parser"), } - Default::default() + quote::quote! { + #[doc(hidden)] + pub mod __substrate_genesis_config_check { + #[macro_export] + #[doc(hidden)] + macro_rules! #macro_ident { + ($pallet_name:ident) => {}; + } + + #[doc(hidden)] + pub use #macro_ident as is_genesis_config_defined; + } + } } diff --git a/frame/support/procedural/src/pallet/expand/inherent.rs b/frame/support/procedural/src/pallet/expand/inherent.rs new file mode 100644 index 0000000000000..f1d58b28a5142 --- /dev/null +++ b/frame/support/procedural/src/pallet/expand/inherent.rs @@ -0,0 +1,56 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::pallet::Def; +use proc_macro2::TokenStream; +use quote::quote; +use crate::COUNTER; +use syn::{Ident, spanned::Spanned}; + +pub fn expand_inherents(def: &mut Def) -> TokenStream { + let count = COUNTER.with(|counter| counter.borrow_mut().inc()); + let macro_ident = Ident::new(&format!("__is_inherent_part_defined_{}", count), def.item.span()); + + let maybe_compile_error = if def.inherent.is_none() { + quote! { + compile_error!(concat!( + "`", + stringify!($pallet_name), + "` does not have #[pallet::inherent] defined, perhaps you should \ + remove `Inherent` from construct_runtime?", + )); + } + } else { + TokenStream::new() + }; + + quote! { + #[doc(hidden)] + pub mod __substrate_inherent_check { + #[macro_export] + #[doc(hidden)] + macro_rules! 
#macro_ident { + ($pallet_name:ident) => { + #maybe_compile_error + } + } + + #[doc(hidden)] + pub use #macro_ident as is_inherent_part_defined; + } + } +} diff --git a/frame/support/procedural/src/pallet/expand/mod.rs b/frame/support/procedural/src/pallet/expand/mod.rs index 22ef268177789..f3a42dfa868b2 100644 --- a/frame/support/procedural/src/pallet/expand/mod.rs +++ b/frame/support/procedural/src/pallet/expand/mod.rs @@ -24,10 +24,13 @@ mod event; mod storage; mod hooks; mod store_trait; +mod inherent; mod instances; mod genesis_build; mod genesis_config; mod type_value; +mod origin; +mod validate_unsigned; use crate::pallet::{Def, parse::helper::get_doc_literals}; use quote::ToTokens; @@ -54,12 +57,15 @@ pub fn expand(mut def: Def) -> proc_macro2::TokenStream { let error = error::expand_error(&mut def); let event = event::expand_event(&mut def); let storages = storage::expand_storages(&mut def); + let inherents = inherent::expand_inherents(&mut def); let instances = instances::expand_instances(&mut def); let store_trait = store_trait::expand_store_trait(&mut def); let hooks = hooks::expand_hooks(&mut def); let genesis_build = genesis_build::expand_genesis_build(&mut def); let genesis_config = genesis_config::expand_genesis_config(&mut def); let type_values = type_value::expand_type_values(&mut def); + let origins = origin::expand_origins(&mut def); + let validate_unsigned = validate_unsigned::expand_validate_unsigned(&mut def); if get_doc_literals(&def.item.attrs).is_empty() { def.item.attrs.push(syn::parse_quote!( @@ -80,12 +86,15 @@ pub fn expand(mut def: Def) -> proc_macro2::TokenStream { #error #event #storages + #inherents #instances #store_trait #hooks #genesis_build #genesis_config #type_values + #origins + #validate_unsigned ); def.item.content.as_mut().expect("This is checked by parsing").1 diff --git a/frame/support/procedural/src/pallet/expand/origin.rs b/frame/support/procedural/src/pallet/expand/origin.rs new file mode 100644 index 0000000000000..578c641b43e41 --- /dev/null +++ b/frame/support/procedural/src/pallet/expand/origin.rs @@ -0,0 +1,55 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{pallet::Def, COUNTER}; +use proc_macro2::TokenStream; +use quote::quote; +use syn::{Ident, spanned::Spanned}; + +pub fn expand_origins(def: &mut Def) -> TokenStream { + let count = COUNTER.with(|counter| counter.borrow_mut().inc()); + let macro_ident = Ident::new(&format!("__is_origin_part_defined_{}", count), def.item.span()); + + let maybe_compile_error = if def.origin.is_none() { + quote! { + compile_error!(concat!( + "`", + stringify!($pallet_name), + "` does not have #[pallet::origin] defined, perhaps you should \ + remove `Origin` from construct_runtime?", + )); + } + } else { + TokenStream::new() + }; + + quote! { + #[doc(hidden)] + pub mod __substrate_origin_check { + #[macro_export] + #[doc(hidden)] + macro_rules! 
#macro_ident { + ($pallet_name:ident) => { + #maybe_compile_error + } + } + + #[doc(hidden)] + pub use #macro_ident as is_origin_part_defined; + } + } +} diff --git a/frame/support/procedural/src/pallet/expand/validate_unsigned.rs b/frame/support/procedural/src/pallet/expand/validate_unsigned.rs new file mode 100644 index 0000000000000..1abf7d893b933 --- /dev/null +++ b/frame/support/procedural/src/pallet/expand/validate_unsigned.rs @@ -0,0 +1,56 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::pallet::Def; +use proc_macro2::TokenStream; +use quote::quote; +use crate::COUNTER; +use syn::{Ident, spanned::Spanned}; + +pub fn expand_validate_unsigned(def: &mut Def) -> TokenStream { + let count = COUNTER.with(|counter| counter.borrow_mut().inc()); + let macro_ident = Ident::new(&format!("__is_validate_unsigned_part_defined_{}", count), def.item.span()); + + let maybe_compile_error = if def.validate_unsigned.is_none() { + quote! { + compile_error!(concat!( + "`", + stringify!($pallet_name), + "` does not have #[pallet::validate_unsigned] defined, perhaps you should \ + remove `ValidateUnsigned` from construct_runtime?", + )); + } + } else { + TokenStream::new() + }; + + quote! { + #[doc(hidden)] + pub mod __substrate_validate_unsigned_check { + #[macro_export] + #[doc(hidden)] + macro_rules! #macro_ident { + ($pallet_name:ident) => { + #maybe_compile_error + } + } + + #[doc(hidden)] + pub use #macro_ident as is_validate_unsigned_part_defined; + } + } +} diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index d6f133a8d20a3..ee290a31d5a41 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -2159,6 +2159,8 @@ macro_rules! decl_module { <$error_type as $crate::dispatch::ModuleErrorMetadata>::metadata() } } + + $crate::__generate_dummy_part_checker!(); } } diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index 49e61eea569bd..45988c1c7372b 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -501,9 +501,12 @@ pub fn debug(data: &impl sp_std::fmt::Debug) { #[doc(inline)] pub use frame_support_procedural::{ - decl_storage, construct_runtime, transactional, RuntimeDebugNoBound + decl_storage, construct_runtime, transactional, RuntimeDebugNoBound, }; +#[doc(hidden)] +pub use frame_support_procedural::__generate_dummy_part_checker; + /// Derive [`Clone`] but do not bound any generic. 
/// /// This is useful for type generic over runtime: diff --git a/frame/support/test/tests/construct_runtime.rs b/frame/support/test/tests/construct_runtime.rs index 7858595108b0e..98d0c45d2425a 100644 --- a/frame/support/test/tests/construct_runtime.rs +++ b/frame/support/test/tests/construct_runtime.rs @@ -172,6 +172,22 @@ pub mod module3 { pub fn fail(_origin) -> frame_support::dispatch::DispatchResult { Err(Error::::Something.into()) } + #[weight = 0] + pub fn aux_1(_origin, #[compact] _data: u32) -> frame_support::dispatch::DispatchResult { + unreachable!() + } + #[weight = 0] + pub fn aux_2(_origin, _data: i32, #[compact] _data2: u32) -> frame_support::dispatch::DispatchResult { + unreachable!() + } + #[weight = 0] + fn aux_3(_origin, _data: i32, _data2: String) -> frame_support::dispatch::DispatchResult { + unreachable!() + } + #[weight = 3] + fn aux_4(_origin) -> frame_support::dispatch::DispatchResult { unreachable!() } + #[weight = (5, frame_support::weights::DispatchClass::Operational)] + fn operational(_origin) { unreachable!() } } } @@ -465,6 +481,100 @@ fn call_codec() { assert_eq!(Call::Module1_9(module1::Call::fail()).encode()[0], 13); } +#[test] +fn call_compact_attr() { + use codec::Encode; + let call: module3::Call = module3::Call::aux_1(1); + let encoded = call.encode(); + assert_eq!(2, encoded.len()); + assert_eq!(vec![1, 4], encoded); + + let call: module3::Call = module3::Call::aux_2(1, 2); + let encoded = call.encode(); + assert_eq!(6, encoded.len()); + assert_eq!(vec![2, 1, 0, 0, 0, 8], encoded); +} + +#[test] +fn call_encode_is_correct_and_decode_works() { + use codec::{Decode, Encode}; + let call: module3::Call = module3::Call::fail(); + let encoded = call.encode(); + assert_eq!(vec![0], encoded); + let decoded = module3::Call::::decode(&mut &encoded[..]).unwrap(); + assert_eq!(decoded, call); + + let call: module3::Call = module3::Call::aux_3(32, "hello".into()); + let encoded = call.encode(); + assert_eq!(vec![3, 32, 0, 0, 0, 20, 104, 101, 108, 108, 111], encoded); + let decoded = module3::Call::::decode(&mut &encoded[..]).unwrap(); + assert_eq!(decoded, call); +} + +#[test] +fn call_weight_should_attach_to_call_enum() { + use frame_support::{ + dispatch::{DispatchInfo, GetDispatchInfo}, + weights::{DispatchClass, Pays}, + }; + // operational. 
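+	// With the tuple form `(5, DispatchClass::Operational)` only the weight and the
+	// dispatch class are given, so the fee field falls back to the default `Pays::Yes`.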
+ assert_eq!( + module3::Call::::operational().get_dispatch_info(), + DispatchInfo { weight: 5, class: DispatchClass::Operational, pays_fee: Pays::Yes }, + ); + // custom basic + assert_eq!( + module3::Call::::aux_4().get_dispatch_info(), + DispatchInfo { weight: 3, class: DispatchClass::Normal, pays_fee: Pays::Yes }, + ); +} + +#[test] +fn call_name() { + use frame_support::dispatch::GetCallName; + let name = module3::Call::::aux_4().get_call_name(); + assert_eq!("aux_4", name); +} + +#[test] +fn call_metadata() { + use frame_support::dispatch::{CallMetadata, GetCallMetadata}; + let call = Call::Module3(module3::Call::::aux_4()); + let metadata = call.get_call_metadata(); + let expected = CallMetadata { function_name: "aux_4".into(), pallet_name: "Module3".into() }; + assert_eq!(metadata, expected); +} + +#[test] +fn get_call_names() { + use frame_support::dispatch::GetCallName; + let call_names = module3::Call::::get_call_names(); + assert_eq!(["fail", "aux_1", "aux_2", "aux_3", "aux_4", "operational"], call_names); +} + +#[test] +fn get_module_names() { + use frame_support::dispatch::GetCallMetadata; + let module_names = Call::get_module_names(); + assert_eq!([ + "System", "Module1_1", "Module2", "Module1_2", "NestedModule3", "Module3", + "Module1_4", "Module1_6", "Module1_7", "Module1_8", "Module1_9", + ], module_names); +} + +#[test] +fn call_subtype_conversion() { + use frame_support::{dispatch::CallableCallFor, traits::IsSubType}; + let call = Call::Module3(module3::Call::::fail()); + let subcall: Option<&CallableCallFor> = call.is_sub_type(); + let subcall_none: Option<&CallableCallFor> = call.is_sub_type(); + assert_eq!(Some(&module3::Call::::fail()), subcall); + assert_eq!(None, subcall_none); + + let from = Call::from(subcall.unwrap().clone()); + assert_eq!(from, call); +} + #[test] fn test_metadata() { use frame_metadata::*; @@ -601,6 +711,54 @@ fn test_metadata() { arguments: DecodeDifferent::Encode(&[]), documentation: DecodeDifferent::Encode(&[]), }, + FunctionMetadata { + name: DecodeDifferent::Encode("aux_1"), + arguments: DecodeDifferent::Encode(&[ + FunctionArgumentMetadata { + name: DecodeDifferent::Encode("_data"), + ty: DecodeDifferent::Encode("Compact"), + }, + ]), + documentation: DecodeDifferent::Encode(&[]), + }, + FunctionMetadata { + name: DecodeDifferent::Encode("aux_2"), + arguments: DecodeDifferent::Encode(&[ + FunctionArgumentMetadata { + name: DecodeDifferent::Encode("_data"), + ty: DecodeDifferent::Encode("i32"), + }, + FunctionArgumentMetadata { + name: DecodeDifferent::Encode("_data2"), + ty: DecodeDifferent::Encode("Compact"), + }, + ]), + documentation: DecodeDifferent::Encode(&[]), + }, + FunctionMetadata { + name: DecodeDifferent::Encode("aux_3"), + arguments: DecodeDifferent::Encode(&[ + FunctionArgumentMetadata { + name: DecodeDifferent::Encode("_data"), + ty: DecodeDifferent::Encode("i32"), + }, + FunctionArgumentMetadata { + name: DecodeDifferent::Encode("_data2"), + ty: DecodeDifferent::Encode("String"), + }, + ]), + documentation: DecodeDifferent::Encode(&[]), + }, + FunctionMetadata { + name: DecodeDifferent::Encode("aux_4"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }, + FunctionMetadata { + name: DecodeDifferent::Encode("operational"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }, ]))), event: Some(DecodeDifferent::Encode(FnEncode(|| &[ EventMetadata { diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_call_part.rs 
b/frame/support/test/tests/construct_runtime_ui/undefined_call_part.rs new file mode 100644 index 0000000000000..c5b9fcca1f318 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/undefined_call_part.rs @@ -0,0 +1,33 @@ +use frame_support::construct_runtime; +use sp_runtime::{generic, traits::BlakeTwo256}; +use sp_core::sr25519; + +#[frame_support::pallet] +mod pallet { + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); +} + +pub type Signature = sr25519::Signature; +pub type BlockNumber = u64; +pub type Header = generic::Header; +pub type Block = generic::Block; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + +impl pallet::Config for Runtime {} + +construct_runtime! { + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: system::{Pallet, Call, Storage, Config, Event}, + Pallet: pallet::{Pallet, Call}, + } +} + +fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr new file mode 100644 index 0000000000000..201609b2abaf6 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr @@ -0,0 +1,49 @@ +error: `Pallet` does not have #[pallet::call] defined, perhaps you should remove `Call` from construct_runtime? + --> $DIR/undefined_call_part.rs:5:1 + | +5 | #[frame_support::pallet] + | ^^^^^^^^^^^^^^^^^^^^^^^^ +... +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... | +30 | | } +31 | | } + | |_- in this macro invocation + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0433]: failed to resolve: use of undeclared crate or module `system` + --> $DIR/undefined_call_part.rs:28:11 + | +28 | System: system::{Pallet, Call, Storage, Config, Event}, + | ^^^^^^ use of undeclared crate or module `system` + +error[E0433]: failed to resolve: use of undeclared crate or module `system` + --> $DIR/undefined_call_part.rs:22:1 + | +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... | +30 | | } +31 | | } + | |_^ not found in `system` + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider importing this enum + | +1 | use frame_system::RawOrigin; + | + +error[E0277]: the trait bound `Runtime: frame_system::Config` is not satisfied + --> $DIR/undefined_call_part.rs:20:6 + | +8 | pub trait Config: frame_system::Config {} + | -------------------- required by this bound in `pallet::Config` +... 
+20 | impl pallet::Config for Runtime {} + | ^^^^^^^^^^^^^^ the trait `frame_system::Config` is not implemented for `Runtime` diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_event_part.rs b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.rs new file mode 100644 index 0000000000000..6aec45f240c90 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.rs @@ -0,0 +1,33 @@ +use frame_support::construct_runtime; +use sp_runtime::{generic, traits::BlakeTwo256}; +use sp_core::sr25519; + +#[frame_support::pallet] +mod pallet { + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); +} + +pub type Signature = sr25519::Signature; +pub type BlockNumber = u64; +pub type Header = generic::Header; +pub type Block = generic::Block; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + +impl pallet::Config for Runtime {} + +construct_runtime! { + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: system::{Pallet, Call, Storage, Config, Event}, + Pallet: pallet::{Pallet, Event}, + } +} + +fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr new file mode 100644 index 0000000000000..b68beb2b3fc65 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr @@ -0,0 +1,101 @@ +error: `Pallet` does not have #[pallet::event] defined, perhaps you should remove `Event` from construct_runtime? + --> $DIR/undefined_event_part.rs:5:1 + | +5 | #[frame_support::pallet] + | ^^^^^^^^^^^^^^^^^^^^^^^^ +... +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... | +30 | | } +31 | | } + | |_- in this macro invocation + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0433]: failed to resolve: use of undeclared crate or module `system` + --> $DIR/undefined_event_part.rs:28:11 + | +28 | System: system::{Pallet, Call, Storage, Config, Event}, + | ^^^^^^ use of undeclared crate or module `system` + +error[E0433]: failed to resolve: could not find `Event` in `pallet` + --> $DIR/undefined_event_part.rs:22:1 + | +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... | +30 | | } +31 | | } + | |_^ could not find `Event` in `pallet` + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0412]: cannot find type `Event` in module `pallet` + --> $DIR/undefined_event_part.rs:22:1 + | +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... | +30 | | } +31 | | } + | |_^ not found in `pallet` + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider importing this enum + | +1 | use frame_system::Event; + | + +error[E0412]: cannot find type `Event` in module `pallet` + --> $DIR/undefined_event_part.rs:22:1 + | +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... 
| +30 | | } +31 | | } + | |_^ not found in `pallet` + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider importing one of these items + | +1 | use crate::Event; + | +1 | use frame_system::Event; + | + +error[E0433]: failed to resolve: use of undeclared crate or module `system` + --> $DIR/undefined_event_part.rs:22:1 + | +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... | +30 | | } +31 | | } + | |_^ not found in `system` + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider importing this enum + | +1 | use frame_system::RawOrigin; + | + +error[E0277]: the trait bound `Runtime: frame_system::Config` is not satisfied + --> $DIR/undefined_event_part.rs:20:6 + | +8 | pub trait Config: frame_system::Config {} + | -------------------- required by this bound in `pallet::Config` +... +20 | impl pallet::Config for Runtime {} + | ^^^^^^^^^^^^^^ the trait `frame_system::Config` is not implemented for `Runtime` diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.rs b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.rs new file mode 100644 index 0000000000000..5e08fd96fa1ad --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.rs @@ -0,0 +1,33 @@ +use frame_support::construct_runtime; +use sp_runtime::{generic, traits::BlakeTwo256}; +use sp_core::sr25519; + +#[frame_support::pallet] +mod pallet { + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); +} + +pub type Signature = sr25519::Signature; +pub type BlockNumber = u64; +pub type Header = generic::Header; +pub type Block = generic::Block; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + +impl pallet::Config for Runtime {} + +construct_runtime! { + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: system::{Pallet, Call, Storage, Config, Event}, + Pallet: pallet::{Pallet, Config}, + } +} + +fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr new file mode 100644 index 0000000000000..686875d83a4f4 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr @@ -0,0 +1,67 @@ +error: `Pallet` does not have #[pallet::genesis_config] defined, perhaps you should remove `Config` from construct_runtime? + --> $DIR/undefined_genesis_config_part.rs:5:1 + | +5 | #[frame_support::pallet] + | ^^^^^^^^^^^^^^^^^^^^^^^^ +... +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... | +30 | | } +31 | | } + | |_- in this macro invocation + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0433]: failed to resolve: use of undeclared crate or module `system` + --> $DIR/undefined_genesis_config_part.rs:28:17 + | +28 | System: system::{Pallet, Call, Storage, Config, Event}, + | ^^^^^^ use of undeclared crate or module `system` + +error[E0433]: failed to resolve: use of undeclared crate or module `system` + --> $DIR/undefined_genesis_config_part.rs:22:1 + | +22 | / construct_runtime! 
{ +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... | +30 | | } +31 | | } + | |_^ not found in `system` + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider importing this enum + | +1 | use frame_system::RawOrigin; + | + +error[E0412]: cannot find type `GenesisConfig` in module `pallet` + --> $DIR/undefined_genesis_config_part.rs:22:1 + | +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... | +30 | | } +31 | | } + | |_^ not found in `pallet` + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider importing this struct + | +1 | use frame_system::GenesisConfig; + | + +error[E0277]: the trait bound `Runtime: frame_system::Config` is not satisfied + --> $DIR/undefined_genesis_config_part.rs:20:6 + | +8 | pub trait Config: frame_system::Config {} + | -------------------- required by this bound in `pallet::Config` +... +20 | impl pallet::Config for Runtime {} + | ^^^^^^^^^^^^^^ the trait `frame_system::Config` is not implemented for `Runtime` diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.rs b/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.rs new file mode 100644 index 0000000000000..06c36a30f5506 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.rs @@ -0,0 +1,33 @@ +use frame_support::construct_runtime; +use sp_runtime::{generic, traits::BlakeTwo256}; +use sp_core::sr25519; + +#[frame_support::pallet] +mod pallet { + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); +} + +pub type Signature = sr25519::Signature; +pub type BlockNumber = u64; +pub type Header = generic::Header; +pub type Block = generic::Block; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + +impl pallet::Config for Runtime {} + +construct_runtime! { + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: system::{Pallet, Call, Storage, Config, Event}, + Pallet: pallet::{Pallet, Inherent}, + } +} + +fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr new file mode 100644 index 0000000000000..303819b45dd7c --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr @@ -0,0 +1,49 @@ +error: `Pallet` does not have #[pallet::inherent] defined, perhaps you should remove `Inherent` from construct_runtime? + --> $DIR/undefined_inherent_part.rs:5:1 + | +5 | #[frame_support::pallet] + | ^^^^^^^^^^^^^^^^^^^^^^^^ +... +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... | +30 | | } +31 | | } + | |_- in this macro invocation + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0433]: failed to resolve: use of undeclared crate or module `system` + --> $DIR/undefined_inherent_part.rs:28:11 + | +28 | System: system::{Pallet, Call, Storage, Config, Event}, + | ^^^^^^ use of undeclared crate or module `system` + +error[E0433]: failed to resolve: use of undeclared crate or module `system` + --> $DIR/undefined_inherent_part.rs:22:1 + | +22 | / construct_runtime! 
{ +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... | +30 | | } +31 | | } + | |_^ not found in `system` + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider importing this enum + | +1 | use frame_system::RawOrigin; + | + +error[E0277]: the trait bound `Runtime: frame_system::Config` is not satisfied + --> $DIR/undefined_inherent_part.rs:20:6 + | +8 | pub trait Config: frame_system::Config {} + | -------------------- required by this bound in `pallet::Config` +... +20 | impl pallet::Config for Runtime {} + | ^^^^^^^^^^^^^^ the trait `frame_system::Config` is not implemented for `Runtime` diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.rs b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.rs new file mode 100644 index 0000000000000..bec5c27ec0346 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.rs @@ -0,0 +1,33 @@ +use frame_support::construct_runtime; +use sp_runtime::{generic, traits::BlakeTwo256}; +use sp_core::sr25519; + +#[frame_support::pallet] +mod pallet { + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); +} + +pub type Signature = sr25519::Signature; +pub type BlockNumber = u64; +pub type Header = generic::Header; +pub type Block = generic::Block; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + +impl pallet::Config for Runtime {} + +construct_runtime! { + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: system::{Pallet, Call, Storage, Config, Event}, + Pallet: pallet::{Pallet, Origin}, + } +} + +fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr new file mode 100644 index 0000000000000..f49dcf5783e74 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr @@ -0,0 +1,87 @@ +error: `Pallet` does not have #[pallet::origin] defined, perhaps you should remove `Origin` from construct_runtime? + --> $DIR/undefined_origin_part.rs:5:1 + | +5 | #[frame_support::pallet] + | ^^^^^^^^^^^^^^^^^^^^^^^^ +... +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... | +30 | | } +31 | | } + | |_- in this macro invocation + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0433]: failed to resolve: use of undeclared crate or module `system` + --> $DIR/undefined_origin_part.rs:28:11 + | +28 | System: system::{Pallet, Call, Storage, Config, Event}, + | ^^^^^^ use of undeclared crate or module `system` + +error[E0433]: failed to resolve: use of undeclared crate or module `system` + --> $DIR/undefined_origin_part.rs:22:1 + | +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... | +30 | | } +31 | | } + | |_^ not found in `system` + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider importing this enum + | +1 | use frame_system::RawOrigin; + | + +error[E0412]: cannot find type `Origin` in module `pallet` + --> $DIR/undefined_origin_part.rs:22:1 + | +22 | / construct_runtime! 
{ +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... | +30 | | } +31 | | } + | |_^ not found in `pallet` + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider importing this type alias + | +1 | use frame_system::Origin; + | + +error[E0412]: cannot find type `Origin` in module `pallet` + --> $DIR/undefined_origin_part.rs:22:1 + | +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... | +30 | | } +31 | | } + | |_^ not found in `pallet` + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider importing one of these items + | +1 | use crate::Origin; + | +1 | use frame_system::Origin; + | + +error[E0277]: the trait bound `Runtime: frame_system::Config` is not satisfied + --> $DIR/undefined_origin_part.rs:20:6 + | +8 | pub trait Config: frame_system::Config {} + | -------------------- required by this bound in `pallet::Config` +... +20 | impl pallet::Config for Runtime {} + | ^^^^^^^^^^^^^^ the trait `frame_system::Config` is not implemented for `Runtime` diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.rs b/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.rs new file mode 100644 index 0000000000000..816f52b91cccb --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.rs @@ -0,0 +1,33 @@ +use frame_support::construct_runtime; +use sp_runtime::{generic, traits::BlakeTwo256}; +use sp_core::sr25519; + +#[frame_support::pallet] +mod pallet { + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); +} + +pub type Signature = sr25519::Signature; +pub type BlockNumber = u64; +pub type Header = generic::Header; +pub type Block = generic::Block; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + +impl pallet::Config for Runtime {} + +construct_runtime! { + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: system::{Pallet, Call, Storage, Config, Event}, + Pallet: pallet::{Pallet, ValidateUnsigned}, + } +} + +fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr new file mode 100644 index 0000000000000..41202c3b005b7 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr @@ -0,0 +1,49 @@ +error: `Pallet` does not have #[pallet::validate_unsigned] defined, perhaps you should remove `ValidateUnsigned` from construct_runtime? + --> $DIR/undefined_validate_unsigned_part.rs:5:1 + | +5 | #[frame_support::pallet] + | ^^^^^^^^^^^^^^^^^^^^^^^^ +... +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... 
| +30 | | } +31 | | } + | |_- in this macro invocation + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0433]: failed to resolve: use of undeclared crate or module `system` + --> $DIR/undefined_validate_unsigned_part.rs:28:11 + | +28 | System: system::{Pallet, Call, Storage, Config, Event}, + | ^^^^^^ use of undeclared crate or module `system` + +error[E0433]: failed to resolve: use of undeclared crate or module `system` + --> $DIR/undefined_validate_unsigned_part.rs:22:1 + | +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... | +30 | | } +31 | | } + | |_^ not found in `system` + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider importing this enum + | +1 | use frame_system::RawOrigin; + | + +error[E0277]: the trait bound `Runtime: frame_system::Config` is not satisfied + --> $DIR/undefined_validate_unsigned_part.rs:20:6 + | +8 | pub trait Config: frame_system::Config {} + | -------------------- required by this bound in `pallet::Config` +... +20 | impl pallet::Config for Runtime {} + | ^^^^^^^^^^^^^^ the trait `frame_system::Config` is not implemented for `Runtime` diff --git a/frame/support/test/tests/pallet.rs b/frame/support/test/tests/pallet.rs index 412622b3b194d..4f1e66a868947 100644 --- a/frame/support/test/tests/pallet.rs +++ b/frame/support/test/tests/pallet.rs @@ -304,10 +304,13 @@ pub mod pallet { type Call = Call; fn validate_unsigned( _source: TransactionSource, - _call: &Self::Call + call: &Self::Call ) -> TransactionValidity { T::AccountId::from(SomeType1); // Test for where clause T::AccountId::from(SomeType5); // Test for where clause + if matches!(call, Call::foo_transactional(_)) { + return Ok(ValidTransaction::default()); + } Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) } } @@ -324,22 +327,40 @@ pub mod pallet { fn create_inherent(_data: &InherentData) -> Option { T::AccountId::from(SomeType1); // Test for where clause T::AccountId::from(SomeType6); // Test for where clause - unimplemented!(); + Some(Call::foo_no_post_info()) + } + + fn is_inherent(call: &Self::Call) -> bool { + matches!(call, Call::foo_no_post_info() | Call::foo(..)) + } + + fn check_inherent(call: &Self::Call, _: &InherentData) -> Result<(), Self::Error> { + match call { + Call::foo_no_post_info() => Ok(()), + Call::foo(0, 0) => Err(InherentError::Fatal), + Call::foo(..) 
=> Ok(()), + _ => unreachable!("other calls are not inherents"), + } } - fn is_inherent(_call: &Self::Call) -> bool { - unimplemented!(); + fn is_inherent_required(d: &InherentData) -> Result, Self::Error> { + match d.get_data::(b"required") { + Ok(Some(true)) => Ok(Some(InherentError::Fatal)), + Ok(Some(false)) | Ok(None) => Ok(None), + Err(_) => unreachable!("should not happen in tests"), + } } } #[derive(codec::Encode, sp_runtime::RuntimeDebug)] #[cfg_attr(feature = "std", derive(codec::Decode))] pub enum InherentError { + Fatal, } impl frame_support::inherent::IsFatalError for InherentError { fn is_fatal_error(&self) -> bool { - unimplemented!(); + matches!(self, InherentError::Fatal) } } @@ -538,6 +559,155 @@ fn instance_expand() { let _: pallet::__InherentHiddenInstance = (); } +#[test] +fn inherent_expand() { + use frame_support::{ + inherent::{BlockT, InherentData}, + traits::EnsureInherentsAreFirst, + }; + use sp_core::Hasher; + use sp_runtime::{traits::{BlakeTwo256, Header}, Digest}; + + let inherents = InherentData::new().create_extrinsics(); + + let expected = vec![ + UncheckedExtrinsic { function: Call::Example(pallet::Call::foo_no_post_info()), signature: None }, + ]; + assert_eq!(expected, inherents); + + let block = Block::new( + Header::new( + 1, + BlakeTwo256::hash(b"test"), + BlakeTwo256::hash(b"test"), + BlakeTwo256::hash(b"test"), + Digest::default(), + ), + vec![ + UncheckedExtrinsic { function: Call::Example(pallet::Call::foo_no_post_info()), signature: None }, + UncheckedExtrinsic { function: Call::Example(pallet::Call::foo(1, 0)), signature: None }, + ], + ); + + assert!(InherentData::new().check_extrinsics(&block).ok()); + + let block = Block::new( + Header::new( + 1, + BlakeTwo256::hash(b"test"), + BlakeTwo256::hash(b"test"), + BlakeTwo256::hash(b"test"), + Digest::default(), + ), + vec![ + UncheckedExtrinsic { function: Call::Example(pallet::Call::foo_no_post_info()), signature: None }, + UncheckedExtrinsic { function: Call::Example(pallet::Call::foo(0, 0)), signature: None }, + ], + ); + + assert!(InherentData::new().check_extrinsics(&block).fatal_error()); + + let block = Block::new( + Header::new( + 1, + BlakeTwo256::hash(b"test"), + BlakeTwo256::hash(b"test"), + BlakeTwo256::hash(b"test"), + Digest::default(), + ), + vec![ + UncheckedExtrinsic { function: Call::Example(pallet::Call::foo_transactional(0)), signature: None }, + ], + ); + + let mut inherent = InherentData::new(); + inherent.put_data(*b"required", &true).unwrap(); + assert!(inherent.check_extrinsics(&block).fatal_error()); + + let block = Block::new( + Header::new( + 1, + BlakeTwo256::hash(b"test"), + BlakeTwo256::hash(b"test"), + BlakeTwo256::hash(b"test"), + Digest::default(), + ), + vec![ + UncheckedExtrinsic { function: Call::Example(pallet::Call::foo_no_post_info()), signature: Some((1, (), ())) }, + ], + ); + + let mut inherent = InherentData::new(); + inherent.put_data(*b"required", &true).unwrap(); + assert!(inherent.check_extrinsics(&block).fatal_error()); + + let block = Block::new( + Header::new( + 1, + BlakeTwo256::hash(b"test"), + BlakeTwo256::hash(b"test"), + BlakeTwo256::hash(b"test"), + Digest::default(), + ), + vec![ + UncheckedExtrinsic { function: Call::Example(pallet::Call::foo(1, 1)), signature: None }, + UncheckedExtrinsic { function: Call::Example(pallet::Call::foo_transactional(0)), signature: None }, + ], + ); + + assert!(Runtime::ensure_inherents_are_first(&block).is_ok()); + + let block = Block::new( + Header::new( + 1, + BlakeTwo256::hash(b"test"), + 
BlakeTwo256::hash(b"test"), + BlakeTwo256::hash(b"test"), + Digest::default(), + ), + vec![ + UncheckedExtrinsic { function: Call::Example(pallet::Call::foo(1, 1)), signature: None }, + UncheckedExtrinsic { function: Call::Example(pallet::Call::foo_transactional(0)), signature: None }, + UncheckedExtrinsic { function: Call::Example(pallet::Call::foo_no_post_info()), signature: None }, + ], + ); + + assert_eq!(Runtime::ensure_inherents_are_first(&block).err().unwrap(), 2); + + let block = Block::new( + Header::new( + 1, + BlakeTwo256::hash(b"test"), + BlakeTwo256::hash(b"test"), + BlakeTwo256::hash(b"test"), + Digest::default(), + ), + vec![ + UncheckedExtrinsic { function: Call::Example(pallet::Call::foo(1, 1)), signature: None }, + UncheckedExtrinsic { function: Call::Example(pallet::Call::foo(1, 0)), signature: Some((1, (), ())) }, + UncheckedExtrinsic { function: Call::Example(pallet::Call::foo_no_post_info()), signature: None }, + ], + ); + + assert_eq!(Runtime::ensure_inherents_are_first(&block).err().unwrap(), 2); +} + +#[test] +fn validate_unsigned_expand() { + use frame_support::pallet_prelude::{ + InvalidTransaction, TransactionSource, TransactionValidityError, ValidTransaction, ValidateUnsigned, + }; + let call = pallet::Call::::foo_no_post_info(); + + let validity = pallet::Pallet::validate_unsigned(TransactionSource::Local, &call).unwrap_err(); + assert_eq!(validity, TransactionValidityError::Invalid(InvalidTransaction::Call)); + + let call = pallet::Call::::foo_transactional(0); + + let validity = pallet::Pallet::validate_unsigned(TransactionSource::External, &call).unwrap(); + assert_eq!(validity, ValidTransaction::default()); +} + #[test] fn trait_store_expand() { TestExternalities::default().execute_with(|| { diff --git a/frame/support/test/tests/pallet_instance.rs b/frame/support/test/tests/pallet_instance.rs index f0b72da2c7fbf..ccac97100a4be 100644 --- a/frame/support/test/tests/pallet_instance.rs +++ b/frame/support/test/tests/pallet_instance.rs @@ -306,8 +306,8 @@ frame_support::construct_runtime!( Instance1Example: pallet::::{ Pallet, Call, Event, Config, Storage, Inherent, Origin, ValidateUnsigned }, - Example2: pallet2::{Pallet, Call, Event, Config, Storage}, - Instance1Example2: pallet2::::{Pallet, Call, Event, Config, Storage}, + Example2: pallet2::{Pallet, Event, Config, Storage}, + Instance1Example2: pallet2::::{Pallet, Event, Config, Storage}, } ); diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.rs b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.rs index 3d03099c3c4b6..ef31af92e5a37 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.rs +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.rs @@ -1,27 +1,28 @@ -#[frame_support::pallet] -mod pallet { - use frame_support::pallet_prelude::{Hooks, StorageNMap, Twox64Concat, NMapKey}; - use frame_system::pallet_prelude::BlockNumberFor; +// #[frame_support::pallet] +// mod pallet { +// use frame_support::pallet_prelude::{Hooks, StorageNMap, Twox64Concat, NMapKey}; +// use frame_system::pallet_prelude::BlockNumberFor; - #[pallet::config] - pub trait Config: frame_system::Config {} +// #[pallet::config] +// pub trait Config: frame_system::Config {} - #[pallet::pallet] - #[pallet::generate_storage_info] - pub struct Pallet(core::marker::PhantomData); +// #[pallet::pallet] +// #[pallet::generate_storage_info] +// pub struct Pallet(core::marker::PhantomData); - #[pallet::hooks] - impl Hooks> for Pallet {} +// #[pallet::hooks] +// 
impl Hooks> for Pallet {} - #[pallet::call] - impl Pallet {} +// #[pallet::call] +// impl Pallet {} - #[derive(codec::Encode, codec::Decode)] - struct Bar; +// #[derive(codec::Encode, codec::Decode)] +// struct Bar; - #[pallet::storage] - type Foo = StorageNMap<_, NMapKey, u32>; -} +// #[pallet::storage] +// type Foo = StorageNMap<_, NMapKey, u32>; +// } fn main() { + compile_error!("Temporarily disabled due to test flakiness"); } diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr index 545520124bfee..9c69a3f076e34 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr @@ -1,9 +1,5 @@ -error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied - --> $DIR/storage_info_unsatisfied_nmap.rs:10:12 +error: Temporarily disabled due to test flakiness + --> $DIR/storage_info_unsatisfied_nmap.rs:27:2 | -10 | #[pallet::generate_storage_info] - | ^^^^^^^^^^^^^^^^^^^^^ the trait `MaxEncodedLen` is not implemented for `Bar` - | - = note: required because of the requirements on the impl of `KeyGeneratorMaxEncodedLen` for `NMapKey` - = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, NMapKey, u32>` - = note: required by `storage_info` +27 | compile_error!("Temporarily disabled due to test flakiness"); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ From f88f4edbc42b76053404f9ba45d641dafb147ab7 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Wed, 16 Jun 2021 05:57:14 +0100 Subject: [PATCH 08/67] Add Control to Growth of the Staking Pallet (#8920) * start count * track count * add max limit * min bonds for participating * respect min bond when unbonding * revert a bit of u32 * fix merge * more merge fixes * update to `Current*` * add helper functions * Update frame/staking/src/lib.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * fix * minbond as storage * checkpoint * chill_other * better bond tracking * MinBond to MinNominatorBond * better doc * use helper function * oops * simple hard limits to validators / nominators. 
* better doc * update storage version * fix tests * enable migrations * min bond tests * chill other tests * tests for max cap * check `None` on cap too * benchmarks * Update frame/staking/src/lib.rs * Update frame/staking/src/lib.rs Co-authored-by: Zeke Mostov <32168567+emostov@users.noreply.github.com> * Update frame/staking/src/lib.rs Co-authored-by: Zeke Mostov <32168567+emostov@users.noreply.github.com> * Update frame/staking/src/tests.rs Co-authored-by: Zeke Mostov <32168567+emostov@users.noreply.github.com> * fix benchmark * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_staking --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/staking/src/weights.rs --template=./.maintain/frame-weight-template.hbs * nits * fix reap_stash benchmark * remove lower bound to min bond Co-authored-by: kianenigma Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: Parity Bot Co-authored-by: Zeke Mostov <32168567+emostov@users.noreply.github.com> --- frame/staking/src/benchmarking.rs | 53 +++- frame/staking/src/lib.rs | 295 ++++++++++++++++++--- frame/staking/src/mock.rs | 25 +- frame/staking/src/testing_utils.rs | 2 + frame/staking/src/tests.rs | 397 +++++++++++++++++++---------- frame/staking/src/weights.rs | 284 +++++++++++---------- 6 files changed, 750 insertions(+), 306 deletions(-) diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index 2ad939e5b166c..8adf797abe9e9 100644 --- a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -30,6 +30,7 @@ pub use frame_benchmarking::{ const SEED: u32 = 0; const MAX_SPANS: u32 = 100; const MAX_VALIDATORS: u32 = 1000; +const MAX_NOMINATORS: u32 = 1000; const MAX_SLASHES: u32 = 1000; // Add slashing spans to a user account. Not relevant for actual use, only to benchmark @@ -463,12 +464,18 @@ benchmarks! { reap_stash { let s in 1 .. MAX_SPANS; let (stash, controller) = create_stash_controller::(0, 100, Default::default())?; + Staking::::validate(RawOrigin::Signed(controller.clone()).into(), ValidatorPrefs::default())?; add_slashing_spans::(&stash, s); T::Currency::make_free_balance_be(&stash, T::Currency::minimum_balance()); whitelist_account!(controller); + + assert!(Bonded::::contains_key(&stash)); + assert!(Validators::::contains_key(&stash)); + }: _(RawOrigin::Signed(controller), stash.clone(), s) verify { assert!(!Bonded::::contains_key(&stash)); + assert!(!Validators::::contains_key(&stash)); } new_era { @@ -563,9 +570,9 @@ benchmarks! { get_npos_voters { // number of validator intention. - let v in 200 .. 400; + let v in (MAX_VALIDATORS / 2) .. MAX_VALIDATORS; // number of nominator intention. - let n in 200 .. 400; + let n in (MAX_NOMINATORS / 2) .. MAX_NOMINATORS; // total number of slashing spans. Assigned to validators randomly. let s in 1 .. 20; @@ -584,15 +591,42 @@ benchmarks! { get_npos_targets { // number of validator intention. - let v in 200 .. 400; + let v in (MAX_VALIDATORS / 2) .. MAX_VALIDATORS; // number of nominator intention. - let n = 500; + let n = MAX_NOMINATORS; let _ = create_validators_with_nominators_for_era::(v, n, T::MAX_NOMINATIONS as usize, false, None)?; }: { let targets = >::get_npos_targets(); assert_eq!(targets.len() as u32, v); } + + update_staking_limits { + // This function always does the same thing... just write to 4 storage items. 
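+		// The call below is dispatched as Root with maximal limits; `verify` then checks that
+		// all four storage items (`MinNominatorBond`, `MinValidatorBond`, `MaxNominatorsCount`,
+		// `MaxValidatorsCount`) were written.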
+ }: _( + RawOrigin::Root, + BalanceOf::::max_value(), + BalanceOf::::max_value(), + Some(u32::max_value()), + Some(u32::max_value()) + ) verify { + assert_eq!(MinNominatorBond::::get(), BalanceOf::::max_value()); + assert_eq!(MinValidatorBond::::get(), BalanceOf::::max_value()); + assert_eq!(MaxNominatorsCount::::get(), Some(u32::max_value())); + assert_eq!(MaxValidatorsCount::::get(), Some(u32::max_value())); + } + + chill_other { + let (_, controller) = create_stash_controller::(USER_SEED, 100, Default::default())?; + Staking::::validate(RawOrigin::Signed(controller.clone()).into(), ValidatorPrefs::default())?; + Staking::::update_staking_limits( + RawOrigin::Root.into(), BalanceOf::::max_value(), BalanceOf::::max_value(), None, None, + )?; + let caller = whitelisted_caller(); + }: _(RawOrigin::Signed(caller), controller.clone()) + verify { + assert!(!Validators::::contains_key(controller)); + } } #[cfg(test)] @@ -603,7 +637,7 @@ mod tests { #[test] fn create_validators_with_nominators_for_era_works() { - ExtBuilder::default().has_stakers(true).build().execute_with(|| { + ExtBuilder::default().has_stakers(true).build_and_execute(|| { let v = 10; let n = 100; @@ -625,7 +659,7 @@ mod tests { #[test] fn create_validator_with_nominators_works() { - ExtBuilder::default().has_stakers(true).build().execute_with(|| { + ExtBuilder::default().has_stakers(true).build_and_execute(|| { let n = 10; let (validator_stash, nominators) = create_validator_with_nominators::( @@ -649,7 +683,7 @@ mod tests { #[test] fn add_slashing_spans_works() { - ExtBuilder::default().has_stakers(true).build().execute_with(|| { + ExtBuilder::default().has_stakers(true).build_and_execute(|| { let n = 10; let (validator_stash, _nominators) = create_validator_with_nominators::( @@ -680,7 +714,7 @@ mod tests { #[test] fn test_payout_all() { - ExtBuilder::default().has_stakers(true).build().execute_with(|| { + ExtBuilder::default().has_stakers(true).build_and_execute(|| { let v = 10; let n = 100; @@ -700,6 +734,7 @@ mod tests { impl_benchmark_test_suite!( Staking, - crate::mock::ExtBuilder::default().has_stakers(true).build(), + crate::mock::ExtBuilder::default().has_stakers(true), crate::mock::Test, + exec_name = build_and_execute ); diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index ff7a1ae8a8820..b6d02fa2fd30d 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -745,17 +745,46 @@ enum Releases { V4_0_0, V5_0_0, // blockable validators. V6_0_0, // removal of all storage associated with offchain phragmen. 
+ V7_0_0, // keep track of number of nominators / validators in map } impl Default for Releases { fn default() -> Self { - Releases::V6_0_0 + Releases::V7_0_0 } } pub mod migrations { use super::*; + pub mod v7 { + use super::*; + + pub fn pre_migrate() -> Result<(), &'static str> { + assert!(CurrentValidatorsCount::::get().is_zero(), "CurrentValidatorsCount already set."); + assert!(CurrentNominatorsCount::::get().is_zero(), "CurrentNominatorsCount already set."); + assert!(StorageVersion::::get() == Releases::V6_0_0); + Ok(()) + } + + pub fn migrate() -> Weight { + log!(info, "Migrating staking to Releases::V7_0_0"); + let validator_count = Validators::::iter().count() as u32; + let nominator_count = Nominators::::iter().count() as u32; + + CurrentValidatorsCount::::put(validator_count); + CurrentNominatorsCount::::put(nominator_count); + + StorageVersion::::put(Releases::V7_0_0); + log!(info, "Completed staking migration to Releases::V7_0_0"); + + T::DbWeight::get().reads_writes( + validator_count.saturating_add(nominator_count).into(), + 2, + ) + } + } + pub mod v6 { use super::*; use frame_support::{traits::Get, weights::Weight, generate_storage_alias}; @@ -940,6 +969,14 @@ pub mod pallet { #[pallet::getter(fn bonded)] pub type Bonded = StorageMap<_, Twox64Concat, T::AccountId, T::AccountId>; + /// The minimum active bond to become and maintain the role of a nominator. + #[pallet::storage] + pub type MinNominatorBond = StorageValue<_, BalanceOf, ValueQuery>; + + /// The minimum active bond to become and maintain the role of a validator. + #[pallet::storage] + pub type MinValidatorBond = StorageValue<_, BalanceOf, ValueQuery>; + /// Map from all (unlocked) "controller" accounts to the info regarding the staking. #[pallet::storage] #[pallet::getter(fn ledger)] @@ -960,15 +997,39 @@ pub mod pallet { >; /// The map from (wannabe) validator stash key to the preferences of that validator. + /// + /// When updating this storage item, you must also update the `CurrentValidatorsCount`. #[pallet::storage] #[pallet::getter(fn validators)] pub type Validators = StorageMap<_, Twox64Concat, T::AccountId, ValidatorPrefs, ValueQuery>; + /// A tracker to keep count of the number of items in the `Validators` map. + #[pallet::storage] + pub type CurrentValidatorsCount = StorageValue<_, u32, ValueQuery>; + + /// The maximum validator count before we stop allowing new validators to join. + /// + /// When this value is not set, no limits are enforced. + #[pallet::storage] + pub type MaxValidatorsCount = StorageValue<_, u32, OptionQuery>; + /// The map from nominator stash key to the set of stash keys of all validators to nominate. + /// + /// When updating this storage item, you must also update the `CurrentNominatorsCount`. #[pallet::storage] #[pallet::getter(fn nominators)] pub type Nominators = StorageMap<_, Twox64Concat, T::AccountId, Nominations>; + /// A tracker to keep count of the number of items in the `Nominators` map. + #[pallet::storage] + pub type CurrentNominatorsCount = StorageValue<_, u32, ValueQuery>; + + /// The maximum nominator count before we stop allowing new validators to join. + /// + /// When this value is not set, no limits are enforced. + #[pallet::storage] + pub type MaxNominatorsCount = StorageValue<_, u32, OptionQuery>; + /// The current era index. 
/// /// This is the latest planned era, depending on how the Session pallet queues the validator @@ -1165,6 +1226,8 @@ pub mod pallet { pub slash_reward_fraction: Perbill, pub canceled_payout: BalanceOf, pub stakers: Vec<(T::AccountId, T::AccountId, BalanceOf, StakerStatus)>, + pub min_nominator_bond: BalanceOf, + pub min_validator_bond: BalanceOf, } #[cfg(feature = "std")] @@ -1179,6 +1242,8 @@ pub mod pallet { slash_reward_fraction: Default::default(), canceled_payout: Default::default(), stakers: Default::default(), + min_nominator_bond: Default::default(), + min_validator_bond: Default::default(), } } } @@ -1194,6 +1259,8 @@ pub mod pallet { CanceledSlashPayout::::put(self.canceled_payout); SlashRewardFraction::::put(self.slash_reward_fraction); StorageVersion::::put(Releases::V6_0_0); + MinNominatorBond::::put(self.min_nominator_bond); + MinValidatorBond::::put(self.min_validator_bond); for &(ref stash, ref controller, balance, ref status) in &self.stakers { assert!( @@ -1274,8 +1341,8 @@ pub mod pallet { DuplicateIndex, /// Slash record index out of bounds. InvalidSlashIndex, - /// Can not bond with value less than minimum balance. - InsufficientValue, + /// Can not bond with value less than minimum required. + InsufficientBond, /// Can not schedule more unlock chunks. NoMoreChunks, /// Can not rebond without unlocking chunks. @@ -1300,18 +1367,35 @@ pub mod pallet { TooManyTargets, /// A nomination target was supplied that was blocked or otherwise not a validator. BadTarget, + /// The user has enough bond and thus cannot be chilled forcefully by an external person. + CannotChillOther, + /// There are too many nominators in the system. Governance needs to adjust the staking settings + /// to keep things safe for the runtime. + TooManyNominators, + /// There are too many validators in the system. Governance needs to adjust the staking settings + /// to keep things safe for the runtime. + TooManyValidators, } #[pallet::hooks] impl Hooks> for Pallet { fn on_runtime_upgrade() -> Weight { - if StorageVersion::::get() == Releases::V5_0_0 { - migrations::v6::migrate::() + if StorageVersion::::get() == Releases::V6_0_0 { + migrations::v7::migrate::() } else { T::DbWeight::get().reads(1) } } + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result<(), &'static str> { + if StorageVersion::::get() == Releases::V6_0_0 { + migrations::v7::pre_migrate::() + } else { + Ok(()) + } + } + fn on_initialize(_now: BlockNumberFor) -> Weight { // just return the weight of the on_finalize. T::DbWeight::get().reads(1) @@ -1389,7 +1473,7 @@ pub mod pallet { // Reject a bond which is considered to be _dust_. if value < T::Currency::minimum_balance() { - Err(Error::::InsufficientValue)? + Err(Error::::InsufficientBond)? } frame_system::Pallet::::inc_consumers(&stash).map_err(|_| Error::::BadState)?; @@ -1454,7 +1538,7 @@ pub mod pallet { ledger.total += extra; ledger.active += extra; // Last check: the new active amount of ledger must be more than ED. - ensure!(ledger.active >= T::Currency::minimum_balance(), Error::::InsufficientValue); + ensure!(ledger.active >= T::Currency::minimum_balance(), Error::::InsufficientBond); Self::deposit_event(Event::::Bonded(stash, extra)); Self::update_ledger(&controller, &ledger); @@ -1473,6 +1557,9 @@ pub mod pallet { /// can co-exists at the same time. In that case, [`Call::withdraw_unbonded`] need /// to be called first to remove some of the chunks (if possible). 
/// + /// If a user encounters the `InsufficientBond` error when calling this extrinsic, + /// they should call `chill` first in order to free up their bonded funds. + /// /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. /// And, it can be only called when [`EraElectionStatus`] is `Closed`. /// @@ -1514,6 +1601,18 @@ pub mod pallet { ledger.active = Zero::zero(); } + let min_active_bond = if Nominators::::contains_key(&ledger.stash) { + MinNominatorBond::::get() + } else if Validators::::contains_key(&ledger.stash) { + MinValidatorBond::::get() + } else { + Zero::zero() + }; + + // Make sure that the user maintains enough active bond for their role. + // If a user runs into this error, they should chill first. + ensure!(ledger.active >= min_active_bond, Error::::InsufficientBond); + // Note: in case there is no current era it is fine to bond one era more. let era = Self::current_era().unwrap_or(0) + T::BondingDuration::get(); ledger.unlocking.push(UnlockChunk { value, era }); @@ -1614,10 +1713,19 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::validate())] pub fn validate(origin: OriginFor, prefs: ValidatorPrefs) -> DispatchResult { let controller = ensure_signed(origin)?; + + // If this error is reached, we need to adjust the `MinValidatorBond` and start calling `chill_other`. + // Until then, we explicitly block new validators to protect the runtime. + if let Some(max_validators) = MaxValidatorsCount::::get() { + ensure!(CurrentValidatorsCount::::get() < max_validators, Error::::TooManyValidators); + } + let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; + ensure!(ledger.active >= MinValidatorBond::::get(), Error::::InsufficientBond); + let stash = &ledger.stash; - >::remove(stash); - >::insert(stash, prefs); + Self::do_remove_nominator(stash); + Self::do_add_validator(stash, prefs); Ok(()) } @@ -1646,7 +1754,16 @@ pub mod pallet { targets: Vec<::Source>, ) -> DispatchResult { let controller = ensure_signed(origin)?; + + // If this error is reached, we need to adjust the `MinNominatorBond` and start calling `chill_other`. + // Until then, we explicitly block new nominators to protect the runtime. + if let Some(max_nominators) = MaxNominatorsCount::::get() { + ensure!(CurrentNominatorsCount::::get() < max_nominators, Error::::TooManyNominators); + } + let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; + ensure!(ledger.active >= MinNominatorBond::::get(), Error::::InsufficientBond); + let stash = &ledger.stash; ensure!(!targets.is_empty(), Error::::EmptyTargets); ensure!(targets.len() <= T::MAX_NOMINATIONS as usize, Error::::TooManyTargets); @@ -1669,8 +1786,8 @@ pub mod pallet { suppressed: false, }; - >::remove(stash); - >::insert(stash, &nominations); + Self::do_remove_validator(stash); + Self::do_add_nominator(stash, nominations); Ok(()) } @@ -2022,7 +2139,7 @@ pub mod pallet { let ledger = ledger.rebond(value); // Last check: the new active amount of ledger must be more than ED. - ensure!(ledger.active >= T::Currency::minimum_balance(), Error::::InsufficientValue); + ensure!(ledger.active >= T::Currency::minimum_balance(), Error::::InsufficientBond); Self::deposit_event(Event::::Bonded(ledger.stash.clone(), value)); Self::update_ledger(&controller, &ledger); @@ -2135,6 +2252,80 @@ pub mod pallet { Ok(()) } + + /// Update the various staking limits this pallet. + /// + /// * `min_nominator_bond`: The minimum active bond needed to be a nominator. 
+ /// * `min_validator_bond`: The minimum active bond needed to be a validator. + /// * `max_nominator_count`: The max number of users who can be a nominator at once. + /// When set to `None`, no limit is enforced. + /// * `max_validator_count`: The max number of users who can be a validator at once. + /// When set to `None`, no limit is enforced. + /// + /// Origin must be Root to call this function. + /// + /// NOTE: Existing nominators and validators will not be affected by this update. + /// to kick people under the new limits, `chill_other` should be called. + #[pallet::weight(T::WeightInfo::update_staking_limits())] + pub fn update_staking_limits( + origin: OriginFor, + min_nominator_bond: BalanceOf, + min_validator_bond: BalanceOf, + max_nominator_count: Option, + max_validator_count: Option, + ) -> DispatchResult { + ensure_root(origin)?; + MinNominatorBond::::set(min_nominator_bond); + MinValidatorBond::::set(min_validator_bond); + MaxNominatorsCount::::set(max_nominator_count); + MaxValidatorsCount::::set(max_validator_count); + Ok(()) + } + + /// Declare a `controller` as having no desire to either validator or nominate. + /// + /// Effects will be felt at the beginning of the next era. + /// + /// The dispatch origin for this call must be _Signed_, but can be called by anyone. + /// + /// If the caller is the same as the controller being targeted, then no further checks + /// are enforced. However, this call can also be made by an third party user who witnesses + /// that this controller does not satisfy the minimum bond requirements to be in their role. + /// + /// This can be helpful if bond requirements are updated, and we need to remove old users + /// who do not satisfy these requirements. + /// + // TODO: Maybe we can deprecate `chill` in the future. + // https://github.com/paritytech/substrate/issues/9111 + #[pallet::weight(T::WeightInfo::chill_other())] + pub fn chill_other( + origin: OriginFor, + controller: T::AccountId, + ) -> DispatchResult { + // Anyone can call this function. + let caller = ensure_signed(origin)?; + let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; + let stash = ledger.stash; + + // If the caller is not the controller, we want to check that the minimum bond + // requirements are not satisfied, and thus we have reason to chill this user. + // + // Otherwise, if caller is the same as the controller, this is just like `chill`. + if caller != controller { + let min_active_bond = if Nominators::::contains_key(&stash) { + MinNominatorBond::::get() + } else if Validators::::contains_key(&stash) { + MinValidatorBond::::get() + } else { + Zero::zero() + }; + + ensure!(ledger.active < min_active_bond, Error::::CannotChillOther); + } + + Self::chill_stash(&stash); + Ok(()) + } } } @@ -2296,8 +2487,8 @@ impl Pallet { /// Chill a stash account. fn chill_stash(stash: &T::AccountId) { - >::remove(stash); - >::remove(stash); + Self::do_remove_validator(stash); + Self::do_remove_nominator(stash); } /// Actually make a payment to a staker. This uses the currency's reward function @@ -2645,8 +2836,8 @@ impl Pallet { >::remove(&controller); >::remove(stash); - >::remove(stash); - >::remove(stash); + Self::do_remove_validator(stash); + Self::do_remove_nominator(stash); frame_system::Pallet::::dec_consumers(stash); @@ -2749,7 +2940,7 @@ impl Pallet { // Collect all slashing spans into a BTreeMap for further queries. 
let slashing_spans = >::iter().collect::>(); - for (nominator, nominations) in >::iter() { + for (nominator, nominations) in Nominators::::iter() { let Nominations { submitted_in, mut targets, suppressed: _ } = nominations; // Filter out nomination targets which were nominated before the most recent @@ -2769,8 +2960,49 @@ impl Pallet { all_voters } + /// This is a very expensive function and result should be cached versus being called multiple times. pub fn get_npos_targets() -> Vec { - >::iter().map(|(v, _)| v).collect::>() + Validators::::iter().map(|(v, _)| v).collect::>() + } + + /// This function will add a nominator to the `Nominators` storage map, + /// and keep track of the `CurrentNominatorsCount`. + /// + /// If the nominator already exists, their nominations will be updated. + pub fn do_add_nominator(who: &T::AccountId, nominations: Nominations) { + if !Nominators::::contains_key(who) { + CurrentNominatorsCount::::mutate(|x| x.saturating_inc()) + } + Nominators::::insert(who, nominations); + } + + /// This function will remove a nominator from the `Nominators` storage map, + /// and keep track of the `CurrentNominatorsCount`. + pub fn do_remove_nominator(who: &T::AccountId) { + if Nominators::::contains_key(who) { + Nominators::::remove(who); + CurrentNominatorsCount::::mutate(|x| x.saturating_dec()); + } + } + + /// This function will add a validator to the `Validators` storage map, + /// and keep track of the `CurrentValidatorsCount`. + /// + /// If the validator already exists, their preferences will be updated. + pub fn do_add_validator(who: &T::AccountId, prefs: ValidatorPrefs) { + if !Validators::::contains_key(who) { + CurrentValidatorsCount::::mutate(|x| x.saturating_inc()) + } + Validators::::insert(who, prefs); + } + + /// This function will remove a validator from the `Validators` storage map, + /// and keep track of the `CurrentValidatorsCount`. + pub fn do_remove_validator(who: &T::AccountId) { + if Validators::::contains_key(who) { + Validators::::remove(who); + CurrentValidatorsCount::::mutate(|x| x.saturating_dec()); + } } } @@ -2785,12 +3017,11 @@ impl frame_election_provider_support::ElectionDataProvider, ) -> data_provider::Result<(Vec<(T::AccountId, VoteWeight, Vec)>, Weight)> { - // NOTE: reading these counts already needs to iterate a lot of storage keys, but they get - // cached. This is okay for the case of `Ok(_)`, but bad for `Err(_)`, as the trait does not - // report weight in failures. 
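These `do_add_*` / `do_remove_*` helpers are what allow the election data provider below to trust `CurrentNominatorsCount` / `CurrentValidatorsCount` instead of re-iterating the maps (the `NOTE` being deleted here described exactly that iteration cost). The invariant they maintain can be shown with a small stand-alone model (a plain Rust `HashMap` and hypothetical keys, not the pallet's storage types):

    use std::collections::HashMap;

    /// A map plus a counter that must always equal `map.len()` — the same
    /// invariant `Nominators`/`CurrentNominatorsCount` (and the validator pair)
    /// are expected to uphold.
    struct CountedMap<V> {
        map: HashMap<u64, V>,
        count: u32,
    }

    impl<V> CountedMap<V> {
        fn new() -> Self {
            Self { map: HashMap::new(), count: 0 }
        }

        /// Mirrors `do_add_nominator`/`do_add_validator`: only bump the counter
        /// when the key is new, so re-submitting preferences does not inflate it.
        fn insert(&mut self, who: u64, value: V) {
            if !self.map.contains_key(&who) {
                self.count = self.count.saturating_add(1);
            }
            self.map.insert(who, value);
        }

        /// Mirrors `do_remove_nominator`/`do_remove_validator`: only decrement
        /// when something was actually removed.
        fn remove(&mut self, who: u64) {
            if self.map.remove(&who).is_some() {
                self.count = self.count.saturating_sub(1);
            }
        }
    }

    fn main() {
        let mut nominators = CountedMap::new();
        nominators.insert(1, vec![11u64, 21]);
        nominators.insert(1, vec![21u64]); // update in place, count stays at 1
        nominators.insert(2, vec![11u64]);
        nominators.remove(3);              // absent key, count unchanged
        assert_eq!(nominators.count, 2);
        assert_eq!(nominators.map.len() as u32, nominators.count);
    }

Writing to the map directly bypasses the counter, which is exactly what the new `count_check_works` test and the post-condition check added to `mock.rs` are there to catch.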
- let nominator_count = >::iter().count(); - let validator_count = >::iter().count(); - let voter_count = nominator_count.saturating_add(validator_count); + let nominator_count = CurrentNominatorsCount::::get(); + let validator_count = CurrentValidatorsCount::::get(); + let voter_count = nominator_count.saturating_add(validator_count) as usize; + debug_assert!(>::iter().count() as u32 == CurrentNominatorsCount::::get()); + debug_assert!(>::iter().count() as u32 == CurrentValidatorsCount::::get()); if maybe_max_len.map_or(false, |max_len| voter_count > max_len) { return Err("Voter snapshot too big"); @@ -2798,15 +3029,15 @@ impl frame_election_provider_support::ElectionDataProvider>::iter().count(); let weight = T::WeightInfo::get_npos_voters( - validator_count as u32, - nominator_count as u32, + nominator_count, + validator_count, slashing_span_count as u32, ); Ok((Self::get_npos_voters(), weight)) } fn targets(maybe_max_len: Option) -> data_provider::Result<(Vec, Weight)> { - let target_count = >::iter().count(); + let target_count = CurrentValidatorsCount::::get() as usize; if maybe_max_len.map_or(false, |max_len| target_count > max_len) { return Err("Target snapshot too big"); @@ -2859,7 +3090,7 @@ impl frame_election_provider_support::ElectionDataProvider = target_stake .and_then(|w| >::try_from(w).ok()) - .unwrap_or(T::Currency::minimum_balance() * 100u32.into()); + .unwrap_or(MinNominatorBond::::get() * 100u32.into()); >::insert(v.clone(), v.clone()); >::insert( v.clone(), @@ -2871,8 +3102,8 @@ impl frame_election_provider_support::ElectionDataProvider>::insert( - v, + Self::do_add_validator( + &v, ValidatorPrefs { commission: Perbill::zero(), blocked: false }, ); }); @@ -2892,8 +3123,8 @@ impl frame_election_provider_support::ElectionDataProvider>::insert( - v, + Self::do_add_nominator( + &v, Nominations { targets: t, submitted_in: 0, suppressed: false }, ); }); diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index f58cdf0d2350f..35a1fa45284da 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -242,6 +242,7 @@ impl onchain::Config for Test { type Accuracy = Perbill; type DataProvider = Staking; } + impl Config for Test { const MAX_NOMINATIONS: u32 = 16; type Currency = Balances; @@ -286,6 +287,8 @@ pub struct ExtBuilder { invulnerables: Vec, has_stakers: bool, initialize_first_session: bool, + min_nominator_bond: Balance, + min_validator_bond: Balance, } impl Default for ExtBuilder { @@ -300,6 +303,8 @@ impl Default for ExtBuilder { invulnerables: vec![], has_stakers: true, initialize_first_session: true, + min_nominator_bond: ExistentialDeposit::get(), + min_validator_bond: ExistentialDeposit::get(), } } } @@ -361,7 +366,15 @@ impl ExtBuilder { OFFSET.with(|v| *v.borrow_mut() = offset); self } - pub fn build(self) -> sp_io::TestExternalities { + pub fn min_nominator_bond(mut self, amount: Balance) -> Self { + self.min_nominator_bond = amount; + self + } + pub fn min_validator_bond(mut self, amount: Balance) -> Self { + self.min_validator_bond = amount; + self + } + fn build(self) -> sp_io::TestExternalities { sp_tracing::try_init_simple(); let mut storage = frame_system::GenesisConfig::default() .build_storage::() @@ -434,6 +447,8 @@ impl ExtBuilder { minimum_validator_count: self.minimum_validator_count, invulnerables: self.invulnerables, slash_reward_fraction: Perbill::from_percent(10), + min_nominator_bond: self.min_nominator_bond, + min_validator_bond: self.min_validator_bond, ..Default::default() } .assimilate_storage(&mut storage); @@ 
-477,6 +492,14 @@ fn post_conditions() { check_nominators(); check_exposures(); check_ledgers(); + check_count(); +} + +fn check_count() { + let nominator_count = Nominators::::iter().count() as u32; + let validator_count = Validators::::iter().count() as u32; + assert_eq!(nominator_count, CurrentNominatorsCount::::get()); + assert_eq!(validator_count, CurrentValidatorsCount::::get()); } fn check_ledgers() { diff --git a/frame/staking/src/testing_utils.rs b/frame/staking/src/testing_utils.rs index f3af4ac0920d1..8a4392edfed25 100644 --- a/frame/staking/src/testing_utils.rs +++ b/frame/staking/src/testing_utils.rs @@ -30,7 +30,9 @@ const SEED: u32 = 0; /// This function removes all validators and nominators from storage. pub fn clear_validators_and_nominators() { Validators::::remove_all(None); + CurrentValidatorsCount::::kill(); Nominators::::remove_all(None); + CurrentNominatorsCount::::kill(); } /// Grab a funded user. diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index ee8f78769e70a..976ee34d9b8eb 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -297,8 +297,7 @@ fn staking_should_work() { ExtBuilder::default() .nominate(false) .fair(false) // to give 20 more staked value - .build() - .execute_with(|| { + .build_and_execute(|| { // remember + compare this along with the test. assert_eq_uvec!(validator_controllers(), vec![20, 10]); @@ -374,8 +373,7 @@ fn blocking_and_kicking_works() { .validator_count(4) .nominate(true) .num_validators(3) - .build() - .execute_with(|| { + .build_and_execute(|| { // block validator 10/11 assert_ok!(Staking::validate(Origin::signed(10), ValidatorPrefs { blocked: true, .. Default::default() })); // attempt to nominate from 100/101... @@ -398,8 +396,7 @@ fn less_than_needed_candidates_works() { .validator_count(4) .nominate(false) .num_validators(3) - .build() - .execute_with(|| { + .build_and_execute(|| { assert_eq!(Staking::validator_count(), 4); assert_eq!(Staking::minimum_validator_count(), 1); assert_eq_uvec!(validator_controllers(), vec![30, 20, 10]); @@ -426,8 +423,7 @@ fn no_candidate_emergency_condition() { .num_validators(4) .validator_pool(true) .nominate(false) - .build() - .execute_with(|| { + .build_and_execute(|| { // initial validators assert_eq_uvec!(validator_controllers(), vec![10, 20, 30, 40]); let prefs = ValidatorPrefs { commission: Perbill::one(), .. Default::default() }; @@ -468,8 +464,7 @@ fn nominating_and_rewards_should_work() { ExtBuilder::default() .nominate(false) .validator_pool(true) - .build() - .execute_with(|| { + .build_and_execute(|| { // initial validators -- everyone is actually even. assert_eq_uvec!(validator_controllers(), vec![40, 30]); @@ -1254,8 +1249,7 @@ fn rebond_works() { // * it can re-bond a portion of the funds scheduled to unlock. ExtBuilder::default() .nominate(false) - .build() - .execute_with(|| { + .build_and_execute(|| { // Set payee to controller. avoids confusion assert_ok!(Staking::set_payee( Origin::signed(10), @@ -1399,8 +1393,7 @@ fn rebond_is_fifo() { // Rebond should proceed by reversing the most recent bond operations. ExtBuilder::default() .nominate(false) - .build() - .execute_with(|| { + .build_and_execute(|| { // Set payee to controller. 
avoids confusion assert_ok!(Staking::set_payee( Origin::signed(10), @@ -1547,109 +1540,117 @@ fn reward_to_stake_works() { fn on_free_balance_zero_stash_removes_validator() { // Tests that validator storage items are cleaned up when stash is empty // Tests that storage items are untouched when controller is empty - ExtBuilder::default().existential_deposit(10).build_and_execute(|| { - // Check the balance of the validator account - assert_eq!(Balances::free_balance(10), 256); - // Check the balance of the stash account - assert_eq!(Balances::free_balance(11), 256000); - // Check these two accounts are bonded - assert_eq!(Staking::bonded(&11), Some(10)); - - // Set some storage items which we expect to be cleaned up - // Set payee information - assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Stash)); - - // Check storage items that should be cleaned up - assert!(>::contains_key(&10)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - - // Reduce free_balance of controller to 0 - let _ = Balances::slash(&10, Balance::max_value()); - - // Check the balance of the stash account has not been touched - assert_eq!(Balances::free_balance(11), 256000); - // Check these two accounts are still bonded - assert_eq!(Staking::bonded(&11), Some(10)); - - // Check storage items have not changed - assert!(>::contains_key(&10)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - - // Reduce free_balance of stash to 0 - let _ = Balances::slash(&11, Balance::max_value()); - // Check total balance of stash - assert_eq!(Balances::total_balance(&11), 10); - - // Reap the stash - assert_ok!(Staking::reap_stash(Origin::none(), 11, 0)); - - // Check storage items do not exist - assert!(!>::contains_key(&10)); - assert!(!>::contains_key(&11)); - assert!(!>::contains_key(&11)); - assert!(!>::contains_key(&11)); - assert!(!>::contains_key(&11)); - }); + ExtBuilder::default() + .existential_deposit(10) + .min_nominator_bond(10) + .min_validator_bond(10) + .build_and_execute(|| { + // Check the balance of the validator account + assert_eq!(Balances::free_balance(10), 256); + // Check the balance of the stash account + assert_eq!(Balances::free_balance(11), 256000); + // Check these two accounts are bonded + assert_eq!(Staking::bonded(&11), Some(10)); + + // Set some storage items which we expect to be cleaned up + // Set payee information + assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Stash)); + + // Check storage items that should be cleaned up + assert!(>::contains_key(&10)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + + // Reduce free_balance of controller to 0 + let _ = Balances::slash(&10, Balance::max_value()); + + // Check the balance of the stash account has not been touched + assert_eq!(Balances::free_balance(11), 256000); + // Check these two accounts are still bonded + assert_eq!(Staking::bonded(&11), Some(10)); + + // Check storage items have not changed + assert!(>::contains_key(&10)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + + // Reduce free_balance of stash to 0 + let _ = Balances::slash(&11, Balance::max_value()); + // Check total balance of stash + assert_eq!(Balances::total_balance(&11), 10); + + // Reap the stash + assert_ok!(Staking::reap_stash(Origin::none(), 11, 0)); + + // Check storage items do not exist + assert!(!>::contains_key(&10)); + 
assert!(!>::contains_key(&11)); + assert!(!>::contains_key(&11)); + assert!(!>::contains_key(&11)); + assert!(!>::contains_key(&11)); + }); } #[test] fn on_free_balance_zero_stash_removes_nominator() { // Tests that nominator storage items are cleaned up when stash is empty // Tests that storage items are untouched when controller is empty - ExtBuilder::default().existential_deposit(10).build_and_execute(|| { - // Make 10 a nominator - assert_ok!(Staking::nominate(Origin::signed(10), vec![20])); - // Check that account 10 is a nominator - assert!(>::contains_key(11)); - // Check the balance of the nominator account - assert_eq!(Balances::free_balance(10), 256); - // Check the balance of the stash account - assert_eq!(Balances::free_balance(11), 256000); - - // Set payee information - assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Stash)); - - // Check storage items that should be cleaned up - assert!(>::contains_key(&10)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - - // Reduce free_balance of controller to 0 - let _ = Balances::slash(&10, Balance::max_value()); - // Check total balance of account 10 - assert_eq!(Balances::total_balance(&10), 0); - - // Check the balance of the stash account has not been touched - assert_eq!(Balances::free_balance(11), 256000); - // Check these two accounts are still bonded - assert_eq!(Staking::bonded(&11), Some(10)); - - // Check storage items have not changed - assert!(>::contains_key(&10)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - - // Reduce free_balance of stash to 0 - let _ = Balances::slash(&11, Balance::max_value()); - // Check total balance of stash - assert_eq!(Balances::total_balance(&11), 10); - - // Reap the stash - assert_ok!(Staking::reap_stash(Origin::none(), 11, 0)); - - // Check storage items do not exist - assert!(!>::contains_key(&10)); - assert!(!>::contains_key(&11)); - assert!(!>::contains_key(&11)); - assert!(!>::contains_key(&11)); - assert!(!>::contains_key(&11)); - }); + ExtBuilder::default() + .existential_deposit(10) + .min_nominator_bond(10) + .min_validator_bond(10) + .build_and_execute(|| { + // Make 10 a nominator + assert_ok!(Staking::nominate(Origin::signed(10), vec![20])); + // Check that account 10 is a nominator + assert!(>::contains_key(11)); + // Check the balance of the nominator account + assert_eq!(Balances::free_balance(10), 256); + // Check the balance of the stash account + assert_eq!(Balances::free_balance(11), 256000); + + // Set payee information + assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Stash)); + + // Check storage items that should be cleaned up + assert!(>::contains_key(&10)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + + // Reduce free_balance of controller to 0 + let _ = Balances::slash(&10, Balance::max_value()); + // Check total balance of account 10 + assert_eq!(Balances::total_balance(&10), 0); + + // Check the balance of the stash account has not been touched + assert_eq!(Balances::free_balance(11), 256000); + // Check these two accounts are still bonded + assert_eq!(Staking::bonded(&11), Some(10)); + + // Check storage items have not changed + assert!(>::contains_key(&10)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + + // Reduce free_balance of stash to 0 + let _ = Balances::slash(&11, Balance::max_value()); + // Check 
total balance of stash + assert_eq!(Balances::total_balance(&11), 10); + + // Reap the stash + assert_ok!(Staking::reap_stash(Origin::none(), 11, 0)); + + // Check storage items do not exist + assert!(!>::contains_key(&10)); + assert!(!>::contains_key(&11)); + assert!(!>::contains_key(&11)); + assert!(!>::contains_key(&11)); + assert!(!>::contains_key(&11)); + }); } @@ -1725,14 +1726,15 @@ fn bond_with_no_staked_value() { ExtBuilder::default() .validator_count(3) .existential_deposit(5) + .min_nominator_bond(5) + .min_validator_bond(5) .nominate(false) .minimum_validator_count(1) - .build() - .execute_with(|| { + .build_and_execute(|| { // Can't bond with 1 assert_noop!( Staking::bond(Origin::signed(1), 2, 1, RewardDestination::Controller), - Error::::InsufficientValue, + Error::::InsufficientBond, ); // bonded with absolute minimum value possible. assert_ok!(Staking::bond(Origin::signed(1), 2, 5, RewardDestination::Controller)); @@ -1774,8 +1776,7 @@ fn bond_with_little_staked_value_bounded() { .validator_count(3) .nominate(false) .minimum_validator_count(1) - .build() - .execute_with(|| { + .build_and_execute(|| { // setup assert_ok!(Staking::chill(Origin::signed(30))); assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller)); @@ -1828,8 +1829,7 @@ fn bond_with_duplicate_vote_should_be_ignored_by_election_provider() { .validator_count(2) .nominate(false) .minimum_validator_count(1) - .build() - .execute_with(|| { + .build_and_execute(|| { // disable the nominator assert_ok!(Staking::chill(Origin::signed(100))); // make stakes equal. @@ -1876,8 +1876,7 @@ fn bond_with_duplicate_vote_should_be_ignored_by_election_provider_elected() { .validator_count(2) .nominate(false) .minimum_validator_count(1) - .build() - .execute_with(|| { + .build_and_execute(|| { // disable the nominator assert_ok!(Staking::chill(Origin::signed(100))); // 31/30 will have less stake @@ -1923,8 +1922,7 @@ fn new_era_elects_correct_number_of_validators() { .validator_pool(true) .fair(true) .validator_count(1) - .build() - .execute_with(|| { + .build_and_execute(|| { assert_eq!(Staking::validator_count(), 1); assert_eq!(validator_controllers().len(), 1); @@ -2466,7 +2464,11 @@ fn only_slash_for_max_in_era() { #[test] fn garbage_collection_after_slashing() { // ensures that `SlashingSpans` and `SpanSlash` of an account is removed after reaping. - ExtBuilder::default().existential_deposit(2).build_and_execute(|| { + ExtBuilder::default() + .existential_deposit(2) + .min_nominator_bond(2) + .min_validator_bond(2) + .build_and_execute(|| { assert_eq!(Balances::free_balance(11), 256_000); on_offence_now( @@ -3723,6 +3725,8 @@ fn session_buffering_no_offset() { fn cannot_rebond_to_lower_than_ed() { ExtBuilder::default() .existential_deposit(10) + .min_nominator_bond(10) + .min_validator_bond(10) .build_and_execute(|| { // stash must have more balance than bonded for this to work. assert_eq!(Balances::free_balance(&21), 512_000); @@ -3739,7 +3743,8 @@ fn cannot_rebond_to_lower_than_ed() { } ); - // unbond all of it. + // unbond all of it. must be chilled first. 
+ assert_ok!(Staking::chill(Origin::signed(20))); assert_ok!(Staking::unbond(Origin::signed(20), 1000)); assert_eq!( Staking::ledger(&20).unwrap(), @@ -3755,7 +3760,7 @@ fn cannot_rebond_to_lower_than_ed() { // now bond a wee bit more assert_noop!( Staking::rebond(Origin::signed(20), 5), - Error::::InsufficientValue, + Error::::InsufficientBond, ); }) } @@ -3764,6 +3769,8 @@ fn cannot_rebond_to_lower_than_ed() { fn cannot_bond_extra_to_lower_than_ed() { ExtBuilder::default() .existential_deposit(10) + .min_nominator_bond(10) + .min_validator_bond(10) .build_and_execute(|| { // stash must have more balance than bonded for this to work. assert_eq!(Balances::free_balance(&21), 512_000); @@ -3780,7 +3787,8 @@ fn cannot_bond_extra_to_lower_than_ed() { } ); - // unbond all of it. + // unbond all of it. must be chilled first. + assert_ok!(Staking::chill(Origin::signed(20))); assert_ok!(Staking::unbond(Origin::signed(20), 1000)); assert_eq!( Staking::ledger(&20).unwrap(), @@ -3799,7 +3807,7 @@ fn cannot_bond_extra_to_lower_than_ed() { // now bond a wee bit more assert_noop!( Staking::bond_extra(Origin::signed(21), 5), - Error::::InsufficientValue, + Error::::InsufficientBond, ); }) } @@ -3809,6 +3817,8 @@ fn do_not_die_when_active_is_ed() { let ed = 10; ExtBuilder::default() .existential_deposit(ed) + .min_nominator_bond(ed) + .min_validator_bond(ed) .build_and_execute(|| { // initial stuff. assert_eq!( @@ -3888,7 +3898,7 @@ mod election_data_provider { #[test] fn voters_include_self_vote() { - ExtBuilder::default().nominate(false).build().execute_with(|| { + ExtBuilder::default().nominate(false).build_and_execute(|| { assert!(>::iter().map(|(x, _)| x).all(|v| Staking::voters(None) .unwrap() .0 @@ -3900,7 +3910,7 @@ mod election_data_provider { #[test] fn voters_exclude_slashed() { - ExtBuilder::default().build().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); assert_eq!( >::voters(None) @@ -3946,7 +3956,7 @@ mod election_data_provider { #[test] fn respects_len_limits() { - ExtBuilder::default().build().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { assert_eq!(Staking::voters(Some(1)).unwrap_err(), "Voter snapshot too big"); assert_eq!(Staking::targets(Some(1)).unwrap_err(), "Target snapshot too big"); }); @@ -3954,7 +3964,7 @@ mod election_data_provider { #[test] fn estimate_next_election_works() { - ExtBuilder::default().session_per_era(5).period(5).build().execute_with(|| { + ExtBuilder::default().session_per_era(5).period(5).build_and_execute(|| { // first session is always length 0. for b in 1..20 { run_to_block(b); @@ -4013,4 +4023,129 @@ mod election_data_provider { assert_eq!(ForceEra::::get(), Forcing::NotForcing); }) } + + #[test] + #[should_panic] + fn count_check_works() { + ExtBuilder::default().build_and_execute(|| { + // We should never insert into the validators or nominators map directly as this will + // not keep track of the count. This test should panic as we verify the count is accurate + // after every test using the `post_checks` in `mock`. 
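The `min_bond_checks_work` and `chill_other_works` tests added further below both exercise one role-dependent rule: the active bond a stash must keep depends on whether it is currently nominating, validating, or chilled, and a third party may only `chill_other` an account that sits below that floor. A compact model of the rule (plain Rust with illustrative thresholds; the pallet enforces it through `MinNominatorBond`/`MinValidatorBond` and the `InsufficientBond`/`CannotChillOther` errors):

    #[derive(Clone, Copy)]
    enum Role { Nominator, Validator, Chilled }

    /// Mirrors the `min_active_bond` selection done in `unbond` and `chill_other`.
    fn min_active_bond(role: Role, min_nominator_bond: u64, min_validator_bond: u64) -> u64 {
        match role {
            Role::Nominator => min_nominator_bond,
            Role::Validator => min_validator_bond,
            Role::Chilled => 0,
        }
    }

    /// `unbond` keeps the remaining active bond at or above the floor for the
    /// caller's role; anything below that is rejected until the account chills.
    fn can_unbond(active_after: u64, role: Role, min_n: u64, min_v: u64) -> bool {
        active_after >= min_active_bond(role, min_n, min_v)
    }

    /// `chill_other` by a third party only goes through when the target is
    /// *below* the floor for its role; otherwise it is refused.
    fn can_chill_other(active: u64, role: Role, min_n: u64, min_v: u64) -> bool {
        active < min_active_bond(role, min_n, min_v)
    }

    fn main() {
        let (min_n, min_v) = (1_000, 1_500);
        // A validator cannot let its active bond drop below the validator floor...
        assert!(!can_unbond(1_000, Role::Validator, min_n, min_v));
        // ...but the same stash, as a nominator, may go down to the nominator floor...
        assert!(can_unbond(1_000, Role::Nominator, min_n, min_v));
        // ...and once chilled it can unbond everything.
        assert!(can_unbond(0, Role::Chilled, min_n, min_v));
        // Raising the floor is what makes third-party chilling possible.
        assert!(!can_chill_other(1_000, Role::Nominator, min_n, min_v));
        assert!(can_chill_other(1_000, Role::Nominator, 1_500, min_v));
    }
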
+ Validators::::insert(987654321, ValidatorPrefs::default()); + Nominators::::insert(987654321, Nominations { + targets: vec![], + submitted_in: Default::default(), + suppressed: false, + }); + }) + } + + #[test] + fn min_bond_checks_work() { + ExtBuilder::default() + .existential_deposit(100) + .min_nominator_bond(1_000) + .min_validator_bond(1_500) + .build_and_execute(|| { + // 500 is not enough for any role + assert_ok!(Staking::bond(Origin::signed(3), 4, 500, RewardDestination::Controller)); + assert_noop!(Staking::nominate(Origin::signed(4), vec![1]), Error::::InsufficientBond); + assert_noop!(Staking::validate(Origin::signed(4), ValidatorPrefs::default()), Error::::InsufficientBond); + + // 1000 is enough for nominator + assert_ok!(Staking::bond_extra(Origin::signed(3), 500)); + assert_ok!(Staking::nominate(Origin::signed(4), vec![1])); + assert_noop!(Staking::validate(Origin::signed(4), ValidatorPrefs::default()), Error::::InsufficientBond); + + // 1500 is enough for validator + assert_ok!(Staking::bond_extra(Origin::signed(3), 500)); + assert_ok!(Staking::nominate(Origin::signed(4), vec![1])); + assert_ok!(Staking::validate(Origin::signed(4), ValidatorPrefs::default())); + + // Can't unbond anything as validator + assert_noop!(Staking::unbond(Origin::signed(4), 500), Error::::InsufficientBond); + + // Once they are a nominator, they can unbond 500 + assert_ok!(Staking::nominate(Origin::signed(4), vec![1])); + assert_ok!(Staking::unbond(Origin::signed(4), 500)); + assert_noop!(Staking::unbond(Origin::signed(4), 500), Error::::InsufficientBond); + + // Once they are chilled they can unbond everything + assert_ok!(Staking::chill(Origin::signed(4))); + assert_ok!(Staking::unbond(Origin::signed(4), 1000)); + }) + } + + #[test] + fn chill_other_works() { + ExtBuilder::default() + .existential_deposit(100) + .min_nominator_bond(1_000) + .min_validator_bond(1_500) + .build_and_execute(|| { + // Nominator + assert_ok!(Staking::bond(Origin::signed(1), 2, 1000, RewardDestination::Controller)); + assert_ok!(Staking::nominate(Origin::signed(2), vec![1])); + + // Validator + assert_ok!(Staking::bond(Origin::signed(3), 4, 1500, RewardDestination::Controller)); + assert_ok!(Staking::validate(Origin::signed(4), ValidatorPrefs::default())); + + // Can't chill these users + assert_noop!(Staking::chill_other(Origin::signed(1), 2), Error::::CannotChillOther); + assert_noop!(Staking::chill_other(Origin::signed(1), 4), Error::::CannotChillOther); + + // Change the minimum bond + assert_ok!(Staking::update_staking_limits(Origin::root(), 1_500, 2_000, None, None)); + + // Users can now be chilled + assert_ok!(Staking::chill_other(Origin::signed(1), 2)); + assert_ok!(Staking::chill_other(Origin::signed(1), 4)); + }) + } + + #[test] + fn capped_stakers_works() { + ExtBuilder::default().build_and_execute(|| { + let validator_count = CurrentValidatorsCount::::get(); + assert_eq!(validator_count, 3); + let nominator_count = CurrentNominatorsCount::::get(); + assert_eq!(nominator_count, 1); + + // Change the maximums + let max = 10; + assert_ok!(Staking::update_staking_limits(Origin::root(), 10, 10, Some(max), Some(max))); + + // can create `max - validator_count` validators + assert_ok!(testing_utils::create_validators::(max - validator_count, 100)); + + // but no more + let (_, last_validator) = testing_utils::create_stash_controller::( + 1337, 100, RewardDestination::Controller, + ).unwrap(); + assert_noop!( + Staking::validate(Origin::signed(last_validator), ValidatorPrefs::default()), + 
Error::::TooManyValidators, + ); + + // same with nominators + for i in 0 .. max - nominator_count { + let (_, controller) = testing_utils::create_stash_controller::( + i + 10_000_000, 100, RewardDestination::Controller, + ).unwrap(); + assert_ok!(Staking::nominate(Origin::signed(controller), vec![1])); + } + + // one more is too many + let (_, last_nominator) = testing_utils::create_stash_controller::( + 20_000_000, 100, RewardDestination::Controller, + ).unwrap(); + assert_noop!(Staking::nominate(Origin::signed(last_nominator), vec![1]), Error::::TooManyNominators); + + // No problem when we set to `None` again + assert_ok!(Staking::update_staking_limits(Origin::root(), 10, 10, None, None)); + assert_ok!(Staking::nominate(Origin::signed(last_nominator), vec![1])); + assert_ok!(Staking::validate(Origin::signed(last_validator), ValidatorPrefs::default())); + }) + } } diff --git a/frame/staking/src/weights.rs b/frame/staking/src/weights.rs index 5960d6612566e..980b0855fbd81 100644 --- a/frame/staking/src/weights.rs +++ b/frame/staking/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_staking //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 -//! DATE: 2021-06-07, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2021-06-15, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -70,365 +70,383 @@ pub trait WeightInfo { fn new_era(v: u32, n: u32, ) -> Weight; fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight; fn get_npos_targets(v: u32, ) -> Weight; + fn update_staking_limits() -> Weight; + fn chill_other() -> Weight; } /// Weights for pallet_staking using the Substrate node and recommended hardware. 
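The regenerated numbers below all follow the same shape: a constant base cost, optional per-item slopes from the benchmark parameters, and database read/write charges added on top. As a rough illustration of how such a formula is evaluated at runtime, here is the new `withdraw_unbonded_kill(s)` entry written out as plain Rust, with made-up per-operation costs standing in for `T::DbWeight` (the base and slope constants are the ones from the table below):

    type Weight = u64;

    /// Evaluate a benchmark-style formula: base + slope * s + db reads/writes,
    /// in the same shape as `withdraw_unbonded_kill(s)`.
    fn withdraw_unbonded_kill_weight(
        s: u64,        // number of slashing spans, the benchmark parameter
        read: Weight,  // assumed cost charged per database read
        write: Weight, // assumed cost charged per database write
    ) -> Weight {
        let base: Weight = 103_717_000;
        let per_span: Weight = 2_942_000;
        base.saturating_add(per_span.saturating_mul(s))
            .saturating_add(read.saturating_mul(8))
            .saturating_add(write.saturating_mul(6))
            // one extra write per slashing span removed
            .saturating_add(write.saturating_mul(s))
    }

    fn main() {
        // Hypothetical per-operation costs; the runtime takes these from `T::DbWeight`.
        let (read, write) = (25_000_000, 100_000_000);
        println!("s = 0: {}", withdraw_unbonded_kill_weight(0, read, write));
        println!("s = 3: {}", withdraw_unbonded_kill_weight(3, read, write));
    }
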
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn bond() -> Weight { - (91_959_000 as Weight) + (91_278_000 as Weight) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn bond_extra() -> Weight { - (69_291_000 as Weight) + (69_833_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn unbond() -> Weight { - (63_513_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) + (75_020_000 as Weight) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn withdraw_unbonded_update(s: u32, ) -> Weight { - (64_747_000 as Weight) - // Standard Error: 0 - .saturating_add((77_000 as Weight).saturating_mul(s as Weight)) + (63_898_000 as Weight) + // Standard Error: 1_000 + .saturating_add((50_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn withdraw_unbonded_kill(s: u32, ) -> Weight { - (100_375_000 as Weight) + (103_717_000 as Weight) // Standard Error: 1_000 - .saturating_add((3_067_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(8 as Weight)) + .saturating_add((2_942_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(8 as Weight)) + .saturating_add(T::DbWeight::get().writes(6 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn validate() -> Weight { - (17_849_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) + (40_702_000 as Weight) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn kick(k: u32, ) -> Weight { - (27_939_000 as Weight) - // Standard Error: 16_000 - .saturating_add((21_431_000 as Weight).saturating_mul(k as Weight)) + (33_572_000 as Weight) + // Standard Error: 18_000 + .saturating_add((20_771_000 as Weight).saturating_mul(k as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(k as Weight))) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) } fn nominate(n: u32, ) -> Weight { - (32_791_000 as Weight) - // Standard Error: 33_000 - .saturating_add((7_006_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) + (53_561_000 as Weight) + // Standard Error: 34_000 + .saturating_add((6_652_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn chill() -> Weight { - (17_014_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + (21_489_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) } fn set_payee() -> Weight { - (14_816_000 as Weight) + (14_514_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_controller() -> Weight { - (33_600_000 as Weight) + (32_598_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) 
.saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn set_validator_count() -> Weight { - (2_706_000 as Weight) + (2_477_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_no_eras() -> Weight { - (2_973_000 as Weight) + (2_743_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_new_era() -> Weight { - (2_949_000 as Weight) + (2_784_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_new_era_always() -> Weight { - (3_011_000 as Weight) + (2_749_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_invulnerables(v: u32, ) -> Weight { - (3_078_000 as Weight) + (2_798_000 as Weight) // Standard Error: 0 .saturating_add((5_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_unstake(s: u32, ) -> Weight { - (69_220_000 as Weight) - // Standard Error: 1_000 - .saturating_add((3_070_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(8 as Weight)) + (70_372_000 as Weight) + // Standard Error: 13_000 + .saturating_add((3_029_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(6 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn cancel_deferred_slash(s: u32, ) -> Weight { - (3_460_399_000 as Weight) - // Standard Error: 222_000 - .saturating_add((19_782_000 as Weight).saturating_mul(s as Weight)) + (3_436_822_000 as Weight) + // Standard Error: 221_000 + .saturating_add((19_799_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn payout_stakers_dead_controller(n: u32, ) -> Weight { - (120_436_000 as Weight) + (132_018_000 as Weight) // Standard Error: 27_000 - .saturating_add((63_092_000 as Weight).saturating_mul(n as Weight)) + .saturating_add((61_340_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(10 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) } fn payout_stakers_alive_staked(n: u32, ) -> Weight { - (181_424_000 as Weight) - // Standard Error: 51_000 - .saturating_add((78_631_000 as Weight).saturating_mul(n as Weight)) + (158_346_000 as Weight) + // Standard Error: 61_000 + .saturating_add((77_147_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(11 as Weight)) .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(3 as Weight)) .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(n as Weight))) } fn rebond(l: u32, ) -> Weight { - (59_349_000 as Weight) + (57_756_000 as Weight) // Standard Error: 2_000 - .saturating_add((64_000 as Weight).saturating_mul(l as Weight)) + .saturating_add((79_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn set_history_depth(e: u32, ) -> Weight { (0 as Weight) - // Standard Error: 97_000 - .saturating_add((44_609_000 as Weight).saturating_mul(e as Weight)) + // Standard Error: 100_000 + 
.saturating_add((44_873_000 as Weight).saturating_mul(e as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) .saturating_add(T::DbWeight::get().writes((7 as Weight).saturating_mul(e as Weight))) } fn reap_stash(s: u32, ) -> Weight { - (72_356_000 as Weight) - // Standard Error: 2_000 - .saturating_add((3_066_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(8 as Weight)) + (75_073_000 as Weight) + // Standard Error: 4_000 + .saturating_add((2_988_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(6 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn new_era(v: u32, n: u32, ) -> Weight { (0 as Weight) - // Standard Error: 1_462_000 - .saturating_add((393_007_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 73_000 - .saturating_add((72_014_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 1_146_000 + .saturating_add((362_986_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 57_000 + .saturating_add((60_216_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(10 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes(9 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) } fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight { (0 as Weight) - // Standard Error: 235_000 - .saturating_add((35_212_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 235_000 - .saturating_add((38_391_000 as Weight).saturating_mul(n as Weight)) - // Standard Error: 3_200_000 - .saturating_add((31_130_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 230_000 + .saturating_add((35_891_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 230_000 + .saturating_add((37_854_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 7_842_000 + .saturating_add((32_492_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) } fn get_npos_targets(v: u32, ) -> Weight { - (52_314_000 as Weight) - // Standard Error: 71_000 - .saturating_add((15_195_000 as Weight).saturating_mul(v as Weight)) + (0 as Weight) + // Standard Error: 74_000 + .saturating_add((16_370_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) } + fn update_staking_limits() -> Weight { + (6_398_000 as Weight) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) + } + fn chill_other() -> Weight { + (44_694_000 as Weight) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } } // For backwards compatibility and tests impl WeightInfo for () { fn bond() -> Weight { - 
(91_959_000 as Weight) + (91_278_000 as Weight) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } fn bond_extra() -> Weight { - (69_291_000 as Weight) + (69_833_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn unbond() -> Weight { - (63_513_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + (75_020_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn withdraw_unbonded_update(s: u32, ) -> Weight { - (64_747_000 as Weight) - // Standard Error: 0 - .saturating_add((77_000 as Weight).saturating_mul(s as Weight)) + (63_898_000 as Weight) + // Standard Error: 1_000 + .saturating_add((50_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn withdraw_unbonded_kill(s: u32, ) -> Weight { - (100_375_000 as Weight) + (103_717_000 as Weight) // Standard Error: 1_000 - .saturating_add((3_067_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(8 as Weight)) + .saturating_add((2_942_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(8 as Weight)) + .saturating_add(RocksDbWeight::get().writes(6 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn validate() -> Weight { - (17_849_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + (40_702_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn kick(k: u32, ) -> Weight { - (27_939_000 as Weight) - // Standard Error: 16_000 - .saturating_add((21_431_000 as Weight).saturating_mul(k as Weight)) + (33_572_000 as Weight) + // Standard Error: 18_000 + .saturating_add((20_771_000 as Weight).saturating_mul(k as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(k as Weight))) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) } fn nominate(n: u32, ) -> Weight { - (32_791_000 as Weight) - // Standard Error: 33_000 - .saturating_add((7_006_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + (53_561_000 as Weight) + // Standard Error: 34_000 + .saturating_add((6_652_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(7 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn chill() -> Weight { - (17_014_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + (21_489_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) } fn set_payee() -> Weight { - (14_816_000 as Weight) + (14_514_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn set_controller() -> Weight { - (33_600_000 as Weight) + (32_598_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } 
fn set_validator_count() -> Weight { - (2_706_000 as Weight) + (2_477_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_no_eras() -> Weight { - (2_973_000 as Weight) + (2_743_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_new_era() -> Weight { - (2_949_000 as Weight) + (2_784_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_new_era_always() -> Weight { - (3_011_000 as Weight) + (2_749_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn set_invulnerables(v: u32, ) -> Weight { - (3_078_000 as Weight) + (2_798_000 as Weight) // Standard Error: 0 .saturating_add((5_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_unstake(s: u32, ) -> Weight { - (69_220_000 as Weight) - // Standard Error: 1_000 - .saturating_add((3_070_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(8 as Weight)) + (70_372_000 as Weight) + // Standard Error: 13_000 + .saturating_add((3_029_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(6 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn cancel_deferred_slash(s: u32, ) -> Weight { - (3_460_399_000 as Weight) - // Standard Error: 222_000 - .saturating_add((19_782_000 as Weight).saturating_mul(s as Weight)) + (3_436_822_000 as Weight) + // Standard Error: 221_000 + .saturating_add((19_799_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn payout_stakers_dead_controller(n: u32, ) -> Weight { - (120_436_000 as Weight) + (132_018_000 as Weight) // Standard Error: 27_000 - .saturating_add((63_092_000 as Weight).saturating_mul(n as Weight)) + .saturating_add((61_340_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(10 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) } fn payout_stakers_alive_staked(n: u32, ) -> Weight { - (181_424_000 as Weight) - // Standard Error: 51_000 - .saturating_add((78_631_000 as Weight).saturating_mul(n as Weight)) + (158_346_000 as Weight) + // Standard Error: 61_000 + .saturating_add((77_147_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(11 as Weight)) .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(n as Weight))) } fn rebond(l: u32, ) -> Weight { - (59_349_000 as Weight) + (57_756_000 as Weight) // Standard Error: 2_000 - .saturating_add((64_000 as Weight).saturating_mul(l as Weight)) + .saturating_add((79_000 as Weight).saturating_mul(l as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn set_history_depth(e: u32, ) -> Weight { (0 as Weight) - // Standard Error: 97_000 - .saturating_add((44_609_000 as Weight).saturating_mul(e as Weight)) + // Standard Error: 100_000 + 
.saturating_add((44_873_000 as Weight).saturating_mul(e as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) .saturating_add(RocksDbWeight::get().writes((7 as Weight).saturating_mul(e as Weight))) } fn reap_stash(s: u32, ) -> Weight { - (72_356_000 as Weight) - // Standard Error: 2_000 - .saturating_add((3_066_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(8 as Weight)) + (75_073_000 as Weight) + // Standard Error: 4_000 + .saturating_add((2_988_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(6 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn new_era(v: u32, n: u32, ) -> Weight { (0 as Weight) - // Standard Error: 1_462_000 - .saturating_add((393_007_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 73_000 - .saturating_add((72_014_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 1_146_000 + .saturating_add((362_986_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 57_000 + .saturating_add((60_216_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(10 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) - .saturating_add(RocksDbWeight::get().writes(9 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) } fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight { (0 as Weight) - // Standard Error: 235_000 - .saturating_add((35_212_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 235_000 - .saturating_add((38_391_000 as Weight).saturating_mul(n as Weight)) - // Standard Error: 3_200_000 - .saturating_add((31_130_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 230_000 + .saturating_add((35_891_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 230_000 + .saturating_add((37_854_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 7_842_000 + .saturating_add((32_492_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) } fn get_npos_targets(v: u32, ) -> Weight { - (52_314_000 as Weight) - // Standard Error: 71_000 - .saturating_add((15_195_000 as Weight).saturating_mul(v as Weight)) + (0 as Weight) + // Standard Error: 74_000 + .saturating_add((16_370_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) } + fn update_staking_limits() -> Weight { + (6_398_000 as Weight) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + } + fn chill_other() -> Weight { + (44_694_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } } From c3e9fcf11c043c245d938d65b34f02152f7a3caf Mon Sep 
17 00:00:00 2001 From: Guillaume Thiolliere Date: Wed, 16 Jun 2021 11:09:24 +0200 Subject: [PATCH 09/67] Do not run pallet_ui test with conditional-storage feature (#9122) * do not run pallet_ui test with conditional-compilation feature * fix --- frame/support/test/tests/pallet_ui.rs | 1 + .../storage_info_unsatisfied_nmap.rs | 37 +++++++++---------- .../storage_info_unsatisfied_nmap.stderr | 12 ++++-- 3 files changed, 27 insertions(+), 23 deletions(-) diff --git a/frame/support/test/tests/pallet_ui.rs b/frame/support/test/tests/pallet_ui.rs index e5f4a54dfb000..fea7a2c7e7ad4 100644 --- a/frame/support/test/tests/pallet_ui.rs +++ b/frame/support/test/tests/pallet_ui.rs @@ -16,6 +16,7 @@ // limitations under the License. #[rustversion::attr(not(stable), ignore)] +#[cfg(not(feature = "conditional-storage"))] #[test] fn pallet_ui() { // As trybuild is using `cargo check`, we don't need the real WASM binaries. diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.rs b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.rs index ef31af92e5a37..3d03099c3c4b6 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.rs +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.rs @@ -1,28 +1,27 @@ -// #[frame_support::pallet] -// mod pallet { -// use frame_support::pallet_prelude::{Hooks, StorageNMap, Twox64Concat, NMapKey}; -// use frame_system::pallet_prelude::BlockNumberFor; +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::{Hooks, StorageNMap, Twox64Concat, NMapKey}; + use frame_system::pallet_prelude::BlockNumberFor; -// #[pallet::config] -// pub trait Config: frame_system::Config {} + #[pallet::config] + pub trait Config: frame_system::Config {} -// #[pallet::pallet] -// #[pallet::generate_storage_info] -// pub struct Pallet(core::marker::PhantomData); + #[pallet::pallet] + #[pallet::generate_storage_info] + pub struct Pallet(core::marker::PhantomData); -// #[pallet::hooks] -// impl Hooks> for Pallet {} + #[pallet::hooks] + impl Hooks> for Pallet {} -// #[pallet::call] -// impl Pallet {} + #[pallet::call] + impl Pallet {} -// #[derive(codec::Encode, codec::Decode)] -// struct Bar; + #[derive(codec::Encode, codec::Decode)] + struct Bar; -// #[pallet::storage] -// type Foo = StorageNMap<_, NMapKey, u32>; -// } + #[pallet::storage] + type Foo = StorageNMap<_, NMapKey, u32>; +} fn main() { - compile_error!("Temporarily disabled due to test flakiness"); } diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr index 9c69a3f076e34..545520124bfee 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr @@ -1,5 +1,9 @@ -error: Temporarily disabled due to test flakiness - --> $DIR/storage_info_unsatisfied_nmap.rs:27:2 +error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied + --> $DIR/storage_info_unsatisfied_nmap.rs:10:12 | -27 | compile_error!("Temporarily disabled due to test flakiness"); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +10 | #[pallet::generate_storage_info] + | ^^^^^^^^^^^^^^^^^^^^^ the trait `MaxEncodedLen` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `KeyGeneratorMaxEncodedLen` for `NMapKey` + = note: required because of the requirements on the impl of `StorageInfoTrait` for 
`frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, NMapKey, u32>` + = note: required by `storage_info` From 286d7ce1c983e06cedeefa3b44f410da0181f9aa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Wed, 16 Jun 2021 13:51:09 +0100 Subject: [PATCH 10/67] grandpa: cleanup sync bounds (#9127) * grandpa: cleanup sync bounds * grandpa: cleanup imports * remove cargo patch --- Cargo.lock | 4 +- client/finality-grandpa-warp-sync/Cargo.toml | 2 +- client/finality-grandpa-warp-sync/src/lib.rs | 5 +- client/finality-grandpa/Cargo.toml | 4 +- client/finality-grandpa/rpc/Cargo.toml | 2 +- .../finality-grandpa/src/communication/mod.rs | 2 +- client/finality-grandpa/src/environment.rs | 123 ++++++++++-------- client/finality-grandpa/src/finality_proof.rs | 7 +- client/finality-grandpa/src/import.rs | 83 ++++++------ client/finality-grandpa/src/lib.rs | 14 +- client/finality-grandpa/src/observer.rs | 60 +++++---- client/finality-grandpa/src/tests.rs | 2 +- client/finality-grandpa/src/until_imported.rs | 19 ++- client/finality-grandpa/src/voting_rule.rs | 5 +- frame/grandpa/Cargo.toml | 2 +- primitives/finality-grandpa/Cargo.toml | 2 +- 16 files changed, 188 insertions(+), 148 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 84f487ceedc99..fb944b782abd9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1679,9 +1679,9 @@ dependencies = [ [[package]] name = "finality-grandpa" -version = "0.14.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6447e2f8178843749e8c8003206def83ec124a7859475395777a28b5338647c" +checksum = "74a1bfdcc776e63e49f741c7ce6116fa1b887e8ac2e3ccb14dd4aa113e54feb9" dependencies = [ "either", "futures 0.3.15", diff --git a/client/finality-grandpa-warp-sync/Cargo.toml b/client/finality-grandpa-warp-sync/Cargo.toml index 3557d543c987e..27728e159c762 100644 --- a/client/finality-grandpa-warp-sync/Cargo.toml +++ b/client/finality-grandpa-warp-sync/Cargo.toml @@ -28,7 +28,7 @@ sp-finality-grandpa = { version = "3.0.0", path = "../../primitives/finality-gra sp-runtime = { version = "3.0.0", path = "../../primitives/runtime" } [dev-dependencies] -finality-grandpa = { version = "0.14.0" } +finality-grandpa = { version = "0.14.1" } rand = "0.8" sc-block-builder = { version = "0.9.0", path = "../block-builder" } sp-consensus = { version = "0.9.0", path = "../../primitives/consensus/common" } diff --git a/client/finality-grandpa-warp-sync/src/lib.rs b/client/finality-grandpa-warp-sync/src/lib.rs index a6b7e46a0f029..c0ef93e625fd8 100644 --- a/client/finality-grandpa-warp-sync/src/lib.rs +++ b/client/finality-grandpa-warp-sync/src/lib.rs @@ -40,7 +40,8 @@ pub fn request_response_config_for_chain, authority_set: SharedAuthoritySet>, ) -> RequestResponseConfig - where NumberFor: sc_finality_grandpa::BlockNumberOps, +where + NumberFor: sc_finality_grandpa::BlockNumberOps, { let protocol_id = config.protocol_id(); @@ -54,7 +55,7 @@ pub fn request_response_config_for_chain SyncCryptoStorePtr{ + fn keystore(&self) -> SyncCryptoStorePtr { (self.0).1.clone() } } diff --git a/client/finality-grandpa/src/environment.rs b/client/finality-grandpa/src/environment.rs index d3a5b49b50726..3d593a17ffdbf 100644 --- a/client/finality-grandpa/src/environment.rs +++ b/client/finality-grandpa/src/environment.rs @@ -23,43 +23,42 @@ use std::pin::Pin; use std::sync::Arc; use std::time::Duration; +use finality_grandpa::{ + round::State as RoundState, voter, voter_set::VoterSet, 
BlockNumberOps, Error as GrandpaError, +}; use futures::prelude::*; use futures_timer::Delay; use log::{debug, warn}; use parity_scale_codec::{Decode, Encode}; use parking_lot::RwLock; +use prometheus_endpoint::{register, Counter, Gauge, PrometheusError, U64}; -use sc_client_api::{backend::{Backend, apply_aux}, utils::is_descendent_of}; -use finality_grandpa::{ - BlockNumberOps, Error as GrandpaError, round::State as RoundState, - voter, voter_set::VoterSet, +use sc_client_api::{ + backend::{apply_aux, Backend}, + utils::is_descendent_of, }; +use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO}; use sp_blockchain::HeaderMetadata; -use sp_runtime::generic::BlockId; -use sp_runtime::traits::{ - Block as BlockT, Header as HeaderT, NumberFor, Zero, +use sp_consensus::SelectChain; +use sp_finality_grandpa::{ + AuthorityId, AuthoritySignature, Equivocation, EquivocationProof, GrandpaApi, RoundNumber, + SetId, GRANDPA_ENGINE_ID, }; -use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO}; +use sp_runtime::generic::BlockId; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}; use crate::{ - local_authority_id, CommandOrError, Commit, Config, Error, NewAuthoritySet, Precommit, Prevote, + authorities::{AuthoritySet, SharedAuthoritySet}, + communication::Network as NetworkT, + justification::GrandpaJustification, + local_authority_id, + notification::GrandpaJustificationSender, + until_imported::UntilVoteTargetImported, + voting_rule::VotingRule, + ClientForGrandpa, CommandOrError, Commit, Config, Error, NewAuthoritySet, Precommit, Prevote, PrimaryPropose, SignedMessage, VoterCommand, }; -use sp_consensus::SelectChain; - -use crate::authorities::{AuthoritySet, SharedAuthoritySet}; -use crate::communication::Network as NetworkT; -use crate::notification::GrandpaJustificationSender; -use crate::justification::GrandpaJustification; -use crate::until_imported::UntilVoteTargetImported; -use crate::voting_rule::VotingRule; -use sp_finality_grandpa::{ - AuthorityId, AuthoritySignature, Equivocation, EquivocationProof, GRANDPA_ENGINE_ID, - GrandpaApi, RoundNumber, SetId, -}; -use prometheus_endpoint::{register, Counter, Gauge, PrometheusError, U64}; - type HistoricalVotes = finality_grandpa::HistoricalVotes< ::Hash, NumberFor, @@ -480,10 +479,10 @@ impl Environment where Block: BlockT, BE: Backend, - C: crate::ClientForGrandpa, + C: ClientForGrandpa, C::Api: GrandpaApi, N: NetworkT, - SC: SelectChain + 'static, + SC: SelectChain, { /// Report the given equivocation to the GRANDPA runtime module. This method /// generates a session membership proof of the offender and then submits an @@ -578,24 +577,26 @@ where } } -impl - finality_grandpa::Chain> -for Environment +impl finality_grandpa::Chain> + for Environment where - Block: 'static, + Block: BlockT, BE: Backend, - C: crate::ClientForGrandpa, - N: NetworkT + 'static + Send, - SC: SelectChain + 'static, + C: ClientForGrandpa, + N: NetworkT, + SC: SelectChain, VR: VotingRule, NumberFor: BlockNumberOps, { - fn ancestry(&self, base: Block::Hash, block: Block::Hash) -> Result, GrandpaError> { + fn ancestry( + &self, + base: Block::Hash, + block: Block::Hash, + ) -> Result, GrandpaError> { ancestry(&self.client, base, block) } } - pub(crate) fn ancestry( client: &Arc, base: Block::Hash, @@ -624,27 +625,31 @@ where // skip one because our ancestry is meant to start from the parent of `block`, // and `tree_route` includes it. 
- Ok(tree_route.retracted().iter().skip(1).map(|e| e.hash).collect()) + Ok(tree_route + .retracted() + .iter() + .skip(1) + .map(|e| e.hash) + .collect()) } -impl voter::Environment> +impl voter::Environment> for Environment where - Block: 'static, + Block: BlockT, B: Backend, - C: crate::ClientForGrandpa + 'static, + C: ClientForGrandpa + 'static, C::Api: GrandpaApi, - N: NetworkT + 'static + Send + Sync, - SC: SelectChain + 'static, + N: NetworkT, + SC: SelectChain, VR: VotingRule, NumberFor: BlockNumberOps, { - type Timer = Pin> + Send + Sync>>; + type Timer = Pin> + Send>>; type BestChain = Pin< Box< dyn Future)>, Self::Error>> - + Send - + Sync + + Send, >, >; @@ -652,13 +657,29 @@ where type Signature = AuthoritySignature; // regular round message streams - type In = Pin, Self::Signature, Self::Id>, Self::Error> - > + Send + Sync>>; - type Out = Pin>, - Error = Self::Error, - > + Send + Sync>>; + type In = Pin< + Box< + dyn Stream< + Item = Result< + ::finality_grandpa::SignedMessage< + Block::Hash, + NumberFor, + Self::Signature, + Self::Id, + >, + Self::Error, + >, + > + Send, + >, + >; + type Out = Pin< + Box< + dyn Sink< + ::finality_grandpa::Message>, + Error = Self::Error, + > + Send, + >, + >; type Error = CommandOrError>; @@ -1223,7 +1244,7 @@ pub(crate) fn finalize_block( where Block: BlockT, BE: Backend, - Client: crate::ClientForGrandpa, + Client: ClientForGrandpa, { // NOTE: lock must be held through writing to DB to avoid race. this lock // also implicitly synchronizes the check for last finalized number diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index 6735d91ba8b75..ec33d48774ae5 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -62,9 +62,10 @@ pub struct FinalityProofProvider { shared_authority_set: Option>>, } -impl FinalityProofProvider +impl FinalityProofProvider where - B: Backend + Send + Sync + 'static, + Block: BlockT, + B: Backend, { /// Create new finality proof provider using: /// @@ -97,7 +98,7 @@ where impl FinalityProofProvider where Block: BlockT, - B: Backend + Send + Sync + 'static, + B: Backend, { /// Prove finality for the given block number by returning a Justification for the last block of /// the authority set. diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index 3d22cc8866100..de02ea357cac4 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -16,36 +16,33 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use std::{sync::Arc, collections::HashMap}; +use std::{collections::HashMap, marker::PhantomData, sync::Arc}; use log::debug; use parity_scale_codec::Encode; -use sp_blockchain::{BlockStatus, well_known_cache_keys}; use sc_client_api::{backend::Backend, utils::is_descendent_of}; +use sc_consensus::shared_data::{SharedDataLocked, SharedDataLockedUpgradable}; use sc_telemetry::TelemetryHandle; -use sp_utils::mpsc::TracingUnboundedSender; use sp_api::TransactionFor; -use sc_consensus::shared_data::{SharedDataLockedUpgradable, SharedDataLocked}; - +use sp_blockchain::{well_known_cache_keys, BlockStatus}; use sp_consensus::{ - BlockImport, Error as ConsensusError, - BlockCheckParams, BlockImportParams, BlockOrigin, ImportResult, JustificationImport, - SelectChain, + BlockCheckParams, BlockImport, BlockImportParams, BlockOrigin, Error as ConsensusError, + ImportResult, JustificationImport, SelectChain, }; use sp_finality_grandpa::{ConsensusLog, ScheduledChange, SetId, GRANDPA_ENGINE_ID}; -use sp_runtime::Justification; use sp_runtime::generic::{BlockId, OpaqueDigestItemId}; -use sp_runtime::traits::{ - Block as BlockT, DigestFor, Header as HeaderT, NumberFor, Zero, -}; +use sp_runtime::traits::{Block as BlockT, DigestFor, Header as HeaderT, NumberFor, Zero}; +use sp_runtime::Justification; +use sp_utils::mpsc::TracingUnboundedSender; -use crate::{Error, CommandOrError, NewAuthoritySet, VoterCommand}; -use crate::authorities::{AuthoritySet, SharedAuthoritySet, DelayKind, PendingChange}; -use crate::environment::finalize_block; -use crate::justification::GrandpaJustification; -use crate::notification::GrandpaJustificationSender; -use std::marker::PhantomData; +use crate::{ + authorities::{AuthoritySet, DelayKind, PendingChange, SharedAuthoritySet}, + environment::finalize_block, + justification::GrandpaJustification, + notification::GrandpaJustificationSender, + ClientForGrandpa, CommandOrError, Error, NewAuthoritySet, VoterCommand, +}; /// A block-import handler for GRANDPA. /// @@ -67,8 +64,8 @@ pub struct GrandpaBlockImport { _phantom: PhantomData, } -impl Clone for - GrandpaBlockImport +impl Clone + for GrandpaBlockImport { fn clone(&self) -> Self { GrandpaBlockImport { @@ -85,12 +82,13 @@ impl Clone for } impl JustificationImport - for GrandpaBlockImport where - NumberFor: finality_grandpa::BlockNumberOps, - DigestFor: Encode, - BE: Backend, - Client: crate::ClientForGrandpa, - SC: SelectChain, + for GrandpaBlockImport +where + NumberFor: finality_grandpa::BlockNumberOps, + DigestFor: Encode, + BE: Backend, + Client: ClientForGrandpa, + SC: SelectChain, { type Error = ConsensusError; @@ -219,13 +217,12 @@ pub fn find_forced_change( header.digest().convert_first(|l| l.try_to(id).and_then(filter_log)) } -impl - GrandpaBlockImport +impl GrandpaBlockImport where NumberFor: finality_grandpa::BlockNumberOps, DigestFor: Encode, BE: Backend, - Client: crate::ClientForGrandpa, + Client: ClientForGrandpa, { // check for a new authority set change. 
fn check_new_change( @@ -416,21 +413,25 @@ where let just_in_case = just_in_case.map(|(o, i)| (o, i.release_mutex())); - Ok(PendingSetChanges { just_in_case, applied_changes, do_pause }) + Ok(PendingSetChanges { + just_in_case, + applied_changes, + do_pause, + }) } } #[async_trait::async_trait] -impl BlockImport - for GrandpaBlockImport where - NumberFor: finality_grandpa::BlockNumberOps, - DigestFor: Encode, - BE: Backend, - Client: crate::ClientForGrandpa, - for<'a> &'a Client: - BlockImport>, - TransactionFor: Send + 'static, - SC: Send, +impl BlockImport for GrandpaBlockImport +where + NumberFor: finality_grandpa::BlockNumberOps, + DigestFor: Encode, + BE: Backend, + Client: ClientForGrandpa, + for<'a> &'a Client: + BlockImport>, + TransactionFor: 'static, + SC: Send, { type Error = ConsensusError; type Transaction = TransactionFor; @@ -630,7 +631,7 @@ impl GrandpaBlockImport GrandpaBlockImport where BE: Backend, - Client: crate::ClientForGrandpa, + Client: ClientForGrandpa, NumberFor: finality_grandpa::BlockNumberOps, { /// Import a block justification and finalize the block. diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index f249d3982cf25..a133319fdbef4 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -627,13 +627,17 @@ fn global_communication( metrics: Option, ) -> ( impl Stream< - Item = Result, CommandOrError>>, + Item = Result< + CommunicationInH, + CommandOrError>, + >, >, impl Sink< CommunicationOutH, Error = CommandOrError>, - > + Unpin, -) where + >, +) +where BE: Backend + 'static, C: ClientForGrandpa + 'static, N: NetworkT, @@ -707,11 +711,11 @@ pub fn grandpa_peers_set_config() -> sc_network::config::NonDefaultSetConfig { /// block import worker that has already been instantiated with `block_import`. pub fn run_grandpa_voter( grandpa_params: GrandpaParams, -) -> sp_blockchain::Result + Unpin + Send + 'static> +) -> sp_blockchain::Result + Send> where Block::Hash: Ord, BE: Backend + 'static, - N: NetworkT + Send + Sync + Clone + 'static, + N: NetworkT + Sync + 'static, SC: SelectChain + 'static, VR: VotingRule + Clone + 'static, NumberFor: BlockNumberOps, diff --git a/client/finality-grandpa/src/observer.rs b/client/finality-grandpa/src/observer.rs index 5434cd08a91d0..23c4f873a10b7 100644 --- a/client/finality-grandpa/src/observer.rs +++ b/client/finality-grandpa/src/observer.rs @@ -16,33 +16,33 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+use std::marker::{PhantomData, Unpin}; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; +use finality_grandpa::{voter, voter_set::VoterSet, BlockNumberOps, Error as GrandpaError}; use futures::prelude::*; - -use finality_grandpa::{ - BlockNumberOps, Error as GrandpaError, voter, voter_set::VoterSet -}; use log::{debug, info, warn}; -use sp_keystore::SyncCryptoStorePtr; -use sp_consensus::SelectChain; + use sc_client_api::backend::Backend; use sc_telemetry::TelemetryHandle; -use sp_utils::mpsc::TracingUnboundedReceiver; -use sp_runtime::traits::{NumberFor, Block as BlockT}; use sp_blockchain::HeaderMetadata; +use sp_consensus::SelectChain; +use sp_finality_grandpa::AuthorityId; +use sp_keystore::SyncCryptoStorePtr; +use sp_runtime::traits::{Block as BlockT, NumberFor}; +use sp_utils::mpsc::TracingUnboundedReceiver; use crate::{ - global_communication, CommandOrError, CommunicationIn, Config, environment, - LinkHalf, Error, aux_schema::PersistentData, VoterCommand, VoterSetState, + authorities::SharedAuthoritySet, + aux_schema::PersistentData, + communication::{Network as NetworkT, NetworkBridge}, + environment, global_communication, + notification::GrandpaJustificationSender, + ClientForGrandpa, CommandOrError, CommunicationIn, Config, Error, LinkHalf, VoterCommand, + VoterSetState, }; -use crate::authorities::SharedAuthoritySet; -use crate::communication::{Network as NetworkT, NetworkBridge}; -use crate::notification::GrandpaJustificationSender; -use sp_finality_grandpa::AuthorityId; -use std::marker::{PhantomData, Unpin}; struct ObserverChain<'a, Block: BlockT, Client> { client: &'a Arc, @@ -50,12 +50,17 @@ struct ObserverChain<'a, Block: BlockT, Client> { } impl<'a, Block, Client> finality_grandpa::Chain> - for ObserverChain<'a, Block, Client> where - Block: BlockT, - Client: HeaderMetadata, - NumberFor: BlockNumberOps, + for ObserverChain<'a, Block, Client> +where + Block: BlockT, + Client: HeaderMetadata, + NumberFor: BlockNumberOps, { - fn ancestry(&self, base: Block::Hash, block: Block::Hash) -> Result, GrandpaError> { + fn ancestry( + &self, + base: Block::Hash, + block: Block::Hash, + ) -> Result, GrandpaError> { environment::ancestry(&self.client, base, block) } } @@ -75,7 +80,7 @@ where S: Stream, CommandOrError>>>, F: Fn(u64), BE: Backend, - Client: crate::ClientForGrandpa, + Client: ClientForGrandpa, { let authority_set = authority_set.clone(); let client = client.clone(); @@ -160,13 +165,13 @@ pub fn run_grandpa_observer( config: Config, link: LinkHalf, network: N, -) -> sp_blockchain::Result + Unpin + Send + 'static> +) -> sp_blockchain::Result + Send> where BE: Backend + Unpin + 'static, - N: NetworkT + Send + Clone + 'static, - SC: SelectChain + 'static, + N: NetworkT, + SC: SelectChain, NumberFor: BlockNumberOps, - Client: crate::ClientForGrandpa + 'static, + Client: ClientForGrandpa + 'static, { let LinkHalf { client, @@ -223,7 +228,7 @@ impl ObserverWork where B: BlockT, BE: Backend + 'static, - Client: crate::ClientForGrandpa + 'static, + Client: ClientForGrandpa + 'static, Network: NetworkT, NumberFor: BlockNumberOps, { @@ -236,7 +241,6 @@ where justification_sender: Option>, telemetry: Option, ) -> Self { - let mut work = ObserverWork { // `observer` is set to a temporary value and replaced below when // calling `rebuild_observer`. 
@@ -344,7 +348,7 @@ impl Future for ObserverWork where B: BlockT, BE: Backend + Unpin + 'static, - C: crate::ClientForGrandpa + 'static, + C: ClientForGrandpa + 'static, N: NetworkT, NumberFor: BlockNumberOps, { diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 475c11191b10c..725beec6a94b2 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -1013,7 +1013,7 @@ fn voter_persists_its_votes() { fn alice_voter2( peers: &[Ed25519Keyring], net: Arc>, - ) -> impl Future + Unpin + Send + 'static { + ) -> impl Future + Send { let (keystore, _) = create_keystore(peers[0]); let mut net = net.lock(); diff --git a/client/finality-grandpa/src/until_imported.rs b/client/finality-grandpa/src/until_imported.rs index d2e896685658b..7cfd9e6074c47 100644 --- a/client/finality-grandpa/src/until_imported.rs +++ b/client/finality-grandpa/src/until_imported.rs @@ -136,12 +136,14 @@ impl Drop for Metrics { fn drop(&mut self) { // Reduce the global counter by the amount of messages that were still left in the dropped // queue. - self.global_waiting_messages.sub(self.local_waiting_messages) + self.global_waiting_messages + .sub(self.local_waiting_messages) } } /// Buffering incoming messages until blocks with given hashes are imported. -pub(crate) struct UntilImported where +pub(crate) struct UntilImported +where Block: BlockT, I: Stream + Unpin, M: BlockUntilImported, @@ -152,7 +154,7 @@ pub(crate) struct UntilImported wh incoming_messages: Fuse, ready: VecDeque, /// Interval at which to check status of each awaited block. - check_pending: Pin> + Send + Sync>>, + check_pending: Pin> + Send>>, /// Mapping block hashes to their block number, the point in time it was /// first encountered (Instant) and a list of GRANDPA messages referencing /// the block hash. @@ -164,13 +166,18 @@ pub(crate) struct UntilImported wh metrics: Option, } -impl Unpin for UntilImported where +impl Unpin + for UntilImported +where Block: BlockT, I: Stream + Unpin, M: BlockUntilImported, -{} +{ +} -impl UntilImported where +impl + UntilImported +where Block: BlockT, BlockStatus: BlockStatusT, BlockSyncRequester: BlockSyncRequesterT, diff --git a/client/finality-grandpa/src/voting_rule.rs b/client/finality-grandpa/src/voting_rule.rs index 3ede7649a1387..a5515c1be23ed 100644 --- a/client/finality-grandpa/src/voting_rule.rs +++ b/client/finality-grandpa/src/voting_rule.rs @@ -34,10 +34,11 @@ use sp_runtime::traits::{Block as BlockT, Header, NumberFor, One, Zero}; /// A future returned by a `VotingRule` to restrict a given vote, if any restriction is necessary. pub type VotingRuleResult = - Pin::Hash, NumberFor)>> + Send + Sync>>; + Pin::Hash, NumberFor)>> + Send>>; /// A trait for custom voting rules in GRANDPA. 
-pub trait VotingRule: DynClone + Send + Sync where +pub trait VotingRule: DynClone + Send + Sync +where Block: BlockT, B: HeaderBackend, { diff --git a/frame/grandpa/Cargo.toml b/frame/grandpa/Cargo.toml index c6cfa96f7da1b..5c3cac8f82182 100644 --- a/frame/grandpa/Cargo.toml +++ b/frame/grandpa/Cargo.toml @@ -31,7 +31,7 @@ log = { version = "0.4.14", default-features = false } [dev-dependencies] frame-benchmarking = { version = "3.1.0", path = "../benchmarking" } -grandpa = { package = "finality-grandpa", version = "0.14.0", features = ["derive-codec"] } +grandpa = { package = "finality-grandpa", version = "0.14.1", features = ["derive-codec"] } sp-keyring = { version = "3.0.0", path = "../../primitives/keyring" } pallet-balances = { version = "3.0.0", path = "../balances" } pallet-offences = { version = "3.0.0", path = "../offences" } diff --git a/primitives/finality-grandpa/Cargo.toml b/primitives/finality-grandpa/Cargo.toml index 95aa65c930f78..ec9e89105d581 100644 --- a/primitives/finality-grandpa/Cargo.toml +++ b/primitives/finality-grandpa/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } -grandpa = { package = "finality-grandpa", version = "0.14.0", default-features = false, features = ["derive-codec"] } +grandpa = { package = "finality-grandpa", version = "0.14.1", default-features = false, features = ["derive-codec"] } log = { version = "0.4.8", optional = true } serde = { version = "1.0.101", optional = true, features = ["derive"] } sp-api = { version = "3.0.0", default-features = false, path = "../api" } From 63a0d0bcf4cb0327dd78ed39d71b770bfc1dfc8a Mon Sep 17 00:00:00 2001 From: Shaun Wang Date: Thu, 17 Jun 2021 02:17:57 +1200 Subject: [PATCH 11/67] Migrate pallet-elections to pallet attribute macro (#9088) * Migrate elections pallet to pallet attribute macro. * Metadata fix. * Update frame/elections/src/lib.rs Co-authored-by: Guillaume Thiolliere --- frame/elections/src/lib.rs | 545 ++++++++++++++++++++---------------- frame/elections/src/mock.rs | 4 +- 2 files changed, 313 insertions(+), 236 deletions(-) diff --git a/frame/elections/src/lib.rs b/frame/elections/src/lib.rs index 46ec62bf75174..b536713935626 100644 --- a/frame/elections/src/lib.rs +++ b/frame/elections/src/lib.rs @@ -22,7 +22,7 @@ //! //! --- //! -//! Election module for stake-weighted membership selection of a collective. +//! Election pallet for stake-weighted membership selection of a collective. //! //! The composition of a set of account IDs works according to one or more approval votes //! weighted by stake. 
There is a partial carry-over facility to give greater weight to those @@ -33,19 +33,20 @@ use sp_std::prelude::*; use sp_runtime::{ - RuntimeDebug, DispatchResult, print, + RuntimeDebug, print, traits::{Zero, One, StaticLookup, Saturating}, }; use frame_support::{ - decl_storage, decl_event, ensure, decl_module, decl_error, + pallet_prelude::*, ensure, weights::{Weight, DispatchClass}, traits::{ - Currency, ExistenceRequirement, Get, LockableCurrency, LockIdentifier, BalanceStatus, + Currency, ExistenceRequirement, LockableCurrency, LockIdentifier, BalanceStatus, OnUnbalanced, ReservableCurrency, WithdrawReasons, ChangeMembers, } }; use codec::{Encode, Decode}; -use frame_system::{ensure_signed, ensure_root}; +use frame_system::pallet_prelude::*; +pub use pallet::*; mod mock; mod tests; @@ -152,141 +153,250 @@ type ApprovalFlag = u32; /// Number of approval flags that can fit into [`ApprovalFlag`] type. const APPROVAL_FLAG_LEN: usize = 32; -pub trait Config: frame_system::Config { - type Event: From> + Into<::Event>; +#[frame_support::pallet] +pub mod pallet { + use super::*; - /// Identifier for the elections pallet's lock - type PalletId: Get; + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); - /// The currency that people are electing with. - type Currency: - LockableCurrency - + ReservableCurrency; + #[pallet::config] + pub trait Config: frame_system::Config { + type Event: From> + IsType<::Event>; - /// Handler for the unbalanced reduction when slashing a validator. - type BadPresentation: OnUnbalanced>; + /// Identifier for the elections pallet's lock + #[pallet::constant] + type PalletId: Get; - /// Handler for the unbalanced reduction when slashing an invalid reaping attempt. - type BadReaper: OnUnbalanced>; + /// The currency that people are electing with. + type Currency: + LockableCurrency + + ReservableCurrency; - /// Handler for the unbalanced reduction when submitting a bad `voter_index`. - type BadVoterIndex: OnUnbalanced>; + /// Handler for the unbalanced reduction when slashing a validator. + type BadPresentation: OnUnbalanced>; - /// Handler for the unbalanced reduction when a candidate has lost (and is not a runner up) - type LoserCandidate: OnUnbalanced>; + /// Handler for the unbalanced reduction when slashing an invalid reaping attempt. + type BadReaper: OnUnbalanced>; - /// What to do when the members change. - type ChangeMembers: ChangeMembers; + /// Handler for the unbalanced reduction when submitting a bad `voter_index`. + type BadVoterIndex: OnUnbalanced>; - /// How much should be locked up in order to submit one's candidacy. A reasonable - /// default value is 9. - type CandidacyBond: Get>; + /// Handler for the unbalanced reduction when a candidate has lost (and is not a runner up) + type LoserCandidate: OnUnbalanced>; - /// How much should be locked up in order to be able to submit votes. - type VotingBond: Get>; + /// What to do when the members change. + type ChangeMembers: ChangeMembers; - /// The amount of fee paid upon each vote submission, unless if they submit a - /// _hole_ index and replace it. - type VotingFee: Get>; + /// How much should be locked up in order to submit one's candidacy. A reasonable + /// default value is 9. + #[pallet::constant] + type CandidacyBond: Get>; - /// Minimum about that can be used as the locked value for voting. - type MinimumVotingLock: Get>; + /// How much should be locked up in order to be able to submit votes. 
+ #[pallet::constant] + type VotingBond: Get>; - /// The punishment, per voter, if you provide an invalid presentation. A - /// reasonable default value is 1. - type PresentSlashPerVoter: Get>; + /// The amount of fee paid upon each vote submission, unless if they submit a + /// _hole_ index and replace it. + #[pallet::constant] + type VotingFee: Get>; - /// How many runners-up should have their approvals persist until the next - /// vote. A reasonable default value is 2. - type CarryCount: Get; + /// Minimum about that can be used as the locked value for voting. + #[pallet::constant] + type MinimumVotingLock: Get>; - /// How many vote indices need to go by after a target voter's last vote before - /// they can be reaped if their approvals are moot. A reasonable default value - /// is 1. - type InactiveGracePeriod: Get; + /// The punishment, per voter, if you provide an invalid presentation. A + /// reasonable default value is 1. + #[pallet::constant] + type PresentSlashPerVoter: Get>; - /// How often (in blocks) to check for new votes. A reasonable default value - /// is 1000. - type VotingPeriod: Get; + /// How many runners-up should have their approvals persist until the next + /// vote. A reasonable default value is 2. + #[pallet::constant] + type CarryCount: Get; - /// Decay factor of weight when being accumulated. It should typically be set to - /// __at least__ `membership_size -1` to keep the collective secure. - /// When set to `N`, it indicates `(1/N)^t` of staked is decayed at weight - /// increment step `t`. 0 will result in no weight being added at all (normal - /// approval voting). A reasonable default value is 24. - type DecayRatio: Get; -} + /// How many vote indices need to go by after a target voter's last vote before + /// they can be reaped if their approvals are moot. A reasonable default value + /// is 1. + #[pallet::constant] + type InactiveGracePeriod: Get; -decl_storage! { - trait Store for Module as Elections { - // ---- parameters - - /// How long to give each top candidate to present themselves after the vote ends. - pub PresentationDuration get(fn presentation_duration) config(): T::BlockNumber; - /// How long each position is active for. - pub TermDuration get(fn term_duration) config(): T::BlockNumber; - /// Number of accounts that should constitute the collective. - pub DesiredSeats get(fn desired_seats) config(): u32; - - // ---- permanent state (always relevant, changes only at the finalization of voting) - - /// The current membership. When there's a vote going on, this should still be used for - /// executive matters. The block number (second element in the tuple) is the block that - /// their position is active until (calculated by the sum of the block number when the - /// member was elected and their term duration). - pub Members get(fn members) config(): Vec<(T::AccountId, T::BlockNumber)>; - /// The total number of vote rounds that have happened or are in progress. - pub VoteCount get(fn vote_index): VoteIndex; - - // ---- persistent state (always relevant, changes constantly) - - // A list of votes for each voter. The votes are stored as numeric values and parsed in a - // bit-wise manner. In order to get a human-readable representation (`Vec`), use - // [`all_approvals_of`]. Furthermore, each vector of scalars is chunked with the cap of - // `APPROVAL_SET_SIZE`. - /// - /// TWOX-NOTE: SAFE as `AccountId` is a crypto hash and `SetIndex` is not - /// attacker-controlled. 
- pub ApprovalsOf get(fn approvals_of): - map hasher(twox_64_concat) (T::AccountId, SetIndex) => Vec; - /// The vote index and list slot that the candidate `who` was registered or `None` if they - /// are not currently registered. - /// - /// TWOX-NOTE: SAFE as `AccountId` is a crypto hash. - pub RegisterInfoOf get(fn candidate_reg_info): - map hasher(twox_64_concat) T::AccountId => Option<(VoteIndex, u32)>; - /// Basic information about a voter. - /// - /// TWOX-NOTE: SAFE as `AccountId` is a crypto hash. - pub VoterInfoOf get(fn voter_info): - map hasher(twox_64_concat) T::AccountId => Option>>; - /// The present voter list (chunked and capped at [`VOTER_SET_SIZE`]). - /// - /// TWOX-NOTE: OKAY ― `SetIndex` is not user-controlled data. - pub Voters get(fn voters): map hasher(twox_64_concat) SetIndex => Vec>; - /// the next free set to store a voter in. This will keep growing. - pub NextVoterSet get(fn next_nonfull_voter_set): SetIndex = 0; - /// Current number of Voters. - pub VoterCount get(fn voter_count): SetIndex = 0; - /// The present candidate list. - pub Candidates get(fn candidates): Vec; // has holes - /// Current number of active candidates - pub CandidateCount get(fn candidate_count): u32; - - // ---- temporary state (only relevant during finalization/presentation) - - /// The accounts holding the seats that will become free on the next tally. - pub NextFinalize get(fn next_finalize): Option<(T::BlockNumber, u32, Vec)>; - /// Get the leaderboard if we're in the presentation phase. The first element is the weight - /// of each entry; It may be the direct summed approval stakes, or a weighted version of it. - /// Sorted from low to high. - pub Leaderboard get(fn leaderboard): Option, T::AccountId)> >; + /// How often (in blocks) to check for new votes. A reasonable default value + /// is 1000. + #[pallet::constant] + type VotingPeriod: Get; + + /// Decay factor of weight when being accumulated. It should typically be set to + /// __at least__ `membership_size -1` to keep the collective secure. + /// When set to `N`, it indicates `(1/N)^t` of staked is decayed at weight + /// increment step `t`. 0 will result in no weight being added at all (normal + /// approval voting). A reasonable default value is 24. + #[pallet::constant] + type DecayRatio: Get; } -} -decl_error! { - /// Error for the elections module. - pub enum Error for Module { + #[pallet::extra_constants] + impl Pallet { + //TODO: rename to snake case after https://github.com/paritytech/substrate/issues/8826 fixed. + /// The chunk size of the voter vector. + #[allow(non_snake_case)] + fn VOTER_SET_SIZE() -> u32 { + VOTER_SET_SIZE as u32 + } + + //TODO: rename to snake case after https://github.com/paritytech/substrate/issues/8826 fixed. + /// The chunk size of the approval vector. + #[allow(non_snake_case)] + fn APPROVAL_SET_SIZE() -> u32 { + APPROVAL_SET_SIZE as u32 + } + } + + // ---- permanent state (always relevant, changes only at the finalization of voting) + + /// How long to give each top candidate to present themselves after the vote ends. + #[pallet::storage] + #[pallet::getter(fn presentation_duration)] + pub type PresentationDuration = StorageValue<_, T::BlockNumber, ValueQuery>; + + /// How long each position is active for. + #[pallet::storage] + #[pallet::getter(fn term_duration)] + pub type TermDuration = StorageValue<_, T::BlockNumber, ValueQuery>; + + /// Number of accounts that should constitute the collective. 
+ #[pallet::storage] + #[pallet::getter(fn desired_seats)] + pub type DesiredSeats = StorageValue<_, u32, ValueQuery>; + + // ---- permanent state (always relevant, changes only at the finalization of voting) + + /// The current membership. When there's a vote going on, this should still be used for + /// executive matters. The block number (second element in the tuple) is the block that + /// their position is active until (calculated by the sum of the block number when the + /// member was elected and their term duration). + #[pallet::storage] + #[pallet::getter(fn members)] + pub type Members = StorageValue<_, Vec<(T::AccountId, T::BlockNumber)>, ValueQuery>; + + /// The total number of vote rounds that have happened or are in progress. + #[pallet::storage] + #[pallet::getter(fn vote_index)] + pub type VoteCount = StorageValue<_, VoteIndex, ValueQuery>; + + // ---- persistent state (always relevant, changes constantly) + + // A list of votes for each voter. The votes are stored as numeric values and parsed in a + // bit-wise manner. In order to get a human-readable representation (`Vec`), use + // [`all_approvals_of`]. Furthermore, each vector of scalars is chunked with the cap of + // `APPROVAL_SET_SIZE`. + /// + /// TWOX-NOTE: SAFE as `AccountId` is a crypto hash and `SetIndex` is not + /// attacker-controlled. + #[pallet::storage] + #[pallet::getter(fn approvals_of)] + pub type ApprovalsOf = StorageMap< + _, + Twox64Concat, (T::AccountId, SetIndex), + Vec, + ValueQuery, + >; + + /// The vote index and list slot that the candidate `who` was registered or `None` if they + /// are not currently registered. + /// + /// TWOX-NOTE: SAFE as `AccountId` is a crypto hash. + #[pallet::storage] + #[pallet::getter(fn candidate_reg_info)] + pub type RegisterInfoOf = StorageMap<_, Twox64Concat, T::AccountId, (VoteIndex, u32)>; + + /// Basic information about a voter. + /// + /// TWOX-NOTE: SAFE as `AccountId` is a crypto hash. + #[pallet::storage] + #[pallet::getter(fn voter_info)] + pub type VoterInfoOf = StorageMap<_, Twox64Concat, T::AccountId, VoterInfo>>; + + /// The present voter list (chunked and capped at [`VOTER_SET_SIZE`]). + /// + /// TWOX-NOTE: OKAY ― `SetIndex` is not user-controlled data. + #[pallet::storage] + #[pallet::getter(fn voters)] + pub type Voters = StorageMap< + _, + Twox64Concat, SetIndex, + Vec>, + ValueQuery, + >; + + /// the next free set to store a voter in. This will keep growing. + #[pallet::storage] + #[pallet::getter(fn next_nonfull_voter_set)] + pub type NextVoterSet = StorageValue<_, SetIndex, ValueQuery>; + + /// Current number of Voters. + #[pallet::storage] + #[pallet::getter(fn voter_count)] + pub type VoterCount = StorageValue<_, SetIndex, ValueQuery>; + + /// The present candidate list. + #[pallet::storage] + #[pallet::getter(fn candidates)] + pub type Candidates = StorageValue<_, Vec, ValueQuery>; // has holes + + /// Current number of active candidates + #[pallet::storage] + #[pallet::getter(fn candidate_count)] + pub type CandidateCount = StorageValue<_, u32, ValueQuery>; + + // ---- temporary state (only relevant during finalization/presentation) + + /// The accounts holding the seats that will become free on the next tally. + #[pallet::storage] + #[pallet::getter(fn next_finalize)] + pub type NextFinalize = StorageValue<_, (T::BlockNumber, u32, Vec)>; + + /// Get the leaderboard if we're in the presentation phase. The first element is the weight + /// of each entry; It may be the direct summed approval stakes, or a weighted version of it. 
+ /// Sorted from low to high. + #[pallet::storage] + #[pallet::getter(fn leaderboard)] + pub type Leaderboard = StorageValue<_, Vec<(BalanceOf, T::AccountId)>>; + + #[pallet::genesis_config] + pub struct GenesisConfig { + pub presentation_duration: T::BlockNumber, + pub term_duration: T::BlockNumber, + pub desired_seats: u32, + pub members: Vec<(T::AccountId, T::BlockNumber)>, + } + + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + GenesisConfig { + presentation_duration: Default::default(), + term_duration: Default::default(), + desired_seats: Default::default(), + members: Default::default(), + } + } + } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + PresentationDuration::::put(self.presentation_duration); + TermDuration::::put(self.term_duration); + DesiredSeats::::put(self.desired_seats); + Members::::put(&self.members); + } + } + + #[pallet::error] + pub enum Error { /// Reporter must be a voter. NotVoter, /// Target for inactivity cleanup must be active. @@ -342,59 +452,35 @@ decl_error! { /// No approval changes during presentation period. ApprovalPresentation, } -} -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - /// How much should be locked up in order to submit one's candidacy. A reasonable - /// default value is 9. - const CandidacyBond: BalanceOf = T::CandidacyBond::get(); - - /// How much should be locked up in order to be able to submit votes. - const VotingBond: BalanceOf = T::VotingBond::get(); - - /// The amount of fee paid upon each vote submission, unless if they submit a - /// _hole_ index and replace it. - const VotingFee: BalanceOf = T::VotingFee::get(); - - /// The punishment, per voter, if you provide an invalid presentation. A - /// reasonable default value is 1. - const PresentSlashPerVoter: BalanceOf = T::PresentSlashPerVoter::get(); - - /// How many runners-up should have their approvals persist until the next - /// vote. A reasonable default value is 2. - const CarryCount: u32 = T::CarryCount::get(); - - /// How many vote indices need to go by after a target voter's last vote before - /// they can be reaped if their approvals are moot. A reasonable default value - /// is 1. - const InactiveGracePeriod: VoteIndex = T::InactiveGracePeriod::get(); - - /// How often (in blocks) to check for new votes. A reasonable default value - /// is 1000. - const VotingPeriod: T::BlockNumber = T::VotingPeriod::get(); - - /// Minimum about that can be used as the locked value for voting. - const MinimumVotingLock: BalanceOf = T::MinimumVotingLock::get(); - - /// Decay factor of weight when being accumulated. It should typically be set to - /// __at least__ `membership_size -1` to keep the collective secure. - /// When set to `N`, it indicates `(1/N)^t` of staked is decayed at weight - /// increment step `t`. 0 will result in no weight being added at all (normal - /// approval voting). A reasonable default value is 24. - const DecayRatio: u32 = T::DecayRatio::get(); - - /// The chunk size of the voter vector. - const VOTER_SET_SIZE: u32 = VOTER_SET_SIZE as u32; - /// The chunk size of the approval vector. 
- const APPROVAL_SET_SIZE: u32 = APPROVAL_SET_SIZE as u32; - - const PalletId: LockIdentifier = T::PalletId::get(); + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_initialize(n: T::BlockNumber) -> Weight { + if let Err(e) = Self::end_block(n) { + print("Guru meditation"); + print(e); + } + 0 + } + } - fn deposit_event() = default; + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + #[pallet::metadata(T::AccountId = "AccountId", Vec = "Vec")] + pub enum Event { + /// Reaped \[voter, reaper\]. + VoterReaped(T::AccountId, T::AccountId), + /// Slashed \[reaper\]. + BadReaperSlashed(T::AccountId), + /// A tally (for approval votes of \[seats\]) has started. + TallyStarted(u32), + /// A tally (for approval votes of seat(s)) has ended (with one or more new members). + /// \[incoming, outgoing\] + TallyFinalized(Vec, Vec), + } + #[pallet::call] + impl Pallet { /// Set candidate approvals. Approval slots stay valid as long as candidates in those slots /// are registered. /// @@ -419,13 +505,13 @@ decl_module! { /// - Two extra DB entries, one DB change. /// - Argument `votes` is limited in length to number of candidates. /// # - #[weight = 2_500_000_000] - fn set_approvals( - origin, + #[pallet::weight(2_500_000_000)] + pub fn set_approvals( + origin: OriginFor, votes: Vec, - #[compact] index: VoteIndex, + #[pallet::compact] index: VoteIndex, hint: SetIndex, - #[compact] value: BalanceOf, + #[pallet::compact] value: BalanceOf, ) -> DispatchResult { let who = ensure_signed(origin)?; Self::do_set_approvals(who, votes, index, hint, value) @@ -443,14 +529,14 @@ decl_module! { /// - O(1). /// - Two fewer DB entries, one DB change. /// # - #[weight = 2_500_000_000] - fn reap_inactive_voter( - origin, - #[compact] reporter_index: u32, + #[pallet::weight(2_500_000_000)] + pub fn reap_inactive_voter( + origin: OriginFor, + #[pallet::compact] reporter_index: u32, who: ::Source, - #[compact] who_index: u32, - #[compact] assumed_vote_index: VoteIndex, - ) { + #[pallet::compact] who_index: u32, + #[pallet::compact] assumed_vote_index: VoteIndex, + ) -> DispatchResult { let reporter = ensure_signed(origin)?; let who = T::Lookup::lookup(who)?; @@ -499,12 +585,13 @@ decl_module! { // This only fails if `reporter` doesn't exist, which it clearly must do since its // the origin. Still, it's no more harmful to propagate any error at this point. T::Currency::repatriate_reserved(&who, &reporter, T::VotingBond::get(), BalanceStatus::Free)?; - Self::deposit_event(RawEvent::VoterReaped(who, reporter)); + Self::deposit_event(Event::::VoterReaped(who, reporter)); } else { let imbalance = T::Currency::slash_reserved(&reporter, T::VotingBond::get()).0; T::BadReaper::on_unbalanced(imbalance); - Self::deposit_event(RawEvent::BadReaperSlashed(reporter)); + Self::deposit_event(Event::::BadReaperSlashed(reporter)); } + Ok(()) } /// Remove a voter. All votes are cancelled and the voter deposit is returned. @@ -517,8 +604,8 @@ decl_module! { /// - O(1). /// - Two fewer DB entries, one DB change. /// # - #[weight = 1_250_000_000] - fn retract_voter(origin, #[compact] index: u32) { + #[pallet::weight(1_250_000_000)] + pub fn retract_voter(origin: OriginFor, #[pallet::compact] index: u32) -> DispatchResult { let who = ensure_signed(origin)?; ensure!(!Self::presentation_active(), Error::::CannotRetractPresenting); @@ -530,6 +617,7 @@ decl_module! 
{ Self::remove_voter(&who, index); T::Currency::unreserve(&who, T::VotingBond::get()); T::Currency::remove_lock(T::PalletId::get(), &who); + Ok(()) } /// Submit oneself for candidacy. @@ -545,8 +633,8 @@ decl_module! { /// - Independent of input. /// - Three DB changes. /// # - #[weight = 2_500_000_000] - fn submit_candidacy(origin, #[compact] slot: u32) { + #[pallet::weight(2_500_000_000)] + pub fn submit_candidacy(origin: OriginFor, #[pallet::compact] slot: u32) -> DispatchResult { let who = ensure_signed(origin)?; ensure!(!Self::is_a_candidate(&who), Error::::DuplicatedCandidate); @@ -570,7 +658,8 @@ decl_module! { candidates[slot] = who; } >::put(candidates); - CandidateCount::put(count as u32 + 1); + CandidateCount::::put(count as u32 + 1); + Ok(()) } /// Claim that `candidate` is one of the top `carry_count + desired_seats` candidates. Only @@ -582,12 +671,12 @@ decl_module! { /// - O(voters) compute. /// - One DB change. /// # - #[weight = 10_000_000_000] - fn present_winner( - origin, + #[pallet::weight(10_000_000_000)] + pub fn present_winner( + origin: OriginFor, candidate: ::Source, - #[compact] total: BalanceOf, - #[compact] index: VoteIndex, + #[pallet::compact] total: BalanceOf, + #[pallet::compact] index: VoteIndex, ) -> DispatchResult { let who = ensure_signed(origin)?; ensure!( @@ -656,18 +745,19 @@ decl_module! { /// Set the desired member count; if lower than the current count, then seats will not be up /// election when they expire. If more, then a new vote will be started if one is not /// already in progress. - #[weight = (0, DispatchClass::Operational)] - fn set_desired_seats(origin, #[compact] count: u32) { + #[pallet::weight((0, DispatchClass::Operational))] + pub fn set_desired_seats(origin: OriginFor, #[pallet::compact] count: u32) -> DispatchResult { ensure_root(origin)?; - DesiredSeats::put(count); + DesiredSeats::::put(count); + Ok(()) } /// Remove a particular member from the set. This is effective immediately. /// /// Note: A tally should happen instantly (if not already in a presentation /// period) to fill the seat if removal means that the desired members are not met. - #[weight = (0, DispatchClass::Operational)] - fn remove_member(origin, who: ::Source) { + #[pallet::weight((0, DispatchClass::Operational))] + pub fn remove_member(origin: OriginFor, who: ::Source) -> DispatchResult { ensure_root(origin)?; let who = T::Lookup::lookup(who)?; let new_set: Vec<(T::AccountId, T::BlockNumber)> = Self::members() @@ -677,49 +767,36 @@ decl_module! { >::put(&new_set); let new_set = new_set.into_iter().map(|x| x.0).collect::>(); T::ChangeMembers::change_members(&[], &[who], new_set); + Ok(()) } /// Set the presentation duration. If there is currently a vote being presented for, will /// invoke `finalize_vote`. - #[weight = (0, DispatchClass::Operational)] - fn set_presentation_duration(origin, #[compact] count: T::BlockNumber) { + #[pallet::weight((0, DispatchClass::Operational))] + pub fn set_presentation_duration( + origin: OriginFor, + #[pallet::compact] count: T::BlockNumber, + ) -> DispatchResult { ensure_root(origin)?; >::put(count); + Ok(()) } /// Set the presentation duration. If there is current a vote being presented for, will /// invoke `finalize_vote`. 
- #[weight = (0, DispatchClass::Operational)] - fn set_term_duration(origin, #[compact] count: T::BlockNumber) { + #[pallet::weight((0, DispatchClass::Operational))] + pub fn set_term_duration( + origin: OriginFor, + #[pallet::compact] count: T::BlockNumber, + ) -> DispatchResult { ensure_root(origin)?; >::put(count); - } - - fn on_initialize(n: T::BlockNumber) -> Weight { - if let Err(e) = Self::end_block(n) { - print("Guru meditation"); - print(e); - } - 0 + Ok(()) } } } -decl_event!( - pub enum Event where ::AccountId { - /// Reaped \[voter, reaper\]. - VoterReaped(AccountId, AccountId), - /// Slashed \[reaper\]. - BadReaperSlashed(AccountId), - /// A tally (for approval votes of \[seats\]) has started. - TallyStarted(u32), - /// A tally (for approval votes of seat(s)) has ended (with one or more new members). - /// \[incoming, outgoing\] - TallyFinalized(Vec, Vec), - } -); - -impl Module { +impl Pallet { // exposed immutables. /// True if we're currently in a presentation period. @@ -800,7 +877,7 @@ impl Module { let mut set = Self::voters(set_index); set[vec_index] = None; >::insert(set_index, set); - VoterCount::mutate(|c| *c = *c - 1); + VoterCount::::mutate(|c| *c = *c - 1); Self::remove_all_approvals_of(voter); >::remove(voter); } @@ -879,14 +956,14 @@ impl Module { locked_balance -= T::VotingFee::get(); } if set_len + 1 == VOTER_SET_SIZE { - NextVoterSet::put(next + 1); + NextVoterSet::::put(next + 1); } >::append(next, Some(who.clone())); } } T::Currency::reserve(&who, T::VotingBond::get())?; - VoterCount::mutate(|c| *c = *c + 1); + VoterCount::::mutate(|c| *c = *c + 1); } T::Currency::set_lock( @@ -928,7 +1005,7 @@ impl Module { let leaderboard_size = empty_seats + T::CarryCount::get() as usize; >::put(vec![(BalanceOf::::zero(), T::AccountId::default()); leaderboard_size]); - Self::deposit_event(RawEvent::TallyStarted(empty_seats as u32)); + Self::deposit_event(Event::::TallyStarted(empty_seats as u32)); } } @@ -1017,11 +1094,11 @@ impl Module { new_candidates.truncate(last_index + 1); } - Self::deposit_event(RawEvent::TallyFinalized(incoming, outgoing)); + Self::deposit_event(Event::::TallyFinalized(incoming, outgoing)); >::put(new_candidates); - CandidateCount::put(count); - VoteCount::put(Self::vote_index() + 1); + CandidateCount::::put(count); + VoteCount::::put(Self::vote_index() + 1); Ok(()) } diff --git a/frame/elections/src/mock.rs b/frame/elections/src/mock.rs index bb67622eb7ea1..7eef7f4909982 100644 --- a/frame/elections/src/mock.rs +++ b/frame/elections/src/mock.rs @@ -20,7 +20,7 @@ #![cfg(test)] use frame_support::{ - StorageValue, StorageMap, parameter_types, assert_ok, + parameter_types, assert_ok, traits::{ChangeMembers, Currency, LockIdentifier}, }; use sp_core::H256; @@ -266,7 +266,7 @@ pub(crate) fn new_test_ext_with_candidate_holes() -> sp_io::TestExternalities { let mut t = ExtBuilder::default().build(); t.execute_with(|| { >::put(vec![0, 0, 1]); - elections::CandidateCount::put(1); + elections::CandidateCount::::put(1); >::insert(1, (0, 2)); }); t From 955633c50fcef1e52cb187828d7786798e5f25b0 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Wed, 16 Jun 2021 15:19:10 +0100 Subject: [PATCH 12/67] Make backwards compatible with CountedMap (#9126) --- frame/staking/src/lib.rs | 46 +++++++++++++++--------------- frame/staking/src/mock.rs | 4 +-- frame/staking/src/testing_utils.rs | 4 +-- frame/staking/src/tests.rs | 4 +-- 4 files changed, 29 insertions(+), 29 deletions(-) diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index 
b6d02fa2fd30d..ce1f5afc64c1d 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -761,8 +761,8 @@ pub mod migrations { use super::*; pub fn pre_migrate() -> Result<(), &'static str> { - assert!(CurrentValidatorsCount::::get().is_zero(), "CurrentValidatorsCount already set."); - assert!(CurrentNominatorsCount::::get().is_zero(), "CurrentNominatorsCount already set."); + assert!(CounterForValidators::::get().is_zero(), "CounterForValidators already set."); + assert!(CounterForNominators::::get().is_zero(), "CounterForNominators already set."); assert!(StorageVersion::::get() == Releases::V6_0_0); Ok(()) } @@ -772,8 +772,8 @@ pub mod migrations { let validator_count = Validators::::iter().count() as u32; let nominator_count = Nominators::::iter().count() as u32; - CurrentValidatorsCount::::put(validator_count); - CurrentNominatorsCount::::put(nominator_count); + CounterForValidators::::put(validator_count); + CounterForNominators::::put(nominator_count); StorageVersion::::put(Releases::V7_0_0); log!(info, "Completed staking migration to Releases::V7_0_0"); @@ -998,14 +998,14 @@ pub mod pallet { /// The map from (wannabe) validator stash key to the preferences of that validator. /// - /// When updating this storage item, you must also update the `CurrentValidatorsCount`. + /// When updating this storage item, you must also update the `CounterForValidators`. #[pallet::storage] #[pallet::getter(fn validators)] pub type Validators = StorageMap<_, Twox64Concat, T::AccountId, ValidatorPrefs, ValueQuery>; /// A tracker to keep count of the number of items in the `Validators` map. #[pallet::storage] - pub type CurrentValidatorsCount = StorageValue<_, u32, ValueQuery>; + pub type CounterForValidators = StorageValue<_, u32, ValueQuery>; /// The maximum validator count before we stop allowing new validators to join. /// @@ -1015,14 +1015,14 @@ pub mod pallet { /// The map from nominator stash key to the set of stash keys of all validators to nominate. /// - /// When updating this storage item, you must also update the `CurrentNominatorsCount`. + /// When updating this storage item, you must also update the `CounterForNominators`. #[pallet::storage] #[pallet::getter(fn nominators)] pub type Nominators = StorageMap<_, Twox64Concat, T::AccountId, Nominations>; /// A tracker to keep count of the number of items in the `Nominators` map. #[pallet::storage] - pub type CurrentNominatorsCount = StorageValue<_, u32, ValueQuery>; + pub type CounterForNominators = StorageValue<_, u32, ValueQuery>; /// The maximum nominator count before we stop allowing new validators to join. /// @@ -1717,7 +1717,7 @@ pub mod pallet { // If this error is reached, we need to adjust the `MinValidatorBond` and start calling `chill_other`. // Until then, we explicitly block new validators to protect the runtime. if let Some(max_validators) = MaxValidatorsCount::::get() { - ensure!(CurrentValidatorsCount::::get() < max_validators, Error::::TooManyValidators); + ensure!(CounterForValidators::::get() < max_validators, Error::::TooManyValidators); } let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; @@ -1758,7 +1758,7 @@ pub mod pallet { // If this error is reached, we need to adjust the `MinNominatorBond` and start calling `chill_other`. // Until then, we explicitly block new nominators to protect the runtime. 
if let Some(max_nominators) = MaxNominatorsCount::::get() { - ensure!(CurrentNominatorsCount::::get() < max_nominators, Error::::TooManyNominators); + ensure!(CounterForNominators::::get() < max_nominators, Error::::TooManyNominators); } let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; @@ -2966,42 +2966,42 @@ impl Pallet { } /// This function will add a nominator to the `Nominators` storage map, - /// and keep track of the `CurrentNominatorsCount`. + /// and keep track of the `CounterForNominators`. /// /// If the nominator already exists, their nominations will be updated. pub fn do_add_nominator(who: &T::AccountId, nominations: Nominations) { if !Nominators::::contains_key(who) { - CurrentNominatorsCount::::mutate(|x| x.saturating_inc()) + CounterForNominators::::mutate(|x| x.saturating_inc()) } Nominators::::insert(who, nominations); } /// This function will remove a nominator from the `Nominators` storage map, - /// and keep track of the `CurrentNominatorsCount`. + /// and keep track of the `CounterForNominators`. pub fn do_remove_nominator(who: &T::AccountId) { if Nominators::::contains_key(who) { Nominators::::remove(who); - CurrentNominatorsCount::::mutate(|x| x.saturating_dec()); + CounterForNominators::::mutate(|x| x.saturating_dec()); } } /// This function will add a validator to the `Validators` storage map, - /// and keep track of the `CurrentValidatorsCount`. + /// and keep track of the `CounterForValidators`. /// /// If the validator already exists, their preferences will be updated. pub fn do_add_validator(who: &T::AccountId, prefs: ValidatorPrefs) { if !Validators::::contains_key(who) { - CurrentValidatorsCount::::mutate(|x| x.saturating_inc()) + CounterForValidators::::mutate(|x| x.saturating_inc()) } Validators::::insert(who, prefs); } /// This function will remove a validator from the `Validators` storage map, - /// and keep track of the `CurrentValidatorsCount`. + /// and keep track of the `CounterForValidators`. 
pub fn do_remove_validator(who: &T::AccountId) { if Validators::::contains_key(who) { Validators::::remove(who); - CurrentValidatorsCount::::mutate(|x| x.saturating_dec()); + CounterForValidators::::mutate(|x| x.saturating_dec()); } } } @@ -3017,11 +3017,11 @@ impl frame_election_provider_support::ElectionDataProvider, ) -> data_provider::Result<(Vec<(T::AccountId, VoteWeight, Vec)>, Weight)> { - let nominator_count = CurrentNominatorsCount::::get(); - let validator_count = CurrentValidatorsCount::::get(); + let nominator_count = CounterForNominators::::get(); + let validator_count = CounterForValidators::::get(); let voter_count = nominator_count.saturating_add(validator_count) as usize; - debug_assert!(>::iter().count() as u32 == CurrentNominatorsCount::::get()); - debug_assert!(>::iter().count() as u32 == CurrentValidatorsCount::::get()); + debug_assert!(>::iter().count() as u32 == CounterForNominators::::get()); + debug_assert!(>::iter().count() as u32 == CounterForValidators::::get()); if maybe_max_len.map_or(false, |max_len| voter_count > max_len) { return Err("Voter snapshot too big"); @@ -3037,7 +3037,7 @@ impl frame_election_provider_support::ElectionDataProvider) -> data_provider::Result<(Vec, Weight)> { - let target_count = CurrentValidatorsCount::::get() as usize; + let target_count = CounterForValidators::::get() as usize; if maybe_max_len.map_or(false, |max_len| target_count > max_len) { return Err("Target snapshot too big"); diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index 35a1fa45284da..e0079cc3f375a 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -498,8 +498,8 @@ fn post_conditions() { fn check_count() { let nominator_count = Nominators::::iter().count() as u32; let validator_count = Validators::::iter().count() as u32; - assert_eq!(nominator_count, CurrentNominatorsCount::::get()); - assert_eq!(validator_count, CurrentValidatorsCount::::get()); + assert_eq!(nominator_count, CounterForNominators::::get()); + assert_eq!(validator_count, CounterForValidators::::get()); } fn check_ledgers() { diff --git a/frame/staking/src/testing_utils.rs b/frame/staking/src/testing_utils.rs index 8a4392edfed25..c643cb283373b 100644 --- a/frame/staking/src/testing_utils.rs +++ b/frame/staking/src/testing_utils.rs @@ -30,9 +30,9 @@ const SEED: u32 = 0; /// This function removes all validators and nominators from storage. pub fn clear_validators_and_nominators() { Validators::::remove_all(None); - CurrentValidatorsCount::::kill(); + CounterForValidators::::kill(); Nominators::::remove_all(None); - CurrentNominatorsCount::::kill(); + CounterForNominators::::kill(); } /// Grab a funded user. 
diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 976ee34d9b8eb..5d42d866b1336 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -4107,9 +4107,9 @@ mod election_data_provider { #[test] fn capped_stakers_works() { ExtBuilder::default().build_and_execute(|| { - let validator_count = CurrentValidatorsCount::::get(); + let validator_count = CounterForValidators::::get(); assert_eq!(validator_count, 3); - let nominator_count = CurrentNominatorsCount::::get(); + let nominator_count = CounterForNominators::::get(); assert_eq!(nominator_count, 1); // Change the maximums From ede9bc19e883005d6bb71f325f13c90e03cea9c2 Mon Sep 17 00:00:00 2001 From: Guillaume Thiolliere Date: Wed, 16 Jun 2021 18:19:09 +0200 Subject: [PATCH 13/67] Avoid running some test 2 times when unneeded (#9124) * avoid running some test 2 times when unneeded * Update frame/support/test/Cargo.toml --- .gitlab-ci.yml | 2 +- frame/support/test/Cargo.toml | 2 ++ frame/support/test/tests/pallet_ui.rs | 1 - 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 9b28bb2e25a88..2ffa8a4b977ba 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -319,7 +319,7 @@ test-linux-stable: &test-linux script: # this job runs all tests in former runtime-benchmarks, frame-staking and wasmtime tests - time cargo test --workspace --locked --release --verbose --features runtime-benchmarks --manifest-path bin/node/cli/Cargo.toml - - time cargo test -p frame-support-test --features=conditional-storage --manifest-path frame/support/test/Cargo.toml # does not reuse cache 1 min 44 sec + - time cargo test -p frame-support-test --features=conditional-storage --manifest-path frame/support/test/Cargo.toml --test pallet # does not reuse cache 1 min 44 sec - SUBSTRATE_TEST_TIMEOUT=1 time cargo test -p substrate-test-utils --release --verbose --locked -- --ignored timeout - sccache -s diff --git a/frame/support/test/Cargo.toml b/frame/support/test/Cargo.toml index 1a979cdee6f8e..ce5c8ea7de1fb 100644 --- a/frame/support/test/Cargo.toml +++ b/frame/support/test/Cargo.toml @@ -40,4 +40,6 @@ std = [ "sp-state-machine", ] try-runtime = ["frame-support/try-runtime"] +# WARNING: CI only execute pallet test with this feature, +# if the feature intended to be used outside, CI and this message need to be updated. conditional-storage = [] diff --git a/frame/support/test/tests/pallet_ui.rs b/frame/support/test/tests/pallet_ui.rs index fea7a2c7e7ad4..e5f4a54dfb000 100644 --- a/frame/support/test/tests/pallet_ui.rs +++ b/frame/support/test/tests/pallet_ui.rs @@ -16,7 +16,6 @@ // limitations under the License. #[rustversion::attr(not(stable), ignore)] -#[cfg(not(feature = "conditional-storage"))] #[test] fn pallet_ui() { // As trybuild is using `cargo check`, we don't need the real WASM binaries. From e447c49537e66d0b6e3a408c6ae5c424c7344a7c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 17 Jun 2021 09:27:53 +0200 Subject: [PATCH 14/67] Aura: Skip initialize block & remove cache (#9132) This instructs the Aura runtime api to skip initialize block, when requesting the authorities. This is important, as we don't want to use the new authorities that should be used from the next block on. Besides that, it removes the caching stuff. The cache is not available on full nodes anyway. In the future we should store the authorities probably in the aux store. 
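For illustration, a minimal sketch of how a runtime API method opts out of `initialize_block` with the same attribute this patch adds to `AuraApi::authorities`; the trait and method names here are hypothetical, only the macro and attribute are taken from the change below:

sp_api::decl_runtime_apis! {
	/// Hypothetical trait, shown only to illustrate the attribute.
	pub trait ExampleAuthoritiesApi<AuthorityId: codec::Codec> {
		/// Runs against the state of the queried block itself, without first
		/// applying `initialize_block` for the following block. An authority
		/// set change scheduled to activate in the next block is therefore
		/// not returned prematurely, which is what block verification needs.
		#[skip_initialize_block]
		fn authorities() -> Vec<AuthorityId>;
	}
}
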
--- client/consensus/aura/src/import_queue.rs | 39 ----------------------- client/consensus/aura/src/lib.rs | 13 +++----- primitives/consensus/aura/src/lib.rs | 1 + 3 files changed, 5 insertions(+), 48 deletions(-) diff --git a/client/consensus/aura/src/import_queue.rs b/client/consensus/aura/src/import_queue.rs index 8034fd08a7eb6..c3faa5382686e 100644 --- a/client/consensus/aura/src/import_queue.rs +++ b/client/consensus/aura/src/import_queue.rs @@ -317,43 +317,6 @@ impl Verifier for AuraVerifier w } } -fn initialize_authorities_cache(client: &C) -> Result<(), ConsensusError> where - A: Codec + Debug, - B: BlockT, - C: ProvideRuntimeApi + BlockOf + ProvideCache + UsageProvider, - C::Api: AuraApi, -{ - // no cache => no initialization - let cache = match client.cache() { - Some(cache) => cache, - None => return Ok(()), - }; - - let best_hash = client.usage_info().chain.best_hash; - - // check if we already have initialized the cache - let map_err = |error| sp_consensus::Error::from(sp_consensus::Error::ClientImport( - format!( - "Error initializing authorities cache: {}", - error, - ))); - - let block_id = BlockId::hash(best_hash); - let authorities: Option> = cache - .get_at(&well_known_cache_keys::AUTHORITIES, &block_id) - .unwrap_or(None) - .and_then(|(_, _, v)| Decode::decode(&mut &v[..]).ok()); - if authorities.is_some() { - return Ok(()); - } - - let authorities = crate::authorities(client, &block_id)?; - cache.initialize(&well_known_cache_keys::AUTHORITIES, authorities.encode()) - .map_err(map_err)?; - - Ok(()) -} - /// Should we check for equivocation of a block author? #[derive(Debug, Clone, Copy)] pub enum CheckForEquivocation { @@ -438,8 +401,6 @@ pub fn import_queue<'a, P, Block, I, C, S, CAW, CIDP>( CIDP: CreateInherentDataProviders + Sync + Send + 'static, CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, { - initialize_authorities_cache(&*client)?; - let verifier = build_verifier::( BuildVerifierParams { client, diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 702e4dc0bf1bd..d0b0cefe8ddca 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -45,7 +45,7 @@ use sp_consensus::{ BlockOrigin, Error as ConsensusError, SelectChain, }; use sc_client_api::{backend::AuxStore, BlockOf, UsageProvider}; -use sp_blockchain::{Result as CResult, well_known_cache_keys, ProvideCache, HeaderBackend}; +use sp_blockchain::{Result as CResult, ProvideCache, HeaderBackend}; use sp_core::crypto::Public; use sp_application_crypto::{AppKey, AppPublic}; use sp_runtime::{generic::BlockId, traits::NumberFor}; @@ -546,14 +546,9 @@ fn authorities(client: &C, at: &BlockId) -> Result, Consensus C: ProvideRuntimeApi + BlockOf + ProvideCache, C::Api: AuraApi, { - client - .cache() - .and_then(|cache| cache - .get_at(&well_known_cache_keys::AUTHORITIES, at) - .unwrap_or(None) - .and_then(|(_, _, v)| Decode::decode(&mut &v[..]).ok()) - ) - .or_else(|| AuraApi::authorities(&*client.runtime_api(), at).ok()) + client.runtime_api() + .authorities(at) + .ok() .ok_or_else(|| sp_consensus::Error::InvalidAuthoritiesSet.into()) } diff --git a/primitives/consensus/aura/src/lib.rs b/primitives/consensus/aura/src/lib.rs index a28e681fda27f..ef888a2ab855b 100644 --- a/primitives/consensus/aura/src/lib.rs +++ b/primitives/consensus/aura/src/lib.rs @@ -90,6 +90,7 @@ sp_api::decl_runtime_apis! { fn slot_duration() -> SlotDuration; // Return the current set of authorities. 
+ #[skip_initialize_block] fn authorities() -> Vec; } } From 34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Thu, 17 Jun 2021 16:37:43 +0200 Subject: [PATCH 15/67] Make it possible to override maximum payload of RPC (#9019) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Make it possible to override maximum payload of RPC * Finish it. * remove todo. * Update client/cli/src/commands/run_cmd.rs * Apply suggestions from code review Co-authored-by: David * Apply suggestions from code review Co-authored-by: David * Incorporate suggestions * Thread rpc_max_payload from configuration to trace_block * Try obey line gitlab/check_line_width.sh * update state rpc tests * Improve readbility * Apply suggestions from code review * Apply suggestions from code review Co-authored-by: Bastian Köcher Co-authored-by: Zeke Mostov <32168567+emostov@users.noreply.github.com> Co-authored-by: David --- client/cli/src/commands/run_cmd.rs | 32 +++++++++++++++++--------- client/cli/src/config.rs | 6 +++++ client/executor/src/native_executor.rs | 2 +- client/rpc-servers/src/lib.rs | 18 +++++++++++---- client/rpc/src/state/mod.rs | 7 ++++-- client/rpc/src/state/state_full.rs | 21 +++++++++++++---- client/rpc/src/state/tests.rs | 8 +++++++ client/service/src/builder.rs | 1 + client/service/src/config.rs | 2 ++ client/service/src/lib.rs | 2 ++ client/service/test/src/lib.rs | 1 + client/tracing/src/block/mod.rs | 11 ++++++--- test-utils/test-runner/src/utils.rs | 1 + utils/browser/src/lib.rs | 1 + 14 files changed, 87 insertions(+), 26 deletions(-) diff --git a/client/cli/src/commands/run_cmd.rs b/client/cli/src/commands/run_cmd.rs index 3e5823ef733aa..285ffc9fdca16 100644 --- a/client/cli/src/commands/run_cmd.rs +++ b/client/cli/src/commands/run_cmd.rs @@ -42,12 +42,11 @@ pub struct RunCmd { /// The node will be started with the authority role and actively /// participate in any consensus task that it can (e.g. depending on /// availability of local keys). - #[structopt( - long = "validator" - )] + #[structopt(long)] pub validator: bool, - /// Disable GRANDPA voter when running in validator mode, otherwise disable the GRANDPA observer. + /// Disable GRANDPA voter when running in validator mode, otherwise disable the GRANDPA + /// observer. #[structopt(long)] pub no_grandpa: bool, @@ -57,8 +56,8 @@ pub struct RunCmd { /// Listen to all RPC interfaces. /// - /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC proxy - /// server to filter out dangerous methods. More details: + /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC + /// proxy server to filter out dangerous methods. More details: /// . /// Use `--unsafe-rpc-external` to suppress the warning if you understand the risks. #[structopt(long = "rpc-external")] @@ -74,8 +73,8 @@ pub struct RunCmd { /// /// - `Unsafe`: Exposes every RPC method. /// - `Safe`: Exposes only a safe subset of RPC methods, denying unsafe RPC methods. - /// - `Auto`: Acts as `Safe` if RPC is served externally, e.g. when `--{rpc,ws}-external` is passed, - /// otherwise acts as `Unsafe`. + /// - `Auto`: Acts as `Safe` if RPC is served externally, e.g. when `--{rpc,ws}-external` is + /// passed, otherwise acts as `Unsafe`. #[structopt( long, value_name = "METHOD SET", @@ -88,8 +87,9 @@ pub struct RunCmd { /// Listen to all Websocket interfaces. /// - /// Default is local. 
Note: not all RPC methods are safe to be exposed publicly. Use an RPC proxy - /// server to filter out dangerous methods. More details: . + /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC + /// proxy server to filter out dangerous methods. More details: + /// . /// Use `--unsafe-ws-external` to suppress the warning if you understand the risks. #[structopt(long = "ws-external")] pub ws_external: bool, @@ -100,6 +100,11 @@ pub struct RunCmd { #[structopt(long = "unsafe-ws-external")] pub unsafe_ws_external: bool, + /// Set the the maximum RPC payload size for both requests and responses (both http and ws), in + /// megabytes. Default is 15MiB. + #[structopt(long = "rpc-max-payload")] + pub rpc_max_payload: Option, + /// Listen to all Prometheus data source interfaces. /// /// Default is local. @@ -194,7 +199,8 @@ pub struct RunCmd { #[structopt(long, conflicts_with_all = &["alice", "charlie", "dave", "eve", "ferdie", "one", "two"])] pub bob: bool, - /// Shortcut for `--name Charlie --validator` with session keys for `Charlie` added to keystore. + /// Shortcut for `--name Charlie --validator` with session keys for `Charlie` added to + /// keystore. #[structopt(long, conflicts_with_all = &["alice", "bob", "dave", "eve", "ferdie", "one", "two"])] pub charlie: bool, @@ -435,6 +441,10 @@ impl CliConfiguration for RunCmd { Ok(self.rpc_methods.into()) } + fn rpc_max_payload(&self) -> Result> { + Ok(self.rpc_max_payload) + } + fn transaction_pool(&self) -> Result { Ok(self.pool_config.transaction_pool()) } diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index 62afc849c09fb..8e435da253c04 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -372,6 +372,11 @@ pub trait CliConfiguration: Sized { Ok(Some(Vec::new())) } + /// Get maximum RPC payload. + fn rpc_max_payload(&self) -> Result> { + Ok(None) + } + /// Get the prometheus configuration (`None` if disabled) /// /// By default this is `None`. @@ -535,6 +540,7 @@ pub trait CliConfiguration: Sized { rpc_ws_max_connections: self.rpc_ws_max_connections()?, rpc_http_threads: self.rpc_http_threads()?, rpc_cors: self.rpc_cors(is_dev)?, + rpc_max_payload: self.rpc_max_payload()?, prometheus_config: self.prometheus_config(DCV::prometheus_listen_port())?, telemetry_endpoints, telemetry_external_transport: self.telemetry_external_transport()?, diff --git a/client/executor/src/native_executor.rs b/client/executor/src/native_executor.rs index c94088a155260..6fc34b6f1a322 100644 --- a/client/executor/src/native_executor.rs +++ b/client/executor/src/native_executor.rs @@ -291,7 +291,7 @@ impl NativeExecutor { default_heap_pages: Option, max_runtime_instances: usize, ) -> Self { - let extended = D::ExtendHostFunctions::host_functions(); + let extended = D::ExtendHostFunctions::host_functions(); let mut host_functions = sp_io::SubstrateHostFunctions::host_functions() .into_iter() // filter out any host function overrides provided. diff --git a/client/rpc-servers/src/lib.rs b/client/rpc-servers/src/lib.rs index cb2704efc82ab..c93451e5cc678 100644 --- a/client/rpc-servers/src/lib.rs +++ b/client/rpc-servers/src/lib.rs @@ -27,8 +27,10 @@ use jsonrpc_core::{IoHandlerExtension, MetaIoHandler}; use log::error; use pubsub::PubSubMetadata; +const MEGABYTE: usize = 1024 * 1024; + /// Maximal payload accepted by RPC servers. -pub const MAX_PAYLOAD: usize = 15 * 1024 * 1024; +pub const RPC_MAX_PAYLOAD_DEFAULT: usize = 15 * MEGABYTE; /// Default maximum number of connections for WS RPC servers. 
const WS_MAX_CONNECTIONS: usize = 100; @@ -85,7 +87,10 @@ mod inner { thread_pool_size: Option, cors: Option<&Vec>, io: RpcHandler, + maybe_max_payload_mb: Option, ) -> io::Result { + let max_request_body_size = maybe_max_payload_mb.map(|mb| mb.saturating_mul(MEGABYTE)) + .unwrap_or(RPC_MAX_PAYLOAD_DEFAULT); http::ServerBuilder::new(io) .threads(thread_pool_size.unwrap_or(HTTP_THREADS)) .health_api(("/health", "system_health")) @@ -96,7 +101,7 @@ mod inner { http::RestApi::Unsecure }) .cors(map_cors::(cors)) - .max_request_body_size(MAX_PAYLOAD) + .max_request_body_size(max_request_body_size) .start_http(addr) } @@ -120,14 +125,19 @@ mod inner { /// Start WS server listening on given address. /// /// **Note**: Only available if `not(target_os = "unknown")`. - pub fn start_ws>> ( + pub fn start_ws< + M: pubsub::PubSubMetadata + From>, + >( addr: &std::net::SocketAddr, max_connections: Option, cors: Option<&Vec>, io: RpcHandler, + maybe_max_payload_mb: Option, ) -> io::Result { + let rpc_max_payload = maybe_max_payload_mb.map(|mb| mb.saturating_mul(MEGABYTE)) + .unwrap_or(RPC_MAX_PAYLOAD_DEFAULT); ws::ServerBuilder::with_meta_extractor(io, |context: &ws::RequestContext| context.sender().into()) - .max_payload(MAX_PAYLOAD) + .max_payload(rpc_max_payload) .max_connections(max_connections.unwrap_or(WS_MAX_CONNECTIONS)) .allowed_origins(map_cors(cors)) .allowed_hosts(hosts_filtering(cors.is_some())) diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index 803fc6797ee9a..ad9712a41db6b 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -182,6 +182,7 @@ pub fn new_full( client: Arc, subscriptions: SubscriptionManager, deny_unsafe: DenyUnsafe, + rpc_max_payload: Option, ) -> (State, ChildState) where Block: BlockT + 'static, @@ -193,9 +194,11 @@ pub fn new_full( Client::Api: Metadata, { let child_backend = Box::new( - self::state_full::FullState::new(client.clone(), subscriptions.clone()) + self::state_full::FullState::new( + client.clone(), subscriptions.clone(), rpc_max_payload + ) ); - let backend = Box::new(self::state_full::FullState::new(client, subscriptions)); + let backend = Box::new(self::state_full::FullState::new(client, subscriptions, rpc_max_payload)); (State { backend, deny_unsafe }, ChildState { backend: child_backend }) } diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index bea7ddfbb3b76..218cb35f0086e 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -67,7 +67,8 @@ struct QueryStorageRange { pub struct FullState { client: Arc, subscriptions: SubscriptionManager, - _phantom: PhantomData<(BE, Block)> + _phantom: PhantomData<(BE, Block)>, + rpc_max_payload: Option, } impl FullState @@ -78,8 +79,12 @@ impl FullState Block: BlockT + 'static, { /// Create new state API backend for full nodes. - pub fn new(client: Arc, subscriptions: SubscriptionManager) -> Self { - Self { client, subscriptions, _phantom: PhantomData } + pub fn new( + client: Arc, + subscriptions: SubscriptionManager, + rpc_max_payload: Option, + ) -> Self { + Self { client, subscriptions, _phantom: PhantomData, rpc_max_payload } } /// Returns given block hash or best block hash if None is passed. 
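For illustration, the megabyte-to-byte conversion that both the HTTP and WS servers now share can be sketched as a standalone helper; the function name is an assumption, the constants mirror the ones introduced in `rpc-servers` above:

const MEGABYTE: usize = 1024 * 1024;
const RPC_MAX_PAYLOAD_DEFAULT: usize = 15 * MEGABYTE;

/// Resolve the effective payload limit from an optional `--rpc-max-payload`
/// value given in megabytes, falling back to the previous hard-coded 15 MiB.
fn effective_max_payload(maybe_max_payload_mb: Option<usize>) -> usize {
	maybe_max_payload_mb
		.map(|mb| mb.saturating_mul(MEGABYTE))
		.unwrap_or(RPC_MAX_PAYLOAD_DEFAULT)
}

// effective_max_payload(None)     == 15_728_640 bytes (the old fixed limit)
// effective_max_payload(Some(32)) == 33_554_432 bytes
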
@@ -540,9 +545,15 @@ impl StateBackend for FullState, storage_keys: Option, ) -> FutureResult { + let block_executor = sc_tracing::block::BlockExecutor::new( + self.client.clone(), + block, + targets, + storage_keys, + self.rpc_max_payload, + ); Box::new(result( - sc_tracing::block::BlockExecutor::new(self.client.clone(), block, targets, storage_keys) - .trace_block() + block_executor.trace_block() .map_err(|e| invalid_block::(block, None, e.to_string())) )) } diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index cfc27c7bf525e..e413827552c9d 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -63,6 +63,7 @@ fn should_return_storage() { Arc::new(client), SubscriptionManager::new(Arc::new(TaskExecutor)), DenyUnsafe::No, + None, ); let key = StorageKey(KEY.to_vec()); @@ -105,6 +106,7 @@ fn should_return_child_storage() { client, SubscriptionManager::new(Arc::new(TaskExecutor)), DenyUnsafe::No, + None, ); let child_key = prefixed_storage_key(); let key = StorageKey(b"key".to_vec()); @@ -144,6 +146,7 @@ fn should_call_contract() { client, SubscriptionManager::new(Arc::new(TaskExecutor)), DenyUnsafe::No, + None, ); assert_matches!( @@ -162,6 +165,7 @@ fn should_notify_about_storage_changes() { client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor)), DenyUnsafe::No, + None, ); api.subscribe_storage(Default::default(), subscriber, None.into()); @@ -200,6 +204,7 @@ fn should_send_initial_storage_changes_and_notifications() { client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor)), DenyUnsafe::No, + None, ); let alice_balance_key = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Alice.into())); @@ -242,6 +247,7 @@ fn should_query_storage() { client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor)), DenyUnsafe::No, + None, ); let mut add_block = |nonce| { @@ -463,6 +469,7 @@ fn should_return_runtime_version() { client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor)), DenyUnsafe::No, + None, ); let result = "{\"specName\":\"test\",\"implName\":\"parity-test\",\"authoringVersion\":1,\ @@ -490,6 +497,7 @@ fn should_notify_on_runtime_version_initially() { client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor)), DenyUnsafe::No, + None, ); api.subscribe_runtime_version(Default::default(), subscriber); diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index ebf600b12f020..ca22322798463 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -804,6 +804,7 @@ fn gen_handler( client.clone(), subscriptions.clone(), deny_unsafe, + config.rpc_max_payload, ); (chain, state, child_state) }; diff --git a/client/service/src/config.rs b/client/service/src/config.rs index f2c5f2c6ed407..c91cf0a4ef5c3 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -95,6 +95,8 @@ pub struct Configuration { pub rpc_cors: Option>, /// RPC methods to expose (by default only a safe subset or all of them). pub rpc_methods: RpcMethods, + /// Maximum payload of rpc request/responses. + pub rpc_max_payload: Option, /// Prometheus endpoint configuration. `None` if disabled. pub prometheus_config: Option, /// Telemetry service URL. `None` if disabled. 
diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 51ee0965ebcf4..afc1209280322 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -387,6 +387,7 @@ fn start_rpc_servers< deny_unsafe(&address, &config.rpc_methods), sc_rpc_server::RpcMiddleware::new(rpc_metrics.clone(), "http") ), + config.rpc_max_payload ), )?.map(|s| waiting::HttpServer(Some(s))), maybe_start_server( @@ -399,6 +400,7 @@ fn start_rpc_servers< deny_unsafe(&address, &config.rpc_methods), sc_rpc_server::RpcMiddleware::new(rpc_metrics.clone(), "ws") ), + config.rpc_max_payload ), )?.map(|s| waiting::WsServer(Some(s))), ))) diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 3999b852ac74c..eb437b1aba0af 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -265,6 +265,7 @@ fn node_config = Result; @@ -174,6 +175,7 @@ pub struct BlockExecutor { block: Block::Hash, targets: Option, storage_keys: Option, + rpc_max_payload: usize, } impl BlockExecutor @@ -189,8 +191,11 @@ impl BlockExecutor block: Block::Hash, targets: Option, storage_keys: Option, + rpc_max_payload: Option, ) -> Self { - Self { client, block, targets, storage_keys } + let rpc_max_payload = rpc_max_payload.map(|mb| mb.saturating_mul(MEGABYTE)) + .unwrap_or(RPC_MAX_PAYLOAD_DEFAULT); + Self { client, block, targets, storage_keys, rpc_max_payload } } /// Execute block, record all spans and events belonging to `Self::targets` @@ -260,7 +265,7 @@ impl BlockExecutor tracing::debug!(target: "state_tracing", "Captured {} spans and {} events", spans.len(), events.len()); let approx_payload_size = BASE_PAYLOAD + events.len() * AVG_EVENT + spans.len() * AVG_SPAN; - let response = if approx_payload_size > MAX_PAYLOAD { + let response = if approx_payload_size > self.rpc_max_payload { TraceBlockResponse::TraceError(TraceError { error: "Payload likely exceeds max payload size of RPC server.".to_string() diff --git a/test-utils/test-runner/src/utils.rs b/test-utils/test-runner/src/utils.rs index 4f5390a7eb863..fae527ededf97 100644 --- a/test-utils/test-runner/src/utils.rs +++ b/test-utils/test-runner/src/utils.rs @@ -127,6 +127,7 @@ pub fn default_config(task_executor: TaskExecutor, mut chain_spec: Box Date: Thu, 17 Jun 2021 18:01:27 +0200 Subject: [PATCH 16/67] double the allocator limit (#9102) * double the allocator limit * 32 MiB should be enough for everybody. * Update doc Co-authored-by: Sergei Shulepov --- primitives/allocator/src/freeing_bump.rs | 33 +++++++++++++++++++----- 1 file changed, 26 insertions(+), 7 deletions(-) diff --git a/primitives/allocator/src/freeing_bump.rs b/primitives/allocator/src/freeing_bump.rs index 14746c8784f8d..64ba136f9a354 100644 --- a/primitives/allocator/src/freeing_bump.rs +++ b/primitives/allocator/src/freeing_bump.rs @@ -36,7 +36,7 @@ //! //! For implementing freeing we maintain a linked lists for each order. The maximum supported //! allocation size is capped, therefore the number of orders and thus the linked lists is as well -//! limited. Currently, the maximum size of an allocation is 16 MiB. +//! limited. Currently, the maximum size of an allocation is 32 MiB. //! //! When the allocator serves an allocation request it first checks the linked list for the respective //! order. If it doesn't have any free chunks, the allocator requests memory from the bump allocator. @@ -44,6 +44,24 @@ //! //! Upon deallocation we get the order of the allocation from its header and then add that //! 
allocation to the linked list for the respective order. +//! +//! # Caveats +//! +//! This is a fast allocator but it is also dumb. There are specifically two main shortcomings +//! that the user should keep in mind: +//! +//! - Once the bump allocator space is exhausted, there is no way to reclaim the memory. This means +//! that it's possible to end up in a situation where there are no live allocations yet a new +//! allocation will fail. +//! +//! Let's look into an example. Given a heap of 32 MiB. The user makes a 32 MiB allocation that we +//! call `X` . Now the heap is full. Then user deallocates `X`. Since all the space in the bump +//! allocator was consumed by the 32 MiB allocation, allocations of all sizes except 32 MiB will +//! fail. +//! +//! - Sizes of allocations are rounded up to the nearest order. That is, an allocation of 2,00001 MiB +//! will be put into the bucket of 4 MiB. Therefore, typically more than half of the space in allocation +//! will be wasted. This is more pronounced with larger allocation sizes. use crate::Error; use sp_std::{mem, convert::{TryFrom, TryInto}, ops::{Range, Index, IndexMut}}; @@ -78,15 +96,15 @@ macro_rules! trace { // The minimum possible allocation size is chosen to be 8 bytes because in that case we would have // easier time to provide the guaranteed alignment of 8. // -// The maximum possible allocation size was chosen rather arbitrary. 16 MiB should be enough for +// The maximum possible allocation size was chosen rather arbitrary. 32 MiB should be enough for // everybody. // // N_ORDERS - represents the number of orders supported. // // This number corresponds to the number of powers between the minimum possible allocation and -// maximum possible allocation, or: 2^3...2^24 (both ends inclusive, hence 22). -const N_ORDERS: usize = 22; -const MAX_POSSIBLE_ALLOCATION: u32 = 16777216; // 2^24 bytes, 16 MiB +// maximum possible allocation, or: 2^3...2^25 (both ends inclusive, hence 23). +const N_ORDERS: usize = 23; +const MAX_POSSIBLE_ALLOCATION: u32 = 33554432; // 2^25 bytes, 32 MiB const MIN_POSSIBLE_ALLOCATION: u32 = 8; // 2^3 bytes, 8 bytes /// The exponent for the power of two sized block adjusted to the minimum size. @@ -100,6 +118,7 @@ const MIN_POSSIBLE_ALLOCATION: u32 = 8; // 2^3 bytes, 8 bytes /// 64 | 3 /// ... /// 16777216 | 21 +/// 33554432 | 22 /// /// and so on. #[derive(Copy, Clone, PartialEq, Eq, Debug)] @@ -329,7 +348,7 @@ impl FreeingBumpHeapAllocator { } /// Gets requested number of bytes to allocate and returns a pointer. - /// The maximum size which can be allocated at once is 16 MiB. + /// The maximum size which can be allocated at once is 32 MiB. /// There is no minimum size, but whatever size is passed into /// this function is rounded to the next power of two. If the requested /// size is below 8 bytes it will be rounded up to 8 bytes. 
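As an aside, the size-to-order mapping described in the comments above (minimum 8 bytes, new maximum of 32 MiB, sizes rounded up to the next power of two) can be sketched as a small standalone function; the helper name is not part of the allocator:

const MIN_POSSIBLE_ALLOCATION: u32 = 8; // 2^3
const MAX_POSSIBLE_ALLOCATION: u32 = 33554432; // 2^25, 32 MiB

/// Return the order and bucket size a request of `size` bytes would land in,
/// or `None` if it exceeds the maximum possible allocation.
fn order_and_bucket(size: u32) -> Option<(u32, u32)> {
	if size > MAX_POSSIBLE_ALLOCATION {
		return None;
	}
	let clamped = size.max(MIN_POSSIBLE_ALLOCATION);
	let bucket = clamped.next_power_of_two();
	let order = bucket.trailing_zeros() - MIN_POSSIBLE_ALLOCATION.trailing_zeros();
	Some((order, bucket))
}

// order_and_bucket(8)        == Some((0, 8))
// order_and_bucket(9)        == Some((1, 16))
// order_and_bucket(33554432) == Some((22, 33554432))  // the new top order
// order_and_bucket(33554433) == None                  // larger than 32 MiB fails
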
@@ -813,7 +832,7 @@ mod tests { #[test] fn should_get_max_item_size_from_index() { // given - let raw_order = 21; + let raw_order = 22; // when let item_size = Order::from_raw(raw_order).unwrap().size(); From df1165d7b47d43f7b5032512ad41ac8ab2ead117 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Fri, 18 Jun 2021 20:31:00 +0100 Subject: [PATCH 17/67] grandpa: restrict grandpa gossip (#9131) * grandpa: make gossip more conservative (and fair) * grandpa: make round commit timer dependent on gossip_duration * grandpa: add gossip tests * grandpa: reduce variance in tests --- .../src/communication/gossip.rs | 534 +++++++++--------- client/finality-grandpa/src/environment.rs | 5 +- client/network-gossip/src/state_machine.rs | 2 +- 3 files changed, 272 insertions(+), 269 deletions(-) diff --git a/client/finality-grandpa/src/communication/gossip.rs b/client/finality-grandpa/src/communication/gossip.rs index 878a630d0e518..8f46e45d635aa 100644 --- a/client/finality-grandpa/src/communication/gossip.rs +++ b/client/finality-grandpa/src/communication/gossip.rs @@ -110,12 +110,23 @@ const CATCH_UP_PROCESS_TIMEOUT: Duration = Duration::from_secs(30); /// catch up request. const CATCH_UP_THRESHOLD: u64 = 2; -const PROPAGATION_ALL: u32 = 4; //in rounds; -const PROPAGATION_ALL_AUTHORITIES: u32 = 2; //in rounds; -const PROPAGATION_SOME_NON_AUTHORITIES: u32 = 3; //in rounds; -const ROUND_DURATION: u32 = 2; // measured in gossip durations +/// The total round duration measured in periods of gossip duration: +/// 2 gossip durations for prevote timer +/// 2 gossip durations for precommit timer +/// 1 gossip duration for precommits to spread +const ROUND_DURATION: u32 = 5; -const MIN_LUCKY: usize = 5; +/// The period, measured in rounds, since the latest round start, after which we will start +/// propagating gossip messages to more nodes than just the lucky ones. +const PROPAGATION_SOME: f32 = 1.5; + +/// The period, measured in rounds, since the latest round start, after which we will start +/// propagating gossip messages to all the nodes we are connected to. +const PROPAGATION_ALL: f32 = 3.0; + +/// Assuming a network of 3000 nodes, using a fanout of 4, after about 6 iterations +/// of gossip a message has very likely reached all nodes on the network (`log4(3000)`). +const LUCKY_PEERS: usize = 4; type Report = (PeerId, ReputationChange); @@ -459,6 +470,7 @@ impl Misbehavior { } } +#[derive(Debug)] struct PeerInfo { view: View, roles: ObservedRole, @@ -473,19 +485,27 @@ impl PeerInfo { } } -/// The peers we're connected do in gossip. +/// The peers we're connected to in gossip. struct Peers { inner: HashMap>, - lucky_peers: HashSet, - lucky_authorities: HashSet, + /// The randomly picked set of `LUCKY_PEERS` we'll gossip to in the first stage of round + /// gossiping. + first_stage_peers: HashSet, + /// The randomly picked set of peers we'll gossip to in the second stage of gossiping if the + /// first stage didn't allow us to spread the voting data enough to conclude the round. This set + /// should have size `sqrt(connected_peers)`. + second_stage_peers: HashSet, + /// The randomly picked set of `LUCKY_PEERS` light clients we'll gossip commit messages to. 
+ lucky_light_peers: HashSet, } impl Default for Peers { fn default() -> Self { Peers { inner: HashMap::new(), - lucky_peers: HashSet::new(), - lucky_authorities: HashSet::new(), + first_stage_peers: HashSet::new(), + second_stage_peers: HashSet::new(), + lucky_light_peers: HashSet::new(), } } } @@ -493,14 +513,18 @@ impl Default for Peers { impl Peers { fn new_peer(&mut self, who: PeerId, role: ObservedRole) { match role { - ObservedRole::Authority if self.lucky_authorities.len() < MIN_LUCKY => { - self.lucky_authorities.insert(who.clone()); + ObservedRole::Authority if self.first_stage_peers.len() < LUCKY_PEERS => { + self.first_stage_peers.insert(who.clone()); } - ObservedRole::Full if self.lucky_peers.len() < MIN_LUCKY => { - self.lucky_peers.insert(who.clone()); + ObservedRole::Authority if self.second_stage_peers.len() < LUCKY_PEERS => { + self.second_stage_peers.insert(who.clone()); + } + ObservedRole::Light if self.lucky_light_peers.len() < LUCKY_PEERS => { + self.lucky_light_peers.insert(who.clone()); } _ => {} } + self.inner.insert(who, PeerInfo::new(role)); } @@ -508,14 +532,17 @@ impl Peers { self.inner.remove(who); // This does not happen often enough compared to round duration, // so we don't reshuffle. - self.lucky_peers.remove(who); - self.lucky_authorities.remove(who); + self.first_stage_peers.remove(who); + self.second_stage_peers.remove(who); + self.lucky_light_peers.remove(who); } // returns a reference to the new view, if the peer is known. - fn update_peer_state(&mut self, who: &PeerId, update: NeighborPacket) - -> Result>, Misbehavior> - { + fn update_peer_state( + &mut self, + who: &PeerId, + update: NeighborPacket, + ) -> Result>, Misbehavior> { let peer = match self.inner.get_mut(who) { None => return Ok(None), Some(p) => p, @@ -563,69 +590,93 @@ impl Peers { self.inner.get(who) } - fn connected_authorities(&self) -> usize { - self.inner - .iter() - .filter(|(_, info)| matches!(info.roles, ObservedRole::Authority)) - .count() - } + fn reshuffle(&mut self) { + // we want to randomly select peers into three sets according to the following logic: + // - first set: LUCKY_PEERS random peers where at least LUCKY_PEERS/2 are authorities (unless + // we're not connected to that many authorities) + // - second set: max(LUCKY_PEERS, sqrt(peers)) peers where at least LUCKY_PEERS are authorities. + // - third set: LUCKY_PEERS random light client peers + + let shuffled_peers = { + let mut peers = self + .inner + .iter() + .map(|(peer_id, info)| (peer_id.clone(), info.clone())) + .collect::>(); - fn connected_full(&self) -> usize { - self.inner - .iter() - .filter(|(_, info)| matches!(info.roles, ObservedRole::Full)) - .count() - } + peers.shuffle(&mut rand::thread_rng()); + peers + }; - fn reshuffle(&mut self) { - let mut lucky_peers: Vec<_> = self - .inner - .iter() - .filter_map(|(id, info)| { - if matches!(info.roles, ObservedRole::Full) { - Some(id.clone()) - } else { - None + let shuffled_authorities = shuffled_peers.iter().filter_map(|(peer_id, info)| { + if matches!(info.roles, ObservedRole::Authority) { + Some(peer_id) + } else { + None + } + }); + + let mut first_stage_peers = HashSet::new(); + let mut second_stage_peers = HashSet::new(); + + // we start by allocating authorities to the first stage set and when the minimum of + // `LUCKY_PEERS / 2` is filled we start allocating to the second stage set. 
+ let half_lucky = LUCKY_PEERS / 2; + let one_and_a_half_lucky = LUCKY_PEERS + half_lucky; + let mut n_authorities_added = 0; + for peer_id in shuffled_authorities { + if n_authorities_added < half_lucky { + first_stage_peers.insert(peer_id.clone()); + } else if n_authorities_added < one_and_a_half_lucky { + second_stage_peers.insert(peer_id.clone()); + } else { + break; + } + + n_authorities_added += 1; + } + + // fill up first and second sets with remaining peers (either full or authorities) + // prioritizing filling the first set over the second. + let n_second_stage_peers = LUCKY_PEERS.max((shuffled_peers.len() as f32).sqrt() as usize); + for (peer_id, info) in &shuffled_peers { + if info.roles.is_light() { + continue; + } + + if first_stage_peers.len() < LUCKY_PEERS { + first_stage_peers.insert(peer_id.clone()); + second_stage_peers.remove(peer_id); + } else if second_stage_peers.len() < n_second_stage_peers { + if !first_stage_peers.contains(peer_id) { + second_stage_peers.insert(peer_id.clone()); } - }) - .collect(); + } else { + break; + } + } - let mut lucky_authorities: Vec<_> = self - .inner - .iter() - .filter_map(|(id, info)| { - if matches!(info.roles, ObservedRole::Authority) { - Some(id.clone()) + // pick `LUCKY_PEERS` random light peers + let lucky_light_peers = shuffled_peers + .into_iter() + .filter_map(|(peer_id, info)| { + if info.roles.is_light() { + Some(peer_id) } else { None } }) + .take(LUCKY_PEERS) .collect(); - let num_non_authorities = ((lucky_peers.len() as f32).sqrt() as usize) - .max(MIN_LUCKY) - .min(lucky_peers.len()); - - let num_authorities = ((lucky_authorities.len() as f32).sqrt() as usize) - .max(MIN_LUCKY) - .min(lucky_authorities.len()); - - lucky_peers.partial_shuffle(&mut rand::thread_rng(), num_non_authorities); - lucky_peers.truncate(num_non_authorities); - - lucky_authorities.partial_shuffle(&mut rand::thread_rng(), num_authorities); - lucky_authorities.truncate(num_authorities); - - self.lucky_peers.clear(); - self.lucky_peers.extend(lucky_peers.into_iter()); - - self.lucky_authorities.clear(); - self.lucky_authorities.extend(lucky_authorities.into_iter()); + self.first_stage_peers = first_stage_peers; + self.second_stage_peers = second_stage_peers; + self.lucky_light_peers = lucky_light_peers; } } #[derive(Debug, PartialEq)] -pub(super) enum Action { +pub(super) enum Action { // repropagate under given topic, to the given peers, applying cost/benefit to originator. Keep(H, ReputationChange), // discard and process. @@ -1182,76 +1233,40 @@ impl Inner { /// The initial logic for filtering round messages follows the given state /// transitions: /// - /// - State 0: not allowed to anyone (only if our local node is not an authority) - /// - State 1: allowed to random `sqrt(authorities)` - /// - State 2: allowed to all authorities - /// - State 3: allowed to random `sqrt(non-authorities)` - /// - State 4: allowed to all non-authorities + /// - State 1: allowed to LUCKY_PEERS random peers (where at least LUCKY_PEERS/2 are authorities) + /// - State 2: allowed to max(LUCKY_PEERS, sqrt(random peers)) (where at least LUCKY_PEERS are authorities) + /// - State 3: allowed to all peers /// - /// Transitions will be triggered on repropagation attempts by the - /// underlying gossip layer, which should happen every 30 seconds. - fn round_message_allowed(&self, who: &PeerId, peer: &PeerInfo) -> bool { + /// Transitions will be triggered on repropagation attempts by the underlying gossip layer. 
+ fn round_message_allowed(&self, who: &PeerId) -> bool { let round_duration = self.config.gossip_duration * ROUND_DURATION; let round_elapsed = match self.local_view { Some(ref local_view) => local_view.round_start.elapsed(), None => return false, }; - if !self.config.local_role.is_authority() - && round_elapsed < round_duration * PROPAGATION_ALL - { - // non-authority nodes don't gossip any messages right away. we - // assume that authorities (and sentries) are strongly connected, so - // it should be unnecessary for non-authorities to gossip all - // messages right away. + if self.config.local_role.is_light() { return false; } - match peer.roles { - ObservedRole::Authority => { - let authorities = self.peers.connected_authorities(); - - // the target node is an authority, on the first round duration we start by - // sending the message to only `sqrt(authorities)` (if we're - // connected to at least `MIN_LUCKY`). - if round_elapsed < round_duration * PROPAGATION_ALL_AUTHORITIES - && authorities > MIN_LUCKY - { - self.peers.lucky_authorities.contains(who) - } else { - // otherwise we already went through the step above, so - // we won't filter the message and send it to all - // authorities for whom it is polite to do so - true - } - } - ObservedRole::Full => { - // the node is not an authority so we apply stricter filters - if round_elapsed >= round_duration * PROPAGATION_ALL { - // if we waited for 3 (or more) rounds - // then it is allowed to be sent to all peers. - true - } else if round_elapsed >= round_duration * PROPAGATION_SOME_NON_AUTHORITIES { - // otherwise we only send it to `sqrt(non-authorities)`. - self.peers.lucky_peers.contains(who) - } else { - false - } - } - ObservedRole::Light => { - // we never gossip round messages to light clients as they don't - // participate in the full grandpa protocol - false - } + if round_elapsed < round_duration.mul_f32(PROPAGATION_SOME) { + self.peers.first_stage_peers.contains(who) + } else if round_elapsed < round_duration.mul_f32(PROPAGATION_ALL) { + self.peers.first_stage_peers.contains(who) + || self.peers.second_stage_peers.contains(who) + } else { + self.peers + .peer(who) + .map(|info| !info.roles.is_light()) + .unwrap_or(false) } } /// The initial logic for filtering global messages follows the given state /// transitions: /// - /// - State 0: send to `sqrt(authorities)` ++ `sqrt(non-authorities)`. - /// - State 1: send to all authorities - /// - State 2: send to all non-authorities + /// - State 1: allowed to max(LUCKY_PEERS, sqrt(peers)) (where at least LUCKY_PEERS are authorities) + /// - State 2: allowed to all peers /// /// We are more lenient with global messages since there should be a lot /// less global messages than round messages (just commits), and we want @@ -1260,49 +1275,23 @@ impl Inner { /// /// Transitions will be triggered on repropagation attempts by the /// underlying gossip layer, which should happen every 30 seconds. 
- fn global_message_allowed(&self, who: &PeerId, peer: &PeerInfo) -> bool { + fn global_message_allowed(&self, who: &PeerId) -> bool { let round_duration = self.config.gossip_duration * ROUND_DURATION; let round_elapsed = match self.local_view { Some(ref local_view) => local_view.round_start.elapsed(), None => return false, }; - match peer.roles { - ObservedRole::Authority => { - let authorities = self.peers.connected_authorities(); - - // the target node is an authority, on the first round duration we start by - // sending the message to only `sqrt(authorities)` (if we're - // connected to at least `MIN_LUCKY`). - if round_elapsed < round_duration * PROPAGATION_ALL_AUTHORITIES - && authorities > MIN_LUCKY - { - self.peers.lucky_authorities.contains(who) - } else { - // otherwise we already went through the step above, so - // we won't filter the message and send it to all - // authorities for whom it is polite to do so - true - } - } - ObservedRole::Full | ObservedRole::Light => { - let non_authorities = self.peers.connected_full(); - - // the target node is not an authority, on the first and second - // round duration we start by sending the message to only - // `sqrt(non_authorities)` (if we're connected to at least - // `MIN_LUCKY`). - if round_elapsed < round_duration * PROPAGATION_SOME_NON_AUTHORITIES - && non_authorities > MIN_LUCKY - { - self.peers.lucky_peers.contains(who) - } else { - // otherwise we already went through the step above, so - // we won't filter the message and send it to all - // non-authorities for whom it is polite to do so - true - } - } + if self.config.local_role.is_light() { + return false; + } + + if round_elapsed < round_duration.mul_f32(PROPAGATION_ALL) { + self.peers.first_stage_peers.contains(who) + || self.peers.second_stage_peers.contains(who) + || self.peers.lucky_light_peers.contains(who) + } else { + true } } } @@ -1529,9 +1518,12 @@ impl sc_network_gossip::Validator for GossipValidator, who: &PeerId, data: &[u8]) - -> sc_network_gossip::ValidationResult - { + fn validate( + &self, + context: &mut dyn ValidatorContext, + who: &PeerId, + data: &[u8], + ) -> sc_network_gossip::ValidationResult { let (action, broadcast_topics, peer_reply) = self.do_validate(who, data); // not with lock held! 
@@ -1560,9 +1552,9 @@ impl sc_network_gossip::Validator for GossipValidator(&'a self) - -> Box bool + 'a> - { + fn message_allowed<'a>( + &'a self, + ) -> Box bool + 'a> { let (inner, do_rebroadcast) = { use parking_lot::RwLockWriteGuard; @@ -1598,12 +1590,12 @@ impl sc_network_gossip::Validator for GossipValidator= LUCKY_PEERS / 2); + assert_eq!(trial(test(1.0, &all_peers)), LUCKY_PEERS); + + // after more than 1.5 round durations have elapsed we should gossip to + // `sqrt(peers)` we're connected to, but we guarantee that at least 4 of + // those peers are authorities (plus the `LUCKY_PEERS` from the previous + // stage) + assert!(trial(test(PROPAGATION_SOME * 1.1, &authorities)) >= LUCKY_PEERS); + assert_eq!( + trial(test(2.0, &all_peers)), + LUCKY_PEERS + (all_peers.len() as f64).sqrt() as usize, + ); - // only on the fourth attempt should we gossip to all non-authorities - assert_eq!(trial(test(4, &full_nodes)), 30); + // after 3 rounds durations we should gossip to all peers we are + // connected to + assert_eq!(trial(test(PROPAGATION_ALL * 1.1, &all_peers)), all_peers.len()); } #[test] - fn only_restricts_gossip_to_authorities_after_a_minimum_threshold() { - let (val, _) = GossipValidator::::new( - config(), - voter_set_state(), - None, - None, - ); + fn never_gossips_round_messages_to_light_clients() { + let config = config(); + let round_duration = config.gossip_duration * ROUND_DURATION; + let (val, _) = GossipValidator::::new(config, voter_set_state(), None, None); - // the validator start at set id 0 + // the validator starts at set id 0 val.note_set(SetId(0), Vec::new(), |_, _| {}); - let mut authorities = Vec::new(); - for _ in 0..5 { - let peer_id = PeerId::random(); - val.inner.write().peers.new_peer(peer_id.clone(), ObservedRole::Authority); - authorities.push(peer_id); - } + // add a new light client as peer + let light_peer = PeerId::random(); - let mut message_allowed = val.message_allowed(); + val.inner + .write() + .peers + .new_peer(light_peer.clone(), ObservedRole::Light); - // since we're only connected to 5 authorities, we should never restrict - // sending of gossip messages, and instead just allow them to all - // non-authorities on the first attempt. 
- for authority in &authorities { - assert!( - message_allowed( - authority, - MessageIntent::Broadcast, - &crate::communication::round_topic::(1, 0), - &[], - ) - ); - } - } + assert!(!val.message_allowed()( + &light_peer, + MessageIntent::Broadcast, + &crate::communication::round_topic::(1, 0), + &[], + )); - #[test] - fn non_authorities_never_gossip_messages_on_first_round_duration() { - let mut config = config(); - config.gossip_duration = Duration::from_secs(300); // Set to high value to prevent test race - config.local_role = Role::Full; - let round_duration = config.gossip_duration * ROUND_DURATION; + // we reverse the round start time so that the elapsed time is higher + // (which should lead to more peers getting the message) + val.inner.write().local_view.as_mut().unwrap().round_start = + Instant::now() - round_duration * 10; - let (val, _) = GossipValidator::::new(config, voter_set_state(), None, None); + // even after the round has been going for 10 round durations we will never + // gossip to light clients + assert!(!val.message_allowed()( + &light_peer, + MessageIntent::Broadcast, + &crate::communication::round_topic::(1, 0), + &[], + )); - // the validator start at set id 0 - val.note_set(SetId(0), Vec::new(), |_, _| {}); + // update the peer state and local state wrt commits + val.inner + .write() + .peers + .update_peer_state( + &light_peer, + NeighborPacket { + round: Round(1), + set_id: SetId(0), + commit_finalized_height: 1, + }, + ) + .unwrap(); - let mut authorities = Vec::new(); - for _ in 0..100 { - let peer_id = PeerId::random(); - val.inner.write().peers.new_peer(peer_id.clone(), ObservedRole::Authority); - authorities.push(peer_id); - } + val.note_commit_finalized(Round(1), SetId(0), 2, |_, _| {}); - { - let mut message_allowed = val.message_allowed(); - // since our node is not an authority we should **never** gossip any - // messages on the first attempt. 
- for authority in &authorities { - assert!( - !message_allowed( - authority, - MessageIntent::Broadcast, - &crate::communication::round_topic::(1, 0), - &[], - ) - ); - } - } + let commit = { + let commit = finality_grandpa::CompactCommit { + target_hash: H256::random(), + target_number: 2, + precommits: Vec::new(), + auth_data: Vec::new(), + }; - { - val.inner.write().local_view.as_mut().unwrap().round_start = - Instant::now() - round_duration * 4; - let mut message_allowed = val.message_allowed(); - // on the fourth round duration we should allow messages to authorities - // (on the second we would do `sqrt(authorities)`) - for authority in &authorities { - assert!( - message_allowed( - authority, - MessageIntent::Broadcast, - &crate::communication::round_topic::(1, 0), - &[], - ) - ); - } - } + crate::communication::gossip::GossipMessage::::Commit( + crate::communication::gossip::FullCommitMessage { + round: Round(2), + set_id: SetId(0), + message: commit, + }, + ) + .encode() + }; + + // global messages are gossiped to light clients though + assert!(val.message_allowed()( + &light_peer, + MessageIntent::Broadcast, + &crate::communication::global_topic::(0), + &commit, + )); } #[test] fn only_gossip_commits_to_peers_on_same_set() { let (val, _) = GossipValidator::::new(config(), voter_set_state(), None, None); - // the validator start at set id 1 + // the validator starts at set id 1 val.note_set(SetId(1), Vec::new(), |_, _| {}); // add a new peer at set id 1 diff --git a/client/finality-grandpa/src/environment.rs b/client/finality-grandpa/src/environment.rs index 3d593a17ffdbf..62d9a4a8bb9ef 100644 --- a/client/finality-grandpa/src/environment.rs +++ b/client/finality-grandpa/src/environment.rs @@ -1181,8 +1181,9 @@ where fn round_commit_timer(&self) -> Self::Timer { use rand::{thread_rng, Rng}; - //random between 0-1 seconds. - let delay: u64 = thread_rng().gen_range(0, 1000); + // random between `[0, 2 * gossip_duration]` seconds. + let delay: u64 = + thread_rng().gen_range(0, 2 * self.config.gossip_duration.as_millis() as u64); Box::pin(Delay::new(Duration::from_millis(delay)).map(Ok)) } diff --git a/client/network-gossip/src/state_machine.rs b/client/network-gossip/src/state_machine.rs index 74f716133b478..ea1a336585981 100644 --- a/client/network-gossip/src/state_machine.rs +++ b/client/network-gossip/src/state_machine.rs @@ -41,7 +41,7 @@ use wasm_timer::Instant; // this cache should take about 256 KB of memory. 
const KNOWN_MESSAGES_CACHE_SIZE: usize = 8192; -const REBROADCAST_INTERVAL: time::Duration = time::Duration::from_secs(30); +const REBROADCAST_INTERVAL: time::Duration = time::Duration::from_millis(750); pub(crate) const PERIODIC_MAINTENANCE_INTERVAL: time::Duration = time::Duration::from_millis(1100); From 8b4df6ad44c169e727278a9ad012d065ecca0661 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Sat, 19 Jun 2021 13:40:53 +0100 Subject: [PATCH 18/67] babe: add comments to block weight and expose block_weight function (#9145) * babe: add comments to block weight and expose block_weight function * babe: expose function for block weight key --- client/consensus/babe/src/aux_schema.rs | 3 +- client/consensus/babe/src/lib.rs | 101 ++++++++++++------------ primitives/consensus/babe/src/lib.rs | 6 +- 3 files changed, 59 insertions(+), 51 deletions(-) diff --git a/client/consensus/babe/src/aux_schema.rs b/client/consensus/babe/src/aux_schema.rs index 8b8804e3bfb02..69c1a1930bbb5 100644 --- a/client/consensus/babe/src/aux_schema.rs +++ b/client/consensus/babe/src/aux_schema.rs @@ -32,7 +32,8 @@ const BABE_EPOCH_CHANGES_VERSION: &[u8] = b"babe_epoch_changes_version"; const BABE_EPOCH_CHANGES_KEY: &[u8] = b"babe_epoch_changes"; const BABE_EPOCH_CHANGES_CURRENT_VERSION: u32 = 2; -fn block_weight_key(block_hash: H) -> Vec { +/// The aux storage key used to store the block weight of the given block hash. +pub fn block_weight_key(block_hash: H) -> Vec { (b"block_weight", block_hash).encode() } diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index 409999ef1fdca..8aa92f37815eb 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -65,70 +65,73 @@ #![forbid(unsafe_code)] #![warn(missing_docs)] -pub use sp_consensus_babe::{ - BabeApi, ConsensusLog, BABE_ENGINE_ID, BabeEpochConfiguration, BabeGenesisConfiguration, - AuthorityId, AuthorityPair, AuthoritySignature, BabeAuthorityWeight, VRF_OUTPUT_LENGTH, - digests::{ - CompatibleDigestItem, NextEpochDescriptor, NextConfigDescriptor, PreDigest, - PrimaryPreDigest, SecondaryPlainPreDigest, - }, -}; -pub use sp_consensus::SyncOracle; -pub use sc_consensus_slots::SlotProportion; + use std::{ - collections::HashMap, sync::Arc, u64, pin::Pin, borrow::Cow, convert::TryInto, - time::Duration, + borrow::Cow, collections::HashMap, convert::TryInto, pin::Pin, sync::Arc, time::Duration, u64, }; -use sp_consensus::{ImportResult, CanAuthorWith, import_queue::BoxJustificationImport}; -use sp_core::crypto::Public; -use sp_application_crypto::AppKey; -use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; -use sp_runtime::{ - generic::{BlockId, OpaqueDigestItemId}, Justifications, - traits::{Block as BlockT, Header, DigestItemFor, Zero}, -}; -use sp_api::{ProvideRuntimeApi, NumberFor}; -use parking_lot::Mutex; -use sp_inherents::{CreateInherentDataProviders, InherentDataProvider, InherentData}; -use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_TRACE, CONSENSUS_DEBUG}; -use sp_consensus::{ - BlockImport, Environment, Proposer, BlockCheckParams, - ForkChoiceStrategy, BlockImportParams, BlockOrigin, Error as ConsensusError, - SelectChain, SlotData, import_queue::{Verifier, BasicQueue, DefaultImportQueue, CacheKeyId}, -}; -use sp_consensus_babe::inherents::BabeInherentData; -use sc_client_api::{ - backend::AuxStore, BlockchainEvents, ProvideUncles, UsageProvider -}; -use sp_block_builder::BlockBuilder as BlockBuilderApi; -use 
futures::channel::mpsc::{channel, Sender, Receiver}; -use futures::channel::oneshot; -use retain_mut::RetainMut; +use codec::{Decode, Encode}; +use futures::channel::mpsc::{channel, Receiver, Sender}; +use futures::channel::oneshot; use futures::prelude::*; use log::{debug, info, log, trace, warn}; +use parking_lot::Mutex; use prometheus_endpoint::Registry; -use sc_consensus_slots::{ - SlotInfo, StorageChanges, CheckedHeader, check_equivocation, - BackoffAuthoringBlocksStrategy, InherentDataProviderExt, -}; +use retain_mut::RetainMut; +use schnorrkel::SignatureError; + +use sc_client_api::{backend::AuxStore, BlockchainEvents, ProvideUncles, UsageProvider}; use sc_consensus_epochs::{ - descendent_query, SharedEpochChanges, EpochChangesFor, Epoch as EpochT, ViableEpochDescriptor, + descendent_query, Epoch as EpochT, EpochChangesFor, SharedEpochChanges, ViableEpochDescriptor, }; -use sp_blockchain::{ - Result as ClientResult, Error as ClientError, - HeaderBackend, ProvideCache, HeaderMetadata +use sc_consensus_slots::{ + check_equivocation, BackoffAuthoringBlocksStrategy, CheckedHeader, InherentDataProviderExt, + SlotInfo, StorageChanges, }; -use schnorrkel::SignatureError; -use codec::{Encode, Decode}; +use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_TRACE}; use sp_api::ApiExt; +use sp_api::{NumberFor, ProvideRuntimeApi}; +use sp_application_crypto::AppKey; +use sp_block_builder::BlockBuilder as BlockBuilderApi; +use sp_blockchain::{ + Error as ClientError, HeaderBackend, HeaderMetadata, ProvideCache, Result as ClientResult, +}; +use sp_consensus::{import_queue::BoxJustificationImport, CanAuthorWith, ImportResult}; +use sp_consensus::{ + import_queue::{BasicQueue, CacheKeyId, DefaultImportQueue, Verifier}, + BlockCheckParams, BlockImport, BlockImportParams, BlockOrigin, Environment, + Error as ConsensusError, ForkChoiceStrategy, Proposer, SelectChain, SlotData, +}; +use sp_consensus_babe::inherents::BabeInherentData; use sp_consensus_slots::Slot; +use sp_core::crypto::Public; +use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}; +use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; +use sp_runtime::{ + generic::{BlockId, OpaqueDigestItemId}, + traits::{Block as BlockT, DigestItemFor, Header, Zero}, + Justifications, +}; + +pub use sc_consensus_slots::SlotProportion; +pub use sp_consensus::SyncOracle; +pub use sp_consensus_babe::{ + digests::{ + CompatibleDigestItem, NextConfigDescriptor, NextEpochDescriptor, PreDigest, + PrimaryPreDigest, SecondaryPlainPreDigest, + }, + AuthorityId, AuthorityPair, AuthoritySignature, BabeApi, BabeAuthorityWeight, + BabeEpochConfiguration, BabeGenesisConfiguration, ConsensusLog, BABE_ENGINE_ID, + VRF_OUTPUT_LENGTH, +}; + +pub use aux_schema::load_block_weight as block_weight; -mod verification; mod migration; +mod verification; -pub mod aux_schema; pub mod authorship; +pub mod aux_schema; #[cfg(test)] mod tests; diff --git a/primitives/consensus/babe/src/lib.rs b/primitives/consensus/babe/src/lib.rs index da9f089e4561c..3609a0b8ce32c 100644 --- a/primitives/consensus/babe/src/lib.rs +++ b/primitives/consensus/babe/src/lib.rs @@ -88,7 +88,11 @@ pub type EquivocationProof = sp_consensus_slots::EquivocationProof Date: Sat, 19 Jun 2021 21:37:33 +0100 Subject: [PATCH 19/67] slots: slot lenience must take into account block proposal portion (#9138) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * aura, babe: proposal slot lenience must take into account proposal 
portion * slots: add support for max_block_proposal_slot_portion * fix compilation * slots: add tests * aura: fix comment Co-authored-by: Bastian Köcher * slots: log the actual proposing duration after lenience is applied Co-authored-by: Bastian Köcher --- bin/node-template/node/src/service.rs | 1 + bin/node/cli/src/service.rs | 1 + client/consensus/aura/src/lib.rs | 67 ++++++-------- client/consensus/babe/src/lib.rs | 98 +++++++++----------- client/consensus/babe/src/tests.rs | 1 + client/consensus/slots/src/lib.rs | 124 +++++++++++++++++++++++++- 6 files changed, 194 insertions(+), 98 deletions(-) diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 51b63e614fb8a..c19824e9eaa38 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -245,6 +245,7 @@ pub fn new_full(mut config: Configuration) -> Result sync_oracle: network.clone(), justification_sync_link: network.clone(), block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32), + max_block_proposal_slot_portion: None, telemetry: telemetry.as_ref().map(|x| x.handle()), }, )?; diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 06e1fcc804773..8fa3d2ed77ceb 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -333,6 +333,7 @@ pub fn new_full_base( babe_link, can_author_with, block_proposal_slot_portion: SlotProportion::new(0.5), + max_block_proposal_slot_portion: None, telemetry: telemetry.as_ref().map(|x| x.handle()), }; diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index d0b0cefe8ddca..845e920cfc11a 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -140,6 +140,9 @@ pub struct StartAuraParams { /// slot. However, the proposing can still take longer when there is some lenience factor applied, /// because there were no blocks produced for some slots. pub block_proposal_slot_portion: SlotProportion, + /// The maximum proportion of the slot dedicated to proposing with any lenience factor applied + /// due to no blocks being produced. + pub max_block_proposal_slot_portion: Option, /// Telemetry instance used to report telemetry metrics. pub telemetry: Option, } @@ -160,9 +163,11 @@ pub fn start_aura( keystore, can_author_with, block_proposal_slot_portion, + max_block_proposal_slot_portion, telemetry, }: StartAuraParams, -) -> Result, sp_consensus::Error> where +) -> Result, sp_consensus::Error> +where P: Pair + Send + Sync, P::Public: AppPublic + Hash + Member + Encode + Decode, P::Signature: TryFrom> + Hash + Member + Encode + Decode, @@ -192,6 +197,7 @@ pub fn start_aura( backoff_authoring_blocks, telemetry, block_proposal_slot_portion, + max_block_proposal_slot_portion, }); Ok(sc_consensus_slots::start_slot_worker( @@ -228,6 +234,9 @@ pub struct BuildAuraWorkerParams { /// slot. However, the proposing can still take longer when there is some lenience factor applied, /// because there were no blocks produced for some slots. pub block_proposal_slot_portion: SlotProportion, + /// The maximum proportion of the slot dedicated to proposing with any lenience factor applied + /// due to no blocks being produced. + pub max_block_proposal_slot_portion: Option, /// Telemetry instance used to report telemetry metrics. 
pub telemetry: Option, } @@ -245,10 +254,12 @@ pub fn build_aura_worker( backoff_authoring_blocks, keystore, block_proposal_slot_portion, + max_block_proposal_slot_portion, telemetry, force_authoring, }: BuildAuraWorkerParams, -) -> impl sc_consensus_slots::SlotWorker>::Proof> where +) -> impl sc_consensus_slots::SlotWorker>::Proof> +where B: BlockT, C: ProvideRuntimeApi + BlockOf + ProvideCache + AuxStore + HeaderBackend + Send + Sync, C::Api: AuraApi>, @@ -274,6 +285,7 @@ pub fn build_aura_worker( backoff_authoring_blocks, telemetry, block_proposal_slot_portion, + max_block_proposal_slot_portion, _key_type: PhantomData::
<P>
, } } @@ -288,6 +300,7 @@ struct AuraWorker { force_authoring: bool, backoff_authoring_blocks: Option, block_proposal_slot_portion: SlotProportion, + max_block_proposal_slot_portion: Option, telemetry: Option, _key_type: PhantomData
<P>
, } @@ -452,42 +465,17 @@ where self.telemetry.clone() } - fn proposing_remaining_duration( - &self, - slot_info: &SlotInfo, - ) -> std::time::Duration { - let max_proposing = slot_info.duration.mul_f32(self.block_proposal_slot_portion.get()); - - let slot_remaining = slot_info.ends_at - .checked_duration_since(std::time::Instant::now()) - .unwrap_or_default(); - - let slot_remaining = std::cmp::min(slot_remaining, max_proposing); - - // If parent is genesis block, we don't require any lenience factor. - if slot_info.chain_head.number().is_zero() { - return slot_remaining - } - - let parent_slot = match find_pre_digest::(&slot_info.chain_head) { - Err(_) => return slot_remaining, - Ok(d) => d, - }; - - if let Some(slot_lenience) = - sc_consensus_slots::slot_lenience_exponential(parent_slot, slot_info) - { - debug!( - target: "aura", - "No block for {} slots. Applying linear lenience of {}s", - slot_info.slot.saturating_sub(parent_slot + 1), - slot_lenience.as_secs(), - ); - - slot_remaining + slot_lenience - } else { - slot_remaining - } + fn proposing_remaining_duration(&self, slot_info: &SlotInfo) -> std::time::Duration { + let parent_slot = find_pre_digest::(&slot_info.chain_head).ok(); + + sc_consensus_slots::proposing_remaining_duration( + parent_slot, + slot_info, + &self.block_proposal_slot_portion, + self.max_block_proposal_slot_portion.as_ref(), + sc_consensus_slots::SlotLenienceType::Exponential, + self.logging_target(), + ) } } @@ -759,6 +747,7 @@ mod tests { keystore, can_author_with: sp_consensus::AlwaysCanAuthor, block_proposal_slot_portion: SlotProportion::new(0.5), + max_block_proposal_slot_portion: None, telemetry: None, }).expect("Starts aura")); } @@ -823,6 +812,7 @@ mod tests { telemetry: None, _key_type: PhantomData::, block_proposal_slot_portion: SlotProportion::new(0.5), + max_block_proposal_slot_portion: None, }; let head = Header::new( @@ -873,6 +863,7 @@ mod tests { telemetry: None, _key_type: PhantomData::, block_proposal_slot_portion: SlotProportion::new(0.5), + max_block_proposal_slot_portion: None, }; let head = client.header(&BlockId::Number(0)).unwrap().unwrap(); diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index 8aa92f37815eb..8112a00416e31 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -412,30 +412,35 @@ pub struct BabeParams { /// because there were no blocks produced for some slots. pub block_proposal_slot_portion: SlotProportion, + /// The maximum proportion of the slot dedicated to proposing with any lenience factor applied + /// due to no blocks being produced. + pub max_block_proposal_slot_portion: Option, + /// Handle use to report telemetries. pub telemetry: Option, } /// Start the babe worker. 
-pub fn start_babe(BabeParams { - keystore, - client, - select_chain, - env, - block_import, - sync_oracle, - justification_sync_link, - create_inherent_data_providers, - force_authoring, - backoff_authoring_blocks, - babe_link, - can_author_with, - block_proposal_slot_portion, - telemetry, -}: BabeParams) -> Result< - BabeWorker, - sp_consensus::Error, -> where +pub fn start_babe( + BabeParams { + keystore, + client, + select_chain, + env, + block_import, + sync_oracle, + justification_sync_link, + create_inherent_data_providers, + force_authoring, + backoff_authoring_blocks, + babe_link, + can_author_with, + block_proposal_slot_portion, + max_block_proposal_slot_portion, + telemetry, + }: BabeParams, +) -> Result, sp_consensus::Error> +where B: BlockT, C: ProvideRuntimeApi + ProvideCache @@ -480,6 +485,7 @@ pub fn start_babe(BabeParams { slot_notification_sinks: slot_notification_sinks.clone(), config: config.clone(), block_proposal_slot_portion, + max_block_proposal_slot_portion, telemetry, }; @@ -630,6 +636,7 @@ struct BabeSlotWorker { slot_notification_sinks: SlotNotificationSinks, config: Config, block_proposal_slot_portion: SlotProportion, + max_block_proposal_slot_portion: Option, telemetry: Option, } @@ -637,10 +644,10 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeSlotWorker where B: BlockT, - C: ProvideRuntimeApi + - ProvideCache + - HeaderBackend + - HeaderMetadata, + C: ProvideRuntimeApi + + ProvideCache + + HeaderBackend + + HeaderMetadata, C::Api: BabeApi, E: Environment, E::Proposer: Proposer>, @@ -832,42 +839,17 @@ where self.telemetry.clone() } - fn proposing_remaining_duration( - &self, - slot_info: &SlotInfo, - ) -> std::time::Duration { - let max_proposing = slot_info.duration.mul_f32(self.block_proposal_slot_portion.get()); - - let slot_remaining = slot_info.ends_at - .checked_duration_since(std::time::Instant::now()) - .unwrap_or_default(); + fn proposing_remaining_duration(&self, slot_info: &SlotInfo) -> std::time::Duration { + let parent_slot = find_pre_digest::(&slot_info.chain_head).ok().map(|d| d.slot()); - let slot_remaining = std::cmp::min(slot_remaining, max_proposing); - - // If parent is genesis block, we don't require any lenience factor. - if slot_info.chain_head.number().is_zero() { - return slot_remaining - } - - let parent_slot = match find_pre_digest::(&slot_info.chain_head) { - Err(_) => return slot_remaining, - Ok(d) => d.slot(), - }; - - if let Some(slot_lenience) = - sc_consensus_slots::slot_lenience_exponential(parent_slot, slot_info) - { - debug!( - target: "babe", - "No block for {} slots. 
Applying exponential lenience of {}s", - slot_info.slot.saturating_sub(parent_slot + 1), - slot_lenience.as_secs(), - ); - - slot_remaining + slot_lenience - } else { - slot_remaining - } + sc_consensus_slots::proposing_remaining_duration( + parent_slot, + slot_info, + &self.block_proposal_slot_portion, + self.max_block_proposal_slot_portion.as_ref(), + sc_consensus_slots::SlotLenienceType::Exponential, + self.logging_target(), + ) } } diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 467de9683c689..3392ffade98ee 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -473,6 +473,7 @@ fn run_one_test(mutator: impl Fn(&mut TestHeader, Stage) + Send + Sync + 'static can_author_with: sp_consensus::AlwaysCanAuthor, justification_sync_link: (), block_proposal_slot_portion: SlotProportion::new(0.5), + max_block_proposal_slot_portion: None, telemetry: None, }).expect("Starts babe")); } diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index 188aa52881a78..1ec89a6f519af 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -666,6 +666,96 @@ impl SlotProportion { } } +/// The strategy used to calculate the slot lenience used to increase the block proposal time when +/// slots have been skipped with no blocks authored. +pub enum SlotLenienceType { + /// Increase the lenience linearly with the number of skipped slots. + Linear, + /// Increase the lenience exponentially with the number of skipped slots. + Exponential, +} + +impl SlotLenienceType { + fn as_str(&self) -> &'static str { + match self { + SlotLenienceType::Linear => "linear", + SlotLenienceType::Exponential => "exponential", + } + } +} + +/// Calculate the remaining duration for block proposal taking into account whether any slots have +/// been skipped and applying the given lenience strategy. If `max_block_proposal_slot_portion` is +/// not none this method guarantees that the returned duration must be lower or equal to +/// `slot_info.duration * max_block_proposal_slot_portion`. +pub fn proposing_remaining_duration( + parent_slot: Option, + slot_info: &SlotInfo, + block_proposal_slot_portion: &SlotProportion, + max_block_proposal_slot_portion: Option<&SlotProportion>, + slot_lenience_type: SlotLenienceType, + log_target: &str, +) -> Duration { + use sp_runtime::traits::Zero; + + let proposing_duration = slot_info + .duration + .mul_f32(block_proposal_slot_portion.get()); + + let slot_remaining = slot_info + .ends_at + .checked_duration_since(std::time::Instant::now()) + .unwrap_or_default(); + + let proposing_duration = std::cmp::min(slot_remaining, proposing_duration); + + // If parent is genesis block, we don't require any lenience factor. 
+ if slot_info.chain_head.number().is_zero() { + return proposing_duration; + } + + let parent_slot = match parent_slot { + Some(parent_slot) => parent_slot, + None => return proposing_duration, + }; + + let slot_lenience = match slot_lenience_type { + SlotLenienceType::Exponential => slot_lenience_exponential(parent_slot, slot_info), + SlotLenienceType::Linear => slot_lenience_linear(parent_slot, slot_info), + }; + + if let Some(slot_lenience) = slot_lenience { + let lenient_proposing_duration = + proposing_duration + slot_lenience.mul_f32(block_proposal_slot_portion.get()); + + // if we defined a maximum portion of the slot for proposal then we must make sure the + // lenience doesn't go over it + let lenient_proposing_duration = + if let Some(ref max_block_proposal_slot_portion) = max_block_proposal_slot_portion { + std::cmp::min( + lenient_proposing_duration, + slot_info + .duration + .mul_f32(max_block_proposal_slot_portion.get()), + ) + } else { + lenient_proposing_duration + }; + + debug!( + target: log_target, + "No block for {} slots. Applying {} lenience, total proposing duration: {}", + slot_info.slot.saturating_sub(parent_slot + 1), + slot_lenience_type.as_str(), + lenient_proposing_duration.as_secs(), + ); + + lenient_proposing_duration + } else { + proposing_duration + } +} + /// Calculate a slot duration lenience based on the number of missed slots from current /// to parent. If the number of skipped slots is greated than 0 this method will apply /// an exponential backoff of at most `2^7 * slot_duration`, if no slots were skipped @@ -703,7 +793,7 @@ pub fn slot_lenience_exponential( /// a linear backoff of at most `20 * slot_duration`, if no slots were skipped /// this method will return `None.` pub fn slot_lenience_linear( - parent_slot: u64, + parent_slot: Slot, slot_info: &SlotInfo, ) -> Option { // never give more than 20 times more lenience. 
@@ -839,7 +929,7 @@ mod test { duration: SLOT_DURATION, timestamp: Default::default(), inherent_data: Default::default(), - ends_at: Instant::now(), + ends_at: Instant::now() + SLOT_DURATION, chain_head: Header::new( 1, Default::default(), @@ -897,6 +987,36 @@ mod test { ); } + #[test] + fn proposing_remaining_duration_should_apply_lenience_based_on_proposal_slot_proportion() { + assert_eq!( + proposing_remaining_duration( + Some(0.into()), + &slot(2), + &SlotProportion(0.25), + None, + SlotLenienceType::Linear, + "test", + ), + SLOT_DURATION.mul_f32(0.25 * 2.0), + ); + } + + #[test] + fn proposing_remaining_duration_should_never_exceed_max_proposal_slot_proportion() { + assert_eq!( + proposing_remaining_duration( + Some(0.into()), + &slot(100), + &SlotProportion(0.25), + Some(SlotProportion(0.9)).as_ref(), + SlotLenienceType::Exponential, + "test", + ), + SLOT_DURATION.mul_f32(0.9), + ); + } + #[derive(PartialEq, Debug)] struct HeadState { head_number: NumberFor, From d03a91a181d0b22d00a6b9ba2a8007dc254779e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Sun, 20 Jun 2021 12:01:09 +0100 Subject: [PATCH 20/67] make SelectChain async (#9128) * make SelectChain async * make JustificationImport async --- Cargo.lock | 2 + client/consensus/babe/rpc/src/lib.rs | 51 +- client/consensus/babe/src/lib.rs | 24 +- client/consensus/common/Cargo.toml | 1 + client/consensus/common/src/longest_chain.rs | 32 +- .../consensus/manual-seal/src/seal_block.rs | 52 +- client/consensus/pow/src/lib.rs | 12 +- client/consensus/slots/src/slots.rs | 2 +- client/finality-grandpa/src/environment.rs | 259 +++---- client/finality-grandpa/src/import.rs | 25 +- client/network/test/src/lib.rs | 7 +- client/service/src/lib.rs | 5 +- client/service/test/src/client/mod.rs | 678 ++++++++++++------ primitives/api/test/Cargo.toml | 5 +- primitives/api/test/tests/runtime_calls.rs | 9 +- .../consensus/common/src/block_import.rs | 5 +- .../common/src/import_queue/basic_queue.rs | 54 +- .../consensus/common/src/select_chain.rs | 15 +- 18 files changed, 792 insertions(+), 446 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fb944b782abd9..a33cb02f7f0d4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7208,6 +7208,7 @@ dependencies = [ name = "sc-consensus" version = "0.9.0" dependencies = [ + "async-trait", "parking_lot 0.11.1", "sc-client-api", "sp-blockchain", @@ -8707,6 +8708,7 @@ name = "sp-api-test" version = "2.0.1" dependencies = [ "criterion", + "futures 0.3.15", "log", "parity-scale-codec", "rustversion", diff --git a/client/consensus/babe/rpc/src/lib.rs b/client/consensus/babe/rpc/src/lib.rs index 6696a65040a5e..e16c24acaca36 100644 --- a/client/consensus/babe/rpc/src/lib.rs +++ b/client/consensus/babe/rpc/src/lib.rs @@ -93,11 +93,14 @@ impl BabeRpcHandler { } impl BabeApi for BabeRpcHandler - where - B: BlockT, - C: ProvideRuntimeApi + HeaderBackend + HeaderMetadata + 'static, - C::Api: BabeRuntimeApi, - SC: SelectChain + Clone + 'static, +where + B: BlockT, + C: ProvideRuntimeApi + + HeaderBackend + + HeaderMetadata + + 'static, + C::Api: BabeRuntimeApi, + SC: SelectChain + Clone + 'static, { fn epoch_authorship(&self) -> FutureResult> { if let Err(err) = self.deny_unsafe.check_if_safe() { @@ -118,28 +121,33 @@ impl BabeApi for BabeRpcHandler self.select_chain.clone(), ); let future = async move { - let header = select_chain.best_chain().map_err(Error::Consensus)?; - let epoch_start = client.runtime_api() + let header = 
select_chain.best_chain().map_err(Error::Consensus).await?; + let epoch_start = client + .runtime_api() .current_epoch_start(&BlockId::Hash(header.hash())) - .map_err(|err| { - Error::StringError(format!("{:?}", err)) - })?; + .map_err(|err| Error::StringError(format!("{:?}", err)))?; let epoch = epoch_data( &shared_epoch, &client, &babe_config, *epoch_start, &select_chain, - )?; + ) + .await?; let (epoch_start, epoch_end) = (epoch.start_slot(), epoch.end_slot()); let mut claims: HashMap = HashMap::new(); let keys = { - epoch.authorities.iter() + epoch + .authorities + .iter() .enumerate() .filter_map(|(i, a)| { - if SyncCryptoStore::has_keys(&*keystore, &[(a.0.to_raw_vec(), AuthorityId::ID)]) { + if SyncCryptoStore::has_keys( + &*keystore, + &[(a.0.to_raw_vec(), AuthorityId::ID)], + ) { Some((a.0.clone(), i)) } else { None @@ -167,7 +175,8 @@ impl BabeApi for BabeRpcHandler } Ok(claims) - }.boxed(); + } + .boxed(); Box::new(future.compat()) } @@ -203,20 +212,20 @@ impl From for jsonrpc_core::Error { } } -/// fetches the epoch data for a given slot. -fn epoch_data( +/// Fetches the epoch data for a given slot. +async fn epoch_data( epoch_changes: &SharedEpochChanges, client: &Arc, babe_config: &Config, slot: u64, select_chain: &SC, ) -> Result - where - B: BlockT, - C: HeaderBackend + HeaderMetadata + 'static, - SC: SelectChain, +where + B: BlockT, + C: HeaderBackend + HeaderMetadata + 'static, + SC: SelectChain, { - let parent = select_chain.best_chain()?; + let parent = select_chain.best_chain().await?; epoch_changes.shared_data().epoch_data_for_child_of( descendent_query(&**client), &parent.hash(), diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index 8112a00416e31..15d16c91f4304 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -989,7 +989,7 @@ where Ok(()) } - fn check_and_report_equivocation( + async fn check_and_report_equivocation( &self, slot_now: Slot, slot: Slot, @@ -1024,6 +1024,7 @@ where let best_id = self .select_chain .best_chain() + .await .map(|h| BlockId::Hash(h.hash())) .map_err(|e| Error::Client(e.into()))?; @@ -1070,13 +1071,26 @@ where } } +type BlockVerificationResult = Result< + ( + BlockImportParams, + Option)>>, + ), + String, +>; + #[async_trait::async_trait] impl Verifier for BabeVerifier where Block: BlockT, - Client: HeaderMetadata + HeaderBackend + ProvideRuntimeApi - + Send + Sync + AuxStore + ProvideCache, + Client: HeaderMetadata + + HeaderBackend + + ProvideRuntimeApi + + Send + + Sync + + AuxStore + + ProvideCache, Client::Api: BlockBuilderApi + BabeApi, SelectChain: sp_consensus::SelectChain, CAW: CanAuthorWith + Send + Sync, @@ -1089,7 +1103,7 @@ where header: Block::Header, justifications: Option, mut body: Option>, - ) -> Result<(BlockImportParams, Option)>>), String> { + ) -> BlockVerificationResult { trace!( target: "babe", "Verifying origin: {:?} header: {:?} justification(s): {:?} body: {:?}", @@ -1158,7 +1172,7 @@ where &header, &verified_info.author, &origin, - ) { + ).await { warn!(target: "babe", "Error checking/reporting BABE equivocation: {:?}", err); } diff --git a/client/consensus/common/Cargo.toml b/client/consensus/common/Cargo.toml index 5762b9c998b67..32babb02c2bf0 100644 --- a/client/consensus/common/Cargo.toml +++ b/client/consensus/common/Cargo.toml @@ -13,6 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] +async-trait = "0.1" sc-client-api = { version = "3.0.0", path = "../../api" } sp-blockchain = { version = "3.0.0", path = 
"../../../primitives/blockchain" } sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } diff --git a/client/consensus/common/src/longest_chain.rs b/client/consensus/common/src/longest_chain.rs index 8cf32a1dbd3c1..e1fbb600fa44f 100644 --- a/client/consensus/common/src/longest_chain.rs +++ b/client/consensus/common/src/longest_chain.rs @@ -46,15 +46,15 @@ impl Clone for LongestChain { } impl LongestChain - where - B: backend::Backend, - Block: BlockT, +where + B: backend::Backend, + Block: BlockT, { /// Instantiate a new LongestChain for Backend B pub fn new(backend: Arc) -> Self { LongestChain { backend, - _phantom: Default::default() + _phantom: Default::default(), } } @@ -75,30 +75,30 @@ impl LongestChain } } +#[async_trait::async_trait] impl SelectChain for LongestChain - where - B: backend::Backend, - Block: BlockT, +where + B: backend::Backend, + Block: BlockT, { - - fn leaves(&self) -> Result::Hash>, ConsensusError> { - LongestChain::leaves(self) - .map_err(|e| ConsensusError::ChainLookup(e.to_string()).into()) + async fn leaves(&self) -> Result::Hash>, ConsensusError> { + LongestChain::leaves(self).map_err(|e| ConsensusError::ChainLookup(e.to_string()).into()) } - fn best_chain(&self) -> Result<::Header, ConsensusError> - { + async fn best_chain(&self) -> Result<::Header, ConsensusError> { LongestChain::best_block_header(&self) .map_err(|e| ConsensusError::ChainLookup(e.to_string()).into()) } - fn finality_target( + async fn finality_target( &self, target_hash: Block::Hash, - maybe_max_number: Option> + maybe_max_number: Option>, ) -> Result, ConsensusError> { let import_lock = self.backend.get_import_lock(); - self.backend.blockchain().best_containing(target_hash, maybe_max_number, import_lock) + self.backend + .blockchain() + .best_containing(target_hash, maybe_max_number, import_lock) .map_err(|e| ConsensusError::ChainLookup(e.to_string()).into()) } } diff --git a/client/consensus/manual-seal/src/seal_block.rs b/client/consensus/manual-seal/src/seal_block.rs index 4aecfc213ab45..6ddd2cb05d498 100644 --- a/client/consensus/manual-seal/src/seal_block.rs +++ b/client/consensus/manual-seal/src/seal_block.rs @@ -80,45 +80,47 @@ pub async fn seal_block( create_inherent_data_providers, consensus_data_provider: digest_provider, mut sender, - }: SealBlockParams<'_, B, BI, SC, C, E, P, CIDP> -) - where - B: BlockT, - BI: BlockImport> - + Send + Sync + 'static, - C: HeaderBackend + ProvideRuntimeApi, - E: Environment, - E::Proposer: Proposer>, - P: txpool::ChainApi, - SC: SelectChain, - TransactionFor: 'static, - CIDP: CreateInherentDataProviders, + }: SealBlockParams<'_, B, BI, SC, C, E, P, CIDP>, +) where + B: BlockT, + BI: BlockImport> + + Send + + Sync + + 'static, + C: HeaderBackend + ProvideRuntimeApi, + E: Environment, + E::Proposer: Proposer>, + P: txpool::ChainApi, + SC: SelectChain, + TransactionFor: 'static, + CIDP: CreateInherentDataProviders, { let future = async { if pool.validated_pool().status().ready == 0 && !create_empty { - return Err(Error::EmptyTransactionPool) + return Err(Error::EmptyTransactionPool); } // get the header to build this new block on. // use the parent_hash supplied via `EngineCommand` // or fetch the best_block. let parent = match parent_hash { - Some(hash) => { - client.header(BlockId::Hash(hash))?.ok_or_else(|| Error::BlockNotFound(format!("{}", hash)))? - } - None => select_chain.best_chain()? + Some(hash) => client + .header(BlockId::Hash(hash))? 
+ .ok_or_else(|| Error::BlockNotFound(format!("{}", hash)))?, + None => select_chain.best_chain().await?, }; - let inherent_data_providers = - create_inherent_data_providers - .create_inherent_data_providers(parent.hash(), ()) - .await - .map_err(|e| Error::Other(e))?; + let inherent_data_providers = create_inherent_data_providers + .create_inherent_data_providers(parent.hash(), ()) + .await + .map_err(|e| Error::Other(e))?; let inherent_data = inherent_data_providers.create_inherent_data()?; - let proposer = env.init(&parent) - .map_err(|err| Error::StringError(format!("{:?}", err))).await?; + let proposer = env + .init(&parent) + .map_err(|err| Error::StringError(format!("{:?}", err))) + .await?; let inherents_len = inherent_data.len(); let digest = if let Some(digest_provider) = digest_provider { diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index 6688c14b6375d..e71726564ebe5 100644 --- a/client/consensus/pow/src/lib.rs +++ b/client/consensus/pow/src/lib.rs @@ -341,7 +341,10 @@ where mut block: BlockImportParams, new_cache: HashMap>, ) -> Result { - let best_header = self.select_chain.best_chain() + let best_header = self + .select_chain + .best_chain() + .await .map_err(|e| format!("Fetch best chain failed via select chain: {:?}", e))?; let best_hash = best_header.hash(); @@ -543,7 +546,8 @@ pub fn start_mining_worker( ) -> ( Arc>::Proof>>>, impl Future, -) where +) +where Block: BlockT, C: ProvideRuntimeApi + BlockchainEvents + 'static, S: SelectChain + 'static, @@ -578,7 +582,7 @@ pub fn start_mining_worker( return; } - let best_header = match select_chain.best_chain() { + let best_header = match select_chain.best_chain().await { Ok(x) => x, Err(err) => { warn!( @@ -588,7 +592,7 @@ pub fn start_mining_worker( err ); return; - }, + } }; let best_hash = best_header.hash(); diff --git a/client/consensus/slots/src/slots.rs b/client/consensus/slots/src/slots.rs index 665f7c58ba94b..1e6dadcdf5cf5 100644 --- a/client/consensus/slots/src/slots.rs +++ b/client/consensus/slots/src/slots.rs @@ -151,7 +151,7 @@ where let ends_at = Instant::now() + ends_in; - let chain_head = match self.client.best_chain() { + let chain_head = match self.client.best_chain().await { Ok(x) => x, Err(e) => { log::warn!( diff --git a/client/finality-grandpa/src/environment.rs b/client/finality-grandpa/src/environment.rs index 62d9a4a8bb9ef..77c7ccda7daf6 100644 --- a/client/finality-grandpa/src/environment.rs +++ b/client/finality-grandpa/src/environment.rs @@ -34,12 +34,12 @@ use parking_lot::RwLock; use prometheus_endpoint::{register, Counter, Gauge, PrometheusError, U64}; use sc_client_api::{ - backend::{apply_aux, Backend}, + backend::{apply_aux, Backend as BackendT}, utils::is_descendent_of, }; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO}; use sp_blockchain::HeaderMetadata; -use sp_consensus::SelectChain; +use sp_consensus::SelectChain as SelectChainT; use sp_finality_grandpa::{ AuthorityId, AuthoritySignature, Equivocation, EquivocationProof, GrandpaApi, RoundNumber, SetId, GRANDPA_ENGINE_ID, @@ -54,7 +54,7 @@ use crate::{ local_authority_id, notification::GrandpaJustificationSender, until_imported::UntilVoteTargetImported, - voting_rule::VotingRule, + voting_rule::VotingRule as VotingRuleT, ClientForGrandpa, CommandOrError, Commit, Config, Error, NewAuthoritySet, Precommit, Prevote, PrimaryPropose, SignedMessage, VoterCommand, }; @@ -478,11 +478,11 @@ impl, SC, VR> Environment Environment where Block: BlockT, - BE: Backend, + BE: BackendT, C: 
ClientForGrandpa, C::Api: GrandpaApi, N: NetworkT, - SC: SelectChain, + SC: SelectChainT, { /// Report the given equivocation to the GRANDPA runtime module. This method /// generates a session membership proof of the offender and then submits an @@ -503,9 +503,12 @@ where let is_descendent_of = is_descendent_of(&*self.client, None); - let best_header = self.select_chain - .best_chain() - .map_err(|e| Error::Blockchain(e.to_string()))?; + // TODO: add proper async support here + let best_header = futures::executor::block_on( + self.select_chain + .best_chain() + .map_err(|e| Error::Blockchain(e.to_string())), + )?; let authority_set = self.authority_set.inner(); @@ -581,11 +584,11 @@ impl finality_grandpa::Chain where Block: BlockT, - BE: Backend, + BE: BackendT, C: ClientForGrandpa, N: NetworkT, - SC: SelectChain, - VR: VotingRule, + SC: SelectChainT, + VR: VotingRuleT, NumberFor: BlockNumberOps, { fn ancestry( @@ -637,12 +640,12 @@ impl voter::Environment> for Environment where Block: BlockT, - B: Backend, + B: BackendT, C: ClientForGrandpa + 'static, C::Api: GrandpaApi, N: NetworkT, - SC: SelectChain, - VR: VotingRule, + SC: SelectChainT + 'static, + VR: VotingRuleT + Clone + 'static, NumberFor: BlockNumberOps, { type Timer = Pin> + Send>>; @@ -684,116 +687,25 @@ where type Error = CommandOrError>; fn best_chain_containing(&self, block: Block::Hash) -> Self::BestChain { - let find_best_chain = || { + let client = self.client.clone(); + let authority_set = self.authority_set.clone(); + let select_chain = self.select_chain.clone(); + let voting_rule = self.voting_rule.clone(); + let set_id = self.set_id; + + Box::pin(async move { // NOTE: when we finalize an authority set change through the sync protocol the voter is // signaled asynchronously. therefore the voter could still vote in the next round - // before activating the new set. the `authority_set` is updated immediately thus we - // restrict the voter based on that. - if self.set_id != self.authority_set.set_id() { - return None; - } - - let base_header = match self.client.header(BlockId::Hash(block)).ok()? { - Some(h) => h, - None => { - debug!(target: "afg", "Encountered error finding best chain containing {:?}: couldn't find base block", block); - return None; - } - }; - - // we refuse to vote beyond the current limit number where transitions are scheduled to - // occur. - // once blocks are finalized that make that transition irrelevant or activate it, - // we will proceed onwards. most of the time there will be no pending transition. - // the limit, if any, is guaranteed to be higher than or equal to the given base number. - let limit = self.authority_set.current_limit(*base_header.number()); - debug!(target: "afg", "Finding best chain containing block {:?} with number limit {:?}", block, limit); - - match self.select_chain.finality_target(block, None) { - Ok(Some(best_hash)) => { - let best_header = self - .client - .header(BlockId::Hash(best_hash)) - .ok()? 
- .expect("Header known to exist after `finality_target` call; qed"); - - // check if our vote is currently being limited due to a pending change - let limit = limit.filter(|limit| limit < best_header.number()); - - if let Some(target_number) = limit { - let mut target_header = best_header.clone(); - - // walk backwards until we find the target block - loop { - if *target_header.number() < target_number { - unreachable!( - "we are traversing backwards from a known block; \ - blocks are stored contiguously; \ - qed" - ); - } - - if *target_header.number() == target_number { - break; - } - - target_header = self - .client - .header(BlockId::Hash(*target_header.parent_hash())) - .ok()? - .expect("Header known to exist after `finality_target` call; qed"); - } - - Some((base_header, best_header, target_header)) - } else { - // otherwise just use the given best as the target - Some((base_header, best_header.clone(), best_header)) - } - } - Ok(None) => { - debug!(target: "afg", "Encountered error finding best chain containing {:?}: couldn't find target block", block); - None - } - Err(e) => { - debug!(target: "afg", "Encountered error finding best chain containing {:?}: {:?}", block, e); - None - } + // before activating the new set. the `authority_set` is updated immediately thus + // we restrict the voter based on that. + if set_id != authority_set.set_id() { + return Ok(None); } - }; - - if let Some((base_header, best_header, target_header)) = find_best_chain() { - // restrict vote according to the given voting rule, if the - // voting rule doesn't restrict the vote then we keep the - // previous target. - // - // note that we pass the original `best_header`, i.e. before the - // authority set limit filter, which can be considered a - // mandatory/implicit voting rule. - // - // we also make sure that the restricted vote is higher than the - // round base (i.e. last finalized), otherwise the value - // returned by the given voting rule is ignored and the original - // target is used instead. - let rule_fut = self.voting_rule.restrict_vote( - self.client.clone(), - &base_header, - &best_header, - &target_header, - ); - Box::pin(async move { - Ok(rule_fut - .await - .filter(|(_, restricted_number)| { - // we can only restrict votes within the interval [base, target] - restricted_number >= base_header.number() - && restricted_number < target_header.number() - }) - .or_else(|| Some((target_header.hash(), *target_header.number())))) - }) - } else { - Box::pin(future::ok(None)) - } + best_chain_containing(block, client, authority_set, select_chain, voting_rule) + .await + .map_err(|e| e.into()) + }) } fn round_data( @@ -1227,6 +1139,111 @@ impl From> for JustificationOrCommit< } } +async fn best_chain_containing( + block: Block::Hash, + client: Arc, + authority_set: SharedAuthoritySet>, + select_chain: SelectChain, + voting_rule: VotingRule, +) -> Result)>, Error> +where + Backend: BackendT, + Block: BlockT, + Client: ClientForGrandpa, + SelectChain: SelectChainT + 'static, + VotingRule: VotingRuleT, +{ + let base_header = match client.header(BlockId::Hash(block))? { + Some(h) => h, + None => { + debug!(target: "afg", + "Encountered error finding best chain containing {:?}: couldn't find base block", + block, + ); + + return Ok(None); + } + }; + + // we refuse to vote beyond the current limit number where transitions are scheduled to occur. + // once blocks are finalized that make that transition irrelevant or activate it, we will + // proceed onwards. 
most of the time there will be no pending transition. the limit, if any, is + // guaranteed to be higher than or equal to the given base number. + let limit = authority_set.current_limit(*base_header.number()); + debug!(target: "afg", "Finding best chain containing block {:?} with number limit {:?}", block, limit); + + let result = match select_chain.finality_target(block, None).await { + Ok(Some(best_hash)) => { + let best_header = client + .header(BlockId::Hash(best_hash))? + .expect("Header known to exist after `finality_target` call; qed"); + + // check if our vote is currently being limited due to a pending change + let limit = limit.filter(|limit| limit < best_header.number()); + + let (base_header, best_header, target_header) = if let Some(target_number) = limit { + let mut target_header = best_header.clone(); + + // walk backwards until we find the target block + loop { + if *target_header.number() < target_number { + unreachable!( + "we are traversing backwards from a known block; \ + blocks are stored contiguously; \ + qed" + ); + } + + if *target_header.number() == target_number { + break; + } + + target_header = client + .header(BlockId::Hash(*target_header.parent_hash()))? + .expect("Header known to exist after `finality_target` call; qed"); + } + + (base_header, best_header, target_header) + } else { + // otherwise just use the given best as the target + (base_header, best_header.clone(), best_header) + }; + + // restrict vote according to the given voting rule, if the + // voting rule doesn't restrict the vote then we keep the + // previous target. + // + // note that we pass the original `best_header`, i.e. before the + // authority set limit filter, which can be considered a + // mandatory/implicit voting rule. + // + // we also make sure that the restricted vote is higher than the + // round base (i.e. last finalized), otherwise the value + // returned by the given voting rule is ignored and the original + // target is used instead. + voting_rule + .restrict_vote(client.clone(), &base_header, &best_header, &target_header) + .await + .filter(|(_, restricted_number)| { + // we can only restrict votes within the interval [base, target] + restricted_number >= base_header.number() && + restricted_number < target_header.number() + }) + .or_else(|| Some((target_header.hash(), *target_header.number()))) + } + Ok(None) => { + debug!(target: "afg", "Encountered error finding best chain containing {:?}: couldn't find target block", block); + None + } + Err(e) => { + debug!(target: "afg", "Encountered error finding best chain containing {:?}: {:?}", block, e); + None + } + }; + + Ok(result) +} + /// Finalize the given block and apply any authority set changes. If an /// authority set change is enacted then a justification is created (if not /// given) and stored with the block when finalizing it. @@ -1244,7 +1261,7 @@ pub(crate) fn finalize_block( ) -> Result<(), CommandOrError>> where Block: BlockT, - BE: Backend, + BE: BackendT, Client: ClientForGrandpa, { // NOTE: lock must be held through writing to DB to avoid race. 
this lock diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index de02ea357cac4..481f38b617eaf 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -81,6 +81,7 @@ impl Clone } } +#[async_trait::async_trait] impl JustificationImport for GrandpaBlockImport where @@ -92,22 +93,30 @@ where { type Error = ConsensusError; - fn on_start(&mut self) -> Vec<(Block::Hash, NumberFor)> { + async fn on_start(&mut self) -> Vec<(Block::Hash, NumberFor)> { let mut out = Vec::new(); let chain_info = self.inner.info(); // request justifications for all pending changes for which change blocks have already been imported - let authorities = self.authority_set.inner(); - for pending_change in authorities.pending_changes() { + let pending_changes: Vec<_> = self + .authority_set + .inner() + .pending_changes() + .cloned() + .collect(); + + for pending_change in pending_changes { if pending_change.delay_kind == DelayKind::Finalized && pending_change.effective_number() > chain_info.finalized_number && pending_change.effective_number() <= chain_info.best_number { let effective_block_hash = if !pending_change.delay.is_zero() { - self.select_chain.finality_target( - pending_change.canon_hash, - Some(pending_change.effective_number()), - ) + self.select_chain + .finality_target( + pending_change.canon_hash, + Some(pending_change.effective_number()), + ) + .await } else { Ok(Some(pending_change.canon_hash)) }; @@ -125,7 +134,7 @@ where out } - fn import_justification( + async fn import_justification( &mut self, hash: Block::Hash, number: NumberFor, diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 8e56005dad25d..f55444f8cf121 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -1075,10 +1075,15 @@ impl TestNetFactory for TestNet { pub struct ForceFinalized(PeersClient); +#[async_trait::async_trait] impl JustificationImport for ForceFinalized { type Error = ConsensusError; - fn import_justification( + async fn on_start(&mut self) -> Vec<(H256, NumberFor)> { + Vec::new() + } + + async fn import_justification( &mut self, hash: H256, _number: NumberFor, diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index afc1209280322..c8ac03ee0e368 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -561,13 +561,14 @@ mod tests { client.clone(), ); let source = sp_runtime::transaction_validity::TransactionSource::External; - let best = longest_chain.best_chain().unwrap(); + let best = block_on(longest_chain.best_chain()).unwrap(); let transaction = Transfer { amount: 5, nonce: 0, from: AccountKeyring::Alice.into(), to: Default::default(), - }.into_signed_tx(); + } + .into_signed_tx(); block_on(pool.submit_one( &BlockId::hash(best.hash()), source, transaction.clone()), ).unwrap(); diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 3852ab2d61b5f..bf4105377f9c1 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -337,7 +337,6 @@ fn construct_genesis_with_bad_transaction_should_panic() { assert!(r.is_err()); } - #[test] fn client_initializes_from_genesis_ok() { let client = substrate_test_runtime_client::new(); @@ -450,7 +449,9 @@ fn best_containing_with_genesis_block() { assert_eq!( genesis_hash.clone(), - longest_chain_select.finality_target(genesis_hash.clone(), None).unwrap().unwrap() + block_on(longest_chain_select.finality_target(genesis_hash.clone(), None)) 
+ .unwrap() + .unwrap(), ); } @@ -461,11 +462,17 @@ fn best_containing_with_hash_not_found() { let (client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); - let uninserted_block = client.new_block(Default::default()).unwrap().build().unwrap().block; + let uninserted_block = client + .new_block(Default::default()) + .unwrap() + .build() + .unwrap() + .block; assert_eq!( None, - longest_chain_select.finality_target(uninserted_block.hash().clone(), None).unwrap() + block_on(longest_chain_select.finality_target(uninserted_block.hash().clone(), None)) + .unwrap(), ); } @@ -624,18 +631,43 @@ fn best_containing_on_longest_chain_with_single_chain_3_blocks() { let (mut client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); // G -> A1 - let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let a1 = client + .new_block(Default::default()) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 - let a2 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let a2 = client + .new_block(Default::default()) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); let genesis_hash = client.chain_info().genesis_hash; - assert_eq!(a2.hash(), longest_chain_select.finality_target(genesis_hash, None).unwrap().unwrap()); - assert_eq!(a2.hash(), longest_chain_select.finality_target(a1.hash(), None).unwrap().unwrap()); - assert_eq!(a2.hash(), longest_chain_select.finality_target(a2.hash(), None).unwrap().unwrap()); + assert_eq!( + a2.hash(), + block_on(longest_chain_select.finality_target(genesis_hash, None)) + .unwrap() + .unwrap() + ); + assert_eq!( + a2.hash(), + block_on(longest_chain_select.finality_target(a1.hash(), None)) + .unwrap() + .unwrap() + ); + assert_eq!( + a2.hash(), + block_on(longest_chain_select.finality_target(a2.hash(), None)) + .unwrap() + .unwrap() + ); } #[test] @@ -715,19 +747,19 @@ fn best_containing_on_longest_chain_with_multiple_forks() { ).unwrap().build().unwrap().block; block_on(client.import(BlockOrigin::Own, b4.clone())).unwrap(); - // // B2 -> C3 - let mut builder = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap(); + // B2 -> C3 + let mut builder = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap(); // this push is required as otherwise C3 has the same hash as B3 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 1, - }).unwrap(); + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 1, + }) + .unwrap(); let c3 = builder.build().unwrap().block; block_on(client.import(BlockOrigin::Own, c3.clone())).unwrap(); @@ -750,7 +782,7 @@ fn best_containing_on_longest_chain_with_multiple_forks() { assert_eq!(client.chain_info().best_hash, a5.hash()); let genesis_hash = client.chain_info().genesis_hash; - let leaves = longest_chain_select.leaves().unwrap(); + let leaves = block_on(longest_chain_select.leaves()).unwrap(); assert!(leaves.contains(&a5.hash())); assert!(leaves.contains(&b4.hash())); @@ -759,208 +791,422 @@ fn best_containing_on_longest_chain_with_multiple_forks() { assert_eq!(leaves.len(), 4); // search without restriction - - assert_eq!(a5.hash(), longest_chain_select.finality_target( - 
genesis_hash, None).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a1.hash(), None).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a2.hash(), None).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a3.hash(), None).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a4.hash(), None).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a5.hash(), None).unwrap().unwrap()); - - assert_eq!(b4.hash(), longest_chain_select.finality_target( - b2.hash(), None).unwrap().unwrap()); - assert_eq!(b4.hash(), longest_chain_select.finality_target( - b3.hash(), None).unwrap().unwrap()); - assert_eq!(b4.hash(), longest_chain_select.finality_target( - b4.hash(), None).unwrap().unwrap()); - - assert_eq!(c3.hash(), longest_chain_select.finality_target( - c3.hash(), None).unwrap().unwrap()); - - assert_eq!(d2.hash(), longest_chain_select.finality_target( - d2.hash(), None).unwrap().unwrap()); - + assert_eq!( + a5.hash(), + block_on(longest_chain_select.finality_target(genesis_hash, None)) + .unwrap() + .unwrap() + ); + assert_eq!( + a5.hash(), + block_on(longest_chain_select.finality_target(a1.hash(), None)) + .unwrap() + .unwrap() + ); + assert_eq!( + a5.hash(), + block_on(longest_chain_select.finality_target(a2.hash(), None)) + .unwrap() + .unwrap() + ); + assert_eq!( + a5.hash(), + block_on(longest_chain_select.finality_target(a3.hash(), None)) + .unwrap() + .unwrap() + ); + assert_eq!( + a5.hash(), + block_on(longest_chain_select.finality_target(a4.hash(), None)) + .unwrap() + .unwrap() + ); + assert_eq!( + a5.hash(), + block_on(longest_chain_select.finality_target(a5.hash(), None)) + .unwrap() + .unwrap() + ); + assert_eq!( + b4.hash(), + block_on(longest_chain_select.finality_target(b2.hash(), None)) + .unwrap() + .unwrap() + ); + assert_eq!( + b4.hash(), + block_on(longest_chain_select.finality_target(b3.hash(), None)) + .unwrap() + .unwrap() + ); + assert_eq!( + b4.hash(), + block_on(longest_chain_select.finality_target(b4.hash(), None)) + .unwrap() + .unwrap() + ); + assert_eq!( + c3.hash(), + block_on(longest_chain_select.finality_target(c3.hash(), None)) + .unwrap() + .unwrap() + ); + assert_eq!( + d2.hash(), + block_on(longest_chain_select.finality_target(d2.hash(), None)) + .unwrap() + .unwrap() + ); // search only blocks with number <= 5. 
equivalent to without restriction for this scenario - - assert_eq!(a5.hash(), longest_chain_select.finality_target( - genesis_hash, Some(5)).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a1.hash(), Some(5)).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a2.hash(), Some(5)).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a3.hash(), Some(5)).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a4.hash(), Some(5)).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a5.hash(), Some(5)).unwrap().unwrap()); - - assert_eq!(b4.hash(), longest_chain_select.finality_target( - b2.hash(), Some(5)).unwrap().unwrap()); - assert_eq!(b4.hash(), longest_chain_select.finality_target( - b3.hash(), Some(5)).unwrap().unwrap()); - assert_eq!(b4.hash(), longest_chain_select.finality_target( - b4.hash(), Some(5)).unwrap().unwrap()); - - assert_eq!(c3.hash(), longest_chain_select.finality_target( - c3.hash(), Some(5)).unwrap().unwrap()); - - assert_eq!(d2.hash(), longest_chain_select.finality_target( - d2.hash(), Some(5)).unwrap().unwrap()); - + assert_eq!( + a5.hash(), + block_on(longest_chain_select.finality_target(genesis_hash, Some(5))) + .unwrap() + .unwrap() + ); + assert_eq!( + a5.hash(), + block_on(longest_chain_select.finality_target(a1.hash(), Some(5))) + .unwrap() + .unwrap() + ); + assert_eq!( + a5.hash(), + block_on(longest_chain_select.finality_target(a2.hash(), Some(5))) + .unwrap() + .unwrap() + ); + assert_eq!( + a5.hash(), + block_on(longest_chain_select.finality_target(a3.hash(), Some(5))) + .unwrap() + .unwrap() + ); + assert_eq!( + a5.hash(), + block_on(longest_chain_select.finality_target(a4.hash(), Some(5))) + .unwrap() + .unwrap() + ); + assert_eq!( + a5.hash(), + block_on(longest_chain_select.finality_target(a5.hash(), Some(5))) + .unwrap() + .unwrap() + ); + assert_eq!( + b4.hash(), + block_on(longest_chain_select.finality_target(b2.hash(), Some(5))) + .unwrap() + .unwrap() + ); + assert_eq!( + b4.hash(), + block_on(longest_chain_select.finality_target(b3.hash(), Some(5))) + .unwrap() + .unwrap() + ); + assert_eq!( + b4.hash(), + block_on(longest_chain_select.finality_target(b4.hash(), Some(5))) + .unwrap() + .unwrap() + ); + assert_eq!( + c3.hash(), + block_on(longest_chain_select.finality_target(c3.hash(), Some(5))) + .unwrap() + .unwrap() + ); + assert_eq!( + d2.hash(), + block_on(longest_chain_select.finality_target(d2.hash(), Some(5))) + .unwrap() + .unwrap() + ); // search only blocks with number <= 4 - - assert_eq!(a4.hash(), longest_chain_select.finality_target( - genesis_hash, Some(4)).unwrap().unwrap()); - assert_eq!(a4.hash(), longest_chain_select.finality_target( - a1.hash(), Some(4)).unwrap().unwrap()); - assert_eq!(a4.hash(), longest_chain_select.finality_target( - a2.hash(), Some(4)).unwrap().unwrap()); - assert_eq!(a4.hash(), longest_chain_select.finality_target( - a3.hash(), Some(4)).unwrap().unwrap()); - assert_eq!(a4.hash(), longest_chain_select.finality_target( - a4.hash(), Some(4)).unwrap().unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a5.hash(), Some(4)).unwrap()); - - assert_eq!(b4.hash(), longest_chain_select.finality_target( - b2.hash(), Some(4)).unwrap().unwrap()); - assert_eq!(b4.hash(), longest_chain_select.finality_target( - b3.hash(), Some(4)).unwrap().unwrap()); - assert_eq!(b4.hash(), longest_chain_select.finality_target( - b4.hash(), Some(4)).unwrap().unwrap()); 
- - assert_eq!(c3.hash(), longest_chain_select.finality_target( - c3.hash(), Some(4)).unwrap().unwrap()); - - assert_eq!(d2.hash(), longest_chain_select.finality_target( - d2.hash(), Some(4)).unwrap().unwrap()); - + assert_eq!( + a4.hash(), + block_on(longest_chain_select.finality_target(genesis_hash, Some(4))) + .unwrap() + .unwrap() + ); + assert_eq!( + a4.hash(), + block_on(longest_chain_select.finality_target(a1.hash(), Some(4))) + .unwrap() + .unwrap() + ); + assert_eq!( + a4.hash(), + block_on(longest_chain_select.finality_target(a2.hash(), Some(4))) + .unwrap() + .unwrap() + ); + assert_eq!( + a4.hash(), + block_on(longest_chain_select.finality_target(a3.hash(), Some(4))) + .unwrap() + .unwrap() + ); + assert_eq!( + a4.hash(), + block_on(longest_chain_select.finality_target(a4.hash(), Some(4))) + .unwrap() + .unwrap() + ); + assert_eq!( + None, + block_on(longest_chain_select.finality_target(a5.hash(), Some(4))).unwrap() + ); + assert_eq!( + b4.hash(), + block_on(longest_chain_select.finality_target(b2.hash(), Some(4))) + .unwrap() + .unwrap() + ); + assert_eq!( + b4.hash(), + block_on(longest_chain_select.finality_target(b3.hash(), Some(4))) + .unwrap() + .unwrap() + ); + assert_eq!( + b4.hash(), + block_on(longest_chain_select.finality_target(b4.hash(), Some(4))) + .unwrap() + .unwrap() + ); + assert_eq!( + c3.hash(), + block_on(longest_chain_select.finality_target(c3.hash(), Some(4))) + .unwrap() + .unwrap() + ); + assert_eq!( + d2.hash(), + block_on(longest_chain_select.finality_target(d2.hash(), Some(4))) + .unwrap() + .unwrap() + ); // search only blocks with number <= 3 - - assert_eq!(a3.hash(), longest_chain_select.finality_target( - genesis_hash, Some(3)).unwrap().unwrap()); - assert_eq!(a3.hash(), longest_chain_select.finality_target( - a1.hash(), Some(3)).unwrap().unwrap()); - assert_eq!(a3.hash(), longest_chain_select.finality_target( - a2.hash(), Some(3)).unwrap().unwrap()); - assert_eq!(a3.hash(), longest_chain_select.finality_target( - a3.hash(), Some(3)).unwrap().unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a4.hash(), Some(3)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a5.hash(), Some(3)).unwrap()); - - assert_eq!(b3.hash(), longest_chain_select.finality_target( - b2.hash(), Some(3)).unwrap().unwrap()); - assert_eq!(b3.hash(), longest_chain_select.finality_target( - b3.hash(), Some(3)).unwrap().unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - b4.hash(), Some(3)).unwrap()); - - assert_eq!(c3.hash(), longest_chain_select.finality_target( - c3.hash(), Some(3)).unwrap().unwrap()); - - assert_eq!(d2.hash(), longest_chain_select.finality_target( - d2.hash(), Some(3)).unwrap().unwrap()); - + assert_eq!( + a3.hash(), + block_on(longest_chain_select.finality_target(genesis_hash, Some(3))) + .unwrap() + .unwrap() + ); + assert_eq!( + a3.hash(), + block_on(longest_chain_select.finality_target(a1.hash(), Some(3))) + .unwrap() + .unwrap() + ); + assert_eq!( + a3.hash(), + block_on(longest_chain_select.finality_target(a2.hash(), Some(3))) + .unwrap() + .unwrap() + ); + assert_eq!( + a3.hash(), + block_on(longest_chain_select.finality_target(a3.hash(), Some(3))) + .unwrap() + .unwrap() + ); + assert_eq!( + None, + block_on(longest_chain_select.finality_target(a4.hash(), Some(3))).unwrap() + ); + assert_eq!( + None, + block_on(longest_chain_select.finality_target(a5.hash(), Some(3))).unwrap() + ); + assert_eq!( + b3.hash(), + block_on(longest_chain_select.finality_target(b2.hash(), Some(3))) + .unwrap() + 
.unwrap() + ); + assert_eq!( + b3.hash(), + block_on(longest_chain_select.finality_target(b3.hash(), Some(3))) + .unwrap() + .unwrap() + ); + assert_eq!( + None, + block_on(longest_chain_select.finality_target(b4.hash(), Some(3))).unwrap() + ); + assert_eq!( + c3.hash(), + block_on(longest_chain_select.finality_target(c3.hash(), Some(3))) + .unwrap() + .unwrap() + ); + assert_eq!( + d2.hash(), + block_on(longest_chain_select.finality_target(d2.hash(), Some(3))) + .unwrap() + .unwrap() + ); // search only blocks with number <= 2 - - assert_eq!(a2.hash(), longest_chain_select.finality_target( - genesis_hash, Some(2)).unwrap().unwrap()); - assert_eq!(a2.hash(), longest_chain_select.finality_target( - a1.hash(), Some(2)).unwrap().unwrap()); - assert_eq!(a2.hash(), longest_chain_select.finality_target( - a2.hash(), Some(2)).unwrap().unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a3.hash(), Some(2)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a4.hash(), Some(2)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a5.hash(), Some(2)).unwrap()); - - assert_eq!(b2.hash(), longest_chain_select.finality_target( - b2.hash(), Some(2)).unwrap().unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - b3.hash(), Some(2)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - b4.hash(), Some(2)).unwrap()); - - assert_eq!(None, longest_chain_select.finality_target( - c3.hash(), Some(2)).unwrap()); - - assert_eq!(d2.hash(), longest_chain_select.finality_target( - d2.hash(), Some(2)).unwrap().unwrap()); - + assert_eq!( + a2.hash(), + block_on(longest_chain_select.finality_target(genesis_hash, Some(2))) + .unwrap() + .unwrap() + ); + assert_eq!( + a2.hash(), + block_on(longest_chain_select.finality_target(a1.hash(), Some(2))) + .unwrap() + .unwrap() + ); + assert_eq!( + a2.hash(), + block_on(longest_chain_select.finality_target(a2.hash(), Some(2))) + .unwrap() + .unwrap() + ); + assert_eq!( + None, + block_on(longest_chain_select.finality_target(a3.hash(), Some(2))).unwrap() + ); + assert_eq!( + None, + block_on(longest_chain_select.finality_target(a4.hash(), Some(2))).unwrap() + ); + assert_eq!( + None, + block_on(longest_chain_select.finality_target(a5.hash(), Some(2))).unwrap() + ); + assert_eq!( + b2.hash(), + block_on(longest_chain_select.finality_target(b2.hash(), Some(2))) + .unwrap() + .unwrap() + ); + assert_eq!( + None, + block_on(longest_chain_select.finality_target(b3.hash(), Some(2))).unwrap() + ); + assert_eq!( + None, + block_on(longest_chain_select.finality_target(b4.hash(), Some(2))).unwrap() + ); + assert_eq!( + None, + block_on(longest_chain_select.finality_target(c3.hash(), Some(2))).unwrap() + ); + assert_eq!( + d2.hash(), + block_on(longest_chain_select.finality_target(d2.hash(), Some(2))) + .unwrap() + .unwrap() + ); // search only blocks with number <= 1 + assert_eq!( + a1.hash(), + block_on(longest_chain_select.finality_target(genesis_hash, Some(1))) + .unwrap() + .unwrap() + ); + assert_eq!( + a1.hash(), + block_on(longest_chain_select.finality_target(a1.hash(), Some(1))) + .unwrap() + .unwrap() + ); + assert_eq!( + None, + block_on(longest_chain_select.finality_target(a2.hash(), Some(1))).unwrap() + ); + assert_eq!( + None, + block_on(longest_chain_select.finality_target(a3.hash(), Some(1))).unwrap() + ); + assert_eq!( + None, + block_on(longest_chain_select.finality_target(a4.hash(), Some(1))).unwrap() + ); + assert_eq!( + None, + block_on(longest_chain_select.finality_target(a5.hash(), 
Some(1))).unwrap() + ); - assert_eq!(a1.hash(), longest_chain_select.finality_target( - genesis_hash, Some(1)).unwrap().unwrap()); - assert_eq!(a1.hash(), longest_chain_select.finality_target( - a1.hash(), Some(1)).unwrap().unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a2.hash(), Some(1)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a3.hash(), Some(1)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a4.hash(), Some(1)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a5.hash(), Some(1)).unwrap()); - - assert_eq!(None, longest_chain_select.finality_target( - b2.hash(), Some(1)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - b3.hash(), Some(1)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - b4.hash(), Some(1)).unwrap()); - - assert_eq!(None, longest_chain_select.finality_target( - c3.hash(), Some(1)).unwrap()); - - assert_eq!(None, longest_chain_select.finality_target( - d2.hash(), Some(1)).unwrap()); + assert_eq!( + None, + block_on(longest_chain_select.finality_target(b2.hash(), Some(1))).unwrap() + ); + assert_eq!( + None, + block_on(longest_chain_select.finality_target(b3.hash(), Some(1))).unwrap() + ); + assert_eq!( + None, + block_on(longest_chain_select.finality_target(b4.hash(), Some(1))).unwrap() + ); + assert_eq!( + None, + block_on(longest_chain_select.finality_target(c3.hash(), Some(1))).unwrap() + ); + assert_eq!( + None, + block_on(longest_chain_select.finality_target(d2.hash(), Some(1))).unwrap() + ); // search only blocks with number <= 0 - - assert_eq!(genesis_hash, longest_chain_select.finality_target( - genesis_hash, Some(0)).unwrap().unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a1.hash(), Some(0)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a2.hash(), Some(0)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a3.hash(), Some(0)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a4.hash(), Some(0)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a5.hash(), Some(0)).unwrap()); - - assert_eq!(None, longest_chain_select.finality_target( - b2.hash(), Some(0)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - b3.hash(), Some(0)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - b4.hash(), Some(0)).unwrap()); - + assert_eq!( + genesis_hash, + block_on(longest_chain_select.finality_target(genesis_hash, Some(0))) + .unwrap() + .unwrap() + ); assert_eq!( None, - longest_chain_select.finality_target(c3.hash().clone(), Some(0)).unwrap(), + block_on(longest_chain_select.finality_target(a1.hash(), Some(0))).unwrap() + ); + assert_eq!( + None, + block_on(longest_chain_select.finality_target(a2.hash(), Some(0))).unwrap() + ); + assert_eq!( + None, + block_on(longest_chain_select.finality_target(a3.hash(), Some(0))).unwrap() + ); + assert_eq!( + None, + block_on(longest_chain_select.finality_target(a4.hash(), Some(0))).unwrap() + ); + assert_eq!( + None, + block_on(longest_chain_select.finality_target(a5.hash(), Some(0))).unwrap() + ); + assert_eq!( + None, + block_on(longest_chain_select.finality_target(b2.hash(), Some(0))).unwrap() + ); + assert_eq!( + None, + block_on(longest_chain_select.finality_target(b3.hash(), Some(0))).unwrap() + ); + assert_eq!( + None, + block_on(longest_chain_select.finality_target(b4.hash(), Some(0))).unwrap() ); - assert_eq!( None, - 
longest_chain_select.finality_target(d2.hash().clone(), Some(0)).unwrap(), + block_on(longest_chain_select.finality_target(c3.hash().clone(), Some(0))).unwrap(), + ); + assert_eq!( + None, + block_on(longest_chain_select.finality_target(d2.hash().clone(), Some(0))).unwrap(), ); } @@ -972,18 +1218,30 @@ fn best_containing_on_longest_chain_with_max_depth_higher_than_best() { let (mut client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); // G -> A1 - let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let a1 = client + .new_block(Default::default()) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 - let a2 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let a2 = client + .new_block(Default::default()) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); let genesis_hash = client.chain_info().genesis_hash; assert_eq!( a2.hash(), - longest_chain_select.finality_target(genesis_hash, Some(10)).unwrap().unwrap(), + block_on(longest_chain_select.finality_target(genesis_hash, Some(10))) + .unwrap() + .unwrap(), ); } @@ -1181,7 +1439,7 @@ fn finalizing_diverged_block_should_trigger_reorg() { // `SelectChain` should report B2 as best block though assert_eq!( - select_chain.best_chain().unwrap().hash(), + block_on(select_chain.best_chain()).unwrap().hash(), b2.hash(), ); diff --git a/primitives/api/test/Cargo.toml b/primitives/api/test/Cargo.toml index 5866d44bd479b..d0c45fb7545bc 100644 --- a/primitives/api/test/Cargo.toml +++ b/primitives/api/test/Cargo.toml @@ -27,9 +27,10 @@ rustversion = "1.0.0" [dev-dependencies] criterion = "0.3.0" -substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } -sp-core = { version = "3.0.0", path = "../../core" } +futures = "0.3.9" log = "0.4.14" +sp-core = { version = "3.0.0", path = "../../core" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } [[bench]] name = "bench" diff --git a/primitives/api/test/tests/runtime_calls.rs b/primitives/api/test/tests/runtime_calls.rs index e10e1b34012a8..562735834ddca 100644 --- a/primitives/api/test/tests/runtime_calls.rs +++ b/primitives/api/test/tests/runtime_calls.rs @@ -160,10 +160,15 @@ fn record_proof_works() { .build_with_longest_chain(); let block_id = BlockId::Number(client.chain_info().best_number); - let storage_root = longest_chain.best_chain().unwrap().state_root().clone(); + let storage_root = futures::executor::block_on(longest_chain.best_chain()) + .unwrap() + .state_root() + .clone(); let runtime_code = sp_core::traits::RuntimeCode { - code_fetcher: &sp_core::traits::WrappedRuntimeCode(client.code_at(&block_id).unwrap().into()), + code_fetcher: &sp_core::traits::WrappedRuntimeCode( + client.code_at(&block_id).unwrap().into(), + ), hash: vec![1], heap_pages: None, }; diff --git a/primitives/consensus/common/src/block_import.rs b/primitives/consensus/common/src/block_import.rs index 31c3eb74457c3..67978232009e8 100644 --- a/primitives/consensus/common/src/block_import.rs +++ b/primitives/consensus/common/src/block_import.rs @@ -363,15 +363,16 @@ impl BlockImpo } /// Justification import trait +#[async_trait::async_trait] pub trait JustificationImport { type Error: std::error::Error + Send + 'static; /// Called by the import queue when it is started. 
Returns a list of justifications to request /// from the network. - fn on_start(&mut self) -> Vec<(B::Hash, NumberFor)> { Vec::new() } + async fn on_start(&mut self) -> Vec<(B::Hash, NumberFor)>; /// Import a Block justification and finalize the given block. - fn import_justification( + async fn import_justification( &mut self, hash: B::Hash, number: NumberFor, diff --git a/primitives/consensus/common/src/import_queue/basic_queue.rs b/primitives/consensus/common/src/import_queue/basic_queue.rs index 55fc2eac40ca6..3af983952af75 100644 --- a/primitives/consensus/common/src/import_queue/basic_queue.rs +++ b/primitives/consensus/common/src/import_queue/basic_queue.rs @@ -220,16 +220,16 @@ impl BlockImportWorker { metrics, }; - // Let's initialize `justification_import` - if let Some(justification_import) = worker.justification_import.as_mut() { - for (hash, number) in justification_import.on_start() { - worker.result_sender.request_justification(&hash, number); - } - } - let delay_between_blocks = Duration::default(); let future = async move { + // Let's initialize `justification_import` + if let Some(justification_import) = worker.justification_import.as_mut() { + for (hash, number) in justification_import.on_start().await { + worker.result_sender.request_justification(&hash, number); + } + } + let block_import_process = block_import_process( block_import, verifier, @@ -254,15 +254,18 @@ impl BlockImportWorker { // Make sure to first process all justifications while let Poll::Ready(justification) = futures::poll!(justification_port.next()) { match justification { - Some(ImportJustification(who, hash, number, justification)) => - worker.import_justification(who, hash, number, justification), + Some(ImportJustification(who, hash, number, justification)) => { + worker + .import_justification(who, hash, number, justification) + .await + } None => { log::debug!( target: "block-import", "Stopping block import because justification channel was closed!", ); - return - }, + return; + } } } @@ -278,7 +281,7 @@ impl BlockImportWorker { (future, justification_sender, block_import_sender) } - fn import_justification( + async fn import_justification( &mut self, who: Origin, hash: B::Hash, @@ -286,8 +289,11 @@ impl BlockImportWorker { justification: Justification, ) { let started = wasm_timer::Instant::now(); - let success = self.justification_import.as_mut().map(|justification_import| { - justification_import.import_justification(hash, number, justification) + + let success = match self.justification_import.as_mut() { + Some(justification_import) => justification_import + .import_justification(hash, number, justification) + .await .map_err(|e| { debug!( target: "sync", @@ -298,14 +304,19 @@ impl BlockImportWorker { who, ); e - }).is_ok() - }).unwrap_or(false); + }) + .is_ok(), + None => false, + }; if let Some(metrics) = self.metrics.as_ref() { - metrics.justification_import_time.observe(started.elapsed().as_secs_f64()); + metrics + .justification_import_time + .observe(started.elapsed().as_secs_f64()); } - self.result_sender.justification_imported(who, &hash, number, success); + self.result_sender + .justification_imported(who, &hash, number, success); } } @@ -472,10 +483,15 @@ mod tests { } } + #[async_trait::async_trait] impl JustificationImport for () { type Error = crate::Error; - fn import_justification( + async fn on_start(&mut self) -> Vec<(Hash, BlockNumber)> { + Vec::new() + } + + async fn import_justification( &mut self, _hash: Hash, _number: BlockNumber, diff --git 
a/primitives/consensus/common/src/select_chain.rs b/primitives/consensus/common/src/select_chain.rs index 11f6fbeb54d37..e99a6756175d2 100644 --- a/primitives/consensus/common/src/select_chain.rs +++ b/primitives/consensus/common/src/select_chain.rs @@ -33,23 +33,24 @@ use sp_runtime::traits::{Block as BlockT, NumberFor}; /// some implementations. /// /// Non-deterministically finalizing chains may only use the `_authoring` functions. +#[async_trait::async_trait] pub trait SelectChain: Sync + Send + Clone { - - /// Get all leaves of the chain: block hashes that have no children currently. + /// Get all leaves of the chain, i.e. block hashes that have no children currently. /// Leaves that can never be finalized will not be returned. - fn leaves(&self) -> Result::Hash>, Error>; + async fn leaves(&self) -> Result::Hash>, Error>; /// Among those `leaves` deterministically pick one chain as the generally - /// best chain to author new blocks upon and probably finalize. - fn best_chain(&self) -> Result<::Header, Error>; + /// best chain to author new blocks upon and probably (but not necessarily) + /// finalize. + async fn best_chain(&self) -> Result<::Header, Error>; /// Get the best descendent of `target_hash` that we should attempt to /// finalize next, if any. It is valid to return the given `target_hash` /// itself if no better descendent exists. - fn finality_target( + async fn finality_target( &self, target_hash: ::Hash, - _maybe_max_number: Option> + _maybe_max_number: Option>, ) -> Result::Hash>, Error> { Ok(Some(target_hash)) } From d6c33e7ec313f9bd5e319dc0a5a3ace5543f9617 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Mon, 21 Jun 2021 10:57:43 +0100 Subject: [PATCH 21/67] New Weights for All Pallets (#9148) * Create run_benchmarks.sh * Update run_benchmarks.sh * new weights * Delete run_benchmarks.sh * wrong folder * remove grandpa weight * Update weights.rs --- frame/assets/src/weights.rs | 118 +- frame/balances/src/weights.rs | 26 +- frame/bounties/src/weights.rs | 60 +- frame/collective/src/weights.rs | 218 +-- frame/contracts/src/weights.rs | 1272 ++++++++--------- frame/democracy/src/weights.rs | 217 +-- .../src/weights.rs | 102 +- frame/elections-phragmen/src/weights.rs | 124 +- frame/gilt/src/weights.rs | 54 +- frame/identity/src/weights.rs | 272 ++-- frame/im-online/src/weights.rs | 25 +- frame/indices/src/weights.rs | 42 +- frame/lottery/src/weights.rs | 36 +- frame/membership/src/weights.rs | 58 +- frame/multisig/src/weights.rs | 154 +- frame/proxy/src/weights.rs | 156 +- frame/scheduler/src/weights.rs | 60 +- frame/session/src/weights.rs | 24 +- frame/staking/src/weights.rs | 238 +-- frame/system/src/weights.rs | 43 +- frame/timestamp/src/weights.rs | 28 +- frame/tips/src/weights.rs | 54 +- frame/transaction-storage/src/weights.rs | 22 +- frame/treasury/src/weights.rs | 34 +- frame/uniques/src/weights.rs | 114 +- frame/utility/src/weights.rs | 26 +- frame/vesting/src/weights.rs | 84 +- 27 files changed, 1851 insertions(+), 1810 deletions(-) diff --git a/frame/assets/src/weights.rs b/frame/assets/src/weights.rs index 77db7fa4f05ba..ae5462288a306 100644 --- a/frame/assets/src/weights.rs +++ b/frame/assets/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_assets //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 -//! DATE: 2021-06-10, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2021-06-19, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -73,23 +73,23 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn create() -> Weight { - (52_735_000 as Weight) + (43_277_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_create() -> Weight { - (26_570_000 as Weight) + (21_829_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn destroy(c: u32, s: u32, a: u32, ) -> Weight { (0 as Weight) - // Standard Error: 93_000 - .saturating_add((31_110_000 as Weight).saturating_mul(c as Weight)) - // Standard Error: 93_000 - .saturating_add((38_908_000 as Weight).saturating_mul(s as Weight)) - // Standard Error: 935_000 - .saturating_add((42_765_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 34_000 + .saturating_add((22_206_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 34_000 + .saturating_add((28_086_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 346_000 + .saturating_add((32_168_000 as Weight).saturating_mul(a as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().reads((2 as Weight).saturating_mul(c as Weight))) .saturating_add(T::DbWeight::get().reads((2 as Weight).saturating_mul(s as Weight))) @@ -100,106 +100,106 @@ impl WeightInfo for SubstrateWeight { .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(a as Weight))) } fn mint() -> Weight { - (58_399_000 as Weight) + (45_983_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn burn() -> Weight { - (65_917_000 as Weight) + (52_925_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn transfer() -> Weight { - (100_407_000 as Weight) + (80_375_000 as Weight) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn transfer_keep_alive() -> Weight { - (84_243_000 as Weight) + (67_688_000 as Weight) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn force_transfer() -> Weight { - (100_407_000 as Weight) + (80_267_000 as Weight) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn freeze() -> Weight { - (37_831_000 as Weight) + (30_541_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn thaw() -> Weight { - (37_660_000 as Weight) + (30_494_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn freeze_asset() -> Weight { - (27_175_000 as Weight) + (22_025_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn thaw_asset() -> Weight { - (26_884_000 as Weight) + (21_889_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn transfer_ownership() -> Weight { - (31_877_000 as Weight) + (24_939_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn 
set_team() -> Weight { - (27_947_000 as Weight) + (21_959_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_metadata(_n: u32, s: u32, ) -> Weight { - (57_993_000 as Weight) + (47_510_000 as Weight) // Standard Error: 0 - .saturating_add((12_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((6_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn clear_metadata() -> Weight { - (57_820_000 as Weight) + (46_085_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_set_metadata(_n: u32, s: u32, ) -> Weight { - (30_830_000 as Weight) + (24_297_000 as Weight) // Standard Error: 0 .saturating_add((7_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_clear_metadata() -> Weight { - (57_292_000 as Weight) + (45_787_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_asset_status() -> Weight { - (26_750_000 as Weight) + (20_574_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn approve_transfer() -> Weight { - (65_598_000 as Weight) + (53_893_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn transfer_approved() -> Weight { - (131_312_000 as Weight) + (106_171_000 as Weight) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(5 as Weight)) } fn cancel_approval() -> Weight { - (66_904_000 as Weight) + (55_213_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn force_cancel_approval() -> Weight { - (67_525_000 as Weight) + (55_946_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } @@ -208,23 +208,23 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { fn create() -> Weight { - (52_735_000 as Weight) + (43_277_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_create() -> Weight { - (26_570_000 as Weight) + (21_829_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn destroy(c: u32, s: u32, a: u32, ) -> Weight { (0 as Weight) - // Standard Error: 93_000 - .saturating_add((31_110_000 as Weight).saturating_mul(c as Weight)) - // Standard Error: 93_000 - .saturating_add((38_908_000 as Weight).saturating_mul(s as Weight)) - // Standard Error: 935_000 - .saturating_add((42_765_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 34_000 + .saturating_add((22_206_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 34_000 + .saturating_add((28_086_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 346_000 + .saturating_add((32_168_000 as Weight).saturating_mul(a as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().reads((2 as Weight).saturating_mul(c as Weight))) 
.saturating_add(RocksDbWeight::get().reads((2 as Weight).saturating_mul(s as Weight))) @@ -235,106 +235,106 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(a as Weight))) } fn mint() -> Weight { - (58_399_000 as Weight) + (45_983_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn burn() -> Weight { - (65_917_000 as Weight) + (52_925_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn transfer() -> Weight { - (100_407_000 as Weight) + (80_375_000 as Weight) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } fn transfer_keep_alive() -> Weight { - (84_243_000 as Weight) + (67_688_000 as Weight) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } fn force_transfer() -> Weight { - (100_407_000 as Weight) + (80_267_000 as Weight) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } fn freeze() -> Weight { - (37_831_000 as Weight) + (30_541_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn thaw() -> Weight { - (37_660_000 as Weight) + (30_494_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn freeze_asset() -> Weight { - (27_175_000 as Weight) + (22_025_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn thaw_asset() -> Weight { - (26_884_000 as Weight) + (21_889_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn transfer_ownership() -> Weight { - (31_877_000 as Weight) + (24_939_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn set_team() -> Weight { - (27_947_000 as Weight) + (21_959_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn set_metadata(_n: u32, s: u32, ) -> Weight { - (57_993_000 as Weight) + (47_510_000 as Weight) // Standard Error: 0 - .saturating_add((12_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((6_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn clear_metadata() -> Weight { - (57_820_000 as Weight) + (46_085_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_set_metadata(_n: u32, s: u32, ) -> Weight { - (30_830_000 as Weight) + (24_297_000 as Weight) // Standard Error: 0 .saturating_add((7_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_clear_metadata() -> Weight { - (57_292_000 as Weight) + (45_787_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_asset_status() -> Weight { - (26_750_000 as Weight) + (20_574_000 as Weight) 
.saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn approve_transfer() -> Weight { - (65_598_000 as Weight) + (53_893_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn transfer_approved() -> Weight { - (131_312_000 as Weight) + (106_171_000 as Weight) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(5 as Weight)) } fn cancel_approval() -> Weight { - (66_904_000 as Weight) + (55_213_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn force_cancel_approval() -> Weight { - (67_525_000 as Weight) + (55_946_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } diff --git a/frame/balances/src/weights.rs b/frame/balances/src/weights.rs index cf1d7dff82848..79e6445dd6bb6 100644 --- a/frame/balances/src/weights.rs +++ b/frame/balances/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_balances //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 -//! DATE: 2021-06-04, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2021-06-19, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -56,32 +56,32 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn transfer() -> Weight { - (91_896_000 as Weight) + (73_268_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn transfer_keep_alive() -> Weight { - (67_779_000 as Weight) + (54_881_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_balance_creating() -> Weight { - (36_912_000 as Weight) + (29_853_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_balance_killing() -> Weight { - (44_416_000 as Weight) + (36_007_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_transfer() -> Weight { - (90_811_000 as Weight) + (72_541_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn transfer_all() -> Weight { - (84_170_000 as Weight) + (67_360_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -90,32 +90,32 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { fn transfer() -> Weight { - (91_896_000 as Weight) + (73_268_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn transfer_keep_alive() -> Weight { - (67_779_000 as Weight) + (54_881_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn set_balance_creating() -> Weight { - (36_912_000 as Weight) + (29_853_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) 
.saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn set_balance_killing() -> Weight { - (44_416_000 as Weight) + (36_007_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_transfer() -> Weight { - (90_811_000 as Weight) + (72_541_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn transfer_all() -> Weight { - (84_170_000 as Weight) + (67_360_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } diff --git a/frame/bounties/src/weights.rs b/frame/bounties/src/weights.rs index 50d76739a938a..9b50d438923c2 100644 --- a/frame/bounties/src/weights.rs +++ b/frame/bounties/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// Copyright (C) 2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,12 +17,12 @@ //! Autogenerated weights for pallet_bounties //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-12-16, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 +//! DATE: 2021-06-19, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: -// ./target/release/substrate +// target/release/substrate // benchmark // --chain=dev // --steps=50 @@ -61,61 +61,61 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn propose_bounty(d: u32, ) -> Weight { - (64_778_000 as Weight) + (44_351_000 as Weight) // Standard Error: 0 .saturating_add((1_000 as Weight).saturating_mul(d as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn approve_bounty() -> Weight { - (18_293_000 as Weight) + (12_417_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn propose_curator() -> Weight { - (14_248_000 as Weight) + (9_692_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn unassign_curator() -> Weight { - (52_100_000 as Weight) + (41_211_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn accept_curator() -> Weight { - (52_564_000 as Weight) + (37_376_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn award_bounty() -> Weight { - (37_426_000 as Weight) + (25_525_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn claim_bounty() -> Weight { - (176_077_000 as Weight) + (125_495_000 as Weight) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(5 as Weight)) } fn close_bounty_proposed() -> Weight { - (51_162_000 as Weight) + (40_464_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn close_bounty_active() -> Weight { - 
(116_907_000 as Weight) + (84_042_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn extend_bounty_expiry() -> Weight { - (36_419_000 as Weight) + (25_114_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn spend_funds(b: u32, ) -> Weight { - (7_562_000 as Weight) - // Standard Error: 16_000 - .saturating_add((77_328_000 as Weight).saturating_mul(b as Weight)) + (351_000 as Weight) + // Standard Error: 13_000 + .saturating_add((58_724_000 as Weight).saturating_mul(b as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(b as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) @@ -126,61 +126,61 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { fn propose_bounty(d: u32, ) -> Weight { - (64_778_000 as Weight) + (44_351_000 as Weight) // Standard Error: 0 .saturating_add((1_000 as Weight).saturating_mul(d as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } fn approve_bounty() -> Weight { - (18_293_000 as Weight) + (12_417_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn propose_curator() -> Weight { - (14_248_000 as Weight) + (9_692_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn unassign_curator() -> Weight { - (52_100_000 as Weight) + (41_211_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn accept_curator() -> Weight { - (52_564_000 as Weight) + (37_376_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn award_bounty() -> Weight { - (37_426_000 as Weight) + (25_525_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn claim_bounty() -> Weight { - (176_077_000 as Weight) + (125_495_000 as Weight) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(5 as Weight)) } fn close_bounty_proposed() -> Weight { - (51_162_000 as Weight) + (40_464_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn close_bounty_active() -> Weight { - (116_907_000 as Weight) + (84_042_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } fn extend_bounty_expiry() -> Weight { - (36_419_000 as Weight) + (25_114_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn spend_funds(b: u32, ) -> Weight { - (7_562_000 as Weight) - // Standard Error: 16_000 - .saturating_add((77_328_000 as Weight).saturating_mul(b as Weight)) + (351_000 as Weight) + // Standard Error: 13_000 + .saturating_add((58_724_000 as Weight).saturating_mul(b as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(b as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) diff 
--git a/frame/collective/src/weights.rs b/frame/collective/src/weights.rs index 7bdce04d26485..46bd999344add 100644 --- a/frame/collective/src/weights.rs +++ b/frame/collective/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// Copyright (C) 2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,9 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Weights for pallet_collective -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-10-27, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! Autogenerated weights for pallet_collective +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 +//! DATE: 2021-06-19, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -43,17 +44,16 @@ use sp_std::marker::PhantomData; /// Weight functions needed for pallet_collective. pub trait WeightInfo { - fn set_members(_m: u32, _n: u32, _p: u32, ) -> Weight; - fn execute(_b: u32, _m: u32, ) -> Weight; - fn propose_execute(_b: u32, _m: u32, ) -> Weight; - fn propose_proposed(_b: u32, _m: u32, _p: u32, ) -> Weight; - fn vote(_m: u32, ) -> Weight; - fn close_early_disapproved(_m: u32, _p: u32, ) -> Weight; - fn close_early_approved(_b: u32, _m: u32, _p: u32, ) -> Weight; - fn close_disapproved(_m: u32, _p: u32, ) -> Weight; - fn close_approved(_b: u32, _m: u32, _p: u32, ) -> Weight; - fn disapprove_proposal(_p: u32, ) -> Weight; - + fn set_members(m: u32, n: u32, p: u32, ) -> Weight; + fn execute(b: u32, m: u32, ) -> Weight; + fn propose_execute(b: u32, m: u32, ) -> Weight; + fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight; + fn vote(m: u32, ) -> Weight; + fn close_early_disapproved(m: u32, p: u32, ) -> Weight; + fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight; + fn close_disapproved(m: u32, p: u32, ) -> Weight; + fn close_approved(b: u32, m: u32, p: u32, ) -> Weight; + fn disapprove_proposal(p: u32, ) -> Weight; } /// Weights for pallet_collective using the Substrate node and recommended hardware. 
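Each generated entry in this file follows the same pattern: a constant base weight measured by the benchmark CLI, an optional per-parameter slope (with its reported standard error), and a fixed number of storage reads and writes priced through `T::DbWeight` (or `RocksDbWeight` for the `()` fallback). As a rough sketch only, assuming stand-in `READ`/`WRITE` constants rather than values taken from this patch, the `vote(m)` entry in the hunk below expands to:

    // Illustrative sketch, not part of the diff: how one generated weight
    // formula composes. READ/WRITE are assumed per-operation costs, not
    // benchmark output from this patch.
    type Weight = u64;

    const READ: Weight = 25_000_000; // assumed cost of one storage read
    const WRITE: Weight = 100_000_000; // assumed cost of one storage write

    fn vote(m: u32) -> Weight {
        // base extrinsic time + per-member slope + 2 reads + 1 write
        (43_750_000 as Weight)
            .saturating_add((198_000 as Weight).saturating_mul(m as Weight))
            .saturating_add(READ.saturating_mul(2))
            .saturating_add(WRITE)
    }

    fn main() {
        // e.g. the weight charged for a vote in a 100-member collective
        println!("{}", vote(100));
    }

The runtime only ever reaches these functions through the `WeightInfo` trait above, so swapping in re-benchmarked numbers, as this patch does, changes fee and block-fullness accounting without touching pallet logic.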
@@ -61,170 +61,194 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn set_members(m: u32, n: u32, p: u32, ) -> Weight { (0 as Weight) - .saturating_add((20_933_000 as Weight).saturating_mul(m as Weight)) - .saturating_add((254_000 as Weight).saturating_mul(n as Weight)) - .saturating_add((28_233_000 as Weight).saturating_mul(p as Weight)) + // Standard Error: 5_000 + .saturating_add((15_266_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 5_000 + .saturating_add((39_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 5_000 + .saturating_add((20_899_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(p as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) } fn execute(b: u32, m: u32, ) -> Weight { - (31_147_000 as Weight) - .saturating_add((4_000 as Weight).saturating_mul(b as Weight)) - .saturating_add((115_000 as Weight).saturating_mul(m as Weight)) + (21_945_000 as Weight) + // Standard Error: 0 + .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 0 + .saturating_add((93_000 as Weight).saturating_mul(m as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) - } fn propose_execute(b: u32, m: u32, ) -> Weight { - (38_774_000 as Weight) - .saturating_add((4_000 as Weight).saturating_mul(b as Weight)) - .saturating_add((226_000 as Weight).saturating_mul(m as Weight)) + (26_316_000 as Weight) + // Standard Error: 0 + .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 0 + .saturating_add((184_000 as Weight).saturating_mul(m as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) - } fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { - (64_230_000 as Weight) - .saturating_add((5_000 as Weight).saturating_mul(b as Weight)) - .saturating_add((138_000 as Weight).saturating_mul(m as Weight)) - .saturating_add((637_000 as Weight).saturating_mul(p as Weight)) + (42_664_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 2_000 + .saturating_add((166_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 2_000 + .saturating_add((435_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) - } fn vote(m: u32, ) -> Weight { - (57_051_000 as Weight) - .saturating_add((220_000 as Weight).saturating_mul(m as Weight)) + (43_750_000 as Weight) + // Standard Error: 3_000 + .saturating_add((198_000 as Weight).saturating_mul(m as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn close_early_disapproved(m: u32, p: u32, ) -> Weight { - (61_406_000 as Weight) - .saturating_add((225_000 as Weight).saturating_mul(m as Weight)) - .saturating_add((630_000 as Weight).saturating_mul(p as Weight)) + (44_153_000 as Weight) + // Standard Error: 0 + .saturating_add((185_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 0 + .saturating_add((454_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { - (92_864_000 as Weight) - 
.saturating_add((1_000 as Weight).saturating_mul(b as Weight)) - .saturating_add((233_000 as Weight).saturating_mul(m as Weight)) - .saturating_add((597_000 as Weight).saturating_mul(p as Weight)) + (65_478_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 2_000 + .saturating_add((167_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 2_000 + .saturating_add((434_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } fn close_disapproved(m: u32, p: u32, ) -> Weight { - (67_942_000 as Weight) - .saturating_add((232_000 as Weight).saturating_mul(m as Weight)) - .saturating_add((636_000 as Weight).saturating_mul(p as Weight)) + (49_001_000 as Weight) + // Standard Error: 0 + .saturating_add((189_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 0 + .saturating_add((464_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { - (99_742_000 as Weight) - .saturating_add((1_000 as Weight).saturating_mul(b as Weight)) - .saturating_add((233_000 as Weight).saturating_mul(m as Weight)) - .saturating_add((598_000 as Weight).saturating_mul(p as Weight)) + (65_049_000 as Weight) + // Standard Error: 0 + .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 1_000 + .saturating_add((192_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 1_000 + .saturating_add((469_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } fn disapprove_proposal(p: u32, ) -> Weight { - (36_628_000 as Weight) - .saturating_add((640_000 as Weight).saturating_mul(p as Weight)) + (27_288_000 as Weight) + // Standard Error: 1_000 + .saturating_add((477_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - } // For backwards compatibility and tests impl WeightInfo for () { fn set_members(m: u32, n: u32, p: u32, ) -> Weight { (0 as Weight) - .saturating_add((20_933_000 as Weight).saturating_mul(m as Weight)) - .saturating_add((254_000 as Weight).saturating_mul(n as Weight)) - .saturating_add((28_233_000 as Weight).saturating_mul(p as Weight)) + // Standard Error: 5_000 + .saturating_add((15_266_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 5_000 + .saturating_add((39_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 5_000 + .saturating_add((20_899_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(p as Weight))) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) } fn execute(b: u32, m: u32, ) -> Weight { - (31_147_000 as Weight) - .saturating_add((4_000 as Weight).saturating_mul(b as Weight)) - .saturating_add((115_000 as Weight).saturating_mul(m as Weight)) + (21_945_000 as Weight) + // Standard Error: 0 + .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 0 + .saturating_add((93_000 as Weight).saturating_mul(m as Weight)) 
.saturating_add(RocksDbWeight::get().reads(1 as Weight)) - } fn propose_execute(b: u32, m: u32, ) -> Weight { - (38_774_000 as Weight) - .saturating_add((4_000 as Weight).saturating_mul(b as Weight)) - .saturating_add((226_000 as Weight).saturating_mul(m as Weight)) + (26_316_000 as Weight) + // Standard Error: 0 + .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 0 + .saturating_add((184_000 as Weight).saturating_mul(m as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - } fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { - (64_230_000 as Weight) - .saturating_add((5_000 as Weight).saturating_mul(b as Weight)) - .saturating_add((138_000 as Weight).saturating_mul(m as Weight)) - .saturating_add((637_000 as Weight).saturating_mul(p as Weight)) + (42_664_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 2_000 + .saturating_add((166_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 2_000 + .saturating_add((435_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) - } fn vote(m: u32, ) -> Weight { - (57_051_000 as Weight) - .saturating_add((220_000 as Weight).saturating_mul(m as Weight)) + (43_750_000 as Weight) + // Standard Error: 3_000 + .saturating_add((198_000 as Weight).saturating_mul(m as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn close_early_disapproved(m: u32, p: u32, ) -> Weight { - (61_406_000 as Weight) - .saturating_add((225_000 as Weight).saturating_mul(m as Weight)) - .saturating_add((630_000 as Weight).saturating_mul(p as Weight)) + (44_153_000 as Weight) + // Standard Error: 0 + .saturating_add((185_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 0 + .saturating_add((454_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - } fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { - (92_864_000 as Weight) - .saturating_add((1_000 as Weight).saturating_mul(b as Weight)) - .saturating_add((233_000 as Weight).saturating_mul(m as Weight)) - .saturating_add((597_000 as Weight).saturating_mul(p as Weight)) + (65_478_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 2_000 + .saturating_add((167_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 2_000 + .saturating_add((434_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - } fn close_disapproved(m: u32, p: u32, ) -> Weight { - (67_942_000 as Weight) - .saturating_add((232_000 as Weight).saturating_mul(m as Weight)) - .saturating_add((636_000 as Weight).saturating_mul(p as Weight)) + (49_001_000 as Weight) + // Standard Error: 0 + .saturating_add((189_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 0 + .saturating_add((464_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - } fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { - (99_742_000 as Weight) - .saturating_add((1_000 as Weight).saturating_mul(b as Weight)) - .saturating_add((233_000 as 
Weight).saturating_mul(m as Weight)) - .saturating_add((598_000 as Weight).saturating_mul(p as Weight)) + (65_049_000 as Weight) + // Standard Error: 0 + .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 1_000 + .saturating_add((192_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 1_000 + .saturating_add((469_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - } fn disapprove_proposal(p: u32, ) -> Weight { - (36_628_000 as Weight) - .saturating_add((640_000 as Weight).saturating_mul(p as Weight)) + (27_288_000 as Weight) + // Standard Error: 1_000 + .saturating_add((477_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - } - } diff --git a/frame/contracts/src/weights.rs b/frame/contracts/src/weights.rs index b96a3cad5b735..5edb4170e4eab 100644 --- a/frame/contracts/src/weights.rs +++ b/frame/contracts/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_contracts //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 -//! DATE: 2021-05-11, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2021-06-19, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -62,8 +62,6 @@ pub trait WeightInfo { fn seal_rent_allowance(r: u32, ) -> Weight; fn seal_block_number(r: u32, ) -> Weight; fn seal_now(r: u32, ) -> Weight; - fn seal_rent_params(r: u32, ) -> Weight; - fn seal_rent_status(r: u32, ) -> Weight; fn seal_weight_to_fee(r: u32, ) -> Weight; fn seal_gas(r: u32, ) -> Weight; fn seal_input(r: u32, ) -> Weight; @@ -154,286 +152,272 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn on_initialize() -> Weight { - (3_656_000 as Weight) + (3_603_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) } fn on_initialize_per_trie_key(k: u32, ) -> Weight { (0 as Weight) - // Standard Error: 3_000 - .saturating_add((2_241_000 as Weight).saturating_mul(k as Weight)) + // Standard Error: 2_000 + .saturating_add((2_217_000 as Weight).saturating_mul(k as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) } fn on_initialize_per_queue_item(q: u32, ) -> Weight { - (36_820_000 as Weight) - // Standard Error: 4_000 - .saturating_add((34_550_000 as Weight).saturating_mul(q as Weight)) + (0 as Weight) + // Standard Error: 6_000 + .saturating_add((36_769_000 as Weight).saturating_mul(q as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn instrument(c: u32, ) -> Weight { - (42_348_000 as Weight) - // Standard Error: 185_000 - .saturating_add((95_664_000 as Weight).saturating_mul(c as Weight)) + (54_463_000 as Weight) + // Standard Error: 105_000 + .saturating_add((77_542_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn instantiate_with_code(c: u32, s: u32, ) -> Weight { - (210_852_000 as Weight) - // Standard Error: 138_000 - .saturating_add((135_241_000 as 
Weight).saturating_mul(c as Weight)) - // Standard Error: 9_000 - .saturating_add((1_846_000 as Weight).saturating_mul(s as Weight)) + (184_114_000 as Weight) + // Standard Error: 82_000 + .saturating_add((117_247_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 5_000 + .saturating_add((1_542_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn instantiate(c: u32, s: u32, ) -> Weight { - (217_380_000 as Weight) - // Standard Error: 6_000 - .saturating_add((8_483_000 as Weight).saturating_mul(c as Weight)) + (183_501_000 as Weight) + // Standard Error: 2_000 + .saturating_add((5_645_000 as Weight).saturating_mul(c as Weight)) // Standard Error: 0 - .saturating_add((1_752_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((1_473_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn call(c: u32, ) -> Weight { - (181_443_000 as Weight) - // Standard Error: 3_000 - .saturating_add((3_955_000 as Weight).saturating_mul(c as Weight)) + (173_411_000 as Weight) + // Standard Error: 1_000 + .saturating_add((2_639_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn claim_surcharge(c: u32, ) -> Weight { - (132_551_000 as Weight) - // Standard Error: 1_000 - .saturating_add((4_740_000 as Weight).saturating_mul(c as Weight)) + (125_839_000 as Weight) + // Standard Error: 0 + .saturating_add((3_123_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn seal_caller(r: u32, ) -> Weight { - (137_742_000 as Weight) - // Standard Error: 74_000 - .saturating_add((242_261_000 as Weight).saturating_mul(r as Weight)) + (131_793_000 as Weight) + // Standard Error: 84_000 + .saturating_add((231_138_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_address(r: u32, ) -> Weight { - (137_739_000 as Weight) - // Standard Error: 91_000 - .saturating_add((241_803_000 as Weight).saturating_mul(r as Weight)) + (129_995_000 as Weight) + // Standard Error: 78_000 + .saturating_add((231_839_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_gas_left(r: u32, ) -> Weight { - (139_631_000 as Weight) - // Standard Error: 83_000 - .saturating_add((236_790_000 as Weight).saturating_mul(r as Weight)) + (129_710_000 as Weight) + // Standard Error: 85_000 + .saturating_add((227_268_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_balance(r: u32, ) -> Weight { - (142_506_000 as Weight) - // Standard Error: 176_000 - .saturating_add((525_752_000 as Weight).saturating_mul(r as Weight)) + (133_445_000 as Weight) + // Standard Error: 144_000 + .saturating_add((487_125_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_value_transferred(r: u32, ) -> Weight { - (138_569_000 as Weight) - // Standard Error: 76_000 - .saturating_add((237_016_000 as 
Weight).saturating_mul(r as Weight)) + (129_299_000 as Weight) + // Standard Error: 82_000 + .saturating_add((227_118_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_minimum_balance(r: u32, ) -> Weight { - (134_713_000 as Weight) - // Standard Error: 81_000 - .saturating_add((237_962_000 as Weight).saturating_mul(r as Weight)) + (126_120_000 as Weight) + // Standard Error: 114_000 + .saturating_add((227_326_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_tombstone_deposit(r: u32, ) -> Weight { - (131_523_000 as Weight) - // Standard Error: 90_000 - .saturating_add((237_435_000 as Weight).saturating_mul(r as Weight)) + (130_934_000 as Weight) + // Standard Error: 89_000 + .saturating_add((226_638_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_rent_allowance(r: u32, ) -> Weight { - (141_574_000 as Weight) - // Standard Error: 86_000 - .saturating_add((238_102_000 as Weight).saturating_mul(r as Weight)) + (128_738_000 as Weight) + // Standard Error: 77_000 + .saturating_add((227_062_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_block_number(r: u32, ) -> Weight { - (140_240_000 as Weight) - // Standard Error: 101_000 - .saturating_add((236_568_000 as Weight).saturating_mul(r as Weight)) + (132_375_000 as Weight) + // Standard Error: 88_000 + .saturating_add((226_861_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_now(r: u32, ) -> Weight { - (138_265_000 as Weight) - // Standard Error: 91_000 - .saturating_add((237_187_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn seal_rent_params(r: u32, ) -> Weight { - (149_701_000 as Weight) - // Standard Error: 297_000 - .saturating_add((357_149_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn seal_rent_status(r: u32, ) -> Weight { - (146_863_000 as Weight) - // Standard Error: 191_000 - .saturating_add((638_683_000 as Weight).saturating_mul(r as Weight)) + (127_888_000 as Weight) + // Standard Error: 86_000 + .saturating_add((227_851_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_weight_to_fee(r: u32, ) -> Weight { - (144_278_000 as Weight) + (131_825_000 as Weight) // Standard Error: 149_000 - .saturating_add((470_264_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((420_149_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_gas(r: u32, ) -> Weight { - (111_361_000 as Weight) - // Standard Error: 157_000 - .saturating_add((118_441_000 as Weight).saturating_mul(r as Weight)) + (113_641_000 as Weight) + // Standard Error: 114_000 + .saturating_add((113_068_000 as Weight).saturating_mul(r as Weight)) 
.saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_input(r: u32, ) -> Weight { - (129_970_000 as Weight) - // Standard Error: 316_000 - .saturating_add((7_160_000 as Weight).saturating_mul(r as Weight)) + (122_982_000 as Weight) + // Standard Error: 74_000 + .saturating_add((6_828_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_input_per_kb(n: u32, ) -> Weight { - (139_275_000 as Weight) + (131_913_000 as Weight) // Standard Error: 0 - .saturating_add((250_000 as Weight).saturating_mul(n as Weight)) + .saturating_add((275_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_return(r: u32, ) -> Weight { - (119_240_000 as Weight) - // Standard Error: 57_000 - .saturating_add((4_347_000 as Weight).saturating_mul(r as Weight)) + (114_164_000 as Weight) + // Standard Error: 72_000 + .saturating_add((4_318_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_return_per_kb(n: u32, ) -> Weight { - (128_896_000 as Weight) - // Standard Error: 1_000 - .saturating_add((757_000 as Weight).saturating_mul(n as Weight)) + (123_940_000 as Weight) + // Standard Error: 0 + .saturating_add((664_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_terminate(r: u32, ) -> Weight { - (130_119_000 as Weight) - // Standard Error: 108_000 - .saturating_add((95_078_000 as Weight).saturating_mul(r as Weight)) + (123_340_000 as Weight) + // Standard Error: 99_000 + .saturating_add((89_126_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((2 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) .saturating_add(T::DbWeight::get().writes((4 as Weight).saturating_mul(r as Weight))) } fn seal_terminate_per_code_kb(c: u32, ) -> Weight { - (230_167_000 as Weight) - // Standard Error: 2_000 - .saturating_add((8_495_000 as Weight).saturating_mul(c as Weight)) + (217_499_000 as Weight) + // Standard Error: 1_000 + .saturating_add((5_608_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().writes(5 as Weight)) } fn seal_restore_to(r: u32, ) -> Weight { - (159_200_000 as Weight) - // Standard Error: 261_000 - .saturating_add((103_048_000 as Weight).saturating_mul(r as Weight)) + (149_019_000 as Weight) + // Standard Error: 903_000 + .saturating_add((87_433_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((4 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) .saturating_add(T::DbWeight::get().writes((5 as Weight).saturating_mul(r as Weight))) } fn seal_restore_to_per_code_kb_delta(c: u32, t: u32, d: u32, ) -> Weight { - (58_389_000 as Weight) - // Standard Error: 131_000 - .saturating_add((7_910_000 as Weight).saturating_mul(c as Weight)) - // Standard Error: 131_000 - .saturating_add((4_036_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 1_156_000 - 
.saturating_add((3_714_110_000 as Weight).saturating_mul(d as Weight)) + (18_255_000 as Weight) + // Standard Error: 141_000 + .saturating_add((5_142_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 141_000 + .saturating_add((2_478_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 1_242_000 + .saturating_add((2_935_421_000 as Weight).saturating_mul(d as Weight)) .saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(d as Weight))) .saturating_add(T::DbWeight::get().writes(7 as Weight)) .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(d as Weight))) } fn seal_random(r: u32, ) -> Weight { - (138_794_000 as Weight) - // Standard Error: 216_000 - .saturating_add((599_742_000 as Weight).saturating_mul(r as Weight)) + (140_411_000 as Weight) + // Standard Error: 146_000 + .saturating_add((566_687_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_deposit_event(r: u32, ) -> Weight { - (139_890_000 as Weight) - // Standard Error: 263_000 - .saturating_add((885_805_000 as Weight).saturating_mul(r as Weight)) + (132_048_000 as Weight) + // Standard Error: 308_000 + .saturating_add((818_622_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - (1_117_962_000 as Weight) - // Standard Error: 4_029_000 - .saturating_add((566_825_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 794_000 - .saturating_add((251_096_000 as Weight).saturating_mul(n as Weight)) + (1_080_578_000 as Weight) + // Standard Error: 2_337_000 + .saturating_add((534_525_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 460_000 + .saturating_add((167_990_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(t as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(t as Weight))) } fn seal_set_rent_allowance(r: u32, ) -> Weight { - (132_720_000 as Weight) - // Standard Error: 87_000 - .saturating_add((164_134_000 as Weight).saturating_mul(r as Weight)) + (123_998_000 as Weight) + // Standard Error: 53_000 + .saturating_add((155_113_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_debug_message(r: u32, ) -> Weight { - (125_834_000 as Weight) - // Standard Error: 142_000 - .saturating_add((127_200_000 as Weight).saturating_mul(r as Weight)) + (120_514_000 as Weight) + // Standard Error: 93_000 + .saturating_add((124_243_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_set_storage(r: u32, ) -> Weight { - (478_734_000 as Weight) - // Standard Error: 2_559_000 - .saturating_add((3_766_445_000 as Weight).saturating_mul(r as Weight)) + (47_131_000 as Weight) + // Standard Error: 931_000 + .saturating_add((4_033_062_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((100 as 
Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) } fn seal_set_storage_per_kb(n: u32, ) -> Weight { - (600_306_000 as Weight) - // Standard Error: 234_000 - .saturating_add((70_989_000 as Weight).saturating_mul(n as Weight)) + (549_577_000 as Weight) + // Standard Error: 192_000 + .saturating_add((57_815_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn seal_clear_storage(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 2_380_000 - .saturating_add((1_242_131_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 2_635_000 + .saturating_add((1_214_454_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) @@ -441,23 +425,23 @@ impl WeightInfo for SubstrateWeight { } fn seal_get_storage(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 1_060_000 - .saturating_add((910_861_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 1_044_000 + .saturating_add((883_653_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_get_storage_per_kb(n: u32, ) -> Weight { - (605_545_000 as Weight) - // Standard Error: 252_000 - .saturating_add((153_519_000 as Weight).saturating_mul(n as Weight)) + (568_190_000 as Weight) + // Standard Error: 181_000 + .saturating_add((106_420_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_transfer(r: u32, ) -> Weight { - (36_854_000 as Weight) - // Standard Error: 2_076_000 - .saturating_add((5_183_774_000 as Weight).saturating_mul(r as Weight)) + (0 as Weight) + // Standard Error: 1_553_000 + .saturating_add((4_810_405_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) @@ -465,645 +449,631 @@ impl WeightInfo for SubstrateWeight { } fn seal_call(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 6_583_000 - .saturating_add((11_599_057_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 8_671_000 + .saturating_add((10_965_308_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().reads((200 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) } fn seal_call_per_code_transfer_input_output_kb(c: u32, t: u32, i: u32, o: u32, ) -> Weight { - (10_431_738_000 as Weight) - // Standard Error: 301_000 - .saturating_add((392_174_000 as Weight).saturating_mul(c as Weight)) - // Standard Error: 125_400_000 - .saturating_add((3_698_896_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 39_000 - .saturating_add((60_692_000 as Weight).saturating_mul(i as Weight)) - // Standard Error: 42_000 - 
.saturating_add((78_872_000 as Weight).saturating_mul(o as Weight)) + (10_138_403_000 as Weight) + // Standard Error: 162_000 + .saturating_add((264_871_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 67_846_000 + .saturating_add((3_793_372_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 21_000 + .saturating_add((49_168_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 22_000 + .saturating_add((71_664_000 as Weight).saturating_mul(o as Weight)) .saturating_add(T::DbWeight::get().reads(205 as Weight)) .saturating_add(T::DbWeight::get().writes(101 as Weight)) .saturating_add(T::DbWeight::get().writes((101 as Weight).saturating_mul(t as Weight))) } fn seal_instantiate(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 32_118_000 - .saturating_add((21_117_947_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 34_546_000 + .saturating_add((19_938_393_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().reads((300 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(3 as Weight)) .saturating_add(T::DbWeight::get().writes((300 as Weight).saturating_mul(r as Weight))) } fn seal_instantiate_per_code_input_output_salt_kb(c: u32, i: u32, o: u32, s: u32, ) -> Weight { - (8_542_521_000 as Weight) - // Standard Error: 644_000 - .saturating_add((878_020_000 as Weight).saturating_mul(c as Weight)) - // Standard Error: 91_000 - .saturating_add((63_004_000 as Weight).saturating_mul(i as Weight)) - // Standard Error: 91_000 - .saturating_add((83_203_000 as Weight).saturating_mul(o as Weight)) - // Standard Error: 91_000 - .saturating_add((240_170_000 as Weight).saturating_mul(s as Weight)) + (8_861_543_000 as Weight) + // Standard Error: 566_000 + .saturating_add((585_057_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 80_000 + .saturating_add((52_025_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 80_000 + .saturating_add((75_956_000 as Weight).saturating_mul(o as Weight)) + // Standard Error: 80_000 + .saturating_add((198_033_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(206 as Weight)) .saturating_add(T::DbWeight::get().writes(204 as Weight)) } fn seal_hash_sha2_256(r: u32, ) -> Weight { - (130_991_000 as Weight) - // Standard Error: 106_000 - .saturating_add((230_186_000 as Weight).saturating_mul(r as Weight)) + (129_022_000 as Weight) + // Standard Error: 76_000 + .saturating_add((216_764_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { - (508_089_000 as Weight) - // Standard Error: 38_000 - .saturating_add((491_916_000 as Weight).saturating_mul(n as Weight)) + (414_489_000 as Weight) + // Standard Error: 14_000 + .saturating_add((481_873_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_hash_keccak_256(r: u32, ) -> Weight { - (135_384_000 as Weight) - // Standard Error: 111_000 - .saturating_add((233_638_000 as Weight).saturating_mul(r as Weight)) + (127_636_000 as Weight) + // Standard Error: 104_000 + .saturating_add((225_094_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn 
seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { - (445_961_000 as Weight) - // Standard Error: 29_000 - .saturating_add((340_992_000 as Weight).saturating_mul(n as Weight)) + (216_668_000 as Weight) + // Standard Error: 16_000 + .saturating_add((331_423_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_hash_blake2_256(r: u32, ) -> Weight { - (133_593_000 as Weight) - // Standard Error: 112_000 - .saturating_add((208_000_000 as Weight).saturating_mul(r as Weight)) + (129_582_000 as Weight) + // Standard Error: 97_000 + .saturating_add((198_429_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { - (444_562_000 as Weight) - // Standard Error: 27_000 - .saturating_add((159_521_000 as Weight).saturating_mul(n as Weight)) + (288_991_000 as Weight) + // Standard Error: 20_000 + .saturating_add((148_497_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_hash_blake2_128(r: u32, ) -> Weight { - (131_381_000 as Weight) - // Standard Error: 82_000 - .saturating_add((207_479_000 as Weight).saturating_mul(r as Weight)) + (128_711_000 as Weight) + // Standard Error: 94_000 + .saturating_add((197_050_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { - (576_129_000 as Weight) - // Standard Error: 49_000 - .saturating_add((156_900_000 as Weight).saturating_mul(n as Weight)) + (275_444_000 as Weight) + // Standard Error: 18_000 + .saturating_add((148_469_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn instr_i64const(r: u32, ) -> Weight { - (20_276_000 as Weight) - // Standard Error: 16_000 - .saturating_add((3_355_000 as Weight).saturating_mul(r as Weight)) + (20_089_000 as Weight) + // Standard Error: 26_000 + .saturating_add((3_376_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64load(r: u32, ) -> Weight { - (22_345_000 as Weight) - // Standard Error: 18_000 - .saturating_add((133_628_000 as Weight).saturating_mul(r as Weight)) + (22_187_000 as Weight) + // Standard Error: 31_000 + .saturating_add((162_969_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64store(r: u32, ) -> Weight { - (22_294_000 as Weight) - // Standard Error: 95_000 - .saturating_add((204_007_000 as Weight).saturating_mul(r as Weight)) + (22_292_000 as Weight) + // Standard Error: 39_000 + .saturating_add((233_277_000 as Weight).saturating_mul(r as Weight)) } fn instr_select(r: u32, ) -> Weight { - (20_266_000 as Weight) - // Standard Error: 25_000 - .saturating_add((12_605_000 as Weight).saturating_mul(r as Weight)) + (20_083_000 as Weight) + // Standard Error: 24_000 + .saturating_add((12_378_000 as Weight).saturating_mul(r as Weight)) } fn instr_if(r: u32, ) -> Weight { - (20_208_000 as Weight) - // Standard Error: 13_000 - .saturating_add((12_589_000 as Weight).saturating_mul(r as Weight)) + (20_091_000 as Weight) + // Standard Error: 24_000 + .saturating_add((12_195_000 as Weight).saturating_mul(r as Weight)) } fn instr_br(r: u32, ) -> Weight { - (20_227_000 as 
Weight) + (20_082_000 as Weight) // Standard Error: 18_000 - .saturating_add((6_429_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((6_151_000 as Weight).saturating_mul(r as Weight)) } fn instr_br_if(r: u32, ) -> Weight { - (20_279_000 as Weight) - // Standard Error: 15_000 - .saturating_add((14_560_000 as Weight).saturating_mul(r as Weight)) + (20_031_000 as Weight) + // Standard Error: 13_000 + .saturating_add((13_978_000 as Weight).saturating_mul(r as Weight)) } fn instr_br_table(r: u32, ) -> Weight { - (20_210_000 as Weight) - // Standard Error: 16_000 - .saturating_add((15_613_000 as Weight).saturating_mul(r as Weight)) + (20_063_000 as Weight) + // Standard Error: 21_000 + .saturating_add((15_524_000 as Weight).saturating_mul(r as Weight)) } fn instr_br_table_per_entry(e: u32, ) -> Weight { - (34_276_000 as Weight) + (34_332_000 as Weight) // Standard Error: 0 - .saturating_add((130_000 as Weight).saturating_mul(e as Weight)) + .saturating_add((117_000 as Weight).saturating_mul(e as Weight)) } fn instr_call(r: u32, ) -> Weight { - (20_426_000 as Weight) - // Standard Error: 69_000 - .saturating_add((91_850_000 as Weight).saturating_mul(r as Weight)) + (20_446_000 as Weight) + // Standard Error: 121_000 + .saturating_add((90_977_000 as Weight).saturating_mul(r as Weight)) } fn instr_call_indirect(r: u32, ) -> Weight { - (27_099_000 as Weight) - // Standard Error: 111_000 - .saturating_add((169_212_000 as Weight).saturating_mul(r as Weight)) + (28_119_000 as Weight) + // Standard Error: 390_000 + .saturating_add((192_865_000 as Weight).saturating_mul(r as Weight)) } fn instr_call_indirect_per_param(p: u32, ) -> Weight { - (206_492_000 as Weight) + (228_352_000 as Weight) // Standard Error: 4_000 - .saturating_add((4_685_000 as Weight).saturating_mul(p as Weight)) + .saturating_add((3_891_000 as Weight).saturating_mul(p as Weight)) } fn instr_local_get(r: u32, ) -> Weight { - (37_892_000 as Weight) - // Standard Error: 24_000 - .saturating_add((3_510_000 as Weight).saturating_mul(r as Weight)) + (37_745_000 as Weight) + // Standard Error: 13_000 + .saturating_add((3_135_000 as Weight).saturating_mul(r as Weight)) } fn instr_local_set(r: u32, ) -> Weight { - (37_773_000 as Weight) + (37_639_000 as Weight) // Standard Error: 15_000 - .saturating_add((3_814_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((3_541_000 as Weight).saturating_mul(r as Weight)) } fn instr_local_tee(r: u32, ) -> Weight { - (37_785_000 as Weight) - // Standard Error: 20_000 - .saturating_add((4_949_000 as Weight).saturating_mul(r as Weight)) + (37_639_000 as Weight) + // Standard Error: 23_000 + .saturating_add((4_813_000 as Weight).saturating_mul(r as Weight)) } fn instr_global_get(r: u32, ) -> Weight { - (23_467_000 as Weight) - // Standard Error: 25_000 - .saturating_add((7_493_000 as Weight).saturating_mul(r as Weight)) + (23_379_000 as Weight) + // Standard Error: 27_000 + .saturating_add((7_757_000 as Weight).saturating_mul(r as Weight)) } fn instr_global_set(r: u32, ) -> Weight { - (23_492_000 as Weight) - // Standard Error: 28_000 - .saturating_add((8_499_000 as Weight).saturating_mul(r as Weight)) + (23_378_000 as Weight) + // Standard Error: 68_000 + .saturating_add((8_437_000 as Weight).saturating_mul(r as Weight)) } fn instr_memory_current(r: u32, ) -> Weight { - (22_347_000 as Weight) - // Standard Error: 18_000 - .saturating_add((3_565_000 as Weight).saturating_mul(r as Weight)) + (22_245_000 as Weight) + // Standard Error: 17_000 + .saturating_add((3_446_000 as 
Weight).saturating_mul(r as Weight)) } fn instr_memory_grow(r: u32, ) -> Weight { - (20_849_000 as Weight) - // Standard Error: 2_751_000 - .saturating_add((2_072_517_000 as Weight).saturating_mul(r as Weight)) + (20_714_000 as Weight) + // Standard Error: 478_000 + .saturating_add((2_314_540_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64clz(r: u32, ) -> Weight { - (20_216_000 as Weight) - // Standard Error: 18_000 - .saturating_add((5_067_000 as Weight).saturating_mul(r as Weight)) + (20_126_000 as Weight) + // Standard Error: 15_000 + .saturating_add((5_316_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ctz(r: u32, ) -> Weight { - (20_218_000 as Weight) - // Standard Error: 11_000 - .saturating_add((5_015_000 as Weight).saturating_mul(r as Weight)) + (20_107_000 as Weight) + // Standard Error: 23_000 + .saturating_add((5_344_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64popcnt(r: u32, ) -> Weight { - (20_215_000 as Weight) - // Standard Error: 16_000 - .saturating_add((5_888_000 as Weight).saturating_mul(r as Weight)) + (20_135_000 as Weight) + // Standard Error: 22_000 + .saturating_add((5_909_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64eqz(r: u32, ) -> Weight { - (20_232_000 as Weight) - // Standard Error: 12_000 - .saturating_add((5_366_000 as Weight).saturating_mul(r as Weight)) + (20_107_000 as Weight) + // Standard Error: 19_000 + .saturating_add((5_515_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64extendsi32(r: u32, ) -> Weight { - (20_205_000 as Weight) - // Standard Error: 17_000 - .saturating_add((4_847_000 as Weight).saturating_mul(r as Weight)) + (20_229_000 as Weight) + // Standard Error: 18_000 + .saturating_add((5_113_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64extendui32(r: u32, ) -> Weight { - (20_181_000 as Weight) - // Standard Error: 12_000 - .saturating_add((4_849_000 as Weight).saturating_mul(r as Weight)) + (20_070_000 as Weight) + // Standard Error: 11_000 + .saturating_add((5_226_000 as Weight).saturating_mul(r as Weight)) } fn instr_i32wrapi64(r: u32, ) -> Weight { - (20_175_000 as Weight) - // Standard Error: 18_000 - .saturating_add((4_981_000 as Weight).saturating_mul(r as Weight)) + (20_090_000 as Weight) + // Standard Error: 15_000 + .saturating_add((5_296_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64eq(r: u32, ) -> Weight { - (20_273_000 as Weight) - // Standard Error: 19_000 - .saturating_add((7_402_000 as Weight).saturating_mul(r as Weight)) + (20_095_000 as Weight) + // Standard Error: 13_000 + .saturating_add((7_323_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ne(r: u32, ) -> Weight { - (20_260_000 as Weight) - // Standard Error: 11_000 - .saturating_add((7_392_000 as Weight).saturating_mul(r as Weight)) + (20_043_000 as Weight) + // Standard Error: 10_000 + .saturating_add((7_280_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64lts(r: u32, ) -> Weight { - (20_248_000 as Weight) - // Standard Error: 11_000 - .saturating_add((7_363_000 as Weight).saturating_mul(r as Weight)) + (20_061_000 as Weight) + // Standard Error: 15_000 + .saturating_add((7_226_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ltu(r: u32, ) -> Weight { - (20_229_000 as Weight) - // Standard Error: 11_000 - .saturating_add((7_412_000 as Weight).saturating_mul(r as Weight)) + (20_072_000 as Weight) + // Standard Error: 24_000 + .saturating_add((7_315_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64gts(r: u32, ) -> Weight { - (20_232_000 as Weight) - // 
Standard Error: 9_000 - .saturating_add((7_364_000 as Weight).saturating_mul(r as Weight)) + (20_054_000 as Weight) + // Standard Error: 27_000 + .saturating_add((7_228_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64gtu(r: u32, ) -> Weight { - (20_252_000 as Weight) - // Standard Error: 19_000 - .saturating_add((7_383_000 as Weight).saturating_mul(r as Weight)) + (20_169_000 as Weight) + // Standard Error: 18_000 + .saturating_add((7_262_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64les(r: u32, ) -> Weight { - (20_258_000 as Weight) - // Standard Error: 16_000 - .saturating_add((7_359_000 as Weight).saturating_mul(r as Weight)) + (20_115_000 as Weight) + // Standard Error: 21_000 + .saturating_add((7_212_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64leu(r: u32, ) -> Weight { - (20_245_000 as Weight) - // Standard Error: 21_000 - .saturating_add((7_400_000 as Weight).saturating_mul(r as Weight)) + (20_122_000 as Weight) + // Standard Error: 23_000 + .saturating_add((7_166_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ges(r: u32, ) -> Weight { - (20_245_000 as Weight) - // Standard Error: 19_000 - .saturating_add((7_391_000 as Weight).saturating_mul(r as Weight)) + (20_140_000 as Weight) + // Standard Error: 16_000 + .saturating_add((7_242_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64geu(r: u32, ) -> Weight { - (20_230_000 as Weight) - // Standard Error: 15_000 - .saturating_add((7_439_000 as Weight).saturating_mul(r as Weight)) + (20_107_000 as Weight) + // Standard Error: 18_000 + .saturating_add((7_365_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64add(r: u32, ) -> Weight { - (20_254_000 as Weight) - // Standard Error: 16_000 - .saturating_add((7_204_000 as Weight).saturating_mul(r as Weight)) + (20_179_000 as Weight) + // Standard Error: 14_000 + .saturating_add((7_144_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64sub(r: u32, ) -> Weight { - (20_182_000 as Weight) - // Standard Error: 22_000 - .saturating_add((7_327_000 as Weight).saturating_mul(r as Weight)) + (20_143_000 as Weight) + // Standard Error: 17_000 + .saturating_add((7_222_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64mul(r: u32, ) -> Weight { - (20_203_000 as Weight) - // Standard Error: 20_000 - .saturating_add((7_221_000 as Weight).saturating_mul(r as Weight)) + (20_129_000 as Weight) + // Standard Error: 21_000 + .saturating_add((7_247_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64divs(r: u32, ) -> Weight { - (20_187_000 as Weight) + (20_107_000 as Weight) // Standard Error: 16_000 - .saturating_add((13_738_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((12_953_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64divu(r: u32, ) -> Weight { - (20_153_000 as Weight) - // Standard Error: 11_000 - .saturating_add((12_766_000 as Weight).saturating_mul(r as Weight)) + (20_093_000 as Weight) + // Standard Error: 17_000 + .saturating_add((12_040_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64rems(r: u32, ) -> Weight { - (20_219_000 as Weight) + (20_102_000 as Weight) // Standard Error: 13_000 - .saturating_add((13_732_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((12_945_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64remu(r: u32, ) -> Weight { - (20_246_000 as Weight) + (20_132_000 as Weight) // Standard Error: 16_000 - .saturating_add((12_686_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((12_199_000 as Weight).saturating_mul(r as Weight)) } fn 
instr_i64and(r: u32, ) -> Weight { - (20_228_000 as Weight) - // Standard Error: 13_000 - .saturating_add((7_245_000 as Weight).saturating_mul(r as Weight)) + (20_155_000 as Weight) + // Standard Error: 26_000 + .saturating_add((7_103_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64or(r: u32, ) -> Weight { - (20_238_000 as Weight) - // Standard Error: 17_000 - .saturating_add((7_250_000 as Weight).saturating_mul(r as Weight)) + (20_088_000 as Weight) + // Standard Error: 22_000 + .saturating_add((7_213_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64xor(r: u32, ) -> Weight { - (20_213_000 as Weight) - // Standard Error: 10_000 - .saturating_add((7_292_000 as Weight).saturating_mul(r as Weight)) + (20_060_000 as Weight) + // Standard Error: 18_000 + .saturating_add((7_275_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shl(r: u32, ) -> Weight { - (20_224_000 as Weight) - // Standard Error: 18_000 - .saturating_add((7_554_000 as Weight).saturating_mul(r as Weight)) + (20_104_000 as Weight) + // Standard Error: 15_000 + .saturating_add((7_282_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shrs(r: u32, ) -> Weight { - (20_261_000 as Weight) + (20_111_000 as Weight) // Standard Error: 20_000 - .saturating_add((7_551_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((7_264_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shru(r: u32, ) -> Weight { - (20_212_000 as Weight) - // Standard Error: 15_000 - .saturating_add((7_616_000 as Weight).saturating_mul(r as Weight)) + (20_096_000 as Weight) + // Standard Error: 23_000 + .saturating_add((7_347_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64rotl(r: u32, ) -> Weight { - (20_176_000 as Weight) - // Standard Error: 9_000 - .saturating_add((7_877_000 as Weight).saturating_mul(r as Weight)) + (20_091_000 as Weight) + // Standard Error: 16_000 + .saturating_add((7_370_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64rotr(r: u32, ) -> Weight { - (20_230_000 as Weight) - // Standard Error: 17_000 - .saturating_add((7_347_000 as Weight).saturating_mul(r as Weight)) + (20_102_000 as Weight) + // Standard Error: 28_000 + .saturating_add((7_266_000 as Weight).saturating_mul(r as Weight)) } } // For backwards compatibility and tests impl WeightInfo for () { fn on_initialize() -> Weight { - (3_656_000 as Weight) + (3_603_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) } fn on_initialize_per_trie_key(k: u32, ) -> Weight { (0 as Weight) - // Standard Error: 3_000 - .saturating_add((2_241_000 as Weight).saturating_mul(k as Weight)) + // Standard Error: 2_000 + .saturating_add((2_217_000 as Weight).saturating_mul(k as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) } fn on_initialize_per_queue_item(q: u32, ) -> Weight { - (36_820_000 as Weight) - // Standard Error: 4_000 - .saturating_add((34_550_000 as Weight).saturating_mul(q as Weight)) + (0 as Weight) + // Standard Error: 6_000 + .saturating_add((36_769_000 as Weight).saturating_mul(q as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn instrument(c: u32, ) -> Weight { - (42_348_000 as Weight) - // Standard Error: 185_000 - .saturating_add((95_664_000 as Weight).saturating_mul(c as Weight)) + (54_463_000 as Weight) + // Standard Error: 105_000 + 
.saturating_add((77_542_000 as Weight).saturating_mul(c as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn instantiate_with_code(c: u32, s: u32, ) -> Weight { - (210_852_000 as Weight) - // Standard Error: 138_000 - .saturating_add((135_241_000 as Weight).saturating_mul(c as Weight)) - // Standard Error: 9_000 - .saturating_add((1_846_000 as Weight).saturating_mul(s as Weight)) + (184_114_000 as Weight) + // Standard Error: 82_000 + .saturating_add((117_247_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 5_000 + .saturating_add((1_542_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } fn instantiate(c: u32, s: u32, ) -> Weight { - (217_380_000 as Weight) - // Standard Error: 6_000 - .saturating_add((8_483_000 as Weight).saturating_mul(c as Weight)) + (183_501_000 as Weight) + // Standard Error: 2_000 + .saturating_add((5_645_000 as Weight).saturating_mul(c as Weight)) // Standard Error: 0 - .saturating_add((1_752_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((1_473_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn call(c: u32, ) -> Weight { - (181_443_000 as Weight) - // Standard Error: 3_000 - .saturating_add((3_955_000 as Weight).saturating_mul(c as Weight)) + (173_411_000 as Weight) + // Standard Error: 1_000 + .saturating_add((2_639_000 as Weight).saturating_mul(c as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn claim_surcharge(c: u32, ) -> Weight { - (132_551_000 as Weight) - // Standard Error: 1_000 - .saturating_add((4_740_000 as Weight).saturating_mul(c as Weight)) + (125_839_000 as Weight) + // Standard Error: 0 + .saturating_add((3_123_000 as Weight).saturating_mul(c as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } fn seal_caller(r: u32, ) -> Weight { - (137_742_000 as Weight) - // Standard Error: 74_000 - .saturating_add((242_261_000 as Weight).saturating_mul(r as Weight)) + (131_793_000 as Weight) + // Standard Error: 84_000 + .saturating_add((231_138_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_address(r: u32, ) -> Weight { - (137_739_000 as Weight) - // Standard Error: 91_000 - .saturating_add((241_803_000 as Weight).saturating_mul(r as Weight)) + (129_995_000 as Weight) + // Standard Error: 78_000 + .saturating_add((231_839_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_gas_left(r: u32, ) -> Weight { - (139_631_000 as Weight) - // Standard Error: 83_000 - .saturating_add((236_790_000 as Weight).saturating_mul(r as Weight)) + (129_710_000 as Weight) + // Standard Error: 85_000 + .saturating_add((227_268_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_balance(r: u32, ) -> Weight { - (142_506_000 as Weight) - // Standard Error: 176_000 - .saturating_add((525_752_000 as Weight).saturating_mul(r as Weight)) + (133_445_000 as 
Weight) + // Standard Error: 144_000 + .saturating_add((487_125_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_value_transferred(r: u32, ) -> Weight { - (138_569_000 as Weight) - // Standard Error: 76_000 - .saturating_add((237_016_000 as Weight).saturating_mul(r as Weight)) + (129_299_000 as Weight) + // Standard Error: 82_000 + .saturating_add((227_118_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_minimum_balance(r: u32, ) -> Weight { - (134_713_000 as Weight) - // Standard Error: 81_000 - .saturating_add((237_962_000 as Weight).saturating_mul(r as Weight)) + (126_120_000 as Weight) + // Standard Error: 114_000 + .saturating_add((227_326_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_tombstone_deposit(r: u32, ) -> Weight { - (131_523_000 as Weight) - // Standard Error: 90_000 - .saturating_add((237_435_000 as Weight).saturating_mul(r as Weight)) + (130_934_000 as Weight) + // Standard Error: 89_000 + .saturating_add((226_638_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_rent_allowance(r: u32, ) -> Weight { - (141_574_000 as Weight) - // Standard Error: 86_000 - .saturating_add((238_102_000 as Weight).saturating_mul(r as Weight)) + (128_738_000 as Weight) + // Standard Error: 77_000 + .saturating_add((227_062_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_block_number(r: u32, ) -> Weight { - (140_240_000 as Weight) - // Standard Error: 101_000 - .saturating_add((236_568_000 as Weight).saturating_mul(r as Weight)) + (132_375_000 as Weight) + // Standard Error: 88_000 + .saturating_add((226_861_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_now(r: u32, ) -> Weight { - (138_265_000 as Weight) - // Standard Error: 91_000 - .saturating_add((237_187_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } - fn seal_rent_params(r: u32, ) -> Weight { - (149_701_000 as Weight) - // Standard Error: 297_000 - .saturating_add((357_149_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } - fn seal_rent_status(r: u32, ) -> Weight { - (146_863_000 as Weight) - // Standard Error: 191_000 - .saturating_add((638_683_000 as Weight).saturating_mul(r as Weight)) + (127_888_000 as Weight) + // Standard Error: 86_000 + .saturating_add((227_851_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_weight_to_fee(r: u32, ) -> Weight { - (144_278_000 as Weight) + (131_825_000 as Weight) // Standard Error: 149_000 - .saturating_add((470_264_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((420_149_000 as Weight).saturating_mul(r as Weight)) 
.saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_gas(r: u32, ) -> Weight { - (111_361_000 as Weight) - // Standard Error: 157_000 - .saturating_add((118_441_000 as Weight).saturating_mul(r as Weight)) + (113_641_000 as Weight) + // Standard Error: 114_000 + .saturating_add((113_068_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_input(r: u32, ) -> Weight { - (129_970_000 as Weight) - // Standard Error: 316_000 - .saturating_add((7_160_000 as Weight).saturating_mul(r as Weight)) + (122_982_000 as Weight) + // Standard Error: 74_000 + .saturating_add((6_828_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_input_per_kb(n: u32, ) -> Weight { - (139_275_000 as Weight) + (131_913_000 as Weight) // Standard Error: 0 - .saturating_add((250_000 as Weight).saturating_mul(n as Weight)) + .saturating_add((275_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_return(r: u32, ) -> Weight { - (119_240_000 as Weight) - // Standard Error: 57_000 - .saturating_add((4_347_000 as Weight).saturating_mul(r as Weight)) + (114_164_000 as Weight) + // Standard Error: 72_000 + .saturating_add((4_318_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_return_per_kb(n: u32, ) -> Weight { - (128_896_000 as Weight) - // Standard Error: 1_000 - .saturating_add((757_000 as Weight).saturating_mul(n as Weight)) + (123_940_000 as Weight) + // Standard Error: 0 + .saturating_add((664_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_terminate(r: u32, ) -> Weight { - (130_119_000 as Weight) - // Standard Error: 108_000 - .saturating_add((95_078_000 as Weight).saturating_mul(r as Weight)) + (123_340_000 as Weight) + // Standard Error: 99_000 + .saturating_add((89_126_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((2 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) .saturating_add(RocksDbWeight::get().writes((4 as Weight).saturating_mul(r as Weight))) } fn seal_terminate_per_code_kb(c: u32, ) -> Weight { - (230_167_000 as Weight) - // Standard Error: 2_000 - .saturating_add((8_495_000 as Weight).saturating_mul(c as Weight)) + (217_499_000 as Weight) + // Standard Error: 1_000 + .saturating_add((5_608_000 as Weight).saturating_mul(c as Weight)) .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().writes(5 as Weight)) } fn seal_restore_to(r: u32, ) -> Weight { - (159_200_000 as Weight) - // Standard Error: 261_000 - .saturating_add((103_048_000 as Weight).saturating_mul(r as Weight)) + (149_019_000 as Weight) + // Standard Error: 903_000 + .saturating_add((87_433_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((4 as Weight).saturating_mul(r as Weight))) 
.saturating_add(RocksDbWeight::get().writes(1 as Weight)) .saturating_add(RocksDbWeight::get().writes((5 as Weight).saturating_mul(r as Weight))) } fn seal_restore_to_per_code_kb_delta(c: u32, t: u32, d: u32, ) -> Weight { - (58_389_000 as Weight) - // Standard Error: 131_000 - .saturating_add((7_910_000 as Weight).saturating_mul(c as Weight)) - // Standard Error: 131_000 - .saturating_add((4_036_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 1_156_000 - .saturating_add((3_714_110_000 as Weight).saturating_mul(d as Weight)) + (18_255_000 as Weight) + // Standard Error: 141_000 + .saturating_add((5_142_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 141_000 + .saturating_add((2_478_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 1_242_000 + .saturating_add((2_935_421_000 as Weight).saturating_mul(d as Weight)) .saturating_add(RocksDbWeight::get().reads(7 as Weight)) .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(d as Weight))) .saturating_add(RocksDbWeight::get().writes(7 as Weight)) .saturating_add(RocksDbWeight::get().writes((100 as Weight).saturating_mul(d as Weight))) } fn seal_random(r: u32, ) -> Weight { - (138_794_000 as Weight) - // Standard Error: 216_000 - .saturating_add((599_742_000 as Weight).saturating_mul(r as Weight)) + (140_411_000 as Weight) + // Standard Error: 146_000 + .saturating_add((566_687_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_deposit_event(r: u32, ) -> Weight { - (139_890_000 as Weight) - // Standard Error: 263_000 - .saturating_add((885_805_000 as Weight).saturating_mul(r as Weight)) + (132_048_000 as Weight) + // Standard Error: 308_000 + .saturating_add((818_622_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - (1_117_962_000 as Weight) - // Standard Error: 4_029_000 - .saturating_add((566_825_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 794_000 - .saturating_add((251_096_000 as Weight).saturating_mul(n as Weight)) + (1_080_578_000 as Weight) + // Standard Error: 2_337_000 + .saturating_add((534_525_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 460_000 + .saturating_add((167_990_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(t as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) .saturating_add(RocksDbWeight::get().writes((100 as Weight).saturating_mul(t as Weight))) } fn seal_set_rent_allowance(r: u32, ) -> Weight { - (132_720_000 as Weight) - // Standard Error: 87_000 - .saturating_add((164_134_000 as Weight).saturating_mul(r as Weight)) + (123_998_000 as Weight) + // Standard Error: 53_000 + .saturating_add((155_113_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_debug_message(r: u32, ) -> Weight { - (125_834_000 as Weight) - // Standard Error: 142_000 - .saturating_add((127_200_000 as Weight).saturating_mul(r as Weight)) + (120_514_000 as Weight) + // Standard Error: 93_000 + .saturating_add((124_243_000 as Weight).saturating_mul(r as Weight)) 
.saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_set_storage(r: u32, ) -> Weight { - (478_734_000 as Weight) - // Standard Error: 2_559_000 - .saturating_add((3_766_445_000 as Weight).saturating_mul(r as Weight)) + (47_131_000 as Weight) + // Standard Error: 931_000 + .saturating_add((4_033_062_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) .saturating_add(RocksDbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) } fn seal_set_storage_per_kb(n: u32, ) -> Weight { - (600_306_000 as Weight) - // Standard Error: 234_000 - .saturating_add((70_989_000 as Weight).saturating_mul(n as Weight)) + (549_577_000 as Weight) + // Standard Error: 192_000 + .saturating_add((57_815_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn seal_clear_storage(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 2_380_000 - .saturating_add((1_242_131_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 2_635_000 + .saturating_add((1_214_454_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) @@ -1111,23 +1081,23 @@ impl WeightInfo for () { } fn seal_get_storage(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 1_060_000 - .saturating_add((910_861_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 1_044_000 + .saturating_add((883_653_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_get_storage_per_kb(n: u32, ) -> Weight { - (605_545_000 as Weight) - // Standard Error: 252_000 - .saturating_add((153_519_000 as Weight).saturating_mul(n as Weight)) + (568_190_000 as Weight) + // Standard Error: 181_000 + .saturating_add((106_420_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_transfer(r: u32, ) -> Weight { - (36_854_000 as Weight) - // Standard Error: 2_076_000 - .saturating_add((5_183_774_000 as Weight).saturating_mul(r as Weight)) + (0 as Weight) + // Standard Error: 1_553_000 + .saturating_add((4_810_405_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) @@ -1135,358 +1105,358 @@ impl WeightInfo for () { } fn seal_call(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 6_583_000 - .saturating_add((11_599_057_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 8_671_000 + .saturating_add((10_965_308_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().reads((200 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) 
.saturating_add(RocksDbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) } fn seal_call_per_code_transfer_input_output_kb(c: u32, t: u32, i: u32, o: u32, ) -> Weight { - (10_431_738_000 as Weight) - // Standard Error: 301_000 - .saturating_add((392_174_000 as Weight).saturating_mul(c as Weight)) - // Standard Error: 125_400_000 - .saturating_add((3_698_896_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 39_000 - .saturating_add((60_692_000 as Weight).saturating_mul(i as Weight)) - // Standard Error: 42_000 - .saturating_add((78_872_000 as Weight).saturating_mul(o as Weight)) + (10_138_403_000 as Weight) + // Standard Error: 162_000 + .saturating_add((264_871_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 67_846_000 + .saturating_add((3_793_372_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 21_000 + .saturating_add((49_168_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 22_000 + .saturating_add((71_664_000 as Weight).saturating_mul(o as Weight)) .saturating_add(RocksDbWeight::get().reads(205 as Weight)) .saturating_add(RocksDbWeight::get().writes(101 as Weight)) .saturating_add(RocksDbWeight::get().writes((101 as Weight).saturating_mul(t as Weight))) } fn seal_instantiate(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 32_118_000 - .saturating_add((21_117_947_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 34_546_000 + .saturating_add((19_938_393_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().reads((300 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) .saturating_add(RocksDbWeight::get().writes((300 as Weight).saturating_mul(r as Weight))) } fn seal_instantiate_per_code_input_output_salt_kb(c: u32, i: u32, o: u32, s: u32, ) -> Weight { - (8_542_521_000 as Weight) - // Standard Error: 644_000 - .saturating_add((878_020_000 as Weight).saturating_mul(c as Weight)) - // Standard Error: 91_000 - .saturating_add((63_004_000 as Weight).saturating_mul(i as Weight)) - // Standard Error: 91_000 - .saturating_add((83_203_000 as Weight).saturating_mul(o as Weight)) - // Standard Error: 91_000 - .saturating_add((240_170_000 as Weight).saturating_mul(s as Weight)) + (8_861_543_000 as Weight) + // Standard Error: 566_000 + .saturating_add((585_057_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 80_000 + .saturating_add((52_025_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 80_000 + .saturating_add((75_956_000 as Weight).saturating_mul(o as Weight)) + // Standard Error: 80_000 + .saturating_add((198_033_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(206 as Weight)) .saturating_add(RocksDbWeight::get().writes(204 as Weight)) } fn seal_hash_sha2_256(r: u32, ) -> Weight { - (130_991_000 as Weight) - // Standard Error: 106_000 - .saturating_add((230_186_000 as Weight).saturating_mul(r as Weight)) + (129_022_000 as Weight) + // Standard Error: 76_000 + .saturating_add((216_764_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { - (508_089_000 as Weight) - // Standard Error: 38_000 - .saturating_add((491_916_000 as Weight).saturating_mul(n as Weight)) + (414_489_000 as Weight) + // Standard Error: 14_000 + 
.saturating_add((481_873_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_hash_keccak_256(r: u32, ) -> Weight { - (135_384_000 as Weight) - // Standard Error: 111_000 - .saturating_add((233_638_000 as Weight).saturating_mul(r as Weight)) + (127_636_000 as Weight) + // Standard Error: 104_000 + .saturating_add((225_094_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { - (445_961_000 as Weight) - // Standard Error: 29_000 - .saturating_add((340_992_000 as Weight).saturating_mul(n as Weight)) + (216_668_000 as Weight) + // Standard Error: 16_000 + .saturating_add((331_423_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_hash_blake2_256(r: u32, ) -> Weight { - (133_593_000 as Weight) - // Standard Error: 112_000 - .saturating_add((208_000_000 as Weight).saturating_mul(r as Weight)) + (129_582_000 as Weight) + // Standard Error: 97_000 + .saturating_add((198_429_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { - (444_562_000 as Weight) - // Standard Error: 27_000 - .saturating_add((159_521_000 as Weight).saturating_mul(n as Weight)) + (288_991_000 as Weight) + // Standard Error: 20_000 + .saturating_add((148_497_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_hash_blake2_128(r: u32, ) -> Weight { - (131_381_000 as Weight) - // Standard Error: 82_000 - .saturating_add((207_479_000 as Weight).saturating_mul(r as Weight)) + (128_711_000 as Weight) + // Standard Error: 94_000 + .saturating_add((197_050_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { - (576_129_000 as Weight) - // Standard Error: 49_000 - .saturating_add((156_900_000 as Weight).saturating_mul(n as Weight)) + (275_444_000 as Weight) + // Standard Error: 18_000 + .saturating_add((148_469_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn instr_i64const(r: u32, ) -> Weight { - (20_276_000 as Weight) - // Standard Error: 16_000 - .saturating_add((3_355_000 as Weight).saturating_mul(r as Weight)) + (20_089_000 as Weight) + // Standard Error: 26_000 + .saturating_add((3_376_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64load(r: u32, ) -> Weight { - (22_345_000 as Weight) - // Standard Error: 18_000 - .saturating_add((133_628_000 as Weight).saturating_mul(r as Weight)) + (22_187_000 as Weight) + // Standard Error: 31_000 + .saturating_add((162_969_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64store(r: u32, ) -> Weight { - (22_294_000 as Weight) - // Standard Error: 95_000 - .saturating_add((204_007_000 as Weight).saturating_mul(r as Weight)) + (22_292_000 as Weight) + // Standard Error: 39_000 + .saturating_add((233_277_000 as Weight).saturating_mul(r as Weight)) } 
fn instr_select(r: u32, ) -> Weight { - (20_266_000 as Weight) - // Standard Error: 25_000 - .saturating_add((12_605_000 as Weight).saturating_mul(r as Weight)) + (20_083_000 as Weight) + // Standard Error: 24_000 + .saturating_add((12_378_000 as Weight).saturating_mul(r as Weight)) } fn instr_if(r: u32, ) -> Weight { - (20_208_000 as Weight) - // Standard Error: 13_000 - .saturating_add((12_589_000 as Weight).saturating_mul(r as Weight)) + (20_091_000 as Weight) + // Standard Error: 24_000 + .saturating_add((12_195_000 as Weight).saturating_mul(r as Weight)) } fn instr_br(r: u32, ) -> Weight { - (20_227_000 as Weight) + (20_082_000 as Weight) // Standard Error: 18_000 - .saturating_add((6_429_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((6_151_000 as Weight).saturating_mul(r as Weight)) } fn instr_br_if(r: u32, ) -> Weight { - (20_279_000 as Weight) - // Standard Error: 15_000 - .saturating_add((14_560_000 as Weight).saturating_mul(r as Weight)) + (20_031_000 as Weight) + // Standard Error: 13_000 + .saturating_add((13_978_000 as Weight).saturating_mul(r as Weight)) } fn instr_br_table(r: u32, ) -> Weight { - (20_210_000 as Weight) - // Standard Error: 16_000 - .saturating_add((15_613_000 as Weight).saturating_mul(r as Weight)) + (20_063_000 as Weight) + // Standard Error: 21_000 + .saturating_add((15_524_000 as Weight).saturating_mul(r as Weight)) } fn instr_br_table_per_entry(e: u32, ) -> Weight { - (34_276_000 as Weight) + (34_332_000 as Weight) // Standard Error: 0 - .saturating_add((130_000 as Weight).saturating_mul(e as Weight)) + .saturating_add((117_000 as Weight).saturating_mul(e as Weight)) } fn instr_call(r: u32, ) -> Weight { - (20_426_000 as Weight) - // Standard Error: 69_000 - .saturating_add((91_850_000 as Weight).saturating_mul(r as Weight)) + (20_446_000 as Weight) + // Standard Error: 121_000 + .saturating_add((90_977_000 as Weight).saturating_mul(r as Weight)) } fn instr_call_indirect(r: u32, ) -> Weight { - (27_099_000 as Weight) - // Standard Error: 111_000 - .saturating_add((169_212_000 as Weight).saturating_mul(r as Weight)) + (28_119_000 as Weight) + // Standard Error: 390_000 + .saturating_add((192_865_000 as Weight).saturating_mul(r as Weight)) } fn instr_call_indirect_per_param(p: u32, ) -> Weight { - (206_492_000 as Weight) + (228_352_000 as Weight) // Standard Error: 4_000 - .saturating_add((4_685_000 as Weight).saturating_mul(p as Weight)) + .saturating_add((3_891_000 as Weight).saturating_mul(p as Weight)) } fn instr_local_get(r: u32, ) -> Weight { - (37_892_000 as Weight) - // Standard Error: 24_000 - .saturating_add((3_510_000 as Weight).saturating_mul(r as Weight)) + (37_745_000 as Weight) + // Standard Error: 13_000 + .saturating_add((3_135_000 as Weight).saturating_mul(r as Weight)) } fn instr_local_set(r: u32, ) -> Weight { - (37_773_000 as Weight) + (37_639_000 as Weight) // Standard Error: 15_000 - .saturating_add((3_814_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((3_541_000 as Weight).saturating_mul(r as Weight)) } fn instr_local_tee(r: u32, ) -> Weight { - (37_785_000 as Weight) - // Standard Error: 20_000 - .saturating_add((4_949_000 as Weight).saturating_mul(r as Weight)) + (37_639_000 as Weight) + // Standard Error: 23_000 + .saturating_add((4_813_000 as Weight).saturating_mul(r as Weight)) } fn instr_global_get(r: u32, ) -> Weight { - (23_467_000 as Weight) - // Standard Error: 25_000 - .saturating_add((7_493_000 as Weight).saturating_mul(r as Weight)) + (23_379_000 as Weight) + // Standard Error: 27_000 + 
.saturating_add((7_757_000 as Weight).saturating_mul(r as Weight)) } fn instr_global_set(r: u32, ) -> Weight { - (23_492_000 as Weight) - // Standard Error: 28_000 - .saturating_add((8_499_000 as Weight).saturating_mul(r as Weight)) + (23_378_000 as Weight) + // Standard Error: 68_000 + .saturating_add((8_437_000 as Weight).saturating_mul(r as Weight)) } fn instr_memory_current(r: u32, ) -> Weight { - (22_347_000 as Weight) - // Standard Error: 18_000 - .saturating_add((3_565_000 as Weight).saturating_mul(r as Weight)) + (22_245_000 as Weight) + // Standard Error: 17_000 + .saturating_add((3_446_000 as Weight).saturating_mul(r as Weight)) } fn instr_memory_grow(r: u32, ) -> Weight { - (20_849_000 as Weight) - // Standard Error: 2_751_000 - .saturating_add((2_072_517_000 as Weight).saturating_mul(r as Weight)) + (20_714_000 as Weight) + // Standard Error: 478_000 + .saturating_add((2_314_540_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64clz(r: u32, ) -> Weight { - (20_216_000 as Weight) - // Standard Error: 18_000 - .saturating_add((5_067_000 as Weight).saturating_mul(r as Weight)) + (20_126_000 as Weight) + // Standard Error: 15_000 + .saturating_add((5_316_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ctz(r: u32, ) -> Weight { - (20_218_000 as Weight) - // Standard Error: 11_000 - .saturating_add((5_015_000 as Weight).saturating_mul(r as Weight)) + (20_107_000 as Weight) + // Standard Error: 23_000 + .saturating_add((5_344_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64popcnt(r: u32, ) -> Weight { - (20_215_000 as Weight) - // Standard Error: 16_000 - .saturating_add((5_888_000 as Weight).saturating_mul(r as Weight)) + (20_135_000 as Weight) + // Standard Error: 22_000 + .saturating_add((5_909_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64eqz(r: u32, ) -> Weight { - (20_232_000 as Weight) - // Standard Error: 12_000 - .saturating_add((5_366_000 as Weight).saturating_mul(r as Weight)) + (20_107_000 as Weight) + // Standard Error: 19_000 + .saturating_add((5_515_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64extendsi32(r: u32, ) -> Weight { - (20_205_000 as Weight) - // Standard Error: 17_000 - .saturating_add((4_847_000 as Weight).saturating_mul(r as Weight)) + (20_229_000 as Weight) + // Standard Error: 18_000 + .saturating_add((5_113_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64extendui32(r: u32, ) -> Weight { - (20_181_000 as Weight) - // Standard Error: 12_000 - .saturating_add((4_849_000 as Weight).saturating_mul(r as Weight)) + (20_070_000 as Weight) + // Standard Error: 11_000 + .saturating_add((5_226_000 as Weight).saturating_mul(r as Weight)) } fn instr_i32wrapi64(r: u32, ) -> Weight { - (20_175_000 as Weight) - // Standard Error: 18_000 - .saturating_add((4_981_000 as Weight).saturating_mul(r as Weight)) + (20_090_000 as Weight) + // Standard Error: 15_000 + .saturating_add((5_296_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64eq(r: u32, ) -> Weight { - (20_273_000 as Weight) - // Standard Error: 19_000 - .saturating_add((7_402_000 as Weight).saturating_mul(r as Weight)) + (20_095_000 as Weight) + // Standard Error: 13_000 + .saturating_add((7_323_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ne(r: u32, ) -> Weight { - (20_260_000 as Weight) - // Standard Error: 11_000 - .saturating_add((7_392_000 as Weight).saturating_mul(r as Weight)) + (20_043_000 as Weight) + // Standard Error: 10_000 + .saturating_add((7_280_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64lts(r: u32, ) -> 
Weight { - (20_248_000 as Weight) - // Standard Error: 11_000 - .saturating_add((7_363_000 as Weight).saturating_mul(r as Weight)) + (20_061_000 as Weight) + // Standard Error: 15_000 + .saturating_add((7_226_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ltu(r: u32, ) -> Weight { - (20_229_000 as Weight) - // Standard Error: 11_000 - .saturating_add((7_412_000 as Weight).saturating_mul(r as Weight)) + (20_072_000 as Weight) + // Standard Error: 24_000 + .saturating_add((7_315_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64gts(r: u32, ) -> Weight { - (20_232_000 as Weight) - // Standard Error: 9_000 - .saturating_add((7_364_000 as Weight).saturating_mul(r as Weight)) + (20_054_000 as Weight) + // Standard Error: 27_000 + .saturating_add((7_228_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64gtu(r: u32, ) -> Weight { - (20_252_000 as Weight) - // Standard Error: 19_000 - .saturating_add((7_383_000 as Weight).saturating_mul(r as Weight)) + (20_169_000 as Weight) + // Standard Error: 18_000 + .saturating_add((7_262_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64les(r: u32, ) -> Weight { - (20_258_000 as Weight) - // Standard Error: 16_000 - .saturating_add((7_359_000 as Weight).saturating_mul(r as Weight)) + (20_115_000 as Weight) + // Standard Error: 21_000 + .saturating_add((7_212_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64leu(r: u32, ) -> Weight { - (20_245_000 as Weight) - // Standard Error: 21_000 - .saturating_add((7_400_000 as Weight).saturating_mul(r as Weight)) + (20_122_000 as Weight) + // Standard Error: 23_000 + .saturating_add((7_166_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ges(r: u32, ) -> Weight { - (20_245_000 as Weight) - // Standard Error: 19_000 - .saturating_add((7_391_000 as Weight).saturating_mul(r as Weight)) + (20_140_000 as Weight) + // Standard Error: 16_000 + .saturating_add((7_242_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64geu(r: u32, ) -> Weight { - (20_230_000 as Weight) - // Standard Error: 15_000 - .saturating_add((7_439_000 as Weight).saturating_mul(r as Weight)) + (20_107_000 as Weight) + // Standard Error: 18_000 + .saturating_add((7_365_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64add(r: u32, ) -> Weight { - (20_254_000 as Weight) - // Standard Error: 16_000 - .saturating_add((7_204_000 as Weight).saturating_mul(r as Weight)) + (20_179_000 as Weight) + // Standard Error: 14_000 + .saturating_add((7_144_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64sub(r: u32, ) -> Weight { - (20_182_000 as Weight) - // Standard Error: 22_000 - .saturating_add((7_327_000 as Weight).saturating_mul(r as Weight)) + (20_143_000 as Weight) + // Standard Error: 17_000 + .saturating_add((7_222_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64mul(r: u32, ) -> Weight { - (20_203_000 as Weight) - // Standard Error: 20_000 - .saturating_add((7_221_000 as Weight).saturating_mul(r as Weight)) + (20_129_000 as Weight) + // Standard Error: 21_000 + .saturating_add((7_247_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64divs(r: u32, ) -> Weight { - (20_187_000 as Weight) + (20_107_000 as Weight) // Standard Error: 16_000 - .saturating_add((13_738_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((12_953_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64divu(r: u32, ) -> Weight { - (20_153_000 as Weight) - // Standard Error: 11_000 - .saturating_add((12_766_000 as Weight).saturating_mul(r as Weight)) + (20_093_000 as Weight) + // Standard 
Error: 17_000 + .saturating_add((12_040_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64rems(r: u32, ) -> Weight { - (20_219_000 as Weight) + (20_102_000 as Weight) // Standard Error: 13_000 - .saturating_add((13_732_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((12_945_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64remu(r: u32, ) -> Weight { - (20_246_000 as Weight) + (20_132_000 as Weight) // Standard Error: 16_000 - .saturating_add((12_686_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((12_199_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64and(r: u32, ) -> Weight { - (20_228_000 as Weight) - // Standard Error: 13_000 - .saturating_add((7_245_000 as Weight).saturating_mul(r as Weight)) + (20_155_000 as Weight) + // Standard Error: 26_000 + .saturating_add((7_103_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64or(r: u32, ) -> Weight { - (20_238_000 as Weight) - // Standard Error: 17_000 - .saturating_add((7_250_000 as Weight).saturating_mul(r as Weight)) + (20_088_000 as Weight) + // Standard Error: 22_000 + .saturating_add((7_213_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64xor(r: u32, ) -> Weight { - (20_213_000 as Weight) - // Standard Error: 10_000 - .saturating_add((7_292_000 as Weight).saturating_mul(r as Weight)) + (20_060_000 as Weight) + // Standard Error: 18_000 + .saturating_add((7_275_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shl(r: u32, ) -> Weight { - (20_224_000 as Weight) - // Standard Error: 18_000 - .saturating_add((7_554_000 as Weight).saturating_mul(r as Weight)) + (20_104_000 as Weight) + // Standard Error: 15_000 + .saturating_add((7_282_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shrs(r: u32, ) -> Weight { - (20_261_000 as Weight) + (20_111_000 as Weight) // Standard Error: 20_000 - .saturating_add((7_551_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((7_264_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shru(r: u32, ) -> Weight { - (20_212_000 as Weight) - // Standard Error: 15_000 - .saturating_add((7_616_000 as Weight).saturating_mul(r as Weight)) + (20_096_000 as Weight) + // Standard Error: 23_000 + .saturating_add((7_347_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64rotl(r: u32, ) -> Weight { - (20_176_000 as Weight) - // Standard Error: 9_000 - .saturating_add((7_877_000 as Weight).saturating_mul(r as Weight)) + (20_091_000 as Weight) + // Standard Error: 16_000 + .saturating_add((7_370_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64rotr(r: u32, ) -> Weight { - (20_230_000 as Weight) - // Standard Error: 17_000 - .saturating_add((7_347_000 as Weight).saturating_mul(r as Weight)) + (20_102_000 as Weight) + // Standard Error: 28_000 + .saturating_add((7_266_000 as Weight).saturating_mul(r as Weight)) } } diff --git a/frame/democracy/src/weights.rs b/frame/democracy/src/weights.rs index e2e1bd0c8be25..1462e65c409b1 100644 --- a/frame/democracy/src/weights.rs +++ b/frame/democracy/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// Copyright (C) 2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,9 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Weights for pallet_democracy -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! 
DATE: 2020-10-28, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! Autogenerated weights for pallet_democracy +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 +//! DATE: 2021-06-19, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -74,145 +75,163 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn propose() -> Weight { - (87_883_000 as Weight) + (71_782_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn second(s: u32, ) -> Weight { - (52_998_000 as Weight) - .saturating_add((251_000 as Weight).saturating_mul(s as Weight)) + (41_071_000 as Weight) + // Standard Error: 1_000 + .saturating_add((211_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn vote_new(r: u32, ) -> Weight { - (63_300_000 as Weight) - .saturating_add((284_000 as Weight).saturating_mul(r as Weight)) + (46_179_000 as Weight) + // Standard Error: 0 + .saturating_add((283_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn vote_existing(r: u32, ) -> Weight { - (63_127_000 as Weight) - .saturating_add((289_000 as Weight).saturating_mul(r as Weight)) + (46_169_000 as Weight) + // Standard Error: 0 + .saturating_add((284_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn emergency_cancel() -> Weight { - (38_877_000 as Weight) + (28_615_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn blacklist(p: u32, ) -> Weight { - (108_060_000 as Weight) - .saturating_add((795_000 as Weight).saturating_mul(p as Weight)) + (80_711_000 as Weight) + // Standard Error: 4_000 + .saturating_add((590_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(6 as Weight)) } fn external_propose(v: u32, ) -> Weight { - (19_052_000 as Weight) - .saturating_add((111_000 as Weight).saturating_mul(v as Weight)) + (13_197_000 as Weight) + // Standard Error: 0 + .saturating_add((90_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn external_propose_majority() -> Weight { - (4_544_000 as Weight) + (2_712_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn external_propose_default() -> Weight { - (4_608_000 as Weight) + (2_680_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn fast_track() -> Weight { - (38_876_000 as Weight) + (28_340_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn veto_external(v: u32, ) -> Weight { - (40_283_000 as Weight) - .saturating_add((187_000 as Weight).saturating_mul(v as Weight)) + (28_894_000 as Weight) + // Standard Error: 0 + .saturating_add((133_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn 
cancel_proposal(p: u32, ) -> Weight { - (68_449_000 as Weight) - .saturating_add((876_000 as Weight).saturating_mul(p as Weight)) + (54_339_000 as Weight) + // Standard Error: 1_000 + .saturating_add((561_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn cancel_referendum() -> Weight { - (23_670_000 as Weight) + (17_183_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn cancel_queued(r: u32, ) -> Weight { - (43_247_000 as Weight) - .saturating_add((4_578_000 as Weight).saturating_mul(r as Weight)) + (30_500_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_730_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn on_initialize_base(r: u32, ) -> Weight { - (15_278_000 as Weight) - .saturating_add((6_696_000 as Weight).saturating_mul(r as Weight)) + (7_788_000 as Weight) + // Standard Error: 4_000 + .saturating_add((5_422_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) } fn delegate(r: u32, ) -> Weight { - (83_002_000 as Weight) - .saturating_add((9_889_000 as Weight).saturating_mul(r as Weight)) + (55_676_000 as Weight) + // Standard Error: 5_000 + .saturating_add((7_553_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(4 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(r as Weight))) } fn undelegate(r: u32, ) -> Weight { - (43_552_000 as Weight) - .saturating_add((9_887_000 as Weight).saturating_mul(r as Weight)) + (23_908_000 as Weight) + // Standard Error: 5_000 + .saturating_add((7_551_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(r as Weight))) } fn clear_public_proposals() -> Weight { - (4_404_000 as Weight) + (3_023_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn note_preimage(b: u32, ) -> Weight { - (60_073_000 as Weight) - .saturating_add((4_000 as Weight).saturating_mul(b as Weight)) + (44_069_000 as Weight) + // Standard Error: 0 + .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn note_imminent_preimage(b: u32, ) -> Weight { - (38_896_000 as Weight) - .saturating_add((4_000 as Weight).saturating_mul(b as Weight)) + (28_457_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn reap_preimage(b: u32, ) -> Weight { - (54_861_000 as Weight) - .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) + (39_646_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as 
Weight)) } fn unlock_remove(r: u32, ) -> Weight { - (52_956_000 as Weight) - .saturating_add((126_000 as Weight).saturating_mul(r as Weight)) + (39_499_000 as Weight) + // Standard Error: 0 + .saturating_add((148_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn unlock_set(r: u32, ) -> Weight { - (49_789_000 as Weight) - .saturating_add((274_000 as Weight).saturating_mul(r as Weight)) + (37_340_000 as Weight) + // Standard Error: 0 + .saturating_add((266_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn remove_vote(r: u32, ) -> Weight { - (29_790_000 as Weight) - .saturating_add((283_000 as Weight).saturating_mul(r as Weight)) + (20_397_000 as Weight) + // Standard Error: 0 + .saturating_add((259_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn remove_other_vote(r: u32, ) -> Weight { - (28_497_000 as Weight) - .saturating_add((217_000 as Weight).saturating_mul(r as Weight)) + (20_425_000 as Weight) + // Standard Error: 0 + .saturating_add((156_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } @@ -221,145 +240,163 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { fn propose() -> Weight { - (87_883_000 as Weight) + (71_782_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn second(s: u32, ) -> Weight { - (52_998_000 as Weight) - .saturating_add((251_000 as Weight).saturating_mul(s as Weight)) + (41_071_000 as Weight) + // Standard Error: 1_000 + .saturating_add((211_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn vote_new(r: u32, ) -> Weight { - (63_300_000 as Weight) - .saturating_add((284_000 as Weight).saturating_mul(r as Weight)) + (46_179_000 as Weight) + // Standard Error: 0 + .saturating_add((283_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn vote_existing(r: u32, ) -> Weight { - (63_127_000 as Weight) - .saturating_add((289_000 as Weight).saturating_mul(r as Weight)) + (46_169_000 as Weight) + // Standard Error: 0 + .saturating_add((284_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn emergency_cancel() -> Weight { - (38_877_000 as Weight) + (28_615_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn blacklist(p: u32, ) -> Weight { - (108_060_000 as Weight) - .saturating_add((795_000 as Weight).saturating_mul(p as Weight)) + (80_711_000 as Weight) + // Standard Error: 4_000 + .saturating_add((590_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(6 as Weight)) } fn external_propose(v: u32, ) -> Weight { - (19_052_000 as Weight) - .saturating_add((111_000 as Weight).saturating_mul(v as Weight)) + 
(13_197_000 as Weight) + // Standard Error: 0 + .saturating_add((90_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn external_propose_majority() -> Weight { - (4_544_000 as Weight) + (2_712_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn external_propose_default() -> Weight { - (4_608_000 as Weight) + (2_680_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn fast_track() -> Weight { - (38_876_000 as Weight) + (28_340_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn veto_external(v: u32, ) -> Weight { - (40_283_000 as Weight) - .saturating_add((187_000 as Weight).saturating_mul(v as Weight)) + (28_894_000 as Weight) + // Standard Error: 0 + .saturating_add((133_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn cancel_proposal(p: u32, ) -> Weight { - (68_449_000 as Weight) - .saturating_add((876_000 as Weight).saturating_mul(p as Weight)) + (54_339_000 as Weight) + // Standard Error: 1_000 + .saturating_add((561_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn cancel_referendum() -> Weight { - (23_670_000 as Weight) + (17_183_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn cancel_queued(r: u32, ) -> Weight { - (43_247_000 as Weight) - .saturating_add((4_578_000 as Weight).saturating_mul(r as Weight)) + (30_500_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_730_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn on_initialize_base(r: u32, ) -> Weight { - (15_278_000 as Weight) - .saturating_add((6_696_000 as Weight).saturating_mul(r as Weight)) + (7_788_000 as Weight) + // Standard Error: 4_000 + .saturating_add((5_422_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) } fn delegate(r: u32, ) -> Weight { - (83_002_000 as Weight) - .saturating_add((9_889_000 as Weight).saturating_mul(r as Weight)) + (55_676_000 as Weight) + // Standard Error: 5_000 + .saturating_add((7_553_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(r as Weight))) } fn undelegate(r: u32, ) -> Weight { - (43_552_000 as Weight) - .saturating_add((9_887_000 as Weight).saturating_mul(r as Weight)) + (23_908_000 as Weight) + // Standard Error: 5_000 + .saturating_add((7_551_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(r as Weight))) } fn clear_public_proposals() -> Weight { - (4_404_000 as Weight) + (3_023_000 as Weight) 
.saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn note_preimage(b: u32, ) -> Weight { - (60_073_000 as Weight) - .saturating_add((4_000 as Weight).saturating_mul(b as Weight)) + (44_069_000 as Weight) + // Standard Error: 0 + .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn note_imminent_preimage(b: u32, ) -> Weight { - (38_896_000 as Weight) - .saturating_add((4_000 as Weight).saturating_mul(b as Weight)) + (28_457_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn reap_preimage(b: u32, ) -> Weight { - (54_861_000 as Weight) - .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) + (39_646_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn unlock_remove(r: u32, ) -> Weight { - (52_956_000 as Weight) - .saturating_add((126_000 as Weight).saturating_mul(r as Weight)) + (39_499_000 as Weight) + // Standard Error: 0 + .saturating_add((148_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn unlock_set(r: u32, ) -> Weight { - (49_789_000 as Weight) - .saturating_add((274_000 as Weight).saturating_mul(r as Weight)) + (37_340_000 as Weight) + // Standard Error: 0 + .saturating_add((266_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn remove_vote(r: u32, ) -> Weight { - (29_790_000 as Weight) - .saturating_add((283_000 as Weight).saturating_mul(r as Weight)) + (20_397_000 as Weight) + // Standard Error: 0 + .saturating_add((259_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn remove_other_vote(r: u32, ) -> Weight { - (28_497_000 as Weight) - .saturating_add((217_000 as Weight).saturating_mul(r as Weight)) + (20_425_000 as Weight) + // Standard Error: 0 + .saturating_add((156_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } diff --git a/frame/election-provider-multi-phase/src/weights.rs b/frame/election-provider-multi-phase/src/weights.rs index 3d3a5cede3293..51b99bc962d43 100644 --- a/frame/election-provider-multi-phase/src/weights.rs +++ b/frame/election-provider-multi-phase/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_election_provider_multi_phase //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 -//! DATE: 2021-03-19, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2021-06-19, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -57,101 +57,105 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn on_initialize_nothing() -> Weight { - (22_730_000 as Weight) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) + (24_579_000 as Weight) + .saturating_add(T::DbWeight::get().reads(8 as Weight)) } fn on_initialize_open_signed() -> Weight { - (112_051_000 as Weight) - .saturating_add(T::DbWeight::get().reads(8 as Weight)) + (87_463_000 as Weight) + .saturating_add(T::DbWeight::get().reads(10 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn on_initialize_open_unsigned_with_snapshot() -> Weight { - (112_165_000 as Weight) - .saturating_add(T::DbWeight::get().reads(8 as Weight)) + (87_381_000 as Weight) + .saturating_add(T::DbWeight::get().reads(10 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn on_initialize_open_unsigned_without_snapshot() -> Weight { - (21_039_000 as Weight) + (18_489_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn elect_queued() -> Weight { - (7_362_949_000 as Weight) + (6_038_989_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(6 as Weight)) } - fn submit_unsigned(v: u32, _t: u32, a: u32, d: u32, ) -> Weight { + fn submit_unsigned(v: u32, t: u32, a: u32, d: u32, ) -> Weight { (0 as Weight) - // Standard Error: 21_000 - .saturating_add((3_933_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 21_000 - .saturating_add((13_520_000 as Weight).saturating_mul(a as Weight)) - // Standard Error: 107_000 - .saturating_add((2_880_000 as Weight).saturating_mul(d as Weight)) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) + // Standard Error: 12_000 + .saturating_add((3_480_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 42_000 + .saturating_add((194_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 12_000 + .saturating_add((10_498_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 63_000 + .saturating_add((3_074_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn feasibility_check(v: u32, t: u32, a: u32, d: u32, ) -> Weight { (0 as Weight) - // Standard Error: 10_000 - .saturating_add((4_069_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 7_000 + .saturating_add((3_481_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 24_000 + .saturating_add((385_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 7_000 + .saturating_add((8_538_000 as Weight).saturating_mul(a as Weight)) // Standard Error: 36_000 - .saturating_add((503_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 10_000 - .saturating_add((10_000_000 as Weight).saturating_mul(a as Weight)) - // Standard Error: 54_000 - .saturating_add((3_734_000 as Weight).saturating_mul(d as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add((3_322_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) } } // For backwards compatibility and tests impl WeightInfo for () { fn on_initialize_nothing() -> Weight { - (22_730_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + (24_579_000 as 
Weight) + .saturating_add(RocksDbWeight::get().reads(8 as Weight)) } fn on_initialize_open_signed() -> Weight { - (112_051_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(8 as Weight)) + (87_463_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(10 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } fn on_initialize_open_unsigned_with_snapshot() -> Weight { - (112_165_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(8 as Weight)) + (87_381_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(10 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } fn on_initialize_open_unsigned_without_snapshot() -> Weight { - (21_039_000 as Weight) + (18_489_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn elect_queued() -> Weight { - (7_362_949_000 as Weight) + (6_038_989_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(6 as Weight)) } - fn submit_unsigned(v: u32, _t: u32, a: u32, d: u32, ) -> Weight { + fn submit_unsigned(v: u32, t: u32, a: u32, d: u32, ) -> Weight { (0 as Weight) - // Standard Error: 21_000 - .saturating_add((3_933_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 21_000 - .saturating_add((13_520_000 as Weight).saturating_mul(a as Weight)) - // Standard Error: 107_000 - .saturating_add((2_880_000 as Weight).saturating_mul(d as Weight)) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + // Standard Error: 12_000 + .saturating_add((3_480_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 42_000 + .saturating_add((194_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 12_000 + .saturating_add((10_498_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 63_000 + .saturating_add((3_074_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(RocksDbWeight::get().reads(7 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn feasibility_check(v: u32, t: u32, a: u32, d: u32, ) -> Weight { (0 as Weight) - // Standard Error: 10_000 - .saturating_add((4_069_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 7_000 + .saturating_add((3_481_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 24_000 + .saturating_add((385_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 7_000 + .saturating_add((8_538_000 as Weight).saturating_mul(a as Weight)) // Standard Error: 36_000 - .saturating_add((503_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 10_000 - .saturating_add((10_000_000 as Weight).saturating_mul(a as Weight)) - // Standard Error: 54_000 - .saturating_add((3_734_000 as Weight).saturating_mul(d as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add((3_322_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) } } diff --git a/frame/elections-phragmen/src/weights.rs b/frame/elections-phragmen/src/weights.rs index c3d9365c8855a..12a3a433401bb 100644 --- a/frame/elections-phragmen/src/weights.rs +++ b/frame/elections-phragmen/src/weights.rs @@ -17,8 +17,8 @@ //! Autogenerated weights for pallet_elections_phragmen //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.1 -//! DATE: 2021-01-20, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 +//! DATE: 2021-06-19, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -62,82 +62,80 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn vote_equal(v: u32, ) -> Weight { - (45_157_000 as Weight) - // Standard Error: 6_000 - .saturating_add((399_000 as Weight).saturating_mul(v as Weight)) + (43_911_000 as Weight) + // Standard Error: 7_000 + .saturating_add((324_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn vote_more(v: u32, ) -> Weight { - (69_738_000 as Weight) - // Standard Error: 14_000 - .saturating_add((450_000 as Weight).saturating_mul(v as Weight)) + (68_236_000 as Weight) + // Standard Error: 10_000 + .saturating_add((359_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn vote_less(v: u32, ) -> Weight { - (73_955_000 as Weight) - // Standard Error: 38_000 - .saturating_add((227_000 as Weight).saturating_mul(v as Weight)) + (68_162_000 as Weight) + // Standard Error: 9_000 + .saturating_add((350_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn remove_voter() -> Weight { - (68_398_000 as Weight) + (63_005_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn submit_candidacy(c: u32, ) -> Weight { - (59_291_000 as Weight) - // Standard Error: 2_000 - .saturating_add((412_000 as Weight).saturating_mul(c as Weight)) + (58_498_000 as Weight) + // Standard Error: 1_000 + .saturating_add((305_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn renounce_candidacy_candidate(c: u32, ) -> Weight { - (55_026_000 as Weight) - // Standard Error: 2_000 - .saturating_add((207_000 as Weight).saturating_mul(c as Weight)) + (52_062_000 as Weight) + // Standard Error: 0 + .saturating_add((173_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn renounce_candidacy_members() -> Weight { - (77_840_000 as Weight) + (73_234_000 as Weight) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn renounce_candidacy_runners_up() -> Weight { - (54_559_000 as Weight) + (51_689_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn remove_member_with_replacement() -> Weight { - (84_311_000 as Weight) + (79_906_000 as Weight) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(5 as Weight)) } fn remove_member_wrong_refund() -> Weight { - (7_677_000 as Weight) + (6_877_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) } - fn clean_defunct_voters(v: u32, d: u32, ) -> Weight { + fn clean_defunct_voters(v: u32, _d: u32, ) -> Weight { (0 as Weight) - // Standard Error: 55_000 - .saturating_add((114_815_000 as Weight).saturating_mul(v as Weight)) - // 
Standard Error: 53_000 - .saturating_add((49_000 as Weight).saturating_mul(d as Weight)) + // Standard Error: 39_000 + .saturating_add((112_381_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) } fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight { (0 as Weight) - // Standard Error: 1_940_000 - .saturating_add((43_557_000 as Weight).saturating_mul(c as Weight)) - // Standard Error: 807_000 - .saturating_add((65_849_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 55_000 - .saturating_add((4_206_000 as Weight).saturating_mul(e as Weight)) + // Standard Error: 1_789_000 + .saturating_add((42_600_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 744_000 + .saturating_add((60_743_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 50_000 + .saturating_add((3_837_000 as Weight).saturating_mul(e as Weight)) .saturating_add(T::DbWeight::get().reads((2 as Weight).saturating_mul(c as Weight))) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(c as Weight))) @@ -147,82 +145,80 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { fn vote_equal(v: u32, ) -> Weight { - (45_157_000 as Weight) - // Standard Error: 6_000 - .saturating_add((399_000 as Weight).saturating_mul(v as Weight)) + (43_911_000 as Weight) + // Standard Error: 7_000 + .saturating_add((324_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn vote_more(v: u32, ) -> Weight { - (69_738_000 as Weight) - // Standard Error: 14_000 - .saturating_add((450_000 as Weight).saturating_mul(v as Weight)) + (68_236_000 as Weight) + // Standard Error: 10_000 + .saturating_add((359_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn vote_less(v: u32, ) -> Weight { - (73_955_000 as Weight) - // Standard Error: 38_000 - .saturating_add((227_000 as Weight).saturating_mul(v as Weight)) + (68_162_000 as Weight) + // Standard Error: 9_000 + .saturating_add((350_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn remove_voter() -> Weight { - (68_398_000 as Weight) + (63_005_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn submit_candidacy(c: u32, ) -> Weight { - (59_291_000 as Weight) - // Standard Error: 2_000 - .saturating_add((412_000 as Weight).saturating_mul(c as Weight)) + (58_498_000 as Weight) + // Standard Error: 1_000 + .saturating_add((305_000 as Weight).saturating_mul(c as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn renounce_candidacy_candidate(c: u32, ) -> Weight { - (55_026_000 as Weight) - // Standard Error: 2_000 - .saturating_add((207_000 as Weight).saturating_mul(c as Weight)) + (52_062_000 as Weight) + // Standard Error: 0 + .saturating_add((173_000 as Weight).saturating_mul(c as Weight)) 
.saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn renounce_candidacy_members() -> Weight { - (77_840_000 as Weight) + (73_234_000 as Weight) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } fn renounce_candidacy_runners_up() -> Weight { - (54_559_000 as Weight) + (51_689_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn remove_member_with_replacement() -> Weight { - (84_311_000 as Weight) + (79_906_000 as Weight) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(5 as Weight)) } fn remove_member_wrong_refund() -> Weight { - (7_677_000 as Weight) + (6_877_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) } - fn clean_defunct_voters(v: u32, d: u32, ) -> Weight { + fn clean_defunct_voters(v: u32, _d: u32, ) -> Weight { (0 as Weight) - // Standard Error: 55_000 - .saturating_add((114_815_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 53_000 - .saturating_add((49_000 as Weight).saturating_mul(d as Weight)) + // Standard Error: 39_000 + .saturating_add((112_381_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) } fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight { (0 as Weight) - // Standard Error: 1_940_000 - .saturating_add((43_557_000 as Weight).saturating_mul(c as Weight)) - // Standard Error: 807_000 - .saturating_add((65_849_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 55_000 - .saturating_add((4_206_000 as Weight).saturating_mul(e as Weight)) + // Standard Error: 1_789_000 + .saturating_add((42_600_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 744_000 + .saturating_add((60_743_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 50_000 + .saturating_add((3_837_000 as Weight).saturating_mul(e as Weight)) .saturating_add(RocksDbWeight::get().reads((2 as Weight).saturating_mul(c as Weight))) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(c as Weight))) diff --git a/frame/gilt/src/weights.rs b/frame/gilt/src/weights.rs index 1e0e5fa9b4d39..c9e16c041874c 100644 --- a/frame/gilt/src/weights.rs +++ b/frame/gilt/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_gilt //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 -//! DATE: 2021-02-23, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2021-06-19, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -58,50 +58,50 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn place_bid(l: u32, ) -> Weight { - (79_274_000 as Weight) + (60_401_000 as Weight) // Standard Error: 0 - .saturating_add((289_000 as Weight).saturating_mul(l as Weight)) + .saturating_add((146_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn place_bid_max() -> Weight { - (297_825_000 as Weight) + (178_653_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn retract_bid(l: u32, ) -> Weight { - (79_731_000 as Weight) + (61_026_000 as Weight) // Standard Error: 0 - .saturating_add((231_000 as Weight).saturating_mul(l as Weight)) + .saturating_add((119_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn set_target() -> Weight { - (6_113_000 as Weight) + (5_756_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn thaw() -> Weight { - (74_792_000 as Weight) + (72_668_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn pursue_target_noop() -> Weight { - (3_468_000 as Weight) + (3_449_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) } fn pursue_target_per_item(b: u32, ) -> Weight { - (65_792_000 as Weight) - // Standard Error: 2_000 - .saturating_add((11_402_000 as Weight).saturating_mul(b as Weight)) + (58_182_000 as Weight) + // Standard Error: 1_000 + .saturating_add((10_005_000 as Weight).saturating_mul(b as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(b as Weight))) } fn pursue_target_per_queue(q: u32, ) -> Weight { - (32_391_000 as Weight) + (21_740_000 as Weight) // Standard Error: 7_000 - .saturating_add((18_500_000 as Weight).saturating_mul(q as Weight)) + .saturating_add((16_849_000 as Weight).saturating_mul(q as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(q as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) @@ -112,50 +112,50 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { fn place_bid(l: u32, ) -> Weight { - (79_274_000 as Weight) + (60_401_000 as Weight) // Standard Error: 0 - .saturating_add((289_000 as Weight).saturating_mul(l as Weight)) + .saturating_add((146_000 as Weight).saturating_mul(l as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn place_bid_max() -> Weight { - (297_825_000 as Weight) + (178_653_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn retract_bid(l: u32, ) -> Weight { - (79_731_000 as Weight) + (61_026_000 as Weight) // Standard Error: 0 - .saturating_add((231_000 as Weight).saturating_mul(l as Weight)) + .saturating_add((119_000 as Weight).saturating_mul(l as Weight)) 
 		.saturating_add(RocksDbWeight::get().reads(2 as Weight))
 		.saturating_add(RocksDbWeight::get().writes(2 as Weight))
 	}
 	fn set_target() -> Weight {
-		(6_113_000 as Weight)
+		(5_756_000 as Weight)
 		.saturating_add(RocksDbWeight::get().reads(1 as Weight))
 		.saturating_add(RocksDbWeight::get().writes(1 as Weight))
 	}
 	fn thaw() -> Weight {
-		(74_792_000 as Weight)
+		(72_668_000 as Weight)
 		.saturating_add(RocksDbWeight::get().reads(2 as Weight))
 		.saturating_add(RocksDbWeight::get().writes(2 as Weight))
 	}
 	fn pursue_target_noop() -> Weight {
-		(3_468_000 as Weight)
+		(3_449_000 as Weight)
 		.saturating_add(RocksDbWeight::get().reads(1 as Weight))
 	}
 	fn pursue_target_per_item(b: u32, ) -> Weight {
-		(65_792_000 as Weight)
-		// Standard Error: 2_000
-		.saturating_add((11_402_000 as Weight).saturating_mul(b as Weight))
+		(58_182_000 as Weight)
+		// Standard Error: 1_000
+		.saturating_add((10_005_000 as Weight).saturating_mul(b as Weight))
 		.saturating_add(RocksDbWeight::get().reads(3 as Weight))
 		.saturating_add(RocksDbWeight::get().writes(3 as Weight))
 		.saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(b as Weight)))
 	}
 	fn pursue_target_per_queue(q: u32, ) -> Weight {
-		(32_391_000 as Weight)
+		(21_740_000 as Weight)
 		// Standard Error: 7_000
-		.saturating_add((18_500_000 as Weight).saturating_mul(q as Weight))
+		.saturating_add((16_849_000 as Weight).saturating_mul(q as Weight))
 		.saturating_add(RocksDbWeight::get().reads(2 as Weight))
 		.saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(q as Weight)))
 		.saturating_add(RocksDbWeight::get().writes(2 as Weight))
diff --git a/frame/identity/src/weights.rs b/frame/identity/src/weights.rs
index 1635a8d705477..f283b2869bdfc 100644
--- a/frame/identity/src/weights.rs
+++ b/frame/identity/src/weights.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.
-// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
+// Copyright (C) 2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0
 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,9 +15,10 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-//! Weights for pallet_identity
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0
-//! DATE: 2020-10-27, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: [], HIGH RANGE: []
+//! Autogenerated weights for pallet_identity
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0
+//! DATE: 2021-06-19, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128
 // Executed Command:
@@ -43,270 +44,295 @@ use sp_std::marker::PhantomData;
 /// Weight functions needed for pallet_identity.
 pub trait WeightInfo {
-	fn add_registrar(_r: u32, ) -> Weight;
-	fn set_identity(_r: u32, _x: u32, ) -> Weight;
-	fn set_subs_new(_s: u32, ) -> Weight;
-	fn set_subs_old(_p: u32, ) -> Weight;
-	fn clear_identity(_r: u32, _s: u32, _x: u32, ) -> Weight;
-	fn request_judgement(_r: u32, _x: u32, ) -> Weight;
-	fn cancel_request(_r: u32, _x: u32, ) -> Weight;
-	fn set_fee(_r: u32, ) -> Weight;
-	fn set_account_id(_r: u32, ) -> Weight;
-	fn set_fields(_r: u32, ) -> Weight;
-	fn provide_judgement(_r: u32, _x: u32, ) -> Weight;
-	fn kill_identity(_r: u32, _s: u32, _x: u32, ) -> Weight;
-	fn add_sub(_s: u32, ) -> Weight;
-	fn rename_sub(_s: u32, ) -> Weight;
-	fn remove_sub(_s: u32, ) -> Weight;
-	fn quit_sub(_s: u32, ) -> Weight;
-
+	fn add_registrar(r: u32, ) -> Weight;
+	fn set_identity(r: u32, x: u32, ) -> Weight;
+	fn set_subs_new(s: u32, ) -> Weight;
+	fn set_subs_old(p: u32, ) -> Weight;
+	fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight;
+	fn request_judgement(r: u32, x: u32, ) -> Weight;
+	fn cancel_request(r: u32, x: u32, ) -> Weight;
+	fn set_fee(r: u32, ) -> Weight;
+	fn set_account_id(r: u32, ) -> Weight;
+	fn set_fields(r: u32, ) -> Weight;
+	fn provide_judgement(r: u32, x: u32, ) -> Weight;
+	fn kill_identity(r: u32, s: u32, x: u32, ) -> Weight;
+	fn add_sub(s: u32, ) -> Weight;
+	fn rename_sub(s: u32, ) -> Weight;
+	fn remove_sub(s: u32, ) -> Weight;
+	fn quit_sub(s: u32, ) -> Weight;
 }
 /// Weights for pallet_identity using the Substrate node and recommended hardware.
 pub struct SubstrateWeight<T>(PhantomData<T>);
 impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	fn add_registrar(r: u32, ) -> Weight {
-		(28_965_000 as Weight)
-		.saturating_add((421_000 as Weight).saturating_mul(r as Weight))
+		(21_825_000 as Weight)
+		// Standard Error: 3_000
+		.saturating_add((288_000 as Weight).saturating_mul(r as Weight))
 		.saturating_add(T::DbWeight::get().reads(1 as Weight))
 		.saturating_add(T::DbWeight::get().writes(1 as Weight))
-
 	}
 	fn set_identity(r: u32, x: u32, ) -> Weight {
-		(71_923_000 as Weight)
-		.saturating_add((529_000 as Weight).saturating_mul(r as Weight))
-		.saturating_add((1_763_000 as Weight).saturating_mul(x as Weight))
+		(53_354_000 as Weight)
+		// Standard Error: 15_000
+		.saturating_add((274_000 as Weight).saturating_mul(r as Weight))
+		// Standard Error: 2_000
+		.saturating_add((939_000 as Weight).saturating_mul(x as Weight))
 		.saturating_add(T::DbWeight::get().reads(1 as Weight))
 		.saturating_add(T::DbWeight::get().writes(1 as Weight))
-
 	}
 	fn set_subs_new(s: u32, ) -> Weight {
-		(55_550_000 as Weight)
-		.saturating_add((9_760_000 as Weight).saturating_mul(s as Weight))
+		(42_017_000 as Weight)
+		// Standard Error: 2_000
+		.saturating_add((6_457_000 as Weight).saturating_mul(s as Weight))
 		.saturating_add(T::DbWeight::get().reads(2 as Weight))
 		.saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(s as Weight)))
 		.saturating_add(T::DbWeight::get().writes(1 as Weight))
 		.saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight)))
 	}
 	fn set_subs_old(p: u32, ) -> Weight {
-		(51_789_000 as Weight)
-		.saturating_add((3_484_000 as Weight).saturating_mul(p as Weight))
+		(41_605_000 as Weight)
+		// Standard Error: 0
+		.saturating_add((2_157_000 as Weight).saturating_mul(p as Weight))
 		.saturating_add(T::DbWeight::get().reads(2 as Weight))
 		.saturating_add(T::DbWeight::get().writes(1 as Weight))
 		.saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight)))
 	}
 	fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight {
-		(65_458_000 as Weight)
-
.saturating_add((230_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((3_437_000 as Weight).saturating_mul(s as Weight)) - .saturating_add((1_023_000 as Weight).saturating_mul(x as Weight)) + (51_811_000 as Weight) + // Standard Error: 5_000 + .saturating_add((202_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 0 + .saturating_add((2_157_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 + .saturating_add((618_000 as Weight).saturating_mul(x as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn request_judgement(r: u32, x: u32, ) -> Weight { - (75_299_000 as Weight) - .saturating_add((493_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((2_014_000 as Weight).saturating_mul(x as Weight)) + (54_657_000 as Weight) + // Standard Error: 5_000 + .saturating_add((381_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 0 + .saturating_add((1_153_000 as Weight).saturating_mul(x as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn cancel_request(r: u32, x: u32, ) -> Weight { - (67_492_000 as Weight) - .saturating_add((225_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((2_003_000 as Weight).saturating_mul(x as Weight)) + (50_895_000 as Weight) + // Standard Error: 6_000 + .saturating_add((267_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 0 + .saturating_add((1_141_000 as Weight).saturating_mul(x as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn set_fee(r: u32, ) -> Weight { - (11_375_000 as Weight) - .saturating_add((382_000 as Weight).saturating_mul(r as Weight)) + (8_036_000 as Weight) + // Standard Error: 2_000 + .saturating_add((281_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn set_account_id(r: u32, ) -> Weight { - (12_898_000 as Weight) - .saturating_add((384_000 as Weight).saturating_mul(r as Weight)) + (9_001_000 as Weight) + // Standard Error: 2_000 + .saturating_add((288_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn set_fields(r: u32, ) -> Weight { - (11_419_000 as Weight) - .saturating_add((381_000 as Weight).saturating_mul(r as Weight)) + (8_039_000 as Weight) + // Standard Error: 2_000 + .saturating_add((286_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn provide_judgement(r: u32, x: u32, ) -> Weight { - (51_115_000 as Weight) - .saturating_add((427_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((2_001_000 as Weight).saturating_mul(x as Weight)) + (35_746_000 as Weight) + // Standard Error: 4_000 + .saturating_add((346_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 0 + .saturating_add((1_164_000 as Weight).saturating_mul(x as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn kill_identity(_r: u32, s: u32, _x: u32, ) -> Weight { - (90_911_000 as Weight) - .saturating_add((3_450_000 as Weight).saturating_mul(s as Weight)) + 
fn kill_identity(r: u32, s: u32, x: u32, ) -> Weight { + (65_304_000 as Weight) + // Standard Error: 4_000 + .saturating_add((149_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 0 + .saturating_add((2_118_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 + .saturating_add((6_000 as Weight).saturating_mul(x as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn add_sub(s: u32, ) -> Weight { - (76_957_000 as Weight) - .saturating_add((261_000 as Weight).saturating_mul(s as Weight)) + (55_491_000 as Weight) + // Standard Error: 0 + .saturating_add((220_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } fn rename_sub(s: u32, ) -> Weight { - (26_219_000 as Weight) + (17_564_000 as Weight) + // Standard Error: 0 .saturating_add((84_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn remove_sub(s: u32, ) -> Weight { - (73_130_000 as Weight) - .saturating_add((239_000 as Weight).saturating_mul(s as Weight)) + (56_535_000 as Weight) + // Standard Error: 0 + .saturating_add((209_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } fn quit_sub(s: u32, ) -> Weight { - (48_088_000 as Weight) - .saturating_add((237_000 as Weight).saturating_mul(s as Weight)) + (35_369_000 as Weight) + // Standard Error: 0 + .saturating_add((200_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - } // For backwards compatibility and tests impl WeightInfo for () { fn add_registrar(r: u32, ) -> Weight { - (28_965_000 as Weight) - .saturating_add((421_000 as Weight).saturating_mul(r as Weight)) + (21_825_000 as Weight) + // Standard Error: 3_000 + .saturating_add((288_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn set_identity(r: u32, x: u32, ) -> Weight { - (71_923_000 as Weight) - .saturating_add((529_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((1_763_000 as Weight).saturating_mul(x as Weight)) + (53_354_000 as Weight) + // Standard Error: 15_000 + .saturating_add((274_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 2_000 + .saturating_add((939_000 as Weight).saturating_mul(x as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn set_subs_new(s: u32, ) -> Weight { - (55_550_000 as Weight) - .saturating_add((9_760_000 as Weight).saturating_mul(s as Weight)) + (42_017_000 as Weight) + // Standard Error: 2_000 + .saturating_add((6_457_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn set_subs_old(p: u32, ) -> Weight { - (51_789_000 as Weight) - .saturating_add((3_484_000 as Weight).saturating_mul(p 
as Weight)) + (41_605_000 as Weight) + // Standard Error: 0 + .saturating_add((2_157_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) } fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight { - (65_458_000 as Weight) - .saturating_add((230_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((3_437_000 as Weight).saturating_mul(s as Weight)) - .saturating_add((1_023_000 as Weight).saturating_mul(x as Weight)) + (51_811_000 as Weight) + // Standard Error: 5_000 + .saturating_add((202_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 0 + .saturating_add((2_157_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 + .saturating_add((618_000 as Weight).saturating_mul(x as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn request_judgement(r: u32, x: u32, ) -> Weight { - (75_299_000 as Weight) - .saturating_add((493_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((2_014_000 as Weight).saturating_mul(x as Weight)) + (54_657_000 as Weight) + // Standard Error: 5_000 + .saturating_add((381_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 0 + .saturating_add((1_153_000 as Weight).saturating_mul(x as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn cancel_request(r: u32, x: u32, ) -> Weight { - (67_492_000 as Weight) - .saturating_add((225_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((2_003_000 as Weight).saturating_mul(x as Weight)) + (50_895_000 as Weight) + // Standard Error: 6_000 + .saturating_add((267_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 0 + .saturating_add((1_141_000 as Weight).saturating_mul(x as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn set_fee(r: u32, ) -> Weight { - (11_375_000 as Weight) - .saturating_add((382_000 as Weight).saturating_mul(r as Weight)) + (8_036_000 as Weight) + // Standard Error: 2_000 + .saturating_add((281_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn set_account_id(r: u32, ) -> Weight { - (12_898_000 as Weight) - .saturating_add((384_000 as Weight).saturating_mul(r as Weight)) + (9_001_000 as Weight) + // Standard Error: 2_000 + .saturating_add((288_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn set_fields(r: u32, ) -> Weight { - (11_419_000 as Weight) - .saturating_add((381_000 as Weight).saturating_mul(r as Weight)) + (8_039_000 as Weight) + // Standard Error: 2_000 + .saturating_add((286_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn provide_judgement(r: u32, x: u32, ) -> Weight { - (51_115_000 as Weight) - .saturating_add((427_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((2_001_000 as Weight).saturating_mul(x as Weight)) + (35_746_000 as Weight) + // Standard 
Error: 4_000 + .saturating_add((346_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 0 + .saturating_add((1_164_000 as Weight).saturating_mul(x as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } - fn kill_identity(_r: u32, s: u32, _x: u32, ) -> Weight { - (90_911_000 as Weight) - .saturating_add((3_450_000 as Weight).saturating_mul(s as Weight)) + fn kill_identity(r: u32, s: u32, x: u32, ) -> Weight { + (65_304_000 as Weight) + // Standard Error: 4_000 + .saturating_add((149_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 0 + .saturating_add((2_118_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 + .saturating_add((6_000 as Weight).saturating_mul(x as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn add_sub(s: u32, ) -> Weight { - (76_957_000 as Weight) - .saturating_add((261_000 as Weight).saturating_mul(s as Weight)) + (55_491_000 as Weight) + // Standard Error: 0 + .saturating_add((220_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } fn rename_sub(s: u32, ) -> Weight { - (26_219_000 as Weight) + (17_564_000 as Weight) + // Standard Error: 0 .saturating_add((84_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn remove_sub(s: u32, ) -> Weight { - (73_130_000 as Weight) - .saturating_add((239_000 as Weight).saturating_mul(s as Weight)) + (56_535_000 as Weight) + // Standard Error: 0 + .saturating_add((209_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } fn quit_sub(s: u32, ) -> Weight { - (48_088_000 as Weight) - .saturating_add((237_000 as Weight).saturating_mul(s as Weight)) + (35_369_000 as Weight) + // Standard Error: 0 + .saturating_add((200_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } - } diff --git a/frame/im-online/src/weights.rs b/frame/im-online/src/weights.rs index 83ec294e8edb8..6a1f575b856c5 100644 --- a/frame/im-online/src/weights.rs +++ b/frame/im-online/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// Copyright (C) 2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,9 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Weights for pallet_im_online -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-10-27, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! Autogenerated weights for pallet_im_online +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 +//! DATE: 2021-06-19, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -50,9 +51,11 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight { - (114_379_000 as Weight) - .saturating_add((219_000 as Weight).saturating_mul(k as Weight)) - .saturating_add((481_000 as Weight).saturating_mul(e as Weight)) + (97_166_000 as Weight) + // Standard Error: 0 + .saturating_add((153_000 as Weight).saturating_mul(k as Weight)) + // Standard Error: 1_000 + .saturating_add((328_000 as Weight).saturating_mul(e as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -61,9 +64,11 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight { - (114_379_000 as Weight) - .saturating_add((219_000 as Weight).saturating_mul(k as Weight)) - .saturating_add((481_000 as Weight).saturating_mul(e as Weight)) + (97_166_000 as Weight) + // Standard Error: 0 + .saturating_add((153_000 as Weight).saturating_mul(k as Weight)) + // Standard Error: 1_000 + .saturating_add((328_000 as Weight).saturating_mul(e as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } diff --git a/frame/indices/src/weights.rs b/frame/indices/src/weights.rs index e303b943b7e20..559392d3d2ba2 100644 --- a/frame/indices/src/weights.rs +++ b/frame/indices/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// Copyright (C) 2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,9 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Weights for pallet_indices -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-10-27, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! Autogenerated weights for pallet_indices +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 +//! DATE: 2021-06-19, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -48,76 +49,63 @@ pub trait WeightInfo { fn free() -> Weight; fn force_transfer() -> Weight; fn freeze() -> Weight; - } /// Weights for pallet_indices using the Substrate node and recommended hardware. 
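All of the regenerated files touched by this patch share one shape: a `WeightInfo` trait, an implementation that is generic over the runtime so it can price database traffic through `T::DbWeight::get()` (in the real files, `impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T>`), and a `()` implementation backed by `RocksDbWeight` that is kept for tests and backwards compatibility. A minimal, self-contained sketch of that shape follows; `DbCosts`, `Rocks` and the example per-access costs are illustrative assumptions, not part of the patch, and the real `()` impl still charges fixed read/write costs rather than returning the base figure alone.

type Weight = u64;

// Stand-in for the runtime-configured database access costs
// (`T::DbWeight::get()` in the generated code).
pub trait DbCosts {
    fn read() -> Weight;
    fn write() -> Weight;
}

pub trait WeightInfo {
    fn claim() -> Weight;
}

pub struct SubstrateWeight<T>(core::marker::PhantomData<T>);

impl<T: DbCosts> WeightInfo for SubstrateWeight<T> {
    fn claim() -> Weight {
        // Mirrors the new `claim()` figure below: base time + 1 read + 1 write.
        (40_622_000 as Weight)
            .saturating_add(T::read())
            .saturating_add(T::write())
    }
}

// Simplified fallback; the generated `()` impl uses RocksDbWeight instead.
impl WeightInfo for () {
    fn claim() -> Weight {
        40_622_000
    }
}

// Illustrative costs only, roughly one slow read and one slow write.
struct Rocks;
impl DbCosts for Rocks {
    fn read() -> Weight { 25_000_000 }
    fn write() -> Weight { 100_000_000 }
}

fn main() {
    println!("{}", <SubstrateWeight<Rocks> as WeightInfo>::claim()); // 165_622_000
}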
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn claim() -> Weight { - (53_799_000 as Weight) + (40_622_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn transfer() -> Weight { - (60_294_000 as Weight) + (49_166_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } fn free() -> Weight { - (48_625_000 as Weight) + (40_802_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn force_transfer() -> Weight { - (49_762_000 as Weight) + (41_423_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } fn freeze() -> Weight { - (44_869_000 as Weight) + (38_476_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - } // For backwards compatibility and tests impl WeightInfo for () { fn claim() -> Weight { - (53_799_000 as Weight) + (40_622_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn transfer() -> Weight { - (60_294_000 as Weight) + (49_166_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } fn free() -> Weight { - (48_625_000 as Weight) + (40_802_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn force_transfer() -> Weight { - (49_762_000 as Weight) + (41_423_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } fn freeze() -> Weight { - (44_869_000 as Weight) + (38_476_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } - } diff --git a/frame/lottery/src/weights.rs b/frame/lottery/src/weights.rs index 464bb94bbbb79..a73d0b667e351 100644 --- a/frame/lottery/src/weights.rs +++ b/frame/lottery/src/weights.rs @@ -17,8 +17,8 @@ //! Autogenerated weights for pallet_lottery //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2021-01-05, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 +//! DATE: 2021-06-19, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -56,33 +56,33 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn buy_ticket() -> Weight { - (97_799_000 as Weight) + (71_604_000 as Weight) .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn set_calls(n: u32, ) -> Weight { - (20_932_000 as Weight) - // Standard Error: 9_000 - .saturating_add((513_000 as Weight).saturating_mul(n as Weight)) + (15_015_000 as Weight) + // Standard Error: 5_000 + .saturating_add((301_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn start_lottery() -> Weight { - (77_600_000 as Weight) + (58_855_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn stop_repeat() -> Weight { - (10_707_000 as Weight) + (7_524_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn on_initialize_end() -> Weight { - (162_126_000 as Weight) + (114_766_000 as Weight) .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn on_initialize_repeat() -> Weight { - (169_310_000 as Weight) + (119_402_000 as Weight) .saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().writes(5 as Weight)) } @@ -91,33 +91,33 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { fn buy_ticket() -> Weight { - (97_799_000 as Weight) + (71_604_000 as Weight) .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } fn set_calls(n: u32, ) -> Weight { - (20_932_000 as Weight) - // Standard Error: 9_000 - .saturating_add((513_000 as Weight).saturating_mul(n as Weight)) + (15_015_000 as Weight) + // Standard Error: 5_000 + .saturating_add((301_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn start_lottery() -> Weight { - (77_600_000 as Weight) + (58_855_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn stop_repeat() -> Weight { - (10_707_000 as Weight) + (7_524_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn on_initialize_end() -> Weight { - (162_126_000 as Weight) + (114_766_000 as Weight) .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } fn on_initialize_repeat() -> Weight { - (169_310_000 as Weight) + (119_402_000 as Weight) .saturating_add(RocksDbWeight::get().reads(7 as Weight)) .saturating_add(RocksDbWeight::get().writes(5 as Weight)) } diff --git a/frame/membership/src/weights.rs b/frame/membership/src/weights.rs index fbdb44caec84c..8e2d8bb266164 100644 --- a/frame/membership/src/weights.rs +++ b/frame/membership/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_membership //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 -//! DATE: 2021-04-17, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2021-06-19, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -57,49 +57,49 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn add_member(m: u32, ) -> Weight { - (25_448_000 as Weight) + (24_309_000 as Weight) // Standard Error: 3_000 - .saturating_add((257_000 as Weight).saturating_mul(m as Weight)) + .saturating_add((147_000 as Weight).saturating_mul(m as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn remove_member(m: u32, ) -> Weight { - (31_317_000 as Weight) + (29_722_000 as Weight) // Standard Error: 0 - .saturating_add((215_000 as Weight).saturating_mul(m as Weight)) + .saturating_add((119_000 as Weight).saturating_mul(m as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn swap_member(m: u32, ) -> Weight { - (31_208_000 as Weight) + (30_239_000 as Weight) // Standard Error: 0 - .saturating_add((229_000 as Weight).saturating_mul(m as Weight)) + .saturating_add((132_000 as Weight).saturating_mul(m as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn reset_member(m: u32, ) -> Weight { - (31_673_000 as Weight) - // Standard Error: 1_000 - .saturating_add((455_000 as Weight).saturating_mul(m as Weight)) + (31_302_000 as Weight) + // Standard Error: 0 + .saturating_add((289_000 as Weight).saturating_mul(m as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn change_key(m: u32, ) -> Weight { - (33_499_000 as Weight) + (31_967_000 as Weight) // Standard Error: 0 - .saturating_add((226_000 as Weight).saturating_mul(m as Weight)) + .saturating_add((130_000 as Weight).saturating_mul(m as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn set_prime(m: u32, ) -> Weight { - (8_865_000 as Weight) + (8_083_000 as Weight) // Standard Error: 0 - .saturating_add((124_000 as Weight).saturating_mul(m as Weight)) + .saturating_add((91_000 as Weight).saturating_mul(m as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn clear_prime(m: u32, ) -> Weight { - (3_397_000 as Weight) + (3_360_000 as Weight) // Standard Error: 0 .saturating_add((1_000 as Weight).saturating_mul(m as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) @@ -109,49 +109,49 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { fn add_member(m: u32, ) -> Weight { - (25_448_000 as Weight) + (24_309_000 as Weight) // Standard Error: 3_000 - .saturating_add((257_000 as Weight).saturating_mul(m as Weight)) + .saturating_add((147_000 as Weight).saturating_mul(m as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn remove_member(m: u32, ) -> Weight { - (31_317_000 as Weight) + (29_722_000 as Weight) // Standard Error: 0 - .saturating_add((215_000 as Weight).saturating_mul(m as Weight)) + .saturating_add((119_000 as Weight).saturating_mul(m as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn swap_member(m: u32, ) -> Weight { - (31_208_000 as Weight) + (30_239_000 as Weight) // 
Standard Error: 0 - .saturating_add((229_000 as Weight).saturating_mul(m as Weight)) + .saturating_add((132_000 as Weight).saturating_mul(m as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn reset_member(m: u32, ) -> Weight { - (31_673_000 as Weight) - // Standard Error: 1_000 - .saturating_add((455_000 as Weight).saturating_mul(m as Weight)) + (31_302_000 as Weight) + // Standard Error: 0 + .saturating_add((289_000 as Weight).saturating_mul(m as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn change_key(m: u32, ) -> Weight { - (33_499_000 as Weight) + (31_967_000 as Weight) // Standard Error: 0 - .saturating_add((226_000 as Weight).saturating_mul(m as Weight)) + .saturating_add((130_000 as Weight).saturating_mul(m as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } fn set_prime(m: u32, ) -> Weight { - (8_865_000 as Weight) + (8_083_000 as Weight) // Standard Error: 0 - .saturating_add((124_000 as Weight).saturating_mul(m as Weight)) + .saturating_add((91_000 as Weight).saturating_mul(m as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn clear_prime(m: u32, ) -> Weight { - (3_397_000 as Weight) + (3_360_000 as Weight) // Standard Error: 0 .saturating_add((1_000 as Weight).saturating_mul(m as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) diff --git a/frame/multisig/src/weights.rs b/frame/multisig/src/weights.rs index 1c8736616c18b..50f774030015f 100644 --- a/frame/multisig/src/weights.rs +++ b/frame/multisig/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// Copyright (C) 2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,9 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Weights for pallet_multisig -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-10-27, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! Autogenerated weights for pallet_multisig +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 +//! DATE: 2021-06-19, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -53,162 +54,165 @@ pub trait WeightInfo { fn approve_as_multi_approve(s: u32, ) -> Weight; fn approve_as_multi_complete(s: u32, ) -> Weight; fn cancel_as_multi(s: u32, ) -> Weight; - } /// Weights for pallet_multisig using the Substrate node and recommended hardware. 
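Each generated figure is a linear polynomial in the benchmarked components plus a fixed number of database accesses. A worked evaluation of the new `as_multi_create(s, z)` formula from the hunk that follows; the component values and per-access costs are illustrative assumptions, not part of the patch.

type Weight = u64;

// Illustrative per-access costs; real runtimes take these from
// T::DbWeight / RocksDbWeight, which are configured per chain.
const READ: Weight = 25_000_000;
const WRITE: Weight = 100_000_000;

// New formula below: 54_200_000 + 127_000*s + 1_000*z, plus 2 reads and 1 write.
fn as_multi_create(s: Weight, z: Weight) -> Weight {
    (54_200_000 as Weight)
        .saturating_add((127_000 as Weight).saturating_mul(s))
        .saturating_add((1_000 as Weight).saturating_mul(z))
        .saturating_add(READ.saturating_mul(2))
        .saturating_add(WRITE)
}

fn main() {
    // e.g. 5 signatories and a 100-byte encoded call (illustrative values)
    println!("{}", as_multi_create(5, 100)); // 204_935_000
}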
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - fn as_multi_threshold_1(z: u32, ) -> Weight { - (14_183_000 as Weight) - .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) - + fn as_multi_threshold_1(_z: u32, ) -> Weight { + (14_411_000 as Weight) } fn as_multi_create(s: u32, z: u32, ) -> Weight { - (72_350_000 as Weight) - .saturating_add((64_000 as Weight).saturating_mul(s as Weight)) + (54_200_000 as Weight) + // Standard Error: 0 + .saturating_add((127_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn as_multi_create_store(s: u32, z: u32, ) -> Weight { - (83_175_000 as Weight) - .saturating_add((72_000 as Weight).saturating_mul(s as Weight)) - .saturating_add((3_000 as Weight).saturating_mul(z as Weight)) + (60_502_000 as Weight) + // Standard Error: 0 + .saturating_add((128_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(z as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } fn as_multi_approve(s: u32, z: u32, ) -> Weight { - (43_035_000 as Weight) - .saturating_add((140_000 as Weight).saturating_mul(s as Weight)) + (32_075_000 as Weight) + // Standard Error: 0 + .saturating_add((132_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn as_multi_approve_store(s: u32, z: u32, ) -> Weight { - (75_190_000 as Weight) - .saturating_add((127_000 as Weight).saturating_mul(s as Weight)) - .saturating_add((3_000 as Weight).saturating_mul(z as Weight)) + (57_742_000 as Weight) + // Standard Error: 0 + .saturating_add((141_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(z as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } fn as_multi_complete(s: u32, z: u32, ) -> Weight { - (92_751_000 as Weight) - .saturating_add((282_000 as Weight).saturating_mul(s as Weight)) - .saturating_add((5_000 as Weight).saturating_mul(z as Weight)) + (73_503_000 as Weight) + // Standard Error: 0 + .saturating_add((246_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 + .saturating_add((4_000 as Weight).saturating_mul(z as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } fn approve_as_multi_create(s: u32, ) -> Weight { - (71_937_000 as Weight) - .saturating_add((87_000 as Weight).saturating_mul(s as Weight)) + (53_659_000 as Weight) + // Standard Error: 0 + .saturating_add((133_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn approve_as_multi_approve(s: u32, ) -> Weight { - (44_294_000 as Weight) - .saturating_add((89_000 as Weight).saturating_mul(s as Weight)) + (31_353_000 as Weight) + // Standard Error: 0 + .saturating_add((136_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn 
approve_as_multi_complete(s: u32, ) -> Weight { - (163_098_000 as Weight) - .saturating_add((276_000 as Weight).saturating_mul(s as Weight)) + (125_011_000 as Weight) + // Standard Error: 0 + .saturating_add((247_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } fn cancel_as_multi(s: u32, ) -> Weight { - (115_731_000 as Weight) - .saturating_add((104_000 as Weight).saturating_mul(s as Weight)) + (92_318_000 as Weight) + // Standard Error: 0 + .saturating_add((128_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - } // For backwards compatibility and tests impl WeightInfo for () { - fn as_multi_threshold_1(z: u32, ) -> Weight { - (14_183_000 as Weight) - .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) - + fn as_multi_threshold_1(_z: u32, ) -> Weight { + (14_411_000 as Weight) } fn as_multi_create(s: u32, z: u32, ) -> Weight { - (72_350_000 as Weight) - .saturating_add((64_000 as Weight).saturating_mul(s as Weight)) + (54_200_000 as Weight) + // Standard Error: 0 + .saturating_add((127_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn as_multi_create_store(s: u32, z: u32, ) -> Weight { - (83_175_000 as Weight) - .saturating_add((72_000 as Weight).saturating_mul(s as Weight)) - .saturating_add((3_000 as Weight).saturating_mul(z as Weight)) + (60_502_000 as Weight) + // Standard Error: 0 + .saturating_add((128_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(z as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } fn as_multi_approve(s: u32, z: u32, ) -> Weight { - (43_035_000 as Weight) - .saturating_add((140_000 as Weight).saturating_mul(s as Weight)) + (32_075_000 as Weight) + // Standard Error: 0 + .saturating_add((132_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn as_multi_approve_store(s: u32, z: u32, ) -> Weight { - (75_190_000 as Weight) - .saturating_add((127_000 as Weight).saturating_mul(s as Weight)) - .saturating_add((3_000 as Weight).saturating_mul(z as Weight)) + (57_742_000 as Weight) + // Standard Error: 0 + .saturating_add((141_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(z as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } fn as_multi_complete(s: u32, z: u32, ) -> Weight { - (92_751_000 as Weight) - .saturating_add((282_000 as Weight).saturating_mul(s as Weight)) - .saturating_add((5_000 as Weight).saturating_mul(z as Weight)) + (73_503_000 as Weight) + // Standard Error: 0 + .saturating_add((246_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 + .saturating_add((4_000 as Weight).saturating_mul(z as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - } fn 
approve_as_multi_create(s: u32, ) -> Weight { - (71_937_000 as Weight) - .saturating_add((87_000 as Weight).saturating_mul(s as Weight)) + (53_659_000 as Weight) + // Standard Error: 0 + .saturating_add((133_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn approve_as_multi_approve(s: u32, ) -> Weight { - (44_294_000 as Weight) - .saturating_add((89_000 as Weight).saturating_mul(s as Weight)) + (31_353_000 as Weight) + // Standard Error: 0 + .saturating_add((136_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn approve_as_multi_complete(s: u32, ) -> Weight { - (163_098_000 as Weight) - .saturating_add((276_000 as Weight).saturating_mul(s as Weight)) + (125_011_000 as Weight) + // Standard Error: 0 + .saturating_add((247_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - } fn cancel_as_multi(s: u32, ) -> Weight { - (115_731_000 as Weight) - .saturating_add((104_000 as Weight).saturating_mul(s as Weight)) + (92_318_000 as Weight) + // Standard Error: 0 + .saturating_add((128_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } - } diff --git a/frame/proxy/src/weights.rs b/frame/proxy/src/weights.rs index b720a22be120d..f250186ad81d7 100644 --- a/frame/proxy/src/weights.rs +++ b/frame/proxy/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// Copyright (C) 2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,9 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Weights for pallet_proxy -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-10-27, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! Autogenerated weights for pallet_proxy +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 +//! DATE: 2021-06-19, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -53,162 +54,167 @@ pub trait WeightInfo { fn remove_proxies(p: u32, ) -> Weight; fn anonymous(p: u32, ) -> Weight; fn kill_anonymous(p: u32, ) -> Weight; - } /// Weights for pallet_proxy using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn proxy(p: u32, ) -> Weight { - (32_194_000 as Weight) - .saturating_add((215_000 as Weight).saturating_mul(p as Weight)) + (22_645_000 as Weight) + // Standard Error: 1_000 + .saturating_add((162_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) - } fn proxy_announced(a: u32, p: u32, ) -> Weight { - (67_490_000 as Weight) - .saturating_add((859_000 as Weight).saturating_mul(a as Weight)) - .saturating_add((215_000 as Weight).saturating_mul(p as Weight)) + (53_259_000 as Weight) + // Standard Error: 2_000 + .saturating_add((543_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 2_000 + .saturating_add((153_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } fn remove_announcement(a: u32, p: u32, ) -> Weight { - (40_768_000 as Weight) - .saturating_add((882_000 as Weight).saturating_mul(a as Weight)) - .saturating_add((122_000 as Weight).saturating_mul(p as Weight)) + (37_983_000 as Weight) + // Standard Error: 2_000 + .saturating_add((545_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 2_000 + .saturating_add((4_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } fn reject_announcement(a: u32, p: u32, ) -> Weight { - (42_742_000 as Weight) - .saturating_add((852_000 as Weight).saturating_mul(a as Weight)) - .saturating_add((22_000 as Weight).saturating_mul(p as Weight)) + (37_922_000 as Weight) + // Standard Error: 1_000 + .saturating_add((541_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 2_000 + .saturating_add((6_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } fn announce(a: u32, p: u32, ) -> Weight { - (67_967_000 as Weight) - .saturating_add((737_000 as Weight).saturating_mul(a as Weight)) - .saturating_add((213_000 as Weight).saturating_mul(p as Weight)) + (51_355_000 as Weight) + // Standard Error: 2_000 + .saturating_add((534_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 2_000 + .saturating_add((148_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } fn add_proxy(p: u32, ) -> Weight { - (45_245_000 as Weight) - .saturating_add((240_000 as Weight).saturating_mul(p as Weight)) + (35_798_000 as Weight) + // Standard Error: 2_000 + .saturating_add((228_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn remove_proxy(p: u32, ) -> Weight { - (40_742_000 as Weight) - .saturating_add((272_000 as Weight).saturating_mul(p as Weight)) + (35_554_000 as Weight) + // Standard Error: 3_000 + .saturating_add((250_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn remove_proxies(p: u32, ) -> Weight { - (39_070_000 as Weight) - .saturating_add((214_000 as Weight).saturating_mul(p as Weight)) + (33_911_000 as Weight) + // Standard Error: 1_000 + .saturating_add((165_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) 
.saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn anonymous(p: u32, ) -> Weight { - (64_851_000 as Weight) - .saturating_add((37_000 as Weight).saturating_mul(p as Weight)) + (48_695_000 as Weight) + // Standard Error: 1_000 + .saturating_add((53_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn kill_anonymous(p: u32, ) -> Weight { - (41_831_000 as Weight) - .saturating_add((207_000 as Weight).saturating_mul(p as Weight)) + (35_904_000 as Weight) + // Standard Error: 1_000 + .saturating_add((159_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - } // For backwards compatibility and tests impl WeightInfo for () { fn proxy(p: u32, ) -> Weight { - (32_194_000 as Weight) - .saturating_add((215_000 as Weight).saturating_mul(p as Weight)) + (22_645_000 as Weight) + // Standard Error: 1_000 + .saturating_add((162_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - } fn proxy_announced(a: u32, p: u32, ) -> Weight { - (67_490_000 as Weight) - .saturating_add((859_000 as Weight).saturating_mul(a as Weight)) - .saturating_add((215_000 as Weight).saturating_mul(p as Weight)) + (53_259_000 as Weight) + // Standard Error: 2_000 + .saturating_add((543_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 2_000 + .saturating_add((153_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } fn remove_announcement(a: u32, p: u32, ) -> Weight { - (40_768_000 as Weight) - .saturating_add((882_000 as Weight).saturating_mul(a as Weight)) - .saturating_add((122_000 as Weight).saturating_mul(p as Weight)) + (37_983_000 as Weight) + // Standard Error: 2_000 + .saturating_add((545_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 2_000 + .saturating_add((4_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } fn reject_announcement(a: u32, p: u32, ) -> Weight { - (42_742_000 as Weight) - .saturating_add((852_000 as Weight).saturating_mul(a as Weight)) - .saturating_add((22_000 as Weight).saturating_mul(p as Weight)) + (37_922_000 as Weight) + // Standard Error: 1_000 + .saturating_add((541_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 2_000 + .saturating_add((6_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } fn announce(a: u32, p: u32, ) -> Weight { - (67_967_000 as Weight) - .saturating_add((737_000 as Weight).saturating_mul(a as Weight)) - .saturating_add((213_000 as Weight).saturating_mul(p as Weight)) + (51_355_000 as Weight) + // Standard Error: 2_000 + .saturating_add((534_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 2_000 + .saturating_add((148_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } fn add_proxy(p: u32, ) -> Weight { - (45_245_000 as Weight) - .saturating_add((240_000 as Weight).saturating_mul(p as Weight)) + (35_798_000 as Weight) + // Standard Error: 2_000 + .saturating_add((228_000 as Weight).saturating_mul(p as 
Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn remove_proxy(p: u32, ) -> Weight { - (40_742_000 as Weight) - .saturating_add((272_000 as Weight).saturating_mul(p as Weight)) + (35_554_000 as Weight) + // Standard Error: 3_000 + .saturating_add((250_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn remove_proxies(p: u32, ) -> Weight { - (39_070_000 as Weight) - .saturating_add((214_000 as Weight).saturating_mul(p as Weight)) + (33_911_000 as Weight) + // Standard Error: 1_000 + .saturating_add((165_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn anonymous(p: u32, ) -> Weight { - (64_851_000 as Weight) - .saturating_add((37_000 as Weight).saturating_mul(p as Weight)) + (48_695_000 as Weight) + // Standard Error: 1_000 + .saturating_add((53_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn kill_anonymous(p: u32, ) -> Weight { - (41_831_000 as Weight) - .saturating_add((207_000 as Weight).saturating_mul(p as Weight)) + (35_904_000 as Weight) + // Standard Error: 1_000 + .saturating_add((159_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } - } diff --git a/frame/scheduler/src/weights.rs b/frame/scheduler/src/weights.rs index 1d7273353f347..648652428cbb8 100644 --- a/frame/scheduler/src/weights.rs +++ b/frame/scheduler/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// Copyright (C) 2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,9 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Weights for pallet_scheduler -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-10-27, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! Autogenerated weights for pallet_scheduler +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 +//! DATE: 2021-06-19, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -47,72 +48,69 @@ pub trait WeightInfo { fn cancel(s: u32, ) -> Weight; fn schedule_named(s: u32, ) -> Weight; fn cancel_named(s: u32, ) -> Weight; - } /// Weights for pallet_scheduler using the Substrate node and recommended hardware. 
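The `// Standard Error:` comments are new in this regeneration. On the assumed reading that each figure is a least-squares fit of execution time against the component, the comment records the uncertainty of the fitted per-component slope, in the same units as the weight itself. A small sketch using the new `schedule_named(s)` figures from the hunk below (execution-time part only; the 2 reads and 2 writes are omitted here):

// Assumed interpretation: weight ≈ base + slope * s, slope known to ±1_000.
fn schedule_named_time(s: u64) -> (u64, u64) {
    let estimate = 31_096_000 + 141_000 * s;
    let slope_std_err = 1_000; // from the `// Standard Error: 1_000` line
    (estimate, slope_std_err * s)
}

fn main() {
    let (est, err) = schedule_named_time(20);
    println!("{} +/- {}", est, err); // 33_916_000 +/- 20_000
}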
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn schedule(s: u32, ) -> Weight { - (35_029_000 as Weight) - .saturating_add((77_000 as Weight).saturating_mul(s as Weight)) + (24_811_000 as Weight) + // Standard Error: 1_000 + .saturating_add((116_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn cancel(s: u32, ) -> Weight { - (31_419_000 as Weight) - .saturating_add((4_015_000 as Weight).saturating_mul(s as Weight)) + (23_851_000 as Weight) + // Standard Error: 3_000 + .saturating_add((1_439_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } fn schedule_named(s: u32, ) -> Weight { - (44_752_000 as Weight) - .saturating_add((123_000 as Weight).saturating_mul(s as Weight)) + (31_096_000 as Weight) + // Standard Error: 1_000 + .saturating_add((141_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } fn cancel_named(s: u32, ) -> Weight { - (35_712_000 as Weight) - .saturating_add((4_008_000 as Weight).saturating_mul(s as Weight)) + (26_715_000 as Weight) + // Standard Error: 4_000 + .saturating_add((1_455_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - } // For backwards compatibility and tests impl WeightInfo for () { fn schedule(s: u32, ) -> Weight { - (35_029_000 as Weight) - .saturating_add((77_000 as Weight).saturating_mul(s as Weight)) + (24_811_000 as Weight) + // Standard Error: 1_000 + .saturating_add((116_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn cancel(s: u32, ) -> Weight { - (31_419_000 as Weight) - .saturating_add((4_015_000 as Weight).saturating_mul(s as Weight)) + (23_851_000 as Weight) + // Standard Error: 3_000 + .saturating_add((1_439_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } fn schedule_named(s: u32, ) -> Weight { - (44_752_000 as Weight) - .saturating_add((123_000 as Weight).saturating_mul(s as Weight)) + (31_096_000 as Weight) + // Standard Error: 1_000 + .saturating_add((141_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } fn cancel_named(s: u32, ) -> Weight { - (35_712_000 as Weight) - .saturating_add((4_008_000 as Weight).saturating_mul(s as Weight)) + (26_715_000 as Weight) + // Standard Error: 4_000 + .saturating_add((1_455_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } - } diff --git a/frame/session/src/weights.rs b/frame/session/src/weights.rs index 88ed9e6d8ece0..ec911d8c01cce 100644 --- a/frame/session/src/weights.rs +++ b/frame/session/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// Copyright (C) 2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,9 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Weights for pallet_session -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-10-27, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! Autogenerated weights for pallet_session +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 +//! DATE: 2021-06-19, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -45,40 +46,33 @@ use sp_std::marker::PhantomData; pub trait WeightInfo { fn set_keys() -> Weight; fn purge_keys() -> Weight; - } /// Weights for pallet_session using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn set_keys() -> Weight { - (86_033_000 as Weight) + (70_351_000 as Weight) .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().writes(5 as Weight)) - } fn purge_keys() -> Weight { - (54_334_000 as Weight) + (45_866_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(5 as Weight)) - } - } // For backwards compatibility and tests impl WeightInfo for () { fn set_keys() -> Weight { - (86_033_000 as Weight) + (70_351_000 as Weight) .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().writes(5 as Weight)) - } fn purge_keys() -> Weight { - (54_334_000 as Weight) + (45_866_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(5 as Weight)) - } - } diff --git a/frame/staking/src/weights.rs b/frame/staking/src/weights.rs index 980b0855fbd81..dbf5f3fc82bf9 100644 --- a/frame/staking/src/weights.rs +++ b/frame/staking/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_staking //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 -//! DATE: 2021-06-15, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2021-06-19, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -78,154 +78,154 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn bond() -> Weight { - (91_278_000 as Weight) + (72_617_000 as Weight) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn bond_extra() -> Weight { - (69_833_000 as Weight) + (55_590_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn unbond() -> Weight { - (75_020_000 as Weight) + (59_730_000 as Weight) .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn withdraw_unbonded_update(s: u32, ) -> Weight { - (63_898_000 as Weight) - // Standard Error: 1_000 - .saturating_add((50_000 as Weight).saturating_mul(s as Weight)) + (52_279_000 as Weight) + // Standard Error: 0 + .saturating_add((68_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn withdraw_unbonded_kill(s: u32, ) -> Weight { - (103_717_000 as Weight) + (86_629_000 as Weight) // Standard Error: 1_000 - .saturating_add((2_942_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((2_379_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(8 as Weight)) .saturating_add(T::DbWeight::get().writes(6 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn validate() -> Weight { - (40_702_000 as Weight) + (32_393_000 as Weight) .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn kick(k: u32, ) -> Weight { - (33_572_000 as Weight) - // Standard Error: 18_000 - .saturating_add((20_771_000 as Weight).saturating_mul(k as Weight)) + (36_986_000 as Weight) + // Standard Error: 13_000 + .saturating_add((16_574_000 as Weight).saturating_mul(k as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(k as Weight))) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) } fn nominate(n: u32, ) -> Weight { - (53_561_000 as Weight) - // Standard Error: 34_000 - .saturating_add((6_652_000 as Weight).saturating_mul(n as Weight)) + (43_228_000 as Weight) + // Standard Error: 21_000 + .saturating_add((5_119_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn chill() -> Weight { - (21_489_000 as Weight) + (17_800_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) } fn set_payee() -> Weight { - (14_514_000 as Weight) + (12_612_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_controller() -> Weight { - (32_598_000 as Weight) + (27_503_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn set_validator_count() -> Weight { - (2_477_000 as Weight) + (2_119_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_no_eras() -> Weight { - (2_743_000 as Weight) + (2_320_000 as Weight) 
.saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_new_era() -> Weight { - (2_784_000 as Weight) + (2_269_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_new_era_always() -> Weight { - (2_749_000 as Weight) + (2_334_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_invulnerables(v: u32, ) -> Weight { - (2_798_000 as Weight) + (2_354_000 as Weight) // Standard Error: 0 .saturating_add((5_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_unstake(s: u32, ) -> Weight { - (70_372_000 as Weight) - // Standard Error: 13_000 - .saturating_add((3_029_000 as Weight).saturating_mul(s as Weight)) + (61_556_000 as Weight) + // Standard Error: 1_000 + .saturating_add((2_377_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().writes(6 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn cancel_deferred_slash(s: u32, ) -> Weight { - (3_436_822_000 as Weight) - // Standard Error: 221_000 - .saturating_add((19_799_000 as Weight).saturating_mul(s as Weight)) + (3_367_105_000 as Weight) + // Standard Error: 222_000 + .saturating_add((19_817_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn payout_stakers_dead_controller(n: u32, ) -> Weight { - (132_018_000 as Weight) - // Standard Error: 27_000 - .saturating_add((61_340_000 as Weight).saturating_mul(n as Weight)) + (47_229_000 as Weight) + // Standard Error: 53_000 + .saturating_add((48_365_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(10 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) } fn payout_stakers_alive_staked(n: u32, ) -> Weight { - (158_346_000 as Weight) - // Standard Error: 61_000 - .saturating_add((77_147_000 as Weight).saturating_mul(n as Weight)) + (156_788_000 as Weight) + // Standard Error: 20_000 + .saturating_add((61_280_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(11 as Weight)) .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(3 as Weight)) .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(n as Weight))) } fn rebond(l: u32, ) -> Weight { - (57_756_000 as Weight) - // Standard Error: 2_000 - .saturating_add((79_000 as Weight).saturating_mul(l as Weight)) + (47_815_000 as Weight) + // Standard Error: 1_000 + .saturating_add((65_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn set_history_depth(e: u32, ) -> Weight { (0 as Weight) - // Standard Error: 100_000 - .saturating_add((44_873_000 as Weight).saturating_mul(e as Weight)) + // Standard Error: 74_000 + .saturating_add((34_945_000 as Weight).saturating_mul(e as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) .saturating_add(T::DbWeight::get().writes((7 as Weight).saturating_mul(e as Weight))) } fn reap_stash(s: u32, ) -> Weight { - (75_073_000 as Weight) - // Standard 
Error: 4_000 - .saturating_add((2_988_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) + (73_483_000 as Weight) + // Standard Error: 0 + .saturating_add((2_384_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) + .saturating_add(T::DbWeight::get().writes(8 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn new_era(v: u32, n: u32, ) -> Weight { (0 as Weight) - // Standard Error: 1_146_000 - .saturating_add((362_986_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 57_000 - .saturating_add((60_216_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 846_000 + .saturating_add((305_234_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 42_000 + .saturating_add((48_280_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(10 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) @@ -234,12 +234,12 @@ impl WeightInfo for SubstrateWeight { } fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight { (0 as Weight) - // Standard Error: 230_000 - .saturating_add((35_891_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 230_000 - .saturating_add((37_854_000 as Weight).saturating_mul(n as Weight)) - // Standard Error: 7_842_000 - .saturating_add((32_492_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 99_000 + .saturating_add((25_735_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 99_000 + .saturating_add((28_122_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 3_388_000 + .saturating_add((21_500_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) @@ -247,17 +247,17 @@ impl WeightInfo for SubstrateWeight { } fn get_npos_targets(v: u32, ) -> Weight { (0 as Weight) - // Standard Error: 74_000 - .saturating_add((16_370_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 30_000 + .saturating_add((11_065_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) } fn update_staking_limits() -> Weight { - (6_398_000 as Weight) + (5_028_000 as Weight) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn chill_other() -> Weight { - (44_694_000 as Weight) + (35_758_000 as Weight) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } @@ -266,154 +266,154 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { fn bond() -> Weight { - (91_278_000 as Weight) + (72_617_000 as Weight) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } fn bond_extra() -> Weight { - (69_833_000 as Weight) + (55_590_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn unbond() -> Weight { - (75_020_000 as Weight) + (59_730_000 as Weight) 
.saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn withdraw_unbonded_update(s: u32, ) -> Weight { - (63_898_000 as Weight) - // Standard Error: 1_000 - .saturating_add((50_000 as Weight).saturating_mul(s as Weight)) + (52_279_000 as Weight) + // Standard Error: 0 + .saturating_add((68_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn withdraw_unbonded_kill(s: u32, ) -> Weight { - (103_717_000 as Weight) + (86_629_000 as Weight) // Standard Error: 1_000 - .saturating_add((2_942_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((2_379_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(8 as Weight)) .saturating_add(RocksDbWeight::get().writes(6 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn validate() -> Weight { - (40_702_000 as Weight) + (32_393_000 as Weight) .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn kick(k: u32, ) -> Weight { - (33_572_000 as Weight) - // Standard Error: 18_000 - .saturating_add((20_771_000 as Weight).saturating_mul(k as Weight)) + (36_986_000 as Weight) + // Standard Error: 13_000 + .saturating_add((16_574_000 as Weight).saturating_mul(k as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(k as Weight))) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) } fn nominate(n: u32, ) -> Weight { - (53_561_000 as Weight) - // Standard Error: 34_000 - .saturating_add((6_652_000 as Weight).saturating_mul(n as Weight)) + (43_228_000 as Weight) + // Standard Error: 21_000 + .saturating_add((5_119_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(7 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn chill() -> Weight { - (21_489_000 as Weight) + (17_800_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) } fn set_payee() -> Weight { - (14_514_000 as Weight) + (12_612_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn set_controller() -> Weight { - (32_598_000 as Weight) + (27_503_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn set_validator_count() -> Weight { - (2_477_000 as Weight) + (2_119_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_no_eras() -> Weight { - (2_743_000 as Weight) + (2_320_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_new_era() -> Weight { - (2_784_000 as Weight) + (2_269_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_new_era_always() -> Weight { - (2_749_000 as Weight) + (2_334_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn set_invulnerables(v: u32, ) -> Weight { - (2_798_000 as Weight) + (2_354_000 as Weight) // Standard Error: 0 .saturating_add((5_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_unstake(s: u32, ) -> Weight { - (70_372_000 as 
Weight) - // Standard Error: 13_000 - .saturating_add((3_029_000 as Weight).saturating_mul(s as Weight)) + (61_556_000 as Weight) + // Standard Error: 1_000 + .saturating_add((2_377_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().writes(6 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn cancel_deferred_slash(s: u32, ) -> Weight { - (3_436_822_000 as Weight) - // Standard Error: 221_000 - .saturating_add((19_799_000 as Weight).saturating_mul(s as Weight)) + (3_367_105_000 as Weight) + // Standard Error: 222_000 + .saturating_add((19_817_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn payout_stakers_dead_controller(n: u32, ) -> Weight { - (132_018_000 as Weight) - // Standard Error: 27_000 - .saturating_add((61_340_000 as Weight).saturating_mul(n as Weight)) + (47_229_000 as Weight) + // Standard Error: 53_000 + .saturating_add((48_365_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(10 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) } fn payout_stakers_alive_staked(n: u32, ) -> Weight { - (158_346_000 as Weight) - // Standard Error: 61_000 - .saturating_add((77_147_000 as Weight).saturating_mul(n as Weight)) + (156_788_000 as Weight) + // Standard Error: 20_000 + .saturating_add((61_280_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(11 as Weight)) .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(n as Weight))) } fn rebond(l: u32, ) -> Weight { - (57_756_000 as Weight) - // Standard Error: 2_000 - .saturating_add((79_000 as Weight).saturating_mul(l as Weight)) + (47_815_000 as Weight) + // Standard Error: 1_000 + .saturating_add((65_000 as Weight).saturating_mul(l as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn set_history_depth(e: u32, ) -> Weight { (0 as Weight) - // Standard Error: 100_000 - .saturating_add((44_873_000 as Weight).saturating_mul(e as Weight)) + // Standard Error: 74_000 + .saturating_add((34_945_000 as Weight).saturating_mul(e as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) .saturating_add(RocksDbWeight::get().writes((7 as Weight).saturating_mul(e as Weight))) } fn reap_stash(s: u32, ) -> Weight { - (75_073_000 as Weight) - // Standard Error: 4_000 - .saturating_add((2_988_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + (73_483_000 as Weight) + // Standard Error: 0 + .saturating_add((2_384_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + .saturating_add(RocksDbWeight::get().writes(8 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn new_era(v: u32, n: u32, ) -> Weight { (0 as Weight) - // Standard 
Error: 1_146_000 - .saturating_add((362_986_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 57_000 - .saturating_add((60_216_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 846_000 + .saturating_add((305_234_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 42_000 + .saturating_add((48_280_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(10 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) @@ -422,12 +422,12 @@ impl WeightInfo for () { } fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight { (0 as Weight) - // Standard Error: 230_000 - .saturating_add((35_891_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 230_000 - .saturating_add((37_854_000 as Weight).saturating_mul(n as Weight)) - // Standard Error: 7_842_000 - .saturating_add((32_492_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 99_000 + .saturating_add((25_735_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 99_000 + .saturating_add((28_122_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 3_388_000 + .saturating_add((21_500_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) @@ -435,17 +435,17 @@ impl WeightInfo for () { } fn get_npos_targets(v: u32, ) -> Weight { (0 as Weight) - // Standard Error: 74_000 - .saturating_add((16_370_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 30_000 + .saturating_add((11_065_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) } fn update_staking_limits() -> Weight { - (6_398_000 as Weight) + (5_028_000 as Weight) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } fn chill_other() -> Weight { - (44_694_000 as Weight) + (35_758_000 as Weight) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } diff --git a/frame/system/src/weights.rs b/frame/system/src/weights.rs index 04e95de4ba37d..c6284ba17d63f 100644 --- a/frame/system/src/weights.rs +++ b/frame/system/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for frame_system //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 -//! DATE: 2021-02-28, STEPS: \[50, \], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2021-06-19, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -34,7 +34,6 @@ // --heap-pages=4096 // --output=./frame/system/src/weights.rs // --template=./.maintain/frame-weight-template.hbs -// --output-analysis=max #![allow(unused_parens)] @@ -58,38 +57,38 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn remark(_b: u32, ) -> Weight { - (1_345_000 as Weight) + (1_038_000 as Weight) } fn remark_with_event(b: u32, ) -> Weight { - (9_697_000 as Weight) + (5_246_000 as Weight) // Standard Error: 0 .saturating_add((1_000 as Weight).saturating_mul(b as Weight)) } fn set_heap_pages() -> Weight { - (2_070_000 as Weight) + (1_586_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_changes_trie_config() -> Weight { - (10_111_000 as Weight) + (7_181_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn set_storage(i: u32, ) -> Weight { (0 as Weight) // Standard Error: 0 - .saturating_add((619_000 as Weight).saturating_mul(i as Weight)) + .saturating_add((568_000 as Weight).saturating_mul(i as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) } fn kill_storage(i: u32, ) -> Weight { - (1_647_000 as Weight) + (2_278_000 as Weight) // Standard Error: 0 - .saturating_add((460_000 as Weight).saturating_mul(i as Weight)) + .saturating_add((423_000 as Weight).saturating_mul(i as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) } fn kill_prefix(p: u32, ) -> Weight { - (10_678_000 as Weight) - // Standard Error: 0 - .saturating_add((862_000 as Weight).saturating_mul(p as Weight)) + (8_243_000 as Weight) + // Standard Error: 1_000 + .saturating_add((795_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) } } @@ -97,38 +96,38 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { fn remark(_b: u32, ) -> Weight { - (1_345_000 as Weight) + (1_038_000 as Weight) } fn remark_with_event(b: u32, ) -> Weight { - (9_697_000 as Weight) + (5_246_000 as Weight) // Standard Error: 0 .saturating_add((1_000 as Weight).saturating_mul(b as Weight)) } fn set_heap_pages() -> Weight { - (2_070_000 as Weight) + (1_586_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn set_changes_trie_config() -> Weight { - (10_111_000 as Weight) + (7_181_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn set_storage(i: u32, ) -> Weight { (0 as Weight) // Standard Error: 0 - .saturating_add((619_000 as Weight).saturating_mul(i as Weight)) + .saturating_add((568_000 as Weight).saturating_mul(i as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) } fn kill_storage(i: u32, ) -> Weight { - (1_647_000 as Weight) + (2_278_000 as Weight) // Standard Error: 0 - .saturating_add((460_000 as Weight).saturating_mul(i as Weight)) + .saturating_add((423_000 as Weight).saturating_mul(i as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) } fn kill_prefix(p: u32, ) -> Weight { - (10_678_000 as Weight) - // Standard Error: 0 - .saturating_add((862_000 as Weight).saturating_mul(p as Weight)) + (8_243_000 as Weight) + // Standard Error: 1_000 
+ .saturating_add((795_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) } } diff --git a/frame/timestamp/src/weights.rs b/frame/timestamp/src/weights.rs index 875d78c31d22e..cf4fa6ea3d639 100644 --- a/frame/timestamp/src/weights.rs +++ b/frame/timestamp/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// Copyright (C) 2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,9 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Weights for pallet_timestamp -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-10-27, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! Autogenerated weights for pallet_timestamp +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 +//! DATE: 2021-06-19, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -45,36 +46,29 @@ use sp_std::marker::PhantomData; pub trait WeightInfo { fn set() -> Weight; fn on_finalize() -> Weight; - } /// Weights for pallet_timestamp using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn set() -> Weight { - (11_650_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) + (10_277_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn on_finalize() -> Weight { - (6_681_000 as Weight) - + (4_859_000 as Weight) } - } // For backwards compatibility and tests impl WeightInfo for () { fn set() -> Weight { - (11_650_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + (10_277_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn on_finalize() -> Weight { - (6_681_000 as Weight) - + (4_859_000 as Weight) } - } diff --git a/frame/tips/src/weights.rs b/frame/tips/src/weights.rs index f5cd4bc23c860..ceee79bd6f07e 100644 --- a/frame/tips/src/weights.rs +++ b/frame/tips/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// Copyright (C) 2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,8 +17,8 @@ //! Autogenerated weights for pallet_tips //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-12-20, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 +//! DATE: 2021-06-19, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -56,44 +56,44 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn report_awesome(r: u32, ) -> Weight { - (73_795_000 as Weight) + (49_844_000 as Weight) // Standard Error: 0 .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn retract_tip() -> Weight { - (61_753_000 as Weight) + (45_934_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn tip_new(r: u32, t: u32, ) -> Weight { - (47_731_000 as Weight) + (31_777_000 as Weight) // Standard Error: 0 .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) // Standard Error: 0 - .saturating_add((154_000 as Weight).saturating_mul(t as Weight)) + .saturating_add((127_000 as Weight).saturating_mul(t as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn tip(t: u32, ) -> Weight { - (35_215_000 as Weight) - // Standard Error: 1_000 - .saturating_add((712_000 as Weight).saturating_mul(t as Weight)) + (22_361_000 as Weight) + // Standard Error: 0 + .saturating_add((584_000 as Weight).saturating_mul(t as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn close_tip(t: u32, ) -> Weight { - (117_027_000 as Weight) - // Standard Error: 1_000 - .saturating_add((375_000 as Weight).saturating_mul(t as Weight)) + (84_470_000 as Weight) + // Standard Error: 0 + .saturating_add((326_000 as Weight).saturating_mul(t as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn slash_tip(t: u32, ) -> Weight { - (37_184_000 as Weight) + (25_214_000 as Weight) // Standard Error: 0 - .saturating_add((11_000 as Weight).saturating_mul(t as Weight)) + .saturating_add((8_000 as Weight).saturating_mul(t as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } @@ -102,44 +102,44 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { fn report_awesome(r: u32, ) -> Weight { - (73_795_000 as Weight) + (49_844_000 as Weight) // Standard Error: 0 .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn retract_tip() -> Weight { - (61_753_000 as Weight) + (45_934_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn tip_new(r: u32, t: u32, ) -> Weight { - (47_731_000 as Weight) + (31_777_000 as Weight) // Standard Error: 0 .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) // Standard Error: 0 - .saturating_add((154_000 as Weight).saturating_mul(t as Weight)) + .saturating_add((127_000 as Weight).saturating_mul(t as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn tip(t: u32, ) -> Weight { - (35_215_000 as Weight) - // Standard Error: 1_000 - .saturating_add((712_000 as Weight).saturating_mul(t as Weight)) + (22_361_000 as Weight) + // Standard Error: 0 + .saturating_add((584_000 as 
Weight).saturating_mul(t as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn close_tip(t: u32, ) -> Weight { - (117_027_000 as Weight) - // Standard Error: 1_000 - .saturating_add((375_000 as Weight).saturating_mul(t as Weight)) + (84_470_000 as Weight) + // Standard Error: 0 + .saturating_add((326_000 as Weight).saturating_mul(t as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn slash_tip(t: u32, ) -> Weight { - (37_184_000 as Weight) + (25_214_000 as Weight) // Standard Error: 0 - .saturating_add((11_000 as Weight).saturating_mul(t as Weight)) + .saturating_add((8_000 as Weight).saturating_mul(t as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } diff --git a/frame/transaction-storage/src/weights.rs b/frame/transaction-storage/src/weights.rs index 7951db8828d07..46fc664d977c6 100644 --- a/frame/transaction-storage/src/weights.rs +++ b/frame/transaction-storage/src/weights.rs @@ -18,16 +18,14 @@ //! Autogenerated weights for pallet_transaction_storage //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 -//! DATE: 2021-06-03, STEPS: `[20, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2021-06-19, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: -// ./target/release/substrate +// target/release/substrate // benchmark -// --chain -// dev -// --steps -// 20 +// --chain=dev +// --steps=50 // --repeat=20 // --pallet=pallet_transaction_storage // --extrinsic=* @@ -57,17 +55,17 @@ impl WeightInfo for SubstrateWeight { fn store(l: u32, ) -> Weight { (0 as Weight) // Standard Error: 0 - .saturating_add((10_000 as Weight).saturating_mul(l as Weight)) + .saturating_add((8_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn renew() -> Weight { - (97_000_000 as Weight) + (65_933_000 as Weight) .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn check_proof_max() -> Weight { - (99_000_000 as Weight) + (163_549_000 as Weight) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -78,17 +76,17 @@ impl WeightInfo for () { fn store(l: u32, ) -> Weight { (0 as Weight) // Standard Error: 0 - .saturating_add((10_000 as Weight).saturating_mul(l as Weight)) + .saturating_add((8_000 as Weight).saturating_mul(l as Weight)) .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn renew() -> Weight { - (97_000_000 as Weight) + (65_933_000 as Weight) .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn check_proof_max() -> Weight { - (99_000_000 as Weight) + (163_549_000 as Weight) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } diff --git a/frame/treasury/src/weights.rs b/frame/treasury/src/weights.rs index 9d627f1c287e2..b22380e3c476c 100644 --- a/frame/treasury/src/weights.rs +++ b/frame/treasury/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_treasury //! //! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 -//! DATE: 2021-04-26, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2021-06-19, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -54,26 +54,26 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn propose_spend() -> Weight { - (45_393_000 as Weight) + (41_763_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn reject_proposal() -> Weight { - (42_796_000 as Weight) + (39_049_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn approve_proposal(p: u32, ) -> Weight { - (14_153_000 as Weight) - // Standard Error: 1_000 - .saturating_add((94_000 as Weight).saturating_mul(p as Weight)) + (13_547_000 as Weight) + // Standard Error: 0 + .saturating_add((124_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn on_initialize_proposals(p: u32, ) -> Weight { - (51_633_000 as Weight) - // Standard Error: 42_000 - .saturating_add((65_705_000 as Weight).saturating_mul(p as Weight)) + (48_990_000 as Weight) + // Standard Error: 19_000 + .saturating_add((59_621_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(p as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) @@ -84,26 +84,26 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { fn propose_spend() -> Weight { - (45_393_000 as Weight) + (41_763_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn reject_proposal() -> Weight { - (42_796_000 as Weight) + (39_049_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn approve_proposal(p: u32, ) -> Weight { - (14_153_000 as Weight) - // Standard Error: 1_000 - .saturating_add((94_000 as Weight).saturating_mul(p as Weight)) + (13_547_000 as Weight) + // Standard Error: 0 + .saturating_add((124_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn on_initialize_proposals(p: u32, ) -> Weight { - (51_633_000 as Weight) - // Standard Error: 42_000 - .saturating_add((65_705_000 as Weight).saturating_mul(p as Weight)) + (48_990_000 as Weight) + // Standard Error: 19_000 + .saturating_add((59_621_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(p as Weight))) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) diff --git a/frame/uniques/src/weights.rs b/frame/uniques/src/weights.rs index 9272ae6026a9f..a2263d6cd3486 100644 --- a/frame/uniques/src/weights.rs +++ b/frame/uniques/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_uniques //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 -//! 
DATE: 2021-05-24, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2021-06-19, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -72,23 +72,23 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn create() -> Weight { - (55_264_000 as Weight) + (43_219_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_create() -> Weight { - (28_173_000 as Weight) + (21_919_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn destroy(n: u32, m: u32, a: u32, ) -> Weight { (0 as Weight) - // Standard Error: 32_000 - .saturating_add((23_077_000 as Weight).saturating_mul(n as Weight)) - // Standard Error: 32_000 - .saturating_add((1_723_000 as Weight).saturating_mul(m as Weight)) - // Standard Error: 32_000 - .saturating_add((1_534_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 13_000 + .saturating_add((16_619_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 13_000 + .saturating_add((967_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 13_000 + .saturating_add((834_000 as Weight).saturating_mul(a as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) @@ -97,101 +97,101 @@ impl WeightInfo for SubstrateWeight { .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(a as Weight))) } fn mint() -> Weight { - (73_250_000 as Weight) + (57_627_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn burn() -> Weight { - (74_443_000 as Weight) + (58_615_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn transfer() -> Weight { - (54_690_000 as Weight) + (43_335_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn redeposit(i: u32, ) -> Weight { (0 as Weight) - // Standard Error: 19_000 - .saturating_add((34_624_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 13_000 + .saturating_add((26_322_000 as Weight).saturating_mul(i as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) } fn freeze() -> Weight { - (39_505_000 as Weight) + (31_020_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn thaw() -> Weight { - (38_844_000 as Weight) + (31_012_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn freeze_class() -> Weight { - (28_739_000 as Weight) + (22_761_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn thaw_class() -> Weight { - (28_963_000 as Weight) + (22_789_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) 
.saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn transfer_ownership() -> Weight { - (65_160_000 as Weight) + (50_779_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn set_team() -> Weight { - (30_000_000 as Weight) + (24_045_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_asset_status() -> Weight { - (29_145_000 as Weight) + (22_925_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_attribute() -> Weight { - (88_923_000 as Weight) + (70_416_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn clear_attribute() -> Weight { - (79_878_000 as Weight) + (64_640_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn set_metadata() -> Weight { - (67_110_000 as Weight) + (53_229_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn clear_metadata() -> Weight { - (66_191_000 as Weight) + (52_145_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn set_class_metadata() -> Weight { - (65_558_000 as Weight) + (51_556_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn clear_class_metadata() -> Weight { - (60_135_000 as Weight) + (47_314_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn approve_transfer() -> Weight { - (40_337_000 as Weight) + (32_946_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn cancel_approval() -> Weight { - (40_770_000 as Weight) + (32_328_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -200,23 +200,23 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { fn create() -> Weight { - (55_264_000 as Weight) + (43_219_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_create() -> Weight { - (28_173_000 as Weight) + (21_919_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn destroy(n: u32, m: u32, a: u32, ) -> Weight { (0 as Weight) - // Standard Error: 32_000 - .saturating_add((23_077_000 as Weight).saturating_mul(n as Weight)) - // Standard Error: 32_000 - .saturating_add((1_723_000 as Weight).saturating_mul(m as Weight)) - // Standard Error: 32_000 - .saturating_add((1_534_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 13_000 + .saturating_add((16_619_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 13_000 + .saturating_add((967_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 13_000 + .saturating_add((834_000 as Weight).saturating_mul(a as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) 
.saturating_add(RocksDbWeight::get().writes(2 as Weight)) @@ -225,101 +225,101 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(a as Weight))) } fn mint() -> Weight { - (73_250_000 as Weight) + (57_627_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn burn() -> Weight { - (74_443_000 as Weight) + (58_615_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn transfer() -> Weight { - (54_690_000 as Weight) + (43_335_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn redeposit(i: u32, ) -> Weight { (0 as Weight) - // Standard Error: 19_000 - .saturating_add((34_624_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 13_000 + .saturating_add((26_322_000 as Weight).saturating_mul(i as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) } fn freeze() -> Weight { - (39_505_000 as Weight) + (31_020_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn thaw() -> Weight { - (38_844_000 as Weight) + (31_012_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn freeze_class() -> Weight { - (28_739_000 as Weight) + (22_761_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn thaw_class() -> Weight { - (28_963_000 as Weight) + (22_789_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn transfer_ownership() -> Weight { - (65_160_000 as Weight) + (50_779_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn set_team() -> Weight { - (30_000_000 as Weight) + (24_045_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_asset_status() -> Weight { - (29_145_000 as Weight) + (22_925_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn set_attribute() -> Weight { - (88_923_000 as Weight) + (70_416_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn clear_attribute() -> Weight { - (79_878_000 as Weight) + (64_640_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn set_metadata() -> Weight { - (67_110_000 as Weight) + (53_229_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn clear_metadata() -> Weight { - (66_191_000 as Weight) + (52_145_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn set_class_metadata() -> Weight { - (65_558_000 as Weight) + (51_556_000 as 
Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn clear_class_metadata() -> Weight { - (60_135_000 as Weight) + (47_314_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn approve_transfer() -> Weight { - (40_337_000 as Weight) + (32_946_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn cancel_approval() -> Weight { - (40_770_000 as Weight) + (32_328_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } diff --git a/frame/utility/src/weights.rs b/frame/utility/src/weights.rs index dd4981cf32da7..0bab97201008c 100644 --- a/frame/utility/src/weights.rs +++ b/frame/utility/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_utility //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 -//! DATE: 2021-06-03, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2021-06-19, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -53,33 +53,33 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn batch(c: u32, ) -> Weight { - (19_099_000 as Weight) - // Standard Error: 1_000 - .saturating_add((640_000 as Weight).saturating_mul(c as Weight)) + (14_618_000 as Weight) + // Standard Error: 0 + .saturating_add((610_000 as Weight).saturating_mul(c as Weight)) } fn as_derivative() -> Weight { - (3_701_000 as Weight) + (3_175_000 as Weight) } fn batch_all(c: u32, ) -> Weight { - (19_199_000 as Weight) + (14_561_000 as Weight) // Standard Error: 0 - .saturating_add((1_061_000 as Weight).saturating_mul(c as Weight)) + .saturating_add((1_013_000 as Weight).saturating_mul(c as Weight)) } } // For backwards compatibility and tests impl WeightInfo for () { fn batch(c: u32, ) -> Weight { - (19_099_000 as Weight) - // Standard Error: 1_000 - .saturating_add((640_000 as Weight).saturating_mul(c as Weight)) + (14_618_000 as Weight) + // Standard Error: 0 + .saturating_add((610_000 as Weight).saturating_mul(c as Weight)) } fn as_derivative() -> Weight { - (3_701_000 as Weight) + (3_175_000 as Weight) } fn batch_all(c: u32, ) -> Weight { - (19_199_000 as Weight) + (14_561_000 as Weight) // Standard Error: 0 - .saturating_add((1_061_000 as Weight).saturating_mul(c as Weight)) + .saturating_add((1_013_000 as Weight).saturating_mul(c as Weight)) } } diff --git a/frame/vesting/src/weights.rs b/frame/vesting/src/weights.rs index 1e44474fbc970..053453d757f38 100644 --- a/frame/vesting/src/weights.rs +++ b/frame/vesting/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// Copyright (C) 2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,9 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Weights for pallet_vesting -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-10-27, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! Autogenerated weights for pallet_vesting +//! +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 +//! DATE: 2021-06-19, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -49,100 +50,97 @@ pub trait WeightInfo { fn vest_other_unlocked(l: u32, ) -> Weight; fn vested_transfer(l: u32, ) -> Weight; fn force_vested_transfer(l: u32, ) -> Weight; - } /// Weights for pallet_vesting using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn vest_locked(l: u32, ) -> Weight { - (57_472_000 as Weight) - .saturating_add((155_000 as Weight).saturating_mul(l as Weight)) + (42_905_000 as Weight) + // Standard Error: 13_000 + .saturating_add((232_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn vest_unlocked(l: u32, ) -> Weight { - (61_681_000 as Weight) - .saturating_add((138_000 as Weight).saturating_mul(l as Weight)) + (45_650_000 as Weight) + // Standard Error: 12_000 + .saturating_add((215_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } fn vest_other_locked(l: u32, ) -> Weight { - (56_910_000 as Weight) - .saturating_add((160_000 as Weight).saturating_mul(l as Weight)) + (42_273_000 as Weight) + // Standard Error: 15_000 + .saturating_add((246_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } fn vest_other_unlocked(l: u32, ) -> Weight { - (61_319_000 as Weight) - .saturating_add((144_000 as Weight).saturating_mul(l as Weight)) + (45_324_000 as Weight) + // Standard Error: 12_000 + .saturating_add((214_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } fn vested_transfer(l: u32, ) -> Weight { - (124_996_000 as Weight) - .saturating_add((209_000 as Weight).saturating_mul(l as Weight)) + (96_661_000 as Weight) + // Standard Error: 10_000 + .saturating_add((211_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } fn force_vested_transfer(l: u32, ) -> Weight { - (123_911_000 as Weight) - .saturating_add((213_000 as Weight).saturating_mul(l as Weight)) + (98_812_000 as Weight) + // Standard Error: 13_000 + .saturating_add((139_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) - } - } // For backwards compatibility and tests impl WeightInfo for () { fn vest_locked(l: u32, ) -> Weight { - (57_472_000 as Weight) - .saturating_add((155_000 as Weight).saturating_mul(l as Weight)) + (42_905_000 as Weight) + // Standard Error: 13_000 + .saturating_add((232_000 as Weight).saturating_mul(l as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn vest_unlocked(l: u32, ) -> Weight { - (61_681_000 as Weight) - .saturating_add((138_000 as Weight).saturating_mul(l as Weight)) + (45_650_000 as Weight) + // Standard Error: 12_000 + .saturating_add((215_000 as Weight).saturating_mul(l as Weight)) 
.saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } fn vest_other_locked(l: u32, ) -> Weight { - (56_910_000 as Weight) - .saturating_add((160_000 as Weight).saturating_mul(l as Weight)) + (42_273_000 as Weight) + // Standard Error: 15_000 + .saturating_add((246_000 as Weight).saturating_mul(l as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } fn vest_other_unlocked(l: u32, ) -> Weight { - (61_319_000 as Weight) - .saturating_add((144_000 as Weight).saturating_mul(l as Weight)) + (45_324_000 as Weight) + // Standard Error: 12_000 + .saturating_add((214_000 as Weight).saturating_mul(l as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - } fn vested_transfer(l: u32, ) -> Weight { - (124_996_000 as Weight) - .saturating_add((209_000 as Weight).saturating_mul(l as Weight)) + (96_661_000 as Weight) + // Standard Error: 10_000 + .saturating_add((211_000 as Weight).saturating_mul(l as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - } fn force_vested_transfer(l: u32, ) -> Weight { - (123_911_000 as Weight) - .saturating_add((213_000 as Weight).saturating_mul(l as Weight)) + (98_812_000 as Weight) + // Standard Error: 13_000 + .saturating_add((139_000 as Weight).saturating_mul(l as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) - } - } From abbe24e270b4aaa52a5ef8209e6caefa377612dc Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Mon, 21 Jun 2021 15:12:58 +0200 Subject: [PATCH 22/67] fix some failing ui tests (#9157) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix some failing ui tests * Update frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr * Update frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr * fix ui test * fix ui test * TRYBUILD=overwrite cargo test --workspace -- ui Co-authored-by: Bastian Köcher Co-authored-by: thiolliere --- .../call_argument_invalid_bound.stderr | 22 +++--- .../call_argument_invalid_bound_2.stderr | 74 +++++++++---------- .../call_argument_invalid_bound_3.stderr | 18 ++--- .../pallet_ui/event_field_not_member.stderr | 16 ++-- .../storage_info_unsatisfied_nmap.stderr | 4 +- .../tests/max_encoded_len_ui/union.stderr | 4 +- .../ui/impl_incorrect_method_signature.stderr | 17 ++++- .../tests/ui/mock_only_self_reference.stderr | 24 +++++- ...reference_in_impl_runtime_apis_call.stderr | 17 ++++- 9 files changed, 121 insertions(+), 75 deletions(-) diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr index ead05261b1938..d32d8ada7a11a 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr @@ -1,13 +1,12 @@ -error[E0369]: binary operation `==` cannot be applied to type `&::Bar` +error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` --> $DIR/call_argument_invalid_bound.rs:20:41 | 20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { - | ^ - | -help: consider further restricting this bound + | ^ `::Bar` cannot be formatted using `{:?}` because it doesn't implement 
`std::fmt::Debug` | -17 | #[pallet::call + std::cmp::PartialEq] - | ^^^^^^^^^^^^^^^^^^^^^ + = help: the trait `std::fmt::Debug` is not implemented for `::Bar` + = note: required because of the requirements on the impl of `std::fmt::Debug` for `&::Bar` + = note: required for the cast to the object type `dyn std::fmt::Debug` error[E0277]: the trait bound `::Bar: Clone` is not satisfied --> $DIR/call_argument_invalid_bound.rs:20:41 @@ -17,12 +16,13 @@ error[E0277]: the trait bound `::Bar: Clone` is not satisfi | = note: required by `clone` -error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` +error[E0369]: binary operation `==` cannot be applied to type `&::Bar` --> $DIR/call_argument_invalid_bound.rs:20:41 | 20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { - | ^ `::Bar` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` + | ^ | - = help: the trait `std::fmt::Debug` is not implemented for `::Bar` - = note: required because of the requirements on the impl of `std::fmt::Debug` for `&::Bar` - = note: required for the cast to the object type `dyn std::fmt::Debug` +help: consider further restricting this bound + | +17 | #[pallet::call + std::cmp::PartialEq] + | ^^^^^^^^^^^^^^^^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr index 2a3bbe1abf4cd..bad37153de7c4 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr @@ -1,39 +1,12 @@ -error[E0277]: the trait bound `::Bar: WrapperTypeDecode` is not satisfied - --> $DIR/call_argument_invalid_bound_2.rs:20:41 - | -20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { - | ^ the trait `WrapperTypeDecode` is not implemented for `::Bar` - | - ::: /usr/local/cargo/registry/src/github.com-1ecc6299db9ec823/parity-scale-codec-2.1.1/src/codec.rs:277:18 - | -277 | fn decode(input: &mut I) -> Result; - | ----- required by this bound in `pallet::_::_parity_scale_codec::Decode::decode` - | - = note: required because of the requirements on the impl of `Decode` for `::Bar` - -error[E0277]: the trait bound `::Bar: WrapperTypeEncode` is not satisfied - --> $DIR/call_argument_invalid_bound_2.rs:20:41 - | -20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { - | ^ the trait `WrapperTypeEncode` is not implemented for `::Bar` - | - ::: /usr/local/cargo/registry/src/github.com-1ecc6299db9ec823/parity-scale-codec-2.1.1/src/codec.rs:216:21 - | -216 | fn encode_to(&self, dest: &mut T) { - | ------ required by this bound in `encode_to` - | - = note: required because of the requirements on the impl of `pallet::_::_parity_scale_codec::Encode` for `::Bar` - -error[E0369]: binary operation `==` cannot be applied to type `&::Bar` +error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` --> $DIR/call_argument_invalid_bound_2.rs:20:41 | 20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { - | ^ - | -help: consider further restricting this bound + | ^ `::Bar` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` | -17 | #[pallet::call + std::cmp::PartialEq] - | ^^^^^^^^^^^^^^^^^^^^^ + = help: the trait `std::fmt::Debug` is not implemented for `::Bar` + = note: required because of the requirements on the impl of `std::fmt::Debug` for `&::Bar` + = note: required for the cast to the object type `dyn 
std::fmt::Debug` error[E0277]: the trait bound `::Bar: Clone` is not satisfied --> $DIR/call_argument_invalid_bound_2.rs:20:41 @@ -43,12 +16,39 @@ error[E0277]: the trait bound `::Bar: Clone` is not satisfi | = note: required by `clone` -error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` +error[E0369]: binary operation `==` cannot be applied to type `&::Bar` --> $DIR/call_argument_invalid_bound_2.rs:20:41 | 20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { - | ^ `::Bar` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` + | ^ | - = help: the trait `std::fmt::Debug` is not implemented for `::Bar` - = note: required because of the requirements on the impl of `std::fmt::Debug` for `&::Bar` - = note: required for the cast to the object type `dyn std::fmt::Debug` +help: consider further restricting this bound + | +17 | #[pallet::call + std::cmp::PartialEq] + | ^^^^^^^^^^^^^^^^^^^^^ + +error[E0277]: the trait bound `::Bar: WrapperTypeEncode` is not satisfied + --> $DIR/call_argument_invalid_bound_2.rs:20:41 + | +20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { + | ^ the trait `WrapperTypeEncode` is not implemented for `::Bar` + | + ::: /usr/local/cargo/registry/src/github.com-1ecc6299db9ec823/parity-scale-codec-2.1.1/src/codec.rs:216:21 + | +216 | fn encode_to(&self, dest: &mut T) { + | ------ required by this bound in `encode_to` + | + = note: required because of the requirements on the impl of `pallet::_::_parity_scale_codec::Encode` for `::Bar` + +error[E0277]: the trait bound `::Bar: WrapperTypeDecode` is not satisfied + --> $DIR/call_argument_invalid_bound_2.rs:20:41 + | +20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { + | ^ the trait `WrapperTypeDecode` is not implemented for `::Bar` + | + ::: /usr/local/cargo/registry/src/github.com-1ecc6299db9ec823/parity-scale-codec-2.1.1/src/codec.rs:277:18 + | +277 | fn decode(input: &mut I) -> Result; + | ----- required by this bound in `pallet::_::_parity_scale_codec::Decode::decode` + | + = note: required because of the requirements on the impl of `Decode` for `::Bar` diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr index 73c3069719ea2..b6f4494033f7b 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr @@ -1,10 +1,13 @@ -error[E0369]: binary operation `==` cannot be applied to type `&Bar` +error[E0277]: `Bar` doesn't implement `std::fmt::Debug` --> $DIR/call_argument_invalid_bound_3.rs:22:41 | 22 | pub fn foo(origin: OriginFor, bar: Bar) -> DispatchResultWithPostInfo { - | ^^^ + | ^^^ `Bar` cannot be formatted using `{:?}` | - = note: an implementation of `std::cmp::PartialEq` might be missing for `&Bar` + = help: the trait `std::fmt::Debug` is not implemented for `Bar` + = note: add `#[derive(Debug)]` or manually implement `std::fmt::Debug` + = note: required because of the requirements on the impl of `std::fmt::Debug` for `&Bar` + = note: required for the cast to the object type `dyn std::fmt::Debug` error[E0277]: the trait bound `Bar: Clone` is not satisfied --> $DIR/call_argument_invalid_bound_3.rs:22:41 @@ -14,13 +17,10 @@ error[E0277]: the trait bound `Bar: Clone` is not satisfied | = note: required by `clone` -error[E0277]: `Bar` doesn't implement `std::fmt::Debug` +error[E0369]: binary operation 
`==` cannot be applied to type `&Bar` --> $DIR/call_argument_invalid_bound_3.rs:22:41 | 22 | pub fn foo(origin: OriginFor, bar: Bar) -> DispatchResultWithPostInfo { - | ^^^ `Bar` cannot be formatted using `{:?}` + | ^^^ | - = help: the trait `std::fmt::Debug` is not implemented for `Bar` - = note: add `#[derive(Debug)]` or manually implement `std::fmt::Debug` - = note: required because of the requirements on the impl of `std::fmt::Debug` for `&Bar` - = note: required for the cast to the object type `dyn std::fmt::Debug` + = note: an implementation of `std::cmp::PartialEq` might be missing for `&Bar` diff --git a/frame/support/test/tests/pallet_ui/event_field_not_member.stderr b/frame/support/test/tests/pallet_ui/event_field_not_member.stderr index 97d4db798e611..d48012a6c952d 100644 --- a/frame/support/test/tests/pallet_ui/event_field_not_member.stderr +++ b/frame/support/test/tests/pallet_ui/event_field_not_member.stderr @@ -1,12 +1,10 @@ -error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` +error[E0277]: the trait bound `::Bar: Clone` is not satisfied --> $DIR/event_field_not_member.rs:23:7 | 23 | B { b: T::Bar }, - | ^ `::Bar` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` + | ^ the trait `Clone` is not implemented for `::Bar` | - = help: the trait `std::fmt::Debug` is not implemented for `::Bar` - = note: required because of the requirements on the impl of `std::fmt::Debug` for `&::Bar` - = note: required for the cast to the object type `dyn std::fmt::Debug` + = note: required by `clone` error[E0369]: binary operation `==` cannot be applied to type `&::Bar` --> $DIR/event_field_not_member.rs:23:7 @@ -19,10 +17,12 @@ help: consider further restricting this bound 22 | pub enum Event { | ^^^^^^^^^^^^^^^^^^^^^ -error[E0277]: the trait bound `::Bar: Clone` is not satisfied +error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` --> $DIR/event_field_not_member.rs:23:7 | 23 | B { b: T::Bar }, - | ^ the trait `Clone` is not implemented for `::Bar` + | ^ `::Bar` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` | - = note: required by `clone` + = help: the trait `std::fmt::Debug` is not implemented for `::Bar` + = note: required because of the requirements on the impl of `std::fmt::Debug` for `&::Bar` + = note: required for the cast to the object type `dyn std::fmt::Debug` diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr index 545520124bfee..6c92423c6a7fe 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr @@ -4,6 +4,6 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied 10 | #[pallet::generate_storage_info] | ^^^^^^^^^^^^^^^^^^^^^ the trait `MaxEncodedLen` is not implemented for `Bar` | - = note: required because of the requirements on the impl of `KeyGeneratorMaxEncodedLen` for `NMapKey` - = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, NMapKey, u32>` + = note: required because of the requirements on the impl of `KeyGeneratorMaxEncodedLen` for `Key` + = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, Key, u32>` = note: required by `storage_info` diff --git 
a/max-encoded-len/tests/max_encoded_len_ui/union.stderr b/max-encoded-len/tests/max_encoded_len_ui/union.stderr index bc5519d674d9d..d09a3f4673e18 100644 --- a/max-encoded-len/tests/max_encoded_len_ui/union.stderr +++ b/max-encoded-len/tests/max_encoded_len_ui/union.stderr @@ -1,10 +1,10 @@ -error: Union types are not supported +error: Union types are not supported. --> $DIR/union.rs:5:1 | 5 | union Union { | ^^^^^ -error: Union types are not supported. +error: Union types are not supported --> $DIR/union.rs:5:1 | 5 | union Union { diff --git a/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr b/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr index 6b00b7268672f..9dd84c24b6781 100644 --- a/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr +++ b/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr @@ -5,7 +5,10 @@ error[E0053]: method `test` has an incompatible type for trait | --- type in trait ... 19 | fn test(data: String) {} - | ^^^^^^ expected `u64`, found struct `std::string::String` + | ^^^^^^ + | | + | expected `u64`, found struct `std::string::String` + | help: change the parameter type to match the trait: `u64` | = note: expected fn pointer `fn(u64)` found fn pointer `fn(std::string::String)` @@ -21,7 +24,17 @@ error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for tr | |_- type in trait 16 | 17 | sp_api::impl_runtime_apis! { - | ^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `u64`, found struct `std::string::String` + | -^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | _expected `u64`, found struct `std::string::String` + | | +18 | | impl self::Api for Runtime { +19 | | fn test(data: String) {} +20 | | } +... | +32 | | } +33 | | } + | |_- help: change the parameter type to match the trait: `std::option::Option` | = note: expected fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option, Vec<_>) -> Result<_, _>` found fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option, Vec<_>) -> Result<_, _>` diff --git a/primitives/api/test/tests/ui/mock_only_self_reference.stderr b/primitives/api/test/tests/ui/mock_only_self_reference.stderr index 83cfcf6ca1f9e..7385fe4745989 100644 --- a/primitives/api/test/tests/ui/mock_only_self_reference.stderr +++ b/primitives/api/test/tests/ui/mock_only_self_reference.stderr @@ -22,7 +22,17 @@ error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for tr | |_- type in trait ... 12 | sp_api::mock_impl_runtime_apis! { - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `u64`, found `()` + | -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | _expected `u64`, found `()` + | | +13 | | impl Api for MockApi { +14 | | fn test(self, data: u64) {} +15 | | +16 | | fn test2(&mut self, data: u64) {} +17 | | } +18 | | } + | |_- help: change the parameter type to match the trait: `Option` | = note: expected fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option, Vec<_>) -> Result<_, _>` found fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option<()>, Vec<_>) -> Result<_, _>` @@ -40,7 +50,17 @@ error[E0053]: method `Api_test2_runtime_api_impl` has an incompatible type for t | |_- type in trait ... 12 | sp_api::mock_impl_runtime_apis! 
{ - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `u64`, found `()` + | -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | _expected `u64`, found `()` + | | +13 | | impl Api for MockApi { +14 | | fn test(self, data: u64) {} +15 | | +16 | | fn test2(&mut self, data: u64) {} +17 | | } +18 | | } + | |_- help: change the parameter type to match the trait: `Option` | = note: expected fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option, Vec<_>) -> Result<_, _>` found fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option<()>, Vec<_>) -> Result<_, _>` diff --git a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr index 689723f8d7509..a0a16c4a493db 100644 --- a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr +++ b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr @@ -5,7 +5,10 @@ error[E0053]: method `test` has an incompatible type for trait | --- type in trait ... 19 | fn test(data: &u64) { - | ^^^^ expected `u64`, found `&u64` + | ^^^^ + | | + | expected `u64`, found `&u64` + | help: change the parameter type to match the trait: `u64` | = note: expected fn pointer `fn(u64)` found fn pointer `fn(&u64)` @@ -21,7 +24,17 @@ error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for tr | |_- type in trait 16 | 17 | sp_api::impl_runtime_apis! { - | ^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `u64`, found `&u64` + | -^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | _expected `u64`, found `&u64` + | | +18 | | impl self::Api for Runtime { +19 | | fn test(data: &u64) { +20 | | unimplemented!() +... | +34 | | } +35 | | } + | |_- help: change the parameter type to match the trait: `std::option::Option` | = note: expected fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option, Vec<_>) -> Result<_, _>` found fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option<&u64>, Vec<_>) -> Result<_, _>` From df4a58833a650cf37fc97764bf6c9314435e3cb2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Mon, 21 Jun 2021 17:02:08 +0100 Subject: [PATCH 23/67] grandpa: don't use block_on in Environment::report_equivocation (#9154) * grandpa: don't use block_on in Environment::report_equivocation * grandpa: add issue number to todo --- client/finality-grandpa/src/environment.rs | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/client/finality-grandpa/src/environment.rs b/client/finality-grandpa/src/environment.rs index 77c7ccda7daf6..964e199f90968 100644 --- a/client/finality-grandpa/src/environment.rs +++ b/client/finality-grandpa/src/environment.rs @@ -503,19 +503,19 @@ where let is_descendent_of = is_descendent_of(&*self.client, None); - // TODO: add proper async support here - let best_header = futures::executor::block_on( - self.select_chain - .best_chain() - .map_err(|e| Error::Blockchain(e.to_string())), - )?; + let (best_block_hash, best_block_number) = { + // TODO [#9158]: Use SelectChain::best_chain() to get a potentially + // more accurate best block + let info = self.client.info(); + (info.best_hash, info.best_number) + }; let authority_set = self.authority_set.inner(); // block hash and number of the next pending authority set change in the // given best chain. 
let next_change = authority_set - .next_change(&best_header.hash(), &is_descendent_of) + .next_change(&best_block_hash, &is_descendent_of) .map_err(|e| Error::Safety(e.to_string()))?; // find the hash of the latest block in the current set @@ -528,7 +528,7 @@ where // the next set starts at `n` so the current one lasts until `n - 1`. if // `n` is later than the best block, then the current set is still live // at best block. - Some((_, n)) if n > *best_header.number() => best_header.hash(), + Some((_, n)) if n > best_block_number => best_block_hash, Some((h, _)) => { // this is the header at which the new set will start let header = self.client.header(BlockId::Hash(h))?.expect( @@ -541,7 +541,7 @@ where } // there is no pending change, the latest block for the current set is // the best block. - None => best_header.hash(), + None => best_block_hash, }; // generate key ownership proof at that block @@ -570,7 +570,7 @@ where self.client .runtime_api() .submit_report_equivocation_unsigned_extrinsic( - &BlockId::Hash(best_header.hash()), + &BlockId::Hash(best_block_hash), equivocation_proof, key_owner_proof, ) From 97338fc60fdfcf647ba62e108d373e96acafb9c2 Mon Sep 17 00:00:00 2001 From: Arkadiy Paronyan Date: Tue, 22 Jun 2021 11:32:43 +0200 Subject: [PATCH 24/67] Fast sync (#8884) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * State sync * Importing state fixes * Bugfixes * Sync with proof * Status reporting * Unsafe sync mode * Sync test * Cleanup * Apply suggestions from code review Co-authored-by: cheme Co-authored-by: Pierre Krieger * set_genesis_storage * Extract keys from range proof * Detect iter completion * Download and import bodies with fast sync * Replaced meta updates tuple with a struct * Fixed reverting finalized state * Reverted timeout * Typo * Doc * Doc * Fixed light client test * Fixed error handling * Tweaks * More UpdateMeta changes * Rename convert_transaction * Apply suggestions from code review Co-authored-by: Bastian Köcher * Apply suggestions from code review Co-authored-by: Bastian Köcher * Code review suggestions * Fixed count handling Co-authored-by: cheme Co-authored-by: Pierre Krieger Co-authored-by: Bastian Köcher --- Cargo.lock | 5 +- client/api/src/backend.rs | 5 + client/api/src/in_mem.rs | 57 ++- client/api/src/lib.rs | 1 + client/api/src/proof_provider.rs | 27 ++ .../authority-discovery/src/worker/tests.rs | 1 + client/cli/src/arg_enums.rs | 24 + client/cli/src/params/network_params.rs | 9 + client/consensus/aura/src/lib.rs | 6 +- client/consensus/babe/src/lib.rs | 12 +- .../consensus/manual-seal/src/seal_block.rs | 6 +- client/consensus/pow/src/worker.rs | 7 +- client/db/src/bench.rs | 12 + client/db/src/lib.rs | 454 +++++++++++++----- client/db/src/light.rs | 7 +- client/db/src/storage_cache.rs | 22 + client/db/src/utils.rs | 14 +- client/finality-grandpa/src/import.rs | 6 +- client/informant/src/display.rs | 17 +- client/light/src/backend.rs | 26 +- client/network/src/behaviour.rs | 25 +- client/network/src/chain.rs | 1 + client/network/src/config.rs | 30 ++ client/network/src/gossip/tests.rs | 12 + client/network/src/lib.rs | 6 +- client/network/src/protocol.rs | 157 ++++-- client/network/src/protocol/sync.rs | 328 ++++++++++--- client/network/src/protocol/sync/state.rs | 187 ++++++++ client/network/src/schema/api.v1.proto | 25 + client/network/src/service.rs | 14 +- client/network/src/service/tests.rs | 12 + client/network/src/state_request_handler.rs | 246 ++++++++++ client/network/test/src/block_import.rs | 
2 + client/network/test/src/lib.rs | 45 +- client/network/test/src/sync.rs | 40 ++ client/service/Cargo.toml | 1 + client/service/src/builder.rs | 22 +- client/service/src/chain_ops/import_blocks.rs | 2 + client/service/src/client/client.rs | 223 ++++++--- client/service/test/src/client/light.rs | 2 +- primitives/blockchain/src/backend.rs | 4 +- primitives/blockchain/src/error.rs | 4 +- .../consensus/common/src/block_import.rs | 65 ++- .../consensus/common/src/import_queue.rs | 18 +- .../common/src/import_queue/basic_queue.rs | 2 + primitives/consensus/common/src/lib.rs | 3 +- primitives/runtime/src/generic/block.rs | 13 + primitives/state-machine/src/backend.rs | 16 + primitives/state-machine/src/lib.rs | 146 +++++- .../src/overlayed_changes/mod.rs | 2 +- .../state-machine/src/proving_backend.rs | 19 + primitives/state-machine/src/trie_backend.rs | 11 + .../state-machine/src/trie_backend_essence.rs | 81 +++- test-utils/client/src/lib.rs | 9 + 54 files changed, 2120 insertions(+), 371 deletions(-) create mode 100644 client/network/src/protocol/sync/state.rs create mode 100644 client/network/src/state_request_handler.rs diff --git a/Cargo.lock b/Cargo.lock index a33cb02f7f0d4..ffcf95820342d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8022,6 +8022,7 @@ dependencies = [ "sp-runtime", "sp-session", "sp-state-machine", + "sp-storage", "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", @@ -10551,9 +10552,9 @@ dependencies = [ [[package]] name = "trie-db" -version = "0.22.3" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec051edf7f0fc9499a2cb0947652cab2148b9d7f61cee7605e312e9f970dacaf" +checksum = "cd81fe0c8bc2b528a51c9d2c31dae4483367a26a723a3c9a4a8120311d7774e3" dependencies = [ "hash-db", "hashbrown", diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index 09e9e0cb2e173..1f1ad13067b34 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -41,6 +41,7 @@ use sp_consensus::BlockOrigin; use parking_lot::RwLock; pub use sp_state_machine::Backend as StateBackend; +pub use sp_consensus::ImportedState; use std::marker::PhantomData; /// Extracts the state backend type for the given backend. @@ -161,6 +162,10 @@ pub trait BlockImportOperation { update: TransactionForSB, ) -> sp_blockchain::Result<()>; + /// Set genesis state. If `commit` is `false` the state is saved in memory, but is not written + /// to the database. + fn set_genesis_state(&mut self, storage: Storage, commit: bool) -> sp_blockchain::Result; + /// Inject storage data into the database replacing any existing data. 
fn reset_storage(&mut self, storage: Storage) -> sp_blockchain::Result; diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 0d40bb3354cc3..916b830f6189d 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -347,6 +347,11 @@ impl HeaderBackend for Blockchain { genesis_hash: storage.genesis_hash, finalized_hash: storage.finalized_hash, finalized_number: storage.finalized_number, + finalized_state: if storage.finalized_hash != Default::default() { + Some((storage.finalized_hash.clone(), storage.finalized_number)) + } else { + None + }, number_leaves: storage.leaves.count() } } @@ -528,6 +533,32 @@ pub struct BlockImportOperation { set_head: Option>, } +impl BlockImportOperation where + Block::Hash: Ord, +{ + fn apply_storage(&mut self, storage: Storage, commit: bool) -> sp_blockchain::Result { + check_genesis_storage(&storage)?; + + let child_delta = storage.children_default.iter() + .map(|(_storage_key, child_content)| + ( + &child_content.child_info, + child_content.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))) + ) + ); + + let (root, transaction) = self.old_state.full_storage_root( + storage.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), + child_delta, + ); + + if commit { + self.new_state = Some(transaction); + } + Ok(root) + } +} + impl backend::BlockImportOperation for BlockImportOperation where Block::Hash: Ord, { @@ -569,24 +600,12 @@ impl backend::BlockImportOperation for BlockImportOperatio Ok(()) } - fn reset_storage(&mut self, storage: Storage) -> sp_blockchain::Result { - check_genesis_storage(&storage)?; - - let child_delta = storage.children_default.iter() - .map(|(_storage_key, child_content)| - ( - &child_content.child_info, - child_content.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))) - ) - ); - - let (root, transaction) = self.old_state.full_storage_root( - storage.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), - child_delta, - ); + fn set_genesis_state(&mut self, storage: Storage, commit: bool) -> sp_blockchain::Result { + self.apply_storage(storage, commit) + } - self.new_state = Some(transaction); - Ok(root) + fn reset_storage(&mut self, storage: Storage) -> sp_blockchain::Result { + self.apply_storage(storage, true) } fn insert_aux(&mut self, ops: I) -> sp_blockchain::Result<()> @@ -806,12 +825,12 @@ impl backend::RemoteBackend for Backend where Block /// Check that genesis storage is valid. 
pub fn check_genesis_storage(storage: &Storage) -> sp_blockchain::Result<()> { if storage.top.iter().any(|(k, _)| well_known_keys::is_child_storage_key(k)) { - return Err(sp_blockchain::Error::GenesisInvalid.into()); + return Err(sp_blockchain::Error::InvalidState.into()); } if storage.children_default.keys() .any(|child_key| !well_known_keys::is_child_storage_key(&child_key)) { - return Err(sp_blockchain::Error::GenesisInvalid.into()); + return Err(sp_blockchain::Error::InvalidState.into()); } Ok(()) diff --git a/client/api/src/lib.rs b/client/api/src/lib.rs index f3cef0e36ff47..71cf499f79943 100644 --- a/client/api/src/lib.rs +++ b/client/api/src/lib.rs @@ -41,6 +41,7 @@ pub use proof_provider::*; pub use sp_blockchain::HeaderBackend; pub use sp_state_machine::{StorageProof, ExecutionStrategy}; +pub use sp_storage::{StorageData, StorageKey, PrefixedStorageKey, ChildInfo}; /// Usage Information Provider interface /// diff --git a/client/api/src/proof_provider.rs b/client/api/src/proof_provider.rs index a0dbcf1d1e807..0e9fd5318ba90 100644 --- a/client/api/src/proof_provider.rs +++ b/client/api/src/proof_provider.rs @@ -70,4 +70,31 @@ pub trait ProofProvider { storage_key: Option<&PrefixedStorageKey>, key: &StorageKey, ) -> sp_blockchain::Result>; + + /// Given a `BlockId` iterate over all storage values starting at `start_key` exclusively, + /// building proofs until size limit is reached. Returns combined proof and the number of collected keys. + fn read_proof_collection( + &self, + id: &BlockId, + start_key: &[u8], + size_limit: usize, + ) -> sp_blockchain::Result<(StorageProof, u32)>; + + /// Given a `BlockId` iterate over all storage values starting at `start_key`. + /// Returns collected keys and values. + fn storage_collection( + &self, + id: &BlockId, + start_key: &[u8], + size_limit: usize, + ) -> sp_blockchain::Result, Vec)>>; + + /// Verify read storage proof for a set of keys. + /// Returns collected key-value pairs and a flag indicating if iteration is complete. + fn verify_range_proof( + &self, + root: Block::Hash, + proof: StorageProof, + start_key: &[u8], + ) -> sp_blockchain::Result<(Vec<(Vec, Vec)>, bool)>; } diff --git a/client/authority-discovery/src/worker/tests.rs b/client/authority-discovery/src/worker/tests.rs index b702cd8c40085..8be23e4840bde 100644 --- a/client/authority-discovery/src/worker/tests.rs +++ b/client/authority-discovery/src/worker/tests.rs @@ -69,6 +69,7 @@ impl HeaderBackend for TestApi { finalized_number: Zero::zero(), genesis_hash: Default::default(), number_leaves: Default::default(), + finalized_state: None, } } diff --git a/client/cli/src/arg_enums.rs b/client/cli/src/arg_enums.rs index fb2f8fdbc21d8..1bca67e782a3b 100644 --- a/client/cli/src/arg_enums.rs +++ b/client/cli/src/arg_enums.rs @@ -232,6 +232,30 @@ arg_enum! { } } +arg_enum! { + /// Syncing mode. + #[allow(missing_docs)] + #[derive(Debug, Clone, Copy)] + pub enum SyncMode { + // Full sync. Download and verify all blocks. + Full, + // Download blocks without executing them. Download latest state with proofs. + Fast, + // Download blocks without executing them. Download latest state without proofs.
+ FastUnsafe, + } +} + +impl Into for SyncMode { + fn into(self) -> sc_network::config::SyncMode { + match self { + SyncMode::Full => sc_network::config::SyncMode::Full, + SyncMode::Fast => sc_network::config::SyncMode::Fast { skip_proofs: false }, + SyncMode::FastUnsafe => sc_network::config::SyncMode::Fast { skip_proofs: true }, + } + } +} + /// Default value for the `--execution-syncing` parameter. pub const DEFAULT_EXECUTION_SYNCING: ExecutionStrategy = ExecutionStrategy::NativeElseWasm; /// Default value for the `--execution-import-block` parameter. diff --git a/client/cli/src/params/network_params.rs b/client/cli/src/params/network_params.rs index 7549c76378bea..69f4c9d1ba74b 100644 --- a/client/cli/src/params/network_params.rs +++ b/client/cli/src/params/network_params.rs @@ -17,6 +17,7 @@ // along with this program. If not, see . use crate::params::node_key_params::NodeKeyParams; +use crate::arg_enums::SyncMode; use sc_network::{ config::{NetworkConfiguration, NodeKeyConfig, NonReservedPeerMode, SetConfig, TransportConfig}, multiaddr::Protocol, @@ -125,6 +126,13 @@ pub struct NetworkParams { /// Join the IPFS network and serve transactions over bitswap protocol. #[structopt(long)] pub ipfs_server: bool, + + /// Blockchain syncing mode. + /// Full - Download and validate full blockchain history (Default). + /// Fast - Download blocks and the latest state only. + /// FastUnsafe - Same as Fast, but skips downloading state proofs. + #[structopt(long, default_value = "Full")] + pub sync: SyncMode, } impl NetworkParams { @@ -218,6 +226,7 @@ impl NetworkParams { kademlia_disjoint_query_paths: self.kademlia_disjoint_query_paths, yamux_window_size: None, ipfs_server: self.ipfs_server, + sync_mode: self.sync.into(), } } } diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 845e920cfc11a..d08ce5dfee259 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -42,7 +42,7 @@ use codec::{Encode, Decode, Codec}; use sp_consensus::{ BlockImport, Environment, Proposer, CanAuthorWith, ForkChoiceStrategy, BlockImportParams, - BlockOrigin, Error as ConsensusError, SelectChain, + BlockOrigin, Error as ConsensusError, SelectChain, StateAction, }; use sc_client_api::{backend::AuxStore, BlockOf, UsageProvider}; use sp_blockchain::{Result as CResult, ProvideCache, HeaderBackend}; @@ -421,7 +421,9 @@ where let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); import_block.post_digests.push(signature_digest_item); import_block.body = Some(body); - import_block.storage_changes = Some(storage_changes); + import_block.state_action = StateAction::ApplyChanges( + sp_consensus::StorageChanges::Changes(storage_changes) + ); import_block.fork_choice = Some(ForkChoiceStrategy::LongestChain); Ok(import_block) diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index 15d16c91f4304..61b58bf1b5999 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -101,6 +101,7 @@ use sp_consensus::{ import_queue::{BasicQueue, CacheKeyId, DefaultImportQueue, Verifier}, BlockCheckParams, BlockImport, BlockImportParams, BlockOrigin, Environment, Error as ConsensusError, ForkChoiceStrategy, Proposer, SelectChain, SlotData, + StateAction, }; use sp_consensus_babe::inherents::BabeInherentData; use sp_consensus_slots::Slot; @@ -790,7 +791,9 @@ where let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); import_block.post_digests.push(digest_item); import_block.body =
Some(body); - import_block.storage_changes = Some(storage_changes); + import_block.state_action = StateAction::ApplyChanges( + sp_consensus::StorageChanges::Changes(storage_changes) + ); import_block.intermediates.insert( Cow::from(INTERMEDIATE_KEY), Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>, @@ -1295,7 +1298,12 @@ impl BlockImport for BabeBlockImport return Ok(ImportResult::AlreadyInChain), + Ok(sp_blockchain::BlockStatus::InChain) => { + // When re-importing existing block strip away intermediates. + let _ = block.take_intermediate::>(INTERMEDIATE_KEY)?; + block.fork_choice = Some(ForkChoiceStrategy::Custom(false)); + return self.inner.import_block(block, new_cache).await.map_err(Into::into) + }, Ok(sp_blockchain::BlockStatus::Unknown) => {}, Err(e) => return Err(ConsensusError::ClientImport(e.to_string())), } diff --git a/client/consensus/manual-seal/src/seal_block.rs b/client/consensus/manual-seal/src/seal_block.rs index 6ddd2cb05d498..89da02ac49612 100644 --- a/client/consensus/manual-seal/src/seal_block.rs +++ b/client/consensus/manual-seal/src/seal_block.rs @@ -28,7 +28,7 @@ use futures::prelude::*; use sc_transaction_pool::txpool; use sp_consensus::{ self, BlockImport, Environment, Proposer, ForkChoiceStrategy, - BlockImportParams, BlockOrigin, ImportResult, SelectChain, + BlockImportParams, BlockOrigin, ImportResult, SelectChain, StateAction, }; use sp_blockchain::HeaderBackend; use std::collections::HashMap; @@ -145,7 +145,9 @@ pub async fn seal_block( params.body = Some(body); params.finalized = finalize; params.fork_choice = Some(ForkChoiceStrategy::LongestChain); - params.storage_changes = Some(proposal.storage_changes); + params.state_action = StateAction::ApplyChanges( + sp_consensus::StorageChanges::Changes(proposal.storage_changes) + ); if let Some(digest_provider) = digest_provider { digest_provider.append_block_import(&parent, &mut params, &inherent_data)?; diff --git a/client/consensus/pow/src/worker.rs b/client/consensus/pow/src/worker.rs index e5d76592b7fd1..74fbcce81341d 100644 --- a/client/consensus/pow/src/worker.rs +++ b/client/consensus/pow/src/worker.rs @@ -18,7 +18,8 @@ use std::{pin::Pin, time::Duration, collections::HashMap, borrow::Cow}; use sc_client_api::ImportNotifications; -use sp_consensus::{Proposal, BlockOrigin, BlockImportParams, import_queue::BoxBlockImport}; +use sp_consensus::{Proposal, BlockOrigin, BlockImportParams, StorageChanges, + StateAction, import_queue::BoxBlockImport}; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, Header as HeaderT}, @@ -136,7 +137,9 @@ where let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); import_block.post_digests.push(seal); import_block.body = Some(body); - import_block.storage_changes = Some(build.proposal.storage_changes); + import_block.state_action = StateAction::ApplyChanges( + StorageChanges::Changes(build.proposal.storage_changes) + ); let intermediate = PowIntermediate:: { difficulty: Some(build.metadata.difficulty), diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index 1f2f46af0079e..470448df76f0b 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -373,6 +373,18 @@ impl StateBackend> for BenchmarkingState { } } + fn apply_to_key_values_while, Vec) -> bool>( + &self, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + start_at: Option<&[u8]>, + f: F, + allow_missing: bool, + ) -> Result { + self.state.borrow().as_ref().ok_or_else(state_err)? 
+ .apply_to_key_values_while(child_info, prefix, start_at, f, allow_missing) + } + fn apply_to_keys_while bool>( &self, child_info: Option<&ChildInfo>, diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 38b9d7a7adff4..024f2e5f4e649 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -205,6 +205,17 @@ impl StateBackend> for RefTrackingState { self.state.for_key_values_with_prefix(prefix, f) } + fn apply_to_key_values_while, Vec) -> bool>( + &self, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + start_at: Option<&[u8]>, + f: F, + allow_missing: bool, + ) -> Result { + self.state.apply_to_key_values_while(child_info, prefix, start_at, f, allow_missing) + } + fn apply_to_keys_while bool>( &self, child_info: Option<&ChildInfo>, @@ -387,6 +398,14 @@ impl<'a> sc_state_db::MetaDb for StateMetaDb<'a> { } } +struct MetaUpdate { + pub hash: Block::Hash, + pub number: NumberFor, + pub is_best: bool, + pub is_finalized: bool, + pub with_state: bool, +} + fn cache_header( cache: &mut LinkedHashMap>, hash: Hash, @@ -427,11 +446,9 @@ impl BlockchainDb { fn update_meta( &self, - hash: Block::Hash, - number: ::Number, - is_best: bool, - is_finalized: bool + update: MetaUpdate, ) { + let MetaUpdate { hash, number, is_best, is_finalized, with_state } = update; let mut meta = self.meta.write(); if number.is_zero() { meta.genesis_hash = hash; @@ -444,6 +461,9 @@ impl BlockchainDb { } if is_finalized { + if with_state { + meta.finalized_state = Some((hash.clone(), number)); + } meta.finalized_number = number; meta.finalized_hash = hash; } @@ -484,6 +504,7 @@ impl sc_client_api::blockchain::HeaderBackend for Blockcha genesis_hash: meta.genesis_hash, finalized_hash: meta.finalized_hash, finalized_number: meta.finalized_number, + finalized_state: meta.finalized_state.clone(), number_leaves: self.leaves.read().count(), } } @@ -754,6 +775,42 @@ impl BlockImportOperation { } } } + + fn apply_new_state( + &mut self, + storage: Storage, + ) -> ClientResult { + if storage.top.keys().any(|k| well_known_keys::is_child_storage_key(&k)) { + return Err(sp_blockchain::Error::InvalidState.into()); + } + + let child_delta = storage.children_default.iter().map(|(_storage_key, child_content)|( + &child_content.child_info, + child_content.data.iter().map(|(k, v)| (&k[..], Some(&v[..]))), + )); + + let mut changes_trie_config = None; + let (root, transaction) = self.old_state.full_storage_root( + storage.top.iter().map(|(k, v)| { + if &k[..] 
== well_known_keys::CHANGES_TRIE_CONFIG { + changes_trie_config = Some(Decode::decode(&mut &v[..])); + } + (&k[..], Some(&v[..])) + }), + child_delta + ); + + let changes_trie_config = match changes_trie_config { + Some(Ok(c)) => Some(c), + Some(Err(_)) => return Err(sp_blockchain::Error::InvalidState.into()), + None => None, + }; + + self.db_updates = transaction; + self.changes_trie_config_update = Some(changes_trie_config); + Ok(root) + } + } impl sc_client_api::backend::BlockImportOperation for BlockImportOperation { @@ -796,35 +853,21 @@ impl sc_client_api::backend::BlockImportOperation for Bloc &mut self, storage: Storage, ) -> ClientResult { - if storage.top.keys().any(|k| well_known_keys::is_child_storage_key(&k)) { - return Err(sp_blockchain::Error::GenesisInvalid.into()); - } - - let child_delta = storage.children_default.iter().map(|(_storage_key, child_content)|( - &child_content.child_info, - child_content.data.iter().map(|(k, v)| (&k[..], Some(&v[..]))), - )); - - let mut changes_trie_config: Option = None; - let (root, transaction) = self.old_state.full_storage_root( - storage.top.iter().map(|(k, v)| { - if &k[..] == well_known_keys::CHANGES_TRIE_CONFIG { - changes_trie_config = Some( - Decode::decode(&mut &v[..]) - .expect("changes trie configuration is encoded properly at genesis") - ); - } - (&k[..], Some(&v[..])) - }), - child_delta - ); - - self.db_updates = transaction; - self.changes_trie_config_update = Some(changes_trie_config); + let root = self.apply_new_state(storage)?; self.commit_state = true; Ok(root) } + fn set_genesis_state( + &mut self, + storage: Storage, + commit: bool, + ) -> ClientResult { + let root = self.apply_new_state(storage)?; + self.commit_state = commit; + Ok(root) + } + fn update_changes_trie( &mut self, update: ChangesTrieTransaction, NumberFor>, @@ -907,18 +950,39 @@ impl sc_state_db::NodeDb for StorageDb { } } -struct DbGenesisStorage(pub Block::Hash); +struct DbGenesisStorage { + root: Block::Hash, + storage: PrefixedMemoryDB>, +} impl DbGenesisStorage { + pub fn new(root: Block::Hash, storage: PrefixedMemoryDB>) -> Self { + DbGenesisStorage { + root, + storage, + } + } +} + +impl sp_state_machine::Storage> for DbGenesisStorage { + fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { + use hash_db::HashDB; + Ok(self.storage.get(key, prefix)) + } +} + +struct EmptyStorage(pub Block::Hash); + +impl EmptyStorage { pub fn new() -> Self { let mut root = Block::Hash::default(); let mut mdb = MemoryDB::>::default(); sp_state_machine::TrieDBMut::>::new(&mut mdb, &mut root); - DbGenesisStorage(root) + EmptyStorage(root) } } -impl sp_state_machine::Storage> for DbGenesisStorage { +impl sp_state_machine::Storage> for EmptyStorage { fn get(&self, _key: &Block::Hash, _prefix: Prefix) -> Result, String> { Ok(None) } @@ -980,6 +1044,7 @@ pub struct Backend { transaction_storage: TransactionStorageMode, io_stats: FrozenForDuration<(kvdb::IoStats, StateUsageInfo)>, state_usage: Arc, + genesis_state: RwLock>>>, } impl Backend { @@ -1058,7 +1123,7 @@ impl Backend { }, )?; - Ok(Backend { + let backend = Backend { storage: Arc::new(storage_db), offchain_storage, changes_tries_storage, @@ -1074,7 +1139,24 @@ impl Backend { state_usage: Arc::new(StateUsageStats::new()), keep_blocks: config.keep_blocks.clone(), transaction_storage: config.transaction_storage.clone(), - }) + genesis_state: RwLock::new(None), + }; + + // Older DB versions have no last state key. Check if the state is available and set it. 
+ let info = backend.blockchain.info(); + if info.finalized_state.is_none() + && info.finalized_hash != Default::default() + && sc_client_api::Backend::have_state_at(&backend, &info.finalized_hash, info.finalized_number) + { + backend.blockchain.update_meta(MetaUpdate { + hash: info.finalized_hash, + number: info.finalized_number, + is_best: info.finalized_hash == info.best_hash, + is_finalized: true, + with_state: true, + }); + } + Ok(backend) } /// Handle setting head within a transaction. `route_to` should be the last @@ -1170,10 +1252,11 @@ impl Backend { justification: Option, changes_trie_cache_ops: &mut Option>, finalization_displaced: &mut Option>>, - ) -> ClientResult<(Block::Hash, ::Number, bool, bool)> { + ) -> ClientResult> { // TODO: ensure best chain contains this block. let number = *header.number(); self.ensure_sequential_finalization(header, last_finalized)?; + let with_state = sc_client_api::Backend::have_state_at(self, &hash, number); self.note_finalized( transaction, @@ -1182,6 +1265,7 @@ impl Backend { *hash, changes_trie_cache_ops, finalization_displaced, + with_state, )?; if let Some(justification) = justification { @@ -1191,7 +1275,13 @@ impl Backend { Justifications::from(justification).encode(), ); } - Ok((*hash, number, false, true)) + Ok(MetaUpdate { + hash: *hash, + number, + is_best: false, + is_finalized: true, + with_state, + }) } // performs forced canonicalization with a delay after importing a non-finalized block. @@ -1219,6 +1309,9 @@ impl Backend { )?.expect("existence of block with number `new_canonical` \ implies existence of blocks with all numbers before it; qed") }; + if !sc_client_api::Backend::have_state_at(self, &hash, new_canonical.saturated_into()) { + return Ok(()) + } trace!(target: "db", "Canonicalize block #{} ({:?})", new_canonical, hash); let commit = self.storage.state_db.canonicalize_block(&hash) @@ -1240,12 +1333,13 @@ impl Backend { let mut meta_updates = Vec::with_capacity(operation.finalized_blocks.len()); let mut last_finalized_hash = self.blockchain.meta.read().finalized_hash; + let mut last_finalized_num = self.blockchain.meta.read().finalized_number; + let best_num = self.blockchain.meta.read().best_number; let mut changes_trie_cache_ops = None; for (block, justification) in operation.finalized_blocks { let block_hash = self.blockchain.expect_block_hash_from_id(&block)?; let block_header = self.blockchain.expect_header(BlockId::Hash(block_hash))?; - meta_updates.push(self.finalize_block_with_transaction( &mut transaction, &block_hash, @@ -1256,12 +1350,16 @@ impl Backend { &mut finalization_displaced_leaves, )?); last_finalized_hash = block_hash; + last_finalized_num = block_header.number().clone(); } let imported = if let Some(pending_block) = operation.pending_block { + let hash = pending_block.header.hash(); + let parent_hash = *pending_block.header.parent_hash(); let number = pending_block.header.number().clone(); + let existing_header = number <= best_num && self.blockchain.header(BlockId::hash(hash))?.is_some(); // blocks are keyed by number + hash. 
let lookup_key = utils::number_and_hash_to_lookup_key(number, hash)?; @@ -1296,13 +1394,24 @@ impl Backend { } if number.is_zero() { - transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key); + transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key.clone()); transaction.set(columns::META, meta_keys::GENESIS_HASH, hash.as_ref()); // for tests, because config is set from within the reset_storage if operation.changes_trie_config_update.is_none() { operation.changes_trie_config_update = Some(None); } + + if operation.commit_state { + transaction.set_from_vec(columns::META, meta_keys::FINALIZED_STATE, lookup_key); + } else { + // When we don't want to commit the genesis state, we still preserve it in memory + // to bootstrap consensus. It is queried for an initial list of authorities, etc. + *self.genesis_state.write() = Some(Arc::new(DbGenesisStorage::new( + pending_block.header.state_root().clone(), + operation.db_updates.clone() + ))); + } } let finalized = if operation.commit_state { @@ -1361,79 +1470,111 @@ impl Backend { changeset, ).map_err(|e: sc_state_db::Error| sp_blockchain::Error::from_state_db(e))?; apply_state_commit(&mut transaction, commit); + if number <= last_finalized_num { + // Canonicalize in the db when re-importing existing blocks with state. + let commit = self.storage.state_db.canonicalize_block(&hash) + .map_err(|e: sc_state_db::Error| sp_blockchain::Error::from_state_db(e))?; + apply_state_commit(&mut transaction, commit); + meta_updates.push(MetaUpdate { + hash, + number, + is_best: false, + is_finalized: true, + with_state: true, + }); + } + // Check if need to finalize. Genesis is always finalized instantly. let finalized = number_u64 == 0 || pending_block.leaf_state.is_final(); finalized } else { - false + number.is_zero() || pending_block.leaf_state.is_final() }; let header = &pending_block.header; let is_best = pending_block.leaf_state.is_best(); let changes_trie_updates = operation.changes_trie_updates; - let changes_trie_config_update = operation.changes_trie_config_update; - changes_trie_cache_ops = Some(self.changes_tries_storage.commit( - &mut transaction, - changes_trie_updates, - cache::ComplexBlockId::new( - *header.parent_hash(), - if number.is_zero() { Zero::zero() } else { number - One::one() }, - ), - cache::ComplexBlockId::new(hash, number), - header, - finalized, - changes_trie_config_update, - changes_trie_cache_ops, - )?); - self.state_usage.merge_sm(operation.old_state.usage_info()); - // release state reference so that it can be finalized - let cache = operation.old_state.into_cache_changes(); - - if finalized { - // TODO: ensure best chain contains this block. - self.ensure_sequential_finalization(header, Some(last_finalized_hash))?; - self.note_finalized( + debug!(target: "db", + "DB Commit {:?} ({}), best={}, state={}, existing={}", + hash, number, is_best, operation.commit_state, existing_header, + ); + + if !existing_header { + let changes_trie_config_update = operation.changes_trie_config_update; + changes_trie_cache_ops = Some(self.changes_tries_storage.commit( &mut transaction, - true, + changes_trie_updates, + cache::ComplexBlockId::new( + *header.parent_hash(), + if number.is_zero() { Zero::zero() } else { number - One::one() }, + ), + cache::ComplexBlockId::new(hash, number), header, - hash, - &mut changes_trie_cache_ops, - &mut finalization_displaced_leaves, - )?; - } else { - // canonicalize blocks which are old enough, regardless of finality. 
- self.force_delayed_canonicalize(&mut transaction, hash, *header.number())? - } + finalized, + changes_trie_config_update, + changes_trie_cache_ops, + )?); + + self.state_usage.merge_sm(operation.old_state.usage_info()); + // release state reference so that it can be finalized + let cache = operation.old_state.into_cache_changes(); + + if finalized { + // TODO: ensure best chain contains this block. + self.ensure_sequential_finalization(header, Some(last_finalized_hash))?; + self.note_finalized( + &mut transaction, + true, + header, + hash, + &mut changes_trie_cache_ops, + &mut finalization_displaced_leaves, + operation.commit_state, + )?; + } else { + // canonicalize blocks which are old enough, regardless of finality. + self.force_delayed_canonicalize(&mut transaction, hash, *header.number())? + } - debug!(target: "db", "DB Commit {:?} ({}), best = {}", hash, number, is_best); - let displaced_leaf = { - let mut leaves = self.blockchain.leaves.write(); - let displaced_leaf = leaves.import(hash, number, parent_hash); - leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX); + let displaced_leaf = { + let mut leaves = self.blockchain.leaves.write(); + let displaced_leaf = leaves.import(hash, number, parent_hash); + leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX); - displaced_leaf - }; + displaced_leaf + }; - let mut children = children::read_children( - &*self.storage.db, - columns::META, - meta_keys::CHILDREN_PREFIX, - parent_hash, - )?; - children.push(hash); - children::write_children( - &mut transaction, - columns::META, - meta_keys::CHILDREN_PREFIX, - parent_hash, - children, - ); + let mut children = children::read_children( + &*self.storage.db, + columns::META, + meta_keys::CHILDREN_PREFIX, + parent_hash, + )?; + if !children.contains(&hash) { + children.push(hash); + } + children::write_children( + &mut transaction, + columns::META, + meta_keys::CHILDREN_PREFIX, + parent_hash, + children, + ); - meta_updates.push((hash, number, pending_block.leaf_state.is_best(), finalized)); + meta_updates.push(MetaUpdate { + hash, + number, + is_best: pending_block.leaf_state.is_best(), + is_finalized: finalized, + with_state: operation.commit_state, + }); - Some((pending_block.header, number, hash, enacted, retracted, displaced_leaf, is_best, cache)) + Some((pending_block.header, number, hash, enacted, retracted, displaced_leaf, is_best, cache)) + } else { + None + } } else { None }; @@ -1448,7 +1589,13 @@ impl Backend { hash.clone(), (number.clone(), hash.clone()) )?; - meta_updates.push((hash, *number, true, false)); + meta_updates.push(MetaUpdate { + hash, + number: *number, + is_best: true, + is_finalized: false, + with_state: false, + }); Some((enacted, retracted)) } else { return Err(sp_blockchain::Error::UnknownBlock(format!("Cannot set head {:?}", set_head))) @@ -1472,6 +1619,7 @@ impl Backend { is_best, mut cache, )) = imported { + trace!(target: "db", "DB Commit done {:?}", hash); let header_metadata = CachedHeaderMetadata::from(&header); self.blockchain.insert_header_metadata( header_metadata.hash, @@ -1498,8 +1646,8 @@ impl Backend { self.shared_cache.lock().sync(&enacted, &retracted); } - for (hash, number, is_best, is_finalized) in meta_updates { - self.blockchain.update_meta(hash, number, is_best, is_finalized); + for m in meta_updates { + self.blockchain.update_meta(m); } Ok(()) @@ -1515,29 +1663,35 @@ impl Backend { f_header: &Block::Header, f_hash: Block::Hash, changes_trie_cache_ops: &mut Option>, - displaced: 
&mut Option>> + displaced: &mut Option>>, + with_state: bool, ) -> ClientResult<()> { let f_num = f_header.number().clone(); - if self.storage.state_db.best_canonical().map(|c| f_num.saturated_into::() > c).unwrap_or(true) { - let lookup_key = utils::number_and_hash_to_lookup_key(f_num, f_hash.clone())?; - transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key); + let lookup_key = utils::number_and_hash_to_lookup_key(f_num, f_hash.clone())?; + if with_state { + transaction.set_from_vec(columns::META, meta_keys::FINALIZED_STATE, lookup_key.clone()); + } + transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key); + if sc_client_api::Backend::have_state_at(self, &f_hash, f_num) && + self.storage.state_db.best_canonical().map(|c| f_num.saturated_into::() > c).unwrap_or(true) + { let commit = self.storage.state_db.canonicalize_block(&f_hash) .map_err(|e: sc_state_db::Error| sp_blockchain::Error::from_state_db(e))?; apply_state_commit(transaction, commit); + } - if !f_num.is_zero() { - let new_changes_trie_cache_ops = self.changes_tries_storage.finalize( - transaction, - *f_header.parent_hash(), - f_hash, - f_num, - if is_inserted { Some(&f_header) } else { None }, - changes_trie_cache_ops.take(), - )?; - *changes_trie_cache_ops = Some(new_changes_trie_cache_ops); - } + if !f_num.is_zero() { + let new_changes_trie_cache_ops = self.changes_tries_storage.finalize( + transaction, + *f_header.parent_hash(), + f_hash, + f_num, + if is_inserted { Some(&f_header) } else { None }, + changes_trie_cache_ops.take(), + )?; + *changes_trie_cache_ops = Some(new_changes_trie_cache_ops); } let new_displaced = self.blockchain.leaves.write().finalize_height(f_num); @@ -1628,6 +1782,23 @@ impl Backend { } Ok(()) } + + fn empty_state(&self) -> ClientResult, Block>> { + let root = EmptyStorage::::new().0; // Empty trie + let db_state = DbState::::new(self.storage.clone(), root); + let state = RefTrackingState::new(db_state, self.storage.clone(), None); + let caching_state = CachingState::new( + state, + self.shared_cache.clone(), + None, + ); + Ok(SyncingCachingState::new( + caching_state, + self.state_usage.clone(), + self.blockchain.meta.clone(), + self.import_lock.clone(), + )) + } } @@ -1737,7 +1908,7 @@ impl sc_client_api::backend::Backend for Backend { type OffchainStorage = offchain::LocalStorage; fn begin_operation(&self) -> ClientResult { - let mut old_state = self.state_at(BlockId::Hash(Default::default()))?; + let mut old_state = self.empty_state()?; old_state.disable_syncing(); Ok(BlockImportOperation { @@ -1763,7 +1934,11 @@ impl sc_client_api::backend::Backend for Backend { operation: &mut Self::BlockImportOperation, block: BlockId, ) -> ClientResult<()> { - operation.old_state = self.state_at(block)?; + if block.is_pre_genesis() { + operation.old_state = self.empty_state()?; + } else { + operation.old_state = self.state_at(block)?; + } operation.old_state.disable_syncing(); operation.commit_state = true; @@ -1800,7 +1975,7 @@ impl sc_client_api::backend::Backend for Backend { let mut displaced = None; let mut changes_trie_cache_ops = None; - let (hash, number, is_best, is_finalized) = self.finalize_block_with_transaction( + let m = self.finalize_block_with_transaction( &mut transaction, &hash, &header, @@ -1810,7 +1985,7 @@ impl sc_client_api::backend::Backend for Backend { &mut displaced, )?; self.storage.db.commit(transaction)?; - self.blockchain.update_meta(hash, number, is_best, is_finalized); + self.blockchain.update_meta(m); 
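
A minimal standalone sketch of what `update_meta(m)` does with the `MetaUpdate` values collected above, using simplified stand-in types (plain integers for hashes and numbers); the real `Meta`/`MetaUpdate` definitions live in client/db, and the point here is only the new `finalized_state` marker that records the last finalized block whose full state is in the database.

#[derive(Default, Debug, PartialEq)]
struct Meta {
    best_hash: u64,
    best_number: u32,
    finalized_hash: u64,
    finalized_number: u32,
    finalized_state: Option<(u64, u32)>,
    genesis_hash: u64,
}

// Field names follow the `MetaUpdate` struct added in client/db/src/lib.rs above.
struct MetaUpdate {
    hash: u64,
    number: u32,
    is_best: bool,
    is_finalized: bool,
    with_state: bool,
}

fn update_meta(meta: &mut Meta, update: MetaUpdate) {
    let MetaUpdate { hash, number, is_best, is_finalized, with_state } = update;
    if number == 0 {
        meta.genesis_hash = hash;
    }
    if is_best {
        meta.best_hash = hash;
        meta.best_number = number;
    }
    if is_finalized {
        if with_state {
            // Only blocks imported together with their state move the finalized-state marker.
            meta.finalized_state = Some((hash, number));
        }
        meta.finalized_hash = hash;
        meta.finalized_number = number;
    }
}

fn main() {
    let mut meta = Meta::default();
    update_meta(&mut meta, MetaUpdate { hash: 7, number: 42, is_best: true, is_finalized: true, with_state: true });
    assert_eq!(meta.finalized_state, Some((7, 42)));
    println!("{:?}", meta);
}
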
self.changes_tries_storage.post_commit(changes_trie_cache_ops); Ok(()) } @@ -1967,14 +2142,36 @@ impl sc_client_api::backend::Backend for Backend { meta_keys::FINALIZED_BLOCK, key.clone() ); + reverted_finalized.insert(removed_hash); + if let Some((hash, _)) = self.blockchain.info().finalized_state { + if hash == best_hash { + if !best_number.is_zero() + && self.have_state_at(&prev_hash, best_number - One::one()) + { + let lookup_key = utils::number_and_hash_to_lookup_key( + best_number - One::one(), + prev_hash + )?; + transaction.set_from_vec(columns::META, meta_keys::FINALIZED_STATE, lookup_key); + } else { + transaction.remove(columns::META, meta_keys::FINALIZED_STATE); + } + } + } } transaction.set_from_vec(columns::META, meta_keys::BEST_BLOCK, key); transaction.remove(columns::KEY_LOOKUP, removed.hash().as_ref()); children::remove_children(&mut transaction, columns::META, meta_keys::CHILDREN_PREFIX, best_hash); self.storage.db.commit(transaction)?; self.changes_tries_storage.post_commit(Some(changes_trie_cache_ops)); - self.blockchain.update_meta(best_hash, best_number, true, update_finalized); + self.blockchain.update_meta(MetaUpdate { + hash: best_hash, + number: best_number, + is_best: true, + is_finalized: update_finalized, + with_state: false + }); } None => return Ok(c.saturated_into::>()) } @@ -2061,26 +2258,30 @@ impl sc_client_api::backend::Backend for Backend { fn state_at(&self, block: BlockId) -> ClientResult { use sc_client_api::blockchain::HeaderBackend as BcHeaderBackend; - // special case for genesis initialization - match block { - BlockId::Hash(h) if h == Default::default() => { - let genesis_storage = DbGenesisStorage::::new(); - let root = genesis_storage.0.clone(); - let db_state = DbState::::new(Arc::new(genesis_storage), root); + let is_genesis = match &block { + BlockId::Number(n) if n.is_zero() => true, + BlockId::Hash(h) if h == &self.blockchain.meta.read().genesis_hash => true, + _ => false, + }; + if is_genesis { + if let Some(genesis_state) = &*self.genesis_state.read() { + let root = genesis_state.root.clone(); + let db_state = DbState::::new(genesis_state.clone(), root); let state = RefTrackingState::new(db_state, self.storage.clone(), None); let caching_state = CachingState::new( state, self.shared_cache.clone(), None, ); - return Ok(SyncingCachingState::new( + let mut state = SyncingCachingState::new( caching_state, self.state_usage.clone(), self.blockchain.meta.clone(), self.import_lock.clone(), - )); - }, - _ => {} + ); + state.disable_syncing(); + return Ok(state) + } } let hash = match block { @@ -2305,7 +2506,6 @@ pub(crate) mod tests { let db = Backend::::new_test(2, 0); let hash = { let mut op = db.begin_operation().unwrap(); - db.begin_state_operation(&mut op, BlockId::Hash(Default::default())).unwrap(); let mut header = Header { number: 0, parent_hash: Default::default(), diff --git a/client/db/src/light.rs b/client/db/src/light.rs index bf24197c5b5d9..4e61a9c2ee03d 100644 --- a/client/db/src/light.rs +++ b/client/db/src/light.rs @@ -151,9 +151,14 @@ impl BlockchainHeaderBackend for LightStorage BlockchainInfo { best_hash: meta.best_hash, best_number: meta.best_number, - genesis_hash: meta.genesis_hash, + genesis_hash: meta.genesis_hash.clone(), finalized_hash: meta.finalized_hash, finalized_number: meta.finalized_number, + finalized_state: if meta.finalized_hash != Default::default() { + Some((meta.genesis_hash, Zero::zero())) + } else { + None + }, number_leaves: 1, } } diff --git a/client/db/src/storage_cache.rs 
b/client/db/src/storage_cache.rs index 788e011fb2f05..9934cccd155a1 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -605,6 +605,17 @@ impl>, B: BlockT> StateBackend> for Cachin self.state.exists_child_storage(child_info, key) } + fn apply_to_key_values_while, Vec) -> bool>( + &self, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + start_at: Option<&[u8]>, + f: F, + allow_missing: bool, + ) -> Result { + self.state.apply_to_key_values_while(child_info, prefix, start_at, f, allow_missing) + } + fn apply_to_keys_while bool>( &self, child_info: Option<&ChildInfo>, @@ -788,6 +799,17 @@ impl>, B: BlockT> StateBackend> for Syncin self.caching_state().exists_child_storage(child_info, key) } + fn apply_to_key_values_while, Vec) -> bool>( + &self, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + start_at: Option<&[u8]>, + f: F, + allow_missing: bool, + ) -> Result { + self.caching_state().apply_to_key_values_while(child_info, prefix, start_at, f, allow_missing) + } + fn apply_to_keys_while bool>( &self, child_info: Option<&ChildInfo>, diff --git a/client/db/src/utils.rs b/client/db/src/utils.rs index 7f82cb8489121..bd6dc9841aa63 100644 --- a/client/db/src/utils.rs +++ b/client/db/src/utils.rs @@ -49,6 +49,8 @@ pub mod meta_keys { pub const BEST_BLOCK: &[u8; 4] = b"best"; /// Last finalized block key. pub const FINALIZED_BLOCK: &[u8; 5] = b"final"; + /// Last finalized state key. + pub const FINALIZED_STATE: &[u8; 6] = b"fstate"; /// Meta information prefix for list-based caches. pub const CACHE_META_PREFIX: &[u8; 5] = b"cache"; /// Meta information for changes tries key. @@ -74,6 +76,8 @@ pub struct Meta { pub finalized_number: N, /// Hash of the genesis block. pub genesis_hash: H, + /// Finalized state, if any + pub finalized_state: Option<(H, N)>, } /// A block lookup key: used for canonical lookup from block number to hash @@ -391,6 +395,7 @@ pub fn read_meta(db: &dyn Database, col_header: u32) -> Result< finalized_hash: Default::default(), finalized_number: Zero::zero(), genesis_hash: Default::default(), + finalized_state: None, }), }; @@ -408,12 +413,18 @@ pub fn read_meta(db: &dyn Database, col_header: u32) -> Result< ); Ok((hash, *header.number())) } else { - Ok((genesis_hash.clone(), Zero::zero())) + Ok((Default::default(), Zero::zero())) } }; let (best_hash, best_number) = load_meta_block("best", meta_keys::BEST_BLOCK)?; let (finalized_hash, finalized_number) = load_meta_block("final", meta_keys::FINALIZED_BLOCK)?; + let (finalized_state_hash, finalized_state_number) = load_meta_block("final_state", meta_keys::FINALIZED_STATE)?; + let finalized_state = if finalized_state_hash != Default::default() { + Some((finalized_state_hash, finalized_state_number)) + } else { + None + }; Ok(Meta { best_hash, @@ -421,6 +432,7 @@ pub fn read_meta(db: &dyn Database, col_header: u32) -> Result< finalized_hash, finalized_number, genesis_hash, + finalized_state, }) } diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index 481f38b617eaf..c287cc0b3b896 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -456,7 +456,11 @@ where // early exit if block already in chain, otherwise the check for // authority changes will error when trying to re-import a change block match self.inner.status(BlockId::Hash(hash)) { - Ok(BlockStatus::InChain) => return Ok(ImportResult::AlreadyInChain), + Ok(BlockStatus::InChain) => { + // Strip justifications when re-importing an existing 
block. + let _justifications = block.justifications.take(); + return (&*self.inner).import_block(block, new_cache).await + } Ok(BlockStatus::Unknown) => {}, Err(e) => return Err(ConsensusError::ClientImport(e.to_string())), } diff --git a/client/informant/src/display.rs b/client/informant/src/display.rs index 0caef4e5fbae8..00c2116fac60a 100644 --- a/client/informant/src/display.rs +++ b/client/informant/src/display.rs @@ -93,10 +93,19 @@ impl InformantDisplay { (diff_bytes_inbound, diff_bytes_outbound) }; - let (level, status, target) = match (net_status.sync_state, net_status.best_seen_block) { - (SyncState::Idle, _) => ("💤", "Idle".into(), "".into()), - (SyncState::Downloading, None) => ("⚙️ ", format!("Preparing{}", speed), "".into()), - (SyncState::Downloading, Some(n)) => ( + let (level, status, target) = match ( + net_status.sync_state, + net_status.best_seen_block, + net_status.state_sync + ) { + (_, _, Some(state)) => ( + "⚙️ ", + "Downloading state".into(), + format!(", {}%, ({:.2}) Mib", state.percentage, (state.size as f32) / (1024f32 * 1024f32)), + ), + (SyncState::Idle, _, _) => ("💤", "Idle".into(), "".into()), + (SyncState::Downloading, None, _) => ("⚙️ ", format!("Preparing{}", speed), "".into()), + (SyncState::Downloading, Some(n), None) => ( "⚙️ ", format!("Syncing{}", speed), format!(", target=#{}", n), diff --git a/client/light/src/backend.rs b/client/light/src/backend.rs index a7f1b8e0c1696..3e53d3b81cc77 100644 --- a/client/light/src/backend.rs +++ b/client/light/src/backend.rs @@ -321,7 +321,7 @@ impl BlockImportOperation for ImportOperation Ok(()) } - fn reset_storage(&mut self, input: Storage) -> ClientResult { + fn set_genesis_state(&mut self, input: Storage, commit: bool) -> ClientResult { check_genesis_storage(&input)?; // changes trie configuration @@ -347,11 +347,17 @@ impl BlockImportOperation for ImportOperation let storage_update = InMemoryBackend::from(storage); let (storage_root, _) = storage_update.full_storage_root(std::iter::empty(), child_delta); - self.storage_update = Some(storage_update); + if commit { + self.storage_update = Some(storage_update); + } Ok(storage_root) } + fn reset_storage(&mut self, _input: Storage) -> ClientResult { + Err(ClientError::NotAvailableOnLightClient) + } + fn insert_aux(&mut self, ops: I) -> ClientResult<()> where I: IntoIterator, Option>)> { @@ -461,6 +467,22 @@ impl StateBackend for GenesisOrUnavailableState } } + fn apply_to_key_values_while, Vec) -> bool>( + &self, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + start_at: Option<&[u8]>, + action: A, + allow_missing: bool, + ) -> ClientResult { + match *self { + GenesisOrUnavailableState::Genesis(ref state) => + Ok(state.apply_to_key_values_while(child_info, prefix, start_at, action, allow_missing) + .expect(IN_MEMORY_EXPECT_PROOF)), + GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), + } + } + fn apply_to_keys_while bool>( &self, child_info: Option<&ChildInfo>, diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index 17c38b6f95456..576c49d1da366 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -79,6 +79,11 @@ pub struct Behaviour { /// [`request_responses::RequestResponsesBehaviour`]. #[behaviour(ignore)] block_request_protocol_name: String, + + /// Protocol name used to send out state requests via + /// [`request_responses::RequestResponsesBehaviour`]. + #[behaviour(ignore)] + state_request_protocol_name: String, } /// Event generated by `Behaviour`. 
@@ -186,6 +191,7 @@ impl Behaviour { light_client_request_sender: light_client_requests::sender::LightClientRequestSender, disco_config: DiscoveryConfig, block_request_protocol_config: request_responses::ProtocolConfig, + state_request_protocol_config: request_responses::ProtocolConfig, bitswap: Option>, light_client_request_protocol_config: request_responses::ProtocolConfig, // All remaining request protocol configs. @@ -193,7 +199,9 @@ impl Behaviour { ) -> Result { // Extract protocol name and add to `request_response_protocols`. let block_request_protocol_name = block_request_protocol_config.name.to_string(); + let state_request_protocol_name = state_request_protocol_config.name.to_string(); request_response_protocols.push(block_request_protocol_config); + request_response_protocols.push(state_request_protocol_config); request_response_protocols.push(light_client_request_protocol_config); @@ -206,8 +214,8 @@ impl Behaviour { request_responses::RequestResponsesBehaviour::new(request_response_protocols.into_iter())?, light_client_request_sender, events: VecDeque::new(), - block_request_protocol_name, + state_request_protocol_name, }) } @@ -329,6 +337,21 @@ Behaviour { &target, &self.block_request_protocol_name, buf, pending_response, IfDisconnected::ImmediateError, ); }, + CustomMessageOutcome::StateRequest { target, request, pending_response } => { + let mut buf = Vec::with_capacity(request.encoded_len()); + if let Err(err) = request.encode(&mut buf) { + log::warn!( + target: "sync", + "Failed to encode state request {:?}: {:?}", + request, err + ); + return + } + + self.request_responses.send_request( + &target, &self.state_request_protocol_name, buf, pending_response, IfDisconnected::ImmediateError, + ); + }, CustomMessageOutcome::NotificationStreamOpened { remote, protocol, negotiated_fallback, roles, notifications_sink } => { diff --git a/client/network/src/chain.rs b/client/network/src/chain.rs index 081d4b0d3ac3d..32d4cc9ff024f 100644 --- a/client/network/src/chain.rs +++ b/client/network/src/chain.rs @@ -21,6 +21,7 @@ use sp_blockchain::{Error, HeaderBackend, HeaderMetadata}; use sc_client_api::{BlockBackend, ProofProvider}; use sp_runtime::traits::{Block as BlockT, BlockIdTo}; +pub use sc_client_api::{StorageKey, StorageData, ImportedState}; /// Local client abstraction for the network. pub trait Client: HeaderBackend + ProofProvider + BlockIdTo diff --git a/client/network/src/config.rs b/client/network/src/config.rs index 4942d1b0fb878..36ae1e831b8ce 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -123,6 +123,15 @@ pub struct Params { /// [`crate::light_client_requests::handler::LightClientRequestHandler::new`] allowing /// both outgoing and incoming requests. pub light_client_request_protocol_config: RequestResponseConfig, + + /// Request response configuration for the state request protocol. + /// + /// Can be constructed either via + /// [`crate::state_requests::generate_protocol_config`] allowing outgoing but not + /// incoming requests, or constructed via + /// [`crate::state_requests::handler::StateRequestHandler::new`] allowing + /// both outgoing and incoming requests. + pub state_request_protocol_config: RequestResponseConfig, } /// Role of the local node. @@ -373,6 +382,24 @@ impl From for ParseErr { } } +#[derive(Clone, Debug, Eq, PartialEq)] +/// Sync operation mode. +pub enum SyncMode { + /// Full block download and verification. + Full, + /// Download blocks and the latest state. 
+ Fast { + /// Skip state proof download and verification. + skip_proofs: bool + }, +} + +impl Default for SyncMode { + fn default() -> Self { + SyncMode::Full + } +} + /// Network service configuration. #[derive(Clone, Debug)] pub struct NetworkConfiguration { @@ -400,6 +427,8 @@ pub struct NetworkConfiguration { pub transport: TransportConfig, /// Maximum number of peers to ask the same blocks in parallel. pub max_parallel_downloads: u32, + /// Initial syncing mode. + pub sync_mode: SyncMode, /// True if Kademlia random discovery should be enabled. /// @@ -462,6 +491,7 @@ impl NetworkConfiguration { wasm_external_transport: None, }, max_parallel_downloads: 5, + sync_mode: SyncMode::Full, enable_dht_random_walk: true, allow_non_globals_in_dht: false, kademlia_disjoint_query_paths: false, diff --git a/client/network/src/gossip/tests.rs b/client/network/src/gossip/tests.rs index 19ac002aac869..bdef28f9bebe5 100644 --- a/client/network/src/gossip/tests.rs +++ b/client/network/src/gossip/tests.rs @@ -17,6 +17,7 @@ // along with this program. If not, see . use crate::block_request_handler::BlockRequestHandler; +use crate::state_request_handler::StateRequestHandler; use crate::light_client_requests::handler::LightClientRequestHandler; use crate::gossip::QueuedSender; use crate::{config, Event, NetworkService, NetworkWorker}; @@ -107,6 +108,16 @@ fn build_test_full_node(network_config: config::NetworkConfiguration) protocol_config }; + let state_request_protocol_config = { + let (handler, protocol_config) = StateRequestHandler::new( + &protocol_id, + client.clone(), + 50, + ); + async_std::task::spawn(handler.run().boxed()); + protocol_config + }; + let light_client_request_protocol_config = { let (handler, protocol_config) = LightClientRequestHandler::new( &protocol_id, @@ -131,6 +142,7 @@ fn build_test_full_node(network_config: config::NetworkConfiguration) ), metrics_registry: None, block_request_protocol_config, + state_request_protocol_config, light_client_request_protocol_config, }) .unwrap(); diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index 556e71da23831..11e235bb81ae7 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -260,6 +260,7 @@ mod utils; pub mod block_request_handler; pub mod bitswap; pub mod light_client_requests; +pub mod state_request_handler; pub mod config; pub mod error; pub mod gossip; @@ -268,7 +269,8 @@ pub mod transactions; #[doc(inline)] pub use libp2p::{multiaddr, Multiaddr, PeerId}; -pub use protocol::{event::{DhtEvent, Event, ObservedRole}, sync::SyncState, PeerInfo}; +pub use protocol::{event::{DhtEvent, Event, ObservedRole}, PeerInfo}; +pub use protocol::sync::{SyncState, StateDownloadProgress}; pub use service::{ NetworkService, NetworkWorker, RequestFailure, OutboundFailure, NotificationSender, NotificationSenderReady, IfDisconnected, @@ -321,4 +323,6 @@ pub struct NetworkStatus { pub total_bytes_inbound: u64, /// The total number of bytes sent. pub total_bytes_outbound: u64, + /// State sync in progress. 
+ pub state_sync: Option, } diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index a3a490e097780..b9a189a0f384f 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -22,6 +22,7 @@ use crate::{ error, request_responses::RequestFailure, utils::{interval, LruHashSet}, + schema::v1::StateResponse, }; use bytes::Bytes; @@ -49,7 +50,7 @@ use sp_runtime::{ traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero, CheckedSub}, }; use sp_arithmetic::traits::SaturatedConversion; -use sync::{ChainSync, SyncState}; +use sync::{ChainSync, Status as SyncStatus}; use std::borrow::Cow; use std::convert::TryFrom as _; use std::collections::{HashMap, HashSet, VecDeque}; @@ -179,13 +180,19 @@ pub struct Protocol { block_announce_data_cache: lru::LruCache>, } +#[derive(Debug)] +enum PeerRequest { + Block(message::BlockRequest), + State, +} + /// Peer information #[derive(Debug)] struct Peer { info: PeerInfo, - /// Current block request, if any. Started by emitting [`CustomMessageOutcome::BlockRequest`]. - block_request: Option<( - message::BlockRequest, + /// Current request, if any. Started by emitting [`CustomMessageOutcome::BlockRequest`]. + request: Option<( + PeerRequest, oneshot::Receiver, RequestFailure>>, )>, /// Holds a set of blocks known to this peer. @@ -210,6 +217,21 @@ pub struct ProtocolConfig { pub roles: Roles, /// Maximum number of peers to ask the same blocks in parallel. pub max_parallel_downloads: u32, + /// Enable state sync. + pub sync_mode: config::SyncMode, +} + +impl ProtocolConfig { + fn sync_mode(&self) -> sync::SyncMode { + if self.roles.is_light() { + sync::SyncMode::Light + } else { + match self.sync_mode { + config::SyncMode::Full => sync::SyncMode::Full, + config::SyncMode::Fast { skip_proofs } => sync::SyncMode::LightState { skip_proofs }, + } + } + } } impl Default for ProtocolConfig { @@ -217,6 +239,7 @@ impl Default for ProtocolConfig { ProtocolConfig { roles: Roles::FULL, max_parallel_downloads: 5, + sync_mode: config::SyncMode::Full, } } } @@ -263,12 +286,11 @@ impl Protocol { ) -> error::Result<(Protocol, sc_peerset::PeersetHandle, Vec<(PeerId, Multiaddr)>)> { let info = chain.info(); let sync = ChainSync::new( - config.roles, + config.sync_mode(), chain.clone(), - &info, block_announce_validator, config.max_parallel_downloads, - ); + ).map_err(Box::new)?; let boot_node_ids = { let mut list = HashSet::new(); @@ -454,13 +476,13 @@ impl Protocol { pub fn num_active_peers(&self) -> usize { self.peers .values() - .filter(|p| p.block_request.is_some()) + .filter(|p| p.request.is_some()) .count() } /// Current global sync state. - pub fn sync_state(&self) -> SyncState { - self.sync.status().state + pub fn sync_state(&self) -> SyncStatus { + self.sync.status() } /// Target sync block number. @@ -656,6 +678,27 @@ impl Protocol { } } + /// Must be called in response to a [`CustomMessageOutcome::StateRequest`] being emitted. + /// Must contain the same `PeerId` and request that have been emitted. 
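+	/// An obsolete or invalid response results in the peer being disconnected and reported.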
+ pub fn on_state_response( + &mut self, + peer_id: PeerId, + response: StateResponse, + ) -> CustomMessageOutcome { + match self.sync.on_state_data(&peer_id, response) { + Ok(sync::OnStateData::Import(origin, block)) => + CustomMessageOutcome::BlockImport(origin, vec![block]), + Ok(sync::OnStateData::Request(peer, req)) => { + prepare_state_request::(&mut self.peers, peer, req) + } + Err(sync::BadPeer(id, repu)) => { + self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); + self.peerset_handle.report_peer(id, repu); + CustomMessageOutcome::None + } + } + } + /// Perform time based maintenance. /// /// > **Note**: This method normally doesn't have to be called except for testing purposes. @@ -736,7 +779,7 @@ impl Protocol { best_hash: status.best_hash, best_number: status.best_number }, - block_request: None, + request: None, known_blocks: LruHashSet::new(NonZeroUsize::new(MAX_KNOWN_BLOCKS) .expect("Constant is nonzero")), }; @@ -1137,7 +1180,7 @@ fn prepare_block_request( let (tx, rx) = oneshot::channel(); if let Some(ref mut peer) = peers.get_mut(&who) { - peer.block_request = Some((request.clone(), rx)); + peer.request = Some((PeerRequest::Block(request.clone()), rx)); } let request = crate::schema::v1::BlockRequest { @@ -1161,6 +1204,23 @@ fn prepare_block_request( } } +fn prepare_state_request( + peers: &mut HashMap>, + who: PeerId, + request: crate::schema::v1::StateRequest, +) -> CustomMessageOutcome { + let (tx, rx) = oneshot::channel(); + + if let Some(ref mut peer) = peers.get_mut(&who) { + peer.request = Some((PeerRequest::State, rx)); + } + CustomMessageOutcome::StateRequest { + target: who, + request: request, + pending_response: tx, + } +} + /// Outcome of an incoming custom message. #[derive(Debug)] #[must_use] @@ -1192,6 +1252,12 @@ pub enum CustomMessageOutcome { request: crate::schema::v1::BlockRequest, pending_response: oneshot::Sender, RequestFailure>>, }, + /// A new storage request must be emitted. + StateRequest { + target: PeerId, + request: crate::schema::v1::StateRequest, + pending_response: oneshot::Sender, RequestFailure>>, + }, /// Peer has a reported a new head of chain. PeerNewBest(PeerId, NumberFor), /// Now connected to a new peer for syncing purposes. @@ -1254,27 +1320,54 @@ impl NetworkBehaviour for Protocol { // Check for finished outgoing requests. let mut finished_block_requests = Vec::new(); + let mut finished_state_requests = Vec::new(); for (id, peer) in self.peers.iter_mut() { - if let Peer { block_request: Some((_, pending_response)), .. } = peer { + if let Peer { request: Some((_, pending_response)), .. 
} = peer { match pending_response.poll_unpin(cx) { Poll::Ready(Ok(Ok(resp))) => { - let (req, _) = peer.block_request.take().unwrap(); + let (req, _) = peer.request.take().unwrap(); + match req { + PeerRequest::Block(req) => { + let protobuf_response = match crate::schema::v1::BlockResponse::decode(&resp[..]) { + Ok(proto) => proto, + Err(e) => { + debug!( + target: "sync", + "Failed to decode block response from peer {:?}: {:?}.", + id, + e + ); + self.peerset_handle.report_peer(id.clone(), rep::BAD_MESSAGE); + self.behaviour.disconnect_peer(id, HARDCODED_PEERSETS_SYNC); + continue; + } + }; - let protobuf_response = match crate::schema::v1::BlockResponse::decode(&resp[..]) { - Ok(proto) => proto, - Err(e) => { - debug!(target: "sync", "Failed to decode block request to peer {:?}: {:?}.", id, e); - self.peerset_handle.report_peer(id.clone(), rep::BAD_MESSAGE); - self.behaviour.disconnect_peer(id, HARDCODED_PEERSETS_SYNC); - continue; - } - }; + finished_block_requests.push((id.clone(), req, protobuf_response)); + }, + PeerRequest::State => { + let protobuf_response = match crate::schema::v1::StateResponse::decode(&resp[..]) { + Ok(proto) => proto, + Err(e) => { + debug!( + target: "sync", + "Failed to decode state response from peer {:?}: {:?}.", + id, + e + ); + self.peerset_handle.report_peer(id.clone(), rep::BAD_MESSAGE); + self.behaviour.disconnect_peer(id, HARDCODED_PEERSETS_SYNC); + continue; + } + }; - finished_block_requests.push((id.clone(), req, protobuf_response)); + finished_state_requests.push((id.clone(), protobuf_response)); + }, + } }, Poll::Ready(Ok(Err(e))) => { - peer.block_request.take(); - debug!(target: "sync", "Block request to peer {:?} failed: {:?}.", id, e); + peer.request.take(); + debug!(target: "sync", "Request to peer {:?} failed: {:?}.", id, e); match e { RequestFailure::Network(OutboundFailure::Timeout) => { @@ -1309,10 +1402,10 @@ impl NetworkBehaviour for Protocol { } }, Poll::Ready(Err(oneshot::Canceled)) => { - peer.block_request.take(); + peer.request.take(); trace!( target: "sync", - "Block request to peer {:?} failed due to oneshot being canceled.", + "Request to peer {:?} failed due to oneshot being canceled.", id, ); self.behaviour.disconnect_peer(id, HARDCODED_PEERSETS_SYNC); @@ -1325,6 +1418,10 @@ impl NetworkBehaviour for Protocol { let ev = self.on_block_response(id, req, protobuf_response); self.pending_messages.push_back(ev); } + for (id, protobuf_response) in finished_state_requests { + let ev = self.on_state_response(id, protobuf_response); + self.pending_messages.push_back(ev); + } while let Poll::Ready(Some(())) = self.tick_timeout.poll_next_unpin(cx) { self.tick(); @@ -1334,6 +1431,10 @@ impl NetworkBehaviour for Protocol { let event = prepare_block_request(&mut self.peers, id.clone(), request); self.pending_messages.push_back(event); } + if let Some((id, request)) = self.sync.state_request() { + let event = prepare_state_request(&mut self.peers, id, request); + self.pending_messages.push_back(event); + } for (id, request) in self.sync.justification_requests() { let event = prepare_block_request(&mut self.peers, id, request); self.pending_messages.push_back(event); diff --git a/client/network/src/protocol/sync.rs b/client/network/src/protocol/sync.rs index 7b7ac721b5b47..82df21fe9d044 100644 --- a/client/network/src/protocol/sync.rs +++ b/client/network/src/protocol/sync.rs @@ -31,14 +31,16 @@ use codec::Encode; use blocks::BlockCollection; -use sp_blockchain::{Error as ClientError, Info as BlockchainInfo, HeaderMetadata}; +use 
state::StateSync; +use sp_blockchain::{Error as ClientError, HeaderMetadata}; use sp_consensus::{BlockOrigin, BlockStatus, block_validation::{BlockAnnounceValidator, Validation}, import_queue::{IncomingBlock, BlockImportResult, BlockImportError} }; use crate::protocol::message::{ - self, BlockAnnounce, BlockAttributes, BlockRequest, BlockResponse, Roles, + self, BlockAnnounce, BlockAttributes, BlockRequest, BlockResponse, }; +use crate::schema::v1::{StateResponse, StateRequest}; use either::Either; use extra_requests::ExtraRequests; use libp2p::PeerId; @@ -59,6 +61,7 @@ use futures::{task::Poll, Future, stream::FuturesUnordered, FutureExt, StreamExt mod blocks; mod extra_requests; +mod state; /// Maximum blocks to request in a single packet. const MAX_BLOCKS_TO_REQUEST: usize = 128; @@ -84,6 +87,9 @@ const MAX_CONCURRENT_BLOCK_ANNOUNCE_VALIDATIONS: usize = 256; /// See [`MAX_CONCURRENT_BLOCK_ANNOUNCE_VALIDATIONS`] for more information. const MAX_CONCURRENT_BLOCK_ANNOUNCE_VALIDATIONS_PER_PEER: usize = 4; +/// Pick the state to sync as the latest finalized number minus this. +const STATE_SYNC_FINALITY_THRESHOLD: u32 = 8; + /// We use a heuristic that with a high likelihood, by the time /// `MAJOR_SYNC_BLOCKS` have been imported we'll be on the same /// chain as (or at least closer to) the peer so we want to delay @@ -183,11 +189,8 @@ pub struct ChainSync { best_queued_number: NumberFor, /// The best block hash in our queue of blocks to import best_queued_hash: B::Hash, - /// The role of this node, e.g. light or full - role: Roles, - /// What block attributes we require for this node, usually derived from - /// what role we are, but could be customized - required_block_attributes: message::BlockAttributes, + /// Current mode (full/light) + mode: SyncMode, /// Any extra justification requests. extra_justifications: ExtraRequests, /// A set of hashes of blocks that are being downloaded or have been @@ -209,6 +212,11 @@ pub struct ChainSync { >, /// Stats per peer about the number of concurrent block announce validations. block_announce_validation_per_peer_stats: HashMap, + /// State sync in progress, if any. + state_sync: Option>, + /// Enable importing existing blocks. This is used used after the state download to + /// catch up to the latest state while re-importing blocks. + import_existing: bool, } /// All the data we have about a Peer that we are trying to sync with @@ -281,6 +289,8 @@ pub enum PeerSyncState { DownloadingStale(B::Hash), /// Downloading justification for given block hash. DownloadingJustification(B::Hash), + /// Downloading state. + DownloadingState, } impl PeerSyncState { @@ -298,6 +308,15 @@ pub enum SyncState { Downloading } +/// Reported state download progress. +#[derive(Clone, Eq, PartialEq, Debug)] +pub struct StateDownloadProgress { + /// Estimated download percentage. + pub percentage: u32, + /// Total state size in bytes downloaded so far. + pub size: u64, +} + /// Syncing status and statistics. #[derive(Clone)] pub struct Status { @@ -309,6 +328,8 @@ pub struct Status { pub num_peers: u32, /// Number of blocks queued for import pub queued_blocks: u32, + /// State sync status in progress, if any. + pub state_sync: Option, } /// A peer did not behave as expected and should be reported. @@ -344,6 +365,15 @@ impl OnBlockData { } } +/// Result of [`ChainSync::on_state_data`]. +#[derive(Debug)] +pub enum OnStateData { + /// The block and state that should be imported. + Import(BlockOrigin, IncomingBlock), + /// A new state request needs to be made to the given peer. 
+ Request(PeerId, StateRequest) +} + /// Result of [`ChainSync::poll_block_announce_validation`]. #[derive(Debug, Clone, PartialEq, Eq)] pub enum PollBlockAnnounceValidation { @@ -429,6 +459,20 @@ pub enum OnBlockJustification { } } + +/// Operation mode. +#[derive(Debug, PartialEq, Eq)] +pub enum SyncMode { + // Sync headers only + Light, + // Sync headers and block bodies + Full, + // Sync headers and the last finalied state + LightState { + skip_proofs: bool + }, +} + /// Result of [`ChainSync::has_slot_for_block_announce_validation`]. enum HasSlotForBlockAnnounceValidation { /// Yes, there is a slot for the block announce validation. @@ -442,27 +486,19 @@ enum HasSlotForBlockAnnounceValidation { impl ChainSync { /// Create a new instance. pub fn new( - role: Roles, + mode: SyncMode, client: Arc>, - info: &BlockchainInfo, block_announce_validator: Box + Send>, max_parallel_downloads: u32, - ) -> Self { - let mut required_block_attributes = BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION; - - if role.is_full() { - required_block_attributes |= BlockAttributes::BODY - } - - ChainSync { + ) -> Result { + let mut sync = ChainSync { client, peers: HashMap::new(), blocks: BlockCollection::new(), - best_queued_hash: info.best_hash, - best_queued_number: info.best_number, + best_queued_hash: Default::default(), + best_queued_number: Zero::zero(), extra_justifications: ExtraRequests::new("justification"), - role, - required_block_attributes, + mode, queue_blocks: Default::default(), fork_targets: Default::default(), pending_requests: Default::default(), @@ -471,6 +507,27 @@ impl ChainSync { downloaded_blocks: 0, block_announce_validation: Default::default(), block_announce_validation_per_peer_stats: Default::default(), + state_sync: None, + import_existing: false, + }; + sync.reset_sync_start_point()?; + Ok(sync) + } + + fn required_block_attributes(&self) -> BlockAttributes { + match self.mode { + SyncMode::Full => BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY, + SyncMode::Light => BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION, + SyncMode::LightState { .. } => + BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY, + } + } + + fn skip_execution(&self) -> bool { + match self.mode { + SyncMode::Full => false, + SyncMode::Light => true, + SyncMode::LightState { .. } => true, } } @@ -502,6 +559,7 @@ impl ChainSync { best_seen_block: best_seen, num_peers: self.peers.len() as u32, queued_blocks: self.queue_blocks.len() as u32, + state_sync: self.state_sync.as_ref().map(|s| s.progress()), } } @@ -607,7 +665,7 @@ impl ChainSync { ); self.peers.insert(who.clone(), PeerSync { peer_id: who.clone(), - common_number: best_number, + common_number: std::cmp::min(self.best_queued_number, best_number), best_hash, best_number, state: PeerSyncState::Available, @@ -718,7 +776,7 @@ impl ChainSync { /// Get an iterator over all block requests of all peers. 
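+	/// Returns an empty iterator while a state sync is in progress.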
pub fn block_requests(&mut self) -> impl Iterator)> + '_ { - if self.pending_requests.is_empty() { + if self.pending_requests.is_empty() || self.state_sync.is_some() { return Either::Left(std::iter::empty()) } if self.queue_blocks.len() > MAX_IMPORTING_BLOCKS { @@ -726,10 +784,10 @@ impl ChainSync { return Either::Left(std::iter::empty()) } let major_sync = self.status().state == SyncState::Downloading; + let attrs = self.required_block_attributes(); let blocks = &mut self.blocks; - let attrs = &self.required_block_attributes; let fork_targets = &mut self.fork_targets; - let last_finalized = self.client.info().finalized_number; + let last_finalized = std::cmp::min(self.best_queued_number, self.client.info().finalized_number); let best_queued = self.best_queued_number; let client = &self.client; let queue = &self.queue_blocks; @@ -804,6 +862,28 @@ impl ChainSync { Either::Right(iter) } + /// Get a state request, if any + pub fn state_request(&mut self) -> Option<(PeerId, StateRequest)> { + if let Some(sync) = &self.state_sync { + if sync.is_complete() { + return None; + } + if self.peers.iter().any(|(_, peer)| peer.state == PeerSyncState::DownloadingState) { + // Only one pending state request is allowed. + return None; + } + for (id, peer) in self.peers.iter_mut() { + if peer.state.is_available() && peer.common_number >= sync.target_block_num() { + trace!(target: "sync", "New StateRequest for {}", id); + peer.state = PeerSyncState::DownloadingState; + let request = sync.next_request(); + return Some((id.clone(), request)) + } + } + } + None + } + /// Handle a response from the remote to a block request that we made. /// /// `request` must be the original request that triggered `response`. @@ -848,7 +928,9 @@ impl ChainSync { justifications, origin: block_data.origin, allow_missing_state: true, - import_existing: false, + import_existing: self.import_existing, + skip_execution: self.skip_execution(), + state: None, } }).collect() } @@ -870,7 +952,9 @@ impl ChainSync { justifications, origin: Some(who.clone()), allow_missing_state: true, - import_existing: false, + import_existing: self.import_existing, + skip_execution: self.skip_execution(), + state: None, } }).collect() } @@ -963,10 +1047,11 @@ impl ChainSync { peer.state = PeerSyncState::Available; Vec::new() } - } - - | PeerSyncState::Available - | PeerSyncState::DownloadingJustification(..) => Vec::new() + }, + PeerSyncState::Available + | PeerSyncState::DownloadingJustification(..) + | PeerSyncState::DownloadingState + => Vec::new() } } else { // When request.is_none() this is a block announcement. Just accept blocks. @@ -983,6 +1068,8 @@ impl ChainSync { origin: Some(who.clone()), allow_missing_state: true, import_existing: false, + skip_execution: true, + state: None, } }).collect() } @@ -994,6 +1081,60 @@ impl ChainSync { Ok(self.validate_and_queue_blocks(new_blocks)) } + /// Handle a response from the remote to a state request that we made. + /// + /// Returns next request if any. 
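+	/// A response that fails proof verification or was not requested results in a `BadPeer` error.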
+ pub fn on_state_data( + &mut self, + who: &PeerId, + response: StateResponse, + ) -> Result, BadPeer> { + let import_result = if let Some(sync) = &mut self.state_sync { + debug!( + target: "sync", + "Importing state data from {} with {} keys, {} proof nodes.", + who, + response.entries.len(), + response.proof.len(), + ); + sync.import(response) + } else { + debug!(target: "sync", "Ignored obsolete state response from {}", who); + return Err(BadPeer(who.clone(), rep::NOT_REQUESTED)); + }; + + match import_result { + state::ImportResult::Import(hash, header, state) => { + let origin = if self.status().state != SyncState::Downloading { + BlockOrigin::NetworkBroadcast + } else { + BlockOrigin::NetworkInitialSync + }; + + let block = IncomingBlock { + hash, + header: Some(header), + body: None, + justifications: None, + origin: None, + allow_missing_state: true, + import_existing: true, + skip_execution: self.skip_execution(), + state: Some(state), + }; + debug!(target: "sync", "State sync is complete. Import is queued"); + Ok(OnStateData::Import(origin, block)) + } + state::ImportResult::Continue(request) => { + Ok(OnStateData::Request(who.clone(), request)) + } + state::ImportResult::BadResponse => { + debug!(target: "sync", "Bad state data received from {}", who); + Err(BadPeer(who.clone(), rep::BAD_BLOCK)) + } + } + } + fn validate_and_queue_blocks( &mut self, mut new_blocks: Vec>, @@ -1048,7 +1189,7 @@ impl ChainSync { // We only request one justification at a time let justification = if let Some(block) = response.blocks.into_iter().next() { if hash != block.hash { - info!( + warn!( target: "sync", "💔 Invalid block justification provided by {}: requested: {:?} got: {:?}", who, hash, block.hash ); @@ -1137,7 +1278,7 @@ impl ChainSync { if aux.bad_justification { if let Some(ref peer) = who { - info!("💔 Sent block with bad justification to import"); + warn!("💔 Sent block with bad justification to import"); output.push(Err(BadPeer(peer.clone(), rep::BAD_JUSTIFICATION))); } } @@ -1145,6 +1286,17 @@ impl ChainSync { if let Some(peer) = who.and_then(|p| self.peers.get_mut(&p)) { peer.update_common_number(number); } + let state_sync_complete = self.state_sync.as_ref().map_or(false, |s| s.target() == hash); + if state_sync_complete { + info!( + target: "sync", + "State sync is complete ({} MiB), restarting block sync.", + self.state_sync.as_ref().map_or(0, |s| s.progress().size / (1024 * 1024)), + ); + self.state_sync = None; + self.mode = SyncMode::Full; + output.extend(self.restart()); + } }, Err(BlockImportError::IncompleteHeader(who)) => { if let Some(peer) = who { @@ -1171,7 +1323,7 @@ impl ChainSync { }, Err(BlockImportError::BadBlock(who)) => { if let Some(peer) = who { - info!( + warn!( target: "sync", "💔 Block {:?} received from peer {} has been blacklisted", hash, @@ -1189,6 +1341,7 @@ impl ChainSync { e @ Err(BlockImportError::UnknownParent) | e @ Err(BlockImportError::Other(_)) => { warn!(target: "sync", "💔 Error importing block {:?}: {:?}", hash, e); + self.state_sync = None; output.extend(self.restart()); }, Err(BlockImportError::Cancelled) => {} @@ -1214,6 +1367,29 @@ impl ChainSync { is_descendent_of(&**client, base, block) }); + if let SyncMode::LightState { skip_proofs } = &self.mode { + if self.state_sync.is_none() + && !self.peers.is_empty() + && self.queue_blocks.is_empty() + { + // Finalized a recent block. 
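+				// Start state sync only if the finalized block is within STATE_SYNC_FINALITY_THRESHOLD blocks of the median best block reported by peers.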
+ let mut heads: Vec<_> = self.peers.iter().map(|(_, peer)| peer.best_number).collect(); + heads.sort(); + let median = heads[heads.len() / 2]; + if number + STATE_SYNC_FINALITY_THRESHOLD.saturated_into() >= median { + if let Ok(Some(header)) = self.client.header(BlockId::hash(hash.clone())) { + log::debug!( + target: "sync", + "Starting state sync for #{} ({})", + number, + hash, + ); + self.state_sync = Some(StateSync::new(self.client.clone(), header, *skip_proofs)); + } + } + } + } + if let Err(err) = r { warn!( target: "sync", @@ -1536,7 +1712,7 @@ impl ChainSync { return PollBlockAnnounceValidation::Nothing { is_best, who, announce } } - let requires_additional_data = !self.role.is_light() || !known_parent; + let requires_additional_data = self.mode != SyncMode::Light || !known_parent; if !requires_additional_data { trace!( target: "sync", @@ -1595,6 +1771,8 @@ impl ChainSync { origin: block_data.origin, allow_missing_state: true, import_existing: false, + skip_execution: self.skip_execution(), + state: None, } }).collect(); if !blocks.is_empty() { @@ -1611,9 +1789,9 @@ impl ChainSync { &'a mut self, ) -> impl Iterator), BadPeer>> + 'a { self.blocks.clear(); - let info = self.client.info(); - self.best_queued_hash = info.best_hash; - self.best_queued_number = info.best_number; + if let Err(e) = self.reset_sync_start_point() { + warn!(target: "sync", "💔 Unable to restart sync. :{:?}", e); + } self.pending_requests.set_all(); debug!(target:"sync", "Restarted with {} ({})", self.best_queued_number, self.best_queued_hash); let old_peers = std::mem::take(&mut self.peers); @@ -1624,7 +1802,7 @@ impl ChainSync { match p.state { PeerSyncState::DownloadingJustification(_) => { // We make sure our commmon number is at least something we have. - p.common_number = info.best_number; + p.common_number = self.best_queued_number; self.peers.insert(id, p); return None; } @@ -1640,6 +1818,38 @@ impl ChainSync { }) } + /// Find a block to start sync from. If we sync with state, that's the latest block we have state for. + fn reset_sync_start_point(&mut self) -> Result<(), ClientError> { + let info = self.client.info(); + if matches!(self.mode, SyncMode::LightState {..}) && info.finalized_state.is_some() { + log::warn!( + target: "sync", + "Can't use fast sync mode with a partially synced database. Reverting to full sync mode." + ); + self.mode = SyncMode::Full; + } + self.import_existing = false; + self.best_queued_hash = info.best_hash; + self.best_queued_number = info.best_number; + if self.mode == SyncMode::Full { + if self.client.block_status(&BlockId::hash(info.best_hash))? != BlockStatus::InChainWithState { + self.import_existing = true; + // Latest state is missing, start with the last finalized state or genesis instead. + if let Some((hash, number)) = info.finalized_state { + log::debug!(target: "sync", "Starting from finalized state #{}", number); + self.best_queued_hash = hash; + self.best_queued_number = number; + } else { + log::debug!(target: "sync", "Restarting from genesis"); + self.best_queued_hash = Default::default(); + self.best_queued_number = Zero::zero(); + } + } + } + log::trace!(target: "sync", "Restarted sync at #{} ({:?})", self.best_queued_number, self.best_queued_hash); + Ok(()) + } + /// What is the status of the block corresponding to the given hash? 
fn block_status(&self, hash: &B::Hash) -> Result { if self.queue_blocks.contains(hash) { @@ -1764,7 +1974,7 @@ fn peer_block_request( id: &PeerId, peer: &PeerSync, blocks: &mut BlockCollection, - attrs: &message::BlockAttributes, + attrs: message::BlockAttributes, max_parallel_downloads: u32, finalized: NumberFor, best_num: NumberFor, @@ -1815,7 +2025,7 @@ fn fork_sync_request( targets: &mut HashMap>, best_num: NumberFor, finalized: NumberFor, - attributes: &message::BlockAttributes, + attributes: message::BlockAttributes, check_block: impl Fn(&B::Hash) -> BlockStatus, ) -> Option<(B::Hash, BlockRequest)> { targets.retain(|hash, r| { @@ -1994,17 +2204,15 @@ mod test { // internally we should process the response as the justification not being available. let client = Arc::new(TestClientBuilder::new().build()); - let info = client.info(); let block_announce_validator = Box::new(DefaultBlockAnnounceValidator); let peer_id = PeerId::random(); let mut sync = ChainSync::new( - Roles::AUTHORITY, + SyncMode::Full, client.clone(), - &info, block_announce_validator, 1, - ); + ).unwrap(); let (a1_hash, a1_number) = { let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; @@ -2067,15 +2275,12 @@ mod test { #[test] fn restart_doesnt_affect_peers_downloading_finality_data() { let mut client = Arc::new(TestClientBuilder::new().build()); - let info = client.info(); - let mut sync = ChainSync::new( - Roles::AUTHORITY, + SyncMode::Full, client.clone(), - &info, Box::new(DefaultBlockAnnounceValidator), 1, - ); + ).unwrap(); let peer_id1 = PeerId::random(); let peer_id2 = PeerId::random(); @@ -2242,15 +2447,13 @@ mod test { sp_tracing::try_init_simple(); let mut client = Arc::new(TestClientBuilder::new().build()); - let info = client.info(); let mut sync = ChainSync::new( - Roles::AUTHORITY, + SyncMode::Full, client.clone(), - &info, Box::new(DefaultBlockAnnounceValidator), 5, - ); + ).unwrap(); let peer_id1 = PeerId::random(); let peer_id2 = PeerId::random(); @@ -2359,12 +2562,11 @@ mod test { let info = client.info(); let mut sync = ChainSync::new( - Roles::AUTHORITY, + SyncMode::Full, client.clone(), - &info, Box::new(DefaultBlockAnnounceValidator), 5, - ); + ).unwrap(); let peer_id1 = PeerId::random(); let peer_id2 = PeerId::random(); @@ -2481,12 +2683,11 @@ mod test { let info = client.info(); let mut sync = ChainSync::new( - Roles::AUTHORITY, + SyncMode::Full, client.clone(), - &info, Box::new(DefaultBlockAnnounceValidator), 5, - ); + ).unwrap(); let finalized_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2 - 1].clone(); let just = (*b"TEST", Vec::new()); @@ -2592,15 +2793,12 @@ mod test { .map(|_| build_block(&mut client, None, false)) .collect::>(); - let info = client.info(); - let mut sync = ChainSync::new( - Roles::AUTHORITY, + SyncMode::Full, client.clone(), - &info, Box::new(DefaultBlockAnnounceValidator), 1, - ); + ).unwrap(); let peer_id1 = PeerId::random(); let common_block = blocks[1].clone(); diff --git a/client/network/src/protocol/sync/state.rs b/client/network/src/protocol/sync/state.rs new file mode 100644 index 0000000000000..fc9dfdbb8c376 --- /dev/null +++ b/client/network/src/protocol/sync/state.rs @@ -0,0 +1,187 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::sync::Arc; +use codec::{Encode, Decode}; +use sp_runtime::traits::{Block as BlockT, Header, NumberFor}; +use sc_client_api::StorageProof; +use crate::schema::v1::{StateRequest, StateResponse, StateEntry}; +use crate::chain::{Client, ImportedState}; +use super::StateDownloadProgress; + +/// State sync support. + +/// State sync state machine. Accumulates partial state data until it +/// is ready to be imported. +pub struct StateSync { + target_block: B::Hash, + target_header: B::Header, + target_root: B::Hash, + last_key: Vec, + state: Vec<(Vec, Vec)>, + complete: bool, + client: Arc>, + imported_bytes: u64, + skip_proof: bool, +} + +/// Import state chunk result. +pub enum ImportResult { + /// State is complete and ready for import. + Import(B::Hash, B::Header, ImportedState), + /// Continue dowloading. + Continue(StateRequest), + /// Bad state chunk. + BadResponse, +} + +impl StateSync { + /// Create a new instance. + pub fn new(client: Arc>, target: B::Header, skip_proof: bool) -> Self { + StateSync { + client, + target_block: target.hash(), + target_root: target.state_root().clone(), + target_header: target, + last_key: Vec::default(), + state: Vec::default(), + complete: false, + imported_bytes: 0, + skip_proof, + } + } + + /// Validate and import a state reponse. 
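+	/// Returns the accumulated state ready for import once the download is complete, the next request to send otherwise, or `BadResponse` if validation fails.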
+ pub fn import(&mut self, response: StateResponse) -> ImportResult { + if response.entries.is_empty() && response.proof.is_empty() && !response.complete { + log::debug!( + target: "sync", + "Bad state response", + ); + return ImportResult::BadResponse; + } + if !self.skip_proof && response.proof.is_empty() { + log::debug!( + target: "sync", + "Missing proof", + ); + return ImportResult::BadResponse; + } + let complete = if !self.skip_proof { + log::debug!( + target: "sync", + "Importing state from {} trie nodes", + response.proof.len(), + ); + let proof_size = response.proof.len() as u64; + let proof = match StorageProof::decode(&mut response.proof.as_ref()) { + Ok(proof) => proof, + Err(e) => { + log::debug!(target: "sync", "Error decoding proof: {:?}", e); + return ImportResult::BadResponse; + } + }; + let (values, complete) = match self.client.verify_range_proof( + self.target_root, + proof, + &self.last_key + ) { + Err(e) => { + log::debug!( + target: "sync", + "StateResponse failed proof verification: {:?}", + e, + ); + return ImportResult::BadResponse; + }, + Ok(values) => values, + }; + log::debug!(target: "sync", "Imported with {} keys", values.len()); + + if let Some(last) = values.last().map(|(k, _)| k) { + self.last_key = last.clone(); + } + + for (key, value) in values { + self.imported_bytes += key.len() as u64; + self.state.push((key, value)) + }; + self.imported_bytes += proof_size; + complete + } else { + log::debug!( + target: "sync", + "Importing state from {:?} to {:?}", + response.entries.last().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)), + response.entries.first().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)), + ); + + if let Some(e) = response.entries.last() { + self.last_key = e.key.clone(); + } + for StateEntry { key, value } in response.entries { + self.imported_bytes += (key.len() + value.len()) as u64; + self.state.push((key, value)) + } + response.complete + }; + if complete { + self.complete = true; + ImportResult::Import(self.target_block.clone(), self.target_header.clone(), ImportedState { + block: self.target_block.clone(), + state: std::mem::take(&mut self.state) + }) + } else { + ImportResult::Continue(self.next_request()) + } + } + + /// Produce next state request. + pub fn next_request(&self) -> StateRequest { + StateRequest { + block: self.target_block.encode(), + start: self.last_key.clone(), + no_proof: self.skip_proof, + } + } + + /// Check if the state is complete. + pub fn is_complete(&self) -> bool { + self.complete + } + + /// Returns target block number. + pub fn target_block_num(&self) -> NumberFor { + self.target_header.number().clone() + } + + /// Returns target block hash. + pub fn target(&self) -> B::Hash { + self.target_block.clone() + } + + /// Returns state sync estimated progress. + pub fn progress(&self) -> StateDownloadProgress { + let percent_done = (*self.last_key.get(0).unwrap_or(&0u8) as u32) * 100 / 256; + StateDownloadProgress { + percentage: percent_done, + size: self.imported_bytes, + } + } +} + diff --git a/client/network/src/schema/api.v1.proto b/client/network/src/schema/api.v1.proto index 23d585b05e9cd..a16fdbaebc81b 100644 --- a/client/network/src/schema/api.v1.proto +++ b/client/network/src/schema/api.v1.proto @@ -68,3 +68,28 @@ message BlockData { bytes justifications = 8; // optional } +// Request storage data from a peer. +message StateRequest { + // Block header hash. + bytes block = 1; + // Start from this key. Equivalent to if omitted. 
+ bytes start = 2; // optional + // if 'true' indicates that response should contain raw key-values, rather than proof. + bool no_proof = 3; +} + +message StateResponse { + // A collection of keys-values. Only populated if `no_proof` is `true` + repeated StateEntry entries = 1; + // If `no_proof` is false in request, this contains proof nodes. + bytes proof = 2; + // Set to true when there are no more keys to return. + bool complete = 3; +} + +// A key-value pair +message StateEntry { + bytes key = 1; + bytes value = 2; +} + diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 666108363f640..0bc28288501a4 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -48,7 +48,7 @@ use crate::{ Protocol, Ready, event::Event, - sync::SyncState, + sync::{SyncState, Status as SyncStatus}, }, transactions, transport, ReputationChange, @@ -196,6 +196,7 @@ impl NetworkWorker { protocol::ProtocolConfig { roles: From::from(¶ms.role), max_parallel_downloads: params.network_config.max_parallel_downloads, + sync_mode: params.network_config.sync_mode.clone(), }, params.chain.clone(), params.protocol_id.clone(), @@ -331,7 +332,7 @@ impl NetworkWorker { }; let behaviour = { - let bitswap = if params.network_config.ipfs_server { Some(Bitswap::new(client)) } else { None }; + let bitswap = params.network_config.ipfs_server.then(|| Bitswap::new(client)); let result = Behaviour::new( protocol, user_agent, @@ -339,6 +340,7 @@ impl NetworkWorker { light_client_request_sender, discovery_config, params.block_request_protocol_config, + params.state_request_protocol_config, bitswap, params.light_client_request_protocol_config, params.network_config.request_response_protocols, @@ -442,14 +444,16 @@ impl NetworkWorker { /// High-level network status information. pub fn status(&self) -> NetworkStatus { + let status = self.sync_state(); NetworkStatus { - sync_state: self.sync_state(), + sync_state: status.state, best_seen_block: self.best_seen_block(), num_sync_peers: self.num_sync_peers(), num_connected_peers: self.num_connected_peers(), num_active_peers: self.num_active_peers(), total_bytes_inbound: self.total_bytes_inbound(), total_bytes_outbound: self.total_bytes_outbound(), + state_sync: status.state_sync, } } @@ -474,7 +478,7 @@ impl NetworkWorker { } /// Current global sync state. 
- pub fn sync_state(&self) -> SyncState { + pub fn sync_state(&self) -> SyncStatus { self.network_service.behaviour().user_protocol().sync_state() } @@ -1869,7 +1873,7 @@ impl Future for NetworkWorker { *this.external_addresses.lock() = external_addresses; } - let is_major_syncing = match this.network_service.behaviour_mut().user_protocol_mut().sync_state() { + let is_major_syncing = match this.network_service.behaviour_mut().user_protocol_mut().sync_state().state { SyncState::Idle => false, SyncState::Downloading => true, }; diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index 4e5bba8f7d33f..c2e3844849f5c 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -18,6 +18,7 @@ use crate::{config, Event, NetworkService, NetworkWorker}; use crate::block_request_handler::BlockRequestHandler; +use crate::state_request_handler::StateRequestHandler; use crate::light_client_requests::handler::LightClientRequestHandler; use libp2p::PeerId; @@ -107,6 +108,16 @@ fn build_test_full_node(config: config::NetworkConfiguration) protocol_config }; + let state_request_protocol_config = { + let (handler, protocol_config) = StateRequestHandler::new( + &protocol_id, + client.clone(), + 50, + ); + async_std::task::spawn(handler.run().boxed()); + protocol_config + }; + let light_client_request_protocol_config = { let (handler, protocol_config) = LightClientRequestHandler::new( &protocol_id, @@ -131,6 +142,7 @@ fn build_test_full_node(config: config::NetworkConfiguration) ), metrics_registry: None, block_request_protocol_config, + state_request_protocol_config, light_client_request_protocol_config, }) .unwrap(); diff --git a/client/network/src/state_request_handler.rs b/client/network/src/state_request_handler.rs new file mode 100644 index 0000000000000..bf47b412f46d5 --- /dev/null +++ b/client/network/src/state_request_handler.rs @@ -0,0 +1,246 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Helper for handling (i.e. answering) state requests from a remote peer via the +//! [`crate::request_responses::RequestResponsesBehaviour`]. + +use codec::{Encode, Decode}; +use crate::chain::Client; +use crate::config::ProtocolId; +use crate::request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig}; +use crate::schema::v1::{StateResponse, StateRequest, StateEntry}; +use crate::{PeerId, ReputationChange}; +use futures::channel::{mpsc, oneshot}; +use futures::stream::StreamExt; +use log::debug; +use lru::LruCache; +use prost::Message; +use sp_runtime::generic::BlockId; +use sp_runtime::traits::Block as BlockT; +use std::sync::Arc; +use std::time::Duration; +use std::hash::{Hasher, Hash}; + +const LOG_TARGET: &str = "sync"; +const MAX_RESPONSE_BYTES: usize = 2 * 1024 * 1024; // Actual reponse may be bigger. 
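+// A peer repeating the same request more than this many times after it has been fulfilled is reported.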
+const MAX_NUMBER_OF_SAME_REQUESTS_PER_PEER: usize = 2; + +mod rep { + use super::ReputationChange as Rep; + + /// Reputation change when a peer sent us the same request multiple times. + pub const SAME_REQUEST: Rep = Rep::new(i32::min_value(), "Same state request multiple times"); +} + +/// Generates a [`ProtocolConfig`] for the block request protocol, refusing incoming requests. +pub fn generate_protocol_config(protocol_id: &ProtocolId) -> ProtocolConfig { + ProtocolConfig { + name: generate_protocol_name(protocol_id).into(), + max_request_size: 1024 * 1024, + max_response_size: 16 * 1024 * 1024, + request_timeout: Duration::from_secs(40), + inbound_queue: None, + } +} + +/// Generate the state protocol name from chain specific protocol identifier. +fn generate_protocol_name(protocol_id: &ProtocolId) -> String { + let mut s = String::new(); + s.push_str("/"); + s.push_str(protocol_id.as_ref()); + s.push_str("/state/1"); + s +} + +/// The key of [`BlockRequestHandler::seen_requests`]. +#[derive(Eq, PartialEq, Clone)] +struct SeenRequestsKey { + peer: PeerId, + block: B::Hash, + start: Vec, +} + +impl Hash for SeenRequestsKey { + fn hash(&self, state: &mut H) { + self.peer.hash(state); + self.block.hash(state); + self.start.hash(state); + } +} + +/// The value of [`StateRequestHandler::seen_requests`]. +enum SeenRequestsValue { + /// First time we have seen the request. + First, + /// We have fulfilled the request `n` times. + Fulfilled(usize), +} + +/// Handler for incoming block requests from a remote peer. +pub struct StateRequestHandler { + client: Arc>, + request_receiver: mpsc::Receiver, + /// Maps from request to number of times we have seen this request. + /// + /// This is used to check if a peer is spamming us with the same request. + seen_requests: LruCache, SeenRequestsValue>, +} + +impl StateRequestHandler { + /// Create a new [`StateRequestHandler`]. + pub fn new( + protocol_id: &ProtocolId, + client: Arc>, + num_peer_hint: usize, + ) -> (Self, ProtocolConfig) { + // Reserve enough request slots for one request per peer when we are at the maximum + // number of peers. + let (tx, request_receiver) = mpsc::channel(num_peer_hint); + + let mut protocol_config = generate_protocol_config(protocol_id); + protocol_config.inbound_queue = Some(tx); + + let seen_requests = LruCache::new(num_peer_hint * 2); + + (Self { client, request_receiver, seen_requests }, protocol_config) + } + + /// Run [`StateRequestHandler`]. 
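+	/// Processes incoming state requests until the request channel is closed.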
+ pub async fn run(mut self) { + while let Some(request) = self.request_receiver.next().await { + let IncomingRequest { peer, payload, pending_response } = request; + + match self.handle_request(payload, pending_response, &peer) { + Ok(()) => debug!(target: LOG_TARGET, "Handled block request from {}.", peer), + Err(e) => debug!( + target: LOG_TARGET, + "Failed to handle state request from {}: {}", + peer, + e, + ), + } + } + } + + fn handle_request( + &mut self, + payload: Vec, + pending_response: oneshot::Sender, + peer: &PeerId, + ) -> Result<(), HandleRequestError> { + let request = StateRequest::decode(&payload[..])?; + let block: B::Hash = Decode::decode(&mut request.block.as_ref())?; + + let key = SeenRequestsKey { + peer: *peer, + block: block.clone(), + start: request.start.clone(), + }; + + let mut reputation_changes = Vec::new(); + + match self.seen_requests.get_mut(&key) { + Some(SeenRequestsValue::First) => {}, + Some(SeenRequestsValue::Fulfilled(ref mut requests)) => { + *requests = requests.saturating_add(1); + + if *requests > MAX_NUMBER_OF_SAME_REQUESTS_PER_PEER { + reputation_changes.push(rep::SAME_REQUEST); + } + }, + None => { + self.seen_requests.put(key.clone(), SeenRequestsValue::First); + } + } + + log::trace!( + target: LOG_TARGET, + "Handling state request from {}: Block {:?}, Starting at {:?}, no_proof={}", + peer, + request.block, + sp_core::hexdisplay::HexDisplay::from(&request.start), + request.no_proof, + ); + + let result = if reputation_changes.is_empty() { + let mut response = StateResponse::default(); + + if !request.no_proof { + let (proof, count) = self.client.read_proof_collection( + &BlockId::hash(block), + &request.start, + MAX_RESPONSE_BYTES, + )?; + response.proof = proof.encode(); + if count == 0 { + response.complete = true; + } + } else { + let entries = self.client.storage_collection( + &BlockId::hash(block), + &request.start, + MAX_RESPONSE_BYTES, + )?; + response.entries = entries.into_iter().map(|(key, value)| StateEntry { key, value }).collect(); + if response.entries.is_empty() { + response.complete = true; + } + } + + log::trace!( + target: LOG_TARGET, + "StateResponse contains {} keys, {}, proof nodes, complete={}, from {:?} to {:?}", + response.entries.len(), + response.proof.len(), + response.complete, + response.entries.first().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)), + response.entries.last().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)), + ); + if let Some(value) = self.seen_requests.get_mut(&key) { + // If this is the first time we have processed this request, we need to change + // it to `Fulfilled`. 
+ if let SeenRequestsValue::First = value { + *value = SeenRequestsValue::Fulfilled(1); + } + } + + let mut data = Vec::with_capacity(response.encoded_len()); + response.encode(&mut data)?; + Ok(data) + } else { + Err(()) + }; + + pending_response.send(OutgoingResponse { + result, + reputation_changes, + sent_feedback: None, + }).map_err(|_| HandleRequestError::SendResponse) + } +} + +#[derive(derive_more::Display, derive_more::From)] +enum HandleRequestError { + #[display(fmt = "Failed to decode request: {}.", _0)] + DecodeProto(prost::DecodeError), + #[display(fmt = "Failed to encode response: {}.", _0)] + EncodeProto(prost::EncodeError), + #[display(fmt = "Failed to decode block hash: {}.", _0)] + InvalidHash(codec::Error), + Client(sp_blockchain::Error), + #[display(fmt = "Failed to send response.")] + SendResponse, +} diff --git a/client/network/test/src/block_import.rs b/client/network/test/src/block_import.rs index b3641d4b41214..05169aba8d730 100644 --- a/client/network/test/src/block_import.rs +++ b/client/network/test/src/block_import.rs @@ -46,6 +46,8 @@ fn prepare_good_block() -> (TestClient, Hash, u64, PeerId, IncomingBlock) origin: Some(peer_id.clone()), allow_missing_state: false, import_existing: false, + state: None, + skip_execution: false, }) } diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index f55444f8cf121..b6e8f897bb809 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -29,6 +29,7 @@ use std::{ use libp2p::build_multiaddr; use log::trace; use sc_network::block_request_handler::{self, BlockRequestHandler}; +use sc_network::state_request_handler::{self, StateRequestHandler}; use sc_network::light_client_requests::{self, handler::LightClientRequestHandler}; use sp_blockchain::{ HeaderBackend, Result as ClientResult, @@ -55,7 +56,7 @@ use sc_network::{ NetworkWorker, NetworkService, config::{ProtocolId, MultiaddrWithPeerId, NonReservedPeerMode}, Multiaddr, }; -use sc_network::config::{NetworkConfiguration, NonDefaultSetConfig, TransportConfig}; +use sc_network::config::{NetworkConfiguration, NonDefaultSetConfig, TransportConfig, SyncMode}; use libp2p::PeerId; use parking_lot::Mutex; use sp_core::H256; @@ -179,6 +180,19 @@ impl PeersClient { } } + pub fn has_state_at(&self, block: &BlockId) -> bool { + let header = match self.header(block).unwrap() { + Some(header) => header, + None => return false, + }; + match self { + PeersClient::Full(_client, backend) => + backend.have_state_at(&header.hash(), *header.number()), + PeersClient::Light(_client, backend) => + backend.have_state_at(&header.hash(), *header.number()), + } + } + pub fn justifications(&self, block: &BlockId) -> ClientResult> { match *self { PeersClient::Full(ref client, ref _backend) => client.justifications(block), @@ -235,9 +249,9 @@ impl BlockImport for PeersClient { ) -> Result { match self { PeersClient::Full(client, _) => - client.import_block(block.convert_transaction(), cache).await, + client.import_block(block.clear_storage_changes_and_mutate(), cache).await, PeersClient::Light(client, _) => - client.import_block(block.convert_transaction(), cache).await, + client.import_block(block.clear_storage_changes_and_mutate(), cache).await, } } } @@ -584,7 +598,7 @@ impl BlockImport for BlockImportAdapter where block: BlockImportParams, cache: HashMap>, ) -> Result { - self.inner.import_block(block.convert_transaction(), cache).await + self.inner.import_block(block.clear_storage_changes_and_mutate(), cache).await } } @@ -644,6 +658,8 @@ pub 
struct FullPeerConfig { pub connect_to_peers: Option>, /// Whether the full peer should have the authority role. pub is_authority: bool, + /// Syncing mode + pub sync_mode: SyncMode, } pub trait TestNetFactory: Sized where >::Transaction: Send { @@ -699,10 +715,13 @@ pub trait TestNetFactory: Sized where >: /// Add a full peer. fn add_full_peer_with_config(&mut self, config: FullPeerConfig) { - let test_client_builder = match config.keep_blocks { + let mut test_client_builder = match config.keep_blocks { Some(keep_blocks) => TestClientBuilder::with_pruning_window(keep_blocks), None => TestClientBuilder::with_default_backend(), }; + if matches!(config.sync_mode, SyncMode::Fast{..}) { + test_client_builder = test_client_builder.set_no_genesis(); + } let backend = test_client_builder.backend(); let (c, longest_chain) = test_client_builder.build_with_longest_chain(); let client = Arc::new(c); @@ -736,6 +755,7 @@ pub trait TestNetFactory: Sized where >: Default::default(), None, ); + network_config.sync_mode = config.sync_mode; network_config.transport = TransportConfig::MemoryOnly; network_config.listen_addresses = vec![listen_addr.clone()]; network_config.allow_non_globals_in_dht = true; @@ -769,6 +789,16 @@ pub trait TestNetFactory: Sized where >: protocol_config }; + let state_request_protocol_config = { + let (handler, protocol_config) = StateRequestHandler::new( + &protocol_id, + client.clone(), + 50, + ); + self.spawn_task(handler.run().boxed()); + protocol_config + }; + let light_client_request_protocol_config = { let (handler, protocol_config) = LightClientRequestHandler::new(&protocol_id, client.clone()); self.spawn_task(handler.run().boxed()); @@ -789,6 +819,7 @@ pub trait TestNetFactory: Sized where >: .unwrap_or_else(|| Box::new(DefaultBlockAnnounceValidator)), metrics_registry: None, block_request_protocol_config, + state_request_protocol_config, light_client_request_protocol_config, }).unwrap(); @@ -862,6 +893,9 @@ pub trait TestNetFactory: Sized where >: let block_request_protocol_config = block_request_handler::generate_protocol_config( &protocol_id, ); + let state_request_protocol_config = state_request_handler::generate_protocol_config( + &protocol_id, + ); let light_client_request_protocol_config = light_client_requests::generate_protocol_config(&protocol_id); @@ -879,6 +913,7 @@ pub trait TestNetFactory: Sized where >: block_announce_validator: Box::new(DefaultBlockAnnounceValidator), metrics_registry: None, block_request_protocol_config, + state_request_protocol_config, light_client_request_protocol_config, }).unwrap(); diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index 553a769ec14a4..56cec7e4cdfd9 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -1087,3 +1087,43 @@ fn syncs_after_missing_announcement() { net.block_until_sync(); assert!(net.peer(1).client().header(&BlockId::Hash(final_block)).unwrap().is_some()); } + +#[test] +fn syncs_state() { + sp_tracing::try_init_simple(); + for skip_proofs in &[ false, true ] { + let mut net = TestNet::new(0); + net.add_full_peer_with_config(Default::default()); + net.add_full_peer_with_config(FullPeerConfig { + sync_mode: SyncMode::Fast { skip_proofs: *skip_proofs }, + ..Default::default() + }); + net.peer(0).push_blocks(64, false); + // Wait for peer 1 to sync header chain. 
+ net.block_until_sync(); + assert!(!net.peer(1).client().has_state_at(&BlockId::Number(64))); + + let just = (*b"FRNK", Vec::new()); + net.peer(1).client().finalize_block(BlockId::Number(60), Some(just), true).unwrap(); + // Wait for state sync. + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if net.peer(1).client.info().finalized_state.is_some() { + Poll::Ready(()) + } else { + Poll::Pending + } + })); + assert!(!net.peer(1).client().has_state_at(&BlockId::Number(64))); + // Wait for the rest of the states to be imported. + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if net.peer(1).client().has_state_at(&BlockId::Number(64)) { + Poll::Ready(()) + } else { + Poll::Pending + } + })); + } +} + diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 6a98cf82f3e55..a90efb02dc5f2 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -55,6 +55,7 @@ sp-state-machine = { version = "0.9.0", path = "../../primitives/state-machine" sp-application-crypto = { version = "3.0.0", path = "../../primitives/application-crypto" } sp-consensus = { version = "0.9.0", path = "../../primitives/consensus/common" } sp-inherents = { version = "3.0.0", path = "../../primitives/inherents" } +sp-storage = { version = "3.0.0", path = "../../primitives/storage" } sc-network = { version = "0.9.0", path = "../network" } sc-chain-spec = { version = "3.0.0", path = "../chain-spec" } sc-light = { version = "3.0.0", path = "../light" } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index ca22322798463..b0bffc3c4e12d 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -43,6 +43,7 @@ use log::info; use sc_network::config::{Role, OnDemand}; use sc_network::NetworkService; use sc_network::block_request_handler::{self, BlockRequestHandler}; +use sc_network::state_request_handler::{self, StateRequestHandler}; use sc_network::light_client_requests::{self, handler::LightClientRequestHandler}; use sp_runtime::generic::BlockId; use sp_runtime::traits::{ @@ -70,7 +71,7 @@ use sp_keystore::{CryptoStore, SyncCryptoStore, SyncCryptoStorePtr}; use sp_runtime::BuildStorage; use sc_client_api::{ BlockBackend, BlockchainEvents, - backend::StorageProvider, + StorageProvider, proof_provider::ProofProvider, execution_extensions::ExecutionExtensions }; @@ -377,6 +378,7 @@ pub fn new_full_parts( offchain_worker_enabled : config.offchain_worker.enabled, offchain_indexing_api: config.offchain_worker.indexing_enabled, wasm_runtime_overrides: config.wasm_runtime_overrides.clone(), + no_genesis: matches!(config.network.sync_mode, sc_network::config::SyncMode::Fast {..}), wasm_runtime_substitutes, }, )?; @@ -912,6 +914,23 @@ pub fn build_network( } }; + let state_request_protocol_config = { + if matches!(config.role, Role::Light) { + // Allow outgoing requests but deny incoming requests. + state_request_handler::generate_protocol_config(&protocol_id) + } else { + // Allow both outgoing and incoming requests. + let (handler, protocol_config) = StateRequestHandler::new( + &protocol_id, + client.clone(), + config.network.default_peers_set.in_peers as usize + + config.network.default_peers_set.out_peers as usize, + ); + spawn_handle.spawn("state_request_handler", handler.run()); + protocol_config + } + }; + let light_client_request_protocol_config = { if matches!(config.role, Role::Light) { // Allow outgoing requests but deny incoming requests. 
@@ -950,6 +969,7 @@ pub fn build_network( block_announce_validator, metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()), block_request_protocol_config, + state_request_protocol_config, light_client_request_protocol_config, }; diff --git a/client/service/src/chain_ops/import_blocks.rs b/client/service/src/chain_ops/import_blocks.rs index defa4128702a8..90bcc94cb8996 100644 --- a/client/service/src/chain_ops/import_blocks.rs +++ b/client/service/src/chain_ops/import_blocks.rs @@ -172,6 +172,8 @@ fn import_block_to_queue( origin: None, allow_missing_state: false, import_existing: force, + state: None, + skip_execution: false, } ]); } diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 06d9aec4e4fd3..4a998a12d2b7f 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -52,11 +52,12 @@ use sp_state_machine::{ DBValue, Backend as StateBackend, ChangesTrieAnchorBlockId, prove_read, prove_child_read, ChangesTrieRootsStorage, ChangesTrieStorage, ChangesTrieConfigurationRange, key_changes, key_changes_proof, + prove_range_read_with_size, read_range_proof_check, }; use sc_executor::RuntimeVersion; use sp_consensus::{ Error as ConsensusError, BlockStatus, BlockImportParams, BlockCheckParams, - ImportResult, BlockOrigin, ForkChoiceStrategy, + ImportResult, BlockOrigin, ForkChoiceStrategy, StateAction, }; use sp_blockchain::{ self as blockchain, @@ -86,7 +87,7 @@ use sc_client_api::{ execution_extensions::ExecutionExtensions, notifications::{StorageNotifications, StorageEventStream}, KeyIterator, CallExecutor, ExecutorProvider, ProofProvider, - cht, UsageProvider + cht, UsageProvider, }; use sp_utils::mpsc::{TracingUnboundedSender, tracing_unbounded}; use sp_blockchain::Error; @@ -150,6 +151,11 @@ impl PrePostHeader { } } +enum PrepareStorageChangesResult, Block: BlockT> { + Discard(ImportResult), + Import(Option>>), +} + /// Create an instance of in-memory client. #[cfg(feature="test-helpers")] pub fn new_in_mem( @@ -191,6 +197,8 @@ pub struct ClientConfig { pub offchain_indexing_api: bool, /// Path where WASM files exist to override the on-chain WASM. pub wasm_runtime_overrides: Option, + /// Skip writing genesis state on first start. + pub no_genesis: bool, /// Map of WASM runtime substitute starting at the child of the given block until the runtime /// version doesn't match anymore. 
pub wasm_runtime_substitutes: HashMap>, @@ -202,6 +210,7 @@ impl Default for ClientConfig { offchain_worker_enabled: false, offchain_indexing_api: false, wasm_runtime_overrides: None, + no_genesis: false, wasm_runtime_substitutes: HashMap::new(), } } @@ -324,22 +333,29 @@ impl Client where telemetry: Option, config: ClientConfig, ) -> sp_blockchain::Result { - if backend.blockchain().header(BlockId::Number(Zero::zero()))?.is_none() { + let info = backend.blockchain().info(); + if info.finalized_state.is_none() { let genesis_storage = build_genesis_storage.build_storage() .map_err(sp_blockchain::Error::Storage)?; let mut op = backend.begin_operation()?; - backend.begin_state_operation(&mut op, BlockId::Hash(Default::default()))?; - let state_root = op.reset_storage(genesis_storage)?; + let state_root = op.set_genesis_state(genesis_storage, !config.no_genesis)?; let genesis_block = genesis::construct_genesis_block::(state_root.into()); info!("🔨 Initializing Genesis block/state (state: {}, header-hash: {})", genesis_block.header().state_root(), genesis_block.header().hash() ); + // Genesis may be written after some blocks have been imported and finalized. + // So we only finalize it when the database is empty. + let block_state = if info.best_hash == Default::default() { + NewBlockState::Final + } else { + NewBlockState::Normal + }; op.set_block_data( genesis_block.deconstruct().0, Some(vec![]), None, - NewBlockState::Final + block_state, )?; backend.commit_operation(op)?; } @@ -629,6 +645,7 @@ impl Client where operation: &mut ClientImportOperation, import_block: BlockImportParams>, new_cache: HashMap>, + storage_changes: Option>>, ) -> sp_blockchain::Result where Self: ProvideRuntimeApi, >::Api: CoreApi + @@ -640,7 +657,6 @@ impl Client where justifications, post_digests, body, - storage_changes, finalized, auxiliary, fork_choice, @@ -718,7 +734,7 @@ impl Client where import_headers: PrePostHeader, justifications: Option, body: Option>, - storage_changes: Option, Block>>, + storage_changes: Option>>, new_cache: HashMap>, finalized: bool, aux: Vec<(Vec, Option>)>, @@ -735,15 +751,16 @@ impl Client where (false, blockchain::BlockStatus::InChain) => return Ok(ImportResult::AlreadyInChain), (false, blockchain::BlockStatus::Unknown) => {}, (true, blockchain::BlockStatus::InChain) => {}, - (true, blockchain::BlockStatus::Unknown) => - return Err(Error::UnknownBlock(format!("{:?}", hash))), + (true, blockchain::BlockStatus::Unknown) => {}, } let info = self.backend.blockchain().info(); // the block is lower than our last finalized block so it must revert // finality, refusing import. 
- if *import_headers.post().number() <= info.finalized_number { + if status == blockchain::BlockStatus::Unknown + && *import_headers.post().number() <= info.finalized_number + { return Err(sp_blockchain::Error::NotInFinalizedChain); } @@ -757,7 +774,48 @@ impl Client where let storage_changes = match storage_changes { Some(storage_changes) => { - self.backend.begin_state_operation(&mut operation.op, BlockId::Hash(parent_hash))?; + let storage_changes = match storage_changes { + sp_consensus::StorageChanges::Changes(storage_changes) => { + self.backend.begin_state_operation(&mut operation.op, BlockId::Hash(parent_hash))?; + let ( + main_sc, + child_sc, + offchain_sc, + tx, _, + changes_trie_tx, + tx_index, + ) = storage_changes.into_inner(); + + if self.config.offchain_indexing_api { + operation.op.update_offchain_storage(offchain_sc)?; + } + + operation.op.update_db_storage(tx)?; + operation.op.update_storage(main_sc.clone(), child_sc.clone())?; + operation.op.update_transaction_index(tx_index)?; + + if let Some(changes_trie_transaction) = changes_trie_tx { + operation.op.update_changes_trie(changes_trie_transaction)?; + } + + Some((main_sc, child_sc)) + } + sp_consensus::StorageChanges::Import(changes) => { + let storage = sp_storage::Storage { + top: changes.state.into_iter().collect(), + children_default: Default::default(), + }; + + let state_root = operation.op.reset_storage(storage)?; + if state_root != *import_headers.post().state_root() { + // State root mismatch when importing state. This should not happen in safe fast sync mode, + // but may happen in unsafe mode. + warn!("Error imporing state: State root mismatch."); + return Err(Error::InvalidStateRoot); + } + None + } + }; // ensure parent block is finalized to maintain invariant that // finality is called sequentially. @@ -772,29 +830,8 @@ impl Client where } operation.op.update_cache(new_cache); + storage_changes - let ( - main_sc, - child_sc, - offchain_sc, - tx, _, - changes_trie_tx, - tx_index, - ) = storage_changes.into_inner(); - - if self.config.offchain_indexing_api { - operation.op.update_offchain_storage(offchain_sc)?; - } - - operation.op.update_db_storage(tx)?; - operation.op.update_storage(main_sc.clone(), child_sc.clone())?; - operation.op.update_transaction_index(tx_index)?; - - if let Some(changes_trie_transaction) = changes_trie_tx { - operation.op.update_changes_trie(changes_trie_transaction)?; - } - - Some((main_sc, child_sc)) }, None => None, }; @@ -867,7 +904,7 @@ impl Client where fn prepare_block_storage_changes( &self, import_block: &mut BlockImportParams>, - ) -> sp_blockchain::Result> + ) -> sp_blockchain::Result> where Self: ProvideRuntimeApi, >::Api: CoreApi + @@ -875,21 +912,28 @@ impl Client where { let parent_hash = import_block.header.parent_hash(); let at = BlockId::Hash(*parent_hash); - let enact_state = match self.block_status(&at)? 
{ - BlockStatus::Unknown => return Ok(Some(ImportResult::UnknownParent)), - BlockStatus::InChainWithState | BlockStatus::Queued => true, - BlockStatus::InChainPruned if import_block.allow_missing_state => false, - BlockStatus::InChainPruned => return Ok(Some(ImportResult::MissingState)), - BlockStatus::KnownBad => return Ok(Some(ImportResult::KnownBad)), + let state_action = std::mem::replace(&mut import_block.state_action, StateAction::Skip); + let (enact_state, storage_changes) = match (self.block_status(&at)?, state_action) { + (BlockStatus::Unknown, _) => return Ok(PrepareStorageChangesResult::Discard(ImportResult::UnknownParent)), + (BlockStatus::KnownBad, _) => return Ok(PrepareStorageChangesResult::Discard(ImportResult::KnownBad)), + (_, StateAction::Skip) => (false, None), + (BlockStatus::InChainPruned, StateAction::ApplyChanges(sp_consensus::StorageChanges::Changes(_))) => + return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)), + (BlockStatus::InChainPruned, StateAction::Execute) => + return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)), + (BlockStatus::InChainPruned, StateAction::ExecuteIfPossible) => (false, None), + (_, StateAction::Execute) => (true, None), + (_, StateAction::ExecuteIfPossible) => (true, None), + (_, StateAction::ApplyChanges(changes)) => (true, Some(changes)), }; - match (enact_state, &mut import_block.storage_changes, &mut import_block.body) { + let storage_changes = match (enact_state, storage_changes, &import_block.body) { // We have storage changes and should enact the state, so we don't need to do anything // here - (true, Some(_), _) => {}, + (true, changes @ Some(_), _) => changes, // We should enact state, but don't have any storage changes, so we need to execute the // block. - (true, ref mut storage_changes @ None, Some(ref body)) => { + (true, None, Some(ref body)) => { let runtime_api = self.runtime_api(); let execution_context = if import_block.origin == BlockOrigin::NetworkInitialSync { ExecutionContext::Syncing @@ -919,19 +963,16 @@ impl Client where != &gen_storage_changes.transaction_storage_root { return Err(Error::InvalidStateRoot) - } else { - **storage_changes = Some(gen_storage_changes); } + Some(sp_consensus::StorageChanges::Changes(gen_storage_changes)) }, // No block body, no storage changes - (true, None, None) => {}, + (true, None, None) => None, // We should not enact the state, so we set the storage changes to `None`. - (false, changes, _) => { - changes.take(); - } + (false, _, _) => None, }; - Ok(None) + Ok(PrepareStorageChangesResult::Import(storage_changes)) } fn apply_finality_with_block_hash( @@ -1307,6 +1348,68 @@ impl ProofProvider for Client where cht::size(), ) } + + fn read_proof_collection( + &self, + id: &BlockId, + start_key: &[u8], + size_limit: usize, + ) -> sp_blockchain::Result<(StorageProof, u32)> { + let state = self.state_at(id)?; + Ok(prove_range_read_with_size::<_, HashFor>( + state, + None, + None, + size_limit, + Some(start_key) + )?) + } + + fn storage_collection( + &self, + id: &BlockId, + start_key: &[u8], + size_limit: usize, + ) -> sp_blockchain::Result, Vec)>> { + let state = self.state_at(id)?; + let mut current_key = start_key.to_vec(); + let mut total_size = 0; + let mut entries = Vec::new(); + while let Some(next_key) = state + .next_storage_key(¤t_key) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? + { + let value = state + .storage(next_key.as_ref()) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? 
+ .unwrap_or_default(); + let size = value.len() + next_key.len(); + if total_size + size > size_limit && !entries.is_empty() { + break; + } + total_size += size; + entries.push((next_key.clone(), value)); + current_key = next_key; + } + Ok(entries) + + } + + fn verify_range_proof( + &self, + root: Block::Hash, + proof: StorageProof, + start_key: &[u8], + ) -> sp_blockchain::Result<(Vec<(Vec, Vec)>, bool)> { + Ok(read_range_proof_check::>( + root, + proof, + None, + None, + None, + Some(start_key), + )?) + } } @@ -1751,15 +1854,16 @@ impl sp_consensus::BlockImport for &Client return Ok(res), + PrepareStorageChangesResult::Import(storage_changes) => storage_changes, + }; self.lock_import_and_run(|operation| { - self.apply_block(operation, import_block, new_cache) + self.apply_block(operation, import_block, new_cache, storage_changes) }).map_err(|e| { warn!("Block import error:\n{:?}", e); ConsensusError::ClientImport(e.to_string()).into() @@ -1801,9 +1905,14 @@ impl sp_consensus::BlockImport for &Client return Ok(ImportResult::AlreadyInChain), + BlockStatus::InChainWithState | BlockStatus::Queued if !import_existing => { + return Ok(ImportResult::AlreadyInChain) + }, BlockStatus::InChainWithState | BlockStatus::Queued => {}, - BlockStatus::InChainPruned => return Ok(ImportResult::AlreadyInChain), + BlockStatus::InChainPruned if !import_existing => { + return Ok(ImportResult::AlreadyInChain) + }, + BlockStatus::InChainPruned => {}, BlockStatus::Unknown => {}, BlockStatus::KnownBad => return Ok(ImportResult::KnownBad), } diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs index a183cbce62bdb..8841d498ecfb0 100644 --- a/client/service/test/src/client/light.rs +++ b/client/service/test/src/client/light.rs @@ -272,7 +272,7 @@ fn local_state_is_created_when_genesis_state_is_available() { ); let mut op = backend.begin_operation().unwrap(); op.set_block_data(header0, None, None, NewBlockState::Final).unwrap(); - op.reset_storage(Default::default()).unwrap(); + op.set_genesis_state(Default::default(), true).unwrap(); backend.commit_operation(op).unwrap(); match backend.state_at(BlockId::Number(0)).unwrap() { diff --git a/primitives/blockchain/src/backend.rs b/primitives/blockchain/src/backend.rs index 3441a4f6cf544..dbce364ce7987 100644 --- a/primitives/blockchain/src/backend.rs +++ b/primitives/blockchain/src/backend.rs @@ -269,12 +269,14 @@ pub struct Info { pub finalized_hash: Block::Hash, /// Last finalized block number. pub finalized_number: <::Header as HeaderT>::Number, + /// Last finalized state. + pub finalized_state: Option<(Block::Hash, <::Header as HeaderT>::Number)>, /// Number of concurrent leave forks. pub number_leaves: usize } /// Block status. -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum BlockStatus { /// Already in the blockchain. 
InChain, diff --git a/primitives/blockchain/src/error.rs b/primitives/blockchain/src/error.rs index 58d08d06f049e..0d6ac10a8800e 100644 --- a/primitives/blockchain/src/error.rs +++ b/primitives/blockchain/src/error.rs @@ -90,8 +90,8 @@ pub enum Error { #[error("Failed to get runtime version: {0}")] VersionInvalid(String), - #[error("Genesis config provided is invalid")] - GenesisInvalid, + #[error("Provided state is invalid")] + InvalidState, #[error("error decoding justification for header")] JustificationDecode, diff --git a/primitives/consensus/common/src/block_import.rs b/primitives/consensus/common/src/block_import.rs index 67978232009e8..447ea5761f767 100644 --- a/primitives/consensus/common/src/block_import.rs +++ b/primitives/consensus/common/src/block_import.rs @@ -135,6 +135,43 @@ pub struct BlockCheckParams { pub import_existing: bool, } +/// Precomputed storage. +pub enum StorageChanges { + /// Changes coming from block execution. + Changes(sp_state_machine::StorageChanges, NumberFor>), + /// Whole new state. + Import(ImportedState), +} + +/// Imported state data. A vector of key-value pairs that should form a trie. +#[derive(PartialEq, Eq, Clone)] +pub struct ImportedState { + /// Target block hash. + pub block: B::Hash, + /// State keys and values. + pub state: Vec<(Vec, Vec)>, +} + +impl std::fmt::Debug for ImportedState { + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + fmt.debug_struct("ImportedState") + .field("block", &self.block) + .finish() + } +} + +/// Defines how a new state is computed for a given imported block. +pub enum StateAction { + /// Apply precomputed changes coming from block execution or state sync. + ApplyChanges(StorageChanges), + /// Execute block body (required) and compute state. + Execute, + /// Execute block body if parent state is available and compute state. + ExecuteIfPossible, + /// Don't execute or import state. + Skip, +} + /// Data required to import a Block. #[non_exhaustive] pub struct BlockImportParams { @@ -159,11 +196,8 @@ pub struct BlockImportParams { pub post_digests: Vec>, /// The body of the block. pub body: Option>, - /// The changes to the storage to create the state for the block. If this is `Some(_)`, - /// the block import will not need to re-execute the block for importing it. - pub storage_changes: Option< - sp_state_machine::StorageChanges, NumberFor> - >, + /// Specify how the new state is computed. + pub state_action: StateAction, /// Is this block finalized already? /// `true` implies instant finality. pub finalized: bool, @@ -182,8 +216,6 @@ pub struct BlockImportParams { /// to modify it. If `None` is passed all the way down to bottom block /// importer, the import fails with an `IncompletePipeline` error. pub fork_choice: Option, - /// Allow importing the block skipping state verification if parent state is missing. - pub allow_missing_state: bool, /// Re-validate existing block. pub import_existing: bool, /// Cached full header hash (with post-digests applied). @@ -201,12 +233,11 @@ impl BlockImportParams { justifications: None, post_digests: Vec::new(), body: None, - storage_changes: None, + state_action: StateAction::Execute, finalized: false, intermediates: HashMap::new(), auxiliary: Vec::new(), fork_choice: None, - allow_missing_state: false, import_existing: false, post_hash: None, } @@ -237,20 +268,28 @@ impl BlockImportParams { /// Auxiliary function for "converting" the transaction type. 
/// - /// Actually this just sets `storage_changes` to `None` and makes rustc think that `Self` now + /// Actually this just sets `StorageChanges::Changes` to `None` and makes rustc think that `Self` now /// uses a different transaction type. - pub fn convert_transaction(self) -> BlockImportParams { + pub fn clear_storage_changes_and_mutate(self) -> BlockImportParams { + // Preserve imported state. + let state_action = match self.state_action { + StateAction::ApplyChanges(StorageChanges::Import(state)) => + StateAction::ApplyChanges(StorageChanges::Import(state)), + StateAction::ApplyChanges(StorageChanges::Changes(_)) => StateAction::Skip, + StateAction::Execute => StateAction::Execute, + StateAction::ExecuteIfPossible => StateAction::ExecuteIfPossible, + StateAction::Skip => StateAction::Skip, + }; BlockImportParams { origin: self.origin, header: self.header, justifications: self.justifications, post_digests: self.post_digests, body: self.body, - storage_changes: None, + state_action, finalized: self.finalized, auxiliary: self.auxiliary, intermediates: self.intermediates, - allow_missing_state: self.allow_missing_state, fork_choice: self.fork_choice, import_existing: self.import_existing, post_hash: self.post_hash, diff --git a/primitives/consensus/common/src/import_queue.rs b/primitives/consensus/common/src/import_queue.rs index 4220c7b14162d..fba5b51e921ca 100644 --- a/primitives/consensus/common/src/import_queue.rs +++ b/primitives/consensus/common/src/import_queue.rs @@ -34,7 +34,7 @@ use crate::{ error::Error as ConsensusError, block_import::{ BlockImport, BlockOrigin, BlockImportParams, ImportedAux, JustificationImport, ImportResult, - BlockCheckParams, + BlockCheckParams, ImportedState, StateAction, }, metrics::Metrics, }; @@ -74,8 +74,12 @@ pub struct IncomingBlock { pub origin: Option, /// Allow importing the block skipping state verification if parent state is missing. pub allow_missing_state: bool, + /// Skip block exection and state verification. + pub skip_execution: bool, /// Re-validate existing block. pub import_existing: bool, + /// Do not compute new state, but rather set it to the given set. + pub state: Option>, } /// Type of keys in the blockchain cache that consensus module could use for its needs. 
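The new `skip_execution` and `state` hints on `IncomingBlock` are translated into a `StateAction` by the import queue in the next hunk. A sketch of that precedence, assuming the generic parameters of the types introduced above (`B: BlockT` plus a free `Transaction` parameter); the helper name is illustrative only:

```rust
use sp_consensus::{import_queue::IncomingBlock, StateAction, StorageChanges};
use sp_runtime::traits::Block as BlockT;

/// Illustrative helper: pick the state action implied by an incoming block's hints.
fn state_action_for<B: BlockT, Transaction>(
    block: &mut IncomingBlock<B>,
) -> StateAction<B, Transaction> {
    if let Some(state) = block.state.take() {
        // State downloaded via fast sync replaces block execution entirely.
        StateAction::ApplyChanges(StorageChanges::Import(state))
    } else if block.skip_execution {
        // Import the block without executing it or touching state.
        StateAction::Skip
    } else if block.allow_missing_state {
        // Execute only if the parent state is available.
        StateAction::ExecuteIfPossible
    } else {
        // Default: execute the block body to compute the new state.
        StateAction::Execute
    }
}
```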
@@ -264,9 +268,17 @@ pub(crate) async fn import_single_block_metered, Trans if let Some(keys) = maybe_keys { cache.extend(keys.into_iter()); } - import_block.allow_missing_state = block.allow_missing_state; + import_block.import_existing = block.import_existing; + let mut import_block = import_block.clear_storage_changes_and_mutate(); + if let Some(state) = block.state { + import_block.state_action = StateAction::ApplyChanges(crate::StorageChanges::Import(state)); + } else if block.skip_execution { + import_block.state_action = StateAction::Skip; + } else if block.allow_missing_state { + import_block.state_action = StateAction::ExecuteIfPossible; + } - let imported = import_handle.import_block(import_block.convert_transaction(), cache).await; + let imported = import_handle.import_block(import_block, cache).await; if let Some(metrics) = metrics.as_ref() { metrics.report_verification_and_import(started.elapsed()); } diff --git a/primitives/consensus/common/src/import_queue/basic_queue.rs b/primitives/consensus/common/src/import_queue/basic_queue.rs index 3af983952af75..5767b72dd8084 100644 --- a/primitives/consensus/common/src/import_queue/basic_queue.rs +++ b/primitives/consensus/common/src/import_queue/basic_queue.rs @@ -564,6 +564,8 @@ mod tests { origin: None, allow_missing_state: false, import_existing: false, + state: None, + skip_execution: false, }], ))) .unwrap(); diff --git a/primitives/consensus/common/src/lib.rs b/primitives/consensus/common/src/lib.rs index 37df7230fd62b..60e260a892829 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -50,7 +50,8 @@ mod metrics; pub use self::error::Error; pub use block_import::{ BlockCheckParams, BlockImport, BlockImportParams, BlockOrigin, ForkChoiceStrategy, - ImportResult, ImportedAux, JustificationImport, JustificationSyncLink, + ImportResult, ImportedAux, ImportedState, JustificationImport, JustificationSyncLink, + StateAction, StorageChanges, }; pub use select_chain::SelectChain; pub use sp_state_machine::Backend as StateBackend; diff --git a/primitives/runtime/src/generic/block.rs b/primitives/runtime/src/generic/block.rs index 1b30d43ccaca7..af4f9e4521e3b 100644 --- a/primitives/runtime/src/generic/block.rs +++ b/primitives/runtime/src/generic/block.rs @@ -54,6 +54,19 @@ impl BlockId { pub fn number(number: NumberFor) -> Self { BlockId::Number(number) } + + /// Check if this block ID refers to the pre-genesis state. + pub fn is_pre_genesis(&self) -> bool { + match self { + BlockId::Hash(hash) => hash == &Default::default(), + BlockId::Number(_) => false, + } + } + + /// Create a block ID for a pre-genesis state. + pub fn pre_genesis() -> Self { + BlockId::Hash(Default::default()) + } } impl Copy for BlockId {} diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 18b89acbc6f13..9b99537130364 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -93,6 +93,22 @@ pub trait Backend: sp_std::fmt::Debug { key: &[u8] ) -> Result, Self::Error>; + /// Iterate over storage starting at key, for a given prefix and child trie. + /// Aborts as soon as `f` returns false. + /// Warning, this fails at first error when usual iteration skips errors. + /// If `allow_missing` is true, iteration stops when it reaches a missing trie node. + /// Otherwise an error is produced. + /// + /// Returns `true` if trie end is reached. 
+ fn apply_to_key_values_while, Vec) -> bool>( + &self, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + start_at: Option<&[u8]>, + f: F, + allow_missing: bool, + ) -> Result; + /// Retrieve all entries keys of storage and call `f` for each of those keys. /// Aborts as soon as `f` returns false. fn apply_to_keys_while bool>( diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index c4ba39e160160..bc5b48f02db4e 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -726,6 +726,50 @@ mod execution { prove_read_on_trie_backend(trie_backend, keys) } + /// Generate range storage read proof. + pub fn prove_range_read_with_size( + mut backend: B, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + size_limit: usize, + start_at: Option<&[u8]>, + ) -> Result<(StorageProof, u32), Box> + where + B: Backend, + H: Hasher, + H::Out: Ord + Codec, + { + let trie_backend = backend.as_trie_backend() + .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; + prove_range_read_with_size_on_trie_backend(trie_backend, child_info, prefix, size_limit, start_at) + } + + /// Generate range storage read proof on an existing trie backend. + pub fn prove_range_read_with_size_on_trie_backend( + trie_backend: &TrieBackend, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + size_limit: usize, + start_at: Option<&[u8]>, + ) -> Result<(StorageProof, u32), Box> + where + S: trie_backend_essence::TrieBackendStorage, + H: Hasher, + H::Out: Ord + Codec, + { + let proving_backend = proving_backend::ProvingBackend::::new(trie_backend); + let mut count = 0; + proving_backend.apply_to_key_values_while(child_info, prefix, start_at, |_key, _value| { + if count == 0 || proving_backend.estimate_encoded_size() <= size_limit { + count += 1; + true + } else { + false + } + }, false).map_err(|e| Box::new(e) as Box)?; + Ok((proving_backend.extract_proof(), count)) + } + /// Generate child storage read proof. pub fn prove_child_read( mut backend: B, @@ -808,6 +852,29 @@ mod execution { Ok(result) } + /// Check child storage range proof, generated by `prove_range_read` call. + pub fn read_range_proof_check( + root: H::Out, + proof: StorageProof, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + count: Option, + start_at: Option<&[u8]>, + ) -> Result<(Vec<(Vec, Vec)>, bool), Box> + where + H: Hasher, + H::Out: Ord + Codec, + { + let proving_backend = create_proof_check_backend::(root, proof)?; + read_range_proof_check_on_proving_backend( + &proving_backend, + child_info, + prefix, + count, + start_at, + ) + } + /// Check child storage read proof, generated by `prove_child_read` call. pub fn read_child_proof_check( root: H::Out, @@ -859,6 +926,32 @@ mod execution { proving_backend.child_storage(child_info, key) .map_err(|e| Box::new(e) as Box) } + + /// Check storage range proof on pre-created proving backend. + /// + /// Returns a vector with the read `key => value` pairs and a `bool` that is set to `true` when + /// all `key => value` pairs could be read and no more are left. 
+ pub fn read_range_proof_check_on_proving_backend( + proving_backend: &TrieBackend, H>, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + count: Option, + start_at: Option<&[u8]>, + ) -> Result<(Vec<(Vec, Vec)>, bool), Box> + where + H: Hasher, + H::Out: Ord + Codec, + { + let mut values = Vec::new(); + let result = proving_backend.apply_to_key_values_while(child_info, prefix, start_at, |key, value| { + values.push((key.to_vec(), value.to_vec())); + count.as_ref().map_or(true, |c| (values.len() as u32) < *c) + }, true); + match result { + Ok(completed) => Ok((values, completed)), + Err(e) => Err(Box::new(e) as Box), + } + } } #[cfg(test)] @@ -1457,7 +1550,7 @@ mod tests { remote_proof.clone(), &[&[0xff]], ).is_ok(); - // check that results are correct + // check that results are correct assert_eq!( local_result1.into_iter().collect::>(), vec![(b"value2".to_vec(), Some(vec![24]))], @@ -1494,6 +1587,57 @@ mod tests { ); } + #[test] + fn prove_read_with_size_limit_works() { + let remote_backend = trie_backend::tests::test_trie(); + let remote_root = remote_backend.storage_root(::std::iter::empty()).0; + let (proof, count) = prove_range_read_with_size(remote_backend, None, None, 0, None).unwrap(); + // Alwasys contains at least some nodes. + assert_eq!(proof.into_memory_db::().drain().len(), 3); + assert_eq!(count, 1); + + let remote_backend = trie_backend::tests::test_trie(); + let (proof, count) = prove_range_read_with_size(remote_backend, None, None, 800, Some(&[])).unwrap(); + assert_eq!(proof.clone().into_memory_db::().drain().len(), 9); + assert_eq!(count, 85); + let (results, completed) = read_range_proof_check::( + remote_root, + proof.clone(), + None, + None, + Some(count), + None, + ).unwrap(); + assert_eq!(results.len() as u32, count); + assert_eq!(completed, false); + // When checking without count limit, proof may actually contain extra values. + let (results, completed) = read_range_proof_check::( + remote_root, + proof, + None, + None, + None, + None, + ).unwrap(); + assert_eq!(results.len() as u32, 101); + assert_eq!(completed, false); + + let remote_backend = trie_backend::tests::test_trie(); + let (proof, count) = prove_range_read_with_size(remote_backend, None, None, 50000, Some(&[])).unwrap(); + assert_eq!(proof.clone().into_memory_db::().drain().len(), 11); + assert_eq!(count, 132); + let (results, completed) = read_range_proof_check::( + remote_root, + proof.clone(), + None, + None, + None, + None, + ).unwrap(); + assert_eq!(results.len() as u32, count); + assert_eq!(completed, true); + } + #[test] fn compact_multiple_child_trie() { // this root will be queried diff --git a/primitives/state-machine/src/overlayed_changes/mod.rs b/primitives/state-machine/src/overlayed_changes/mod.rs index c01d56ab919a0..a261e084eeda9 100644 --- a/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/primitives/state-machine/src/overlayed_changes/mod.rs @@ -303,7 +303,7 @@ impl OverlayedChanges { /// Set a new value for the specified key. /// /// Can be rolled back or committed when called inside a transaction. 
- pub(crate) fn set_storage(&mut self, key: StorageKey, val: Option) { + pub fn set_storage(&mut self, key: StorageKey, val: Option) { let size_write = val.as_ref().map(|x| x.len() as u64).unwrap_or(0); self.stats.tally_write_overlay(size_write); self.top.set(key, val, self.extrinsic_index()); diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index d68a87f9f56a5..5275aa82521c5 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -212,6 +212,14 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> pub fn extract_proof(&self) -> StorageProof { self.0.essence().backend_storage().proof_recorder.to_storage_proof() } + + /// Returns the estimated encoded size of the proof. + /// + /// The estimation is maybe bigger (by in maximum 4 bytes), but never smaller than the actual + /// encoded proof. + pub fn estimate_encoded_size(&self) -> usize { + self.0.essence().backend_storage().proof_recorder.estimate_encoded_size() + } } impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorage @@ -260,6 +268,17 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> self.0.child_storage(child_info, key) } + fn apply_to_key_values_while, Vec) -> bool>( + &self, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + start_at: Option<&[u8]>, + f: F, + allow_missing: bool, + ) -> Result { + self.0.apply_to_key_values_while(child_info, prefix, start_at, f, allow_missing) + } + fn apply_to_keys_while bool>( &self, child_info: Option<&ChildInfo>, diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 98deca23a9570..6162a9866a46c 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -113,6 +113,17 @@ impl, H: Hasher> Backend for TrieBackend where self.essence.for_key_values_with_prefix(prefix, f) } + fn apply_to_key_values_while, Vec) -> bool>( + &self, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + start_at: Option<&[u8]>, + f: F, + allow_missing: bool, + ) -> Result { + self.essence.apply_to_key_values_while(child_info, prefix, start_at, f, allow_missing) + } + fn apply_to_keys_while bool>( &self, child_info: Option<&ChildInfo>, diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index e0a24c08393c7..54124e6754a52 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -189,6 +189,43 @@ impl, H: Hasher> TrieBackendEssence where H::Out: .map_err(map_e) } + /// Retrieve all entries keys of storage and call `f` for each of those keys. + /// Aborts as soon as `f` returns false. + /// + /// Returns `true` when all keys were iterated. + pub fn apply_to_key_values_while( + &self, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + start_at: Option<&[u8]>, + f: impl FnMut(Vec, Vec) -> bool, + allow_missing_nodes: bool, + ) -> Result { + let mut child_root; + let root = if let Some(child_info) = child_info.as_ref() { + if let Some(fetched_child_root) = self.child_root(child_info)? { + child_root = H::Out::default(); + // root is fetched from DB, not writable by runtime, so it's always valid. 
+ child_root.as_mut().copy_from_slice(fetched_child_root.as_slice()); + + &child_root + } else { + return Ok(true); + } + } else { + &self.root + }; + + self.trie_iter_inner( + &root, + prefix, + f, + child_info, + start_at, + allow_missing_nodes, + ) + } + /// Retrieve all entries keys of a storage and call `f` for each of those keys. /// Aborts as soon as `f` returns false. pub fn apply_to_keys_while bool>( @@ -212,15 +249,15 @@ impl, H: Hasher> TrieBackendEssence where H::Out: &self.root }; - self.trie_iter_inner(root, prefix, |k, _v| f(k), child_info) + let _ = self.trie_iter_inner(root, prefix, |k, _v| { f(&k); true}, child_info, None, false); } /// Execute given closure for all keys starting with prefix. - pub fn for_child_keys_with_prefix( + pub fn for_child_keys_with_prefix( &self, child_info: &ChildInfo, prefix: &[u8], - mut f: F, + mut f: impl FnMut(&[u8]), ) { let root_vec = match self.child_root(child_info) { Ok(v) => v.unwrap_or_else(|| empty_child_trie_root::>().encode()), @@ -231,41 +268,43 @@ impl, H: Hasher> TrieBackendEssence where H::Out: }; let mut root = H::Out::default(); root.as_mut().copy_from_slice(&root_vec); - self.trie_iter_inner(&root, Some(prefix), |k, _v| { f(k); true }, Some(child_info)) + let _ = self.trie_iter_inner(&root, Some(prefix), |k, _v| { f(&k); true }, Some(child_info), None, false); } /// Execute given closure for all keys starting with prefix. pub fn for_keys_with_prefix(&self, prefix: &[u8], mut f: F) { - self.trie_iter_inner(&self.root, Some(prefix), |k, _v| { f(k); true }, None) + let _ = self.trie_iter_inner(&self.root, Some(prefix), |k, _v| { f(&k); true }, None, None, false); } - fn trie_iter_inner bool>( + fn trie_iter_inner, Vec) -> bool>( &self, root: &H::Out, prefix: Option<&[u8]>, mut f: F, child_info: Option<&ChildInfo>, - ) { - let mut iter = move |db| -> sp_std::result::Result<(), Box>> { + start_at: Option<&[u8]>, + allow_missing_nodes: bool, + ) -> Result { + let mut iter = move |db| -> sp_std::result::Result>> { let trie = TrieDB::::new(db, root)?; - let iter = if let Some(prefix) = prefix.as_ref() { - TrieDBIterator::new_prefixed(&trie, prefix)? + let prefix = prefix.unwrap_or(&[]); + let iterator = if let Some(start_at) = start_at { + TrieDBIterator::new_prefixed_then_seek(&trie, prefix, start_at)? } else { - TrieDBIterator::new(&trie)? + TrieDBIterator::new_prefixed(&trie, prefix)? }; - - for x in iter { + for x in iterator { let (key, value) = x?; - debug_assert!(prefix.as_ref().map(|prefix| key.starts_with(prefix)).unwrap_or(true)); + debug_assert!(key.starts_with(prefix)); - if !f(&key, &value) { - break; + if !f(key, value) { + return Ok(false) } } - Ok(()) + Ok(true) }; let result = if let Some(child_info) = child_info { @@ -274,14 +313,16 @@ impl, H: Hasher> TrieBackendEssence where H::Out: } else { iter(self) }; - if let Err(e) = result { - debug!(target: "trie", "Error while iterating by prefix: {}", e); + match result { + Ok(completed) => Ok(completed), + Err(e) if matches!(*e, TrieError::IncompleteDatabase(_)) && allow_missing_nodes => Ok(false), + Err(e) => Err(format!("TrieDB iteration error: {}", e)), } } /// Execute given closure for all key and values starting with prefix. 
pub fn for_key_values_with_prefix(&self, prefix: &[u8], mut f: F) { - self.trie_iter_inner(&self.root, Some(prefix), |k, v| { f(k, v); true }, None) + let _ = self.trie_iter_inner(&self.root, Some(prefix), |k, v| {f(&k, &v); true}, None, None, false); } } diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index e343181505c98..eb810e0360588 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -80,6 +80,7 @@ pub struct TestClientBuilder { fork_blocks: ForkBlocks, bad_blocks: BadBlocks, enable_offchain_indexing_api: bool, + no_genesis: bool, } impl Default @@ -116,6 +117,7 @@ impl TestClientBuilder TestClientBuilder Self { + self.no_genesis = true; + self + } + /// Build the test client with the given native executor. pub fn build_with_executor( self, @@ -232,6 +240,7 @@ impl TestClientBuilder Date: Tue, 22 Jun 2021 13:20:29 +0200 Subject: [PATCH 25/67] Fix allocator waste assessment in docs (#9167) * Fix allocator comment. * Add explanations where this comes from. * Clarify absolute values. --- primitives/allocator/src/freeing_bump.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/primitives/allocator/src/freeing_bump.rs b/primitives/allocator/src/freeing_bump.rs index 64ba136f9a354..e2a6b19e4a7f1 100644 --- a/primitives/allocator/src/freeing_bump.rs +++ b/primitives/allocator/src/freeing_bump.rs @@ -60,8 +60,11 @@ //! fail. //! //! - Sizes of allocations are rounded up to the nearest order. That is, an allocation of 2,00001 MiB -//! will be put into the bucket of 4 MiB. Therefore, typically more than half of the space in allocation -//! will be wasted. This is more pronounced with larger allocation sizes. +//! will be put into the bucket of 4 MiB. Therefore, any allocation of size `(N, 2N]` will take +//! up to `2N`, thus assuming a uniform distribution of allocation sizes, the average amount in use +//! of a `2N` space on the heap will be `(3N + ε) / 2`. So average utilisation is going to be around +//! 75% (`(3N + ε) / 2 / 2N`) meaning that around 25% of the space in allocation will be wasted. +//! This is more pronounced (in terms of absolute heap amounts) with larger allocation sizes. use crate::Error; use sp_std::{mem, convert::{TryFrom, TryInto}, ops::{Range, Index, IndexMut}}; From 0982f101642ef3ea9899a0779eef43a87d6c9c07 Mon Sep 17 00:00:00 2001 From: Robert Klotzner Date: Tue, 22 Jun 2021 15:24:33 +0200 Subject: [PATCH 26/67] Add dummy Debug instance to authority discovery service. (#9156) * Add dummy Debug instance to AuthorityDiscoveryService. * Update client/authority-discovery/src/service.rs More idiomatic print Co-authored-by: Pierre Krieger Co-authored-by: Pierre Krieger --- client/authority-discovery/src/service.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/client/authority-discovery/src/service.rs b/client/authority-discovery/src/service.rs index 1da97cbb03b53..a787ff8f51c21 100644 --- a/client/authority-discovery/src/service.rs +++ b/client/authority-discovery/src/service.rs @@ -16,6 +16,8 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use std::fmt::Debug; + use crate::ServicetoWorkerMsg; use futures::channel::{mpsc, oneshot}; @@ -30,6 +32,12 @@ pub struct Service { to_worker: mpsc::Sender, } +impl Debug for Service { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_tuple("AuthorityDiscoveryService").finish() + } +} + /// A [`Service`] allows to interact with a [`crate::Worker`], e.g. 
by querying the /// [`crate::Worker`]'s local address cache for a given [`AuthorityId`]. impl Service { From 088464e9c2639ea0e9c4631ec1af2c81f99713b5 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Tue, 22 Jun 2021 18:11:42 +0200 Subject: [PATCH 27/67] Add `substrate-rpc-subscription` to exceptions in alert (#9172) --- .maintain/monitoring/alerting-rules/alerting-rules.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.maintain/monitoring/alerting-rules/alerting-rules.yaml b/.maintain/monitoring/alerting-rules/alerting-rules.yaml index bc3243d732b4f..a3aa1b145b343 100644 --- a/.maintain/monitoring/alerting-rules/alerting-rules.yaml +++ b/.maintain/monitoring/alerting-rules/alerting-rules.yaml @@ -134,7 +134,7 @@ groups: ############################################################################## - alert: ContinuousTaskEnded - expr: '(polkadot_tasks_spawned_total{task_name != "basic-authorship-proposer"} == 1) + expr: '(polkadot_tasks_spawned_total{task_name != "basic-authorship-proposer", task_name != "substrate-rpc-subscription"} == 1) - on(instance, task_name) group_left() (polkadot_tasks_ended_total == 1)' for: 5m labels: From 6242d37f43ec5aa9b6131a8e61b8d4e71b063907 Mon Sep 17 00:00:00 2001 From: Zeke Mostov <32168567+emostov@users.noreply.github.com> Date: Tue, 22 Jun 2021 13:36:12 -0700 Subject: [PATCH 28/67] try-runtime-cli: Add execute-block subcommand (#9077) * Refactor remote_externalities::rpc_api * try-runtime-cli: Adde `execute-block` subcommand * Trivial * Address some comments * Use required_if & remove header-at usage * Improve doc * Update comment * small tweaks * add overwrite-code to shared params * Update utils/frame/try-runtime/cli/src/lib.rs Co-authored-by: Peter Goodspeed-Niklaus * make url a shared param * add helper for block_at (#9153) * add helper for block_at * remove redundant bound * doc for fn block_at * Update error message Co-authored-by: kianenigma Co-authored-by: Peter Goodspeed-Niklaus --- Cargo.lock | 1 + .../election-provider-multi-phase/src/lib.rs | 2 +- utils/frame/remote-externalities/src/lib.rs | 63 ++++- .../frame/remote-externalities/src/rpc_api.rs | 71 +++-- utils/frame/try-runtime/cli/Cargo.toml | 1 + utils/frame/try-runtime/cli/src/lib.rs | 262 +++++++++++++----- 6 files changed, 298 insertions(+), 102 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ffcf95820342d..ee78c31645b43 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10649,6 +10649,7 @@ dependencies = [ "sp-blockchain", "sp-core", "sp-externalities", + "sp-io", "sp-keystore", "sp-runtime", "sp-state-machine", diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index 2bb47a8778074..2864ca518d068 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -608,7 +608,7 @@ pub mod pallet { type Fallback: Get; /// Origin that can control this pallet. Note that any action taken by this origin (such) - /// as providing an emergency solution is not checked. Thus, it must be a trusted origin. + /// as providing an emergency solution is not checked. Thus, it must be a trusted origin. type ForceOrigin: EnsureOrigin; /// The configuration of benchmarking. 
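An aside on the freeing-bump allocator note from #9167 above: the roughly 75% average-utilisation claim follows from sizes in `(N, 2N]` being served from the `2N` bucket. A dependency-free sketch of that arithmetic (the real allocator additionally enforces minimum and maximum bucket sizes, which this ignores):

```rust
/// Round a requested size up to its power-of-two bucket, as the freeing-bump allocator
/// does; the minimum-order and maximum-size checks are omitted for brevity.
fn bucket_size(requested: u64) -> u64 {
    requested.next_power_of_two()
}

fn main() {
    // An allocation just over 1 MiB is served from the 2 MiB bucket.
    assert_eq!(bucket_size(1024 * 1024 + 1), 2 * 1024 * 1024);

    // For sizes drawn uniformly from (N, 2N], the expected bytes in use of a 2N bucket
    // are about 3N/2, i.e. roughly 75% utilisation and 25% waste.
    let n: u64 = 1024 * 1024;
    let avg_used = (n + 1..=2 * n).sum::<u64>() / n; // roughly 3N/2
    println!(
        "average use of a {} B bucket: {} B (~{:.1}%)",
        2 * n,
        avg_used,
        avg_used as f64 * 100.0 / (2.0 * n as f64)
    );
}
```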
diff --git a/utils/frame/remote-externalities/src/lib.rs b/utils/frame/remote-externalities/src/lib.rs index a77650d042125..4b6738f3b915a 100644 --- a/utils/frame/remote-externalities/src/lib.rs +++ b/utils/frame/remote-externalities/src/lib.rs @@ -43,10 +43,12 @@ type KeyPair = (StorageKey, StorageData); const LOG_TARGET: &str = "remote-ext"; const DEFAULT_TARGET: &str = "wss://rpc.polkadot.io"; -const BATCH_SIZE: usize = 512; +const BATCH_SIZE: usize = 1000; jsonrpsee_proc_macros::rpc_client_api! { RpcApi { + #[rpc(method = "state_getStorage", positional_params)] + fn get_storage(prefix: StorageKey, hash: Option) -> StorageData; #[rpc(method = "state_getKeysPaged", positional_params)] fn get_keys_paged( prefix: Option, @@ -107,7 +109,7 @@ impl From for Transport { /// A state snapshot config may be present and will be written to in that case. #[derive(Clone)] pub struct OnlineConfig { - /// The block number at which to connect. Will be latest finalized head if not provided. + /// The block hash at which to get the runtime state. Will be latest finalized head if not provided. pub at: Option, /// An optional state snapshot file to WRITE to, not for reading. Not written if set to `None`. pub state_snapshot: Option, @@ -159,8 +161,11 @@ impl Default for SnapshotConfig { pub struct Builder { /// Custom key-pairs to be injected into the externalities. inject: Vec, - /// Storage entry key prefixes to be injected into the externalities. The *hashed* prefix must be given. + /// Storage entry key prefixes to be injected into the externalities. The *hashed* prefix must + /// be given. hashed_prefixes: Vec>, + /// Storage entry keys to be injected into the externalities. The *hashed* key must be given. + hashed_keys: Vec>, /// connectivity mode, online or offline. mode: Mode, } @@ -169,7 +174,12 @@ pub struct Builder { // that. impl Default for Builder { fn default() -> Self { - Self { inject: Default::default(), mode: Default::default(), hashed_prefixes: Default::default() } + Self { + inject: Default::default(), + mode: Default::default(), + hashed_prefixes: Default::default(), + hashed_keys: Default::default(), + } } } @@ -192,6 +202,17 @@ impl Builder { // RPC methods impl Builder { + async fn rpc_get_storage( + &self, + key: StorageKey, + maybe_at: Option, + ) -> Result { + trace!(target: LOG_TARGET, "rpc: get_storage"); + RpcApi::::get_storage(self.as_online().rpc_client(), key, maybe_at).await.map_err(|e| { + error!("Error = {:?}", e); + "rpc get_storage failed." + }) + } /// Get the latest finalized head. async fn rpc_get_head(&self) -> Result { trace!(target: LOG_TARGET, "rpc: finalized_head"); @@ -281,7 +302,7 @@ impl Builder { let values = client.batch_request::>(batch) .await .map_err(|e| { - log::error!(target: LOG_TARGET, "failed to execute batch {:?} due to {:?}", chunk_keys, e); + log::error!(target: LOG_TARGET, "failed to execute batch: {:?}. Error: {:?}", chunk_keys, e); "batch failed." 
})?; assert_eq!(chunk_keys.len(), values.len()); @@ -356,11 +377,23 @@ impl Builder { }; for prefix in &self.hashed_prefixes { - info!(target: LOG_TARGET, "adding data for hashed prefix: {:?}", HexDisplay::from(prefix)); - let additional_key_values = self.rpc_get_pairs_paged(StorageKey(prefix.to_vec()), at).await?; + debug!( + target: LOG_TARGET, + "adding data for hashed prefix: {:?}", + HexDisplay::from(prefix) + ); + let additional_key_values = + self.rpc_get_pairs_paged(StorageKey(prefix.to_vec()), at).await?; keys_and_values.extend(additional_key_values); } + for key in &self.hashed_keys { + let key = StorageKey(key.to_vec()); + debug!(target: LOG_TARGET, "adding data for hashed key: {:?}", HexDisplay::from(&key)); + let value = self.rpc_get_storage(key.clone(), Some(at)).await?; + keys_and_values.push((key, value)); + } + Ok(keys_and_values) } @@ -400,7 +433,7 @@ impl Builder { info!( target: LOG_TARGET, - "extending externalities with {} manually injected keys", + "extending externalities with {} manually injected key-values", self.inject.len() ); base_kv.extend(self.inject.clone()); @@ -416,19 +449,29 @@ impl Builder { } /// Inject a manual list of key and values to the storage. - pub fn inject(mut self, injections: &[KeyPair]) -> Self { + pub fn inject_key_value(mut self, injections: &[KeyPair]) -> Self { for i in injections { self.inject.push(i.clone()); } self } - /// Inject a hashed prefix. This is treated as-is, and should be pre-hashed. + /// Inject a hashed prefix. This is treated as-is, and should be pre-hashed. + /// + /// This should be used to inject a "PREFIX", like a storage (double) map. pub fn inject_hashed_prefix(mut self, hashed: &[u8]) -> Self { self.hashed_prefixes.push(hashed.to_vec()); self } + /// Inject a hashed key to scrape. This is treated as-is, and should be pre-hashed. + /// + /// This should be used to inject a "KEY", like a storage value. + pub fn inject_hashed_key(mut self, hashed: &[u8]) -> Self { + self.hashed_keys.push(hashed.to_vec()); + self + } + /// Configure a state snapshot to be used. pub fn mode(mut self, mode: Mode) -> Self { self.mode = mode; diff --git a/utils/frame/remote-externalities/src/rpc_api.rs b/utils/frame/remote-externalities/src/rpc_api.rs index e7fd021bac4a8..6773bfd54bb19 100644 --- a/utils/frame/remote-externalities/src/rpc_api.rs +++ b/utils/frame/remote-externalities/src/rpc_api.rs @@ -18,36 +18,65 @@ //! WS RPC API for one off RPC calls to a substrate node. 
// TODO: Consolidate one off RPC calls https://github.com/paritytech/substrate/issues/8988 -use super::*; +use sp_runtime::{generic::SignedBlock, traits::{Block as BlockT, Header as HeaderT}}; +use jsonrpsee_ws_client::{WsClientBuilder, WsClient, v2::params::JsonRpcParams, traits::Client}; /// Get the header of the block identified by `at` -pub async fn get_header>(from: S, at: B::Hash) -> Result +pub async fn get_header(from: S, at: Block::Hash) -> Result where - B::Header: serde::de::DeserializeOwned, + Block: BlockT, + Block::Header: serde::de::DeserializeOwned, + S: AsRef, { - use jsonrpsee_ws_client::traits::Client; - let at = serde_json::to_value(at) - .map_err(|e| format!("Block hash could not be converted to JSON due to {:?}", e))?; - let params = vec![at]; - let client = WsClientBuilder::default() - .max_request_body_size(u32::MAX) - .build(from.as_ref()) - .await - .map_err(|e| format!("`WsClientBuilder` failed to build do to {:?}", e))?; - client.request::("chain_getHeader", JsonRpcParams::Array(params)) + let params = vec![hash_to_json::(at)?]; + let client = build_client(from).await?; + + client.request::("chain_getHeader", JsonRpcParams::Array(params)) .await - .map_err(|e| format!("chain_getHeader request failed due to {:?}", e)) + .map_err(|e| format!("chain_getHeader request failed: {:?}", e)) } /// Get the finalized head -pub async fn get_finalized_head>(from: S) -> Result { - use jsonrpsee_ws_client::traits::Client; - let client = WsClientBuilder::default() +pub async fn get_finalized_head(from: S) -> Result +where + Block: BlockT, + S: AsRef, +{ + let client = build_client(from).await?; + + client.request::("chain_getFinalizedHead", JsonRpcParams::NoParams) + .await + .map_err(|e| format!("chain_getFinalizedHead request failed: {:?}", e)) +} + +/// Get the signed block identified by `at`. +pub async fn get_block(from: S, at: Block::Hash) -> Result +where + S: AsRef, + Block: BlockT + serde::de::DeserializeOwned, + Block::Header: HeaderT, +{ + let params = vec![hash_to_json::(at)?]; + let client = build_client(from).await?; + let signed_block = client + .request::>("chain_getBlock", JsonRpcParams::Array(params)) + .await + .map_err(|e| format!("chain_getBlock request failed: {:?}", e))?; + + Ok(signed_block.block) +} + +/// Convert a block hash to a serde json value. +fn hash_to_json(hash: Block::Hash) -> Result { + serde_json::to_value(hash) + .map_err(|e| format!("Block hash could not be converted to JSON: {:?}", e)) +} + +/// Build a website client that connects to `from`. 
+async fn build_client>(from: S) -> Result { + WsClientBuilder::default() .max_request_body_size(u32::MAX) .build(from.as_ref()) .await - .map_err(|e| format!("`WsClientBuilder` failed to build do to {:?}", e))?; - client.request::("chain_getFinalizedHead", JsonRpcParams::NoParams) - .await - .map_err(|e| format!("chain_getFinalizedHead request failed due to {:?}", e)) + .map_err(|e| format!("`WsClientBuilder` failed to build: {:?}", e)) } diff --git a/utils/frame/try-runtime/cli/Cargo.toml b/utils/frame/try-runtime/cli/Cargo.toml index f262ba4812a0e..2e2335bc5fff9 100644 --- a/utils/frame/try-runtime/cli/Cargo.toml +++ b/utils/frame/try-runtime/cli/Cargo.toml @@ -29,6 +29,7 @@ sp-blockchain = { version = "3.0.0", path = "../../../../primitives/blockchain" sp-runtime = { version = "3.0.0", path = "../../../../primitives/runtime" } sp-externalities = { version = "0.9.0", path = "../../../../primitives/externalities" } sp-core = { version = "3.0.0", path = "../../../../primitives/core" } +sp-io = { version = "3.0.0", path = "../../../../primitives/io" } sp-keystore = { version = "0.9.0", path = "../../../../primitives/keystore" } frame-try-runtime = { version = "0.9.0", path = "../../../../frame/try-runtime" } diff --git a/utils/frame/try-runtime/cli/src/lib.rs b/utils/frame/try-runtime/cli/src/lib.rs index dc4cb7cd33dbd..e0d09ff7fbcf4 100644 --- a/utils/frame/try-runtime/cli/src/lib.rs +++ b/utils/frame/try-runtime/cli/src/lib.rs @@ -25,13 +25,14 @@ use sc_executor::NativeExecutor; use sc_service::NativeExecutionDispatch; use sc_chain_spec::ChainSpec; use sp_state_machine::StateMachine; -use sp_runtime::traits::{Block as BlockT, NumberFor}; +use sp_runtime::traits::{Block as BlockT, NumberFor, Header as HeaderT}; use sp_core::{ offchain::{ OffchainWorkerExt, OffchainDbExt, TransactionPoolExt, - testing::{TestOffchainExt, TestTransactionPoolExt} + testing::{TestOffchainExt, TestTransactionPoolExt}, }, storage::{StorageData, StorageKey, well_known_keys}, + hashing::twox_128, }; use sp_keystore::{KeystoreExt, testing::KeyStore}; use remote_externalities::{Builder, Mode, SnapshotConfig, OfflineConfig, OnlineConfig, rpc_api}; @@ -45,6 +46,8 @@ pub enum Command { OnRuntimeUpgrade(OnRuntimeUpgradeCmd), /// Execute "OffchainWorkerApi_offchain_worker" against the given runtime state. OffchainWorker(OffchainWorkerCmd), + /// Execute "Core_execute_block" using the given block and the runtime state of the parent block. + ExecuteBlock(ExecuteBlockCmd), } #[derive(Debug, Clone, structopt::StructOpt)] @@ -55,17 +58,14 @@ pub struct OnRuntimeUpgradeCmd { #[derive(Debug, Clone, structopt::StructOpt)] pub struct OffchainWorkerCmd { - /// Hash of the block whose header to use to execute the offchain worker. - #[structopt(short, long, multiple = false, parse(try_from_str = parse::hash))] - pub header_at: String, - #[structopt(subcommand)] pub state: State, +} - /// Whether or not to overwrite the code from state with the code from - /// the specified chain spec. - #[structopt(long)] - pub overwrite_code: bool, +#[derive(Debug, Clone, structopt::StructOpt)] +pub struct ExecuteBlockCmd { + #[structopt(subcommand)] + pub state: State, } #[derive(Debug, Clone, structopt::StructOpt)] @@ -99,6 +99,46 @@ pub struct SharedParams { /// sc_service::Configuration.default_heap_pages. #[structopt(long)] pub heap_pages: Option, + + /// The block hash at which to read state. This is required for execute-block, offchain-worker, + /// or any command that used the live subcommand. 
+ #[structopt( + short, + long, + multiple = false, + parse(try_from_str = parse::hash), + required_ifs( + &[("command", "offchain-worker"), ("command", "execute-block"), ("subcommand", "live")] + ) + )] + block_at: String, + + /// Whether or not to overwrite the code from state with the code from + /// the specified chain spec. + #[structopt(long)] + pub overwrite_code: bool, + + /// The url to connect to. + // TODO having this a shared parm is a temporary hack; the url is used just + // to get the header/block. We should try and get that out of state, OR allow + // the user to feed in a header/block via file. + // https://github.com/paritytech/substrate/issues/9027 + #[structopt(short, long, default_value = "ws://localhost:9944", parse(try_from_str = parse::url))] + url: String, +} + +impl SharedParams { + /// Get the configured value of `block_at`, interpreted as the hash type of `Block`. + pub fn block_at(&self) -> sc_cli::Result + where + Block: BlockT, + ::Hash: FromStr, + <::Hash as FromStr>::Err: Debug, + { + self.block_at + .parse::<::Hash>() + .map_err(|e| format!("Could not parse block hash: {:?}", e).into()) + } } /// Various commands to try out against runtime state at a specific block. @@ -114,11 +154,10 @@ pub struct TryRuntimeCmd { /// The source of runtime state to try operations against. #[derive(Debug, Clone, structopt::StructOpt)] pub enum State { - /// Use a state snapshot as the source of runtime state. NOTE: for the offchain-worker command this - /// is only partially supported at the moment and you must have a relevant archive node exposed on - /// localhost:9944 in order to query the block header. - // TODO https://github.com/paritytech/substrate/issues/9027 + /// Use a state snapshot as the source of runtime state. NOTE: for the offchain-worker and + /// execute-block command this is only partially supported and requires a archive node url. Snap { + #[structopt(short, long)] snapshot_path: PathBuf, }, @@ -128,25 +167,16 @@ pub enum State { #[structopt(short, long)] snapshot_path: Option, - /// The block hash at which to connect. - /// Will be latest finalized head if not provided. - #[structopt(short, long, multiple = false, parse(try_from_str = parse::hash))] - block_at: Option, - /// The modules to scrape. If empty, entire chain state will be scraped. #[structopt(short, long, require_delimiter = true)] modules: Option>, - - /// The url to connect to. 
- #[structopt(default_value = "ws://localhost:9944", parse(try_from_str = parse::url))] - url: String, - }, + } } async fn on_runtime_upgrade( shared: SharedParams, command: OnRuntimeUpgradeCmd, - config: Configuration + config: Configuration, ) -> sc_cli::Result<()> where Block: BlockT, @@ -158,11 +188,7 @@ where { let wasm_method = shared.wasm_method; let execution = shared.execution; - let heap_pages = if shared.heap_pages.is_some() { - shared.heap_pages - } else { - config.default_heap_pages - }; + let heap_pages = shared.heap_pages.or(config.default_heap_pages); let mut changes = Default::default(); let max_runtime_instances = config.max_runtime_instances; @@ -180,22 +206,22 @@ where })) }, State::Live { - url, snapshot_path, - block_at, modules } => Builder::::new().mode(Mode::Online(OnlineConfig { - transport: url.to_owned().into(), + transport: shared.url.to_owned().into(), state_snapshot: snapshot_path.as_ref().map(SnapshotConfig::new), modules: modules.to_owned().unwrap_or_default(), - at: block_at.as_ref() - .map(|b| b.parse().map_err(|e| format!("Could not parse hash: {:?}", e))).transpose()?, + at: Some(shared.block_at::()?), ..Default::default() })), }; let (code_key, code) = extract_code(config.chain_spec)?; - builder.inject(&[(code_key, code)]).build().await? + builder + .inject_key_value(&[(code_key, code)]) + .inject_hashed_key(&[twox_128(b"System"), twox_128(b"LastRuntimeUpgrade")].concat()) + .build().await? }; let encoded_result = StateMachine::<_, _, NumberFor, _>::new( @@ -211,10 +237,10 @@ where sp_core::testing::TaskExecutor::new(), ) .execute(execution.into()) - .map_err(|e| format!("failed to execute 'TryRuntime_on_runtime_upgrade' due to {:?}", e))?; + .map_err(|e| format!("failed to execute 'TryRuntime_on_runtime_upgrade': {:?}", e))?; let (weight, total_weight) = <(u64, u64) as Decode>::decode(&mut &*encoded_result) - .map_err(|e| format!("failed to decode output due to {:?}", e))?; + .map_err(|e| format!("failed to decode output: {:?}", e))?; log::info!( "TryRuntime_on_runtime_upgrade executed without errors. Consumed weight = {}, total weight = {} ({})", weight, @@ -229,7 +255,7 @@ async fn offchain_worker( shared: SharedParams, command: OffchainWorkerCmd, config: Configuration, -)-> sc_cli::Result<()> +) -> sc_cli::Result<()> where Block: BlockT, Block::Hash: FromStr, @@ -241,11 +267,7 @@ where { let wasm_method = shared.wasm_method; let execution = shared.execution; - let heap_pages = if shared.heap_pages.is_some() { - shared.heap_pages - } else { - config.default_heap_pages - }; + let heap_pages = shared.heap_pages.or(config.default_heap_pages); let mut changes = Default::default(); let max_runtime_instances = config.max_runtime_instances; @@ -255,47 +277,43 @@ where max_runtime_instances, ); - let (mode, url) = match command.state { + let mode = match command.state { State::Live { - url, snapshot_path, - block_at, modules } => { + let at = shared.block_at::()?; let online_config = OnlineConfig { - transport: url.to_owned().into(), + transport: shared.url.to_owned().into(), state_snapshot: snapshot_path.as_ref().map(SnapshotConfig::new), modules: modules.to_owned().unwrap_or_default(), - at: block_at.as_ref() - .map(|b| b.parse().map_err(|e| format!("Could not parse hash: {:?}", e))).transpose()?, + at: Some(at), ..Default::default() }; - (Mode::Online(online_config), url) + Mode::Online(online_config) }, State::Snap { snapshot_path } => { - // TODO This is a temporary hack; the url is used just to get the header. 
We should try - // and get the header out of state, OR use an arbitrary header if thats ok, OR allow - // the user to feed in a header via file. - // https://github.com/paritytech/substrate/issues/9027 - // This assumes you have a node running on local host default - let url = "ws://127.0.0.1:9944".to_string(); let mode = Mode::Offline(OfflineConfig { state_snapshot: SnapshotConfig::new(snapshot_path), }); - (mode, url) + mode } }; - let builder = Builder::::new().mode(mode); - let mut ext = if command.overwrite_code { + let builder = Builder::::new() + .mode(mode) + .inject_hashed_key(&[twox_128(b"System"), twox_128(b"LastRuntimeUpgrade")].concat()); + let mut ext = if shared.overwrite_code { let (code_key, code) = extract_code(config.chain_spec)?; - builder.inject(&[(code_key, code)]).build().await? + builder.inject_key_value(&[(code_key, code)]).build().await? } else { - builder.build().await? + builder + .inject_hashed_key(well_known_keys::CODE) + .build() + .await? }; - // register externality extensions in order to provide host interface for OCW to the runtime. let (offchain, _offchain_state) = TestOffchainExt::new(); let (pool, _pool_state) = TestTransactionPoolExt::new(); ext.register_extension(OffchainDbExt::new(offchain.clone())); @@ -303,10 +321,8 @@ where ext.register_extension(KeystoreExt(Arc::new(KeyStore::new()))); ext.register_extension(TransactionPoolExt::new(pool)); - let header_hash: Block::Hash = command.header_at - .parse() - .map_err(|e| format!("Could not parse header hash: {:?}", e))?; - let header = rpc_api::get_header::(url, header_hash).await?; + let header_hash = shared.block_at::()?; + let header = rpc_api::get_header::(shared.url, header_hash).await?; let _ = StateMachine::<_, _, NumberFor, _>::new( &ext.backend, @@ -321,17 +337,120 @@ where sp_core::testing::TaskExecutor::new(), ) .execute(execution.into()) - .map_err(|e| format!("failed to execute 'OffchainWorkerApi_offchain_worker' due to {:?}", e))?; + .map_err(|e| format!("failed to execute 'OffchainWorkerApi_offchain_worker': {:?}", e))?; log::info!("OffchainWorkerApi_offchain_worker executed without errors."); Ok(()) } +async fn execute_block( + shared: SharedParams, + command: ExecuteBlockCmd, + config: Configuration, +) -> sc_cli::Result<()> +where + Block: BlockT + serde::de::DeserializeOwned, + Block::Hash: FromStr, + ::Err: Debug, + NumberFor: FromStr, + as FromStr>::Err: Debug, + ExecDispatch: NativeExecutionDispatch + 'static, +{ + let wasm_method = shared.wasm_method; + let execution = shared.execution; + let heap_pages = shared.heap_pages.or(config.default_heap_pages); + + let mut changes = Default::default(); + let max_runtime_instances = config.max_runtime_instances; + let executor = NativeExecutor::::new( + wasm_method.into(), + heap_pages, + max_runtime_instances, + ); + + let block_hash = shared.block_at::()?; + let block: Block = rpc_api::get_block::(shared.url.clone(), block_hash).await?; + + let mode = match command.state { + State::Snap { snapshot_path } => { + let mode = Mode::Offline(OfflineConfig { + state_snapshot: SnapshotConfig::new(snapshot_path), + }); + + mode + }, + State::Live { snapshot_path, modules } => { + let parent_hash = block.header().parent_hash(); + + let mode = Mode::Online(OnlineConfig { + transport: shared.url.to_owned().into(), + state_snapshot: snapshot_path.as_ref().map(SnapshotConfig::new), + modules: modules.to_owned().unwrap_or_default(), + at: Some(parent_hash.to_owned()), + ..Default::default() + }); + + mode + } + }; + + let ext = { + let builder = 
Builder::::new() + .mode(mode) + .inject_hashed_key(&[twox_128(b"System"), twox_128(b"LastRuntimeUpgrade")].concat()); + let mut ext = if shared.overwrite_code { + let (code_key, code) = extract_code(config.chain_spec)?; + builder.inject_key_value(&[(code_key, code)]).build().await? + } else { + builder + .inject_hashed_key(well_known_keys::CODE) + .build() + .await? + }; + + // register externality extensions in order to provide host interface for OCW to the + // runtime. + let (offchain, _offchain_state) = TestOffchainExt::new(); + let (pool, _pool_state) = TestTransactionPoolExt::new(); + ext.register_extension(OffchainDbExt::new(offchain.clone())); + ext.register_extension(OffchainWorkerExt::new(offchain)); + ext.register_extension(KeystoreExt(Arc::new(KeyStore::new()))); + ext.register_extension(TransactionPoolExt::new(pool)); + + ext + }; + + // A digest item gets added when the runtime is processing the block, so we need to pop + // the last one to be consistent with what a gossiped block would contain. + let (mut header, extrinsics) = block.deconstruct(); + header.digest_mut().pop(); + let block = Block::new(header, extrinsics); + + let _encoded_result = StateMachine::<_, _, NumberFor, _>::new( + &ext.backend, + None, + &mut changes, + &executor, + "Core_execute_block", + block.encode().as_ref(), + ext.extensions, + &sp_state_machine::backend::BackendRuntimeCode::new(&ext.backend).runtime_code()?, + sp_core::testing::TaskExecutor::new(), + ) + .execute(execution.into()) + .map_err(|e| format!("failed to execute 'Core_execute_block': {:?}", e))?; + debug_assert!(_encoded_result == vec![1]); + + log::info!("Core_execute_block executed without errors."); + + Ok(()) +} + impl TryRuntimeCmd { pub async fn run(&self, config: Configuration) -> sc_cli::Result<()> where - Block: BlockT, + Block: BlockT + serde::de::DeserializeOwned, Block::Header: serde::de::DeserializeOwned, Block::Hash: FromStr, ::Err: Debug, @@ -346,6 +465,9 @@ impl TryRuntimeCmd { Command::OffchainWorker(cmd) => { offchain_worker::(self.shared.clone(), cmd.clone(), config).await } + Command::ExecuteBlock(cmd) => { + execute_block::(self.shared.clone(), cmd.clone(), config).await + } } } } @@ -363,7 +485,7 @@ impl CliConfiguration for TryRuntimeCmd { } } -/// Extract `:code` from the given chain spec and return as `StorageData` along with the +/// Extract `:code` from the given chain spec and return as `StorageData` along with the /// corresponding `StorageKey`. 
fn extract_code(spec: Box) -> sc_cli::Result<(StorageKey, StorageData)> { let genesis_storage = spec.build_storage()?; From 63c4b497108cc52a101d9e0a31a3c723a27bc8d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Wed, 23 Jun 2021 06:01:11 +0100 Subject: [PATCH 29/67] consensus: remove unused offline tracker (#9178) --- primitives/consensus/common/src/lib.rs | 1 - .../consensus/common/src/offline_tracker.rs | 137 ------------------ 2 files changed, 138 deletions(-) delete mode 100644 primitives/consensus/common/src/offline_tracker.rs diff --git a/primitives/consensus/common/src/lib.rs b/primitives/consensus/common/src/lib.rs index 60e260a892829..51b2a96e17758 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -39,7 +39,6 @@ use futures::prelude::*; use sp_state_machine::StorageProof; pub mod block_validation; -pub mod offline_tracker; pub mod error; pub mod block_import; mod select_chain; diff --git a/primitives/consensus/common/src/offline_tracker.rs b/primitives/consensus/common/src/offline_tracker.rs deleted file mode 100644 index 8e33a2c449e35..0000000000000 --- a/primitives/consensus/common/src/offline_tracker.rs +++ /dev/null @@ -1,137 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Tracks offline validators. - -use std::collections::HashMap; -use std::time::Duration; -use wasm_timer::Instant; - -// time before we report a validator. -const REPORT_TIME: Duration = Duration::from_secs(60 * 5); - -struct Observed { - last_round_end: Instant, - offline_since: Instant, -} - -impl Observed { - fn new() -> Observed { - let now = Instant::now(); - Observed { - last_round_end: now, - offline_since: now, - } - } - - fn note_round_end(&mut self, was_online: bool) { - let now = Instant::now(); - - self.last_round_end = now; - if was_online { - self.offline_since = now; - } - } - - fn is_active(&self) -> bool { - // can happen if clocks are not monotonic - if self.offline_since > self.last_round_end { return true } - self.last_round_end.duration_since(self.offline_since) < REPORT_TIME - } -} - -/// Tracks offline validators and can issue a report for those offline. -pub struct OfflineTracker { - observed: HashMap, -} - -impl OfflineTracker { - /// Create a new tracker. - pub fn new() -> Self { - OfflineTracker { observed: HashMap::new() } - } - - /// Note new consensus is starting with the given set of validators. - pub fn note_new_block(&mut self, validators: &[AuthorityId]) { - use std::collections::HashSet; - - let set: HashSet<_> = validators.iter().cloned().collect(); - self.observed.retain(|k, _| set.contains(k)); - } - - /// Note that a round has ended. 
- pub fn note_round_end(&mut self, validator: AuthorityId, was_online: bool) { - self.observed.entry(validator) - .or_insert_with(Observed::new) - .note_round_end(was_online); - } - - /// Generate a vector of indices for offline account IDs. - pub fn reports(&self, validators: &[AuthorityId]) -> Vec { - validators.iter() - .enumerate() - .filter_map(|(i, v)| if self.is_online(v) { - None - } else { - Some(i as u32) - }) - .collect() - } - - /// Whether reports on a validator set are consistent with our view of things. - pub fn check_consistency(&self, validators: &[AuthorityId], reports: &[u32]) -> bool { - reports.iter().cloned().all(|r| { - let v = match validators.get(r as usize) { - Some(v) => v, - None => return false, - }; - - // we must think all validators reported externally are offline. - let thinks_online = self.is_online(v); - !thinks_online - }) - } - - fn is_online(&self, v: &AuthorityId) -> bool { - self.observed.get(v).map(Observed::is_active).unwrap_or(true) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn validator_offline() { - let mut tracker = OfflineTracker::::new(); - let v1 = 1; - let v2 = 2; - let v3 = 3; - tracker.note_round_end(v1, true); - tracker.note_round_end(v2, true); - tracker.note_round_end(v3, true); - - let slash_time = REPORT_TIME + Duration::from_secs(5); - tracker.observed.get_mut(&v1).unwrap().offline_since -= slash_time; - tracker.observed.get_mut(&v2).unwrap().offline_since -= slash_time; - - assert_eq!(tracker.reports(&[v1, v2, v3]), vec![0, 1]); - - tracker.note_new_block(&[v1, v3]); - assert_eq!(tracker.reports(&[v1, v2, v3]), vec![0]); - } -} From 54813c8b781104e24327cfc2a508c7b5936e55a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Wed, 23 Jun 2021 08:50:48 +0100 Subject: [PATCH 30/67] grandpa: fix broken line breaks in logging (#9179) --- client/finality-grandpa/src/authorities.rs | 7 +++---- client/finality-grandpa/src/import.rs | 5 ++--- client/finality-grandpa/src/lib.rs | 10 +++++----- 3 files changed, 10 insertions(+), 12 deletions(-) diff --git a/client/finality-grandpa/src/authorities.rs b/client/finality-grandpa/src/authorities.rs index ececbf1d7c701..a04be72f9d31e 100644 --- a/client/finality-grandpa/src/authorities.rs +++ b/client/finality-grandpa/src/authorities.rs @@ -295,8 +295,7 @@ where debug!( target: "afg", - "Inserting potential standard set change signaled at block {:?} (delayed by {:?} - blocks).", + "Inserting potential standard set change signaled at block {:?} (delayed by {:?} blocks).", (&number, &hash), pending.delay, ); @@ -310,8 +309,8 @@ where debug!( target: "afg", - "There are now {} alternatives for the next pending standard change (roots), and a - total of {} pending standard changes (across all forks).", + "There are now {} alternatives for the next pending standard change (roots), and a \ + total of {} pending standard changes (across all forks).", self.pending_standard_changes.roots().count(), self.pending_standard_changes.iter().count(), ); diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index c287cc0b3b896..ebb26a28c3485 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -372,9 +372,8 @@ where self.inner.header(BlockId::Number(canon_number)) .map_err(|e| ConsensusError::ClientImport(e.to_string()))? 
.expect( - "the given block number is less or equal than the current best - finalized number; current best finalized number must exist in - chain; qed." + "the given block number is less or equal than the current best finalized number; \ + current best finalized number must exist in chain; qed." ) .hash(); diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index a133319fdbef4..6c3f0f6af37a8 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -778,7 +778,7 @@ where let authorities = serde_json::to_string(&authorities).expect( "authorities is always at least an empty vector; \ - elements are always of type string", + elements are always of type string", ); telemetry!( @@ -945,7 +945,7 @@ where .collect::>(); let authorities = serde_json::to_string(&authorities).expect( - "authorities is always at least an empty vector; elements are always of type string", + "authorities is always at least an empty vector; elements are always of type string; qed.", ); telemetry!( @@ -1037,9 +1037,9 @@ where let voters = Arc::new(VoterSet::new(new.authorities.into_iter()) .expect( "new authorities come from pending change; \ - pending change comes from `AuthoritySet`; \ - `AuthoritySet` validates authorities is non-empty and weights are non-zero; \ - qed." + pending change comes from `AuthoritySet`; \ + `AuthoritySet` validates authorities is non-empty and weights are non-zero; \ + qed." ) ); From e4d879c8347c6c1876c0095840d2c07a4fe791fc Mon Sep 17 00:00:00 2001 From: akashi6824 Date: Wed, 23 Jun 2021 15:04:40 +0700 Subject: [PATCH 31/67] Add PolkaFoundry, PolkaSmith SS58 address (#8623) * Add PolkaFoundry, PolkaSmith SS58 address * chang decimals to 18 * fix format * fix format --- primitives/core/src/crypto.rs | 4 ++++ ss58-registry.json | 18 ++++++++++++++++++ 2 files changed, 22 insertions(+) diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index 5be18422d0e12..9e3177f249a5e 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -584,6 +584,10 @@ ss58_address_format!( (77, "manta", "Manta Network, standard account (*25519).") CalamariAccount => (78, "calamari", "Manta Canary Network, standard account (*25519).") + PolkaSmith => + (98, "polkasmith", "PolkaSmith Canary Network, standard account (*25519).") + PolkaFoundry => + (99, "polkafoundry", "PolkaFoundry Network, standard account (*25519).") SocialAccount => (252, "social-network", "Social Network, standard account (*25519).") Moonbeam => diff --git a/ss58-registry.json b/ss58-registry.json index 9fec4b7be9f5a..133cb6506fb05 100644 --- a/ss58-registry.json +++ b/ss58-registry.json @@ -505,6 +505,24 @@ "standardAccount": "*25519", "website": "https://manta.network" }, + { + "prefix": 98, + "network": "polkasmith", + "displayName": "PolkaSmith Canary Network", + "symbols": ["PKS"], + "decimals": [18], + "standardAccount": "*25519", + "website": "https://polkafoundry.com" + }, + { + "prefix": 99, + "network": "polkafoundry", + "displayName": "PolkaFoundry Network", + "symbols": ["PKF"], + "decimals": [18], + "standardAccount": "*25519", + "website": "https://polkafoundry.com" + }, { "prefix": 252, "network": "social-network", From 96d7fe8b6d223a59a9668de28dbc40f8c43d5d83 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Wed, 23 Jun 2021 09:29:30 +0100 Subject: [PATCH 32/67] Remove Unused `AccountIndex` (#9149) * remove unused `AccountIndex` * Update lib.rs --- bin/node-template/runtime/src/lib.rs | 7 ------- 1 file changed, 7 
deletions(-) diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index 2ff4272747ee5..b24d454877e0d 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -52,10 +52,6 @@ pub type Signature = MultiSignature; /// to the public key of our transaction signing scheme. pub type AccountId = <::Signer as IdentifyAccount>::AccountId; -/// The type for looking up accounts. We don't expect more than 4 billion of them, but you -/// never know... -pub type AccountIndex = u32; - /// Balance of an account. pub type Balance = u128; @@ -65,9 +61,6 @@ pub type Index = u32; /// A hash of some data used by the chain. pub type Hash = sp_core::H256; -/// Digest item type. -pub type DigestItem = generic::DigestItem; - /// Opaque types. These are used by the CLI to instantiate machinery that don't need to know /// the specifics of the runtime. They can then be made to be agnostic over specific formats /// of data like extrinsics, allowing for them to continue syncing the network through upgrades From 4d792cf9b95c7e3658ed52fb01f94ce64a4c6dbf Mon Sep 17 00:00:00 2001 From: Disconnect3d Date: Wed, 23 Jun 2021 11:31:35 +0200 Subject: [PATCH 33/67] node-template: remove redundant types from runtime (#9161) Removes `BlockId`, `SignedBlock` and `CheckedExtrinsic` as they are unused within the runtime currently and the `BlockId` was defined twice. Co-authored-by: Shawn Tabrizi --- bin/node-template/runtime/src/lib.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index b24d454877e0d..e89d7f28be220 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -298,10 +298,6 @@ pub type Address = sp_runtime::MultiAddress; pub type Header = generic::Header; /// Block type as expected by this runtime. pub type Block = generic::Block; -/// A Block signed with a Justification -pub type SignedBlock = generic::SignedBlock; -/// BlockId type as expected by this runtime. -pub type BlockId = generic::BlockId; /// The SignedExtension to the basic transaction logic. pub type SignedExtra = ( frame_system::CheckSpecVersion, @@ -314,8 +310,6 @@ pub type SignedExtra = ( ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; -/// Extrinsic type that has already been checked. -pub type CheckedExtrinsic = generic::CheckedExtrinsic; /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< Runtime, From ecf1b87939f36acb4b4a087739c107815d455e7e Mon Sep 17 00:00:00 2001 From: Guillaume Thiolliere Date: Wed, 23 Jun 2021 12:34:54 +0200 Subject: [PATCH 34/67] fix typo (#9184) --- frame/support/procedural/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/support/procedural/src/lib.rs b/frame/support/procedural/src/lib.rs index 2768608cb6f5b..9ac648f5e795a 100644 --- a/frame/support/procedural/src/lib.rs +++ b/frame/support/procedural/src/lib.rs @@ -384,7 +384,7 @@ pub fn derive_clone_no_bound(input: TokenStream) -> TokenStream { clone_no_bound::derive_clone_no_bound(input) } -/// Derive [`Debug`] but do not bound any generics. Docs are at `frame_support::DeriveNoBounds`. +/// Derive [`Debug`] but do not bound any generics. Docs are at `frame_support::DebugNoBound`. 
#[proc_macro_derive(DebugNoBound)] pub fn derive_debug_no_bound(input: TokenStream) -> TokenStream { debug_no_bound::derive_debug_no_bound(input) From e6cd6b1240ce741f06b05ad1c59f5399e52e717a Mon Sep 17 00:00:00 2001 From: Julien Date: Wed, 23 Jun 2021 12:44:11 +0200 Subject: [PATCH 35/67] Fixed typo in comment (#9182) --- frame/uniques/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/uniques/src/lib.rs b/frame/uniques/src/lib.rs index b98a038ecff36..70a9e58d7bfa7 100644 --- a/frame/uniques/src/lib.rs +++ b/frame/uniques/src/lib.rs @@ -389,7 +389,7 @@ pub mod pallet { /// /// Weight: `O(n + m)` where: /// - `n = witness.instances` - /// - `m = witness.instance_metdadatas` + /// - `m = witness.instance_metadatas` /// - `a = witness.attributes` #[pallet::weight(T::WeightInfo::destroy( witness.instances, From 7dc25343c650a4f7f15ad4e948c9094635aa6b88 Mon Sep 17 00:00:00 2001 From: Squirrel Date: Wed, 23 Jun 2021 13:33:48 +0100 Subject: [PATCH 36/67] Less slices (#9176) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Less slices Co-authored-by: Bastian Köcher --- client/executor/common/src/sandbox.rs | 8 +++---- client/executor/src/wasm_runtime.rs | 20 ++++++++--------- client/executor/wasmi/src/lib.rs | 4 ++-- primitives/io/src/lib.rs | 5 +---- primitives/runtime/src/generic/mod.rs | 2 +- primitives/state-machine/src/backend.rs | 2 +- .../state-machine/src/changes_trie/build.rs | 22 +++++++++---------- .../state-machine/src/changes_trie/prune.rs | 6 ++--- primitives/state-machine/src/ext.rs | 4 ++-- primitives/trie/src/lib.rs | 8 +++---- primitives/trie/src/node_codec.rs | 6 ++--- 11 files changed, 41 insertions(+), 46 deletions(-) diff --git a/client/executor/common/src/sandbox.rs b/client/executor/common/src/sandbox.rs index 8ed294bb83983..b7838aab7f348 100644 --- a/client/executor/common/src/sandbox.rs +++ b/client/executor/common/src/sandbox.rs @@ -187,10 +187,10 @@ fn trap(msg: &'static str) -> Trap { TrapKind::Host(Box::new(Error::Other(msg.into()))).into() } -fn deserialize_result(serialized_result: &[u8]) -> std::result::Result, Trap> { +fn deserialize_result(mut serialized_result: &[u8]) -> std::result::Result, Trap> { use self::sandbox_primitives::HostError; use sp_wasm_interface::ReturnValue; - let result_val = std::result::Result::::decode(&mut &serialized_result[..]) + let result_val = std::result::Result::::decode(&mut serialized_result) .map_err(|_| trap("Decoding Result failed!"))?; match result_val { @@ -379,10 +379,10 @@ pub enum InstantiationError { } fn decode_environment_definition( - raw_env_def: &[u8], + mut raw_env_def: &[u8], memories: &[Option], ) -> std::result::Result<(Imports, GuestToSupervisorFunctionMapping), InstantiationError> { - let env_def = sandbox_primitives::EnvironmentDefinition::decode(&mut &raw_env_def[..]) + let env_def = sandbox_primitives::EnvironmentDefinition::decode(&mut raw_env_def) .map_err(|_| InstantiationError::EnvironmentDefinitionCorrupted)?; let mut func_map = HashMap::new(); diff --git a/client/executor/src/wasm_runtime.rs b/client/executor/src/wasm_runtime.rs index d721f36e8a991..d01132da180a4 100644 --- a/client/executor/src/wasm_runtime.rs +++ b/client/executor/src/wasm_runtime.rs @@ -177,7 +177,7 @@ impl RuntimeCache { /// Prepares a WASM module instance and executes given function for it. /// - /// This uses internal cache to find avaiable instance or create a new one. + /// This uses internal cache to find available instance or create a new one. 
/// # Parameters /// /// `code` - Provides external code or tells the executor to fetch it from storage. @@ -196,7 +196,7 @@ impl RuntimeCache { /// /// `f` - Function to execute. /// - /// # Returns result of `f` wrapped in an additonal result. + /// # Returns result of `f` wrapped in an additional result. /// In case of failure one of two errors can be returned: /// /// `Err::InvalidCode` is returned for runtime code issues. @@ -337,7 +337,7 @@ pub fn create_wasm_runtime_with_code( } } -fn decode_version(version: &[u8]) -> Result { +fn decode_version(mut version: &[u8]) -> Result { let v: RuntimeVersion = sp_api::OldRuntimeVersion::decode(&mut &version[..]) .map_err(|_| WasmError::Instantiation( @@ -347,7 +347,7 @@ fn decode_version(version: &[u8]) -> Result { let core_api_id = sp_core::hashing::blake2_64(b"Core"); if v.has_api_with(&core_api_id, |v| v >= 3) { - sp_api::RuntimeVersion::decode(&mut &version[..]) + sp_api::RuntimeVersion::decode(&mut version) .map_err(|_| WasmError::Instantiation("failed to decode \"Core_version\" result".into()) ) @@ -367,9 +367,7 @@ fn decode_runtime_apis(apis: &[u8]) -> Result, WasmError> { <[u8; RUNTIME_API_INFO_SIZE]>::try_from(chunk) .map(sp_api::deserialize_runtime_api_info) .map_err(|_| { - WasmError::Other(format!( - "a clipped runtime api info declaration" - )) + WasmError::Other("a clipped runtime api info declaration".to_owned()) }) }) .collect::, WasmError>>() @@ -383,15 +381,15 @@ fn decode_runtime_apis(apis: &[u8]) -> Result, WasmError> { pub fn read_embedded_version( blob: &RuntimeBlob, ) -> Result, WasmError> { - if let Some(version_section) = blob.custom_section_contents("runtime_version") { + if let Some(mut version_section) = blob.custom_section_contents("runtime_version") { // We do not use `decode_version` here because the runtime_version section is not supposed // to ever contain a legacy version. Apart from that `decode_version` relies on presence // of a special API in the `apis` field to treat the input as a non-legacy version. However // the structure found in the `runtime_version` always contain an empty `apis` field. Therefore - // the version read will be mistakingly treated as an legacy one. - let mut decoded_version = sp_api::RuntimeVersion::decode(&mut &version_section[..]) + // the version read will be mistakenly treated as an legacy one. + let mut decoded_version = sp_api::RuntimeVersion::decode(&mut version_section) .map_err(|_| - WasmError::Instantiation("failed to decode verison section".into()) + WasmError::Instantiation("failed to decode version section".into()) )?; // Don't stop on this and check if there is a special section that encodes all runtime APIs. diff --git a/client/executor/wasmi/src/lib.rs b/client/executor/wasmi/src/lib.rs index 953c5e5178a61..d4c9f4dc2e806 100644 --- a/client/executor/wasmi/src/lib.rs +++ b/client/executor/wasmi/src/lib.rs @@ -185,7 +185,7 @@ impl<'a> Sandbox for FunctionExecutor<'a> { &mut self, instance_id: u32, export_name: &str, - args: &[u8], + mut args: &[u8], return_val: Pointer, return_val_len: WordSize, state: u32, @@ -193,7 +193,7 @@ impl<'a> Sandbox for FunctionExecutor<'a> { trace!(target: "sp-sandbox", "invoke, instance_idx={}", instance_id); // Deserialize arguments and convert them into wasmi types. - let args = Vec::::decode(&mut &args[..]) + let args = Vec::::decode(&mut args) .map_err(|_| "Can't decode serialized arguments for the invocation")? 
.into_iter() .map(Into::into) diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 12cbf09e86507..6fb25df3d02a5 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -29,9 +29,6 @@ use sp_std::vec::Vec; -#[cfg(feature = "std")] -use sp_std::ops::Deref; - #[cfg(feature = "std")] use tracing; @@ -990,7 +987,7 @@ pub trait Offchain { .local_storage_compare_and_set( kind, key, - old_value.as_ref().map(|v| v.deref()), + old_value.as_deref(), new_value, ) } diff --git a/primitives/runtime/src/generic/mod.rs b/primitives/runtime/src/generic/mod.rs index f5087eccab080..c4b28a06c901f 100644 --- a/primitives/runtime/src/generic/mod.rs +++ b/primitives/runtime/src/generic/mod.rs @@ -44,7 +44,7 @@ fn encode_with_vec_prefix)>(encoder: F) -> Vec let size = ::sp_std::mem::size_of::(); let reserve = match size { 0..=0b00111111 => 1, - 0..=0b00111111_11111111 => 2, + 0b01000000..=0b00111111_11111111 => 2, _ => 4, }; let mut v = Vec::with_capacity(reserve + size); diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 9b99537130364..5b1f901a3ea96 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -207,7 +207,7 @@ pub trait Backend: sp_std::fmt::Debug { } } let (root, parent_txs) = self.storage_root(delta - .map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..]))) + .map(|(k, v)| (k, v.as_ref().map(|v| &v[..]))) .chain( child_roots .iter() diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index 1e0fc5c4d6c82..38d1ab714e7f8 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -279,22 +279,22 @@ fn prepare_digest_input<'a, H, Number>( trie_root, ); - trie_storage.for_key_values_with_prefix(&child_prefix, |key, value| - if let Ok(InputKey::ChildIndex::(trie_key)) = Decode::decode(&mut &key[..]) { - if let Ok(value) = >::decode(&mut &value[..]) { + trie_storage.for_key_values_with_prefix(&child_prefix, |mut key, mut value| + if let Ok(InputKey::ChildIndex::(trie_key)) = Decode::decode(&mut key) { + if let Ok(value) = >::decode(&mut value) { let mut trie_root = ::Out::default(); trie_root.as_mut().copy_from_slice(&value[..]); children_roots.insert(trie_key.storage_key, trie_root); } }); - trie_storage.for_keys_with_prefix(&extrinsic_prefix, |key| - if let Ok(InputKey::ExtrinsicIndex::(trie_key)) = Decode::decode(&mut &key[..]) { + trie_storage.for_keys_with_prefix(&extrinsic_prefix, |mut key| + if let Ok(InputKey::ExtrinsicIndex::(trie_key)) = Decode::decode(&mut key) { insert_to_map(&mut map, trie_key.key); }); - trie_storage.for_keys_with_prefix(&digest_prefix, |key| - if let Ok(InputKey::DigestIndex::(trie_key)) = Decode::decode(&mut &key[..]) { + trie_storage.for_keys_with_prefix(&digest_prefix, |mut key| + if let Ok(InputKey::DigestIndex::(trie_key)) = Decode::decode(&mut key) { insert_to_map(&mut map, trie_key.key); }); } @@ -310,13 +310,13 @@ fn prepare_digest_input<'a, H, Number>( crate::changes_trie::TrieBackendStorageAdapter(storage), trie_root, ); - trie_storage.for_keys_with_prefix(&extrinsic_prefix, |key| - if let Ok(InputKey::ExtrinsicIndex::(trie_key)) = Decode::decode(&mut &key[..]) { + trie_storage.for_keys_with_prefix(&extrinsic_prefix, |mut key| + if let Ok(InputKey::ExtrinsicIndex::(trie_key)) = Decode::decode(&mut key) { insert_to_map(&mut map, trie_key.key); }); - trie_storage.for_keys_with_prefix(&digest_prefix, |key| - if let 
Ok(InputKey::DigestIndex::(trie_key)) = Decode::decode(&mut &key[..]) { + trie_storage.for_keys_with_prefix(&digest_prefix, |mut key| + if let Ok(InputKey::DigestIndex::(trie_key)) = Decode::decode(&mut key) { insert_to_map(&mut map, trie_key.key); }); } diff --git a/primitives/state-machine/src/changes_trie/prune.rs b/primitives/state-machine/src/changes_trie/prune.rs index a741b814a5c70..754e3893f966f 100644 --- a/primitives/state-machine/src/changes_trie/prune.rs +++ b/primitives/state-machine/src/changes_trie/prune.rs @@ -66,9 +66,9 @@ pub fn prune( ); let child_prefix = ChildIndex::key_neutral_prefix(block.clone()); let mut children_roots = Vec::new(); - trie_storage.for_key_values_with_prefix(&child_prefix, |key, value| { - if let Ok(InputKey::ChildIndex::(_trie_key)) = Decode::decode(&mut &key[..]) { - if let Ok(value) = >::decode(&mut &value[..]) { + trie_storage.for_key_values_with_prefix(&child_prefix, |mut key, mut value| { + if let Ok(InputKey::ChildIndex::(_trie_key)) = Decode::decode(&mut key) { + if let Ok(value) = >::decode(&mut value) { let mut trie_root = ::Out::default(); trie_root.as_mut().copy_from_slice(&value[..]); children_roots.push(trie_root); diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index e66664647d9d8..d1d636cb65619 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -637,7 +637,7 @@ where } #[cfg(feature = "std")] - fn storage_changes_root(&mut self, parent_hash: &[u8]) -> Result>, ()> { + fn storage_changes_root(&mut self, mut parent_hash: &[u8]) -> Result>, ()> { let _guard = guard(); if let Some(ref root) = self.storage_transaction_cache.changes_trie_transaction_storage_root { trace!( @@ -653,7 +653,7 @@ where let root = self.overlay.changes_trie_root( self.backend, self.changes_trie_state.as_ref(), - Decode::decode(&mut &parent_hash[..]).map_err(|e| + Decode::decode(&mut parent_hash).map_err(|e| trace!( target: "state", "Failed to decode changes root parent hash: {}", diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index f815d2af44ad7..4cfe3623812c1 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -211,7 +211,7 @@ pub fn read_trie_value, key: &[u8] ) -> Result>, Box>> { - Ok(TrieDB::::new(&*db, root)?.get(key).map(|x| x.map(|val| val.to_vec()))?) + TrieDB::::new(&*db, root)?.get(key).map(|x| x.map(|val| val.to_vec())) } /// Read a value from the trie with given Query. @@ -225,7 +225,7 @@ pub fn read_trie_value_with< key: &[u8], query: Q ) -> Result>, Box>> { - Ok(TrieDB::::new(&*db, root)?.get_with(key, query).map(|x| x.map(|val| val.to_vec()))?) + TrieDB::::new(&*db, root)?.get_with(key, query).map(|x| x.map(|val| val.to_vec())) } /// Determine the empty trie root. @@ -317,7 +317,7 @@ pub fn read_child_trie_value( root.as_mut().copy_from_slice(root_slice); let db = KeySpacedDB::new(&*db, keyspace); - Ok(TrieDB::::new(&db, &root)?.get(key).map(|x| x.map(|val| val.to_vec()))?) + TrieDB::::new(&db, &root)?.get(key).map(|x| x.map(|val| val.to_vec())) } /// Read a value from the child trie with given query. @@ -336,7 +336,7 @@ pub fn read_child_trie_value_with::new(&db, &root)?.get_with(key, query).map(|x| x.map(|val| val.to_vec()))?) 
+ TrieDB::::new(&db, &root)?.get_with(key, query).map(|x| x.map(|val| val.to_vec())) } /// `HashDB` implementation that append a encoded prefix (unique id bytes) in addition to the diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index 0c923ff024c55..296f03972c795 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -259,7 +259,7 @@ fn partial_encode(partial: Partial, node_kind: NodeKind) -> Vec { if number_nibble_encoded > 0 { output.push(nibble_ops::pad_right((partial.0).1)); } - output.extend_from_slice(&partial.1[..]); + output.extend_from_slice(partial.1); output } @@ -272,8 +272,8 @@ const BITMAP_LENGTH: usize = 2; pub(crate) struct Bitmap(u16); impl Bitmap { - pub fn decode(data: &[u8]) -> Result { - Ok(Bitmap(u16::decode(&mut &data[..])?)) + pub fn decode(mut data: &[u8]) -> Result { + Ok(Bitmap(u16::decode(&mut data)?)) } pub fn value_at(&self, i: usize) -> bool { From 01ff4cef6626448998a3bcbc5be401dc15a394cf Mon Sep 17 00:00:00 2001 From: Squirrel Date: Wed, 23 Jun 2021 13:41:46 +0100 Subject: [PATCH 37/67] Result> rather than Option> (#9119) * Clearer API to code against. --- .../src/unsigned.rs | 40 +++++----- frame/example-offchain-worker/src/lib.rs | 16 ++-- frame/im-online/src/lib.rs | 12 ++- frame/session/src/historical/offchain.rs | 22 ++--- primitives/runtime/src/offchain/storage.rs | 80 +++++++++++++------ .../runtime/src/offchain/storage_lock.rs | 35 ++++---- primitives/trie/Cargo.toml | 2 +- 7 files changed, 121 insertions(+), 86 deletions(-) diff --git a/frame/election-provider-multi-phase/src/unsigned.rs b/frame/election-provider-multi-phase/src/unsigned.rs index 78726c542078c..543883fc035c5 100644 --- a/frame/election-provider-multi-phase/src/unsigned.rs +++ b/frame/election-provider-multi-phase/src/unsigned.rs @@ -29,7 +29,10 @@ use sp_npos_elections::{ CompactSolution, ElectionResult, assignment_ratio_to_staked_normalized, assignment_staked_to_ratio_normalized, is_score_better, seq_phragmen, }; -use sp_runtime::{offchain::storage::StorageValueRef, traits::TrailingZeroInput, SaturatedConversion}; +use sp_runtime::{ + offchain::storage::{MutateStorageError, StorageValueRef}, + traits::TrailingZeroInput, SaturatedConversion +}; use sp_std::{cmp::Ordering, convert::TryFrom, vec::Vec}; /// Storage key used to store the last block number at which offchain worker ran. @@ -98,9 +101,9 @@ fn save_solution(call: &Call) -> Result<(), MinerError> { log!(debug, "saving a call to the offchain storage."); let storage = StorageValueRef::persistent(&OFFCHAIN_CACHED_CALL); match storage.mutate::<_, (), _>(|_| Ok(call.clone())) { - Ok(Ok(_)) => Ok(()), - Ok(Err(_)) => Err(MinerError::FailedToStoreSolution), - Err(_) => { + Ok(_) => Ok(()), + Err(MutateStorageError::ConcurrentModification(_)) => Err(MinerError::FailedToStoreSolution), + Err(MutateStorageError::ValueFunctionFailed(_)) => { // this branch should be unreachable according to the definition of // `StorageValueRef::mutate`: that function should only ever `Err` if the closure we // pass it returns an error. 
however, for safety in case the definition changes, we do @@ -114,6 +117,7 @@ fn save_solution(call: &Call) -> Result<(), MinerError> { fn restore_solution() -> Result, MinerError> { StorageValueRef::persistent(&OFFCHAIN_CACHED_CALL) .get() + .ok() .flatten() .ok_or(MinerError::NoStoredSolution) } @@ -135,12 +139,9 @@ fn clear_offchain_repeat_frequency() { } /// `true` when OCW storage contains a solution -/// -/// More precise than `restore_solution::().is_ok()`; that invocation will return `false` -/// if a solution exists but cannot be decoded, whereas this just checks whether an item is present. #[cfg(test)] fn ocw_solution_exists() -> bool { - StorageValueRef::persistent(&OFFCHAIN_CACHED_CALL).get::>().is_some() + matches!(StorageValueRef::persistent(&OFFCHAIN_CACHED_CALL).get::>(), Ok(Some(_))) } impl Pallet { @@ -584,13 +585,13 @@ impl Pallet { let last_block = StorageValueRef::persistent(&OFFCHAIN_LAST_BLOCK); let mutate_stat = last_block.mutate::<_, &'static str, _>( - |maybe_head: Option>| { + |maybe_head: Result, _>| { match maybe_head { - Some(Some(head)) if now < head => Err("fork."), - Some(Some(head)) if now >= head && now <= head + threshold => { + Ok(Some(head)) if now < head => Err("fork."), + Ok(Some(head)) if now >= head && now <= head + threshold => { Err("recently executed.") } - Some(Some(head)) if now > head + threshold => { + Ok(Some(head)) if now > head + threshold => { // we can run again now. Write the new head. Ok(now) } @@ -604,11 +605,12 @@ impl Pallet { match mutate_stat { // all good - Ok(Ok(_)) => Ok(()), + Ok(_) => Ok(()), // failed to write. - Ok(Err(_)) => Err(MinerError::Lock("failed to write to offchain db.")), + Err(MutateStorageError::ConcurrentModification(_)) => + Err(MinerError::Lock("failed to write to offchain db (concurrent modification).")), // fork etc. - Err(why) => Err(MinerError::Lock(why)), + Err(MutateStorageError::ValueFunctionFailed(why)) => Err(MinerError::Lock(why)), } } @@ -1117,15 +1119,15 @@ mod tests { assert!(MultiPhase::current_phase().is_unsigned()); // initially, the lock is not set. - assert!(guard.get::().is_none()); + assert!(guard.get::().unwrap().is_none()); // a successful a-z execution. MultiPhase::offchain_worker(25); assert_eq!(pool.read().transactions.len(), 1); // afterwards, the lock is not set either.. 
- assert!(guard.get::().is_none()); - assert_eq!(last_block.get::().unwrap().unwrap(), 25); + assert!(guard.get::().unwrap().is_none()); + assert_eq!(last_block.get::().unwrap(), Some(25)); }); } @@ -1280,7 +1282,7 @@ mod tests { // this ensures that when the resubmit window rolls around, we're ready to regenerate // from scratch if necessary let mut call_cache = StorageValueRef::persistent(&OFFCHAIN_CACHED_CALL); - assert!(matches!(call_cache.get::>(), Some(Some(_call)))); + assert!(matches!(call_cache.get::>(), Ok(Some(_call)))); call_cache.clear(); // attempts to resubmit the tx after the threshold has expired diff --git a/frame/example-offchain-worker/src/lib.rs b/frame/example-offchain-worker/src/lib.rs index 1ec2591f5ec6b..b7a766ad847b2 100644 --- a/frame/example-offchain-worker/src/lib.rs +++ b/frame/example-offchain-worker/src/lib.rs @@ -53,7 +53,7 @@ use frame_support::traits::Get; use sp_core::crypto::KeyTypeId; use sp_runtime::{ RuntimeDebug, - offchain::{http, Duration, storage::StorageValueRef}, + offchain::{http, Duration, storage::{MutateStorageError, StorageRetrievalError, StorageValueRef}}, traits::Zero, transaction_validity::{InvalidTransaction, ValidTransaction, TransactionValidity}, }; @@ -366,15 +366,11 @@ impl Pallet { // low-level method of local storage API, which means that only one worker // will be able to "acquire a lock" and send a transaction if multiple workers // happen to be executed concurrently. - let res = val.mutate(|last_send: Option>| { - // We match on the value decoded from the storage. The first `Option` - // indicates if the value was present in the storage at all, - // the second (inner) `Option` indicates if the value was succesfuly - // decoded to expected type (`T::BlockNumber` in our case). + let res = val.mutate(|last_send: Result, StorageRetrievalError>| { match last_send { // If we already have a value in storage and the block number is recent enough // we avoid sending another transaction at this time. - Some(Some(block)) if block_number < block + T::GracePeriod::get() => { + Ok(Some(block)) if block_number < block + T::GracePeriod::get() => { Err(RECENTLY_SENT) }, // In every other case we attempt to acquire the lock and send a transaction. @@ -390,7 +386,7 @@ impl Pallet { // written to in the meantime. match res { // The value has been set correctly, which means we can safely send a transaction now. - Ok(Ok(block_number)) => { + Ok(block_number) => { // Depending if the block is even or odd we will send a `Signed` or `Unsigned` // transaction. // Note that this logic doesn't really guarantee that the transactions will be sent @@ -406,13 +402,13 @@ impl Pallet { else { TransactionType::Raw } }, // We are in the grace period, we should not send a transaction this time. - Err(RECENTLY_SENT) => TransactionType::None, + Err(MutateStorageError::ValueFunctionFailed(RECENTLY_SENT)) => TransactionType::None, // We wanted to send a transaction, but failed to write the block number (acquire a // lock). This indicates that another offchain worker that was running concurrently // most likely executed the same logic and succeeded at writing to storage. // Thus we don't really want to send the transaction, knowing that the other run // already did. 
- Ok(Err(_)) => TransactionType::None, + Err(MutateStorageError::ConcurrentModification(_)) => TransactionType::None, } } diff --git a/frame/im-online/src/lib.rs b/frame/im-online/src/lib.rs index 318e3d2de3ad2..3df5df7bb4d74 100644 --- a/frame/im-online/src/lib.rs +++ b/frame/im-online/src/lib.rs @@ -80,7 +80,7 @@ use sp_core::offchain::OpaqueNetworkState; use sp_std::prelude::*; use sp_std::convert::TryInto; use sp_runtime::{ - offchain::storage::StorageValueRef, + offchain::storage::{MutateStorageError, StorageRetrievalError, StorageValueRef}, traits::{AtLeast32BitUnsigned, Convert, Saturating, TrailingZeroInput}, Perbill, Permill, PerThing, RuntimeDebug, SaturatedConversion, }; @@ -719,14 +719,15 @@ impl Pallet { key }; let storage = StorageValueRef::persistent(&key); - let res = storage.mutate(|status: Option>>| { + let res = storage.mutate( + |status: Result>, StorageRetrievalError>| { // Check if there is already a lock for that particular block. // This means that the heartbeat has already been sent, and we are just waiting // for it to be included. However if it doesn't get included for INCLUDE_THRESHOLD // we will re-send it. match status { // we are still waiting for inclusion. - Some(Some(status)) if status.is_recent(session_index, now) => { + Ok(Some(status)) if status.is_recent(session_index, now) => { Err(OffchainErr::WaitingForInclusion(status.sent_at)) }, // attempt to set new status @@ -735,7 +736,10 @@ impl Pallet { sent_at: now, }), } - })?; + }); + if let Err(MutateStorageError::ValueFunctionFailed(err)) = res { + return Err(err); + } let mut new_status = res.map_err(|_| OffchainErr::FailedToAcquireLock)?; diff --git a/frame/session/src/historical/offchain.rs b/frame/session/src/historical/offchain.rs index f675d878c1e28..68cc78029f12c 100644 --- a/frame/session/src/historical/offchain.rs +++ b/frame/session/src/historical/offchain.rs @@ -25,7 +25,10 @@ //! This is used in conjunction with [`ProvingTrie`](super::ProvingTrie) and //! the off-chain indexing API. 
-use sp_runtime::{offchain::storage::StorageValueRef, KeyTypeId}; +use sp_runtime::{ + offchain::storage::{MutateStorageError, StorageRetrievalError, StorageValueRef}, + KeyTypeId +}; use sp_session::MembershipProof; use super::super::{Pallet as SessionModule, SessionIndex}; @@ -49,6 +52,7 @@ impl ValidatorSet { let derived_key = shared::derive_key(shared::PREFIX, session_index); StorageValueRef::persistent(derived_key.as_ref()) .get::>() + .ok() .flatten() .map(|validator_set| Self { validator_set }) } @@ -100,19 +104,19 @@ pub fn prove_session_membership>( pub fn prune_older_than(first_to_keep: SessionIndex) { let derived_key = shared::LAST_PRUNE.to_vec(); let entry = StorageValueRef::persistent(derived_key.as_ref()); - match entry.mutate(|current: Option>| -> Result<_, ()> { + match entry.mutate(|current: Result, StorageRetrievalError>| -> Result<_, ()> { match current { - Some(Some(current)) if current < first_to_keep => Ok(first_to_keep), + Ok(Some(current)) if current < first_to_keep => Ok(first_to_keep), // do not move the cursor, if the new one would be behind ours - Some(Some(current)) => Ok(current), - None => Ok(first_to_keep), + Ok(Some(current)) => Ok(current), + Ok(None) => Ok(first_to_keep), // if the storage contains undecodable data, overwrite with current anyways // which might leak some entries being never purged, but that is acceptable // in this context - Some(None) => Ok(first_to_keep), + Err(_) => Ok(first_to_keep), } }) { - Ok(Ok(new_value)) => { + Ok(new_value) => { // on a re-org this is not necessarily true, with the above they might be equal if new_value < first_to_keep { for session_index in new_value..first_to_keep { @@ -121,8 +125,8 @@ pub fn prune_older_than(first_to_keep: SessionIndex) { } } } - Ok(Err(_)) => {} // failed to store the value calculated with the given closure - Err(_) => {} // failed to calculate the value to store with the given closure + Err(MutateStorageError::ConcurrentModification(_)) => {} + Err(MutateStorageError::ValueFunctionFailed(_)) => {} } } diff --git a/primitives/runtime/src/offchain/storage.rs b/primitives/runtime/src/offchain/storage.rs index 794ae4255a330..c6ed10c5be26f 100644 --- a/primitives/runtime/src/offchain/storage.rs +++ b/primitives/runtime/src/offchain/storage.rs @@ -28,6 +28,25 @@ pub struct StorageValueRef<'a> { kind: StorageKind, } +/// Reason for not being able to provide the stored value +#[derive(Debug, PartialEq, Eq)] +pub enum StorageRetrievalError { + /// Value found but undecodable + Undecodable, +} + +/// Possible errors when mutating a storage value. +#[derive(Debug, PartialEq, Eq)] +pub enum MutateStorageError { + /// The underlying db failed to update due to a concurrent modification. + /// Contains the new value that was not stored. + ConcurrentModification(T), + /// The function given to us to create the value to be stored failed. + /// May be used to signal that having looked at the existing value, + /// they don't want to mutate it. + ValueFunctionFailed(E) +} + impl<'a> StorageValueRef<'a> { /// Create a new reference to a value in the persistent local storage. pub fn persistent(key: &'a [u8]) -> Self { @@ -58,30 +77,40 @@ impl<'a> StorageValueRef<'a> { /// Retrieve & decode the value from storage. /// /// Note that if you want to do some checks based on the value - /// and write changes after that you should rather be using `mutate`. + /// and write changes after that, you should rather be using `mutate`. 
/// - /// The function returns `None` if the value was not found in storage, - /// otherwise a decoding of the value to requested type. - pub fn get(&self) -> Option> { + /// Returns the value if stored. + /// Returns an error if the value could not be decoded. + pub fn get(&self) -> Result, StorageRetrievalError> { sp_io::offchain::local_storage_get(self.kind, self.key) - .map(|val| T::decode(&mut &*val).ok()) + .map(|val| T::decode(&mut &*val) + .map_err(|_| StorageRetrievalError::Undecodable)) + .transpose() } - /// Retrieve & decode the value and set it to a new one atomically. + /// Retrieve & decode the current value and set it to a new value atomically. + /// + /// Function `mutate_val` takes as input the current value and should + /// return a new value that is attempted to be written to storage. /// - /// Function `f` should return a new value that we should attempt to write to storage. /// This function returns: - /// 1. `Ok(Ok(T))` in case the value has been successfully set. - /// 2. `Ok(Err(T))` in case the value was calculated by the passed closure `f`, - /// but it could not be stored. - /// 3. `Err(_)` in case `f` returns an error. - pub fn mutate(&self, f: F) -> Result, E> where + /// 1. `Ok(T)` in case the value has been successfully set. + /// 2. `Err(MutateStorageError::ConcurrentModification(T))` in case the value was calculated + /// by the passed closure `mutate_val`, but it could not be stored. + /// 3. `Err(MutateStorageError::ValueFunctionFailed(_))` in case `mutate_val` returns an error. + pub fn mutate(&self, mutate_val: F) -> Result> where T: codec::Codec, - F: FnOnce(Option>) -> Result + F: FnOnce(Result, StorageRetrievalError>) -> Result { let value = sp_io::offchain::local_storage_get(self.kind, self.key); - let decoded = value.as_deref().map(|mut v| T::decode(&mut v).ok()); - let val = f(decoded)?; + let decoded = value.as_deref() + .map(|mut bytes| { + T::decode(&mut bytes) + .map_err(|_| StorageRetrievalError::Undecodable) + }).transpose(); + + let val = mutate_val(decoded).map_err(|err| MutateStorageError::ValueFunctionFailed(err))?; + let set = val.using_encoded(|new_val| { sp_io::offchain::local_storage_compare_and_set( self.kind, @@ -90,11 +119,10 @@ impl<'a> StorageValueRef<'a> { new_val, ) }); - if set { - Ok(Ok(val)) + Ok(val) } else { - Ok(Err(val)) + Err(MutateStorageError::ConcurrentModification(val)) } } } @@ -117,12 +145,12 @@ mod tests { t.execute_with(|| { let val = StorageValue::persistent(b"testval"); - assert_eq!(val.get::(), None); + assert_eq!(val.get::(), Ok(None)); val.set(&15_u32); - assert_eq!(val.get::(), Some(Some(15_u32))); - assert_eq!(val.get::>(), Some(None)); + assert_eq!(val.get::(), Ok(Some(15_u32))); + assert_eq!(val.get::>(), Err(StorageRetrievalError::Undecodable)); assert_eq!( state.read().persistent_storage.get(b"testval"), Some(vec![15_u8, 0, 0, 0]) @@ -140,12 +168,12 @@ mod tests { let val = StorageValue::persistent(b"testval"); let result = val.mutate::(|val| { - assert_eq!(val, None); + assert_eq!(val, Ok(None)); Ok(16_u32) }); - assert_eq!(result, Ok(Ok(16_u32))); - assert_eq!(val.get::(), Some(Some(16_u32))); + assert_eq!(result, Ok(16_u32)); + assert_eq!(val.get::(), Ok(Some(16_u32))); assert_eq!( state.read().persistent_storage.get(b"testval"), Some(vec![16_u8, 0, 0, 0]) @@ -153,10 +181,10 @@ mod tests { // mutate again, but this time early-exit. 
let res = val.mutate::(|val| { - assert_eq!(val, Some(Some(16_u32))); + assert_eq!(val, Ok(Some(16_u32))); Err(()) }); - assert_eq!(res, Err(())); + assert_eq!(res, Err(MutateStorageError::ValueFunctionFailed(()))); }) } } diff --git a/primitives/runtime/src/offchain/storage_lock.rs b/primitives/runtime/src/offchain/storage_lock.rs index c3e63a7924d7b..3189a814e06fd 100644 --- a/primitives/runtime/src/offchain/storage_lock.rs +++ b/primitives/runtime/src/offchain/storage_lock.rs @@ -61,7 +61,7 @@ //! } //! ``` -use crate::offchain::storage::StorageValueRef; +use crate::offchain::storage::{StorageRetrievalError, MutateStorageError, StorageValueRef}; use crate::traits::AtLeast32BitUnsigned; use codec::{Codec, Decode, Encode}; use sp_core::offchain::{Duration, Timestamp}; @@ -279,19 +279,20 @@ impl<'a, L: Lockable> StorageLock<'a, L> { /// Extend active lock's deadline fn extend_active_lock(&mut self) -> Result<::Deadline, ()> { - let res = self.value_ref.mutate(|s: Option>| -> Result<::Deadline, ()> { + let res = self.value_ref.mutate( + |s: Result, StorageRetrievalError>| -> Result<::Deadline, ()> { match s { // lock is present and is still active, extend the lock. - Some(Some(deadline)) if !::has_expired(&deadline) => + Ok(Some(deadline)) if !::has_expired(&deadline) => Ok(self.lockable.deadline()), // other cases _ => Err(()), } }); match res { - Ok(Ok(deadline)) => Ok(deadline), - Ok(Err(_)) => Err(()), - Err(e) => Err(e), + Ok(deadline) => Ok(deadline), + Err(MutateStorageError::ConcurrentModification(_)) => Err(()), + Err(MutateStorageError::ValueFunctionFailed(e)) => Err(e), } } @@ -301,25 +302,25 @@ impl<'a, L: Lockable> StorageLock<'a, L> { new_deadline: L::Deadline, ) -> Result<(), ::Deadline> { let res = self.value_ref.mutate( - |s: Option>| + |s: Result, StorageRetrievalError>| -> Result<::Deadline, ::Deadline> { match s { // no lock set, we can safely acquire it - None => Ok(new_deadline), + Ok(None) => Ok(new_deadline), // write was good, but read failed - Some(None) => Ok(new_deadline), + Err(_) => Ok(new_deadline), // lock is set, but it is expired. We can re-acquire it. 
- Some(Some(deadline)) if ::has_expired(&deadline) => + Ok(Some(deadline)) if ::has_expired(&deadline) => Ok(new_deadline), // lock is present and is still active - Some(Some(deadline)) => Err(deadline), + Ok(Some(deadline)) => Err(deadline), } }, ); match res { - Ok(Ok(_)) => Ok(()), - Ok(Err(deadline)) => Err(deadline), - Err(e) => Err(e), + Ok(_) => Ok(()), + Err(MutateStorageError::ConcurrentModification(deadline)) => Err(deadline), + Err(MutateStorageError::ValueFunctionFailed(e)) => Err(e), } } @@ -488,14 +489,14 @@ mod tests { val.set(&VAL_1); - assert_eq!(val.get::(), Some(Some(VAL_1))); + assert_eq!(val.get::(), Ok(Some(VAL_1))); } { let _guard = lock.lock(); val.set(&VAL_2); - assert_eq!(val.get::(), Some(Some(VAL_2))); + assert_eq!(val.get::(), Ok(Some(VAL_2))); } }); // lock must have been cleared at this point @@ -518,7 +519,7 @@ mod tests { val.set(&VAL_1); - assert_eq!(val.get::(), Some(Some(VAL_1))); + assert_eq!(val.get::(), Ok(Some(VAL_1))); guard.forget(); }); diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index 9584ae678d409..bf91fff31b8b6 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -21,7 +21,7 @@ harness = false codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } sp-std = { version = "3.0.0", default-features = false, path = "../std" } hash-db = { version = "0.15.2", default-features = false } -trie-db = { version = "0.22.3", default-features = false } +trie-db = { version = "0.22.5", default-features = false } trie-root = { version = "0.16.0", default-features = false } memory-db = { version = "0.26.0", default-features = false } sp-core = { version = "3.0.0", default-features = false, path = "../core" } From 7170fdadba633b4ed596e7c7781a1ba7db483896 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 23 Jun 2021 17:17:10 +0200 Subject: [PATCH 38/67] Fix alert about delay between best and finalized block (#9150) * Fix alert about delay between best and finalized block * Revert debugging changes --- .maintain/monitoring/alerting-rules/alerting-rules.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.maintain/monitoring/alerting-rules/alerting-rules.yaml b/.maintain/monitoring/alerting-rules/alerting-rules.yaml index a3aa1b145b343..7a69cba66c3f3 100644 --- a/.maintain/monitoring/alerting-rules/alerting-rules.yaml +++ b/.maintain/monitoring/alerting-rules/alerting-rules.yaml @@ -47,8 +47,8 @@ groups: # Under the assumption of an average block production of 6 seconds, # "best" and "finalized" being more than 10 blocks apart would imply # more than a 1 minute delay between block production and finalization. - expr: '(polkadot_block_height_number{status="best"} - ignoring(status) - polkadot_block_height_number{status="finalized"}) > 10' + expr: '(polkadot_block_height{status="best"} - ignoring(status) + polkadot_block_height{status="finalized"}) > 10' for: 8m labels: severity: critical From 550d64cc7e233edf815c215b5329e1171cd59d1d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 24 Jun 2021 00:10:44 +0200 Subject: [PATCH 39/67] Transaction Pool docs (#9056) * Add transaction pool docs. * Extra docs. * Apply suggestions from code review Co-authored-by: Pierre Krieger * Expand on some review comments. 
* Update README.md Fixed typos / spellings Co-authored-by: Pierre Krieger Co-authored-by: Squirrel --- .editorconfig | 5 + client/transaction-pool/README.md | 363 +++++++++++++++++++++++++++++- 2 files changed, 367 insertions(+), 1 deletion(-) diff --git a/.editorconfig b/.editorconfig index 2b40ec32fac3e..50cc9dacd7e42 100644 --- a/.editorconfig +++ b/.editorconfig @@ -9,6 +9,11 @@ trim_trailing_whitespace=true max_line_length=100 insert_final_newline=true +[*.md] +max_line_length=80 +indent_style=space +indent_size=2 + [*.yml] indent_style=space indent_size=2 diff --git a/client/transaction-pool/README.md b/client/transaction-pool/README.md index 15e4641c1f48d..28846fdbb38f6 100644 --- a/client/transaction-pool/README.md +++ b/client/transaction-pool/README.md @@ -1,3 +1,364 @@ Substrate transaction pool implementation. -License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file +License: GPL-3.0-or-later WITH Classpath-exception-2.0 + +# Problem Statement + +The transaction pool is responsible for maintaining a set of transactions that +possible to include by block authors in upcoming blocks. Transactions are received +either from networking (gossiped by other peers) or RPC (submitted locally). + +The main task of the pool is to prepare an ordered list of transactions for block +authorship module. The same list is useful for gossiping to other peers, but note +that it's not a hard requirement for the gossiped transactions to be exactly the +same (see implementation notes below). + +It's within block author incentives to have the transactions stored and ordered in +such a way to: + +1. Maximize block author's profits (value of the produced block) +2. Minimize block author's amount of work (time to produce block) + +In the case of FRAME the first property is simply making sure that the fee per weight +unit is the highest (high `tip` values), the second is about avoiding feeding +transactions that cannot be part of the next block (they are invalid, obsolete, etc). + +From the transaction pool PoV, transactions are simply opaque blob of bytes, +it's required to query the runtime (via `TaggedTransactionQueue` Runtime API) to +verify transaction's mere correctness and extract any information about how the +transaction relates to other transactions in the pool and current on-chain state. +Only valid transactions should be stored in the pool. + +Each imported block can affect validity of transactions already in the pool. Block +authors expect from the pool to get most up to date information about transactions +that can be included in the block that they are going to build on top of the just +imported one. The process of ensuring this property is called *pruning*. During +pruning the pool should remove transactions which are considered invalid by the +runtime (queried at current best imported block). + +Since the blockchain is not always linear, forks need to be correctly handled by +the transaction pool as well. In case of a fork, some blocks are *retracted* +from the canonical chain, and some other blocks get *enacted* on top of some +common ancestor. The transactions from retrated blocks could simply be discarded, +but it's desirable to make sure they are still considered for inclusion in case they +are deemed valid by the runtime state at best, recently enacted block (fork the +chain re-organized to). + +Transaction pool should also offer a way of tracking transaction lifecycle in the +pool, it's broadcasting status, block inclusion, finality, etc. 
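+
+For intuition, here is a minimal, self-contained sketch of the ordering a block
+author ultimately wants from the pool (hypothetical `Candidate` type, not the
+actual `sc-transaction-pool` interface): only transactions with all of their
+requirements satisfied are considered, highest fee per weight unit first.
+
+```rust
+/// Illustrative only; the real pool works on opaque extrinsics and
+/// runtime-provided validity, not on a struct like this.
+struct Candidate {
+    tip: u64,
+    weight: u64,
+    requires_satisfied: bool,
+}
+
+fn block_author_order(mut pool: Vec<Candidate>) -> Vec<Candidate> {
+    // Only ready transactions (all `requires` tags satisfied) can go into a block.
+    pool.retain(|tx| tx.requires_satisfied);
+    // Highest tip-per-weight first; compare a.tip/a.weight with b.tip/b.weight by
+    // cross-multiplying, so we stay in integer arithmetic.
+    pool.sort_by(|a, b| {
+        (b.tip as u128 * a.weight as u128).cmp(&(a.tip as u128 * b.weight as u128))
+    });
+    pool
+}
+```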
+ +## Transaction Validity details + +Information retrieved from the the runtime are encapsulated in `TransactionValidity` +type. + +```rust +pub type TransactionValidity = Result; + +pub struct ValidTransaction { + pub requires: Vec, + pub provides: Vec, + pub priority: TransactionPriority, + pub longevity: TransactionLongevity, + pub propagate: bool, +} + +pub enum TransactionValidityError { + Invalid(/* details */), + Unknown(/* details */), +} +``` + +We will go through each of the parameter now to understand the requirements they +create for transaction ordering. + +The runtime is expected to return these values in a deterministic fashion. Calling +the API multiple times given exactly the same state must return same results. +Field-specific rules are described below. + +### `requires` / `provides` + +These two fields contain a set of `TransactionTag`s (opaque blobs) associated with +given transaction. Looking at these fields we can find dependencies between +transactions and their readiness for block inclusion. + +The `provides` set contains properties that will be *satisfied* in case the transaction +is successfully added to a block. `requires` contains properties that must be satisfied +**before** the transaction can be included to a block. + +Note that a transaction with empty `requires` set can be added to a block immediately, +there are no other transactions that it expects to be included before. + +For some given series of transactions the `provides` and `requires` fields will create +a (simple) directed acyclic graph. The *sources* in such graph, if they don't have +any extra `requires` tags (i.e. they have their all dependencies *satisfied*), should +be considered for block inclusion first. Multiple transactions that are ready for +block inclusion should be ordered by `priority` (see below). + +Note the process of including transactions to a block is basically building the graph, +then selecting "the best" source vertex (transaction) with all tags satisfied and +removing it from that graph. + +#### Examples + +- A transaction in Bitcoin-like chain will `provide` generated UTXOs and will `require` + UTXOs it is still awaiting for (note that it's not necessarily all require inputs, + since some of them might already be spendable (i.e. the UTXO is in state)) + +- A transaction in account-based chain will `provide` a `(sender, transaction_index/nonce)` + (as one tag), and will `require` `(sender, nonce - 1)` in case + `on_chain_nonce < nonce - 1`. + +#### Rules & caveats + +- `provides` must not be empty +- transactions with an overlap in `provides` tags are mutually exclusive +- checking validity of transaction that `requires` tag `A` after including + transaction that provides that tag must not return `A` in `requires` again +- runtime developers should avoid re-using `provides` tag (i.e. it should be unique) +- there should be no cycles in transaction dependencies +- caveat: on-chain state conditions may render transaction invalid despite no + `requires` tags +- caveat: on-chain state conditions may render transaction valid despite some + `requires` tags +- caveat: including transactions to a chain might make them valid again right away + (for instance UTXO transaction gets in, but since we don't store spent outputs + it will be valid again, awaiting the same inputs/tags to be satisfied) + +### `priority` + +Transaction priority describes importance of the transaction relative to other transactions +in the pool. 
Block authors can expect benefiting from including such transactions +before others. + +Note that we can't simply order transactions in the pool by `priority`, cause first +we need to make sure that all of the transaction requirements are satisfied (see +`requires/provides` section). However if we consider a set of transactions +which all have their requirements (tags) satisfied, the block author should be +choosing the ones with highest priority to include to the next block first. + +`priority` can be any number between `0` (lowest inclusion priority) to `u64::MAX` +(highest inclusion priority). + +#### Rules & caveats + +- `priority` of transaction may change over time +- on-chain conditions may affect `priority` +- Given two transactions with overlapping `provides` tags, the one with higher + `priority` should be preferred. However we can also look at the total priority + of a subtree rooted at that transaction and compare that instead (i.e. even though + the transaction itself has lower `priority` it "unlocks" other high priority transactions). + +### `longevity` + +Longevity describes how long (in blocks) the transaction is expected to be +valid. This parameter only gives a hint to the transaction pool how long +current transaction may still be valid. Note that it does not guarantee +the transaction is valid all that time though. + +#### Rules & caveats + +- `longevity` of transaction may change over time +- on-chain conditions may affect `longevity` +- After `longevity` lapses the transaction may still be valid + +### `propagate` + +This parameter instructs the pool propagate/gossip a transaction to node peers. +By default this should be `true`, however in some cases it might be undesirable +to propagate transactions further. Examples might include heavy transactions +produced by block authors in offchain workers (DoS) or risking being front +runned by someone else after finding some non trivial solution or equivocation, +etc. + +### 'TransactionSource` + +To make it possible for the runtime to distinguish if the transaction that is +being validated was received over the network or submitted using local RPC or +maybe it's simply part of a block that is being imported, the transaction pool +should pass additional `TransactionSource` parameter to the validity function +runtime call. + +This can be used by runtime developers to quickly reject transactions that for +instance are not expected to be gossiped in the network. + + +### `Invalid` transaction + +In case the runtime returns an `Invalid` error it means the transaction cannot +be added to a block at all. Extracting the actual reason of invalidity gives +more details about the source. For instance `Stale` transaction just indicates +the transaction was already included in a block, while `BadProof` signifies +invalid signature. +Invalidity might also be temporary. In case of `ExhaustsResources` the +transaction does not fit to the current block, but it might be okay for the next +one. + +### `Unknown` transaction + +In case of `Unknown` validity, the runtime cannot determine if the transaction +is valid or not in current block. However this situation might be temporary, so +it is expected for the transaction to be retried in the future. + +# Implementation + +An ideal transaction pool should be storing only transactions that are considered +valid by the runtime at current best imported block. +After every block is imported, the pool should: + +1. Revalidate all transactions in the pool and remove the invalid ones. +1. 
Construct the transaction inclusion graph based on `provides/requires` tags. + Some transactions might not be reachable (have unsatisfied dependencies), + they should be just left out in the pool. +1. On block author request, the graph should be copied and transactions should + be removed one-by-one from the graph starting from the one with highest + priority and all conditions satisfied. + +With current gossip protocol, networking should propagate transactions in the +same order as block author would include them. Most likely it's fine if we +propagate transactions with cumulative weight not exceeding upcoming `N` +blocks (choosing `N` is subject to networking conditions and block times). + +Note that it's not a strict requirement though to propagate exactly the same +transactions that are prepared for block inclusion. Propagation is best +effort, especially for block authors and is not directly incentivised. +However the networking protocol might penalise peers that send invalid or +useless transactions so we should be nice to others. Also see below a proposal +to instead of gossiping everyting have other peers request transactions they +are interested in. + +Since the pool is expected to store more transactions than what can fit +to a single block. Validating the entire pool on every block might not be +feasible, so the actual implementation might need to take some shortcuts. + +## Suggestions & caveats + +1. The validity of transaction should not change significantly from block to + block. I.e. changes in validity should happen predicatably, e.g. `longevity` + decrements by 1, `priority` stays the same, `requires` changes if transaction + that provided a tag was included in block. `provides` does not change, etc. + +1. That means we don't have to revalidate every transaction after every block + import, but we need to take care of removing potentially stale transactions. + +1. Transactions with exactly the same bytes are most likely going to give the + same validity results. We can essentially treat them as identical. + +1. Watch out for re-organisations and re-importing transactions from retracted + blocks. + +1. In the past there were many issues found when running small networks with a + lot of re-orgs. Make sure that transactions are never lost. + +1. UTXO model is quite challenging. The transaction becomes valid right after + it's included in block, however it is waiting for exactly the same inputs to + be spent, so it will never really be included again. + +1. Note that in a non-ideal implementation the state of the pool will most + likely always be a bit off, i.e. some transactions might be still in the pool, + but they are invalid. The hard decision is about trade-offs you take. + +1. Note that import notification is not reliable - you might not receive a + notification about every imported block. + +## Potential implementation ideas + +1. Block authors remove transactions from the pool when they author a block. We + still store them around to re-import in case the block does not end up + canonical. This only works if the block is actively authoring blocks (also + see below). + +1. We don't prune, but rather remove a fixed amount of transactions from the front + of the pool (number based on average/max transactions per block from the + past) and re-validate them, reimporting the ones that are still valid. + +1. We periodically validate all transactions in the pool in batches. + +1. To minimize runtime calls, we introduce batch-verify call. 
Note it should reset + the state (overlay) after every verification. + +1. Consider leveraging finality. Maybe we could verify against latest finalised + block instead. With this the pool in different nodes can be more similar + which might help with gossiping (see set reconciliation). Note that finality + is not a strict requirement for a Substrate chain to have though. + +1. Perhaps we could avoid maintaining ready/future queues as currently, but + rather if transaction doesn't have all requirements satisfied by existing + transactions we attempt to re-import it in the future. + +1. Instead of maintaining a full pool with total ordering we attempt to maintain + a set of next (couple of) blocks. We could introduce batch-validate runtime + api method that pretty much attempts to simulate actual block inclusion of + a set of such transactions (without necessarily fully running/dispatching + them). Importing a transaction would consist of figuring out which next block + this transaction have a chance to be included in and then attempting to + either push it back or replace some of existing transactions. + +1. Perhaps we could use some immutable graph structure to easily add/remove + transactions. We need some traversal method that takes priority and + reachability into account. + +1. It was discussed in the past to use set reconciliation strategies instead of +simply broadcasting all/some transactions to all/selected peers. An Ethereum's +[EIP-2464](https://github.com/ethereum/EIPs/blob/5b9685bb9c7ba0f5f921e4d3f23504f7ef08d5b1/EIPS/eip-2464.md) +might be a good first approach to reduce transaction gossip. + +# Current implementation + +Current implementation of the pool is a result of experiences from Ethereum's +pool implementation, but also has some warts coming from the learning process of +Substrate's generic nature and light client support. + +The pool consists of basically two independent parts: + +1. The transaction pool itself. +2. Maintenance background task. + +The pool is split into `ready` pool and `future` pool. The latter contains +transactions that don't have their requirements satisfied, and the former holds +transactions that can be used to build a graph of dependencies. Note that the +graph is build ad-hoc during the traversal process (getting the `ready` +iterator). This makes the importing process cheaper (we don't need to find the +exact position in the queue or graph), but traversal process slower +(logarithmic). However most of the time we will only need the beginning of the +total ordering of transactions for block inclusion or network propagation, hence +the decision. + +The maintenance task is responsible for: + +1. Periodically revalidating pool's transactions (revalidation queue). +1. Handling block import notifications and doing pruning + re-importing of + transactions from retracted blocks. +1. Handling finality notifications and relaying that to transaction-specific + listeners. + +Additionally we maintain a list of recently included/rejected transactions +(`PoolRotator`) to quickly reject transactions that are unlikely to be valid +to limit number of runtime verification calls. + +Each time a transaction is imported, we first verify it's validity and later +find if the tags it `requires` can be satisfied by transactions already in +`ready` pool. In case the transaction is imported to the `ready` pool we +additionally *promote* transactions from `future` pool if the transaction +happened to fulfill their requirements. 
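+
+As a purely illustrative sketch (hypothetical `Tx`/`Pool` types, ignoring
+priority, size limits and banning), the tag mechanics of that import/promotion
+step look roughly like this:
+
+```rust
+use std::collections::HashSet;
+
+type Tag = Vec<u8>;
+
+struct Tx { requires: Vec<Tag>, provides: Vec<Tag> }
+
+struct Pool {
+    satisfied: HashSet<Tag>, // tags provided by transactions already in `ready`
+    ready: Vec<Tx>,
+    future: Vec<Tx>,
+}
+
+impl Pool {
+    fn import(&mut self, tx: Tx) {
+        if tx.requires.iter().all(|t| self.satisfied.contains(t)) {
+            self.satisfied.extend(tx.provides.iter().cloned());
+            self.ready.push(tx);
+            // A newly readied transaction may unlock transactions waiting in `future`.
+            self.promote_future();
+        } else {
+            self.future.push(tx);
+        }
+    }
+
+    fn promote_future(&mut self) {
+        loop {
+            // Find a `future` transaction whose requirements became satisfied.
+            let ready_idx = self.future.iter().position(|tx| {
+                tx.requires.iter().all(|t| self.satisfied.contains(t))
+            });
+            match ready_idx {
+                Some(i) => {
+                    let tx = self.future.swap_remove(i);
+                    self.satisfied.extend(tx.provides.iter().cloned());
+                    self.ready.push(tx);
+                }
+                None => break,
+            }
+        }
+    }
+}
+```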
+Note we need to cater for cases where transaction might replace a already +existing transaction in the pool. In such case we check the entire sub-tree of +transactions that we are about to replace, compare their cumulative priority to +determine which subtree to keep. + +After a block is imported we kick-off pruning procedure. We first attempt to +figure out what tags were satisfied by transaction in that block. For each block +transaction we either call into runtime to get it's `ValidTransaction` object, +or we check the pool if that transaction is already known to spare the runtime +call. From this we gather full set of `provides` tags and perform pruning of +`ready` pool based on that. Also we promote all transactions from `future` that +have their tags satisfied. + +In case we remove transactions that we are unsure if they were already included +in current block or some block in the past, it is being added to revalidation +queue and attempted to be re-imported by the background task in the future. + +Runtime calls to verify transactions are performed from a separate (limited) +thread pool to avoid interferring too much with other subsystems of the node. We +definitely don't want to have all cores validating network transactions, cause +all of these transactions need to be considered untrusted (potentially DoS). From 63ab27876dd5320760a55a8739505e3b4b511644 Mon Sep 17 00:00:00 2001 From: Squirrel Date: Thu, 24 Jun 2021 08:20:15 +0100 Subject: [PATCH 40/67] Fix to support u32::MAX (#9188) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Fix to support u32::MAX * Update primitives/runtime/src/random_number_generator.rs Co-authored-by: Andronik Ordian Co-authored-by: Bastian Köcher Co-authored-by: Andronik Ordian --- .../runtime/src/random_number_generator.rs | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/primitives/runtime/src/random_number_generator.rs b/primitives/runtime/src/random_number_generator.rs index a4d1a66370c19..41ca7c723e9c7 100644 --- a/primitives/runtime/src/random_number_generator.rs +++ b/primitives/runtime/src/random_number_generator.rs @@ -27,6 +27,8 @@ use crate::traits::{Hash, TrailingZeroInput}; /// /// It can be saved and later reloaded using the Codec traits. /// +/// (It is recommended to use the `rand_chacha` crate as an alternative to this where possible.) +/// /// Example: /// ``` /// use sp_runtime::traits::{Hash, BlakeTwo256}; @@ -63,7 +65,7 @@ impl RandomNumberGenerator { /// Returns a number at least zero, at most `max`. 
pub fn pick_u32(&mut self, max: u32) -> u32 { let needed = (4 - max.leading_zeros() / 8) as usize; - let top = ((1 << (needed as u64 * 8)) / ((max + 1) as u64) * ((max + 1) as u64) - 1) as u32; + let top = ((1 << (needed as u64 * 8)) / (max as u64 + 1) * (max as u64 + 1) - 1) as u32; loop { if self.offset() + needed > self.current.as_ref().len() { // rehash @@ -102,3 +104,15 @@ impl RandomNumberGenerator { } } } + +#[cfg(test)] +mod tests { + use super::RandomNumberGenerator; + use crate::traits::{Hash, BlakeTwo256}; + + #[test] + fn does_not_panic_on_max() { + let seed = BlakeTwo256::hash(b"Fourty-two"); + let _random = RandomNumberGenerator::::new(seed).pick_u32(u32::MAX); + } +} From 76cc00f8e76b24b3d4f5057992c1fa9dd31d8f1e Mon Sep 17 00:00:00 2001 From: Squirrel Date: Thu, 24 Jun 2021 11:53:49 +0100 Subject: [PATCH 41/67] Use MAX associated const (#9196) * Use MAX associated const --- bin/node/runtime/src/impls.rs | 2 +- client/consensus/aura/src/lib.rs | 2 +- .../src/communication/gossip.rs | 2 +- client/informant/src/display.rs | 2 +- .../notifications/upgrade/notifications.rs | 4 +- client/network/src/request_responses.rs | 4 +- client/network/src/service.rs | 8 +- client/network/src/service/out_events.rs | 4 +- client/network/src/service/tests.rs | 2 +- client/peerset/src/peersstate.rs | 4 +- client/peerset/tests/fuzz.rs | 2 +- client/rpc/src/chain/mod.rs | 2 +- client/service/src/chain_ops/import_blocks.rs | 2 +- client/service/test/src/client/mod.rs | 4 +- client/transaction-pool/graph/src/ready.rs | 4 +- frame/assets/src/tests.rs | 8 +- frame/babe/src/mock.rs | 2 +- frame/balances/src/tests.rs | 12 +-- frame/contracts/src/benchmarking/mod.rs | 2 +- frame/contracts/src/chain_extension.rs | 2 +- frame/contracts/src/wasm/runtime.rs | 8 +- frame/democracy/src/benchmarking.rs | 6 +- frame/democracy/src/tests/decoders.rs | 4 +- frame/democracy/src/tests/preimage.rs | 12 +-- frame/democracy/src/tests/public_proposals.rs | 18 ++--- frame/example/src/tests.rs | 2 +- frame/grandpa/src/mock.rs | 2 +- frame/staking/reward-curve/src/lib.rs | 4 +- frame/staking/reward-curve/src/log.rs | 2 +- frame/staking/src/benchmarking.rs | 14 ++-- frame/staking/src/testing_utils.rs | 2 +- frame/staking/src/tests.rs | 6 +- frame/support/src/traits/voting.rs | 8 +- frame/support/src/weights.rs | 2 +- .../tests/pallet_with_name_trait_is_valid.rs | 2 +- frame/system/src/lib.rs | 2 +- frame/transaction-payment/src/lib.rs | 6 +- frame/treasury/src/tests.rs | 4 +- primitives/allocator/src/freeing_bump.rs | 2 +- .../fuzzer/src/multiply_by_rational.rs | 2 +- primitives/arithmetic/src/biguint.rs | 6 +- primitives/arithmetic/src/fixed_point.rs | 74 +++++++++---------- primitives/arithmetic/src/lib.rs | 8 +- primitives/arithmetic/src/rational.rs | 6 +- primitives/core/src/hash.rs | 4 +- primitives/core/src/offchain/mod.rs | 2 +- primitives/core/src/uint.rs | 4 +- primitives/npos-elections/src/phragmen.rs | 2 +- primitives/npos-elections/src/phragmms.rs | 6 +- primitives/npos-elections/src/tests.rs | 24 +++--- .../runtime-interface/test-wasm/src/lib.rs | 4 +- primitives/runtime/src/curve.rs | 12 +-- primitives/runtime/src/generic/era.rs | 10 +-- primitives/runtime/src/generic/header.rs | 8 +- .../runtime/src/random_number_generator.rs | 2 +- primitives/trie/src/lib.rs | 2 +- 56 files changed, 178 insertions(+), 178 deletions(-) diff --git a/bin/node/runtime/src/impls.rs b/bin/node/runtime/src/impls.rs index 1d1488e2fae96..d3d0541b6ec0e 100644 --- a/bin/node/runtime/src/impls.rs +++ b/bin/node/runtime/src/impls.rs @@ 
-307,7 +307,7 @@ mod multiplier_tests { fn weight_to_fee_should_not_overflow_on_large_weights() { let kb = 1024 as Weight; let mb = kb * kb; - let max_fm = Multiplier::saturating_from_integer(i128::max_value()); + let max_fm = Multiplier::saturating_from_integer(i128::MAX); // check that for all values it can compute, correctly. vec![ diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index d08ce5dfee259..72545eda077ba 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -97,7 +97,7 @@ fn slot_author(slot: Slot, authorities: &[AuthorityId

]) -> Option<&A let idx = *slot % (authorities.len() as u64); assert!( - idx <= usize::max_value() as u64, + idx <= usize::MAX as u64, "It is impossible to have a vector with length beyond the address space; qed", ); diff --git a/client/finality-grandpa/src/communication/gossip.rs b/client/finality-grandpa/src/communication/gossip.rs index 8f46e45d635aa..1b3b5ea7c5d24 100644 --- a/client/finality-grandpa/src/communication/gossip.rs +++ b/client/finality-grandpa/src/communication/gossip.rs @@ -1468,7 +1468,7 @@ impl GossipValidator { "" => "", ); - let len = std::cmp::min(i32::max_value() as usize, data.len()) as i32; + let len = std::cmp::min(i32::MAX as usize, data.len()) as i32; Action::Discard(Misbehavior::UndecodablePacket(len).cost()) } } diff --git a/client/informant/src/display.rs b/client/informant/src/display.rs index 00c2116fac60a..0b7f8bcfaf16b 100644 --- a/client/informant/src/display.rs +++ b/client/informant/src/display.rs @@ -179,7 +179,7 @@ fn speed( // algebraic approach and we stay within the realm of integers. let one_thousand = NumberFor::::from(1_000u32); let elapsed = NumberFor::::from( - >::try_from(elapsed_ms).unwrap_or(u32::max_value()) + >::try_from(elapsed_ms).unwrap_or(u32::MAX) ); let speed = diff.saturating_mul(one_thousand).checked_div(&elapsed) diff --git a/client/network/src/protocol/notifications/upgrade/notifications.rs b/client/network/src/protocol/notifications/upgrade/notifications.rs index e2ef26c81eba9..26bb92d77656b 100644 --- a/client/network/src/protocol/notifications/upgrade/notifications.rs +++ b/client/network/src/protocol/notifications/upgrade/notifications.rs @@ -159,7 +159,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, } let mut codec = UviBytes::default(); - codec.set_max_len(usize::try_from(self.max_notification_size).unwrap_or(usize::max_value())); + codec.set_max_len(usize::try_from(self.max_notification_size).unwrap_or(usize::MAX)); let substream = NotificationsInSubstream { socket: Framed::new(socket, codec), @@ -390,7 +390,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, } let mut codec = UviBytes::default(); - codec.set_max_len(usize::try_from(self.max_notification_size).unwrap_or(usize::max_value())); + codec.set_max_len(usize::try_from(self.max_notification_size).unwrap_or(usize::MAX)); Ok(NotificationsOutOpen { handshake, diff --git a/client/network/src/request_responses.rs b/client/network/src/request_responses.rs index 3762cf70e71d4..20469e143d41e 100644 --- a/client/network/src/request_responses.rs +++ b/client/network/src/request_responses.rs @@ -809,7 +809,7 @@ impl RequestResponseCodec for GenericCodec { // Read the length. 
let length = unsigned_varint::aio::read_usize(&mut io).await .map_err(|err| io::Error::new(io::ErrorKind::InvalidInput, err))?; - if length > usize::try_from(self.max_request_size).unwrap_or(usize::max_value()) { + if length > usize::try_from(self.max_request_size).unwrap_or(usize::MAX) { return Err(io::Error::new( io::ErrorKind::InvalidInput, format!("Request size exceeds limit: {} > {}", length, self.max_request_size) @@ -846,7 +846,7 @@ impl RequestResponseCodec for GenericCodec { Err(err) => return Err(io::Error::new(io::ErrorKind::InvalidInput, err)), }; - if length > usize::try_from(self.max_response_size).unwrap_or(usize::max_value()) { + if length > usize::try_from(self.max_response_size).unwrap_or(usize::MAX) { return Err(io::Error::new( io::ErrorKind::InvalidInput, format!("Response size exceeds limit: {} > {}", length, self.max_response_size) diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 0bc28288501a4..fb303312093cd 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -300,20 +300,20 @@ impl NetworkWorker { let yamux_maximum_buffer_size = { let requests_max = params.network_config .request_response_protocols.iter() - .map(|cfg| usize::try_from(cfg.max_request_size).unwrap_or(usize::max_value())); + .map(|cfg| usize::try_from(cfg.max_request_size).unwrap_or(usize::MAX)); let responses_max = params.network_config .request_response_protocols.iter() - .map(|cfg| usize::try_from(cfg.max_response_size).unwrap_or(usize::max_value())); + .map(|cfg| usize::try_from(cfg.max_response_size).unwrap_or(usize::MAX)); let notifs_max = params.network_config .extra_sets.iter() - .map(|cfg| usize::try_from(cfg.max_notification_size).unwrap_or(usize::max_value())); + .map(|cfg| usize::try_from(cfg.max_notification_size).unwrap_or(usize::MAX)); // A "default" max is added to cover all the other protocols: ping, identify, // kademlia, block announces, and transactions. let default_max = cmp::max( 1024 * 1024, usize::try_from(protocol::BLOCK_ANNOUNCES_TRANSACTIONS_SUBSTREAM_SIZE) - .unwrap_or(usize::max_value()) + .unwrap_or(usize::MAX) ); iter::once(default_max) diff --git a/client/network/src/service/out_events.rs b/client/network/src/service/out_events.rs index 06c068e369da7..7ec6c608a8fcf 100644 --- a/client/network/src/service/out_events.rs +++ b/client/network/src/service/out_events.rs @@ -254,7 +254,7 @@ impl Metrics { .inc_by(num); self.notifications_sizes .with_label_values(&[protocol, "sent", name]) - .inc_by(num.saturating_mul(u64::try_from(message.len()).unwrap_or(u64::max_value()))); + .inc_by(num.saturating_mul(u64::try_from(message.len()).unwrap_or(u64::MAX))); } }, } @@ -294,7 +294,7 @@ impl Metrics { .inc(); self.notifications_sizes .with_label_values(&[&protocol, "received", name]) - .inc_by(u64::try_from(message.len()).unwrap_or(u64::max_value())); + .inc_by(u64::try_from(message.len()).unwrap_or(u64::MAX)); } }, } diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index c2e3844849f5c..4a739e50628a5 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -345,7 +345,7 @@ fn lots_of_incoming_peers_works() { fallback_names: Vec::new(), max_notification_size: 1024 * 1024, set_config: config::SetConfig { - in_peers: u32::max_value(), + in_peers: u32::MAX, .. 
Default::default() }, } diff --git a/client/peerset/src/peersstate.rs b/client/peerset/src/peersstate.rs index 309c7e6b8f973..9f54a7714fd05 100644 --- a/client/peerset/src/peersstate.rs +++ b/client/peerset/src/peersstate.rs @@ -97,8 +97,8 @@ struct Node { /// are indices into this `Vec`. sets: Vec, - /// Reputation value of the node, between `i32::min_value` (we hate that node) and - /// `i32::max_value` (we love that node). + /// Reputation value of the node, between `i32::MIN` (we hate that node) and + /// `i32::MAX` (we love that node). reputation: i32, } diff --git a/client/peerset/tests/fuzz.rs b/client/peerset/tests/fuzz.rs index 8f64962943477..d951b0cc560ce 100644 --- a/client/peerset/tests/fuzz.rs +++ b/client/peerset/tests/fuzz.rs @@ -120,7 +120,7 @@ fn test_once() { // If we generate 2, adjust a random reputation. 2 => { if let Some(id) = known_nodes.iter().choose(&mut rng) { - let val = Uniform::new_inclusive(i32::min_value(), i32::max_value()) + let val = Uniform::new_inclusive(i32::min_value(), i32::MAX) .sample(&mut rng); peerset_handle.report_peer(id.clone(), ReputationChange::new(val, "")); } diff --git a/client/rpc/src/chain/mod.rs b/client/rpc/src/chain/mod.rs index d3a28d534335f..1380927bca2f4 100644 --- a/client/rpc/src/chain/mod.rs +++ b/client/rpc/src/chain/mod.rs @@ -84,7 +84,7 @@ trait ChainBackend: Send + Sync + 'static // FIXME <2329>: Database seems to limit the block number to u32 for no reason let block_num: u32 = num_or_hex.try_into().map_err(|_| { Error::from(format!( - "`{:?}` > u32::max_value(), the max block number is u32.", + "`{:?}` > u32::MAX, the max block number is u32.", num_or_hex )) })?; diff --git a/client/service/src/chain_ops/import_blocks.rs b/client/service/src/chain_ops/import_blocks.rs index 90bcc94cb8996..330aaea4f555b 100644 --- a/client/service/src/chain_ops/import_blocks.rs +++ b/client/service/src/chain_ops/import_blocks.rs @@ -236,7 +236,7 @@ impl Speedometer { // algebraic approach and we stay within the realm of integers. let one_thousand = NumberFor::::from(1_000u32); let elapsed = NumberFor::::from( - >::try_from(elapsed_ms).unwrap_or(u32::max_value()) + >::try_from(elapsed_ms).unwrap_or(u32::MAX) ); let speed = diff.saturating_mul(one_thousand).checked_div(&elapsed) diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index bf4105377f9c1..9cd0e193fcd03 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -1547,7 +1547,7 @@ fn doesnt_import_blocks_that_revert_finality() { cache_size: 1024, }, }, - u64::max_value(), + u64::MAX, ).unwrap()); let mut client = TestClientBuilder::with_backend(backend).build(); @@ -1751,7 +1751,7 @@ fn returns_status_for_pruned_blocks() { cache_size: 1024, }, }, - u64::max_value(), + u64::MAX, ).unwrap()); let mut client = TestClientBuilder::with_backend(backend).build(); diff --git a/client/transaction-pool/graph/src/ready.rs b/client/transaction-pool/graph/src/ready.rs index 2c0575bf1efb0..ba6ca97dc6753 100644 --- a/client/transaction-pool/graph/src/ready.rs +++ b/client/transaction-pool/graph/src/ready.rs @@ -659,7 +659,7 @@ mod tests { bytes: 1, hash: 5, priority: 1, - valid_till: u64::max_value(), // use the max_value() here for testing. + valid_till: u64::MAX, // use the max here for testing. requires: vec![tx1.provides[0].clone()], provides: vec![], propagate: true, @@ -692,7 +692,7 @@ mod tests { bytes: 1, hash: 5, priority: 1, - valid_till: u64::max_value(), // use the max_value() here for testing. 
+ valid_till: u64::MAX, // use the max here for testing. requires: vec![], provides: vec![], propagate: true, diff --git a/frame/assets/src/tests.rs b/frame/assets/src/tests.rs index b561864c8e481..b8eb2e40f8afb 100644 --- a/frame/assets/src/tests.rs +++ b/frame/assets/src/tests.rs @@ -310,7 +310,7 @@ fn querying_total_supply_should_work() { assert_eq!(Assets::balance(0, 1), 50); assert_eq!(Assets::balance(0, 2), 19); assert_eq!(Assets::balance(0, 3), 31); - assert_ok!(Assets::burn(Origin::signed(1), 0, 3, u64::max_value())); + assert_ok!(Assets::burn(Origin::signed(1), 0, 3, u64::MAX)); assert_eq!(Assets::total_supply(0), 69); }); } @@ -457,7 +457,7 @@ fn transferring_amount_more_than_available_balance_should_not_work() { assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); assert_eq!(Assets::balance(0, 1), 50); assert_eq!(Assets::balance(0, 2), 50); - assert_ok!(Assets::burn(Origin::signed(1), 0, 1, u64::max_value())); + assert_ok!(Assets::burn(Origin::signed(1), 0, 1, u64::MAX)); assert_eq!(Assets::balance(0, 1), 0); assert_noop!(Assets::transfer(Origin::signed(1), 0, 1, 50), Error::::BalanceLow); assert_noop!(Assets::transfer(Origin::signed(2), 0, 1, 51), Error::::BalanceLow); @@ -491,7 +491,7 @@ fn burning_asset_balance_with_positive_balance_should_work() { assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); - assert_ok!(Assets::burn(Origin::signed(1), 0, 1, u64::max_value())); + assert_ok!(Assets::burn(Origin::signed(1), 0, 1, u64::MAX)); assert_eq!(Assets::balance(0, 1), 0); }); } @@ -502,7 +502,7 @@ fn burning_asset_balance_with_zero_balance_does_nothing() { assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 2), 0); - assert_ok!(Assets::burn(Origin::signed(1), 0, 2, u64::max_value())); + assert_ok!(Assets::burn(Origin::signed(1), 0, 2, u64::MAX)); assert_eq!(Assets::balance(0, 2), 0); assert_eq!(Assets::total_supply(0), 100); }); diff --git a/frame/babe/src/mock.rs b/frame/babe/src/mock.rs index a8d0bba9632d8..6c1cc89cf1ed0 100644 --- a/frame/babe/src/mock.rs +++ b/frame/babe/src/mock.rs @@ -184,7 +184,7 @@ parameter_types! { pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; pub const MaxNominatorRewardedPerValidator: u32 = 64; pub const ElectionLookahead: u64 = 0; - pub const StakingUnsignedPriority: u64 = u64::max_value() / 2; + pub const StakingUnsignedPriority: u64 = u64::MAX / 2; } impl onchain::Config for Test { diff --git a/frame/balances/src/tests.rs b/frame/balances/src/tests.rs index 3598595c7649c..c98b0ecf02bf0 100644 --- a/frame/balances/src/tests.rs +++ b/frame/balances/src/tests.rs @@ -87,7 +87,7 @@ macro_rules! decl_tests { #[test] fn lock_removal_should_work() { <$ext_builder>::default().existential_deposit(1).monied(true).build().execute_with(|| { - Balances::set_lock(ID_1, &1, u64::max_value(), WithdrawReasons::all()); + Balances::set_lock(ID_1, &1, u64::MAX, WithdrawReasons::all()); Balances::remove_lock(ID_1, &1); assert_ok!(>::transfer(&1, &2, 1, AllowDeath)); }); @@ -96,7 +96,7 @@ macro_rules! 
decl_tests { #[test] fn lock_replacement_should_work() { <$ext_builder>::default().existential_deposit(1).monied(true).build().execute_with(|| { - Balances::set_lock(ID_1, &1, u64::max_value(), WithdrawReasons::all()); + Balances::set_lock(ID_1, &1, u64::MAX, WithdrawReasons::all()); Balances::set_lock(ID_1, &1, 5, WithdrawReasons::all()); assert_ok!(>::transfer(&1, &2, 1, AllowDeath)); }); @@ -114,7 +114,7 @@ macro_rules! decl_tests { #[test] fn combination_locking_should_work() { <$ext_builder>::default().existential_deposit(1).monied(true).build().execute_with(|| { - Balances::set_lock(ID_1, &1, u64::max_value(), WithdrawReasons::empty()); + Balances::set_lock(ID_1, &1, u64::MAX, WithdrawReasons::empty()); Balances::set_lock(ID_2, &1, 0, WithdrawReasons::all()); assert_ok!(>::transfer(&1, &2, 1, AllowDeath)); }); @@ -513,15 +513,15 @@ macro_rules! decl_tests { #[test] fn transferring_too_high_value_should_not_panic() { <$ext_builder>::default().build().execute_with(|| { - Balances::make_free_balance_be(&1, u64::max_value()); + Balances::make_free_balance_be(&1, u64::MAX); Balances::make_free_balance_be(&2, 1); assert_err!( - Balances::transfer(Some(1).into(), 2, u64::max_value()), + Balances::transfer(Some(1).into(), 2, u64::MAX), ArithmeticError::Overflow, ); - assert_eq!(Balances::free_balance(1), u64::max_value()); + assert_eq!(Balances::free_balance(1), u64::MAX); assert_eq!(Balances::free_balance(2), 1); }); } diff --git a/frame/contracts/src/benchmarking/mod.rs b/frame/contracts/src/benchmarking/mod.rs index bb04e9b2cf32f..7b77569a1f6d7 100644 --- a/frame/contracts/src/benchmarking/mod.rs +++ b/frame/contracts/src/benchmarking/mod.rs @@ -116,7 +116,7 @@ where // storage_size cannot be zero because otherwise a contract that is just above // the subsistence threshold does not pay rent given a large enough subsistence // threshold. But we need rent payments to occur in order to benchmark for worst cases. - let storage_size = u32::max_value() / 10; + let storage_size = u32::MAX / 10; // Endowment should be large but not as large to inhibit rent payments. // Balance will only cover half the storage diff --git a/frame/contracts/src/chain_extension.rs b/frame/contracts/src/chain_extension.rs index d2839dfdbc2e1..ac71eca27b1ce 100644 --- a/frame/contracts/src/chain_extension.rs +++ b/frame/contracts/src/chain_extension.rs @@ -334,7 +334,7 @@ where /// /// If the contract supplied buffer is smaller than the passed `buffer` an `Err` is returned. /// If `allow_skip` is set to true the contract is allowed to skip the copying of the buffer - /// by supplying the guard value of `u32::max_value()` as `out_ptr`. The + /// by supplying the guard value of `u32::MAX` as `out_ptr`. The /// `weight_per_byte` is only charged when the write actually happens and is not skipped or /// failed due to a too small output buffer. pub fn write( diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index 7ca6dfed15819..8d1782e84d60d 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -550,7 +550,7 @@ where /// length of the buffer located at `out_ptr`. If that buffer is large enough the actual /// `buf.len()` is written to this location. /// - /// If `out_ptr` is set to the sentinel value of `u32::max_value()` and `allow_skip` is true the + /// If `out_ptr` is set to the sentinel value of `u32::MAX` and `allow_skip` is true the /// operation is skipped and `Ok` is returned. 
This is supposed to help callers to make copying /// output optional. For example to skip copying back the output buffer of an `seal_call` /// when the caller is not interested in the result. @@ -570,7 +570,7 @@ where create_token: impl FnOnce(u32) -> Option, ) -> Result<(), DispatchError> { - if allow_skip && out_ptr == u32::max_value() { + if allow_skip && out_ptr == u32::MAX { return Ok(()); } @@ -892,7 +892,7 @@ define_env!(Env, , // // The callees output buffer is copied to `output_ptr` and its length to `output_len_ptr`. // The copy of the output buffer can be skipped by supplying the sentinel value - // of `u32::max_value()` to `output_ptr`. + // of `u32::MAX` to `output_ptr`. // // # Parameters // @@ -953,7 +953,7 @@ define_env!(Env, , // by the code hash. The address of this new account is copied to `address_ptr` and its length // to `address_len_ptr`. The constructors output buffer is copied to `output_ptr` and its // length to `output_len_ptr`. The copy of the output buffer and address can be skipped by - // supplying the sentinel value of `u32::max_value()` to `output_ptr` or `address_ptr`. + // supplying the sentinel value of `u32::MAX` to `output_ptr` or `address_ptr`. // // After running the constructor it is verified that the contract account holds at // least the subsistence threshold. If that is not the case the instantiation fails and diff --git a/frame/democracy/src/benchmarking.rs b/frame/democracy/src/benchmarking.rs index 6cf35553f5367..d1d3b3e62bddd 100644 --- a/frame/democracy/src/benchmarking.rs +++ b/frame/democracy/src/benchmarking.rs @@ -118,13 +118,13 @@ benchmarks! { // Create s existing "seconds" for i in 0 .. s { let seconder = funded_account::("seconder", i); - Democracy::::second(RawOrigin::Signed(seconder).into(), 0, u32::max_value())?; + Democracy::::second(RawOrigin::Signed(seconder).into(), 0, u32::MAX)?; } let deposits = Democracy::::deposit_of(0).ok_or("Proposal not created")?; assert_eq!(deposits.0.len(), (s + 1) as usize, "Seconds not recorded"); whitelist_account!(caller); - }: _(RawOrigin::Signed(caller), 0, u32::max_value()) + }: _(RawOrigin::Signed(caller), 0, u32::MAX) verify { let deposits = Democracy::::deposit_of(0).ok_or("Proposal not created")?; assert_eq!(deposits.0.len(), (s + 2) as usize, "`second` benchmark did not work"); @@ -609,7 +609,7 @@ benchmarks! 
{ let caller = funded_account::("caller", 0); whitelist_account!(caller); - }: _(RawOrigin::Signed(caller), proposal_hash.clone(), u32::max_value()) + }: _(RawOrigin::Signed(caller), proposal_hash.clone(), u32::MAX) verify { let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); assert!(!Preimages::::contains_key(proposal_hash)); diff --git a/frame/democracy/src/tests/decoders.rs b/frame/democracy/src/tests/decoders.rs index 32e5e3ecf7ae4..c3eb9ca7e3322 100644 --- a/frame/democracy/src/tests/decoders.rs +++ b/frame/democracy/src/tests/decoders.rs @@ -23,11 +23,11 @@ use frame_support::storage::{migration, unhashed}; #[test] fn test_decode_compact_u32_at() { new_test_ext().execute_with(|| { - let v = codec::Compact(u64::max_value()); + let v = codec::Compact(u64::MAX); migration::put_storage_value(b"test", b"", &[], v); assert_eq!(decode_compact_u32_at(b"test"), None); - for v in vec![0, 10, u32::max_value()] { + for v in vec![0, 10, u32::MAX] { let compact_v = codec::Compact(v); unhashed::put(b"test", &compact_v); assert_eq!(decode_compact_u32_at(b"test"), Some(v)); diff --git a/frame/democracy/src/tests/preimage.rs b/frame/democracy/src/tests/preimage.rs index 135b167520be5..a412343299d9f 100644 --- a/frame/democracy/src/tests/preimage.rs +++ b/frame/democracy/src/tests/preimage.rs @@ -81,11 +81,11 @@ fn preimage_deposit_should_be_reapable_earlier_by_owner() { next_block(); assert_noop!( - Democracy::reap_preimage(Origin::signed(6), set_balance_proposal_hash(2), u32::max_value()), + Democracy::reap_preimage(Origin::signed(6), set_balance_proposal_hash(2), u32::MAX), Error::::TooEarly ); next_block(); - assert_ok!(Democracy::reap_preimage(Origin::signed(6), set_balance_proposal_hash(2), u32::max_value())); + assert_ok!(Democracy::reap_preimage(Origin::signed(6), set_balance_proposal_hash(2), u32::MAX)); assert_eq!(Balances::free_balance(6), 60); assert_eq!(Balances::reserved_balance(6), 0); @@ -96,7 +96,7 @@ fn preimage_deposit_should_be_reapable_earlier_by_owner() { fn preimage_deposit_should_be_reapable() { new_test_ext_execute_with_cond(|operational| { assert_noop!( - Democracy::reap_preimage(Origin::signed(5), set_balance_proposal_hash(2), u32::max_value()), + Democracy::reap_preimage(Origin::signed(5), set_balance_proposal_hash(2), u32::MAX), Error::::PreimageMissing ); @@ -111,12 +111,12 @@ fn preimage_deposit_should_be_reapable() { next_block(); next_block(); assert_noop!( - Democracy::reap_preimage(Origin::signed(5), set_balance_proposal_hash(2), u32::max_value()), + Democracy::reap_preimage(Origin::signed(5), set_balance_proposal_hash(2), u32::MAX), Error::::TooEarly ); next_block(); - assert_ok!(Democracy::reap_preimage(Origin::signed(5), set_balance_proposal_hash(2), u32::max_value())); + assert_ok!(Democracy::reap_preimage(Origin::signed(5), set_balance_proposal_hash(2), u32::MAX)); assert_eq!(Balances::reserved_balance(6), 0); assert_eq!(Balances::free_balance(6), 48); assert_eq!(Balances::free_balance(5), 62); @@ -161,7 +161,7 @@ fn reaping_imminent_preimage_should_fail() { assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); next_block(); next_block(); - assert_noop!(Democracy::reap_preimage(Origin::signed(6), h, u32::max_value()), Error::::Imminent); + assert_noop!(Democracy::reap_preimage(Origin::signed(6), h, u32::MAX), Error::::Imminent); }); } diff --git a/frame/democracy/src/tests/public_proposals.rs b/frame/democracy/src/tests/public_proposals.rs index 4a4827ac7e9c1..1d323d684d7f2 100644 --- a/frame/democracy/src/tests/public_proposals.rs +++ 
b/frame/democracy/src/tests/public_proposals.rs @@ -35,10 +35,10 @@ fn backing_for_should_work() { fn deposit_for_proposals_should_be_taken() { new_test_ext().execute_with(|| { assert_ok!(propose_set_balance_and_note(1, 2, 5)); - assert_ok!(Democracy::second(Origin::signed(2), 0, u32::max_value())); - assert_ok!(Democracy::second(Origin::signed(5), 0, u32::max_value())); - assert_ok!(Democracy::second(Origin::signed(5), 0, u32::max_value())); - assert_ok!(Democracy::second(Origin::signed(5), 0, u32::max_value())); + assert_ok!(Democracy::second(Origin::signed(2), 0, u32::MAX)); + assert_ok!(Democracy::second(Origin::signed(5), 0, u32::MAX)); + assert_ok!(Democracy::second(Origin::signed(5), 0, u32::MAX)); + assert_ok!(Democracy::second(Origin::signed(5), 0, u32::MAX)); assert_eq!(Balances::free_balance(1), 5); assert_eq!(Balances::free_balance(2), 15); assert_eq!(Balances::free_balance(5), 35); @@ -49,10 +49,10 @@ fn deposit_for_proposals_should_be_taken() { fn deposit_for_proposals_should_be_returned() { new_test_ext().execute_with(|| { assert_ok!(propose_set_balance_and_note(1, 2, 5)); - assert_ok!(Democracy::second(Origin::signed(2), 0, u32::max_value())); - assert_ok!(Democracy::second(Origin::signed(5), 0, u32::max_value())); - assert_ok!(Democracy::second(Origin::signed(5), 0, u32::max_value())); - assert_ok!(Democracy::second(Origin::signed(5), 0, u32::max_value())); + assert_ok!(Democracy::second(Origin::signed(2), 0, u32::MAX)); + assert_ok!(Democracy::second(Origin::signed(5), 0, u32::MAX)); + assert_ok!(Democracy::second(Origin::signed(5), 0, u32::MAX)); + assert_ok!(Democracy::second(Origin::signed(5), 0, u32::MAX)); fast_forward_to(3); assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::free_balance(2), 20); @@ -79,7 +79,7 @@ fn poor_seconder_should_not_work() { new_test_ext().execute_with(|| { assert_ok!(propose_set_balance_and_note(2, 2, 11)); assert_noop!( - Democracy::second(Origin::signed(1), 0, u32::max_value()), + Democracy::second(Origin::signed(1), 0, u32::MAX), BalancesError::::InsufficientBalance ); }); diff --git a/frame/example/src/tests.rs b/frame/example/src/tests.rs index a290ea0f6576f..c699a0bfad36c 100644 --- a/frame/example/src/tests.rs +++ b/frame/example/src/tests.rs @@ -166,7 +166,7 @@ fn signed_ext_watch_dummy_works() { WatchDummy::(PhantomData).validate(&1, &call, &info, 150) .unwrap() .priority, - u64::max_value(), + u64::MAX, ); assert_eq!( WatchDummy::(PhantomData).validate(&1, &call, &info, 250), diff --git a/frame/grandpa/src/mock.rs b/frame/grandpa/src/mock.rs index 9206b3ff2dfaf..ebe5996c9dab5 100644 --- a/frame/grandpa/src/mock.rs +++ b/frame/grandpa/src/mock.rs @@ -190,7 +190,7 @@ parameter_types! 
{ pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; pub const MaxNominatorRewardedPerValidator: u32 = 64; pub const ElectionLookahead: u64 = 0; - pub const StakingUnsignedPriority: u64 = u64::max_value() / 2; + pub const StakingUnsignedPriority: u64 = u64::MAX / 2; } impl onchain::Config for Test { diff --git a/frame/staking/reward-curve/src/lib.rs b/frame/staking/reward-curve/src/lib.rs index 5ce6d0c3a8679..de912eee99ce2 100644 --- a/frame/staking/reward-curve/src/lib.rs +++ b/frame/staking/reward-curve/src/lib.rs @@ -275,7 +275,7 @@ impl INPoS { // See web3 docs for the details fn compute_opposite_after_x_ideal(&self, y: u32) -> u32 { if y == self.i_0 { - return u32::max_value(); + return u32::MAX; } // Note: the log term calculated here represents a per_million value let log = log2(self.i_ideal_times_x_ideal - self.i_0, y - self.i_0); @@ -408,7 +408,7 @@ fn generate_test_module(input: &INposInput) -> TokenStream2 { #[test] fn reward_curve_precision() { - for &base in [MILLION, u32::max_value()].iter() { + for &base in [MILLION, u32::MAX].iter() { let number_of_check = 100_000.min(base); for check_index in 0..=number_of_check { let i = (check_index as u64 * base as u64 / number_of_check as u64) as u32; diff --git a/frame/staking/reward-curve/src/log.rs b/frame/staking/reward-curve/src/log.rs index 28acd5deed2bb..747011a73e1db 100644 --- a/frame/staking/reward-curve/src/log.rs +++ b/frame/staking/reward-curve/src/log.rs @@ -33,7 +33,7 @@ fn taylor_term(k: u32, y_num: u128, y_den: u128) -> u32 { /// * result represents a per-million output of log2 pub fn log2(p: u32, q: u32) -> u32 { assert!(p >= q); // keep p/q bound to [1, inf) - assert!(p <= u32::max_value()/2); + assert!(p <= u32::MAX/2); // This restriction should not be mandatory. But function is only tested and used for this. assert!(p <= 1_000_000); diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index 8adf797abe9e9..f7545b07c90a8 100644 --- a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -79,9 +79,9 @@ pub fn create_validator_with_nominators( // Give the validator n nominators, but keep total users in the system the same. for i in 0 .. upper_bound { let (n_stash, n_controller) = if !dead { - create_stash_controller::(u32::max_value() - i, 100, destination.clone())? + create_stash_controller::(u32::MAX - i, 100, destination.clone())? } else { - create_stash_and_dead_controller::(u32::max_value() - i, 100, destination.clone())? + create_stash_and_dead_controller::(u32::MAX - i, 100, destination.clone())? }; if i < n { Staking::::nominate(RawOrigin::Signed(n_controller.clone()).into(), vec![stash_lookup.clone()])?; @@ -456,7 +456,7 @@ benchmarks! { >::insert(i, BalanceOf::::one()); ErasStartSessionIndex::::insert(i, i); } - }: _(RawOrigin::Root, EraIndex::zero(), u32::max_value()) + }: _(RawOrigin::Root, EraIndex::zero(), u32::MAX) verify { assert_eq!(HistoryDepth::::get(), 0); } @@ -607,13 +607,13 @@ benchmarks! 
{ RawOrigin::Root, BalanceOf::::max_value(), BalanceOf::::max_value(), - Some(u32::max_value()), - Some(u32::max_value()) + Some(u32::MAX), + Some(u32::MAX) ) verify { assert_eq!(MinNominatorBond::::get(), BalanceOf::::max_value()); assert_eq!(MinValidatorBond::::get(), BalanceOf::::max_value()); - assert_eq!(MaxNominatorsCount::::get(), Some(u32::max_value())); - assert_eq!(MaxValidatorsCount::::get(), Some(u32::max_value())); + assert_eq!(MaxNominatorsCount::::get(), Some(u32::MAX)); + assert_eq!(MaxValidatorsCount::::get(), Some(u32::MAX)); } chill_other { diff --git a/frame/staking/src/testing_utils.rs b/frame/staking/src/testing_utils.rs index c643cb283373b..18b77d59b3e2e 100644 --- a/frame/staking/src/testing_utils.rs +++ b/frame/staking/src/testing_utils.rs @@ -150,7 +150,7 @@ pub fn create_validators_with_nominators_for_era( for j in 0 .. nominators { let balance_factor = if randomize_stake { rng.next_u32() % 255 + 10 } else { 100u32 }; let (_n_stash, n_controller) = create_stash_controller::( - u32::max_value() - j, + u32::MAX - j, balance_factor, RewardDestination::Staked, )?; diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 5d42d866b1336..e314a70399fdd 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -1960,8 +1960,8 @@ fn phragmen_should_not_overflow() { #[test] fn reward_validator_slashing_validator_does_not_overflow() { ExtBuilder::default().build_and_execute(|| { - let stake = u64::max_value() as Balance * 2; - let reward_slash = u64::max_value() as Balance * 2; + let stake = u64::MAX as Balance * 2; + let reward_slash = u64::MAX as Balance * 2; // Assert multiplication overflows in balance arithmetic. assert!(stake.checked_mul(reward_slash).is_none()); @@ -3995,7 +3995,7 @@ mod election_data_provider { ); Staking::force_no_eras(Origin::root()).unwrap(); - assert_eq!(Staking::next_election_prediction(System::block_number()), u64::max_value()); + assert_eq!(Staking::next_election_prediction(System::block_number()), u64::MAX); Staking::force_new_era_always(Origin::root()).unwrap(); assert_eq!(Staking::next_election_prediction(System::block_number()), 45 + 5); diff --git a/frame/support/src/traits/voting.rs b/frame/support/src/traits/voting.rs index b6913a182d30b..f5afbac129555 100644 --- a/frame/support/src/traits/voting.rs +++ b/frame/support/src/traits/voting.rs @@ -42,20 +42,20 @@ pub trait CurrencyToVote { /// An implementation of `CurrencyToVote` tailored for chain's that have a balance type of u128. /// -/// The factor is the `(total_issuance / u64::max()).max(1)`, represented as u64. Let's look at the +/// The factor is the `(total_issuance / u64::MAX).max(1)`, represented as u64. Let's look at the /// important cases: /// -/// If the chain's total issuance is less than u64::max(), this will always be 1, which means that +/// If the chain's total issuance is less than u64::MAX, this will always be 1, which means that /// the factor will not have any effect. In this case, any account's balance is also less. Thus, /// both of the conversions are basically an `as`; Any balance can fit in u64. /// -/// If the chain's total issuance is more than 2*u64::max(), then a factor might be multiplied and +/// If the chain's total issuance is more than 2*u64::MAX, then a factor might be multiplied and /// divided upon conversion. 
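+///
+/// As an illustrative (hypothetical) example: with a total issuance of `10 * u64::MAX as u128`
+/// the factor is `10`, so a 128-bit balance is scaled down by `10` when converted to a `u64`
+/// vote weight, and scaled back up by `10` when a vote weight is converted back to a balance.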
pub struct U128CurrencyToVote; impl U128CurrencyToVote { fn factor(issuance: u128) -> u128 { - (issuance / u64::max_value() as u128).max(1) + (issuance / u64::MAX as u128).max(1) } } diff --git a/frame/support/src/weights.rs b/frame/support/src/weights.rs index 9337ec330d1cc..2b7cff8c6168c 100644 --- a/frame/support/src/weights.rs +++ b/frame/support/src/weights.rs @@ -278,7 +278,7 @@ impl<'a> OneOrMany for &'a [DispatchClass] { /// Primitives related to priority management of Frame. pub mod priority { - /// The starting point of all Operational transactions. 3/4 of u64::max_value(). + /// The starting point of all Operational transactions. 3/4 of u64::MAX. pub const LIMIT: u64 = 13_835_058_055_282_163_711_u64; /// Wrapper for priority of different dispatch classes. diff --git a/frame/support/test/tests/pallet_with_name_trait_is_valid.rs b/frame/support/test/tests/pallet_with_name_trait_is_valid.rs index e7f44c4b96519..6f35b122f6399 100644 --- a/frame/support/test/tests/pallet_with_name_trait_is_valid.rs +++ b/frame/support/test/tests/pallet_with_name_trait_is_valid.rs @@ -43,7 +43,7 @@ frame_support::decl_module! { pub struct Module for enum Call where origin: T::Origin { fn deposit_event() = default; type Error = Error; - const Foo: u32 = u32::max_value(); + const Foo: u32 = u32::MAX; #[weight = 0] fn accumulate_dummy(_origin, _increase_by: T::Balance) { diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 17ea3a71bec8c..f96c43ee1c98e 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -778,7 +778,7 @@ fn hash69 + Default>() -> T { /// This type alias represents an index of an event. /// /// We use `u32` here because this index is used as index for `Events` -/// which can't contain more than `u32::max_value()` items. +/// which can't contain more than `u32::MAX` items. type EventIndex = u32; /// Type used to encode the number of references an account has. diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index 1ce3f75d5a016..17a4c8f81c968 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -1142,11 +1142,11 @@ mod tests { }; assert_eq!( Module::::compute_fee( - ::max_value(), + u32::MAX, &dispatch_info, - ::max_value() + u64::MAX ), - ::max_value() + u64::MAX ); }); } diff --git a/frame/treasury/src/tests.rs b/frame/treasury/src/tests.rs index 408f99f29e1b1..e4b6f2d664fc2 100644 --- a/frame/treasury/src/tests.rs +++ b/frame/treasury/src/tests.rs @@ -367,8 +367,8 @@ fn genesis_funding_works() { #[test] fn max_approvals_limited() { new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), u64::max_value()); - Balances::make_free_balance_be(&0, u64::max_value()); + Balances::make_free_balance_be(&Treasury::account_id(), u64::MAX); + Balances::make_free_balance_be(&0, u64::MAX); for _ in 0 .. MaxApprovals::get() { assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); diff --git a/primitives/allocator/src/freeing_bump.rs b/primitives/allocator/src/freeing_bump.rs index e2a6b19e4a7f1..36f5bb9c65c0e 100644 --- a/primitives/allocator/src/freeing_bump.rs +++ b/primitives/allocator/src/freeing_bump.rs @@ -179,7 +179,7 @@ impl Order { } /// A special magic value for a pointer in a link that denotes the end of the linked list. -const NIL_MARKER: u32 = u32::max_value(); +const NIL_MARKER: u32 = u32::MAX; /// A link between headers in the free list. 
#[derive(Clone, Copy, Debug, PartialEq, Eq)] diff --git a/primitives/arithmetic/fuzzer/src/multiply_by_rational.rs b/primitives/arithmetic/fuzzer/src/multiply_by_rational.rs index 40f315ce755d1..a1689716b56c6 100644 --- a/primitives/arithmetic/fuzzer/src/multiply_by_rational.rs +++ b/primitives/arithmetic/fuzzer/src/multiply_by_rational.rs @@ -70,7 +70,7 @@ fn mul_div(a: u128, b: u128, c: u128) -> u128 { let ce: U256 = c.into(); let r = ae * be / ce; - if r > u128::max_value().into() { + if r > u128::MAX.into() { a } else { r.as_u128() diff --git a/primitives/arithmetic/src/biguint.rs b/primitives/arithmetic/src/biguint.rs index bfbd57f57013b..859cf829246f1 100644 --- a/primitives/arithmetic/src/biguint.rs +++ b/primitives/arithmetic/src/biguint.rs @@ -342,7 +342,7 @@ impl BigUint { // step D3.0 Find an estimate of q[j], named qhat. let (qhat, rhat) = { // PROOF: this always fits into `Double`. In the context of Single = u8, and - // Double = u16, think of 255 * 256 + 255 which is just u16::max_value(). + // Double = u16, think of 255 * 256 + 255 which is just u16::MAX. let dividend = Double::from(self_norm.get(j + n)) * B @@ -668,14 +668,14 @@ pub mod tests { fn can_try_build_numbers_from_types() { use sp_std::convert::TryFrom; assert_eq!(u64::try_from(with_limbs(1)).unwrap(), 1); - assert_eq!(u64::try_from(with_limbs(2)).unwrap(), u32::max_value() as u64 + 2); + assert_eq!(u64::try_from(with_limbs(2)).unwrap(), u32::MAX as u64 + 2); assert_eq!( u64::try_from(with_limbs(3)).unwrap_err(), "cannot fit a number into u64", ); assert_eq!( u128::try_from(with_limbs(3)).unwrap(), - u32::max_value() as u128 + u64::max_value() as u128 + 3 + u32::MAX as u128 + u64::MAX as u128 + 3 ); } diff --git a/primitives/arithmetic/src/fixed_point.rs b/primitives/arithmetic/src/fixed_point.rs index ec2c28f35f1ca..4940c7751aa1d 100644 --- a/primitives/arithmetic/src/fixed_point.rs +++ b/primitives/arithmetic/src/fixed_point.rs @@ -619,23 +619,23 @@ macro_rules! implement_fixed { assert_eq!(from_i129::(a), None); let a = I129 { - value: u128::max_value() - 1, + value: u128::MAX - 1, negative: false, }; // Max - 1 value fits. - assert_eq!(from_i129::(a), Some(u128::max_value() - 1)); + assert_eq!(from_i129::(a), Some(u128::MAX - 1)); let a = I129 { - value: u128::max_value(), + value: u128::MAX, negative: false, }; // Max value fits. - assert_eq!(from_i129::(a), Some(u128::max_value())); + assert_eq!(from_i129::(a), Some(u128::MAX)); let a = I129 { - value: i128::max_value() as u128 + 1, + value: i128::MAX as u128 + 1, negative: true, }; @@ -643,7 +643,7 @@ macro_rules! implement_fixed { assert_eq!(from_i129::(a), Some(i128::min_value())); let a = I129 { - value: i128::max_value() as u128 + 1, + value: i128::MAX as u128 + 1, negative: false, }; @@ -651,12 +651,12 @@ macro_rules! implement_fixed { assert_eq!(from_i129::(a), None); let a = I129 { - value: i128::max_value() as u128, + value: i128::MAX as u128, negative: false, }; // Max value fits. - assert_eq!(from_i129::(a), Some(i128::max_value())); + assert_eq!(from_i129::(a), Some(i128::MAX)); } #[test] @@ -665,13 +665,13 @@ macro_rules! implement_fixed { let b = 1i32; // Pos + Pos => Max. - assert_eq!(to_bound::<_, _, i32>(a, b), i32::max_value()); + assert_eq!(to_bound::<_, _, i32>(a, b), i32::MAX); let a = -1i32; let b = -1i32; // Neg + Neg => Max. - assert_eq!(to_bound::<_, _, i32>(a, b), i32::max_value()); + assert_eq!(to_bound::<_, _, i32>(a, b), i32::MAX); let a = 1i32; let b = -1i32; @@ -1084,11 +1084,11 @@ macro_rules! 
implement_fixed { fn checked_mul_int_works() { let a = $name::saturating_from_integer(2); // Max - 1. - assert_eq!(a.checked_mul_int((i128::max_value() - 1) / 2), Some(i128::max_value() - 1)); + assert_eq!(a.checked_mul_int((i128::MAX - 1) / 2), Some(i128::MAX - 1)); // Max. - assert_eq!(a.checked_mul_int(i128::max_value() / 2), Some(i128::max_value() - 1)); + assert_eq!(a.checked_mul_int(i128::MAX / 2), Some(i128::MAX - 1)); // Max + 1 => None. - assert_eq!(a.checked_mul_int(i128::max_value() / 2 + 1), None); + assert_eq!(a.checked_mul_int(i128::MAX / 2 + 1), None); if $name::SIGNED { // Min - 1. @@ -1100,20 +1100,20 @@ macro_rules! implement_fixed { let b = $name::saturating_from_rational(1, -2); assert_eq!(b.checked_mul_int(42i128), Some(-21)); - assert_eq!(b.checked_mul_int(u128::max_value()), None); - assert_eq!(b.checked_mul_int(i128::max_value()), Some(i128::max_value() / -2)); + assert_eq!(b.checked_mul_int(u128::MAX), None); + assert_eq!(b.checked_mul_int(i128::MAX), Some(i128::MAX / -2)); assert_eq!(b.checked_mul_int(i128::min_value()), Some(i128::min_value() / -2)); } let a = $name::saturating_from_rational(1, 2); assert_eq!(a.checked_mul_int(42i128), Some(21)); - assert_eq!(a.checked_mul_int(i128::max_value()), Some(i128::max_value() / 2)); + assert_eq!(a.checked_mul_int(i128::MAX), Some(i128::MAX / 2)); assert_eq!(a.checked_mul_int(i128::min_value()), Some(i128::min_value() / 2)); let c = $name::saturating_from_integer(255); assert_eq!(c.checked_mul_int(2i8), None); assert_eq!(c.checked_mul_int(2i128), Some(510)); - assert_eq!(c.checked_mul_int(i128::max_value()), None); + assert_eq!(c.checked_mul_int(i128::MAX), None); assert_eq!(c.checked_mul_int(i128::min_value()), None); } @@ -1121,11 +1121,11 @@ macro_rules! implement_fixed { fn saturating_mul_int_works() { let a = $name::saturating_from_integer(2); // Max - 1. - assert_eq!(a.saturating_mul_int((i128::max_value() - 1) / 2), i128::max_value() - 1); + assert_eq!(a.saturating_mul_int((i128::MAX - 1) / 2), i128::MAX - 1); // Max. - assert_eq!(a.saturating_mul_int(i128::max_value() / 2), i128::max_value() - 1); + assert_eq!(a.saturating_mul_int(i128::MAX / 2), i128::MAX - 1); // Max + 1 => saturates to max. - assert_eq!(a.saturating_mul_int(i128::max_value() / 2 + 1), i128::max_value()); + assert_eq!(a.saturating_mul_int(i128::MAX / 2 + 1), i128::MAX); // Min - 1. assert_eq!(a.saturating_mul_int((i128::min_value() + 1) / 2), i128::min_value() + 2); @@ -1137,20 +1137,20 @@ macro_rules! 
implement_fixed { if $name::SIGNED { let b = $name::saturating_from_rational(1, -2); assert_eq!(b.saturating_mul_int(42i32), -21); - assert_eq!(b.saturating_mul_int(i128::max_value()), i128::max_value() / -2); + assert_eq!(b.saturating_mul_int(i128::MAX), i128::MAX / -2); assert_eq!(b.saturating_mul_int(i128::min_value()), i128::min_value() / -2); - assert_eq!(b.saturating_mul_int(u128::max_value()), u128::min_value()); + assert_eq!(b.saturating_mul_int(u128::MAX), u128::min_value()); } let a = $name::saturating_from_rational(1, 2); assert_eq!(a.saturating_mul_int(42i32), 21); - assert_eq!(a.saturating_mul_int(i128::max_value()), i128::max_value() / 2); + assert_eq!(a.saturating_mul_int(i128::MAX), i128::MAX / 2); assert_eq!(a.saturating_mul_int(i128::min_value()), i128::min_value() / 2); let c = $name::saturating_from_integer(255); - assert_eq!(c.saturating_mul_int(2i8), i8::max_value()); + assert_eq!(c.saturating_mul_int(2i8), i8::MAX); assert_eq!(c.saturating_mul_int(-2i8), i8::min_value()); - assert_eq!(c.saturating_mul_int(i128::max_value()), i128::max_value()); + assert_eq!(c.saturating_mul_int(i128::MAX), i128::MAX); assert_eq!(c.saturating_mul_int(i128::min_value()), i128::min_value()); } @@ -1223,7 +1223,7 @@ macro_rules! implement_fixed { assert_eq!(e.checked_div_int(2.into()), Some(3)); assert_eq!(f.checked_div_int(2.into()), Some(2)); - assert_eq!(a.checked_div_int(i128::max_value()), Some(0)); + assert_eq!(a.checked_div_int(i128::MAX), Some(0)); assert_eq!(a.checked_div_int(2), Some(inner_max / (2 * accuracy))); assert_eq!(a.checked_div_int(inner_max / accuracy), Some(1)); assert_eq!(a.checked_div_int(1i8), None); @@ -1244,11 +1244,11 @@ macro_rules! implement_fixed { assert_eq!(b.checked_div_int(2), Some(inner_min / (2 * accuracy))); assert_eq!(c.checked_div_int(1), Some(0)); - assert_eq!(c.checked_div_int(i128::max_value()), Some(0)); + assert_eq!(c.checked_div_int(i128::MAX), Some(0)); assert_eq!(c.checked_div_int(1i8), Some(0)); assert_eq!(d.checked_div_int(1), Some(1)); - assert_eq!(d.checked_div_int(i32::max_value()), Some(0)); + assert_eq!(d.checked_div_int(i32::MAX), Some(0)); assert_eq!(d.checked_div_int(1i8), Some(1)); assert_eq!(a.checked_div_int(0), None); @@ -1303,17 +1303,17 @@ macro_rules! implement_fixed { assert_eq!($name::zero().saturating_mul_acc_int(42i8), 42i8); assert_eq!($name::one().saturating_mul_acc_int(42i8), 2 * 42i8); - assert_eq!($name::one().saturating_mul_acc_int(i128::max_value()), i128::max_value()); + assert_eq!($name::one().saturating_mul_acc_int(i128::MAX), i128::MAX); assert_eq!($name::one().saturating_mul_acc_int(i128::min_value()), i128::min_value()); - assert_eq!($name::one().saturating_mul_acc_int(u128::max_value() / 2), u128::max_value() - 1); + assert_eq!($name::one().saturating_mul_acc_int(u128::MAX / 2), u128::MAX - 1); assert_eq!($name::one().saturating_mul_acc_int(u128::min_value()), u128::min_value()); if $name::SIGNED { let a = $name::saturating_from_rational(-1, 2); assert_eq!(a.saturating_mul_acc_int(42i8), 21i8); assert_eq!(a.saturating_mul_acc_int(42u8), 21u8); - assert_eq!(a.saturating_mul_acc_int(u128::max_value() - 1), u128::max_value() / 2); + assert_eq!(a.saturating_mul_acc_int(u128::MAX - 1), u128::MAX / 2); } } @@ -1327,7 +1327,7 @@ macro_rules! 
implement_fixed { $name::saturating_from_integer(1125899906842624i64)); assert_eq!($name::saturating_from_integer(1).saturating_pow(1000), (1).into()); - assert_eq!($name::saturating_from_integer(1).saturating_pow(usize::max_value()), (1).into()); + assert_eq!($name::saturating_from_integer(1).saturating_pow(usize::MAX), (1).into()); if $name::SIGNED { // Saturating. @@ -1335,15 +1335,15 @@ macro_rules! implement_fixed { assert_eq!($name::saturating_from_integer(-1).saturating_pow(1000), (1).into()); assert_eq!($name::saturating_from_integer(-1).saturating_pow(1001), 0.saturating_sub(1).into()); - assert_eq!($name::saturating_from_integer(-1).saturating_pow(usize::max_value()), 0.saturating_sub(1).into()); - assert_eq!($name::saturating_from_integer(-1).saturating_pow(usize::max_value() - 1), (1).into()); + assert_eq!($name::saturating_from_integer(-1).saturating_pow(usize::MAX), 0.saturating_sub(1).into()); + assert_eq!($name::saturating_from_integer(-1).saturating_pow(usize::MAX - 1), (1).into()); } assert_eq!($name::saturating_from_integer(114209).saturating_pow(5), $name::max_value()); - assert_eq!($name::saturating_from_integer(1).saturating_pow(usize::max_value()), (1).into()); - assert_eq!($name::saturating_from_integer(0).saturating_pow(usize::max_value()), (0).into()); - assert_eq!($name::saturating_from_integer(2).saturating_pow(usize::max_value()), $name::max_value()); + assert_eq!($name::saturating_from_integer(1).saturating_pow(usize::MAX), (1).into()); + assert_eq!($name::saturating_from_integer(0).saturating_pow(usize::MAX), (0).into()); + assert_eq!($name::saturating_from_integer(2).saturating_pow(usize::MAX), $name::max_value()); } #[test] diff --git a/primitives/arithmetic/src/lib.rs b/primitives/arithmetic/src/lib.rs index d6069ad5154d1..527530d63e51d 100644 --- a/primitives/arithmetic/src/lib.rs +++ b/primitives/arithmetic/src/lib.rs @@ -500,15 +500,15 @@ mod threshold_compare_tests { #[test] fn saturating_mul_works() { assert_eq!(Saturating::saturating_mul(2, i32::min_value()), i32::min_value()); - assert_eq!(Saturating::saturating_mul(2, i32::max_value()), i32::max_value()); + assert_eq!(Saturating::saturating_mul(2, i32::MAX), i32::MAX); } #[test] fn saturating_pow_works() { assert_eq!(Saturating::saturating_pow(i32::min_value(), 0), 1); - assert_eq!(Saturating::saturating_pow(i32::max_value(), 0), 1); + assert_eq!(Saturating::saturating_pow(i32::MAX, 0), 1); assert_eq!(Saturating::saturating_pow(i32::min_value(), 3), i32::min_value()); - assert_eq!(Saturating::saturating_pow(i32::min_value(), 2), i32::max_value()); - assert_eq!(Saturating::saturating_pow(i32::max_value(), 2), i32::max_value()); + assert_eq!(Saturating::saturating_pow(i32::min_value(), 2), i32::MAX); + assert_eq!(Saturating::saturating_pow(i32::MAX, 2), i32::MAX); } } diff --git a/primitives/arithmetic/src/rational.rs b/primitives/arithmetic/src/rational.rs index 88eaca1efb6c4..feb81eb572068 100644 --- a/primitives/arithmetic/src/rational.rs +++ b/primitives/arithmetic/src/rational.rs @@ -267,9 +267,9 @@ mod tests { use super::*; use super::helpers_128bit::*; - const MAX128: u128 = u128::max_value(); - const MAX64: u128 = u64::max_value() as u128; - const MAX64_2: u128 = 2 * u64::max_value() as u128; + const MAX128: u128 = u128::MAX; + const MAX64: u128 = u64::MAX as u128; + const MAX64_2: u128 = 2 * u64::MAX as u128; fn r(p: u128, q: u128) -> Rational128 { Rational128(p, q) diff --git a/primitives/core/src/hash.rs b/primitives/core/src/hash.rs index dcaafd2906de4..6ef1827a1ba0c 100644 --- 
a/primitives/core/src/hash.rs +++ b/primitives/core/src/hash.rs @@ -43,7 +43,7 @@ mod tests { (H160::from_low_u64_be(16), "0x0000000000000000000000000000000000000010"), (H160::from_low_u64_be(1_000), "0x00000000000000000000000000000000000003e8"), (H160::from_low_u64_be(100_000), "0x00000000000000000000000000000000000186a0"), - (H160::from_low_u64_be(u64::max_value()), "0x000000000000000000000000ffffffffffffffff"), + (H160::from_low_u64_be(u64::MAX), "0x000000000000000000000000ffffffffffffffff"), ]; for (number, expected) in tests { @@ -61,7 +61,7 @@ mod tests { (H256::from_low_u64_be(16), "0x0000000000000000000000000000000000000000000000000000000000000010"), (H256::from_low_u64_be(1_000), "0x00000000000000000000000000000000000000000000000000000000000003e8"), (H256::from_low_u64_be(100_000), "0x00000000000000000000000000000000000000000000000000000000000186a0"), - (H256::from_low_u64_be(u64::max_value()), "0x000000000000000000000000000000000000000000000000ffffffffffffffff"), + (H256::from_low_u64_be(u64::MAX), "0x000000000000000000000000000000000000000000000000ffffffffffffffff"), ]; for (number, expected) in tests { diff --git a/primitives/core/src/offchain/mod.rs b/primitives/core/src/offchain/mod.rs index 66fc85ec7bf08..d3d2356b6ee8d 100644 --- a/primitives/core/src/offchain/mod.rs +++ b/primitives/core/src/offchain/mod.rs @@ -282,7 +282,7 @@ impl Capabilities { /// Return an object representing all capabilities enabled. pub fn all() -> Self { - Self(u8::max_value()) + Self(u8::MAX) } /// Return capabilities for rich offchain calls. diff --git a/primitives/core/src/uint.rs b/primitives/core/src/uint.rs index f917f472d787b..ff45ad6ecf0d5 100644 --- a/primitives/core/src/uint.rs +++ b/primitives/core/src/uint.rs @@ -39,8 +39,8 @@ mod tests { ($name::from(16), "0x10"), ($name::from(1_000), "0x3e8"), ($name::from(100_000), "0x186a0"), - ($name::from(u64::max_value()), "0xffffffffffffffff"), - ($name::from(u64::max_value()) + $name::from(1), "0x10000000000000000"), + ($name::from(u64::MAX), "0xffffffffffffffff"), + ($name::from(u64::MAX) + $name::from(1), "0x10000000000000000"), ]; for (number, expected) in tests { diff --git a/primitives/npos-elections/src/phragmen.rs b/primitives/npos-elections/src/phragmen.rs index a1e632acf5fd3..bbead91c938f8 100644 --- a/primitives/npos-elections/src/phragmen.rs +++ b/primitives/npos-elections/src/phragmen.rs @@ -33,7 +33,7 @@ use sp_std::prelude::*; /// The denominator used for loads. Since votes are collected as u64, the smallest ratio that we /// might collect is `1/approval_stake` where approval stake is the sum of votes. Hence, some number -/// bigger than u64::max_value() is needed. For maximum accuracy we simply use u128; +/// bigger than u64::MAX is needed. For maximum accuracy we simply use u128; const DEN: ExtendedBalance = ExtendedBalance::max_value(); /// Execute sequential phragmen with potentially some rounds of `balancing`. 
The return type is list diff --git a/primitives/npos-elections/src/phragmms.rs b/primitives/npos-elections/src/phragmms.rs index 644535d4c41c2..2a643d3673a52 100644 --- a/primitives/npos-elections/src/phragmms.rs +++ b/primitives/npos-elections/src/phragmms.rs @@ -181,7 +181,7 @@ pub(crate) fn apply_elected( ) { let elected_who = elected_ptr.borrow().who.clone(); let cutoff = elected_ptr.borrow().score.to_den(1) - .expect("(n / d) < u128::max() and (n' / 1) == (n / d), thus n' < u128::max()'; qed.") + .expect("(n / d) < u128::MAX and (n' / 1) == (n / d), thus n' < u128::MAX'; qed.") .n(); let mut elected_backed_stake = elected_ptr.borrow().backed_stake; @@ -386,10 +386,10 @@ mod tests { #[test] fn large_balance_wont_overflow() { let candidates = vec![1u32, 2, 3]; - let mut voters = (0..1000).map(|i| (10 + i, u64::max_value(), vec![1, 2, 3])).collect::>(); + let mut voters = (0..1000).map(|i| (10 + i, u64::MAX, vec![1, 2, 3])).collect::>(); // give a bit more to 1 and 3. - voters.push((2, u64::max_value(), vec![1, 3])); + voters.push((2, u64::MAX, vec![1, 3])); let ElectionResult { winners, assignments: _ } = phragmms::<_, Perbill>(2, candidates, voters, Some((2, 0))).unwrap(); assert_eq!(winners.into_iter().map(|(w, _)| w).collect::>(), vec![1u32, 3]); diff --git a/primitives/npos-elections/src/tests.rs b/primitives/npos-elections/src/tests.rs index 06505721fd23f..8cadff949b6f2 100644 --- a/primitives/npos-elections/src/tests.rs +++ b/primitives/npos-elections/src/tests.rs @@ -458,11 +458,11 @@ fn phragmen_accuracy_on_large_scale_only_candidates() { // candidate can have the maximum amount of tokens, and also supported by the maximum. let candidates = vec![1, 2, 3, 4, 5]; let stake_of = create_stake_of(&[ - (1, (u64::max_value() - 1).into()), - (2, (u64::max_value() - 4).into()), - (3, (u64::max_value() - 5).into()), - (4, (u64::max_value() - 3).into()), - (5, (u64::max_value() - 2).into()), + (1, (u64::MAX - 1).into()), + (2, (u64::MAX - 4).into()), + (3, (u64::MAX - 5).into()), + (4, (u64::MAX - 3).into()), + (5, (u64::MAX - 2).into()), ]); let ElectionResult { winners, assignments } = seq_phragmen::<_, Perbill>( @@ -489,13 +489,13 @@ fn phragmen_accuracy_on_large_scale_voters_and_candidates() { ]; voters.extend(auto_generate_self_voters(&candidates)); let stake_of = create_stake_of(&[ - (1, (u64::max_value() - 1).into()), - (2, (u64::max_value() - 4).into()), - (3, (u64::max_value() - 5).into()), - (4, (u64::max_value() - 3).into()), - (5, (u64::max_value() - 2).into()), - (13, (u64::max_value() - 10).into()), - (14, u64::max_value().into()), + (1, (u64::MAX - 1).into()), + (2, (u64::MAX - 4).into()), + (3, (u64::MAX - 5).into()), + (4, (u64::MAX - 3).into()), + (5, (u64::MAX - 2).into()), + (13, (u64::MAX - 10).into()), + (14, u64::MAX.into()), ]); let ElectionResult { winners, assignments } = seq_phragmen::<_, Perbill>( diff --git a/primitives/runtime-interface/test-wasm/src/lib.rs b/primitives/runtime-interface/test-wasm/src/lib.rs index 4cdf59349dd76..39f1c8b3f5708 100644 --- a/primitives/runtime-interface/test-wasm/src/lib.rs +++ b/primitives/runtime-interface/test-wasm/src/lib.rs @@ -226,11 +226,11 @@ wasm_export_functions! 
{ } fn test_u128_i128_as_parameter_and_return_value() { - for val in &[u128::max_value(), 1u128, 5000u128, u64::max_value() as u128] { + for val in &[u128::MAX, 1u128, 5000u128, u64::MAX as u128] { assert_eq!(*val, test_api::get_and_return_u128(*val)); } - for val in &[i128::max_value(), i128::min_value(), 1i128, 5000i128, u64::max_value() as i128] { + for val in &[i128::MAX, i128::min_value(), 1i128, 5000i128, u64::MAX as i128] { assert_eq!(*val, test_api::get_and_return_i128(*val)); } } diff --git a/primitives/runtime/src/curve.rs b/primitives/runtime/src/curve.rs index 06f7f2c7e3f05..326ababcf5d4f 100644 --- a/primitives/runtime/src/curve.rs +++ b/primitives/runtime/src/curve.rs @@ -112,17 +112,17 @@ fn test_multiply_by_rational_saturating() { for value in 0..=div { for p in 0..=div { for q in 1..=div { - let value: u64 = (value as u128 * u64::max_value() as u128 / div as u128) + let value: u64 = (value as u128 * u64::MAX as u128 / div as u128) .try_into().unwrap(); - let p = (p as u64 * u32::max_value() as u64 / div as u64) + let p = (p as u64 * u32::MAX as u64 / div as u64) .try_into().unwrap(); - let q = (q as u64 * u32::max_value() as u64 / div as u64) + let q = (q as u64 * u32::MAX as u64 / div as u64) .try_into().unwrap(); assert_eq!( multiply_by_rational_saturating(value, p, q), (value as u128 * p as u128 / q as u128) - .try_into().unwrap_or(u64::max_value()) + .try_into().unwrap_or(u64::MAX) ); } } @@ -153,9 +153,9 @@ fn test_calculate_for_fraction_times_denominator() { let div = 100u32; for d in 0..=div { for n in 0..=d { - let d: u64 = (d as u128 * u64::max_value() as u128 / div as u128) + let d: u64 = (d as u128 * u64::MAX as u128 / div as u128) .try_into().unwrap(); - let n: u64 = (n as u128 * u64::max_value() as u128 / div as u128) + let n: u64 = (n as u128 * u64::MAX as u128 / div as u128) .try_into().unwrap(); let res = curve.calculate_for_fraction_times_denominator(n, d); diff --git a/primitives/runtime/src/generic/era.rs b/primitives/runtime/src/generic/era.rs index fbda688cc407a..83a9f22afe5d6 100644 --- a/primitives/runtime/src/generic/era.rs +++ b/primitives/runtime/src/generic/era.rs @@ -97,7 +97,7 @@ impl Era { /// Get the block number of the first block at which the era has ended. 
pub fn death(self, current: u64) -> u64 { match self { - Self::Immortal => u64::max_value(), + Self::Immortal => u64::MAX, Self::Mortal(period, _) => self.birth(current) + period, } } @@ -145,11 +145,11 @@ mod tests { fn immortal_works() { let e = Era::immortal(); assert_eq!(e.birth(0), 0); - assert_eq!(e.death(0), u64::max_value()); + assert_eq!(e.death(0), u64::MAX); assert_eq!(e.birth(1), 0); - assert_eq!(e.death(1), u64::max_value()); - assert_eq!(e.birth(u64::max_value()), 0); - assert_eq!(e.death(u64::max_value()), u64::max_value()); + assert_eq!(e.death(1), u64::MAX); + assert_eq!(e.birth(u64::MAX), 0); + assert_eq!(e.death(u64::MAX), u64::MAX); assert!(e.is_immortal()); assert_eq!(e.encode(), vec![0u8]); diff --git a/primitives/runtime/src/generic/header.rs b/primitives/runtime/src/generic/header.rs index 69c5f50796886..def761b201ceb 100644 --- a/primitives/runtime/src/generic/header.rs +++ b/primitives/runtime/src/generic/header.rs @@ -200,8 +200,8 @@ mod tests { assert_eq!(serialize(0), "\"0x0\"".to_owned()); assert_eq!(serialize(1), "\"0x1\"".to_owned()); - assert_eq!(serialize(u64::max_value() as u128), "\"0xffffffffffffffff\"".to_owned()); - assert_eq!(serialize(u64::max_value() as u128 + 1), "\"0x10000000000000000\"".to_owned()); + assert_eq!(serialize(u64::MAX as u128), "\"0xffffffffffffffff\"".to_owned()); + assert_eq!(serialize(u64::MAX as u128 + 1), "\"0x10000000000000000\"".to_owned()); } #[test] @@ -213,7 +213,7 @@ mod tests { assert_eq!(deserialize("\"0x0\""), 0); assert_eq!(deserialize("\"0x1\""), 1); - assert_eq!(deserialize("\"0xffffffffffffffff\""), u64::max_value() as u128); - assert_eq!(deserialize("\"0x10000000000000000\""), u64::max_value() as u128 + 1); + assert_eq!(deserialize("\"0xffffffffffffffff\""), u64::MAX as u128); + assert_eq!(deserialize("\"0x10000000000000000\""), u64::MAX as u128 + 1); } } diff --git a/primitives/runtime/src/random_number_generator.rs b/primitives/runtime/src/random_number_generator.rs index 41ca7c723e9c7..0adf346579a90 100644 --- a/primitives/runtime/src/random_number_generator.rs +++ b/primitives/runtime/src/random_number_generator.rs @@ -76,7 +76,7 @@ impl RandomNumberGenerator { self.offset += needed as u32; let raw = u32::decode(&mut TrailingZeroInput::new(data)).unwrap_or(0); if raw <= top { - break if max < u32::max_value() { + break if max < u32::MAX { raw % (max + 1) } else { raw diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 4cfe3623812c1..a496245637a52 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -438,7 +438,7 @@ impl<'a, DB, H, T> hash_db::AsHashDB for KeySpacedDBMut<'a, DB, H> where /// Constants used into trie simplification codec. 
mod trie_constants { pub const EMPTY_TRIE: u8 = 0; - pub const NIBBLE_SIZE_BOUND: usize = u16::max_value() as usize; + pub const NIBBLE_SIZE_BOUND: usize = u16::MAX as usize; pub const LEAF_PREFIX_MASK: u8 = 0b_01 << 6; pub const BRANCH_WITHOUT_MASK: u8 = 0b_10 << 6; pub const BRANCH_WITH_MASK: u8 = 0b_11 << 6; From 8a9a8f170f556beb7c86e996f0576ae3df632f9b Mon Sep 17 00:00:00 2001 From: kotlarmilos Date: Thu, 24 Jun 2021 13:18:15 +0200 Subject: [PATCH 42/67] Add OriginTrail Parachain to SS58 Registry (#9067) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add OriginTrail Parachain to SS58 Registry * Update ss58-registry.json Co-authored-by: Bastian Köcher Co-authored-by: Bastian Köcher --- primitives/core/src/crypto.rs | 2 ++ ss58-registry.json | 11 ++++++++++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index 9e3177f249a5e..d9a0a69e16813 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -588,6 +588,8 @@ ss58_address_format!( (98, "polkasmith", "PolkaSmith Canary Network, standard account (*25519).") PolkaFoundry => (99, "polkafoundry", "PolkaFoundry Network, standard account (*25519).") + OriginTrailAccount => + (101, "origintrail-parachain", "OriginTrail Parachain, ethereumm account (ECDSA).") SocialAccount => (252, "social-network", "Social Network, standard account (*25519).") Moonbeam => diff --git a/ss58-registry.json b/ss58-registry.json index 133cb6506fb05..4d818dfa5b3e2 100644 --- a/ss58-registry.json +++ b/ss58-registry.json @@ -522,7 +522,16 @@ "decimals": [18], "standardAccount": "*25519", "website": "https://polkafoundry.com" - }, + }, + { + "prefix": 101, + "network": "origintrail-parachain", + "displayName": "OriginTrail Parachain", + "symbols": ["TRAC"], + "decimals": [18], + "standardAccount": "secp256k1", + "website": "https://origintrail.io" + }, { "prefix": 252, "network": "social-network", From 77dcc4f90917f2215ee40efeacd68be9ce85db14 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 24 Jun 2021 16:19:36 +0200 Subject: [PATCH 43/67] Remove RandomNumberGenerator (#9198) * Remove RandomNumberGenerator This is not used in Substrate/Polkadot. If someone else needs it, they can copy the code or use chacha. * :facepalm: --- primitives/runtime/src/lib.rs | 3 - .../runtime/src/random_number_generator.rs | 118 ------------------ 2 files changed, 121 deletions(-) delete mode 100644 primitives/runtime/src/random_number_generator.rs diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index 8f7bbf1680c05..9bc23be1e9759 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -54,7 +54,6 @@ pub mod offchain; pub mod testing; pub mod traits; pub mod transaction_validity; -pub mod random_number_generator; mod runtime_string; mod multiaddress; pub mod runtime_logger; @@ -85,8 +84,6 @@ pub use sp_arithmetic::helpers_128bit; /// Re-export big_uint stuff. pub use sp_arithmetic::biguint; -pub use random_number_generator::RandomNumberGenerator; - pub use either::Either; /// An abstraction over justification for a block's validity under a consensus algorithm. diff --git a/primitives/runtime/src/random_number_generator.rs b/primitives/runtime/src/random_number_generator.rs deleted file mode 100644 index 0adf346579a90..0000000000000 --- a/primitives/runtime/src/random_number_generator.rs +++ /dev/null @@ -1,118 +0,0 @@ -// This file is part of Substrate. 
- -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! A simple pseudo random number generator that allows a stream of random numbers to be efficiently -//! created from a single initial seed hash. - -use codec::{Encode, Decode}; -use crate::traits::{Hash, TrailingZeroInput}; - -/// Pseudo-random number streamer. This retains the state of the random number stream. It's as -/// secure as the combination of the seed with which it is constructed and the hash function it uses -/// to cycle elements. -/// -/// It can be saved and later reloaded using the Codec traits. -/// -/// (It is recommended to use the `rand_chacha` crate as an alternative to this where possible.) -/// -/// Example: -/// ``` -/// use sp_runtime::traits::{Hash, BlakeTwo256}; -/// use sp_runtime::RandomNumberGenerator; -/// let random_seed = BlakeTwo256::hash(b"Sixty-nine"); -/// let mut rng = >::new(random_seed); -/// assert_eq!(rng.pick_u32(100), 59); -/// assert_eq!(rng.pick_item(&[1, 2, 3]), Some(&1)); -/// ``` -/// -/// This can use any cryptographic `Hash` function as the means of entropy-extension, and avoids -/// needless extensions of entropy. -/// -/// If you're persisting it over blocks, be aware that the sequence will start to repeat. This won't -/// be a practical issue unless you're using tiny hash types (e.g. 64-bit) and pulling hundred of -/// megabytes of data from it. -#[derive(Encode, Decode)] -pub struct RandomNumberGenerator { - current: Hashing::Output, - offset: u32, -} - -impl RandomNumberGenerator { - /// A new source of random data. - pub fn new(seed: Hashing::Output) -> Self { - Self { - current: seed, - offset: 0, - } - } - - fn offset(&self) -> usize { self.offset as usize } - - /// Returns a number at least zero, at most `max`. - pub fn pick_u32(&mut self, max: u32) -> u32 { - let needed = (4 - max.leading_zeros() / 8) as usize; - let top = ((1 << (needed as u64 * 8)) / (max as u64 + 1) * (max as u64 + 1) - 1) as u32; - loop { - if self.offset() + needed > self.current.as_ref().len() { - // rehash - self.current = ::hash(self.current.as_ref()); - self.offset = 0; - } - let data = &self.current.as_ref()[self.offset()..self.offset() + needed]; - self.offset += needed as u32; - let raw = u32::decode(&mut TrailingZeroInput::new(data)).unwrap_or(0); - if raw <= top { - break if max < u32::MAX { - raw % (max + 1) - } else { - raw - } - } - } - } - - /// Returns a number at least zero, at most `max`. - /// - /// This returns a `usize`, but internally it only uses `u32` so avoid consensus problems. - pub fn pick_usize(&mut self, max: usize) -> usize { - self.pick_u32(max as u32) as usize - } - - /// Pick a random element from an array of `items`. - /// - /// This is guaranteed to return `Some` except in the case that the given array `items` is - /// empty. 
- pub fn pick_item<'a, T>(&mut self, items: &'a [T]) -> Option<&'a T> { - if items.is_empty() { - None - } else { - Some(&items[self.pick_usize(items.len() - 1)]) - } - } -} - -#[cfg(test)] -mod tests { - use super::RandomNumberGenerator; - use crate::traits::{Hash, BlakeTwo256}; - - #[test] - fn does_not_panic_on_max() { - let seed = BlakeTwo256::hash(b"Fourty-two"); - let _random = RandomNumberGenerator::::new(seed).pick_u32(u32::MAX); - } -} From bab9deca26db20bfc914263e0542a7a1b0d8f174 Mon Sep 17 00:00:00 2001 From: Squirrel Date: Thu, 24 Jun 2021 15:48:39 +0100 Subject: [PATCH 44/67] Use MIN associated const (#9199) --- client/network/src/protocol/sync.rs | 2 +- client/network/src/state_request_handler.rs | 2 +- client/peerset/src/lib.rs | 4 +- client/peerset/tests/fuzz.rs | 2 +- primitives/arithmetic/src/fixed_point.rs | 44 +++++++++---------- primitives/arithmetic/src/lib.rs | 8 ++-- .../runtime-interface/test-wasm/src/lib.rs | 2 +- 7 files changed, 32 insertions(+), 32 deletions(-) diff --git a/client/network/src/protocol/sync.rs b/client/network/src/protocol/sync.rs index 82df21fe9d044..44fbe64bfcff4 100644 --- a/client/network/src/protocol/sync.rs +++ b/client/network/src/protocol/sync.rs @@ -105,7 +105,7 @@ mod rep { /// Reputation change when a peer sent us a status message with a different /// genesis than us. - pub const GENESIS_MISMATCH: Rep = Rep::new(i32::min_value(), "Genesis mismatch"); + pub const GENESIS_MISMATCH: Rep = Rep::new(i32::MIN, "Genesis mismatch"); /// Reputation change for peers which send us a block with an incomplete header. pub const INCOMPLETE_HEADER: Rep = Rep::new(-(1 << 20), "Incomplete header"); diff --git a/client/network/src/state_request_handler.rs b/client/network/src/state_request_handler.rs index bf47b412f46d5..d340ff21bd449 100644 --- a/client/network/src/state_request_handler.rs +++ b/client/network/src/state_request_handler.rs @@ -42,7 +42,7 @@ mod rep { use super::ReputationChange as Rep; /// Reputation change when a peer sent us the same request multiple times. - pub const SAME_REQUEST: Rep = Rep::new(i32::min_value(), "Same state request multiple times"); + pub const SAME_REQUEST: Rep = Rep::new(i32::MIN, "Same state request multiple times"); } /// Generates a [`ProtocolConfig`] for the block request protocol, refusing incoming requests. diff --git a/client/peerset/src/lib.rs b/client/peerset/src/lib.rs index 36d1e1831cec6..1efb21dd5389e 100644 --- a/client/peerset/src/lib.rs +++ b/client/peerset/src/lib.rs @@ -45,7 +45,7 @@ use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender, TracingUnbounded pub use libp2p::PeerId; /// We don't accept nodes whose reputation is under this value. -const BANNED_THRESHOLD: i32 = 82 * (i32::min_value() / 100); +const BANNED_THRESHOLD: i32 = 82 * (i32::MIN / 100); /// Reputation change for a node when we get disconnected from it. const DISCONNECT_REPUTATION_CHANGE: i32 = -256; /// Amount of time between the moment we disconnect from a node and the moment we remove it from @@ -107,7 +107,7 @@ impl ReputationChange { /// New reputation change that forces minimum possible reputation. 
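For reference, the banning cut-off introduced above evaluates to 82 * (i32::MIN / 100) = 82 * (-21_474_836) = -1_760_936_552 (Rust integer division truncates toward zero), i.e. roughly 82% of the most negative i32. A fatal reputation change, which forces the reputation to i32::MIN, therefore always places a peer below this threshold.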
pub const fn new_fatal(reason: &'static str) -> ReputationChange { - ReputationChange { value: i32::min_value(), reason } + ReputationChange { value: i32::MIN, reason } } } diff --git a/client/peerset/tests/fuzz.rs b/client/peerset/tests/fuzz.rs index d951b0cc560ce..96d1a48683f18 100644 --- a/client/peerset/tests/fuzz.rs +++ b/client/peerset/tests/fuzz.rs @@ -120,7 +120,7 @@ fn test_once() { // If we generate 2, adjust a random reputation. 2 => { if let Some(id) = known_nodes.iter().choose(&mut rng) { - let val = Uniform::new_inclusive(i32::min_value(), i32::MAX) + let val = Uniform::new_inclusive(i32::MIN, i32::MAX) .sample(&mut rng); peerset_handle.report_peer(id.clone(), ReputationChange::new(val, "")); } diff --git a/primitives/arithmetic/src/fixed_point.rs b/primitives/arithmetic/src/fixed_point.rs index 4940c7751aa1d..9c5078ca66f09 100644 --- a/primitives/arithmetic/src/fixed_point.rs +++ b/primitives/arithmetic/src/fixed_point.rs @@ -640,7 +640,7 @@ macro_rules! implement_fixed { }; // Min value fits. - assert_eq!(from_i129::(a), Some(i128::min_value())); + assert_eq!(from_i129::(a), Some(i128::MIN)); let a = I129 { value: i128::MAX as u128 + 1, @@ -677,13 +677,13 @@ macro_rules! implement_fixed { let b = -1i32; // Pos + Neg => Min. - assert_eq!(to_bound::<_, _, i32>(a, b), i32::min_value()); + assert_eq!(to_bound::<_, _, i32>(a, b), i32::MIN); let a = -1i32; let b = 1i32; // Neg + Pos => Min. - assert_eq!(to_bound::<_, _, i32>(a, b), i32::min_value()); + assert_eq!(to_bound::<_, _, i32>(a, b), i32::MIN); let a = 1i32; let b = -1i32; @@ -1092,29 +1092,29 @@ macro_rules! implement_fixed { if $name::SIGNED { // Min - 1. - assert_eq!(a.checked_mul_int((i128::min_value() + 1) / 2), Some(i128::min_value() + 2)); + assert_eq!(a.checked_mul_int((i128::MIN + 1) / 2), Some(i128::MIN + 2)); // Min. - assert_eq!(a.checked_mul_int(i128::min_value() / 2), Some(i128::min_value())); + assert_eq!(a.checked_mul_int(i128::MIN / 2), Some(i128::MIN)); // Min + 1 => None. - assert_eq!(a.checked_mul_int(i128::min_value() / 2 - 1), None); + assert_eq!(a.checked_mul_int(i128::MIN / 2 - 1), None); let b = $name::saturating_from_rational(1, -2); assert_eq!(b.checked_mul_int(42i128), Some(-21)); assert_eq!(b.checked_mul_int(u128::MAX), None); assert_eq!(b.checked_mul_int(i128::MAX), Some(i128::MAX / -2)); - assert_eq!(b.checked_mul_int(i128::min_value()), Some(i128::min_value() / -2)); + assert_eq!(b.checked_mul_int(i128::MIN), Some(i128::MIN / -2)); } let a = $name::saturating_from_rational(1, 2); assert_eq!(a.checked_mul_int(42i128), Some(21)); assert_eq!(a.checked_mul_int(i128::MAX), Some(i128::MAX / 2)); - assert_eq!(a.checked_mul_int(i128::min_value()), Some(i128::min_value() / 2)); + assert_eq!(a.checked_mul_int(i128::MIN), Some(i128::MIN / 2)); let c = $name::saturating_from_integer(255); assert_eq!(c.checked_mul_int(2i8), None); assert_eq!(c.checked_mul_int(2i128), Some(510)); assert_eq!(c.checked_mul_int(i128::MAX), None); - assert_eq!(c.checked_mul_int(i128::min_value()), None); + assert_eq!(c.checked_mul_int(i128::MIN), None); } #[test] @@ -1128,30 +1128,30 @@ macro_rules! implement_fixed { assert_eq!(a.saturating_mul_int(i128::MAX / 2 + 1), i128::MAX); // Min - 1. - assert_eq!(a.saturating_mul_int((i128::min_value() + 1) / 2), i128::min_value() + 2); + assert_eq!(a.saturating_mul_int((i128::MIN + 1) / 2), i128::MIN + 2); // Min. - assert_eq!(a.saturating_mul_int(i128::min_value() / 2), i128::min_value()); + assert_eq!(a.saturating_mul_int(i128::MIN / 2), i128::MIN); // Min + 1 => saturates to min. 
- assert_eq!(a.saturating_mul_int(i128::min_value() / 2 - 1), i128::min_value()); + assert_eq!(a.saturating_mul_int(i128::MIN / 2 - 1), i128::MIN); if $name::SIGNED { let b = $name::saturating_from_rational(1, -2); assert_eq!(b.saturating_mul_int(42i32), -21); assert_eq!(b.saturating_mul_int(i128::MAX), i128::MAX / -2); - assert_eq!(b.saturating_mul_int(i128::min_value()), i128::min_value() / -2); - assert_eq!(b.saturating_mul_int(u128::MAX), u128::min_value()); + assert_eq!(b.saturating_mul_int(i128::MIN), i128::MIN / -2); + assert_eq!(b.saturating_mul_int(u128::MAX), u128::MIN); } let a = $name::saturating_from_rational(1, 2); assert_eq!(a.saturating_mul_int(42i32), 21); assert_eq!(a.saturating_mul_int(i128::MAX), i128::MAX / 2); - assert_eq!(a.saturating_mul_int(i128::min_value()), i128::min_value() / 2); + assert_eq!(a.saturating_mul_int(i128::MIN), i128::MIN / 2); let c = $name::saturating_from_integer(255); assert_eq!(c.saturating_mul_int(2i8), i8::MAX); - assert_eq!(c.saturating_mul_int(-2i8), i8::min_value()); + assert_eq!(c.saturating_mul_int(-2i8), i8::MIN); assert_eq!(c.saturating_mul_int(i128::MAX), i128::MAX); - assert_eq!(c.saturating_mul_int(i128::min_value()), i128::min_value()); + assert_eq!(c.saturating_mul_int(i128::MIN), i128::MIN); } #[test] @@ -1232,13 +1232,13 @@ macro_rules! implement_fixed { // Not executed by unsigned inners. assert_eq!(a.checked_div_int(0.saturating_sub(2)), Some(0.saturating_sub(inner_max / (2 * accuracy)))); assert_eq!(a.checked_div_int(0.saturating_sub(inner_max / accuracy)), Some(0.saturating_sub(1))); - assert_eq!(b.checked_div_int(i128::min_value()), Some(0)); + assert_eq!(b.checked_div_int(i128::MIN), Some(0)); assert_eq!(b.checked_div_int(inner_min / accuracy), Some(1)); assert_eq!(b.checked_div_int(1i8), None); assert_eq!(b.checked_div_int(0.saturating_sub(2)), Some(0.saturating_sub(inner_min / (2 * accuracy)))); assert_eq!(b.checked_div_int(0.saturating_sub(inner_min / accuracy)), Some(0.saturating_sub(1))); - assert_eq!(c.checked_div_int(i128::min_value()), Some(0)); - assert_eq!(d.checked_div_int(i32::min_value()), Some(0)); + assert_eq!(c.checked_div_int(i128::MIN), Some(0)); + assert_eq!(d.checked_div_int(i32::MIN), Some(0)); } assert_eq!(b.checked_div_int(2), Some(inner_min / (2 * accuracy))); @@ -1304,10 +1304,10 @@ macro_rules! 
implement_fixed { assert_eq!($name::one().saturating_mul_acc_int(42i8), 2 * 42i8); assert_eq!($name::one().saturating_mul_acc_int(i128::MAX), i128::MAX); - assert_eq!($name::one().saturating_mul_acc_int(i128::min_value()), i128::min_value()); + assert_eq!($name::one().saturating_mul_acc_int(i128::MIN), i128::MIN); assert_eq!($name::one().saturating_mul_acc_int(u128::MAX / 2), u128::MAX - 1); - assert_eq!($name::one().saturating_mul_acc_int(u128::min_value()), u128::min_value()); + assert_eq!($name::one().saturating_mul_acc_int(u128::MIN), u128::MIN); if $name::SIGNED { let a = $name::saturating_from_rational(-1, 2); diff --git a/primitives/arithmetic/src/lib.rs b/primitives/arithmetic/src/lib.rs index 527530d63e51d..110e5c0728037 100644 --- a/primitives/arithmetic/src/lib.rs +++ b/primitives/arithmetic/src/lib.rs @@ -499,16 +499,16 @@ mod threshold_compare_tests { #[test] fn saturating_mul_works() { - assert_eq!(Saturating::saturating_mul(2, i32::min_value()), i32::min_value()); + assert_eq!(Saturating::saturating_mul(2, i32::MIN), i32::MIN); assert_eq!(Saturating::saturating_mul(2, i32::MAX), i32::MAX); } #[test] fn saturating_pow_works() { - assert_eq!(Saturating::saturating_pow(i32::min_value(), 0), 1); + assert_eq!(Saturating::saturating_pow(i32::MIN, 0), 1); assert_eq!(Saturating::saturating_pow(i32::MAX, 0), 1); - assert_eq!(Saturating::saturating_pow(i32::min_value(), 3), i32::min_value()); - assert_eq!(Saturating::saturating_pow(i32::min_value(), 2), i32::MAX); + assert_eq!(Saturating::saturating_pow(i32::MIN, 3), i32::MIN); + assert_eq!(Saturating::saturating_pow(i32::MIN, 2), i32::MAX); assert_eq!(Saturating::saturating_pow(i32::MAX, 2), i32::MAX); } } diff --git a/primitives/runtime-interface/test-wasm/src/lib.rs b/primitives/runtime-interface/test-wasm/src/lib.rs index 39f1c8b3f5708..65a0e5c5ca447 100644 --- a/primitives/runtime-interface/test-wasm/src/lib.rs +++ b/primitives/runtime-interface/test-wasm/src/lib.rs @@ -230,7 +230,7 @@ wasm_export_functions! 
{ assert_eq!(*val, test_api::get_and_return_u128(*val)); } - for val in &[i128::MAX, i128::min_value(), 1i128, 5000i128, u64::MAX as i128] { + for val in &[i128::MAX, i128::MIN, 1i128, 5000i128, u64::MAX as i128] { assert_eq!(*val, test_api::get_and_return_i128(*val)); } } From df5012292cea6f5a747ff0e32d2e3c25b73001d9 Mon Sep 17 00:00:00 2001 From: Qinxuan Chen Date: Fri, 25 Jun 2021 11:23:36 +0800 Subject: [PATCH 45/67] Migrate `pallet-transaction-payment` to new pallet attribute macro (#9087) * Migrate pallet-transaciont-payment to new pallet attribute macro Signed-off-by: koushiro * remove generic from genesis config * fix test * fix tests * fix deprecated * fix tests Signed-off-by: koushiro Co-authored-by: thiolliere Co-authored-by: Keith Yeung --- bin/node/runtime/src/lib.rs | 2 +- frame/balances/src/tests.rs | 6 +- frame/balances/src/tests_composite.rs | 1 + frame/balances/src/tests_local.rs | 1 + frame/executive/src/lib.rs | 3 +- frame/transaction-payment/Cargo.toml | 18 +- frame/transaction-payment/README.md | 8 +- frame/transaction-payment/rpc/Cargo.toml | 7 +- frame/transaction-payment/rpc/README.md | 4 +- .../rpc/runtime-api/Cargo.toml | 4 +- .../rpc/runtime-api/README.md | 4 +- .../rpc/runtime-api/src/lib.rs | 2 +- frame/transaction-payment/rpc/src/lib.rs | 2 +- frame/transaction-payment/src/lib.rs | 232 +++++++++++------- frame/transaction-payment/src/payment.rs | 13 +- frame/transaction-payment/src/types.rs | 6 +- 16 files changed, 185 insertions(+), 128 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 2e11ab54e4316..2ce19483e5539 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -821,7 +821,7 @@ impl pallet_contracts::Config for Runtime { type RentFraction = RentFraction; type SurchargeReward = SurchargeReward; type CallStack = [pallet_contracts::Frame; 31]; - type WeightPrice = pallet_transaction_payment::Module; + type WeightPrice = pallet_transaction_payment::Pallet; type WeightInfo = pallet_contracts::weights::SubstrateWeight; type ChainExtension = (); type DeletionQueueDepth = DeletionQueueDepth; diff --git a/frame/balances/src/tests.rs b/frame/balances/src/tests.rs index c98b0ecf02bf0..624c2de618900 100644 --- a/frame/balances/src/tests.rs +++ b/frame/balances/src/tests.rs @@ -26,7 +26,7 @@ macro_rules! decl_tests { use crate::*; use sp_runtime::{ArithmeticError, FixedPointNumber, traits::{SignedExtension, BadOrigin}}; use frame_support::{ - assert_noop, assert_storage_noop, assert_ok, assert_err, StorageValue, + assert_noop, assert_storage_noop, assert_ok, assert_err, traits::{ LockableCurrency, LockIdentifier, WithdrawReasons, Currency, ReservableCurrency, ExistenceRequirement::AllowDeath @@ -148,7 +148,9 @@ macro_rules! 
decl_tests { .monied(true) .build() .execute_with(|| { - pallet_transaction_payment::NextFeeMultiplier::put(Multiplier::saturating_from_integer(1)); + pallet_transaction_payment::NextFeeMultiplier::<$test>::put( + Multiplier::saturating_from_integer(1) + ); Balances::set_lock(ID_1, &1, 10, WithdrawReasons::RESERVE); assert_noop!( >::transfer(&1, &2, 1, AllowDeath), diff --git a/frame/balances/src/tests_composite.rs b/frame/balances/src/tests_composite.rs index ff10607bcee09..07ec0f377ecfc 100644 --- a/frame/balances/src/tests_composite.rs +++ b/frame/balances/src/tests_composite.rs @@ -43,6 +43,7 @@ frame_support::construct_runtime!( { System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + TransactionPayment: pallet_transaction_payment::{Pallet, Storage}, } ); diff --git a/frame/balances/src/tests_local.rs b/frame/balances/src/tests_local.rs index e6de7e64b16a2..a6a1a09d9cbfe 100644 --- a/frame/balances/src/tests_local.rs +++ b/frame/balances/src/tests_local.rs @@ -45,6 +45,7 @@ frame_support::construct_runtime!( { System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + TransactionPayment: pallet_transaction_payment::{Pallet, Storage}, } ); diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index d8004e14acda1..1d2ad069f07a9 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -667,6 +667,7 @@ mod tests { { System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + TransactionPayment: pallet_transaction_payment::{Pallet, Storage}, Custom: custom::{Pallet, Call, ValidateUnsigned, Inherent}, } ); @@ -835,7 +836,7 @@ mod tests { header: Header { parent_hash: [69u8; 32].into(), number: 1, - state_root: hex!("ec6bb58b0e4bc7fdf0151a0f601eb825f529fbf90b5be5b2024deba30c5cbbcb").into(), + state_root: hex!("1039e1a4bd0cf5deefe65f313577e70169c41c7773d6acf31ca8d671397559f5").into(), extrinsics_root: hex!("03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314").into(), digest: Digest { logs: vec![], }, }, diff --git a/frame/transaction-payment/Cargo.toml b/frame/transaction-payment/Cargo.toml index 2a7fbe503efaf..c5c7c34a7271f 100644 --- a/frame/transaction-payment/Cargo.toml +++ b/frame/transaction-payment/Cargo.toml @@ -15,29 +15,31 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true } -sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +smallvec = "1.4.1" + +sp-core = { version = "3.0.0", path = "../../primitives/core", default-features = false } +sp-io = { version = "3.0.0", path = "../../primitives/io", default-features = false } sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } + frame-support = { version = "3.0.0", default-features = false, path = "../support" } frame-system = { version = "3.0.0", default-features = false, path = "../system" } -smallvec = "1.4.1" -sp-io = { version = "3.0.0", path = "../../primitives/io", default-features = false } -sp-core = { version = "3.0.0", path = "../../primitives/core", default-features = false } [dev-dependencies] serde_json = "1.0.41" -pallet-balances = 
{ version = "3.0.0", path = "../balances" } sp-storage = { version = "3.0.0", path = "../../primitives/storage" } +pallet-balances = { version = "3.0.0", path = "../balances" } [features] default = ["std"] std = [ "serde", "codec/std", - "sp-std/std", + "sp-core/std", + "sp-io/std", "sp-runtime/std", + "sp-std/std", "frame-support/std", "frame-system/std", - "sp-io/std", - "sp-core/std", ] try-runtime = ["frame-support/try-runtime"] diff --git a/frame/transaction-payment/README.md b/frame/transaction-payment/README.md index 7e95677a1b272..bf114246e60fa 100644 --- a/frame/transaction-payment/README.md +++ b/frame/transaction-payment/README.md @@ -1,16 +1,16 @@ -# Transaction Payment Module +# Transaction Payment Pallet -This module provides the basic logic needed to pay the absolute minimum amount needed for a +This pallet provides the basic logic needed to pay the absolute minimum amount needed for a transaction to be included. This includes: - _weight fee_: A fee proportional to amount of weight a transaction consumes. - _length fee_: A fee proportional to the encoded length of the transaction. - _tip_: An optional tip. Tip increases the priority of the transaction, giving it a higher chance to be included by the transaction queue. -Additionally, this module allows one to configure: +Additionally, this pallet allows one to configure: - The mapping between one unit of weight to one unit of fee via [`Config::WeightToFee`]. - A means of updating the fee for the next block, via defining a multiplier, based on the final state of the chain at the end of the previous block. This can be configured via [`Config::FeeMultiplierUpdate`] -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/transaction-payment/rpc/Cargo.toml b/frame/transaction-payment/rpc/Cargo.toml index 102f91dcc2c08..b5e0fd91e1c57 100644 --- a/frame/transaction-payment/rpc/Cargo.toml +++ b/frame/transaction-payment/rpc/Cargo.toml @@ -6,7 +6,7 @@ edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" -description = "RPC interface for the transaction payment module." +description = "RPC interface for the transaction payment pallet." readme = "README.md" [package.metadata.docs.rs] @@ -17,9 +17,10 @@ codec = { package = "parity-scale-codec", version = "2.0.0" } jsonrpc-core = "15.1.0" jsonrpc-core-client = "15.1.0" jsonrpc-derive = "15.1.0" + +sp-api = { version = "3.0.0", path = "../../../primitives/api" } +sp-blockchain = { version = "3.0.0", path = "../../../primitives/blockchain" } sp-core = { version = "3.0.0", path = "../../../primitives/core" } sp-rpc = { version = "3.0.0", path = "../../../primitives/rpc" } sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } -sp-api = { version = "3.0.0", path = "../../../primitives/api" } -sp-blockchain = { version = "3.0.0", path = "../../../primitives/blockchain" } pallet-transaction-payment-rpc-runtime-api = { version = "3.0.0", path = "./runtime-api" } diff --git a/frame/transaction-payment/rpc/README.md b/frame/transaction-payment/rpc/README.md index 21a8a7d37cae0..bf2ada1ff0ab3 100644 --- a/frame/transaction-payment/rpc/README.md +++ b/frame/transaction-payment/rpc/README.md @@ -1,3 +1,3 @@ -RPC interface for the transaction payment module. +RPC interface for the transaction payment pallet. 
-License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/transaction-payment/rpc/runtime-api/Cargo.toml b/frame/transaction-payment/rpc/runtime-api/Cargo.toml index fede9f9dd0267..bb84364a9dfec 100644 --- a/frame/transaction-payment/rpc/runtime-api/Cargo.toml +++ b/frame/transaction-payment/rpc/runtime-api/Cargo.toml @@ -13,16 +13,16 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { version = "3.0.0", default-features = false, path = "../../../../primitives/api" } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-api = { version = "3.0.0", default-features = false, path = "../../../../primitives/api" } sp-runtime = { version = "3.0.0", default-features = false, path = "../../../../primitives/runtime" } pallet-transaction-payment = { version = "3.0.0", default-features = false, path = "../../../transaction-payment" } [features] default = ["std"] std = [ - "sp-api/std", "codec/std", + "sp-api/std", "sp-runtime/std", "pallet-transaction-payment/std", ] diff --git a/frame/transaction-payment/rpc/runtime-api/README.md b/frame/transaction-payment/rpc/runtime-api/README.md index e453d9a3b7c8a..0d81abdb1eeb3 100644 --- a/frame/transaction-payment/rpc/runtime-api/README.md +++ b/frame/transaction-payment/rpc/runtime-api/README.md @@ -1,3 +1,3 @@ -Runtime API definition for transaction payment module. +Runtime API definition for transaction payment pallet. -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/transaction-payment/rpc/runtime-api/src/lib.rs b/frame/transaction-payment/rpc/runtime-api/src/lib.rs index bd05aec30333e..696550d3ef040 100644 --- a/frame/transaction-payment/rpc/runtime-api/src/lib.rs +++ b/frame/transaction-payment/rpc/runtime-api/src/lib.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Runtime API definition for transaction payment module. +//! Runtime API definition for transaction payment pallet. #![cfg_attr(not(feature = "std"), no_std)] diff --git a/frame/transaction-payment/rpc/src/lib.rs b/frame/transaction-payment/rpc/src/lib.rs index b3e892c165e32..efe9f010d139b 100644 --- a/frame/transaction-payment/rpc/src/lib.rs +++ b/frame/transaction-payment/rpc/src/lib.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! RPC interface for the transaction payment module. +//! RPC interface for the transaction payment pallet. use std::sync::Arc; use std::convert::TryInto; diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index 17a4c8f81c968..af1fcc5bfeaaa 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -15,9 +15,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Transaction Payment Module +//! # Transaction Payment Pallet //! -//! This module provides the basic logic needed to pay the absolute minimum amount needed for a +//! This pallet provides the basic logic needed to pay the absolute minimum amount needed for a //! transaction to be included. This includes: //! - _base fee_: This is the minimum amount a user pays for a transaction. It is declared //! as a base _weight_ in the runtime and converted to a fee using `WeightToFee`. @@ -38,7 +38,7 @@ //! 
- `targeted_fee_adjustment`: This is a multiplier that can tune the final fee based on //! the congestion of the network. //! -//! Additionally, this module allows one to configure: +//! Additionally, this pallet allows one to configure: //! - The mapping between one unit of weight to one unit of fee via [`Config::WeightToFee`]. //! - A means of updating the fee for the next block, via defining a multiplier, based on the //! final state of the chain at the end of the previous block. This can be configured via @@ -47,17 +47,8 @@ #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::prelude::*; use codec::{Encode, Decode}; -use frame_support::{ - decl_storage, decl_module, - traits::Get, - weights::{ - Weight, DispatchInfo, PostDispatchInfo, GetDispatchInfo, Pays, WeightToFeePolynomial, - WeightToFeeCoefficient, DispatchClass, - }, - dispatch::DispatchResult, -}; + use sp_runtime::{ FixedU128, FixedPointNumber, FixedPointOperand, Perquintill, RuntimeDebug, transaction_validity::{ @@ -68,23 +59,33 @@ use sp_runtime::{ DispatchInfoOf, PostDispatchInfoOf, }, }; +use sp_std::prelude::*; + +use frame_support::{ + traits::Get, + weights::{ + Weight, DispatchInfo, PostDispatchInfo, GetDispatchInfo, Pays, WeightToFeePolynomial, + WeightToFeeCoefficient, DispatchClass, + }, + dispatch::DispatchResult, +}; mod payment; mod types; +pub use pallet::*; pub use payment::*; pub use types::{InclusionFee, FeeDetails, RuntimeDispatchInfo}; /// Fee multiplier. pub type Multiplier = FixedU128; -type BalanceOf = - <::OnChargeTransaction as OnChargeTransaction>::Balance; +type BalanceOf = <::OnChargeTransaction as OnChargeTransaction>::Balance; /// A struct to update the weight multiplier per block. It implements `Convert`, meaning that it can convert the previous multiplier to the next one. This should /// be called on `on_finalize` of a block, prior to potentially cleaning the weight data from the -/// system module. +/// system pallet. /// /// given: /// s = previous block weight @@ -214,10 +215,10 @@ impl Convert for TargetedFeeAdjustment; +#[frame_support::pallet] +pub mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + use super::*; - /// The fee to be paid for making a transaction; the per-byte portion. - type TransactionByteFee: Get>; + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); - /// Convert a weight value into a deductible fee based on the currency type. - type WeightToFee: WeightToFeePolynomial>; + #[pallet::config] + pub trait Config: frame_system::Config { + /// Handler for withdrawing, refunding and depositing the transaction fee. + /// Transaction fees are withdrawn before the transaction is executed. + /// After the transaction was executed the transaction weight can be + /// adjusted, depending on the used resources by the transaction. If the + /// transaction weight is lower than expected, parts of the transaction fee + /// might be refunded. In the end the fees can be deposited. + type OnChargeTransaction: OnChargeTransaction; - /// Update the multiplier of the next block, based on the previous block's weight. - type FeeMultiplierUpdate: MultiplierUpdate; -} + /// The fee to be paid for making a transaction; the per-byte portion. + #[pallet::constant] + type TransactionByteFee: Get>; -decl_storage! 
{ - trait Store for Module as TransactionPayment { - pub NextFeeMultiplier get(fn next_fee_multiplier): Multiplier = Multiplier::saturating_from_integer(1); + /// Convert a weight value into a deductible fee based on the currency type. + type WeightToFee: WeightToFeePolynomial>; - StorageVersion build(|_: &GenesisConfig| Releases::V2): Releases; + /// Update the multiplier of the next block, based on the previous block's weight. + type FeeMultiplierUpdate: MultiplierUpdate; } -} - -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - /// The fee to be paid for making a transaction; the per-byte portion. - const TransactionByteFee: BalanceOf = T::TransactionByteFee::get(); + #[pallet::extra_constants] + impl Pallet { + //TODO: rename to snake case after https://github.com/paritytech/substrate/issues/8826 fixed. + #[allow(non_snake_case)] /// The polynomial that is applied in order to derive fee from weight. - const WeightToFee: Vec>> = - T::WeightToFee::polynomial().to_vec(); + fn WeightToFee() -> Vec>> { + T::WeightToFee::polynomial().to_vec() + } + } + + #[pallet::type_value] + pub fn NextFeeMultiplierOnEmpty() -> Multiplier { Multiplier::saturating_from_integer(1) } + + #[pallet::storage] + #[pallet::getter(fn next_fee_multiplier)] + pub type NextFeeMultiplier = StorageValue< + _, + Multiplier, + ValueQuery, + NextFeeMultiplierOnEmpty + >; + + #[pallet::storage] + pub(super) type StorageVersion = StorageValue<_, Releases, ValueQuery>; + + #[pallet::genesis_config] + pub struct GenesisConfig; + + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + Self + } + } - fn on_finalize() { - NextFeeMultiplier::mutate(|fm| { + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + StorageVersion::::put(Releases::V2); + } + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_finalize(_: T::BlockNumber) { + >::mutate(|fm| { *fm = T::FeeMultiplierUpdate::convert(*fm); }); } @@ -293,7 +333,6 @@ decl_module! { "Setting `max_total` for `Normal` dispatch class is not compatible with \ `transaction-payment` pallet." ); - // add 1 percent; let addition = target / 100; if addition == 0 { @@ -302,6 +341,7 @@ decl_module! { } target += addition; + #[cfg(any(feature = "std", test))] sp_io::TestExternalities::new_empty().execute_with(|| { >::set_block_consumed_resources(target, 0); let next = T::FeeMultiplierUpdate::convert(min_value); @@ -309,17 +349,17 @@ decl_module! { block saturation is more than target by 1% and multiplier is minimal then \ the multiplier doesn't increase." ); - }) + }); } } } -impl Module where +impl Pallet where BalanceOf: FixedPointOperand { /// Query the data that we know about the fee of a given `call`. /// - /// This module is not and cannot be aware of the internals of a signed extension, for example + /// This pallet is not and cannot be aware of the internals of a signed extension, for example /// a tip. It only interprets the extrinsic as some encoded value and accounts for its weight /// and length, the runtime's extrinsic base weight, and the current fee multiplier. /// @@ -330,7 +370,7 @@ impl Module where len: u32, ) -> RuntimeDispatchInfo> where - T::Call: Dispatchable, + T::Call: Dispatchable, { // NOTE: we can actually make it understand `ChargeTransactionPayment`, but would be some // hassle for sure. 
We have to make it aware of the index of `ChargeTransactionPayment` in @@ -351,7 +391,7 @@ impl Module where len: u32, ) -> FeeDetails> where - T::Call: Dispatchable, + T::Call: Dispatchable, { let dispatch_info = ::get_dispatch_info(&unchecked_extrinsic); Self::compute_fee_details(len, &dispatch_info, 0u32.into()) @@ -363,7 +403,7 @@ impl Module where info: &DispatchInfoOf, tip: BalanceOf, ) -> BalanceOf where - T::Call: Dispatchable, + T::Call: Dispatchable, { Self::compute_fee_details(len, info, tip).final_fee() } @@ -374,7 +414,7 @@ impl Module where info: &DispatchInfoOf, tip: BalanceOf, ) -> FeeDetails> where - T::Call: Dispatchable, + T::Call: Dispatchable, { Self::compute_fee_raw(len, info.weight, tip, info.pays_fee, info.class) } @@ -389,7 +429,7 @@ impl Module where post_info: &PostDispatchInfoOf, tip: BalanceOf, ) -> BalanceOf where - T::Call: Dispatchable, + T::Call: Dispatchable, { Self::compute_actual_fee_details(len, info, post_info, tip).final_fee() } @@ -401,7 +441,7 @@ impl Module where post_info: &PostDispatchInfoOf, tip: BalanceOf, ) -> FeeDetails> where - T::Call: Dispatchable, + T::Call: Dispatchable, { Self::compute_fee_raw( len, @@ -457,7 +497,7 @@ impl Module where } } -impl Convert> for Module where +impl Convert> for Pallet where T: Config, BalanceOf: FixedPointOperand, { @@ -467,7 +507,7 @@ impl Convert> for Module where /// share that the weight contributes to the overall fee of a transaction. It is mainly /// for informational purposes and not used in the actual fee calculation. fn convert(weight: Weight) -> BalanceOf { - NextFeeMultiplier::get().saturating_mul_int(Self::weight_to_fee(weight)) + >::get().saturating_mul_int(Self::weight_to_fee(weight)) } } @@ -477,7 +517,7 @@ impl Convert> for Module where pub struct ChargeTransactionPayment(#[codec(compact)] BalanceOf); impl ChargeTransactionPayment where - T::Call: Dispatchable, + T::Call: Dispatchable, BalanceOf: Send + Sync + FixedPointOperand, { /// utility constructor. Used only in client/factory code. 
@@ -499,7 +539,7 @@ impl ChargeTransactionPayment where TransactionValidityError, > { let tip = self.0; - let fee = Module::::compute_fee(len as u32, info, tip); + let fee = Pallet::::compute_fee(len as u32, info, tip); <::OnChargeTransaction as OnChargeTransaction>::withdraw_fee(who, call, info, fee, tip) .map(|i| (fee, i)) @@ -537,7 +577,7 @@ impl sp_std::fmt::Debug for ChargeTransactionPayment { impl SignedExtension for ChargeTransactionPayment where BalanceOf: Send + Sync + From + FixedPointOperand, - T::Call: Dispatchable, + T::Call: Dispatchable, { const IDENTIFIER: &'static str = "ChargeTransactionPayment"; type AccountId = T::AccountId; @@ -586,7 +626,7 @@ impl SignedExtension for ChargeTransactionPayment where _result: &DispatchResult, ) -> Result<(), TransactionValidityError> { let (tip, who, imbalance) = pre; - let actual_fee = Module::::compute_actual_fee( + let actual_fee = Pallet::::compute_actual_fee( len as u32, info, post_info, @@ -601,8 +641,20 @@ impl SignedExtension for ChargeTransactionPayment where mod tests { use super::*; use crate as pallet_transaction_payment; - use frame_system as system; + + use std::cell::RefCell; + use codec::Encode; + use smallvec::smallvec; + + use sp_core::H256; + use sp_runtime::{ + testing::{Header, TestXt}, + traits::{BlakeTwo256, IdentityLookup, One}, + transaction_validity::InvalidTransaction, + Perbill, + }; + use frame_support::{ assert_noop, assert_ok, parameter_types, weights::{ @@ -611,16 +663,8 @@ mod tests { }, traits::{Currency, OnUnbalanced, Imbalance}, }; + use frame_system as system; use pallet_balances::Call as BalancesCall; - use sp_core::H256; - use sp_runtime::{ - testing::{Header, TestXt}, - traits::{BlakeTwo256, IdentityLookup, One}, - transaction_validity::InvalidTransaction, - Perbill, - }; - use std::cell::RefCell; - use smallvec::smallvec; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; @@ -728,7 +772,7 @@ mod tests { pub struct DealWithFees; impl OnUnbalanced> for DealWithFees { fn on_unbalanceds( - mut fees_then_tips: impl Iterator> + mut fees_then_tips: impl Iterator> ) { if let Some(fees) = fees_then_tips.next() { FEE_UNBALANCED_AMOUNT.with(|a| *a.borrow_mut() += fees.peek()); @@ -882,7 +926,7 @@ mod tests { .execute_with(|| { let len = 10; - NextFeeMultiplier::put(Multiplier::saturating_from_rational(3, 2)); + >::put(Multiplier::saturating_from_rational(3, 2)); let pre = ChargeTransactionPayment::::from(5 /* tipped */) .pre_dispatch(&2, CALL, &info_from_weight(100), len) @@ -967,7 +1011,7 @@ mod tests { .execute_with(|| { // all fees should be x1.5 - NextFeeMultiplier::put(Multiplier::saturating_from_rational(3, 2)); + >::put(Multiplier::saturating_from_rational(3, 2)); let len = 10; assert_ok!( @@ -1001,7 +1045,7 @@ mod tests { .execute_with(|| { // all fees should be x1.5 - NextFeeMultiplier::put(Multiplier::saturating_from_rational(3, 2)); + >::put(Multiplier::saturating_from_rational(3, 2)); assert_eq!( TransactionPayment::query_info(xt, len), @@ -1028,7 +1072,7 @@ mod tests { .execute_with(|| { // Next fee multiplier is zero - assert_eq!(NextFeeMultiplier::get(), Multiplier::one()); + assert_eq!(>::get(), Multiplier::one()); // Tip only, no fees works let dispatch_info = DispatchInfo { @@ -1036,25 +1080,25 @@ mod tests { class: DispatchClass::Operational, pays_fee: Pays::No, }; - assert_eq!(Module::::compute_fee(0, &dispatch_info, 10), 10); + assert_eq!(Pallet::::compute_fee(0, &dispatch_info, 10), 10); // No tip, only base fee works 
let dispatch_info = DispatchInfo { weight: 0, class: DispatchClass::Operational, pays_fee: Pays::Yes, }; - assert_eq!(Module::::compute_fee(0, &dispatch_info, 0), 100); + assert_eq!(Pallet::::compute_fee(0, &dispatch_info, 0), 100); // Tip + base fee works - assert_eq!(Module::::compute_fee(0, &dispatch_info, 69), 169); + assert_eq!(Pallet::::compute_fee(0, &dispatch_info, 69), 169); // Len (byte fee) + base fee works - assert_eq!(Module::::compute_fee(42, &dispatch_info, 0), 520); + assert_eq!(Pallet::::compute_fee(42, &dispatch_info, 0), 520); // Weight fee + base fee works let dispatch_info = DispatchInfo { weight: 1000, class: DispatchClass::Operational, pays_fee: Pays::Yes, }; - assert_eq!(Module::::compute_fee(0, &dispatch_info, 0), 1100); + assert_eq!(Pallet::::compute_fee(0, &dispatch_info, 0), 1100); }); } @@ -1068,14 +1112,14 @@ mod tests { .execute_with(|| { // Add a next fee multiplier. Fees will be x3/2. - NextFeeMultiplier::put(Multiplier::saturating_from_rational(3, 2)); + >::put(Multiplier::saturating_from_rational(3, 2)); // Base fee is unaffected by multiplier let dispatch_info = DispatchInfo { weight: 0, class: DispatchClass::Operational, pays_fee: Pays::Yes, }; - assert_eq!(Module::::compute_fee(0, &dispatch_info, 0), 100); + assert_eq!(Pallet::::compute_fee(0, &dispatch_info, 0), 100); // Everything works together :) let dispatch_info = DispatchInfo { @@ -1085,7 +1129,7 @@ mod tests { }; // 123 weight, 456 length, 100 base assert_eq!( - Module::::compute_fee(456, &dispatch_info, 789), + Pallet::::compute_fee(456, &dispatch_info, 789), 100 + (3 * 123 / 2) + 4560 + 789, ); }); @@ -1101,7 +1145,7 @@ mod tests { .execute_with(|| { // Add a next fee multiplier. All fees will be x1/2. - NextFeeMultiplier::put(Multiplier::saturating_from_rational(1, 2)); + >::put(Multiplier::saturating_from_rational(1, 2)); // Base fee is unaffected by multiplier. let dispatch_info = DispatchInfo { @@ -1109,7 +1153,7 @@ mod tests { class: DispatchClass::Operational, pays_fee: Pays::Yes, }; - assert_eq!(Module::::compute_fee(0, &dispatch_info, 0), 100); + assert_eq!(Pallet::::compute_fee(0, &dispatch_info, 0), 100); // Everything works together. 
let dispatch_info = DispatchInfo { @@ -1119,7 +1163,7 @@ mod tests { }; // 123 weight, 456 length, 100 base assert_eq!( - Module::::compute_fee(456, &dispatch_info, 789), + Pallet::::compute_fee(456, &dispatch_info, 789), 100 + (123 / 2) + 4560 + 789, ); }); @@ -1141,7 +1185,7 @@ mod tests { pays_fee: Pays::Yes, }; assert_eq!( - Module::::compute_fee( + Pallet::::compute_fee( u32::MAX, &dispatch_info, u64::MAX @@ -1250,7 +1294,7 @@ mod tests { let len = 10; let tip = 5; - NextFeeMultiplier::put(Multiplier::saturating_from_rational(5, 4)); + >::put(Multiplier::saturating_from_rational(5, 4)); let pre = ChargeTransactionPayment::::from(tip) .pre_dispatch(&2, CALL, &info, len) @@ -1261,7 +1305,7 @@ mod tests { .unwrap(); let refund_based_fee = prev_balance - Balances::free_balance(2); - let actual_fee = Module:: + let actual_fee = Pallet:: ::compute_actual_fee(len as u32, &info, &post_info, tip); // 33 weight, 10 length, 7 base, 5 tip @@ -1284,7 +1328,7 @@ mod tests { let len = 10; let tip = 5; - NextFeeMultiplier::put(Multiplier::saturating_from_rational(5, 4)); + >::put(Multiplier::saturating_from_rational(5, 4)); let pre = ChargeTransactionPayment::::from(tip) .pre_dispatch(&2, CALL, &info, len) @@ -1295,7 +1339,7 @@ mod tests { .unwrap(); let refund_based_fee = prev_balance - Balances::free_balance(2); - let actual_fee = Module:: + let actual_fee = Pallet:: ::compute_actual_fee(len as u32, &info, &post_info, tip); // Only 5 tip is paid diff --git a/frame/transaction-payment/src/payment.rs b/frame/transaction-payment/src/payment.rs index 1d910de8b6ce7..376cd77ce3f82 100644 --- a/frame/transaction-payment/src/payment.rs +++ b/frame/transaction-payment/src/payment.rs @@ -1,16 +1,19 @@ ///! Traits and default implementation for paying transaction fees. + use crate::Config; + use codec::FullCodec; -use frame_support::{ - traits::{Currency, ExistenceRequirement, Get, Imbalance, OnUnbalanced, WithdrawReasons}, - unsigned::TransactionValidityError, -}; use sp_runtime::{ traits::{AtLeast32BitUnsigned, DispatchInfoOf, MaybeSerializeDeserialize, PostDispatchInfoOf, Saturating, Zero}, transaction_validity::InvalidTransaction, }; use sp_std::{fmt::Debug, marker::PhantomData}; +use frame_support::{ + traits::{Currency, ExistenceRequirement, Get, Imbalance, OnUnbalanced, WithdrawReasons}, + unsigned::TransactionValidityError, +}; + type NegativeImbalanceOf = ::AccountId>>::NegativeImbalance; @@ -47,7 +50,7 @@ pub trait OnChargeTransaction { ) -> Result<(), TransactionValidityError>; } -/// Implements the transaction payment for a module implementing the `Currency` +/// Implements the transaction payment for a pallet implementing the `Currency` /// trait (eg. the pallet_balances) using an unbalance handler (implementing /// `OnUnbalanced`). /// diff --git a/frame/transaction-payment/src/types.rs b/frame/transaction-payment/src/types.rs index ab771eb8ba5df..b5d46a9167a75 100644 --- a/frame/transaction-payment/src/types.rs +++ b/frame/transaction-payment/src/types.rs @@ -17,12 +17,14 @@ //! Types for transaction-payment RPC. -use sp_std::prelude::*; -use frame_support::weights::{Weight, DispatchClass}; use codec::{Encode, Decode}; #[cfg(feature = "std")] use serde::{Serialize, Deserialize}; + use sp_runtime::traits::{AtLeast32BitUnsigned, Zero}; +use sp_std::prelude::*; + +use frame_support::weights::{Weight, DispatchClass}; /// The base fee and adjusted weight and length fees constitute the _inclusion fee_. 
#[derive(Encode, Decode, Clone, Eq, PartialEq)] From eae82abfe3221b0695e6c1c552728eeb8db26c56 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Fri, 25 Jun 2021 18:27:01 +0200 Subject: [PATCH 46/67] contracts: Remove weight pre charging (#8976) * Remove pre-charging for code size * Remove pre charging when reading values of fixed size * Add new versions of API functions that leave out parameters * Update CHANGELOG.md * Apply suggestions from code review Co-authored-by: Alexander Popiak * Add v1 for seal_set_rent_allowance * Remove unneeded trait bound Co-authored-by: Guillaume Thiolliere Co-authored-by: Alexander Popiak Co-authored-by: Guillaume Thiolliere --- frame/contracts/CHANGELOG.md | 5 + frame/contracts/fixtures/dummy.wat | 5 + .../fixtures/instantiate_return_code.wat | 6 +- frame/contracts/fixtures/ok_trap_revert.wat | 2 +- frame/contracts/fixtures/restoration.wat | 13 +- frame/contracts/src/benchmarking/code.rs | 7 +- frame/contracts/src/benchmarking/mod.rs | 88 +- frame/contracts/src/chain_extension.rs | 21 +- frame/contracts/src/exec.rs | 177 +-- frame/contracts/src/gas.rs | 9 - frame/contracts/src/lib.rs | 23 +- frame/contracts/src/rent.rs | 22 +- frame/contracts/src/schedule.rs | 34 +- frame/contracts/src/tests.rs | 66 +- frame/contracts/src/wasm/code_cache.rs | 80 +- frame/contracts/src/wasm/mod.rs | 54 +- frame/contracts/src/wasm/runtime.rs | 463 +++--- frame/contracts/src/weights.rs | 1295 ++++++++--------- 18 files changed, 1238 insertions(+), 1132 deletions(-) create mode 100644 frame/contracts/fixtures/dummy.wat diff --git a/frame/contracts/CHANGELOG.md b/frame/contracts/CHANGELOG.md index dd679f432d314..03945d7b2e346 100644 --- a/frame/contracts/CHANGELOG.md +++ b/frame/contracts/CHANGELOG.md @@ -42,6 +42,11 @@ output to an RPC client. - Make storage and fields of `Schedule` private to the crate. [#8359](https://github.com/paritytech/substrate/pull/8359) +### Fixed + +- Remove pre-charging which caused wrongly estimated weights +[#8976](https://github.com/paritytech/substrate/pull/8976) + ## [v3.0.0] 2021-02-25 This version constitutes the first release that brings any stability guarantees (see above). diff --git a/frame/contracts/fixtures/dummy.wat b/frame/contracts/fixtures/dummy.wat new file mode 100644 index 0000000000000..0aeefbcb7ebfe --- /dev/null +++ b/frame/contracts/fixtures/dummy.wat @@ -0,0 +1,5 @@ +;; A valid contract which does nothing at all +(module + (func (export "deploy")) + (func (export "call")) +) diff --git a/frame/contracts/fixtures/instantiate_return_code.wat b/frame/contracts/fixtures/instantiate_return_code.wat index 544489329cfad..6a8654520f106 100644 --- a/frame/contracts/fixtures/instantiate_return_code.wat +++ b/frame/contracts/fixtures/instantiate_return_code.wat @@ -4,8 +4,8 @@ ;; The rest of the input is forwarded to the constructor of the callee (module (import "seal0" "seal_input" (func $seal_input (param i32 i32))) - (import "seal0" "seal_instantiate" (func $seal_instantiate - (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32) (result i32) + (import "seal1" "seal_instantiate" (func $seal_instantiate + (param i32 i64 i32 i32 i32 i32 i32 i32 i32 i32 i32) (result i32) )) (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) @@ -29,10 +29,8 @@ (i32.const 8) (call $seal_instantiate (i32.const 16) ;; Pointer to the code hash. - (i32.const 32) ;; Length of the code hash. (i64.const 0) ;; How much gas to devote for the execution. 0 = all. 
(i32.const 0) ;; Pointer to the buffer with value to transfer - (i32.const 8) ;; Length of the buffer with value to transfer. (i32.const 48) ;; Pointer to input data buffer address (i32.const 4) ;; Length of input data buffer (i32.const 0xffffffff) ;; u32 max sentinel value: do not copy address diff --git a/frame/contracts/fixtures/ok_trap_revert.wat b/frame/contracts/fixtures/ok_trap_revert.wat index b71a6435db9c1..b7eaa9b700af5 100644 --- a/frame/contracts/fixtures/ok_trap_revert.wat +++ b/frame/contracts/fixtures/ok_trap_revert.wat @@ -32,4 +32,4 @@ ;; 2 = trap (unreachable) ) -) \ No newline at end of file +) diff --git a/frame/contracts/fixtures/restoration.wat b/frame/contracts/fixtures/restoration.wat index 3462af2870816..e24e5695a3568 100644 --- a/frame/contracts/fixtures/restoration.wat +++ b/frame/contracts/fixtures/restoration.wat @@ -1,9 +1,9 @@ (module (import "seal0" "seal_set_storage" (func $seal_set_storage (param i32 i32 i32))) (import "seal0" "seal_input" (func $seal_input (param i32 i32))) - (import "seal0" "seal_restore_to" + (import "seal1" "seal_restore_to" (func $seal_restore_to - (param i32 i32 i32 i32 i32 i32 i32 i32) + (param i32 i32 i32 i32 i32) ) ) (import "env" "memory" (memory 1 1)) @@ -27,15 +27,12 @@ ) ) (call $seal_restore_to - ;; Pointer and length of the encoded dest buffer. + ;; Pointer to the encoded dest buffer. (i32.const 340) - (i32.const 32) - ;; Pointer and length of the encoded code hash buffer + ;; Pointer to the encoded code hash buffer (i32.const 308) - (i32.const 32) - ;; Pointer and length of the encoded rent_allowance buffer + ;; Pointer to the encoded rent_allowance buffer (i32.const 296) - (i32.const 8) ;; Pointer and number of items in the delta buffer. ;; This buffer specifies multiple keys for removal before restoration. (i32.const 100) diff --git a/frame/contracts/src/benchmarking/code.rs b/frame/contracts/src/benchmarking/code.rs index b9bd693f1c2c7..6faba8a2e064c 100644 --- a/frame/contracts/src/benchmarking/code.rs +++ b/frame/contracts/src/benchmarking/code.rs @@ -258,9 +258,14 @@ where /// Same as `dummy` but with maximum sized linear memory and a dummy section of specified size. pub fn dummy_with_bytes(dummy_bytes: u32) -> Self { + // We want the module to have the size `dummy_bytes`. + // This is not completely correct as the overhead grows when the contract grows + // because of variable length integer encoding. However, it is good enough to be that + // close for benchmarking purposes. + let module_overhead = 65; ModuleDefinition { memory: Some(ImportedMemory::max::()), - dummy_section: dummy_bytes, + dummy_section: dummy_bytes.saturating_sub(module_overhead), .. Default::default() } .into() diff --git a/frame/contracts/src/benchmarking/mod.rs b/frame/contracts/src/benchmarking/mod.rs index 7b77569a1f6d7..cbe5e48a4f020 100644 --- a/frame/contracts/src/benchmarking/mod.rs +++ b/frame/contracts/src/benchmarking/mod.rs @@ -320,6 +320,25 @@ benchmarks! { Contracts::::reinstrument_module(&mut module, &schedule)?; } + // The weight of loading and decoding of a contract's code per kilobyte. + code_load { + let c in 0 .. T::Schedule::get().limits.code_len / 1024; + let WasmModule { code, hash, .. } = WasmModule::::dummy_with_bytes(c * 1024); + Contracts::::store_code_raw(code)?; + }: { + >::from_storage_noinstr(hash)?; + } + + // The weight of changing the refcount of a contract's code per kilobyte. + code_refcount { + let c in 0 .. T::Schedule::get().limits.code_len / 1024; + let WasmModule { code, hash, .. 
} = WasmModule::::dummy_with_bytes(c * 1024); + Contracts::::store_code_raw(code)?; + let mut gas_meter = GasMeter::new(Weight::max_value()); + }: { + >::add_user(hash, &mut gas_meter)?; + } + // This constructs a contract that is maximal expensive to instrument. // It creates a maximum number of metering blocks per byte. // The size of the salt influences the runtime because is is hashed in order to @@ -352,16 +371,14 @@ benchmarks! { } // Instantiate uses a dummy contract constructor to measure the overhead of the instantiate. - // `c`: Size of the code in kilobytes. // `s`: Size of the salt in kilobytes. instantiate { - let c in 0 .. T::Schedule::get().limits.code_len / 1024; let s in 0 .. code::max_pages::() * 64; let salt = vec![42u8; (s * 1024) as usize]; let endowment = caller_funding::() / 3u32.into(); let caller = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, caller_funding::()); - let WasmModule { code, hash, .. } = WasmModule::::dummy_with_bytes(c * 1024); + let WasmModule { code, hash, .. } = WasmModule::::dummy(); let origin = RawOrigin::Signed(caller.clone()); let addr = Contracts::::contract_address(&caller, &hash, &salt); Contracts::::store_code_raw(code)?; @@ -380,12 +397,10 @@ benchmarks! { // won't call `seal_input` in its constructor to copy the data to contract memory. // The dummy contract used here does not do this. The costs for the data copy is billed as // part of `seal_input`. - // `c`: Size of the code in kilobytes. call { - let c in 0 .. T::Schedule::get().limits.code_len / 1024; let data = vec![42u8; 1024]; let instance = Contract::::with_caller( - whitelisted_caller(), WasmModule::dummy_with_bytes(c * 1024), vec![], Endow::CollectRent + whitelisted_caller(), WasmModule::dummy(), vec![], Endow::CollectRent )?; let value = T::Currency::minimum_balance() * 100u32.into(); let origin = RawOrigin::Signed(instance.caller.clone()); @@ -720,43 +735,6 @@ benchmarks! { } } - seal_terminate_per_code_kb { - let c in 0 .. T::Schedule::get().limits.code_len / 1024; - let beneficiary = account::("beneficiary", 0, 0); - let beneficiary_bytes = beneficiary.encode(); - let beneficiary_len = beneficiary_bytes.len(); - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal0", - name: "seal_terminate", - params: vec![ValueType::I32, ValueType::I32], - return_type: None, - }], - data_segments: vec![ - DataSegment { - offset: 0, - value: beneficiary_bytes, - }, - ], - call_body: Some(body::repeated(1, &[ - Instruction::I32Const(0), // beneficiary_ptr - Instruction::I32Const(beneficiary_len as i32), // beneficiary_len - Instruction::Call(0), - ])), - dummy_section: c * 1024, - .. Default::default() - }); - let instance = Contract::::new(code, vec![], Endow::Max)?; - let origin = RawOrigin::Signed(instance.caller.clone()); - assert_eq!(T::Currency::total_balance(&beneficiary), 0u32.into()); - assert_eq!(T::Currency::total_balance(&instance.account_id), Endow::max::()); - }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) - verify { - assert_eq!(T::Currency::total_balance(&instance.account_id), 0u32.into()); - assert_eq!(T::Currency::total_balance(&beneficiary), Endow::max::()); - } - seal_restore_to { let r in 0 .. 1; @@ -836,18 +814,15 @@ benchmarks! { } } - // `c`: Code size of caller contract - // `t`: Code size of tombstone contract // `d`: Number of supplied delta keys - seal_restore_to_per_code_kb_delta { - let c in 0 .. 
T::Schedule::get().limits.code_len / 1024; - let t in 0 .. T::Schedule::get().limits.code_len / 1024; + seal_restore_to_per_delta { let d in 0 .. API_BENCHMARK_BATCHES; - let mut tombstone = ContractWithStorage::::with_code( - WasmModule::::dummy_with_bytes(t * 1024), 0, 0 - )?; + let mut tombstone = ContractWithStorage::::new(0, 0)?; tombstone.evict()?; - let delta = create_storage::(d * API_BENCHMARK_BATCH_SIZE, T::Schedule::get().limits.payload_len)?; + let delta = create_storage::( + d * API_BENCHMARK_BATCH_SIZE, + T::Schedule::get().limits.payload_len, + )?; let dest = tombstone.contract.account_id.encode(); let dest_len = dest.len(); @@ -909,7 +884,6 @@ benchmarks! { Instruction::Call(0), Instruction::End, ])), - dummy_section: c * 1024, .. Default::default() }); @@ -1393,8 +1367,7 @@ benchmarks! { let origin = RawOrigin::Signed(instance.caller.clone()); }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) - seal_call_per_code_transfer_input_output_kb { - let c in 0 .. T::Schedule::get().limits.code_len / 1024; + seal_call_per_transfer_input_output_kb { let t in 0 .. 1; let i in 0 .. code::max_pages::() * 64; let o in 0 .. (code::max_pages::() - 1) * 64; @@ -1417,7 +1390,6 @@ benchmarks! { Instruction::Call(0), Instruction::End, ])), - dummy_section: c * 1024, .. Default::default() }); let callees = (0..API_BENCHMARK_BATCH_SIZE) @@ -1593,8 +1565,7 @@ benchmarks! { } } - seal_instantiate_per_code_input_output_salt_kb { - let c in 0 .. T::Schedule::get().limits.code_len / 1024; + seal_instantiate_per_input_output_salt_kb { let i in 0 .. (code::max_pages::() - 1) * 64; let o in 0 .. (code::max_pages::() - 1) * 64; let s in 0 .. (code::max_pages::() - 1) * 64; @@ -1617,7 +1588,6 @@ benchmarks! { Instruction::Call(0), Instruction::End, ])), - dummy_section: c * 1024, .. Default::default() }); let hash = callee_code.hash.clone(); diff --git a/frame/contracts/src/chain_extension.rs b/frame/contracts/src/chain_extension.rs index ac71eca27b1ce..01c362f613a51 100644 --- a/frame/contracts/src/chain_extension.rs +++ b/frame/contracts/src/chain_extension.rs @@ -59,7 +59,7 @@ use crate::{ wasm::{Runtime, RuntimeCosts}, }; use codec::Decode; -use frame_support::weights::Weight; +use frame_support::{weights::Weight, traits::MaxEncodedLen}; use sp_runtime::DispatchError; use sp_std::{ marker::PhantomData, @@ -300,18 +300,21 @@ where Ok(()) } - /// Reads `in_len` from contract memory and scale decodes it. + /// Reads and decodes a type with a size fixed at compile time from contract memory. /// /// This function is secure and recommended for all input types of fixed size /// as long as the cost of reading the memory is included in the overall already charged /// weight of the chain extension. This should usually be the case when fixed input types - /// are used. Non fixed size types (like everything using `Vec`) usually need to use - /// [`in_len()`](Self::in_len) in order to properly charge the necessary weight. - pub fn read_as(&mut self) -> Result { - self.inner.runtime.read_sandbox_memory_as( - self.inner.input_ptr, - self.inner.input_len, - ) + /// are used. + pub fn read_as(&mut self) -> Result { + self.inner.runtime.read_sandbox_memory_as(self.inner.input_ptr) + } + + /// Reads and decodes a type with a dynamic size from contract memory. + /// + /// Make sure to include `len` in your weight calculations. 
+ pub fn read_as_unbounded<T: Decode>(&mut self, len: u32) -> Result<T, DispatchError> { + self.inner.runtime.read_sandbox_memory_as_unbounded(self.inner.input_ptr, len) } /// The length of the input as passed in as `input_len`. diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index 3739ab77e2b6c..2b595ea6ce8d4 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -168,7 +168,7 @@ pub trait Ext: sealing::Sealed { value: BalanceOf<Self::T>, input_data: Vec<u8>, allows_reentry: bool, - ) -> Result<(ExecReturnValue, u32), (ExecError, u32)>; + ) -> Result<ExecReturnValue, ExecError>; /// Instantiate a contract from the given code. /// @@ -186,24 +186,16 @@ pub trait Ext: sealing::Sealed { value: BalanceOf<Self::T>, input_data: Vec<u8>, salt: &[u8], - ) -> Result<(AccountIdOf<Self::T>, ExecReturnValue, u32), (ExecError, u32)>; + ) -> Result<(AccountIdOf<Self::T>, ExecReturnValue), ExecError>; /// Transfer all funds to `beneficiary` and delete the contract. /// - /// Returns the original code size of the terminated contract. /// Since this function removes the self contract eagerly, if succeeded, no further actions should /// be performed on this `Ext` instance. /// /// This function will fail if the same contract is present on the contract /// call stack. - /// - /// # Return Value - /// - /// Result<CodeSize, (DispatchError, CodeSize)> - fn terminate( - &mut self, - beneficiary: &AccountIdOf<Self::T>, - ) -> Result<u32, (DispatchError, u32)>; + fn terminate(&mut self, beneficiary: &AccountIdOf<Self::T>) -> Result<(), DispatchError>; /// Restores the given destination contract sacrificing the current one. /// @@ -222,7 +214,7 @@ pub trait Ext: sealing::Sealed { code_hash: CodeHash<Self::T>, rent_allowance: BalanceOf<Self::T>, delta: Vec<StorageKey>, - ) -> Result<(u32, u32), (DispatchError, u32, u32)>; + ) -> Result<(), DispatchError>; /// Transfer some amount of funds into the specified account. fn transfer( @@ -325,6 +317,9 @@ pub enum ExportedFunction { /// order to be able to mock the wasm logic for testing. pub trait Executable<T: Config>: Sized { /// Load the executable from storage. + /// + /// # Note + /// Charges the size-based load and instrumentation weight from the gas meter. fn from_storage( code_hash: CodeHash<T>, schedule: &Schedule<T>, @@ -336,6 +331,10 @@ pub trait Executable<T: Config>: Sized { /// A code module is re-instrumented on-load when it was originally instrumented with /// an older schedule. This skips this step for cases where the code storage is /// queried for purposes other than execution. + /// + /// # Note + /// + /// Does not charge from the gas meter. Do not call in contexts where this is important. fn from_storage_noinstr(code_hash: CodeHash<T>) -> Result<Self, DispatchError>; /// Decrements the refcount by one and deletes the code if it drops to zero. @@ -344,12 +343,22 @@ pub trait Executable<T: Config>: Sized { /// Increment the refcount by one. Fails if the code does not exist on-chain. /// /// Returns the size of the original code. - fn add_user(code_hash: CodeHash<T>) -> Result<u32, DispatchError>; + /// + /// # Note + /// + /// Charges weight proportional to the code size from the gas meter. + fn add_user(code_hash: CodeHash<T>, gas_meter: &mut GasMeter<T>) + -> Result<(), DispatchError>; /// Decrement the refcount by one and remove the code when it drops to zero. /// /// Returns the size of the original code. - fn remove_user(code_hash: CodeHash<T>) -> u32; + /// + /// # Note + /// + /// Charges weight proportional to the code size from the gas meter. + fn remove_user(code_hash: CodeHash<T>, gas_meter: &mut GasMeter<T>) + -> Result<(), DispatchError>; /// Execute the specified exported function and return the result.
/// @@ -595,7 +604,7 @@ where value: BalanceOf, input_data: Vec, debug_message: Option<&'a mut Vec>, - ) -> Result<(ExecReturnValue, u32), (ExecError, u32)> { + ) -> Result { let (mut stack, executable) = Self::new( FrameArgs::Call{dest, cached_info: None}, origin, @@ -639,11 +648,9 @@ where schedule, value, debug_message, - ).map_err(|(e, _code_len)| e)?; + )?; let account_id = stack.top_frame().account_id.clone(); - stack.run(executable, input_data) - .map(|(ret, _code_len)| (account_id, ret)) - .map_err(|(err, _code_len)| err) + stack.run(executable, input_data).map(|ret| (account_id, ret)) } /// Create a new call stack. @@ -654,7 +661,7 @@ where schedule: &'a Schedule, value: BalanceOf, debug_message: Option<&'a mut Vec>, - ) -> Result<(Self, E), (ExecError, u32)> { + ) -> Result<(Self, E), ExecError> { let (first_frame, executable) = Self::new_frame(args, value, gas_meter, 0, &schedule)?; let stack = Self { origin, @@ -682,22 +689,20 @@ where gas_meter: &mut GasMeter, gas_limit: Weight, schedule: &Schedule - ) -> Result<(Frame, E), (ExecError, u32)> { + ) -> Result<(Frame, E), ExecError> { let (account_id, contract_info, executable, entry_point) = match frame_args { FrameArgs::Call{dest, cached_info} => { let contract = if let Some(contract) = cached_info { contract } else { >::get(&dest) - .ok_or((>::ContractNotFound.into(), 0)) + .ok_or(>::ContractNotFound.into()) .and_then(|contract| - contract.get_alive() - .ok_or((>::ContractIsTombstone.into(), 0)) + contract.get_alive().ok_or(>::ContractIsTombstone) )? }; - let executable = E::from_storage(contract.code_hash, schedule, gas_meter) - .map_err(|e| (e.into(), 0))?; + let executable = E::from_storage(contract.code_hash, schedule, gas_meter)?; // This charges the rent and denies access to a contract that is in need of // eviction by returning `None`. We cannot evict eagerly here because those @@ -705,9 +710,8 @@ where // contract. // See: https://github.com/paritytech/substrate/issues/6439#issuecomment-648754324 let contract = Rent:: - ::charge(&dest, contract, executable.occupied_storage()) - .map_err(|e| (e.into(), executable.code_len()))? - .ok_or((Error::::RentNotPaid.into(), executable.code_len()))?; + ::charge(&dest, contract, executable.occupied_storage())? + .ok_or(Error::::RentNotPaid)?; (dest, contract, executable, ExportedFunction::Call) } FrameArgs::Instantiate{sender, trie_seed, executable, salt} => { @@ -719,7 +723,7 @@ where &account_id, trie_id, executable.code_hash().clone(), - ).map_err(|e| (e.into(), executable.code_len()))?; + )?; (account_id, contract, executable, ExportedFunction::Constructor) } }; @@ -732,8 +736,7 @@ where contract_info: CachedContract::Cached(contract_info), account_id, entry_point, - nested_meter: gas_meter.nested(gas_limit) - .map_err(|e| (e.into(), executable.code_len()))?, + nested_meter: gas_meter.nested(gas_limit)?, allows_reentry: true, }; @@ -746,9 +749,9 @@ where frame_args: FrameArgs, value_transferred: BalanceOf, gas_limit: Weight, - ) -> Result { + ) -> Result { if self.frames.len() == T::CallStack::size() { - return Err((Error::::MaxCallDepthReached.into(), 0)); + return Err(Error::::MaxCallDepthReached.into()); } // We need to make sure that changes made to the contract info are not discarded. 
@@ -787,7 +790,7 @@ where &mut self, executable: E, input_data: Vec - ) -> Result<(ExecReturnValue, u32), (ExecError, u32)> { + ) -> Result { let entry_point = self.top_frame().entry_point; let do_transaction = || { // Cache the value before calling into the constructor because that @@ -795,17 +798,16 @@ where // the same code hash we still charge the "1 block rent" as if they weren't // spawned. This is OK as overcharging is always safe. let occupied_storage = executable.occupied_storage(); - let code_len = executable.code_len(); // Every call or instantiate also optionally transferres balance. - self.initial_transfer().map_err(|e| (ExecError::from(e), code_len))?; + self.initial_transfer()?; // Call into the wasm blob. let output = executable.execute( self, &entry_point, input_data, - ).map_err(|e| (ExecError { error: e.error, origin: ErrorOrigin::Callee }, code_len))?; + ).map_err(|e| ExecError { error: e.error, origin: ErrorOrigin::Callee })?; // Additional work needs to be performed in case of an instantiation. if output.is_success() && entry_point == ExportedFunction::Constructor { @@ -814,7 +816,7 @@ where // It is not allowed to terminate a contract inside its constructor. if let CachedContract::Terminated = frame.contract_info { - return Err((Error::::TerminatedInConstructor.into(), code_len)); + return Err(Error::::TerminatedInConstructor.into()); } // Collect the rent for the first block to prevent the creation of very large @@ -823,9 +825,8 @@ where // in order to keep up the guarantuee that we always leave a tombstone behind // with the exception of a contract that called `seal_terminate`. let contract = Rent:: - ::charge(&account_id, frame.invalidate(), occupied_storage) - .map_err(|e| (e.into(), code_len))? - .ok_or((Error::::NewContractNotFunded.into(), code_len))?; + ::charge(&account_id, frame.invalidate(), occupied_storage)? + .ok_or(Error::::NewContractNotFunded)?; frame.contract_info = CachedContract::Cached(contract); // Deposit an instantiation event. @@ -835,7 +836,7 @@ where )); } - Ok((output, code_len)) + Ok(output) }; // All changes performed by the contract are executed under a storage transaction. @@ -843,8 +844,8 @@ where // comitted or rolled back when popping the frame. let (success, output) = with_transaction(|| { let output = do_transaction(); - match output { - Ok((ref result, _)) if result.is_success() => { + match &output { + Ok(result) if result.is_success() => { TransactionOutcome::Commit((true, output)) }, _ => TransactionOutcome::Rollback((false, output)), @@ -1055,7 +1056,7 @@ where value: BalanceOf, input_data: Vec, allows_reentry: bool, - ) -> Result<(ExecReturnValue, u32), (ExecError, u32)> { + ) -> Result { // Before pushing the new frame: Protect the caller contract against reentrancy attacks. // It is important to do this before calling `allows_reentry` so that a direct recursion // is caught by it. @@ -1063,7 +1064,7 @@ where let try_call = || { if !self.allows_reentry(&to) { - return Err((>::ReentranceDenied.into(), 0)); + return Err(>::ReentranceDenied.into()); } // We ignore instantiate frames in our search for a cached contract. 
// Otherwise it would be possible to recursively call a contract from its own @@ -1101,9 +1102,8 @@ where endowment: BalanceOf, input_data: Vec, salt: &[u8], - ) -> Result<(AccountIdOf, ExecReturnValue, u32), (ExecError, u32)> { - let executable = E::from_storage(code_hash, &self.schedule, self.gas_meter()) - .map_err(|e| (e.into(), 0))?; + ) -> Result<(AccountIdOf, ExecReturnValue), ExecError> { + let executable = E::from_storage(code_hash, &self.schedule, self.gas_meter())?; let trie_seed = self.next_trie_seed(); let executable = self.push_frame( FrameArgs::Instantiate { @@ -1116,33 +1116,29 @@ where gas_limit, )?; let account_id = self.top_frame().account_id.clone(); - self.run(executable, input_data) - .map(|(ret, code_len)| (account_id, ret, code_len)) + self.run(executable, input_data).map(|ret| (account_id, ret)) } - fn terminate( - &mut self, - beneficiary: &AccountIdOf, - ) -> Result { + fn terminate(&mut self, beneficiary: &AccountIdOf) -> Result<(), DispatchError> { if self.is_recursive() { - return Err((Error::::TerminatedWhileReentrant.into(), 0)); + return Err(Error::::TerminatedWhileReentrant.into()); } let frame = self.top_frame_mut(); let info = frame.terminate(); - Storage::::queue_trie_for_deletion(&info).map_err(|e| (e, 0))?; + Storage::::queue_trie_for_deletion(&info)?; >::transfer( true, true, &frame.account_id, beneficiary, T::Currency::free_balance(&frame.account_id), - ).map_err(|e| (e, 0))?; + )?; ContractInfoOf::::remove(&frame.account_id); - let code_len = E::remove_user(info.code_hash); + E::remove_user(info.code_hash, &mut frame.nested_meter)?; Contracts::::deposit_event( Event::Terminated(frame.account_id.clone(), beneficiary.clone()), ); - Ok(code_len) + Ok(()) } fn restore_to( @@ -1151,30 +1147,33 @@ where code_hash: CodeHash, rent_allowance: BalanceOf, delta: Vec, - ) -> Result<(u32, u32), (DispatchError, u32, u32)> { + ) -> Result<(), DispatchError> { if self.is_recursive() { - return Err((Error::::TerminatedWhileReentrant.into(), 0, 0)); + return Err(Error::::TerminatedWhileReentrant.into()); } - let origin_contract = self.top_frame_mut().contract_info().clone(); + let frame = self.top_frame_mut(); + let origin_contract = frame.contract_info().clone(); + let account_id = frame.account_id.clone(); let result = Rent::::restore_to( - &self.top_frame().account_id, + &account_id, origin_contract, dest.clone(), code_hash.clone(), rent_allowance, delta, + &mut frame.nested_meter, ); if let Ok(_) = result { deposit_event::( vec![], Event::Restored( - self.top_frame().account_id.clone(), + account_id, dest, code_hash, rent_allowance, ), ); - self.top_frame_mut().terminate(); + frame.terminate(); } result } @@ -1463,14 +1462,18 @@ mod tests { MockLoader::decrement_refcount(self.code_hash); } - fn add_user(code_hash: CodeHash) -> Result { + fn add_user(code_hash: CodeHash, _: &mut GasMeter) + -> Result<(), DispatchError> + { MockLoader::increment_refcount(code_hash); - Ok(0) + Ok(()) } - fn remove_user(code_hash: CodeHash) -> u32 { + fn remove_user(code_hash: CodeHash, _: &mut GasMeter) + -> Result<(), DispatchError> + { MockLoader::decrement_refcount(code_hash); - 0 + Ok(()) } fn execute>( @@ -1597,7 +1600,7 @@ mod tests { None, ).unwrap(); - assert!(!output.0.is_success()); + assert!(!output.is_success()); assert_eq!(get_balance(&origin), 100); // the rent is still charged @@ -1658,8 +1661,8 @@ mod tests { ); let output = result.unwrap(); - assert!(output.0.is_success()); - assert_eq!(output.0.data, Bytes(vec![1, 2, 3, 4])); + assert!(output.is_success()); + 
assert_eq!(output.data, Bytes(vec![1, 2, 3, 4])); }); } @@ -1689,8 +1692,8 @@ mod tests { ); let output = result.unwrap(); - assert!(!output.0.is_success()); - assert_eq!(output.0.data, Bytes(vec![1, 2, 3, 4])); + assert!(!output.is_success()); + assert_eq!(output.data, Bytes(vec![1, 2, 3, 4])); }); } @@ -1770,7 +1773,7 @@ mod tests { // Verify that we've got proper error and set `reached_bottom`. assert_eq!( r, - Err((Error::::MaxCallDepthReached.into(), 0)) + Err(Error::::MaxCallDepthReached.into()) ); *reached_bottom = true; } else { @@ -2000,7 +2003,7 @@ mod tests { let instantiated_contract_address = Rc::clone(&instantiated_contract_address); move |ctx, _| { // Instantiate a contract and save it's address in `instantiated_contract_address`. - let (address, output, _) = ctx.ext.instantiate( + let (address, output) = ctx.ext.instantiate( 0, dummy_ch, Contracts::::subsistence_threshold() * 3, @@ -2053,10 +2056,10 @@ mod tests { vec![], &[], ), - Err((ExecError { + Err(ExecError { error: DispatchError::Other("It's a trap!"), origin: ErrorOrigin::Callee, - }, 0)) + }) ); exec_success() @@ -2293,7 +2296,7 @@ mod tests { assert_ne!(original_allowance, changed_allowance); ctx.ext.set_rent_allowance(changed_allowance); assert_eq!( - ctx.ext.call(0, CHARLIE, 0, vec![], true).map(|v| v.0).map_err(|e| e.0), + ctx.ext.call(0, CHARLIE, 0, vec![], true), exec_trapped() ); assert_eq!(ctx.ext.rent_allowance(), changed_allowance); @@ -2330,7 +2333,7 @@ mod tests { let code = MockLoader::insert(Constructor, |ctx, _| { assert_matches!( ctx.ext.call(0, ctx.ext.address().clone(), 0, vec![], true), - Err((ExecError{error, ..}, _)) if error == >::ContractNotFound.into() + Err(ExecError{error, ..}) if error == >::ContractNotFound.into() ); exec_success() }); @@ -2426,7 +2429,7 @@ mod tests { // call the contract passed as input with disabled reentry let code_bob = MockLoader::insert(Call, |ctx, _| { let dest = Decode::decode(&mut ctx.input_data.as_ref()).unwrap(); - ctx.ext.call(0, dest, 0, vec![], false).map(|v| v.0).map_err(|e| e.0) + ctx.ext.call(0, dest, 0, vec![], false) }); let code_charlie = MockLoader::insert(Call, |_, _| { @@ -2459,7 +2462,7 @@ mod tests { 0, BOB.encode(), None, - ).map_err(|e| e.0.error), + ).map_err(|e| e.error), >::ReentranceDenied, ); }); @@ -2469,7 +2472,7 @@ mod tests { fn call_deny_reentry() { let code_bob = MockLoader::insert(Call, |ctx, _| { if ctx.input_data[0] == 0 { - ctx.ext.call(0, CHARLIE, 0, vec![], false).map(|v| v.0).map_err(|e| e.0) + ctx.ext.call(0, CHARLIE, 0, vec![], false) } else { exec_success() } @@ -2477,7 +2480,7 @@ mod tests { // call BOB with input set to '1' let code_charlie = MockLoader::insert(Call, |ctx, _| { - ctx.ext.call(0, BOB, 0, vec![1], true).map(|v| v.0).map_err(|e| e.0) + ctx.ext.call(0, BOB, 0, vec![1], true) }); ExtBuilder::default().build().execute_with(|| { @@ -2495,7 +2498,7 @@ mod tests { 0, vec![0], None, - ).map_err(|e| e.0.error), + ).map_err(|e| e.error), >::ReentranceDenied, ); }); diff --git a/frame/contracts/src/gas.rs b/frame/contracts/src/gas.rs index 2c19c999b56a3..34ddb3ceb0434 100644 --- a/frame/contracts/src/gas.rs +++ b/frame/contracts/src/gas.rs @@ -167,15 +167,6 @@ where self.gas_left = self.gas_left.saturating_add(adjustment).min(self.gas_limit); } - /// Refund previously charged gas back to the gas meter. - /// - /// This can be used if a gas worst case estimation must be charged before - /// performing a certain action. This way the difference can be refundend when - /// the worst case did not happen. 
- pub fn refund(&mut self, amount: ChargedAmount) { - self.gas_left = self.gas_left.saturating_add(amount.0).min(self.gas_limit) - } - /// Returns how much gas was used. pub fn gas_spent(&self) -> Weight { self.gas_limit - self.gas_left diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index f7dec843a7f7c..3ac56d8980cb2 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -275,9 +275,7 @@ pub mod pallet { /// * If the account is a regular account, any value will be transferred. /// * If no account exists and the call value is not less than `existential_deposit`, /// a regular account will be created and any value will be transferred. - #[pallet::weight(T::WeightInfo::call(T::Schedule::get().limits.code_len / 1024) - .saturating_add(*gas_limit) - )] + #[pallet::weight(T::WeightInfo::call().saturating_add(*gas_limit))] pub fn call( origin: OriginFor, dest: ::Source, @@ -289,13 +287,10 @@ pub mod pallet { let dest = T::Lookup::lookup(dest)?; let mut gas_meter = GasMeter::new(gas_limit); let schedule = T::Schedule::get(); - let (result, code_len) = match ExecStack::>::run_call( + let result = ExecStack::>::run_call( origin, dest, &mut gas_meter, &schedule, value, data, None, - ) { - Ok((output, len)) => (Ok(output), len), - Err((err, len)) => (Err(err), len), - }; - gas_meter.into_dispatch_result(result, T::WeightInfo::call(code_len / 1024)) + ); + gas_meter.into_dispatch_result(result, T::WeightInfo::call()) } /// Instantiates a new contract from the supplied `code` optionally transferring @@ -357,10 +352,7 @@ pub mod pallet { /// code deployment step. Instead, the `code_hash` of an on-chain deployed wasm binary /// must be supplied. #[pallet::weight( - T::WeightInfo::instantiate( - T::Schedule::get().limits.code_len / 1024, salt.len() as u32 / 1024 - ) - .saturating_add(*gas_limit) + T::WeightInfo::instantiate(salt.len() as u32 / 1024).saturating_add(*gas_limit) )] pub fn instantiate( origin: OriginFor, @@ -374,13 +366,12 @@ pub mod pallet { let mut gas_meter = GasMeter::new(gas_limit); let schedule = T::Schedule::get(); let executable = PrefabWasmModule::from_storage(code_hash, &schedule, &mut gas_meter)?; - let code_len = executable.code_len(); let result = ExecStack::>::run_instantiate( origin, executable, &mut gas_meter, &schedule, endowment, data, &salt, None, ).map(|(_address, output)| output); gas_meter.into_dispatch_result( result, - T::WeightInfo::instantiate(code_len / 1024, salt.len() as u32 / 1024), + T::WeightInfo::instantiate(salt.len() as u32 / 1024), ) } @@ -666,7 +657,7 @@ where origin, dest, &mut gas_meter, &schedule, value, input_data, debug_message.as_mut(), ); ContractExecResult { - result: result.map(|r| r.0).map_err(|r| r.0.error), + result: result.map_err(|r| r.error), gas_consumed: gas_meter.gas_spent(), debug_message: debug_message.unwrap_or_default(), } diff --git a/frame/contracts/src/rent.rs b/frame/contracts/src/rent.rs index 68e8c57e9adeb..3135862e88c90 100644 --- a/frame/contracts/src/rent.rs +++ b/frame/contracts/src/rent.rs @@ -20,7 +20,7 @@ use crate::{ AliveContractInfo, BalanceOf, ContractInfo, ContractInfoOf, Pallet, Event, TombstoneContractInfo, Config, CodeHash, Error, - storage::Storage, wasm::PrefabWasmModule, exec::Executable, + storage::Storage, wasm::PrefabWasmModule, exec::Executable, gas::GasMeter, }; use sp_std::prelude::*; use sp_io::hashing::blake2_256; @@ -232,10 +232,6 @@ where /// Upon succesful restoration, `origin` will be destroyed, all its funds are transferred to /// the restored account. 
The restored account will inherit the last write block and its last /// deduct block will be set to the current block. - /// - /// # Return Value - /// - /// Result<(CallerCodeSize, DestCodeSize), (DispatchError, CallerCodeSize, DestCodesize)> pub fn restore_to( origin: &T::AccountId, mut origin_contract: AliveContractInfo, @@ -243,18 +239,19 @@ where code_hash: CodeHash, rent_allowance: BalanceOf, delta: Vec, - ) -> Result<(u32, u32), (DispatchError, u32, u32)> { + gas_meter: &mut GasMeter, + ) -> Result<(), DispatchError> { let child_trie_info = origin_contract.child_trie_info(); let current_block = >::block_number(); if origin_contract.last_write == Some(current_block) { - return Err((Error::::InvalidContractOrigin.into(), 0, 0)); + return Err(Error::::InvalidContractOrigin.into()); } let dest_tombstone = >::get(&dest) .and_then(|c| c.get_tombstone()) - .ok_or((Error::::InvalidDestinationContract.into(), 0, 0))?; + .ok_or(Error::::InvalidDestinationContract)?; let last_write = if !delta.is_empty() { Some(current_block) @@ -263,7 +260,7 @@ where }; // Fails if the code hash does not exist on chain - let caller_code_len = E::add_user(code_hash).map_err(|e| (e, 0, 0))?; + E::add_user(code_hash, gas_meter)?; // We are allowed to eagerly modify storage even though the function can // fail later due to tombstones not matching. This is because the restoration @@ -287,13 +284,13 @@ where ); if tombstone != dest_tombstone { - return Err((Error::::InvalidTombstone.into(), caller_code_len, 0)); + return Err(Error::::InvalidTombstone.into()); } origin_contract.storage_size -= bytes_taken; >::remove(&origin); - let tombstone_code_len = E::remove_user(origin_contract.code_hash); + E::remove_user(origin_contract.code_hash, gas_meter)?; >::insert(&dest, ContractInfo::Alive(AliveContractInfo:: { code_hash, rent_allowance, @@ -306,8 +303,7 @@ where let origin_free_balance = T::Currency::free_balance(&origin); T::Currency::make_free_balance_be(&origin, >::zero()); T::Currency::deposit_creating(&dest, origin_free_balance); - - Ok((caller_code_len, tombstone_code_len)) + Ok(()) } /// Create a new `RentStatus` struct for pass through to a requesting contract. diff --git a/frame/contracts/src/schedule.rs b/frame/contracts/src/schedule.rs index 0bf7c050e5dfa..0abe0c54d7481 100644 --- a/frame/contracts/src/schedule.rs +++ b/frame/contracts/src/schedule.rs @@ -300,18 +300,9 @@ pub struct HostFnWeights { /// Weight of calling `seal_terminate`. pub terminate: Weight, - /// Weight per byte of the terminated contract. - pub terminate_per_code_byte: Weight, - /// Weight of calling `seal_restore_to`. pub restore_to: Weight, - /// Weight per byte of the restoring contract. - pub restore_to_per_caller_code_byte: Weight, - - /// Weight per byte of the restored contract. - pub restore_to_per_tombstone_code_byte: Weight, - /// Weight per delta key supplied to `seal_restore_to`. pub restore_to_per_delta: Weight, @@ -354,9 +345,6 @@ pub struct HostFnWeights { /// Weight of calling `seal_call`. pub call: Weight, - /// Weight per byte of the called contract. - pub call_per_code_byte: Weight, - /// Weight surcharge that is claimed if `seal_call` does a balance transfer. pub call_transfer_surcharge: Weight, @@ -369,9 +357,6 @@ pub struct HostFnWeights { /// Weight of calling `seal_instantiate`. pub instantiate: Weight, - /// Weight per byte of the instantiated contract. - pub instantiate_per_code_byte: Weight, - /// Weight per input byte supplied to `seal_instantiate`. 
pub instantiate_per_input_byte: Weight, @@ -588,11 +573,8 @@ impl Default for HostFnWeights { r#return: cost!(seal_return), return_per_byte: cost_byte!(seal_return_per_kb), terminate: cost!(seal_terminate), - terminate_per_code_byte: cost_byte!(seal_terminate_per_code_kb), restore_to: cost!(seal_restore_to), - restore_to_per_caller_code_byte: cost_byte_args!(seal_restore_to_per_code_kb_delta, 1, 0, 0), - restore_to_per_tombstone_code_byte: cost_byte_args!(seal_restore_to_per_code_kb_delta, 0, 1, 0), - restore_to_per_delta: cost_batched_args!(seal_restore_to_per_code_kb_delta, 0, 0, 1), + restore_to_per_delta: cost_batched!(seal_restore_to_per_delta), random: cost_batched!(seal_random), deposit_event: cost_batched!(seal_deposit_event), deposit_event_per_topic: cost_batched_args!(seal_deposit_event_per_topic_and_kb, 1, 0), @@ -606,15 +588,13 @@ impl Default for HostFnWeights { get_storage_per_byte: cost_byte_batched!(seal_get_storage_per_kb), transfer: cost_batched!(seal_transfer), call: cost_batched!(seal_call), - call_per_code_byte: cost_byte_batched_args!(seal_call_per_code_transfer_input_output_kb, 1, 0, 0, 0), - call_transfer_surcharge: cost_batched_args!(seal_call_per_code_transfer_input_output_kb, 0, 1, 0, 0), - call_per_input_byte: cost_byte_batched_args!(seal_call_per_code_transfer_input_output_kb, 0, 0, 1, 0), - call_per_output_byte: cost_byte_batched_args!(seal_call_per_code_transfer_input_output_kb, 0, 0, 0, 1), + call_transfer_surcharge: cost_batched_args!(seal_call_per_transfer_input_output_kb, 1, 0, 0), + call_per_input_byte: cost_byte_batched_args!(seal_call_per_transfer_input_output_kb, 0, 1, 0), + call_per_output_byte: cost_byte_batched_args!(seal_call_per_transfer_input_output_kb, 0, 0, 1), instantiate: cost_batched!(seal_instantiate), - instantiate_per_code_byte: cost_byte_batched_args!(seal_instantiate_per_code_input_output_salt_kb, 1, 0, 0, 0), - instantiate_per_input_byte: cost_byte_batched_args!(seal_instantiate_per_code_input_output_salt_kb, 0, 1, 0, 0), - instantiate_per_output_byte: cost_byte_batched_args!(seal_instantiate_per_code_input_output_salt_kb, 0, 0, 1, 0), - instantiate_per_salt_byte: cost_byte_batched_args!(seal_instantiate_per_code_input_output_salt_kb, 0, 0, 0, 1), + instantiate_per_input_byte: cost_byte_batched_args!(seal_instantiate_per_input_output_salt_kb, 1, 0, 0), + instantiate_per_output_byte: cost_byte_batched_args!(seal_instantiate_per_input_output_salt_kb, 0, 1, 0), + instantiate_per_salt_byte: cost_byte_batched_args!(seal_instantiate_per_input_output_salt_kb, 0, 0, 1), hash_sha2_256: cost_batched!(seal_hash_sha2_256), hash_sha2_256_per_byte: cost_byte_batched!(seal_hash_sha2_256_per_kb), hash_keccak_256: cost_batched!(seal_hash_keccak_256), diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 619bd8eac9d35..b3ee139008bc5 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -363,7 +363,7 @@ where fn calling_plain_account_fails() { ExtBuilder::default().build().execute_with(|| { let _ = Balances::deposit_creating(&ALICE, 100_000_000); - let base_cost = <::WeightInfo as WeightInfo>::call(0); + let base_cost = <::WeightInfo as WeightInfo>::call(); assert_eq!( Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, Vec::new()), @@ -1727,6 +1727,10 @@ fn self_destruct_works() { Ok(_) ); + // The call triggers rent collection that reduces the amount of balance + // that remains for the beneficiary. 
+ let balance_after_rent = 93_078; + pretty_assertions::assert_eq!(System::events(), vec![ EventRecord { phase: Phase::Initialization, @@ -1738,7 +1742,7 @@ fn self_destruct_works() { EventRecord { phase: Phase::Initialization, event: Event::Balances( - pallet_balances::Event::Transfer(addr.clone(), DJANGO, 93_086) + pallet_balances::Event::Transfer(addr.clone(), DJANGO, balance_after_rent) ), topics: vec![], }, @@ -1761,7 +1765,7 @@ fn self_destruct_works() { // check that the beneficiary (django) got remaining balance // some rent was deducted before termination - assert_eq!(Balances::free_balance(DJANGO), 1_093_086); + assert_eq!(Balances::free_balance(DJANGO), 1_000_000 + balance_after_rent); }); } @@ -2938,3 +2942,59 @@ fn debug_message_invalid_utf8() { assert_err!(result.result, >::DebugMessageInvalidUTF8); }); } + +#[test] +fn gas_estimation_correct() { + let (caller_code, caller_hash) = compile_module::("call_return_code").unwrap(); + let (callee_code, callee_hash) = compile_module::("dummy").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let subsistence = Pallet::::subsistence_threshold(); + let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); + let _ = Balances::deposit_creating(&CHARLIE, 1000 * subsistence); + + assert_ok!( + Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + caller_code, + vec![], + vec![0], + ), + ); + let addr_caller = Contracts::contract_address(&ALICE, &caller_hash, &[0]); + + assert_ok!( + Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + callee_code, + vec![], + vec![1], + ), + ); + let addr_callee = Contracts::contract_address(&ALICE, &callee_hash, &[1]); + + // Call in order to determine the gas that is required for this call + let result = Contracts::bare_call( + ALICE, + addr_caller.clone(), + 0, + GAS_LIMIT, + AsRef::<[u8]>::as_ref(&addr_callee).to_vec(), + false, + ); + assert_ok!(result.result); + + // Make the same call using the estimated gas. Should succeed. + assert_ok!(Contracts::bare_call( + ALICE, + addr_caller, + 0, + result.gas_consumed, + AsRef::<[u8]>::as_ref(&addr_callee).to_vec(), + false, + ).result); + }); +} diff --git a/frame/contracts/src/wasm/code_cache.rs b/frame/contracts/src/wasm/code_cache.rs index 8df604cdb0e1c..a2aa2b55e1657 100644 --- a/frame/contracts/src/wasm/code_cache.rs +++ b/frame/contracts/src/wasm/code_cache.rs @@ -81,14 +81,16 @@ where } /// Increment the refcount of a code in-storage by one. -pub fn increment_refcount(code_hash: CodeHash) -> Result +pub fn increment_refcount(code_hash: CodeHash, gas_meter: &mut GasMeter) + -> Result<(), DispatchError> where T::AccountId: UncheckedFrom + AsRef<[u8]> { + gas_meter.charge(CodeToken::UpdateRefcount(estimate_code_size::(&code_hash)?))?; >::mutate(code_hash, |existing| { if let Some(module) = existing { increment_64(&mut module.refcount); - Ok(module.original_code_len) + Ok(()) } else { Err(Error::::CodeNotFound.into()) } @@ -96,23 +98,24 @@ where } /// Decrement the refcount of a code in-storage by one and remove the code when it drops to zero. 
-pub fn decrement_refcount(code_hash: CodeHash) -> u32 +pub fn decrement_refcount(code_hash: CodeHash, gas_meter: &mut GasMeter) + -> Result<(), DispatchError> where T::AccountId: UncheckedFrom + AsRef<[u8]> { + if let Ok(len) = estimate_code_size::(&code_hash) { + gas_meter.charge(CodeToken::UpdateRefcount(len))?; + } >::mutate_exists(code_hash, |existing| { if let Some(module) = existing { - let code_len = module.original_code_len; module.refcount = module.refcount.saturating_sub(1); if module.refcount == 0 { *existing = None; finish_removal::(code_hash); } - code_len - } else { - 0 } - }) + }); + Ok(()) } /// Load code with the given code hash. @@ -120,13 +123,24 @@ where /// If the module was instrumented with a lower version of schedule than /// the current one given as an argument, then this function will perform /// re-instrumentation and update the cache in the storage. +/// +/// # Note +/// +/// If `reinstrument` is set it is assumed that the load is performed in the context of +/// a contract call: This means we charge the size based cost for loading the contract. pub fn load( code_hash: CodeHash, - reinstrument: Option<(&Schedule, &mut GasMeter)>, + mut reinstrument: Option<(&Schedule, &mut GasMeter)>, ) -> Result, DispatchError> where T::AccountId: UncheckedFrom + AsRef<[u8]> { + // The reinstrument case coincides with the cases where we need to charge extra + // based upon the code size: On-chain execution. + if let Some((_, gas_meter)) = &mut reinstrument { + gas_meter.charge(CodeToken::Load(estimate_code_size::(&code_hash)?))?; + } + let mut prefab_module = >::get(code_hash) .ok_or_else(|| Error::::CodeNotFound)?; prefab_module.code_hash = code_hash; @@ -135,7 +149,7 @@ where if prefab_module.instruction_weights_version < schedule.instruction_weights.version { // The instruction weights have changed. // We need to re-instrument the code with the new instruction weights. - gas_meter.charge(InstrumentToken(prefab_module.original_code_len))?; + gas_meter.charge(CodeToken::Instrument(prefab_module.original_code_len))?; private::reinstrument(&mut prefab_module, schedule)?; } } @@ -185,14 +199,50 @@ fn increment_64(refcount: &mut u64) { "); } -/// Token to be supplied to the gas meter which charges the weight needed for reinstrumenting -/// a contract of the specified size in bytes. +/// Get the size of the instrumented code stored at `code_hash` without loading it. +/// +/// The returned value is slightly too large because it also contains the fields apart from +/// `code` which are located inside [`PrefabWasmModule`]. However, those are negligible when +/// compared to the code size. Additionally, charging too much weight is completely safe. +fn estimate_code_size(code_hash: &CodeHash) -> Result +where + T::AccountId: UncheckedFrom + AsRef<[u8]> +{ + let key = >::hashed_key_for(code_hash); + let mut data = [0u8; 0]; + let len = sp_io::storage::read(&key, &mut data, 0).ok_or_else(|| Error::::CodeNotFound)?; + Ok(len) +} + +/// Costs for operations that are related to code handling. #[cfg_attr(test, derive(Debug, PartialEq, Eq))] #[derive(Clone, Copy)] -struct InstrumentToken(u32); +enum CodeToken { + /// Weight for instrumenting a contract of the supplied size in bytes. + Instrument(u32), + /// Weight for loading a contract per kilobyte. + Load(u32), + /// Weight for changing the refcount of a contract per kilobyte. 
+ UpdateRefcount(u32), +} -impl Token for InstrumentToken { +impl Token for CodeToken +where + T: Config, + T::AccountId: UncheckedFrom + AsRef<[u8]> +{ fn weight(&self) -> Weight { - T::WeightInfo::instrument(self.0 / 1024) + use self::CodeToken::*; + // In case of `Load` and `UpdateRefcount` we already covered the general costs of + // accessing the storage but still need to account for the actual size of the + // contract code. This is why we subtract `T::*::(0)`. We need to do this at this + // point because when charging the general weight we do not know the size of + // the contract. + match *self { + Instrument(len) => T::WeightInfo::instrument(len / 1024), + Load(len) => T::WeightInfo::code_load(len / 1024).saturating_sub(T::WeightInfo::code_load(0)), + UpdateRefcount(len) => + T::WeightInfo::code_refcount(len / 1024).saturating_sub(T::WeightInfo::code_refcount(0)), + } } } diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index 5f9936c68dfbe..03a409bb12fe2 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -168,12 +168,16 @@ where code_cache::store_decremented(self); } - fn add_user(code_hash: CodeHash) -> Result { - code_cache::increment_refcount::(code_hash) + fn add_user(code_hash: CodeHash, gas_meter: &mut GasMeter) + -> Result<(), DispatchError> + { + code_cache::increment_refcount::(code_hash, gas_meter) } - fn remove_user(code_hash: CodeHash) -> u32 { - code_cache::decrement_refcount::(code_hash) + fn remove_user(code_hash: CodeHash, gas_meter: &mut GasMeter) + -> Result<(), DispatchError> + { + code_cache::decrement_refcount::(code_hash, gas_meter) } fn execute>( @@ -349,14 +353,14 @@ mod tests { value: u64, data: Vec, allows_reentry: bool, - ) -> Result<(ExecReturnValue, u32), (ExecError, u32)> { + ) -> Result { self.calls.push(CallEntry { to, value, data, allows_reentry, }); - Ok((ExecReturnValue { flags: ReturnFlags::empty(), data: call_return_data() }, 0)) + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: call_return_data() }) } fn instantiate( &mut self, @@ -365,7 +369,7 @@ mod tests { endowment: u64, data: Vec, salt: &[u8], - ) -> Result<(AccountIdOf, ExecReturnValue, u32), (ExecError, u32)> { + ) -> Result<(AccountIdOf, ExecReturnValue), ExecError> { self.instantiates.push(InstantiateEntry { code_hash: code_hash.clone(), endowment, @@ -379,7 +383,6 @@ mod tests { flags: ReturnFlags::empty(), data: Bytes(Vec::new()), }, - 0, )) } fn transfer( @@ -396,11 +399,11 @@ mod tests { fn terminate( &mut self, beneficiary: &AccountIdOf, - ) -> Result { + ) -> Result<(), DispatchError> { self.terminations.push(TerminationEntry { beneficiary: beneficiary.clone(), }); - Ok(0) + Ok(()) } fn restore_to( &mut self, @@ -408,14 +411,14 @@ mod tests { code_hash: H256, rent_allowance: u64, delta: Vec, - ) -> Result<(u32, u32), (DispatchError, u32, u32)> { + ) -> Result<(), DispatchError> { self.restores.push(RestoreEntry { dest, code_hash, rent_allowance, delta, }); - Ok((0, 0)) + Ok(()) } fn get_storage(&mut self, key: &StorageKey) -> Option> { self.storage.get(key).cloned() @@ -616,7 +619,7 @@ mod tests { fn contract_call_forward_input() { const CODE: &str = r#" (module - (import "__unstable__" "seal_call" (func $seal_call (param i32 i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) + (import "__unstable__" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32) (result i32))) (import "seal0" "seal_input" (func $seal_input (param i32 i32))) (import "env" "memory" (memory 1 1)) (func (export "call") 
@@ -624,10 +627,8 @@ mod tests { (call $seal_call (i32.const 1) ;; Set FORWARD_INPUT bit (i32.const 4) ;; Pointer to "callee" address. - (i32.const 32) ;; Length of "callee" address. (i64.const 0) ;; How much gas to devote for the execution. 0 = all. (i32.const 36) ;; Pointer to the buffer with value to transfer - (i32.const 8) ;; Length of the buffer with value to transfer. (i32.const 44) ;; Pointer to input data buffer address (i32.const 4) ;; Length of input data buffer (i32.const 4294967295) ;; u32 max value is the sentinel value: do not copy output @@ -678,7 +679,7 @@ mod tests { fn contract_call_clone_input() { const CODE: &str = r#" (module - (import "__unstable__" "seal_call" (func $seal_call (param i32 i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) + (import "__unstable__" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32) (result i32))) (import "seal0" "seal_input" (func $seal_input (param i32 i32))) (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) @@ -687,10 +688,8 @@ mod tests { (call $seal_call (i32.const 11) ;; Set FORWARD_INPUT | CLONE_INPUT | ALLOW_REENTRY bits (i32.const 4) ;; Pointer to "callee" address. - (i32.const 32) ;; Length of "callee" address. (i64.const 0) ;; How much gas to devote for the execution. 0 = all. (i32.const 36) ;; Pointer to the buffer with value to transfer - (i32.const 8) ;; Length of the buffer with value to transfer. (i32.const 44) ;; Pointer to input data buffer address (i32.const 4) ;; Length of input data buffer (i32.const 4294967295) ;; u32 max value is the sentinel value: do not copy output @@ -741,17 +740,15 @@ mod tests { fn contract_call_tail_call() { const CODE: &str = r#" (module - (import "__unstable__" "seal_call" (func $seal_call (param i32 i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) + (import "__unstable__" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32) (result i32))) (import "env" "memory" (memory 1 1)) (func (export "call") (drop (call $seal_call (i32.const 5) ;; Set FORWARD_INPUT | TAIL_CALL bit (i32.const 4) ;; Pointer to "callee" address. - (i32.const 32) ;; Length of "callee" address. (i64.const 0) ;; How much gas to devote for the execution. 0 = all. (i32.const 36) ;; Pointer to the buffer with value to transfer - (i32.const 8) ;; Length of the buffer with value to transfer. (i32.const 0) ;; Pointer to input data buffer address (i32.const 0) ;; Length of input data buffer (i32.const 4294967295) ;; u32 max value is the sentinel value: do not copy output @@ -2000,25 +1997,18 @@ mod tests { "#; #[test] - fn contract_decode_failure() { + fn contract_decode_length_ignored() { let mut mock_ext = MockExt::default(); let result = execute( CODE_DECODE_FAILURE, vec![], &mut mock_ext, ); - - assert_eq!( - result, - Err(ExecError { - error: Error::::DecodingFailed.into(), - origin: ErrorOrigin::Caller, - }) - ); + // AccountID implements `MaxEncodeLen` and therefore the supplied length is + // no longer needed nor used to determine how much is read from contract memory. 
+ assert_ok!(result); } - - #[test] #[cfg(feature = "unstable-interface")] fn rent_params_work() { diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index 8d1782e84d60d..28987bba9d700 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -26,7 +26,7 @@ use crate::{ }; use bitflags::bitflags; use pwasm_utils::parity_wasm::elements::ValueType; -use frame_support::{dispatch::DispatchError, ensure, traits::Get, weights::Weight}; +use frame_support::{dispatch::DispatchError, ensure, weights::Weight, traits::MaxEncodedLen}; use sp_std::prelude::*; use codec::{Decode, DecodeAll, Encode}; use sp_core::{Bytes, crypto::UncheckedFrom}; @@ -170,12 +170,8 @@ pub enum RuntimeCosts { Return(u32), /// Weight of calling `seal_terminate`. Terminate, - /// Weight that is added to `seal_terminate` for every byte of the terminated contract. - TerminateSurchargeCodeSize(u32), /// Weight of calling `seal_restore_to` per number of supplied delta entries. RestoreTo(u32), - /// Weight that is added to `seal_restore_to` for the involved code sizes. - RestoreToSurchargeCodeSize{caller_code: u32, tombstone_code: u32}, /// Weight of calling `seal_random`. It includes the weight for copying the subject. Random, /// Weight of calling `seal_deposit_event` with the given number of topics and event size. @@ -197,8 +193,6 @@ pub enum RuntimeCosts { Transfer, /// Weight of calling `seal_call` for the given input size. CallBase(u32), - /// Weight that is added to `seal_call` for every byte of the called contract. - CallSurchargeCodeSize(u32), /// Weight of the transfer performed during a call. CallSurchargeTransfer, /// Weight of output received through `seal_call` for the given size. @@ -207,8 +201,6 @@ pub enum RuntimeCosts { /// This includes the transfer as an instantiate without a value will always be below /// the existential deposit and is disregarded as corner case. InstantiateBase{input_data_len: u32, salt_len: u32}, - /// Weight that is added to `seal_instantiate` for every byte of the instantiated contract. - InstantiateSurchargeCodeSize(u32), /// Weight of output received through `seal_instantiate` for the given size. InstantiateCopyOut(u32), /// Weight of calling `seal_hash_sha_256` for the given input size. @@ -221,8 +213,6 @@ pub enum RuntimeCosts { HashBlake128(u32), /// Weight charged by a chain extension through `seal_call_chain_extension`. ChainExtension(u64), - /// Weight charged for copying data from the sandbox. 
- CopyIn(u32), } impl RuntimeCosts { @@ -250,13 +240,8 @@ impl RuntimeCosts { Return(len) => s.r#return .saturating_add(s.return_per_byte.saturating_mul(len.into())), Terminate => s.terminate, - TerminateSurchargeCodeSize(len) => s.terminate_per_code_byte.saturating_mul(len.into()), RestoreTo(delta) => s.restore_to .saturating_add(s.restore_to_per_delta.saturating_mul(delta.into())), - RestoreToSurchargeCodeSize{caller_code, tombstone_code} => - s.restore_to_per_caller_code_byte.saturating_mul(caller_code.into()).saturating_add( - s.restore_to_per_tombstone_code_byte.saturating_mul(tombstone_code.into()) - ), Random => s.random, DepositEvent{num_topic, len} => s.deposit_event .saturating_add(s.deposit_event_per_topic.saturating_mul(num_topic.into())) @@ -272,14 +257,11 @@ impl RuntimeCosts { Transfer => s.transfer, CallBase(len) => s.call .saturating_add(s.call_per_input_byte.saturating_mul(len.into())), - CallSurchargeCodeSize(len) => s.call_per_code_byte.saturating_mul(len.into()), CallSurchargeTransfer => s.call_transfer_surcharge, CallCopyOut(len) => s.call_per_output_byte.saturating_mul(len.into()), InstantiateBase{input_data_len, salt_len} => s.instantiate .saturating_add(s.instantiate_per_input_byte.saturating_mul(input_data_len.into())) .saturating_add(s.instantiate_per_salt_byte.saturating_mul(salt_len.into())), - InstantiateSurchargeCodeSize(len) => - s.instantiate_per_code_byte.saturating_mul(len.into()), InstantiateCopyOut(len) => s.instantiate_per_output_byte .saturating_mul(len.into()), HashSha256(len) => s.hash_sha2_256 @@ -291,7 +273,6 @@ impl RuntimeCosts { HashBlake128(len) => s.hash_blake2_128 .saturating_add(s.hash_blake2_128_per_byte.saturating_mul(len.into())), ChainExtension(amount) => amount, - CopyIn(len) => s.return_per_byte.saturating_mul(len.into()), }; RuntimeToken { #[cfg(test)] @@ -476,15 +457,6 @@ where self.ext.gas_meter().charge(token) } - /// Correct previously charged gas amount. - pub fn adjust_gas(&mut self, charged_amount: ChargedAmount, adjusted_amount: RuntimeCosts) { - let adjusted_amount = adjusted_amount.token(&self.ext.schedule().host_fn_weights); - self.ext.gas_meter().adjust_gas( - charged_amount, - adjusted_amount, - ); - } - /// Read designated chunk from the sandbox memory. /// /// Returns `Err` if one of the following conditions occurs: @@ -511,6 +483,21 @@ where self.memory.get(ptr, buf).map_err(|_| Error::::OutOfBounds.into()) } + /// Reads and decodes a type with a size fixed at compile time from contract memory. + /// + /// # Note + /// + /// The weight of reading a fixed value is included in the overall weight of any + /// contract callable function. + pub fn read_sandbox_memory_as(&self, ptr: u32) + -> Result + { + let buf = self.read_sandbox_memory(ptr, D::max_encoded_len() as u32)?; + let decoded = D::decode_all(&mut &buf[..]) + .map_err(|_| DispatchError::from(Error::::DecodingFailed))?; + Ok(decoded) + } + /// Read designated chunk from the sandbox memory and attempt to decode into the specified type. /// /// Returns `Err` if one of the following conditions occurs: @@ -520,25 +507,14 @@ where /// /// # Note /// - /// It is safe to forgo benchmarking and charging weight relative to `len` for fixed - /// size types (basically everything not containing a heap collection): - /// Despite the fact that we are usually about to read the encoding of a fixed size - /// type, we cannot know the encoded size of that type. We therefore are required to - /// use the length provided by the contract. 
This length is untrusted and therefore - /// we charge weight relative to the provided size upfront that covers the copy costs. - /// On success this cost is refunded as the copying was already covered in the - /// overall cost of the host function. This is different from `read_sandbox_memory` - /// where the size is dynamic and the costs resulting from that dynamic size must - /// be charged relative to this dynamic size anyways (before reading) by constructing - /// the benchmark for that. - pub fn read_sandbox_memory_as(&mut self, ptr: u32, len: u32) + /// There must be an extra benchmark for determining the influence of `len` with + /// regard to the overall weight. + pub fn read_sandbox_memory_as_unbounded(&self, ptr: u32, len: u32) -> Result { - let amount = self.charge_gas(RuntimeCosts::CopyIn(len))?; let buf = self.read_sandbox_memory(ptr, len)?; let decoded = D::decode_all(&mut &buf[..]) .map_err(|_| DispatchError::from(Error::::DecodingFailed))?; - self.ext.gas_meter().refund(amount); Ok(decoded) } @@ -575,7 +551,7 @@ where } let buf_len = buf.len() as u32; - let len: u32 = self.read_sandbox_memory_as(out_len_ptr, 4)?; + let len: u32 = self.read_sandbox_memory_as(out_len_ptr)?; if len < buf_len { Err(Error::::OutputBufferTooSmall)? @@ -675,19 +651,18 @@ where &mut self, flags: CallFlags, callee_ptr: u32, - callee_len: u32, gas: u64, value_ptr: u32, - value_len: u32, input_data_ptr: u32, input_data_len: u32, output_ptr: u32, output_len_ptr: u32 - ) -> Result { + ) -> Result + { self.charge_gas(RuntimeCosts::CallBase(input_data_len))?; let callee: <::T as frame_system::Config>::AccountId = - self.read_sandbox_memory_as(callee_ptr, callee_len)?; - let value: BalanceOf<::T> = self.read_sandbox_memory_as(value_ptr, value_len)?; + self.read_sandbox_memory_as(callee_ptr)?; + let value: BalanceOf<::T> = self.read_sandbox_memory_as(value_ptr)?; let input_data = if flags.contains(CallFlags::CLONE_INPUT) { self.input_data.as_ref().ok_or_else(|| Error::::InputForwarded)?.clone() } else if flags.contains(CallFlags::FORWARD_INPUT) { @@ -698,23 +673,15 @@ where if value > 0u32.into() { self.charge_gas(RuntimeCosts::CallSurchargeTransfer)?; } - let charged = self.charge_gas( - RuntimeCosts::CallSurchargeCodeSize(::Schedule::get().limits.code_len) - )?; let ext = &mut self.ext; let call_outcome = ext.call( gas, callee, value, input_data, flags.contains(CallFlags::ALLOW_REENTRY), ); - let code_len = match &call_outcome { - Ok((_, len)) => len, - Err((_, len)) => len, - }; - self.adjust_gas(charged, RuntimeCosts::CallSurchargeCodeSize(*code_len)); // `TAIL_CALL` only matters on an `OK` result. Otherwise the call stack comes to // a halt anyways without anymore code being executed. if flags.contains(CallFlags::TAIL_CALL) { - if let Ok((return_value, _)) = call_outcome { + if let Ok(return_value) = call_outcome { return Err(TrapReason::Return(ReturnData { flags: return_value.flags.bits(), data: return_value.data.0, @@ -722,12 +689,98 @@ where } } - if let Ok((output, _)) = &call_outcome { + if let Ok(output) = &call_outcome { self.write_sandbox_output(output_ptr, output_len_ptr, &output.data, true, |len| { Some(RuntimeCosts::CallCopyOut(len)) })?; } - Ok(Runtime::::exec_into_return_code(call_outcome.map(|r| r.0).map_err(|r| r.0))?) + Ok(Runtime::::exec_into_return_code(call_outcome)?) 
+ } + + fn instantiate( + &mut self, + code_hash_ptr: u32, + gas: u64, + value_ptr: u32, + input_data_ptr: u32, + input_data_len: u32, + address_ptr: u32, + address_len_ptr: u32, + output_ptr: u32, + output_len_ptr: u32, + salt_ptr: u32, + salt_len: u32 + ) -> Result + { + self.charge_gas(RuntimeCosts::InstantiateBase {input_data_len, salt_len})?; + let code_hash: CodeHash<::T> = self.read_sandbox_memory_as(code_hash_ptr)?; + let value: BalanceOf<::T> = self.read_sandbox_memory_as(value_ptr)?; + let input_data = self.read_sandbox_memory(input_data_ptr, input_data_len)?; + let salt = self.read_sandbox_memory(salt_ptr, salt_len)?; + let instantiate_outcome = self.ext.instantiate(gas, code_hash, value, input_data, &salt); + if let Ok((address, output)) = &instantiate_outcome { + if !output.flags.contains(ReturnFlags::REVERT) { + self.write_sandbox_output( + address_ptr, address_len_ptr, &address.encode(), true, already_charged, + )?; + } + self.write_sandbox_output(output_ptr, output_len_ptr, &output.data, true, |len| { + Some(RuntimeCosts::InstantiateCopyOut(len)) + })?; + } + Ok(Runtime::::exec_into_return_code(instantiate_outcome.map(|(_, retval)| retval))?) + } + + fn terminate(&mut self, beneficiary_ptr: u32) -> Result<(), TrapReason> { + self.charge_gas(RuntimeCosts::Terminate)?; + let beneficiary: <::T as frame_system::Config>::AccountId = + self.read_sandbox_memory_as(beneficiary_ptr)?; + self.ext.terminate(&beneficiary)?; + Err(TrapReason::Termination) + } + + fn restore_to( + &mut self, + dest_ptr: u32, + code_hash_ptr: u32, + rent_allowance_ptr: u32, + delta_ptr: u32, + delta_count: u32 + ) -> Result<(), TrapReason> { + self.charge_gas(RuntimeCosts::RestoreTo(delta_count))?; + let dest: <::T as frame_system::Config>::AccountId = + self.read_sandbox_memory_as(dest_ptr)?; + let code_hash: CodeHash<::T> = + self.read_sandbox_memory_as(code_hash_ptr)?; + let rent_allowance: BalanceOf<::T> = + self.read_sandbox_memory_as(rent_allowance_ptr)?; + let delta = { + const KEY_SIZE: usize = 32; + + // We can eagerly allocate because we charged for the complete delta count already + // We still need to make sure that the allocation isn't larger than the memory + // allocator can handle. + let max_memory = self.ext.schedule().limits.max_memory_size(); + ensure!( + delta_count.saturating_mul(KEY_SIZE as u32) <= max_memory, + Error::::OutOfBounds, + ); + let mut delta = vec![[0; KEY_SIZE]; delta_count as usize]; + let mut key_ptr = delta_ptr; + + for i in 0..delta_count { + // Read the delta into the provided buffer + // This cannot panic because of the loop condition + self.read_sandbox_memory_into_buf(key_ptr, &mut delta[i as usize])?; + + // Offset key_ptr to the next element. 
+ key_ptr = key_ptr.checked_add(KEY_SIZE as u32).ok_or(Error::::OutOfBounds)?; + } + + delta + }; + self.ext.restore_to(dest, code_hash, rent_allowance, delta)?; + Err(TrapReason::Restoration) } } @@ -838,15 +891,15 @@ define_env!(Env, , [seal0] seal_transfer( ctx, account_ptr: u32, - account_len: u32, + _account_len: u32, value_ptr: u32, - value_len: u32 + _value_len: u32 ) -> ReturnCode => { ctx.charge_gas(RuntimeCosts::Transfer)?; let callee: <::T as frame_system::Config>::AccountId = - ctx.read_sandbox_memory_as(account_ptr, account_len)?; + ctx.read_sandbox_memory_as(account_ptr)?; let value: BalanceOf<::T> = - ctx.read_sandbox_memory_as(value_ptr, value_len)?; + ctx.read_sandbox_memory_as(value_ptr)?; let result = ctx.ext.transfer(&callee, value); match result { @@ -860,15 +913,23 @@ define_env!(Env, , // Make a call to another contract. // + // # Deprecation + // // This is equivalent to calling the newer version of this function with // `flags` set to `ALLOW_REENTRY`. See the newer version for documentation. + // + // # Note + // + // The values `_callee_len` and `_value_len` are ignored because the encoded sizes + // of those types are fixed through `[`MaxEncodedLen`]. The fields exist for backwards + // compatibility. Consider switching to the newest version of this function. [seal0] seal_call( ctx, callee_ptr: u32, - callee_len: u32, + _callee_len: u32, gas: u64, value_ptr: u32, - value_len: u32, + _value_len: u32, input_data_ptr: u32, input_data_len: u32, output_ptr: u32, @@ -877,10 +938,8 @@ define_env!(Env, , ctx.call( CallFlags::ALLOW_REENTRY, callee_ptr, - callee_len, gas, value_ptr, - value_len, input_data_ptr, input_data_len, output_ptr, @@ -899,11 +958,9 @@ define_env!(Env, , // - flags: See [`CallFlags`] for a documenation of the supported flags. // - callee_ptr: a pointer to the address of the callee contract. // Should be decodable as an `T::AccountId`. Traps otherwise. - // - callee_len: length of the address buffer. // - gas: how much gas to devote to the execution. // - value_ptr: a pointer to the buffer with value, how much value to send. // Should be decodable as a `T::Balance`. Traps otherwise. - // - value_len: length of the value buffer. // - input_data_ptr: a pointer to a buffer to be used as input data to the callee. // - input_data_len: length of the input data buffer. // - output_ptr: a pointer where the output buffer is copied to. @@ -924,10 +981,8 @@ define_env!(Env, , ctx, flags: u32, callee_ptr: u32, - callee_len: u32, gas: u64, value_ptr: u32, - value_len: u32, input_data_ptr: u32, input_data_len: u32, output_ptr: u32, @@ -936,10 +991,8 @@ define_env!(Env, , ctx.call( CallFlags::from_bits(flags).ok_or_else(|| "used rerved bit in CallFlags")?, callee_ptr, - callee_len, gas, value_ptr, - value_len, input_data_ptr, input_data_len, output_ptr, @@ -947,6 +1000,49 @@ define_env!(Env, , ) }, + // Instantiate a contract with the specified code hash. + // + // # Deprecation + // + // This is equivalent to calling the newer version of this function. The newer version + // drops the now unnecessary length fields. + // + // # Note + // + // The values `_code_hash_len` and `_value_len` are ignored because the encoded sizes + // of those types are fixed through `[`MaxEncodedLen`]. The fields exist for backwards + // compatibility. Consider switching to the newest version of this function. 
+ [seal0] seal_instantiate( + ctx, + code_hash_ptr: u32, + _code_hash_len: u32, + gas: u64, + value_ptr: u32, + _value_len: u32, + input_data_ptr: u32, + input_data_len: u32, + address_ptr: u32, + address_len_ptr: u32, + output_ptr: u32, + output_len_ptr: u32, + salt_ptr: u32, + salt_len: u32 + ) -> ReturnCode => { + ctx.instantiate ( + code_hash_ptr, + gas, + value_ptr, + input_data_ptr, + input_data_len, + address_ptr, + address_len_ptr, + output_ptr, + output_len_ptr, + salt_ptr, + salt_len, + ) + }, + // Instantiate a contract with the specified code hash. // // This function creates an account and executes the constructor defined in the code specified @@ -962,11 +1058,9 @@ define_env!(Env, , // # Parameters // // - code_hash_ptr: a pointer to the buffer that contains the initializer code. - // - code_hash_len: length of the initializer code buffer. // - gas: how much gas to devote to the execution of the initializer code. // - value_ptr: a pointer to the buffer with value, how much value to send. // Should be decodable as a `T::Balance`. Traps otherwise. - // - value_len: length of the value buffer. // - input_data_ptr: a pointer to a buffer to be used as input data to the initializer code. // - input_data_len: length of the input data buffer. // - address_ptr: a pointer where the new account's address is copied to. @@ -992,13 +1086,11 @@ define_env!(Env, , // `ReturnCode::TransferFailed` // `ReturnCode::NewContractNotFunded` // `ReturnCode::CodeNotFound` - [seal0] seal_instantiate( + [seal1] seal_instantiate( ctx, code_hash_ptr: u32, - code_hash_len: u32, gas: u64, value_ptr: u32, - value_len: u32, input_data_ptr: u32, input_data_len: u32, address_ptr: u32, @@ -1008,37 +1100,35 @@ define_env!(Env, , salt_ptr: u32, salt_len: u32 ) -> ReturnCode => { - ctx.charge_gas(RuntimeCosts::InstantiateBase {input_data_len, salt_len})?; - let code_hash: CodeHash<::T> = - ctx.read_sandbox_memory_as(code_hash_ptr, code_hash_len)?; - let value: BalanceOf<::T> = ctx.read_sandbox_memory_as(value_ptr, value_len)?; - let input_data = ctx.read_sandbox_memory(input_data_ptr, input_data_len)?; - let salt = ctx.read_sandbox_memory(salt_ptr, salt_len)?; - let charged = ctx.charge_gas( - RuntimeCosts::InstantiateSurchargeCodeSize( - ::Schedule::get().limits.code_len - ) - )?; - let ext = &mut ctx.ext; - let instantiate_outcome = ext.instantiate(gas, code_hash, value, input_data, &salt); - let code_len = match &instantiate_outcome { - Ok((_, _, code_len)) => code_len, - Err((_, code_len)) => code_len, - }; - ctx.adjust_gas(charged, RuntimeCosts::InstantiateSurchargeCodeSize(*code_len)); - if let Ok((address, output, _)) = &instantiate_outcome { - if !output.flags.contains(ReturnFlags::REVERT) { - ctx.write_sandbox_output( - address_ptr, address_len_ptr, &address.encode(), true, already_charged, - )?; - } - ctx.write_sandbox_output(output_ptr, output_len_ptr, &output.data, true, |len| { - Some(RuntimeCosts::InstantiateCopyOut(len)) - })?; - } - Ok(Runtime::::exec_into_return_code( - instantiate_outcome.map(|(_, retval, _)| retval).map_err(|(err, _)| err) - )?) + ctx.instantiate( + code_hash_ptr, + gas, + value_ptr, + input_data_ptr, + input_data_len, + address_ptr, + address_len_ptr, + output_ptr, + output_len_ptr, + salt_ptr, + salt_len, + ) + }, + + // Remove the calling account and transfer remaining balance. + // + // # Deprecation + // + // This is equivalent to calling the newer version of this function. The newer version + // drops the now unnecessary length fields. 
+ // + // # Note + // + // The value `_beneficiary_len` is ignored because the encoded sizes + // this type is fixed through `[`MaxEncodedLen`]. The field exist for backwards + // compatibility. Consider switching to the newest version of this function. + [seal0] seal_terminate(ctx, beneficiary_ptr: u32, _beneficiary_len: u32) => { + ctx.terminate(beneficiary_ptr) }, // Remove the calling account and transfer remaining balance. @@ -1050,33 +1140,14 @@ define_env!(Env, , // - beneficiary_ptr: a pointer to the address of the beneficiary account where all // where all remaining funds of the caller are transferred. // Should be decodable as an `T::AccountId`. Traps otherwise. - // - beneficiary_len: length of the address buffer. // // # Traps // // - The contract is live i.e is already on the call stack. // - Failed to send the balance to the beneficiary. // - The deletion queue is full. - [seal0] seal_terminate( - ctx, - beneficiary_ptr: u32, - beneficiary_len: u32 - ) => { - ctx.charge_gas(RuntimeCosts::Terminate)?; - let beneficiary: <::T as frame_system::Config>::AccountId = - ctx.read_sandbox_memory_as(beneficiary_ptr, beneficiary_len)?; - let charged = ctx.charge_gas( - RuntimeCosts::TerminateSurchargeCodeSize( - ::Schedule::get().limits.code_len - ) - )?; - let (result, code_len) = match ctx.ext.terminate(&beneficiary) { - Ok(len) => (Ok(()), len), - Err((err, len)) => (Err(err), len), - }; - ctx.adjust_gas(charged, RuntimeCosts::TerminateSurchargeCodeSize(code_len)); - result?; - Err(TrapReason::Termination) + [seal1] seal_terminate(ctx, beneficiary_ptr: u32) => { + ctx.terminate(beneficiary_ptr) }, // Stores the input passed by the caller into the supplied buffer. @@ -1323,6 +1394,38 @@ define_env!(Env, , )?) }, + // Try to restore the given destination contract sacrificing the caller. + // + // # Deprecation + // + // This is equivalent to calling the newer version of this function. The newer version + // drops the now unnecessary length fields. + // + // # Note + // + // The values `_dest_len`, `_code_hash_len` and `_rent_allowance_len` are ignored because + // the encoded sizes of those types are fixed through `[`MaxEncodedLen`]. The fields + // exist for backwards compatibility. Consider switching to the newest version of this function. + [seal0] seal_restore_to( + ctx, + dest_ptr: u32, + _dest_len: u32, + code_hash_ptr: u32, + _code_hash_len: u32, + rent_allowance_ptr: u32, + _rent_allowance_len: u32, + delta_ptr: u32, + delta_count: u32 + ) => { + ctx.restore_to( + dest_ptr, + code_hash_ptr, + rent_allowance_ptr, + delta_ptr, + delta_count, + ) + }, + // Try to restore the given destination contract sacrificing the caller. // // This function will compute a tombstone hash from the caller's storage and the given code hash @@ -1339,11 +1442,11 @@ define_env!(Env, , // On success, the destination contract is restored. This function is diverging and // stops execution even on success. // - // - `dest_ptr`, `dest_len` - the pointer and the length of a buffer that encodes `T::AccountId` + // - `dest_ptr` - the pointer to a buffer that encodes `T::AccountId` // with the address of the to be restored contract. - // - `code_hash_ptr`, `code_hash_len` - the pointer and the length of a buffer that encodes + // - `code_hash_ptr` - the pointer to a buffer that encodes // a code hash of the to be restored contract. 
- // - `rent_allowance_ptr`, `rent_allowance_len` - the pointer and the length of a buffer that + // - `rent_allowance_ptr` - the pointer to a buffer that // encodes the rent allowance that must be set in the case of successful restoration. // - `delta_ptr` is the pointer to the start of a buffer that has `delta_count` storage keys // laid out sequentially. @@ -1354,67 +1457,21 @@ define_env!(Env, , // - Tombstone hashes do not match. // - The calling contract is already present on the call stack. // - The supplied code_hash does not exist on-chain. - [seal0] seal_restore_to( + [seal1] seal_restore_to( ctx, dest_ptr: u32, - dest_len: u32, code_hash_ptr: u32, - code_hash_len: u32, rent_allowance_ptr: u32, - rent_allowance_len: u32, delta_ptr: u32, delta_count: u32 ) => { - ctx.charge_gas(RuntimeCosts::RestoreTo(delta_count))?; - let dest: <::T as frame_system::Config>::AccountId = - ctx.read_sandbox_memory_as(dest_ptr, dest_len)?; - let code_hash: CodeHash<::T> = - ctx.read_sandbox_memory_as(code_hash_ptr, code_hash_len)?; - let rent_allowance: BalanceOf<::T> = - ctx.read_sandbox_memory_as(rent_allowance_ptr, rent_allowance_len)?; - let delta = { - const KEY_SIZE: usize = 32; - - // We can eagerly allocate because we charged for the complete delta count already - // We still need to make sure that the allocation isn't larger than the memory - // allocator can handle. - ensure!( - delta_count - .saturating_mul(KEY_SIZE as u32) <= ctx.ext.schedule().limits.max_memory_size(), - Error::::OutOfBounds, - ); - let mut delta = vec![[0; KEY_SIZE]; delta_count as usize]; - let mut key_ptr = delta_ptr; - - for i in 0..delta_count { - // Read the delta into the provided buffer - // This cannot panic because of the loop condition - ctx.read_sandbox_memory_into_buf(key_ptr, &mut delta[i as usize])?; - - // Offset key_ptr to the next element. - key_ptr = key_ptr.checked_add(KEY_SIZE as u32).ok_or(Error::::OutOfBounds)?; - } - - delta - }; - - let max_len = ::Schedule::get().limits.code_len; - let charged = ctx.charge_gas(RuntimeCosts::RestoreToSurchargeCodeSize { - caller_code: max_len, - tombstone_code: max_len, - })?; - let (result, caller_code, tombstone_code) = match ctx.ext.restore_to( - dest, code_hash, rent_allowance, delta - ) { - Ok((code, tomb)) => (Ok(()), code, tomb), - Err((err, code, tomb)) => (Err(err), code, tomb), - }; - ctx.adjust_gas(charged, RuntimeCosts::RestoreToSurchargeCodeSize { - caller_code, - tombstone_code, - }); - result?; - Err(TrapReason::Restoration) + ctx.restore_to( + dest_ptr, + code_hash_ptr, + rent_allowance_ptr, + delta_ptr, + delta_count, + ) }, // Deposit a contract event with the data buffer and optional list of topics. There is a limit @@ -1460,7 +1517,7 @@ define_env!(Env, , let mut topics: Vec::::T>> = match topics_len { 0 => Vec::new(), - _ => ctx.read_sandbox_memory_as(topics_ptr, topics_len)?, + _ => ctx.read_sandbox_memory_as_unbounded(topics_ptr, topics_len)?, }; // If there are more than `event_topics`, then trap. @@ -1482,17 +1539,33 @@ define_env!(Env, , Ok(()) }, - // Set rent allowance of the contract + // Set rent allowance of the contract. + // + // # Deprecation + // + // This is equivalent to calling the newer version of this function. The newer version + // drops the now unnecessary length fields. + // + // # Note + // + // The value `_VALUE_len` is ignored because the encoded sizes + // this type is fixed through `[`MaxEncodedLen`]. The field exist for backwards + // compatibility. Consider switching to the newest version of this function. 
+ [seal0] seal_set_rent_allowance(ctx, value_ptr: u32, _value_len: u32) => { + ctx.charge_gas(RuntimeCosts::SetRentAllowance)?; + let value: BalanceOf<::T> = ctx.read_sandbox_memory_as(value_ptr)?; + ctx.ext.set_rent_allowance(value); + Ok(()) + }, + + // Set rent allowance of the contract. // // - value_ptr: a pointer to the buffer with value, how much to allow for rent // Should be decodable as a `T::Balance`. Traps otherwise. - // - value_len: length of the value buffer. - [seal0] seal_set_rent_allowance(ctx, value_ptr: u32, value_len: u32) => { + [seal1] seal_set_rent_allowance(ctx, value_ptr: u32) => { ctx.charge_gas(RuntimeCosts::SetRentAllowance)?; - let value: BalanceOf<::T> = - ctx.read_sandbox_memory_as(value_ptr, value_len)?; + let value: BalanceOf<::T> = ctx.read_sandbox_memory_as(value_ptr)?; ctx.ext.set_rent_allowance(value); - Ok(()) }, diff --git a/frame/contracts/src/weights.rs b/frame/contracts/src/weights.rs index 5edb4170e4eab..503d952b110ed 100644 --- a/frame/contracts/src/weights.rs +++ b/frame/contracts/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_contracts //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 -//! DATE: 2021-06-19, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2021-06-08, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -48,9 +48,11 @@ pub trait WeightInfo { fn on_initialize_per_trie_key(k: u32, ) -> Weight; fn on_initialize_per_queue_item(q: u32, ) -> Weight; fn instrument(c: u32, ) -> Weight; + fn code_load(c: u32, ) -> Weight; + fn code_refcount(c: u32, ) -> Weight; fn instantiate_with_code(c: u32, s: u32, ) -> Weight; - fn instantiate(c: u32, s: u32, ) -> Weight; - fn call(c: u32, ) -> Weight; + fn instantiate(s: u32, ) -> Weight; + fn call() -> Weight; fn claim_surcharge(c: u32, ) -> Weight; fn seal_caller(r: u32, ) -> Weight; fn seal_address(r: u32, ) -> Weight; @@ -69,9 +71,8 @@ pub trait WeightInfo { fn seal_return(r: u32, ) -> Weight; fn seal_return_per_kb(n: u32, ) -> Weight; fn seal_terminate(r: u32, ) -> Weight; - fn seal_terminate_per_code_kb(c: u32, ) -> Weight; fn seal_restore_to(r: u32, ) -> Weight; - fn seal_restore_to_per_code_kb_delta(c: u32, t: u32, d: u32, ) -> Weight; + fn seal_restore_to_per_delta(d: u32, ) -> Weight; fn seal_random(r: u32, ) -> Weight; fn seal_deposit_event(r: u32, ) -> Weight; fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight; @@ -84,9 +85,9 @@ pub trait WeightInfo { fn seal_get_storage_per_kb(n: u32, ) -> Weight; fn seal_transfer(r: u32, ) -> Weight; fn seal_call(r: u32, ) -> Weight; - fn seal_call_per_code_transfer_input_output_kb(c: u32, t: u32, i: u32, o: u32, ) -> Weight; + fn seal_call_per_transfer_input_output_kb(t: u32, i: u32, o: u32, ) -> Weight; fn seal_instantiate(r: u32, ) -> Weight; - fn seal_instantiate_per_code_input_output_salt_kb(c: u32, i: u32, o: u32, s: u32, ) -> Weight; + fn seal_instantiate_per_input_output_salt_kb(i: u32, o: u32, s: u32, ) -> Weight; fn seal_hash_sha2_256(r: u32, ) -> Weight; fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight; fn seal_hash_keccak_256(r: u32, ) -> Weight; @@ -152,272 +153,270 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn on_initialize() -> Weight { - (3_603_000 as Weight) + (4_636_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) } fn 
on_initialize_per_trie_key(k: u32, ) -> Weight { (0 as Weight) - // Standard Error: 2_000 - .saturating_add((2_217_000 as Weight).saturating_mul(k as Weight)) + // Standard Error: 3_000 + .saturating_add((2_851_000 as Weight).saturating_mul(k as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) } fn on_initialize_per_queue_item(q: u32, ) -> Weight { (0 as Weight) - // Standard Error: 6_000 - .saturating_add((36_769_000 as Weight).saturating_mul(q as Weight)) + // Standard Error: 11_000 + .saturating_add((38_093_000 as Weight).saturating_mul(q as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn instrument(c: u32, ) -> Weight { - (54_463_000 as Weight) - // Standard Error: 105_000 - .saturating_add((77_542_000 as Weight).saturating_mul(c as Weight)) + (60_027_000 as Weight) + // Standard Error: 109_000 + .saturating_add((169_008_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn code_load(c: u32, ) -> Weight { + (7_881_000 as Weight) + // Standard Error: 0 + .saturating_add((2_007_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + } + fn code_refcount(c: u32, ) -> Weight { + (12_861_000 as Weight) + // Standard Error: 0 + .saturating_add((3_028_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn instantiate_with_code(c: u32, s: u32, ) -> Weight { - (184_114_000 as Weight) - // Standard Error: 82_000 - .saturating_add((117_247_000 as Weight).saturating_mul(c as Weight)) - // Standard Error: 5_000 - .saturating_add((1_542_000 as Weight).saturating_mul(s as Weight)) + (189_624_000 as Weight) + // Standard Error: 120_000 + .saturating_add((244_984_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 7_000 + .saturating_add((1_588_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } - fn instantiate(c: u32, s: u32, ) -> Weight { - (183_501_000 as Weight) - // Standard Error: 2_000 - .saturating_add((5_645_000 as Weight).saturating_mul(c as Weight)) + fn instantiate(s: u32, ) -> Weight { + (224_867_000 as Weight) // Standard Error: 0 - .saturating_add((1_473_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((1_476_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } - fn call(c: u32, ) -> Weight { - (173_411_000 as Weight) - // Standard Error: 1_000 - .saturating_add((2_639_000 as Weight).saturating_mul(c as Weight)) + fn call() -> Weight { + (197_338_000 as Weight) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn claim_surcharge(c: u32, ) -> Weight { - (125_839_000 as Weight) - // Standard Error: 0 - .saturating_add((3_123_000 as Weight).saturating_mul(c as Weight)) + (147_775_000 as Weight) + // Standard Error: 5_000 + .saturating_add((3_094_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn 
seal_caller(r: u32, ) -> Weight { - (131_793_000 as Weight) - // Standard Error: 84_000 - .saturating_add((231_138_000 as Weight).saturating_mul(r as Weight)) + (150_159_000 as Weight) + // Standard Error: 90_000 + .saturating_add((274_529_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_address(r: u32, ) -> Weight { - (129_995_000 as Weight) - // Standard Error: 78_000 - .saturating_add((231_839_000 as Weight).saturating_mul(r as Weight)) + (140_207_000 as Weight) + // Standard Error: 116_000 + .saturating_add((276_569_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_gas_left(r: u32, ) -> Weight { - (129_710_000 as Weight) - // Standard Error: 85_000 - .saturating_add((227_268_000 as Weight).saturating_mul(r as Weight)) + (156_581_000 as Weight) + // Standard Error: 107_000 + .saturating_add((270_368_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_balance(r: u32, ) -> Weight { - (133_445_000 as Weight) - // Standard Error: 144_000 - .saturating_add((487_125_000 as Weight).saturating_mul(r as Weight)) + (141_778_000 as Weight) + // Standard Error: 305_000 + .saturating_add((615_927_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_value_transferred(r: u32, ) -> Weight { - (129_299_000 as Weight) - // Standard Error: 82_000 - .saturating_add((227_118_000 as Weight).saturating_mul(r as Weight)) + (138_752_000 as Weight) + // Standard Error: 91_000 + .saturating_add((280_176_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_minimum_balance(r: u32, ) -> Weight { - (126_120_000 as Weight) - // Standard Error: 114_000 - .saturating_add((227_326_000 as Weight).saturating_mul(r as Weight)) + (141_089_000 as Weight) + // Standard Error: 82_000 + .saturating_add((274_199_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_tombstone_deposit(r: u32, ) -> Weight { - (130_934_000 as Weight) - // Standard Error: 89_000 - .saturating_add((226_638_000 as Weight).saturating_mul(r as Weight)) + (140_447_000 as Weight) + // Standard Error: 119_000 + .saturating_add((270_823_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_rent_allowance(r: u32, ) -> Weight { - (128_738_000 as Weight) - // Standard Error: 77_000 - .saturating_add((227_062_000 as Weight).saturating_mul(r as Weight)) + (138_394_000 as Weight) + // Standard Error: 105_000 + .saturating_add((275_261_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_block_number(r: u32, ) -> Weight { - (132_375_000 as Weight) - // Standard Error: 88_000 - .saturating_add((226_861_000 as Weight).saturating_mul(r as Weight)) + (151_633_000 as Weight) + // Standard Error: 109_000 + .saturating_add((269_666_000 as Weight).saturating_mul(r as Weight)) 
.saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_now(r: u32, ) -> Weight { - (127_888_000 as Weight) - // Standard Error: 86_000 - .saturating_add((227_851_000 as Weight).saturating_mul(r as Weight)) + (129_087_000 as Weight) + // Standard Error: 252_000 + .saturating_add((277_368_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_weight_to_fee(r: u32, ) -> Weight { - (131_825_000 as Weight) - // Standard Error: 149_000 - .saturating_add((420_149_000 as Weight).saturating_mul(r as Weight)) + (176_205_000 as Weight) + // Standard Error: 304_000 + .saturating_add((555_094_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_gas(r: u32, ) -> Weight { - (113_641_000 as Weight) - // Standard Error: 114_000 - .saturating_add((113_068_000 as Weight).saturating_mul(r as Weight)) + (129_942_000 as Weight) + // Standard Error: 92_000 + .saturating_add((144_914_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_input(r: u32, ) -> Weight { - (122_982_000 as Weight) - // Standard Error: 74_000 - .saturating_add((6_828_000 as Weight).saturating_mul(r as Weight)) + (141_540_000 as Weight) + // Standard Error: 68_000 + .saturating_add((6_576_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_input_per_kb(n: u32, ) -> Weight { - (131_913_000 as Weight) + (150_832_000 as Weight) // Standard Error: 0 - .saturating_add((275_000 as Weight).saturating_mul(n as Weight)) + .saturating_add((263_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_return(r: u32, ) -> Weight { - (114_164_000 as Weight) - // Standard Error: 72_000 - .saturating_add((4_318_000 as Weight).saturating_mul(r as Weight)) + (135_920_000 as Weight) + // Standard Error: 61_000 + .saturating_add((3_733_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_return_per_kb(n: u32, ) -> Weight { - (123_940_000 as Weight) + (144_104_000 as Weight) // Standard Error: 0 - .saturating_add((664_000 as Weight).saturating_mul(n as Weight)) + .saturating_add((640_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_terminate(r: u32, ) -> Weight { - (123_340_000 as Weight) - // Standard Error: 99_000 - .saturating_add((89_126_000 as Weight).saturating_mul(r as Weight)) + (141_631_000 as Weight) + // Standard Error: 70_000 + .saturating_add((112_747_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((2 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) .saturating_add(T::DbWeight::get().writes((4 as Weight).saturating_mul(r as Weight))) } - fn seal_terminate_per_code_kb(c: u32, ) -> Weight { - (217_499_000 as Weight) - // Standard Error: 1_000 - .saturating_add((5_608_000 as 
Weight).saturating_mul(c as Weight)) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) - } fn seal_restore_to(r: u32, ) -> Weight { - (149_019_000 as Weight) - // Standard Error: 903_000 - .saturating_add((87_433_000 as Weight).saturating_mul(r as Weight)) + (168_955_000 as Weight) + // Standard Error: 211_000 + .saturating_add((119_247_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((4 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) .saturating_add(T::DbWeight::get().writes((5 as Weight).saturating_mul(r as Weight))) } - fn seal_restore_to_per_code_kb_delta(c: u32, t: u32, d: u32, ) -> Weight { - (18_255_000 as Weight) - // Standard Error: 141_000 - .saturating_add((5_142_000 as Weight).saturating_mul(c as Weight)) - // Standard Error: 141_000 - .saturating_add((2_478_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 1_242_000 - .saturating_add((2_935_421_000 as Weight).saturating_mul(d as Weight)) + fn seal_restore_to_per_delta(d: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 3_299_000 + .saturating_add((3_257_862_000 as Weight).saturating_mul(d as Weight)) .saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(d as Weight))) .saturating_add(T::DbWeight::get().writes(7 as Weight)) .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(d as Weight))) } fn seal_random(r: u32, ) -> Weight { - (140_411_000 as Weight) - // Standard Error: 146_000 - .saturating_add((566_687_000 as Weight).saturating_mul(r as Weight)) + (124_927_000 as Weight) + // Standard Error: 407_000 + .saturating_add((730_247_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_deposit_event(r: u32, ) -> Weight { - (132_048_000 as Weight) - // Standard Error: 308_000 - .saturating_add((818_622_000 as Weight).saturating_mul(r as Weight)) + (135_014_000 as Weight) + // Standard Error: 892_000 + .saturating_add((1_131_992_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - (1_080_578_000 as Weight) - // Standard Error: 2_337_000 - .saturating_add((534_525_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 460_000 - .saturating_add((167_990_000 as Weight).saturating_mul(n as Weight)) + (1_401_344_000 as Weight) + // Standard Error: 2_961_000 + .saturating_add((701_918_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 583_000 + .saturating_add((169_206_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(t as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(t as Weight))) } fn seal_set_rent_allowance(r: u32, ) -> Weight { - (123_998_000 as Weight) - // Standard Error: 53_000 - .saturating_add((155_113_000 as Weight).saturating_mul(r as Weight)) + (146_753_000 as Weight) + // Standard Error: 117_000 + .saturating_add((194_150_000 as Weight).saturating_mul(r as Weight)) 
.saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_debug_message(r: u32, ) -> Weight { - (120_514_000 as Weight) - // Standard Error: 93_000 - .saturating_add((124_243_000 as Weight).saturating_mul(r as Weight)) + (141_972_000 as Weight) + // Standard Error: 114_000 + .saturating_add((164_981_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_set_storage(r: u32, ) -> Weight { - (47_131_000 as Weight) - // Standard Error: 931_000 - .saturating_add((4_033_062_000 as Weight).saturating_mul(r as Weight)) + (549_424_000 as Weight) + // Standard Error: 7_901_000 + .saturating_add((4_159_879_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) } fn seal_set_storage_per_kb(n: u32, ) -> Weight { - (549_577_000 as Weight) - // Standard Error: 192_000 - .saturating_add((57_815_000 as Weight).saturating_mul(n as Weight)) + (682_814_000 as Weight) + // Standard Error: 229_000 + .saturating_add((59_572_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn seal_clear_storage(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 2_635_000 - .saturating_add((1_214_454_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 2_889_000 + .saturating_add((1_563_117_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) @@ -425,23 +424,23 @@ impl WeightInfo for SubstrateWeight { } fn seal_get_storage(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 1_044_000 - .saturating_add((883_653_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 2_414_000 + .saturating_add((1_178_803_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_get_storage_per_kb(n: u32, ) -> Weight { - (568_190_000 as Weight) - // Standard Error: 181_000 - .saturating_add((106_420_000 as Weight).saturating_mul(n as Weight)) + (696_056_000 as Weight) + // Standard Error: 266_000 + .saturating_add((108_870_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_transfer(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 1_553_000 - .saturating_add((4_810_405_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 2_764_000 + .saturating_add((6_397_838_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) @@ -449,631 +448,625 @@ impl WeightInfo for SubstrateWeight { } fn seal_call(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 8_671_000 - 
.saturating_add((10_965_308_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 8_279_000 + .saturating_add((13_318_274_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().reads((200 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) } - fn seal_call_per_code_transfer_input_output_kb(c: u32, t: u32, i: u32, o: u32, ) -> Weight { - (10_138_403_000 as Weight) - // Standard Error: 162_000 - .saturating_add((264_871_000 as Weight).saturating_mul(c as Weight)) - // Standard Error: 67_846_000 - .saturating_add((3_793_372_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 21_000 - .saturating_add((49_168_000 as Weight).saturating_mul(i as Weight)) - // Standard Error: 22_000 - .saturating_add((71_664_000 as Weight).saturating_mul(o as Weight)) + fn seal_call_per_transfer_input_output_kb(t: u32, i: u32, o: u32, ) -> Weight { + (13_411_599_000 as Weight) + // Standard Error: 40_931_000 + .saturating_add((4_291_567_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 14_000 + .saturating_add((48_818_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 15_000 + .saturating_add((68_502_000 as Weight).saturating_mul(o as Weight)) .saturating_add(T::DbWeight::get().reads(205 as Weight)) .saturating_add(T::DbWeight::get().writes(101 as Weight)) .saturating_add(T::DbWeight::get().writes((101 as Weight).saturating_mul(t as Weight))) } fn seal_instantiate(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 34_546_000 - .saturating_add((19_938_393_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 31_671_000 + .saturating_add((24_164_540_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().reads((300 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(3 as Weight)) .saturating_add(T::DbWeight::get().writes((300 as Weight).saturating_mul(r as Weight))) } - fn seal_instantiate_per_code_input_output_salt_kb(c: u32, i: u32, o: u32, s: u32, ) -> Weight { - (8_861_543_000 as Weight) - // Standard Error: 566_000 - .saturating_add((585_057_000 as Weight).saturating_mul(c as Weight)) - // Standard Error: 80_000 - .saturating_add((52_025_000 as Weight).saturating_mul(i as Weight)) - // Standard Error: 80_000 - .saturating_add((75_956_000 as Weight).saturating_mul(o as Weight)) - // Standard Error: 80_000 - .saturating_add((198_033_000 as Weight).saturating_mul(s as Weight)) + fn seal_instantiate_per_input_output_salt_kb(i: u32, o: u32, s: u32, ) -> Weight { + (17_228_488_000 as Weight) + // Standard Error: 26_000 + .saturating_add((50_822_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 26_000 + .saturating_add((71_276_000 as Weight).saturating_mul(o as Weight)) + // Standard Error: 26_000 + .saturating_add((198_669_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(206 as Weight)) .saturating_add(T::DbWeight::get().writes(204 as Weight)) } fn seal_hash_sha2_256(r: u32, ) -> Weight { - (129_022_000 as Weight) - // Standard Error: 76_000 - .saturating_add((216_764_000 as Weight).saturating_mul(r as Weight)) + (149_183_000 as Weight) + // Standard Error: 99_000 + .saturating_add((279_233_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as 
Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { - (414_489_000 as Weight) + (457_629_000 as Weight) // Standard Error: 14_000 - .saturating_add((481_873_000 as Weight).saturating_mul(n as Weight)) + .saturating_add((480_686_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_hash_keccak_256(r: u32, ) -> Weight { - (127_636_000 as Weight) - // Standard Error: 104_000 - .saturating_add((225_094_000 as Weight).saturating_mul(r as Weight)) + (141_603_000 as Weight) + // Standard Error: 120_000 + .saturating_add((283_527_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { - (216_668_000 as Weight) - // Standard Error: 16_000 - .saturating_add((331_423_000 as Weight).saturating_mul(n as Weight)) + (463_644_000 as Weight) + // Standard Error: 18_000 + .saturating_add((332_183_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_hash_blake2_256(r: u32, ) -> Weight { - (129_582_000 as Weight) - // Standard Error: 97_000 - .saturating_add((198_429_000 as Weight).saturating_mul(r as Weight)) + (144_145_000 as Weight) + // Standard Error: 113_000 + .saturating_add((252_640_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { - (288_991_000 as Weight) - // Standard Error: 20_000 - .saturating_add((148_497_000 as Weight).saturating_mul(n as Weight)) + (455_101_000 as Weight) + // Standard Error: 23_000 + .saturating_add((149_174_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_hash_blake2_128(r: u32, ) -> Weight { - (128_711_000 as Weight) - // Standard Error: 94_000 - .saturating_add((197_050_000 as Weight).saturating_mul(r as Weight)) + (147_166_000 as Weight) + // Standard Error: 233_000 + .saturating_add((254_430_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { - (275_444_000 as Weight) - // Standard Error: 18_000 - .saturating_add((148_469_000 as Weight).saturating_mul(n as Weight)) + (445_667_000 as Weight) + // Standard Error: 24_000 + .saturating_add((149_178_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn instr_i64const(r: u32, ) -> Weight { - (20_089_000 as Weight) - // Standard Error: 26_000 - .saturating_add((3_376_000 as Weight).saturating_mul(r as Weight)) + (21_505_000 as Weight) + // Standard Error: 10_000 + .saturating_add((7_963_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64load(r: u32, ) -> Weight { - (22_187_000 as Weight) - // Standard Error: 31_000 - .saturating_add((162_969_000 as Weight).saturating_mul(r as Weight)) + (24_775_000 as Weight) + // Standard Error: 37_000 + .saturating_add((157_130_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64store(r: u32, ) -> Weight { - 
(22_292_000 as Weight) - // Standard Error: 39_000 - .saturating_add((233_277_000 as Weight).saturating_mul(r as Weight)) + (24_722_000 as Weight) + // Standard Error: 69_000 + .saturating_add((240_564_000 as Weight).saturating_mul(r as Weight)) } fn instr_select(r: u32, ) -> Weight { - (20_083_000 as Weight) - // Standard Error: 24_000 - .saturating_add((12_378_000 as Weight).saturating_mul(r as Weight)) + (21_506_000 as Weight) + // Standard Error: 21_000 + .saturating_add((45_277_000 as Weight).saturating_mul(r as Weight)) } fn instr_if(r: u32, ) -> Weight { - (20_091_000 as Weight) - // Standard Error: 24_000 - .saturating_add((12_195_000 as Weight).saturating_mul(r as Weight)) + (21_587_000 as Weight) + // Standard Error: 18_000 + .saturating_add((42_269_000 as Weight).saturating_mul(r as Weight)) } fn instr_br(r: u32, ) -> Weight { - (20_082_000 as Weight) - // Standard Error: 18_000 - .saturating_add((6_151_000 as Weight).saturating_mul(r as Weight)) + (21_538_000 as Weight) + // Standard Error: 807_000 + .saturating_add((22_392_000 as Weight).saturating_mul(r as Weight)) } fn instr_br_if(r: u32, ) -> Weight { - (20_031_000 as Weight) - // Standard Error: 13_000 - .saturating_add((13_978_000 as Weight).saturating_mul(r as Weight)) + (21_634_000 as Weight) + // Standard Error: 57_000 + .saturating_add((44_203_000 as Weight).saturating_mul(r as Weight)) } fn instr_br_table(r: u32, ) -> Weight { - (20_063_000 as Weight) - // Standard Error: 21_000 - .saturating_add((15_524_000 as Weight).saturating_mul(r as Weight)) + (21_531_000 as Weight) + // Standard Error: 19_000 + .saturating_add((33_198_000 as Weight).saturating_mul(r as Weight)) } fn instr_br_table_per_entry(e: u32, ) -> Weight { - (34_332_000 as Weight) - // Standard Error: 0 - .saturating_add((117_000 as Weight).saturating_mul(e as Weight)) + (60_960_000 as Weight) + // Standard Error: 1_000 + .saturating_add((151_000 as Weight).saturating_mul(e as Weight)) } fn instr_call(r: u32, ) -> Weight { - (20_446_000 as Weight) - // Standard Error: 121_000 - .saturating_add((90_977_000 as Weight).saturating_mul(r as Weight)) + (21_777_000 as Weight) + // Standard Error: 141_000 + .saturating_add((245_105_000 as Weight).saturating_mul(r as Weight)) } fn instr_call_indirect(r: u32, ) -> Weight { - (28_119_000 as Weight) - // Standard Error: 390_000 - .saturating_add((192_865_000 as Weight).saturating_mul(r as Weight)) + (34_307_000 as Weight) + // Standard Error: 365_000 + .saturating_add((344_623_000 as Weight).saturating_mul(r as Weight)) } fn instr_call_indirect_per_param(p: u32, ) -> Weight { - (228_352_000 as Weight) - // Standard Error: 4_000 - .saturating_add((3_891_000 as Weight).saturating_mul(p as Weight)) + (398_310_000 as Weight) + // Standard Error: 6_000 + .saturating_add((4_163_000 as Weight).saturating_mul(p as Weight)) } fn instr_local_get(r: u32, ) -> Weight { - (37_745_000 as Weight) - // Standard Error: 13_000 - .saturating_add((3_135_000 as Weight).saturating_mul(r as Weight)) + (40_478_000 as Weight) + // Standard Error: 19_000 + .saturating_add((9_991_000 as Weight).saturating_mul(r as Weight)) } fn instr_local_set(r: u32, ) -> Weight { - (37_639_000 as Weight) - // Standard Error: 15_000 - .saturating_add((3_541_000 as Weight).saturating_mul(r as Weight)) + (40_427_000 as Weight) + // Standard Error: 26_000 + .saturating_add((8_526_000 as Weight).saturating_mul(r as Weight)) } fn instr_local_tee(r: u32, ) -> Weight { - (37_639_000 as Weight) - // Standard Error: 23_000 - .saturating_add((4_813_000 as 
Weight).saturating_mul(r as Weight)) + (40_463_000 as Weight) + // Standard Error: 19_000 + .saturating_add((16_497_000 as Weight).saturating_mul(r as Weight)) } fn instr_global_get(r: u32, ) -> Weight { - (23_379_000 as Weight) - // Standard Error: 27_000 - .saturating_add((7_757_000 as Weight).saturating_mul(r as Weight)) + (25_998_000 as Weight) + // Standard Error: 21_000 + .saturating_add((18_214_000 as Weight).saturating_mul(r as Weight)) } fn instr_global_set(r: u32, ) -> Weight { - (23_378_000 as Weight) - // Standard Error: 68_000 - .saturating_add((8_437_000 as Weight).saturating_mul(r as Weight)) + (25_972_000 as Weight) + // Standard Error: 42_000 + .saturating_add((18_901_000 as Weight).saturating_mul(r as Weight)) } fn instr_memory_current(r: u32, ) -> Weight { - (22_245_000 as Weight) + (24_949_000 as Weight) // Standard Error: 17_000 - .saturating_add((3_446_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((8_541_000 as Weight).saturating_mul(r as Weight)) } fn instr_memory_grow(r: u32, ) -> Weight { - (20_714_000 as Weight) - // Standard Error: 478_000 - .saturating_add((2_314_540_000 as Weight).saturating_mul(r as Weight)) + (22_204_000 as Weight) + // Standard Error: 4_776_000 + .saturating_add((2_198_462_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64clz(r: u32, ) -> Weight { - (20_126_000 as Weight) - // Standard Error: 15_000 - .saturating_add((5_316_000 as Weight).saturating_mul(r as Weight)) + (21_506_000 as Weight) + // Standard Error: 18_000 + .saturating_add((25_302_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ctz(r: u32, ) -> Weight { - (20_107_000 as Weight) - // Standard Error: 23_000 - .saturating_add((5_344_000 as Weight).saturating_mul(r as Weight)) + (21_523_000 as Weight) + // Standard Error: 29_000 + .saturating_add((25_206_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64popcnt(r: u32, ) -> Weight { - (20_135_000 as Weight) - // Standard Error: 22_000 - .saturating_add((5_909_000 as Weight).saturating_mul(r as Weight)) + (21_567_000 as Weight) + // Standard Error: 466_000 + .saturating_add((19_925_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64eqz(r: u32, ) -> Weight { - (20_107_000 as Weight) - // Standard Error: 19_000 - .saturating_add((5_515_000 as Weight).saturating_mul(r as Weight)) + (21_569_000 as Weight) + // Standard Error: 30_000 + .saturating_add((25_027_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64extendsi32(r: u32, ) -> Weight { - (20_229_000 as Weight) - // Standard Error: 18_000 - .saturating_add((5_113_000 as Weight).saturating_mul(r as Weight)) + (21_536_000 as Weight) + // Standard Error: 193_000 + .saturating_add((17_690_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64extendui32(r: u32, ) -> Weight { - (20_070_000 as Weight) - // Standard Error: 11_000 - .saturating_add((5_226_000 as Weight).saturating_mul(r as Weight)) + (21_555_000 as Weight) + // Standard Error: 356_000 + .saturating_add((17_105_000 as Weight).saturating_mul(r as Weight)) } fn instr_i32wrapi64(r: u32, ) -> Weight { - (20_090_000 as Weight) - // Standard Error: 15_000 - .saturating_add((5_296_000 as Weight).saturating_mul(r as Weight)) + (21_561_000 as Weight) + // Standard Error: 1_038_000 + .saturating_add((22_198_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64eq(r: u32, ) -> Weight { - (20_095_000 as Weight) - // Standard Error: 13_000 - .saturating_add((7_323_000 as Weight).saturating_mul(r as Weight)) + (21_513_000 as Weight) + // Standard Error: 21_000 + 
.saturating_add((33_620_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ne(r: u32, ) -> Weight { - (20_043_000 as Weight) - // Standard Error: 10_000 - .saturating_add((7_280_000 as Weight).saturating_mul(r as Weight)) + (21_556_000 as Weight) + // Standard Error: 17_000 + .saturating_add((33_669_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64lts(r: u32, ) -> Weight { - (20_061_000 as Weight) - // Standard Error: 15_000 - .saturating_add((7_226_000 as Weight).saturating_mul(r as Weight)) + (21_571_000 as Weight) + // Standard Error: 19_000 + .saturating_add((33_649_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ltu(r: u32, ) -> Weight { - (20_072_000 as Weight) - // Standard Error: 24_000 - .saturating_add((7_315_000 as Weight).saturating_mul(r as Weight)) + (21_533_000 as Weight) + // Standard Error: 23_000 + .saturating_add((33_450_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64gts(r: u32, ) -> Weight { - (20_054_000 as Weight) - // Standard Error: 27_000 - .saturating_add((7_228_000 as Weight).saturating_mul(r as Weight)) + (21_525_000 as Weight) + // Standard Error: 24_000 + .saturating_add((33_727_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64gtu(r: u32, ) -> Weight { - (20_169_000 as Weight) - // Standard Error: 18_000 - .saturating_add((7_262_000 as Weight).saturating_mul(r as Weight)) + (21_546_000 as Weight) + // Standard Error: 16_000 + .saturating_add((33_420_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64les(r: u32, ) -> Weight { - (20_115_000 as Weight) - // Standard Error: 21_000 - .saturating_add((7_212_000 as Weight).saturating_mul(r as Weight)) + (21_546_000 as Weight) + // Standard Error: 22_000 + .saturating_add((33_720_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64leu(r: u32, ) -> Weight { - (20_122_000 as Weight) - // Standard Error: 23_000 - .saturating_add((7_166_000 as Weight).saturating_mul(r as Weight)) + (21_546_000 as Weight) + // Standard Error: 20_000 + .saturating_add((33_383_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ges(r: u32, ) -> Weight { - (20_140_000 as Weight) - // Standard Error: 16_000 - .saturating_add((7_242_000 as Weight).saturating_mul(r as Weight)) + (21_577_000 as Weight) + // Standard Error: 27_000 + .saturating_add((33_454_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64geu(r: u32, ) -> Weight { - (20_107_000 as Weight) - // Standard Error: 18_000 - .saturating_add((7_365_000 as Weight).saturating_mul(r as Weight)) + (21_566_000 as Weight) + // Standard Error: 25_000 + .saturating_add((33_665_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64add(r: u32, ) -> Weight { - (20_179_000 as Weight) - // Standard Error: 14_000 - .saturating_add((7_144_000 as Weight).saturating_mul(r as Weight)) + (21_524_000 as Weight) + // Standard Error: 22_000 + .saturating_add((33_351_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64sub(r: u32, ) -> Weight { - (20_143_000 as Weight) - // Standard Error: 17_000 - .saturating_add((7_222_000 as Weight).saturating_mul(r as Weight)) + (21_558_000 as Weight) + // Standard Error: 18_000 + .saturating_add((33_423_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64mul(r: u32, ) -> Weight { - (20_129_000 as Weight) - // Standard Error: 21_000 - .saturating_add((7_247_000 as Weight).saturating_mul(r as Weight)) + (21_554_000 as Weight) + // Standard Error: 17_000 + .saturating_add((33_588_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64divs(r: u32, ) -> Weight { - (20_107_000 as Weight) - 
// Standard Error: 16_000 - .saturating_add((12_953_000 as Weight).saturating_mul(r as Weight)) + (21_568_000 as Weight) + // Standard Error: 29_000 + .saturating_add((38_897_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64divu(r: u32, ) -> Weight { - (20_093_000 as Weight) - // Standard Error: 17_000 - .saturating_add((12_040_000 as Weight).saturating_mul(r as Weight)) + (21_567_000 as Weight) + // Standard Error: 31_000 + .saturating_add((38_756_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64rems(r: u32, ) -> Weight { - (20_102_000 as Weight) - // Standard Error: 13_000 - .saturating_add((12_945_000 as Weight).saturating_mul(r as Weight)) + (21_540_000 as Weight) + // Standard Error: 20_000 + .saturating_add((39_244_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64remu(r: u32, ) -> Weight { - (20_132_000 as Weight) - // Standard Error: 16_000 - .saturating_add((12_199_000 as Weight).saturating_mul(r as Weight)) + (21_581_000 as Weight) + // Standard Error: 24_000 + .saturating_add((38_461_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64and(r: u32, ) -> Weight { - (20_155_000 as Weight) - // Standard Error: 26_000 - .saturating_add((7_103_000 as Weight).saturating_mul(r as Weight)) + (21_555_000 as Weight) + // Standard Error: 24_000 + .saturating_add((33_367_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64or(r: u32, ) -> Weight { - (20_088_000 as Weight) - // Standard Error: 22_000 - .saturating_add((7_213_000 as Weight).saturating_mul(r as Weight)) + (21_523_000 as Weight) + // Standard Error: 18_000 + .saturating_add((33_466_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64xor(r: u32, ) -> Weight { - (20_060_000 as Weight) - // Standard Error: 18_000 - .saturating_add((7_275_000 as Weight).saturating_mul(r as Weight)) + (21_536_000 as Weight) + // Standard Error: 34_000 + .saturating_add((33_452_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shl(r: u32, ) -> Weight { - (20_104_000 as Weight) - // Standard Error: 15_000 - .saturating_add((7_282_000 as Weight).saturating_mul(r as Weight)) + (21_567_000 as Weight) + // Standard Error: 24_000 + .saturating_add((33_809_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shrs(r: u32, ) -> Weight { - (20_111_000 as Weight) - // Standard Error: 20_000 - .saturating_add((7_264_000 as Weight).saturating_mul(r as Weight)) + (21_580_000 as Weight) + // Standard Error: 32_000 + .saturating_add((33_849_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shru(r: u32, ) -> Weight { - (20_096_000 as Weight) - // Standard Error: 23_000 - .saturating_add((7_347_000 as Weight).saturating_mul(r as Weight)) + (21_571_000 as Weight) + // Standard Error: 18_000 + .saturating_add((33_799_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64rotl(r: u32, ) -> Weight { - (20_091_000 as Weight) - // Standard Error: 16_000 - .saturating_add((7_370_000 as Weight).saturating_mul(r as Weight)) + (21_559_000 as Weight) + // Standard Error: 22_000 + .saturating_add((33_947_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64rotr(r: u32, ) -> Weight { - (20_102_000 as Weight) - // Standard Error: 28_000 - .saturating_add((7_266_000 as Weight).saturating_mul(r as Weight)) + (21_565_000 as Weight) + // Standard Error: 20_000 + .saturating_add((33_754_000 as Weight).saturating_mul(r as Weight)) } } // For backwards compatibility and tests impl WeightInfo for () { fn on_initialize() -> Weight { - (3_603_000 as Weight) + (4_636_000 as Weight) 
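// [Editor's note] The `impl WeightInfo for ()` beginning just above repeats every formula from
// `SubstrateWeight<T>`, but with database costs hard-coded to `RocksDbWeight` instead of the
// runtime's configured `T::DbWeight`; per the comment in the diff it exists for backwards
// compatibility and tests. A schematic, self-contained sketch of that dual-implementation
// pattern (the type names and the read cost below are simplified stand-ins, not frame_support
// items):
pub trait WeightInfoSketch {
	fn on_initialize() -> u64;
}
pub trait DbWeightSource {
	const READ: u64;
}
pub struct RocksDbWeightSketch;
impl DbWeightSource for RocksDbWeightSketch {
	const READ: u64 = 25_000_000; // assumed per-read cost, placeholder value
}
pub struct SubstrateWeightSketch<T>(core::marker::PhantomData<T>);
impl<T: DbWeightSource> WeightInfoSketch for SubstrateWeightSketch<T> {
	// generic impl: charges reads through whatever DbWeight the runtime configures
	fn on_initialize() -> u64 {
		4_636_000u64.saturating_add(T::READ)
	}
}
impl WeightInfoSketch for () {
	// fallback impl: same formula, reads priced with the hard-coded RocksDB figure
	fn on_initialize() -> u64 {
		4_636_000u64.saturating_add(RocksDbWeightSketch::READ)
	}
}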
.saturating_add(RocksDbWeight::get().reads(1 as Weight)) } fn on_initialize_per_trie_key(k: u32, ) -> Weight { (0 as Weight) - // Standard Error: 2_000 - .saturating_add((2_217_000 as Weight).saturating_mul(k as Weight)) + // Standard Error: 3_000 + .saturating_add((2_851_000 as Weight).saturating_mul(k as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) } fn on_initialize_per_queue_item(q: u32, ) -> Weight { (0 as Weight) - // Standard Error: 6_000 - .saturating_add((36_769_000 as Weight).saturating_mul(q as Weight)) + // Standard Error: 11_000 + .saturating_add((38_093_000 as Weight).saturating_mul(q as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn instrument(c: u32, ) -> Weight { - (54_463_000 as Weight) - // Standard Error: 105_000 - .saturating_add((77_542_000 as Weight).saturating_mul(c as Weight)) + (60_027_000 as Weight) + // Standard Error: 109_000 + .saturating_add((169_008_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn code_load(c: u32, ) -> Weight { + (7_881_000 as Weight) + // Standard Error: 0 + .saturating_add((2_007_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + } + fn code_refcount(c: u32, ) -> Weight { + (12_861_000 as Weight) + // Standard Error: 0 + .saturating_add((3_028_000 as Weight).saturating_mul(c as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn instantiate_with_code(c: u32, s: u32, ) -> Weight { - (184_114_000 as Weight) - // Standard Error: 82_000 - .saturating_add((117_247_000 as Weight).saturating_mul(c as Weight)) - // Standard Error: 5_000 - .saturating_add((1_542_000 as Weight).saturating_mul(s as Weight)) + (189_624_000 as Weight) + // Standard Error: 120_000 + .saturating_add((244_984_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 7_000 + .saturating_add((1_588_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } - fn instantiate(c: u32, s: u32, ) -> Weight { - (183_501_000 as Weight) - // Standard Error: 2_000 - .saturating_add((5_645_000 as Weight).saturating_mul(c as Weight)) + fn instantiate(s: u32, ) -> Weight { + (224_867_000 as Weight) // Standard Error: 0 - .saturating_add((1_473_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((1_476_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } - fn call(c: u32, ) -> Weight { - (173_411_000 as Weight) - // Standard Error: 1_000 - .saturating_add((2_639_000 as Weight).saturating_mul(c as Weight)) + fn call() -> Weight { + (197_338_000 as Weight) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn claim_surcharge(c: u32, ) -> Weight { - (125_839_000 as Weight) - // Standard Error: 0 - .saturating_add((3_123_000 as Weight).saturating_mul(c as Weight)) + (147_775_000 as Weight) + // Standard Error: 5_000 + .saturating_add((3_094_000 as Weight).saturating_mul(c as Weight)) 
.saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } fn seal_caller(r: u32, ) -> Weight { - (131_793_000 as Weight) - // Standard Error: 84_000 - .saturating_add((231_138_000 as Weight).saturating_mul(r as Weight)) + (150_159_000 as Weight) + // Standard Error: 90_000 + .saturating_add((274_529_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_address(r: u32, ) -> Weight { - (129_995_000 as Weight) - // Standard Error: 78_000 - .saturating_add((231_839_000 as Weight).saturating_mul(r as Weight)) + (140_207_000 as Weight) + // Standard Error: 116_000 + .saturating_add((276_569_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_gas_left(r: u32, ) -> Weight { - (129_710_000 as Weight) - // Standard Error: 85_000 - .saturating_add((227_268_000 as Weight).saturating_mul(r as Weight)) + (156_581_000 as Weight) + // Standard Error: 107_000 + .saturating_add((270_368_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_balance(r: u32, ) -> Weight { - (133_445_000 as Weight) - // Standard Error: 144_000 - .saturating_add((487_125_000 as Weight).saturating_mul(r as Weight)) + (141_778_000 as Weight) + // Standard Error: 305_000 + .saturating_add((615_927_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_value_transferred(r: u32, ) -> Weight { - (129_299_000 as Weight) - // Standard Error: 82_000 - .saturating_add((227_118_000 as Weight).saturating_mul(r as Weight)) + (138_752_000 as Weight) + // Standard Error: 91_000 + .saturating_add((280_176_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_minimum_balance(r: u32, ) -> Weight { - (126_120_000 as Weight) - // Standard Error: 114_000 - .saturating_add((227_326_000 as Weight).saturating_mul(r as Weight)) + (141_089_000 as Weight) + // Standard Error: 82_000 + .saturating_add((274_199_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_tombstone_deposit(r: u32, ) -> Weight { - (130_934_000 as Weight) - // Standard Error: 89_000 - .saturating_add((226_638_000 as Weight).saturating_mul(r as Weight)) + (140_447_000 as Weight) + // Standard Error: 119_000 + .saturating_add((270_823_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_rent_allowance(r: u32, ) -> Weight { - (128_738_000 as Weight) - // Standard Error: 77_000 - .saturating_add((227_062_000 as Weight).saturating_mul(r as Weight)) + (138_394_000 as Weight) + // Standard Error: 105_000 + .saturating_add((275_261_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_block_number(r: u32, ) -> Weight { - (132_375_000 as Weight) - // Standard Error: 88_000 - .saturating_add((226_861_000 as Weight).saturating_mul(r 
as Weight)) + (151_633_000 as Weight) + // Standard Error: 109_000 + .saturating_add((269_666_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_now(r: u32, ) -> Weight { - (127_888_000 as Weight) - // Standard Error: 86_000 - .saturating_add((227_851_000 as Weight).saturating_mul(r as Weight)) + (129_087_000 as Weight) + // Standard Error: 252_000 + .saturating_add((277_368_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_weight_to_fee(r: u32, ) -> Weight { - (131_825_000 as Weight) - // Standard Error: 149_000 - .saturating_add((420_149_000 as Weight).saturating_mul(r as Weight)) + (176_205_000 as Weight) + // Standard Error: 304_000 + .saturating_add((555_094_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_gas(r: u32, ) -> Weight { - (113_641_000 as Weight) - // Standard Error: 114_000 - .saturating_add((113_068_000 as Weight).saturating_mul(r as Weight)) + (129_942_000 as Weight) + // Standard Error: 92_000 + .saturating_add((144_914_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_input(r: u32, ) -> Weight { - (122_982_000 as Weight) - // Standard Error: 74_000 - .saturating_add((6_828_000 as Weight).saturating_mul(r as Weight)) + (141_540_000 as Weight) + // Standard Error: 68_000 + .saturating_add((6_576_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_input_per_kb(n: u32, ) -> Weight { - (131_913_000 as Weight) + (150_832_000 as Weight) // Standard Error: 0 - .saturating_add((275_000 as Weight).saturating_mul(n as Weight)) + .saturating_add((263_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_return(r: u32, ) -> Weight { - (114_164_000 as Weight) - // Standard Error: 72_000 - .saturating_add((4_318_000 as Weight).saturating_mul(r as Weight)) + (135_920_000 as Weight) + // Standard Error: 61_000 + .saturating_add((3_733_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_return_per_kb(n: u32, ) -> Weight { - (123_940_000 as Weight) + (144_104_000 as Weight) // Standard Error: 0 - .saturating_add((664_000 as Weight).saturating_mul(n as Weight)) + .saturating_add((640_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_terminate(r: u32, ) -> Weight { - (123_340_000 as Weight) - // Standard Error: 99_000 - .saturating_add((89_126_000 as Weight).saturating_mul(r as Weight)) + (141_631_000 as Weight) + // Standard Error: 70_000 + .saturating_add((112_747_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((2 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) .saturating_add(RocksDbWeight::get().writes((4 as 
Weight).saturating_mul(r as Weight))) } - fn seal_terminate_per_code_kb(c: u32, ) -> Weight { - (217_499_000 as Weight) - // Standard Error: 1_000 - .saturating_add((5_608_000 as Weight).saturating_mul(c as Weight)) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(5 as Weight)) - } fn seal_restore_to(r: u32, ) -> Weight { - (149_019_000 as Weight) - // Standard Error: 903_000 - .saturating_add((87_433_000 as Weight).saturating_mul(r as Weight)) + (168_955_000 as Weight) + // Standard Error: 211_000 + .saturating_add((119_247_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((4 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) .saturating_add(RocksDbWeight::get().writes((5 as Weight).saturating_mul(r as Weight))) } - fn seal_restore_to_per_code_kb_delta(c: u32, t: u32, d: u32, ) -> Weight { - (18_255_000 as Weight) - // Standard Error: 141_000 - .saturating_add((5_142_000 as Weight).saturating_mul(c as Weight)) - // Standard Error: 141_000 - .saturating_add((2_478_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 1_242_000 - .saturating_add((2_935_421_000 as Weight).saturating_mul(d as Weight)) + fn seal_restore_to_per_delta(d: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 3_299_000 + .saturating_add((3_257_862_000 as Weight).saturating_mul(d as Weight)) .saturating_add(RocksDbWeight::get().reads(7 as Weight)) .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(d as Weight))) .saturating_add(RocksDbWeight::get().writes(7 as Weight)) .saturating_add(RocksDbWeight::get().writes((100 as Weight).saturating_mul(d as Weight))) } fn seal_random(r: u32, ) -> Weight { - (140_411_000 as Weight) - // Standard Error: 146_000 - .saturating_add((566_687_000 as Weight).saturating_mul(r as Weight)) + (124_927_000 as Weight) + // Standard Error: 407_000 + .saturating_add((730_247_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_deposit_event(r: u32, ) -> Weight { - (132_048_000 as Weight) - // Standard Error: 308_000 - .saturating_add((818_622_000 as Weight).saturating_mul(r as Weight)) + (135_014_000 as Weight) + // Standard Error: 892_000 + .saturating_add((1_131_992_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - (1_080_578_000 as Weight) - // Standard Error: 2_337_000 - .saturating_add((534_525_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 460_000 - .saturating_add((167_990_000 as Weight).saturating_mul(n as Weight)) + (1_401_344_000 as Weight) + // Standard Error: 2_961_000 + .saturating_add((701_918_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 583_000 + .saturating_add((169_206_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(t as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) .saturating_add(RocksDbWeight::get().writes((100 as Weight).saturating_mul(t as Weight))) } fn seal_set_rent_allowance(r: u32, ) -> Weight { - (123_998_000 as Weight) - // Standard Error: 53_000 - 
.saturating_add((155_113_000 as Weight).saturating_mul(r as Weight)) + (146_753_000 as Weight) + // Standard Error: 117_000 + .saturating_add((194_150_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_debug_message(r: u32, ) -> Weight { - (120_514_000 as Weight) - // Standard Error: 93_000 - .saturating_add((124_243_000 as Weight).saturating_mul(r as Weight)) + (141_972_000 as Weight) + // Standard Error: 114_000 + .saturating_add((164_981_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_set_storage(r: u32, ) -> Weight { - (47_131_000 as Weight) - // Standard Error: 931_000 - .saturating_add((4_033_062_000 as Weight).saturating_mul(r as Weight)) + (549_424_000 as Weight) + // Standard Error: 7_901_000 + .saturating_add((4_159_879_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) .saturating_add(RocksDbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) } fn seal_set_storage_per_kb(n: u32, ) -> Weight { - (549_577_000 as Weight) - // Standard Error: 192_000 - .saturating_add((57_815_000 as Weight).saturating_mul(n as Weight)) + (682_814_000 as Weight) + // Standard Error: 229_000 + .saturating_add((59_572_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn seal_clear_storage(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 2_635_000 - .saturating_add((1_214_454_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 2_889_000 + .saturating_add((1_563_117_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) @@ -1081,23 +1074,23 @@ impl WeightInfo for () { } fn seal_get_storage(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 1_044_000 - .saturating_add((883_653_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 2_414_000 + .saturating_add((1_178_803_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_get_storage_per_kb(n: u32, ) -> Weight { - (568_190_000 as Weight) - // Standard Error: 181_000 - .saturating_add((106_420_000 as Weight).saturating_mul(n as Weight)) + (696_056_000 as Weight) + // Standard Error: 266_000 + .saturating_add((108_870_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_transfer(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 1_553_000 - .saturating_add((4_810_405_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 2_764_000 + .saturating_add((6_397_838_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) 
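// [Editor's note] Several of the formulas above multiply their database operations by `100 * r`
// rather than by `r`; the benchmarks appear to run each host function in batches, so one unit of
// the component stands for many storage accesses (the exact batch size is an assumption here,
// not stated in the diff). A self-contained sketch of how read/write counts become weight,
// mirroring the `seal_transfer` entry above (per-operation costs are placeholder values, not the
// real RocksDbWeight constants):
fn db_weight(reads: u64, writes: u64) -> u64 {
	const READ: u64 = 25_000_000; // assumed cost of one storage read
	const WRITE: u64 = 100_000_000; // assumed cost of one storage write
	reads.saturating_mul(READ).saturating_add(writes.saturating_mul(WRITE))
}
fn seal_transfer_sketch(r: u64) -> u64 {
	// slope per repetition, plus 4 fixed reads, 100 reads per repetition and 2 fixed writes,
	// as in the fragment above
	6_397_838_000u64
		.saturating_mul(r)
		.saturating_add(db_weight(4u64.saturating_add(100u64.saturating_mul(r)), 2))
}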
.saturating_add(RocksDbWeight::get().writes(2 as Weight)) @@ -1105,358 +1098,354 @@ impl WeightInfo for () { } fn seal_call(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 8_671_000 - .saturating_add((10_965_308_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 8_279_000 + .saturating_add((13_318_274_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().reads((200 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) .saturating_add(RocksDbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) } - fn seal_call_per_code_transfer_input_output_kb(c: u32, t: u32, i: u32, o: u32, ) -> Weight { - (10_138_403_000 as Weight) - // Standard Error: 162_000 - .saturating_add((264_871_000 as Weight).saturating_mul(c as Weight)) - // Standard Error: 67_846_000 - .saturating_add((3_793_372_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 21_000 - .saturating_add((49_168_000 as Weight).saturating_mul(i as Weight)) - // Standard Error: 22_000 - .saturating_add((71_664_000 as Weight).saturating_mul(o as Weight)) + fn seal_call_per_transfer_input_output_kb(t: u32, i: u32, o: u32, ) -> Weight { + (13_411_599_000 as Weight) + // Standard Error: 40_931_000 + .saturating_add((4_291_567_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 14_000 + .saturating_add((48_818_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 15_000 + .saturating_add((68_502_000 as Weight).saturating_mul(o as Weight)) .saturating_add(RocksDbWeight::get().reads(205 as Weight)) .saturating_add(RocksDbWeight::get().writes(101 as Weight)) .saturating_add(RocksDbWeight::get().writes((101 as Weight).saturating_mul(t as Weight))) } fn seal_instantiate(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 34_546_000 - .saturating_add((19_938_393_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 31_671_000 + .saturating_add((24_164_540_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().reads((300 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) .saturating_add(RocksDbWeight::get().writes((300 as Weight).saturating_mul(r as Weight))) } - fn seal_instantiate_per_code_input_output_salt_kb(c: u32, i: u32, o: u32, s: u32, ) -> Weight { - (8_861_543_000 as Weight) - // Standard Error: 566_000 - .saturating_add((585_057_000 as Weight).saturating_mul(c as Weight)) - // Standard Error: 80_000 - .saturating_add((52_025_000 as Weight).saturating_mul(i as Weight)) - // Standard Error: 80_000 - .saturating_add((75_956_000 as Weight).saturating_mul(o as Weight)) - // Standard Error: 80_000 - .saturating_add((198_033_000 as Weight).saturating_mul(s as Weight)) + fn seal_instantiate_per_input_output_salt_kb(i: u32, o: u32, s: u32, ) -> Weight { + (17_228_488_000 as Weight) + // Standard Error: 26_000 + .saturating_add((50_822_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 26_000 + .saturating_add((71_276_000 as Weight).saturating_mul(o as Weight)) + // Standard Error: 26_000 + .saturating_add((198_669_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(206 as Weight)) .saturating_add(RocksDbWeight::get().writes(204 as Weight)) } fn seal_hash_sha2_256(r: u32, ) -> Weight { - (129_022_000 as Weight) - // Standard Error: 76_000 - 
.saturating_add((216_764_000 as Weight).saturating_mul(r as Weight)) + (149_183_000 as Weight) + // Standard Error: 99_000 + .saturating_add((279_233_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { - (414_489_000 as Weight) + (457_629_000 as Weight) // Standard Error: 14_000 - .saturating_add((481_873_000 as Weight).saturating_mul(n as Weight)) + .saturating_add((480_686_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_hash_keccak_256(r: u32, ) -> Weight { - (127_636_000 as Weight) - // Standard Error: 104_000 - .saturating_add((225_094_000 as Weight).saturating_mul(r as Weight)) + (141_603_000 as Weight) + // Standard Error: 120_000 + .saturating_add((283_527_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { - (216_668_000 as Weight) - // Standard Error: 16_000 - .saturating_add((331_423_000 as Weight).saturating_mul(n as Weight)) + (463_644_000 as Weight) + // Standard Error: 18_000 + .saturating_add((332_183_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_hash_blake2_256(r: u32, ) -> Weight { - (129_582_000 as Weight) - // Standard Error: 97_000 - .saturating_add((198_429_000 as Weight).saturating_mul(r as Weight)) + (144_145_000 as Weight) + // Standard Error: 113_000 + .saturating_add((252_640_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { - (288_991_000 as Weight) - // Standard Error: 20_000 - .saturating_add((148_497_000 as Weight).saturating_mul(n as Weight)) + (455_101_000 as Weight) + // Standard Error: 23_000 + .saturating_add((149_174_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_hash_blake2_128(r: u32, ) -> Weight { - (128_711_000 as Weight) - // Standard Error: 94_000 - .saturating_add((197_050_000 as Weight).saturating_mul(r as Weight)) + (147_166_000 as Weight) + // Standard Error: 233_000 + .saturating_add((254_430_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { - (275_444_000 as Weight) - // Standard Error: 18_000 - .saturating_add((148_469_000 as Weight).saturating_mul(n as Weight)) + (445_667_000 as Weight) + // Standard Error: 24_000 + .saturating_add((149_178_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn instr_i64const(r: u32, ) -> Weight { - (20_089_000 as Weight) - // Standard Error: 26_000 - .saturating_add((3_376_000 as Weight).saturating_mul(r as Weight)) + (21_505_000 as Weight) + // Standard Error: 10_000 + .saturating_add((7_963_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64load(r: u32, ) -> Weight { - (22_187_000 as Weight) - 
// Standard Error: 31_000 - .saturating_add((162_969_000 as Weight).saturating_mul(r as Weight)) + (24_775_000 as Weight) + // Standard Error: 37_000 + .saturating_add((157_130_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64store(r: u32, ) -> Weight { - (22_292_000 as Weight) - // Standard Error: 39_000 - .saturating_add((233_277_000 as Weight).saturating_mul(r as Weight)) + (24_722_000 as Weight) + // Standard Error: 69_000 + .saturating_add((240_564_000 as Weight).saturating_mul(r as Weight)) } fn instr_select(r: u32, ) -> Weight { - (20_083_000 as Weight) - // Standard Error: 24_000 - .saturating_add((12_378_000 as Weight).saturating_mul(r as Weight)) + (21_506_000 as Weight) + // Standard Error: 21_000 + .saturating_add((45_277_000 as Weight).saturating_mul(r as Weight)) } fn instr_if(r: u32, ) -> Weight { - (20_091_000 as Weight) - // Standard Error: 24_000 - .saturating_add((12_195_000 as Weight).saturating_mul(r as Weight)) + (21_587_000 as Weight) + // Standard Error: 18_000 + .saturating_add((42_269_000 as Weight).saturating_mul(r as Weight)) } fn instr_br(r: u32, ) -> Weight { - (20_082_000 as Weight) - // Standard Error: 18_000 - .saturating_add((6_151_000 as Weight).saturating_mul(r as Weight)) + (21_538_000 as Weight) + // Standard Error: 807_000 + .saturating_add((22_392_000 as Weight).saturating_mul(r as Weight)) } fn instr_br_if(r: u32, ) -> Weight { - (20_031_000 as Weight) - // Standard Error: 13_000 - .saturating_add((13_978_000 as Weight).saturating_mul(r as Weight)) + (21_634_000 as Weight) + // Standard Error: 57_000 + .saturating_add((44_203_000 as Weight).saturating_mul(r as Weight)) } fn instr_br_table(r: u32, ) -> Weight { - (20_063_000 as Weight) - // Standard Error: 21_000 - .saturating_add((15_524_000 as Weight).saturating_mul(r as Weight)) + (21_531_000 as Weight) + // Standard Error: 19_000 + .saturating_add((33_198_000 as Weight).saturating_mul(r as Weight)) } fn instr_br_table_per_entry(e: u32, ) -> Weight { - (34_332_000 as Weight) - // Standard Error: 0 - .saturating_add((117_000 as Weight).saturating_mul(e as Weight)) + (60_960_000 as Weight) + // Standard Error: 1_000 + .saturating_add((151_000 as Weight).saturating_mul(e as Weight)) } fn instr_call(r: u32, ) -> Weight { - (20_446_000 as Weight) - // Standard Error: 121_000 - .saturating_add((90_977_000 as Weight).saturating_mul(r as Weight)) + (21_777_000 as Weight) + // Standard Error: 141_000 + .saturating_add((245_105_000 as Weight).saturating_mul(r as Weight)) } fn instr_call_indirect(r: u32, ) -> Weight { - (28_119_000 as Weight) - // Standard Error: 390_000 - .saturating_add((192_865_000 as Weight).saturating_mul(r as Weight)) + (34_307_000 as Weight) + // Standard Error: 365_000 + .saturating_add((344_623_000 as Weight).saturating_mul(r as Weight)) } fn instr_call_indirect_per_param(p: u32, ) -> Weight { - (228_352_000 as Weight) - // Standard Error: 4_000 - .saturating_add((3_891_000 as Weight).saturating_mul(p as Weight)) + (398_310_000 as Weight) + // Standard Error: 6_000 + .saturating_add((4_163_000 as Weight).saturating_mul(p as Weight)) } fn instr_local_get(r: u32, ) -> Weight { - (37_745_000 as Weight) - // Standard Error: 13_000 - .saturating_add((3_135_000 as Weight).saturating_mul(r as Weight)) + (40_478_000 as Weight) + // Standard Error: 19_000 + .saturating_add((9_991_000 as Weight).saturating_mul(r as Weight)) } fn instr_local_set(r: u32, ) -> Weight { - (37_639_000 as Weight) - // Standard Error: 15_000 - .saturating_add((3_541_000 as Weight).saturating_mul(r as 
Weight)) + (40_427_000 as Weight) + // Standard Error: 26_000 + .saturating_add((8_526_000 as Weight).saturating_mul(r as Weight)) } fn instr_local_tee(r: u32, ) -> Weight { - (37_639_000 as Weight) - // Standard Error: 23_000 - .saturating_add((4_813_000 as Weight).saturating_mul(r as Weight)) + (40_463_000 as Weight) + // Standard Error: 19_000 + .saturating_add((16_497_000 as Weight).saturating_mul(r as Weight)) } fn instr_global_get(r: u32, ) -> Weight { - (23_379_000 as Weight) - // Standard Error: 27_000 - .saturating_add((7_757_000 as Weight).saturating_mul(r as Weight)) + (25_998_000 as Weight) + // Standard Error: 21_000 + .saturating_add((18_214_000 as Weight).saturating_mul(r as Weight)) } fn instr_global_set(r: u32, ) -> Weight { - (23_378_000 as Weight) - // Standard Error: 68_000 - .saturating_add((8_437_000 as Weight).saturating_mul(r as Weight)) + (25_972_000 as Weight) + // Standard Error: 42_000 + .saturating_add((18_901_000 as Weight).saturating_mul(r as Weight)) } fn instr_memory_current(r: u32, ) -> Weight { - (22_245_000 as Weight) + (24_949_000 as Weight) // Standard Error: 17_000 - .saturating_add((3_446_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((8_541_000 as Weight).saturating_mul(r as Weight)) } fn instr_memory_grow(r: u32, ) -> Weight { - (20_714_000 as Weight) - // Standard Error: 478_000 - .saturating_add((2_314_540_000 as Weight).saturating_mul(r as Weight)) + (22_204_000 as Weight) + // Standard Error: 4_776_000 + .saturating_add((2_198_462_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64clz(r: u32, ) -> Weight { - (20_126_000 as Weight) - // Standard Error: 15_000 - .saturating_add((5_316_000 as Weight).saturating_mul(r as Weight)) + (21_506_000 as Weight) + // Standard Error: 18_000 + .saturating_add((25_302_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ctz(r: u32, ) -> Weight { - (20_107_000 as Weight) - // Standard Error: 23_000 - .saturating_add((5_344_000 as Weight).saturating_mul(r as Weight)) + (21_523_000 as Weight) + // Standard Error: 29_000 + .saturating_add((25_206_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64popcnt(r: u32, ) -> Weight { - (20_135_000 as Weight) - // Standard Error: 22_000 - .saturating_add((5_909_000 as Weight).saturating_mul(r as Weight)) + (21_567_000 as Weight) + // Standard Error: 466_000 + .saturating_add((19_925_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64eqz(r: u32, ) -> Weight { - (20_107_000 as Weight) - // Standard Error: 19_000 - .saturating_add((5_515_000 as Weight).saturating_mul(r as Weight)) + (21_569_000 as Weight) + // Standard Error: 30_000 + .saturating_add((25_027_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64extendsi32(r: u32, ) -> Weight { - (20_229_000 as Weight) - // Standard Error: 18_000 - .saturating_add((5_113_000 as Weight).saturating_mul(r as Weight)) + (21_536_000 as Weight) + // Standard Error: 193_000 + .saturating_add((17_690_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64extendui32(r: u32, ) -> Weight { - (20_070_000 as Weight) - // Standard Error: 11_000 - .saturating_add((5_226_000 as Weight).saturating_mul(r as Weight)) + (21_555_000 as Weight) + // Standard Error: 356_000 + .saturating_add((17_105_000 as Weight).saturating_mul(r as Weight)) } fn instr_i32wrapi64(r: u32, ) -> Weight { - (20_090_000 as Weight) - // Standard Error: 15_000 - .saturating_add((5_296_000 as Weight).saturating_mul(r as Weight)) + (21_561_000 as Weight) + // Standard Error: 1_038_000 + .saturating_add((22_198_000 as 
Weight).saturating_mul(r as Weight)) } fn instr_i64eq(r: u32, ) -> Weight { - (20_095_000 as Weight) - // Standard Error: 13_000 - .saturating_add((7_323_000 as Weight).saturating_mul(r as Weight)) + (21_513_000 as Weight) + // Standard Error: 21_000 + .saturating_add((33_620_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ne(r: u32, ) -> Weight { - (20_043_000 as Weight) - // Standard Error: 10_000 - .saturating_add((7_280_000 as Weight).saturating_mul(r as Weight)) + (21_556_000 as Weight) + // Standard Error: 17_000 + .saturating_add((33_669_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64lts(r: u32, ) -> Weight { - (20_061_000 as Weight) - // Standard Error: 15_000 - .saturating_add((7_226_000 as Weight).saturating_mul(r as Weight)) + (21_571_000 as Weight) + // Standard Error: 19_000 + .saturating_add((33_649_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ltu(r: u32, ) -> Weight { - (20_072_000 as Weight) - // Standard Error: 24_000 - .saturating_add((7_315_000 as Weight).saturating_mul(r as Weight)) + (21_533_000 as Weight) + // Standard Error: 23_000 + .saturating_add((33_450_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64gts(r: u32, ) -> Weight { - (20_054_000 as Weight) - // Standard Error: 27_000 - .saturating_add((7_228_000 as Weight).saturating_mul(r as Weight)) + (21_525_000 as Weight) + // Standard Error: 24_000 + .saturating_add((33_727_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64gtu(r: u32, ) -> Weight { - (20_169_000 as Weight) - // Standard Error: 18_000 - .saturating_add((7_262_000 as Weight).saturating_mul(r as Weight)) + (21_546_000 as Weight) + // Standard Error: 16_000 + .saturating_add((33_420_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64les(r: u32, ) -> Weight { - (20_115_000 as Weight) - // Standard Error: 21_000 - .saturating_add((7_212_000 as Weight).saturating_mul(r as Weight)) + (21_546_000 as Weight) + // Standard Error: 22_000 + .saturating_add((33_720_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64leu(r: u32, ) -> Weight { - (20_122_000 as Weight) - // Standard Error: 23_000 - .saturating_add((7_166_000 as Weight).saturating_mul(r as Weight)) + (21_546_000 as Weight) + // Standard Error: 20_000 + .saturating_add((33_383_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ges(r: u32, ) -> Weight { - (20_140_000 as Weight) - // Standard Error: 16_000 - .saturating_add((7_242_000 as Weight).saturating_mul(r as Weight)) + (21_577_000 as Weight) + // Standard Error: 27_000 + .saturating_add((33_454_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64geu(r: u32, ) -> Weight { - (20_107_000 as Weight) - // Standard Error: 18_000 - .saturating_add((7_365_000 as Weight).saturating_mul(r as Weight)) + (21_566_000 as Weight) + // Standard Error: 25_000 + .saturating_add((33_665_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64add(r: u32, ) -> Weight { - (20_179_000 as Weight) - // Standard Error: 14_000 - .saturating_add((7_144_000 as Weight).saturating_mul(r as Weight)) + (21_524_000 as Weight) + // Standard Error: 22_000 + .saturating_add((33_351_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64sub(r: u32, ) -> Weight { - (20_143_000 as Weight) - // Standard Error: 17_000 - .saturating_add((7_222_000 as Weight).saturating_mul(r as Weight)) + (21_558_000 as Weight) + // Standard Error: 18_000 + .saturating_add((33_423_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64mul(r: u32, ) -> Weight { - (20_129_000 as Weight) - // Standard Error: 21_000 - 
.saturating_add((7_247_000 as Weight).saturating_mul(r as Weight)) + (21_554_000 as Weight) + // Standard Error: 17_000 + .saturating_add((33_588_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64divs(r: u32, ) -> Weight { - (20_107_000 as Weight) - // Standard Error: 16_000 - .saturating_add((12_953_000 as Weight).saturating_mul(r as Weight)) + (21_568_000 as Weight) + // Standard Error: 29_000 + .saturating_add((38_897_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64divu(r: u32, ) -> Weight { - (20_093_000 as Weight) - // Standard Error: 17_000 - .saturating_add((12_040_000 as Weight).saturating_mul(r as Weight)) + (21_567_000 as Weight) + // Standard Error: 31_000 + .saturating_add((38_756_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64rems(r: u32, ) -> Weight { - (20_102_000 as Weight) - // Standard Error: 13_000 - .saturating_add((12_945_000 as Weight).saturating_mul(r as Weight)) + (21_540_000 as Weight) + // Standard Error: 20_000 + .saturating_add((39_244_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64remu(r: u32, ) -> Weight { - (20_132_000 as Weight) - // Standard Error: 16_000 - .saturating_add((12_199_000 as Weight).saturating_mul(r as Weight)) + (21_581_000 as Weight) + // Standard Error: 24_000 + .saturating_add((38_461_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64and(r: u32, ) -> Weight { - (20_155_000 as Weight) - // Standard Error: 26_000 - .saturating_add((7_103_000 as Weight).saturating_mul(r as Weight)) + (21_555_000 as Weight) + // Standard Error: 24_000 + .saturating_add((33_367_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64or(r: u32, ) -> Weight { - (20_088_000 as Weight) - // Standard Error: 22_000 - .saturating_add((7_213_000 as Weight).saturating_mul(r as Weight)) + (21_523_000 as Weight) + // Standard Error: 18_000 + .saturating_add((33_466_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64xor(r: u32, ) -> Weight { - (20_060_000 as Weight) - // Standard Error: 18_000 - .saturating_add((7_275_000 as Weight).saturating_mul(r as Weight)) + (21_536_000 as Weight) + // Standard Error: 34_000 + .saturating_add((33_452_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shl(r: u32, ) -> Weight { - (20_104_000 as Weight) - // Standard Error: 15_000 - .saturating_add((7_282_000 as Weight).saturating_mul(r as Weight)) + (21_567_000 as Weight) + // Standard Error: 24_000 + .saturating_add((33_809_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shrs(r: u32, ) -> Weight { - (20_111_000 as Weight) - // Standard Error: 20_000 - .saturating_add((7_264_000 as Weight).saturating_mul(r as Weight)) + (21_580_000 as Weight) + // Standard Error: 32_000 + .saturating_add((33_849_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shru(r: u32, ) -> Weight { - (20_096_000 as Weight) - // Standard Error: 23_000 - .saturating_add((7_347_000 as Weight).saturating_mul(r as Weight)) + (21_571_000 as Weight) + // Standard Error: 18_000 + .saturating_add((33_799_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64rotl(r: u32, ) -> Weight { - (20_091_000 as Weight) - // Standard Error: 16_000 - .saturating_add((7_370_000 as Weight).saturating_mul(r as Weight)) + (21_559_000 as Weight) + // Standard Error: 22_000 + .saturating_add((33_947_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64rotr(r: u32, ) -> Weight { - (20_102_000 as Weight) - // Standard Error: 28_000 - .saturating_add((7_266_000 as Weight).saturating_mul(r as Weight)) + (21_565_000 as Weight) + // Standard Error: 20_000 + 
.saturating_add((33_754_000 as Weight).saturating_mul(r as Weight)) } } From c2d6fa797e3387104dd882de35a6d148000ab65a Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Mon, 28 Jun 2021 02:17:28 -0700 Subject: [PATCH 47/67] Support NMap in generate_storage_alias (#9147) * Support NMap in generate_storage_alias * Verify that 2-key NMap is identical to DoubleMap * Also compare key hashes and make sure they're identical * Fix and add tests for 1-tuple NMap generated by generate_storage_alias --- frame/support/src/lib.rs | 31 ++++++++++++++++++++- frame/support/src/storage/generator/nmap.rs | 20 +++++++++++++ frame/support/src/storage/types/key.rs | 4 +-- frame/support/src/storage/types/nmap.rs | 24 +++++++++++----- 4 files changed, 69 insertions(+), 10 deletions(-) diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index 45988c1c7372b..638485360c589 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -100,7 +100,8 @@ impl TypeId for PalletId { } /// Generate a new type alias for [`storage::types::StorageValue`], -/// [`storage::types::StorageMap`] and [`storage::types::StorageDoubleMap`]. +/// [`storage::types::StorageMap`], [`storage::types::StorageDoubleMap`] +/// and [`storage::types::StorageNMap`]. /// /// Useful for creating a *storage-like* struct for test and migrations. /// @@ -154,6 +155,18 @@ macro_rules! generate_storage_alias { >; } }; + ($pallet:ident, $name:ident => NMap<$(($key:ty, $hasher:ty),)+ $value:ty>) => { + $crate::paste::paste! { + $crate::generate_storage_alias!(@GENERATE_INSTANCE_STRUCT $pallet, $name); + type $name = $crate::storage::types::StorageNMap< + [<$name Instance>], + ( + $( $crate::storage::types::Key<$hasher, $key>, )+ + ), + $value, + >; + } + }; ($pallet:ident, $name:ident => Value<$value:ty>) => { $crate::paste::paste! { $crate::generate_storage_alias!(@GENERATE_INSTANCE_STRUCT $pallet, $name); @@ -193,6 +206,22 @@ macro_rules! generate_storage_alias { >; } }; + ( + $pallet:ident, + $name:ident<$t:ident : $bounds:tt> => NMap<$(($key:ty, $hasher:ty),)+ $value:ty> + ) => { + $crate::paste::paste! { + $crate::generate_storage_alias!(@GENERATE_INSTANCE_STRUCT $pallet, $name); + #[allow(type_alias_bounds)] + type $name<$t : $bounds> = $crate::storage::types::StorageNMap< + [<$name Instance>], + ( + $( $crate::storage::types::Key<$hasher, $key>, )+ + ), + $value, + >; + } + }; ($pallet:ident, $name:ident<$t:ident : $bounds:tt> => Value<$value:ty>) => { $crate::paste::paste! 
{ $crate::generate_storage_alias!(@GENERATE_INSTANCE_STRUCT $pallet, $name); diff --git a/frame/support/src/storage/generator/nmap.rs b/frame/support/src/storage/generator/nmap.rs index 62f188a26db8d..7a320adcaab26 100755 --- a/frame/support/src/storage/generator/nmap.rs +++ b/frame/support/src/storage/generator/nmap.rs @@ -433,6 +433,26 @@ mod test_iterators { prefix } + #[test] + fn n_map_double_map_identical_key() { + sp_io::TestExternalities::default().execute_with(|| { + NMap::insert((1, 2), 50); + let key_hash = NMap::hashed_key_for((1, 2)); + + { + crate::generate_storage_alias!(Test, NMap => DoubleMap< + (u16, crate::Blake2_128Concat), + (u32, crate::Twox64Concat), + u64 + >); + + let value = NMap::get(1, 2).unwrap(); + assert_eq!(value, 50); + assert_eq!(NMap::hashed_key_for(1, 2), key_hash); + } + }); + } + #[test] fn n_map_reversible_reversible_iteration() { sp_io::TestExternalities::default().execute_with(|| { diff --git a/frame/support/src/storage/types/key.rs b/frame/support/src/storage/types/key.rs index a770d1b0fceab..def800f62c50e 100755 --- a/frame/support/src/storage/types/key.rs +++ b/frame/support/src/storage/types/key.rs @@ -110,7 +110,7 @@ impl KeyGeneratorInner for Key { } } -#[impl_trait_for_tuples::impl_for_tuples(2, 18)] +#[impl_trait_for_tuples::impl_for_tuples(1, 18)] #[tuple_types_custom_trait_bound(KeyGeneratorInner)] impl KeyGenerator for Tuple { for_tuples!( type Key = ( #(Tuple::Key),* ); ); @@ -150,7 +150,7 @@ impl KeyGenerator for Tuple { } } -#[impl_trait_for_tuples::impl_for_tuples(2, 18)] +#[impl_trait_for_tuples::impl_for_tuples(1, 18)] #[tuple_types_custom_trait_bound(KeyGeneratorInner + KeyGeneratorMaxEncodedLen)] impl KeyGeneratorMaxEncodedLen for Tuple { fn key_max_encoded_len() -> usize { diff --git a/frame/support/src/storage/types/nmap.rs b/frame/support/src/storage/types/nmap.rs index a9fc121d42d2e..fd1ca47b32c95 100755 --- a/frame/support/src/storage/types/nmap.rs +++ b/frame/support/src/storage/types/nmap.rs @@ -423,7 +423,7 @@ mod test { fn pallet_prefix() -> &'static str { "test" } - const STORAGE_PREFIX: &'static str = "foo"; + const STORAGE_PREFIX: &'static str = "Foo"; } struct ADefault; @@ -445,7 +445,7 @@ mod test { TestExternalities::default().execute_with(|| { let mut k: Vec = vec![]; k.extend(&twox_128(b"test")); - k.extend(&twox_128(b"foo")); + k.extend(&twox_128(b"Foo")); k.extend(&3u16.blake2_128_concat()); assert_eq!(A::hashed_key_for((&3,)).to_vec(), k); @@ -458,6 +458,16 @@ mod test { assert_eq!(A::get((3,)), Some(10)); assert_eq!(AValueQueryWithAnOnEmpty::get((3,)), 10); + { + crate::generate_storage_alias!(test, Foo => NMap< + (u16, Blake2_128Concat), + u32 + >); + + assert_eq!(Foo::contains_key((3,)), true); + assert_eq!(Foo::get((3,)), Some(10)); + } + A::swap::, _, _>((3,), (2,)); assert_eq!(A::contains_key((3,)), false); assert_eq!(A::contains_key((2,)), true); @@ -575,7 +585,7 @@ mod test { AValueQueryWithAnOnEmpty::MODIFIER, StorageEntryModifier::Default ); - assert_eq!(A::NAME, "foo"); + assert_eq!(A::NAME, "Foo"); assert_eq!( AValueQueryWithAnOnEmpty::DEFAULT.0.default_byte(), 98u32.encode() @@ -617,7 +627,7 @@ mod test { TestExternalities::default().execute_with(|| { let mut k: Vec = vec![]; k.extend(&twox_128(b"test")); - k.extend(&twox_128(b"foo")); + k.extend(&twox_128(b"Foo")); k.extend(&3u16.blake2_128_concat()); k.extend(&30u8.twox_64_concat()); assert_eq!(A::hashed_key_for((3, 30)).to_vec(), k); @@ -761,7 +771,7 @@ mod test { AValueQueryWithAnOnEmpty::MODIFIER, StorageEntryModifier::Default ); - 
assert_eq!(A::NAME, "foo"); + assert_eq!(A::NAME, "Foo"); assert_eq!( AValueQueryWithAnOnEmpty::DEFAULT.0.default_byte(), 98u32.encode() @@ -844,7 +854,7 @@ mod test { TestExternalities::default().execute_with(|| { let mut k: Vec = vec![]; k.extend(&twox_128(b"test")); - k.extend(&twox_128(b"foo")); + k.extend(&twox_128(b"Foo")); k.extend(&1u16.blake2_128_concat()); k.extend(&10u16.blake2_128_concat()); k.extend(&100u16.twox_64_concat()); @@ -996,7 +1006,7 @@ mod test { AValueQueryWithAnOnEmpty::MODIFIER, StorageEntryModifier::Default ); - assert_eq!(A::NAME, "foo"); + assert_eq!(A::NAME, "Foo"); assert_eq!( AValueQueryWithAnOnEmpty::DEFAULT.0.default_byte(), 98u32.encode() From c44e5d69aa408d98ce4bcca0d8d8f08a1026e5a4 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Mon, 28 Jun 2021 11:20:24 +0200 Subject: [PATCH 48/67] Decouple Staking and Election - Part 3: Signed Phase (#7910) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Base features and traits. * pallet and unsigned phase * add signed phase. * remove comments * Undo bad formattings. * some formatting cleanup. * Small self-cleanup. * Add todo * Make it all build * self-review * Some doc tests. * Some changes from other PR * Fix session test * Update bin/node/runtime/src/lib.rs Co-authored-by: Peter Goodspeed-Niklaus * Fix name. * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_election_provider_multi_phase --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/election-provider-multi-phase/src/weights.rs --template=./.maintain/frame-weight-template.hbs * typos and verbiage * no glob imports in signed.rs * meaningful generic type parameters for SignedSubmission * dedup feasibility check weight calculation * simplify/optimize fn insert_submission * tests: remove glob, cause to build without error * use sp_std::vec::Vec * maintain invariant within fn insert_submission * fix accidentally ordering the list backward * intentionally order the list in reverse * get rid of unused import * ensure signed submissions are cleared in early elect * finalize the signed phase when appropriate - ensure we don't leave storage lying around, even if elect called prematurely - test that proposition - disable the unsigned phase if a viable solution from the signed phase exists - ensure signed phase finalization weight is accounted for * resolve dispatch error todo * update assumptions in submit benchmark * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_election_provider_multi_phase --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/election-provider-multi-phase/src/weights.rs --template=./.maintain/frame-weight-template.hbs * line length * make a few more things pub * restore missing import * update ui test output * update tests from master branch * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_election_provider_multi_phase --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/election-provider-multi-phase/src/weights.rs --template=./.maintain/frame-weight-template.hbs * remove duplicate definitions * remove signed reward factor due to 
its attack potential * Update frame/election-provider-multi-phase/src/signed.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * remove SignedRewardMax; no longer necessary * compute the encoded size without actually encoding * remove unused PostInfo * pub use some stuff Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * ensure `pub use` things are in fact `pub` * add event information: was another solution ejected to make room * unconditionally run the unsigned phase even if signed was successful * remove dead test code * meaningful witness data name * use errors instead of defensive `unwrap_or_default` * get rid of a log message redundant with an event * saturating math Co-authored-by: Shawn Tabrizi * import Saturating * mv `fn submit` to end of call * add log line * Use a better data structure for SignedSubmissions instead of Vec (#8933) * Remove: (#8748) * `NetworkStatusSinks` * `sc_service::SpawnTasksParams::network_status_sinks` Also: * `sc_service::build_network()` does not return `network_status_sinks` * CI: fix simnet trigger (#8927) * CI: chore * CI: pin simnet version * More sc-service config reexports (#8887) * Reexport ExecutionStrategies and ExecutionStrategy * Reexport more of the network * Reexport the ExecutionStrategy as it's used within ExecutionStrategies * Fix check runtime CI (#8930) * Fix check_runtime.sh script * contracts: Remove confusing "Related Modules" doc * Bump parity-wasm and pwasm-utils to the newest versions everywhere (#8928) * BROKEN: convert SignedSubmissions to BoundedBTreeSet Eventually, once it works, this change should improve overall performance. However, in the meantime, the trait bounds aren't playing nicely, and this is turning into too much of a pain to handle right now as part of /#7910. We can take care of it later. * Simple `MaxBoundedLen` Implementations (#8793) * implement max_values + storages info * some formatting + doc * sudo sanity check * timestamp * assets (not working) * fix assets * impl for proxy * update balances * rename StoragesInfo -> PalletStorageInfo * merge both StorageInfoTrait and PalletStorageInfo I think it is more future proof. In the future some storage could make use of multiple prefix. Like one to store how much value has been inserted, etc... * Update frame/support/procedural/src/storage/parse.rs Co-authored-by: Peter Goodspeed-Niklaus * Update frame/support/procedural/src/storage/storage_struct.rs Co-authored-by: Peter Goodspeed-Niklaus * Fix max_size using hasher information hasher now expose `max_len` which allows to computes their maximum len. For hasher without concatenation, it is the size of the hash part, for hasher with concatenation, it is the size of the hash part + max encoded len of the key. * fix tests * fix ui tests * Move `MaxBoundedLen` into its own crate (#8814) * move MaxEncodedLen into its own crate * remove MaxEncodedLen impl from frame-support * add to assets and balances * try more fixes * fix compile Co-authored-by: Shawn Tabrizi * nits * fix compile * line width * fix max-values-macro merge * Add some derive, needed for test and other purpose * use weak bounded vec in some cases * Update lib.rs * move max-encoded-len crate * fix * remove app crypto for now * width * Revert "remove app crypto for now" This reverts commit 73623e9933d50648e0e7fe90b6171a8e45d7f5a2. 
* unused variable * more unused variables * more fixes * Add #[max_encoded_len_crate(...)] helper attribute The purpose of this attribute is to reduce the surface area of max_encoded_len changes. Crates deriving `MaxEncodedLen` do not need to add it to `Cargo.toml`; they can instead just do ```rust \#[derive(Encode, MaxEncodedLen)] \#[max_encoded_len_crate(frame_support::max_encoded_len)] struct Example; ``` * fix a ui test * use #[max_encoded_len_crate(...)] helper in app_crypto * remove max_encoded_len import where not necessary * update lockfile * fix ui test * ui * newline * fix merge * try fix ui again * Update max-encoded-len/derive/src/lib.rs Co-authored-by: Peter Goodspeed-Niklaus * extract generate_crate_access_2018 * Update lib.rs * compiler isnt smart enough Co-authored-by: thiolliere Co-authored-by: Peter Goodspeed-Niklaus Co-authored-by: Peter Goodspeed-Niklaus * remove duplicate Issued/Burned events (#8935) * weather -> whether (#8938) * make remote ext use batch ws-client (#8916) * make remote ext use batch ws-client * Add debug log for key length * better assertions * new sanity_checl * try and make it work with batch * update test * remove exctra uri * add missing at * remove unused rpc stuff * improve Co-authored-by: emostov <32168567+emostov@users.noreply.github.com> * Make `Schedule` fields public to allow for customization (#8924) * Make `Schedule` fields public for customization * Fix doc typo Co-authored-by: Andrew Jones Co-authored-by: Andrew Jones * Session key should be settable at genesis even for non-endowed accounts (#8942) * Session key should be settable at genesis even for non-endowed accounts * Docs * Migrate pallet-scored-pool to pallet attribute macro (#8825) * Migrate pallet-scored-pool to pallet attribute macro. * Remove dummy event. * Apply review suggestions. * Bump retain_mut from 0.1.2 to 0.1.3 (#8951) Bumps [retain_mut](https://github.com/upsuper/retain_mut) from 0.1.2 to 0.1.3. - [Release notes](https://github.com/upsuper/retain_mut/releases) - [Commits](https://github.com/upsuper/retain_mut/compare/v0.1.2...v0.1.3) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Use correct CreateInherentDataProviders impl for manual seal (#8852) * use correct CreateInherentDataProviders impl for manual seal * add babe inherent provider * move client into factory fn * Refactor code a little bit (#8932) * Optimize `next_storage_key` (#8956) * Optimize `next_storage_key` - Do not rely on recursion - Use an iterator over the overlay to not always call the same method * Fix bug * Add deserialize for TransactionValidityError in std. (#8961) * Add deserialize for TransactionValidityError in std. * Fix derives * Bump getrandom from 0.2.2 to 0.2.3 (#8952) Bumps [getrandom](https://github.com/rust-random/getrandom) from 0.2.2 to 0.2.3. - [Release notes](https://github.com/rust-random/getrandom/releases) - [Changelog](https://github.com/rust-random/getrandom/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-random/getrandom/compare/v0.2.2...v0.2.3) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Allow usage of path in construct_runtime! (#8801) * Allow usage of path in construct_runtime! 
* Fix whitespace * Fix whitespace * Make expand_runtime_metadata accept slice instead of Iterator * Include Call and Event in construct_runtime for testing * Migrate impl_outer_event to proc macro * Fix integrity_test_works * Update UI test expectations * Factor in module path while generating enum variant or fn names * Use ParseStream::lookahead for more helpful error messages * Remove generating outer_event_metadata * Ensure pallets with different paths but same last path segment can coexist * Remove unnecessary generated function * Migrate decl_outer_config to proc macro * Add default_filter test for expand_outer_origin * Allow crate, self and super keywords to appear in pallet path * Add UI test for specifying empty pallet paths in construct_runtime * Reduce cargo doc warnings (#8947) Co-authored-by: Bastian Köcher * Update wasmtime to 0.27 (#8913) * Update wasmtime to 0.27 A couple of notes: - Now we are fair about unsafeness of runtime creation via an compiled artifact. This change was prompted by the change in wasmtime which made `deserialize` rightfully unsafe. Now `CodeSupplyMode` was hidden and the `create_runtime` now takes the blob again and there is now a new fn for creating a runtime with a compiled artifact. - This is a big change for wasmtime. They switched to the modern backend for code generation. While this can bring performance improvements, it can also introduce some problems. In fact, 0.27 fixed a serious issue that could lead to sandbox escape. Hence we need a proper burn in. This would require a change to PVF validation host as well. * Filter regalloc logging * Spellling corrections (no code changes) (#8971) * Spelling corrections * As this might break let's do as a separate PR * Dependabot use correct label (#8973) * Inject hashed prefix for remote-ext (#8960) * Inject for remote-ext * Update utils/frame/remote-externalities/src/lib.rs Co-authored-by: Zeke Mostov <32168567+emostov@users.noreply.github.com> * Update utils/frame/remote-externalities/src/lib.rs Co-authored-by: Zeke Mostov <32168567+emostov@users.noreply.github.com> * Apply suggestions from code review * Apply suggestions from code review Co-authored-by: Zeke Mostov <32168567+emostov@users.noreply.github.com> * Use `SpawnTaskHandle`s for spawning tasks in the tx pool (#8958) * Remove futures-diagnose * Use `SpawnTaskHandle`s for spawning tasks in the tx pool * Box the spawner * Fix tests * Use the testing task executor * Do not spend time on verifying the signatures before calling Runtime (#8980) * Revert "Use `SpawnTaskHandle`s for spawning tasks in the tx pool (#8958)" (#8983) This reverts commit bfef07c0d22ead3ab3c4e0e90ddf9b0e3537566e. * Uniques: An economically-secure basic-featured NFT pallet (#8813) * Uniques: An economically-secure basic-featured NFT pallet * force_transfer * freeze/thaw * team management * approvals * Fixes * force_asset_status * class_metadata * instance metadata * Fixes * use nmap * Fixes * class metadata has information field * Intiial mock/tests and a fix * Remove impl_non_fungibles * Docs * Update frame/uniques/src/lib.rs Co-authored-by: Shawn Tabrizi * Update frame/uniques/src/lib.rs Co-authored-by: Shawn Tabrizi * Update frame/uniques/src/lib.rs Co-authored-by: Shawn Tabrizi * Update frame/uniques/src/lib.rs Co-authored-by: Shawn Tabrizi * Reserve, don't transfer. 
* Fixes * Tests * Tests * refresh_deposit * Tests and proper handling of metdata destruction * test burn * Tests * Update impl_fungibles.rs * Initial benchmarking * benchmark * Fixes * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_uniques --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/uniques/src/weights.rs --template=./.maintain/frame-weight-template.hbs * Attributes * Attribute metadata * Fixes * Update frame/uniques/README.md * Docs * Docs * Docs * Simple metadata * Use BoundedVec * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_uniques --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/uniques/src/weights.rs --template=./.maintain/frame-weight-template.hbs * Update frame/uniques/src/lib.rs Co-authored-by: Lohann Paterno Coutinho Ferreira * Update frame/uniques/src/lib.rs Co-authored-by: Lohann Paterno Coutinho Ferreira * Update frame/uniques/src/lib.rs Co-authored-by: Lohann Paterno Coutinho Ferreira * Update frame/uniques/src/lib.rs Co-authored-by: Lohann Paterno Coutinho Ferreira * Update frame/uniques/src/lib.rs Co-authored-by: Lohann Paterno Coutinho Ferreira * Fixes * Update frame/uniques/README.md Co-authored-by: Alexander Popiak * Update frame/uniques/README.md Co-authored-by: Alexander Popiak * Update frame/uniques/README.md Co-authored-by: Alexander Popiak * Docs * Bump Co-authored-by: Shawn Tabrizi Co-authored-by: Parity Bot Co-authored-by: Lohann Paterno Coutinho Ferreira Co-authored-by: Alexander Popiak * Update WeakBoundedVec's remove and swap_remove (#8985) Co-authored-by: Boiethios * Convert another instance of Into impl to From in the macros (#8986) * Convert another instance of Into impl to From in the macros * Convert another location * also fix bounded vec (#8987) * fix most compiler errors Mostly the work so far has been in tracking down where precisely to insert appropriate trait bounds, and updating `fn insert_submission`. However, there's still a compiler error remaining: ``` error[E0275]: overflow evaluating the requirement `Compact<_>: Decode` | = help: consider adding a `#![recursion_limit="256"]` attribute to your crate (`pallet_election_provider_multi_phase`) = note: required because of the requirements on the impl of `Decode` for `Compact<_>` = note: 126 redundant requirements hidden = note: required because of the requirements on the impl of `Decode` for `Compact<_>` ``` Next up: figure out how we ended up with that recursive bound, and fix it. * extract type SignedSubmissionsOf Weirdly, we still encounter the recursive trait definition error here, despite removing the trait bounds. Something weird is happening. * impl Decode bounds on BoundedBTreeMap/Set on T, not predecessor Otherwise, Rust gets confused and decides that the trait bound is infinitely recursive. For that matter, it _still_ gets confused somehow and decides that the trait bound is infinitely recursive, but at least this should somewhat simplify the matter. * fix recursive trait bound problem * minor fixes * more little fixes * correct semantics for try_insert * more fixes * derive Ord for SolutionType * tests compile * fix most tests, rm unnecessary one * Transactionpool: Make `ready_at` return earlier (#8995) `ready_at` returns when we have processed the requested block. 
However, on startup we already have processed the best block and there are no transactions in the pool on startup anyway. So, we can set `updated_at` to the best block on startup. Besides that `ready_at` now returns early when there are no ready nor any future transactions in the pool. * Discard notifications if we have failed to parse handshake (#8806) * Migrate pallet-democracy to pallet attribute macro (#8824) * Migrate pallet-democracy to pallet attribute macro. * Metadata fix. * Trigger CI. * Add ecdsa::Pair::verify_prehashed() (#8996) * Add ecdsa::Pair::verify_prehashed() * turn verify_prehashed() into an associated function * add Signature::recover_prehashed() * Non-fungible token traits (#8993) * Non-fungible token traits * Docs * Fixes * Implement non-fungible trait for Uniques * Update frame/uniques/src/impl_nonfungibles.rs Co-authored-by: Shawn Tabrizi * Update frame/uniques/src/impl_nonfungibles.rs Co-authored-by: Shawn Tabrizi Co-authored-by: Shawn Tabrizi * Removes unused import (#9007) * Add Call Filter That Prevents Nested `batch_all` (#9009) * add filter preventing nested `batch_all` * more tests * fix test * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_utility --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/utility/src/weights.rs --template=./.maintain/frame-weight-template.hbs Co-authored-by: Parity Bot * Transaction pool: Ensure that we prune transactions properly (#8963) * Transaction pool: Ensure that we prune transactions properly There was a bug in the transaction pool that we didn't pruned transactions properly because we called `prune_known`, instead of `prune`. This bug was introduced by: https://github.com/paritytech/substrate/pull/4629 This is required to have stale extrinsics being removed properly, so that they don't fill up the tx pool. * Fix compilation * Fix benches * ... * Storage chain: Runtime module (#8624) * Transaction storage runtime module * WIP: Tests * Tests, benchmarks and docs * Made check_proof mandatory * Typo * Renamed a crate * Apply suggestions from code review Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Added weight for on_finalize * Fixed counter mutations * Reorganized tests * Fixed build * Update for the new inherent API * Reworked for the new inherents API * Apply suggestions from code review Co-authored-by: cheme Co-authored-by: Alexander Popiak Co-authored-by: Shawn Tabrizi * Store transactions in a Vec * Added FeeDestination * Get rid of constants * Fixed node runtime build * Fixed benches * Update frame/transaction-storage/src/lib.rs Co-authored-by: cheme Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: cheme Co-authored-by: Alexander Popiak Co-authored-by: Shawn Tabrizi * more useful error message (#9014) * Named reserve (#7778) * add NamedReservableCurrency * move currency related trait and types into a new file * implement NamedReservableCurrency * remove empty reserves * Update frame/support/src/traits.rs Co-authored-by: Shawn Tabrizi * fix build * bump year * add MaxReserves * repatriate_reserved_named should put reserved fund into named reserved * add tests * add some docs * fix warning * Update lib.rs * fix test * fix test * fix * fix * triggier CI * Move NamedReservableCurrency. * Use strongly bounded vec for reserves. * Fix test. 
* remove duplicated file * trigger CI * Make `ReserveIdentifier` assosicated type * add helpers * make ReserveIdentifier assosicated type * fix * update * trigger CI * Apply suggestions from code review Co-authored-by: Shawn Tabrizi * trigger CI * Apply suggestions from code review Co-authored-by: Shawn Tabrizi Co-authored-by: Gavin Wood Co-authored-by: Shaun Wang * update ss58 type to u16 (#8955) * Fixed build (#9021) * Bump parity-db (#9024) * consensus: handle justification sync for blocks authored locally (#8698) * consensus: add trait to control justification sync process * network: implement JustificationSyncLink for NetworkService * slots: handle justification sync in slot worker * babe: fix slot worker instantiation * aura: fix slot worker instantiation * pow: handle justification sync in miner * babe: fix tests * aura: fix tests * node: fix compilation * node-template: fix compilation * consensus: rename justification sync link parameter * aura: fix test compilation * consensus: slots: move JustificationSyncLink out of on_slot * arithmetic: fix PerThing pow (#9030) * arithmetic: add failing test for pow * arithmetic: fix PerThing::pow * Revert back to previous optimisations Co-authored-by: Gav Wood * Compact proof utilities in sp_trie. (#8574) * validation extension in sp_io * need paths * arc impl * missing host function in executor * io to pkdot * decode function. * encode primitive. * trailing tab * multiple patch * fix child trie logic * restore master versionning * bench compact proof size * trie-db 22.3 is needed * line width * split line * fixes for bench (additional root may not be needed as original issue was with empty proof). * revert compact from block size calculation. * New error type for compression. * Adding test (incomplete (failing)). Also lacking real proof checking (no good primitives in sp-trie crate). * There is currently no proof recording utility in sp_trie, removing test. * small test of child root in proof without a child proof. * remove empty test. * remove non compact proof size * Missing revert. * proof method to encode decode. * Don't inlucde nominaotrs that back no one in the snapshot. (#9017) * fix all_in_one test which had a logic error * use sp_std, not std * Periodically call `Peerset::alloc_slots` on all sets (#9025) * Periodically call alloc_slots on all slots * Add test * contracts: Add new `seal_call` that offers new features (#8909) * Add new `seal_call` that offers new features * Fix doc typo Co-authored-by: Michael Müller * Fix doc typos Co-authored-by: Michael Müller * Fix comment on assert * Update CHANGELOG.md Co-authored-by: Michael Müller * fix unreserve_all_named (#9042) * Delete legacy runtime metadata macros (#9043) * `rpc-http-threads` cli arg (#8890) * Add optional `rpc-http-threads` cli arg * Update `http::ServerBuilder`threads * allow inserting equal items into bounded map/set * refactor: only load one solution at a time This increases the database read load, because we read one solution at a time. On the other hand, it substantially decreases the overall memory load, because we _only_ read one solution at a time instead of reading all of them. 
* Emit `Bonded` event when rebonding (#9040) * Emit `Bonded` event when rebonding * fix borrow checker * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_staking --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/staking/src/weights.rs --template=./.maintain/frame-weight-template.hbs Co-authored-by: Parity Bot * fix tests * Revert "Merge remote-tracking branch 'origin/master' into prgn-election-provider-multi-phase-bounded-btree-set-signed-submissions" This reverts commit de92b1e8e0e44a74c24e270d02b6e8e6a2c37032, reversing changes made to dae31f2018593b60dbf1d96ec96cdc35c374bb9e. * only derive debug when std * write after check * SignedSubmissions doesn't ever modify storage until .put() This makes a true check-before-write pattern possible. * REVERT ME: demo that Drop impl doesn't work * Revert "REVERT ME: demo that Drop impl doesn't work" This reverts commit 3317a4bb4de2e77d5a7fff2154552a81ec081763. * doc note about decode_len * rename get_submission, take_submission for clarity * add test which fails for current incorrect behavior * inline fn insert_submission This fixes a tricky check-before-write error, ensuring that we really only ever modify anything if we have in fact succeeded. Co-authored-by: Roman Proskuryakov Co-authored-by: Denis Pisarev Co-authored-by: MOZGIII Co-authored-by: Alexander Theißen Co-authored-by: Shawn Tabrizi Co-authored-by: thiolliere Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> Co-authored-by: Sebastian Müller Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: emostov <32168567+emostov@users.noreply.github.com> Co-authored-by: Andrew Jones Co-authored-by: Gavin Wood Co-authored-by: Shaun Wang Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Seun Lanlege Co-authored-by: Bastian Köcher Co-authored-by: Keith Yeung Co-authored-by: Squirrel Co-authored-by: Sergei Shulepov Co-authored-by: Ashley Co-authored-by: Parity Bot Co-authored-by: Lohann Paterno Coutinho Ferreira Co-authored-by: Alexander Popiak Co-authored-by: Boiethios Co-authored-by: Boiethios Co-authored-by: Pierre Krieger Co-authored-by: Andreas Doerr Co-authored-by: Dmitry Kashitsyn Co-authored-by: Arkadiy Paronyan Co-authored-by: cheme Co-authored-by: Andronik Ordian Co-authored-by: Xiliang Chen Co-authored-by: Gavin Wood Co-authored-by: Jakub Pánik Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> Co-authored-by: Michael Müller Co-authored-by: tgmichel * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_election_provider_multi_phase --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/election-provider-multi-phase/src/weights.rs --template=./.maintain/frame-weight-template.hbs * remove duplicate weight definitions injected by benchmark bot * check deletion overlay before getting * clarify non-conflict between delete, insert overlays * drain can be used wrong so is private * update take_submission docs * more drain improvements * more take_submission docs * debug assertion helps prove expectation is valid * doc on changing SignedMaxSubmissions * take_submission inner doc on system properties * Apply suggestions from code review Co-authored-by: Zeke Mostov 
<32168567+emostov@users.noreply.github.com> Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * get SolutionOrSnapshotSize out of the loop Co-authored-by: Zeke Mostov <32168567+emostov@users.noreply.github.com> * doc which items comprise `SignedSubmissions` * add doc about index as unique identifier * Add debug assertions to prove drain worked properly Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * replace take_submission with swap_out_submission * use a match to demonstrate all cases from signed_submissions.insert * refactor signed_submissions.insert return type * prettify test assertion Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * improve docs Co-authored-by: Guillaume Thiolliere * add tests that finalize_signed_phase is idempotent * add some debug assertions to guard against misuse of storage * log internal logic errors instead of panicing * don't store the reward with each signed submission The signed reward base can be treated as a constant. It can in principle change, but even if it's updated in the middle of an election, it's appropriate to use the current value for the winner. * emit Rewarded, Slashed events as appropriate Makes it easier to see who won/lost with signed submissions. * update docs * use a custom enum to be explicit about the outcome of insertion * remove outdated docs Co-authored-by: Peter Goodspeed-Niklaus Co-authored-by: Parity Benchmarking Bot Co-authored-by: Peter Goodspeed-Niklaus Co-authored-by: Shawn Tabrizi Co-authored-by: Roman Proskuryakov Co-authored-by: Denis Pisarev Co-authored-by: MOZGIII Co-authored-by: Alexander Theißen Co-authored-by: thiolliere Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> Co-authored-by: Sebastian Müller Co-authored-by: emostov <32168567+emostov@users.noreply.github.com> Co-authored-by: Andrew Jones Co-authored-by: Gavin Wood Co-authored-by: Shaun Wang Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Seun Lanlege Co-authored-by: Bastian Köcher Co-authored-by: Keith Yeung Co-authored-by: Squirrel Co-authored-by: Sergei Shulepov Co-authored-by: Ashley Co-authored-by: Lohann Paterno Coutinho Ferreira Co-authored-by: Alexander Popiak Co-authored-by: Boiethios Co-authored-by: Boiethios Co-authored-by: Pierre Krieger Co-authored-by: Andreas Doerr Co-authored-by: Dmitry Kashitsyn Co-authored-by: Arkadiy Paronyan Co-authored-by: cheme Co-authored-by: Andronik Ordian Co-authored-by: Xiliang Chen Co-authored-by: Gavin Wood Co-authored-by: Jakub Pánik Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> Co-authored-by: Michael Müller Co-authored-by: tgmichel --- bin/node/runtime/src/lib.rs | 21 +- .../src/benchmarking.rs | 105 +- .../src/helpers.rs | 8 +- .../election-provider-multi-phase/src/lib.rs | 272 +++++- .../election-provider-multi-phase/src/mock.rs | 56 +- .../src/signed.rs | 920 ++++++++++++++++++ .../src/unsigned.rs | 79 +- .../src/weights.rs | 107 +- .../support/src/storage/bounded_btree_map.rs | 64 +- .../support/src/storage/bounded_btree_set.rs | 62 +- primitives/npos-elections/compact/src/lib.rs | 2 +- 11 files changed, 1592 insertions(+), 104 deletions(-) create mode 100644 frame/election-provider-multi-phase/src/signed.rs diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 2ce19483e5539..fd7fd4213366f 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -73,6 +73,7 @@ use 
pallet_session::{historical as pallet_session_historical}; use sp_inherents::{InherentData, CheckInherentsResult}; use static_assertions::const_assert; use pallet_contracts::weights::WeightInfo; +use pallet_election_provider_multi_phase::FallbackStrategy; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; @@ -516,9 +517,14 @@ parameter_types! { pub const SignedPhase: u32 = EPOCH_DURATION_IN_BLOCKS / 4; pub const UnsignedPhase: u32 = EPOCH_DURATION_IN_BLOCKS / 4; - // fallback: no need to do on-chain phragmen initially. - pub const Fallback: pallet_election_provider_multi_phase::FallbackStrategy = - pallet_election_provider_multi_phase::FallbackStrategy::Nothing; + // signed config + pub const SignedMaxSubmissions: u32 = 10; + pub const SignedRewardBase: Balance = 1 * DOLLARS; + pub const SignedDepositBase: Balance = 1 * DOLLARS; + pub const SignedDepositByte: Balance = 1 * CENTS; + + // fallback: no on-chain fallback. + pub const Fallback: FallbackStrategy = FallbackStrategy::Nothing; pub SolutionImprovementThreshold: Perbill = Perbill::from_rational(1u32, 10_000); @@ -559,6 +565,14 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type MinerMaxWeight = MinerMaxWeight; type MinerMaxLength = MinerMaxLength; type MinerTxPriority = MultiPhaseUnsignedPriority; + type SignedMaxSubmissions = SignedMaxSubmissions; + type SignedRewardBase = SignedRewardBase; + type SignedDepositBase = SignedDepositBase; + type SignedDepositByte = SignedDepositByte; + type SignedDepositWeight = (); + type SignedMaxWeight = MinerMaxWeight; + type SlashHandler = (); // burn slashes + type RewardHandler = (); // nothing to do upon rewards type DataProvider = Staking; type OnChainAccuracy = Perbill; type CompactSolution = NposCompactSolution16; @@ -1556,6 +1570,7 @@ impl_runtime_apis! { add_benchmark!(params, batches, pallet_uniques, Uniques); add_benchmark!(params, batches, pallet_utility, Utility); add_benchmark!(params, batches, pallet_vesting, Vesting); + add_benchmark!(params, batches, pallet_election_provider_multi_phase, ElectionProviderMultiPhase); if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) } Ok(batches) diff --git a/frame/election-provider-multi-phase/src/benchmarking.rs b/frame/election-provider-multi-phase/src/benchmarking.rs index 4eade8e184e75..7988163e98f65 100644 --- a/frame/election-provider-multi-phase/src/benchmarking.rs +++ b/frame/election-provider-multi-phase/src/benchmarking.rs @@ -19,7 +19,7 @@ use super::*; use crate::{Pallet as MultiPhase, unsigned::IndexAssignmentOf}; -use frame_benchmarking::impl_benchmark_test_suite; +use frame_benchmarking::{account, impl_benchmark_test_suite}; use frame_support::{assert_ok, traits::OnInitialize}; use frame_system::RawOrigin; use rand::{prelude::SliceRandom, rngs::SmallRng, SeedableRng}; @@ -57,7 +57,7 @@ fn solution_with_size( let targets: Vec = (0..size.targets).map(|i| frame_benchmarking::account("Targets", i, SEED)).collect(); - let mut rng = SmallRng::seed_from_u64(SEED as u64); + let mut rng = SmallRng::seed_from_u64(SEED.into()); // decide who are the winners. let winners = targets @@ -176,6 +176,39 @@ frame_benchmarking::benchmarks! 
{ assert!(>::current_phase().is_unsigned()); } + finalize_signed_phase_accept_solution { + let receiver = account("receiver", 0, SEED); + let initial_balance = T::Currency::minimum_balance() * 10u32.into(); + T::Currency::make_free_balance_be(&receiver, initial_balance); + let ready: ReadySolution = Default::default(); + let deposit: BalanceOf = 10u32.into(); + let reward: BalanceOf = 20u32.into(); + + assert_ok!(T::Currency::reserve(&receiver, deposit)); + assert_eq!(T::Currency::free_balance(&receiver), initial_balance - 10u32.into()); + }: { + >::finalize_signed_phase_accept_solution(ready, &receiver, deposit, reward) + } verify { + assert_eq!(T::Currency::free_balance(&receiver), initial_balance + 20u32.into()); + assert_eq!(T::Currency::reserved_balance(&receiver), 0u32.into()); + } + + finalize_signed_phase_reject_solution { + let receiver = account("receiver", 0, SEED); + let initial_balance = T::Currency::minimum_balance().max(One::one()) * 10u32.into(); + let deposit: BalanceOf = 10u32.into(); + T::Currency::make_free_balance_be(&receiver, initial_balance); + assert_ok!(T::Currency::reserve(&receiver, deposit)); + + assert_eq!(T::Currency::free_balance(&receiver), initial_balance - 10u32.into()); + assert_eq!(T::Currency::reserved_balance(&receiver), 10u32.into()); + }: { + >::finalize_signed_phase_reject_solution(&receiver, deposit) + } verify { + assert_eq!(T::Currency::free_balance(&receiver), initial_balance - 10u32.into()); + assert_eq!(T::Currency::reserved_balance(&receiver), 0u32.into()); + } + on_initialize_open_unsigned_without_snapshot { // need to assume signed phase was open before >::on_initialize_open_signed().unwrap(); @@ -227,6 +260,38 @@ frame_benchmarking::benchmarks! { assert!(>::snapshot().is_some()); } + submit { + let c in 1 .. (T::SignedMaxSubmissions::get() - 1); + + // the solution will be worse than all of them meaning the score need to be checked against + // ~ log2(c) + let solution = RawSolution { + score: [(10_000_000u128 - 1).into(), 0, 0], + ..Default::default() + }; + + MultiPhase::::on_initialize_open_signed().expect("should be ok to start signed phase"); + >::put(1); + + let mut signed_submissions = SignedSubmissions::::get(); + for i in 0..c { + let solution = RawSolution { + score: [(10_000_000 + i).into(), 0, 0], + ..Default::default() + }; + let signed_submission = SignedSubmission { solution, ..Default::default() }; + signed_submissions.insert(signed_submission); + } + signed_submissions.put(); + + let caller = frame_benchmarking::whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, T::Currency::minimum_balance() * 10u32.into()); + + }: _(RawOrigin::Signed(caller), solution, c) + verify { + assert!(>::signed_submissions().len() as u32 == c + 1); + } + submit_unsigned { // number of votes in snapshot. let v in (T::BenchmarkingConfig::VOTERS[0]) .. T::BenchmarkingConfig::VOTERS[1]; @@ -234,9 +299,12 @@ frame_benchmarking::benchmarks! { let t in (T::BenchmarkingConfig::TARGETS[0]) .. T::BenchmarkingConfig::TARGETS[1]; // number of assignments, i.e. compact.len(). This means the active nominators, thus must be // a subset of `v` component. - let a in (T::BenchmarkingConfig::ACTIVE_VOTERS[0]) .. T::BenchmarkingConfig::ACTIVE_VOTERS[1]; + let a in + (T::BenchmarkingConfig::ACTIVE_VOTERS[0]) .. T::BenchmarkingConfig::ACTIVE_VOTERS[1]; // number of desired targets. Must be a subset of `t` component. - let d in (T::BenchmarkingConfig::DESIRED_TARGETS[0]) .. 
T::BenchmarkingConfig::DESIRED_TARGETS[1]; + let d in + (T::BenchmarkingConfig::DESIRED_TARGETS[0]) .. + T::BenchmarkingConfig::DESIRED_TARGETS[1]; let witness = SolutionOrSnapshotSize { voters: v, targets: t }; let raw_solution = solution_with_size::(witness, a, d); @@ -249,7 +317,8 @@ frame_benchmarking::benchmarks! { let encoded_call = >::submit_unsigned(raw_solution.clone(), witness).encode(); }: { assert_ok!(>::submit_unsigned(RawOrigin::None.into(), raw_solution, witness)); - let _decoded_snap = as Decode>::decode(&mut &*encoded_snapshot).unwrap(); + let _decoded_snap = as Decode>::decode(&mut &*encoded_snapshot) + .unwrap(); let _decoded_call = as Decode>::decode(&mut &*encoded_call).unwrap(); } verify { assert!(>::queued_solution().is_some()); @@ -263,13 +332,17 @@ frame_benchmarking::benchmarks! { let t in (T::BenchmarkingConfig::TARGETS[0]) .. T::BenchmarkingConfig::TARGETS[1]; // number of assignments, i.e. compact.len(). This means the active nominators, thus must be // a subset of `v` component. - let a in (T::BenchmarkingConfig::ACTIVE_VOTERS[0]) .. T::BenchmarkingConfig::ACTIVE_VOTERS[1]; + let a in + (T::BenchmarkingConfig::ACTIVE_VOTERS[0]) .. T::BenchmarkingConfig::ACTIVE_VOTERS[1]; // number of desired targets. Must be a subset of `t` component. - let d in (T::BenchmarkingConfig::DESIRED_TARGETS[0]) .. T::BenchmarkingConfig::DESIRED_TARGETS[1]; + let d in + (T::BenchmarkingConfig::DESIRED_TARGETS[0]) .. + T::BenchmarkingConfig::DESIRED_TARGETS[1]; // Subtract this percentage from the actual encoded size let f in 0 .. 95; - // Compute a random solution, then work backwards to get the lists of voters, targets, and assignments + // Compute a random solution, then work backwards to get the lists of voters, targets, and + // assignments let witness = SolutionOrSnapshotSize { voters: v, targets: t }; let RawSolution { compact, .. } = solution_with_size::(witness, a, d); let RoundSnapshot { voters, targets } = MultiPhase::::snapshot().unwrap(); @@ -313,7 +386,11 @@ frame_benchmarking::benchmarks! { } verify { let compact = CompactOf::::try_from(index_assignments.as_slice()).unwrap(); let encoding = compact.encode(); - log!(trace, "encoded size prediction = {}", encoded_size_of(index_assignments.as_slice()).unwrap()); + log!( + trace, + "encoded size prediction = {}", + encoded_size_of(index_assignments.as_slice()).unwrap(), + ); log!(trace, "actual encoded size = {}", encoding.len()); assert!(encoding.len() <= desired_size); } @@ -326,9 +403,12 @@ frame_benchmarking::benchmarks! { let t in (T::BenchmarkingConfig::TARGETS[0]) .. T::BenchmarkingConfig::TARGETS[1]; // number of assignments, i.e. compact.len(). This means the active nominators, thus must be // a subset of `v` component. - let a in (T::BenchmarkingConfig::ACTIVE_VOTERS[0]) .. T::BenchmarkingConfig::ACTIVE_VOTERS[1]; + let a in + (T::BenchmarkingConfig::ACTIVE_VOTERS[0]) .. T::BenchmarkingConfig::ACTIVE_VOTERS[1]; // number of desired targets. Must be a subset of `t` component. - let d in (T::BenchmarkingConfig::DESIRED_TARGETS[0]) .. T::BenchmarkingConfig::DESIRED_TARGETS[1]; + let d in + (T::BenchmarkingConfig::DESIRED_TARGETS[0]) .. + T::BenchmarkingConfig::DESIRED_TARGETS[1]; let size = SolutionOrSnapshotSize { voters: v, targets: t }; let raw_solution = solution_with_size::(size, a, d); @@ -340,7 +420,8 @@ frame_benchmarking::benchmarks! 
{ let encoded_snapshot = >::snapshot().unwrap().encode(); }: { assert_ok!(>::feasibility_check(raw_solution, ElectionCompute::Unsigned)); - let _decoded_snap = as Decode>::decode(&mut &*encoded_snapshot).unwrap(); + let _decoded_snap = as Decode>::decode(&mut &*encoded_snapshot) + .unwrap(); } } diff --git a/frame/election-provider-multi-phase/src/helpers.rs b/frame/election-provider-multi-phase/src/helpers.rs index bf5b360499cb4..46eeef0a6bf73 100644 --- a/frame/election-provider-multi-phase/src/helpers.rs +++ b/frame/election-provider-multi-phase/src/helpers.rs @@ -47,13 +47,13 @@ pub fn generate_voter_cache( cache } -/// Create a function the returns the index a voter in the snapshot. +/// Create a function that returns the index of a voter in the snapshot. /// /// The returning index type is the same as the one defined in `T::CompactSolution::Voter`. /// /// ## Warning /// -/// The snapshot must be the same is the one used to create `cache`. +/// Note that this will represent the snapshot data from which the `cache` is generated. pub fn voter_index_fn( cache: &BTreeMap, ) -> impl Fn(&T::AccountId) -> Option> + '_ { @@ -78,7 +78,7 @@ pub fn voter_index_fn_owned( /// /// ## Warning /// -/// The snapshot must be the same is the one used to create `cache`. +/// Note that this will represent the snapshot data from which the `cache` is generated. pub fn voter_index_fn_usize( cache: &BTreeMap, ) -> impl Fn(&T::AccountId) -> Option + '_ { @@ -103,7 +103,7 @@ pub fn voter_index_fn_linear( } } -/// Create a function the returns the index to a target in the snapshot. +/// Create a function that returns the index of a target in the snapshot. /// /// The returned index type is the same as the one defined in `T::CompactSolution::Target`. /// diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index 2864ca518d068..45e04a757f0b3 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -231,7 +231,7 @@ use codec::{Decode, Encode}; use frame_support::{ dispatch::DispatchResultWithPostInfo, ensure, - traits::{Currency, Get, ReservableCurrency}, + traits::{Currency, Get, ReservableCurrency, OnUnbalanced}, weights::Weight, }; use frame_system::{ensure_none, offchain::SendTransactionTypes}; @@ -266,10 +266,14 @@ pub mod helpers; const LOG_TARGET: &'static str = "runtime::election-provider"; +pub mod signed; pub mod unsigned; pub mod weights; -/// The weight declaration of the pallet. +pub use signed::{ + BalanceOf, NegativeImbalanceOf, PositiveImbalanceOf, SignedSubmission, SignedSubmissionOf, + SignedSubmissions, SubmissionIndicesOf, +}; pub use weights::WeightInfo; /// The compact solution type used by this crate. @@ -411,7 +415,7 @@ impl Default for ElectionCompute { /// /// Such a solution should never become effective in anyway before being checked by the /// `Pallet::feasibility_check` -#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] +#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, PartialOrd, Ord)] pub struct RawSolution { /// Compact election edges. pub compact: C, @@ -583,6 +587,44 @@ pub mod pallet { /// this value, based on [`WeightInfo::submit_unsigned`]. type MinerMaxWeight: Get; + /// Maximum number of signed submissions that can be queued. + /// + /// It is best to avoid adjusting this during an election, as it impacts downstream data + /// structures. In particular, `SignedSubmissionIndices` is bounded on this value. 
If you + /// update this value during an election, you _must_ ensure that + /// `SignedSubmissionIndices.len()` is less than or equal to the new value. Otherwise, + /// attempts to submit new solutions may cause a runtime panic. + #[pallet::constant] + type SignedMaxSubmissions: Get; + + /// Maximum weight of a signed solution. + /// + /// This should probably be similar to [`Config::MinerMaxWeight`]. + #[pallet::constant] + type SignedMaxWeight: Get; + + /// Base reward for a signed solution + #[pallet::constant] + type SignedRewardBase: Get>; + + /// Base deposit for a signed solution. + #[pallet::constant] + type SignedDepositBase: Get>; + + /// Per-byte deposit for a signed solution. + #[pallet::constant] + type SignedDepositByte: Get>; + + /// Per-weight deposit for a signed solution. + #[pallet::constant] + type SignedDepositWeight: Get>; + + /// Handler for the slashed deposits. + type SlashHandler: OnUnbalanced>; + + /// Handler for the rewards. + type RewardHandler: OnUnbalanced>; + /// Maximum length (bytes) that the mined solution should consume. /// /// The miner will ensure that the total length of the unsigned solution will not exceed @@ -599,6 +641,7 @@ pub mod pallet { + Eq + Clone + sp_std::fmt::Debug + + Ord + CompactSolution; /// Accuracy used for fallback on-chain election. @@ -656,11 +699,20 @@ pub mod pallet { Phase::Signed | Phase::Off if remaining <= unsigned_deadline && remaining > Zero::zero() => { - // Determine if followed by signed or not. + // our needs vary according to whether or not the unsigned phase follows a signed phase let (need_snapshot, enabled, signed_weight) = if current_phase == Phase::Signed { - // Followed by a signed phase: close the signed phase, no need for snapshot. - // TODO: proper weight https://github.com/paritytech/substrate/pull/7910. - (false, true, Weight::zero()) + // there was previously a signed phase: close the signed phase, no need for snapshot. + // + // Notes: + // + // - `Self::finalize_signed_phase()` also appears in `fn do_elect`. This is + // a guard against the case that `elect` is called prematurely. This adds + // a small amount of overhead, but that is unfortunately unavoidable. + let (_success, weight) = Self::finalize_signed_phase(); + // In the future we can consider disabling the unsigned phase if the signed + // phase completes successfully, but for now we're enabling it unconditionally + // as a defensive measure. + (false, true, weight) } else { // No signed phase: create a new snapshot, definitely `enable` the unsigned // phase. @@ -807,8 +859,12 @@ pub mod pallet { // Store the newly received solution. log!(info, "queued unsigned solution with score {:?}", ready.score); + let ejected_a_solution = >::exists(); >::put(ready); - Self::deposit_event(Event::SolutionStored(ElectionCompute::Unsigned)); + Self::deposit_event(Event::SolutionStored( + ElectionCompute::Unsigned, + ejected_a_solution, + )); Ok(None.into()) } @@ -828,6 +884,79 @@ pub mod pallet { Ok(()) } + /// Submit a solution for the signed phase. + /// + /// The dispatch origin of this call must be __signed__. + /// + /// The solution is potentially queued, based on the claimed score and processed at the end + /// of the signed phase. + /// + /// A deposit is reserved and recorded for the solution. Based on the outcome, the solution + /// might be rewarded, slashed, or get all or a part of the deposit back. + /// + /// # + /// Queue size must be provided as witness data. 
+ /// # + #[pallet::weight(T::WeightInfo::submit(*num_signed_submissions))] + pub fn submit( + origin: OriginFor, + solution: RawSolution>, + num_signed_submissions: u32, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + + // ensure witness data is correct. + ensure!( + num_signed_submissions >= >::decode_len().unwrap_or_default() as u32, + Error::::SignedInvalidWitness, + ); + + // ensure solution is timely. + ensure!(Self::current_phase().is_signed(), Error::::PreDispatchEarlySubmission); + + // NOTE: this is the only case where having separate snapshot would have been better + // because we could just use `decode_len`. But we can create abstractions to do this. + + // build size. Note: this is not needed for weight calc, thus not input. + // unlikely to ever return an error: if phase is signed, snapshot will exist. + let size = Self::snapshot_metadata().ok_or(Error::::MissingSnapshotMetadata)?; + + ensure!( + Self::feasibility_weight_of(&solution, size) < T::SignedMaxWeight::get(), + Error::::SignedTooMuchWeight, + ); + + // create the submission + let deposit = Self::deposit_for(&solution, size); + let submission = SignedSubmission { who: who.clone(), deposit, solution }; + + // insert the submission if the queue has space or it's better than the weakest + // eject the weakest if the queue was full + let mut signed_submissions = Self::signed_submissions(); + let maybe_removed = match signed_submissions.insert(submission) { + // it's an error if we failed to insert a submission: this indicates the queue was + // full but our solution had insufficient score to eject any solution + signed::InsertResult::NotInserted => return Err(Error::::SignedQueueFull.into()), + signed::InsertResult::Inserted => None, + signed::InsertResult::InsertedEjecting(weakest) => Some(weakest), + }; + + // collect deposit. Thereafter, the function cannot fail. + T::Currency::reserve(&who, deposit) + .map_err(|_| Error::::SignedCannotPayDeposit)?; + + let ejected_a_solution = maybe_removed.is_some(); + // if we had to remove the weakest solution, unreserve its deposit + if let Some(removed) = maybe_removed { + let _remainder = T::Currency::unreserve(&removed.who, removed.deposit); + debug_assert!(_remainder.is_zero()); + } + + signed_submissions.put(); + Self::deposit_event(Event::SolutionStored(ElectionCompute::Signed, ejected_a_solution)); + Ok(()) + } + /// Set a solution in the queue, to be handed out to the client of this pallet in the next /// call to `ElectionProvider::elect`. /// @@ -860,7 +989,9 @@ pub mod pallet { /// /// If the solution is signed, this means that it hasn't yet been processed. If the /// solution is unsigned, this means that it has also been processed. - SolutionStored(ElectionCompute), + /// + /// The `bool` is `true` when a previous solution was ejected to make room for this one. + SolutionStored(ElectionCompute, bool), /// The election has been finalized, with `Some` of the given computation, or else if the /// election failed, `None`. ElectionFinalized(Option), @@ -883,8 +1014,20 @@ pub mod pallet { PreDispatchWrongWinnerCount, /// Submission was too weak, score-wise. PreDispatchWeakSubmission, + /// The queue was full, and the solution was not better than any of the existing ones. + SignedQueueFull, + /// The origin failed to pay the deposit. + SignedCannotPayDeposit, + /// Witness data to dispatchable is invalid. 
+ SignedInvalidWitness, + /// The signed submission consumes too much weight + SignedTooMuchWeight, /// OCW submitted solution for wrong round OcwCallWrongEra, + /// Snapshot metadata should exist but didn't. + MissingSnapshotMetadata, + /// `Self::insert_submission` returned an invalid index. + InvalidSubmissionIndex, /// The call is not allowed at this point. CallNotAllowed, } @@ -988,6 +1131,45 @@ pub mod pallet { #[pallet::getter(fn snapshot_metadata)] pub type SnapshotMetadata = StorageValue<_, SolutionOrSnapshotSize>; + // The following storage items collectively comprise `SignedSubmissions`, and should never be + // accessed independently. Instead, get `Self::signed_submissions()`, modify it as desired, and + // then do `signed_submissions.put()` when you're done with it. + + /// The next index to be assigned to an incoming signed submission. + /// + /// Every accepted submission is assigned a unique index; that index is bound to that particular + /// submission for the duration of the election. On election finalization, the next index is + /// reset to 0. + /// + /// We can't just use `SignedSubmissionIndices.len()`, because that's a bounded set; past its + /// capacity, it will simply saturate. We can't just iterate over `SignedSubmissionsMap`, + /// because iteration is slow. Instead, we store the value here. + #[pallet::storage] + pub(crate) type SignedSubmissionNextIndex = StorageValue<_, u32, ValueQuery>; + + /// A sorted, bounded set of `(score, index)`, where each `index` points to a value in + /// `SignedSubmissions`. + /// + /// We never need to process more than a single signed submission at a time. Signed submissions + /// can be quite large, so we're willing to pay the cost of multiple database accesses to access + /// them one at a time instead of reading and decoding all of them at once. + #[pallet::storage] + pub(crate) type SignedSubmissionIndices = + StorageValue<_, SubmissionIndicesOf, ValueQuery>; + + /// Unchecked, signed solutions. + /// + /// Together with `SubmissionIndices`, this stores a bounded set of `SignedSubmissions` while + /// allowing us to keep only a single one in memory at a time. + /// + /// Twox note: the key of the map is an auto-incrementing index which users cannot inspect or + /// affect; we shouldn't need a cryptographically secure hasher. + #[pallet::storage] + pub(crate) type SignedSubmissionsMap = + StorageMap<_, Twox64Concat, u32, SignedSubmissionOf, ValueQuery>; + + // `SignedSubmissions` items end here. + /// The minimum score that each 'untrusted' solution must attain in order to be considered /// feasible. /// @@ -1223,7 +1405,7 @@ impl Pallet { /// 3. Clear all snapshot data. fn rotate_round() { // Inc round. - >::mutate(|r| *r = *r + 1); + >::mutate(|r| *r += 1); // Phase is off now. >::put(Phase::Off); @@ -1242,6 +1424,13 @@ impl Pallet { } fn do_elect() -> Result<(Supports, Weight), ElectionError> { + // We have to unconditionally try finalizing the signed phase here. There are only two + // possibilities: + // + // - signed phase was open, in which case this is essential for correct functioning of the system + // - signed phase was complete or not started, in which case finalization is idempotent and + // inexpensive (1 read of an empty vector). 
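The `SignedSubmission*` storage items above are only meant to be manipulated through the `SignedSubmissions` wrapper added in `signed.rs` further down. A minimal sketch of the intended read-modify-write pattern, assuming the wrapper, `InsertResult`, and errors introduced by this patch (the free function itself is hypothetical):

    use frame_support::{dispatch::DispatchResult, traits::ReservableCurrency};

    fn try_queue_submission<T: crate::Config>(
        submission: crate::signed::SignedSubmissionOf<T>,
    ) -> DispatchResult {
        // Eagerly reads `SignedSubmissionIndices` and `SignedSubmissionNextIndex`;
        // `SignedSubmissionsMap` is only touched lazily, through overlays.
        let mut queue = crate::Pallet::<T>::signed_submissions();
        match queue.insert(submission) {
            crate::signed::InsertResult::NotInserted =>
                return Err(crate::Error::<T>::SignedQueueFull.into()),
            crate::signed::InsertResult::Inserted => (),
            crate::signed::InsertResult::InsertedEjecting(weakest) => {
                // a submission ejected from a full queue gets its deposit back
                let _ = T::Currency::unreserve(&weakest.who, weakest.deposit);
            },
        }
        // Write back indices, next index, and map overlays in one go; the individual
        // storage items are never written directly.
        queue.put();
        Ok(())
    }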
+ let (_, signed_finalize_weight) = Self::finalize_signed_phase(); >::take() .map_or_else( || match T::Fallback::get() { @@ -1261,7 +1450,7 @@ impl Pallet { if Self::round() != 1 { log!(info, "Finalized election round with compute {:?}.", compute); } - (supports, weight) + (supports, weight.saturating_add(signed_finalize_weight)) }) .map_err(|err| { Self::deposit_event(Event::ElectionFinalized(None)); @@ -1309,7 +1498,14 @@ mod feasibility_check { //! more. The best way to audit and review these tests is to try and come up with a solution //! that is invalid, but gets through the system as valid. - use super::{mock::*, *}; + use super::*; + use crate::{ + mock::{ + MultiPhase, Runtime, roll_to, TargetIndex, raw_solution, EpochLength, UnsignedPhase, + SignedPhase, VoterIndex, ExtBuilder, + }, + }; + use frame_support::assert_noop; const COMPUTE: ElectionCompute = ElectionCompute::OnChain; @@ -1476,16 +1672,24 @@ mod feasibility_check { #[cfg(test)] mod tests { - use super::{mock::*, Event, *}; + use super::*; + use crate::{ + Phase, + mock::{ + ExtBuilder, MultiPhase, Runtime, roll_to, MockWeightInfo, AccountId, TargetIndex, + Targets, multi_phase_events, System, SignedMaxSubmissions, + }, + }; use frame_election_provider_support::ElectionProvider; + use frame_support::{assert_noop, assert_ok}; use sp_npos_elections::Support; #[test] fn phase_rotation_works() { ExtBuilder::default().build_and_execute(|| { // 0 ------- 15 ------- 25 ------- 30 ------- ------- 45 ------- 55 ------- 60 - // | | | | - // Signed Unsigned Signed Unsigned + // | | | | | | + // Signed Unsigned Elect Signed Unsigned Elect assert_eq!(System::block_number(), 0); assert_eq!(MultiPhase::current_phase(), Phase::Off); @@ -1644,6 +1848,44 @@ mod tests { assert!(MultiPhase::snapshot_metadata().is_none()); assert!(MultiPhase::desired_targets().is_none()); assert!(MultiPhase::queued_solution().is_none()); + assert!(MultiPhase::signed_submissions().is_empty()); + }) + } + + #[test] + fn early_termination_with_submissions() { + // an early termination in the signed phase, with no queued solution. + ExtBuilder::default().build_and_execute(|| { + // signed phase started at block 15 and will end at 25. + roll_to(14); + assert_eq!(MultiPhase::current_phase(), Phase::Off); + + roll_to(15); + assert_eq!(multi_phase_events(), vec![Event::SignedPhaseStarted(1)]); + assert_eq!(MultiPhase::current_phase(), Phase::Signed); + assert_eq!(MultiPhase::round(), 1); + + // fill the queue with signed submissions + for s in 0..SignedMaxSubmissions::get() { + let solution = RawSolution { score: [(5 + s).into(), 0, 0], ..Default::default() }; + assert_ok!(MultiPhase::submit( + crate::mock::Origin::signed(99), + solution, + MultiPhase::signed_submissions().len() as u32 + )); + } + + // an unexpected call to elect. + roll_to(20); + assert!(MultiPhase::elect().is_ok()); + + // all storage items must be cleared. + assert_eq!(MultiPhase::round(), 2); + assert!(MultiPhase::snapshot().is_none()); + assert!(MultiPhase::snapshot_metadata().is_none()); + assert!(MultiPhase::desired_targets().is_none()); + assert!(MultiPhase::queued_solution().is_none()); + assert!(MultiPhase::signed_submissions().is_empty()); }) } diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index bd035aaf82969..8840e2b935d35 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -260,8 +260,13 @@ parameter_types! 
{ pub static DesiredTargets: u32 = 2; pub static SignedPhase: u64 = 10; pub static UnsignedPhase: u64 = 5; - pub static MaxSignedSubmissions: u32 = 5; - + pub static SignedMaxSubmissions: u32 = 5; + pub static SignedDepositBase: Balance = 5; + pub static SignedDepositByte: Balance = 0; + pub static SignedDepositWeight: Balance = 0; + pub static SignedRewardBase: Balance = 7; + pub static SignedRewardMax: Balance = 10; + pub static SignedMaxWeight: Weight = BlockWeights::get().max_block; pub static MinerMaxIterations: u32 = 5; pub static MinerTxPriority: u64 = 100; pub static SolutionImprovementThreshold: Perbill = Perbill::zero(); @@ -304,6 +309,27 @@ impl multi_phase::weights::WeightInfo for DualMockWeightInfo { <() as multi_phase::weights::WeightInfo>::on_initialize_open_unsigned_without_snapshot() } } + fn finalize_signed_phase_accept_solution() -> Weight { + if MockWeightInfo::get() { + Zero::zero() + } else { + <() as multi_phase::weights::WeightInfo>::finalize_signed_phase_accept_solution() + } + } + fn finalize_signed_phase_reject_solution() -> Weight { + if MockWeightInfo::get() { + Zero::zero() + } else { + <() as multi_phase::weights::WeightInfo>::finalize_signed_phase_reject_solution() + } + } + fn submit(c: u32) -> Weight { + if MockWeightInfo::get() { + Zero::zero() + } else { + <() as multi_phase::weights::WeightInfo>::submit(c) + } + } fn elect_queued() -> Weight { if MockWeightInfo::get() { Zero::zero() @@ -342,6 +368,14 @@ impl crate::Config for Runtime { type MinerMaxWeight = MinerMaxWeight; type MinerMaxLength = MinerMaxLength; type MinerTxPriority = MinerTxPriority; + type SignedRewardBase = SignedRewardBase; + type SignedDepositBase = SignedDepositBase; + type SignedDepositByte = (); + type SignedDepositWeight = (); + type SignedMaxWeight = SignedMaxWeight; + type SignedMaxSubmissions = SignedMaxSubmissions; + type SlashHandler = (); + type RewardHandler = (); type DataProvider = StakingMock; type WeightInfo = DualMockWeightInfo; type BenchmarkingConfig = (); @@ -440,6 +474,20 @@ impl ExtBuilder { VOTERS.with(|v| v.borrow_mut().push((who, stake, targets))); self } + pub fn signed_max_submission(self, count: u32) -> Self { + ::set(count); + self + } + pub fn signed_deposit(self, base: u64, byte: u64, weight: u64) -> Self { + ::set(base); + ::set(byte); + ::set(weight); + self + } + pub fn signed_weight(self, weight: Weight) -> Self { + ::set(weight); + self + } pub fn build(self) -> sp_io::TestExternalities { sp_tracing::try_init_simple(); let mut storage = @@ -481,3 +529,7 @@ impl ExtBuilder { self.build().execute_with(test) } } + +pub(crate) fn balances(who: &u64) -> (u64, u64) { + (Balances::free_balance(who), Balances::reserved_balance(who)) +} diff --git a/frame/election-provider-multi-phase/src/signed.rs b/frame/election-provider-multi-phase/src/signed.rs new file mode 100644 index 0000000000000..ba1123c1331ad --- /dev/null +++ b/frame/election-provider-multi-phase/src/signed.rs @@ -0,0 +1,920 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The signed phase implementation. + +use crate::{ + CompactOf, Config, ElectionCompute, Pallet, RawSolution, ReadySolution, SolutionOrSnapshotSize, + Weight, WeightInfo, QueuedSolution, SignedSubmissionsMap, SignedSubmissionIndices, + SignedSubmissionNextIndex, +}; +use codec::{Encode, Decode, HasCompact}; +use frame_support::{ + storage::bounded_btree_map::BoundedBTreeMap, + traits::{Currency, Get, OnUnbalanced, ReservableCurrency}, + DebugNoBound, +}; +use sp_arithmetic::traits::SaturatedConversion; +use sp_npos_elections::{is_score_better, CompactSolution, ElectionScore}; +use sp_runtime::{ + RuntimeDebug, + traits::{Saturating, Zero}, +}; +use sp_std::{ + cmp::Ordering, + collections::{btree_map::BTreeMap, btree_set::BTreeSet}, + ops::Deref, +}; + +/// A raw, unchecked signed submission. +/// +/// This is just a wrapper around [`RawSolution`] and some additional info. +#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, Default)] +pub struct SignedSubmission { + /// Who submitted this solution. + pub who: AccountId, + /// The deposit reserved for storing this solution. + pub deposit: Balance, + /// The raw solution itself. + pub solution: RawSolution, +} + +impl Ord + for SignedSubmission +where + AccountId: Ord, + Balance: Ord + HasCompact, + CompactSolution: Ord, + RawSolution: Ord, +{ + fn cmp(&self, other: &Self) -> Ordering { + self.solution + .score + .cmp(&other.solution.score) + .then_with(|| self.solution.cmp(&other.solution)) + .then_with(|| self.deposit.cmp(&other.deposit)) + .then_with(|| self.who.cmp(&other.who)) + } +} + +impl PartialOrd + for SignedSubmission +where + AccountId: Ord, + Balance: Ord + HasCompact, + CompactSolution: Ord, + RawSolution: Ord, +{ + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +pub type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; +pub type PositiveImbalanceOf = <::Currency as Currency< + ::AccountId, +>>::PositiveImbalance; +pub type NegativeImbalanceOf = <::Currency as Currency< + ::AccountId, +>>::NegativeImbalance; +pub type SignedSubmissionOf = + SignedSubmission<::AccountId, BalanceOf, CompactOf>; + +pub type SubmissionIndicesOf = + BoundedBTreeMap::SignedMaxSubmissions>; + +/// Outcome of [`SignedSubmissions::insert`]. +pub enum InsertResult { + /// The submission was not inserted because the queue was full and the submission had + /// insufficient score to eject a prior solution from the queue. + NotInserted, + /// The submission was inserted successfully without ejecting a solution. + Inserted, + /// The submission was inserted successfully. As the queue was full, this operation ejected a + /// prior solution, contained in this variant. + InsertedEjecting(SignedSubmissionOf), +} + +/// Mask type which pretends to be a set of `SignedSubmissionOf`, while in fact delegating to the +/// actual implementations in `SignedSubmissionIndices`, `SignedSubmissionsMap`, and +/// `SignedSubmissionNextIndex`. 
+#[cfg_attr(feature = "std", derive(DebugNoBound))] +pub struct SignedSubmissions { + indices: SubmissionIndicesOf, + next_idx: u32, + insertion_overlay: BTreeMap>, + deletion_overlay: BTreeSet, +} + +impl SignedSubmissions { + /// Get the signed submissions from storage. + pub fn get() -> Self { + let submissions = SignedSubmissions { + indices: SignedSubmissionIndices::::get(), + next_idx: SignedSubmissionNextIndex::::get(), + insertion_overlay: BTreeMap::new(), + deletion_overlay: BTreeSet::new(), + }; + // validate that the stored state is sane + debug_assert!(submissions.indices.values().copied().max().map_or( + true, + |max_idx| submissions.next_idx > max_idx, + )); + submissions + } + + /// Put the signed submissions back into storage. + pub fn put(mut self) { + // validate that we're going to write only sane things to storage + debug_assert!(self.insertion_overlay.keys().copied().max().map_or( + true, + |max_idx| self.next_idx > max_idx, + )); + debug_assert!(self.indices.values().copied().max().map_or( + true, + |max_idx| self.next_idx > max_idx, + )); + + SignedSubmissionIndices::::put(self.indices); + SignedSubmissionNextIndex::::put(self.next_idx); + for key in self.deletion_overlay { + self.insertion_overlay.remove(&key); + SignedSubmissionsMap::::remove(key); + } + for (key, value) in self.insertion_overlay { + SignedSubmissionsMap::::insert(key, value); + } + } + + /// Get the submission at a particular index. + fn get_submission(&self, idx: u32) -> Option> { + if self.deletion_overlay.contains(&idx) { + // Note: can't actually remove the item from the insertion overlay (if present) + // because we don't want to use `&mut self` here. There may be some kind of + // `RefCell` optimization possible here in the future. + None + } else { + self.insertion_overlay + .get(&idx) + .cloned() + .or_else(|| SignedSubmissionsMap::::try_get(idx).ok()) + } + } + + /// Perform three operations: + /// + /// - Remove a submission (identified by score) + /// - Insert a new submission (identified by score and insertion index) + /// - Return the submission which was removed. + /// + /// Note: in the case that `weakest_score` is not present in `self.indices`, this will return + /// `None` without inserting the new submission and without further notice. + /// + /// Note: this does not enforce any ordering relation between the submission removed and that + /// inserted. + /// + /// Note: this doesn't insert into `insertion_overlay`, the optional new insertion must be + /// inserted into `insertion_overlay` to keep the variable `self` in a valid state. + fn swap_out_submission( + &mut self, + remove_score: ElectionScore, + insert: Option<(ElectionScore, u32)>, + ) -> Option> { + let remove_idx = self.indices.remove(&remove_score)?; + if let Some((insert_score, insert_idx)) = insert { + self.indices + .try_insert(insert_score, insert_idx) + .expect("just removed an item, we must be under capacity; qed"); + } + + self.insertion_overlay.remove(&remove_idx).or_else(|| { + (!self.deletion_overlay.contains(&remove_idx)).then(|| { + self.deletion_overlay.insert(remove_idx); + SignedSubmissionsMap::::try_get(remove_idx).ok() + }).flatten() + }) + } + + /// Iterate through the set of signed submissions in order of increasing score. 
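Because the `Ord` impl above orders primarily by claimed score, iteration yields submissions from weakest to strongest, and `pop_last` (below) always returns the current best; `finalize_signed_phase` relies on exactly that. A test-style sketch under this patch's mock runtime (the scores and the submitting account are illustrative):

    #[test]
    fn iteration_is_ascending_by_score() {
        use crate::{mock::{ExtBuilder, MultiPhase, Origin, roll_to}, RawSolution};
        use frame_support::assert_ok;

        ExtBuilder::default().build_and_execute(|| {
            roll_to(15); // enter the signed phase
            for s in vec![7u64, 5, 9] {
                let solution = RawSolution { score: [s.into(), 0, 0], ..Default::default() };
                let witness = MultiPhase::signed_submissions().len() as u32;
                assert_ok!(MultiPhase::submit(Origin::signed(99), solution, witness));
            }
            let scores = MultiPhase::signed_submissions()
                .iter()
                .map(|s| s.solution.score[0])
                .collect::<Vec<_>>();
            // weakest first, best last
            assert_eq!(scores, vec![5, 7, 9]);
        })
    }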
+ pub fn iter(&self) -> impl '_ + Iterator> { + self.indices.iter().filter_map(move |(_score, &idx)| { + let maybe_submission = self.get_submission(idx); + if maybe_submission.is_none() { + log!( + error, + "SignedSubmissions internal state is invalid (idx {}); \ + there is a logic error in code handling signed solution submissions", + idx, + ) + } + maybe_submission + }) + } + + /// Empty the set of signed submissions, returning an iterator of signed submissions in + /// arbitrary order. + /// + /// Note that if the iterator is dropped without consuming all elements, not all may be removed + /// from the underlying `SignedSubmissionsMap`, putting the storages into an invalid state. + /// + /// Note that, like `put`, this function consumes `Self` and modifies storage. + fn drain(mut self) -> impl Iterator> { + SignedSubmissionIndices::::kill(); + SignedSubmissionNextIndex::::kill(); + let insertion_overlay = sp_std::mem::take(&mut self.insertion_overlay); + SignedSubmissionsMap::::drain() + .filter(move |(k, _v)| !self.deletion_overlay.contains(k)) + .map(|(_k, v)| v) + .chain(insertion_overlay.into_iter().map(|(_k, v)| v)) + } + + /// Decode the length of the signed submissions without actually reading the entire struct into + /// memory. + /// + /// Note that if you hold an instance of `SignedSubmissions`, this function does _not_ + /// track its current length. This only decodes what is currently stored in memory. + pub fn decode_len() -> Option { + SignedSubmissionIndices::::decode_len() + } + + /// Insert a new signed submission into the set. + /// + /// In the event that the new submission is not better than the current weakest according + /// to `is_score_better`, we do not change anything. + pub fn insert( + &mut self, + submission: SignedSubmissionOf, + ) -> InsertResult { + // verify the expectation that we never reuse an index + debug_assert!(!self.indices.values().any(|&idx| idx == self.next_idx)); + + let weakest = match self.indices.try_insert(submission.solution.score, self.next_idx) { + Ok(Some(prev_idx)) => { + // a submission of equal score was already present in the set; + // no point editing the actual backing map as we know that the newer solution can't + // be better than the old. However, we do need to put the old value back. + self.indices + .try_insert(submission.solution.score, prev_idx) + .expect("didn't change the map size; qed"); + return InsertResult::NotInserted; + } + Ok(None) => { + // successfully inserted into the set; no need to take out weakest member + None + } + Err((insert_score, insert_idx)) => { + // could not insert into the set because it is full. + // note that we short-circuit return here in case the iteration produces `None`. + // If there wasn't a weakest entry to remove, then there must be a capacity of 0, + // which means that we can't meaningfully proceed. + let weakest_score = match self.indices.iter().next() { + None => return InsertResult::NotInserted, + Some((score, _)) => *score, + }; + let threshold = T::SolutionImprovementThreshold::get(); + + // if we haven't improved on the weakest score, don't change anything. 
+ if !is_score_better(insert_score, weakest_score, threshold) { + return InsertResult::NotInserted; + } + + self.swap_out_submission(weakest_score, Some((insert_score, insert_idx))) + } + }; + + // we've taken out the weakest, so update the storage map and the next index + debug_assert!(!self.insertion_overlay.contains_key(&self.next_idx)); + self.insertion_overlay.insert(self.next_idx, submission); + debug_assert!(!self.deletion_overlay.contains(&self.next_idx)); + self.next_idx += 1; + match weakest { + Some(weakest) => InsertResult::InsertedEjecting(weakest), + None => InsertResult::Inserted, + } + } + + /// Remove the signed submission with the highest score from the set. + pub fn pop_last(&mut self) -> Option> { + let (score, _) = self.indices.iter().rev().next()?; + // deref in advance to prevent mutable-immutable borrow conflict + let score = *score; + self.swap_out_submission(score, None) + } +} + +impl Deref for SignedSubmissions { + type Target = SubmissionIndicesOf; + + fn deref(&self) -> &Self::Target { + &self.indices + } +} + +impl Pallet { + /// `Self` accessor for `SignedSubmission`. + pub fn signed_submissions() -> SignedSubmissions { + SignedSubmissions::::get() + } + + /// Finish the signed phase. Process the signed submissions from best to worse until a valid one + /// is found, rewarding the best one and slashing the invalid ones along the way. + /// + /// Returns true if we have a good solution in the signed phase. + /// + /// This drains the [`SignedSubmissions`], potentially storing the best valid one in + /// [`QueuedSolution`]. + pub fn finalize_signed_phase() -> (bool, Weight) { + let mut all_submissions = Self::signed_submissions(); + let mut found_solution = false; + let mut weight = T::DbWeight::get().reads(1); + + let SolutionOrSnapshotSize { voters, targets } = + Self::snapshot_metadata().unwrap_or_default(); + + let reward = T::SignedRewardBase::get(); + + while let Some(best) = all_submissions.pop_last() { + let SignedSubmission { solution, who, deposit} = best; + let active_voters = solution.compact.voter_count() as u32; + let feasibility_weight = { + // defensive only: at the end of signed phase, snapshot will exits. + let desired_targets = Self::desired_targets().unwrap_or_default(); + T::WeightInfo::feasibility_check( + voters, + targets, + active_voters, + desired_targets, + ) + }; + // the feasibility check itself has some weight + weight = weight.saturating_add(feasibility_weight); + match Self::feasibility_check(solution, ElectionCompute::Signed) { + Ok(ready_solution) => { + Self::finalize_signed_phase_accept_solution( + ready_solution, + &who, + deposit, + reward, + ); + found_solution = true; + + weight = weight + .saturating_add(T::WeightInfo::finalize_signed_phase_accept_solution()); + break; + } + Err(_) => { + Self::finalize_signed_phase_reject_solution(&who, deposit); + weight = weight + .saturating_add(T::WeightInfo::finalize_signed_phase_reject_solution()); + } + } + } + + // Any unprocessed solution is pointless to even consider. Feasible or malicious, + // they didn't end up being used. Unreserve the bonds. + let discarded = all_submissions.len(); + for SignedSubmission { who, deposit, .. 
} in all_submissions.drain() {
+ let _remaining = T::Currency::unreserve(&who, deposit);
+ weight = weight.saturating_add(T::DbWeight::get().writes(1));
+ debug_assert!(_remaining.is_zero());
+ }
+
+ debug_assert!(!SignedSubmissionIndices::::exists());
+ debug_assert!(!SignedSubmissionNextIndex::::exists());
+ debug_assert!(SignedSubmissionsMap::::iter().next().is_none());
+
+ log!(debug, "closed signed phase, found solution? {}, discarded {}", found_solution, discarded);
+ (found_solution, weight)
+ }
+
+ /// Helper function for the case where a solution is accepted in the signed phase.
+ ///
+ /// Extracted to facilitate weight calculation.
+ ///
+ /// Infallible
+ pub fn finalize_signed_phase_accept_solution(
+ ready_solution: ReadySolution,
+ who: &T::AccountId,
+ deposit: BalanceOf,
+ reward: BalanceOf,
+ ) {
+ // write this ready solution.
+ >::put(ready_solution);
+
+ // emit reward event
+ Self::deposit_event(crate::Event::Rewarded(who.clone()));
+
+ // unreserve deposit.
+ let _remaining = T::Currency::unreserve(who, deposit);
+ debug_assert!(_remaining.is_zero());
+
+ // Reward.
+ let positive_imbalance = T::Currency::deposit_creating(who, reward);
+ T::RewardHandler::on_unbalanced(positive_imbalance);
+ }
+
+ /// Helper function for the case where a solution is rejected in the signed phase.
+ ///
+ /// Extracted to facilitate weight calculation.
+ ///
+ /// Infallible
+ pub fn finalize_signed_phase_reject_solution(who: &T::AccountId, deposit: BalanceOf) {
+ Self::deposit_event(crate::Event::Slashed(who.clone()));
+ let (negative_imbalance, _remaining) = T::Currency::slash_reserved(who, deposit);
+ debug_assert!(_remaining.is_zero());
+ T::SlashHandler::on_unbalanced(negative_imbalance);
+ }
+
+ /// The feasibility weight of the given raw solution.
+ pub fn feasibility_weight_of(
+ solution: &RawSolution>,
+ size: SolutionOrSnapshotSize,
+ ) -> Weight {
+ T::WeightInfo::feasibility_check(
+ size.voters,
+ size.targets,
+ solution.compact.voter_count() as u32,
+ solution.compact.unique_targets().len() as u32,
+ )
+ }
+
+ /// Collect a sufficient deposit to store this solution.
+ ///
+ /// The deposit is composed of 3 main elements:
+ ///
+ /// 1. base deposit, fixed for all submissions.
+ /// 2. a per-byte deposit, for renting the state usage.
+ /// 3.
a per-weight deposit, for the potential weight usage in an upcoming on_initialize + pub fn deposit_for( + solution: &RawSolution>, + size: SolutionOrSnapshotSize, + ) -> BalanceOf { + let encoded_len: u32 = solution.encoded_size().saturated_into(); + let encoded_len: BalanceOf = encoded_len.into(); + let feasibility_weight = Self::feasibility_weight_of(solution, size); + + let len_deposit = T::SignedDepositByte::get().saturating_mul(encoded_len); + let weight_deposit = T::SignedDepositWeight::get().saturating_mul(feasibility_weight.saturated_into()); + + T::SignedDepositBase::get().saturating_add(len_deposit).saturating_add(weight_deposit) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + Phase, Error, + mock::{ + balances, ExtBuilder, MultiPhase, Origin, raw_solution, roll_to, Runtime, + SignedMaxSubmissions, SignedMaxWeight, + }, + }; + use frame_support::{dispatch::DispatchResult, assert_noop, assert_storage_noop, assert_ok}; + + fn submit_with_witness( + origin: Origin, + solution: RawSolution>, + ) -> DispatchResult { + MultiPhase::submit(origin, solution, MultiPhase::signed_submissions().len() as u32) + } + + #[test] + fn cannot_submit_too_early() { + ExtBuilder::default().build_and_execute(|| { + roll_to(2); + assert_eq!(MultiPhase::current_phase(), Phase::Off); + + // create a temp snapshot only for this test. + MultiPhase::create_snapshot().unwrap(); + let solution = raw_solution(); + + assert_noop!( + submit_with_witness(Origin::signed(10), solution), + Error::::PreDispatchEarlySubmission, + ); + }) + } + + #[test] + fn wrong_witness_fails() { + ExtBuilder::default().build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_signed()); + + let solution = raw_solution(); + // submit this once correctly + assert_ok!(submit_with_witness(Origin::signed(99), solution.clone())); + assert_eq!(MultiPhase::signed_submissions().len(), 1); + + // now try and cheat by passing a lower queue length + assert_noop!( + MultiPhase::submit(Origin::signed(99), solution, 0), + Error::::SignedInvalidWitness, + ); + }) + } + + #[test] + fn should_pay_deposit() { + ExtBuilder::default().build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_signed()); + + let solution = raw_solution(); + assert_eq!(balances(&99), (100, 0)); + + assert_ok!(submit_with_witness(Origin::signed(99), solution)); + + assert_eq!(balances(&99), (95, 5)); + assert_eq!(MultiPhase::signed_submissions().iter().next().unwrap().deposit, 5); + }) + } + + #[test] + fn good_solution_is_rewarded() { + ExtBuilder::default().build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_signed()); + + let solution = raw_solution(); + assert_eq!(balances(&99), (100, 0)); + + assert_ok!(submit_with_witness(Origin::signed(99), solution)); + assert_eq!(balances(&99), (95, 5)); + + assert!(MultiPhase::finalize_signed_phase().0); + assert_eq!(balances(&99), (100 + 7, 0)); + }) + } + + #[test] + fn bad_solution_is_slashed() { + ExtBuilder::default().build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_signed()); + + let mut solution = raw_solution(); + assert_eq!(balances(&99), (100, 0)); + + // make the solution invalid. + solution.score[0] += 1; + + assert_ok!(submit_with_witness(Origin::signed(99), solution)); + assert_eq!(balances(&99), (95, 5)); + + // no good solution was stored. + assert!(!MultiPhase::finalize_signed_phase().0); + // and the bond is gone. 
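A worked example of the formula in `deposit_for` above, plugging in the mock parameters configured earlier in this patch (`SignedDepositBase = 5`, `SignedDepositByte = 0`, `SignedDepositWeight = 0`); the standalone function is purely illustrative:

    // deposit = base + per_byte * encoded_len + per_weight * feasibility_weight
    fn example_deposit(encoded_len: u64, feasibility_weight: u64) -> u64 {
        let base = 5;
        let per_byte = 0;
        let per_weight = 0;
        base + per_byte * encoded_len + per_weight * feasibility_weight
    }

Under these parameters every solution costs exactly 5, which is what `should_pay_deposit` above observes: account 99 moves from (100 free, 0 reserved) to (95 free, 5 reserved) after `submit`.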
+ assert_eq!(balances(&99), (95, 0)); + }) + } + + #[test] + fn suppressed_solution_gets_bond_back() { + ExtBuilder::default().build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_signed()); + + let mut solution = raw_solution(); + assert_eq!(balances(&99), (100, 0)); + assert_eq!(balances(&999), (100, 0)); + + // submit as correct. + assert_ok!(submit_with_witness(Origin::signed(99), solution.clone())); + + // make the solution invalid and weaker. + solution.score[0] -= 1; + assert_ok!(submit_with_witness(Origin::signed(999), solution)); + assert_eq!(balances(&99), (95, 5)); + assert_eq!(balances(&999), (95, 5)); + + // _some_ good solution was stored. + assert!(MultiPhase::finalize_signed_phase().0); + + // 99 is rewarded. + assert_eq!(balances(&99), (100 + 7, 0)); + // 999 gets everything back. + assert_eq!(balances(&999), (100, 0)); + }) + } + + #[test] + fn cannot_submit_worse_with_full_queue() { + ExtBuilder::default().build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_signed()); + + for s in 0..SignedMaxSubmissions::get() { + // score is always getting better + let solution = RawSolution { score: [(5 + s).into(), 0, 0], ..Default::default() }; + assert_ok!(submit_with_witness(Origin::signed(99), solution)); + } + + + // weaker. + let solution = RawSolution { score: [4, 0, 0], ..Default::default() }; + + assert_noop!( + submit_with_witness(Origin::signed(99), solution), + Error::::SignedQueueFull, + ); + }) + } + + #[test] + fn weakest_is_removed_if_better_provided() { + ExtBuilder::default().build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_signed()); + + for s in 0..SignedMaxSubmissions::get() { + // score is always getting better + let solution = RawSolution { score: [(5 + s).into(), 0, 0], ..Default::default() }; + assert_ok!(submit_with_witness(Origin::signed(99), solution)); + } + + assert_eq!( + MultiPhase::signed_submissions() + .iter() + .map(|s| s.solution.score[0]) + .collect::>(), + vec![5, 6, 7, 8, 9] + ); + + // better. + let solution = RawSolution { score: [20, 0, 0], ..Default::default() }; + assert_ok!(submit_with_witness(Origin::signed(99), solution)); + + // the one with score 5 was rejected, the new one inserted. + assert_eq!( + MultiPhase::signed_submissions() + .iter() + .map(|s| s.solution.score[0]) + .collect::>(), + vec![6, 7, 8, 9, 20] + ); + }) + } + + #[test] + fn replace_weakest_works() { + ExtBuilder::default().build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_signed()); + + for s in 1..SignedMaxSubmissions::get() { + // score is always getting better + let solution = RawSolution { score: [(5 + s).into(), 0, 0], ..Default::default() }; + assert_ok!(submit_with_witness(Origin::signed(99), solution)); + } + + let solution = RawSolution { score: [4, 0, 0], ..Default::default() }; + assert_ok!(submit_with_witness(Origin::signed(99), solution)); + + assert_eq!( + MultiPhase::signed_submissions() + .iter() + .map(|s| s.solution.score[0]) + .collect::>(), + vec![4, 6, 7, 8, 9], + ); + + // better. + let solution = RawSolution { score: [5, 0, 0], ..Default::default() }; + assert_ok!(submit_with_witness(Origin::signed(99), solution)); + + // the one with score 5 was rejected, the new one inserted. 
+ assert_eq!( + MultiPhase::signed_submissions() + .iter() + .map(|s| s.solution.score[0]) + .collect::>(), + vec![5, 6, 7, 8, 9], + ); + }) + } + + #[test] + fn early_ejected_solution_gets_bond_back() { + ExtBuilder::default().signed_deposit(2, 0, 0).build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_signed()); + + for s in 0..SignedMaxSubmissions::get() { + // score is always getting better + let solution = RawSolution { score: [(5 + s).into(), 0, 0], ..Default::default() }; + assert_ok!(submit_with_witness(Origin::signed(99), solution)); + } + + assert_eq!(balances(&99).1, 2 * 5); + assert_eq!(balances(&999).1, 0); + + // better. + let solution = RawSolution { score: [20, 0, 0], ..Default::default() }; + assert_ok!(submit_with_witness(Origin::signed(999), solution)); + + // got one bond back. + assert_eq!(balances(&99).1, 2 * 4); + assert_eq!(balances(&999).1, 2); + }) + } + + #[test] + fn equally_good_solution_is_not_accepted() { + ExtBuilder::default().signed_max_submission(3).build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_signed()); + + for i in 0..SignedMaxSubmissions::get() { + let solution = RawSolution { score: [(5 + i).into(), 0, 0], ..Default::default() }; + assert_ok!(submit_with_witness(Origin::signed(99), solution)); + } + assert_eq!( + MultiPhase::signed_submissions() + .iter() + .map(|s| s.solution.score[0]) + .collect::>(), + vec![5, 6, 7] + ); + + // 5 is not accepted. This will only cause processing with no benefit. + let solution = RawSolution { score: [5, 0, 0], ..Default::default() }; + assert_noop!( + submit_with_witness(Origin::signed(99), solution), + Error::::SignedQueueFull, + ); + }) + } + + #[test] + fn all_in_one_signed_submission_scenario() { + // a combination of: + // - good_solution_is_rewarded + // - bad_solution_is_slashed + // - suppressed_solution_gets_bond_back + ExtBuilder::default().build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_signed()); + + assert_eq!(balances(&99), (100, 0)); + assert_eq!(balances(&999), (100, 0)); + assert_eq!(balances(&9999), (100, 0)); + let solution = raw_solution(); + + // submit a correct one. + assert_ok!(submit_with_witness(Origin::signed(99), solution.clone())); + + // make the solution invalidly better and submit. This ought to be slashed. + let mut solution_999 = solution.clone(); + solution_999.score[0] += 1; + assert_ok!(submit_with_witness(Origin::signed(999), solution_999)); + + // make the solution invalidly worse and submit. This ought to be suppressed and + // returned. + let mut solution_9999 = solution.clone(); + solution_9999.score[0] -= 1; + assert_ok!(submit_with_witness(Origin::signed(9999), solution_9999)); + + assert_eq!( + MultiPhase::signed_submissions().iter().map(|x| x.who).collect::>(), + vec![9999, 99, 999] + ); + + // _some_ good solution was stored. + assert!(MultiPhase::finalize_signed_phase().0); + + // 99 is rewarded. + assert_eq!(balances(&99), (100 + 7, 0)); + // 999 is slashed. + assert_eq!(balances(&999), (95, 0)); + // 9999 gets everything back. 
+ assert_eq!(balances(&9999), (100, 0)); + }) + } + + #[test] + fn cannot_consume_too_much_future_weight() { + ExtBuilder::default().signed_weight(40).mock_weight_info(true).build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_signed()); + + let (solution, witness) = MultiPhase::mine_solution(2).unwrap(); + let solution_weight = ::WeightInfo::feasibility_check( + witness.voters, + witness.targets, + solution.compact.voter_count() as u32, + solution.compact.unique_targets().len() as u32, + ); + // default solution will have 5 edges (5 * 5 + 10) + assert_eq!(solution_weight, 35); + assert_eq!(solution.compact.voter_count(), 5); + assert_eq!(::SignedMaxWeight::get(), 40); + + assert_ok!(submit_with_witness(Origin::signed(99), solution.clone())); + + ::set(30); + + // note: resubmitting the same solution is technically okay as long as the queue has + // space. + assert_noop!( + submit_with_witness(Origin::signed(99), solution), + Error::::SignedTooMuchWeight, + ); + }) + } + + #[test] + fn insufficient_deposit_doesnt_store_submission() { + ExtBuilder::default().build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_signed()); + + let solution = raw_solution(); + + assert_eq!(balances(&123), (0, 0)); + assert_noop!( + submit_with_witness(Origin::signed(123), solution), + Error::::SignedCannotPayDeposit, + ); + + assert_eq!(balances(&123), (0, 0)); + }) + } + + // given a full queue, and a solution which _should_ be allowed in, but the proposer of this + // new solution has insufficient deposit, we should not modify storage at all + #[test] + fn insufficient_deposit_with_full_queue_works_properly() { + ExtBuilder::default().build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_signed()); + + for s in 0..SignedMaxSubmissions::get() { + // score is always getting better + let solution = RawSolution { score: [(5 + s).into(), 0, 0], ..Default::default() }; + assert_ok!(submit_with_witness(Origin::signed(99), solution)); + } + + // this solution has a higher score than any in the queue + let solution = RawSolution { + score: [(5 + SignedMaxSubmissions::get()).into(), 0, 0], + ..Default::default() + }; + + assert_eq!(balances(&123), (0, 0)); + assert_noop!( + submit_with_witness(Origin::signed(123), solution), + Error::::SignedCannotPayDeposit, + ); + + assert_eq!(balances(&123), (0, 0)); + }) + } + + #[test] + fn finalize_signed_phase_is_idempotent_given_no_submissions() { + ExtBuilder::default().build_and_execute(|| { + for block_number in 0..25 { + roll_to(block_number); + + assert_eq!(SignedSubmissions::::decode_len().unwrap_or_default(), 0); + assert_storage_noop!(MultiPhase::finalize_signed_phase()); + } + }) + } + + #[test] + fn finalize_signed_phase_is_idempotent_given_submissions() { + ExtBuilder::default().build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_signed()); + + let solution = raw_solution(); + + // submit a correct one. + assert_ok!(submit_with_witness(Origin::signed(99), solution.clone())); + + // _some_ good solution was stored. 
+ assert!(MultiPhase::finalize_signed_phase().0); + + // calling it again doesn't change anything + assert_storage_noop!(MultiPhase::finalize_signed_phase()); + }) + } +} diff --git a/frame/election-provider-multi-phase/src/unsigned.rs b/frame/election-provider-multi-phase/src/unsigned.rs index 543883fc035c5..52ecae7afa5fc 100644 --- a/frame/election-provider-multi-phase/src/unsigned.rs +++ b/frame/election-provider-multi-phase/src/unsigned.rs @@ -30,8 +30,10 @@ use sp_npos_elections::{ assignment_staked_to_ratio_normalized, is_score_better, seq_phragmen, }; use sp_runtime::{ + DispatchError, + SaturatedConversion, offchain::storage::{MutateStorageError, StorageValueRef}, - traits::TrailingZeroInput, SaturatedConversion + traits::TrailingZeroInput, }; use sp_std::{cmp::Ordering, convert::TryFrom, vec::Vec}; @@ -57,7 +59,8 @@ pub type Assignment = sp_npos_elections::Assignment< CompactAccuracyOf, >; -/// The [`IndexAssignment`][sp_npos_elections::IndexAssignment] type specialized for a particular runtime `T`. +/// The [`IndexAssignment`][sp_npos_elections::IndexAssignment] type specialized for a particular +/// runtime `T`. pub type IndexAssignmentOf = sp_npos_elections::IndexAssignmentOf>; #[derive(Debug, Eq, PartialEq)] @@ -69,7 +72,7 @@ pub enum MinerError { /// Submitting a transaction to the pool failed. PoolSubmissionFailed, /// The pre-dispatch checks failed for the mined solution. - PreDispatchChecksFailed, + PreDispatchChecksFailed(DispatchError), /// The solution generated from the miner is not feasible. Feasibility(FeasibilityError), /// Something went wrong fetching the lock. @@ -234,7 +237,7 @@ impl Pallet { ) -> Result<(), MinerError> { Self::unsigned_pre_dispatch_checks(raw_solution).map_err(|err| { log!(debug, "pre-dispatch checks failed for {} solution: {:?}", solution_type, err); - MinerError::PreDispatchChecksFailed + MinerError::PreDispatchChecksFailed(err) })?; Self::feasibility_check(raw_solution.clone(), ElectionCompute::Unsigned).map_err(|err| { @@ -344,7 +347,11 @@ impl Pallet { // converting to `Compact`. let mut index_assignments = sorted_assignments .into_iter() - .map(|assignment| IndexAssignmentOf::::new(&assignment, &voter_index, &target_index)) + .map(|assignment| IndexAssignmentOf::::new( + &assignment, + &voter_index, + &target_index, + )) .collect::, _>>()?; // trim assignments list for weight and length. @@ -416,7 +423,9 @@ impl Pallet { size, max_weight, ); - let removing: usize = assignments.len().saturating_sub(maximum_allowed_voters.saturated_into()); + let removing: usize = assignments.len().saturating_sub( + maximum_allowed_voters.saturated_into(), + ); log!( debug, "from {} assignments, truncating to {} for weight, removing {}", @@ -464,7 +473,9 @@ impl Pallet { } } let maximum_allowed_voters = - if low < assignments.len() && encoded_size_of(&assignments[..low + 1])? <= max_allowed_length { + if low < assignments.len() && + encoded_size_of(&assignments[..low + 1])? 
<= max_allowed_length + { low + 1 } else { low @@ -674,6 +685,15 @@ mod max_weight { fn on_initialize_open_unsigned_without_snapshot() -> Weight { unreachable!() } + fn finalize_signed_phase_accept_solution() -> Weight { + unreachable!() + } + fn finalize_signed_phase_reject_solution() -> Weight { + unreachable!() + } + fn submit(c: u32) -> Weight { + unreachable!() + } fn submit_unsigned(v: u32, t: u32, a: u32, d: u32) -> Weight { (0 * v + 0 * t + 1000 * a + 0 * d) as Weight } @@ -994,7 +1014,11 @@ mod tests { assert_eq!( MultiPhase::mine_check_save_submit().unwrap_err(), - MinerError::PreDispatchChecksFailed, + MinerError::PreDispatchChecksFailed(DispatchError::Module{ + index: 2, + error: 1, + message: Some("PreDispatchWrongWinnerCount"), + }), ); }) } @@ -1199,11 +1223,17 @@ mod tests { let mut storage = StorageValueRef::persistent(&OFFCHAIN_LAST_BLOCK); storage.clear(); - assert!(!ocw_solution_exists::(), "no solution should be present before we mine one"); + assert!( + !ocw_solution_exists::(), + "no solution should be present before we mine one", + ); // creates and cache a solution MultiPhase::offchain_worker(25); - assert!(ocw_solution_exists::(), "a solution must be cached after running the worker"); + assert!( + ocw_solution_exists::(), + "a solution must be cached after running the worker", + ); // after an election, the solution must be cleared // we don't actually care about the result of the election @@ -1329,10 +1359,15 @@ mod tests { _ => panic!("bad call: unexpected submission"), }; - // Custom(3) maps to PreDispatchChecksFailed - let pre_dispatch_check_error = TransactionValidityError::Invalid(InvalidTransaction::Custom(3)); + // Custom(7) maps to PreDispatchChecksFailed + let pre_dispatch_check_error = TransactionValidityError::Invalid( + InvalidTransaction::Custom(7), + ); assert_eq!( - ::validate_unsigned(TransactionSource::Local, &call) + ::validate_unsigned( + TransactionSource::Local, + &call, + ) .unwrap_err(), pre_dispatch_check_error, ); @@ -1359,7 +1394,11 @@ mod tests { let compact_clone = compact.clone(); // when - MultiPhase::trim_assignments_length(encoded_len, &mut assignments, encoded_size_of).unwrap(); + MultiPhase::trim_assignments_length( + encoded_len, + &mut assignments, + encoded_size_of, + ).unwrap(); // then let compact = CompactOf::::try_from(assignments.as_slice()).unwrap(); @@ -1383,7 +1422,11 @@ mod tests { let compact_clone = compact.clone(); // when - MultiPhase::trim_assignments_length(encoded_len as u32 - 1, &mut assignments, encoded_size_of).unwrap(); + MultiPhase::trim_assignments_length( + encoded_len as u32 - 1, + &mut assignments, + encoded_size_of, + ).unwrap(); // then let compact = CompactOf::::try_from(assignments.as_slice()).unwrap(); @@ -1414,7 +1457,11 @@ mod tests { .unwrap(); // when - MultiPhase::trim_assignments_length(encoded_len - 1, &mut assignments, encoded_size_of).unwrap(); + MultiPhase::trim_assignments_length( + encoded_len - 1, + &mut assignments, + encoded_size_of, + ).unwrap(); // then assert_eq!(assignments.len(), count - 1, "we must have removed exactly one assignment"); diff --git a/frame/election-provider-multi-phase/src/weights.rs b/frame/election-provider-multi-phase/src/weights.rs index 51b99bc962d43..6a245ebb51254 100644 --- a/frame/election-provider-multi-phase/src/weights.rs +++ b/frame/election-provider-multi-phase/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_election_provider_multi_phase //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 -//! 
DATE: 2021-06-19, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2021-06-20, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -47,8 +47,11 @@ pub trait WeightInfo { fn on_initialize_nothing() -> Weight; fn on_initialize_open_signed() -> Weight; fn on_initialize_open_unsigned_with_snapshot() -> Weight; + fn finalize_signed_phase_accept_solution() -> Weight; + fn finalize_signed_phase_reject_solution() -> Weight; fn on_initialize_open_unsigned_without_snapshot() -> Weight; fn elect_queued() -> Weight; + fn submit(c: u32, ) -> Weight; fn submit_unsigned(v: u32, t: u32, a: u32, d: u32, ) -> Weight; fn feasibility_check(v: u32, t: u32, a: u32, d: u32, ) -> Weight; } @@ -57,52 +60,69 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn on_initialize_nothing() -> Weight { - (24_579_000 as Weight) + (33_392_000 as Weight) .saturating_add(T::DbWeight::get().reads(8 as Weight)) } fn on_initialize_open_signed() -> Weight { - (87_463_000 as Weight) + (115_659_000 as Weight) .saturating_add(T::DbWeight::get().reads(10 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn on_initialize_open_unsigned_with_snapshot() -> Weight { - (87_381_000 as Weight) + (114_970_000 as Weight) .saturating_add(T::DbWeight::get().reads(10 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } + fn finalize_signed_phase_accept_solution() -> Weight { + (51_442_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + fn finalize_signed_phase_reject_solution() -> Weight { + (23_160_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } fn on_initialize_open_unsigned_without_snapshot() -> Weight { - (18_489_000 as Weight) + (24_101_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn elect_queued() -> Weight { - (6_038_989_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) + (6_153_604_000 as Weight) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(8 as Weight)) + } + fn submit(c: u32, ) -> Weight { + (78_972_000 as Weight) + // Standard Error: 16_000 + .saturating_add((308_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn submit_unsigned(v: u32, t: u32, a: u32, d: u32, ) -> Weight { (0 as Weight) // Standard Error: 12_000 - .saturating_add((3_480_000 as Weight).saturating_mul(v as Weight)) + .saturating_add((3_572_000 as Weight).saturating_mul(v as Weight)) // Standard Error: 42_000 - .saturating_add((194_000 as Weight).saturating_mul(t as Weight)) + .saturating_add((23_000 as Weight).saturating_mul(t as Weight)) // Standard Error: 12_000 - .saturating_add((10_498_000 as Weight).saturating_mul(a as Weight)) + .saturating_add((11_529_000 as Weight).saturating_mul(a as Weight)) // Standard Error: 63_000 - .saturating_add((3_074_000 as Weight).saturating_mul(d as Weight)) + .saturating_add((3_333_000 as Weight).saturating_mul(d as Weight)) .saturating_add(T::DbWeight::get().reads(7 as Weight)) 
.saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn feasibility_check(v: u32, t: u32, a: u32, d: u32, ) -> Weight { (0 as Weight) // Standard Error: 7_000 - .saturating_add((3_481_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 24_000 - .saturating_add((385_000 as Weight).saturating_mul(t as Weight)) + .saturating_add((3_647_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 23_000 + .saturating_add((390_000 as Weight).saturating_mul(t as Weight)) // Standard Error: 7_000 - .saturating_add((8_538_000 as Weight).saturating_mul(a as Weight)) - // Standard Error: 36_000 - .saturating_add((3_322_000 as Weight).saturating_mul(d as Weight)) + .saturating_add((9_614_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 35_000 + .saturating_add((3_405_000 as Weight).saturating_mul(d as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) } } @@ -110,52 +130,69 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { fn on_initialize_nothing() -> Weight { - (24_579_000 as Weight) + (33_392_000 as Weight) .saturating_add(RocksDbWeight::get().reads(8 as Weight)) } fn on_initialize_open_signed() -> Weight { - (87_463_000 as Weight) + (115_659_000 as Weight) .saturating_add(RocksDbWeight::get().reads(10 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } fn on_initialize_open_unsigned_with_snapshot() -> Weight { - (87_381_000 as Weight) + (114_970_000 as Weight) .saturating_add(RocksDbWeight::get().reads(10 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } + fn finalize_signed_phase_accept_solution() -> Weight { + (51_442_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + fn finalize_signed_phase_reject_solution() -> Weight { + (23_160_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } fn on_initialize_open_unsigned_without_snapshot() -> Weight { - (18_489_000 as Weight) + (24_101_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn elect_queued() -> Weight { - (6_038_989_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + (6_153_604_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(8 as Weight)) + } + fn submit(c: u32, ) -> Weight { + (78_972_000 as Weight) + // Standard Error: 16_000 + .saturating_add((308_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn submit_unsigned(v: u32, t: u32, a: u32, d: u32, ) -> Weight { (0 as Weight) // Standard Error: 12_000 - .saturating_add((3_480_000 as Weight).saturating_mul(v as Weight)) + .saturating_add((3_572_000 as Weight).saturating_mul(v as Weight)) // Standard Error: 42_000 - .saturating_add((194_000 as Weight).saturating_mul(t as Weight)) + .saturating_add((23_000 as Weight).saturating_mul(t as Weight)) // Standard Error: 12_000 - .saturating_add((10_498_000 as Weight).saturating_mul(a as Weight)) + .saturating_add((11_529_000 as Weight).saturating_mul(a as Weight)) // Standard Error: 63_000 - .saturating_add((3_074_000 as Weight).saturating_mul(d as Weight)) + 
.saturating_add((3_333_000 as Weight).saturating_mul(d as Weight)) .saturating_add(RocksDbWeight::get().reads(7 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn feasibility_check(v: u32, t: u32, a: u32, d: u32, ) -> Weight { (0 as Weight) // Standard Error: 7_000 - .saturating_add((3_481_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 24_000 - .saturating_add((385_000 as Weight).saturating_mul(t as Weight)) + .saturating_add((3_647_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 23_000 + .saturating_add((390_000 as Weight).saturating_mul(t as Weight)) // Standard Error: 7_000 - .saturating_add((8_538_000 as Weight).saturating_mul(a as Weight)) - // Standard Error: 36_000 - .saturating_add((3_322_000 as Weight).saturating_mul(d as Weight)) + .saturating_add((9_614_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 35_000 + .saturating_add((3_405_000 as Weight).saturating_mul(d as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) } } diff --git a/frame/support/src/storage/bounded_btree_map.rs b/frame/support/src/storage/bounded_btree_map.rs index 8c50557618eec..0c1994d63a35d 100644 --- a/frame/support/src/storage/bounded_btree_map.rs +++ b/frame/support/src/storage/bounded_btree_map.rs @@ -39,7 +39,8 @@ pub struct BoundedBTreeMap(BTreeMap, PhantomData); impl Decode for BoundedBTreeMap where - BTreeMap: Decode, + K: Decode + Ord, + V: Decode, S: Get, { fn decode(input: &mut I) -> Result { @@ -115,14 +116,15 @@ where self.0.get_mut(key) } - /// Exactly the same semantics as [`BTreeMap::insert`], but returns an `Err` (and is a noop) if the - /// new length of the map exceeds `S`. - pub fn try_insert(&mut self, key: K, value: V) -> Result<(), ()> { - if self.len() < Self::bound() { - self.0.insert(key, value); - Ok(()) + /// Exactly the same semantics as [`BTreeMap::insert`], but returns an `Err` (and is a noop) if + /// the new length of the map exceeds `S`. + /// + /// In the `Err` case, returns the inserted pair so it can be further used without cloning. 
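A minimal usage sketch of the new `try_insert` contract for `BoundedBTreeMap` (the bound of 2 and the keys/values are illustrative):

    use frame_support::{parameter_types, storage::bounded_btree_map::BoundedBTreeMap};

    parameter_types! {
        pub const Two: u32 = 2;
    }

    fn try_insert_contract() {
        let mut map = BoundedBTreeMap::<u32, &'static str, Two>::new();
        assert_eq!(map.try_insert(1, "a"), Ok(None));      // under the bound
        assert_eq!(map.try_insert(1, "b"), Ok(Some("a"))); // existing key: accepted, old value returned
        assert_eq!(map.try_insert(2, "c"), Ok(None));      // fills the map
        assert_eq!(map.try_insert(3, "d"), Err((3, "d"))); // full: key and value handed back, map unchanged
    }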
+ pub fn try_insert(&mut self, key: K, value: V) -> Result, (K, V)> { + if self.len() < Self::bound() || self.0.contains_key(&key) { + Ok(self.0.insert(key, value)) } else { - Err(()) + Err((key, value)) } } @@ -407,4 +409,50 @@ pub mod test { Err("BoundedBTreeMap exceeds its limit".into()), ); } + + #[test] + fn unequal_eq_impl_insert_works() { + // given a struct with a strange notion of equality + #[derive(Debug)] + struct Unequal(u32, bool); + + impl PartialEq for Unequal { + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } + } + impl Eq for Unequal {} + + impl Ord for Unequal { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.0.cmp(&other.0) + } + } + + impl PartialOrd for Unequal { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } + } + + let mut map = BoundedBTreeMap::::new(); + + // when the set is full + + for i in 0..4 { + map.try_insert(Unequal(i, false), i).unwrap(); + } + + // can't insert a new distinct member + map.try_insert(Unequal(5, false), 5).unwrap_err(); + + // but _can_ insert a distinct member which compares equal, though per the documentation, + // neither the set length nor the actual member are changed, but the value is + map.try_insert(Unequal(0, true), 6).unwrap(); + assert_eq!(map.len(), 4); + let (zero_key, zero_value) = map.get_key_value(&Unequal(0, true)).unwrap(); + assert_eq!(zero_key.0, 0); + assert_eq!(zero_key.1, false); + assert_eq!(*zero_value, 6); + } } diff --git a/frame/support/src/storage/bounded_btree_set.rs b/frame/support/src/storage/bounded_btree_set.rs index f551a3cbfa38e..10c2300a08a09 100644 --- a/frame/support/src/storage/bounded_btree_set.rs +++ b/frame/support/src/storage/bounded_btree_set.rs @@ -39,7 +39,7 @@ pub struct BoundedBTreeSet(BTreeSet, PhantomData); impl Decode for BoundedBTreeSet where - BTreeSet: Decode, + T: Decode + Ord, S: Get, { fn decode(input: &mut I) -> Result { @@ -103,14 +103,15 @@ where self.0.clear() } - /// Exactly the same semantics as [`BTreeSet::insert`], but returns an `Err` (and is a noop) if the - /// new length of the set exceeds `S`. - pub fn try_insert(&mut self, item: T) -> Result<(), ()> { - if self.len() < Self::bound() { - self.0.insert(item); - Ok(()) + /// Exactly the same semantics as [`BTreeSet::insert`], but returns an `Err` (and is a noop) if + /// the new length of the set exceeds `S`. + /// + /// In the `Err` case, returns the inserted item so it can be further used without cloning. 
+ pub fn try_insert(&mut self, item: T) -> Result { + if self.len() < Self::bound() || self.0.contains(&item) { + Ok(self.0.insert(item)) } else { - Err(()) + Err(item) } } @@ -393,4 +394,49 @@ pub mod test { Err("BoundedBTreeSet exceeds its limit".into()), ); } + + #[test] + fn unequal_eq_impl_insert_works() { + // given a struct with a strange notion of equality + #[derive(Debug)] + struct Unequal(u32, bool); + + impl PartialEq for Unequal { + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } + } + impl Eq for Unequal {} + + impl Ord for Unequal { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.0.cmp(&other.0) + } + } + + impl PartialOrd for Unequal { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } + } + + let mut set = BoundedBTreeSet::::new(); + + // when the set is full + + for i in 0..4 { + set.try_insert(Unequal(i, false)).unwrap(); + } + + // can't insert a new distinct member + set.try_insert(Unequal(5, false)).unwrap_err(); + + // but _can_ insert a distinct member which compares equal, though per the documentation, + // neither the set length nor the actual member are changed + set.try_insert(Unequal(0, true)).unwrap(); + assert_eq!(set.len(), 4); + let zero_item = set.get(&Unequal(0, true)).unwrap(); + assert_eq!(zero_item.0, 0); + assert_eq!(zero_item.1, false); + } } diff --git a/primitives/npos-elections/compact/src/lib.rs b/primitives/npos-elections/compact/src/lib.rs index e8cde87744539..0e9fbb34eea17 100644 --- a/primitives/npos-elections/compact/src/lib.rs +++ b/primitives/npos-elections/compact/src/lib.rs @@ -169,7 +169,7 @@ fn struct_def( ); quote!{ #compact_impl - #[derive(Default, PartialEq, Eq, Clone, Debug)] + #[derive(Default, PartialEq, Eq, Clone, Debug, PartialOrd, Ord)] } } else { // automatically derived. From d1f905bad9eb6f8d4c3f434e8e2db2a23a0e8c1b Mon Sep 17 00:00:00 2001 From: Shinsaku Ashizawa <39494661+NoCtrlZ@users.noreply.github.com> Date: Mon, 28 Jun 2021 19:02:31 +0900 Subject: [PATCH 49/67] change balance pallet reference (#9205) --- frame/balances/README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/frame/balances/README.md b/frame/balances/README.md index cbbfea75e6848..93e424a89c721 100644 --- a/frame/balances/README.md +++ b/frame/balances/README.md @@ -2,9 +2,9 @@ The Balances module provides functionality for handling accounts and balances. -- [`balances::Trait`](https://docs.rs/pallet-balances/latest/pallet_balances/trait.Trait.html) -- [`Call`](https://docs.rs/pallet-balances/latest/pallet_balances/enum.Call.html) -- [`Module`](https://docs.rs/pallet-balances/latest/pallet_balances/struct.Module.html) +- [`Config`](https://docs.rs/pallet-balances/latest/pallet_balances/pallet/trait.Config.html) +- [`Call`](https://docs.rs/pallet-balances/latest/pallet_balances/pallet/enum.Call.html) +- [`Pallet`](https://docs.rs/pallet-balances/latest/pallet_balances/pallet/struct.Pallet.html) ## Overview @@ -113,7 +113,7 @@ fn update_ledger( ## Genesis config -The Balances module depends on the [`GenesisConfig`](https://docs.rs/pallet-balances/latest/pallet_balances/struct.GenesisConfig.html). +The Balances module depends on the [`GenesisConfig`](https://docs.rs/pallet-balances/latest/pallet_balances/pallet/struct.GenesisConfig.html). 
## Assumptions From 24b26b9cc0c5d8fcb81857b7d0f3815ad27a2b1a Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Mon, 28 Jun 2021 13:45:01 +0200 Subject: [PATCH 50/67] move set_emergency_election_result before submit (#9215) --- .../election-provider-multi-phase/src/lib.rs | 46 +++++++++---------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index 45e04a757f0b3..e127e34d55723 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -884,6 +884,29 @@ pub mod pallet { Ok(()) } + /// Set a solution in the queue, to be handed out to the client of this pallet in the next + /// call to `ElectionProvider::elect`. + /// + /// This can only be set by `T::ForceOrigin`, and only when the phase is `Emergency`. + /// + /// The solution is not checked for any feasibility and is assumed to be trustworthy, as any + /// feasibility check itself can in principle cause the election process to fail (due to + /// memory/weight constrains). + #[pallet::weight(T::DbWeight::get().reads_writes(1, 1))] + pub fn set_emergency_election_result( + origin: OriginFor, + solution: ReadySolution, + ) -> DispatchResult { + T::ForceOrigin::ensure_origin(origin)?; + ensure!(Self::current_phase().is_emergency(), >::CallNotAllowed); + + // Note: we don't `rotate_round` at this point; the next call to + // `ElectionProvider::elect` will succeed and take care of that. + + >::put(solution); + Ok(()) + } + /// Submit a solution for the signed phase. /// /// The dispatch origin fo this call must be __signed__. @@ -956,29 +979,6 @@ pub mod pallet { Self::deposit_event(Event::SolutionStored(ElectionCompute::Signed, ejected_a_solution)); Ok(()) } - - /// Set a solution in the queue, to be handed out to the client of this pallet in the next - /// call to `ElectionProvider::elect`. - /// - /// This can only be set by `T::ForceOrigin`, and only when the phase is `Emergency`. - /// - /// The solution is not checked for any feasibility and is assumed to be trustworthy, as any - /// feasibility check itself can in principle cause the election process to fail (due to - /// memory/weight constrains). - #[pallet::weight(T::DbWeight::get().reads_writes(1, 1))] - pub fn set_emergency_election_result( - origin: OriginFor, - solution: ReadySolution, - ) -> DispatchResult { - T::ForceOrigin::ensure_origin(origin)?; - ensure!(Self::current_phase().is_emergency(), >::CallNotAllowed); - - // Note: we don't `rotate_round` at this point; the next call to - // `ElectionProvider::elect` will succeed and take care of that. - - >::put(solution); - Ok(()) - } } #[pallet::event] From c6240ce90083f9e8db94b6c33f81297c11534ea0 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Mon, 28 Jun 2021 08:54:24 -0400 Subject: [PATCH 51/67] Improve Staking Limits (#9193) * only allow `chill_other` near threshold. 
* improve test * skip limit check for existing validators / nominators * add `ChillThreshold` * rename to `set` for consistent api * more tests * fix some line width --- frame/staking/src/benchmarking.rs | 15 +++- frame/staking/src/lib.rs | 80 ++++++++++++++------ frame/staking/src/tests.rs | 121 ++++++++++++++++++++++++------ frame/staking/src/weights.rs | 6 +- 4 files changed, 172 insertions(+), 50 deletions(-) diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index f7545b07c90a8..ff7be272eec81 100644 --- a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -601,26 +601,33 @@ benchmarks! { assert_eq!(targets.len() as u32, v); } - update_staking_limits { + set_staking_limits { // This function always does the same thing... just write to 4 storage items. }: _( RawOrigin::Root, BalanceOf::::max_value(), BalanceOf::::max_value(), Some(u32::MAX), - Some(u32::MAX) + Some(u32::MAX), + Some(Percent::max_value()) ) verify { assert_eq!(MinNominatorBond::::get(), BalanceOf::::max_value()); assert_eq!(MinValidatorBond::::get(), BalanceOf::::max_value()); assert_eq!(MaxNominatorsCount::::get(), Some(u32::MAX)); assert_eq!(MaxValidatorsCount::::get(), Some(u32::MAX)); + assert_eq!(ChillThreshold::::get(), Some(Percent::from_percent(100))); } chill_other { let (_, controller) = create_stash_controller::(USER_SEED, 100, Default::default())?; Staking::::validate(RawOrigin::Signed(controller.clone()).into(), ValidatorPrefs::default())?; - Staking::::update_staking_limits( - RawOrigin::Root.into(), BalanceOf::::max_value(), BalanceOf::::max_value(), None, None, + Staking::::set_staking_limits( + RawOrigin::Root.into(), + BalanceOf::::max_value(), + BalanceOf::::max_value(), + Some(0), + Some(0), + Some(Percent::from_percent(0)) )?; let caller = whitelisted_caller(); }: _(RawOrigin::Signed(caller), controller.clone()) diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index ce1f5afc64c1d..ec7da1be18714 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -1216,6 +1216,12 @@ pub mod pallet { #[pallet::storage] pub(crate) type StorageVersion = StorageValue<_, Releases, ValueQuery>; + /// The threshold for when users can start calling `chill_other` for other validators / nominators. + /// The threshold is compared to the actual number of validators / nominators (`CountFor*`) in + /// the system compared to the configured max (`Max*Count`). + #[pallet::storage] + pub(crate) type ChillThreshold = StorageValue<_, Percent, OptionQuery>; + #[pallet::genesis_config] pub struct GenesisConfig { pub history_depth: u32, @@ -1714,16 +1720,19 @@ pub mod pallet { pub fn validate(origin: OriginFor, prefs: ValidatorPrefs) -> DispatchResult { let controller = ensure_signed(origin)?; - // If this error is reached, we need to adjust the `MinValidatorBond` and start calling `chill_other`. - // Until then, we explicitly block new validators to protect the runtime. - if let Some(max_validators) = MaxValidatorsCount::::get() { - ensure!(CounterForValidators::::get() < max_validators, Error::::TooManyValidators); - } - let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; ensure!(ledger.active >= MinValidatorBond::::get(), Error::::InsufficientBond); - let stash = &ledger.stash; + + // Only check limits if they are not already a validator. + if !Validators::::contains_key(stash) { + // If this error is reached, we need to adjust the `MinValidatorBond` and start calling `chill_other`. 
+ // Until then, we explicitly block new validators to protect the runtime. + if let Some(max_validators) = MaxValidatorsCount::::get() { + ensure!(CounterForValidators::::get() < max_validators, Error::::TooManyValidators); + } + } + Self::do_remove_nominator(stash); Self::do_add_validator(stash, prefs); Ok(()) @@ -1755,16 +1764,19 @@ pub mod pallet { ) -> DispatchResult { let controller = ensure_signed(origin)?; - // If this error is reached, we need to adjust the `MinNominatorBond` and start calling `chill_other`. - // Until then, we explicitly block new nominators to protect the runtime. - if let Some(max_nominators) = MaxNominatorsCount::::get() { - ensure!(CounterForNominators::::get() < max_nominators, Error::::TooManyNominators); - } - let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; ensure!(ledger.active >= MinNominatorBond::::get(), Error::::InsufficientBond); - let stash = &ledger.stash; + + // Only check limits if they are not already a nominator. + if !Nominators::::contains_key(stash) { + // If this error is reached, we need to adjust the `MinNominatorBond` and start calling `chill_other`. + // Until then, we explicitly block new nominators to protect the runtime. + if let Some(max_nominators) = MaxNominatorsCount::::get() { + ensure!(CounterForNominators::::get() < max_nominators, Error::::TooManyNominators); + } + } + ensure!(!targets.is_empty(), Error::::EmptyTargets); ensure!(targets.len() <= T::MAX_NOMINATIONS as usize, Error::::TooManyTargets); @@ -2266,31 +2278,42 @@ pub mod pallet { /// /// NOTE: Existing nominators and validators will not be affected by this update. /// to kick people under the new limits, `chill_other` should be called. - #[pallet::weight(T::WeightInfo::update_staking_limits())] - pub fn update_staking_limits( + #[pallet::weight(T::WeightInfo::set_staking_limits())] + pub fn set_staking_limits( origin: OriginFor, min_nominator_bond: BalanceOf, min_validator_bond: BalanceOf, max_nominator_count: Option, max_validator_count: Option, + threshold: Option, ) -> DispatchResult { ensure_root(origin)?; MinNominatorBond::::set(min_nominator_bond); MinValidatorBond::::set(min_validator_bond); MaxNominatorsCount::::set(max_nominator_count); MaxValidatorsCount::::set(max_validator_count); + ChillThreshold::::set(threshold); Ok(()) } - /// Declare a `controller` as having no desire to either validator or nominate. + /// Declare a `controller` to stop participating as either a validator or nominator. /// /// Effects will be felt at the beginning of the next era. /// /// The dispatch origin for this call must be _Signed_, but can be called by anyone. /// - /// If the caller is the same as the controller being targeted, then no further checks - /// are enforced. However, this call can also be made by an third party user who witnesses - /// that this controller does not satisfy the minimum bond requirements to be in their role. + /// If the caller is the same as the controller being targeted, then no further checks are + /// enforced, and this function behaves just like `chill`. + /// + /// If the caller is different than the controller being targeted, the following conditions + /// must be met: + /// * A `ChillThreshold` must be set and checked which defines how close to the max + /// nominators or validators we must reach before users can start chilling one-another. + /// * A `MaxNominatorCount` and `MaxValidatorCount` must be set which is used to determine + /// how close we are to the threshold. 
+ /// * A `MinNominatorBond` and `MinValidatorBond` must be set and checked, which determines
+ /// if this is a person that should be chilled because they have not met the threshold
+ /// bond required.
 ///
 /// This can be helpful if bond requirements are updated, and we need to remove old users
 /// who do not satisfy these requirements.
@@ -2307,14 +2330,27 @@ pub mod pallet {
 let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?;
 let stash = ledger.stash;

- // If the caller is not the controller, we want to check that the minimum bond
- // requirements are not satisfied, and thus we have reason to chill this user.
+ // In order for one user to chill another user, the following conditions must be met:
+ // * A `ChillThreshold` is set which defines how close to the max nominators or
+ // validators we must reach before users can start chilling one-another.
+ // * A `MaxNominatorCount` and `MaxValidatorCount` which is used to determine how close
+ // we are to the threshold.
+ // * A `MinNominatorBond` and `MinValidatorBond` which is the final condition checked to
+ // determine if this is a person that should be chilled because they have not met the
+ // threshold bond required.
 //
 // Otherwise, if caller is the same as the controller, this is just like `chill`.
 if caller != controller {
+ let threshold = ChillThreshold::::get().ok_or(Error::::CannotChillOther)?;
 let min_active_bond = if Nominators::::contains_key(&stash) {
+ let max_nominator_count = MaxNominatorsCount::::get().ok_or(Error::::CannotChillOther)?;
+ let current_nominator_count = CounterForNominators::::get();
+ ensure!(threshold * max_nominator_count < current_nominator_count, Error::::CannotChillOther);
 MinNominatorBond::::get()
 } else if Validators::::contains_key(&stash) {
+ let max_validator_count = MaxValidatorsCount::::get().ok_or(Error::::CannotChillOther)?;
+ let current_validator_count = CounterForValidators::::get();
+ ensure!(threshold * max_validator_count < current_validator_count, Error::::CannotChillOther);
 MinValidatorBond::::get()
 } else {
 Zero::zero()
diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs
index e314a70399fdd..bbb0d5522fcc6 100644
--- a/frame/staking/src/tests.rs
+++ b/frame/staking/src/tests.rs
@@ -4050,12 +4050,18 @@ mod election_data_provider {
 // 500 is not enough for any role
 assert_ok!(Staking::bond(Origin::signed(3), 4, 500, RewardDestination::Controller));
 assert_noop!(Staking::nominate(Origin::signed(4), vec![1]), Error::::InsufficientBond);
- assert_noop!(Staking::validate(Origin::signed(4), ValidatorPrefs::default()), Error::::InsufficientBond);
+ assert_noop!(
+ Staking::validate(Origin::signed(4), ValidatorPrefs::default()),
+ Error::::InsufficientBond,
+ );

 // 1000 is enough for nominator
 assert_ok!(Staking::bond_extra(Origin::signed(3), 500));
 assert_ok!(Staking::nominate(Origin::signed(4), vec![1]));
- assert_noop!(Staking::validate(Origin::signed(4), ValidatorPrefs::default()), Error::::InsufficientBond);
+ assert_noop!(
+ Staking::validate(Origin::signed(4), ValidatorPrefs::default()),
+ Error::::InsufficientBond,
+ );

 // 1500 is enough for validator
 assert_ok!(Staking::bond_extra(Origin::signed(3), 500));
@@ -4083,24 +4089,80 @@ mod election_data_provider {
 .min_nominator_bond(1_000)
 .min_validator_bond(1_500)
 .build_and_execute(|| {
- // Nominator
- assert_ok!(Staking::bond(Origin::signed(1), 2, 1000, RewardDestination::Controller));
- assert_ok!(Staking::nominate(Origin::signed(2), vec![1]));
+ for i in 0 .. 15 {
+ let a = 4 * i;
+ let b = 4 * i + 1;
+ let c = 4 * i + 2;
+ let d = 4 * i + 3;
+ Balances::make_free_balance_be(&a, 100_000);
+ Balances::make_free_balance_be(&b, 100_000);
+ Balances::make_free_balance_be(&c, 100_000);
+ Balances::make_free_balance_be(&d, 100_000);
+
+ // Nominator
+ assert_ok!(Staking::bond(Origin::signed(a), b, 1000, RewardDestination::Controller));
+ assert_ok!(Staking::nominate(Origin::signed(b), vec![1]));
+
+ // Validator
+ assert_ok!(Staking::bond(Origin::signed(c), d, 1500, RewardDestination::Controller));
+ assert_ok!(Staking::validate(Origin::signed(d), ValidatorPrefs::default()));
+ }

- // Validator
- assert_ok!(Staking::bond(Origin::signed(3), 4, 1500, RewardDestination::Controller));
- assert_ok!(Staking::validate(Origin::signed(4), ValidatorPrefs::default()));
+ // To chill other users, we need to:
+ // * Set a minimum bond amount
+ // * Set a limit
+ // * Set a threshold
+ //
+ // If any of these are missing, we do not have enough information to allow the
+ // `chill_other` to succeed from one user to another.

 // Can't chill these users
- assert_noop!(Staking::chill_other(Origin::signed(1), 2), Error::::CannotChillOther);
- assert_noop!(Staking::chill_other(Origin::signed(1), 4), Error::::CannotChillOther);
-
- // Change the minimum bond
- assert_ok!(Staking::update_staking_limits(Origin::root(), 1_500, 2_000, None, None));
+ assert_noop!(Staking::chill_other(Origin::signed(1337), 1), Error::::CannotChillOther);
+ assert_noop!(Staking::chill_other(Origin::signed(1337), 3), Error::::CannotChillOther);
+
+ // Change the minimum bond... but no limits.
+ assert_ok!(Staking::set_staking_limits(Origin::root(), 1_500, 2_000, None, None, None));
+
+ // Still can't chill these users
+ assert_noop!(Staking::chill_other(Origin::signed(1337), 1), Error::::CannotChillOther);
+ assert_noop!(Staking::chill_other(Origin::signed(1337), 3), Error::::CannotChillOther);
+
+ // Add limits, but no threshold
+ assert_ok!(Staking::set_staking_limits(Origin::root(), 1_500, 2_000, Some(10), Some(10), None));
+
+ // Still can't chill these users
+ assert_noop!(Staking::chill_other(Origin::signed(1337), 1), Error::::CannotChillOther);
+ assert_noop!(Staking::chill_other(Origin::signed(1337), 3), Error::::CannotChillOther);
+
+ // Add threshold, but no limits
+ assert_ok!(Staking::set_staking_limits(
+ Origin::root(), 1_500, 2_000, None, None, Some(Percent::from_percent(0))
+ ));
+
+ // Still can't chill these users
+ assert_noop!(Staking::chill_other(Origin::signed(1337), 1), Error::::CannotChillOther);
+ assert_noop!(Staking::chill_other(Origin::signed(1337), 3), Error::::CannotChillOther);
+
+ // Add threshold and limits
+ assert_ok!(Staking::set_staking_limits(
+ Origin::root(), 1_500, 2_000, Some(10), Some(10), Some(Percent::from_percent(75))
+ ));
+
+ // 16 people total because tests start with 1 active one
+ assert_eq!(CounterForNominators::::get(), 16);
+ assert_eq!(CounterForValidators::::get(), 16);
+
+ // Users can now be chilled down to 7 people, so we try to remove 9 of them (starting with 16)
+ for i in 6 .. 15 {
+ let b = 4 * i + 1;
+ let d = 4 * i + 3;
+ assert_ok!(Staking::chill_other(Origin::signed(1337), b));
+ assert_ok!(Staking::chill_other(Origin::signed(1337), d));
+ }

- // Users can now be chilled
- assert_ok!(Staking::chill_other(Origin::signed(1), 2));
- assert_ok!(Staking::chill_other(Origin::signed(1), 4));
+ // Can't go lower.
+ assert_noop!(Staking::chill_other(Origin::signed(1337), 1), Error::::CannotChillOther); + assert_noop!(Staking::chill_other(Origin::signed(1337), 3), Error::::CannotChillOther); }) } @@ -4114,36 +4176,53 @@ mod election_data_provider { // Change the maximums let max = 10; - assert_ok!(Staking::update_staking_limits(Origin::root(), 10, 10, Some(max), Some(max))); + assert_ok!(Staking::set_staking_limits( + Origin::root(), 10, 10, Some(max), Some(max), Some(Percent::from_percent(0)) + )); // can create `max - validator_count` validators - assert_ok!(testing_utils::create_validators::(max - validator_count, 100)); + let mut some_existing_validator = AccountId::default(); + for i in 0 .. max - validator_count { + let (_, controller) = testing_utils::create_stash_controller::( + i + 10_000_000, 100, RewardDestination::Controller, + ).unwrap(); + assert_ok!(Staking::validate(Origin::signed(controller), ValidatorPrefs::default())); + some_existing_validator = controller; + } // but no more let (_, last_validator) = testing_utils::create_stash_controller::( 1337, 100, RewardDestination::Controller, ).unwrap(); + assert_noop!( Staking::validate(Origin::signed(last_validator), ValidatorPrefs::default()), Error::::TooManyValidators, ); // same with nominators + let mut some_existing_nominator = AccountId::default(); for i in 0 .. max - nominator_count { let (_, controller) = testing_utils::create_stash_controller::( - i + 10_000_000, 100, RewardDestination::Controller, + i + 20_000_000, 100, RewardDestination::Controller, ).unwrap(); assert_ok!(Staking::nominate(Origin::signed(controller), vec![1])); + some_existing_nominator = controller; } // one more is too many let (_, last_nominator) = testing_utils::create_stash_controller::( - 20_000_000, 100, RewardDestination::Controller, + 30_000_000, 100, RewardDestination::Controller, ).unwrap(); assert_noop!(Staking::nominate(Origin::signed(last_nominator), vec![1]), Error::::TooManyNominators); + // Re-nominate works fine + assert_ok!(Staking::nominate(Origin::signed(some_existing_nominator), vec![1])); + // Re-validate works fine + assert_ok!(Staking::validate(Origin::signed(some_existing_validator), ValidatorPrefs::default())); + // No problem when we set to `None` again - assert_ok!(Staking::update_staking_limits(Origin::root(), 10, 10, None, None)); + assert_ok!(Staking::set_staking_limits(Origin::root(), 10, 10, None, None, None)); assert_ok!(Staking::nominate(Origin::signed(last_nominator), vec![1])); assert_ok!(Staking::validate(Origin::signed(last_validator), ValidatorPrefs::default())); }) diff --git a/frame/staking/src/weights.rs b/frame/staking/src/weights.rs index dbf5f3fc82bf9..cf14e8b22362f 100644 --- a/frame/staking/src/weights.rs +++ b/frame/staking/src/weights.rs @@ -70,7 +70,7 @@ pub trait WeightInfo { fn new_era(v: u32, n: u32, ) -> Weight; fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight; fn get_npos_targets(v: u32, ) -> Weight; - fn update_staking_limits() -> Weight; + fn set_staking_limits() -> Weight; fn chill_other() -> Weight; } @@ -252,7 +252,7 @@ impl WeightInfo for SubstrateWeight { .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) } - fn update_staking_limits() -> Weight { + fn set_staking_limits() -> Weight { (5_028_000 as Weight) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } @@ -440,7 +440,7 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(1 as Weight)) 
.saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(v as Weight)))
 }
- fn update_staking_limits() -> Weight {
+ fn set_staking_limits() -> Weight {
 (5_028_000 as Weight)
 .saturating_add(RocksDbWeight::get().writes(4 as Weight))
 }

From 682e5e8efbc3aa92d3431f2c1357c33b998950b3 Mon Sep 17 00:00:00 2001
From: Joshy Orndorff
Date: Mon, 28 Jun 2021 16:13:19 -0400
Subject: [PATCH 52/67] Add public accessor for tip amount (#9219)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Add public accessor for tip amount.

* Update frame/transaction-payment/src/lib.rs

Co-authored-by: Bastian Köcher

Co-authored-by: Bastian Köcher
---
 frame/transaction-payment/src/lib.rs | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs
index af1fcc5bfeaaa..416439e7f200c 100644
--- a/frame/transaction-payment/src/lib.rs
+++ b/frame/transaction-payment/src/lib.rs
@@ -525,6 +525,11 @@ impl ChargeTransactionPayment where
 Self(fee)
 }

+ /// Returns the tip as being chosen by the transaction sender.
+ pub fn tip(&self) -> BalanceOf {
+ self.0
+ }
+
 fn withdraw_fee(
 &self,
 who: &T::AccountId,

From 3355dbcdb36b52266d363b91c1dede36202be782 Mon Sep 17 00:00:00 2001
From: Shinsaku Ashizawa <39494661+NoCtrlZ@users.noreply.github.com>
Date: Tue, 29 Jun 2021 16:26:27 +0900
Subject: [PATCH 53/67] change reference trait to config (#9224)

---
 frame/assets/README.md | 4 ++--
 frame/atomic-swap/README.md | 2 +-
 frame/aura/README.md | 2 +-
 frame/democracy/README.md | 2 +-
 frame/elections-phragmen/README.md | 2 +-
 frame/example/README.md | 2 +-
 frame/identity/README.md | 2 +-
 frame/im-online/README.md | 2 +-
 frame/multisig/README.md | 2 +-
 frame/nicks/README.md | 2 +-
 frame/proxy/README.md | 2 +-
 frame/recovery/README.md | 2 +-
 frame/scheduler/README.md | 2 +-
 frame/scored-pool/README.md | 2 +-
 frame/session/README.md | 2 +-
 frame/society/README.md | 2 +-
 frame/staking/README.md | 8 ++++----
 frame/sudo/README.md | 2 +-
 frame/system/README.md | 2 +-
 frame/timestamp/README.md | 6 +++---
 frame/utility/README.md | 2 +-
 frame/vesting/README.md | 2 +-
 22 files changed, 28 insertions(+), 28 deletions(-)

diff --git a/frame/assets/README.md b/frame/assets/README.md
index f8583a5c91d70..2a62a457943fa 100644
--- a/frame/assets/README.md
+++ b/frame/assets/README.md
@@ -11,9 +11,9 @@ with a fixed supply, including:
 * Asset Transfer
 * Asset Destruction

-To use it in your runtime, you need to implement the assets [`assets::Trait`](https://docs.rs/pallet-assets/latest/pallet_assets/trait.Trait.html).
+To use it in your runtime, you need to implement the assets [`assets::Config`](https://docs.rs/pallet-assets/latest/pallet_assets/pallet/trait.Config.html).

-The supported dispatchable functions are documented in the [`assets::Call`](https://docs.rs/pallet-assets/latest/pallet_assets/enum.Call.html) enum.
+The supported dispatchable functions are documented in the [`assets::Call`](https://docs.rs/pallet-assets/latest/pallet_assets/pallet/enum.Call.html) enum.

 ### Terminology

diff --git a/frame/atomic-swap/README.md b/frame/atomic-swap/README.md
index 5dd502095d792..888a64ec7e065 100644
--- a/frame/atomic-swap/README.md
+++ b/frame/atomic-swap/README.md
@@ -2,7 +2,7 @@

 A module for atomically sending funds.
-- [`atomic_swap::Trait`](https://docs.rs/pallet-atomic-swap/latest/pallet_atomic_swap/trait.Trait.html) +- [`atomic_swap::Config`](https://docs.rs/pallet-atomic-swap/latest/pallet_atomic_swap/trait.Config.html) - [`Call`](https://docs.rs/pallet-atomic-swap/latest/pallet_atomic_swap/enum.Call.html) - [`Module`](https://docs.rs/pallet-atomic-swap/latest/pallet_atomic_swap/struct.Module.html) diff --git a/frame/aura/README.md b/frame/aura/README.md index 73ed986dd734d..89ea5010a8870 100644 --- a/frame/aura/README.md +++ b/frame/aura/README.md @@ -1,6 +1,6 @@ # Aura Module -- [`aura::Trait`](https://docs.rs/pallet-aura/latest/pallet_aura/trait.Trait.html) +- [`aura::Config`](https://docs.rs/pallet-aura/latest/pallet_aura/pallet/trait.Config.html) - [`Module`](https://docs.rs/pallet-aura/latest/pallet_aura/struct.Module.html) ## Overview diff --git a/frame/democracy/README.md b/frame/democracy/README.md index 6a390cc048e1c..bbc5f1c65586a 100644 --- a/frame/democracy/README.md +++ b/frame/democracy/README.md @@ -1,6 +1,6 @@ # Democracy Pallet -- [`democracy::Trait`](https://docs.rs/pallet-democracy/latest/pallet_democracy/trait.Trait.html) +- [`democracy::Config`](https://docs.rs/pallet-democracy/latest/pallet_democracy/trait.Config.html) - [`Call`](https://docs.rs/pallet-democracy/latest/pallet_democracy/enum.Call.html) ## Overview diff --git a/frame/elections-phragmen/README.md b/frame/elections-phragmen/README.md index 8c5940ea2d78e..26b3f260da563 100644 --- a/frame/elections-phragmen/README.md +++ b/frame/elections-phragmen/README.md @@ -60,7 +60,7 @@ being re-elected at the end of each round. ### Module Information -- [`election_sp_phragmen::Trait`](https://docs.rs/pallet-elections-phragmen/latest/pallet_elections_phragmen/trait.Trait.html) +- [`election_sp_phragmen::Config`](https://docs.rs/pallet-elections-phragmen/latest/pallet_elections_phragmen/trait.Config.html) - [`Call`](https://docs.rs/pallet-elections-phragmen/latest/pallet_elections_phragmen/enum.Call.html) - [`Module`](https://docs.rs/pallet-elections-phragmen/latest/pallet_elections_phragmen/struct.Module.html) diff --git a/frame/example/README.md b/frame/example/README.md index 46a0d076a969a..e06dee78c3f81 100644 --- a/frame/example/README.md +++ b/frame/example/README.md @@ -46,7 +46,7 @@ Copy and paste this template from frame/example/src/lib.rs into file // Include the following links that shows what trait needs to be implemented to use the pallet // and the supported dispatchables that are documented in the Call enum. 
-- \[`::Trait`](https://docs.rs/pallet-example/latest/pallet_example/trait.Trait.html) +- \[`::Config`](https://docs.rs/pallet-example/latest/pallet_example/trait.Config.html) - \[`Call`](https://docs.rs/pallet-example/latest/pallet_example/enum.Call.html) - \[`Module`](https://docs.rs/pallet-example/latest/pallet_example/struct.Module.html) diff --git a/frame/identity/README.md b/frame/identity/README.md index 38e16d4dd4902..a67c259e2537a 100644 --- a/frame/identity/README.md +++ b/frame/identity/README.md @@ -1,6 +1,6 @@ # Identity Module -- [`identity::Trait`](https://docs.rs/pallet-identity/latest/pallet_identity/trait.Trait.html) +- [`identity::Config`](https://docs.rs/pallet-identity/latest/pallet_identity/trait.Config.html) - [`Call`](https://docs.rs/pallet-identity/latest/pallet_identity/enum.Call.html) ## Overview diff --git a/frame/im-online/README.md b/frame/im-online/README.md index a2ed5edc906a2..46b2268f18b12 100644 --- a/frame/im-online/README.md +++ b/frame/im-online/README.md @@ -13,7 +13,7 @@ and includes the recent best block number of the local validators chain as well as the `NetworkState`. It is submitted as an Unsigned Transaction via off-chain workers. -- [`im_online::Trait`](https://docs.rs/pallet-im-online/latest/pallet_im_online/trait.Trait.html) +- [`im_online::Config`](https://docs.rs/pallet-im-online/latest/pallet_im_online/trait.Config.html) - [`Call`](https://docs.rs/pallet-im-online/latest/pallet_im_online/enum.Call.html) - [`Module`](https://docs.rs/pallet-im-online/latest/pallet_im_online/struct.Module.html) diff --git a/frame/multisig/README.md b/frame/multisig/README.md index a18ef74163d09..4eab00d108204 100644 --- a/frame/multisig/README.md +++ b/frame/multisig/README.md @@ -1,7 +1,7 @@ # Multisig Module A module for doing multisig dispatch. -- [`multisig::Trait`](https://docs.rs/pallet-multisig/latest/pallet_multisig/trait.Trait.html) +- [`multisig::Config`](https://docs.rs/pallet-multisig/latest/pallet_multisig/trait.Config.html) - [`Call`](https://docs.rs/pallet-multisig/latest/pallet_multisig/enum.Call.html) ## Overview diff --git a/frame/nicks/README.md b/frame/nicks/README.md index 766108470bedf..a2a897b044f10 100644 --- a/frame/nicks/README.md +++ b/frame/nicks/README.md @@ -1,6 +1,6 @@ # Nicks Module -- [`nicks::Trait`](https://docs.rs/pallet-nicks/latest/pallet_nicks/trait.Trait.html) +- [`nicks::Config`](https://docs.rs/pallet-nicks/latest/pallet_nicks/trait.Config.html) - [`Call`](https://docs.rs/pallet-nicks/latest/pallet_nicks/enum.Call.html) ## Overview diff --git a/frame/proxy/README.md b/frame/proxy/README.md index 20c4d2bf20b82..2eb83fab6d727 100644 --- a/frame/proxy/README.md +++ b/frame/proxy/README.md @@ -6,7 +6,7 @@ The accounts to which permission is delegated may be requied to announce the act wish to execute some duration prior to execution happens. In this case, the target account may reject the announcement and in doing so, veto the execution. 
-- [`proxy::Trait`](https://docs.rs/pallet-proxy/latest/pallet_proxy/trait.Trait.html) +- [`proxy::Config`](https://docs.rs/pallet-proxy/latest/pallet_proxy/trait.Config.html) - [`Call`](https://docs.rs/pallet-proxy/latest/pallet_proxy/enum.Call.html) ## Overview diff --git a/frame/recovery/README.md b/frame/recovery/README.md index c45df2c666af6..31416c65c46a5 100644 --- a/frame/recovery/README.md +++ b/frame/recovery/README.md @@ -1,6 +1,6 @@ # Recovery Pallet -- [`recovery::Trait`](https://docs.rs/pallet-recovery/latest/pallet_recovery/trait.Trait.html) +- [`recovery::Config`](https://docs.rs/pallet-recovery/latest/pallet_recovery/trait.Config.html) - [`Call`](https://docs.rs/pallet-recovery/latest/pallet_recovery/enum.Call.html) ## Overview diff --git a/frame/scheduler/README.md b/frame/scheduler/README.md index 3d07818b15d5e..9a209031d7402 100644 --- a/frame/scheduler/README.md +++ b/frame/scheduler/README.md @@ -1,7 +1,7 @@ # Scheduler A module for scheduling dispatches. -- [`scheduler::Trait`](https://docs.rs/pallet-scheduler/latest/pallet_scheduler/trait.Trait.html) +- [`scheduler::Config`](https://docs.rs/pallet-scheduler/latest/pallet_scheduler/trait.Config.html) - [`Call`](https://docs.rs/pallet-scheduler/latest/pallet_scheduler/enum.Call.html) - [`Module`](https://docs.rs/pallet-scheduler/latest/pallet_scheduler/struct.Module.html) diff --git a/frame/scored-pool/README.md b/frame/scored-pool/README.md index 8f7198a5e11de..bf20124edf52e 100644 --- a/frame/scored-pool/README.md +++ b/frame/scored-pool/README.md @@ -20,7 +20,7 @@ time. If an entity is currently a member, this results in removal from the `Pool` and `Members`; the entity is immediately replaced by the next highest scoring candidate in the pool, if available. -- [`scored_pool::Trait`](https://docs.rs/pallet-scored-pool/latest/pallet_scored_pool/trait.Trait.html) +- [`scored_pool::Trait`](https://docs.rs/pallet-scored-pool/latest/pallet_scored_pool/trait.Config.html) - [`Call`](https://docs.rs/pallet-scored-pool/latest/pallet_scored_pool/enum.Call.html) - [`Module`](https://docs.rs/pallet-scored-pool/latest/pallet_scored_pool/struct.Module.html) diff --git a/frame/session/README.md b/frame/session/README.md index e1f8b7f8e0238..c47b5610de09c 100644 --- a/frame/session/README.md +++ b/frame/session/README.md @@ -3,7 +3,7 @@ The Session module allows validators to manage their session keys, provides a function for changing the session length, and handles session rotation. 
-- [`session::Trait`](https://docs.rs/pallet-session/latest/pallet_session/trait.Trait.html) +- [`session::Trait`](https://docs.rs/pallet-session/latest/pallet_session/trait.Config.html) - [`Call`](https://docs.rs/pallet-session/latest/pallet_session/enum.Call.html) - [`Module`](https://docs.rs/pallet-session/latest/pallet_session/struct.Module.html) diff --git a/frame/society/README.md b/frame/society/README.md index a25940f636de9..8099861866429 100644 --- a/frame/society/README.md +++ b/frame/society/README.md @@ -1,6 +1,6 @@ # Society Module -- [`society::Trait`](https://docs.rs/pallet-society/latest/pallet_society/trait.Trait.html) +- [`society::Config`](https://docs.rs/pallet-society/latest/pallet_society/trait.Config.html) - [`Call`](https://docs.rs/pallet-society/latest/pallet_society/enum.Call.html) ## Overview diff --git a/frame/staking/README.md b/frame/staking/README.md index a379d0a7ad5e2..072353b1a586c 100644 --- a/frame/staking/README.md +++ b/frame/staking/README.md @@ -2,7 +2,7 @@ The Staking module is used to manage funds at stake by network maintainers. -- [`staking::Trait`](https://docs.rs/pallet-staking/latest/pallet_staking/trait.Trait.html) +- [`staking::Config`](https://docs.rs/pallet-staking/latest/pallet_staking/trait.Config.html) - [`Call`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html) - [`Module`](https://docs.rs/pallet-staking/latest/pallet_staking/struct.Module.html) @@ -157,7 +157,7 @@ decl_module! { ### Era payout The era payout is computed using yearly inflation curve defined at -[`T::RewardCurve`](https://docs.rs/pallet-staking/latest/pallet_staking/trait.Trait.html#associatedtype.RewardCurve) as such: +[`T::RewardCurve`](https://docs.rs/pallet-staking/latest/pallet_staking/trait.Config.html#associatedtype.RewardCurve) as such: ```nocompile staker_payout = yearly_inflation(npos_token_staked / total_tokens) * total_tokens / era_per_year @@ -168,7 +168,7 @@ This payout is used to reward stakers as defined in next section remaining_payout = max_yearly_inflation * total_tokens / era_per_year - staker_payout ``` The remaining reward is send to the configurable end-point -[`T::RewardRemainder`](https://docs.rs/pallet-staking/latest/pallet_staking/trait.Trait.html#associatedtype.RewardRemainder). +[`T::RewardRemainder`](https://docs.rs/pallet-staking/latest/pallet_staking/trait.Config.html#associatedtype.RewardRemainder). ### Reward Calculation @@ -214,7 +214,7 @@ Any funds already placed into stash can be the target of the following operation The controller account can free a portion (or all) of the funds using the [`unbond`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.unbond) call. Note that the funds are not immediately -accessible. Instead, a duration denoted by [`BondingDuration`](https://docs.rs/pallet-staking/latest/pallet_staking/trait.Trait.html#associatedtype.BondingDuration) +accessible. Instead, a duration denoted by [`BondingDuration`](https://docs.rs/pallet-staking/latest/pallet_staking/trait.Config.html#associatedtype.BondingDuration) (in number of eras) must pass until the funds can actually be removed. Once the `BondingDuration` is over, the [`withdraw_unbonded`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.withdraw_unbonded) call can be used to actually withdraw the funds. 
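In practice that unbonding flow boils down to two calls. The following is a rough sketch for illustration only (it is not part of this patch and assumes a test runtime where `controller` is a signed origin that has already bonded funds):

```rust
// Schedule 500 units for unlocking; they remain locked for `BondingDuration` eras.
assert_ok!(Staking::unbond(Origin::signed(controller), 500));

// ... after `BondingDuration` eras have elapsed ...

// The second argument is the number of slashing spans to check (0 assumed here).
assert_ok!(Staking::withdraw_unbonded(Origin::signed(controller), 0));
```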
diff --git a/frame/sudo/README.md b/frame/sudo/README.md index 95ca7ce88d972..ac7de01615f3f 100644 --- a/frame/sudo/README.md +++ b/frame/sudo/README.md @@ -1,6 +1,6 @@ # Sudo Module -- [`sudo::Trait`](https://docs.rs/pallet-sudo/latest/pallet_sudo/trait.Trait.html) +- [`sudo::Config`](https://docs.rs/pallet-sudo/latest/pallet_sudo/trait.Config.html) - [`Call`](https://docs.rs/pallet-sudo/latest/pallet_sudo/enum.Call.html) ## Overview diff --git a/frame/system/README.md b/frame/system/README.md index a6da7c3816d22..bc7198d2c9295 100644 --- a/frame/system/README.md +++ b/frame/system/README.md @@ -3,7 +3,7 @@ The System module provides low-level access to core types and cross-cutting utilities. It acts as the base layer for other pallets to interact with the Substrate framework components. -- [`system::Trait`](https://docs.rs/frame-system/latest/frame_system/trait.Trait.html) +- [`system::Config`](https://docs.rs/frame-system/latest/frame_system/pallet/trait.Config.html) ## Overview diff --git a/frame/timestamp/README.md b/frame/timestamp/README.md index de1fb74392225..5f8388b04f829 100644 --- a/frame/timestamp/README.md +++ b/frame/timestamp/README.md @@ -2,9 +2,9 @@ The Timestamp module provides functionality to get and set the on-chain time. -- [`timestamp::Trait`](https://docs.rs/pallet-timestamp/latest/pallet_timestamp/trait.Trait.html) -- [`Call`](https://docs.rs/pallet-timestamp/latest/pallet_timestamp/enum.Call.html) -- [`Module`](https://docs.rs/pallet-timestamp/latest/pallet_timestamp/struct.Module.html) +- [`timestamp::Config`](https://docs.rs/pallet-timestamp/latest/pallet_timestamp/pallet/trait.Config.html) +- [`Call`](https://docs.rs/pallet-timestamp/latest/pallet_timestamp/pallet/enum.Call.html) +- [`Pallet`](https://docs.rs/pallet-timestamp/latest/pallet_timestamp/pallet/struct.Pallet.html) ## Overview diff --git a/frame/utility/README.md b/frame/utility/README.md index f7c0923cd5497..1beeb66733dd4 100644 --- a/frame/utility/README.md +++ b/frame/utility/README.md @@ -1,7 +1,7 @@ # Utility Module A stateless module with helpers for dispatch management which does no re-authentication. -- [`utility::Trait`](https://docs.rs/pallet-utility/latest/pallet_utility/trait.Trait.html) +- [`utility::Config`](https://docs.rs/pallet-utility/latest/pallet_utility/trait.Config.html) - [`Call`](https://docs.rs/pallet-utility/latest/pallet_utility/enum.Call.html) ## Overview diff --git a/frame/vesting/README.md b/frame/vesting/README.md index 811b0dc44152d..c3800eb994d4d 100644 --- a/frame/vesting/README.md +++ b/frame/vesting/README.md @@ -1,6 +1,6 @@ # Vesting Module -- [`vesting::Trait`](https://docs.rs/pallet-vesting/latest/pallet_vesting/trait.Trait.html) +- [`vesting::Config`](https://docs.rs/pallet-vesting/latest/pallet_vesting/trait.Config.html) - [`Call`](https://docs.rs/pallet-vesting/latest/pallet_vesting/enum.Call.html) ## Overview From 3f7d2b7658cb87de61b75e3a782d17abd8a915d1 Mon Sep 17 00:00:00 2001 From: Ashley Date: Tue, 29 Jun 2021 10:23:39 +0200 Subject: [PATCH 54/67] Remove `txpool` as an export of `sc_transaction_pool`, exporting the used components instead. (#9217) * Remove `txpool` as an export of `sc_transaction_pool`, exporting the used components instead. 
* Fix tests --- client/consensus/manual-seal/src/lib.rs | 16 ++++++++-------- client/consensus/manual-seal/src/seal_block.rs | 8 ++++---- client/service/src/config.rs | 2 +- client/service/src/lib.rs | 2 +- client/transaction-pool/src/lib.rs | 8 ++++---- client/transaction-pool/src/testing/pool.rs | 1 - 6 files changed, 18 insertions(+), 19 deletions(-) diff --git a/client/consensus/manual-seal/src/lib.rs b/client/consensus/manual-seal/src/lib.rs index 2473ac848ca32..1e8c69a752ca2 100644 --- a/client/consensus/manual-seal/src/lib.rs +++ b/client/consensus/manual-seal/src/lib.rs @@ -29,7 +29,7 @@ use sp_blockchain::HeaderBackend; use sp_inherents::CreateInherentDataProviders; use sp_runtime::{traits::Block as BlockT, Justifications, ConsensusEngineId}; use sc_client_api::backend::{Backend as ClientBackend, Finalizer}; -use sc_transaction_pool::txpool; +use sc_transaction_pool::{ChainApi, Pool}; use std::{sync::Arc, marker::PhantomData}; use prometheus_endpoint::Registry; @@ -94,7 +94,7 @@ pub fn import_queue( } /// Params required to start the instant sealing authorship task. -pub struct ManualSealParams, A: txpool::ChainApi, SC, CS, CIDP> { +pub struct ManualSealParams, A: ChainApi, SC, CS, CIDP> { /// Block import instance for well. importing blocks. pub block_import: BI, @@ -105,7 +105,7 @@ pub struct ManualSealParams, A: txpool pub client: Arc, /// Shared reference to the transaction pool. - pub pool: Arc>, + pub pool: Arc>, /// Stream, Basically the receiving end of a channel for sending commands to /// the authorship task. @@ -122,7 +122,7 @@ pub struct ManualSealParams, A: txpool } /// Params required to start the manual sealing authorship task. -pub struct InstantSealParams, A: txpool::ChainApi, SC, CIDP> { +pub struct InstantSealParams, A: ChainApi, SC, CIDP> { /// Block import instance for well. importing blocks. pub block_import: BI, @@ -133,7 +133,7 @@ pub struct InstantSealParams, A: txpoo pub client: Arc, /// Shared reference to the transaction pool. - pub pool: Arc>, + pub pool: Arc>, /// SelectChain strategy. 
pub select_chain: SC, @@ -159,7 +159,7 @@ pub async fn run_manual_seal( }: ManualSealParams ) where - A: txpool::ChainApi + 'static, + A: ChainApi + 'static, B: BlockT + 'static, BI: BlockImport> + Send + Sync + 'static, @@ -227,7 +227,7 @@ pub async fn run_instant_seal( }: InstantSealParams ) where - A: txpool::ChainApi + 'static, + A: ChainApi + 'static, B: BlockT + 'static, BI: BlockImport> + Send + Sync + 'static, @@ -275,7 +275,7 @@ mod tests { AccountKeyring::*, TestClientBuilder, }; - use sc_transaction_pool::{BasicPool, RevalidationType, txpool::Options}; + use sc_transaction_pool::{BasicPool, RevalidationType, Options}; use substrate_test_runtime_transaction_pool::{TestApi, uxt}; use sp_transaction_pool::{TransactionPool, MaintainedTransactionPool, TransactionSource}; use sp_runtime::generic::BlockId; diff --git a/client/consensus/manual-seal/src/seal_block.rs b/client/consensus/manual-seal/src/seal_block.rs index 89da02ac49612..ca35bdecb44e8 100644 --- a/client/consensus/manual-seal/src/seal_block.rs +++ b/client/consensus/manual-seal/src/seal_block.rs @@ -25,7 +25,7 @@ use sp_runtime::{ generic::BlockId, }; use futures::prelude::*; -use sc_transaction_pool::txpool; +use sc_transaction_pool::{ChainApi, Pool}; use sp_consensus::{ self, BlockImport, Environment, Proposer, ForkChoiceStrategy, BlockImportParams, BlockOrigin, ImportResult, SelectChain, StateAction, @@ -40,7 +40,7 @@ use sp_api::{ProvideRuntimeApi, TransactionFor}; pub const MAX_PROPOSAL_DURATION: u64 = 10; /// params for sealing a new block -pub struct SealBlockParams<'a, B: BlockT, BI, SC, C: ProvideRuntimeApi, E, P: txpool::ChainApi, CIDP> { +pub struct SealBlockParams<'a, B: BlockT, BI, SC, C: ProvideRuntimeApi, E, P: ChainApi, CIDP> { /// if true, empty blocks(without extrinsics) will be created. /// otherwise, will return Error::EmptyTransactionPool. pub create_empty: bool, @@ -51,7 +51,7 @@ pub struct SealBlockParams<'a, B: BlockT, BI, SC, C: ProvideRuntimeApi, E, P: /// sender to report errors/success to the rpc. 
pub sender: rpc::Sender::Hash>>, /// transaction pool - pub pool: Arc>, + pub pool: Arc>, /// header backend pub client: Arc, /// Environment trait object for creating a proposer @@ -90,7 +90,7 @@ pub async fn seal_block( C: HeaderBackend + ProvideRuntimeApi, E: Environment, E::Proposer: Proposer>, - P: txpool::ChainApi, + P: ChainApi, SC: SelectChain, TransactionFor: 'static, CIDP: CreateInherentDataProviders, diff --git a/client/service/src/config.rs b/client/service/src/config.rs index c91cf0a4ef5c3..be14b4e322e76 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -32,7 +32,7 @@ pub use sc_executor::WasmExecutionMethod; pub use sc_client_api::execution_extensions::{ExecutionStrategies, ExecutionStrategy}; use std::{io, future::Future, path::{PathBuf, Path}, pin::Pin, net::SocketAddr, sync::Arc}; -pub use sc_transaction_pool::txpool::Options as TransactionPoolOptions; +pub use sc_transaction_pool::Options as TransactionPoolOptions; use sc_chain_spec::ChainSpec; use sp_core::crypto::SecretString; pub use sc_telemetry::TelemetryEndpoints; diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index c8ac03ee0e368..cb0f6c023372f 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -65,7 +65,7 @@ pub use sc_chain_spec::{ NoExtension, ChainType, }; pub use sp_transaction_pool::{TransactionPool, InPoolTransaction, error::IntoPoolError}; -pub use sc_transaction_pool::txpool::Options as TransactionPoolOptions; +pub use sc_transaction_pool::Options as TransactionPoolOptions; pub use sc_rpc::Metadata as RpcMetadata; pub use sc_executor::NativeExecutionDispatch; #[doc(hidden)] diff --git a/client/transaction-pool/src/lib.rs b/client/transaction-pool/src/lib.rs index 15c75a554daa3..7dd9414e9f7fc 100644 --- a/client/transaction-pool/src/lib.rs +++ b/client/transaction-pool/src/lib.rs @@ -31,7 +31,7 @@ pub mod error; #[cfg(test)] pub mod testing; -pub use sc_transaction_graph as txpool; +pub use sc_transaction_graph::{ChainApi, Options, Pool}; pub use crate::api::{FullChainApi, LightChainApi}; use std::{collections::{HashMap, HashSet}, sync::Arc, pin::Pin, convert::TryInto}; @@ -48,7 +48,7 @@ use sp_transaction_pool::{ TransactionStatusStreamFor, MaintainedTransactionPool, PoolFuture, ChainEvent, TransactionSource, }; -use sc_transaction_graph::{ChainApi, ExtrinsicHash}; +use sc_transaction_graph::{IsValidator, ExtrinsicHash}; use wasm_timer::Instant; use prometheus_endpoint::Registry as PrometheusRegistry; @@ -191,7 +191,7 @@ impl BasicPool /// revalidation type. pub fn with_revalidation_type( options: sc_transaction_graph::Options, - is_validator: txpool::IsValidator, + is_validator: IsValidator, pool_api: Arc, prometheus: Option<&PrometheusRegistry>, revalidation_type: RevalidationType, @@ -397,7 +397,7 @@ where /// Create new basic transaction pool for a full node with the provided api. 
pub fn new_full( options: sc_transaction_graph::Options, - is_validator: txpool::IsValidator, + is_validator: IsValidator, prometheus: Option<&PrometheusRegistry>, spawner: impl SpawnEssentialNamed, client: Arc, diff --git a/client/transaction-pool/src/testing/pool.rs b/client/transaction-pool/src/testing/pool.rs index 675a58cd44274..9232a1d13ad24 100644 --- a/client/transaction-pool/src/testing/pool.rs +++ b/client/transaction-pool/src/testing/pool.rs @@ -19,7 +19,6 @@ use crate::*; use sp_transaction_pool::TransactionStatus; use futures::executor::{block_on, block_on_stream}; -use txpool::{self, Pool}; use sp_runtime::{ generic::BlockId, transaction_validity::{ValidTransaction, TransactionSource, InvalidTransaction}, From 8712b36e1391d8351ca75931a507667200cfb61a Mon Sep 17 00:00:00 2001 From: Miguel Hervas Date: Wed, 30 Jun 2021 03:00:14 -0700 Subject: [PATCH 55/67] Prep for Altair - Add ss58 prefix (#9123) * Prep for Altair - Add ss58 prefix * fix indent * fix indent --- ss58-registry.json | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/ss58-registry.json b/ss58-registry.json index 4d818dfa5b3e2..6d23cbce90f91 100644 --- a/ss58-registry.json +++ b/ss58-registry.json @@ -329,7 +329,7 @@ "prefix": 36, "network": "centrifuge", "displayName": "Centrifuge Chain", - "symbols": ["RAD"], + "symbols": ["CFG"], "decimals": [18], "standardAccount": "*25519", "website": "https://centrifuge.io/" @@ -522,7 +522,7 @@ "decimals": [18], "standardAccount": "*25519", "website": "https://polkafoundry.com" - }, + }, { "prefix": 101, "network": "origintrail-parachain", @@ -532,6 +532,15 @@ "standardAccount": "secp256k1", "website": "https://origintrail.io" }, + { + "prefix": 136, + "network": "altair", + "displayName": "Altair", + "symbols": ["AIR"], + "decimals": [18], + "standardAccount": "*25519", + "website": "https://centrifuge.io/" + }, { "prefix": 252, "network": "social-network", From 631d4cdbcad438248c2597213918d8207d85bf6e Mon Sep 17 00:00:00 2001 From: Squirrel Date: Wed, 30 Jun 2021 11:06:39 +0100 Subject: [PATCH 56/67] Move client only primitives to another dir (#9220) * Move alloc primitive (not used in /pallets) * Move to alternative location as not shared * moved crates to different dir * ren sp_chain_spec to sc_chain_spec_primatives * merged sc-chain-spec and moved allocation up one. 
* no no_std * nudge * Bump CI --- Cargo.lock | 43 ++++++++----------- Cargo.toml | 3 +- {primitives => client}/allocator/Cargo.toml | 13 +++--- {primitives => client}/allocator/README.md | 2 +- {primitives => client}/allocator/src/error.rs | 8 ++-- .../allocator/src/freeing_bump.rs | 2 +- {primitives => client}/allocator/src/lib.rs | 1 - client/chain-spec/Cargo.toml | 1 - client/chain-spec/src/lib.rs | 26 ++++++++++- client/executor/common/Cargo.toml | 2 +- client/executor/common/src/error.rs | 2 +- client/executor/runtime-test/Cargo.toml | 4 +- client/executor/runtime-test/src/lib.rs | 8 ---- client/executor/wasmi/Cargo.toml | 2 +- client/executor/wasmi/src/lib.rs | 4 +- client/executor/wasmtime/Cargo.toml | 2 +- client/executor/wasmtime/src/host.rs | 2 +- .../executor/wasmtime/src/instance_wrapper.rs | 6 +-- client/executor/wasmtime/src/runtime.rs | 2 +- client/rpc-api/Cargo.toml | 2 +- client/rpc-api/src/system/helpers.rs | 2 +- client/rpc-api/src/system/mod.rs | 4 +- client/rpc/Cargo.toml | 2 +- client/rpc/src/system/mod.rs | 4 +- frame/identity/src/lib.rs | 3 +- primitives/chain-spec/Cargo.toml | 14 ------ primitives/chain-spec/README.md | 3 -- primitives/chain-spec/src/lib.rs | 43 ------------------- 28 files changed, 76 insertions(+), 134 deletions(-) rename {primitives => client}/allocator/Cargo.toml (57%) rename {primitives => client}/allocator/README.md (76%) rename {primitives => client}/allocator/src/error.rs (79%) rename {primitives => client}/allocator/src/freeing_bump.rs (99%) rename {primitives => client}/allocator/src/lib.rs (95%) delete mode 100644 primitives/chain-spec/Cargo.toml delete mode 100644 primitives/chain-spec/README.md delete mode 100644 primitives/chain-spec/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index ee78c31645b43..737a762d88f0d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6991,6 +6991,17 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "sc-allocator" +version = "3.0.0" +dependencies = [ + "log", + "sp-core", + "sp-std", + "sp-wasm-interface", + "thiserror", +] + [[package]] name = "sc-authority-discovery" version = "0.9.0" @@ -7079,7 +7090,6 @@ dependencies = [ "sc-telemetry", "serde", "serde_json", - "sp-chain-spec", "sp-consensus-babe", "sp-core", "sp-runtime", @@ -7503,7 +7513,7 @@ dependencies = [ "derive_more", "parity-scale-codec", "pwasm-utils", - "sp-allocator", + "sc-allocator", "sp-core", "sp-maybe-compressed-blob", "sp-serializer", @@ -7518,8 +7528,8 @@ version = "0.9.0" dependencies = [ "log", "parity-scale-codec", + "sc-allocator", "sc-executor-common", - "sp-allocator", "sp-core", "sp-runtime-interface", "sp-wasm-interface", @@ -7536,9 +7546,9 @@ dependencies = [ "log", "parity-scale-codec", "parity-wasm 0.42.2", + "sc-allocator", "sc-executor-common", "scoped-tls", - "sp-allocator", "sp-core", "sp-runtime-interface", "sp-wasm-interface", @@ -7885,6 +7895,7 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.11.1", "sc-block-builder", + "sc-chain-spec", "sc-cli", "sc-client-api", "sc-executor", @@ -7896,7 +7907,6 @@ dependencies = [ "serde_json", "sp-api", "sp-blockchain", - "sp-chain-spec", "sp-core", "sp-io", "sp-keystore", @@ -7926,9 +7936,9 @@ dependencies = [ "log", "parity-scale-codec", "parking_lot 0.11.1", + "sc-chain-spec", "serde", "serde_json", - "sp-chain-spec", "sp-core", "sp-rpc", "sp-runtime", @@ -7958,7 +7968,7 @@ dependencies = [ name = "sc-runtime-test" version = "2.0.0" dependencies = [ - "sp-allocator", + "sc-allocator", "sp-core", "sp-io", "sp-runtime", @@ -8665,17 +8675,6 @@ dependencies = [ "sha-1 
0.9.4", ] -[[package]] -name = "sp-allocator" -version = "3.0.0" -dependencies = [ - "log", - "sp-core", - "sp-std", - "sp-wasm-interface", - "thiserror", -] - [[package]] name = "sp-api" version = "3.0.0" @@ -8828,14 +8827,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "sp-chain-spec" -version = "3.0.0" -dependencies = [ - "serde", - "serde_json", -] - [[package]] name = "sp-consensus" version = "0.9.0" diff --git a/Cargo.toml b/Cargo.toml index f7552f0bbbc48..d73bf1b52de90 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -49,6 +49,7 @@ members = [ "client/network/test", "client/offchain", "client/peerset", + "client/allocator", "client/proposer-metrics", "client/rpc", "client/rpc-api", @@ -129,7 +130,6 @@ members = [ "frame/uniques", "frame/utility", "frame/vesting", - "primitives/allocator", "primitives/api", "primitives/api/proc-macro", "primitives/api/test", @@ -141,7 +141,6 @@ members = [ "primitives/authorship", "primitives/block-builder", "primitives/blockchain", - "primitives/chain-spec", "primitives/consensus/aura", "primitives/consensus/babe", "primitives/consensus/common", diff --git a/primitives/allocator/Cargo.toml b/client/allocator/Cargo.toml similarity index 57% rename from primitives/allocator/Cargo.toml rename to client/allocator/Cargo.toml index 1c38cbbb9c26e..e2fc69e26db1c 100644 --- a/primitives/allocator/Cargo.toml +++ b/client/allocator/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "sp-allocator" +name = "sc-allocator" version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" @@ -7,18 +7,18 @@ license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Collection of allocator implementations." -documentation = "https://docs.rs/sp-allocator" +documentation = "https://docs.rs/sc-allocator" readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-std = { version = "3.0.0", path = "../std", default-features = false } -sp-core = { version = "3.0.0", path = "../core", default-features = false } -sp-wasm-interface = { version = "3.0.0", path = "../wasm-interface", default-features = false } +sp-std = { version = "3.0.0", path = "../../primitives/std", default-features = false } +sp-core = { version = "3.0.0", path = "../../primitives/core", default-features = false } +sp-wasm-interface = { version = "3.0.0", path = "../../primitives/wasm-interface", default-features = false } log = { version = "0.4.11", optional = true } -thiserror = { version = "1.0.21", optional = true } +thiserror = { version = "1.0.21" } [features] default = [ "std" ] @@ -27,5 +27,4 @@ std = [ "sp-core/std", "sp-wasm-interface/std", "log", - "thiserror", ] diff --git a/primitives/allocator/README.md b/client/allocator/README.md similarity index 76% rename from primitives/allocator/README.md rename to client/allocator/README.md index cd845e2b028eb..b89348b4c6950 100644 --- a/primitives/allocator/README.md +++ b/client/allocator/README.md @@ -1,6 +1,6 @@ Collection of allocator implementations. 
This crate provides the following allocator implementations: -- A freeing-bump allocator: [`FreeingBumpHeapAllocator`](https://docs.rs/sp-allocator/latest/sp_allocator/struct.FreeingBumpHeapAllocator.html) +- A freeing-bump allocator: [`FreeingBumpHeapAllocator`](https://docs.rs/sc-allocator/latest/sc_allocator/struct.FreeingBumpHeapAllocator.html) License: Apache-2.0 \ No newline at end of file diff --git a/primitives/allocator/src/error.rs b/client/allocator/src/error.rs similarity index 79% rename from primitives/allocator/src/error.rs rename to client/allocator/src/error.rs index 8464cd225d00e..d28484d34f4cd 100644 --- a/primitives/allocator/src/error.rs +++ b/client/allocator/src/error.rs @@ -17,15 +17,15 @@ /// The error type used by the allocators. #[derive(sp_core::RuntimeDebug)] -#[cfg_attr(feature = "std", derive(thiserror::Error))] +#[derive(thiserror::Error)] pub enum Error { /// Someone tried to allocate more memory than the allowed maximum per allocation. - #[cfg_attr(feature = "std", error("Requested allocation size is too large"))] + #[error("Requested allocation size is too large")] RequestedAllocationTooLarge, /// Allocator run out of space. - #[cfg_attr(feature = "std", error("Allocator ran out of space"))] + #[error("Allocator ran out of space")] AllocatorOutOfSpace, /// Some other error occurred. - #[cfg_attr(feature = "std", error("Other: {0}"))] + #[error("Other: {0}")] Other(&'static str) } diff --git a/primitives/allocator/src/freeing_bump.rs b/client/allocator/src/freeing_bump.rs similarity index 99% rename from primitives/allocator/src/freeing_bump.rs rename to client/allocator/src/freeing_bump.rs index 36f5bb9c65c0e..3e9b0c9790360 100644 --- a/primitives/allocator/src/freeing_bump.rs +++ b/client/allocator/src/freeing_bump.rs @@ -495,7 +495,7 @@ impl Memory for [u8] { let range = heap_range(ptr, 8, self.len()).ok_or_else(|| error("write out of heap bounds"))?; let bytes = val.to_le_bytes(); - &mut self[range].copy_from_slice(&bytes[..]); + self[range].copy_from_slice(&bytes[..]); Ok(()) } fn size(&self) -> u32 { diff --git a/primitives/allocator/src/lib.rs b/client/allocator/src/lib.rs similarity index 95% rename from primitives/allocator/src/lib.rs rename to client/allocator/src/lib.rs index 7d45fb5f368c7..a82c7542199d4 100644 --- a/primitives/allocator/src/lib.rs +++ b/client/allocator/src/lib.rs @@ -20,7 +20,6 @@ //! This crate provides the following allocator implementations: //! 
- A freeing-bump allocator: [`FreeingBumpHeapAllocator`](freeing_bump::FreeingBumpHeapAllocator) -#![cfg_attr(not(feature = "std"), no_std)] #![warn(missing_docs)] mod error; diff --git a/client/chain-spec/Cargo.toml b/client/chain-spec/Cargo.toml index 27850cc8400b3..2eddec524cad6 100644 --- a/client/chain-spec/Cargo.toml +++ b/client/chain-spec/Cargo.toml @@ -20,7 +20,6 @@ sp-core = { version = "3.0.0", path = "../../primitives/core" } serde = { version = "1.0.101", features = ["derive"] } serde_json = "1.0.41" sp-runtime = { version = "3.0.0", path = "../../primitives/runtime" } -sp-chain-spec = { version = "3.0.0", path = "../../primitives/chain-spec" } sc-telemetry = { version = "3.0.0", path = "../telemetry" } codec = { package = "parity-scale-codec", version = "2.0.0" } sc-consensus-babe = { version = "0.9.0", path = "../consensus/babe" } diff --git a/client/chain-spec/src/lib.rs b/client/chain-spec/src/lib.rs index e75dafcfe0255..1bfa1808ee556 100644 --- a/client/chain-spec/src/lib.rs +++ b/client/chain-spec/src/lib.rs @@ -115,7 +115,6 @@ pub use chain_spec::{ }; pub use extension::{Group, Fork, Forks, Extension, GetExtension, get_extension}; pub use sc_chain_spec_derive::{ChainSpecExtension, ChainSpecGroup}; -pub use sp_chain_spec::{Properties, ChainType}; use serde::{Serialize, de::DeserializeOwned}; use sp_runtime::BuildStorage; @@ -123,6 +122,31 @@ use sc_network::config::MultiaddrWithPeerId; use sc_telemetry::TelemetryEndpoints; use sp_core::storage::Storage; +/// The type of a chain. +/// +/// This can be used by tools to determine the type of a chain for displaying +/// additional information or enabling additional features. +#[derive(serde::Serialize, serde::Deserialize, Debug, PartialEq, Clone)] +pub enum ChainType { + /// A development chain that runs mainly on one node. + Development, + /// A local chain that runs locally on multiple nodes for testing purposes. + Local, + /// A live chain. + Live, + /// Some custom chain type. + Custom(String), +} + +impl Default for ChainType { + fn default() -> Self { + Self::Live + } +} + +/// Arbitrary properties defined in chain spec as a JSON object +pub type Properties = serde_json::map::Map; + /// A set of traits for the runtime genesis config. 
pub trait RuntimeGenesis: Serialize + DeserializeOwned + BuildStorage {} impl RuntimeGenesis for T {} diff --git a/client/executor/common/Cargo.toml b/client/executor/common/Cargo.toml index cb238f3a96fb0..75cfcd3d2d851 100644 --- a/client/executor/common/Cargo.toml +++ b/client/executor/common/Cargo.toml @@ -19,7 +19,7 @@ pwasm-utils = "0.18.0" codec = { package = "parity-scale-codec", version = "2.0.0" } wasmi = "0.9.0" sp-core = { version = "3.0.0", path = "../../../primitives/core" } -sp-allocator = { version = "3.0.0", path = "../../../primitives/allocator" } +sc-allocator = { version = "3.0.0", path = "../../allocator" } sp-wasm-interface = { version = "3.0.0", path = "../../../primitives/wasm-interface" } sp-maybe-compressed-blob = { version = "3.0.0", path = "../../../primitives/maybe-compressed-blob" } sp-serializer = { version = "3.0.0", path = "../../../primitives/serializer" } diff --git a/client/executor/common/src/error.rs b/client/executor/common/src/error.rs index 96329d1680301..6ad4802e57a8b 100644 --- a/client/executor/common/src/error.rs +++ b/client/executor/common/src/error.rs @@ -77,7 +77,7 @@ pub enum Error { Other(String), #[error(transparent)] - Allocator(#[from] sp_allocator::Error), + Allocator(#[from] sc_allocator::Error), #[error("Host function {0} execution failed with: {1}")] FunctionExecution(String, String), diff --git a/client/executor/runtime-test/Cargo.toml b/client/executor/runtime-test/Cargo.toml index 93ad463be16c3..2f06556644ac4 100644 --- a/client/executor/runtime-test/Cargo.toml +++ b/client/executor/runtime-test/Cargo.toml @@ -13,7 +13,7 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-allocator = { version = "3.0.0", default-features = false, path = "../../../primitives/allocator" } +sc-allocator = { version = "3.0.0", default-features = false, path = "../../allocator" } sp-core = { version = "3.0.0", default-features = false, path = "../../../primitives/core" } sp-io = { version = "3.0.0", default-features = false, path = "../../../primitives/io" } sp-runtime = { version = "3.0.0", default-features = false, path = "../../../primitives/runtime" } @@ -27,7 +27,7 @@ substrate-wasm-builder = { version = "4.0.0", path = "../../../utils/wasm-builde [features] default = [ "std" ] std = [ - "sp-allocator/std", + "sc-allocator/std", "sp-core/std", "sp-io/std", "sp-runtime/std", diff --git a/client/executor/runtime-test/src/lib.rs b/client/executor/runtime-test/src/lib.rs index 439d4f66b1879..af0c9edcc32e2 100644 --- a/client/executor/runtime-test/src/lib.rs +++ b/client/executor/runtime-test/src/lib.rs @@ -211,7 +211,6 @@ sp_core::wasm_export_functions! { code } - fn test_sandbox_get_global_val(code: Vec) -> i64 { let env_builder = sp_sandbox::EnvironmentDefinitionBuilder::new(); let instance = if let Ok(i) = sp_sandbox::Instance::new(&code, &env_builder, &mut ()) { @@ -227,12 +226,10 @@ sp_core::wasm_export_functions! { } } - fn test_offchain_index_set() { sp_io::offchain_index::set(b"k", b"v"); } - fn test_offchain_local_storage() -> bool { let kind = sp_core::offchain::StorageKind::PERSISTENT; assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), None); @@ -286,11 +283,6 @@ sp_core::wasm_export_functions! { run().is_some() } - // Just some test to make sure that `sp-allocator` compiles on `no_std`. 
- fn test_sp_allocator_compiles() { - sp_allocator::FreeingBumpHeapAllocator::new(0); - } - fn test_enter_span() -> u64 { wasm_tracing::enter_span(Default::default()) } diff --git a/client/executor/wasmi/Cargo.toml b/client/executor/wasmi/Cargo.toml index 4c3054d5d10c2..dbdf26b63d247 100644 --- a/client/executor/wasmi/Cargo.toml +++ b/client/executor/wasmi/Cargo.toml @@ -18,7 +18,7 @@ log = "0.4.8" wasmi = "0.9.0" codec = { package = "parity-scale-codec", version = "2.0.0" } sc-executor-common = { version = "0.9.0", path = "../common" } +sc-allocator = { version = "3.0.0", path = "../../allocator" } sp-wasm-interface = { version = "3.0.0", path = "../../../primitives/wasm-interface" } sp-runtime-interface = { version = "3.0.0", path = "../../../primitives/runtime-interface" } sp-core = { version = "3.0.0", path = "../../../primitives/core" } -sp-allocator = { version = "3.0.0", path = "../../../primitives/allocator" } diff --git a/client/executor/wasmi/src/lib.rs b/client/executor/wasmi/src/lib.rs index d4c9f4dc2e806..1bafa39494098 100644 --- a/client/executor/wasmi/src/lib.rs +++ b/client/executor/wasmi/src/lib.rs @@ -40,7 +40,7 @@ use sc_executor_common::runtime_blob::{RuntimeBlob, DataSegmentsSnapshot}; struct FunctionExecutor<'a> { sandbox_store: sandbox::Store, - heap: sp_allocator::FreeingBumpHeapAllocator, + heap: sc_allocator::FreeingBumpHeapAllocator, memory: MemoryRef, table: Option, host_functions: &'a [&'static dyn Function], @@ -59,7 +59,7 @@ impl<'a> FunctionExecutor<'a> { ) -> Result { Ok(FunctionExecutor { sandbox_store: sandbox::Store::new(), - heap: sp_allocator::FreeingBumpHeapAllocator::new(heap_base), + heap: sc_allocator::FreeingBumpHeapAllocator::new(heap_base), memory: m, table: t, host_functions, diff --git a/client/executor/wasmtime/Cargo.toml b/client/executor/wasmtime/Cargo.toml index 1e886d15beb18..bdaae49c24d59 100644 --- a/client/executor/wasmtime/Cargo.toml +++ b/client/executor/wasmtime/Cargo.toml @@ -23,7 +23,7 @@ sc-executor-common = { version = "0.9.0", path = "../common" } sp-wasm-interface = { version = "3.0.0", path = "../../../primitives/wasm-interface" } sp-runtime-interface = { version = "3.0.0", path = "../../../primitives/runtime-interface" } sp-core = { version = "3.0.0", path = "../../../primitives/core" } -sp-allocator = { version = "3.0.0", path = "../../../primitives/allocator" } +sc-allocator = { version = "3.0.0", path = "../../allocator" } wasmtime = "0.27.0" [dev-dependencies] diff --git a/client/executor/wasmtime/src/host.rs b/client/executor/wasmtime/src/host.rs index c1eb77ff81f34..3f5ac0560a6d7 100644 --- a/client/executor/wasmtime/src/host.rs +++ b/client/executor/wasmtime/src/host.rs @@ -24,7 +24,7 @@ use crate::util; use std::{cell::RefCell, rc::Rc}; use log::trace; use codec::{Encode, Decode}; -use sp_allocator::FreeingBumpHeapAllocator; +use sc_allocator::FreeingBumpHeapAllocator; use sc_executor_common::error::Result; use sc_executor_common::sandbox::{self, SandboxCapabilities, SupervisorFuncIndex}; use sp_core::sandbox as sandbox_primitives; diff --git a/client/executor/wasmtime/src/instance_wrapper.rs b/client/executor/wasmtime/src/instance_wrapper.rs index 866dbfb2e2bfc..10c4926743cf6 100644 --- a/client/executor/wasmtime/src/instance_wrapper.rs +++ b/client/executor/wasmtime/src/instance_wrapper.rs @@ -340,7 +340,7 @@ impl InstanceWrapper { let range = util::checked_range(address.into(), data.len(), memory.len()) .ok_or_else(|| Error::Other("memory write is out of bounds".into()))?; - &mut 
memory[range].copy_from_slice(data); + memory[range].copy_from_slice(data); Ok(()) } } @@ -351,7 +351,7 @@ impl InstanceWrapper { /// to get more details. pub fn allocate( &self, - allocator: &mut sp_allocator::FreeingBumpHeapAllocator, + allocator: &mut sc_allocator::FreeingBumpHeapAllocator, size: WordSize, ) -> Result> { unsafe { @@ -368,7 +368,7 @@ impl InstanceWrapper { /// Returns `Err` in case the given memory region cannot be deallocated. pub fn deallocate( &self, - allocator: &mut sp_allocator::FreeingBumpHeapAllocator, + allocator: &mut sc_allocator::FreeingBumpHeapAllocator, ptr: Pointer, ) -> Result<()> { unsafe { diff --git a/client/executor/wasmtime/src/runtime.rs b/client/executor/wasmtime/src/runtime.rs index 5018b11264d71..021377eeb20dc 100644 --- a/client/executor/wasmtime/src/runtime.rs +++ b/client/executor/wasmtime/src/runtime.rs @@ -31,7 +31,7 @@ use sc_executor_common::{ runtime_blob::{DataSegmentsSnapshot, ExposedMutableGlobalsSet, GlobalsSnapshot, RuntimeBlob}, wasm_runtime::{WasmModule, WasmInstance, InvokeMethod}, }; -use sp_allocator::FreeingBumpHeapAllocator; +use sc_allocator::FreeingBumpHeapAllocator; use sp_runtime_interface::unpack_ptr_and_len; use sp_wasm_interface::{Function, Pointer, WordSize, Value}; use wasmtime::{Engine, Store}; diff --git a/client/rpc-api/Cargo.toml b/client/rpc-api/Cargo.toml index 662f4bd16fd4c..87c4577c72808 100644 --- a/client/rpc-api/Cargo.toml +++ b/client/rpc-api/Cargo.toml @@ -25,7 +25,7 @@ parking_lot = "0.11.1" sp-core = { version = "3.0.0", path = "../../primitives/core" } sp-version = { version = "3.0.0", path = "../../primitives/version" } sp-runtime = { path = "../../primitives/runtime" , version = "3.0.0"} -sp-chain-spec = { path = "../../primitives/chain-spec" , version = "3.0.0"} +sc-chain-spec = { path = "../chain-spec" , version = "3.0.0"} serde = { version = "1.0.101", features = ["derive"] } serde_json = "1.0.41" sp-transaction-pool = { version = "3.0.0", path = "../../primitives/transaction-pool" } diff --git a/client/rpc-api/src/system/helpers.rs b/client/rpc-api/src/system/helpers.rs index c2fc807471f38..c8124d9c67526 100644 --- a/client/rpc-api/src/system/helpers.rs +++ b/client/rpc-api/src/system/helpers.rs @@ -20,7 +20,7 @@ use std::fmt; use serde::{Serialize, Deserialize}; -use sp_chain_spec::{Properties, ChainType}; +use sc_chain_spec::{Properties, ChainType}; /// Running node's static details. #[derive(Clone, Debug)] diff --git a/client/rpc-api/src/system/mod.rs b/client/rpc-api/src/system/mod.rs index 4252ef20ac22a..e820fb2e702e3 100644 --- a/client/rpc-api/src/system/mod.rs +++ b/client/rpc-api/src/system/mod.rs @@ -47,11 +47,11 @@ pub trait SystemApi { /// Get the chain's type. #[rpc(name = "system_chainType")] - fn system_type(&self) -> SystemResult; + fn system_type(&self) -> SystemResult; /// Get a custom set of properties as a JSON object, defined in the chain spec. #[rpc(name = "system_properties")] - fn system_properties(&self) -> SystemResult; + fn system_properties(&self) -> SystemResult; /// Return health status of the node. 
/// diff --git a/client/rpc/Cargo.toml b/client/rpc/Cargo.toml index a352e5fc387bd..140039cab7d4e 100644 --- a/client/rpc/Cargo.toml +++ b/client/rpc/Cargo.toml @@ -31,7 +31,7 @@ sp-utils = { version = "3.0.0", path = "../../primitives/utils" } sp-rpc = { version = "3.0.0", path = "../../primitives/rpc" } sp-keystore = { version = "0.9.0", path = "../../primitives/keystore" } sp-state-machine = { version = "0.9.0", path = "../../primitives/state-machine" } -sp-chain-spec = { version = "3.0.0", path = "../../primitives/chain-spec" } +sc-chain-spec = { version = "3.0.0", path = "../chain-spec" } sc-executor = { version = "0.9.0", path = "../executor" } sc-block-builder = { version = "0.9.0", path = "../block-builder" } sc-keystore = { version = "3.0.0", path = "../keystore" } diff --git a/client/rpc/src/system/mod.rs b/client/rpc/src/system/mod.rs index 248c2dcfed3c6..d405755731ccb 100644 --- a/client/rpc/src/system/mod.rs +++ b/client/rpc/src/system/mod.rs @@ -106,11 +106,11 @@ impl SystemApi::Number> for Sy Ok(self.info.chain_name.clone()) } - fn system_type(&self) -> Result { + fn system_type(&self) -> Result { Ok(self.info.chain_type.clone()) } - fn system_properties(&self) -> Result { + fn system_properties(&self) -> Result { Ok(self.info.properties.clone()) } diff --git a/frame/identity/src/lib.rs b/frame/identity/src/lib.rs index b71b069ccb74f..d398384887d98 100644 --- a/frame/identity/src/lib.rs +++ b/frame/identity/src/lib.rs @@ -141,7 +141,7 @@ impl Encode for Data { Data::Raw(ref x) => { let l = x.len().min(32); let mut r = vec![l as u8 + 1; l + 1]; - &mut r[1..].copy_from_slice(&x[..l as usize]); + r[1..].copy_from_slice(&x[..l as usize]); r } Data::BlakeTwo256(ref h) => once(34u8).chain(h.iter().cloned()).collect(), @@ -1161,4 +1161,3 @@ impl Pallet { .collect() } } - diff --git a/primitives/chain-spec/Cargo.toml b/primitives/chain-spec/Cargo.toml deleted file mode 100644 index ec3e731bb0e95..0000000000000 --- a/primitives/chain-spec/Cargo.toml +++ /dev/null @@ -1,14 +0,0 @@ -[package] -name = "sp-chain-spec" -version = "3.0.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "Apache-2.0" -homepage = "https://substrate.dev" -repository = "https://github.com/paritytech/substrate/" -description = "Substrate chain configurations types." -readme = "README.md" - -[dependencies] -serde = { version = "1.0.101", features = ["derive"] } -serde_json = "1.0.41" diff --git a/primitives/chain-spec/README.md b/primitives/chain-spec/README.md deleted file mode 100644 index 375f14a441ab6..0000000000000 --- a/primitives/chain-spec/README.md +++ /dev/null @@ -1,3 +0,0 @@ -Types and traits related to chain specifications. - -License: Apache-2.0 \ No newline at end of file diff --git a/primitives/chain-spec/src/lib.rs b/primitives/chain-spec/src/lib.rs deleted file mode 100644 index 5456718e351d1..0000000000000 --- a/primitives/chain-spec/src/lib.rs +++ /dev/null @@ -1,43 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -//! Types and traits related to chain specifications. - -/// The type of a chain. -/// -/// This can be used by tools to determine the type of a chain for displaying -/// additional information or enabling additional features. -#[derive(serde::Serialize, serde::Deserialize, Debug, PartialEq, Clone)] -pub enum ChainType { - /// A development chain that runs mainly on one node. - Development, - /// A local chain that runs locally on multiple nodes for testing purposes. - Local, - /// A live chain. - Live, - /// Some custom chain type. - Custom(String), -} - -impl Default for ChainType { - fn default() -> Self { - Self::Live - } -} - -/// Arbitrary properties defined in chain spec as a JSON object -pub type Properties = serde_json::map::Map; From a4c8ab99d68bdfa0cc85a07ae99b7e9f289ef450 Mon Sep 17 00:00:00 2001 From: Zeke Mostov <32168567+emostov@users.noreply.github.com> Date: Wed, 30 Jun 2021 06:02:09 -0700 Subject: [PATCH 57/67] pallet-vesting: Move `tests` module and create `mock` module (#9234) * pallet-vesting: Move `tests` module to seperate file * Move mock to own file * add copyright header appache-2.0 * fix mock import paths in benchmark test macro --- frame/vesting/src/benchmarking.rs | 4 +- frame/vesting/src/lib.rs | 499 ++---------------------------- frame/vesting/src/mock.rs | 140 +++++++++ frame/vesting/src/tests.rs | 359 +++++++++++++++++++++ 4 files changed, 525 insertions(+), 477 deletions(-) create mode 100644 frame/vesting/src/mock.rs create mode 100644 frame/vesting/src/tests.rs diff --git a/frame/vesting/src/benchmarking.rs b/frame/vesting/src/benchmarking.rs index 8d16a53fba2c1..6fd27e1877229 100644 --- a/frame/vesting/src/benchmarking.rs +++ b/frame/vesting/src/benchmarking.rs @@ -226,6 +226,6 @@ benchmarks! 
{ impl_benchmark_test_suite!( Vesting, - crate::tests::ExtBuilder::default().existential_deposit(256).build(), - crate::tests::Test, + crate::mock::ExtBuilder::default().existential_deposit(256).build(), + crate::mock::Test, ); diff --git a/frame/vesting/src/lib.rs b/frame/vesting/src/lib.rs index 8c520b715801e..b53262840f443 100644 --- a/frame/vesting/src/lib.rs +++ b/frame/vesting/src/lib.rs @@ -45,25 +45,35 @@ #![cfg_attr(not(feature = "std"), no_std)] mod benchmarking; +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; + pub mod weights; -use sp_std::prelude::*; -use sp_std::fmt::Debug; -use codec::{Encode, Decode}; -use sp_runtime::{RuntimeDebug, traits::{ - StaticLookup, Zero, AtLeast32BitUnsigned, MaybeSerializeDeserialize, Convert -}}; -use frame_support::{ensure, pallet_prelude::*}; -use frame_support::traits::{ - Currency, LockableCurrency, VestingSchedule, WithdrawReasons, LockIdentifier, - ExistenceRequirement, Get, +use codec::{Decode, Encode}; +use frame_support::{ + ensure, + pallet_prelude::*, + traits::{ + Currency, ExistenceRequirement, Get, LockIdentifier, LockableCurrency, VestingSchedule, + WithdrawReasons, + }, }; -use frame_system::{ensure_signed, ensure_root, pallet_prelude::*}; -pub use weights::WeightInfo; +use frame_system::{ensure_root, ensure_signed, pallet_prelude::*}; pub use pallet::*; +use sp_runtime::{ + traits::{AtLeast32BitUnsigned, Convert, MaybeSerializeDeserialize, StaticLookup, Zero}, + RuntimeDebug, +}; +use sp_std::{fmt::Debug, prelude::*}; +pub use weights::WeightInfo; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -type MaxLocksOf = <::Currency as LockableCurrency<::AccountId>>::MaxLocks; +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; +type MaxLocksOf = + <::Currency as LockableCurrency<::AccountId>>::MaxLocks; const VESTING_ID: LockIdentifier = *b"vesting "; @@ -404,464 +414,3 @@ impl VestingSchedule for Pallet where debug_assert!(res.is_ok()); } } - -#[cfg(test)] -mod tests { - use super::*; - use crate as pallet_vesting; - - use frame_support::{assert_ok, assert_noop, parameter_types}; - use sp_core::H256; - use sp_runtime::{ - testing::Header, - traits::{BlakeTwo256, IdentityLookup, Identity, BadOrigin}, - }; - use frame_system::RawOrigin; - - type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; - type Block = frame_system::mocking::MockBlock; - - frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, - Vesting: pallet_vesting::{Pallet, Call, Storage, Event, Config}, - } - ); - - parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); - } - impl frame_system::Config for Test { - type BaseCallFilter = (); - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Call = Call; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = Event; - type BlockHashCount = BlockHashCount; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - } - parameter_types! { - pub const MaxLocks: u32 = 10; - } - impl pallet_balances::Config for Test { - type Balance = u64; - type DustRemoval = (); - type Event = Event; - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - type MaxLocks = MaxLocks; - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); - } - parameter_types! { - pub const MinVestedTransfer: u64 = 256 * 2; - pub static ExistentialDeposit: u64 = 0; - } - impl Config for Test { - type Event = Event; - type Currency = Balances; - type BlockNumberToBalance = Identity; - type MinVestedTransfer = MinVestedTransfer; - type WeightInfo = (); - } - - pub struct ExtBuilder { - existential_deposit: u64, - } - impl Default for ExtBuilder { - fn default() -> Self { - Self { - existential_deposit: 1, - } - } - } - impl ExtBuilder { - pub fn existential_deposit(mut self, existential_deposit: u64) -> Self { - self.existential_deposit = existential_deposit; - self - } - pub fn build(self) -> sp_io::TestExternalities { - EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = self.existential_deposit); - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig:: { - balances: vec![ - (1, 10 * self.existential_deposit), - (2, 20 * self.existential_deposit), - (3, 30 * self.existential_deposit), - (4, 40 * self.existential_deposit), - (12, 10 * self.existential_deposit) - ], - }.assimilate_storage(&mut t).unwrap(); - pallet_vesting::GenesisConfig:: { - vesting: vec![ - (1, 0, 10, 5 * self.existential_deposit), - (2, 10, 20, 0), - (12, 10, 20, 5 * self.existential_deposit) - ], - }.assimilate_storage(&mut t).unwrap(); - let mut ext = sp_io::TestExternalities::new(t); - ext.execute_with(|| System::set_block_number(1)); - ext - } - } - - #[test] - fn check_vesting_status() { - ExtBuilder::default() - .existential_deposit(256) - .build() - .execute_with(|| { - let user1_free_balance = Balances::free_balance(&1); - let user2_free_balance = Balances::free_balance(&2); - let user12_free_balance = Balances::free_balance(&12); - assert_eq!(user1_free_balance, 256 * 10); // Account 1 has free balance - assert_eq!(user2_free_balance, 256 * 20); // Account 2 has free balance - assert_eq!(user12_free_balance, 256 * 10); // Account 12 has free balance - let user1_vesting_schedule = VestingInfo { - locked: 256 * 5, - per_block: 128, // Vesting over 10 blocks - starting_block: 0, - }; - let user2_vesting_schedule = VestingInfo { - locked: 256 * 20, - per_block: 256, // Vesting over 20 blocks - starting_block: 10, - }; - let user12_vesting_schedule = VestingInfo { - locked: 256 * 5, - per_block: 64, // Vesting over 20 blocks - starting_block: 10, - }; - 
assert_eq!(Vesting::vesting(&1), Some(user1_vesting_schedule)); // Account 1 has a vesting schedule - assert_eq!(Vesting::vesting(&2), Some(user2_vesting_schedule)); // Account 2 has a vesting schedule - assert_eq!(Vesting::vesting(&12), Some(user12_vesting_schedule)); // Account 12 has a vesting schedule - - // Account 1 has only 128 units vested from their illiquid 256 * 5 units at block 1 - assert_eq!(Vesting::vesting_balance(&1), Some(128 * 9)); - // Account 2 has their full balance locked - assert_eq!(Vesting::vesting_balance(&2), Some(user2_free_balance)); - // Account 12 has only their illiquid funds locked - assert_eq!(Vesting::vesting_balance(&12), Some(user12_free_balance - 256 * 5)); - - System::set_block_number(10); - assert_eq!(System::block_number(), 10); - - // Account 1 has fully vested by block 10 - assert_eq!(Vesting::vesting_balance(&1), Some(0)); - // Account 2 has started vesting by block 10 - assert_eq!(Vesting::vesting_balance(&2), Some(user2_free_balance)); - // Account 12 has started vesting by block 10 - assert_eq!(Vesting::vesting_balance(&12), Some(user12_free_balance - 256 * 5)); - - System::set_block_number(30); - assert_eq!(System::block_number(), 30); - - assert_eq!(Vesting::vesting_balance(&1), Some(0)); // Account 1 is still fully vested, and not negative - assert_eq!(Vesting::vesting_balance(&2), Some(0)); // Account 2 has fully vested by block 30 - assert_eq!(Vesting::vesting_balance(&12), Some(0)); // Account 2 has fully vested by block 30 - - }); - } - - #[test] - fn unvested_balance_should_not_transfer() { - ExtBuilder::default() - .existential_deposit(10) - .build() - .execute_with(|| { - let user1_free_balance = Balances::free_balance(&1); - assert_eq!(user1_free_balance, 100); // Account 1 has free balance - // Account 1 has only 5 units vested at block 1 (plus 50 unvested) - assert_eq!(Vesting::vesting_balance(&1), Some(45)); - assert_noop!( - Balances::transfer(Some(1).into(), 2, 56), - pallet_balances::Error::::LiquidityRestrictions, - ); // Account 1 cannot send more than vested amount - }); - } - - #[test] - fn vested_balance_should_transfer() { - ExtBuilder::default() - .existential_deposit(10) - .build() - .execute_with(|| { - let user1_free_balance = Balances::free_balance(&1); - assert_eq!(user1_free_balance, 100); // Account 1 has free balance - // Account 1 has only 5 units vested at block 1 (plus 50 unvested) - assert_eq!(Vesting::vesting_balance(&1), Some(45)); - assert_ok!(Vesting::vest(Some(1).into())); - assert_ok!(Balances::transfer(Some(1).into(), 2, 55)); - }); - } - - #[test] - fn vested_balance_should_transfer_using_vest_other() { - ExtBuilder::default() - .existential_deposit(10) - .build() - .execute_with(|| { - let user1_free_balance = Balances::free_balance(&1); - assert_eq!(user1_free_balance, 100); // Account 1 has free balance - // Account 1 has only 5 units vested at block 1 (plus 50 unvested) - assert_eq!(Vesting::vesting_balance(&1), Some(45)); - assert_ok!(Vesting::vest_other(Some(2).into(), 1)); - assert_ok!(Balances::transfer(Some(1).into(), 2, 55)); - }); - } - - #[test] - fn extra_balance_should_transfer() { - ExtBuilder::default() - .existential_deposit(10) - .build() - .execute_with(|| { - assert_ok!(Balances::transfer(Some(3).into(), 1, 100)); - assert_ok!(Balances::transfer(Some(3).into(), 2, 100)); - - let user1_free_balance = Balances::free_balance(&1); - assert_eq!(user1_free_balance, 200); // Account 1 has 100 more free balance than normal - - let user2_free_balance = Balances::free_balance(&2); - 
assert_eq!(user2_free_balance, 300); // Account 2 has 100 more free balance than normal - - // Account 1 has only 5 units vested at block 1 (plus 150 unvested) - assert_eq!(Vesting::vesting_balance(&1), Some(45)); - assert_ok!(Vesting::vest(Some(1).into())); - assert_ok!(Balances::transfer(Some(1).into(), 3, 155)); // Account 1 can send extra units gained - - // Account 2 has no units vested at block 1, but gained 100 - assert_eq!(Vesting::vesting_balance(&2), Some(200)); - assert_ok!(Vesting::vest(Some(2).into())); - assert_ok!(Balances::transfer(Some(2).into(), 3, 100)); // Account 2 can send extra units gained - }); - } - - #[test] - fn liquid_funds_should_transfer_with_delayed_vesting() { - ExtBuilder::default() - .existential_deposit(256) - .build() - .execute_with(|| { - let user12_free_balance = Balances::free_balance(&12); - - assert_eq!(user12_free_balance, 2560); // Account 12 has free balance - // Account 12 has liquid funds - assert_eq!(Vesting::vesting_balance(&12), Some(user12_free_balance - 256 * 5)); - - // Account 12 has delayed vesting - let user12_vesting_schedule = VestingInfo { - locked: 256 * 5, - per_block: 64, // Vesting over 20 blocks - starting_block: 10, - }; - assert_eq!(Vesting::vesting(&12), Some(user12_vesting_schedule)); - - // Account 12 can still send liquid funds - assert_ok!(Balances::transfer(Some(12).into(), 3, 256 * 5)); - }); - } - - #[test] - fn vested_transfer_works() { - ExtBuilder::default() - .existential_deposit(256) - .build() - .execute_with(|| { - let user3_free_balance = Balances::free_balance(&3); - let user4_free_balance = Balances::free_balance(&4); - assert_eq!(user3_free_balance, 256 * 30); - assert_eq!(user4_free_balance, 256 * 40); - // Account 4 should not have any vesting yet. - assert_eq!(Vesting::vesting(&4), None); - // Make the schedule for the new transfer. - let new_vesting_schedule = VestingInfo { - locked: 256 * 5, - per_block: 64, // Vesting over 20 blocks - starting_block: 10, - }; - assert_ok!(Vesting::vested_transfer(Some(3).into(), 4, new_vesting_schedule)); - // Now account 4 should have vesting. - assert_eq!(Vesting::vesting(&4), Some(new_vesting_schedule)); - // Ensure the transfer happened correctly. - let user3_free_balance_updated = Balances::free_balance(&3); - assert_eq!(user3_free_balance_updated, 256 * 25); - let user4_free_balance_updated = Balances::free_balance(&4); - assert_eq!(user4_free_balance_updated, 256 * 45); - // Account 4 has 5 * 256 locked. - assert_eq!(Vesting::vesting_balance(&4), Some(256 * 5)); - - System::set_block_number(20); - assert_eq!(System::block_number(), 20); - - // Account 4 has 5 * 64 units vested by block 20. - assert_eq!(Vesting::vesting_balance(&4), Some(10 * 64)); - - System::set_block_number(30); - assert_eq!(System::block_number(), 30); - - // Account 4 has fully vested. - assert_eq!(Vesting::vesting_balance(&4), Some(0)); - }); - } - - #[test] - fn vested_transfer_correctly_fails() { - ExtBuilder::default() - .existential_deposit(256) - .build() - .execute_with(|| { - let user2_free_balance = Balances::free_balance(&2); - let user4_free_balance = Balances::free_balance(&4); - assert_eq!(user2_free_balance, 256 * 20); - assert_eq!(user4_free_balance, 256 * 40); - // Account 2 should already have a vesting schedule. 
- let user2_vesting_schedule = VestingInfo { - locked: 256 * 20, - per_block: 256, // Vesting over 20 blocks - starting_block: 10, - }; - assert_eq!(Vesting::vesting(&2), Some(user2_vesting_schedule)); - - // The vesting schedule we will try to create, fails due to pre-existence of schedule. - let new_vesting_schedule = VestingInfo { - locked: 256 * 5, - per_block: 64, // Vesting over 20 blocks - starting_block: 10, - }; - assert_noop!( - Vesting::vested_transfer(Some(4).into(), 2, new_vesting_schedule), - Error::::ExistingVestingSchedule, - ); - - // Fails due to too low transfer amount. - let new_vesting_schedule_too_low = VestingInfo { - locked: 256 * 1, - per_block: 64, - starting_block: 10, - }; - assert_noop!( - Vesting::vested_transfer(Some(3).into(), 4, new_vesting_schedule_too_low), - Error::::AmountLow, - ); - - // Verify no currency transfer happened. - assert_eq!(user2_free_balance, 256 * 20); - assert_eq!(user4_free_balance, 256 * 40); - }); - } - - #[test] - fn force_vested_transfer_works() { - ExtBuilder::default() - .existential_deposit(256) - .build() - .execute_with(|| { - let user3_free_balance = Balances::free_balance(&3); - let user4_free_balance = Balances::free_balance(&4); - assert_eq!(user3_free_balance, 256 * 30); - assert_eq!(user4_free_balance, 256 * 40); - // Account 4 should not have any vesting yet. - assert_eq!(Vesting::vesting(&4), None); - // Make the schedule for the new transfer. - let new_vesting_schedule = VestingInfo { - locked: 256 * 5, - per_block: 64, // Vesting over 20 blocks - starting_block: 10, - }; - assert_noop!(Vesting::force_vested_transfer(Some(4).into(), 3, 4, new_vesting_schedule), BadOrigin); - assert_ok!(Vesting::force_vested_transfer(RawOrigin::Root.into(), 3, 4, new_vesting_schedule)); - // Now account 4 should have vesting. - assert_eq!(Vesting::vesting(&4), Some(new_vesting_schedule)); - // Ensure the transfer happened correctly. - let user3_free_balance_updated = Balances::free_balance(&3); - assert_eq!(user3_free_balance_updated, 256 * 25); - let user4_free_balance_updated = Balances::free_balance(&4); - assert_eq!(user4_free_balance_updated, 256 * 45); - // Account 4 has 5 * 256 locked. - assert_eq!(Vesting::vesting_balance(&4), Some(256 * 5)); - - System::set_block_number(20); - assert_eq!(System::block_number(), 20); - - // Account 4 has 5 * 64 units vested by block 20. - assert_eq!(Vesting::vesting_balance(&4), Some(10 * 64)); - - System::set_block_number(30); - assert_eq!(System::block_number(), 30); - - // Account 4 has fully vested. - assert_eq!(Vesting::vesting_balance(&4), Some(0)); - }); - } - - #[test] - fn force_vested_transfer_correctly_fails() { - ExtBuilder::default() - .existential_deposit(256) - .build() - .execute_with(|| { - let user2_free_balance = Balances::free_balance(&2); - let user4_free_balance = Balances::free_balance(&4); - assert_eq!(user2_free_balance, 256 * 20); - assert_eq!(user4_free_balance, 256 * 40); - // Account 2 should already have a vesting schedule. - let user2_vesting_schedule = VestingInfo { - locked: 256 * 20, - per_block: 256, // Vesting over 20 blocks - starting_block: 10, - }; - assert_eq!(Vesting::vesting(&2), Some(user2_vesting_schedule)); - - // The vesting schedule we will try to create, fails due to pre-existence of schedule. 
- let new_vesting_schedule = VestingInfo { - locked: 256 * 5, - per_block: 64, // Vesting over 20 blocks - starting_block: 10, - }; - assert_noop!( - Vesting::force_vested_transfer(RawOrigin::Root.into(), 4, 2, new_vesting_schedule), - Error::::ExistingVestingSchedule, - ); - - // Fails due to too low transfer amount. - let new_vesting_schedule_too_low = VestingInfo { - locked: 256 * 1, - per_block: 64, - starting_block: 10, - }; - assert_noop!( - Vesting::force_vested_transfer(RawOrigin::Root.into(), 3, 4, new_vesting_schedule_too_low), - Error::::AmountLow, - ); - - // Verify no currency transfer happened. - assert_eq!(user2_free_balance, 256 * 20); - assert_eq!(user4_free_balance, 256 * 40); - }); - } -} diff --git a/frame/vesting/src/mock.rs b/frame/vesting/src/mock.rs new file mode 100644 index 0000000000000..6fdd44aed140e --- /dev/null +++ b/frame/vesting/src/mock.rs @@ -0,0 +1,140 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use frame_support::parameter_types; +use sp_core::H256; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, Identity, IdentityLookup}, +}; + +use super::*; +use crate as pallet_vesting; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Vesting: pallet_vesting::{Pallet, Call, Storage, Event, Config}, + } +); + +parameter_types! { + pub const BlockHashCount: u64 = 250; + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); +} +impl frame_system::Config for Test { + type AccountData = pallet_balances::AccountData; + type AccountId = u64; + type BaseCallFilter = (); + type BlockHashCount = BlockHashCount; + type BlockLength = (); + type BlockNumber = u64; + type BlockWeights = (); + type Call = Call; + type DbWeight = (); + type Event = Event; + type Hash = H256; + type Hashing = BlakeTwo256; + type Header = Header; + type Index = u64; + type Lookup = IdentityLookup; + type OnKilledAccount = (); + type OnNewAccount = (); + type OnSetCode = (); + type Origin = Origin; + type PalletInfo = PalletInfo; + type SS58Prefix = (); + type SystemWeightInfo = (); + type Version = (); +} +parameter_types! { + pub const MaxLocks: u32 = 10; +} +impl pallet_balances::Config for Test { + type AccountStore = System; + type Balance = u64; + type DustRemoval = (); + type Event = Event; + type ExistentialDeposit = ExistentialDeposit; + type MaxLocks = MaxLocks; + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type WeightInfo = (); +} +parameter_types! 
{ + pub const MinVestedTransfer: u64 = 256 * 2; + pub static ExistentialDeposit: u64 = 0; +} +impl Config for Test { + type BlockNumberToBalance = Identity; + type Currency = Balances; + type Event = Event; + type MinVestedTransfer = MinVestedTransfer; + type WeightInfo = (); +} + +pub struct ExtBuilder { + existential_deposit: u64, +} +impl Default for ExtBuilder { + fn default() -> Self { + Self { existential_deposit: 1 } + } +} +impl ExtBuilder { + pub fn existential_deposit(mut self, existential_deposit: u64) -> Self { + self.existential_deposit = existential_deposit; + self + } + + pub fn build(self) -> sp_io::TestExternalities { + EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = self.existential_deposit); + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + pallet_balances::GenesisConfig:: { + balances: vec![ + (1, 10 * self.existential_deposit), + (2, 20 * self.existential_deposit), + (3, 30 * self.existential_deposit), + (4, 40 * self.existential_deposit), + (12, 10 * self.existential_deposit), + ], + } + .assimilate_storage(&mut t) + .unwrap(); + pallet_vesting::GenesisConfig:: { + vesting: vec![ + (1, 0, 10, 5 * self.existential_deposit), + (2, 10, 20, 0), + (12, 10, 20, 5 * self.existential_deposit), + ], + } + .assimilate_storage(&mut t) + .unwrap(); + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext + } +} diff --git a/frame/vesting/src/tests.rs b/frame/vesting/src/tests.rs new file mode 100644 index 0000000000000..7c59a61081d3b --- /dev/null +++ b/frame/vesting/src/tests.rs @@ -0,0 +1,359 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use frame_support::{assert_noop, assert_ok}; +use frame_system::RawOrigin; +use sp_runtime::traits::BadOrigin; + +use super::*; +use crate::mock::{Balances, ExtBuilder, System, Test, Vesting}; + +#[test] +fn check_vesting_status() { + ExtBuilder::default() + .existential_deposit(256) + .build() + .execute_with(|| { + let user1_free_balance = Balances::free_balance(&1); + let user2_free_balance = Balances::free_balance(&2); + let user12_free_balance = Balances::free_balance(&12); + assert_eq!(user1_free_balance, 256 * 10); // Account 1 has free balance + assert_eq!(user2_free_balance, 256 * 20); // Account 2 has free balance + assert_eq!(user12_free_balance, 256 * 10); // Account 12 has free balance + let user1_vesting_schedule = VestingInfo { + locked: 256 * 5, + per_block: 128, // Vesting over 10 blocks + starting_block: 0, + }; + let user2_vesting_schedule = VestingInfo { + locked: 256 * 20, + per_block: 256, // Vesting over 20 blocks + starting_block: 10, + }; + let user12_vesting_schedule = VestingInfo { + locked: 256 * 5, + per_block: 64, // Vesting over 20 blocks + starting_block: 10, + }; + assert_eq!(Vesting::vesting(&1), Some(user1_vesting_schedule)); // Account 1 has a vesting schedule + assert_eq!(Vesting::vesting(&2), Some(user2_vesting_schedule)); // Account 2 has a vesting schedule + assert_eq!(Vesting::vesting(&12), Some(user12_vesting_schedule)); // Account 12 has a vesting schedule + + // Account 1 has only 128 units vested from their illiquid 256 * 5 units at block 1 + assert_eq!(Vesting::vesting_balance(&1), Some(128 * 9)); + // Account 2 has their full balance locked + assert_eq!(Vesting::vesting_balance(&2), Some(user2_free_balance)); + // Account 12 has only their illiquid funds locked + assert_eq!(Vesting::vesting_balance(&12), Some(user12_free_balance - 256 * 5)); + + System::set_block_number(10); + assert_eq!(System::block_number(), 10); + + // Account 1 has fully vested by block 10 + assert_eq!(Vesting::vesting_balance(&1), Some(0)); + // Account 2 has started vesting by block 10 + assert_eq!(Vesting::vesting_balance(&2), Some(user2_free_balance)); + // Account 12 has started vesting by block 10 + assert_eq!(Vesting::vesting_balance(&12), Some(user12_free_balance - 256 * 5)); + + System::set_block_number(30); + assert_eq!(System::block_number(), 30); + + assert_eq!(Vesting::vesting_balance(&1), Some(0)); // Account 1 is still fully vested, and not negative + assert_eq!(Vesting::vesting_balance(&2), Some(0)); // Account 2 has fully vested by block 30 + assert_eq!(Vesting::vesting_balance(&12), Some(0)); // Account 2 has fully vested by block 30 + + }); +} + +#[test] +fn unvested_balance_should_not_transfer() { + ExtBuilder::default() + .existential_deposit(10) + .build() + .execute_with(|| { + let user1_free_balance = Balances::free_balance(&1); + assert_eq!(user1_free_balance, 100); // Account 1 has free balance + // Account 1 has only 5 units vested at block 1 (plus 50 unvested) + assert_eq!(Vesting::vesting_balance(&1), Some(45)); + assert_noop!( + Balances::transfer(Some(1).into(), 2, 56), + pallet_balances::Error::::LiquidityRestrictions, + ); // Account 1 cannot send more than vested amount + }); +} + +#[test] +fn vested_balance_should_transfer() { + ExtBuilder::default() + .existential_deposit(10) + .build() + .execute_with(|| { + let user1_free_balance = Balances::free_balance(&1); + assert_eq!(user1_free_balance, 100); // Account 1 has free balance + // Account 1 has only 5 units vested at block 1 (plus 50 unvested) + 
assert_eq!(Vesting::vesting_balance(&1), Some(45)); + assert_ok!(Vesting::vest(Some(1).into())); + assert_ok!(Balances::transfer(Some(1).into(), 2, 55)); + }); +} + +#[test] +fn vested_balance_should_transfer_using_vest_other() { + ExtBuilder::default() + .existential_deposit(10) + .build() + .execute_with(|| { + let user1_free_balance = Balances::free_balance(&1); + assert_eq!(user1_free_balance, 100); // Account 1 has free balance + // Account 1 has only 5 units vested at block 1 (plus 50 unvested) + assert_eq!(Vesting::vesting_balance(&1), Some(45)); + assert_ok!(Vesting::vest_other(Some(2).into(), 1)); + assert_ok!(Balances::transfer(Some(1).into(), 2, 55)); + }); +} + +#[test] +fn extra_balance_should_transfer() { + ExtBuilder::default() + .existential_deposit(10) + .build() + .execute_with(|| { + assert_ok!(Balances::transfer(Some(3).into(), 1, 100)); + assert_ok!(Balances::transfer(Some(3).into(), 2, 100)); + + let user1_free_balance = Balances::free_balance(&1); + assert_eq!(user1_free_balance, 200); // Account 1 has 100 more free balance than normal + + let user2_free_balance = Balances::free_balance(&2); + assert_eq!(user2_free_balance, 300); // Account 2 has 100 more free balance than normal + + // Account 1 has only 5 units vested at block 1 (plus 150 unvested) + assert_eq!(Vesting::vesting_balance(&1), Some(45)); + assert_ok!(Vesting::vest(Some(1).into())); + assert_ok!(Balances::transfer(Some(1).into(), 3, 155)); // Account 1 can send extra units gained + + // Account 2 has no units vested at block 1, but gained 100 + assert_eq!(Vesting::vesting_balance(&2), Some(200)); + assert_ok!(Vesting::vest(Some(2).into())); + assert_ok!(Balances::transfer(Some(2).into(), 3, 100)); // Account 2 can send extra units gained + }); +} + +#[test] +fn liquid_funds_should_transfer_with_delayed_vesting() { + ExtBuilder::default() + .existential_deposit(256) + .build() + .execute_with(|| { + let user12_free_balance = Balances::free_balance(&12); + + assert_eq!(user12_free_balance, 2560); // Account 12 has free balance + // Account 12 has liquid funds + assert_eq!(Vesting::vesting_balance(&12), Some(user12_free_balance - 256 * 5)); + + // Account 12 has delayed vesting + let user12_vesting_schedule = VestingInfo { + locked: 256 * 5, + per_block: 64, // Vesting over 20 blocks + starting_block: 10, + }; + assert_eq!(Vesting::vesting(&12), Some(user12_vesting_schedule)); + + // Account 12 can still send liquid funds + assert_ok!(Balances::transfer(Some(12).into(), 3, 256 * 5)); + }); +} + +#[test] +fn vested_transfer_works() { + ExtBuilder::default() + .existential_deposit(256) + .build() + .execute_with(|| { + let user3_free_balance = Balances::free_balance(&3); + let user4_free_balance = Balances::free_balance(&4); + assert_eq!(user3_free_balance, 256 * 30); + assert_eq!(user4_free_balance, 256 * 40); + // Account 4 should not have any vesting yet. + assert_eq!(Vesting::vesting(&4), None); + // Make the schedule for the new transfer. + let new_vesting_schedule = VestingInfo { + locked: 256 * 5, + per_block: 64, // Vesting over 20 blocks + starting_block: 10, + }; + assert_ok!(Vesting::vested_transfer(Some(3).into(), 4, new_vesting_schedule)); + // Now account 4 should have vesting. + assert_eq!(Vesting::vesting(&4), Some(new_vesting_schedule)); + // Ensure the transfer happened correctly. 
+ let user3_free_balance_updated = Balances::free_balance(&3); + assert_eq!(user3_free_balance_updated, 256 * 25); + let user4_free_balance_updated = Balances::free_balance(&4); + assert_eq!(user4_free_balance_updated, 256 * 45); + // Account 4 has 5 * 256 locked. + assert_eq!(Vesting::vesting_balance(&4), Some(256 * 5)); + + System::set_block_number(20); + assert_eq!(System::block_number(), 20); + + // Account 4 has 5 * 64 units vested by block 20. + assert_eq!(Vesting::vesting_balance(&4), Some(10 * 64)); + + System::set_block_number(30); + assert_eq!(System::block_number(), 30); + + // Account 4 has fully vested. + assert_eq!(Vesting::vesting_balance(&4), Some(0)); + }); +} + +#[test] +fn vested_transfer_correctly_fails() { + ExtBuilder::default() + .existential_deposit(256) + .build() + .execute_with(|| { + let user2_free_balance = Balances::free_balance(&2); + let user4_free_balance = Balances::free_balance(&4); + assert_eq!(user2_free_balance, 256 * 20); + assert_eq!(user4_free_balance, 256 * 40); + // Account 2 should already have a vesting schedule. + let user2_vesting_schedule = VestingInfo { + locked: 256 * 20, + per_block: 256, // Vesting over 20 blocks + starting_block: 10, + }; + assert_eq!(Vesting::vesting(&2), Some(user2_vesting_schedule)); + + // The vesting schedule we will try to create, fails due to pre-existence of schedule. + let new_vesting_schedule = VestingInfo { + locked: 256 * 5, + per_block: 64, // Vesting over 20 blocks + starting_block: 10, + }; + assert_noop!( + Vesting::vested_transfer(Some(4).into(), 2, new_vesting_schedule), + Error::::ExistingVestingSchedule, + ); + + // Fails due to too low transfer amount. + let new_vesting_schedule_too_low = VestingInfo { + locked: 256 * 1, + per_block: 64, + starting_block: 10, + }; + assert_noop!( + Vesting::vested_transfer(Some(3).into(), 4, new_vesting_schedule_too_low), + Error::::AmountLow, + ); + + // Verify no currency transfer happened. + assert_eq!(user2_free_balance, 256 * 20); + assert_eq!(user4_free_balance, 256 * 40); + }); +} + +#[test] +fn force_vested_transfer_works() { + ExtBuilder::default() + .existential_deposit(256) + .build() + .execute_with(|| { + let user3_free_balance = Balances::free_balance(&3); + let user4_free_balance = Balances::free_balance(&4); + assert_eq!(user3_free_balance, 256 * 30); + assert_eq!(user4_free_balance, 256 * 40); + // Account 4 should not have any vesting yet. + assert_eq!(Vesting::vesting(&4), None); + // Make the schedule for the new transfer. + let new_vesting_schedule = VestingInfo { + locked: 256 * 5, + per_block: 64, // Vesting over 20 blocks + starting_block: 10, + }; + assert_noop!(Vesting::force_vested_transfer(Some(4).into(), 3, 4, new_vesting_schedule), BadOrigin); + assert_ok!(Vesting::force_vested_transfer(RawOrigin::Root.into(), 3, 4, new_vesting_schedule)); + // Now account 4 should have vesting. + assert_eq!(Vesting::vesting(&4), Some(new_vesting_schedule)); + // Ensure the transfer happened correctly. + let user3_free_balance_updated = Balances::free_balance(&3); + assert_eq!(user3_free_balance_updated, 256 * 25); + let user4_free_balance_updated = Balances::free_balance(&4); + assert_eq!(user4_free_balance_updated, 256 * 45); + // Account 4 has 5 * 256 locked. + assert_eq!(Vesting::vesting_balance(&4), Some(256 * 5)); + + System::set_block_number(20); + assert_eq!(System::block_number(), 20); + + // Account 4 has 5 * 64 units vested by block 20. 
+ assert_eq!(Vesting::vesting_balance(&4), Some(10 * 64)); + + System::set_block_number(30); + assert_eq!(System::block_number(), 30); + + // Account 4 has fully vested. + assert_eq!(Vesting::vesting_balance(&4), Some(0)); + }); +} + +#[test] +fn force_vested_transfer_correctly_fails() { + ExtBuilder::default() + .existential_deposit(256) + .build() + .execute_with(|| { + let user2_free_balance = Balances::free_balance(&2); + let user4_free_balance = Balances::free_balance(&4); + assert_eq!(user2_free_balance, 256 * 20); + assert_eq!(user4_free_balance, 256 * 40); + // Account 2 should already have a vesting schedule. + let user2_vesting_schedule = VestingInfo { + locked: 256 * 20, + per_block: 256, // Vesting over 20 blocks + starting_block: 10, + }; + assert_eq!(Vesting::vesting(&2), Some(user2_vesting_schedule)); + + // The vesting schedule we will try to create, fails due to pre-existence of schedule. + let new_vesting_schedule = VestingInfo { + locked: 256 * 5, + per_block: 64, // Vesting over 20 blocks + starting_block: 10, + }; + assert_noop!( + Vesting::force_vested_transfer(RawOrigin::Root.into(), 4, 2, new_vesting_schedule), + Error::::ExistingVestingSchedule, + ); + + // Fails due to too low transfer amount. + let new_vesting_schedule_too_low = VestingInfo { + locked: 256 * 1, + per_block: 64, + starting_block: 10, + }; + assert_noop!( + Vesting::force_vested_transfer(RawOrigin::Root.into(), 3, 4, new_vesting_schedule_too_low), + Error::::AmountLow, + ); + + // Verify no currency transfer happened. + assert_eq!(user2_free_balance, 256 * 20); + assert_eq!(user4_free_balance, 256 * 40); + }); +} From 4b5c2f7007833f347dbed52ff9d3dd79ee3744d8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 1 Jul 2021 17:50:42 +0200 Subject: [PATCH 58/67] Do not call `initialize_block` before any runtime api (#8953) * Do not call `initialize_block` before any runtime api Before this change we always called `initialize_block` before calling into the runtime. There was already support with `skip_initialize` to skip the initialization. Almost no runtime_api requires that `initialize_block` is called before. Actually this only leads to higher execution times most of the time, because all runtime modules are initialized and this is especially expensive when the block contained a runtime upgrade. TLDR: Do not call `initialize_block` before calling a runtime api. 
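For illustration, a minimal sketch of the runtime-side change this implies (mirroring the node-template diff further down; `Runtime`, `Block` and `Executive` are the usual node-template items, and the `sp_transaction_pool::runtime_api::TaggedTransactionQueue` trait path is assumed):

```rust
impl_runtime_apis! {
    impl sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block> for Runtime {
        fn validate_transaction(
            source: TransactionSource,
            tx: <Block as BlockT>::Extrinsic,
            // New parameter: the hash of the block the pool validates against, so the
            // runtime no longer depends on `initialize_block` having set up that context.
            block_hash: <Block as BlockT>::Hash,
        ) -> TransactionValidity {
            Executive::validate_transaction(source, tx, block_hash)
        }
    }
}
```

Passing the block hash explicitly is what lets the transaction pool skip `initialize_block` entirely while still telling the runtime which block a transaction is being validated against.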
* Change `validate_transaction` interface * Fix rpc test * Fixes and comments * Some docs --- bin/node-template/runtime/src/lib.rs | 3 +- bin/node/executor/tests/submit_transaction.rs | 8 +- bin/node/runtime/src/lib.rs | 3 +- client/api/src/call_executor.rs | 6 +- client/light/src/call_executor.rs | 75 ++--------------- client/rpc/src/state/tests.rs | 2 +- client/service/src/client/call_executor.rs | 15 +--- client/service/src/client/client.rs | 24 +----- client/service/test/src/client/light.rs | 53 +++++------- client/transaction-pool/src/api.rs | 54 ++++++++++--- frame/executive/src/lib.rs | 20 ++++- .../primitives/src/lib.rs | 3 - .../api/proc-macro/src/decl_runtime_apis.rs | 45 +---------- .../api/proc-macro/src/impl_runtime_apis.rs | 28 +++---- .../proc-macro/src/mock_impl_runtime_apis.rs | 7 ++ primitives/api/src/lib.rs | 81 +++++++++---------- primitives/api/test/tests/runtime_calls.rs | 25 ++---- primitives/consensus/aura/src/lib.rs | 1 - primitives/offchain/src/lib.rs | 2 - .../transaction-pool/src/runtime_api.rs | 14 +++- primitives/version/src/lib.rs | 5 ++ test-utils/runtime/src/lib.rs | 16 +--- test-utils/runtime/src/system.rs | 3 +- 23 files changed, 192 insertions(+), 301 deletions(-) diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index e89d7f28be220..940eb2379b114 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -365,8 +365,9 @@ impl_runtime_apis! { fn validate_transaction( source: TransactionSource, tx: ::Extrinsic, + block_hash: ::Hash, ) -> TransactionValidity { - Executive::validate_transaction(source, tx) + Executive::validate_transaction(source, tx, block_hash) } } diff --git a/bin/node/executor/tests/submit_transaction.rs b/bin/node/executor/tests/submit_transaction.rs index 3de0758d81462..590bdac4db757 100644 --- a/bin/node/executor/tests/submit_transaction.rs +++ b/bin/node/executor/tests/submit_transaction.rs @@ -256,12 +256,16 @@ fn submitted_transaction_should_be_valid() { >::insert(&address, account); // check validity - let res = Executive::validate_transaction(source, extrinsic).unwrap(); + let res = Executive::validate_transaction( + source, + extrinsic, + frame_system::BlockHash::::get(0), + ).unwrap(); // We ignore res.priority since this number can change based on updates to weights and such. assert_eq!(res.requires, Vec::::new()); assert_eq!(res.provides, vec![(address, 0).encode()]); - assert_eq!(res.longevity, 2048); + assert_eq!(res.longevity, 2047); assert_eq!(res.propagate, true); }); } diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index fd7fd4213366f..109a492e2c713 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1292,8 +1292,9 @@ impl_runtime_apis! 
{ fn validate_transaction( source: TransactionSource, tx: ::Extrinsic, + block_hash: ::Hash, ) -> TransactionValidity { - Executive::validate_transaction(source, tx) + Executive::validate_transaction(source, tx, block_hash) } } diff --git a/client/api/src/call_executor.rs b/client/api/src/call_executor.rs index 3b725bf8773a8..621cc292a71ac 100644 --- a/client/api/src/call_executor.rs +++ b/client/api/src/call_executor.rs @@ -30,7 +30,7 @@ use sc_executor::{RuntimeVersion, NativeVersion}; use sp_externalities::Extensions; use sp_core::NativeOrEncoded; -use sp_api::{ProofRecorder, InitializeBlock, StorageTransactionCache}; +use sp_api::{ProofRecorder, StorageTransactionCache}; use crate::execution_extensions::ExecutionExtensions; /// Executor Provider @@ -71,8 +71,6 @@ pub trait CallExecutor { /// Before executing the method, passed header is installed as the current header /// of the execution context. fn contextual_call< - 'a, - IB: Fn() -> sp_blockchain::Result<()>, EM: Fn( Result, Self::Error>, Result, Self::Error> @@ -81,7 +79,6 @@ pub trait CallExecutor { NC: FnOnce() -> result::Result + UnwindSafe, >( &self, - initialize_block_fn: IB, at: &BlockId, method: &str, call_data: &[u8], @@ -89,7 +86,6 @@ pub trait CallExecutor { storage_transaction_cache: Option<&RefCell< StorageTransactionCache>::State>, >>, - initialize_block: InitializeBlock<'a, B>, execution_manager: ExecutionManager, native_call: Option, proof_recorder: &Option>, diff --git a/client/light/src/call_executor.rs b/client/light/src/call_executor.rs index ae83807dc98f2..c9ca3bab37bef 100644 --- a/client/light/src/call_executor.rs +++ b/client/light/src/call_executor.rs @@ -27,7 +27,7 @@ use sp_core::{ convert_hash, NativeOrEncoded, traits::{CodeExecutor, SpawnNamed}, }; use sp_runtime::{ - generic::BlockId, traits::{One, Block as BlockT, Header as HeaderT, HashFor}, + generic::BlockId, traits::{Block as BlockT, Header as HeaderT, HashFor}, }; use sp_externalities::Extensions; use sp_state_machine::{ @@ -36,7 +36,7 @@ use sp_state_machine::{ }; use hash_db::Hasher; -use sp_api::{ProofRecorder, InitializeBlock, StorageTransactionCache}; +use sp_api::{ProofRecorder, StorageTransactionCache}; use sp_blockchain::{Error as ClientError, Result as ClientResult}; @@ -97,8 +97,6 @@ impl CallExecutor for } fn contextual_call< - 'a, - IB: Fn() -> ClientResult<()>, EM: Fn( Result, Self::Error>, Result, Self::Error> @@ -107,13 +105,11 @@ impl CallExecutor for NC: FnOnce() -> result::Result + UnwindSafe, >( &self, - initialize_block_fn: IB, at: &BlockId, method: &str, call_data: &[u8], changes: &RefCell, _: Option<&RefCell>>, - initialize_block: InitializeBlock<'a, Block>, _manager: ExecutionManager, native_call: Option, recorder: &Option>, @@ -124,7 +120,6 @@ impl CallExecutor for match self.backend.is_local_state_available(at) { true => CallExecutor::contextual_call::< - _, fn( Result, Local::Error>, Result, Local::Error>, @@ -133,13 +128,11 @@ impl CallExecutor for NC >( &self.local, - initialize_block_fn, at, method, call_data, changes, None, - initialize_block, ExecutionManager::NativeWhenPossible, native_call, recorder, @@ -177,7 +170,6 @@ impl CallExecutor for /// Proof includes both environment preparation proof and method execution proof. 
pub fn prove_execution( mut state: S, - header: Block::Header, executor: &E, method: &str, call_data: &[u8], @@ -193,31 +185,20 @@ pub fn prove_execution( Box )?; - // prepare execution environment + record preparation proof - let mut changes = Default::default(); - let (_, init_proof) = executor.prove_at_trie_state( - trie_state, - &mut changes, - "Core_initialize_block", - &header.encode(), - )?; - // execute method + record execution proof let (result, exec_proof) = executor.prove_at_trie_state( &trie_state, - &mut changes, + &mut Default::default(), method, call_data, )?; - let total_proof = StorageProof::merge(vec![init_proof, exec_proof]); - Ok((result, total_proof)) + Ok((result, exec_proof)) } /// Check remote contextual execution proof using given backend. /// -/// Method is executed using passed header as environment' current block. -/// Proof should include both environment preparation proof and method execution proof. +/// Proof should include the method execution proof. pub fn check_execution_proof( executor: &E, spawn_handle: Box, @@ -229,63 +210,19 @@ pub fn check_execution_proof( E: CodeExecutor + Clone + 'static, H: Hasher, H::Out: Ord + codec::Codec + 'static, -{ - check_execution_proof_with_make_header::( - executor, - spawn_handle, - request, - remote_proof, - |header|

::new( - *header.number() + One::one(), - Default::default(), - Default::default(), - header.hash(), - Default::default(), - ), - ) -} - -/// Check remote contextual execution proof using given backend and header factory. -/// -/// Method is executed using passed header as environment' current block. -/// Proof should include both environment preparation proof and method execution proof. -pub fn check_execution_proof_with_make_header( - executor: &E, - spawn_handle: Box, - request: &RemoteCallRequest
, - remote_proof: StorageProof, - make_next_header: MakeNextHeader, -) -> ClientResult> - where - E: CodeExecutor + Clone + 'static, - H: Hasher, - Header: HeaderT, - H::Out: Ord + codec::Codec + 'static, - MakeNextHeader: Fn(&Header) -> Header, { let local_state_root = request.header.state_root(); let root: H::Out = convert_hash(&local_state_root); - // prepare execution environment + check preparation proof + // prepare execution environment let mut changes = OverlayedChanges::default(); let trie_backend = create_proof_check_backend(root, remote_proof)?; - let next_header = make_next_header(&request.header); // TODO: Remove when solved: https://github.com/paritytech/substrate/issues/5047 let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&trie_backend); let runtime_code = backend_runtime_code.runtime_code() .map_err(|_e| ClientError::RuntimeCodeMissing)?; - execution_proof_check_on_trie_backend::( - &trie_backend, - &mut changes, - executor, - spawn_handle.clone(), - "Core_initialize_block", - &next_header.encode(), - &runtime_code, - )?; - // execute method execution_proof_check_on_trie_backend::( &trie_backend, diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index e413827552c9d..c9cb0bde89c1a 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -474,7 +474,7 @@ fn should_return_runtime_version() { let result = "{\"specName\":\"test\",\"implName\":\"parity-test\",\"authoringVersion\":1,\ \"specVersion\":2,\"implVersion\":2,\"apis\":[[\"0xdf6acb689907609b\",3],\ - [\"0x37e397fc7c91f5e4\",1],[\"0xd2bc9897eed08f15\",2],[\"0x40fe3ad401f8959a\",5],\ + [\"0x37e397fc7c91f5e4\",1],[\"0xd2bc9897eed08f15\",3],[\"0x40fe3ad401f8959a\",5],\ [\"0xc6e9a76309f39b09\",1],[\"0xdd718d5cc53262d4\",1],[\"0xcbca25e39f142387\",2],\ [\"0xf78b278be53f454c\",2],[\"0xab3c0572291feb8b\",1],[\"0xbc9d89904f5b923f\",1]],\ \"transactionVersion\":1}"; diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs index c8c1fee545be2..a444819947607 100644 --- a/client/service/src/client/call_executor.rs +++ b/client/service/src/client/call_executor.rs @@ -30,7 +30,7 @@ use sp_externalities::Extensions; use sp_core::{ NativeOrEncoded, NeverNativeValue, traits::{CodeExecutor, SpawnNamed, RuntimeCode}, }; -use sp_api::{ProofRecorder, InitializeBlock, StorageTransactionCache}; +use sp_api::{ProofRecorder, StorageTransactionCache}; use sc_client_api::{backend, call_executor::CallExecutor}; use super::{client::ClientConfig, wasm_override::WasmOverride, wasm_substitutes::WasmSubstitutes}; @@ -173,8 +173,6 @@ where } fn contextual_call< - 'a, - IB: Fn() -> sp_blockchain::Result<()>, EM: Fn( Result, Self::Error>, Result, Self::Error> @@ -183,7 +181,6 @@ where NC: FnOnce() -> result::Result + UnwindSafe, >( &self, - initialize_block_fn: IB, at: &BlockId, method: &str, call_data: &[u8], @@ -191,21 +188,11 @@ where storage_transaction_cache: Option<&RefCell< StorageTransactionCache >>, - initialize_block: InitializeBlock<'a, Block>, execution_manager: ExecutionManager, native_call: Option, recorder: &Option>, extensions: Option, ) -> Result, sp_blockchain::Error> where ExecutionManager: Clone { - match initialize_block { - InitializeBlock::Do(ref init_block) - if init_block.borrow().as_ref().map(|id| id != at).unwrap_or(true) => { - initialize_block_fn()?; - }, - // We don't need to initialize the runtime at a block. 
- _ => {}, - } - let changes_trie_state = backend::changes_tries_state_at_block(at, self.backend.changes_trie_storage())?; let mut storage_transaction_cache = storage_transaction_cache.map(|c| c.borrow_mut()); diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 4a998a12d2b7f..ab5a0d9394c2c 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -1243,18 +1243,6 @@ impl Client where trace!("Collected {} uncles", uncles.len()); Ok(uncles) } - - /// Prepare in-memory header that is used in execution environment. - fn prepare_environment_block(&self, parent: &BlockId) -> sp_blockchain::Result { - let parent_hash = self.backend.blockchain().expect_block_hash_from_id(parent)?; - Ok(<::Header as HeaderT>::new( - self.backend.blockchain().expect_block_number_from_id(parent)? + One::one(), - Default::default(), - Default::default(), - parent_hash, - Default::default(), - )) - } } impl UsageProvider for Client where @@ -1313,10 +1301,8 @@ impl ProofProvider for Client where )?; let state = self.state_at(id)?; - let header = self.prepare_environment_block(id)?; prove_execution( state, - header, &self.executor, method, call_data, @@ -1782,12 +1768,10 @@ impl CallApiAt for Client where 'a, R: Encode + Decode + PartialEq, NC: FnOnce() -> result::Result + UnwindSafe, - C: CoreApi, >( &self, - params: CallApiAtParams<'a, Block, C, NC, B::State>, + params: CallApiAtParams<'a, Block, NC, B::State>, ) -> Result, sp_api::ApiError> { - let core_api = params.core_api; let at = params.at; let (manager, extensions) = self.execution_extensions.manager_and_extensions( @@ -1795,16 +1779,12 @@ impl CallApiAt for Client where params.context, ); - self.executor.contextual_call::<_, fn(_,_) -> _,_,_>( - || core_api - .initialize_block(at, &self.prepare_environment_block(at)?) 
- .map_err(Error::RuntimeApiError), + self.executor.contextual_call:: _, _, _>( at, params.function, ¶ms.arguments, params.overlayed_changes, Some(params.storage_transaction_cache), - params.initialize_block, manager, params.native_call, params.recorder, diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs index 8841d498ecfb0..440e0b4dd0dc3 100644 --- a/client/service/test/src/client/light.rs +++ b/client/service/test/src/client/light.rs @@ -20,7 +20,6 @@ use sc_light::{ call_executor::{ GenesisCallExecutor, check_execution_proof, - check_execution_proof_with_make_header, }, fetcher::LightDataChecker, blockchain::{BlockchainCache, Blockchain}, @@ -37,7 +36,7 @@ use parking_lot::Mutex; use substrate_test_runtime_client::{ runtime::{Hash, Block, Header}, TestClient, ClientBlockImportExt, }; -use sp_api::{InitializeBlock, StorageTransactionCache, ProofRecorder}; +use sp_api::{StorageTransactionCache, ProofRecorder}; use sp_consensus::BlockOrigin; use sc_executor::{NativeExecutor, WasmExecutionMethod, RuntimeVersion, NativeVersion}; use sp_core::{H256, NativeOrEncoded, testing::TaskExecutor}; @@ -209,8 +208,6 @@ impl CallExecutor for DummyCallExecutor { } fn contextual_call< - 'a, - IB: Fn() -> ClientResult<()>, EM: Fn( Result, Self::Error>, Result, Self::Error> @@ -219,7 +216,6 @@ impl CallExecutor for DummyCallExecutor { NC: FnOnce() -> Result + UnwindSafe, >( &self, - _initialize_block_fn: IB, _at: &BlockId, _method: &str, _call_data: &[u8], @@ -230,7 +226,6 @@ impl CallExecutor for DummyCallExecutor { >::State, > >>, - _initialize_block: InitializeBlock<'a, Block>, _execution_manager: ExecutionManager, _native_call: Option, _proof_recorder: &Option>, @@ -333,36 +328,41 @@ fn execution_proof_is_generated_and_checked() { (remote_result, local_result) } - fn execute_with_proof_failure(remote_client: &TestClient, at: u64, method: &'static str) { + fn execute_with_proof_failure(remote_client: &TestClient, at: u64) { let remote_block_id = BlockId::Number(at); let remote_header = remote_client.header(&remote_block_id).unwrap().unwrap(); // 'fetch' execution proof from remote node let (_, remote_execution_proof) = remote_client.execution_proof( &remote_block_id, - method, - &[] + "Core_initialize_block", + &Header::new( + at, + Default::default(), + Default::default(), + Default::default(), + Default::default(), + ).encode(), ).unwrap(); // check remote execution proof locally - let execution_result = check_execution_proof_with_make_header::<_, _, BlakeTwo256, _>( + let execution_result = check_execution_proof::<_, _, BlakeTwo256>( &local_executor(), Box::new(TaskExecutor::new()), &RemoteCallRequest { block: substrate_test_runtime_client::runtime::Hash::default(), - header: remote_header, - method: method.into(), - call_data: vec![], + header: remote_header.clone(), + method: "Core_initialize_block".into(), + call_data: Header::new( + at + 1, + Default::default(), + Default::default(), + remote_header.hash(), + remote_header.digest().clone(), // this makes next header wrong + ).encode(), retry_count: None, }, remote_execution_proof, - |header|
::new( - at + 1, - Default::default(), - Default::default(), - header.hash(), - header.digest().clone(), // this makes next header wrong - ), ); match execution_result { Err(sp_blockchain::Error::Execution(_)) => (), @@ -389,21 +389,12 @@ fn execution_proof_is_generated_and_checked() { let (remote, local) = execute(&remote_client, 2, "Core_version"); assert_eq!(remote, local); - // check method that requires environment - let (_, block) = execute(&remote_client, 0, "BlockBuilder_finalize_block"); - let local_block: Header = Decode::decode(&mut &block[..]).unwrap(); - assert_eq!(local_block.number, 1); - - let (_, block) = execute(&remote_client, 2, "BlockBuilder_finalize_block"); - let local_block: Header = Decode::decode(&mut &block[..]).unwrap(); - assert_eq!(local_block.number, 3); - // check that proof check doesn't panic even if proof is incorrect AND no panic handler is set - execute_with_proof_failure(&remote_client, 2, "Core_version"); + execute_with_proof_failure(&remote_client, 2); // check that proof check doesn't panic even if proof is incorrect AND panic handler is set sp_panic_handler::set("TEST", "1.2.3"); - execute_with_proof_failure(&remote_client, 2, "Core_version"); + execute_with_proof_failure(&remote_client, 2); } #[test] diff --git a/client/transaction-pool/src/api.rs b/client/transaction-pool/src/api.rs index 74e08c3aa0589..dd54e8e76947a 100644 --- a/client/transaction-pool/src/api.rs +++ b/client/transaction-pool/src/api.rs @@ -203,24 +203,54 @@ where sp_tracing::within_span!(sp_tracing::Level::TRACE, "validate_transaction"; { let runtime_api = client.runtime_api(); - let has_v2 = sp_tracing::within_span! { sp_tracing::Level::TRACE, "check_version"; + let api_version = sp_tracing::within_span! { sp_tracing::Level::TRACE, "check_version"; runtime_api - .has_api_with::, _>(&at, |v| v >= 2) - .unwrap_or_default() - }; + .api_version::>(&at) + .map_err(|e| Error::RuntimeApi(e.to_string()))? + .ok_or_else(|| Error::RuntimeApi( + format!("Could not find `TaggedTransactionQueue` api for block `{:?}`.", at) + )) + }?; + + let block_hash = client.to_hash(at) + .map_err(|e| Error::RuntimeApi(format!("{:?}", e)))? + .ok_or_else(|| Error::RuntimeApi(format!("Could not get hash for block `{:?}`.", at)))?; - let res = sp_tracing::within_span!( + use sp_api::Core; + + sp_tracing::within_span!( sp_tracing::Level::TRACE, "runtime::validate_transaction"; { - if has_v2 { - runtime_api.validate_transaction(&at, source, uxt) + if api_version >= 3 { + runtime_api.validate_transaction(&at, source, uxt, block_hash) + .map_err(|e| Error::RuntimeApi(e.to_string())) } else { - #[allow(deprecated)] // old validate_transaction - runtime_api.validate_transaction_before_version_2(&at, uxt) + let block_number = client.to_number(at) + .map_err(|e| Error::RuntimeApi(format!("{:?}", e)))? + .ok_or_else(|| + Error::RuntimeApi(format!("Could not get number for block `{:?}`.", at)) + )?; + + // The old versions require us to call `initialize_block` before. 
+ runtime_api.initialize_block(at, &sp_runtime::traits::Header::new( + block_number + sp_runtime::traits::One::one(), + Default::default(), + Default::default(), + block_hash, + Default::default()), + ).map_err(|e| Error::RuntimeApi(e.to_string()))?; + + if api_version == 2 { + #[allow(deprecated)] // old validate_transaction + runtime_api.validate_transaction_before_version_3(&at, source, uxt) + .map_err(|e| Error::RuntimeApi(e.to_string())) + } else { + #[allow(deprecated)] // old validate_transaction + runtime_api.validate_transaction_before_version_2(&at, uxt) + .map_err(|e| Error::RuntimeApi(e.to_string())) + } } - }); - - res.map_err(|e| Error::RuntimeApi(e.to_string())) + }) }) } diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index 1d2ad069f07a9..c5f39e14f5fc1 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -474,10 +474,18 @@ where pub fn validate_transaction( source: TransactionSource, uxt: Block::Extrinsic, + block_hash: Block::Hash, ) -> TransactionValidity { sp_io::init_tracing(); use sp_tracing::{enter_span, within_span}; + >::initialize( + &(frame_system::Pallet::::block_number() + One::one()), + &block_hash, + &Default::default(), + frame_system::InitKind::Inspection, + ); + enter_span!{ sp_tracing::Level::TRACE, "validate_transaction" }; let encoded_len = within_span!{ sp_tracing::Level::TRACE, "using_encoded"; @@ -1006,11 +1014,19 @@ mod tests { default_with_prio_3.priority = 3; t.execute_with(|| { assert_eq!( - Executive::validate_transaction(TransactionSource::InBlock, valid.clone()), + Executive::validate_transaction( + TransactionSource::InBlock, + valid.clone(), + Default::default(), + ), Ok(default_with_prio_3), ); assert_eq!( - Executive::validate_transaction(TransactionSource::InBlock, invalid.clone()), + Executive::validate_transaction( + TransactionSource::InBlock, + invalid.clone(), + Default::default(), + ), Err(TransactionValidityError::Unknown(UnknownTransaction::NoUnsignedValidator)), ); assert_eq!(Executive::apply_extrinsic(valid), Ok(Err(DispatchError::BadOrigin))); diff --git a/frame/merkle-mountain-range/primitives/src/lib.rs b/frame/merkle-mountain-range/primitives/src/lib.rs index 73d4d3ecc1fc3..7b562656a1e04 100644 --- a/frame/merkle-mountain-range/primitives/src/lib.rs +++ b/frame/merkle-mountain-range/primitives/src/lib.rs @@ -406,7 +406,6 @@ sp_api::decl_runtime_apis! { /// API to interact with MMR pallet. pub trait MmrApi { /// Generate MMR proof for a leaf under given index. - #[skip_initialize_block] fn generate_proof(leaf_index: u64) -> Result<(EncodableOpaqueLeaf, Proof), Error>; /// Verify MMR proof against on-chain MMR. @@ -414,7 +413,6 @@ sp_api::decl_runtime_apis! { /// Note this function will use on-chain MMR root hash and check if the proof /// matches the hash. /// See [Self::verify_proof_stateless] for a stateless verifier. - #[skip_initialize_block] fn verify_proof(leaf: EncodableOpaqueLeaf, proof: Proof) -> Result<(), Error>; /// Verify MMR proof against given root hash. @@ -423,7 +421,6 @@ sp_api::decl_runtime_apis! { /// proof is verified against given MMR root hash. /// /// The leaf data is expected to be encoded in it's compact form. 
- #[skip_initialize_block] fn verify_proof_stateless(root: Hash, leaf: EncodableOpaqueLeaf, proof: Proof) -> Result<(), Error>; } diff --git a/primitives/api/proc-macro/src/decl_runtime_apis.rs b/primitives/api/proc-macro/src/decl_runtime_apis.rs index 9fd5baba877dc..4a8b49049e760 100644 --- a/primitives/api/proc-macro/src/decl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/decl_runtime_apis.rs @@ -58,21 +58,9 @@ const CHANGED_IN_ATTRIBUTE: &str = "changed_in"; /// /// Is used when a trait method was renamed. const RENAMED_ATTRIBUTE: &str = "renamed"; -/// The `skip_initialize_block` attribute. -/// -/// Is used when a trait method does not require that the block is initialized -/// before being called. -const SKIP_INITIALIZE_BLOCK_ATTRIBUTE: &str = "skip_initialize_block"; -/// The `initialize_block` attribute. -/// -/// A trait method tagged with this attribute, initializes the runtime at -/// certain block. -const INITIALIZE_BLOCK_ATTRIBUTE: &str = "initialize_block"; /// All attributes that we support in the declaration of a runtime api trait. const SUPPORTED_ATTRIBUTE_NAMES: &[&str] = &[ - CORE_TRAIT_ATTRIBUTE, API_VERSION_ATTRIBUTE, CHANGED_IN_ATTRIBUTE, - RENAMED_ATTRIBUTE, SKIP_INITIALIZE_BLOCK_ATTRIBUTE, - INITIALIZE_BLOCK_ATTRIBUTE, + CORE_TRAIT_ATTRIBUTE, API_VERSION_ATTRIBUTE, CHANGED_IN_ATTRIBUTE, RENAMED_ATTRIBUTE, ]; /// The structure used for parsing the runtime api declarations. @@ -376,15 +364,6 @@ fn generate_call_api_at_calls(decl: &ItemTrait) -> Result { continue; } - let skip_initialize_block = attrs.contains_key(SKIP_INITIALIZE_BLOCK_ATTRIBUTE); - let update_initialized_block = if attrs.contains_key(INITIALIZE_BLOCK_ATTRIBUTE) { - quote!( - || *initialized_block.borrow_mut() = Some(*at) - ) - } else { - quote!(|| ()) - }; - // Parse the renamed attributes. let mut renames = Vec::new(); if let Some((_, a)) = attrs @@ -413,72 +392,54 @@ fn generate_call_api_at_calls(decl: &ItemTrait) -> Result { NC: FnOnce() -> std::result::Result + std::panic::UnwindSafe, Block: #crate_::BlockT, T: #crate_::CallApiAt, - C: #crate_::Core, >( call_runtime_at: &T, - core_api: &C, at: &#crate_::BlockId, args: Vec, changes: &std::cell::RefCell<#crate_::OverlayedChanges>, storage_transaction_cache: &std::cell::RefCell< #crate_::StorageTransactionCache >, - initialized_block: &std::cell::RefCell>>, native_call: Option, context: #crate_::ExecutionContext, recorder: &Option<#crate_::ProofRecorder>, ) -> std::result::Result<#crate_::NativeOrEncoded, #crate_::ApiError> { let version = call_runtime_at.runtime_version_at(at)?; - use #crate_::InitializeBlock; - let initialize_block = if #skip_initialize_block { - InitializeBlock::Skip - } else { - InitializeBlock::Do(&initialized_block) - }; - let update_initialized_block = #update_initialized_block; #( // Check if we need to call the function by an old name. 
if version.apis.iter().any(|(s, v)| { s == &ID && *v < #versions }) { - let params = #crate_::CallApiAtParams::<_, _, fn() -> _, _> { - core_api, + let params = #crate_::CallApiAtParams::<_, fn() -> _, _> { at, function: #old_names, native_call: None, arguments: args, overlayed_changes: changes, storage_transaction_cache, - initialize_block, context, recorder, }; let ret = call_runtime_at.call_api_at(params)?; - update_initialized_block(); return Ok(ret) } )* let params = #crate_::CallApiAtParams { - core_api, at, function: #trait_fn_name, native_call, arguments: args, overlayed_changes: changes, storage_transaction_cache, - initialize_block, context, recorder, }; - let ret = call_runtime_at.call_api_at(params)?; - - update_initialized_block(); - Ok(ret) + call_runtime_at.call_api_at(params) } )); } diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index cf1265fdb0028..e81c52bbb0b18 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -122,9 +122,9 @@ fn generate_impl_calls( impl_calls.push(( impl_trait_ident.clone(), - method.sig.ident.clone(), - impl_call, - filter_cfg_attrs(&impl_.attrs), + method.sig.ident.clone(), + impl_call, + filter_cfg_attrs(&impl_.attrs), )); } } @@ -186,7 +186,7 @@ fn generate_wasm_interface(impls: &[ItemImpl]) -> Result { #c::init_runtime_logger(); - let output = { #impl_ }; + let output = (move || { #impl_ })(); #c::to_substrate_wasm_fn_return_value(&output) } ) @@ -205,7 +205,6 @@ fn generate_runtime_api_base_structures() -> Result { pub struct RuntimeApiImpl + 'static> { call: &'static C, commit_on_success: std::cell::RefCell, - initialized_block: std::cell::RefCell>>, changes: std::cell::RefCell<#crate_::OverlayedChanges>, storage_transaction_cache: std::cell::RefCell< #crate_::StorageTransactionCache @@ -265,6 +264,15 @@ fn generate_runtime_api_base_structures() -> Result { .map(|v| v.has_api_with(&A::ID, pred)) } + fn api_version( + &self, + at: &#crate_::BlockId, + ) -> std::result::Result, #crate_::ApiError> where Self: Sized { + self.call + .runtime_version_at(at) + .map(|v| v.api_version(&A::ID)) + } + fn record_proof(&mut self) { self.recorder = Some(Default::default()); } @@ -291,7 +299,6 @@ fn generate_runtime_api_base_structures() -> Result { #crate_::StorageChanges, String > where Self: Sized { - self.initialized_block.borrow_mut().take(); self.changes.replace(Default::default()).into_storage_changes( backend, changes_trie_state, @@ -315,7 +322,6 @@ fn generate_runtime_api_base_structures() -> Result { RuntimeApiImpl { call: unsafe { std::mem::transmute(call) }, commit_on_success: true.into(), - initialized_block: None.into(), changes: Default::default(), recorder: Default::default(), storage_transaction_cache: Default::default(), @@ -329,10 +335,8 @@ fn generate_runtime_api_base_structures() -> Result { R: #crate_::Encode + #crate_::Decode + PartialEq, F: FnOnce( &C, - &Self, &std::cell::RefCell<#crate_::OverlayedChanges>, &std::cell::RefCell<#crate_::StorageTransactionCache>, - &std::cell::RefCell>>, &Option<#crate_::ProofRecorder>, ) -> std::result::Result<#crate_::NativeOrEncoded, E>, E, @@ -345,10 +349,8 @@ fn generate_runtime_api_base_structures() -> Result { } let res = call_api_at( &self.call, - self, &self.changes, &self.storage_transaction_cache, - &self.initialized_block, &self.recorder, ); @@ -501,20 +503,16 @@ impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { self.call_api_at( | 
call_runtime_at, - core_api, changes, storage_transaction_cache, - initialized_block, recorder | { #runtime_mod_path #call_api_at_call( call_runtime_at, - core_api, at, params_encoded, changes, storage_transaction_cache, - initialized_block, params.map(|p| { #runtime_mod_path #native_call_generator_ident :: <#runtime, __SR_API_BLOCK__ #(, #trait_generic_arguments )*> ( diff --git a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs index 383cd4f635ea2..738420615b622 100644 --- a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs @@ -94,6 +94,13 @@ fn implement_common_api_traits( Ok(pred(A::VERSION)) } + fn api_version( + &self, + _: &#crate_::BlockId<#block_type>, + ) -> std::result::Result, #crate_::ApiError> where Self: Sized { + Ok(Some(A::VERSION)) + } + fn record_proof(&mut self) { unimplemented!("`record_proof` not implemented for runtime api mocks") } diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index 97342377a76c8..ea023677adf34 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -17,20 +17,29 @@ //! Substrate runtime api //! -//! The Substrate runtime api is the crucial interface between the node and the runtime. -//! Every call that goes into the runtime is done with a runtime api. The runtime apis are not fixed. -//! Every Substrate user can define its own apis with -//! [`decl_runtime_apis`](macro.decl_runtime_apis.html) and implement them in -//! the runtime with [`impl_runtime_apis`](macro.impl_runtime_apis.html). +//! The Substrate runtime api is the interface between the node and the runtime. There isn't a fixed +//! set of runtime apis, instead it is up to the user to declare and implement these runtime apis. +//! The declaration of a runtime api is normally done outside of a runtime, while the implementation +//! of it has to be done in the runtime. We provide the [`decl_runtime_apis!`] macro for declaring +//! a runtime api and the [`impl_runtime_apis!`] for implementing them. The macro docs provide more +//! information on how to use them and what kind of attributes we support. //! -//! Every Substrate runtime needs to implement the [`Core`] runtime api. This api provides the basic -//! functionality that every runtime needs to export. +//! It is required that each runtime implements at least the [`Core`] runtime api. This runtime api +//! provides all the core functions that Substrate expects from a runtime. //! -//! Besides the macros and the [`Core`] runtime api, this crates provides the [`Metadata`] runtime -//! api, the [`ApiExt`] trait, the [`CallApiAt`] trait and the [`ConstructRuntimeApi`] trait. +//! # Versioning //! -//! On a meta level this implies, the client calls the generated API from the client perspective. +//! Runtime apis support versioning. Each runtime api itself has a version attached. It is also +//! supported to change function signatures or names in a non-breaking way. For more information on +//! versioning check the [`decl_runtime_apis!`] macro. //! +//! All runtime apis and their versions are returned as part of the [`RuntimeVersion`]. This can be +//! used to check which runtime api version is currently provided by the on-chain runtime. +//! +//! # Testing +//! +//! For testing we provide the [`mock_impl_runtime_apis!`] macro that lets you implement a runtime +//! api for a mocked object to use it in tests. //! //! # Logging //! @@ -43,6 +52,17 @@ //! 
that this feature instructs `log` and `tracing` to disable logging at compile time by setting //! the `max_level_off` feature for these crates. So, you should not enable this feature for a //! native build as otherwise the node will not output any log messages. +//! +//! # How does it work? +//! +//! Each runtime api is declared as a trait with functions. When compiled to WASM, each implemented +//! runtime api function is exported as a function with the following naming scheme +//! `${TRAIT_NAME}_${FUNCTION_NAME}`. Such a function has the following signature +//! `(ptr: *u8, length: u32) -> u64`. It takes a pointer to an `u8` array and its length as an +//! argument. This `u8` array is expected to be the SCALE encoded parameters of the function as +//! defined in the trait. The return value is an `u64` that represents `length << 32 | pointer` of an +//! `u8` array. This return value `u8` array contains the SCALE encoded return value as defined by +//! the trait function. The macros take care to encode the parameters and to decode the return value. #![cfg_attr(not(feature = "std"), no_std)] @@ -99,7 +119,7 @@ pub const MAX_EXTRINSIC_DEPTH: u32 = 256; /// to the client side and the runtime side. This generic parameter is usable by the user. /// /// For implementing these macros you should use the -/// [`impl_runtime_apis!`](macro.impl_runtime_apis.html) macro. +/// [`impl_runtime_apis!`] macro. /// /// # Example /// @@ -461,6 +481,12 @@ pub trait ApiExt { pred: P, ) -> Result where Self: Sized; + /// Returns the version of the given api. + fn api_version( + &self, + at: &BlockId, + ) -> Result, ApiError> where Self: Sized; + /// Start recording all accessed trie nodes for generating proofs. fn record_proof(&mut self); @@ -489,31 +515,9 @@ pub trait ApiExt { > where Self: Sized; } -/// Before calling any runtime api function, the runtime need to be initialized -/// at the requested block. However, some functions like `execute_block` or -/// `initialize_block` itself don't require to have the runtime initialized -/// at the requested block. -/// -/// `call_api_at` is instructed by this enum to do the initialization or to skip -/// it. -#[cfg(feature = "std")] -#[derive(Clone, Copy)] -pub enum InitializeBlock<'a, Block: BlockT> { - /// Skip initializing the runtime for a given block. - /// - /// This is used by functions who do the initialization by themselves or don't require it. - Skip, - /// Initialize the runtime for a given block. - /// - /// If the stored `BlockId` is `Some(_)`, the runtime is currently initialized at this block. - Do(&'a RefCell>>), -} - /// Parameters for [`CallApiAt::call_api_at`]. #[cfg(feature = "std")] -pub struct CallApiAtParams<'a, Block: BlockT, C, NC, Backend: StateBackend>> { - /// A reference to something that implements the [`Core`] api. - pub core_api: &'a C, +pub struct CallApiAtParams<'a, Block: BlockT, NC, Backend: StateBackend>> { /// The block id that determines the state that should be setup when calling the function. pub at: &'a BlockId, /// The name of the function that should be called. @@ -529,9 +533,6 @@ pub struct CallApiAtParams<'a, Block: BlockT, C, NC, Backend: StateBackend, /// The cache for storage transactions. pub storage_transaction_cache: &'a RefCell>, - /// Determines if the function requires that `initialize_block` should be called before calling - /// the actual function. - pub initialize_block: InitializeBlock<'a, Block>, /// The context this function is executed in. 
pub context: ExecutionContext, /// The optional proof recorder for recording storage accesses. @@ -550,10 +551,9 @@ pub trait CallApiAt { 'a, R: Encode + Decode + PartialEq, NC: FnOnce() -> result::Result + UnwindSafe, - C: Core, >( &self, - params: CallApiAtParams<'a, Block, C, NC, Self::StateBackend>, + params: CallApiAtParams<'a, Block, NC, Self::StateBackend>, ) -> Result, ApiError>; /// Returns the runtime version at the given block. @@ -704,12 +704,9 @@ decl_runtime_apis! { #[changed_in(3)] fn version() -> OldRuntimeVersion; /// Execute the given block. - #[skip_initialize_block] fn execute_block(block: Block); /// Initialize a block with the given header. #[renamed("initialise_block", 2)] - #[skip_initialize_block] - #[initialize_block] fn initialize_block(header: &::Header); } diff --git a/primitives/api/test/tests/runtime_calls.rs b/primitives/api/test/tests/runtime_calls.rs index 562735834ddca..b60c7a09cb616 100644 --- a/primitives/api/test/tests/runtime_calls.rs +++ b/primitives/api/test/tests/runtime_calls.rs @@ -15,11 +15,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -use sp_api::ProvideRuntimeApi; +use sp_api::{ProvideRuntimeApi, Core}; use substrate_test_runtime_client::{ prelude::*, DefaultTestClientBuilderExt, TestClientBuilder, - runtime::{TestAPI, DecodeFails, Transfer, Block}, + runtime::{TestAPI, DecodeFails, Transfer, Block, Header}, }; use sp_runtime::{generic::BlockId, traits::{Header as HeaderT, HashFor}}; use sp_state_machine::{ @@ -133,26 +133,13 @@ fn initialize_block_works() { let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::Both).build(); let runtime_api = client.runtime_api(); let block_id = BlockId::Number(client.chain_info().best_number); + runtime_api.initialize_block( + &block_id, + &Header::new(1, Default::default(), Default::default(), Default::default(), Default::default()), + ).unwrap(); assert_eq!(runtime_api.get_block_number(&block_id).unwrap(), 1); } -#[test] -fn initialize_block_is_called_only_once() { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::Both).build(); - let runtime_api = client.runtime_api(); - let block_id = BlockId::Number(client.chain_info().best_number); - assert_eq!(runtime_api.take_block_number(&block_id).unwrap(), Some(1)); - assert_eq!(runtime_api.take_block_number(&block_id).unwrap(), None); -} - -#[test] -fn initialize_block_is_skipped() { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::Both).build(); - let runtime_api = client.runtime_api(); - let block_id = BlockId::Number(client.chain_info().best_number); - assert!(runtime_api.without_initialize_block(&block_id).unwrap()); -} - #[test] fn record_proof_works() { let (client, longest_chain) = TestClientBuilder::new() diff --git a/primitives/consensus/aura/src/lib.rs b/primitives/consensus/aura/src/lib.rs index ef888a2ab855b..a28e681fda27f 100644 --- a/primitives/consensus/aura/src/lib.rs +++ b/primitives/consensus/aura/src/lib.rs @@ -90,7 +90,6 @@ sp_api::decl_runtime_apis! { fn slot_duration() -> SlotDuration; // Return the current set of authorities. - #[skip_initialize_block] fn authorities() -> Vec; } } diff --git a/primitives/offchain/src/lib.rs b/primitives/offchain/src/lib.rs index ffdc2bfcc3a64..72ceca80cfbf8 100644 --- a/primitives/offchain/src/lib.rs +++ b/primitives/offchain/src/lib.rs @@ -28,12 +28,10 @@ sp_api::decl_runtime_apis! 
{ #[api_version(2)] pub trait OffchainWorkerApi { /// Starts the off-chain task for given block number. - #[skip_initialize_block] #[changed_in(2)] fn offchain_worker(number: sp_runtime::traits::NumberFor); /// Starts the off-chain task for given block header. - #[skip_initialize_block] fn offchain_worker(header: &Block::Header); } } diff --git a/primitives/transaction-pool/src/runtime_api.rs b/primitives/transaction-pool/src/runtime_api.rs index e1c3280ca2aaf..42542d9f3c8b4 100644 --- a/primitives/transaction-pool/src/runtime_api.rs +++ b/primitives/transaction-pool/src/runtime_api.rs @@ -22,22 +22,32 @@ use sp_runtime::traits::Block as BlockT; sp_api::decl_runtime_apis! { /// The `TaggedTransactionQueue` api trait for interfering with the transaction queue. - #[api_version(2)] + #[api_version(3)] pub trait TaggedTransactionQueue { /// Validate the transaction. #[changed_in(2)] fn validate_transaction(tx: ::Extrinsic) -> TransactionValidity; + /// Validate the transaction. + #[changed_in(3)] + fn validate_transaction( + source: TransactionSource, + tx: ::Extrinsic, + ) -> TransactionValidity; + /// Validate the transaction. /// /// This method is invoked by the transaction pool to learn details about given transaction. /// The implementation should make sure to verify the correctness of the transaction - /// against current state. + /// against current state. The given `block_hash` corresponds to the hash of the block + /// that is used as current state. + /// /// Note that this call may be performed by the pool multiple times and transactions /// might be verified in any possible order. fn validate_transaction( source: TransactionSource, tx: ::Extrinsic, + block_hash: Block::Hash, ) -> TransactionValidity; } } diff --git a/primitives/version/src/lib.rs b/primitives/version/src/lib.rs index 8940e85f68a8d..15b4a128924fc 100644 --- a/primitives/version/src/lib.rs +++ b/primitives/version/src/lib.rs @@ -198,6 +198,11 @@ impl RuntimeVersion { ) -> bool { self.apis.iter().any(|(s, v)| s == id && predicate(*v)) } + + /// Returns the api version found for api with `id`. + pub fn api_version(&self, id: &ApiId) -> Option { + self.apis.iter().find_map(|a| (a.0 == *id).then(|| a.1)) + } } #[cfg(feature = "std")] diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 7ee1072a7b83e..084f1338cd261 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -343,9 +343,6 @@ cfg_if! { fn get_block_number() -> u64; /// Takes and returns the initialized block number. fn take_block_number() -> Option; - /// Returns if no block was initialized. - #[skip_initialize_block] - fn without_initialize_block() -> bool; /// Test that `ed25519` crypto works in the runtime. /// /// Returns the signature generated for the message `ed25519` and the public key. @@ -396,9 +393,6 @@ cfg_if! { fn get_block_number() -> u64; /// Takes and returns the initialized block number. fn take_block_number() -> Option; - /// Returns if no block was initialized. - #[skip_initialize_block] - fn without_initialize_block() -> bool; /// Test that `ed25519` crypto works in the runtime. /// /// Returns the signature generated for the message `ed25519` and the public key. @@ -635,6 +629,7 @@ cfg_if! { fn validate_transaction( _source: TransactionSource, utx: ::Extrinsic, + _: ::Hash, ) -> TransactionValidity { if let Extrinsic::IncludeData(data) = utx { return Ok(ValidTransaction { @@ -720,10 +715,6 @@ cfg_if! 
{ system::get_block_number().expect("Block number is initialized") } - fn without_initialize_block() -> bool { - system::get_block_number().is_none() - } - fn take_block_number() -> Option { system::take_block_number() } @@ -888,6 +879,7 @@ cfg_if! { fn validate_transaction( _source: TransactionSource, utx: ::Extrinsic, + _: ::Hash, ) -> TransactionValidity { if let Extrinsic::IncludeData(data) = utx { return Ok(ValidTransaction{ @@ -977,10 +969,6 @@ cfg_if! { system::get_block_number().expect("Block number is initialized") } - fn without_initialize_block() -> bool { - system::get_block_number().is_none() - } - fn take_block_number() -> Option { system::take_block_number() } diff --git a/test-utils/runtime/src/system.rs b/test-utils/runtime/src/system.rs index 33ef7b12d8db0..ae35ded83bfc7 100644 --- a/test-utils/runtime/src/system.rs +++ b/test-utils/runtime/src/system.rs @@ -193,7 +193,8 @@ pub fn validate_transaction(utx: Extrinsic) -> TransactionValidity { /// Execute a transaction outside of the block execution function. /// This doesn't attempt to validate anything regarding the block. pub fn execute_transaction(utx: Extrinsic) -> ApplyExtrinsicResult { - let extrinsic_index: u32 = storage::unhashed::get(well_known_keys::EXTRINSIC_INDEX).unwrap(); + let extrinsic_index: u32 = storage::unhashed::get(well_known_keys::EXTRINSIC_INDEX) + .unwrap_or_default(); let result = execute_transaction_backend(&utx, extrinsic_index); ExtrinsicData::insert(extrinsic_index, utx.encode()); storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &(extrinsic_index + 1)); From 1b758b2a8d151d97d2242260c465b6df9cb8a7a4 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Wed, 30 Jun 2021 22:46:28 +0200 Subject: [PATCH 59/67] Make a few things for staking miner (#9241) --- frame/election-provider-multi-phase/src/lib.rs | 4 ++-- frame/system/src/lib.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index e127e34d55723..7aab93fb652f7 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -1294,14 +1294,14 @@ impl Pallet { } /// Kill everything created by [`Pallet::create_snapshot`]. - pub(crate) fn kill_snapshot() { + pub fn kill_snapshot() { >::kill(); >::kill(); >::kill(); } /// Checks the feasibility of a solution. - fn feasibility_check( + pub fn feasibility_check( solution: RawSolution>, compute: ElectionCompute, ) -> Result, FeasibilityError> { diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index f96c43ee1c98e..ad57bf6a87994 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -581,7 +581,7 @@ pub mod pallet { /// Events deposited for the current block. #[pallet::storage] #[pallet::getter(fn events)] - pub(super) type Events = + pub type Events = StorageValue<_, Vec>, ValueQuery>; /// The number of events in the `Events` list. From 74101dc21cfffb4c2d014fcc28edc166d5ca1b16 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 21 Jul 2021 00:20:34 +0200 Subject: [PATCH 60/67] Fix custom on runtime upgrade not being called (#9399) When the `Executive` was used through the `ExecuteBlock` trait, the custom on runtime upgrade wasn't called. This happened because we forgot to forward the type and it instead used default type `()` that doesn't do anything. This pr fixes it by forwarding the type and also adds a regression test. 
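To make the failure mode concrete, here is a minimal, self-contained Rust sketch of the same pattern. The names (`OnUpgrade`, `CustomUpgrade`, `Executor`, `MyConfig`) are invented for this illustration and are not the actual `frame_support`/`Executive` items; the point is only that a defaulted type parameter which is not forwarded silently falls back to the no-op `()` implementation, exactly as described above.

    use std::marker::PhantomData;

    /// Stand-in for `OnRuntimeUpgrade`: the unit type gets a no-op implementation.
    trait OnUpgrade {
        fn on_upgrade() -> u64; // "weight" consumed by the migration
    }

    impl OnUpgrade for () {
        fn on_upgrade() -> u64 {
            0 // nothing runs, no weight consumed
        }
    }

    /// Stand-in for a runtime's custom migration type.
    struct CustomUpgrade;
    impl OnUpgrade for CustomUpgrade {
        fn on_upgrade() -> u64 {
            println!("running custom migration");
            1
        }
    }

    /// Stand-in for `Executive`: the trailing type parameter defaults to `()`.
    struct Executor<Cfg, U = ()>(PhantomData<(Cfg, U)>);

    impl<Cfg, U: OnUpgrade> Executor<Cfg, U> {
        fn execute() -> u64 {
            U::on_upgrade()
        }
    }

    struct MyConfig;

    fn main() {
        // Omitting the trailing parameter (what the old `ExecuteBlock` impl effectively did)
        // silently selects the default `()`, so the custom migration never runs.
        assert_eq!(Executor::<MyConfig>::execute(), 0);
        // Forwarding the parameter (what this patch does for `COnRuntimeUpgrade`) runs it.
        assert_eq!(Executor::<MyConfig, CustomUpgrade>::execute(), 1);
    }

The regression test below exercises the real code path: it executes a block through the `ExecuteBlock` trait and then asserts that the storage written by the custom `on_runtime_upgrade` is present.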
--- frame/executive/src/lib.rs | 56 +++++++++++++++++++++++++++++++++++++- 1 file changed, 55 insertions(+), 1 deletion(-) diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index c5f39e14f5fc1..5b347adfe56f9 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -179,7 +179,14 @@ where UnsignedValidator: ValidateUnsigned>, { fn execute_block(block: Block) { - Executive::::execute_block(block); + Executive::< + System, + Block, + Context, + UnsignedValidator, + AllPallets, + COnRuntimeUpgrade, + >::execute_block(block); } } @@ -1193,6 +1200,53 @@ mod tests { }); } + /// Regression test that ensures that the custom on runtime upgrade is called when executive is + /// used through the `ExecuteBlock` trait. + #[test] + fn custom_runtime_upgrade_is_called_when_using_execute_block_trait() { + let xt = TestXt::new(Call::Balances(BalancesCall::transfer(33, 0)), sign_extra(1, 0, 0)); + + let header = new_test_ext(1).execute_with(|| { + // Make sure `on_runtime_upgrade` is called. + RUNTIME_VERSION.with(|v| *v.borrow_mut() = sp_version::RuntimeVersion { + spec_version: 1, + ..Default::default() + }); + + // Let's build some fake block. + Executive::initialize_block(&Header::new( + 1, + H256::default(), + H256::default(), + [69u8; 32].into(), + Digest::default(), + )); + + Executive::apply_extrinsic(xt.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + // Reset to get the correct new genesis below. + RUNTIME_VERSION.with(|v| *v.borrow_mut() = sp_version::RuntimeVersion { + spec_version: 0, + ..Default::default() + }); + + new_test_ext(1).execute_with(|| { + // Make sure `on_runtime_upgrade` is called. + RUNTIME_VERSION.with(|v| *v.borrow_mut() = sp_version::RuntimeVersion { + spec_version: 1, + ..Default::default() + }); + + >>::execute_block(Block::new(header, vec![xt])); + + assert_eq!(&sp_io::storage::get(TEST_KEY).unwrap()[..], *b"module"); + assert_eq!(sp_io::storage::get(CUSTOM_ON_RUNTIME_KEY).unwrap(), true.encode()); + }); + } + #[test] fn all_weights_are_recorded_correctly() { new_test_ext(1).execute_with(|| { From fd11ab55a83ab645d49ba6d439606f1f61b257c0 Mon Sep 17 00:00:00 2001 From: Maksim Ramanenkau Date: Wed, 28 Sep 2022 13:33:27 +0300 Subject: [PATCH 61/67] Update lock file --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3151439736f34..11f71ce22f22e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7096,7 +7096,7 @@ dependencies = [ "sc-network", "sc-telemetry", "serde", - "serde_json", + "serde_json 1.0.64", "sp-consensus-babe", "sp-core", "sp-runtime", @@ -7945,7 +7945,7 @@ dependencies = [ "parking_lot 0.11.1", "sc-chain-spec", "serde", - "serde_json", + "serde_json 1.0.64", "sp-core", "sp-rpc", "sp-runtime", From 3ebbcab90ce4a6c052f25488ba8c60e4c7d158e6 Mon Sep 17 00:00:00 2001 From: Maksim Ramanenkau Date: Wed, 28 Sep 2022 14:54:05 +0300 Subject: [PATCH 62/67] Post merge fixes --- primitives/consensus/aura/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/primitives/consensus/aura/src/lib.rs b/primitives/consensus/aura/src/lib.rs index ef888a2ab855b..3b0d986007aad 100644 --- a/primitives/consensus/aura/src/lib.rs +++ b/primitives/consensus/aura/src/lib.rs @@ -90,7 +90,7 @@ sp_api::decl_runtime_apis! { fn slot_duration() -> SlotDuration; // Return the current set of authorities. 
- #[skip_initialize_block] + fn authorities() -> Vec; } } From de214319d4e0a855e0d15dd76fdca8fe3f7d427d Mon Sep 17 00:00:00 2001 From: Maksim Ramanenkau Date: Wed, 28 Sep 2022 17:51:19 +0300 Subject: [PATCH 63/67] Make build and test compile --- frame/ddc-metrics-offchain-worker/src/lib.rs | 19 ++++++++++--------- .../src/tests/test_runtime.rs | 2 +- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/frame/ddc-metrics-offchain-worker/src/lib.rs b/frame/ddc-metrics-offchain-worker/src/lib.rs index b310c9e9b74d8..8e370c72514e6 100644 --- a/frame/ddc-metrics-offchain-worker/src/lib.rs +++ b/frame/ddc-metrics-offchain-worker/src/lib.rs @@ -32,6 +32,7 @@ use sp_std::vec::Vec; extern crate alloc; use alloc::string::String; +use sp_runtime::offchain::storage::StorageRetrievalError; pub const BLOCK_INTERVAL: u32 = 100; // TODO: Change to 1200 later [1h]. Now - 200 [10 minutes] for testing purposes. @@ -242,15 +243,15 @@ impl Module where ::AccountId: AsRef<[u let value = StorageValueRef::persistent(b"ddc-metrics-offchain-worker::sc_address").get(); match value { - None => { + Ok(None) => { warn!("[OCW] Smart Contract is not configured. Please configure it using offchain_localStorageSet with key=ddc-metrics-offchain-worker::sc_address"); None } - Some(None) => { + Ok(Some(contract_address)) => Some(contract_address), + Err(_) => { error!("[OCW] Smart Contract is configured but the value could not be decoded to an account ID"); None } - Some(Some(contract_address)) => Some(contract_address), } } @@ -258,14 +259,14 @@ impl Module where ::AccountId: AsRef<[u let value = StorageValueRef::persistent(b"ddc-metrics-offchain-worker::block_interval").get::(); match value { - None => { + Ok(None) => { None } - Some(None) => { - error!("[OCW] Block Interval could not be decoded"); - None - } - Some(Some(block_interval)) => Some(block_interval), + Ok(Some(block_interval)) => Some(block_interval), + Err(_) => { + error!("[OCW] Block Interval could not be decoded"); + None + } } } diff --git a/frame/ddc-metrics-offchain-worker/src/tests/test_runtime.rs b/frame/ddc-metrics-offchain-worker/src/tests/test_runtime.rs index 6ac3757a61ce9..d993af81f3337 100644 --- a/frame/ddc-metrics-offchain-worker/src/tests/test_runtime.rs +++ b/frame/ddc-metrics-offchain-worker/src/tests/test_runtime.rs @@ -55,7 +55,7 @@ frame_support::construct_runtime!( Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, Contracts: contracts::{Pallet, Call, Storage, Event}, Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, - Randomness: pallet_randomness_collective_flip::{Pallet, Call, Storage}, + Randomness: pallet_randomness_collective_flip::{Pallet, Storage}, DdcMetricsOffchainWorker: pallet_ddc_metrics_offchain_worker::{Pallet, Call, Event}, } ); From 191a12caa3a4c2fd9a3dfb93d868f4647b8fd27f Mon Sep 17 00:00:00 2001 From: Maksim Ramanenkau Date: Thu, 29 Sep 2022 15:31:01 +0300 Subject: [PATCH 64/67] Fix ddc-metrics-offchain-worker test --- frame/ddc-metrics-offchain-worker/src/lib.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/frame/ddc-metrics-offchain-worker/src/lib.rs b/frame/ddc-metrics-offchain-worker/src/lib.rs index 8e370c72514e6..237d75e64a9d1 100644 --- a/frame/ddc-metrics-offchain-worker/src/lib.rs +++ b/frame/ddc-metrics-offchain-worker/src/lib.rs @@ -274,7 +274,10 @@ impl Module where ::AccountId: AsRef<[u let s_next_at = StorageValueRef::persistent(b"ddc-metrics-offchain-worker::next-at"); match s_next_at.mutate(|current_next_at| { - let current_next_at = 
current_next_at.unwrap_or(Some(T::BlockNumber::default())); + let current_next_at = match current_next_at { + Ok(Some(val)) => Some(val), + _ => Some(T::BlockNumber::default()), + }; if let Some(current_next_at) = current_next_at { if current_next_at > block_number { From 49777b809426f573a8ee689f5fb6e96699bd7475 Mon Sep 17 00:00:00 2001 From: Maksim Ramanenkau Date: Fri, 30 Sep 2022 11:09:13 +0300 Subject: [PATCH 65/67] Add release notes and update spec version --- CHANGELOG.md | 4 ++++ bin/node/runtime/src/lib.rs | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index dd1e7a8d4680f..eb8cad3d65e51 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,10 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [2.22.0] +### Changed +- Updated Substrate to polkadot-v0.9.8 + ## [2.21.0] ### Changed - Updated Substrate to polkadot-v0.9.5 diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index b471f31ad33d8..782544acd2dcc 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -121,7 +121,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // and set impl_version to 0. If only runtime // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. - spec_version: 291, + spec_version: 292, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 2, From cec97c54750fbda991c44168ef90307620be1612 Mon Sep 17 00:00:00 2001 From: Maksim Ramanenkau Date: Tue, 4 Oct 2022 11:02:15 +0300 Subject: [PATCH 66/67] Code review --- primitives/consensus/aura/src/lib.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/primitives/consensus/aura/src/lib.rs b/primitives/consensus/aura/src/lib.rs index 3b0d986007aad..a28e681fda27f 100644 --- a/primitives/consensus/aura/src/lib.rs +++ b/primitives/consensus/aura/src/lib.rs @@ -90,7 +90,6 @@ sp_api::decl_runtime_apis! { fn slot_duration() -> SlotDuration; // Return the current set of authorities. - fn authorities() -> Vec; } } From 03a6f1986110379a68a0c49bd96da3169a280876 Mon Sep 17 00:00:00 2001 From: Maksim Ramanenkau Date: Tue, 22 Nov 2022 14:35:39 +0300 Subject: [PATCH 67/67] Add storage migrations --- bin/node/runtime/src/lib.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 782544acd2dcc..39b071b1dd1dc 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1185,9 +1185,19 @@ pub type Executive = frame_executive::Executive< frame_system::ChainContext, Runtime, AllPallets, - (), + RemoveCollectiveFlip, >; +pub struct RemoveCollectiveFlip; +impl frame_support::traits::OnRuntimeUpgrade for RemoveCollectiveFlip { + fn on_runtime_upgrade() -> Weight { + use frame_support::storage::migration; + // Remove the storage value `RandomMaterial` from removed pallet `RandomnessCollectiveFlip` + migration::remove_storage_prefix(b"RandomnessCollectiveFlip", b"RandomMaterial", b""); + ::DbWeight::get().writes(1) + } +} + impl_runtime_apis! { impl sp_api::Core for Runtime { fn version() -> RuntimeVersion {