From d22e68ccb67788a1f1ad5a044568abd3e0a2c904 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bastian=20K=C3=B6cher?=
Date: Wed, 25 Jan 2023 23:49:03 +0100
Subject: [PATCH 1/8] CI: Rewrite `check-each-crate` in python (#13238)

* CI: Rewrite `check-each-crate` in python

This is a much more readable version of the script and it should also
work on macOS and not silently fail ;)

* Fix dumb bugs and print everything to stderr

* Don't buffer Python output

* :facepalm:

* :facepalm: :facepalm:

* Use check all until we have more macos runners

* Update scripts/ci/gitlab/check-each-crate.py

Co-authored-by: Oliver Tale-Yazdi

* Update scripts/ci/gitlab/pipeline/test.yml

Co-authored-by: Vladimir Istyufeev

Co-authored-by: Oliver Tale-Yazdi
---
 scripts/ci/gitlab/check-each-crate.py | 57 +++++++++++++++++++++++++++
 scripts/ci/gitlab/check-each-crate.sh | 54 ------------------------
 scripts/ci/gitlab/pipeline/test.yml   |  5 ++-
 3 files changed, 60 insertions(+), 56 deletions(-)
 create mode 100755 scripts/ci/gitlab/check-each-crate.py
 delete mode 100755 scripts/ci/gitlab/check-each-crate.sh

diff --git a/scripts/ci/gitlab/check-each-crate.py b/scripts/ci/gitlab/check-each-crate.py
new file mode 100755
index 0000000000000..adad4f5bd5835
--- /dev/null
+++ b/scripts/ci/gitlab/check-each-crate.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python3
+
+# A script that checks each workspace crate individually.
+# It's relevant to check workspace crates individually because otherwise their compilation problems
+# due to feature misconfigurations won't be caught, as exemplified by
+# https://github.com/paritytech/substrate/issues/12705
+#
+# `check-each-crate.py target_group groups_total`
+#
+# - `target_group`: Integer starting from 1, the group this script should execute.
+# - `groups_total`: Integer starting from 1, total number of groups.
+
+import subprocess, sys
+
+# Get all crates
+output = subprocess.check_output(["cargo", "tree", "--locked", "--workspace", "--depth", "0", "--prefix", "none"])
+
+# Convert the output into a proper list
+crates = []
+for line in output.splitlines():
+	if line != b"":
+		crates.append(line.decode('utf8').split(" ")[0])
+
+# Make the list unique and sorted
+crates = list(set(crates))
+crates.sort()
+
+target_group = int(sys.argv[1]) - 1
+groups_total = int(sys.argv[2])
+
+if len(crates) == 0:
+	print("No crates detected!", file=sys.stderr)
+	sys.exit(1)
+
+print(f"Total crates: {len(crates)}", file=sys.stderr)
+
+crates_per_group = len(crates) // groups_total
+
+# If this is the last runner, we need to take care of crates
+# after the group that we lost because of the integer division.
+if target_group + 1 == groups_total:
+	overflow_crates = len(crates) % groups_total
+else:
+	overflow_crates = 0
+
+print(f"Crates per group: {crates_per_group}", file=sys.stderr)
+
+# Check each crate
+for i in range(0, crates_per_group + overflow_crates):
+	crate = crates_per_group * target_group + i
+
+	print(f"Checking {crates[crate]}", file=sys.stderr)
+
+	res = subprocess.run(["cargo", "check", "--locked", "-p", crates[crate]])
+
+	if res.returncode != 0:
+		sys.exit(1)
diff --git a/scripts/ci/gitlab/check-each-crate.sh b/scripts/ci/gitlab/check-each-crate.sh
deleted file mode 100755
index 24cad67007e73..0000000000000
--- a/scripts/ci/gitlab/check-each-crate.sh
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/usr/bin/env bash
-
-## A script that checks each workspace crate individually.
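
(Worked example of the grouping arithmetic in the Python script above: with 57 workspace crates and `groups_total = 10`, integer division gives `crates_per_group = 5`; groups 1 through 9 check 5 crates each, and the last group additionally picks up the `57 % 10 = 7` leftover crates, checking indices 45 through 56, so no crate is skipped. The deleted shell script below implemented the same split by rounding the group size up instead.)
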
-## It's relevant to check workspace crates individually because otherwise their compilation problems -## due to feature misconfigurations won't be caught, as exemplified by -## https://github.com/paritytech/substrate/issues/12705 - -set -Eeu -o pipefail -shopt -s inherit_errexit - -set -vx - -target_group="$1" -groups_total="$2" - -readarray -t workspace_crates < <(\ - cargo tree --workspace --depth 0 --prefix none | - awk '{ if (length($1) == 0 || substr($1, 1, 1) == "[") { skip } else { print $1 } }' | - sort | - uniq -) - -crates_total=${#workspace_crates[*]} -if [ "$crates_total" -lt 1 ]; then - >&2 echo "No crates detected for $PWD" - exit 1 -fi - -if [ "$crates_total" -lt "$groups_total" ]; then - # `crates_total / groups_total` would result in 0, so round it up to 1 - crates_per_group=1 -else - # We add `crates_total % groups_total > 0` (which evaluates to 1 in case there's a remainder for - # `crates_total % groups_total`) to round UP `crates_total / groups_total` 's - # potentially-fractional result to the nearest integer. Doing that ensures that we'll not miss any - # crate in case `crates_total / groups_total` would normally result in a fractional number, since - # in those cases Bash would round DOWN the result to the nearest integer. For example, if - # `crates_total = 5` and `groups_total = 2`, then `crates_total / groups_total` would round down - # to 2; since the loop below would then step by 2, we'd miss the 5th crate. - crates_per_group=$(( (crates_total / groups_total) + (crates_total % groups_total > 0) )) -fi - -group=1 -for ((i=0; i < crates_total; i += crates_per_group)); do - if [ $group -eq "$target_group" ]; then - crates_in_group=("${workspace_crates[@]:$i:$crates_per_group}") - echo "crates in the group: ${crates_in_group[*]}" >/dev/null # >/dev/null due to "set -x" - for crate in "${crates_in_group[@]}"; do - cargo check --locked --release -p "$crate" - done - break - fi - group=$(( group + 1 )) -done diff --git a/scripts/ci/gitlab/pipeline/test.yml b/scripts/ci/gitlab/pipeline/test.yml index f00857ffa9935..02e05752fd01f 100644 --- a/scripts/ci/gitlab/pipeline/test.yml +++ b/scripts/ci/gitlab/pipeline/test.yml @@ -411,7 +411,7 @@ cargo-check-each-crate: CI_JOB_NAME: cargo-check-each-crate script: - rusty-cachier snapshot create - - time ./scripts/ci/gitlab/check-each-crate.sh "$CI_NODE_INDEX" "$CI_NODE_TOTAL" + - PYTHONUNBUFFERED=x time ./scripts/ci/gitlab/check-each-crate.py "$CI_NODE_INDEX" "$CI_NODE_TOTAL" # need to update cache only from one job - if [ "$CI_NODE_INDEX" == 1 ]; then rusty-cachier cache upload; fi parallel: 2 @@ -449,6 +449,7 @@ cargo-check-each-crate-macos: script: # TODO: enable rusty-cachier once it supports Mac # TODO: use parallel jobs, as per cargo-check-each-crate, once more Mac runners are available - - time ./scripts/ci/gitlab/check-each-crate.sh 1 1 + # - time ./scripts/ci/gitlab/check-each-crate.py 1 1 + - time cargo check --workspace --locked tags: - osx From d02922585673cb626df11a39927502d2ba040ff9 Mon Sep 17 00:00:00 2001 From: zjb0807 Date: Thu, 26 Jan 2023 12:12:19 +1300 Subject: [PATCH 2/8] Fix try-runtime with create-snapshot (#13223) * Fix try-runtime with create-snapshot * Apply review suggestions --- .../frame/try-runtime/cli/src/commands/create_snapshot.rs | 2 +- utils/frame/try-runtime/cli/src/commands/execute_block.rs | 2 +- utils/frame/try-runtime/cli/src/commands/follow_chain.rs | 2 +- .../frame/try-runtime/cli/src/commands/offchain_worker.rs | 2 +- .../try-runtime/cli/src/commands/on_runtime_upgrade.rs | 2 +- 
utils/frame/try-runtime/cli/src/lib.rs | 7 +++++-- 6 files changed, 10 insertions(+), 7 deletions(-) diff --git a/utils/frame/try-runtime/cli/src/commands/create_snapshot.rs b/utils/frame/try-runtime/cli/src/commands/create_snapshot.rs index ef39c3d9846ce..8d6144f84c1e8 100644 --- a/utils/frame/try-runtime/cli/src/commands/create_snapshot.rs +++ b/utils/frame/try-runtime/cli/src/commands/create_snapshot.rs @@ -71,7 +71,7 @@ where let executor = build_executor::(&shared); let _ = State::Live(command.from) - .into_ext::(&shared, &executor, Some(path.into())) + .into_ext::(&shared, &executor, Some(path.into()), false) .await?; Ok(()) diff --git a/utils/frame/try-runtime/cli/src/commands/execute_block.rs b/utils/frame/try-runtime/cli/src/commands/execute_block.rs index ee5e21af5d3ba..e054c8ab87932 100644 --- a/utils/frame/try-runtime/cli/src/commands/execute_block.rs +++ b/utils/frame/try-runtime/cli/src/commands/execute_block.rs @@ -99,7 +99,7 @@ where HostFns: HostFunctions, { let executor = build_executor::(&shared); - let ext = command.state.into_ext::(&shared, &executor, None).await?; + let ext = command.state.into_ext::(&shared, &executor, None, true).await?; // get the block number associated with this block. let block_ws_uri = command.block_ws_uri::(); diff --git a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs index e4f166fe7ada7..d887757b1646e 100644 --- a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs +++ b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs @@ -137,7 +137,7 @@ where pallet: vec![], child_tree: true, }); - let ext = state.into_ext::(&shared, &executor, None).await?; + let ext = state.into_ext::(&shared, &executor, None, true).await?; maybe_state_ext = Some(ext); } diff --git a/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs b/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs index c55de7da64817..985b04bfb1cac 100644 --- a/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs +++ b/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs @@ -78,7 +78,7 @@ where { let executor = build_executor(&shared); // we first build the externalities with the remote code. 
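
// NOTE: as of this change, `into_ext` takes a fourth `try_runtime_check: bool`
// argument (see the `lib.rs` hunk further below). Commands that go on to execute
// runtime code (`execute-block`, `follow-chain`, `offchain-worker`,
// `on-runtime-upgrade`) pass `true`, so the runtime is still verified to be
// compiled with the `try-runtime` feature, while `create-snapshot` passes
// `false`, since it only downloads and persists state and never calls into the
// runtime.
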
- let ext = command.state.into_ext::(&shared, &executor, None).await?; + let ext = command.state.into_ext::(&shared, &executor, None, true).await?; let header_ws_uri = command.header_ws_uri::(); diff --git a/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs b/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs index 81f81d6a551b9..61b5847ed8ffc 100644 --- a/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs +++ b/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs @@ -62,7 +62,7 @@ where HostFns: HostFunctions, { let executor = build_executor(&shared); - let ext = command.state.into_ext::(&shared, &executor, None).await?; + let ext = command.state.into_ext::(&shared, &executor, None, true).await?; let (_, encoded_result) = state_machine_call_with_proof::( &ext, diff --git a/utils/frame/try-runtime/cli/src/lib.rs b/utils/frame/try-runtime/cli/src/lib.rs index aac2ad4238431..dea20febb9c58 100644 --- a/utils/frame/try-runtime/cli/src/lib.rs +++ b/utils/frame/try-runtime/cli/src/lib.rs @@ -609,6 +609,7 @@ impl State { shared: &SharedParams, executor: &WasmExecutor, state_snapshot: Option, + try_runtime_check: bool, ) -> sc_cli::Result> where Block::Hash: FromStr, @@ -707,8 +708,10 @@ impl State { } // whatever runtime we have in store now must have been compiled with try-runtime feature. - if !ensure_try_runtime::(&executor, &mut ext) { - return Err("given runtime is NOT compiled with try-runtime feature!".into()) + if try_runtime_check { + if !ensure_try_runtime::(&executor, &mut ext) { + return Err("given runtime is NOT compiled with try-runtime feature!".into()) + } } Ok(ext) From 8b4331c27bba6d9f178b8be093e39153350b9be8 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Wed, 25 Jan 2023 20:59:19 -0300 Subject: [PATCH 3/8] reduce exec time of fast-unstake benchmarks (#13120) * reduce exec time of fast-unstake benchmarks * fix test * fmt * fix patch the tests * fix patch the tests * ".git/.scripts/commands/bench/bench.sh" pallet dev pallet_fast_unstake * add batch size as well * update some benches to be better * fix one last test * fix * ".git/.scripts/commands/bench/bench.sh" pallet dev pallet_fast_unstake * reduce time even more * ".git/.scripts/commands/bench/bench.sh" pallet dev pallet_fast_unstake * fix tests * nit * remove * improve the weight calc further * ".git/.scripts/commands/bench/bench.sh" pallet dev pallet_fast_unstake * fix benchmarks * ".git/.scripts/commands/bench/bench.sh" pallet dev pallet_fast_unstake * update * fix * fix * fmt * ".git/.scripts/commands/bench/bench.sh" pallet dev pallet_fast_unstake * ".git/.scripts/commands/bench/bench.sh" pallet dev pallet_fast_unstake * lots of changes again... 
* smaller input * update * fmt * ".git/.scripts/commands/bench/bench.sh" pallet dev pallet_fast_unstake * cleanup * small simplification * fmt * reduce exec time a bit * fix * ".git/.scripts/commands/bench/bench.sh" pallet dev pallet_fast_unstake * test * ".git/.scripts/commands/bench/bench.sh" pallet dev pallet_fast_unstake * increase again * ".git/.scripts/commands/bench/bench.sh" pallet dev pallet_fast_unstake * review comments * fmt * fix * ".git/.scripts/commands/bench/bench.sh" pallet dev pallet_fast_unstake Co-authored-by: command-bot <> --- bin/node/runtime/src/lib.rs | 5 +- frame/fast-unstake/src/benchmarking.rs | 38 +++--- frame/fast-unstake/src/lib.rs | 126 ++++++++++++------ frame/fast-unstake/src/mock.rs | 3 + frame/fast-unstake/src/tests.rs | 173 ++++++------------------- frame/fast-unstake/src/weights.rs | 155 ++++++++++++---------- 6 files changed, 237 insertions(+), 263 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 0ac6ade9127e8..1f85d118ea534 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -591,10 +591,13 @@ impl pallet_staking::Config for Runtime { impl pallet_fast_unstake::Config for Runtime { type RuntimeEvent = RuntimeEvent; type ControlOrigin = frame_system::EnsureRoot; - type BatchSize = ConstU32<128>; + type BatchSize = ConstU32<64>; type Deposit = ConstU128<{ DOLLARS }>; type Currency = Balances; type Staking = Staking; + type MaxErasToCheckPerBlock = ConstU32<1>; + #[cfg(feature = "runtime-benchmarks")] + type MaxBackersPerValidator = MaxNominatorRewardedPerValidator; type WeightInfo = (); } diff --git a/frame/fast-unstake/src/benchmarking.rs b/frame/fast-unstake/src/benchmarking.rs index b4a5e21dcfc13..de85502f7b411 100644 --- a/frame/fast-unstake/src/benchmarking.rs +++ b/frame/fast-unstake/src/benchmarking.rs @@ -31,13 +31,11 @@ use sp_staking::{EraIndex, StakingInterface}; use sp_std::prelude::*; const USER_SEED: u32 = 0; -const DEFAULT_BACKER_PER_VALIDATOR: u32 = 128; -const MAX_VALIDATORS: u32 = 128; type CurrencyOf = ::Currency; -fn create_unexposed_nominators() -> Vec { - (0..T::BatchSize::get()) +fn create_unexposed_batch(batch_size: u32) -> Vec { + (0..batch_size) .map(|i| { let account = frame_benchmarking::account::("unexposed_nominator", i, USER_SEED); @@ -76,7 +74,7 @@ fn setup_staking(v: u32, until: EraIndex) { .collect::>(); for era in 0..=until { - let others = (0..DEFAULT_BACKER_PER_VALIDATOR) + let others = (0..T::MaxBackersPerValidator::get()) .map(|s| { let who = frame_benchmarking::account::("nominator", era, s); let value = ed; @@ -97,8 +95,10 @@ fn on_idle_full_block() { benchmarks! { // on_idle, we don't check anyone, but fully unbond them. on_idle_unstake { + let b in 1 .. T::BatchSize::get(); + ErasToCheckPerBlock::::put(1); - for who in create_unexposed_nominators::() { + for who in create_unexposed_batch::(b).into_iter() { assert_ok!(FastUnstake::::register_fast_unstake( RawOrigin::Signed(who.clone()).into(), )); @@ -114,7 +114,7 @@ benchmarks! { checked, stashes, .. - }) if checked.len() == 1 && stashes.len() as u32 == T::BatchSize::get() + }) if checked.len() == 1 && stashes.len() as u32 == b )); } : { @@ -123,25 +123,23 @@ benchmarks! { verify { assert!(matches!( fast_unstake_events::().last(), - Some(Event::BatchFinished) + Some(Event::BatchFinished { size: b }) )); } - // on_idle, when we check some number of eras, + // on_idle, when we check some number of eras and the queue is already set. 
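
// NOTE: the old version of this benchmark used a single component
// `x in bonding_duration .. bonding_duration * MAX_VALIDATORS`; it is now split
// into two independent components: `v`, the number of validators whose
// exposures are scanned, and `b`, the number of stashes in the batch. `Head` is
// also pre-populated below, so the measurement covers the era-checking path
// itself rather than the cost of draining `Queue`.
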
on_idle_check { - // number of eras multiplied by validators in that era. - let x in (T::Staking::bonding_duration() * 1) .. (T::Staking::bonding_duration() * MAX_VALIDATORS); - - let u = T::Staking::bonding_duration(); - let v = x / u; + let v in 1 .. 256; + let b in 1 .. T::BatchSize::get(); + let u = T::MaxErasToCheckPerBlock::get().min(T::Staking::bonding_duration()); ErasToCheckPerBlock::::put(u); T::Staking::set_current_era(u); - // setup staking with v validators and u eras of data (0..=u) + // setup staking with v validators and u eras of data (0..=u+1) setup_staking::(v, u); - let stashes = create_unexposed_nominators::().into_iter().map(|s| { + let stashes = create_unexposed_batch::(b).into_iter().map(|s| { assert_ok!(FastUnstake::::register_fast_unstake( RawOrigin::Signed(s.clone()).into(), )); @@ -150,6 +148,8 @@ benchmarks! { // no one is queued thus far. assert_eq!(Head::::get(), None); + + Head::::put(UnstakeRequest { stashes: stashes.clone().try_into().unwrap(), checked: Default::default() }); } : { on_idle_full_block::(); @@ -167,7 +167,7 @@ benchmarks! { register_fast_unstake { ErasToCheckPerBlock::::put(1); - let who = create_unexposed_nominators::().get(0).cloned().unwrap(); + let who = create_unexposed_batch::(1).get(0).cloned().unwrap(); whitelist_account!(who); assert_eq!(Queue::::count(), 0); @@ -179,7 +179,7 @@ benchmarks! { deregister { ErasToCheckPerBlock::::put(1); - let who = create_unexposed_nominators::().get(0).cloned().unwrap(); + let who = create_unexposed_batch::(1).get(0).cloned().unwrap(); assert_ok!(FastUnstake::::register_fast_unstake( RawOrigin::Signed(who.clone()).into(), )); @@ -194,7 +194,7 @@ benchmarks! { control { let origin = ::ControlOrigin::successful_origin(); } - : _(origin, 128) + : _(origin, T::MaxErasToCheckPerBlock::get()) verify {} impl_benchmark_test_suite!(Pallet, crate::mock::ExtBuilder::default().build(), crate::mock::Runtime) diff --git a/frame/fast-unstake/src/lib.rs b/frame/fast-unstake/src/lib.rs index c0bffc4427a11..82454a4b8ac7d 100644 --- a/frame/fast-unstake/src/lib.rs +++ b/frame/fast-unstake/src/lib.rs @@ -86,12 +86,9 @@ pub mod pallet { traits::{Defensive, ReservableCurrency, StorageVersion}, }; use frame_system::pallet_prelude::*; - use sp_runtime::{ - traits::{Saturating, Zero}, - DispatchResult, - }; + use sp_runtime::{traits::Zero, DispatchResult}; use sp_staking::{EraIndex, StakingInterface}; - use sp_std::{collections::btree_set::BTreeSet, prelude::*, vec::Vec}; + use sp_std::{prelude::*, vec::Vec}; pub use weights::WeightInfo; #[derive(scale_info::TypeInfo, codec::Encode, codec::Decode, codec::MaxEncodedLen)] @@ -138,6 +135,14 @@ pub mod pallet { /// The weight information of this pallet. type WeightInfo: WeightInfo; + + /// Maximum value for `ErasToCheckPerBlock`. This should be as close as possible, but more + /// than the actual value, in order to have accurate benchmarks. + type MaxErasToCheckPerBlock: Get; + + /// Use only for benchmarking. + #[cfg(feature = "runtime-benchmarks")] + type MaxBackersPerValidator: Get; } /// The current "head of the queue" being unstaked. @@ -173,11 +178,11 @@ pub mod pallet { InternalError, /// A batch was partially checked for the given eras, but the process did not finish. BatchChecked { eras: Vec }, - /// A batch was terminated. + /// A batch of a given size was terminated. /// /// This is always follows by a number of `Unstaked` or `Slashed` events, marking the end /// of the batch. A new batch will be created upon next block. 
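
// `size` records how many stashes were successfully unstaked when the batch
// terminated; as the updated tests show, a batch in which every remaining stash
// ends up slashed instead emits `BatchFinished { size: 0 }`.
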
- BatchFinished, + BatchFinished { size: u32 }, } #[pallet::error] @@ -208,6 +213,31 @@ pub mod pallet { Self::do_on_idle(remaining_weight) } + + fn integrity_test() { + sp_std::if_std! { + sp_io::TestExternalities::new_empty().execute_with(|| { + // ensure that the value of `ErasToCheckPerBlock` is less than + // `T::MaxErasToCheckPerBlock`. + assert!( + ErasToCheckPerBlock::::get() <= T::MaxErasToCheckPerBlock::get(), + "the value of `ErasToCheckPerBlock` is greater than `T::MaxErasToCheckPerBlock`", + ); + }); + } + } + + #[cfg(feature = "try-runtime")] + fn try_state(_n: T::BlockNumber) -> Result<(), &'static str> { + // ensure that the value of `ErasToCheckPerBlock` is less than + // `T::MaxErasToCheckPerBlock`. + assert!( + ErasToCheckPerBlock::::get() <= T::MaxErasToCheckPerBlock::get(), + "the value of `ErasToCheckPerBlock` is greater than `T::MaxErasToCheckPerBlock`", + ); + + Ok(()) + } } #[pallet::call] @@ -288,9 +318,10 @@ pub mod pallet { /// Dispatch origin must be signed by the [`Config::ControlOrigin`]. #[pallet::call_index(2)] #[pallet::weight(::WeightInfo::control())] - pub fn control(origin: OriginFor, unchecked_eras_to_check: EraIndex) -> DispatchResult { + pub fn control(origin: OriginFor, eras_to_check: EraIndex) -> DispatchResult { let _ = T::ControlOrigin::ensure_origin(origin)?; - ErasToCheckPerBlock::::put(unchecked_eras_to_check); + ensure!(eras_to_check <= T::MaxErasToCheckPerBlock::get(), Error::::CallNotAllowed); + ErasToCheckPerBlock::::put(eras_to_check); Ok(()) } } @@ -324,27 +355,38 @@ pub mod pallet { /// found out to have no eras to check. At the end of a check cycle, even if they are fully /// checked, we don't finish the process. pub(crate) fn do_on_idle(remaining_weight: Weight) -> Weight { - let mut eras_to_check_per_block = ErasToCheckPerBlock::::get(); + // any weight that is unaccounted for + let mut unaccounted_weight = Weight::from_ref_time(0); + + let eras_to_check_per_block = ErasToCheckPerBlock::::get(); if eras_to_check_per_block.is_zero() { - return T::DbWeight::get().reads(1) + return T::DbWeight::get().reads(1).saturating_add(unaccounted_weight) } // NOTE: here we're assuming that the number of validators has only ever increased, // meaning that the number of exposures to check is either this per era, or less. let validator_count = T::Staking::desired_validator_count(); + let (next_batch_size, reads_from_queue) = Head::::get() + .map_or((Queue::::count().min(T::BatchSize::get()), true), |head| { + (head.stashes.len() as u32, false) + }); // determine the number of eras to check. This is based on both `ErasToCheckPerBlock` // and `remaining_weight` passed on to us from the runtime executive. - let max_weight = |v, u| { - ::WeightInfo::on_idle_check(v * u) - .max(::WeightInfo::on_idle_unstake()) + let max_weight = |v, b| { + // NOTE: this potentially under-counts by up to `BatchSize` reads from the queue. 
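
// Where the old code decremented `eras_to_check_per_block` in a loop until the
// estimated weight fit into `remaining_weight`, the closure below computes a
// single worst-case bound: the maximum of the era-checking path
// (`on_idle_check(v, b)`) and the unstaking path (`on_idle_unstake(b)`), plus
// up to `next_batch_size` reads when a fresh batch has to be pulled out of
// `Queue`. If that bound does not fit into `remaining_weight`, `do_on_idle`
// now bails out early instead of shrinking the amount of work.
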
+ ::WeightInfo::on_idle_check(v, b) + .max(::WeightInfo::on_idle_unstake(b)) + .saturating_add(if reads_from_queue { + T::DbWeight::get().reads(next_batch_size.into()) + } else { + Zero::zero() + }) }; - while max_weight(validator_count, eras_to_check_per_block).any_gt(remaining_weight) { - eras_to_check_per_block.saturating_dec(); - if eras_to_check_per_block.is_zero() { - log!(debug, "early existing because eras_to_check_per_block is zero"); - return T::DbWeight::get().reads(2) - } + + if max_weight(validator_count, next_batch_size).any_gt(remaining_weight) { + log!(debug, "early exit because eras_to_check_per_block is zero"); + return T::DbWeight::get().reads(3).saturating_add(unaccounted_weight) } if T::Staking::election_ongoing() { @@ -352,7 +394,7 @@ pub mod pallet { // there is an ongoing election -- we better not do anything. Imagine someone is not // exposed anywhere in the last era, and the snapshot for the election is already // taken. In this time period, we don't want to accidentally unstake them. - return T::DbWeight::get().reads(2) + return T::DbWeight::get().reads(4).saturating_add(unaccounted_weight) } let UnstakeRequest { stashes, mut checked } = match Head::::take().or_else(|| { @@ -362,6 +404,9 @@ pub mod pallet { .collect::>() .try_into() .expect("take ensures bound is met; qed"); + unaccounted_weight.saturating_accrue( + T::DbWeight::get().reads_writes(stashes.len() as u64, stashes.len() as u64), + ); if stashes.is_empty() { None } else { @@ -377,10 +422,11 @@ pub mod pallet { log!( debug, - "checking {:?} stashes, eras_to_check_per_block = {:?}, remaining_weight = {:?}", + "checking {:?} stashes, eras_to_check_per_block = {:?}, checked {:?}, remaining_weight = {:?}", stashes.len(), eras_to_check_per_block, - remaining_weight + checked, + remaining_weight, ); // the range that we're allowed to check in this round. @@ -431,11 +477,10 @@ pub mod pallet { } }; - let check_stash = |stash, deposit, eras_checked: &mut BTreeSet| { - let is_exposed = unchecked_eras_to_check.iter().any(|e| { - eras_checked.insert(*e); - T::Staking::is_exposed_in_era(&stash, e) - }); + let check_stash = |stash, deposit| { + let is_exposed = unchecked_eras_to_check + .iter() + .any(|e| T::Staking::is_exposed_in_era(&stash, e)); if is_exposed { T::Currency::slash_reserved(&stash, deposit); @@ -448,20 +493,16 @@ pub mod pallet { }; if unchecked_eras_to_check.is_empty() { - // `stash` is not exposed in any era now -- we can let go of them now. + // `stashes` are not exposed in any era now -- we can let go of them now. + let size = stashes.len() as u32; stashes.into_iter().for_each(|(stash, deposit)| unstake_stash(stash, deposit)); - Self::deposit_event(Event::::BatchFinished); - ::WeightInfo::on_idle_unstake() + Self::deposit_event(Event::::BatchFinished { size }); + ::WeightInfo::on_idle_unstake(size).saturating_add(unaccounted_weight) } else { - // eras checked so far. 
- let mut eras_checked = BTreeSet::::new(); - let pre_length = stashes.len(); let stashes: BoundedVec<(T::AccountId, BalanceOf), T::BatchSize> = stashes .into_iter() - .filter(|(stash, deposit)| { - check_stash(stash.clone(), *deposit, &mut eras_checked) - }) + .filter(|(stash, deposit)| check_stash(stash.clone(), *deposit)) .collect::>() .try_into() .expect("filter can only lessen the length; still in bound; qed"); @@ -469,8 +510,8 @@ pub mod pallet { log!( debug, - "checked {:?} eras, pre stashes: {:?}, post: {:?}", - eras_checked.len(), + "checked {:?}, pre stashes: {:?}, post: {:?}", + unchecked_eras_to_check, pre_length, post_length, ); @@ -478,7 +519,7 @@ pub mod pallet { match checked.try_extend(unchecked_eras_to_check.clone().into_iter()) { Ok(_) => if stashes.is_empty() { - Self::deposit_event(Event::::BatchFinished); + Self::deposit_event(Event::::BatchFinished { size: 0 }); } else { Head::::put(UnstakeRequest { stashes, checked }); Self::deposit_event(Event::::BatchChecked { @@ -491,9 +532,8 @@ pub mod pallet { }, } - ::WeightInfo::on_idle_check( - validator_count * eras_checked.len() as u32, - ) + ::WeightInfo::on_idle_check(validator_count, pre_length as u32) + .saturating_add(unaccounted_weight) } } } diff --git a/frame/fast-unstake/src/mock.rs b/frame/fast-unstake/src/mock.rs index 3f974e5e1a9d6..38cf41fdebe8b 100644 --- a/frame/fast-unstake/src/mock.rs +++ b/frame/fast-unstake/src/mock.rs @@ -185,6 +185,9 @@ impl fast_unstake::Config for Runtime { type ControlOrigin = frame_system::EnsureRoot; type BatchSize = BatchSize; type WeightInfo = (); + type MaxErasToCheckPerBlock = ConstU32<16>; + #[cfg(feature = "runtime-benchmarks")] + type MaxBackersPerValidator = ConstU32<128>; } type Block = frame_system::mocking::MockBlock; diff --git a/frame/fast-unstake/src/tests.rs b/frame/fast-unstake/src/tests.rs index 522aa1d0fac28..3e62146ae299f 100644 --- a/frame/fast-unstake/src/tests.rs +++ b/frame/fast-unstake/src/tests.rs @@ -18,7 +18,7 @@ //! Tests for pallet-fast-unstake. use super::*; -use crate::{mock::*, types::*, weights::WeightInfo, Event}; +use crate::{mock::*, types::*, Event}; use frame_support::{assert_noop, assert_ok, bounded_vec, pallet_prelude::*, traits::Currency}; use pallet_staking::{CurrentEra, RewardDestination}; @@ -236,120 +236,6 @@ mod on_idle { }); } - #[test] - fn respects_weight() { - ExtBuilder::default().build_and_execute(|| { - // we want to check all eras in one block... - ErasToCheckPerBlock::::put(BondingDuration::get() + 1); - CurrentEra::::put(BondingDuration::get()); - - // given - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(Queue::::get(1), Some(Deposit::get())); - - assert_eq!(Queue::::count(), 1); - assert_eq!(Head::::get(), None); - - // when: call fast unstake with not enough weight to process the whole thing, just one - // era. - let remaining_weight = ::WeightInfo::on_idle_check( - pallet_staking::ValidatorCount::::get() * 1, - ); - assert_eq!(FastUnstake::on_idle(0, remaining_weight), remaining_weight); - - // then - assert_eq!( - fast_unstake_events_since_last_call(), - vec![Event::BatchChecked { eras: vec![3] }] - ); - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![(1, Deposit::get())], - checked: bounded_vec![3] - }) - ); - - // when: another 1 era. 
- let remaining_weight = ::WeightInfo::on_idle_check( - pallet_staking::ValidatorCount::::get() * 1, - ); - assert_eq!(FastUnstake::on_idle(0, remaining_weight), remaining_weight); - - // then: - assert_eq!( - fast_unstake_events_since_last_call(), - vec![Event::BatchChecked { eras: bounded_vec![2] }] - ); - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![(1, Deposit::get())], - checked: bounded_vec![3, 2] - }) - ); - - // when: then 5 eras, we only need 2 more. - let remaining_weight = ::WeightInfo::on_idle_check( - pallet_staking::ValidatorCount::::get() * 5, - ); - assert_eq!( - FastUnstake::on_idle(0, remaining_weight), - // note the amount of weight consumed: 2 eras worth of weight. - ::WeightInfo::on_idle_check( - pallet_staking::ValidatorCount::::get() * 2, - ) - ); - - // then: - assert_eq!( - fast_unstake_events_since_last_call(), - vec![Event::BatchChecked { eras: vec![1, 0] }] - ); - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![(1, Deposit::get())], - checked: bounded_vec![3, 2, 1, 0] - }) - ); - - // when: not enough weight to unstake: - let remaining_weight = - ::WeightInfo::on_idle_unstake() - Weight::from_ref_time(1); - assert_eq!(FastUnstake::on_idle(0, remaining_weight), Weight::from_ref_time(0)); - - // then nothing happens: - assert_eq!(fast_unstake_events_since_last_call(), vec![]); - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![(1, Deposit::get())], - checked: bounded_vec![3, 2, 1, 0] - }) - ); - - // when: enough weight to get over at least one iteration: then we are unblocked and can - // unstake. - let remaining_weight = ::WeightInfo::on_idle_check( - pallet_staking::ValidatorCount::::get() * 1, - ); - assert_eq!( - FastUnstake::on_idle(0, remaining_weight), - ::WeightInfo::on_idle_unstake() - ); - - // then we finish the unbonding: - assert_eq!( - fast_unstake_events_since_last_call(), - vec![Event::Unstaked { stash: 1, result: Ok(()) }, Event::BatchFinished], - ); - assert_eq!(Head::::get(), None,); - - assert_unstaked(&1); - }); - } - #[test] fn if_head_not_set_one_random_fetched_from_queue() { ExtBuilder::default().build_and_execute(|| { @@ -410,7 +296,7 @@ mod on_idle { vec![ Event::BatchChecked { eras: vec![3, 2, 1, 0] }, Event::Unstaked { stash: 1, result: Ok(()) }, - Event::BatchFinished, + Event::BatchFinished { size: 1 }, Event::BatchChecked { eras: vec![3, 2, 1, 0] } ] ); @@ -458,10 +344,10 @@ mod on_idle { vec![ Event::BatchChecked { eras: vec![3, 2, 1, 0] }, Event::Unstaked { stash: 1, result: Ok(()) }, - Event::BatchFinished, + Event::BatchFinished { size: 1 }, Event::BatchChecked { eras: vec![3, 2, 1, 0] }, Event::Unstaked { stash: 3, result: Ok(()) }, - Event::BatchFinished, + Event::BatchFinished { size: 1 }, ] ); @@ -503,7 +389,7 @@ mod on_idle { vec![ Event::BatchChecked { eras: vec![3, 2, 1, 0] }, Event::Unstaked { stash: 1, result: Ok(()) }, - Event::BatchFinished + Event::BatchFinished { size: 1 } ] ); assert_unstaked(&1); @@ -545,7 +431,7 @@ mod on_idle { vec![ Event::BatchChecked { eras: vec![3, 2, 1, 0] }, Event::Unstaked { stash: 1, result: Ok(()) }, - Event::BatchFinished + Event::BatchFinished { size: 1 } ] ); assert_unstaked(&1); @@ -620,7 +506,7 @@ mod on_idle { Event::BatchChecked { eras: vec![1] }, Event::BatchChecked { eras: vec![0] }, Event::Unstaked { stash: 1, result: Ok(()) }, - Event::BatchFinished + Event::BatchFinished { size: 1 } ] ); assert_unstaked(&1); @@ -704,7 +590,7 @@ mod on_idle { Event::BatchChecked { eras: vec![0] }, 
Event::BatchChecked { eras: vec![4] }, Event::Unstaked { stash: 1, result: Ok(()) }, - Event::BatchFinished + Event::BatchFinished { size: 1 } ] ); assert_unstaked(&1); @@ -799,7 +685,7 @@ mod on_idle { Event::BatchChecked { eras: vec![4] }, Event::BatchChecked { eras: vec![1] }, Event::Unstaked { stash: 1, result: Ok(()) }, - Event::BatchFinished + Event::BatchFinished { size: 1 } ] ); @@ -843,7 +729,7 @@ mod on_idle { Event::BatchChecked { eras: vec![3] }, Event::BatchChecked { eras: vec![2] }, Event::Slashed { stash: exposed, amount: Deposit::get() }, - Event::BatchFinished + Event::BatchFinished { size: 0 } ] ); }); @@ -879,7 +765,7 @@ mod on_idle { vec![ Event::BatchChecked { eras: vec![3, 2] }, Event::Slashed { stash: exposed, amount: Deposit::get() }, - Event::BatchFinished + Event::BatchFinished { size: 0 } ] ); }); @@ -910,7 +796,10 @@ mod on_idle { assert_eq!( fast_unstake_events_since_last_call(), - vec![Event::Slashed { stash: 100, amount: Deposit::get() }, Event::BatchFinished] + vec![ + Event::Slashed { stash: 100, amount: Deposit::get() }, + Event::BatchFinished { size: 0 } + ] ); }); } @@ -946,7 +835,7 @@ mod on_idle { vec![ Event::BatchChecked { eras: vec![3, 2, 1, 0] }, Event::Unstaked { stash: 42, result: Ok(()) }, - Event::BatchFinished + Event::BatchFinished { size: 1 } ] ); }); @@ -1001,7 +890,7 @@ mod batched { Event::Unstaked { stash: 1, result: Ok(()) }, Event::Unstaked { stash: 5, result: Ok(()) }, Event::Unstaked { stash: 7, result: Ok(()) }, - Event::BatchFinished + Event::BatchFinished { size: 3 } ] ); }); @@ -1067,7 +956,7 @@ mod batched { Event::Unstaked { stash: 1, result: Ok(()) }, Event::Unstaked { stash: 5, result: Ok(()) }, Event::Unstaked { stash: 7, result: Ok(()) }, - Event::BatchFinished + Event::BatchFinished { size: 3 } ] ); }); @@ -1132,7 +1021,7 @@ mod batched { Event::BatchChecked { eras: vec![1, 0] }, Event::Unstaked { stash: 1, result: Ok(()) }, Event::Unstaked { stash: 3, result: Ok(()) }, - Event::BatchFinished + Event::BatchFinished { size: 2 } ] ); }); @@ -1191,10 +1080,32 @@ mod batched { Event::Slashed { stash: 666, amount: Deposit::get() }, Event::BatchChecked { eras: vec![3] }, Event::Slashed { stash: 667, amount: Deposit::get() }, - Event::BatchFinished, + Event::BatchFinished { size: 0 }, Event::BatchChecked { eras: vec![3] } ] ); }); } } + +#[test] +fn kusama_estimate() { + use crate::WeightInfo; + let block_time = frame_support::weights::Weight::from_ref_time( + frame_support::weights::constants::WEIGHT_REF_TIME_PER_SECOND * 2, + ) + .ref_time() as f32; + let on_idle = crate::weights::SubstrateWeight::::on_idle_check(1000, 64).ref_time() as f32; + dbg!(block_time, on_idle, on_idle / block_time); +} + +#[test] +fn polkadot_estimate() { + use crate::WeightInfo; + let block_time = frame_support::weights::Weight::from_ref_time( + frame_support::weights::constants::WEIGHT_REF_TIME_PER_SECOND * 2, + ) + .ref_time() as f32; + let on_idle = crate::weights::SubstrateWeight::::on_idle_check(300, 64).ref_time() as f32; + dbg!(block_time, on_idle, on_idle / block_time); +} diff --git a/frame/fast-unstake/src/weights.rs b/frame/fast-unstake/src/weights.rs index 6001250e8c24d..f7d0cacba0bcf 100644 --- a/frame/fast-unstake/src/weights.rs +++ b/frame/fast-unstake/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// Copyright (C) 2023 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,24 +18,25 @@ //! Autogenerated weights for pallet_fast_unstake //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2023-01-25, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `runner-b3zmxxc-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/substrate +// target/production/substrate // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_fast_unstake // --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/fast-unstake/src/weights.rs +// --json-file=/builds/parity/mirrors/substrate/.git/.artifacts/bench.json +// --pallet=pallet_fast_unstake +// --chain=dev // --header=./HEADER-APACHE2 +// --output=./frame/fast-unstake/src/weights.rs // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -47,8 +48,8 @@ use sp_std::marker::PhantomData; /// Weight functions needed for pallet_fast_unstake. pub trait WeightInfo { - fn on_idle_unstake() -> Weight; - fn on_idle_check(x: u32, ) -> Weight; + fn on_idle_unstake(b: u32, ) -> Weight; + fn on_idle_check(v: u32, b: u32, ) -> Weight; fn register_fast_unstake() -> Weight; fn deregister() -> Weight; fn control() -> Weight; @@ -59,8 +60,9 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) // Storage: Staking ValidatorCount (r:1 w:0) - // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) // Storage: FastUnstake Head (r:1 w:1) + // Storage: FastUnstake CounterForQueue (r:1 w:0) + // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking SlashingSpans (r:1 w:0) // Storage: Staking Bonded (r:1 w:1) @@ -70,29 +72,36 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) // Storage: Staking Ledger (r:0 w:1) // Storage: Staking Payee (r:0 w:1) - fn on_idle_unstake() -> Weight { - // Minimum execution time: 82_426 nanoseconds. - Weight::from_ref_time(83_422_000 as u64) - .saturating_add(T::DbWeight::get().reads(11 as u64)) - .saturating_add(T::DbWeight::get().writes(6 as u64)) + /// The range of component `b` is `[1, 64]`. + fn on_idle_unstake(b: u32, ) -> Weight { + // Minimum execution time: 92_833 nanoseconds. 
+ Weight::from_ref_time(62_136_346) + // Standard Error: 25_541 + .saturating_add(Weight::from_ref_time(42_904_859).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().reads((6_u64).saturating_mul(b.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((5_u64).saturating_mul(b.into()))) } // Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) // Storage: Staking ValidatorCount (r:1 w:0) - // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) // Storage: FastUnstake Head (r:1 w:1) - // Storage: FastUnstake Queue (r:2 w:1) - // Storage: FastUnstake CounterForQueue (r:1 w:1) + // Storage: FastUnstake CounterForQueue (r:1 w:0) + // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) // Storage: Staking CurrentEra (r:1 w:0) - // Storage: Staking ErasStakers (r:1344 w:0) - /// The range of component `x` is `[672, 86016]`. - fn on_idle_check(x: u32, ) -> Weight { - // Minimum execution time: 13_932_777 nanoseconds. - Weight::from_ref_time(13_996_029_000 as u64) - // Standard Error: 16_878 - .saturating_add(Weight::from_ref_time(18_113_540 as u64).saturating_mul(x as u64)) - .saturating_add(T::DbWeight::get().reads(345 as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(x as u64))) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + // Storage: Staking ErasStakers (r:2 w:0) + /// The range of component `v` is `[1, 256]`. + /// The range of component `b` is `[1, 64]`. + fn on_idle_check(v: u32, b: u32, ) -> Weight { + // Minimum execution time: 1_775_293 nanoseconds. + Weight::from_ref_time(1_787_133_000) + // Standard Error: 17_109_142 + .saturating_add(Weight::from_ref_time(546_766_552).saturating_mul(v.into())) + // Standard Error: 68_455_625 + .saturating_add(Weight::from_ref_time(2_135_980_830).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) + .saturating_add(T::DbWeight::get().writes(1)) } // Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) @@ -109,10 +118,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) // Storage: FastUnstake CounterForQueue (r:1 w:1) fn register_fast_unstake() -> Weight { - // Minimum execution time: 120_190 nanoseconds. - Weight::from_ref_time(121_337_000 as u64) - .saturating_add(T::DbWeight::get().reads(14 as u64)) - .saturating_add(T::DbWeight::get().writes(9 as u64)) + // Minimum execution time: 124_849 nanoseconds. + Weight::from_ref_time(128_176_000) + .saturating_add(T::DbWeight::get().reads(14)) + .saturating_add(T::DbWeight::get().writes(9)) } // Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) @@ -120,16 +129,16 @@ impl WeightInfo for SubstrateWeight { // Storage: FastUnstake Head (r:1 w:0) // Storage: FastUnstake CounterForQueue (r:1 w:1) fn deregister() -> Weight { - // Minimum execution time: 49_897 nanoseconds. - Weight::from_ref_time(50_080_000 as u64) - .saturating_add(T::DbWeight::get().reads(5 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + // Minimum execution time: 48_246 nanoseconds. + Weight::from_ref_time(49_720_000) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(2)) } // Storage: FastUnstake ErasToCheckPerBlock (r:0 w:1) fn control() -> Weight { - // Minimum execution time: 4_814 nanoseconds. 
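
// To put the new two-component formula into perspective (cf. the
// `kusama_estimate` test above): with `v = 1000` validators and a full batch of
// `b = 64`, `on_idle_check` evaluates to roughly
//   1_787_133_000 + 1_000 * 546_766_552 + 64 * 2_135_980_830
//   = 685_256_458_120 ref-time units (picoseconds),
// i.e. about 0.69s, or roughly a third of a 2-second block.
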
- Weight::from_ref_time(4_997_000 as u64) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + // Minimum execution time: 4_611 nanoseconds. + Weight::from_ref_time(4_844_000) + .saturating_add(T::DbWeight::get().writes(1)) } } @@ -137,8 +146,9 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) // Storage: Staking ValidatorCount (r:1 w:0) - // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) // Storage: FastUnstake Head (r:1 w:1) + // Storage: FastUnstake CounterForQueue (r:1 w:0) + // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking SlashingSpans (r:1 w:0) // Storage: Staking Bonded (r:1 w:1) @@ -148,29 +158,36 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) // Storage: Staking Ledger (r:0 w:1) // Storage: Staking Payee (r:0 w:1) - fn on_idle_unstake() -> Weight { - // Minimum execution time: 82_426 nanoseconds. - Weight::from_ref_time(83_422_000 as u64) - .saturating_add(RocksDbWeight::get().reads(11 as u64)) - .saturating_add(RocksDbWeight::get().writes(6 as u64)) + /// The range of component `b` is `[1, 64]`. + fn on_idle_unstake(b: u32, ) -> Weight { + // Minimum execution time: 92_833 nanoseconds. + Weight::from_ref_time(62_136_346) + // Standard Error: 25_541 + .saturating_add(Weight::from_ref_time(42_904_859).saturating_mul(b.into())) + .saturating_add(RocksDbWeight::get().reads(6)) + .saturating_add(RocksDbWeight::get().reads((6_u64).saturating_mul(b.into()))) + .saturating_add(RocksDbWeight::get().writes(1)) + .saturating_add(RocksDbWeight::get().writes((5_u64).saturating_mul(b.into()))) } // Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) // Storage: Staking ValidatorCount (r:1 w:0) - // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) // Storage: FastUnstake Head (r:1 w:1) - // Storage: FastUnstake Queue (r:2 w:1) - // Storage: FastUnstake CounterForQueue (r:1 w:1) + // Storage: FastUnstake CounterForQueue (r:1 w:0) + // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) // Storage: Staking CurrentEra (r:1 w:0) - // Storage: Staking ErasStakers (r:1344 w:0) - /// The range of component `x` is `[672, 86016]`. - fn on_idle_check(x: u32, ) -> Weight { - // Minimum execution time: 13_932_777 nanoseconds. - Weight::from_ref_time(13_996_029_000 as u64) - // Standard Error: 16_878 - .saturating_add(Weight::from_ref_time(18_113_540 as u64).saturating_mul(x as u64)) - .saturating_add(RocksDbWeight::get().reads(345 as u64)) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(x as u64))) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + // Storage: Staking ErasStakers (r:2 w:0) + /// The range of component `v` is `[1, 256]`. + /// The range of component `b` is `[1, 64]`. + fn on_idle_check(v: u32, b: u32, ) -> Weight { + // Minimum execution time: 1_775_293 nanoseconds. 
+ Weight::from_ref_time(1_787_133_000) + // Standard Error: 17_109_142 + .saturating_add(Weight::from_ref_time(546_766_552).saturating_mul(v.into())) + // Standard Error: 68_455_625 + .saturating_add(Weight::from_ref_time(2_135_980_830).saturating_mul(b.into())) + .saturating_add(RocksDbWeight::get().reads(7)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(v.into()))) + .saturating_add(RocksDbWeight::get().writes(1)) } // Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) @@ -187,10 +204,10 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) // Storage: FastUnstake CounterForQueue (r:1 w:1) fn register_fast_unstake() -> Weight { - // Minimum execution time: 120_190 nanoseconds. - Weight::from_ref_time(121_337_000 as u64) - .saturating_add(RocksDbWeight::get().reads(14 as u64)) - .saturating_add(RocksDbWeight::get().writes(9 as u64)) + // Minimum execution time: 124_849 nanoseconds. + Weight::from_ref_time(128_176_000) + .saturating_add(RocksDbWeight::get().reads(14)) + .saturating_add(RocksDbWeight::get().writes(9)) } // Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) @@ -198,15 +215,15 @@ impl WeightInfo for () { // Storage: FastUnstake Head (r:1 w:0) // Storage: FastUnstake CounterForQueue (r:1 w:1) fn deregister() -> Weight { - // Minimum execution time: 49_897 nanoseconds. - Weight::from_ref_time(50_080_000 as u64) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + // Minimum execution time: 48_246 nanoseconds. + Weight::from_ref_time(49_720_000) + .saturating_add(RocksDbWeight::get().reads(5)) + .saturating_add(RocksDbWeight::get().writes(2)) } // Storage: FastUnstake ErasToCheckPerBlock (r:0 w:1) fn control() -> Weight { - // Minimum execution time: 4_814 nanoseconds. - Weight::from_ref_time(4_997_000 as u64) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + // Minimum execution time: 4_611 nanoseconds. 
+ Weight::from_ref_time(4_844_000) + .saturating_add(RocksDbWeight::get().writes(1)) } } From 9db957cea255802d50231347ebef393d0707b909 Mon Sep 17 00:00:00 2001 From: Koute Date: Thu, 26 Jan 2023 14:38:00 +0900 Subject: [PATCH 4/8] Rework the trie cache (#12982) * Rework the trie cache * Align `state-machine` tests * Bump `schnellru` to 0.1.1 * Fix off-by-one * Align to review comments * Bump `ahash` to 0.8.2 * Bump `schnellru` to 0.2.0 * Bump `schnellru` to 0.2.1 * Remove unnecessary bound * Remove unnecessary loop when calculating maximum memory usage * Remove unnecessary `mut`s --- Cargo.lock | 8 +- client/db/src/bench.rs | 2 +- client/db/src/lib.rs | 2 +- client/finality-grandpa/Cargo.toml | 2 +- client/network-gossip/Cargo.toml | 2 +- primitives/state-machine/src/trie_backend.rs | 20 +- primitives/trie/Cargo.toml | 6 +- primitives/trie/src/cache/mod.rs | 576 ++++++++--- primitives/trie/src/cache/shared_cache.rs | 948 +++++++++++-------- 9 files changed, 1016 insertions(+), 550 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 53e5d4882b9dd..7c412030d560c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8383,7 +8383,7 @@ dependencies = [ name = "sc-finality-grandpa" version = "0.10.0-dev" dependencies = [ - "ahash 0.7.6", + "ahash 0.8.2", "array-bytes", "assert_matches", "async-trait", @@ -8583,7 +8583,7 @@ dependencies = [ name = "sc-network-gossip" version = "0.10.0-dev" dependencies = [ - "ahash 0.7.6", + "ahash 0.8.2", "futures", "futures-timer", "libp2p", @@ -10352,18 +10352,18 @@ dependencies = [ name = "sp-trie" version = "7.0.0" dependencies = [ - "ahash 0.7.6", + "ahash 0.8.2", "array-bytes", "criterion", "hash-db", "hashbrown 0.12.3", "lazy_static", - "lru", "memory-db", "nohash-hasher", "parity-scale-codec", "parking_lot 0.12.1", "scale-info", + "schnellru", "sp-core", "sp-runtime", "sp-std", diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index 13d91fff0b555..e53209f27706c 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -110,7 +110,7 @@ impl BenchmarkingState { proof_recorder_root: Cell::new(root), enable_tracking, // Enable the cache, but do not sync anything to the shared state. 
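
// NOTE: `CacheSize` changes in this patch from an enum with `Maximum(bytes)` and
// `Unlimited` variants to a plain wrapper around a byte count: limits are now
// created with `CacheSize::new(bytes)`, and `CacheSize::unlimited()` is simply a
// budget of `usize::MAX` bytes (see `primitives/trie/src/cache/mod.rs` below).
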
- shared_trie_cache: SharedTrieCache::new(CacheSize::Maximum(0)), + shared_trie_cache: SharedTrieCache::new(CacheSize::new(0)), }; state.add_whitelist_to_tracker(); diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 09ccfef1cc28b..f217eb6480abc 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -1243,7 +1243,7 @@ impl Backend { blocks_pruning: config.blocks_pruning, genesis_state: RwLock::new(None), shared_trie_cache: config.trie_cache_maximum_size.map(|maximum_size| { - SharedTrieCache::new(sp_trie::cache::CacheSize::Maximum(maximum_size)) + SharedTrieCache::new(sp_trie::cache::CacheSize::new(maximum_size)) }), }; diff --git a/client/finality-grandpa/Cargo.toml b/client/finality-grandpa/Cargo.toml index 9a31a1d4bf6a7..8a4e0449f2d1c 100644 --- a/client/finality-grandpa/Cargo.toml +++ b/client/finality-grandpa/Cargo.toml @@ -14,7 +14,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -ahash = "0.7.6" +ahash = "0.8.2" array-bytes = "4.1" async-trait = "0.1.57" dyn-clone = "1.0" diff --git a/client/network-gossip/Cargo.toml b/client/network-gossip/Cargo.toml index ef23062768d1e..7811e86c095ba 100644 --- a/client/network-gossip/Cargo.toml +++ b/client/network-gossip/Cargo.toml @@ -14,7 +14,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -ahash = "0.7.6" +ahash = "0.8.2" futures = "0.3.21" futures-timer = "3.0.1" libp2p = "0.50.0" diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index da4250b6ba3e1..ff5cce24c5124 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -412,19 +412,19 @@ pub mod tests { fn $name() { let parameters = vec![ (StateVersion::V0, None, None), - (StateVersion::V0, Some(SharedCache::new(CacheSize::Unlimited)), None), + (StateVersion::V0, Some(SharedCache::new(CacheSize::unlimited())), None), (StateVersion::V0, None, Some(Recorder::default())), ( StateVersion::V0, - Some(SharedCache::new(CacheSize::Unlimited)), + Some(SharedCache::new(CacheSize::unlimited())), Some(Recorder::default()), ), (StateVersion::V1, None, None), - (StateVersion::V1, Some(SharedCache::new(CacheSize::Unlimited)), None), + (StateVersion::V1, Some(SharedCache::new(CacheSize::unlimited())), None), (StateVersion::V1, None, Some(Recorder::default())), ( StateVersion::V1, - Some(SharedCache::new(CacheSize::Unlimited)), + Some(SharedCache::new(CacheSize::unlimited())), Some(Recorder::default()), ), ]; @@ -760,7 +760,7 @@ pub mod tests { .clone() .for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i; size_content])); - for cache in [Some(SharedTrieCache::new(CacheSize::Unlimited)), None] { + for cache in [Some(SharedTrieCache::new(CacheSize::unlimited())), None] { // Run multiple times to have a different cache conditions. for i in 0..5 { if let Some(cache) = &cache { @@ -793,7 +793,7 @@ pub mod tests { proof_record_works_with_iter_inner(StateVersion::V1); } fn proof_record_works_with_iter_inner(state_version: StateVersion) { - for cache in [Some(SharedTrieCache::new(CacheSize::Unlimited)), None] { + for cache in [Some(SharedTrieCache::new(CacheSize::unlimited())), None] { // Run multiple times to have a different cache conditions. 
for i in 0..5 { if let Some(cache) = &cache { @@ -870,7 +870,7 @@ pub mod tests { assert_eq!(in_memory.child_storage(child_info_2, &[i]).unwrap().unwrap(), vec![i]) }); - for cache in [Some(SharedTrieCache::new(CacheSize::Unlimited)), None] { + for cache in [Some(SharedTrieCache::new(CacheSize::unlimited())), None] { // Run multiple times to have a different cache conditions. for i in 0..5 { eprintln!("Running with cache {}, iteration {}", cache.is_some(), i); @@ -1002,7 +1002,7 @@ pub mod tests { nodes }; - let cache = SharedTrieCache::::new(CacheSize::Unlimited); + let cache = SharedTrieCache::::new(CacheSize::unlimited()); { let local_cache = cache.local_cache(); let mut trie_cache = local_cache.as_trie_db_cache(child_1_root); @@ -1093,7 +1093,7 @@ pub mod tests { #[test] fn new_data_is_added_to_the_cache() { - let shared_cache = SharedTrieCache::new(CacheSize::Unlimited); + let shared_cache = SharedTrieCache::new(CacheSize::unlimited()); let new_data = vec![ (&b"new_data0"[..], Some(&b"0"[..])), (&b"new_data1"[..], Some(&b"1"[..])), @@ -1159,7 +1159,7 @@ pub mod tests { assert_eq!(in_memory.child_storage(child_info_1, &key).unwrap().unwrap(), child_trie_1_val); assert_eq!(in_memory.child_storage(child_info_2, &key).unwrap().unwrap(), child_trie_2_val); - for cache in [Some(SharedTrieCache::new(CacheSize::Unlimited)), None] { + for cache in [Some(SharedTrieCache::new(CacheSize::unlimited())), None] { // Run multiple times to have a different cache conditions. for i in 0..5 { eprintln!("Running with cache {}, iteration {}", cache.is_some(), i); diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index 3f045a1cb216d..78b0b5d1bbda3 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -18,12 +18,11 @@ name = "bench" harness = false [dependencies] -ahash = { version = "0.7.6", optional = true } +ahash = { version = "0.8.2", optional = true } codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } hashbrown = { version = "0.12.3", optional = true } hash-db = { version = "0.15.2", default-features = false } lazy_static = { version = "1.4.0", optional = true } -lru = { version = "0.8.1", optional = true } memory-db = { version = "0.31.0", default-features = false } nohash-hasher = { version = "0.2.0", optional = true } parking_lot = { version = "0.12.1", optional = true } @@ -34,6 +33,7 @@ trie-db = { version = "0.24.0", default-features = false } trie-root = { version = "0.17.0", default-features = false } sp-core = { version = "7.0.0", default-features = false, path = "../core" } sp-std = { version = "5.0.0", default-features = false, path = "../std" } +schnellru = { version = "0.2.1", optional = true } [dev-dependencies] array-bytes = "4.1" @@ -50,7 +50,7 @@ std = [ "hashbrown", "hash-db/std", "lazy_static", - "lru", + "schnellru", "memory-db/std", "nohash-hasher", "parking_lot", diff --git a/primitives/trie/src/cache/mod.rs b/primitives/trie/src/cache/mod.rs index 85539cf626857..3c1e5b8d0ff0b 100644 --- a/primitives/trie/src/cache/mod.rs +++ b/primitives/trie/src/cache/mod.rs @@ -36,13 +36,17 @@ use crate::{Error, NodeCodec}; use hash_db::Hasher; -use hashbrown::HashSet; use nohash_hasher::BuildNoHashHasher; -use parking_lot::{Mutex, MutexGuard, RwLockReadGuard}; -use shared_cache::{SharedValueCache, ValueCacheKey}; +use parking_lot::{Mutex, MutexGuard}; +use schnellru::LruMap; +use shared_cache::{ValueCacheKey, ValueCacheRef}; use std::{ - collections::{hash_map::Entry as MapEntry, HashMap}, - sync::Arc, + 
collections::HashMap, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, + time::Duration, }; use trie_db::{node::NodeOwned, CachedValue}; @@ -50,29 +54,267 @@ mod shared_cache; pub use shared_cache::SharedTrieCache; -use self::shared_cache::{SharedTrieCacheInner, ValueCacheKeyHash}; +use self::shared_cache::ValueCacheKeyHash; const LOG_TARGET: &str = "trie-cache"; -/// The size of the cache. +/// The maximum amount of time we'll wait trying to acquire the shared cache lock +/// when the local cache is dropped and synchronized with the share cache. +/// +/// This is just a failsafe; normally this should never trigger. +const SHARED_CACHE_WRITE_LOCK_TIMEOUT: Duration = Duration::from_millis(100); + +/// The maximum number of existing keys in the shared cache that a single local cache +/// can promote to the front of the LRU cache in one go. +/// +/// If we have a big shared cache and the local cache hits all of those keys we don't +/// want to spend forever bumping all of them. +const SHARED_NODE_CACHE_MAX_PROMOTED_KEYS: u32 = 1792; +/// Same as [`SHARED_NODE_CACHE_MAX_PROMOTED_KEYS`]. +const SHARED_VALUE_CACHE_MAX_PROMOTED_KEYS: u32 = 1792; + +/// The maximum portion of the shared cache (in percent) that a single local +/// cache can replace in one go. +/// +/// We don't want a single local cache instance to have the ability to replace +/// everything in the shared cache. +const SHARED_NODE_CACHE_MAX_REPLACE_PERCENT: usize = 33; +/// Same as [`SHARED_NODE_CACHE_MAX_REPLACE_PERCENT`]. +const SHARED_VALUE_CACHE_MAX_REPLACE_PERCENT: usize = 33; + +/// The maximum inline capacity of the local cache, in bytes. +/// +/// This is just an upper limit; since the maps are resized in powers of two +/// their actual size will most likely not exactly match this. +const LOCAL_NODE_CACHE_MAX_INLINE_SIZE: usize = 512 * 1024; +/// Same as [`LOCAL_NODE_CACHE_MAX_INLINE_SIZE`]. +const LOCAL_VALUE_CACHE_MAX_INLINE_SIZE: usize = 512 * 1024; + +/// The maximum size of the memory allocated on the heap by the local cache, in bytes. +const LOCAL_NODE_CACHE_MAX_HEAP_SIZE: usize = 2 * 1024 * 1024; +/// Same as [`LOCAL_NODE_CACHE_MAX_HEAP_SIZE`]. +const LOCAL_VALUE_CACHE_MAX_HEAP_SIZE: usize = 4 * 1024 * 1024; + +/// The size of the shared cache. #[derive(Debug, Clone, Copy)] -pub enum CacheSize { - /// Do not limit the cache size. - Unlimited, - /// Let the cache in maximum use the given amount of bytes. - Maximum(usize), -} +pub struct CacheSize(usize); impl CacheSize { - /// Returns `true` if the `current_size` exceeds the allowed size. - fn exceeds(&self, current_size: usize) -> bool { - match self { - Self::Unlimited => false, - Self::Maximum(max) => *max < current_size, + /// An unlimited cache size. + pub const fn unlimited() -> Self { + CacheSize(usize::MAX) + } + + /// A cache size `bytes` big. + pub const fn new(bytes: usize) -> Self { + CacheSize(bytes) + } +} + +/// A limiter for the local node cache. This makes sure the local cache doesn't grow too big. +#[derive(Default)] +pub struct LocalNodeCacheLimiter { + /// The current size (in bytes) of data allocated by this cache on the heap. + /// + /// This doesn't include the size of the map itself. 
+ current_heap_size: usize, +} + +impl schnellru::Limiter> for LocalNodeCacheLimiter +where + H: AsRef<[u8]> + std::fmt::Debug, +{ + type KeyToInsert<'a> = H; + type LinkType = u32; + + #[inline] + fn is_over_the_limit(&self, length: usize) -> bool { + // Only enforce the limit if there's more than one element to make sure + // we can always add a new element to the cache. + if length <= 1 { + return false } + + self.current_heap_size > LOCAL_NODE_CACHE_MAX_HEAP_SIZE + } + + #[inline] + fn on_insert<'a>( + &mut self, + _length: usize, + key: H, + cached_node: NodeCached, + ) -> Option<(H, NodeCached)> { + self.current_heap_size += cached_node.heap_size(); + Some((key, cached_node)) + } + + #[inline] + fn on_replace( + &mut self, + _length: usize, + _old_key: &mut H, + _new_key: H, + old_node: &mut NodeCached, + new_node: &mut NodeCached, + ) -> bool { + debug_assert_eq!(_old_key.as_ref().len(), _new_key.as_ref().len()); + self.current_heap_size = + self.current_heap_size + new_node.heap_size() - old_node.heap_size(); + true + } + + #[inline] + fn on_removed(&mut self, _key: &mut H, cached_node: &mut NodeCached) { + self.current_heap_size -= cached_node.heap_size(); + } + + #[inline] + fn on_cleared(&mut self) { + self.current_heap_size = 0; + } + + #[inline] + fn on_grow(&mut self, new_memory_usage: usize) -> bool { + new_memory_usage <= LOCAL_NODE_CACHE_MAX_INLINE_SIZE + } +} + +/// A limiter for the local value cache. This makes sure the local cache doesn't grow too big. +#[derive(Default)] +pub struct LocalValueCacheLimiter { + /// The current size (in bytes) of data allocated by this cache on the heap. + /// + /// This doesn't include the size of the map itself. + current_heap_size: usize, +} + +impl schnellru::Limiter, CachedValue> for LocalValueCacheLimiter +where + H: AsRef<[u8]>, +{ + type KeyToInsert<'a> = ValueCacheRef<'a, H>; + type LinkType = u32; + + #[inline] + fn is_over_the_limit(&self, length: usize) -> bool { + // Only enforce the limit if there's more than one element to make sure + // we can always add a new element to the cache. + if length <= 1 { + return false + } + + self.current_heap_size > LOCAL_VALUE_CACHE_MAX_HEAP_SIZE + } + + #[inline] + fn on_insert( + &mut self, + _length: usize, + key: Self::KeyToInsert<'_>, + value: CachedValue, + ) -> Option<(ValueCacheKey, CachedValue)> { + self.current_heap_size += key.storage_key.len(); + Some((key.into(), value)) + } + + #[inline] + fn on_replace( + &mut self, + _length: usize, + _old_key: &mut ValueCacheKey, + _new_key: ValueCacheRef, + _old_value: &mut CachedValue, + _new_value: &mut CachedValue, + ) -> bool { + debug_assert_eq!(_old_key.storage_key.len(), _new_key.storage_key.len()); + true + } + + #[inline] + fn on_removed(&mut self, key: &mut ValueCacheKey, _: &mut CachedValue) { + self.current_heap_size -= key.storage_key.len(); + } + + #[inline] + fn on_cleared(&mut self) { + self.current_heap_size = 0; + } + + #[inline] + fn on_grow(&mut self, new_memory_usage: usize) -> bool { + new_memory_usage <= LOCAL_VALUE_CACHE_MAX_INLINE_SIZE + } +} + +/// A struct to gather hit/miss stats to aid in debugging the performance of the cache. 
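+///
+/// As a sketch (illustrative only), a hit rate is derived from these counters
+/// exactly as in the `Display` implementation below:
+///
+/// ```ignore
+/// let hits = stats.local_hits.load(Ordering::Relaxed);
+/// let attempts = stats.local_fetch_attempts.load(Ordering::Relaxed);
+/// let local_hit_rate_percent = (hits as f32 / attempts as f32) * 100.0;
+/// ```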
+#[derive(Default)] +struct HitStats { + shared_hits: AtomicU64, + shared_fetch_attempts: AtomicU64, + local_hits: AtomicU64, + local_fetch_attempts: AtomicU64, +} + +impl std::fmt::Display for HitStats { + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + let shared_hits = self.shared_hits.load(Ordering::Relaxed); + let shared_fetch_attempts = self.shared_fetch_attempts.load(Ordering::Relaxed); + let local_hits = self.local_hits.load(Ordering::Relaxed); + let local_fetch_attempts = self.local_fetch_attempts.load(Ordering::Relaxed); + if shared_fetch_attempts == 0 && local_hits == 0 { + write!(fmt, "empty") + } else { + let percent_local = (local_hits as f32 / local_fetch_attempts as f32) * 100.0; + let percent_shared = (shared_hits as f32 / shared_fetch_attempts as f32) * 100.0; + write!( + fmt, + "local hit rate = {}% [{}/{}], shared hit rate = {}% [{}/{}]", + percent_local as u32, + local_hits, + local_fetch_attempts, + percent_shared as u32, + shared_hits, + shared_fetch_attempts + ) + } + } +} + +/// A struct to gather hit/miss stats for the node cache and the value cache. +#[derive(Default)] +struct TrieHitStats { + node_cache: HitStats, + value_cache: HitStats, +} + +/// An internal struct to store the cached trie nodes. +pub(crate) struct NodeCached { + /// The cached node. + pub node: NodeOwned, + /// Whether this node was fetched from the shared cache or not. + pub is_from_shared_cache: bool, +} + +impl NodeCached { + /// Returns the number of bytes allocated on the heap by this node. + fn heap_size(&self) -> usize { + self.node.size_in_bytes() - std::mem::size_of::>() } } +type NodeCacheMap = LruMap, LocalNodeCacheLimiter, schnellru::RandomState>; + +type ValueCacheMap = LruMap< + ValueCacheKey, + CachedValue, + LocalValueCacheLimiter, + BuildNoHashHasher>, +>; + +type ValueAccessSet = + LruMap>; + /// The local trie cache. /// /// This cache should be used per state instance created by the backend. One state instance is @@ -86,21 +328,13 @@ impl CacheSize { pub struct LocalTrieCache { /// The shared trie cache that created this instance. shared: SharedTrieCache, + /// The local cache for the trie nodes. - node_cache: Mutex>>, - /// Keeps track of all the trie nodes accessed in the shared cache. - /// - /// This will be used to ensure that these nodes are brought to the front of the lru when this - /// local instance is merged back to the shared cache. - shared_node_cache_access: Mutex>, + node_cache: Mutex>, + /// The local cache for the values. - value_cache: Mutex< - HashMap< - ValueCacheKey<'static, H::Out>, - CachedValue, - BuildNoHashHasher>, - >, - >, + value_cache: Mutex>, + /// Keeps track of all values accessed in the shared cache. /// /// This will be used to ensure that these nodes are brought to the front of the lru when this @@ -109,8 +343,9 @@ pub struct LocalTrieCache { /// as we only use this set to update the lru position it is fine, even if we bring the wrong /// value to the top. The important part is that we always get the correct value from the value /// cache for a given key. - shared_value_cache_access: - Mutex>>, + shared_value_cache_access: Mutex, + + stats: TrieHitStats, } impl LocalTrieCache { @@ -118,19 +353,18 @@ impl LocalTrieCache { /// /// The given `storage_root` needs to be the storage root of the trie this cache is used for. 
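	///
	/// A minimal usage sketch (illustrative only; assumes the `TrieDBBuilder`
	/// API from `trie-db`, which is how the state backends wire this in):
	///
	/// ```ignore
	/// let mut trie_cache = local_cache.as_trie_db_cache(storage_root);
	/// let trie = TrieDBBuilder::new(&db, &storage_root)
	/// 	.with_cache(&mut trie_cache)
	/// 	.build();
	/// ```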
pub fn as_trie_db_cache(&self, storage_root: H::Out) -> TrieCache<'_, H> { - let shared_inner = self.shared.read_lock_inner(); - let value_cache = ValueCache::ForStorageRoot { storage_root, local_value_cache: self.value_cache.lock(), shared_value_cache_access: self.shared_value_cache_access.lock(), + buffered_value: None, }; TrieCache { - shared_inner, + shared_cache: self.shared.clone(), local_cache: self.node_cache.lock(), value_cache, - shared_node_cache_access: self.shared_node_cache_access.lock(), + stats: &self.stats, } } @@ -143,63 +377,89 @@ impl LocalTrieCache { /// would break because of this. pub fn as_trie_db_mut_cache(&self) -> TrieCache<'_, H> { TrieCache { - shared_inner: self.shared.read_lock_inner(), + shared_cache: self.shared.clone(), local_cache: self.node_cache.lock(), value_cache: ValueCache::Fresh(Default::default()), - shared_node_cache_access: self.shared_node_cache_access.lock(), + stats: &self.stats, } } } impl Drop for LocalTrieCache { fn drop(&mut self) { - let mut shared_inner = self.shared.write_lock_inner(); + tracing::debug!( + target: LOG_TARGET, + "Local node trie cache dropped: {}", + self.stats.node_cache + ); + + tracing::debug!( + target: LOG_TARGET, + "Local value trie cache dropped: {}", + self.stats.value_cache + ); - shared_inner - .node_cache_mut() - .update(self.node_cache.lock().drain(), self.shared_node_cache_access.lock().drain()); + let mut shared_inner = match self.shared.write_lock_inner() { + Some(inner) => inner, + None => { + tracing::warn!( + target: LOG_TARGET, + "Timeout while trying to acquire a write lock for the shared trie cache" + ); + return + }, + }; - shared_inner - .value_cache_mut() - .update(self.value_cache.lock().drain(), self.shared_value_cache_access.lock().drain()); + shared_inner.node_cache_mut().update(self.node_cache.get_mut().drain()); + + shared_inner.value_cache_mut().update( + self.value_cache.get_mut().drain(), + self.shared_value_cache_access.get_mut().drain().map(|(key, ())| key), + ); } } /// The abstraction of the value cache for the [`TrieCache`]. -enum ValueCache<'a, H> { +enum ValueCache<'a, H: Hasher> { /// The value cache is fresh, aka not yet associated to any storage root. /// This is used for example when a new trie is being build, to cache new values. - Fresh(HashMap, CachedValue>), + Fresh(HashMap, CachedValue>), /// The value cache is already bound to a specific storage root. ForStorageRoot { - shared_value_cache_access: MutexGuard< - 'a, - HashSet>, - >, - local_value_cache: MutexGuard< - 'a, - HashMap< - ValueCacheKey<'static, H>, - CachedValue, - nohash_hasher::BuildNoHashHasher>, - >, - >, - storage_root: H, + shared_value_cache_access: MutexGuard<'a, ValueAccessSet>, + local_value_cache: MutexGuard<'a, ValueCacheMap>, + storage_root: H::Out, + // The shared value cache needs to be temporarily locked when reading from it + // so we need to clone the value that is returned, but we need to be able to + // return a reference to the value, so we just buffer it here. + buffered_value: Option>, }, } -impl + std::hash::Hash + Eq + Clone + Copy> ValueCache<'_, H> { +impl ValueCache<'_, H> { /// Get the value for the given `key`. 
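	///
	/// The lookup is local-first, then shared. A shared hit is cloned into
	/// `buffered_value` (so the shared lock is only held briefly) and its hash
	/// is recorded for later promotion. A sketch of the shared half of the
	/// lookup (illustrative only, mirroring the body below):
	///
	/// ```ignore
	/// let hash = ValueCacheKey::hash_data(key, storage_root);
	/// if let Some(value) = shared_cache.peek_value_by_hash(hash, storage_root, key) {
	/// 	shared_value_cache_access.insert(hash, ());
	/// 	*buffered_value = Some(value.clone());
	/// }
	/// ```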
fn get<'a>( &'a mut self, key: &[u8], - shared_value_cache: &'a SharedValueCache, - ) -> Option<&CachedValue> { - match self { - Self::Fresh(map) => map.get(key), - Self::ForStorageRoot { local_value_cache, shared_value_cache_access, storage_root } => { - let key = ValueCacheKey::new_ref(key, *storage_root); + shared_cache: &SharedTrieCache, + stats: &HitStats, + ) -> Option<&CachedValue> { + stats.local_fetch_attempts.fetch_add(1, Ordering::Relaxed); + match self { + Self::Fresh(map) => + if let Some(value) = map.get(key) { + stats.local_hits.fetch_add(1, Ordering::Relaxed); + Some(value) + } else { + None + }, + Self::ForStorageRoot { + local_value_cache, + shared_value_cache_access, + storage_root, + buffered_value, + } => { // We first need to look up in the local cache and then the shared cache. // It can happen that some value is cached in the shared cache, but the // weak reference of the data can not be upgraded anymore. This for example @@ -207,35 +467,39 @@ impl + std::hash::Hash + Eq + Clone + Copy> ValueCache<'_, H> { // // So, the logic of the trie would lookup the data and the node and store both // in our local caches. - local_value_cache - .get(unsafe { - // SAFETY - // - // We need to convert the lifetime to make the compiler happy. However, as - // we only use the `key` to looking up the value this lifetime conversion is - // safe. - std::mem::transmute::<&ValueCacheKey<'_, H>, &ValueCacheKey<'static, H>>( - &key, - ) - }) - .or_else(|| { - shared_value_cache.get(&key).map(|v| { - shared_value_cache_access.insert(key.get_hash()); - v - }) - }) + + let hash = ValueCacheKey::hash_data(key, storage_root); + + if let Some(value) = local_value_cache + .peek_by_hash(hash.raw(), |existing_key, _| { + existing_key.is_eq(storage_root, key) + }) { + stats.local_hits.fetch_add(1, Ordering::Relaxed); + + return Some(value) + } + + stats.shared_fetch_attempts.fetch_add(1, Ordering::Relaxed); + if let Some(value) = shared_cache.peek_value_by_hash(hash, storage_root, key) { + stats.shared_hits.fetch_add(1, Ordering::Relaxed); + shared_value_cache_access.insert(hash, ()); + *buffered_value = Some(value.clone()); + return buffered_value.as_ref() + } + + None }, } } /// Insert some new `value` under the given `key`. - fn insert(&mut self, key: &[u8], value: CachedValue) { + fn insert(&mut self, key: &[u8], value: CachedValue) { match self { Self::Fresh(map) => { map.insert(key.into(), value); }, Self::ForStorageRoot { local_value_cache, storage_root, .. } => { - local_value_cache.insert(ValueCacheKey::new_value(key, *storage_root), value); + local_value_cache.insert(ValueCacheRef::new(key, *storage_root), value); }, } } @@ -247,10 +511,10 @@ impl + std::hash::Hash + Eq + Clone + Copy> ValueCache<'_, H> { /// be merged back into the [`LocalTrieCache`] with [`Self::merge_into`] after all operations are /// done. 
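 ///
 /// A sketch of the intended lifecycle (illustrative only):
 ///
 /// ```ignore
 /// let mut trie_cache = local_cache.as_trie_db_mut_cache();
 /// // ... build a new trie and populate `trie_cache` with fresh values ...
 /// trie_cache.merge_into(&local_cache, new_storage_root);
 /// ```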
pub struct TrieCache<'a, H: Hasher> { - shared_inner: RwLockReadGuard<'a, SharedTrieCacheInner>, - shared_node_cache_access: MutexGuard<'a, HashSet>, - local_cache: MutexGuard<'a, HashMap>>, - value_cache: ValueCache<'a, H::Out>, + shared_cache: SharedTrieCache, + local_cache: MutexGuard<'a, NodeCacheMap>, + value_cache: ValueCache<'a, H>, + stats: &'a TrieHitStats, } impl<'a, H: Hasher> TrieCache<'a, H> { @@ -267,16 +531,11 @@ impl<'a, H: Hasher> TrieCache<'a, H> { let mut value_cache = local.value_cache.lock(); let partial_hash = ValueCacheKey::hash_partial_data(&storage_root); - cache - .into_iter() - .map(|(k, v)| { - let hash = - ValueCacheKeyHash::from_hasher_and_storage_key(partial_hash.clone(), &k); - (ValueCacheKey::Value { storage_key: k, storage_root, hash }, v) - }) - .for_each(|(k, v)| { - value_cache.insert(k, v); - }); + cache.into_iter().for_each(|(k, v)| { + let hash = ValueCacheKeyHash::from_hasher_and_storage_key(partial_hash.clone(), &k); + let k = ValueCacheRef { storage_root, storage_key: &k, hash }; + value_cache.insert(k, v); + }); } } } @@ -287,53 +546,85 @@ impl<'a, H: Hasher> trie_db::TrieCache> for TrieCache<'a, H> { hash: H::Out, fetch_node: &mut dyn FnMut() -> trie_db::Result, H::Out, Error>, ) -> trie_db::Result<&NodeOwned, H::Out, Error> { - if let Some(res) = self.shared_inner.node_cache().get(&hash) { - tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from shared cache"); - self.shared_node_cache_access.insert(hash); - return Ok(res) - } + let mut is_local_cache_hit = true; + self.stats.node_cache.local_fetch_attempts.fetch_add(1, Ordering::Relaxed); - match self.local_cache.entry(hash) { - MapEntry::Occupied(res) => { - tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from local cache"); - Ok(res.into_mut()) - }, - MapEntry::Vacant(vacant) => { - let node = (*fetch_node)(); + // First try to grab the node from the local cache. + let node = self.local_cache.get_or_insert_fallible(hash, || { + is_local_cache_hit = false; - tracing::trace!( - target: LOG_TARGET, - ?hash, - fetch_successful = node.is_ok(), - "Node not found, needed to fetch it." - ); + // It was not in the local cache; try the shared cache. + self.stats.node_cache.shared_fetch_attempts.fetch_add(1, Ordering::Relaxed); + if let Some(node) = self.shared_cache.peek_node(&hash) { + self.stats.node_cache.shared_hits.fetch_add(1, Ordering::Relaxed); + tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from shared cache"); - Ok(vacant.insert(node?)) - }, + return Ok(NodeCached:: { node: node.clone(), is_from_shared_cache: true }) + } + + // It was not in the shared cache; try fetching it from the database. + match fetch_node() { + Ok(node) => { + tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from database"); + Ok(NodeCached:: { node, is_from_shared_cache: false }) + }, + Err(error) => { + tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from database failed"); + Err(error) + }, + } + }); + + if is_local_cache_hit { + tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from local cache"); + self.stats.node_cache.local_hits.fetch_add(1, Ordering::Relaxed); } + + Ok(&node? 
+ .expect("you can always insert at least one element into the local cache; qed") + .node) } fn get_node(&mut self, hash: &H::Out) -> Option<&NodeOwned> { - if let Some(node) = self.shared_inner.node_cache().get(hash) { - tracing::trace!(target: LOG_TARGET, ?hash, "Getting node from shared cache"); - self.shared_node_cache_access.insert(*hash); - return Some(node) - } + let mut is_local_cache_hit = true; + self.stats.node_cache.local_fetch_attempts.fetch_add(1, Ordering::Relaxed); - let res = self.local_cache.get(hash); + // First try to grab the node from the local cache. + let cached_node = self.local_cache.get_or_insert_fallible(*hash, || { + is_local_cache_hit = false; - tracing::trace!( - target: LOG_TARGET, - ?hash, - found = res.is_some(), - "Getting node from local cache" - ); + // It was not in the local cache; try the shared cache. + self.stats.node_cache.shared_fetch_attempts.fetch_add(1, Ordering::Relaxed); + if let Some(node) = self.shared_cache.peek_node(&hash) { + self.stats.node_cache.shared_hits.fetch_add(1, Ordering::Relaxed); + tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from shared cache"); - res + Ok(NodeCached:: { node: node.clone(), is_from_shared_cache: true }) + } else { + tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from cache failed"); + + Err(()) + } + }); + + if is_local_cache_hit { + tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from local cache"); + self.stats.node_cache.local_hits.fetch_add(1, Ordering::Relaxed); + } + + match cached_node { + Ok(Some(cached_node)) => Some(&cached_node.node), + Ok(None) => { + unreachable!( + "you can always insert at least one element into the local cache; qed" + ); + }, + Err(()) => None, + } } fn lookup_value_for_key(&mut self, key: &[u8]) -> Option<&CachedValue> { - let res = self.value_cache.get(key, self.shared_inner.value_cache()); + let res = self.value_cache.get(key, &self.shared_cache, &self.stats.value_cache); tracing::trace!( target: LOG_TARGET, @@ -352,7 +643,7 @@ impl<'a, H: Hasher> trie_db::TrieCache> for TrieCache<'a, H> { "Caching value for key", ); - self.value_cache.insert(key.into(), data); + self.value_cache.insert(key, data); } } @@ -369,7 +660,7 @@ mod tests { const TEST_DATA: &[(&[u8], &[u8])] = &[(b"key1", b"val1"), (b"key2", &[2; 64]), (b"key3", b"val3"), (b"key4", &[4; 64])]; const CACHE_SIZE_RAW: usize = 1024 * 10; - const CACHE_SIZE: CacheSize = CacheSize::Maximum(CACHE_SIZE_RAW); + const CACHE_SIZE: CacheSize = CacheSize::new(CACHE_SIZE_RAW); fn create_trie() -> (MemoryDB, TrieHash) { let mut db = MemoryDB::default(); @@ -418,7 +709,7 @@ mod tests { let fake_data = Bytes::from(&b"fake_data"[..]); let local_cache = shared_cache.local_cache(); - shared_cache.write_lock_inner().value_cache_mut().lru.put( + shared_cache.write_lock_inner().unwrap().value_cache_mut().lru.insert( ValueCacheKey::new_value(TEST_DATA[1].0, root), (fake_data.clone(), Default::default()).into(), ); @@ -591,7 +882,7 @@ mod tests { .lru .iter() .map(|d| d.0) - .all(|l| TEST_DATA.iter().any(|d| l.storage_key().unwrap() == d.0))); + .all(|l| TEST_DATA.iter().any(|d| &*l.storage_key == d.0))); // Run this in a loop. The first time we check that with the filled value cache, // the expected values are at the top of the LRU. @@ -617,7 +908,7 @@ mod tests { .iter() .take(2) .map(|d| d.0) - .all(|l| { TEST_DATA.iter().take(2).any(|d| l.storage_key().unwrap() == d.0) })); + .all(|l| { TEST_DATA.iter().take(2).any(|d| &*l.storage_key == d.0) })); // Delete the value cache, so that we access the nodes. 
shared_cache.reset_value_cache(); @@ -684,9 +975,6 @@ mod tests { } } - let node_cache_size = shared_cache.read_lock_inner().node_cache().size_in_bytes; - let value_cache_size = shared_cache.read_lock_inner().value_cache().size_in_bytes; - - assert!(node_cache_size + value_cache_size < CACHE_SIZE_RAW); + assert!(shared_cache.used_memory_size() < CACHE_SIZE_RAW); } } diff --git a/primitives/trie/src/cache/shared_cache.rs b/primitives/trie/src/cache/shared_cache.rs index 9d4d36b83a28a..8c60d5043d062 100644 --- a/primitives/trie/src/cache/shared_cache.rs +++ b/primitives/trie/src/cache/shared_cache.rs @@ -17,15 +17,14 @@ ///! Provides the [`SharedNodeCache`], the [`SharedValueCache`] and the [`SharedTrieCache`] ///! that combines both caches and is exported to the outside. -use super::{CacheSize, LOG_TARGET}; +use super::{CacheSize, NodeCached}; use hash_db::Hasher; use hashbrown::{hash_set::Entry as SetEntry, HashSet}; -use lru::LruCache; use nohash_hasher::BuildNoHashHasher; -use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard}; +use parking_lot::{Mutex, RwLock, RwLockWriteGuard}; +use schnellru::LruMap; use std::{ hash::{BuildHasher, Hasher as _}, - mem, sync::Arc, }; use trie_db::{node::NodeOwned, CachedValue}; @@ -34,94 +33,300 @@ lazy_static::lazy_static! { static ref RANDOM_STATE: ahash::RandomState = ahash::RandomState::default(); } -/// No hashing [`LruCache`]. -type NoHashingLruCache = LruCache>; +pub struct SharedNodeCacheLimiter { + /// The maximum size (in bytes) the cache can hold inline. + /// + /// This space is always consumed whether there are any items in the map or not. + max_inline_size: usize, + + /// The maximum size (in bytes) the cache can hold on the heap. + max_heap_size: usize, + + /// The current size (in bytes) of data allocated by this cache on the heap. + /// + /// This doesn't include the size of the map itself. + heap_size: usize, + + /// A counter with the number of elements that got evicted from the cache. + /// + /// Reset to zero before every update. + items_evicted: usize, + + /// The maximum number of elements that we allow to be evicted. + /// + /// Reset on every update. + max_items_evicted: usize, +} + +impl schnellru::Limiter> for SharedNodeCacheLimiter +where + H: AsRef<[u8]>, +{ + type KeyToInsert<'a> = H; + type LinkType = u32; + + #[inline] + fn is_over_the_limit(&self, _length: usize) -> bool { + // Once we hit the limit of max items evicted this will return `false` and prevent + // any further evictions, but this is fine because the outer loop which inserts + // items into this cache will just detect this and stop inserting new items. + self.items_evicted <= self.max_items_evicted && self.heap_size > self.max_heap_size + } + + #[inline] + fn on_insert( + &mut self, + _length: usize, + key: Self::KeyToInsert<'_>, + node: NodeOwned, + ) -> Option<(H, NodeOwned)> { + let new_item_heap_size = node.size_in_bytes() - std::mem::size_of::>(); + if new_item_heap_size > self.max_heap_size { + // Item's too big to add even if the cache's empty; bail. + return None + } + + self.heap_size += new_item_heap_size; + Some((key, node)) + } + + #[inline] + fn on_replace( + &mut self, + _length: usize, + _old_key: &mut H, + _new_key: H, + old_node: &mut NodeOwned, + new_node: &mut NodeOwned, + ) -> bool { + debug_assert_eq!(_old_key.as_ref(), _new_key.as_ref()); + + let new_item_heap_size = new_node.size_in_bytes() - std::mem::size_of::>(); + if new_item_heap_size > self.max_heap_size { + // Item's too big to add even if the cache's empty; bail. 
+ return false + } + + let old_item_heap_size = old_node.size_in_bytes() - std::mem::size_of::>(); + self.heap_size = self.heap_size - old_item_heap_size + new_item_heap_size; + true + } + + #[inline] + fn on_cleared(&mut self) { + self.heap_size = 0; + } + + #[inline] + fn on_removed(&mut self, _: &mut H, node: &mut NodeOwned) { + self.heap_size -= node.size_in_bytes() - std::mem::size_of::>(); + self.items_evicted += 1; + } + + #[inline] + fn on_grow(&mut self, new_memory_usage: usize) -> bool { + new_memory_usage <= self.max_inline_size + } +} + +pub struct SharedValueCacheLimiter { + /// The maximum size (in bytes) the cache can hold inline. + /// + /// This space is always consumed whether there are any items in the map or not. + max_inline_size: usize, + + /// The maximum size (in bytes) the cache can hold on the heap. + max_heap_size: usize, + + /// The current size (in bytes) of data allocated by this cache on the heap. + /// + /// This doesn't include the size of the map itself. + heap_size: usize, + + /// A set with all of the keys deduplicated to save on memory. + known_storage_keys: HashSet>, + + /// A counter with the number of elements that got evicted from the cache. + /// + /// Reset to zero before every update. + items_evicted: usize, + + /// The maximum number of elements that we allow to be evicted. + /// + /// Reset on every update. + max_items_evicted: usize, +} + +impl schnellru::Limiter, CachedValue> for SharedValueCacheLimiter +where + H: AsRef<[u8]>, +{ + type KeyToInsert<'a> = ValueCacheKey; + type LinkType = u32; + + #[inline] + fn is_over_the_limit(&self, _length: usize) -> bool { + self.items_evicted <= self.max_items_evicted && self.heap_size > self.max_heap_size + } + + #[inline] + fn on_insert( + &mut self, + _length: usize, + mut key: Self::KeyToInsert<'_>, + value: CachedValue, + ) -> Option<(ValueCacheKey, CachedValue)> { + match self.known_storage_keys.entry(key.storage_key.clone()) { + SetEntry::Vacant(entry) => { + let new_item_heap_size = key.storage_key.len(); + if new_item_heap_size > self.max_heap_size { + // Item's too big to add even if the cache's empty; bail. + return None + } + + self.heap_size += new_item_heap_size; + entry.insert(); + }, + SetEntry::Occupied(entry) => { + key.storage_key = entry.get().clone(); + }, + } + + Some((key, value)) + } + + #[inline] + fn on_replace( + &mut self, + _length: usize, + _old_key: &mut ValueCacheKey, + _new_key: ValueCacheKey, + _old_value: &mut CachedValue, + _new_value: &mut CachedValue, + ) -> bool { + debug_assert_eq!(_new_key.storage_key, _old_key.storage_key); + true + } + + #[inline] + fn on_removed(&mut self, key: &mut ValueCacheKey, _: &mut CachedValue) { + if Arc::strong_count(&key.storage_key) == 2 { + // There are only two instances of this key: + // 1) one memoized in `known_storage_keys`, + // 2) one inside the map. + // + // This means that after this remove goes through the `Arc` will be deallocated. + self.heap_size -= key.storage_key.len(); + self.known_storage_keys.remove(&key.storage_key); + } + self.items_evicted += 1; + } + + #[inline] + fn on_cleared(&mut self) { + self.heap_size = 0; + self.known_storage_keys.clear(); + } + + #[inline] + fn on_grow(&mut self, new_memory_usage: usize) -> bool { + new_memory_usage <= self.max_inline_size + } +} + +type SharedNodeCacheMap = + LruMap, SharedNodeCacheLimiter, schnellru::RandomState>; /// The shared node cache. /// -/// Internally this stores all cached nodes in a [`LruCache`]. 
It ensures that when updating the +/// Internally this stores all cached nodes in a [`LruMap`]. It ensures that when updating the /// cache, that the cache stays within its allowed bounds. -pub(super) struct SharedNodeCache { +pub(super) struct SharedNodeCache +where + H: AsRef<[u8]>, +{ /// The cached nodes, ordered by least recently used. - pub(super) lru: LruCache>, - /// The size of [`Self::lru`] in bytes. - pub(super) size_in_bytes: usize, - /// The maximum cache size of [`Self::lru`]. - maximum_cache_size: CacheSize, + pub(super) lru: SharedNodeCacheMap, } impl + Eq + std::hash::Hash> SharedNodeCache { /// Create a new instance. - fn new(cache_size: CacheSize) -> Self { - Self { lru: LruCache::unbounded(), size_in_bytes: 0, maximum_cache_size: cache_size } + fn new(max_inline_size: usize, max_heap_size: usize) -> Self { + Self { + lru: LruMap::new(SharedNodeCacheLimiter { + max_inline_size, + max_heap_size, + heap_size: 0, + items_evicted: 0, + max_items_evicted: 0, // Will be set during `update`. + }), + } } - /// Get the node for `key`. - /// - /// This doesn't change the least recently order in the internal [`LruCache`]. - pub fn get(&self, key: &H) -> Option<&NodeOwned> { - self.lru.peek(key) - } + /// Update the cache with the `list` of nodes which were either newly added or accessed. + pub fn update(&mut self, list: impl IntoIterator)>) { + let mut access_count = 0; + let mut add_count = 0; - /// Update the cache with the `added` nodes and the `accessed` nodes. - /// - /// The `added` nodes are the ones that have been collected by doing operations on the trie and - /// now should be stored in the shared cache. The `accessed` nodes are only referenced by hash - /// and represent the nodes that were retrieved from this shared cache through [`Self::get`]. - /// These `accessed` nodes are being put to the front of the internal [`LruCache`] like the - /// `added` ones. - /// - /// After the internal [`LruCache`] was updated, it is ensured that the internal [`LruCache`] is - /// inside its bounds ([`Self::maximum_size_in_bytes`]). - pub fn update( - &mut self, - added: impl IntoIterator)>, - accessed: impl IntoIterator, - ) { - let update_size_in_bytes = |size_in_bytes: &mut usize, key: &H, node: &NodeOwned| { - if let Some(new_size_in_bytes) = - size_in_bytes.checked_sub(key.as_ref().len() + node.size_in_bytes()) - { - *size_in_bytes = new_size_in_bytes; - } else { - *size_in_bytes = 0; - tracing::error!(target: LOG_TARGET, "`SharedNodeCache` underflow detected!",); - } - }; + self.lru.limiter_mut().items_evicted = 0; + self.lru.limiter_mut().max_items_evicted = + self.lru.len() * 100 / super::SHARED_NODE_CACHE_MAX_REPLACE_PERCENT; - accessed.into_iter().for_each(|key| { - // Access every node in the lru to put it to the front. - self.lru.get(&key); - }); - added.into_iter().for_each(|(key, node)| { - self.size_in_bytes += key.as_ref().len() + node.size_in_bytes(); + for (key, cached_node) in list { + if cached_node.is_from_shared_cache { + if self.lru.get(&key).is_some() { + access_count += 1; - if let Some((r_key, r_node)) = self.lru.push(key, node) { - update_size_in_bytes(&mut self.size_in_bytes, &r_key, &r_node); - } + if access_count >= super::SHARED_NODE_CACHE_MAX_PROMOTED_KEYS { + // Stop when we've promoted a large enough number of items. + break + } - // Directly ensure that we respect the maximum size. By doing it directly here we ensure - // that the internal map of the [`LruCache`] doesn't grow too much. 
- while self.maximum_cache_size.exceeds(self.size_in_bytes) { - // This should always be `Some(_)`, otherwise something is wrong! - if let Some((key, node)) = self.lru.pop_lru() { - update_size_in_bytes(&mut self.size_in_bytes, &key, &node); + continue } } - }); + + self.lru.insert(key, cached_node.node); + add_count += 1; + + if self.lru.limiter().items_evicted > self.lru.limiter().max_items_evicted { + // Stop when we've evicted a big enough chunk of the shared cache. + break + } + } + + tracing::debug!( + target: super::LOG_TARGET, + "Updated the shared node cache: {} accesses, {} new values, {}/{} evicted (length = {}, inline size={}/{}, heap size={}/{})", + access_count, + add_count, + self.lru.limiter().items_evicted, + self.lru.limiter().max_items_evicted, + self.lru.len(), + self.lru.memory_usage(), + self.lru.limiter().max_inline_size, + self.lru.limiter().heap_size, + self.lru.limiter().max_heap_size, + ); } /// Reset the cache. fn reset(&mut self) { - self.size_in_bytes = 0; self.lru.clear(); } } /// The hash of [`ValueCacheKey`]. -#[derive(Eq, Clone, Copy)] +#[derive(PartialEq, Eq, Clone, Copy, Hash)] +#[repr(transparent)] pub struct ValueCacheKeyHash(u64); +impl ValueCacheKeyHash { + pub fn raw(self) -> u64 { + self.0 + } +} + impl ValueCacheKeyHash { pub fn from_hasher_and_storage_key( mut hasher: impl std::hash::Hasher, @@ -133,88 +338,75 @@ impl ValueCacheKeyHash { } } -impl PartialEq for ValueCacheKeyHash { - fn eq(&self, other: &Self) -> bool { - self.0 == other.0 - } +impl nohash_hasher::IsEnabled for ValueCacheKeyHash {} + +/// The key type that is being used to address a [`CachedValue`]. +#[derive(Eq)] +pub(super) struct ValueCacheKey { + /// The storage root of the trie this key belongs to. + pub storage_root: H, + /// The key to access the value in the storage. + pub storage_key: Arc<[u8]>, + /// The hash that identifies this instance of `storage_root` and `storage_key`. + pub hash: ValueCacheKeyHash, } -impl std::hash::Hash for ValueCacheKeyHash { - fn hash(&self, state: &mut Hasher) { - state.write_u64(self.0); +/// A borrowed variant of [`ValueCacheKey`]. +pub(super) struct ValueCacheRef<'a, H> { + /// The storage root of the trie this key belongs to. + pub storage_root: H, + /// The key to access the value in the storage. + pub storage_key: &'a [u8], + /// The hash that identifies this instance of `storage_root` and `storage_key`. + pub hash: ValueCacheKeyHash, +} + +impl<'a, H> ValueCacheRef<'a, H> { + pub fn new(storage_key: &'a [u8], storage_root: H) -> Self + where + H: AsRef<[u8]>, + { + let hash = ValueCacheKey::::hash_data(&storage_key, &storage_root); + Self { storage_root, storage_key, hash } } } -impl nohash_hasher::IsEnabled for ValueCacheKeyHash {} +impl<'a, H> From> for ValueCacheKey { + fn from(value: ValueCacheRef<'a, H>) -> Self { + ValueCacheKey { + storage_root: value.storage_root, + storage_key: value.storage_key.into(), + hash: value.hash, + } + } +} -/// A type that can only be constructed inside of this file. -/// -/// It "requires" that the user has read the docs to prevent fuck ups. -#[derive(Eq, PartialEq)] -pub(super) struct IReadTheDocumentation(()); +impl<'a, H: std::hash::Hash> std::hash::Hash for ValueCacheRef<'a, H> { + fn hash(&self, state: &mut Hasher) { + self.hash.hash(state) + } +} -/// The key type that is being used to address a [`CachedValue`]. -/// -/// This type is implemented as `enum` to improve the performance when accessing the value cache. 
-/// The problem being that we need to calculate the `hash` of [`Self`] in worst case three times -/// when trying to find a value in the value cache. First to lookup the local cache, then the shared -/// cache and if we found it in the shared cache a third time to insert it into the list of accessed -/// values. To work around each variant stores the `hash` to identify a unique combination of -/// `storage_key` and `storage_root`. However, be aware that this `hash` can lead to collisions when -/// there are two different `storage_key` and `storage_root` pairs that map to the same `hash`. This -/// type also has the `Hash` variant. This variant should only be used for the use case of updating -/// the lru for a key. Because when using only the `Hash` variant to getting a value from a hash map -/// it could happen that a wrong value is returned when there is another key in the same hash map -/// that maps to the same `hash`. The [`PartialEq`] implementation is written in a way that when one -/// of the two compared instances is the `Hash` variant, we will only compare the hashes. This -/// ensures that we can use the `Hash` variant to bring values up in the lru. -#[derive(Eq)] -pub(super) enum ValueCacheKey<'a, H> { - /// Variant that stores the `storage_key` by value. - Value { - /// The storage root of the trie this key belongs to. - storage_root: H, - /// The key to access the value in the storage. - storage_key: Arc<[u8]>, - /// The hash that identifying this instance of `storage_root` and `storage_key`. - hash: ValueCacheKeyHash, - }, - /// Variant that only references the `storage_key`. - Ref { - /// The storage root of the trie this key belongs to. - storage_root: H, - /// The key to access the value in the storage. - storage_key: &'a [u8], - /// The hash that identifying this instance of `storage_root` and `storage_key`. - hash: ValueCacheKeyHash, - }, - /// Variant that only stores the hash that represents the `storage_root` and `storage_key`. - /// - /// This should be used by caution, because it can lead to accessing the wrong value in a - /// hash map/set when there exists two different `storage_root`s and `storage_key`s that - /// map to the same `hash`. - Hash { hash: ValueCacheKeyHash, _i_read_the_documentation: IReadTheDocumentation }, +impl<'a, H> PartialEq> for ValueCacheRef<'a, H> +where + H: AsRef<[u8]>, +{ + fn eq(&self, rhs: &ValueCacheKey) -> bool { + self.storage_root.as_ref() == rhs.storage_root.as_ref() && + self.storage_key == &*rhs.storage_key + } } -impl<'a, H> ValueCacheKey<'a, H> { +impl ValueCacheKey { /// Constructs [`Self::Value`]. + #[cfg(test)] // Only used in tests. pub fn new_value(storage_key: impl Into>, storage_root: H) -> Self where H: AsRef<[u8]>, { let storage_key = storage_key.into(); let hash = Self::hash_data(&storage_key, &storage_root); - Self::Value { storage_root, storage_key, hash } - } - - /// Constructs [`Self::Ref`]. - pub fn new_ref(storage_key: &'a [u8], storage_root: H) -> Self - where - H: AsRef<[u8]>, - { - let storage_key = storage_key.into(); - let hash = Self::hash_data(storage_key, &storage_root); - Self::Ref { storage_root, storage_key, hash } + Self { storage_root, storage_key, hash } } /// Returns a hasher prepared to build the final hash to identify [`Self`]. @@ -241,231 +433,133 @@ impl<'a, H> ValueCacheKey<'a, H> { ValueCacheKeyHash::from_hasher_and_storage_key(hasher, key) } - /// Returns the `hash` that identifies the current instance. 
- pub fn get_hash(&self) -> ValueCacheKeyHash { - match self { - Self::Value { hash, .. } | Self::Ref { hash, .. } | Self::Hash { hash, .. } => *hash, - } - } - - /// Returns the stored storage root. - pub fn storage_root(&self) -> Option<&H> { - match self { - Self::Value { storage_root, .. } | Self::Ref { storage_root, .. } => Some(storage_root), - Self::Hash { .. } => None, - } - } - - /// Returns the stored storage key. - pub fn storage_key(&self) -> Option<&[u8]> { - match self { - Self::Ref { storage_key, .. } => Some(&storage_key), - Self::Value { storage_key, .. } => Some(storage_key), - Self::Hash { .. } => None, - } + /// Checks whether the key is equal to the given `storage_key` and `storage_root`. + #[inline] + pub fn is_eq(&self, storage_root: &H, storage_key: &[u8]) -> bool + where + H: PartialEq, + { + self.storage_root == *storage_root && *self.storage_key == *storage_key } } -// Implement manually to ensure that the `Value` and `Hash` are treated equally. -impl std::hash::Hash for ValueCacheKey<'_, H> { +// Implement manually so that only `hash` is accessed. +impl std::hash::Hash for ValueCacheKey { fn hash(&self, state: &mut Hasher) { - self.get_hash().hash(state) + self.hash.hash(state) } } -impl nohash_hasher::IsEnabled for ValueCacheKey<'_, H> {} +impl nohash_hasher::IsEnabled for ValueCacheKey {} -// Implement manually to ensure that the `Value` and `Hash` are treated equally. -impl PartialEq for ValueCacheKey<'_, H> { +// Implement manually to not have to compare `hash`. +impl PartialEq for ValueCacheKey { + #[inline] fn eq(&self, other: &Self) -> bool { - // First check if `self` or `other` is only the `Hash`. - // Then we only compare the `hash`. So, there could actually be some collision - // if two different storage roots and keys are mapping to the same key. See the - // [`ValueCacheKey`] docs for more information. - match (self, other) { - (Self::Hash { hash, .. }, Self::Hash { hash: other_hash, .. }) => hash == other_hash, - (Self::Hash { hash, .. }, _) => *hash == other.get_hash(), - (_, Self::Hash { hash: other_hash, .. }) => self.get_hash() == *other_hash, - // If both are not the `Hash` variant, we compare all the values. - _ => - self.get_hash() == other.get_hash() && - self.storage_root() == other.storage_root() && - self.storage_key() == other.storage_key(), - } + self.is_eq(&other.storage_root, &other.storage_key) } } +type SharedValueCacheMap = schnellru::LruMap< + ValueCacheKey, + CachedValue, + SharedValueCacheLimiter, + BuildNoHashHasher>, +>; + /// The shared value cache. /// /// The cache ensures that it stays in the configured size bounds. -pub(super) struct SharedValueCache { +pub(super) struct SharedValueCache +where + H: AsRef<[u8]>, +{ /// The cached nodes, ordered by least recently used. - pub(super) lru: NoHashingLruCache, CachedValue>, - /// The size of [`Self::lru`] in bytes. - pub(super) size_in_bytes: usize, - /// The maximum cache size of [`Self::lru`]. - maximum_cache_size: CacheSize, - /// All known storage keys that are stored in [`Self::lru`]. - /// - /// This is used to de-duplicate keys in memory that use the - /// same [`SharedValueCache::storage_key`], but have a different - /// [`SharedValueCache::storage_root`]. - known_storage_keys: HashSet>, + pub(super) lru: SharedValueCacheMap, } impl> SharedValueCache { /// Create a new instance. 
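	///
	/// Typically constructed via [`SharedTrieCache::new`], which derives both
	/// limits from a single [`CacheSize`] budget; the tests below also build
	/// one directly, along these lines:
	///
	/// ```ignore
	/// let value_cache = SharedValueCache::<sp_core::H256>::new(
	/// 	usize::MAX, // max_inline_size: effectively unbounded inline storage
	/// 	10 * 10,    // max_heap_size: one hundred bytes of key data
	/// );
	/// ```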
- fn new(cache_size: CacheSize) -> Self { + fn new(max_inline_size: usize, max_heap_size: usize) -> Self { Self { - lru: NoHashingLruCache::unbounded_with_hasher(Default::default()), - size_in_bytes: 0, - maximum_cache_size: cache_size, - known_storage_keys: Default::default(), + lru: schnellru::LruMap::with_hasher( + SharedValueCacheLimiter { + max_inline_size, + max_heap_size, + heap_size: 0, + known_storage_keys: Default::default(), + items_evicted: 0, + max_items_evicted: 0, // Will be set during `update`. + }, + Default::default(), + ), } } - /// Get the [`CachedValue`] for `key`. - /// - /// This doesn't change the least recently order in the internal [`LruCache`]. - pub fn get<'a>(&'a self, key: &ValueCacheKey) -> Option<&'a CachedValue> { - debug_assert!( - !matches!(key, ValueCacheKey::Hash { .. }), - "`get` can not be called with `Hash` variant as this may returns the wrong value." - ); - - self.lru.peek(unsafe { - // SAFETY - // - // We need to convert the lifetime to make the compiler happy. However, as - // we only use the `key` to looking up the value this lifetime conversion is - // safe. - mem::transmute::<&ValueCacheKey<'_, H>, &ValueCacheKey<'static, H>>(key) - }) - } - /// Update the cache with the `added` values and the `accessed` values. /// /// The `added` values are the ones that have been collected by doing operations on the trie and /// now should be stored in the shared cache. The `accessed` values are only referenced by the - /// [`ValueCacheKeyHash`] and represent the values that were retrieved from this shared cache - /// through [`Self::get`]. These `accessed` values are being put to the front of the internal - /// [`LruCache`] like the `added` ones. - /// - /// After the internal [`LruCache`] was updated, it is ensured that the internal [`LruCache`] is - /// inside its bounds ([`Self::maximum_size_in_bytes`]). + /// [`ValueCacheKeyHash`] and represent the values that were retrieved from this shared cache. + /// These `accessed` values are being put to the front of the internal [`LruMap`] like the + /// `added` ones. pub fn update( &mut self, - added: impl IntoIterator, CachedValue)>, + added: impl IntoIterator, CachedValue)>, accessed: impl IntoIterator, ) { - // The base size in memory per ([`ValueCacheKey`], [`CachedValue`]). - let base_size = mem::size_of::>() + mem::size_of::>(); - let known_keys_entry_size = mem::size_of::>(); - - let update_size_in_bytes = - |size_in_bytes: &mut usize, r_key: Arc<[u8]>, known_keys: &mut HashSet>| { - // If the `strong_count == 2`, it means this is the last instance of the key. - // One being `r_key` and the other being stored in `known_storage_keys`. - let last_instance = Arc::strong_count(&r_key) == 2; - - let key_len = if last_instance { - known_keys.remove(&r_key); - r_key.len() + known_keys_entry_size - } else { - // The key is still in `keys`, because it is still used by another - // `ValueCacheKey`. - 0 - }; - - if let Some(new_size_in_bytes) = size_in_bytes.checked_sub(key_len + base_size) { - *size_in_bytes = new_size_in_bytes; - } else { - *size_in_bytes = 0; - tracing::error!(target: LOG_TARGET, "`SharedValueCache` underflow detected!",); - } - }; - - accessed.into_iter().for_each(|key| { - // Access every node in the lru to put it to the front. - // As we are using the `Hash` variant here, it may leads to putting the wrong value to - // the top. However, the only consequence of this is that we may prune a recently used - // value to early. 
- self.lru.get(&ValueCacheKey::Hash { - hash: key, - _i_read_the_documentation: IReadTheDocumentation(()), - }); - }); - - added.into_iter().for_each(|(key, value)| { - let (storage_root, storage_key, key_hash) = match key { - ValueCacheKey::Hash { .. } => { - // Ignore the hash variant and try the next. - tracing::error!( - target: LOG_TARGET, - "`SharedValueCached::update` was called with a key to add \ - that uses the `Hash` variant. This would lead to potential hash collision!", - ); - return - }, - ValueCacheKey::Ref { storage_key, storage_root, hash } => - (storage_root, storage_key.into(), hash), - ValueCacheKey::Value { storage_root, storage_key, hash } => - (storage_root, storage_key, hash), - }; - - let (size_update, storage_key) = - match self.known_storage_keys.entry(storage_key.clone()) { - SetEntry::Vacant(v) => { - let len = v.get().len(); - v.insert(); - - // If the key was unknown, we need to also take its length and the size of - // the entry of `known_keys` into account. - (len + base_size + known_keys_entry_size, storage_key) - }, - SetEntry::Occupied(o) => { - // Key is known - (base_size, o.get().clone()) - }, - }; - - self.size_in_bytes += size_update; - - if let Some((r_key, _)) = self - .lru - .push(ValueCacheKey::Value { storage_key, storage_root, hash: key_hash }, value) - { - if let ValueCacheKey::Value { storage_key, .. } = r_key { - update_size_in_bytes( - &mut self.size_in_bytes, - storage_key, - &mut self.known_storage_keys, - ); - } - } + let mut access_count = 0; + let mut add_count = 0; - // Directly ensure that we respect the maximum size. By doing it directly here we - // ensure that the internal map of the [`LruCache`] doesn't grow too much. - while self.maximum_cache_size.exceeds(self.size_in_bytes) { - // This should always be `Some(_)`, otherwise something is wrong! - if let Some((r_key, _)) = self.lru.pop_lru() { - if let ValueCacheKey::Value { storage_key, .. } = r_key { - update_size_in_bytes( - &mut self.size_in_bytes, - storage_key, - &mut self.known_storage_keys, - ); - } - } + for hash in accessed { + // Access every node in the map to put it to the front. + // + // Since we are only comparing the hashes here it may lead us to promoting the wrong + // values as the most recently accessed ones. However this is harmless as the only + // consequence is that we may accidentally prune a recently used value too early. + self.lru.get_by_hash(hash.raw(), |existing_key, _| existing_key.hash == hash); + access_count += 1; + } + + // Insert all of the new items which were *not* found in the shared cache. + // + // Limit how many items we'll replace in the shared cache in one go so that + // we don't evict the whole shared cache nor we keep spinning our wheels + // evicting items which we've added ourselves in previous iterations of this loop. + + self.lru.limiter_mut().items_evicted = 0; + self.lru.limiter_mut().max_items_evicted = + self.lru.len() * 100 / super::SHARED_VALUE_CACHE_MAX_REPLACE_PERCENT; + + for (key, value) in added { + self.lru.insert(key, value); + add_count += 1; + + if self.lru.limiter().items_evicted > self.lru.limiter().max_items_evicted { + // Stop when we've evicted a big enough chunk of the shared cache. 
+ break } - }); + } + + tracing::debug!( + target: super::LOG_TARGET, + "Updated the shared value cache: {} accesses, {} new values, {}/{} evicted (length = {}, known_storage_keys = {}, inline size={}/{}, heap size={}/{})", + access_count, + add_count, + self.lru.limiter().items_evicted, + self.lru.limiter().max_items_evicted, + self.lru.len(), + self.lru.limiter().known_storage_keys.len(), + self.lru.memory_usage(), + self.lru.limiter().max_inline_size, + self.lru.limiter().heap_size, + self.lru.limiter().max_heap_size + ); } /// Reset the cache. fn reset(&mut self) { - self.size_in_bytes = 0; self.lru.clear(); - self.known_storage_keys.clear(); } } @@ -477,6 +571,7 @@ pub(super) struct SharedTrieCacheInner { impl SharedTrieCacheInner { /// Returns a reference to the [`SharedValueCache`]. + #[cfg(test)] pub(super) fn value_cache(&self) -> &SharedValueCache { &self.value_cache } @@ -487,6 +582,7 @@ impl SharedTrieCacheInner { } /// Returns a reference to the [`SharedNodeCache`]. + #[cfg(test)] pub(super) fn node_cache(&self) -> &SharedNodeCache { &self.node_cache } @@ -517,23 +613,50 @@ impl Clone for SharedTrieCache { impl SharedTrieCache { /// Create a new [`SharedTrieCache`]. pub fn new(cache_size: CacheSize) -> Self { - let (node_cache_size, value_cache_size) = match cache_size { - CacheSize::Maximum(max) => { - // Allocate 20% for the value cache. - let value_cache_size_in_bytes = (max as f32 * 0.20) as usize; - - ( - CacheSize::Maximum(max - value_cache_size_in_bytes), - CacheSize::Maximum(value_cache_size_in_bytes), - ) - }, - CacheSize::Unlimited => (CacheSize::Unlimited, CacheSize::Unlimited), - }; + let total_budget = cache_size.0; + + // Split our memory budget between the two types of caches. + let value_cache_budget = (total_budget as f32 * 0.20) as usize; // 20% for the value cache + let node_cache_budget = total_budget - value_cache_budget; // 80% for the node cache + + // Split our memory budget between what we'll be holding inline in the map, + // and what we'll be holding on the heap. + let value_cache_inline_budget = (value_cache_budget as f32 * 0.70) as usize; + let node_cache_inline_budget = (node_cache_budget as f32 * 0.70) as usize; + + // Calculate how much memory the maps will be allowed to hold inline given our budget. + let value_cache_max_inline_size = + SharedValueCacheMap::::memory_usage_for_memory_budget( + value_cache_inline_budget, + ); + + let node_cache_max_inline_size = + SharedNodeCacheMap::::memory_usage_for_memory_budget(node_cache_inline_budget); + + // And this is how much data we'll at most keep on the heap for each cache. 
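+		// As a worked example (illustrative): a 32 MiB budget splits into
+		// value_cache_budget ≈ 6.4 MiB (20%) and node_cache_budget ≈ 25.6 MiB,
+		// each of which reserves ~70% for the map's inline storage, leaving the
+		// remainder for the data the map keeps on the heap.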
+ let value_cache_max_heap_size = value_cache_budget - value_cache_max_inline_size; + let node_cache_max_heap_size = node_cache_budget - node_cache_max_inline_size; + + tracing::debug!( + target: super::LOG_TARGET, + "Configured a shared trie cache with a budget of ~{} bytes (node_cache_max_inline_size = {}, node_cache_max_heap_size = {}, value_cache_max_inline_size = {}, value_cache_max_heap_size = {})", + total_budget, + node_cache_max_inline_size, + node_cache_max_heap_size, + value_cache_max_inline_size, + value_cache_max_heap_size, + ); Self { inner: Arc::new(RwLock::new(SharedTrieCacheInner { - node_cache: SharedNodeCache::new(node_cache_size), - value_cache: SharedValueCache::new(value_cache_size), + node_cache: SharedNodeCache::new( + node_cache_max_inline_size, + node_cache_max_heap_size, + ), + value_cache: SharedValueCache::new( + value_cache_max_inline_size, + value_cache_max_heap_size, + ), })), } } @@ -544,16 +667,50 @@ impl SharedTrieCache { shared: self.clone(), node_cache: Default::default(), value_cache: Default::default(), - shared_node_cache_access: Default::default(), - shared_value_cache_access: Default::default(), + shared_value_cache_access: Mutex::new(super::ValueAccessSet::with_hasher( + schnellru::ByLength::new(super::SHARED_VALUE_CACHE_MAX_PROMOTED_KEYS), + Default::default(), + )), + stats: Default::default(), } } + /// Get a copy of the node for `key`. + /// + /// This will temporarily lock the shared cache for reading. + /// + /// This doesn't change the least recently order in the internal [`LruMap`]. + #[inline] + pub fn peek_node(&self, key: &H::Out) -> Option> { + self.inner.read().node_cache.lru.peek(key).cloned() + } + + /// Get a copy of the [`CachedValue`] for `key`. + /// + /// This will temporarily lock the shared cache for reading. + /// + /// This doesn't reorder any of the elements in the internal [`LruMap`]. + pub fn peek_value_by_hash( + &self, + hash: ValueCacheKeyHash, + storage_root: &H::Out, + storage_key: &[u8], + ) -> Option> { + self.inner + .read() + .value_cache + .lru + .peek_by_hash(hash.0, |existing_key, _| existing_key.is_eq(storage_root, storage_key)) + .cloned() + } + /// Returns the used memory size of this cache in bytes. pub fn used_memory_size(&self) -> usize { let inner = self.inner.read(); - let value_cache_size = inner.value_cache.size_in_bytes; - let node_cache_size = inner.node_cache.size_in_bytes; + let value_cache_size = + inner.value_cache.lru.memory_usage() + inner.value_cache.lru.limiter().heap_size; + let node_cache_size = + inner.node_cache.lru.memory_usage() + inner.node_cache.lru.limiter().heap_size; node_cache_size + value_cache_size } @@ -575,13 +732,19 @@ impl SharedTrieCache { } /// Returns the read locked inner. - pub(super) fn read_lock_inner(&self) -> RwLockReadGuard<'_, SharedTrieCacheInner> { + #[cfg(test)] + pub(super) fn read_lock_inner( + &self, + ) -> parking_lot::RwLockReadGuard<'_, SharedTrieCacheInner> { self.inner.read() } /// Returns the write locked inner. - pub(super) fn write_lock_inner(&self) -> RwLockWriteGuard<'_, SharedTrieCacheInner> { - self.inner.write() + pub(super) fn write_lock_inner(&self) -> Option>> { + // This should never happen, but we *really* don't want to deadlock. So let's have it + // timeout, just in case. At worst it'll do nothing, and at best it'll avert a catastrophe + // and notify us that there's a problem. 
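+	// A sketch of the caller side (mirroring `LocalTrieCache::drop` above,
+	// illustrative only):
+	//
+	//   match shared.write_lock_inner() {
+	//   	Some(mut inner) => inner.node_cache_mut().update(nodes),
+	//   	None => {}, // skip this sync; the local data is simply dropped
+	//   }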
+ self.inner.try_write_for(super::SHARED_CACHE_WRITE_LOCK_TIMEOUT) } } @@ -592,12 +755,7 @@ mod tests { #[test] fn shared_value_cache_works() { - let base_size = mem::size_of::>() + mem::size_of::>(); - let arc_size = mem::size_of::>(); - - let mut cache = SharedValueCache::::new(CacheSize::Maximum( - (base_size + arc_size + 10) * 10, - )); + let mut cache = SharedValueCache::::new(usize::MAX, 10 * 10); let key = vec![0; 10]; @@ -613,65 +771,85 @@ mod tests { ); // Ensure that the basics are working - assert_eq!(1, cache.known_storage_keys.len()); - assert_eq!(3, Arc::strong_count(cache.known_storage_keys.get(&key[..]).unwrap())); - assert_eq!(base_size * 2 + key.len() + arc_size, cache.size_in_bytes); + assert_eq!(1, cache.lru.limiter_mut().known_storage_keys.len()); + assert_eq!( + 3, // Two instances inside the cache + one extra in `known_storage_keys`. + Arc::strong_count(cache.lru.limiter_mut().known_storage_keys.get(&key[..]).unwrap()) + ); + assert_eq!(key.len(), cache.lru.limiter().heap_size); + assert_eq!(cache.lru.len(), 2); + assert_eq!(cache.lru.peek_newest().unwrap().0.storage_root, root1); + assert_eq!(cache.lru.peek_oldest().unwrap().0.storage_root, root0); + assert!(cache.lru.limiter().heap_size <= cache.lru.limiter().max_heap_size); + assert_eq!(cache.lru.limiter().heap_size, 10); // Just accessing a key should not change anything on the size and number of entries. cache.update(vec![], vec![ValueCacheKey::hash_data(&key[..], &root0)]); - assert_eq!(1, cache.known_storage_keys.len()); - assert_eq!(3, Arc::strong_count(cache.known_storage_keys.get(&key[..]).unwrap())); - assert_eq!(base_size * 2 + key.len() + arc_size, cache.size_in_bytes); - - // Add 9 other entries and this should move out the key for `root1`. + assert_eq!(1, cache.lru.limiter_mut().known_storage_keys.len()); + assert_eq!( + 3, + Arc::strong_count(cache.lru.limiter_mut().known_storage_keys.get(&key[..]).unwrap()) + ); + assert_eq!(key.len(), cache.lru.limiter().heap_size); + assert_eq!(cache.lru.len(), 2); + assert_eq!(cache.lru.peek_newest().unwrap().0.storage_root, root0); + assert_eq!(cache.lru.peek_oldest().unwrap().0.storage_root, root1); + assert!(cache.lru.limiter().heap_size <= cache.lru.limiter().max_heap_size); + assert_eq!(cache.lru.limiter().heap_size, 10); + + // Updating the cache again with exactly the same data should not change anything. cache.update( - (1..10) + vec![ + (ValueCacheKey::new_value(&key[..], root1), CachedValue::NonExisting), + (ValueCacheKey::new_value(&key[..], root0), CachedValue::NonExisting), + ], + vec![], + ); + assert_eq!(1, cache.lru.limiter_mut().known_storage_keys.len()); + assert_eq!( + 3, + Arc::strong_count(cache.lru.limiter_mut().known_storage_keys.get(&key[..]).unwrap()) + ); + assert_eq!(key.len(), cache.lru.limiter().heap_size); + assert_eq!(cache.lru.len(), 2); + assert_eq!(cache.lru.peek_newest().unwrap().0.storage_root, root0); + assert_eq!(cache.lru.peek_oldest().unwrap().0.storage_root, root1); + assert!(cache.lru.limiter().heap_size <= cache.lru.limiter().max_heap_size); + assert_eq!(cache.lru.limiter().items_evicted, 0); + assert_eq!(cache.lru.limiter().heap_size, 10); + + // Add 10 other entries and this should move out two of the initial entries. 
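+		// (Arithmetic sketch: the heap budget was set to 10 * 10 = 100 bytes and
+		// every key is 10 bytes long, so inserting ten fresh keys evicts the two
+		// entries sharing the original key: 2 evictions, 10 entries and exactly
+		// 100 bytes of key data left, matching the assertions below.)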
+ cache.update( + (1..11) .map(|i| vec![i; 10]) .map(|key| (ValueCacheKey::new_value(&key[..], root0), CachedValue::NonExisting)), vec![], ); - assert_eq!(10, cache.known_storage_keys.len()); - assert_eq!(2, Arc::strong_count(cache.known_storage_keys.get(&key[..]).unwrap())); - assert_eq!((base_size + key.len() + arc_size) * 10, cache.size_in_bytes); + assert_eq!(cache.lru.limiter().items_evicted, 2); + assert_eq!(10, cache.lru.len()); + assert_eq!(10, cache.lru.limiter_mut().known_storage_keys.len()); + assert!(cache.lru.limiter_mut().known_storage_keys.get(&key[..]).is_none()); + assert_eq!(key.len() * 10, cache.lru.limiter().heap_size); + assert_eq!(cache.lru.len(), 10); + assert!(cache.lru.limiter().heap_size <= cache.lru.limiter().max_heap_size); + assert_eq!(cache.lru.limiter().heap_size, 100); + assert!(matches!( - cache.get(&ValueCacheKey::new_ref(&key, root0)).unwrap(), + cache.lru.peek(&ValueCacheKey::new_value(&[1; 10][..], root0)).unwrap(), CachedValue::::NonExisting )); - assert!(cache.get(&ValueCacheKey::new_ref(&key, root1)).is_none()); + + assert!(cache.lru.peek(&ValueCacheKey::new_value(&[1; 10][..], root1)).is_none(),); + + assert!(cache.lru.peek(&ValueCacheKey::new_value(&key[..], root0)).is_none()); + assert!(cache.lru.peek(&ValueCacheKey::new_value(&key[..], root1)).is_none()); cache.update( vec![(ValueCacheKey::new_value(vec![10; 10], root0), CachedValue::NonExisting)], vec![], ); - assert!(cache.known_storage_keys.get(&key[..]).is_none()); - } - - #[test] - fn value_cache_key_eq_works() { - let storage_key = &b"something"[..]; - let storage_key2 = &b"something2"[..]; - let storage_root = Hash::random(); - - let value = ValueCacheKey::new_value(storage_key, storage_root); - // Ref gets the same hash, but a different storage key - let ref_ = - ValueCacheKey::Ref { storage_root, storage_key: storage_key2, hash: value.get_hash() }; - let hash = ValueCacheKey::Hash { - hash: value.get_hash(), - _i_read_the_documentation: IReadTheDocumentation(()), - }; - - // Ensure that the hash variants is equal to `value`, `ref_` and itself. - assert!(hash == value); - assert!(value == hash); - assert!(hash == ref_); - assert!(ref_ == hash); - assert!(hash == hash); - - // But when we compare `value` and `ref_` the different storage key is detected. - assert!(value != ref_); - assert!(ref_ != value); + assert!(cache.lru.limiter_mut().known_storage_keys.get(&key[..]).is_none()); } } From 64a1a360eaf5c467c0f836a1ca4ae4cde8663e43 Mon Sep 17 00:00:00 2001 From: Andrei Sandu <54316454+sandreim@users.noreply.github.com> Date: Thu, 26 Jan 2023 08:22:51 +0200 Subject: [PATCH 5/8] Add task type label to metrics (#13240) Signed-off-by: Andrei Sandu Signed-off-by: Andrei Sandu --- client/service/src/task_manager/mod.rs | 41 +++++++++++++++++++------- 1 file changed, 30 insertions(+), 11 deletions(-) diff --git a/client/service/src/task_manager/mod.rs b/client/service/src/task_manager/mod.rs index d792122576444..f2987e684d9dc 100644 --- a/client/service/src/task_manager/mod.rs +++ b/client/service/src/task_manager/mod.rs @@ -129,12 +129,20 @@ impl SpawnTaskHandle { GroupName::Default => DEFAULT_GROUP_NAME, }; + let task_type_label = match task_type { + TaskType::Blocking => "blocking", + TaskType::Async => "async", + }; + // Note that we increase the started counter here and not within the future. This way, // we could properly visualize on Prometheus situations where the spawning doesn't work. 
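		// With the new `kind` label added here, the exported series look like
		// (illustrative sample for an async task named "x" in group "y"):
		//
		//   substrate_tasks_spawned_total{task_name="x", task_group="y", kind="async"} 1
		//   substrate_tasks_ended_total{task_name="x", reason="finished", task_group="y", kind="async"} 0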
if let Some(metrics) = &self.metrics { - metrics.tasks_spawned.with_label_values(&[name, group]).inc(); + metrics.tasks_spawned.with_label_values(&[name, group, task_type_label]).inc(); // We do a dummy increase in order for the task to show up in metrics. - metrics.tasks_ended.with_label_values(&[name, "finished", group]).inc_by(0); + metrics + .tasks_ended + .with_label_values(&[name, "finished", group, task_type_label]) + .inc_by(0); } let future = async move { @@ -145,8 +153,10 @@ impl SpawnTaskHandle { if let Some(metrics) = metrics { // Add some wrappers around `task`. let task = { - let poll_duration = metrics.poll_duration.with_label_values(&[name, group]); - let poll_start = metrics.poll_start.with_label_values(&[name, group]); + let poll_duration = + metrics.poll_duration.with_label_values(&[name, group, task_type_label]); + let poll_start = + metrics.poll_start.with_label_values(&[name, group, task_type_label]); let inner = prometheus_future::with_poll_durations(poll_duration, poll_start, task); // The logic of `AssertUnwindSafe` here is ok considering that we throw @@ -157,15 +167,24 @@ impl SpawnTaskHandle { match select(on_exit, task).await { Either::Right((Err(payload), _)) => { - metrics.tasks_ended.with_label_values(&[name, "panic", group]).inc(); + metrics + .tasks_ended + .with_label_values(&[name, "panic", group, task_type_label]) + .inc(); panic::resume_unwind(payload) }, Either::Right((Ok(()), _)) => { - metrics.tasks_ended.with_label_values(&[name, "finished", group]).inc(); + metrics + .tasks_ended + .with_label_values(&[name, "finished", group, task_type_label]) + .inc(); }, Either::Left(((), _)) => { // The `on_exit` has triggered. - metrics.tasks_ended.with_label_values(&[name, "interrupted", group]).inc(); + metrics + .tasks_ended + .with_label_values(&[name, "interrupted", group, task_type_label]) + .inc(); }, } } else { @@ -433,28 +452,28 @@ impl Metrics { buckets: exponential_buckets(0.001, 4.0, 9) .expect("function parameters are constant and always valid; qed"), }, - &["task_name", "task_group"] + &["task_name", "task_group", "kind"] )?, registry)?, poll_start: register(CounterVec::new( Opts::new( "substrate_tasks_polling_started_total", "Total number of times we started invoking Future::poll" ), - &["task_name", "task_group"] + &["task_name", "task_group", "kind"] )?, registry)?, tasks_spawned: register(CounterVec::new( Opts::new( "substrate_tasks_spawned_total", "Total number of tasks that have been spawned on the Service" ), - &["task_name", "task_group"] + &["task_name", "task_group", "kind"] )?, registry)?, tasks_ended: register(CounterVec::new( Opts::new( "substrate_tasks_ended_total", "Total number of tasks for which Future::poll has returned Ready(()) or panicked" ), - &["task_name", "reason", "task_group"] + &["task_name", "reason", "task_group", "kind"] )?, registry)?, }) } From f4bc99303e6faa0ae0461b4606c1699b4463ff4b Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Thu, 26 Jan 2023 08:59:18 +0100 Subject: [PATCH 6/8] upgrade nix to 0.26.1 (#13230) --- Cargo.lock | 15 +-------------- bin/node/cli/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7c412030d560c..43f67695faa11 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4640,19 +4640,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "nix" -version = "0.23.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8f3790c00a0150112de0f4cd161e3d7fc4b2d8a5542ffc35f099a2562aecb35c" -dependencies = [ - "bitflags", - "cc", - "cfg-if", - "libc", - "memoffset 0.6.5", -] - [[package]] name = "nix" version = "0.24.3" @@ -4731,7 +4718,7 @@ dependencies = [ "jsonrpsee", "kitchensink-runtime", "log", - "nix 0.23.2", + "nix 0.26.1", "node-executor", "node-inspect", "node-primitives", diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 8a883ee0119ec..b55b35c05652d 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -117,7 +117,7 @@ sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" futures = "0.3.21" tempfile = "3.1.0" assert_cmd = "2.0.2" -nix = "0.23" +nix = { version = "0.26.1", features = ["signal"] } serde_json = "1.0" regex = "1.6.0" platforms = "2.0" From 2dbf62591fd35725c6e39f8ddfd27c3695dab594 Mon Sep 17 00:00:00 2001 From: Stephen Shelton Date: Thu, 26 Jan 2023 07:39:20 -0700 Subject: [PATCH 7/8] Add WeightToFee and LengthToFee impls to transaction-payment Runtime API (#13110) * Add WeightToFee and LengthToFee impls to RPC * Remove RPC additions * Update frame/transaction-payment/rpc/runtime-api/src/lib.rs Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> * Add comments to length_to_fee and weight_to_fee Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> Co-authored-by: Oliver Tale-Yazdi --- bin/node-template/runtime/src/lib.rs | 12 ++++++++++++ bin/node/runtime/src/lib.rs | 12 ++++++++++++ frame/transaction-payment/rpc/runtime-api/src/lib.rs | 12 ++++++++++-- frame/transaction-payment/src/lib.rs | 7 +++++-- 4 files changed, 39 insertions(+), 4 deletions(-) diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index f4372af525da5..baba5d9b05e59 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -477,6 +477,12 @@ impl_runtime_apis! { ) -> pallet_transaction_payment::FeeDetails { TransactionPayment::query_fee_details(uxt, len) } + fn query_weight_to_fee(weight: Weight) -> Balance { + TransactionPayment::weight_to_fee(weight) + } + fn query_length_to_fee(length: u32) -> Balance { + TransactionPayment::length_to_fee(length) + } } impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi @@ -494,6 +500,12 @@ impl_runtime_apis! { ) -> pallet_transaction_payment::FeeDetails { TransactionPayment::query_call_fee_details(call, len) } + fn query_weight_to_fee(weight: Weight) -> Balance { + TransactionPayment::weight_to_fee(weight) + } + fn query_length_to_fee(length: u32) -> Balance { + TransactionPayment::length_to_fee(length) + } } #[cfg(feature = "runtime-benchmarks")] diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 1f85d118ea534..0121f1ee3a8ca 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -2120,6 +2120,12 @@ impl_runtime_apis! { fn query_fee_details(uxt: ::Extrinsic, len: u32) -> FeeDetails { TransactionPayment::query_fee_details(uxt, len) } + fn query_weight_to_fee(weight: Weight) -> Balance { + TransactionPayment::weight_to_fee(weight) + } + fn query_length_to_fee(length: u32) -> Balance { + TransactionPayment::length_to_fee(length) + } } impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi @@ -2131,6 +2137,12 @@ impl_runtime_apis! 
{ fn query_call_fee_details(call: RuntimeCall, len: u32) -> FeeDetails { TransactionPayment::query_call_fee_details(call, len) } + fn query_weight_to_fee(weight: Weight) -> Balance { + TransactionPayment::weight_to_fee(weight) + } + fn query_length_to_fee(length: u32) -> Balance { + TransactionPayment::length_to_fee(length) + } } impl pallet_mmr::primitives::MmrApi< diff --git a/frame/transaction-payment/rpc/runtime-api/src/lib.rs b/frame/transaction-payment/rpc/runtime-api/src/lib.rs index 10fd2a9e61fc1..760f9e3693417 100644 --- a/frame/transaction-payment/rpc/runtime-api/src/lib.rs +++ b/frame/transaction-payment/rpc/runtime-api/src/lib.rs @@ -25,7 +25,7 @@ use sp_runtime::traits::MaybeDisplay; pub use pallet_transaction_payment::{FeeDetails, InclusionFee, RuntimeDispatchInfo}; sp_api::decl_runtime_apis! { - #[api_version(2)] + #[api_version(3)] pub trait TransactionPaymentApi where Balance: Codec + MaybeDisplay, { @@ -33,9 +33,11 @@ sp_api::decl_runtime_apis! { fn query_info(uxt: Block::Extrinsic, len: u32) -> RuntimeDispatchInfo; fn query_info(uxt: Block::Extrinsic, len: u32) -> RuntimeDispatchInfo; fn query_fee_details(uxt: Block::Extrinsic, len: u32) -> FeeDetails; + fn query_weight_to_fee(weight: sp_weights::Weight) -> Balance; + fn query_length_to_fee(length: u32) -> Balance; } - #[api_version(2)] + #[api_version(3)] pub trait TransactionPaymentCallApi where Balance: Codec + MaybeDisplay, @@ -46,5 +48,11 @@ sp_api::decl_runtime_apis! { /// Query fee details of a given encoded `Call`. fn query_call_fee_details(call: Call, len: u32) -> FeeDetails; + + /// Query the output of the current `WeightToFee` given some input. + fn query_weight_to_fee(weight: sp_weights::Weight) -> Balance; + + /// Query the output of the current `LengthToFee` given some input. + fn query_length_to_fee(length: u32) -> Balance; } } diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index 13adbf89c4270..fb4381c5242c6 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -612,11 +612,14 @@ where } } - fn length_to_fee(length: u32) -> BalanceOf { + /// Compute the length portion of a fee by invoking the configured `LengthToFee` impl. + pub fn length_to_fee(length: u32) -> BalanceOf { T::LengthToFee::weight_to_fee(&Weight::from_ref_time(length as u64)) } - fn weight_to_fee(weight: Weight) -> BalanceOf { + /// Compute the unadjusted portion of the weight fee by invoking the configured `WeightToFee` + /// impl. Note that the input `weight` is capped by the maximum block weight before computation. + pub fn weight_to_fee(weight: Weight) -> BalanceOf { // cap the weight to the maximum defined in runtime, otherwise it will be the // `Bounded` maximum of its data type, which is not desired. 
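		// (E.g. a caller passing `Weight::MAX` to `query_weight_to_fee` has it
		// clamped to `max_block` here before `T::WeightToFee` ever sees it.)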
let capped_weight = weight.min(T::BlockWeights::get().max_block); From 495cc0e2b3b06bd3f6b746416fc0d635adcefa7c Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Thu, 26 Jan 2023 17:13:38 +0100 Subject: [PATCH 8/8] Correct arithmetical semantic of `PerDispatchClass` (#13194) * Fix PerDispatchClass arithmetic Signed-off-by: Oliver Tale-Yazdi * Test PerDispatchClass arithmetic Signed-off-by: Oliver Tale-Yazdi * Add helpers for Weight Signed-off-by: Oliver Tale-Yazdi * Remove stale doc Signed-off-by: Oliver Tale-Yazdi * Dont mention Polkadot in Substrate Signed-off-by: Oliver Tale-Yazdi * Tests Signed-off-by: Oliver Tale-Yazdi * fmt Signed-off-by: Oliver Tale-Yazdi * Remove saturating_ prefix Signed-off-by: Oliver Tale-Yazdi * Fix tests Signed-off-by: Oliver Tale-Yazdi * Fix usage Signed-off-by: Oliver Tale-Yazdi Signed-off-by: Oliver Tale-Yazdi --- frame/support/src/dispatch.rs | 209 ++++++++++++++++++-- frame/support/src/weights.rs | 10 +- frame/system/src/extensions/check_weight.rs | 6 +- frame/system/src/lib.rs | 2 +- primitives/weights/src/lib.rs | 7 - primitives/weights/src/weight_v2.rs | 109 ++++++++++ 6 files changed, 307 insertions(+), 36 deletions(-) diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index 93cf08c131641..291abdd12893f 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -423,33 +423,35 @@ impl PerDispatchClass { impl PerDispatchClass { /// Returns the total weight consumed by all extrinsics in the block. + /// + /// Saturates on overflow. pub fn total(&self) -> Weight { let mut sum = Weight::zero(); for class in DispatchClass::all() { - sum = sum.saturating_add(*self.get(*class)); + sum.saturating_accrue(*self.get(*class)); } sum } - /// Add some weight of a specific dispatch class, saturating at the numeric bounds of `Weight`. - pub fn add(&mut self, weight: Weight, class: DispatchClass) { - let value = self.get_mut(class); - *value = value.saturating_add(weight); + /// Add some weight to the given class. Saturates at the numeric bounds. + pub fn add(mut self, weight: Weight, class: DispatchClass) -> Self { + self.accrue(weight, class); + self + } + + /// Increase the weight of the given class. Saturates at the numeric bounds. + pub fn accrue(&mut self, weight: Weight, class: DispatchClass) { + self.get_mut(class).saturating_accrue(weight); } - /// Try to add some weight of a specific dispatch class, returning Err(()) if overflow would - /// occur. - pub fn checked_add(&mut self, weight: Weight, class: DispatchClass) -> Result<(), ()> { - let value = self.get_mut(class); - *value = value.checked_add(&weight).ok_or(())?; - Ok(()) + /// Try to increase the weight of the given class. Saturates at the numeric bounds. + pub fn checked_accrue(&mut self, weight: Weight, class: DispatchClass) -> Result<(), ()> { + self.get_mut(class).checked_accrue(weight).ok_or(()) } - /// Subtract some weight of a specific dispatch class, saturating at the numeric bounds of - /// `Weight`. - pub fn sub(&mut self, weight: Weight, class: DispatchClass) { - let value = self.get_mut(class); - *value = value.saturating_sub(weight); + /// Reduce the weight of the given class. Saturates at the numeric bounds. 
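+	/// (Saturating example: reducing a class currently at `(5, 5)` by `(10, 1)`
+	/// leaves it at `(0, 4)`.)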
+ pub fn reduce(&mut self, weight: Weight, class: DispatchClass) { + self.get_mut(class).saturating_reduce(weight); } } @@ -3693,3 +3695,178 @@ mod weight_tests { assert_eq!(extract_actual_pays_fee(&Ok((Some(1000), Pays::Yes).into()), &pre), Pays::No); } } + +#[cfg(test)] +mod per_dispatch_class_tests { + use super::*; + use sp_runtime::traits::Zero; + use DispatchClass::*; + + #[test] + fn add_works() { + let a = PerDispatchClass { + normal: (5, 10).into(), + operational: (20, 30).into(), + mandatory: Weight::MAX, + }; + assert_eq!( + a.clone() + .add((20, 5).into(), Normal) + .add((10, 10).into(), Operational) + .add((u64::MAX, 3).into(), Mandatory), + PerDispatchClass { + normal: (25, 15).into(), + operational: (30, 40).into(), + mandatory: Weight::MAX + } + ); + let b = a + .add(Weight::MAX, Normal) + .add(Weight::MAX, Operational) + .add(Weight::MAX, Mandatory); + assert_eq!( + b, + PerDispatchClass { + normal: Weight::MAX, + operational: Weight::MAX, + mandatory: Weight::MAX + } + ); + assert_eq!(b.total(), Weight::MAX); + } + + #[test] + fn accrue_works() { + let mut a = PerDispatchClass::default(); + + a.accrue((10, 15).into(), Normal); + assert_eq!(a.normal, (10, 15).into()); + assert_eq!(a.total(), (10, 15).into()); + + a.accrue((20, 25).into(), Operational); + assert_eq!(a.operational, (20, 25).into()); + assert_eq!(a.total(), (30, 40).into()); + + a.accrue((30, 35).into(), Mandatory); + assert_eq!(a.mandatory, (30, 35).into()); + assert_eq!(a.total(), (60, 75).into()); + + a.accrue((u64::MAX, 10).into(), Operational); + assert_eq!(a.operational, (u64::MAX, 35).into()); + assert_eq!(a.total(), (u64::MAX, 85).into()); + + a.accrue((10, u64::MAX).into(), Normal); + assert_eq!(a.normal, (20, u64::MAX).into()); + assert_eq!(a.total(), Weight::MAX); + } + + #[test] + fn reduce_works() { + let mut a = PerDispatchClass { + normal: (10, u64::MAX).into(), + mandatory: (u64::MAX, 10).into(), + operational: (20, 20).into(), + }; + + a.reduce((5, 100).into(), Normal); + assert_eq!(a.normal, (5, u64::MAX - 100).into()); + assert_eq!(a.total(), (u64::MAX, u64::MAX - 70).into()); + + a.reduce((15, 5).into(), Operational); + assert_eq!(a.operational, (5, 15).into()); + assert_eq!(a.total(), (u64::MAX, u64::MAX - 75).into()); + + a.reduce((50, 0).into(), Mandatory); + assert_eq!(a.mandatory, (u64::MAX - 50, 10).into()); + assert_eq!(a.total(), (u64::MAX - 40, u64::MAX - 75).into()); + + a.reduce((u64::MAX, 100).into(), Operational); + assert!(a.operational.is_zero()); + assert_eq!(a.total(), (u64::MAX - 45, u64::MAX - 90).into()); + + a.reduce((5, u64::MAX).into(), Normal); + assert!(a.normal.is_zero()); + assert_eq!(a.total(), (u64::MAX - 50, 10).into()); + } + + #[test] + fn checked_accrue_works() { + let mut a = PerDispatchClass::default(); + + a.checked_accrue((1, 2).into(), Normal).unwrap(); + a.checked_accrue((3, 4).into(), Operational).unwrap(); + a.checked_accrue((5, 6).into(), Mandatory).unwrap(); + a.checked_accrue((7, 8).into(), Operational).unwrap(); + a.checked_accrue((9, 0).into(), Normal).unwrap(); + + assert_eq!( + a, + PerDispatchClass { + normal: (10, 2).into(), + operational: (10, 12).into(), + mandatory: (5, 6).into(), + } + ); + + a.checked_accrue((u64::MAX - 10, u64::MAX - 2).into(), Normal).unwrap(); + a.checked_accrue((0, 0).into(), Normal).unwrap(); + a.checked_accrue((1, 0).into(), Normal).unwrap_err(); + a.checked_accrue((0, 1).into(), Normal).unwrap_err(); + + assert_eq!( + a, + PerDispatchClass { + normal: Weight::MAX, + operational: (10, 12).into(), + mandatory: (5, 
6).into(), + } + ); + } + + #[test] + fn checked_accrue_does_not_modify_on_error() { + let mut a = PerDispatchClass { + normal: 0.into(), + operational: Weight::MAX / 2 + 2.into(), + mandatory: 10.into(), + }; + + a.checked_accrue(Weight::MAX / 2, Operational).unwrap_err(); + a.checked_accrue(Weight::MAX - 9.into(), Mandatory).unwrap_err(); + a.checked_accrue(Weight::MAX, Normal).unwrap(); // This one works + + assert_eq!( + a, + PerDispatchClass { + normal: Weight::MAX, + operational: Weight::MAX / 2 + 2.into(), + mandatory: 10.into(), + } + ); + } + + #[test] + fn total_works() { + assert!(PerDispatchClass::default().total().is_zero()); + + assert_eq!( + PerDispatchClass { + normal: 0.into(), + operational: (10, 20).into(), + mandatory: (20, u64::MAX).into(), + } + .total(), + (30, u64::MAX).into() + ); + + assert_eq!( + PerDispatchClass { + normal: (u64::MAX - 10, 10).into(), + operational: (3, u64::MAX).into(), + mandatory: (4, u64::MAX).into(), + } + .total(), + (u64::MAX - 3, u64::MAX).into() + ); + } +} diff --git a/frame/support/src/weights.rs b/frame/support/src/weights.rs index 9ff49b97bf21f..d52380b832fc4 100644 --- a/frame/support/src/weights.rs +++ b/frame/support/src/weights.rs @@ -15,15 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Re-exports `sp-weights` public API, and contains benchmarked weight constants specific to -//! FRAME. -//! -//! Latest machine specification used to benchmark are: -//! - Digital Ocean: ubuntu-s-2vcpu-4gb-ams3-01 -//! - 2x Intel(R) Xeon(R) CPU E5-2650 v4 @ 2.20GHz -//! - 4GB RAM -//! - Ubuntu 19.10 (GNU/Linux 5.3.0-18-generic x86_64) -//! - rustc 1.42.0 (b8cedc004 2020-03-09) +//! Re-exports `sp-weights` public API, and contains benchmarked weight constants specific to FRAME. mod block_weights; mod extrinsic_weights; diff --git a/frame/system/src/extensions/check_weight.rs b/frame/system/src/extensions/check_weight.rs index 757b2197bc238..590d05474f91a 100644 --- a/frame/system/src/extensions/check_weight.rs +++ b/frame/system/src/extensions/check_weight.rs @@ -135,10 +135,10 @@ where // add the weight. If class is unlimited, use saturating add instead of checked one. if limit_per_class.max_total.is_none() && limit_per_class.reserved.is_none() { - all_weight.add(extrinsic_weight, info.class) + all_weight.accrue(extrinsic_weight, info.class) } else { all_weight - .checked_add(extrinsic_weight, info.class) + .checked_accrue(extrinsic_weight, info.class) .map_err(|_| InvalidTransaction::ExhaustsResources)?; } @@ -229,7 +229,7 @@ where let unspent = post_info.calc_unspent(info); if unspent.any_gt(Weight::zero()) { crate::BlockWeight::::mutate(|current_weight| { - current_weight.sub(unspent, info.class); + current_weight.reduce(unspent, info.class); }) } diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 4bd5cf629873b..c5d1706006154 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -1321,7 +1321,7 @@ impl Pallet { /// Another potential use-case could be for the `on_initialize` and `on_finalize` hooks. pub fn register_extra_weight_unchecked(weight: Weight, class: DispatchClass) { BlockWeight::::mutate(|current_weight| { - current_weight.add(weight, class); + current_weight.accrue(weight, class); }); } diff --git a/primitives/weights/src/lib.rs b/primitives/weights/src/lib.rs index 928080d139864..59852815d7aef 100644 --- a/primitives/weights/src/lib.rs +++ b/primitives/weights/src/lib.rs @@ -16,13 +16,6 @@ // limitations under the License. //! 
# Primitives for transaction weighting. -//! -//! Latest machine specification used to benchmark are: -//! - Digital Ocean: ubuntu-s-2vcpu-4gb-ams3-01 -//! - 2x Intel(R) Xeon(R) CPU E5-2650 v4 @ 2.20GHz -//! - 4GB RAM -//! - Ubuntu 19.10 (GNU/Linux 5.3.0-18-generic x86_64) -//! - rustc 1.42.0 (b8cedc004 2020-03-09) #![cfg_attr(not(feature = "std"), no_std)] diff --git a/primitives/weights/src/weight_v2.rs b/primitives/weights/src/weight_v2.rs index 2933d80099dd7..33e61b6954a37 100644 --- a/primitives/weights/src/weight_v2.rs +++ b/primitives/weights/src/weight_v2.rs @@ -117,6 +117,11 @@ impl Weight { Self { ref_time, proof_size } } + /// Construct [`Weight`] from the same weight for all parts. + pub const fn from_all(value: u64) -> Self { + Self { ref_time: value, proof_size: value } + } + /// Saturating [`Weight`] addition. Computes `self + rhs`, saturating at the numeric bounds of /// all fields instead of overflowing. pub const fn saturating_add(self, rhs: Self) -> Self { @@ -167,6 +172,11 @@ impl Weight { *self = self.saturating_add(amount); } + /// Reduce [`Weight`] by `amount` via saturating subtraction. + pub fn saturating_reduce(&mut self, amount: Self) { + *self = self.saturating_sub(amount); + } + /// Checked [`Weight`] addition. Computes `self + rhs`, returning `None` if overflow occurred. pub const fn checked_add(&self, rhs: &Self) -> Option { let ref_time = match self.ref_time.checked_add(rhs.ref_time) { @@ -222,6 +232,16 @@ impl Weight { Some(Self { ref_time, proof_size }) } + /// Try to increase `self` by `amount` via checked addition. + pub fn checked_accrue(&mut self, amount: Self) -> Option<()> { + self.checked_add(&amount).map(|new_self| *self = new_self) + } + + /// Try to reduce `self` by `amount` via checked subtraction. + pub fn checked_reduce(&mut self, amount: Self) -> Option<()> { + self.checked_sub(&amount).map(|new_self| *self = new_self) + } + /// Return a [`Weight`] where all fields are zero. pub const fn zero() -> Self { Self { ref_time: 0, proof_size: 0 } @@ -352,6 +372,20 @@ where } } +#[cfg(any(test, feature = "std", feature = "runtime-benchmarks"))] +impl From for Weight { + fn from(value: u64) -> Self { + Self::from_parts(value, value) + } +} + +#[cfg(any(test, feature = "std", feature = "runtime-benchmarks"))] +impl From<(u64, u64)> for Weight { + fn from(value: (u64, u64)) -> Self { + Self::from_parts(value.0, value.1) + } +} + macro_rules! weight_mul_per_impl { ($($t:ty),* $(,)?) 
=> { $( @@ -459,4 +493,79 @@ mod tests { assert!(!Weight::from_parts(0, 1).is_zero()); assert!(!Weight::MAX.is_zero()); } + + #[test] + fn from_parts_works() { + assert_eq!(Weight::from_parts(0, 0), Weight { ref_time: 0, proof_size: 0 }); + assert_eq!(Weight::from_parts(5, 5), Weight { ref_time: 5, proof_size: 5 }); + assert_eq!( + Weight::from_parts(u64::MAX, u64::MAX), + Weight { ref_time: u64::MAX, proof_size: u64::MAX } + ); + } + + #[test] + fn from_all_works() { + assert_eq!(Weight::from_all(0), Weight::from_parts(0, 0)); + assert_eq!(Weight::from_all(5), Weight::from_parts(5, 5)); + assert_eq!(Weight::from_all(u64::MAX), Weight::from_parts(u64::MAX, u64::MAX)); + } + + #[test] + fn from_u64_works() { + assert_eq!(Weight::from_all(0), 0_u64.into()); + assert_eq!(Weight::from_all(123), 123_u64.into()); + assert_eq!(Weight::from_all(u64::MAX), u64::MAX.into()); + } + + #[test] + fn from_u64_pair_works() { + assert_eq!(Weight::from_parts(0, 1), (0, 1).into()); + assert_eq!(Weight::from_parts(123, 321), (123u64, 321u64).into()); + assert_eq!(Weight::from_parts(u64::MAX, 0), (u64::MAX, 0).into()); + } + + #[test] + fn saturating_reduce_works() { + let mut weight = Weight::from_parts(10, 20); + weight.saturating_reduce(Weight::from_all(5)); + assert_eq!(weight, Weight::from_parts(5, 15)); + weight.saturating_reduce(Weight::from_all(5)); + assert_eq!(weight, Weight::from_parts(0, 10)); + weight.saturating_reduce(Weight::from_all(11)); + assert!(weight.is_zero()); + weight.saturating_reduce(Weight::from_all(u64::MAX)); + assert!(weight.is_zero()); + } + + #[test] + fn checked_accrue_works() { + let mut weight = Weight::from_parts(10, 20); + assert!(weight.checked_accrue(Weight::from_all(2)).is_some()); + assert_eq!(weight, Weight::from_parts(12, 22)); + assert!(weight.checked_accrue(Weight::from_parts(u64::MAX, 0)).is_none()); + assert!(weight.checked_accrue(Weight::from_parts(0, u64::MAX)).is_none()); + assert_eq!(weight, Weight::from_parts(12, 22)); + assert!(weight + .checked_accrue(Weight::from_parts(u64::MAX - 12, u64::MAX - 22)) + .is_some()); + assert_eq!(weight, Weight::MAX); + assert!(weight.checked_accrue(Weight::from_parts(1, 0)).is_none()); + assert!(weight.checked_accrue(Weight::from_parts(0, 1)).is_none()); + assert_eq!(weight, Weight::MAX); + } + + #[test] + fn checked_reduce_works() { + let mut weight = Weight::from_parts(10, 20); + assert!(weight.checked_reduce(Weight::from_all(2)).is_some()); + assert_eq!(weight, Weight::from_parts(8, 18)); + assert!(weight.checked_reduce(Weight::from_parts(9, 0)).is_none()); + assert!(weight.checked_reduce(Weight::from_parts(0, 19)).is_none()); + assert_eq!(weight, Weight::from_parts(8, 18)); + assert!(weight.checked_reduce(Weight::from_parts(8, 0)).is_some()); + assert_eq!(weight, Weight::from_parts(0, 18)); + assert!(weight.checked_reduce(Weight::from_parts(0, 18)).is_some()); + assert!(weight.is_zero()); + } }
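
A closing note on the new `Weight` helpers introduced in the last patch: `from_all`, `saturating_reduce`, `checked_accrue` and `checked_reduce` give weight-accounting code a small, symmetric mutation vocabulary. Below is a minimal, self-contained sketch of how they compose. It assumes only a dependency on the `sp-weights` crate; the `From<u64>`/`From<(u64, u64)>` conversions are additionally gated to test/`std`/benchmark builds, so they are left out here.

	use sp_weights::Weight;

	fn main() {
		// `from_all(x)` is shorthand for `from_parts(x, x)`.
		let mut used = Weight::from_all(10);

		// Saturating accounting: clamps at the numeric bounds instead of panicking.
		used.saturating_accrue(Weight::from_parts(5, 0));
		assert_eq!(used, Weight::from_parts(15, 10));
		used.saturating_reduce(Weight::from_parts(0, 20));
		assert_eq!(used, Weight::from_parts(15, 0));

		// Checked accounting: `None` signals overflow/underflow and leaves the
		// value untouched; this is the same contract `CheckWeight` relies on when
		// it maps a failed `checked_accrue` to `InvalidTransaction::ExhaustsResources`.
		assert!(used.checked_accrue(Weight::MAX).is_none());
		assert_eq!(used, Weight::from_parts(15, 0));
		assert!(used.checked_reduce(Weight::from_parts(15, 0)).is_some());
		assert!(used.is_zero());
	}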