Skip to content
This repository has been archived by the owner on Nov 15, 2023. It is now read-only.

New Weights for All Pallets #12325

Merged
merged 14 commits into from
Nov 8, 2022
312 changes: 147 additions & 165 deletions frame/alliance/src/weights.rs

Large diffs are not rendered by default.

196 changes: 104 additions & 92 deletions frame/assets/src/weights.rs

Large diffs are not rendered by default.

46 changes: 24 additions & 22 deletions frame/bags-list/src/weights.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,8 @@
//! Autogenerated weights for pallet_bags_list
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! DATE: 2022-09-20, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz`
//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024

// Executed Command:
Expand All @@ -32,8 +33,9 @@
// --extrinsic=*
// --execution=wasm
// --wasm-execution=compiled
// --template=./.maintain/frame-weight-template.hbs
// --heap-pages=4096
// --output=./frame/bags-list/src/weights.rs
// --template=./.maintain/frame-weight-template.hbs

#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(unused_parens)]
Expand All @@ -54,29 +56,29 @@ pub struct SubstrateWeight<T>(PhantomData<T>);
impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
// Storage: Staking Bonded (r:1 w:0)
// Storage: Staking Ledger (r:1 w:0)
// Storage: VoterBagsList ListNodes (r:4 w:4)
// Storage: VoterBagsList ListBags (r:1 w:1)
fn rebag_non_terminal() -> Weight {
	Weight::from_ref_time(70_469_000 as u64)
		.saturating_add(T::DbWeight::get().reads(7 as u64))
		.saturating_add(T::DbWeight::get().writes(5 as u64))
}
// Storage: Staking Bonded (r:1 w:0)
// Storage: Staking Ledger (r:1 w:0)
// Storage: VoterBagsList ListNodes (r:3 w:3)
// Storage: VoterBagsList ListBags (r:2 w:2)
fn rebag_terminal() -> Weight {
	Weight::from_ref_time(69_039_000 as u64)
		.saturating_add(T::DbWeight::get().reads(7 as u64))
		.saturating_add(T::DbWeight::get().writes(5 as u64))
}
// Storage: VoterBagsList ListNodes (r:4 w:4)
// Storage: Staking Bonded (r:2 w:0)
// Storage: Staking Ledger (r:2 w:0)
// Storage: VoterBagsList CounterForListNodes (r:1 w:1)
// Storage: VoterBagsList ListBags (r:1 w:1)
fn put_in_front_of() -> Weight {
	Weight::from_ref_time(70_028_000 as u64)
		.saturating_add(T::DbWeight::get().reads(10 as u64))
		.saturating_add(T::DbWeight::get().writes(6 as u64))
}
Expand All @@ -86,29 +88,29 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
impl WeightInfo for () {
// Storage: Staking Bonded (r:1 w:0)
// Storage: Staking Ledger (r:1 w:0)
// Storage: VoterBagsList ListNodes (r:4 w:4)
// Storage: VoterBagsList ListBags (r:1 w:1)
fn rebag_non_terminal() -> Weight {
	Weight::from_ref_time(70_469_000 as u64)
		.saturating_add(RocksDbWeight::get().reads(7 as u64))
		.saturating_add(RocksDbWeight::get().writes(5 as u64))
}
// Storage: Staking Bonded (r:1 w:0)
// Storage: Staking Ledger (r:1 w:0)
// Storage: VoterBagsList ListNodes (r:3 w:3)
// Storage: VoterBagsList ListBags (r:2 w:2)
fn rebag_terminal() -> Weight {
	Weight::from_ref_time(69_039_000 as u64)
		.saturating_add(RocksDbWeight::get().reads(7 as u64))
		.saturating_add(RocksDbWeight::get().writes(5 as u64))
}
// Storage: VoterBagsList ListNodes (r:4 w:4)
// Storage: Staking Bonded (r:2 w:0)
// Storage: Staking Ledger (r:2 w:0)
// Storage: VoterBagsList CounterForListNodes (r:1 w:1)
// Storage: VoterBagsList ListBags (r:1 w:1)
fn put_in_front_of() -> Weight {
	Weight::from_ref_time(70_028_000 as u64)
		.saturating_add(RocksDbWeight::get().reads(10 as u64))
		.saturating_add(RocksDbWeight::get().writes(6 as u64))
}
Expand Down
34 changes: 18 additions & 16 deletions frame/balances/src/weights.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,8 @@
//! Autogenerated weights for pallet_balances
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! DATE: 2022-09-20, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz`
//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024

// Executed Command:
Expand All @@ -32,8 +33,9 @@
// --extrinsic=*
// --execution=wasm
// --wasm-execution=compiled
// --template=./.maintain/frame-weight-template.hbs
// --heap-pages=4096
// --output=./frame/balances/src/weights.rs
// --template=./.maintain/frame-weight-template.hbs

#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(unused_parens)]
Expand All @@ -58,43 +60,43 @@ pub struct SubstrateWeight<T>(PhantomData<T>);
impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
// Storage: System Account (r:1 w:1)
fn transfer() -> Weight {
	Weight::from_ref_time(47_091_000 as u64)
		.saturating_add(T::DbWeight::get().reads(1 as u64))
		.saturating_add(T::DbWeight::get().writes(1 as u64))
}
// Storage: System Account (r:1 w:1)
fn transfer_keep_alive() -> Weight {
	Weight::from_ref_time(35_830_000 as u64)
		.saturating_add(T::DbWeight::get().reads(1 as u64))
		.saturating_add(T::DbWeight::get().writes(1 as u64))
}
// Storage: System Account (r:1 w:1)
fn set_balance_creating() -> Weight {
	Weight::from_ref_time(26_736_000 as u64)
		.saturating_add(T::DbWeight::get().reads(1 as u64))
		.saturating_add(T::DbWeight::get().writes(1 as u64))
}
// Storage: System Account (r:1 w:1)
fn set_balance_killing() -> Weight {
	Weight::from_ref_time(29_798_000 as u64)
		.saturating_add(T::DbWeight::get().reads(1 as u64))
		.saturating_add(T::DbWeight::get().writes(1 as u64))
}
// Storage: System Account (r:2 w:2)
fn force_transfer() -> Weight {
	Weight::from_ref_time(47_171_000 as u64)
		.saturating_add(T::DbWeight::get().reads(2 as u64))
		.saturating_add(T::DbWeight::get().writes(2 as u64))
}
// Storage: System Account (r:1 w:1)
fn transfer_all() -> Weight {
	Weight::from_ref_time(41_705_000 as u64)
		.saturating_add(T::DbWeight::get().reads(1 as u64))
		.saturating_add(T::DbWeight::get().writes(1 as u64))
}
// Storage: System Account (r:1 w:1)
fn force_unreserve() -> Weight {
	Weight::from_ref_time(23_053_000 as u64)
		.saturating_add(T::DbWeight::get().reads(1 as u64))
		.saturating_add(T::DbWeight::get().writes(1 as u64))
}
Expand All @@ -104,43 +106,43 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
impl WeightInfo for () {
// Storage: System Account (r:1 w:1)
fn transfer() -> Weight {
	Weight::from_ref_time(47_091_000 as u64)
		.saturating_add(RocksDbWeight::get().reads(1 as u64))
		.saturating_add(RocksDbWeight::get().writes(1 as u64))
}
// Storage: System Account (r:1 w:1)
fn transfer_keep_alive() -> Weight {
	Weight::from_ref_time(35_830_000 as u64)
		.saturating_add(RocksDbWeight::get().reads(1 as u64))
		.saturating_add(RocksDbWeight::get().writes(1 as u64))
}
// Storage: System Account (r:1 w:1)
fn set_balance_creating() -> Weight {
	Weight::from_ref_time(26_736_000 as u64)
		.saturating_add(RocksDbWeight::get().reads(1 as u64))
		.saturating_add(RocksDbWeight::get().writes(1 as u64))
}
// Storage: System Account (r:1 w:1)
fn set_balance_killing() -> Weight {
	Weight::from_ref_time(29_798_000 as u64)
		.saturating_add(RocksDbWeight::get().reads(1 as u64))
		.saturating_add(RocksDbWeight::get().writes(1 as u64))
}
// Storage: System Account (r:2 w:2)
fn force_transfer() -> Weight {
	Weight::from_ref_time(47_171_000 as u64)
		.saturating_add(RocksDbWeight::get().reads(2 as u64))
		.saturating_add(RocksDbWeight::get().writes(2 as u64))
}
// Storage: System Account (r:1 w:1)
fn transfer_all() -> Weight {
	Weight::from_ref_time(41_705_000 as u64)
		.saturating_add(RocksDbWeight::get().reads(1 as u64))
		.saturating_add(RocksDbWeight::get().writes(1 as u64))
}
// Storage: System Account (r:1 w:1)
fn force_unreserve() -> Weight {
	Weight::from_ref_time(23_053_000 as u64)
		.saturating_add(RocksDbWeight::get().reads(1 as u64))
		.saturating_add(RocksDbWeight::get().writes(1 as u64))
}
Expand Down
86 changes: 54 additions & 32 deletions frame/benchmarking/src/weights.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,8 @@
//! Autogenerated weights for frame_benchmarking
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! DATE: 2022-09-20, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz`
//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024

// Executed Command:
Expand All @@ -32,8 +33,9 @@
// --extrinsic=*
// --execution=wasm
// --wasm-execution=compiled
// --template=./.maintain/frame-weight-template.hbs
// --heap-pages=4096
// --output=./frame/benchmarking/src/weights.rs
// --template=./.maintain/frame-weight-template.hbs

#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(unused_parens)]
Expand All @@ -57,76 +59,96 @@ pub trait WeightInfo {
/// Weights for frame_benchmarking using the Substrate node and recommended hardware.
pub struct SubstrateWeight<T>(PhantomData<T>);
impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
/// The range of component `i` is `[0, 1000000]`.
fn addition(_i: u32, ) -> Weight {
	Weight::from_ref_time(119_000 as u64)
}
/// The range of component `i` is `[0, 1000000]`.
fn subtraction(_i: u32, ) -> Weight {
	Weight::from_ref_time(112_000 as u64)
}
/// The range of component `i` is `[0, 1000000]`.
fn multiplication(_i: u32, ) -> Weight {
	Weight::from_ref_time(118_000 as u64)
}
/// The range of component `i` is `[0, 1000000]`.
fn division(_i: u32, ) -> Weight {
	Weight::from_ref_time(122_000 as u64)
}
fn hashing(_i: u32, ) -> Weight {
Weight::from_ref_time(20_865_902_000 as u64)
/// The range of component `i` is `[0, 100]`.
fn hashing(i: u32, ) -> Weight {
Weight::from_ref_time(21_342_134_000 as u64)
// Standard Error: 39_648
.saturating_add(Weight::from_ref_time(823_058 as u64).saturating_mul(i as u64))
}
/// The range of component `i` is `[1, 100]`.
fn sr25519_verification(i: u32, ) -> Weight {
	Weight::from_ref_time(48_224_000 as u64)
		// Standard Error: 18_799
		.saturating_add(Weight::from_ref_time(46_506_091 as u64).saturating_mul(i as u64))
}
// Storage: Skipped Metadata (r:0 w:0)
/// The range of component `i` is `[0, 1000]`.
fn storage_read(i: u32, ) -> Weight {
	Weight::from_ref_time(127_000 as u64)
		// Standard Error: 3_908
		.saturating_add(Weight::from_ref_time(1_933_926 as u64).saturating_mul(i as u64))
		.saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(i as u64)))
}
// Storage: Skipped Metadata (r:0 w:0)
/// The range of component `i` is `[0, 1000]`.
fn storage_write(i: u32, ) -> Weight {
	Weight::from_ref_time(141_000 as u64)
		// Standard Error: 330
		.saturating_add(Weight::from_ref_time(377_734 as u64).saturating_mul(i as u64))
		.saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(i as u64)))
}
}

// For backwards compatibility and tests
impl WeightInfo for () {
/// The range of component `i` is `[0, 1000000]`.
fn addition(_i: u32, ) -> Weight {
	Weight::from_ref_time(119_000 as u64)
}
/// The range of component `i` is `[0, 1000000]`.
fn subtraction(_i: u32, ) -> Weight {
	Weight::from_ref_time(112_000 as u64)
}
/// The range of component `i` is `[0, 1000000]`.
fn multiplication(_i: u32, ) -> Weight {
	Weight::from_ref_time(118_000 as u64)
}
/// The range of component `i` is `[0, 1000000]`.
fn division(_i: u32, ) -> Weight {
	Weight::from_ref_time(122_000 as u64)
}
fn hashing(_i: u32, ) -> Weight {
Weight::from_ref_time(20_865_902_000 as u64)
/// The range of component `i` is `[0, 100]`.
fn hashing(i: u32, ) -> Weight {
Weight::from_ref_time(21_342_134_000 as u64)
// Standard Error: 39_648
.saturating_add(Weight::from_ref_time(823_058 as u64).saturating_mul(i as u64))
}
/// The range of component `i` is `[1, 100]`.
fn sr25519_verification(i: u32, ) -> Weight {
	Weight::from_ref_time(48_224_000 as u64)
		// Standard Error: 18_799
		.saturating_add(Weight::from_ref_time(46_506_091 as u64).saturating_mul(i as u64))
}
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Okay, so I think I found an issue @koute @ggwpez

If you see these results, the base weight is now 48_224_000, and the per i weight is 46_506_091. This is because each sr25519 verification is around 4X_000_000 weight.

Based on the changes here: #11804

We now take the minimum weight among all benchmarks, and make that the y intercept.

In this case, the benchmark runs between [1, 100], which means there was never a case where we tested this benchmark at 0. Thus the minimum weight is the weight measured at i = 1, and the per-`i` slope is fitted from that same data, so we are basically double counting the first verification. The y intercept SHOULD have been near 0, since this function does nothing at i = 0, but we never tested for that.

Potential Solutions:

  • Some way to cleverly detect this kind of situation, and handle it in the output results, solve the problem for everyone.
  • We can change the range to [0, 100] rather than [1, 100]... but there are probably many situations where users do not start their benchmarks at a strictly 0 value.
  • Have each benchmark run explicitly where each component is zero, to detect a proper minimum execution time... but not all benchmarks may execute successfully without the designer having that in mind.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hmmm.... this is a fair point! Although I don't think we can do much to handle this case automatically without making some sort of a tradeoff which is going to be wrong in certain situations.

There isn't really a way for the benchmarking machinery to know how the benchmark will behave at 0 without actually running it at 0. Sure, it can linearly extrapolate that information, and in this case it would be right! But in other cases it's going to be wrong, depending on whether the data is actually perfectly linear, on the range of input and output values, etc. So to me this is more of a question of which tradeoff do we want to pick. The consequence of the weights being too high is a lower throughput of the chain (which is bad); the consequence of the weights being too low is a potential for a denial of service attack and/or accidental overload of the chain (which I'd argue is worse).

Personally I'd probably just go for changing the range of the benchmark to [0, 100]. I don't think it's a huge thing to ask the users to run their benchmarks over the whole input domain of their inputs? Especially the minimum and the maximum boundary. If I may exaggerate a little bit and take this to the extreme, supporting the use case of "I want to have this function work in the range of [0, 10000000] while I only benchmark it/test it in the range of [100, 200]" would be quite weird, wouldn't it? So I think it's pretty natural to just require the whole input domain to be covered by the benchmarks.

@ggwpez Your thoughts?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

There isn't really a way for the benchmarking machinery to know how the benchmark will behave at 0 without actually running it at 0.

Exactly. The developer of the benchmark has to ensure that he always puts the legal min values in the component range.
We cannot hack around such a shortcoming in FRAME.
Otherwise the benchmarking will assume that 1 is the lowest possible value and therefore use it to calculate the minimum.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If we have a consensus that we'll go with the "the benchmarks should cover the whole input domain" then I can go through our benchmarks and fix them so that they do start at the minimum, and make a PR.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes I think so. @shawntabrizi any objections?

I can go through our benchmarks and fix them

If you want to - yes sure! Otherwise we can create an issue for it.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

this makes me question the new algorithm we are using, which presumes the fastest measured run of the extrinsic is the base extrinsic weight. I wonder if there are ways we can correct for negative numbers without introducing such a large assumption...

But yeah, i mean also correcting the ranges seems okay for now.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I wonder if there are ways we can correct for negative numbers without introducing such a large assumption...

Well, one hack we could do is to only do this if the base weight is negative, but this seems like only a partial fix to me. The linear regression can still generate a base weight of zero, or a base weight of 1, or 10, or 100, or another low number which is lower than the actual base weight if the extrinsic were to be executed with its components set to 0.

I definitely agree with you that this is not perfect, and it would be nice to do something better. But I don't really see what that would be without introducing other serious limitations.

Considering the main problem here is that the users might not run their benchmarks at 0 to get a true base weight maybe we could by default emit an error like "the components' range do not cover 0; are you sure this is what you want?" and have a longer explanation of the problem, and allow the users to disable the check with an #[attribute] of some sort? Then people would either correct their benchmarks, or opt-in that the minimums are actual true minimums. (I'm not entirely sure if this is a good idea though; we'd probably have to empirically test for how many benchmarks per your average chain this would trigger, and how many benchmarks would have to be corrected.)

But yeah, i mean also correcting the ranges seems okay for now.

Okay, great! I'll whip up a PR extending the ranges then.

Copy link
Member Author

@shawntabrizi shawntabrizi Sep 27, 2022

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

actual base weight if the extrinsic were to be executed with its components set to 0.

All extrinsics have a base weight added to them, so it is possible that an extrinsic with components 0 would give an extrinsic specific weight of 0, and that is okay.

// Storage: Skipped Metadata (r:0 w:0)
/// The range of component `i` is `[0, 1000]`.
fn storage_read(i: u32, ) -> Weight {
	Weight::from_ref_time(127_000 as u64)
		// Standard Error: 3_908
		.saturating_add(Weight::from_ref_time(1_933_926 as u64).saturating_mul(i as u64))
		.saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(i as u64)))
}
// Storage: Skipped Metadata (r:0 w:0)
/// The range of component `i` is `[0, 1000]`.
fn storage_write(i: u32, ) -> Weight {
	Weight::from_ref_time(141_000 as u64)
		// Standard Error: 330
		.saturating_add(Weight::from_ref_time(377_734 as u64).saturating_mul(i as u64))
		.saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(i as u64)))
}
}
Loading