This repository has been archived by the owner on Nov 15, 2023. It is now read-only.

Commit

Support running the pallet benchmarks analysis without running the benchmarks (#12361)

* Support running the pallet benchmarks analysis without running the benchmarks

* Rename `override-results` to `json-input` and update the help comment

* ".git/.scripts/fmt.sh" 1

Co-authored-by: command-bot <>
koute authored Sep 28, 2022
1 parent 1b1a5e1 commit 0ec4373
Showing 3 changed files with 82 additions and 15 deletions.
frame/benchmarking/src/utils.rs (20 changes: 14 additions & 6 deletions)

@@ -23,14 +23,14 @@ use frame_support::{
traits::StorageInfo,
};
#[cfg(feature = "std")]
-use serde::Serialize;
+use serde::{Deserialize, Serialize};
use sp_io::hashing::blake2_256;
use sp_runtime::traits::TrailingZeroInput;
use sp_std::{prelude::Box, vec::Vec};
use sp_storage::TrackedStorageKey;

/// An alphabet of possible parameters to use for benchmarking.
-#[cfg_attr(feature = "std", derive(Serialize))]
+#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
#[derive(Encode, Decode, Clone, Copy, PartialEq, Debug)]
#[allow(missing_docs)]
#[allow(non_camel_case_types)]
@@ -71,7 +71,7 @@ impl std::fmt::Display for BenchmarkParameter {
}

/// The results of a single of benchmark.
-#[cfg_attr(feature = "std", derive(Serialize))]
+#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
#[derive(Encode, Decode, Clone, PartialEq, Debug)]
pub struct BenchmarkBatch {
/// The pallet containing this benchmark.
@@ -89,7 +89,7 @@ pub struct BenchmarkBatch {

// TODO: could probably make API cleaner here.
/// The results of a single of benchmark, where time and db results are separated.
-#[cfg_attr(feature = "std", derive(Serialize))]
+#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
#[derive(Encode, Decode, Clone, PartialEq, Debug)]
pub struct BenchmarkBatchSplitResults {
/// The pallet containing this benchmark.
@@ -110,7 +110,7 @@ pub struct BenchmarkBatchSplitResults {
/// Result from running benchmarks on a FRAME pallet.
/// Contains duration of the function call in nanoseconds along with the benchmark parameters
/// used for that benchmark result.
-#[cfg_attr(feature = "std", derive(Serialize))]
+#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
#[derive(Encode, Decode, Default, Clone, PartialEq, Debug)]
pub struct BenchmarkResult {
pub components: Vec<(BenchmarkParameter, u32)>,
@@ -121,7 +121,7 @@ pub struct BenchmarkResult {
pub writes: u32,
pub repeat_writes: u32,
pub proof_size: u32,
-#[cfg_attr(feature = "std", serde(skip_serializing))]
+#[cfg_attr(feature = "std", serde(skip))]
pub keys: Vec<(Vec<u8>, u32, u32, bool)>,
}

@@ -141,6 +141,14 @@ mod serde_as_str {
let s = std::str::from_utf8(value).map_err(serde::ser::Error::custom)?;
serializer.collect_str(s)
}

+pub fn deserialize<'de, D>(deserializer: D) -> Result<Vec<u8>, D::Error>
+where
+D: serde::de::Deserializer<'de>,
+{
+let s: &str = serde::de::Deserialize::deserialize(deserializer)?;
+Ok(s.into())
+}
}

/// Possible errors returned from the benchmarking pipeline.
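The new `deserialize` function gives the existing string-based serializer a counterpart, so the byte-vector name fields serialized through `serde_as_str` can be read back from the JSON output. A minimal, self-contained sketch of the same pattern (module and struct names here are illustrative, not the ones used in this crate):

```rust
use serde::{Deserialize, Serialize};

// Stand-in for the crate's `serde_as_str` helper: bytes are written as a
// UTF-8 string and read back into a `Vec<u8>`.
mod bytes_as_str {
    pub fn serialize<S>(value: &[u8], serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        let s = std::str::from_utf8(value).map_err(serde::ser::Error::custom)?;
        serializer.collect_str(s)
    }

    pub fn deserialize<'de, D>(deserializer: D) -> Result<Vec<u8>, D::Error>
    where
        D: serde::de::Deserializer<'de>,
    {
        let s: &str = serde::de::Deserialize::deserialize(deserializer)?;
        Ok(s.into())
    }
}

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct Named {
    #[serde(with = "bytes_as_str")]
    name: Vec<u8>,
}

fn main() {
    let original = Named { name: b"transfer".to_vec() };
    // The bytes appear as a plain string in the JSON output...
    let json = serde_json::to_string(&original).unwrap();
    assert_eq!(json, r#"{"name":"transfer"}"#);
    // ...and deserializing turns that string back into the same bytes.
    let round_tripped: Named = serde_json::from_str(&json).unwrap();
    assert_eq!(round_tripped, original);
}
```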
utils/frame/benchmarking-cli/src/pallet/command.rs (67 changes: 60 additions & 7 deletions)

@@ -134,6 +134,20 @@ impl PalletCmd {
};
}

+if let Some(json_input) = &self.json_input {
+let raw_data = match std::fs::read(json_input) {
+Ok(raw_data) => raw_data,
+Err(error) =>
+return Err(format!("Failed to read {:?}: {}", json_input, error).into()),
+};
+let batches: Vec<BenchmarkBatchSplitResults> = match serde_json::from_slice(&raw_data) {
+Ok(batches) => batches,
+Err(error) =>
+return Err(format!("Failed to deserialize {:?}: {}", json_input, error).into()),
+};
+return self.output_from_results(&batches)
+}

let spec = config.chain_spec;
let strategy = self.execution.unwrap_or(ExecutionStrategy::Native);
let pallet = self.pallet.clone().unwrap_or_default();
@@ -396,8 +410,16 @@ impl PalletCmd {

// Combine all of the benchmark results, so that benchmarks of the same pallet/function
// are together.
-let batches: Vec<BenchmarkBatchSplitResults> = combine_batches(batches, batches_db);
+let batches = combine_batches(batches, batches_db);
+self.output(&batches, &storage_info, &component_ranges)
+}
+
+fn output(
+&self,
+batches: &[BenchmarkBatchSplitResults],
+storage_info: &[StorageInfo],
+component_ranges: &HashMap<(Vec<u8>, Vec<u8>), Vec<ComponentRange>>,
+) -> Result<()> {
// Jsonify the result and write it to a file or stdout if desired.
if !self.jsonify(&batches)? {
// Print the summary only if `jsonify` did not write to stdout.
@@ -412,10 +434,45 @@ impl PalletCmd {
Ok(())
}

+fn output_from_results(&self, batches: &[BenchmarkBatchSplitResults]) -> Result<()> {
+let mut component_ranges =
+HashMap::<(Vec<u8>, Vec<u8>), HashMap<String, (u32, u32)>>::new();
+for batch in batches {
+let range = component_ranges
+.entry((batch.pallet.clone(), batch.benchmark.clone()))
+.or_default();
+for result in &batch.time_results {
+for (param, value) in &result.components {
+let name = param.to_string();
+let (ref mut min, ref mut max) = range.entry(name).or_insert((*value, *value));
+if *value < *min {
+*min = *value;
+}
+if *value > *max {
+*max = *value;
+}
+}
+}
+}
+
+let component_ranges: HashMap<_, _> = component_ranges
+.into_iter()
+.map(|(key, ranges)| {
+let ranges = ranges
+.into_iter()
+.map(|(name, (min, max))| ComponentRange { name, min, max })
+.collect();
+(key, ranges)
+})
+.collect();
+
+self.output(batches, &[], &component_ranges)
+}

/// Jsonifies the passed batches and writes them to stdout or into a file.
/// Can be configured via `--json` and `--json-file`.
/// Returns whether it wrote to stdout.
-fn jsonify(&self, batches: &Vec<BenchmarkBatchSplitResults>) -> Result<bool> {
+fn jsonify(&self, batches: &[BenchmarkBatchSplitResults]) -> Result<bool> {
if self.json_output || self.json_file.is_some() {
let json = serde_json::to_string_pretty(&batches)
.map_err(|e| format!("Serializing into JSON: {:?}", e))?;
@@ -432,11 +489,7 @@
}

/// Prints the results as human-readable summary without raw timing data.
-fn print_summary(
-&self,
-batches: &Vec<BenchmarkBatchSplitResults>,
-storage_info: &Vec<StorageInfo>,
-) {
+fn print_summary(&self, batches: &[BenchmarkBatchSplitResults], storage_info: &[StorageInfo]) {
for batch in batches.iter() {
// Print benchmark metadata
println!(
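The new `--json-input` handling above reads previously dumped results and feeds them straight into the shared output step, recomputing the per-component `(min, max)` ranges from the recorded data instead of from a live run. A rough, self-contained sketch of that flow, using simplified stand-in types (the real `BenchmarkBatchSplitResults`/`BenchmarkResult` live in `frame/benchmarking/src/utils.rs` and carry more fields; `results.json` is a hypothetical file produced earlier with `--json-file`):

```rust
use std::collections::HashMap;

use serde::Deserialize;

// Simplified stand-ins for the real batch/result structs; serde ignores
// unknown JSON fields by default, so extra fields in the real dump are fine.
#[derive(Deserialize)]
struct Batch {
    pallet: String,
    benchmark: String,
    time_results: Vec<TimeResult>,
}

#[derive(Deserialize)]
struct TimeResult {
    components: Vec<(String, u32)>,
}

/// For every (pallet, benchmark) pair, track the smallest and largest value
/// seen for each component across all recorded results.
fn component_ranges(batches: &[Batch]) -> HashMap<(String, String), HashMap<String, (u32, u32)>> {
    let mut ranges: HashMap<(String, String), HashMap<String, (u32, u32)>> = HashMap::new();
    for batch in batches {
        let per_component = ranges
            .entry((batch.pallet.clone(), batch.benchmark.clone()))
            .or_default();
        for result in &batch.time_results {
            for (name, value) in &result.components {
                let entry = per_component.entry(name.clone()).or_insert((*value, *value));
                entry.0 = entry.0.min(*value);
                entry.1 = entry.1.max(*value);
            }
        }
    }
    ranges
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Read the JSON dump instead of running any benchmarks.
    let raw_data = std::fs::read("results.json")
        .map_err(|error| format!("Failed to read results.json: {}", error))?;
    let batches: Vec<Batch> = serde_json::from_slice(&raw_data)
        .map_err(|error| format!("Failed to deserialize results.json: {}", error))?;
    for ((pallet, benchmark), components) in component_ranges(&batches) {
        for (component, (min, max)) in components {
            println!("{pallet}::{benchmark}: {component} ranges over [{min}, {max}]");
        }
    }
    Ok(())
}
```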
utils/frame/benchmarking-cli/src/pallet/mod.rs (10 changes: 8 additions & 2 deletions)

@@ -35,11 +35,11 @@ fn parse_pallet_name(pallet: &str) -> String {
#[derive(Debug, clap::Parser)]
pub struct PalletCmd {
/// Select a FRAME Pallet to benchmark, or `*` for all (in which case `extrinsic` must be `*`).
-#[clap(short, long, parse(from_str = parse_pallet_name), required_unless_present = "list")]
+#[clap(short, long, parse(from_str = parse_pallet_name), required_unless_present_any = ["list", "json-input"])]
pub pallet: Option<String>,

/// Select an extrinsic inside the pallet to benchmark, or `*` for all.
-#[clap(short, long, required_unless_present = "list")]
+#[clap(short, long, required_unless_present_any = ["list", "json-input"])]
pub extrinsic: Option<String>,

/// Select how many samples we should take across the variable components.
@@ -166,4 +166,10 @@ pub struct PalletCmd {
/// template for that purpose.
#[clap(long)]
pub no_storage_info: bool,

+/// A path to a `.json` file with existing benchmark results generated with `--json` or
+/// `--json-file`. When specified the benchmarks are not actually executed, and the data for
+/// the analysis is read from this file.
+#[clap(long)]
+pub json_input: Option<PathBuf>,
}
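With the attribute changes above, `--pallet` and `--extrinsic` stop being mandatory whenever `--list` or the new `--json-input` is given. A small sketch of that behaviour, assuming a clap v3-style derive like the one used in this file (the struct, binary name, and `results.json` path are made up; the attributes mirror the diff):

```rust
use std::path::PathBuf;

use clap::Parser;

#[derive(Debug, Parser)]
struct Cmd {
    /// Required unless `--list` or `--json-input` is present.
    #[clap(short, long, required_unless_present_any = ["list", "json-input"])]
    pallet: Option<String>,

    /// Required unless `--list` or `--json-input` is present.
    #[clap(short, long, required_unless_present_any = ["list", "json-input"])]
    extrinsic: Option<String>,

    /// List the available benchmarks instead of running them.
    #[clap(long)]
    list: bool,

    /// Re-run only the analysis from a previous `--json-file` dump.
    #[clap(long)]
    json_input: Option<PathBuf>,
}

fn main() {
    // Analysis-only invocation: no pallet/extrinsic needed.
    assert!(Cmd::try_parse_from(["bench", "--json-input", "results.json"]).is_ok());
    // Without `--list` or `--json-input`, the pallet is still mandatory.
    assert!(Cmd::try_parse_from(["bench"]).is_err());
}
```

In practice the intended workflow is to dump results once with `--json-file <path>` and later hand the same file back via `--json-input <path>`, so the analysis output can be regenerated without re-executing any benchmarks.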
