fix(ci): Fix benchmark in CI (#1051)
* fix(ci): Fix benchmark job

- Add --no-install-recommends to the apt-get install command, which stops
  installing xdg-desktop-portal-gtk, a package that made the benchmarks
  run about 30 times slower.
- Stop using deprecated actions-rs GitHub actions
- Update hyperfine

* refactor(bench): Clippy fixes
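
The sketch below illustrates the recurring patterns behind those Clippy fixes. It is illustrative only (the demo helper and demo.json path are hypothetical, not part of wry) and roughly corresponds to Clippy's needless_borrow, expect_fun_call and manual_flatten lints.

use std::io::{BufRead, BufReader};
use std::process::Command;

// Hypothetical helper, used only to show the before/after shapes from the diff.
fn demo(path: &std::path::Path) {
    // Pass the argument array by value; borrowing it with `&[...]` is unnecessary
    // because `Command::args` accepts any `IntoIterator`.
    let mut cmd = Command::new("cargo");
    cmd.args(["--edges", "normal"]); // before: cmd.args(&["--edges", "normal"]);

    // `expect(format!(...).as_str())` builds the message even on success;
    // `unwrap_or_else` plus `panic!` formats it only on the error path.
    std::fs::write(path, "{}").unwrap_or_else(|_| panic!("Unable to write {:?}", path));

    // Iterate the Ok lines directly instead of an `if let Ok(..)` inside the loop body.
    if let Ok(file) = std::fs::File::open(path) {
        for line in BufReader::new(file).lines().flatten() {
            println!("{line}");
        }
    }
}

fn main() {
    demo(std::path::Path::new("demo.json"));
}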
olivierlemasle authored Oct 29, 2023
1 parent f420665 commit 81cfa37
Showing 4 changed files with 46 additions and 50 deletions.
31 changes: 17 additions & 14 deletions .github/workflows/bench.yml
@@ -9,7 +9,7 @@ on:
env:
RUST_BACKTRACE: 1
CARGO_PROFILE_DEV_DEBUG: 0 # This would add unnecessary bloat to the target folder, decreasing cache efficiency.
LC_ALL: en_US.UTF-8 # This prevents strace from changing it's number format to use commas.
LC_ALL: en_US.UTF-8 # This prevents strace from changing its number format to use commas.

concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
@@ -27,28 +27,31 @@ jobs:
runs-on: ${{ matrix.platform.os }}

steps:
- uses: actions/checkout@v2
- name: install ${{ matrix.rust }}
uses: actions-rs/toolchain@v1
- uses: actions/checkout@v4

- name: install Rust ${{ matrix.rust }}
uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ matrix.rust }}
override: true
components: rust-src
target: ${{ matrix.platform.target }}
targets: ${{ matrix.platform.target }}

- name: Setup python
uses: actions/setup-python@v2
uses: actions/setup-python@v4
with:
python-version: "3.x"
python-version: '3.10'
architecture: x64

- name: install depedencies
- name: install dependencies
run: |
python -m pip install --upgrade pip
sudo apt-get update
sudo apt-get install -y webkit2gtk-4.1-dev libayatana-appindicator3-dev xvfb
wget https://github.com/sharkdp/hyperfine/releases/download/v1.11.0/hyperfine_1.11.0_amd64.deb
sudo dpkg -i hyperfine_1.11.0_amd64.deb
sudo apt-get install -y --no-install-recommends \
libwebkit2gtk-4.1-dev libayatana-appindicator3-dev \
xvfb \
at-spi2-core
wget https://github.com/sharkdp/hyperfine/releases/download/v1.18.0/hyperfine_1.18.0_amd64.deb
sudo dpkg -i hyperfine_1.18.0_amd64.deb
pip install memory_profiler
- uses: Swatinem/rust-cache@v2
@@ -64,7 +67,7 @@ jobs:
- name: clone benchmarks_results
if: github.repository == 'tauri-apps/wry' && github.ref == 'refs/heads/dev'
uses: actions/checkout@v2
uses: actions/checkout@v4
with:
token: ${{ secrets.BENCH_PAT }}
path: gh-pages
@@ -82,7 +85,7 @@ jobs:
git commit --message "Update WRY benchmarks"
git push origin gh-pages
- name: worker info
- name: Print worker info
run: |
cat /proc/cpuinfo
cat /proc/meminfo
20 changes: 9 additions & 11 deletions bench/src/build_benchmark_jsons.rs
@@ -15,40 +15,38 @@ fn main() {

// current data
let current_data_buffer = BufReader::new(
File::open(&utils::target_dir().join("bench.json")).expect("Unable to read current data file"),
File::open(utils::target_dir().join("bench.json")).expect("Unable to read current data file"),
);
let current_data: utils::BenchResult =
serde_json::from_reader(current_data_buffer).expect("Unable to read current data buffer");

// all data's
let all_data_buffer =
BufReader::new(File::open(&wry_data).expect("Unable to read all data file"));
let all_data_buffer = BufReader::new(File::open(wry_data).expect("Unable to read all data file"));
let mut all_data: Vec<utils::BenchResult> =
serde_json::from_reader(all_data_buffer).expect("Unable to read all data buffer");

// add current data to all data
all_data.push(current_data);

// use only latest 20 elements from all data
let recent: Vec<utils::BenchResult>;
if all_data.len() > 20 {
recent = all_data[all_data.len() - 20..].to_vec();
let recent: Vec<utils::BenchResult> = if all_data.len() > 20 {
all_data[all_data.len() - 20..].to_vec()
} else {
recent = all_data.clone();
}
all_data.clone()
};

// write jsons
utils::write_json(
wry_data.to_str().expect("Something wrong with wry_data"),
&serde_json::to_value(&all_data).expect("Unable to build final json (all)"),
)
.expect(format!("Unable to write {:?}", wry_data).as_str());
.unwrap_or_else(|_| panic!("Unable to write {:?}", wry_data));

utils::write_json(
wry_recent
.to_str()
.expect("Something wrong with wry_recent"),
&serde_json::to_value(&recent).expect("Unable to build final json (recent)"),
&serde_json::to_value(recent).expect("Unable to build final json (recent)"),
)
.expect(format!("Unable to write {:?}", wry_recent).as_str());
.unwrap_or_else(|_| panic!("Unable to write {:?}", wry_recent));
}
21 changes: 10 additions & 11 deletions bench/src/run_benchmark.rs
@@ -49,7 +49,7 @@ fn run_strace_benchmarks(new_data: &mut utils::BenchResult) -> Result<()> {
let mut file = tempfile::NamedTempFile::new()?;

Command::new("strace")
.args(&[
.args([
"-c",
"-f",
"-o",
@@ -86,7 +86,7 @@ fn run_max_mem_benchmark() -> Result<HashMap<String, u64>> {
let benchmark_file = benchmark_file.to_str().unwrap();

let proc = Command::new("mprof")
.args(&[
.args([
"run",
"-C",
"-o",
@@ -101,7 +101,7 @@ fn run_max_mem_benchmark() -> Result<HashMap<String, u64>> {
println!("{:?}", proc_result);
results.insert(
name.to_string(),
utils::parse_max_mem(&benchmark_file).unwrap(),
utils::parse_max_mem(benchmark_file).unwrap(),
);
}

@@ -135,12 +135,12 @@ fn rlib_size(target_dir: &std::path::Path, prefix: &str) -> u64 {
fn get_binary_sizes(target_dir: &Path) -> Result<HashMap<String, u64>> {
let mut sizes = HashMap::<String, u64>::new();

let wry_size = rlib_size(&target_dir, "libwry");
let wry_size = rlib_size(target_dir, "libwry");
println!("wry {} bytes", wry_size);
sizes.insert("wry_rlib".to_string(), wry_size);

// add up size for everything in target/release/deps/libtao*
let tao_size = rlib_size(&target_dir, "libtao");
let tao_size = rlib_size(target_dir, "libtao");
println!("tao {} bytes", tao_size);
sizes.insert("tao_rlib".to_string(), tao_size);

@@ -182,9 +182,9 @@ fn cargo_deps() -> HashMap<String, usize> {
let mut cmd = Command::new("cargo");
cmd.arg("tree");
cmd.arg("--no-dedupe");
cmd.args(&["--edges", "normal"]);
cmd.args(&["--prefix", "none"]);
cmd.args(&["--target", target]);
cmd.args(["--edges", "normal"]);
cmd.args(["--prefix", "none"]);
cmd.args(["--target", target]);
cmd.current_dir(&utils::wry_root_path());

let full_deps = cmd.output().expect("failed to run cargo tree").stdout;
@@ -258,14 +258,13 @@ fn main() -> Result<()> {
println!("Starting wry benchmark");

let target_dir = utils::target_dir();
env::set_current_dir(&utils::bench_root_path())?;
env::set_current_dir(utils::bench_root_path())?;

let format =
time::format_description::parse("[year]-[month]-[day]T[hour]:[minute]:[second]Z").unwrap();
let now = time::OffsetDateTime::now_utc();

let mut new_data = utils::BenchResult {
created_at: format!("{}", now.format(&format).unwrap()),
created_at: now.format(&format).unwrap(),
sha1: utils::run_collect(&["git", "rev-parse", "HEAD"])
.0
.trim()
24 changes: 10 additions & 14 deletions bench/src/utils.rs
@@ -11,7 +11,6 @@ use std::{
io::{BufRead, BufReader},
path::PathBuf,
process::{Command, Output, Stdio},
u64,
};

#[derive(Default, Clone, Serialize, Deserialize, Debug)]
@@ -46,12 +45,11 @@ pub fn get_target() -> &'static str {
}

pub fn target_dir() -> PathBuf {
let target_dir = bench_root_path()
bench_root_path()
.join("tests")
.join("target")
.join(get_target())
.join("release");
target_dir.into()
.join("release")
}

pub fn bench_root_path() -> PathBuf {
@@ -93,16 +91,14 @@ pub fn parse_max_mem(file_path: &str) -> Option<u64> {
let output = BufReader::new(file);
let mut highest: u64 = 0;
// MEM 203.437500 1621617192.4123
for line in output.lines() {
if let Ok(line) = line {
// split line by space
let split = line.split(" ").collect::<Vec<_>>();
if split.len() == 3 {
// mprof generate result in MB
let current_bytes = str::parse::<f64>(split[1]).unwrap() as u64 * 1024 * 1024;
if current_bytes > highest {
highest = current_bytes;
}
for line in output.lines().flatten() {
// split line by space
let split = line.split(' ').collect::<Vec<_>>();
if split.len() == 3 {
// mprof generate result in MB
let current_bytes = str::parse::<f64>(split[1]).unwrap() as u64 * 1024 * 1024;
if current_bytes > highest {
highest = current_bytes;
}
}
}
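
As an aside on the parse_max_mem hunk above: mprof reports each sample in MB, and the helper keeps the highest sample, truncated to whole megabytes and converted to bytes. A minimal standalone sketch of that conversion (the sample data is made up and this is not the wry implementation):

fn max_mem_bytes(mprof_output: &str) -> u64 {
    let mut highest: u64 = 0;
    for line in mprof_output.lines() {
        // mprof lines look like: "MEM 203.437500 1621617192.4123"
        let split = line.split(' ').collect::<Vec<_>>();
        if split.len() == 3 {
            if let Ok(megabytes) = split[1].parse::<f64>() {
                highest = highest.max(megabytes as u64 * 1024 * 1024);
            }
        }
    }
    highest
}

fn main() {
    let sample = "MEM 203.437500 1621617192.4123\nMEM 180.000000 1621617193.4123";
    // 203.4375 MB truncates to 203, and 203 * 1024 * 1024 = 212_860_928 bytes.
    assert_eq!(max_mem_bytes(sample), 212_860_928);
}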
