Skip to content

Commit

Permalink
Add a script to measure build performance
Browse files Browse the repository at this point in the history
In order to avoid redundancy and make it more maintainable, I refactored
the performance script.
  - I moved the logic into a common script file (kani-perf-setup.sh)
  - I also changed the performance benchmarks to run in a temporary directory
    - To avoid messing up repository files
  - I changed benchcomp to work with the new location
  • Loading branch information
celinval committed Sep 20, 2023
1 parent c7c0f18 commit 6cdbb4b
Show file tree
Hide file tree
Showing 4 changed files with 116 additions and 36 deletions.
23 changes: 23 additions & 0 deletions scripts/kani-build-perf.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
#!/usr/bin/env bash
# Copyright Kani Contributors
# SPDX-License-Identifier: Apache-2.0 OR MIT
# Run compiler benchmarks (only build time)

set -o pipefail
set -o nounset

SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# Pull in the shared perf helpers (prep_build_perf, cleanup_perf, ...).
source "${SCRIPT_DIR}/kani-perf-setup.sh"

# prep_build_perf does the whole job: it builds Kani, stages the perf suite in
# a temporary directory, rewrites the expected files to only check for a
# successful build, runs the suite, cleans up, and finally exits the script
# with the suite's status via print_result. Because print_result calls `exit`,
# nothing placed after this line would ever run — the previous version of this
# script duplicated run_benchmarks/cleanup/reporting here, all dead code.
prep_build_perf
83 changes: 83 additions & 0 deletions scripts/kani-perf-setup.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,83 @@
#!/usr/bin/env bash
# Copyright Kani Contributors
# SPDX-License-Identifier: Apache-2.0 OR MIT
# This is just the setup stage of all performance benchmarks
# Other scripts should source this and invoke which setup they want to run

set -o pipefail
set -o nounset

# Absolute directory of this script; used to locate the repository root.
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# Repository root (scripts/ lives one level below it).
KANI_DIR=$SCRIPT_DIR/..
# In-repo location of the perf test suite that gets copied for each run.
PERF_DIR="${KANI_DIR}/tests/perf"

# Unique suite name per invocation (epoch timestamp) so runs don't collide
# and benchcomp can find the results folder by the kani_perf_run_* prefix.
SUITE="kani_perf_run_$(date +%s)"
# All benchmark files are copied here so repository files are never mutated.
RUN_DIR="/tmp/${SUITE}"

# Compile Kani itself before benchmarking. `cargo build-dev` is presumably a
# project-defined cargo alias/subcommand (not stdlib cargo) — TODO confirm;
# release mode so the benchmarks measure an optimized compiler.
build_kani() {
# Build Kani using release mode.
cargo build-dev -- --release
}

# Stage the perf suite into the temporary run directory and expose it under
# the repo's tests/ folder via a symlink so compiletest can find it by the
# generated suite name.
# Returns non-zero (and stops) if any staging step fails. Previously a failed
# mkdir did NOT stop the function (no set -e), so the "error out if the folder
# exists" intent was silently violated and later steps ran against stale data.
prep_perf_files() {
  # No -p on purpose: we want to fail if the folder already exists.
  mkdir "${RUN_DIR}" || return 1
  ln -s "${RUN_DIR}" "${KANI_DIR}/tests/${SUITE}" || return 1
  cp -r "${PERF_DIR}" "${RUN_DIR}/perf" || return 1
  # Overlay per-benchmark extra files on top of the copied tests.
  local overlay_dir orig_dir
  for overlay_dir in "${PERF_DIR}"/overlays/*/; do
    orig_dir=$(basename "${overlay_dir}")
    echo "Copying overlays for $orig_dir"
    cp -r -v "${overlay_dir}"* "${RUN_DIR}/perf/${orig_dir}/" || return 1
  done
}

# Tear down everything prep_perf_files created: the temporary run directory
# and the symlink that exposed it under the repository's tests/ folder.
cleanup_perf() {
  echo "Cleaning up..."
  rm -r "${RUN_DIR}"
  rm "${KANI_DIR}/tests/${SUITE}"
}

# Run the staged perf suite through compiletest. Any extra arguments (e.g.
# --kani-flag="--only-codegen") are forwarded verbatim to compiletest.
# Returns compiletest's exit status.
run_benchmarks() {
  # local: the old version leaked `suite` and `mode` into the global scope.
  local suite="${SUITE}"
  local mode="cargo-kani-test"
  echo "Check compiletest suite=$suite mode=$mode"
  cargo run -p compiletest -- --suite "$suite" --mode "$mode" --no-fail-fast --report-time "$@"
}

# Print a pass/fail banner for the perf run and terminate the whole script
# with the given status.
# $1 - exit code of the benchmark run (0 = success).
# NOTE: this calls `exit`, so it never returns to its caller.
print_result() {
  # local: the old version clobbered the caller's global `exit_code`.
  local exit_code=$1
  echo
  if [ "$exit_code" -eq 0 ]; then
    echo "All Kani perf tests completed successfully."
  else
    echo "***Kani perf tests failed."
  fi
  echo
  exit "$exit_code"
}

# Build-time-only benchmark: build Kani, stage the suite, then rewrite every
# expected file so compiletest only checks that the crate built ("Compiling"
# then "Finished") instead of checking verification results. Runs the suite
# with --only-codegen, cleans up, and exits via print_result (never returns).
prep_build_perf() {
  build_kani
  # Prepare for a verification first
  prep_perf_files

  # Now override expected files to just expect a successful build.
  # Stream find's output line by line instead of word-splitting $(find ...),
  # which would break on paths containing whitespace.
  local expected_file
  while IFS= read -r expected_file; do
    printf 'Compiling\nFinished\n' > "${expected_file}"
  done < <(find "${RUN_DIR}" -name "*expected")

  run_benchmarks --kani-flag="--only-codegen"
  exit_code=$?
  cleanup_perf
  print_result ${exit_code}
}

# Full verification benchmark: build Kani, stage the perf suite in the
# temporary run directory, execute it, tear everything down, and exit with
# the suite's status via print_result (which never returns).
benchmark_verification() {
  build_kani        # optimized build of Kani itself
  prep_perf_files   # copy tests + overlays into the temp dir
  run_benchmarks && exit_code=0 || exit_code=$?
  cleanup_perf      # remove temp dir and tests/ symlink
  print_result "${exit_code}"
}
37 changes: 2 additions & 35 deletions scripts/kani-perf.sh
Original file line number Diff line number Diff line change
Expand Up @@ -6,38 +6,5 @@ set -o pipefail
set -o nounset

SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
KANI_DIR=$SCRIPT_DIR/..

# Build Kani using release mode.
cargo build-dev -- --release

PERF_DIR="${KANI_DIR}/tests/perf"

# Copy expected files from overlay directories
to_delete=
for overlay_dir in ${PERF_DIR}/overlays/*/; do
orig_dir=$(basename ${overlay_dir})
echo "Copying overlays for $orig_dir"
copy_output=$(cp -r -v ${overlay_dir}* ${PERF_DIR}/${orig_dir}/)
copied_files=$(echo ${copy_output} | rev | cut -d' ' -f 1 | rev | tr -d "'")
# Add to the list of files to delete
to_delete="${to_delete} ${copied_files}"
done

suite="perf"
mode="cargo-kani-test"
echo "Check compiletest suite=$suite mode=$mode"
cargo run -p compiletest -- --suite $suite --mode $mode --no-fail-fast
exit_code=$?

echo "Cleaning up..."
rm ${to_delete}

echo
if [ $exit_code -eq 0 ]; then
echo "All Kani perf tests completed successfully."
else
echo "***Kani perf tests failed."
fi
echo
exit $exit_code
source ${SCRIPT_DIR}/kani-perf-setup.sh
benchmark_verification
9 changes: 8 additions & 1 deletion tools/benchcomp/benchcomp/parsers/kani_perf.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,14 @@ def get_metrics():

def main(root_dir):
benchmarks = {}
test_out_dir = root_dir / "build" / "tests" / "perf"
test_dir = os.path.join(root_dir, "build", "tests")
test_out_dirs = list(pathlib.Path(test_dir).glob("kani_perf_run_*"))
if len(test_out_dirs) == 0:
raise Exception("[error] No result folder was found")
elif len(test_out_dirs) > 1:
raise Exception(f"[error] Found too many performance results. "
f"Found:\n - {test_out_dirs}")
test_out_dir = test_out_dirs[0]
harness_pat = re.compile(r"Checking harness (?P<name>.+)\.\.\.")

metrics = _get_metrics()
Expand Down

0 comments on commit 6cdbb4b

Please sign in to comment.