#!/bin/bash
#
# Script to execute the benchmarks automatically and store the result files in a dedicated directory.
#
# Helper functions for print-outs
print_verification_row(){
    printf "%-15s %-8s %-14s\n" "$1" "$2" "$3"
}
print_sec_separator(){
    echo -e "++++++++++++++++++++++++++++++++++++++++++\n"
}
print_subsec_separator(){
    echo "------------------------------------------"
}
# Configuration
BASE_DIR="/home/nwachuch/bld6/traccc"
# Define the location of the data directory that will be used in the benchmarks
export TRACCC_TEST_DATA_DIR="$BASE_DIR/data"
# Create a directory to store the results
timemarker=$(date +"%Y_%m_%d_%H_%M")
OUTPUT_DIR="./benchmark_results_$timemarker"
# OUTPUT_DIR="./benchmark_results"  # TODO: use a fixed directory to avoid creating endlessly many directories
mkdir -p "$OUTPUT_DIR"
# Helper function to execute a benchmark, always with the same parameters for repetitions etc.
# Expects the benchmark suffix, the output file name, and an optional benchmark filter as arguments.
# Example usage: "execute_benchmark seq_cca seq_cca" will execute the benchmark "traccc_benchmark_seq_cca".
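# An optional third argument restricts the run via Google Benchmark's regex filter,
# e.g. (hypothetical output name; the "fast_sv_2/" filter appears in the GPU runs below):
# execute_benchmark cuda_cca cuda_cca_fast_sv "fast_sv_2/"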
execute_benchmark(){
    # Fetch arguments and set up the parameters for the command
    benchmark="$1"
    benchmark_output_name="$2"
    filter="--benchmark_filter=$3"  # an empty regex matches all benchmark cases
    # Prepare the command to execute
    exe_cmd="$BASE_DIR/build/bin/traccc_benchmark_$benchmark \
        --benchmark_repetitions=1 \
        --benchmark_display_aggregates_only=true \
        --benchmark_out=$OUTPUT_DIR/${benchmark_output_name}.json \
        --benchmark_out_format=json \
        $filter"
    # Print context information
    t1=$(date +"%s")
    print_sec_separator
    echo "Start Benchmark $benchmark at $(date -d "@$t1")"
    print_subsec_separator
    # Run the benchmark
    echo -e "Executing Command: \n $exe_cmd"
    print_subsec_separator
    eval "$exe_cmd"
    print_subsec_separator
    # Print a summary
    t2=$(date +"%s")
    echo "Finish Benchmark $benchmark at $(date -d "@$t2")"
    exe_time_seconds=$((t2 - t1))
    echo "Overall execution time: $((exe_time_seconds / 60)) min $((exe_time_seconds % 60)) sec"
    print_subsec_separator
}
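# Each invocation writes its results to "$OUTPUT_DIR/<output name>.json" in the
# Google Benchmark JSON format, so the runs can be post-processed later.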
# Define the benchmarks that should be run
BENCHMARKS=(
    "cuda_cca"
    "seq_cca"
    "stdpar_cca"
)
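# Each entry must correspond to an executable named "traccc_benchmark_<entry>"
# in "$BASE_DIR/build/bin"; this naming convention is relied on below.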
# Check that all executables exist
at_least_one_error=0
print_sec_separator
echo "Verify Executables"
print_subsec_separator
print_verification_row "Benchmark" "Exists" "Compilation Type"
print_subsec_separator
for benchmark in "${BENCHMARKS[@]}"; do
    # Check for file existence
    exe="$BASE_DIR/build/bin/traccc_benchmark_$benchmark"
    if [ -f "$exe" ]; then
        exists="YES"
        compiled="NAN"  # the check for the build type has been removed because it is not detectable with objdump --syms
    else
        exists="NO"
        compiled="NAN"
        at_least_one_error=1
    fi
    print_verification_row "$benchmark" "$exists" "$compiled"
done
if [[ $at_least_one_error -eq 1 ]]; then
    print_subsec_separator
    echo "Make sure that all executables exist and are compiled for release..."
    exit 1
fi
# Run each benchmark
for benchmark in "${BENCHMARKS[@]}"; do
    execute_benchmark "$benchmark" "$benchmark"
done
print_sec_separator
echo "Run selcted benchmarks on the GeForce 2080"
print_subsec_separator
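# CUDA_VISIBLE_DEVICES=1 exposes only the second GPU to the benchmark processes;
# inside each process that GPU is then addressed as device 0.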
export CUDA_VISIBLE_DEVICES=1
echo "--> Set CUDA_VISIBLE_DEVICES=1"
execute_benchmark "cuda_cca" "cuda_cca_geforce_2080" "fast_sv_2/"
execute_benchmark "stdpar_cca" "stdpar_cca_fast_sv_2_geforce_2080" "fast_sv_2/"
execute_benchmark "stdpar_cca" "stdpar_cca_simplified_sv_geforce_2080" "fast_sv_2/"
export CUDA_VISIBLE_DEVICES=0
echo "--> Set CUDA_VISIBLE_DEVICES=0"