121 changes: 84 additions & 37 deletions run_benchmark_simple.py
@@ -5,129 +5,176 @@
import argparse

def clean_page_cache():
    """
    Clean the system page cache by running sync and writing 3 to /proc/sys/vm/drop_caches.
    Requires root privileges (sudo) to execute successfully.
    """
    cmd = "sudo bash -c \"sync; echo 3 > /proc/sys/vm/drop_caches\""
    if verbose:
        print(cmd)
    os.system(cmd)

def run_benchmark(benchmark_path, draw=0):
    """
    Run benchmark tests on all .benchmark files in the specified directory

    Args:
        benchmark_path (str): Path to directory containing benchmark files
        draw (int): Flag to enable result plotting (1 = enable, 0 = disable)
    """
    # Verify the provided path is a valid directory
    if not os.path.isdir(benchmark_path):
        print(f"Error: {benchmark_path} is not a valid directory")
        return

    # Use the last two parts of the path as the output filename
    path_parts = os.path.normpath(benchmark_path).split(os.sep)
    output_name = f"{path_parts[-2]}_{path_parts[-1]}"
    output_csv = "output/" + f"{output_name}.csv"

    results = []
    # Traverse all files in the directory
    for root, dirs, files in os.walk(benchmark_path):
        # Sort files ending with .benchmark by the numeric part of the filename
        files = sorted([file for file in files if file.endswith('.benchmark')],
                       key=lambda x: int(x[1:3]))
        print(files)
        for file in files:
            if file.endswith('.benchmark'):
                # Construct the full file path
                benchmark_file = os.path.join(root, file)
                # Execute the benchmark command and capture its output
                try:
                    # Build the command with the --disable-timeout parameter
                    cmd = f"{os.path.join(pixels_home, 'cpp/build/release/benchmark/benchmark_runner')} \"{benchmark_file}\" --disable-timeout --Nruns={nRuns}"
                    if verbose:
                        print(cmd)
                    output = subprocess.getoutput(cmd)

                    # Collect the results of all runs
                    run_times = []
                    print(output)
                    for line in output.splitlines():
                        if line.startswith('Result:'):
                            time = float(line.split()[1])
                            run_times.append(time)
                            if verbose:
                                print(f"File {file} runtime: {time}")

                    # Save all run times if results exist
                    if run_times:
                        # Store the filename and all run times
                        results.append((file, run_times))
                        if verbose:
                            print(f"File {file} results: {run_times}")
                    else:
                        if verbose:
                            print(f"No results found for file {file}")

                except Exception as e:
                    print(f"An error {e} occurred when running {benchmark_file}")

    # Save results to a CSV file
    with open(output_csv, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        # Write header: benchmark name + one column per run
        max_runs = max(len(times) for _, times in results) if results else 0
        header = ['Benchmark'] + [f'Run {i+1} Time(s)' for i in range(max_runs)]
        writer.writerow(header)

        # Write all run results for each benchmark
        for file, times in results:
            # Ensure a consistent column count per row
            row = [file] + times + [''] * (max_runs - len(times))
            writer.writerow(row)

    print(f"Output saved to {output_csv}")

    # Generate a plot if requested
    if draw:
        plot_results(output_name, results)

def plot_results(title, results):
    """
    Generate a bar chart from benchmark results showing average run times

    Args:
        title (str): Title for the plot (used in chart title and filename)
        results (list): List of tuples containing (filename, run_times)
    """
    # Extract benchmark names and calculate average times
    benchmarks = [r[0].split('.')[0] for r in results]
    # Calculate the average time for each benchmark
    avg_times = [sum(r[1]) / len(r[1]) for r in results]

    # Create the plot
    plt.figure(figsize=(10, 6))
    plt.bar(benchmarks, avg_times, color='skyblue')
    plt.xlabel('Benchmarks')
    plt.ylabel('Average Time (s)')
    plt.title(f'{title} Results')
    plt.xticks(rotation=45)
    plt.tight_layout()
    plt.savefig("output/" + f"{title}.png")
    plt.show()
    print(f"Chart saved as {title}.png")


if __name__ == "__main__":
    # Global variables for configuration
    global pixels_home
    global verbose
    global nRuns

    # Get the PIXELS_SRC environment variable
    pixels_home = os.environ.get('PIXELS_SRC')
    current_dir = os.getcwd()
    # Create the output directory if it doesn't exist
    os.makedirs(os.path.join(current_dir, "output"), exist_ok=True)

    # Parse command line arguments
    parser = argparse.ArgumentParser(description="Run benchmark tests and save results.")
    parser.add_argument('--dir', type=str, required=True, help='Directory containing benchmark files')
    parser.add_argument('--draw', type=int, default=0, choices=[0, 1],
                        help='Draw chart: 1 = yes, 0 = no (default: 0)')
    parser.add_argument('--from-page-cache', help='Whether to read files from the page cache',
                        type=int, default=0, choices=[0, 1])
    parser.add_argument('--v', dest='verbose', help='Output commands',
                        type=int, default=1, choices=[0, 1])
    parser.add_argument('--nRuns', type=int, default=1, help='Number of times to run each benchmark')
    args = parser.parse_args()

    # Initialize configuration from arguments
    from_page_cache = args.from_page_cache
    verbose = args.verbose
    nRuns = args.nRuns

    # Clean page cache if not reading from the page cache
    if not from_page_cache:
        clean_page_cache()

    # Run benchmarks with the provided arguments
    run_benchmark(args.dir, args.draw)
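
    # Example invocation (a sketch; the directory below is hypothetical and it is
    # assumed that PIXELS_SRC points at a pixels checkout containing a release
    # build of cpp/build/release/benchmark/benchmark_runner):
    #
    #   python3 run_benchmark_simple.py --dir benchmarks/tpch/sf1 --draw 1 --nRuns 3
    #
    # clean_page_cache() invokes sudo internally, so the user needs sudo rights
    # unless --from-page-cache 1 is passed to benchmark against a warm cache.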

