
Commit 32af3b8

benchmark: improve filename handling, make compare_benchmarks reusable
- When benchmarking with branch names, a '/' in the name is treated as a path separator and creates directories; replace it with '-' when building result filenames
- Comparison output filename changed to "benchmark-comparison.txt"
- compare_benchmarks can now be imported and used independently of the CLI driver
1 parent 1df8cfb commit 32af3b8

File tree

1 file changed: +24 −21 lines changed

bin/admin/benchmark_compare.py

Lines changed: 24 additions & 21 deletions
@@ -12,6 +12,10 @@
 def run_command(command):
     subprocess.run(command, shell=True, check=True, capture_output=SILENT_OUTPUT, text=True)
 
+# replace slashes in branch names with dashes for file naming, otherwise it will create directories
+def process_branch_name(branch_name):
+    return branch_name.replace('/', '-')
+
 def get_current_git_state():
     try:
         # try branch name
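The sanitizer matters because --benchmark_out treats '/' in the output name as a path separator, so a branch name like feature/faster-json (a hypothetical example) would point the results file into a feature/ subdirectory that typically does not exist. The new helper flattens it:

    process_branch_name("feature/faster-json")   # -> 'feature-faster-json'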
@@ -41,34 +45,35 @@ def configure_and_build(commit, cmake_variables, benchmark_target, build_dir):
 
 def run_benchmarks(commit, benchmark_target, build_dir):
     print(f"Running benchmarks for commit: {commit}\n")
-    command = f"./{build_dir}/benchmarks/{benchmark_target} --benchmark_out_format=json --benchmark_time_unit=us --benchmark_out={commit}-results.json"
+    output = process_branch_name(commit) + "-results.json"
+    command = f"./{build_dir}/benchmarks/{benchmark_target} --benchmark_out_format=json --benchmark_time_unit=us --benchmark_out={output}"
     run_command(command)
-    print(f"Benchmarks for commit {commit} completed and results saved to {commit}-results.json\n")
-
-def compare_benchmarks(base_commit, head_commit, metric="cpu_time"):
-    base_file = f"{base_commit}-results.json"
-    new_file = f"{head_commit}-results.json"
-    output_file = f"{base_commit}-{head_commit}-comparison.txt"
+    print(f"Benchmarks for commit {commit} completed and results saved to {output}\n")
 
-    print(f"\nComparing benchmarks between {base_commit} and {head_commit} using metric: {metric}\n")
+def compare_benchmarks(base_benchmark, new_benchmark, metric="cpu_time"):
+    if metric not in ["cpu_time", "real_time"]:
+        raise ValueError("Invalid metric specified. Use 'cpu_time' or 'real_time'.")
 
     # Validate files exist and load JSON data
     try:
-        if not os.path.exists(base_file):
-            raise FileNotFoundError(f"Base file not found: {base_file}")
-        if not os.path.exists(new_file):
-            raise FileNotFoundError(f"New file not found: {new_file}")
+        if not os.path.exists(base_benchmark):
+            raise FileNotFoundError(f"Base file not found: {base_benchmark}")
+        if not os.path.exists(new_benchmark):
+            raise FileNotFoundError(f"New file not found: {new_benchmark}")
 
-        with open(base_file, 'r') as f:
+        with open(base_benchmark, 'r') as f:
             base_data = json.load(f)
-        with open(new_file, 'r') as f:
+        with open(new_benchmark, 'r') as f:
             new_data = json.load(f)
 
     except json.JSONDecodeError as e:
         raise ValueError(f"Invalid JSON format: {e}")
     except Exception as e:
         raise RuntimeError(f"Error reading files: {e}")
 
+    output_file = "benchmark-comparison.txt"
+    print(f"\nComparing benchmarks between {base_benchmark} and {new_benchmark} using metric: {metric}\n")
+
     # index benchmarks by name
     base_benchmarks = {b['name']: b for b in base_data['benchmarks']}
     new_benchmarks = {b['name']: b for b in new_data['benchmarks']}
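For context on the indexing step just above: with --benchmark_out_format=json, Google Benchmark writes a top-level object with 'context' and 'benchmarks' keys, and compare_benchmarks keys each entry by its 'name'. A minimal sketch of the expected shape as a Python literal, with illustrative values:

    # illustrative data; real files come from --benchmark_out_format=json
    base_data = {
        "context": {"host_name": "ci-runner", "date": "..."},  # run metadata (values hypothetical)
        "benchmarks": [
            {"name": "BM_Example", "real_time": 12.3, "cpu_time": 12.1, "time_unit": "us"},
        ],
    }
    base_benchmarks = {b['name']: b for b in base_data['benchmarks']}
    # base_benchmarks["BM_Example"]["cpu_time"] -> 12.1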
@@ -84,12 +89,8 @@ def compare_benchmarks(base_commit, head_commit, metric="cpu_time"):
             common_names.append(benchmark['name'])
 
     # output details
-    output_lines = []
-    output_lines.append(f"Benchmark Comparison: {base_commit} vs {head_commit}")
-    output_lines.append(f"Metric: {metric}")
-    output_lines.append(f"Time Unit: {time_unit}")
-    output_lines.append(f"Date: {os.popen('date').read().strip()}")
-    output_lines.append("")
+    output_lines = [f"Benchmark Comparison: {base_benchmark} vs {new_benchmark}", f"Metric: {metric}",
+                    f"Time Unit: {time_unit}", f"Date: {os.popen('date').read().strip()}", ""]
 
     if 'context' in base_data:
         output_lines.append("Base Benchmark Context:")
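One aside on the header block above: os.popen('date') shells out and assumes a Unix date binary. This commit keeps it as-is; a portable standard-library equivalent (an alternative, not what the script does) would be:

    from datetime import datetime, timezone

    # drop-in replacement for os.popen('date').read().strip(), no subprocess needed
    timestamp = datetime.now(timezone.utc).strftime("%a %b %d %H:%M:%S %Z %Y")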
@@ -175,7 +176,9 @@ def compare_benchmarks(base_commit, head_commit, metric="cpu_time"):
         run_benchmarks(args.head_commit, args.benchmark_target, args.build_dir)
 
         # Compare benchmarks
-        compare_benchmarks(args.base_commit, args.head_commit)
+        base_benchmark_file = process_branch_name(args.base_commit) + "-results.json"
+        new_benchmark_file = process_branch_name(args.head_commit) + "-results.json"
+        compare_benchmarks(base_benchmark_file, new_benchmark_file, metric="cpu_time")
         print("Benchmark comparison completed successfully.")
 
     except Exception as e:
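The net effect of the last hunk plus the new file-path parameters is that compare_benchmarks no longer derives filenames itself, so it can be reused against any pair of result files. A minimal sketch, assuming bin/admin is on sys.path and both JSON files already exist (branch names here are illustrative):

    from benchmark_compare import compare_benchmarks, process_branch_name

    base = process_branch_name("main") + "-results.json"
    head = process_branch_name("feature/faster-json") + "-results.json"
    # writes its report to benchmark-comparison.txt in the current directory
    compare_benchmarks(base, head, metric="cpu_time")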
