
Commit 5454502

feat: add benchmark (#101)
* feat: add benchmark
* Add CodSpeed badge to benchmark.md
  Added CodSpeed badge to benchmarking documentation.
* fix: update benchmark test script
* fix: update benchmark test script
* feat: create benchmark workflow and write to summary
* docs: update benchmark.md
1 parent 4f54e37 commit 5454502

File tree

7 files changed (+208 lines, -4 lines)


.github/workflows/benchmark.yml

Lines changed: 25 additions & 0 deletions
@@ -0,0 +1,25 @@
name: Benchmark Hooks

on:
  workflow_dispatch:

jobs:
  benchmark:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v2

      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: '3.8'

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pre-commit

      - name: Run benchmarks
        run: |
          python testing/benchmark_hooks.py
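
The workflow above declares only a `workflow_dispatch` trigger, so it never runs automatically. A minimal sketch of starting it by hand with the GitHub CLI (assuming `gh` is installed and authenticated against this repository; the workflow is addressed by its file name):

```bash
# Kick off the manual-only benchmark workflow on the default branch
gh workflow run benchmark.yml

# Follow a run until it completes (prompts to pick the run if none is given)
gh run watch
```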

.gitignore

Lines changed: 2 additions & 0 deletions
@@ -12,6 +12,8 @@ venv
 result.txt
 testing/main.c
 */*compile_commands.json
+testing/benchmark_results.txt
+testing/test-examples/*
 
 # Ignore Python wheel packages (clang-format, clang-tidy)
 clang-tidy-1*

.pre-commit-config.yaml

Lines changed: 1 addition & 4 deletions
@@ -9,12 +9,9 @@ repos:
       - id: check-yaml
       - id: check-toml
       - id: requirements-txt-fixer
-  - repo: https://github.com/asottile/pyupgrade
-    rev: v3.20.0
-    hooks:
-      - id: pyupgrade
   - repo: https://github.com/astral-sh/ruff-pre-commit
     rev: v0.12.11
     hooks:
       - id: ruff
+        args: [--fix]
       - id: ruff-format

docs/benchmark.md

Lines changed: 15 additions & 0 deletions
@@ -0,0 +1,15 @@
# Benchmarking

[![CodSpeed Badge](https://img.shields.io/endpoint?url=https://codspeed.io/badge.json)](https://codspeed.io/cpp-linter/cpp-linter-hooks)

This document outlines the benchmarking process for comparing the performance of cpp-linter-hooks and mirrors-clang-format.

## Running the Benchmark

```bash
python3 testing/benchmark_hooks.py
```

## Results

The results of the benchmarking process will be saved to `testing/benchmark_results.txt`.
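
For a one-off comparison without the script, the same `pre-commit` invocation the benchmark constructs can be timed by hand; a rough sketch, assuming the example sources have already been cloned into `testing/test-examples/`:

```bash
# Time a single pass of each hook configuration over the example sources
time pre-commit run --config testing/benchmark_hook_1.yaml --files testing/test-examples/*.c
time pre-commit run --config testing/benchmark_hook_2.yaml --files testing/test-examples/*.c
```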

testing/benchmark_hook_1.yaml

Lines changed: 6 additions & 0 deletions
@@ -0,0 +1,6 @@
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v1.1.0
    hooks:
      - id: clang-format
        args: [--style=file, --version=21]

testing/benchmark_hook_2.yaml

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
repos:
  - repo: https://github.com/pre-commit/mirrors-clang-format
    rev: v21.1.0
    hooks:
      - id: clang-format

testing/benchmark_hooks.py

Lines changed: 154 additions & 0 deletions
@@ -0,0 +1,154 @@
#!/usr/bin/env python3
"""
Benchmark script to compare performance of cpp-linter-hooks vs mirrors-clang-format.

Usage:
    python benchmark_hooks.py

Requirements:
- pre-commit must be installed and available in PATH
- Two config files:
    - testing/pre-commit-config-cpp-linter-hooks.yaml
    - testing/pre-commit-config-mirrors-clang-format.yaml
- Target files: testing/main.c (or adjust as needed)
"""

import os
import subprocess
import time
import statistics
import glob

HOOKS = [
    {
        "name": "cpp-linter-hooks",
        "config": "testing/benchmark_hook_1.yaml",
    },
    {
        "name": "mirrors-clang-format",
        "config": "testing/benchmark_hook_2.yaml",
    },
]

# Automatically find all C/C++ files in testing/ (and optionally src/, include/)
TARGET_FILES = glob.glob("testing/test-examples/*.c", recursive=True)

REPEATS = 5
RESULTS_FILE = "testing/benchmark_results.txt"


def git_clone():
    try:
        subprocess.run(
            [
                "git",
                "clone",
                "--depth",
                "1",
                "https://github.com/gouravthakur39/beginners-C-program-examples.git",
                "testing/test-examples",
            ],
            check=True,
        )
    except subprocess.CalledProcessError:
        pass


def run_hook(config, files):
    cmd = ["pre-commit", "run", "--config", config, "--files"] + files
    start = time.perf_counter()
    try:
        subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except subprocess.CalledProcessError:
        # Still record time even if hook fails
        pass
    end = time.perf_counter()
    return end - start


def safe_git_restore(files):
    # Only restore files tracked by git
    tracked = []
    for f in files:
        result = subprocess.run(
            ["git", "ls-files", "--error-unmatch", f],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        if result.returncode == 0:
            tracked.append(f)
    if tracked:
        subprocess.run(["git", "restore"] + tracked)


def benchmark():
    results = {}
    for hook in HOOKS:
        times = []
        print(f"\nBenchmarking {hook['name']}...")
        for i in range(REPEATS):
            safe_git_restore(TARGET_FILES)
            subprocess.run(["pre-commit", "clean"])
            t = run_hook(hook["config"], TARGET_FILES)
            print(f" Run {i + 1}: {t:.3f} seconds")
            times.append(t)
        results[hook["name"]] = times
    return results


def report(results):
    headers = ["Hook", "Avg (s)", "Std (s)", "Min (s)", "Max (s)", "Runs"]
    col_widths = [max(len(h), 16) for h in headers]
    # Calculate max width for each column
    for name, times in results.items():
        col_widths[0] = max(col_widths[0], len(name))
    print("\nBenchmark Results:\n")
    # Print header
    header_row = " | ".join(h.ljust(w) for h, w in zip(headers, col_widths))
    print(header_row)
    print("-+-".join("-" * w for w in col_widths))
    # Print rows
    lines = []
    for name, times in results.items():
        avg = statistics.mean(times)
        std = statistics.stdev(times) if len(times) > 1 else 0.0
        min_t = min(times)
        max_t = max(times)
        row = [
            name.ljust(col_widths[0]),
            f"{avg:.3f}".ljust(col_widths[1]),
            f"{std:.3f}".ljust(col_widths[2]),
            f"{min_t:.3f}".ljust(col_widths[3]),
            f"{max_t:.3f}".ljust(col_widths[4]),
            str(len(times)).ljust(col_widths[5]),
        ]
        print(" | ".join(row))
        lines.append(" | ".join(row))
    # Save to file
    with open(RESULTS_FILE, "w") as f:
        f.write(header_row + "\n")
        f.write("-+-".join("-" * w for w in col_widths) + "\n")
        for line in lines:
            f.write(line + "\n")
    print(f"\nResults saved to {RESULTS_FILE}")

    # Write to GitHub Actions summary if available
    summary_path = os.environ.get("GITHUB_STEP_SUMMARY")
    if summary_path:
        with open(summary_path, "a") as f:
            f.write("## Benchmark Results\n\n")
            f.write(header_row + "\n")
            f.write("-+-".join("-" * w for w in col_widths) + "\n")
            for line in lines:
                f.write(line + "\n")
            f.write("\n")


def main():
    git_clone()
    results = benchmark()
    report(results)


if __name__ == "__main__":
    main()
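
When run locally, the script prints per-run timings and writes the summary table to `testing/benchmark_results.txt`; in CI it also appends the same table to the job summary whenever `GITHUB_STEP_SUMMARY` is set. A short sketch of a local run:

```bash
# Clone the example sources (first run only), benchmark both configs, and show the table
python3 testing/benchmark_hooks.py
cat testing/benchmark_results.txt
```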
