From dcd38c1d14e4eeee06e61da46a113e47d74a58a0 Mon Sep 17 00:00:00 2001
From: R-Goc
Date: Thu, 15 May 2025 19:04:53 +0200
Subject: [PATCH] Add Windows support

This commit adds Windows support. Timing is now done with the time
module, since the time command on Windows sets the system clock rather
than measuring a program. The path of the test to execute is also
joined with os.path.join to ensure the proper separator. As /bin/bash
is not available on Windows, shell mode for subprocess was disabled;
this should not be an issue, as the command runs an executable either
way.
---
 run.py | 108 +++++++++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 89 insertions(+), 19 deletions(-)

diff --git a/run.py b/run.py
index f2cbfc6..7e5ccfc 100755
--- a/run.py
+++ b/run.py
@@ -6,44 +6,89 @@
 import subprocess
 import statistics
 import sys
+import time
 from tabulate import tabulate
 
-def run_benchmark(executable, suite, test_file, iterations, index, total, suppress_output=False):
+
+def run_benchmark(
+    executable, suite, test_file, iterations, index, total, suppress_output=False
+):
     times = []
     for i in range(iterations):
         if not suppress_output:
-            print(f"[{index}/{total}] {suite}/{test_file} (Iteration {i+1}/{iterations}, Avg: {statistics.mean(times):.3f}s)" if times else f"[{index}/{total}] {suite}/{test_file} (Iteration {i+1}/{iterations})", end="\r")
+            print(
+                f"[{index}/{total}] {suite}/{test_file} (Iteration {i + 1}/{iterations}, Avg: {statistics.mean(times):.3f}s)"
+                if times
+                else f"[{index}/{total}] {suite}/{test_file} (Iteration {i + 1}/{iterations})",
+                end="\r",
+            )
             sys.stdout.flush()
-        result = subprocess.run([f"time -p {executable} {suite}/{test_file}"], shell=True, stderr=subprocess.PIPE, stdout=subprocess.DEVNULL, text=True, executable="/bin/bash")
+        command = [executable, os.path.join(suite, test_file)]
+        start_time = time.perf_counter_ns()
+        result = subprocess.run(
+            command,
+            stderr=subprocess.PIPE,
+            stdout=subprocess.DEVNULL,
+            text=True,
+        )
+        end_time = time.perf_counter_ns()
         result.check_returncode()
+        elapsed_time_ns = end_time - start_time
+        elapsed_time_s = elapsed_time_ns / 1_000_000_000.0
 
-        time_output = result.stderr.split("\n")
-        real_time_line = [line for line in time_output if "real" in line][0]
-        time_taken = float(real_time_line.split(" ")[-1])
-        times.append(time_taken)
+        times.append(elapsed_time_s)
 
     mean = statistics.mean(times)
     stdev = statistics.stdev(times) if len(times) > 1 else 0
     min_time = min(times)
     max_time = max(times)
 
     if not suppress_output:
-        print(f"[{index}/{total}] {suite}/{test_file} completed. Mean: {mean:.3f}s ± {stdev:.3f}s, Range: {min_time:.3f}s … {max_time:.3f}s\033[K")
+        print(
+            f"[{index}/{total}] {suite}/{test_file} completed. Mean: {mean:.3f}s ± {stdev:.3f}s, Range: {min_time:.3f}s … {max_time:.3f}s\033[K"
+        )
         sys.stdout.flush()
 
     return mean, stdev, min_time, max_time, times
 
+
 def main():
     parser = argparse.ArgumentParser(description="Run JavaScript benchmarks.")
-    parser.add_argument("--executable", "-e", default="js", help="Path to the JavaScript executable.")
-    parser.add_argument("--iterations", "-i", type=int, default=3, help="Number of iterations for each test.")
-    parser.add_argument("--suites", "-s", default="all", help="Comma-separated list of suites to run.")
-    parser.add_argument("--warmups", "-w", type=int, default=0, help="Number of warm-up runs of SunSpider.")
-    parser.add_argument("--output", "-o", default="results.json", help="JSON output file name.")
+    parser.add_argument(
+        "--executable", "-e", default="js", help="Path to the JavaScript executable."
+    )
+    parser.add_argument(
+        "--iterations",
+        "-i",
+        type=int,
+        default=3,
+        help="Number of iterations for each test.",
+    )
+    parser.add_argument(
+        "--suites", "-s", default="all", help="Comma-separated list of suites to run."
+    )
+    parser.add_argument(
+        "--warmups",
+        "-w",
+        type=int,
+        default=0,
+        help="Number of warm-up runs of SunSpider.",
+    )
+    parser.add_argument(
+        "--output", "-o", default="results.json", help="JSON output file name."
+    )
     args = parser.parse_args()
 
     if args.suites == "all":
-        suites = ["SunSpider", "Kraken", "Octane", "JetStream", "JetStream3", "RegExp", "MicroBench"]
+        suites = [
+            "SunSpider",
+            "Kraken",
+            "Octane",
+            "JetStream",
+            "JetStream3",
+            "RegExp",
+            "MicroBench",
+        ]
     else:
         suites = args.suites.split(",")
 
@@ -53,7 +98,15 @@ def main():
         for test_file in sorted(os.listdir("SunSpider")):
             if not test_file.endswith(".js"):
                 continue
-            run_benchmark(args.executable, "SunSpider", test_file, 1, 0, 0, suppress_output=True)
+            run_benchmark(
+                args.executable,
+                "SunSpider",
+                test_file,
+                1,
+                0,
+                0,
+                suppress_output=True,
+            )
 
     results = {}
     table_data = []
@@ -65,21 +118,38 @@ def main():
         for test_file in sorted(os.listdir(suite)):
             if not test_file.endswith(".js"):
                 continue
-            mean, stdev, min_time, max_time, runs = run_benchmark(args.executable, suite, test_file, args.iterations, current_test, total_tests)
+            mean, stdev, min_time, max_time, runs = run_benchmark(
+                args.executable,
+                suite,
+                test_file,
+                args.iterations,
+                current_test,
+                total_tests,
+            )
             results[suite][test_file] = {
                 "mean": mean,
                 "stdev": stdev,
                 "min": min_time,
                 "max": max_time,
-                "runs": runs
+                "runs": runs,
             }
-            table_data.append([suite, test_file, f"{mean:.3f} ± {stdev:.3f}", f"{min_time:.3f} … {max_time:.3f}"])
+            table_data.append(
+                [
+                    suite,
+                    test_file,
+                    f"{mean:.3f} ± {stdev:.3f}",
+                    f"{min_time:.3f} … {max_time:.3f}",
+                ]
+            )
             current_test += 1
 
-    print(tabulate(table_data, headers=["Suite", "Test", "Mean ± σ", "Range (min … max)"]))
+    print(
+        tabulate(table_data, headers=["Suite", "Test", "Mean ± σ", "Range (min … max)"])
+    )
 
     with open(args.output, "w") as f:
         json.dump(results, f, indent=4)
 
+
 if __name__ == "__main__":
     main()
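
--
Reviewer note (illustrative, not part of the patch): the cross-platform
timing pattern the patch adopts, reduced to a minimal standalone sketch.
The executable name "js" and the test path used below are placeholder
assumptions for illustration, not values mandated by the patch.

    #!/usr/bin/env python3
    import os
    import subprocess
    import time

    def time_once(executable, suite, test_file):
        # Build an argument list; with shell=False (the default) no
        # /bin/bash is involved, so this also runs on Windows, and
        # os.path.join picks the platform's path separator.
        command = [executable, os.path.join(suite, test_file)]

        # time.perf_counter_ns() is a monotonic, high-resolution clock
        # available on every platform, replacing the POSIX-only
        # `time -p` shell utility.
        start = time.perf_counter_ns()
        result = subprocess.run(
            command, stdout=subprocess.DEVNULL, stderr=subprocess.PIPE, text=True
        )
        elapsed_s = (time.perf_counter_ns() - start) / 1_000_000_000.0

        result.check_returncode()  # surface benchmark crashes
        return elapsed_s

    if __name__ == "__main__":
        # Placeholder invocation: one run of a hypothetical SunSpider test.
        print(f"{time_once('js', 'SunSpider', '3d-cube.js'):.3f}s")

Like the patch, this measures wall-clock time including interpreter
startup, the same thing `time -p` reported as "real".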