@@ -21,22 +21,12 @@ def generate_cpp_cycle_test(n: int) -> str:
21
21
struct MyObj { int id; ~MyObj() {} };
22
22
23
23
void long_cycle_4(bool condition) {
24
- MyObj v1{1};
25
- MyObj v2{1};
26
- MyObj v3{1};
27
- MyObj v4{1};
28
-
29
- MyObj* p1 = &v1;
30
- MyObj* p2 = &v2;
31
- MyObj* p3 = &v3;
32
- MyObj* p4 = &v4;
24
+ MyObj v1{1}; MyObj v2{1}; MyObj v3{1}; MyObj v4{1};
25
+ MyObj* p1 = &v1; MyObj* p2 = &v2; MyObj* p3 = &v3; MyObj* p4 = &v4;
33
26
34
27
while (condition) {
35
28
MyObj* temp = p1;
36
- p1 = p2;
37
- p2 = p3;
38
- p3 = p4;
39
- p4 = temp;
29
+ p1 = p2; p2 = p3; p3 = p4; p4 = temp;
40
30
}
41
31
}
42
32
"""
@@ -99,28 +89,81 @@ def generate_cpp_merge_test(n: int) -> str:
99
89
return cpp_code
100
90
101
91
102
- def analyze_trace_file ( trace_path : str ) -> tuple [ float , float ] :
92
def generate_cpp_nested_loop_test(n: int) -> str:
    """
    Generates C++ code with N levels of nested loops.

    This pattern tests how analysis performance scales with loop nesting depth,
    which is a key factor in the complexity of dataflow analyses on structured
    control flow.

    Example (n=3):
        struct MyObj { int id; ~MyObj() {} };
        void nested_loops_3() {
          MyObj* p = nullptr;
          for(int i0=0; i0<2; ++i0) {
            MyObj s0; p = &s0;
            for(int i1=0; i1<2; ++i1) {
              MyObj s1; p = &s1;
              for(int i2=0; i2<2; ++i2) {
                MyObj s2; p = &s2;
              }
            }
          }
        }

    Returns the generated C++ source as a string; for a non-positive n,
    returns a C++ comment instead of code.
    """
    if n <= 0:
        return "// Nesting depth must be positive."

    cpp_code = "struct MyObj { int id; ~MyObj() {} };\n\n"
    cpp_code += f"void nested_loops_{n}() {{\n"
    cpp_code += "  MyObj* p = nullptr;\n"

    # Opening half: each level declares a loop-local object and re-points
    # the outer pointer at it, stressing loan propagation per nesting level.
    for i in range(n):
        indent = "  " * (i + 1)
        cpp_code += f"{indent}for(int i{i}=0; i{i}<2; ++i{i}) {{\n"
        cpp_code += f"{indent}  MyObj s{i}; p = &s{i};\n"

    # Closing half: emit the matching braces from innermost to outermost.
    for i in range(n - 1, -1, -1):
        indent = "  " * (i + 1)
        cpp_code += f"{indent}}}\n"

    cpp_code += "}\n"
    cpp_code += f"\nint main() {{ nested_loops_{n}(); return 0; }}\n"
    return cpp_code
105
134
106
- Returns:
107
- A tuple of (lifetime_analysis_duration_us, total_clang_duration_us).
135
def analyze_trace_file(trace_path: str) -> dict:
    """
    Parses the -ftime-trace JSON output to find durations for the lifetime
    analysis and its sub-phases.

    Durations of multiple events with the same name are summed. Events not
    listed in the name map are ignored.

    Returns a dictionary of durations in microseconds; on a read/parse
    failure, prints an error to stderr and returns all-zero durations.
    """
    durations = {
        "lifetime_us": 0.0,
        "total_us": 0.0,
        "fact_gen_us": 0.0,
        "loan_prop_us": 0.0,
        "expired_loans_us": 0.0,
    }
    # Maps -ftime-trace event names to keys in the result dictionary.
    event_name_map = {
        "LifetimeSafetyAnalysis": "lifetime_us",
        "ExecuteCompiler": "total_us",
        "FactGenerator": "fact_gen_us",
        "LoanPropagation": "loan_prop_us",
        "ExpiredLoans": "expired_loans_us",
    }
    try:
        with open(trace_path, "r") as f:
            trace_data = json.load(f)
        for event in trace_data.get("traceEvents", []):
            event_name = event.get("name")
            if event_name in event_name_map:
                key = event_name_map[event_name]
                durations[key] += float(event.get("dur", 0))
    except (IOError, json.JSONDecodeError) as e:
        print(f"Error reading or parsing trace file {trace_path}: {e}", file=sys.stderr)
        return {key: 0.0 for key in durations}
    return durations
124
167
125
168
126
169
def power_law (n , c , k ):
@@ -135,8 +178,29 @@ def human_readable_time(ms: float) -> str:
135
178
return f"{ ms :.2f} ms"
136
179
137
180
181
def calculate_complexity(n_data, y_data) -> tuple[float | None, float | None]:
    """
    Calculates the exponent 'k' for the power law fit y = c * n^k.

    Returns a tuple of (k, k_standard_error), or (None, None) when the data
    is too small, effectively zero/constant, or the fit fails to converge.
    """
    try:
        # Need at least 3 points, and non-negligible, non-constant timings,
        # for a meaningful fit.
        if len(n_data) < 3 or np.all(y_data < 1e-6) or np.var(y_data) < 1e-6:
            return None, None

        # Zero timings carry no information for a power-law fit; drop them.
        non_zero_indices = y_data > 0
        if np.sum(non_zero_indices) < 3:
            return None, None

        n_fit, y_fit = n_data[non_zero_indices], y_data[non_zero_indices]
        popt, pcov = curve_fit(power_law, n_fit, y_fit, p0=[0, 1], maxfev=5000)
        # Standard error of k is the sqrt of its variance on the covariance
        # matrix diagonal (index 1 = exponent parameter).
        k_stderr = np.sqrt(np.diag(pcov))[1]
        return popt[1], k_stderr
    except (RuntimeError, ValueError):
        return None, None
200
+
201
+
138
202
def generate_markdown_report (results : dict ) -> str :
139
- """Generates a Markdown-formatted report from the benchmark results."""
203
+ """Generates a concise, Markdown-formatted report from the benchmark results."""
140
204
report = []
141
205
timestamp = datetime .now ().strftime ("%Y-%m-%d %H:%M:%S %Z" )
142
206
report .append (f"# Lifetime Analysis Performance Report" )
@@ -146,54 +210,52 @@ def generate_markdown_report(results: dict) -> str:
146
210
for test_name , data in results .items ():
147
211
title = data ["title" ]
148
212
report .append (f"## Test Case: { title } " )
149
- report .append ("" )
213
+ report .append ("\n **Timing Results:** \n " )
150
214
151
215
# Table header
152
- report .append ("| N | Analysis Time | Total Clang Time |" )
153
- report .append ("|:----|--------------:|-----------------:|" )
216
+ report .append (
217
+ "| N (Input Size) | Total Time | Analysis Time (%) | Fact Generator (%) | Loan Propagation (%) | Expired Loans (%) |"
218
+ )
219
+ report .append (
220
+ "|:---------------|-----------:|------------------:|-------------------:|---------------------:|------------------:|"
221
+ )
154
222
155
223
# Table rows
156
224
n_data = np .array (data ["n" ])
157
- analysis_data = np .array (data ["lifetime_ms" ])
158
- total_data = np .array (data ["total_ms" ])
225
+ total_ms_data = np .array (data ["total_ms" ])
159
226
for i in range (len (n_data )):
160
- analysis_str = human_readable_time (analysis_data [i ])
161
- total_str = human_readable_time (total_data [i ])
162
- report .append (f"| { n_data [i ]:<3} | { analysis_str :>13} | { total_str :>16} |" )
163
-
164
- report .append ("" )
165
-
166
- # Complexity analysis
167
- report .append (f"**Complexity Analysis:**" )
168
- try :
169
- # Curve fitting requires at least 3 points
170
- if len (n_data ) < 3 :
171
- raise ValueError ("Not enough data points to perform curve fitting." )
172
-
173
- popt , pcov = curve_fit (
174
- power_law , n_data , analysis_data , p0 = [0 , 2 ], maxfev = 5000
175
- )
176
- _ , k = popt
177
-
178
- # Confidence Interval for k
179
- alpha = 0.05 # 95% confidence
180
- dof = max (0 , len (n_data ) - len (popt )) # degrees of freedom
181
- t_val = t .ppf (1.0 - alpha / 2.0 , dof )
182
- # Standard error of the parameters
183
- perr = np .sqrt (np .diag (pcov ))
184
- k_stderr = perr [1 ]
185
- k_ci_lower = k - t_val * k_stderr
186
- k_ci_upper = k + t_val * k_stderr
187
-
188
- report .append (
189
- f"- The performance for this case scales approx. as **O(n<sup>{ k :.2f} </sup>)**."
190
- )
191
- report .append (
192
- f"- **95% Confidence interval for exponent:** `[{ k_ci_lower :.2f} , { k_ci_upper :.2f} ]`."
193
- )
194
-
195
- except (RuntimeError , ValueError ) as e :
196
- report .append (f"- Could not determine a best-fit curve for the data: { e } " )
227
+ total_t = total_ms_data [i ]
228
+ if total_t < 1e-6 :
229
+ total_t = 1.0 # Avoid division by zero
230
+
231
+ row = [
232
+ f"| { n_data [i ]:<14} |" ,
233
+ f"{ human_readable_time (total_t ):>10} |" ,
234
+ f"{ data ['lifetime_ms' ][i ] / total_t * 100 :>17.2f} % |" ,
235
+ f"{ data ['fact_gen_ms' ][i ] / total_t * 100 :>18.2f} % |" ,
236
+ f"{ data ['loan_prop_ms' ][i ] / total_t * 100 :>20.2f} % |" ,
237
+ f"{ data ['expired_loans_ms' ][i ] / total_t * 100 :>17.2f} % |" ,
238
+ ]
239
+ report .append (" " .join (row ))
240
+
241
+ report .append ("\n **Complexity Analysis:**\n " )
242
+ report .append ("| Analysis Phase | Complexity O(n<sup>k</sup>) |" )
243
+ report .append ("|:------------------|:--------------------------|" )
244
+
245
+ analysis_phases = {
246
+ "Total Analysis" : data ["lifetime_ms" ],
247
+ "FactGenerator" : data ["fact_gen_ms" ],
248
+ "LoanPropagation" : data ["loan_prop_ms" ],
249
+ "ExpiredLoans" : data ["expired_loans_ms" ],
250
+ }
251
+
252
+ for phase_name , y_data in analysis_phases .items ():
253
+ k , delta = calculate_complexity (n_data , np .array (y_data ))
254
+ if k is not None and delta is not None :
255
+ complexity_str = f"O(n<sup>{ k :.2f} </sup> ± { delta :.2f} )"
256
+ else :
257
+ complexity_str = "(Negligible)"
258
+ report .append (f"| { phase_name :<17} | { complexity_str :<25} |" )
197
259
198
260
report .append ("\n ---\n " )
199
261
@@ -202,12 +264,11 @@ def generate_markdown_report(results: dict) -> str:
202
264
203
265
def run_single_test (
204
266
clang_binary : str , output_dir : str , test_name : str , generator_func , n : int
205
- ) -> tuple [ float , float ] :
267
+ ) -> dict :
206
268
"""Generates, compiles, and benchmarks a single test case."""
207
269
print (f"--- Running Test: { test_name .capitalize ()} with N={ n } ---" )
208
270
209
271
generated_code = generator_func (n )
210
-
211
272
base_name = f"test_{ test_name } _{ n } "
212
273
source_file = os .path .join (output_dir , f"{ base_name } .cpp" )
213
274
trace_file = os .path .join (output_dir , f"{ base_name } .json" )
@@ -225,17 +286,15 @@ def run_single_test(
225
286
"-std=c++17" ,
226
287
source_file ,
227
288
]
228
-
229
289
result = subprocess .run (clang_command , capture_output = True , text = True )
230
290
231
291
if result .returncode != 0 :
232
292
print (f"Compilation failed for N={ n } !" , file = sys .stderr )
233
293
print (result .stderr , file = sys .stderr )
234
- return 0.0 , 0.0
294
+ return {}
235
295
236
- lifetime_us , total_us = analyze_trace_file (trace_file )
237
-
238
- return lifetime_us / 1000.0 , total_us / 1000.0
296
+ durations_us = analyze_trace_file (trace_file )
297
+ return {key .replace ('_us' , '_ms' ): value / 1000.0 for key , value in durations_us .items ()}
239
298
240
299
241
300
if __name__ == "__main__" :
@@ -270,6 +329,12 @@ def run_single_test(
270
329
"generator_func" : generate_cpp_merge_test ,
271
330
"n_values" : [10 , 50 , 100 , 200 , 400 , 800 ],
272
331
},
332
+ {
333
+ "name" : "nested_loops" ,
334
+ "title" : "Deeply Nested Loops" ,
335
+ "generator_func" : generate_cpp_nested_loop_test ,
336
+ "n_values" : [10 , 50 , 100 , 200 , 400 , 800 ],
337
+ },
273
338
]
274
339
275
340
results = {}
@@ -282,21 +347,28 @@ def run_single_test(
282
347
"n" : [],
283
348
"lifetime_ms" : [],
284
349
"total_ms" : [],
350
+ "fact_gen_ms" : [],
351
+ "loan_prop_ms" : [],
352
+ "expired_loans_ms" : [],
285
353
}
286
354
for n in config ["n_values" ]:
287
- lifetime_ms , total_ms = run_single_test (
355
+ durations_ms = run_single_test (
288
356
args .clang_binary ,
289
357
args .output_dir ,
290
358
test_name ,
291
359
config ["generator_func" ],
292
360
n ,
293
361
)
294
- if total_ms > 0 :
362
+ if durations_ms :
295
363
results [test_name ]["n" ].append (n )
296
- results [test_name ]["lifetime_ms" ].append (lifetime_ms )
297
- results [test_name ]["total_ms" ].append (total_ms )
364
+ for key , value in durations_ms .items ():
365
+ results [test_name ][key ].append (value )
366
+
298
367
print (
299
- f" Total: { human_readable_time (total_ms )} | Analysis: { human_readable_time (lifetime_ms )} "
368
+ f" Total Analysis: { human_readable_time (durations_ms ['lifetime_ms' ])} | "
369
+ f"FactGen: { human_readable_time (durations_ms ['fact_gen_ms' ])} | "
370
+ f"LoanProp: { human_readable_time (durations_ms ['loan_prop_ms' ])} | "
371
+ f"ExpiredLoans: { human_readable_time (durations_ms ['expired_loans_ms' ])} "
300
372
)
301
373
302
374
print ("\n \n " + "=" * 80 )
@@ -305,3 +377,8 @@ def run_single_test(
305
377
306
378
markdown_report = generate_markdown_report (results )
307
379
print (markdown_report )
380
+
381
+ report_filename = os .path .join (args .output_dir , "performance_report.md" )
382
+ with open (report_filename , "w" ) as f :
383
+ f .write (markdown_report )
384
+ print (f"Report saved to: { report_filename } " )
0 commit comments