import pandas as pd
BASE_RESULT_DIR = "artifacts/results/"
- PROJECTS = ["jflex", "convex", "mph-table"]
+ PROJECTS = ["convex", "jflex", "mph-table", "rpki-commons"]
REPORT_NAME = "artifacts/output/rq4.csv"
TEX_REPORT_NAME = "artifacts/output/rq4.tex"

- CALC_NAMES = ['Vanilla', 'Improved']
+ RAW_NAMES = ['Vanilla', 'Improved']
+ CALC_NAMES = ['Vanilla', 'Improved', 'Overhead']

propertyShortNames = {
    "TestSmartByteSerializer#canRoundTripBytes": 'byte',
    "CharClassesQuickcheck#addString": 'addString',
    "StateSetQuickcheck#addStateDoesNotRemove": 'add',
    "StateSetQuickcheck#containsElements": 'contains',
-     "StateSetQuickcheck#removeAdd": 'remove'
+     "StateSetQuickcheck#removeAdd": 'remove',
+     "RoaCMSBuilderPropertyTest#buildEncodedParseCheck": 'roa',
+     "ManifestCMSBuilderPropertyTest#buildEncodedParseCheck": 'manifest',
+     "AspaCmsTest#should_generate_aspa": 'aspa',
+     "X509ResourceCertificateParentChildValidatorTest#validParentChildSubResources": 'resources',
+     "X509ResourceCertificateParentChildValidatorTest#validParentChildOverClaiming": 'claiming',
+     "X509ResourceCertificateParentChildValidatorTest#validParentChildOverClaimingLooseValidation": 'loose'
}
row_count = 1
@@ -45,6 +52,8 @@ def filter_for_recent_results(project_name: str, stats_directories: list[str]) -
    project_string = project_name if project_name != "convex" else project_name + "-core"  # edge case
    if "mph-table-fixed" in stats_directories[0]:  # edge case
        project_string = "mph-table-fixed"
+     elif "rpki-commons-fixed" in stats_directories[0]:
+         project_string = "rpki-commons-fixed"
    time_stamps = [datetime.datetime.strptime(x.replace(project_string, "").replace("_", ":").replace("T", " "), "%Y-%m-%d %H:%M:%S.%f")
                   for x in stats_directories]
    time_stamps.sort()
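For reference, the timestamp handling above assumes each run directory is named as the project string immediately followed by an ISO-like timestamp in which ':' was encoded as '_'; a minimal sketch with a made-up directory name:

    import datetime

    # hypothetical directory name: project string + timestamp with ':' encoded as '_'
    dir_name = "rpki-commons2024-01-02T03_04_05.678901"
    cleaned = dir_name.replace("rpki-commons", "").replace("_", ":").replace("T", " ")
    stamp = datetime.datetime.strptime(cleaned, "%Y-%m-%d %H:%M:%S.%f")
    print(stamp)  # 2024-01-02 03:04:05.678901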
@@ -88,11 +97,14 @@ def generate_report_stats(stat_values: dict[str, dict]) -> dict[str, str]:
    property_dict = {}
    for key in first_iteration:
        property_dict[key] = []
-
+
    # populate the dictionary with our results
    for key, val in stat_values.items():
        for prop, time in val.items():
            property_array = property_dict.get(prop)
+             if property_array is None:
+                 property_dict[prop] = []
+                 property_array = property_dict.get(prop)
            property_array.append(time)

    # generate mean, standard deviation and populate our final object
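The added None check covers properties that only appear in later iterations (such as the new rpki-commons properties), initialising their list on first sight. Assuming property_dict is a plain dict, the same guard can also be written with setdefault; a sketch, not part of this change:

    # create the empty list the first time a property is seen, then append to it
    property_dict.setdefault(prop, []).append(time)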
@@ -141,7 +153,7 @@ def main():
    fixed_stats_directories = obtain_stats_directories(results_directory=fixed_results_directory)
    evaluated_fixed_runs = filter_for_recent_results(project_name=project_name, stats_directories=fixed_stats_directories)
    fixed_raw_stats = evaluate_directories(project_name=fixed_project_name, results_directory=fixed_results_directory, directories=evaluated_fixed_runs)
-
+
    # obtain mean/st dev
    final_stats = generate_report_stats(stat_values=raw_stats)
    final_fixed_stats = generate_report_stats(stat_values=fixed_raw_stats)
@@ -153,21 +165,24 @@ def main():
    df = pd.DataFrame()
    for project in PROJECTS:
        final_dataset[project]['_style'] = ''
-         proj_mean_and_std = final_dataset[project][CALC_NAMES].copy()
+         proj_mean_and_std = final_dataset[project][RAW_NAMES].copy()
        vanilla_mean = pd.DataFrame(proj_mean_and_std['Vanilla'].apply(lambda v: float(v.split(" \u00B1 ")[0]) if
                                    " \u00B1 " in str(v) else np.nan)).reset_index()
        improved_mean = pd.DataFrame(proj_mean_and_std['Improved'].apply(lambda v: float(v.split(" \u00B1 ")[0]) if
                                     " \u00B1 " in str(v) else np.nan)).reset_index()

-         proj_stats = pd.merge(vanilla_mean.copy(), improved_mean.copy(), how='outer', on='index')[CALC_NAMES]
-         final_dataset[project]['Difference'] = proj_stats[['Vanilla', 'Improved']].pct_change(axis='columns')['Improved']
-         proj_mean = pd.merge(vanilla_mean, improved_mean, how='outer', on='index')[CALC_NAMES].mean()
+         proj_stats = pd.merge(vanilla_mean, improved_mean, how='outer', on='index')[RAW_NAMES].reset_index()
+
+         final_dataset[project]['Overhead'] = proj_stats[['Improved']].values / proj_stats[['Vanilla']].values
+         overhead_stats = final_dataset[project]['Overhead'].copy().reset_index()
+
+         proj_mean = pd.merge(proj_stats, overhead_stats, how='outer', on='index')[CALC_NAMES].mean()
        proj_mean['_style'] = 'BOLD'
        proj_mean['N'] = ''
        proj_mean['Property'] = 'Average'
        final_dataset[project].loc['mean'] = proj_mean

-         header = dict(zip(['N', 'Property', 'Vanilla', 'Improved', 'Difference'], ['', '', '', '', '']))
+         header = dict(zip(['N', 'Property', 'Vanilla', 'Improved', 'Overhead'], ['', '', '', '', '']))
        df = pd.concat([
            df,
            pd.DataFrame(header | {'_style': 'HEADER', 'Property': project}, index=[0]),
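Where the removed 'Difference' column used pct_change across the Vanilla and Improved columns, i.e. (Improved - Vanilla) / Vanilla, the new 'Overhead' column stores the plain ratio Improved / Vanilla, and the per-project 'Average' row now averages all three CALC_NAMES columns. A small illustration with made-up timings:

    import pandas as pd

    # hypothetical per-property mean runtimes in seconds
    stats = pd.DataFrame({"Vanilla": [2.0, 4.0], "Improved": [2.5, 6.0]})

    # old report: relative difference via pct_change -> 0.25 and 0.50
    difference = stats[["Vanilla", "Improved"]].pct_change(axis="columns")["Improved"]

    # new report: overhead as a ratio -> 1.25 and 1.50
    overhead = stats["Improved"] / stats["Vanilla"]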
@@ -203,6 +218,5 @@ def main():
    tf.write(outTable)


-
if __name__ == "__main__":
    main()
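Per the REPORT_NAME and TEX_REPORT_NAME constants above, the script targets artifacts/output/rq4.csv and artifacts/output/rq4.tex. A quick, hypothetical spot check of the CSV output after a run:

    import pandas as pd

    # hypothetical sanity check: load the generated report and inspect the first rows
    report = pd.read_csv("artifacts/output/rq4.csv")
    print(report.head())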