@@ -1,4 +1,5 @@
 from .. import result_summarizer
+from ...rcp_checker import rcp_checker
 from ...compliance_checker.mlp_compliance import usage_choices, rule_choices
 import argparse
 
@@ -8,40 +9,62 @@ def get_compute_args():
         prog="mlperf_logging.result_summarizer.compute_score",
         description="Compute the score of a single benchmark",
     )
-    parser.add_argument("benchmark", type=str, help="TODO:", required=True)
-    parser.add_argument("system", type=str, help="System name", default=None)
+    parser.add_argument("--benchmark", type=str, help="TODO:", required=True)
+    parser.add_argument("--system", type=str, help="System name", default=None)
     parser.add_argument(
-        "has_power", action="store_true", help="Compute power score as well"
+        "--has_power", action="store_true", help="Compute power score as well"
     )
     parser.add_argument(
-        "benchmark_folder", type=str, help="Folder containing all the result files", required=True
+        "--benchmark_folder", type=str, help="Folder containing all the result files", required=True
     )
     parser.add_argument(
-        "usage",
+        "--usage",
         type=str,
         default="training",
         choices=usage_choices(),
         help="the usage such as training, hpc, inference_edge, inference_server",
         required=True,
     )
     parser.add_argument(
-        "ruleset",
+        "--ruleset",
         type=str,
         choices=rule_choices(),
         help="the ruleset such as 0.6.0, 0.7.0, or 1.0.0",
         required=True,
     )
-
     parser.add_argument(
-        "weak_scaling", action="store_true", help="Compute weak scaling score"
+        "--is_weak_scaling", action="store_true", help="Compute weak scaling score"
+    )
+    parser.add_argument(
+        "--scale", action="store_true", help="Compute the scaling factor"
     )
 
     return parser.parse_args()
 
 
+def print_benchmark_info(args):
+    print(f"MLPerf {args.usage}")
+    print(f"Folder: {args.benchmark_folder}")
+    print(f"Version: {args.ruleset}")
+    print(f"System: {args.system}")
+    print(f"Benchmark: {args.benchmark}")
+
 args = get_compute_args()
 
-if args.weak_scaling:
+if args.scale:
+    rcp_checker.check_directory(
+        args.benchmark_folder,
+        args.usage,
+        args.ruleset,
+        False,
+        False,
+        rcp_file=None,
+        rcp_pass='pruned_rcps',
+        rcp_bypass=False,
+        set_scaling=True,
+    )
+
+if args.is_weak_scaling:
     scores, power_scores = result_summarizer._compute_weak_score_standalone(
         args.benchmark,
         args.system,
@@ -50,9 +73,10 @@ def get_compute_args():
         args.usage,
         args.ruleset,
     )
+    print_benchmark_info(args)
     print(f"Scores: {scores}")
     if power_scores:
-        print(f"Power Scores: {power_scores}")
+        print(f"Power Scores - Energy (kJ): {power_scores}")
 else:
     score, power_score = result_summarizer._compute_strong_score_standalone(
         args.benchmark,
@@ -62,6 +86,7 @@ def get_compute_args():
         args.usage,
         args.ruleset,
     )
-    print(f"Score: {score}")
+    print_benchmark_info(args)
+    print(f"Score - Time to Train (minutes): {score}")
     if power_score:
-        print(f"Power Score: {power_score}")
+        print(f"Power Score - Energy (kJ): {power_score}")
0 commit comments
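The switch from positional names to `--` flags is more than cosmetic: argparse rejects `required=True` on positional arguments, so the old `parser.add_argument("benchmark", ..., required=True)` calls would fail before parsing anything. A minimal standalone sketch of that behavior (not part of this PR):

    import argparse

    parser = argparse.ArgumentParser()

    # Positionals are implicitly required; passing required=True
    # raises a TypeError ("'required' is an unexpected keyword argument").
    try:
        parser.add_argument("benchmark", type=str, required=True)
    except TypeError as err:
        print(err)

    # Flag-style (optional) arguments accept required=True, as in this PR.
    parser.add_argument("--benchmark", type=str, required=True)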