Skip to content

Commit de96f51

Browse files
authored
Update regression test to parse eval result from json (#1310)
* export metrics results to json
* fix mmedit
* update docs
* fix test failure
* fix
* fix mmocr metrics
* remove srgan config with no set5 test
1 parent b23411d commit de96f51

File tree

20 files changed

+229
-467
lines changed

20 files changed

+229
-467
lines changed

docs/en/02-how-to-run/profile_model.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -50,6 +50,7 @@ ${MODEL_CFG} \
5050
- `--speed-test`: Whether to activate speed test.
5151
- `--warmup`: warmup before counting inference elapse, require setting speed-test first.
5252
- `--log-interval`: The interval between each log, require setting speed-test first.
53+
- `--json-file`: The path of json file to save evaluation results. Default is `./results.json`.
5354

5455
\* Other arguments in `tools/test.py` are used for speed test. They have no concern with evaluation.
5556

mmdeploy/codebase/mmcls/deploy/classification.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -228,7 +228,8 @@ def evaluate_outputs(model_cfg: mmcv.Config,
228228
out: Optional[str] = None,
229229
metric_options: Optional[dict] = None,
230230
format_only: bool = False,
231-
log_file: Optional[str] = None) -> None:
231+
log_file: Optional[str] = None,
232+
json_file: Optional[str] = None) -> None:
232233
"""Perform post-processing to predictions of model.
233234
234235
Args:
@@ -249,9 +250,11 @@ def evaluate_outputs(model_cfg: mmcv.Config,
249250
"""
250251
from mmcv.utils import get_logger
251252
logger = get_logger('test', log_file=log_file, log_level=logging.INFO)
252-
253253
if metrics:
254254
results = dataset.evaluate(outputs, metrics, metric_options)
255+
if json_file is not None:
256+
mmcv.dump(results, json_file, indent=4)
257+
255258
for k, v in results.items():
256259
logger.info(f'{k} : {v:.2f}')
257260
else:

mmdeploy/codebase/mmdet/deploy/object_detection.py

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -249,7 +249,8 @@ def evaluate_outputs(model_cfg: mmcv.Config,
249249
out: Optional[str] = None,
250250
metric_options: Optional[dict] = None,
251251
format_only: bool = False,
252-
log_file: Optional[str] = None):
252+
log_file: Optional[str] = None,
253+
json_file: Optional[str] = None):
253254
"""Perform post-processing to predictions of model.
254255
255256
Args:
@@ -287,7 +288,10 @@ def evaluate_outputs(model_cfg: mmcv.Config,
287288
]:
288289
eval_kwargs.pop(key, None)
289290
eval_kwargs.update(dict(metric=metrics, **kwargs))
290-
logger.info(dataset.evaluate(outputs, **eval_kwargs))
291+
results = dataset.evaluate(outputs, **eval_kwargs)
292+
if json_file is not None:
293+
mmcv.dump(results, json_file, indent=4)
294+
logger.info(results)
291295

292296
def get_preprocess(self) -> Dict:
293297
"""Get the preprocess information for SDK.

mmdeploy/codebase/mmdet3d/deploy/voxel_detection.py

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -178,7 +178,8 @@ def evaluate_outputs(model_cfg,
178178
out: Optional[str] = None,
179179
metric_options: Optional[dict] = None,
180180
format_only: bool = False,
181-
log_file: Optional[str] = None):
181+
log_file: Optional[str] = None,
182+
json_file: Optional[str] = None):
182183
if out:
183184
logger = get_root_logger()
184185
logger.info(f'\nwriting results to {out}')
@@ -196,7 +197,10 @@ def evaluate_outputs(model_cfg,
196197
eval_kwargs.pop(key, None)
197198
eval_kwargs.pop(key, None)
198199
eval_kwargs.update(dict(metric=metrics, **kwargs))
199-
dataset.evaluate(outputs, **eval_kwargs)
200+
results = dataset.evaluate(outputs, **eval_kwargs)
201+
if json_file is not None:
202+
mmcv.dump(results, json_file, indent=4)
203+
logger.info(results)
200204

201205
def get_model_name(self) -> str:
202206
"""Get the model name.

mmdeploy/codebase/mmedit/deploy/super_resolution.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -257,6 +257,7 @@ def evaluate_outputs(model_cfg,
257257
metric_options: Optional[dict] = None,
258258
format_only: bool = False,
259259
log_file: Optional[str] = None,
260+
json_file: Optional[str] = None,
260261
**kwargs) -> None:
261262
"""Evaluation function implemented in mmedit.
262263
@@ -287,6 +288,8 @@ def evaluate_outputs(model_cfg,
287288
stats = dataset.evaluate(outputs)
288289
for stat in stats:
289290
logger.info('Eval-{}: {}'.format(stat, stats[stat]))
291+
if json_file is not None:
292+
mmcv.dump(stats, json_file, indent=4)
290293

291294
def get_preprocess(self) -> Dict:
292295
"""Get the preprocess information for SDK.

mmdeploy/codebase/mmocr/deploy/text_detection.py

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -241,7 +241,8 @@ def evaluate_outputs(model_cfg,
241241
out: Optional[str] = None,
242242
metric_options: Optional[dict] = None,
243243
format_only: bool = False,
244-
log_file: Optional[str] = None):
244+
log_file: Optional[str] = None,
245+
json_file: Optional[str] = None):
245246
"""Perform post-processing to predictions of model.
246247
247248
Args:
@@ -279,7 +280,10 @@ def evaluate_outputs(model_cfg,
279280
]:
280281
eval_kwargs.pop(key, None)
281282
eval_kwargs.update(dict(metric=metrics, **kwargs))
282-
logger.info(dataset.evaluate(outputs, **eval_kwargs))
283+
results = dataset.evaluate(outputs, **eval_kwargs)
284+
if json_file is not None:
285+
mmcv.dump(results, json_file, indent=4)
286+
logger.info(results)
283287

284288
def get_preprocess(self) -> Dict:
285289
"""Get the preprocess information for SDK.

mmdeploy/codebase/mmocr/deploy/text_recognition.py

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -255,7 +255,8 @@ def evaluate_outputs(model_cfg: mmcv.Config,
255255
out: Optional[str] = None,
256256
metric_options: Optional[dict] = None,
257257
format_only: bool = False,
258-
log_file: Optional[str] = None):
258+
log_file: Optional[str] = None,
259+
json_file: Optional[str] = None):
259260
"""Perform post-processing to predictions of model.
260261
261262
Args:
@@ -293,7 +294,10 @@ def evaluate_outputs(model_cfg: mmcv.Config,
293294
]:
294295
eval_kwargs.pop(key, None)
295296
eval_kwargs.update(dict(metric=metrics, **kwargs))
296-
logger.info(dataset.evaluate(outputs, **eval_kwargs))
297+
results = dataset.evaluate(outputs, **eval_kwargs)
298+
if json_file is not None:
299+
mmcv.dump(results, json_file, indent=4)
300+
logger.info(results)
297301

298302
def get_preprocess(self) -> Dict:
299303
"""Get the preprocess information for SDK.

mmdeploy/codebase/mmpose/deploy/pose_detection.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -272,6 +272,7 @@ def evaluate_outputs(model_cfg: mmcv.Config,
272272
metric_options: Optional[dict] = None,
273273
format_only: bool = False,
274274
log_file: Optional[str] = None,
275+
json_file: Optional[str] = None,
275276
**kwargs):
276277
"""Perform post-processing to predictions of model.
277278
@@ -307,6 +308,8 @@ def evaluate_outputs(model_cfg: mmcv.Config,
307308
eval_config.update(dict(metric=metrics))
308309

309310
results = dataset.evaluate(outputs, res_folder, **eval_config)
311+
if json_file is not None:
312+
mmcv.dump(results, json_file, indent=4)
310313
for k, v in sorted(results.items()):
311314
logger.info(f'{k}: {v:.4f}')
312315

mmdeploy/codebase/mmrotate/deploy/rotated_detection.py

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -284,7 +284,8 @@ def evaluate_outputs(model_cfg,
284284
out: Optional[str] = None,
285285
metric_options: Optional[dict] = None,
286286
format_only: bool = False,
287-
log_file: Optional[str] = None):
287+
log_file: Optional[str] = None,
288+
json_file: Optional[str] = None):
288289
"""Perform post-processing to predictions of model.
289290
290291
Args:
@@ -322,7 +323,10 @@ def evaluate_outputs(model_cfg,
322323
]:
323324
eval_kwargs.pop(key, None)
324325
eval_kwargs.update(dict(metric=metrics, **kwargs))
325-
logger.info(dataset.evaluate(outputs, **eval_kwargs))
326+
results = dataset.evaluate(outputs, **eval_kwargs)
327+
if json_file is not None:
328+
mmcv.dump(results, json_file, indent=4)
329+
logger.info(results)
326330

327331
def get_preprocess(self) -> Dict:
328332
"""Get the preprocess information for SDK.

mmdeploy/codebase/mmseg/deploy/segmentation.py

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -227,7 +227,8 @@ def evaluate_outputs(model_cfg,
227227
out: Optional[str] = None,
228228
metric_options: Optional[dict] = None,
229229
format_only: bool = False,
230-
log_file: Optional[str] = None):
230+
log_file: Optional[str] = None,
231+
json_file: Optional[str] = None):
231232
"""Perform post-processing to predictions of model.
232233
233234
Args:
@@ -257,7 +258,10 @@ def evaluate_outputs(model_cfg,
257258
if format_only:
258259
dataset.format_results(outputs, **kwargs)
259260
if metrics:
260-
dataset.evaluate(outputs, metrics, logger=logger, **kwargs)
261+
results = dataset.evaluate(
262+
outputs, metrics, logger=logger, **kwargs)
263+
if json_file is not None:
264+
mmcv.dump(results, json_file, indent=4)
261265

262266
def get_preprocess(self) -> Dict:
263267
"""Get the preprocess information for SDK.

0 commit comments

Comments
 (0)