From 62d8263650adf499188e8236bc32894d25ad8c91 Mon Sep 17 00:00:00 2001
From: shangliang Xu
Date: Tue, 13 Apr 2021 19:17:49 +0800
Subject: [PATCH] add save only in eval/train test=develop (#2604)

---
 ppdet/engine/trainer.py  |  5 +++-
 ppdet/metrics/metrics.py | 55 +++++++++++++++++++++++++---------------
 tools/eval.py            |  9 ++++++-
 tools/train.py           |  6 +++++
 4 files changed, 52 insertions(+), 23 deletions(-)

diff --git a/ppdet/engine/trainer.py b/ppdet/engine/trainer.py
index c64bbe5b8..a25554be3 100644
--- a/ppdet/engine/trainer.py
+++ b/ppdet/engine/trainer.py
@@ -125,6 +125,8 @@ class Trainer(object):
             bias = self.cfg['bias'] if 'bias' in self.cfg else 0
             output_eval = self.cfg['output_eval'] \
                 if 'output_eval' in self.cfg else None
+            save_prediction_only = self.cfg['save_prediction_only'] \
+                if 'save_prediction_only' in self.cfg else False
 
             # pass clsid2catid info to metric instance to avoid multiple loading
             # annotation file
@@ -145,7 +147,8 @@ class Trainer(object):
                     clsid2catid=clsid2catid,
                     classwise=classwise,
                     output_eval=output_eval,
-                    bias=bias)
+                    bias=bias,
+                    save_prediction_only=save_prediction_only)
             ]
         elif self.cfg.metric == 'VOC':
             self._metrics = [
diff --git a/ppdet/metrics/metrics.py b/ppdet/metrics/metrics.py
index cae3a2e09..e4ad1544f 100644
--- a/ppdet/metrics/metrics.py
+++ b/ppdet/metrics/metrics.py
@@ -69,6 +69,7 @@ class COCOMetric(Metric):
         self.output_eval = kwargs.get('output_eval', None)
         # TODO: bias should be unified
         self.bias = kwargs.get('bias', 0)
+        self.save_prediction_only = kwargs.get('save_prediction_only', False)
         self.reset()
 
     def reset(self):
@@ -104,13 +105,17 @@ class COCOMetric(Metric):
                 json.dump(self.results['bbox'], f)
                 logger.info('The bbox result is saved to bbox.json.')
 
-            bbox_stats = cocoapi_eval(
-                output,
-                'bbox',
-                anno_file=self.anno_file,
-                classwise=self.classwise)
-            self.eval_results['bbox'] = bbox_stats
-            sys.stdout.flush()
+            if self.save_prediction_only:
+                logger.info('The bbox result is saved to {} and the mAP '
+                            'is not evaluated.'.format(output))
+            else:
+                bbox_stats = cocoapi_eval(
+                    output,
+                    'bbox',
+                    anno_file=self.anno_file,
+                    classwise=self.classwise)
+                self.eval_results['bbox'] = bbox_stats
+                sys.stdout.flush()
 
         if len(self.results['mask']) > 0:
             output = "mask.json"
@@ -120,13 +125,17 @@ class COCOMetric(Metric):
                 json.dump(self.results['mask'], f)
                 logger.info('The mask result is saved to mask.json.')
 
-            seg_stats = cocoapi_eval(
-                output,
-                'segm',
-                anno_file=self.anno_file,
-                classwise=self.classwise)
-            self.eval_results['mask'] = seg_stats
-            sys.stdout.flush()
+            if self.save_prediction_only:
+                logger.info('The mask result is saved to {} and the mAP '
+                            'is not evaluated.'.format(output))
+            else:
+                seg_stats = cocoapi_eval(
+                    output,
+                    'segm',
+                    anno_file=self.anno_file,
+                    classwise=self.classwise)
+                self.eval_results['mask'] = seg_stats
+                sys.stdout.flush()
 
         if len(self.results['segm']) > 0:
             output = "segm.json"
@@ -136,13 +145,17 @@ class COCOMetric(Metric):
                 json.dump(self.results['segm'], f)
                 logger.info('The segm result is saved to segm.json.')
 
-            seg_stats = cocoapi_eval(
-                output,
-                'segm',
-                anno_file=self.anno_file,
-                classwise=self.classwise)
-            self.eval_results['mask'] = seg_stats
-            sys.stdout.flush()
+            if self.save_prediction_only:
+                logger.info('The segm result is saved to {} and the mAP '
+                            'is not evaluated.'.format(output))
+            else:
+                seg_stats = cocoapi_eval(
+                    output,
+                    'segm',
+                    anno_file=self.anno_file,
+                    classwise=self.classwise)
+                self.eval_results['mask'] = seg_stats
+                sys.stdout.flush()
 
     def log(self):
         pass
diff --git a/tools/eval.py b/tools/eval.py
index 56e350d92..16f6acee8 100755
--- a/tools/eval.py
+++ b/tools/eval.py
@@ -66,6 +66,12 @@ def parse_args():
         action="store_true",
         help="whether per-category AP and draw P-R Curve or not.")
 
+    parser.add_argument(
+        '--save_prediction_only',
+        action='store_true',
+        default=False,
+        help='Whether to save the evaluation results only')
+
     args = parser.parse_args()
     return args
 
@@ -85,7 +91,7 @@ def run(FLAGS, cfg):
     # init parallel environment if nranks > 1
    init_parallel_env()
 
-    # build trainer 
+    # build trainer
    trainer = Trainer(cfg, mode='eval')
 
     # load weights
@@ -102,6 +108,7 @@ def main():
     cfg['bias'] = 1 if FLAGS.bias else 0
     cfg['classwise'] = True if FLAGS.classwise else False
     cfg['output_eval'] = FLAGS.output_eval
+    cfg['save_prediction_only'] = FLAGS.save_prediction_only
     merge_config(FLAGS.opt)
 
     place = paddle.set_device('gpu' if cfg.use_gpu else 'cpu')
diff --git a/tools/train.py b/tools/train.py
index 0ae86da07..675163b4e 100755
--- a/tools/train.py
+++ b/tools/train.py
@@ -75,6 +75,11 @@ def parse_args():
         type=str,
         default="vdl_log_dir/scalar",
         help='VisualDL logging directory for scalar.')
+    parser.add_argument(
+        '--save_prediction_only',
+        action='store_true',
+        default=False,
+        help='Whether to save the evaluation results only')
     args = parser.parse_args()
     return args
 
@@ -110,6 +115,7 @@ def main():
     cfg['fleet'] = FLAGS.fleet
     cfg['use_vdl'] = FLAGS.use_vdl
     cfg['vdl_log_dir'] = FLAGS.vdl_log_dir
+    cfg['save_prediction_only'] = FLAGS.save_prediction_only
     merge_config(FLAGS.opt)
 
     place = paddle.set_device('gpu' if cfg.use_gpu else 'cpu')
-- 
GitLab
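
Note (not part of the patch): a minimal usage sketch of the new flag, assuming the
standard PaddleDetection entry point with its usual -c config option; the config
path below is a placeholder. With --save_prediction_only set, evaluation writes the
predictions to bbox.json / mask.json / segm.json (under --output_eval if given) and
skips the COCO mAP computation, as implemented in COCOMetric above.

    # hypothetical invocation; substitute a real config file for the placeholder path
    python tools/eval.py -c configs/some_model_coco.yml --save_prediction_only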