diff --git a/ppdet/engine/trainer.py b/ppdet/engine/trainer.py
index c64bbe5b83de08f33d10edbdb34e15c139621038..a25554be3b2e9f0c930e3cd23aa238d1a1f4efd5 100644
--- a/ppdet/engine/trainer.py
+++ b/ppdet/engine/trainer.py
@@ -125,6 +125,8 @@ class Trainer(object):
             bias = self.cfg['bias'] if 'bias' in self.cfg else 0
             output_eval = self.cfg['output_eval'] \
                 if 'output_eval' in self.cfg else None
+            save_prediction_only = self.cfg['save_prediction_only'] \
+                if 'save_prediction_only' in self.cfg else False
 
             # pass clsid2catid info to metric instance to avoid multiple loading
             # annotation file
@@ -145,7 +147,8 @@ class Trainer(object):
                     clsid2catid=clsid2catid,
                     classwise=classwise,
                     output_eval=output_eval,
-                    bias=bias)
+                    bias=bias,
+                    save_prediction_only=save_prediction_only)
             ]
         elif self.cfg.metric == 'VOC':
             self._metrics = [
diff --git a/ppdet/metrics/metrics.py b/ppdet/metrics/metrics.py
index cae3a2e09ec190907a46d73900ca41afa00a3f09..e4ad1544f4808f721445390f07b5c81441ef21ca 100644
--- a/ppdet/metrics/metrics.py
+++ b/ppdet/metrics/metrics.py
@@ -69,6 +69,7 @@ class COCOMetric(Metric):
         self.output_eval = kwargs.get('output_eval', None)
         # TODO: bias should be unified
         self.bias = kwargs.get('bias', 0)
+        self.save_prediction_only = kwargs.get('save_prediction_only', False)
         self.reset()
 
     def reset(self):
@@ -104,13 +105,17 @@ class COCOMetric(Metric):
                 json.dump(self.results['bbox'], f)
                 logger.info('The bbox result is saved to bbox.json.')
 
-            bbox_stats = cocoapi_eval(
-                output,
-                'bbox',
-                anno_file=self.anno_file,
-                classwise=self.classwise)
-            self.eval_results['bbox'] = bbox_stats
-            sys.stdout.flush()
+            if self.save_prediction_only:
+                logger.info('The bbox result is saved to {} and do not '
+                            'evaluate the mAP.'.format(output))
+            else:
+                bbox_stats = cocoapi_eval(
+                    output,
+                    'bbox',
+                    anno_file=self.anno_file,
+                    classwise=self.classwise)
+                self.eval_results['bbox'] = bbox_stats
+                sys.stdout.flush()
 
         if len(self.results['mask']) > 0:
             output = "mask.json"
@@ -120,13 +125,17 @@ class COCOMetric(Metric):
                 json.dump(self.results['mask'], f)
                 logger.info('The mask result is saved to mask.json.')
 
-            seg_stats = cocoapi_eval(
-                output,
-                'segm',
-                anno_file=self.anno_file,
-                classwise=self.classwise)
-            self.eval_results['mask'] = seg_stats
-            sys.stdout.flush()
+            if self.save_prediction_only:
+                logger.info('The mask result is saved to {} and do not '
+                            'evaluate the mAP.'.format(output))
+            else:
+                seg_stats = cocoapi_eval(
+                    output,
+                    'segm',
+                    anno_file=self.anno_file,
+                    classwise=self.classwise)
+                self.eval_results['mask'] = seg_stats
+                sys.stdout.flush()
 
         if len(self.results['segm']) > 0:
             output = "segm.json"
@@ -136,13 +145,17 @@ class COCOMetric(Metric):
                 json.dump(self.results['segm'], f)
                 logger.info('The segm result is saved to segm.json.')
 
-            seg_stats = cocoapi_eval(
-                output,
-                'segm',
-                anno_file=self.anno_file,
-                classwise=self.classwise)
-            self.eval_results['mask'] = seg_stats
-            sys.stdout.flush()
+            if self.save_prediction_only:
+                logger.info('The segm result is saved to {} and do not '
+                            'evaluate the mAP.'.format(output))
+            else:
+                seg_stats = cocoapi_eval(
+                    output,
+                    'segm',
+                    anno_file=self.anno_file,
+                    classwise=self.classwise)
+                self.eval_results['mask'] = seg_stats
+                sys.stdout.flush()
 
     def log(self):
         pass
diff --git a/tools/eval.py b/tools/eval.py
index 56e350d92c8115551e6ac6e8b7bc1d1a34f73284..16f6acee84e791b5a3e85cfb9563dec017cd242f 100755
--- a/tools/eval.py
+++ b/tools/eval.py
@@ -66,6 +66,12 @@ def parse_args():
         action="store_true",
         help="whether per-category AP and draw P-R Curve or not.")
 
+    parser.add_argument(
+        '--save_prediction_only',
+        action='store_true',
+        default=False,
+        help='Whether to save the evaluation results only')
+
     args = parser.parse_args()
     return args
 
@@ -85,7 +91,7 @@ def run(FLAGS, cfg):
     # init parallel environment if nranks > 1
     init_parallel_env()
 
-    # build trainer 
+    # build trainer
     trainer = Trainer(cfg, mode='eval')
 
     # load weights
@@ -102,6 +108,7 @@ def main():
     cfg['bias'] = 1 if FLAGS.bias else 0
     cfg['classwise'] = True if FLAGS.classwise else False
     cfg['output_eval'] = FLAGS.output_eval
+    cfg['save_prediction_only'] = FLAGS.save_prediction_only
     merge_config(FLAGS.opt)
 
     place = paddle.set_device('gpu' if cfg.use_gpu else 'cpu')
diff --git a/tools/train.py b/tools/train.py
index 0ae86da07c09b58d624c9d93ba122bcb8d9e8b1b..675163b4eebfce351379be5331c7372ee57891b4 100755
--- a/tools/train.py
+++ b/tools/train.py
@@ -75,6 +75,11 @@ def parse_args():
         type=str,
         default="vdl_log_dir/scalar",
         help='VisualDL logging directory for scalar.')
+    parser.add_argument(
+        '--save_prediction_only',
+        action='store_true',
+        default=False,
+        help='Whether to save the evaluation results only')
 
     args = parser.parse_args()
     return args
@@ -110,6 +115,7 @@ def main():
     cfg['fleet'] = FLAGS.fleet
     cfg['use_vdl'] = FLAGS.use_vdl
     cfg['vdl_log_dir'] = FLAGS.vdl_log_dir
+    cfg['save_prediction_only'] = FLAGS.save_prediction_only
     merge_config(FLAGS.opt)
 
     place = paddle.set_device('gpu' if cfg.use_gpu else 'cpu')
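Usage sketch (not part of the patch above): with --save_prediction_only wired through tools/eval.py as in this diff, an eval run still writes the COCO-format prediction files (bbox.json / mask.json / segm.json) but COCOMetric.accumulate skips cocoapi_eval, so no mAP is computed. The command below is a hedged example; the config and weights paths are placeholders, and the -c/-o options are assumed to be the existing PaddleDetection entry-point arguments, with only --save_prediction_only coming from this change.

    # Dump predictions only; mAP evaluation is skipped inside COCOMetric.accumulate().
    # <model_config>.yml and <model_weights>.pdparams are placeholders, not real files.
    python tools/eval.py \
        -c <model_config>.yml \
        -o weights=<model_weights>.pdparams \
        --save_prediction_only

If the existing --output_eval option is also passed (it is already plumbed into cfg['output_eval'] in tools/eval.py and into COCOMetric), the JSON files are written into that directory instead of the current working directory.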