未验证 提交 0dc44aac 编写于 作者: S shangliang Xu 提交者: GitHub

update tools/eval (#2389)

* update tools/eval, support output_eval/json_eval

* update tools/eval, support output_eval/json_eval

* update tools/eval, support output_eval/json_eval

* update tools/eval
上级 71a95461
...@@ -124,10 +124,13 @@ class Trainer(object): ...@@ -124,10 +124,13 @@ class Trainer(object):
if self.cfg.metric == 'COCO': if self.cfg.metric == 'COCO':
# TODO: bias should be unified # TODO: bias should be unified
bias = self.cfg['bias'] if 'bias' in self.cfg else 0 bias = self.cfg['bias'] if 'bias' in self.cfg else 0
output_eval = self.cfg['output_eval'] \
if 'output_eval' in self.cfg else None
self._metrics = [ self._metrics = [
COCOMetric( COCOMetric(
anno_file=self.dataset.get_anno(), anno_file=self.dataset.get_anno(),
classwise=classwise, classwise=classwise,
output_eval=output_eval,
bias=bias) bias=bias)
] ]
elif self.cfg.metric == 'VOC': elif self.cfg.metric == 'VOC':
......
...@@ -140,3 +140,27 @@ def cocoapi_eval(jsonfile, ...@@ -140,3 +140,27 @@ def cocoapi_eval(jsonfile,
# flush coco evaluation result # flush coco evaluation result
sys.stdout.flush() sys.stdout.flush()
return coco_eval.stats return coco_eval.stats
def json_eval_results(metric: str,
                      json_directory: str=None,
                      dataset: object=None) -> None:
    """
    Run cocoapi evaluation directly on already-existing result files
    (proposal.json, bbox.json, mask.json) instead of running inference.

    Args:
        metric (str): evaluation metric type; only 'COCO' is supported.
        json_directory (str, optional): directory containing the json result
            files. If None, files are looked up in the current directory.
        dataset: dataset object providing the annotation file via get_anno().
    """
    assert metric == 'COCO', \
        "json eval only supports COCO metric, but received {}".format(metric)
    anno_file = dataset.get_anno()
    json_file_list = ['proposal.json', 'bbox.json', 'mask.json']
    if json_directory:
        assert os.path.exists(
            json_directory), "The json directory:{} does not exist".format(
                json_directory)
        # resolve each file name against the given directory
        json_file_list = [
            os.path.join(str(json_directory), v) for v in json_file_list
        ]
    # eval styles are positionally aligned with json_file_list above
    coco_eval_style = ['proposal', 'bbox', 'segm']
    for style, json_file in zip(coco_eval_style, json_file_list):
        if os.path.exists(json_file):
            cocoapi_eval(json_file, style, anno_file=anno_file)
        else:
            logger.info("{} not exists!".format(json_file))
...@@ -64,6 +64,7 @@ class COCOMetric(Metric): ...@@ -64,6 +64,7 @@ class COCOMetric(Metric):
self.anno_file = anno_file self.anno_file = anno_file
self.clsid2catid, self.catid2name = get_categories('COCO', anno_file) self.clsid2catid, self.catid2name = get_categories('COCO', anno_file)
self.classwise = kwargs.get('classwise', False) self.classwise = kwargs.get('classwise', False)
self.output_eval = kwargs.get('output_eval', None)
# TODO: bias should be unified # TODO: bias should be unified
self.bias = kwargs.get('bias', 0) self.bias = kwargs.get('bias', 0)
self.reset() self.reset()
...@@ -94,12 +95,15 @@ class COCOMetric(Metric): ...@@ -94,12 +95,15 @@ class COCOMetric(Metric):
def accumulate(self): def accumulate(self):
if len(self.results['bbox']) > 0: if len(self.results['bbox']) > 0:
with open("bbox.json", 'w') as f: output = "bbox.json"
if self.output_eval:
output = os.path.join(self.output_eval, output)
with open(output, 'w') as f:
json.dump(self.results['bbox'], f) json.dump(self.results['bbox'], f)
logger.info('The bbox result is saved to bbox.json.') logger.info('The bbox result is saved to bbox.json.')
bbox_stats = cocoapi_eval( bbox_stats = cocoapi_eval(
'bbox.json', output,
'bbox', 'bbox',
anno_file=self.anno_file, anno_file=self.anno_file,
classwise=self.classwise) classwise=self.classwise)
...@@ -107,12 +111,15 @@ class COCOMetric(Metric): ...@@ -107,12 +111,15 @@ class COCOMetric(Metric):
sys.stdout.flush() sys.stdout.flush()
if len(self.results['mask']) > 0: if len(self.results['mask']) > 0:
with open("mask.json", 'w') as f: output = "mask.json"
if self.output_eval:
output = os.path.join(self.output_eval, output)
with open(output, 'w') as f:
json.dump(self.results['mask'], f) json.dump(self.results['mask'], f)
logger.info('The mask result is saved to mask.json.') logger.info('The mask result is saved to mask.json.')
seg_stats = cocoapi_eval( seg_stats = cocoapi_eval(
'mask.json', output,
'segm', 'segm',
anno_file=self.anno_file, anno_file=self.anno_file,
classwise=self.classwise) classwise=self.classwise)
...@@ -120,12 +127,15 @@ class COCOMetric(Metric): ...@@ -120,12 +127,15 @@ class COCOMetric(Metric):
sys.stdout.flush() sys.stdout.flush()
if len(self.results['segm']) > 0: if len(self.results['segm']) > 0:
with open("segm.json", 'w') as f: output = "segm.json"
if self.output_eval:
output = os.path.join(self.output_eval, output)
with open(output, 'w') as f:
json.dump(self.results['segm'], f) json.dump(self.results['segm'], f)
logger.info('The segm result is saved to segm.json.') logger.info('The segm result is saved to segm.json.')
seg_stats = cocoapi_eval( seg_stats = cocoapi_eval(
'segm.json', output,
'segm', 'segm',
anno_file=self.anno_file, anno_file=self.anno_file,
classwise=self.classwise) classwise=self.classwise)
......
...@@ -33,6 +33,7 @@ from ppdet.core.workspace import load_config, merge_config ...@@ -33,6 +33,7 @@ from ppdet.core.workspace import load_config, merge_config
from ppdet.utils.check import check_gpu, check_version, check_config from ppdet.utils.check import check_gpu, check_version, check_config
from ppdet.utils.cli import ArgsParser from ppdet.utils.cli import ArgsParser
from ppdet.engine import Trainer, init_parallel_env from ppdet.engine import Trainer, init_parallel_env
from ppdet.metrics.coco_utils import json_eval_results
from ppdet.utils.logger import setup_logger from ppdet.utils.logger import setup_logger
logger = setup_logger('eval') logger = setup_logger('eval')
...@@ -74,6 +75,17 @@ def parse_args(): ...@@ -74,6 +75,17 @@ def parse_args():
def run(FLAGS, cfg): def run(FLAGS, cfg):
if FLAGS.json_eval:
logger.info(
"In json_eval mode, PaddleDetection will evaluate json files in "
"output_eval directly. And proposal.json, bbox.json and mask.json "
"will be detected by default.")
json_eval_results(
cfg.metric,
json_directory=FLAGS.output_eval,
dataset=cfg['EvalDataset'])
return
# init parallel environment if nranks > 1 # init parallel environment if nranks > 1
init_parallel_env() init_parallel_env()
...@@ -94,6 +106,7 @@ def main(): ...@@ -94,6 +106,7 @@ def main():
# TODO: bias should be unified # TODO: bias should be unified
cfg['bias'] = 1 if FLAGS.bias else 0 cfg['bias'] = 1 if FLAGS.bias else 0
cfg['classwise'] = True if FLAGS.classwise else False cfg['classwise'] = True if FLAGS.classwise else False
cfg['output_eval'] = FLAGS.output_eval
merge_config(FLAGS.opt) merge_config(FLAGS.opt)
if FLAGS.slim_config: if FLAGS.slim_config:
slim_cfg = load_config(FLAGS.slim_config) slim_cfg = load_config(FLAGS.slim_config)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册