Commit c3b87b79 authored by Guanghua Yu, committed by wangguanzhong

[PaddleDetection] fix some ease-of-use problems (#2962)

* fix ease-of-use problems
Parent 88db5c1b
@@ -35,7 +35,7 @@ python tools/train.py -c configs/faster_rcnn_r50_1x.yml
- `-r` or `--resume_checkpoint`: Checkpoint path for resuming training. Such as: `-r output/faster_rcnn_r50_1x/10000`
- `--eval`: Whether to perform evaluation in training, default is `False`
- `-p` or `--output_eval`: If perform evaluation in training, this edits evaluation directory, default is current directory.
- `--output_eval`: If evaluation is performed during training, this sets the directory where the evaluation json files are saved; default is the current directory (see the example command after this list).
- `-d` or `--dataset_dir`: Dataset path, same as `dataset_dir` of configs. Such as: `-d dataset/coco`
- `-o`: Set configuration options in config file. Such as: `-o weights=output/faster_rcnn_r50_1x/model_final`
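For example, a training run that evaluates as it goes and collects the evaluation json files in a separate directory might look like this (the directory name `eval_out` is illustrative):

```bash
python tools/train.py -c configs/faster_rcnn_r50_1x.yml \
                      --eval \
                      --output_eval eval_out \
                      -d dataset/coco
```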
@@ -90,7 +90,7 @@ python tools/eval.py -c configs/faster_rcnn_r50_1x.yml
#### Optional arguments
- `-d` or `--dataset_dir`: Dataset path, same as dataset_dir of configs. Such as: `-d dataset/coco`
- `-p` or `--output_eval`: Evaluation directory, default is current directory.
- `--output_eval`: Evaluation directory, default is current directory.
- `-o`: Set configuration options in config file. Such as: `-o weights=output/faster_rcnn_r50_1x/model_final`
- `--json_eval`: Whether to evaluate with an already existing bbox.json or mask.json. Default is `False`. The json file directory is set via the `--output_eval` argument; see the example commands after this list.
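For example, the following hypothetical pair of commands first dumps the result json files into a chosen directory, then re-scores them later without re-running inference (`eval_out` is illustrative):

```bash
# full evaluation, writing bbox.json / mask.json into eval_out
python tools/eval.py -c configs/faster_rcnn_r50_1x.yml \
                     --output_eval eval_out \
                     -o weights=output/faster_rcnn_r50_1x/model_final

# later: re-score the dumped json files with the COCO API only
python tools/eval.py -c configs/faster_rcnn_r50_1x.yml \
                     --json_eval --output_eval eval_out
```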
......
@@ -36,7 +36,7 @@ python tools/train.py -c configs/faster_rcnn_r50_1x.yml
- `-r` or `--resume_checkpoint`: Resume training from a given checkpoint, e.g.: `-r output/faster_rcnn_r50_1x/10000`
- `--eval`: Whether to evaluate while training, default is `False`
- `-p` or `--output_eval`: If evaluating while training, this sets the directory where the evaluation json files are saved; default is the current directory.
- `--output_eval`: If evaluating while training, this sets the directory where the evaluation json files are saved; default is the current directory.
- `-d` or `--dataset_dir`: Dataset path, same as `dataset_dir` in the config file. e.g.: `-d dataset/coco`
- `-o`: Set configuration options in the config file. e.g.: `-o weights=output/faster_rcnn_r50_1x/model_final`
@@ -84,7 +84,7 @@ python tools/eval.py -c configs/faster_rcnn_r50_1x.yml
#### Optional arguments
- `-d` or `--dataset_dir`: Dataset path, same as `dataset_dir` in the config file. e.g.: `-d dataset/coco`
- `-p` or `--output_eval`: This sets the directory where the evaluation json files are saved; default is the current directory.
- `--output_eval`: This sets the directory where the evaluation json files are saved; default is the current directory.
- `-o`: Set configuration options in the config file. e.g.: `-o weights=output/faster_rcnn_r50_1x/model_final`
- `--json_eval`: Whether to evaluate from an already existing bbox.json or mask.json. Default is `False`. The json file directory is set via the `--output_eval` argument.
......
@@ -38,6 +38,7 @@ __all__ = [
    'mask2out',
    'get_category_info',
    'proposal_eval',
    'cocoapi_eval',
]
@@ -61,22 +62,10 @@ def proposal_eval(results, anno_file, outfile, max_dets=(100, 300, 1000)):
    with open(outfile, 'w') as f:
        json.dump(xywh_results, f)

    coco_gt = COCO(anno_file)
    logger.info("Start evaluate...")
    coco_dt = coco_gt.loadRes(outfile)
    coco_ev = COCOeval(coco_gt, coco_dt, 'bbox')
    coco_ev.params.useCats = 0
    coco_ev.params.maxDets = list(max_dets)
    coco_ev.evaluate()
    coco_ev.accumulate()
    coco_ev.summarize()
    cocoapi_eval(outfile, 'proposal', anno_file=anno_file, max_dets=max_dets)
    # flush coco evaluation result
    sys.stdout.flush()


def bbox_eval(results, anno_file, outfile, with_background=True):
    assert 'bbox' in results[0]
    assert outfile.endswith('.json')
@@ -98,12 +87,7 @@ def bbox_eval(results, anno_file, outfile, with_background=True):
    with open(outfile, 'w') as f:
        json.dump(xywh_results, f)

    logger.info("Start evaluate...")
    coco_dt = coco_gt.loadRes(outfile)
    coco_ev = COCOeval(coco_gt, coco_dt, 'bbox')
    coco_ev.evaluate()
    coco_ev.accumulate()
    coco_ev.summarize()
    cocoapi_eval(outfile, 'bbox', coco_gt=coco_gt)
    # flush coco evaluation result
    sys.stdout.flush()
@@ -123,12 +107,36 @@ def mask_eval(results, anno_file, outfile, resolution, thresh_binarize=0.5):
    with open(outfile, 'w') as f:
        json.dump(segm_results, f)

    cocoapi_eval(outfile, 'segm', coco_gt=coco_gt)
def cocoapi_eval(jsonfile,
                 style,
                 coco_gt=None,
                 anno_file=None,
                 max_dets=(100, 300, 1000)):
    """
    Args:
        jsonfile: Evaluation json file, eg: bbox.json, mask.json.
        style: COCOeval style, one of `bbox`, `segm` and `proposal`.
        coco_gt: An initialized COCO ground-truth object; if None, it is
                 loaded from anno_file (coco_gt = COCO(anno_file)).
        anno_file: COCO annotations file.
        max_dets: COCO evaluation maxDets.
    """
    assert coco_gt is not None or anno_file is not None
    if coco_gt is None:
        coco_gt = COCO(anno_file)
    logger.info("Start evaluate...")
    coco_dt = coco_gt.loadRes(outfile)
    coco_ev = COCOeval(coco_gt, coco_dt, 'segm')
    coco_ev.evaluate()
    coco_ev.accumulate()
    coco_ev.summarize()
    coco_dt = coco_gt.loadRes(jsonfile)
    if style == 'proposal':
        coco_eval = COCOeval(coco_gt, coco_dt, 'bbox')
        coco_eval.params.useCats = 0
        coco_eval.params.maxDets = list(max_dets)
    else:
        coco_eval = COCOeval(coco_gt, coco_dt, style)
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
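To make the consolidation concrete, here is a minimal usage sketch of the new helper; the annotation path is a placeholder for a real COCO annotations file:

```python
from ppdet.utils.coco_eval import cocoapi_eval

# Score an existing detection dump against COCO ground truth; this is
# what bbox_eval() now delegates to after writing its json file.
cocoapi_eval('bbox.json', 'bbox',
             anno_file='dataset/coco/annotations/instances_val2017.json')

# Proposal evaluation keeps its special settings: class-agnostic
# matching (useCats = 0) and custom maxDets thresholds.
cocoapi_eval('proposal.json', 'proposal',
             anno_file='dataset/coco/annotations/instances_val2017.json',
             max_dets=(100, 300, 1000))
```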
def proposal2out(results, is_bbox_normalized=False):
......
@@ -76,7 +76,7 @@ def get_dataset_path(path, annotation, image_dir):
    if _dataset_exists(path, annotation, image_dir):
        return path

    logger.info("Dataset {} not exitst, try searching {} or "
    logger.info("Dataset {} not exists, try searching {} or "
                "downloading dataset...".format(
                    osp.realpath(path), DATASET_HOME))
......
@@ -18,12 +18,13 @@ from __future__ import print_function
import logging
import numpy as np
import os
import paddle.fluid as fluid
from ppdet.utils.voc_eval import bbox_eval as voc_bbox_eval
__all__ = ['parse_fetches', 'eval_run', 'eval_results']
__all__ = ['parse_fetches', 'eval_run', 'eval_results', 'json_eval_results']
logger = logging.getLogger(__name__)
@@ -96,7 +97,7 @@ def eval_results(results,
                 num_classes,
                 resolution=None,
                 is_bbox_normalized=False,
                 output_file=None):
                 output_directory=None):
    """Evaluation for evaluation program results"""
    if metric == 'COCO':
        from ppdet.utils.coco_eval import proposal_eval, bbox_eval, mask_eval
@@ -104,18 +105,18 @@ def eval_results(results,
        with_background = getattr(feed, 'with_background', True)
        if 'proposal' in results[0]:
            output = 'proposal.json'
            if output_file:
                output = '{}_proposal.json'.format(output_file)
            if output_directory:
                output = os.path.join(output_directory, 'proposal.json')
            proposal_eval(results, anno_file, output)
        if 'bbox' in results[0]:
            output = 'bbox.json'
            if output_file:
                output = '{}_bbox.json'.format(output_file)
            if output_directory:
                output = os.path.join(output_directory, 'bbox.json')
            bbox_eval(results, anno_file, output, with_background)
        if 'mask' in results[0]:
            output = 'mask.json'
            if output_file:
                output = '{}_mask.json'.format(output_file)
            if output_directory:
                output = os.path.join(output_directory, 'mask.json')
            mask_eval(results, anno_file, output, resolution)
    else:
        if 'accum_map' in results[-1]:
@@ -124,3 +125,22 @@ def eval_results(results,
        elif 'bbox' in results[0]:
            voc_bbox_eval(
                results, num_classes, is_bbox_normalized=is_bbox_normalized)
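The switch from `output_file` to `output_directory` also changes how the output paths are built: the old flag acted as a filename prefix, while the new one names a directory containing fixed filenames, which is what lets `json_eval_results` below find the files again. A quick sketch of the difference:

```python
import os

# Old behaviour: --output_file was a filename prefix.
print('{}_bbox.json'.format('eval_out'))      # eval_out_bbox.json

# New behaviour: --output_eval names a directory; the filenames
# (proposal.json, bbox.json, mask.json) stay fixed.
print(os.path.join('eval_out', 'bbox.json'))  # eval_out/bbox.json
```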
def json_eval_results(feed, metric, json_directory=None):
    """
    COCO-API evaluation from already existing proposal.json, bbox.json
    or mask.json files.
    """
    assert metric == 'COCO'
    from ppdet.utils.coco_eval import cocoapi_eval
    anno_file = getattr(feed.dataset, 'annotation', None)
    json_file_list = ['proposal.json', 'bbox.json', 'mask.json']
    if json_directory:
        for k, v in enumerate(json_file_list):
            json_file_list[k] = os.path.join(str(json_directory), v)

    coco_eval_style = ['proposal', 'bbox', 'segm']
    for i, v_json in enumerate(json_file_list):
        if os.path.exists(v_json):
            cocoapi_eval(v_json, coco_eval_style[i], anno_file=anno_file)
        else:
            logger.info("{} does not exist!".format(v_json))
@@ -55,7 +55,7 @@ class TrainingStats(object):
            for k, v in extras.items():
                stats[k] = v
        for k, v in self.smoothed_losses_and_metrics.items():
            stats[k] = round(v.get_median_value(), 6)
            stats[k] = format(v.get_median_value(), '.6f')
        return stats
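The change from `round()` to `format()` is about log readability rather than precision: `round()` returns a float, so trailing zeros vanish and the log columns jitter in width, while `format()` returns a fixed-width string. A quick comparison:

```python
loss = 0.5
print(round(loss, 6))       # 0.5        (float, variable width)
print(format(loss, '.6f'))  # 0.500000   (string, fixed width)
```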
......
@@ -19,9 +19,20 @@ from __future__ import print_function
import os
import multiprocessing


def set_paddle_flags(**kwargs):
    for key, value in kwargs.items():
        if os.environ.get(key, None) is None:
            os.environ[key] = str(value)


# NOTE(paddle-dev): All of these flags should be set before
# `import paddle`. Otherwise, it would not take any effect.
set_paddle_flags(
    FLAGS_eager_delete_tensor_gb=0,  # enable GC to save memory
)
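Note that the helper only fills in flags the environment does not already define, so a value exported before launching the script wins over the in-code default. A small self-contained sketch of that precedence:

```python
import os

def set_paddle_flags(**kwargs):
    # same helper as above: only set a flag if the user has not
    # already exported it in the environment
    for key, value in kwargs.items():
        if os.environ.get(key, None) is None:
            os.environ[key] = str(value)

os.environ['FLAGS_eager_delete_tensor_gb'] = '1.0'  # user override
set_paddle_flags(FLAGS_eager_delete_tensor_gb=0)    # default loses
print(os.environ['FLAGS_eager_delete_tensor_gb'])   # -> 1.0
```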
import paddle.fluid as fluid
from ppdet.utils.eval_utils import parse_fetches, eval_run, eval_results
from ppdet.utils.eval_utils import parse_fetches, eval_run, eval_results, json_eval_results
import ppdet.utils.checkpoint as checkpoint
from ppdet.utils.cli import ArgsParser
from ppdet.utils.check import check_gpu
@@ -78,6 +89,11 @@ def main():
    reader = create_reader(eval_feed, args_path=FLAGS.dataset_dir)
    pyreader.decorate_sample_list_generator(reader, place)

    # evaluate from already dumped json files
    if FLAGS.json_eval:
        json_eval_results(eval_feed, cfg.metric,
                          json_directory=FLAGS.output_eval)
        return

    # compile program for multi-devices
    if devices_num <= 1:
        compile_program = fluid.compiler.CompiledProgram(eval_prog)
@@ -115,22 +131,25 @@ def main():
    if 'mask' in results[0]:
        resolution = model.mask_head.resolution
    eval_results(results, eval_feed, cfg.metric, cfg.num_classes, resolution,
                 is_bbox_normalized, FLAGS.output_file)
                 is_bbox_normalized, FLAGS.output_eval)
if __name__ == '__main__':
    parser = ArgsParser()
    parser.add_argument(
        "-f",
        "--output_file",
        default=None,
        type=str,
        help="Evaluation file name, default to bbox.json and mask.json.")
        "--json_eval",
        action='store_true',
        default=False,
        help="Whether to re-evaluate with already existing bbox.json or mask.json")
    parser.add_argument(
        "-d",
        "--dataset_dir",
        default=None,
        type=str,
        help="Dataset path, same as DataFeed.dataset.dataset_dir")
    parser.add_argument(
        "--output_eval",
        default=None,
        type=str,
        help="Evaluation file directory, default is current directory.")
    FLAGS = parser.parse_args()
    main()
@@ -22,6 +22,17 @@ import glob
import numpy as np
from PIL import Image


def set_paddle_flags(**kwargs):
    for key, value in kwargs.items():
        if os.environ.get(key, None) is None:
            os.environ[key] = str(value)


# NOTE(paddle-dev): All of these flags should be set before
# `import paddle`. Otherwise, it would not take any effect.
set_paddle_flags(
    FLAGS_eager_delete_tensor_gb=0,  # enable GC to save memory
)
from paddle import fluid
from ppdet.core.workspace import load_config, merge_config, create
......
@@ -23,16 +23,13 @@ import numpy as np
import datetime
from collections import deque


def set_paddle_flags(**kwargs):
    for key, value in kwargs.items():
        if os.environ.get(key, None) is None:
            os.environ[key] = str(value)


# NOTE(paddle-dev): All of these flags should be
# set before `import paddle`. Otherwise, it would
# not take any effect.
# NOTE(paddle-dev): All of these flags should be set before
# `import paddle`. Otherwise, it would not take any effect.
set_paddle_flags(
    FLAGS_eager_delete_tensor_gb=0,  # enable GC to save memory
)
@@ -199,7 +196,7 @@ def main():
        if 'mask' in results[0]:
            resolution = model.mask_head.resolution
        eval_results(results, eval_feed, cfg.metric, cfg.num_classes,
                     resolution, is_bbox_normalized, FLAGS.output_file)
                     resolution, is_bbox_normalized, FLAGS.output_eval)
        train_pyreader.reset()
@@ -218,11 +215,10 @@ if __name__ == '__main__':
        default=False,
        help="Whether to perform evaluation in train")
    parser.add_argument(
        "-f",
        "--output_file",
        "--output_eval",
        default=None,
        type=str,
        help="Evaluation file name, default to bbox.json and mask.json.")
        help="Evaluation directory, default is current directory.")
    parser.add_argument(
        "-d",
        "--dataset_dir",
......