提交 c3b87b79 编写于 作者: G Guanghua Yu 提交者: wangguanzhong

[PaddleDetection] fix some ease of use problems (#2962)

* fix easy problem
上级 88db5c1b
...@@ -35,7 +35,7 @@ python tools/train.py -c configs/faster_rcnn_r50_1x.yml ...@@ -35,7 +35,7 @@ python tools/train.py -c configs/faster_rcnn_r50_1x.yml
- `-r` or `--resume_checkpoint`: Checkpoint path for resuming training. Such as: `-r output/faster_rcnn_r50_1x/10000` - `-r` or `--resume_checkpoint`: Checkpoint path for resuming training. Such as: `-r output/faster_rcnn_r50_1x/10000`
- `--eval`: Whether to perform evaluation in training, default is `False` - `--eval`: Whether to perform evaluation in training, default is `False`
- `-p` or `--output_eval`: If perform evaluation in training, this edits evaluation directory, default is current directory. - `--output_eval`: If perform evaluation in training, this edits evaluation directory, default is current directory.
- `-d` or `--dataset_dir`: Dataset path, same as `dataset_dir` of configs. Such as: `-d dataset/coco` - `-d` or `--dataset_dir`: Dataset path, same as `dataset_dir` of configs. Such as: `-d dataset/coco`
- `-o`: Set configuration options in config file. Such as: `-o weights=output/faster_rcnn_r50_1x/model_final` - `-o`: Set configuration options in config file. Such as: `-o weights=output/faster_rcnn_r50_1x/model_final`
...@@ -90,7 +90,7 @@ python tools/eval.py -c configs/faster_rcnn_r50_1x.yml ...@@ -90,7 +90,7 @@ python tools/eval.py -c configs/faster_rcnn_r50_1x.yml
#### Optional arguments #### Optional arguments
- `-d` or `--dataset_dir`: Dataset path, same as dataset_dir of configs. Such as: `-d dataset/coco` - `-d` or `--dataset_dir`: Dataset path, same as dataset_dir of configs. Such as: `-d dataset/coco`
- `-p` or `--output_eval`: Evaluation directory, default is current directory. - `--output_eval`: Evaluation directory, default is current directory.
- `-o`: Set configuration options in config file. Such as: `-o weights=output/faster_rcnn_r50_1x/model_final` - `-o`: Set configuration options in config file. Such as: `-o weights=output/faster_rcnn_r50_1x/model_final`
- `--json_eval`: Whether to eval with an already existing bbox.json or mask.json. Default is `False`. Json file directory is assigned by `-f` argument. - `--json_eval`: Whether to eval with an already existing bbox.json or mask.json. Default is `False`. Json file directory is assigned by `-f` argument.
......
...@@ -36,7 +36,7 @@ python tools/train.py -c configs/faster_rcnn_r50_1x.yml ...@@ -36,7 +36,7 @@ python tools/train.py -c configs/faster_rcnn_r50_1x.yml
- `-r` or `--resume_checkpoint`: 从某一检查点恢复训练,例如: `-r output/faster_rcnn_r50_1x/10000` - `-r` or `--resume_checkpoint`: 从某一检查点恢复训练,例如: `-r output/faster_rcnn_r50_1x/10000`
- `--eval`: 是否边训练边测试,默认是 `False` - `--eval`: 是否边训练边测试,默认是 `False`
- `-p` or `--output_eval`: 如果边训练边测试, 这个参数可以编辑评测保存json路径, 默认是当前目录。 - `--output_eval`: 如果边训练边测试, 这个参数可以编辑评测保存json路径, 默认是当前目录。
- `-d` or `--dataset_dir`: 数据集路径, 同配置文件里的`dataset_dir`. 例如: `-d dataset/coco` - `-d` or `--dataset_dir`: 数据集路径, 同配置文件里的`dataset_dir`. 例如: `-d dataset/coco`
- `-o`: 设置配置文件里的参数内容。 例如: `-o weights=output/faster_rcnn_r50_1x/model_final` - `-o`: 设置配置文件里的参数内容。 例如: `-o weights=output/faster_rcnn_r50_1x/model_final`
...@@ -84,7 +84,7 @@ python tools/eval.py -c configs/faster_rcnn_r50_1x.yml ...@@ -84,7 +84,7 @@ python tools/eval.py -c configs/faster_rcnn_r50_1x.yml
#### 可选参数 #### 可选参数
- `-d` or `--dataset_dir`: 数据集路径, 同配置文件里的`dataset_dir`。例如: `-d dataset/coco` - `-d` or `--dataset_dir`: 数据集路径, 同配置文件里的`dataset_dir`。例如: `-d dataset/coco`
- `-p` or `--output_eval`: 这个参数可以编辑评测保存json路径, 默认是当前目录。 - `--output_eval`: 这个参数可以编辑评测保存json路径, 默认是当前目录。
- `-o`: 设置配置文件里的参数内容。 例如: `-o weights=output/faster_rcnn_r50_1x/model_final` - `-o`: 设置配置文件里的参数内容。 例如: `-o weights=output/faster_rcnn_r50_1x/model_final`
- `--json_eval`: 是否通过已存在的bbox.json或者mask.json进行评估。默认是`False`。json文件路径通过`-f`指令来设置。 - `--json_eval`: 是否通过已存在的bbox.json或者mask.json进行评估。默认是`False`。json文件路径通过`-f`指令来设置。
......
...@@ -38,6 +38,7 @@ __all__ = [ ...@@ -38,6 +38,7 @@ __all__ = [
'mask2out', 'mask2out',
'get_category_info', 'get_category_info',
'proposal_eval', 'proposal_eval',
'cocoapi_eval',
] ]
...@@ -61,22 +62,10 @@ def proposal_eval(results, anno_file, outfile, max_dets=(100, 300, 1000)): ...@@ -61,22 +62,10 @@ def proposal_eval(results, anno_file, outfile, max_dets=(100, 300, 1000)):
with open(outfile, 'w') as f: with open(outfile, 'w') as f:
json.dump(xywh_results, f) json.dump(xywh_results, f)
coco_gt = COCO(anno_file) cocoapi_eval(outfile, 'proposal', anno_file=anno_file, max_dets=max_dets)
logger.info("Start evaluate...")
coco_dt = coco_gt.loadRes(outfile)
coco_ev = COCOeval(coco_gt, coco_dt, 'bbox')
coco_ev.params.useCats = 0
coco_ev.params.maxDets = list(max_dets)
coco_ev.evaluate()
coco_ev.accumulate()
coco_ev.summarize()
# flush coco evaluation result # flush coco evaluation result
sys.stdout.flush() sys.stdout.flush()
def bbox_eval(results, anno_file, outfile, with_background=True): def bbox_eval(results, anno_file, outfile, with_background=True):
assert 'bbox' in results[0] assert 'bbox' in results[0]
assert outfile.endswith('.json') assert outfile.endswith('.json')
...@@ -98,12 +87,7 @@ def bbox_eval(results, anno_file, outfile, with_background=True): ...@@ -98,12 +87,7 @@ def bbox_eval(results, anno_file, outfile, with_background=True):
with open(outfile, 'w') as f: with open(outfile, 'w') as f:
json.dump(xywh_results, f) json.dump(xywh_results, f)
logger.info("Start evaluate...") cocoapi_eval(outfile, 'bbox', coco_gt=coco_gt)
coco_dt = coco_gt.loadRes(outfile)
coco_ev = COCOeval(coco_gt, coco_dt, 'bbox')
coco_ev.evaluate()
coco_ev.accumulate()
coco_ev.summarize()
# flush coco evaluation result # flush coco evaluation result
sys.stdout.flush() sys.stdout.flush()
...@@ -123,12 +107,36 @@ def mask_eval(results, anno_file, outfile, resolution, thresh_binarize=0.5): ...@@ -123,12 +107,36 @@ def mask_eval(results, anno_file, outfile, resolution, thresh_binarize=0.5):
with open(outfile, 'w') as f: with open(outfile, 'w') as f:
json.dump(segm_results, f) json.dump(segm_results, f)
cocoapi_eval(outfile, 'segm', coco_gt=coco_gt)
def cocoapi_eval(jsonfile,
style,
coco_gt=None,
anno_file=None,
max_dets=(100, 300, 1000)):
"""
Args:
jsonfile: Evaluation json file, eg: bbox.json, mask.json.
style: COCOeval style, can be `bbox` , `segm` and `proposal`.
coco_gt: Whether to load COCOAPI through anno_file,
eg: coco_gt = COCO(anno_file)
anno_file: COCO annotations file.
max_dets: COCO evaluation maxDets.
"""
assert coco_gt != None or anno_file != None
if coco_gt == None:
coco_gt = COCO(anno_file)
logger.info("Start evaluate...") logger.info("Start evaluate...")
coco_dt = coco_gt.loadRes(outfile) coco_dt = coco_gt.loadRes(jsonfile)
coco_ev = COCOeval(coco_gt, coco_dt, 'segm') if style == 'proposal':
coco_ev.evaluate() coco_eval = COCOeval(coco_gt, coco_dt, 'bbox')
coco_ev.accumulate() coco_eval.params.useCats = 0
coco_ev.summarize() coco_eval.params.maxDets = list(max_dets)
else:
coco_eval = COCOeval(coco_gt, coco_dt, style)
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
def proposal2out(results, is_bbox_normalized=False): def proposal2out(results, is_bbox_normalized=False):
......
...@@ -76,7 +76,7 @@ def get_dataset_path(path, annotation, image_dir): ...@@ -76,7 +76,7 @@ def get_dataset_path(path, annotation, image_dir):
if _dataset_exists(path, annotation, image_dir): if _dataset_exists(path, annotation, image_dir):
return path return path
logger.info("Dataset {} not exitst, try searching {} or " logger.info("Dataset {} not exists, try searching {} or "
"downloading dataset...".format( "downloading dataset...".format(
osp.realpath(path), DATASET_HOME)) osp.realpath(path), DATASET_HOME))
......
...@@ -18,12 +18,13 @@ from __future__ import print_function ...@@ -18,12 +18,13 @@ from __future__ import print_function
import logging import logging
import numpy as np import numpy as np
import os
import paddle.fluid as fluid import paddle.fluid as fluid
from ppdet.utils.voc_eval import bbox_eval as voc_bbox_eval from ppdet.utils.voc_eval import bbox_eval as voc_bbox_eval
__all__ = ['parse_fetches', 'eval_run', 'eval_results'] __all__ = ['parse_fetches', 'eval_run', 'eval_results', 'json_eval_results']
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
...@@ -96,7 +97,7 @@ def eval_results(results, ...@@ -96,7 +97,7 @@ def eval_results(results,
num_classes, num_classes,
resolution=None, resolution=None,
is_bbox_normalized=False, is_bbox_normalized=False,
output_file=None): output_directory=None):
"""Evaluation for evaluation program results""" """Evaluation for evaluation program results"""
if metric == 'COCO': if metric == 'COCO':
from ppdet.utils.coco_eval import proposal_eval, bbox_eval, mask_eval from ppdet.utils.coco_eval import proposal_eval, bbox_eval, mask_eval
...@@ -104,18 +105,18 @@ def eval_results(results, ...@@ -104,18 +105,18 @@ def eval_results(results,
with_background = getattr(feed, 'with_background', True) with_background = getattr(feed, 'with_background', True)
if 'proposal' in results[0]: if 'proposal' in results[0]:
output = 'proposal.json' output = 'proposal.json'
if output_file: if output_directory:
output = '{}_proposal.json'.format(output_file) output = os.path.join(output_directory, 'proposal.json')
proposal_eval(results, anno_file, output) proposal_eval(results, anno_file, output)
if 'bbox' in results[0]: if 'bbox' in results[0]:
output = 'bbox.json' output = 'bbox.json'
if output_file: if output_directory:
output = '{}_bbox.json'.format(output_file) output = os.path.join(output_directory, 'bbox.json')
bbox_eval(results, anno_file, output, with_background) bbox_eval(results, anno_file, output, with_background)
if 'mask' in results[0]: if 'mask' in results[0]:
output = 'mask.json' output = 'mask.json'
if output_file: if output_directory:
output = '{}_mask.json'.format(output_file) output = os.path.join(output_directory, 'mask.json')
mask_eval(results, anno_file, output, resolution) mask_eval(results, anno_file, output, resolution)
else: else:
if 'accum_map' in results[-1]: if 'accum_map' in results[-1]:
...@@ -124,3 +125,22 @@ def eval_results(results, ...@@ -124,3 +125,22 @@ def eval_results(results,
elif 'bbox' in results[0]: elif 'bbox' in results[0]:
voc_bbox_eval( voc_bbox_eval(
results, num_classes, is_bbox_normalized=is_bbox_normalized) results, num_classes, is_bbox_normalized=is_bbox_normalized)
def json_eval_results(feed, metric, json_directory=None):
"""
cocoapi eval with already exists proposal.json, bbox.json or mask.json
"""
assert metric == 'COCO'
from ppdet.utils.coco_eval import cocoapi_eval
anno_file = getattr(feed.dataset, 'annotation', None)
json_file_list = ['proposal.json', 'bbox.json', 'mask.json']
if json_directory:
for k, v in enumerate(json_file_list):
json_file_list[k] = os.path.join(str(json_directory), v)
coco_eval_style = ['proposal', 'bbox', 'segm']
for i, v_json in enumerate(json_file_list):
if os.path.exists(v_json):
cocoapi_eval(v_json, coco_eval_style[i], anno_file=anno_file)
else:
logger.info("{} not exists!".format(v_json))
...@@ -55,7 +55,7 @@ class TrainingStats(object): ...@@ -55,7 +55,7 @@ class TrainingStats(object):
for k, v in extras.items(): for k, v in extras.items():
stats[k] = v stats[k] = v
for k, v in self.smoothed_losses_and_metrics.items(): for k, v in self.smoothed_losses_and_metrics.items():
stats[k] = round(v.get_median_value(), 6) stats[k] = format(v.get_median_value(), '.6f')
return stats return stats
......
...@@ -19,9 +19,20 @@ from __future__ import print_function ...@@ -19,9 +19,20 @@ from __future__ import print_function
import os import os
import multiprocessing import multiprocessing
def set_paddle_flags(**kwargs):
for key, value in kwargs.items():
if os.environ.get(key, None) is None:
os.environ[key] = str(value)
# NOTE(paddle-dev): All of these flags should be set before
# `import paddle`. Otherwise, it would not take any effect.
set_paddle_flags(
FLAGS_eager_delete_tensor_gb=0, # enable GC to save memory
)
import paddle.fluid as fluid import paddle.fluid as fluid
from ppdet.utils.eval_utils import parse_fetches, eval_run, eval_results from ppdet.utils.eval_utils import parse_fetches, eval_run, eval_results, json_eval_results
import ppdet.utils.checkpoint as checkpoint import ppdet.utils.checkpoint as checkpoint
from ppdet.utils.cli import ArgsParser from ppdet.utils.cli import ArgsParser
from ppdet.utils.check import check_gpu from ppdet.utils.check import check_gpu
...@@ -78,6 +89,11 @@ def main(): ...@@ -78,6 +89,11 @@ def main():
reader = create_reader(eval_feed, args_path=FLAGS.dataset_dir) reader = create_reader(eval_feed, args_path=FLAGS.dataset_dir)
pyreader.decorate_sample_list_generator(reader, place) pyreader.decorate_sample_list_generator(reader, place)
# eval already exists json file
if FLAGS.json_eval:
json_eval_results(eval_feed, cfg.metric,
json_directory=FLAGS.output_eval)
return
# compile program for multi-devices # compile program for multi-devices
if devices_num <= 1: if devices_num <= 1:
compile_program = fluid.compiler.CompiledProgram(eval_prog) compile_program = fluid.compiler.CompiledProgram(eval_prog)
...@@ -115,22 +131,25 @@ def main(): ...@@ -115,22 +131,25 @@ def main():
if 'mask' in results[0]: if 'mask' in results[0]:
resolution = model.mask_head.resolution resolution = model.mask_head.resolution
eval_results(results, eval_feed, cfg.metric, cfg.num_classes, resolution, eval_results(results, eval_feed, cfg.metric, cfg.num_classes, resolution,
is_bbox_normalized, FLAGS.output_file) is_bbox_normalized, FLAGS.output_eval)
if __name__ == '__main__': if __name__ == '__main__':
parser = ArgsParser() parser = ArgsParser()
parser.add_argument( parser.add_argument(
"-f", "--json_eval",
"--output_file", action='store_true',
default=None, default=False,
type=str, help="Whether to re eval with already exists bbox.json or mask.json")
help="Evaluation file name, default to bbox.json and mask.json.")
parser.add_argument( parser.add_argument(
"-d", "-d",
"--dataset_dir", "--dataset_dir",
default=None, default=None,
type=str, type=str,
help="Dataset path, same as DataFeed.dataset.dataset_dir") help="Dataset path, same as DataFeed.dataset.dataset_dir")
parser.add_argument(
"--output_eval",
default=None,
type=str,
help="Evaluation file directory, default is current directory.")
FLAGS = parser.parse_args() FLAGS = parser.parse_args()
main() main()
...@@ -22,6 +22,17 @@ import glob ...@@ -22,6 +22,17 @@ import glob
import numpy as np import numpy as np
from PIL import Image from PIL import Image
def set_paddle_flags(**kwargs):
for key, value in kwargs.items():
if os.environ.get(key, None) is None:
os.environ[key] = str(value)
# NOTE(paddle-dev): All of these flags should be set before
# `import paddle`. Otherwise, it would not take any effect.
set_paddle_flags(
FLAGS_eager_delete_tensor_gb=0, # enable GC to save memory
)
from paddle import fluid from paddle import fluid
from ppdet.core.workspace import load_config, merge_config, create from ppdet.core.workspace import load_config, merge_config, create
......
...@@ -23,16 +23,13 @@ import numpy as np ...@@ -23,16 +23,13 @@ import numpy as np
import datetime import datetime
from collections import deque from collections import deque
def set_paddle_flags(**kwargs): def set_paddle_flags(**kwargs):
for key, value in kwargs.items(): for key, value in kwargs.items():
if os.environ.get(key, None) is None: if os.environ.get(key, None) is None:
os.environ[key] = str(value) os.environ[key] = str(value)
# NOTE(paddle-dev): All of these flags should be set before
# NOTE(paddle-dev): All of these flags should be # `import paddle`. Otherwise, it would not take any effect.
# set before `import paddle`. Otherwise, it would
# not take any effect.
set_paddle_flags( set_paddle_flags(
FLAGS_eager_delete_tensor_gb=0, # enable GC to save memory FLAGS_eager_delete_tensor_gb=0, # enable GC to save memory
) )
...@@ -199,7 +196,7 @@ def main(): ...@@ -199,7 +196,7 @@ def main():
if 'mask' in results[0]: if 'mask' in results[0]:
resolution = model.mask_head.resolution resolution = model.mask_head.resolution
eval_results(results, eval_feed, cfg.metric, cfg.num_classes, eval_results(results, eval_feed, cfg.metric, cfg.num_classes,
resolution, is_bbox_normalized, FLAGS.output_file) resolution, is_bbox_normalized, FLAGS.output_eval)
train_pyreader.reset() train_pyreader.reset()
...@@ -218,11 +215,10 @@ if __name__ == '__main__': ...@@ -218,11 +215,10 @@ if __name__ == '__main__':
default=False, default=False,
help="Whether to perform evaluation in train") help="Whether to perform evaluation in train")
parser.add_argument( parser.add_argument(
"-f", "--output_eval",
"--output_file",
default=None, default=None,
type=str, type=str,
help="Evaluation file name, default to bbox.json and mask.json.") help="Evaluation directory, default is current directory.")
parser.add_argument( parser.add_argument(
"-d", "-d",
"--dataset_dir", "--dataset_dir",
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册