s920243400 / PaddleDetection (forked from PaddlePaddle / PaddleDetection)
Commit c3b87b79
Authored by Guanghua Yu on Jul 31, 2019; committed by wangguanzhong on Jul 31, 2019
Parent commit: 88db5c1b

[PaddleDetection] fix some easy of use problem (#2962)

* fix easy problem
9 changed files with 109 additions and 55 deletions (+109 -55)

docs/GETTING_STARTED.md        +2   -2
docs/GETTING_STARTED_cn.md     +2   -2
ppdet/utils/coco_eval.py       +32  -24
ppdet/utils/download.py        +1   -1
ppdet/utils/eval_utils.py      +28  -8
ppdet/utils/stats.py           +1   -1
tools/eval.py                  +27  -8
tools/infer.py                 +11  -0
tools/train.py                 +5   -9
docs/GETTING_STARTED.md

@@ -35,7 +35,7 @@ python tools/train.py -c configs/faster_rcnn_r50_1x.yml
 - `-r` or `--resume_checkpoint`: Checkpoint path for resuming training. Such as: `-r output/faster_rcnn_r50_1x/10000`
 - `--eval`: Whether to perform evaluation in training, default is `False`
-- `-p` or `--output_eval`: If perform evaluation in training, this edits evaluation directory, default is current directory.
+- `--output_eval`: If perform evaluation in training, this edits evaluation directory, default is current directory.
 - `-d` or `--dataset_dir`: Dataset path, same as `dataset_dir` of configs. Such as: `-d dataset/coco`
 - `-o`: Set configuration options in config file. Such as: `-o weights=output/faster_rcnn_r50_1x/model_final`

@@ -90,7 +90,7 @@ python tools/eval.py -c configs/faster_rcnn_r50_1x.yml
 #### Optional arguments
 - `-d` or `--dataset_dir`: Dataset path, same as dataset_dir of configs. Such as: `-d dataset/coco`
-- `-p` or `--output_eval`: Evaluation directory, default is current directory.
+- `--output_eval`: Evaluation directory, default is current directory.
 - `-o`: Set configuration options in config file. Such as: `-o weights=output/faster_rcnn_r50_1x/model_final`
 - `--json_eval`: Whether to eval with already existed bbox.json or mask.json. Default is `False`. Json file directory is assigned by `-f` argument.
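In practice the short `-p` form is dropped and the evaluation output directory is passed only as `--output_eval`; for example, a command such as `python tools/eval.py -c configs/faster_rcnn_r50_1x.yml --output_eval eval_output -d dataset/coco` (the directory name `eval_output` is purely illustrative) now writes bbox.json/mask.json under that directory instead of prefixing the file names.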
docs/GETTING_STARTED_cn.md

@@ -36,7 +36,7 @@ python tools/train.py -c configs/faster_rcnn_r50_1x.yml
 - `-r` or `--resume_checkpoint`: Resume training from a checkpoint, e.g.: `-r output/faster_rcnn_r50_1x/10000`
 - `--eval`: Whether to evaluate during training, default is `False`
-- `-p` or `--output_eval`: If evaluating during training, this sets the directory where the evaluation json is saved; default is the current directory.
+- `--output_eval`: If evaluating during training, this sets the directory where the evaluation json is saved; default is the current directory.
 - `-d` or `--dataset_dir`: Dataset path, same as `dataset_dir` in the config file, e.g.: `-d dataset/coco`
 - `-o`: Set configuration options in the config file, e.g.: `-o weights=output/faster_rcnn_r50_1x/model_final`

@@ -84,7 +84,7 @@ python tools/eval.py -c configs/faster_rcnn_r50_1x.yml
 #### Optional arguments
 - `-d` or `--dataset_dir`: Dataset path, same as `dataset_dir` in the config file, e.g.: `-d dataset/coco`
-- `-p` or `--output_eval`: Sets the directory where the evaluation json is saved; default is the current directory.
+- `--output_eval`: Sets the directory where the evaluation json is saved; default is the current directory.
 - `-o`: Set configuration options in the config file, e.g.: `-o weights=output/faster_rcnn_r50_1x/model_final`
 - `--json_eval`: Whether to evaluate with an existing bbox.json or mask.json. Default is `False`; the json file path is set via the `-f` argument.
ppdet/utils/coco_eval.py

@@ -38,6 +38,7 @@ __all__ = [
     'mask2out',
     'get_category_info',
     'proposal_eval',
+    'cocoapi_eval',
 ]

@@ -61,22 +62,10 @@ def proposal_eval(results, anno_file, outfile, max_dets=(100, 300, 1000)):
     with open(outfile, 'w') as f:
         json.dump(xywh_results, f)
 
-    coco_gt = COCO(anno_file)
-    logger.info("Start evaluate...")
-    coco_dt = coco_gt.loadRes(outfile)
-    coco_ev = COCOeval(coco_gt, coco_dt, 'bbox')
-    coco_ev.params.useCats = 0
-    coco_ev.params.maxDets = list(max_dets)
-    coco_ev.evaluate()
-    coco_ev.accumulate()
-    coco_ev.summarize()
+    cocoapi_eval(outfile, 'proposal', anno_file=anno_file, max_dets=max_dets)
     # flush coco evaluation result
     sys.stdout.flush()
 
 
 def bbox_eval(results, anno_file, outfile, with_background=True):
     assert 'bbox' in results[0]
     assert outfile.endswith('.json')

@@ -98,12 +87,7 @@ def bbox_eval(results, anno_file, outfile, with_background=True):
     with open(outfile, 'w') as f:
         json.dump(xywh_results, f)
 
-    logger.info("Start evaluate...")
-    coco_dt = coco_gt.loadRes(outfile)
-    coco_ev = COCOeval(coco_gt, coco_dt, 'bbox')
-    coco_ev.evaluate()
-    coco_ev.accumulate()
-    coco_ev.summarize()
+    cocoapi_eval(outfile, 'bbox', coco_gt=coco_gt)
     # flush coco evaluation result
     sys.stdout.flush()

@@ -123,12 +107,36 @@ def mask_eval(results, anno_file, outfile, resolution, thresh_binarize=0.5):
     with open(outfile, 'w') as f:
         json.dump(segm_results, f)
 
-    logger.info("Start evaluate...")
-    coco_dt = coco_gt.loadRes(outfile)
-    coco_ev = COCOeval(coco_gt, coco_dt, 'segm')
-    coco_ev.evaluate()
-    coco_ev.accumulate()
-    coco_ev.summarize()
+    cocoapi_eval(outfile, 'segm', coco_gt=coco_gt)
+
+
+def cocoapi_eval(jsonfile,
+                 style,
+                 coco_gt=None,
+                 anno_file=None,
+                 max_dets=(100, 300, 1000)):
+    """
+    Args:
+        jsonfile: Evaluation json file, eg: bbox.json, mask.json.
+        style: COCOeval style, can be `bbox`, `segm` and `proposal`.
+        coco_gt: Whether to load COCOAPI through anno_file,
+                 eg: coco_gt = COCO(anno_file)
+        anno_file: COCO annotations file.
+        max_dets: COCO evaluation maxDets.
+    """
+    assert coco_gt != None or anno_file != None
+    if coco_gt == None:
+        coco_gt = COCO(anno_file)
+    logger.info("Start evaluate...")
+    coco_dt = coco_gt.loadRes(jsonfile)
+    if style == 'proposal':
+        coco_eval = COCOeval(coco_gt, coco_dt, 'bbox')
+        coco_eval.params.useCats = 0
+        coco_eval.params.maxDets = list(max_dets)
+    else:
+        coco_eval = COCOeval(coco_gt, coco_dt, style)
+    coco_eval.evaluate()
+    coco_eval.accumulate()
+    coco_eval.summarize()
 
 
 def proposal2out(results, is_bbox_normalized=False):
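The net effect of this file is that proposal_eval, bbox_eval and mask_eval all delegate to the new cocoapi_eval helper instead of repeating the COCOeval boilerplate. A minimal sketch of calling the helper directly, assuming pycocotools is installed and that the annotation and result-json paths shown exist (the paths are illustrative, not taken from this commit):

    from pycocotools.coco import COCO
    from ppdet.utils.coco_eval import cocoapi_eval

    ANNO = 'dataset/coco/annotations/instances_val2017.json'  # illustrative path

    # Variant 1: reuse an already-loaded ground truth, as bbox_eval/mask_eval do.
    coco_gt = COCO(ANNO)
    cocoapi_eval('bbox.json', 'bbox', coco_gt=coco_gt)

    # Variant 2: let the helper load the ground truth itself, as proposal_eval does;
    # for style 'proposal' it disables useCats and applies max_dets internally.
    cocoapi_eval('proposal.json', 'proposal', anno_file=ANNO, max_dets=(100, 300, 1000))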
ppdet/utils/download.py

@@ -76,7 +76,7 @@ def get_dataset_path(path, annotation, image_dir):
     if _dataset_exists(path, annotation, image_dir):
         return path
 
-    logger.info("Dataset {} not exitst, try searching {} or "
+    logger.info("Dataset {} not exists, try searching {} or "
                 "downloading dataset...".format(osp.realpath(path), DATASET_HOME))
ppdet/utils/eval_utils.py

@@ -18,12 +18,13 @@ from __future__ import print_function
 
 import logging
 import numpy as np
+import os
 
 import paddle.fluid as fluid
 
 from ppdet.utils.voc_eval import bbox_eval as voc_bbox_eval
 
-__all__ = ['parse_fetches', 'eval_run', 'eval_results']
+__all__ = ['parse_fetches', 'eval_run', 'eval_results', 'json_eval_results']
 
 logger = logging.getLogger(__name__)

@@ -96,7 +97,7 @@ def eval_results(results,
                  num_classes,
                  resolution=None,
                  is_bbox_normalized=False,
-                 output_file=None):
+                 output_directory=None):
     """Evaluation for evaluation program results"""
     if metric == 'COCO':
         from ppdet.utils.coco_eval import proposal_eval, bbox_eval, mask_eval

@@ -104,18 +105,18 @@ def eval_results(results,
         with_background = getattr(feed, 'with_background', True)
         if 'proposal' in results[0]:
             output = 'proposal.json'
-            if output_file:
-                output = '{}_proposal.json'.format(output_file)
+            if output_directory:
+                output = os.path.join(output_directory, 'proposal.json')
             proposal_eval(results, anno_file, output)
         if 'bbox' in results[0]:
             output = 'bbox.json'
-            if output_file:
-                output = '{}_bbox.json'.format(output_file)
+            if output_directory:
+                output = os.path.join(output_directory, 'bbox.json')
             bbox_eval(results, anno_file, output, with_background)
         if 'mask' in results[0]:
             output = 'mask.json'
-            if output_file:
-                output = '{}_mask.json'.format(output_file)
+            if output_directory:
+                output = os.path.join(output_directory, 'mask.json')
             mask_eval(results, anno_file, output, resolution)
     else:
         if 'accum_map' in results[-1]:

@@ -124,3 +125,22 @@ def eval_results(results,
         elif 'bbox' in results[0]:
             voc_bbox_eval(results, num_classes, is_bbox_normalized=is_bbox_normalized)
+
+
+def json_eval_results(feed, metric, json_directory=None):
+    """
+    cocoapi eval with already exists proposal.json, bbox.json or mask.json
+    """
+    assert metric == 'COCO'
+    from ppdet.utils.coco_eval import cocoapi_eval
+    anno_file = getattr(feed.dataset, 'annotation', None)
+    json_file_list = ['proposal.json', 'bbox.json', 'mask.json']
+    if json_directory:
+        for k, v in enumerate(json_file_list):
+            json_file_list[k] = os.path.join(str(json_directory), v)
+    coco_eval_style = ['proposal', 'bbox', 'segm']
+    for i, v_json in enumerate(json_file_list):
+        if os.path.exists(v_json):
+            cocoapi_eval(v_json, coco_eval_style[i], anno_file=anno_file)
+        else:
+            logger.info("{} not exists!".format(v_json))
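The new json_eval_results only needs an object exposing feed.dataset.annotation and a directory holding previously dumped proposal.json/bbox.json/mask.json files; anything missing is logged and skipped. A sketch of driving it outside tools/eval.py, where the stand-in feed, annotation path, and the 'output' directory are placeholders for whatever the eval config actually provides:

    from types import SimpleNamespace
    from ppdet.utils.eval_utils import json_eval_results

    # Stand-in feed object: json_eval_results reads only feed.dataset.annotation.
    feed = SimpleNamespace(dataset=SimpleNamespace(
        annotation='dataset/coco/annotations/instances_val2017.json'))

    # Re-scores output/proposal.json, output/bbox.json and output/mask.json
    # (whichever exist) against the annotation file; metric must be 'COCO'.
    json_eval_results(feed, 'COCO', json_directory='output')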
ppdet/utils/stats.py

@@ -55,7 +55,7 @@ class TrainingStats(object):
         for k, v in extras.items():
             stats[k] = v
         for k, v in self.smoothed_losses_and_metrics.items():
-            stats[k] = round(v.get_median_value(), 6)
+            stats[k] = format(v.get_median_value(), '.6f')
         return stats
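The one-line change in TrainingStats swaps a numeric round for fixed-width string formatting, so logged losses keep six decimal places instead of having trailing zeros stripped from the float repr. A quick comparison with an arbitrary value:

    loss = 0.5000004321
    print(round(loss, 6))       # 0.5      -- float repr drops the trailing zeros
    print(format(loss, '.6f'))  # 0.500000 -- constant-width string for the log line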
tools/eval.py

@@ -19,9 +19,20 @@ from __future__ import print_function
 import os
 import multiprocessing
 
+
+def set_paddle_flags(**kwargs):
+    for key, value in kwargs.items():
+        if os.environ.get(key, None) is None:
+            os.environ[key] = str(value)
+
+
+# NOTE(paddle-dev): All of these flags should be set before
+# `import paddle`. Otherwise, it would not take any effect.
+set_paddle_flags(
+    FLAGS_eager_delete_tensor_gb=0,  # enable GC to save memory
+)
+
 import paddle.fluid as fluid
 
-from ppdet.utils.eval_utils import parse_fetches, eval_run, eval_results
+from ppdet.utils.eval_utils import parse_fetches, eval_run, eval_results, json_eval_results
 import ppdet.utils.checkpoint as checkpoint
 from ppdet.utils.cli import ArgsParser
 from ppdet.utils.check import check_gpu

@@ -78,6 +89,11 @@ def main():
     reader = create_reader(eval_feed, args_path=FLAGS.dataset_dir)
     pyreader.decorate_sample_list_generator(reader, place)
 
+    # eval already exists json file
+    if FLAGS.json_eval:
+        json_eval_results(eval_feed, cfg.metric, json_directory=FLAGS.output_eval)
+        return
+
     # compile program for multi-devices
     if devices_num <= 1:
         compile_program = fluid.compiler.CompiledProgram(eval_prog)

@@ -115,22 +131,25 @@ def main():
     if 'mask' in results[0]:
         resolution = model.mask_head.resolution
     eval_results(results, eval_feed, cfg.metric, cfg.num_classes, resolution,
-                 is_bbox_normalized, FLAGS.output_file)
+                 is_bbox_normalized, FLAGS.output_eval)
 
 
 if __name__ == '__main__':
     parser = ArgsParser()
     parser.add_argument(
-        "-f",
-        "--output_file",
-        default=None,
-        type=str,
-        help="Evaluation file name, default to bbox.json and mask.json.")
+        "--json_eval",
+        action='store_true',
+        default=False,
+        help="Whether to re eval with already exists bbox.json or mask.json")
     parser.add_argument(
         "-d",
         "--dataset_dir",
         default=None,
         type=str,
         help="Dataset path, same as DataFeed.dataset.dataset_dir")
+    parser.add_argument(
+        "--output_eval",
+        default=None,
+        type=str,
+        help="Evaluation file directory, default is current directory.")
     FLAGS = parser.parse_args()
     main()
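The set_paddle_flags helper added here (and to tools/infer.py and tools/train.py below) only exports a flag when the corresponding environment variable is absent, so values exported by the user or a launcher script take precedence. A self-contained sketch of that behaviour; FLAGS_demo_existing is a made-up name used purely for illustration:

    import os

    def set_paddle_flags(**kwargs):
        # Same logic as in the diff: write a value only if the env var is not set yet.
        for key, value in kwargs.items():
            if os.environ.get(key, None) is None:
                os.environ[key] = str(value)

    os.environ['FLAGS_demo_existing'] = '1'           # pretend the user exported this
    set_paddle_flags(FLAGS_eager_delete_tensor_gb=0,  # set: was not defined before
                     FLAGS_demo_existing=0)           # ignored: user value kept
    print(os.environ['FLAGS_eager_delete_tensor_gb'])  # '0'
    print(os.environ['FLAGS_demo_existing'])           # '1'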
tools/infer.py

@@ -22,6 +22,17 @@ import glob
 import numpy as np
 from PIL import Image
 
+
+def set_paddle_flags(**kwargs):
+    for key, value in kwargs.items():
+        if os.environ.get(key, None) is None:
+            os.environ[key] = str(value)
+
+
+# NOTE(paddle-dev): All of these flags should be set before
+# `import paddle`. Otherwise, it would not take any effect.
+set_paddle_flags(
+    FLAGS_eager_delete_tensor_gb=0,  # enable GC to save memory
+)
+
 from paddle import fluid
 
 from ppdet.core.workspace import load_config, merge_config, create
tools/train.py

@@ -23,16 +23,13 @@ import numpy as np
 import datetime
 from collections import deque
 
 def set_paddle_flags(**kwargs):
     for key, value in kwargs.items():
         if os.environ.get(key, None) is None:
             os.environ[key] = str(value)
 
-# NOTE(paddle-dev): All of these flags should be
-# set before `import paddle`. Otherwise, it would
-# not take any effect.
+# NOTE(paddle-dev): All of these flags should be set before
+# `import paddle`. Otherwise, it would not take any effect.
 set_paddle_flags(
     FLAGS_eager_delete_tensor_gb=0,  # enable GC to save memory
 )

@@ -199,7 +196,7 @@ def main():
             if 'mask' in results[0]:
                 resolution = model.mask_head.resolution
             eval_results(results, eval_feed, cfg.metric, cfg.num_classes,
-                         resolution, is_bbox_normalized, FLAGS.output_file)
+                         resolution, is_bbox_normalized, FLAGS.output_eval)
 
             train_pyreader.reset()

@@ -218,11 +215,10 @@ if __name__ == '__main__':
         default=False,
         help="Whether to perform evaluation in train")
     parser.add_argument(
-        "-f",
-        "--output_file",
+        "--output_eval",
         default=None,
         type=str,
-        help="Evaluation file name, default to bbox.json and mask.json.")
+        help="Evaluation directory, default is current directory.")
     parser.add_argument(
         "-d",
         "--dataset_dir",