PaddlePaddle/PaddleDetection commit 38ebb263 (unverified)

fix slice infer one image save_results (#7654)

Authored by Feng Ni on Jan 31, 2023; committed via GitHub on Jan 31, 2023.
Parent: 55cc99b1
Showing 2 changed files with 62 additions and 13 deletions (+62 -13):

- configs/smalldet/README.md (+4 -2)
- ppdet/engine/trainer.py (+58 -11)
configs/smalldet/README.md

@@ -294,7 +294,7 @@ CUDA_VISIBLE_DEVICES=0 python tools/infer.py -c configs/smalldet/ppyoloe_crn_l_8
You can also have the original image automatically sliced and the sub-image predictions recombined to predict the full image, for example:
```bash
# single image
-CUDA_VISIBLE_DEVICES=0 python tools/infer.py -c configs/smalldet/ppyoloe_crn_l_80e_sliced_visdrone_640_025.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_80e_sliced_visdrone_640_025.pdparams --infer_img=demo/0000315_01601_d_0000509.jpg --draw_threshold=0.25 --slice_infer --slice_size 640 640 --overlap_ratio 0.25 0.25 --combine_method=nms --match_threshold=0.6 --match_metric=ios
+CUDA_VISIBLE_DEVICES=0 python tools/infer.py -c configs/smalldet/ppyoloe_crn_l_80e_sliced_visdrone_640_025.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_80e_sliced_visdrone_640_025.pdparams --infer_img=demo/0000315_01601_d_0000509.jpg --draw_threshold=0.25 --slice_infer --slice_size 640 640 --overlap_ratio 0.25 0.25 --combine_method=nms --match_threshold=0.6 --match_metric=ios --save_results=True
# or an image directory
CUDA_VISIBLE_DEVICES=0 python tools/infer.py -c configs/smalldet/ppyoloe_crn_l_80e_sliced_visdrone_640_025.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_80e_sliced_visdrone_640_025.pdparams --infer_dir=demo/ --draw_threshold=0.25 --slice_infer --slice_size 640 640 --overlap_ratio 0.25 0.25 --combine_method=nms --match_threshold=0.6 --match_metric=ios
```
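For intuition about what `--slice_size 640 640` and `--overlap_ratio 0.25 0.25` mean geometrically, here is a minimal, illustrative sketch of computing overlapping slice windows for one image. It is not the slicing code PaddleDetection actually runs, and the function name and boundary handling are assumptions for illustration only.

```python
# Illustrative only: enumerate (x1, y1, x2, y2) slice windows of a given size
# and overlap ratio over an image. A real implementation would also deduplicate
# windows that get clamped to the same position.
def slice_windows(img_w, img_h, slice_w=640, slice_h=640,
                  overlap_w=0.25, overlap_h=0.25):
    # stride between neighbouring slices, e.g. 640 * (1 - 0.25) = 480
    step_w = int(slice_w * (1.0 - overlap_w))
    step_h = int(slice_h * (1.0 - overlap_h))
    max_x = max(img_w - slice_w, 0)
    max_y = max(img_h - slice_h, 0)
    windows = []
    for y in range(0, max_y + step_h, step_h):
        for x in range(0, max_x + step_w, step_w):
            x1 = min(x, max_x)  # clamp the last column/row so the window
            y1 = min(y, max_y)  # stays inside the image
            windows.append((x1, y1, x1 + slice_w, y1 + slice_h))
    return windows

# e.g. a 1920x1080 frame -> 8 overlapping 640x640 windows
print(len(slice_windows(1920, 1080)))
```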
@@ -303,6 +303,7 @@ CUDA_VISIBLE_DEVICES=0 python tools/infer.py -c configs/smalldet/ppyoloe_crn_l_8
- Set `--combine_method` to choose how overlapping sub-image results are merged and deduplicated; the default is `nms`.
- Set `--match_threshold` to set the deduplication threshold used when merging sub-image results; the default is 0.6.
- Set `--match_metric` to choose the deduplication metric used when merging sub-image results. The default `ios` is intersection-over-smaller (the intersection area of two boxes divided by the area of the smaller box); intersection-over-union `iou` (intersection area divided by union area) can also be used. The accuracy impact varies by dataset, but `ios` gives slightly faster prediction (see the sketch after this list).
+- Set `--save_results` to save the image's prediction results as a JSON file; this is generally used only when predicting a single image.
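As referenced in the `--match_metric` item above, the two overlap measures differ only in their denominator. A minimal sketch in plain Python (not PaddleDetection's implementation; box format assumed to be corner coordinates) is:

```python
# Sketch of the two overlap metrics for boxes in (x1, y1, x2, y2) form.
def box_area(b):
    return max(0.0, b[2] - b[0]) * max(0.0, b[3] - b[1])

def intersection(a, b):
    x1, y1 = max(a[0], b[0]), max(a[1], b[1])
    x2, y2 = min(a[2], b[2]), min(a[3], b[3])
    return max(0.0, x2 - x1) * max(0.0, y2 - y1)

def iou(a, b):  # intersection over union
    inter = intersection(a, b)
    return inter / (box_area(a) + box_area(b) - inter + 1e-9)

def ios(a, b):  # intersection over the smaller box
    inter = intersection(a, b)
    return inter / (min(box_area(a), box_area(b)) + 1e-9)

a, b = (0, 0, 100, 100), (50, 50, 100, 100)
print(iou(a, b), ios(a, b))  # the small box lies inside the big one: iou=0.25, ios=1.0
```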
### Deployment
@@ -323,7 +324,7 @@ CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inferenc
```bash
# deploy slice infer
# single image
-CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_crn_l_80e_sliced_visdrone_640_025 --image_file=demo/0000315_01601_d_0000509.jpg --device=GPU --save_images --threshold=0.25 --slice_infer --slice_size 640 640 --overlap_ratio 0.25 0.25 --combine_method=nms --match_threshold=0.6 --match_metric=ios
+CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_crn_l_80e_sliced_visdrone_640_025 --image_file=demo/0000315_01601_d_0000509.jpg --device=GPU --save_images --threshold=0.25 --slice_infer --slice_size 640 640 --overlap_ratio 0.25 0.25 --combine_method=nms --match_threshold=0.6 --match_metric=ios --save_results=True
# or an image directory
CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_crn_l_80e_sliced_visdrone_640_025 --image_dir=demo/ --device=GPU --save_images --threshold=0.25 --slice_infer --slice_size 640 640 --overlap_ratio 0.25 0.25 --combine_method=nms --match_threshold=0.6 --match_metric=ios
```
@@ -332,6 +333,7 @@ CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inferenc
- Set `--combine_method` to choose how overlapping sub-image results are merged and deduplicated; the default is `nms` (a conceptual sketch follows this list).
- Set `--match_threshold` to set the deduplication threshold used when merging sub-image results; the default is 0.6.
- Set `--match_metric` to choose the deduplication metric used when merging sub-image results. The default `ios` is intersection-over-smaller (the intersection area of two boxes divided by the area of the smaller box); intersection-over-union `iou` (intersection area divided by union area) can also be used. The accuracy impact varies by dataset, but `ios` gives slightly faster prediction.
+- Set `--save_results` to save the image's prediction results as a JSON file; this is generally used only when predicting a single image.
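For a rough picture of what `--combine_method=nms` does when recombining sub-image results (referenced in the first item above), the sketch below greedily keeps the highest-scoring boxes and drops near-duplicates whose overlap exceeds the match threshold. This is a conceptual illustration only; the real merging happens inside PaddleDetection's NMS utilities, and all names here are assumptions.

```python
import numpy as np

# Illustrative greedy NMS over detections gathered from all slices.
# dets: (N, 5) array of [x1, y1, x2, y2, score] in full-image coordinates.
def merge_slice_dets(dets, match_threshold=0.6):
    order = dets[:, 4].argsort()[::-1]  # highest score first
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        rest = order[1:]
        # overlap of the kept box with the remaining boxes (IoU here;
        # the --match_metric flag selects IoU or IoS in the real tool)
        x1 = np.maximum(dets[i, 0], dets[rest, 0])
        y1 = np.maximum(dets[i, 1], dets[rest, 1])
        x2 = np.minimum(dets[i, 2], dets[rest, 2])
        y2 = np.minimum(dets[i, 3], dets[rest, 3])
        inter = np.clip(x2 - x1, 0, None) * np.clip(y2 - y1, 0, None)
        area_i = (dets[i, 2] - dets[i, 0]) * (dets[i, 3] - dets[i, 1])
        area_r = (dets[rest, 2] - dets[rest, 0]) * (dets[rest, 3] - dets[rest, 1])
        overlap = inter / (area_i + area_r - inter + 1e-9)
        order = rest[overlap <= match_threshold]  # drop near-duplicates
    return dets[keep]

dets = np.array([[0, 0, 100, 100, 0.9],   # from slice A
                 [2, 1, 101, 99, 0.8],    # same object seen by slice B
                 [200, 200, 240, 240, 0.7]])
print(merge_slice_dets(dets))             # the duplicate box is suppressed
```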
ppdet/engine/trainer.py
@@ -774,6 +774,44 @@ class Trainer(object):
        loader = create('TestReader')(self.dataset, 0)

        imid2path = self.dataset.get_imid2path()

+        def setup_metrics_for_loader():
+            # mem
+            metrics = copy.deepcopy(self._metrics)
+            mode = self.mode
+            save_prediction_only = self.cfg[
+                'save_prediction_only'] if 'save_prediction_only' in self.cfg else None
+            output_eval = self.cfg[
+                'output_eval'] if 'output_eval' in self.cfg else None
+
+            # modify
+            self.mode = '_test'
+            self.cfg['save_prediction_only'] = True
+            self.cfg['output_eval'] = output_dir
+            self.cfg['imid2path'] = imid2path
+            self._init_metrics()
+
+            # restore
+            self.mode = mode
+            self.cfg.pop('save_prediction_only')
+            if save_prediction_only is not None:
+                self.cfg['save_prediction_only'] = save_prediction_only
+
+            self.cfg.pop('output_eval')
+            if output_eval is not None:
+                self.cfg['output_eval'] = output_eval
+
+            self.cfg.pop('imid2path')
+
+            _metrics = copy.deepcopy(self._metrics)
+            self._metrics = metrics
+
+            return _metrics
+
+        if save_results:
+            metrics = setup_metrics_for_loader()
+        else:
+            metrics = []
+
        anno_file = self.dataset.get_anno()
        clsid2catid, catid2name = get_categories(
            self.cfg.metric, anno_file=anno_file)
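The new `setup_metrics_for_loader` helper above saves `self.mode` and the relevant `self.cfg` keys, overrides them so metrics are initialized in prediction-only mode writing to `output_dir`, and then restores everything. A generic sketch of that save/modify/restore pattern as a context manager (a hypothetical helper, not part of PaddleDetection) could look like this:

```python
from contextlib import contextmanager

# Hypothetical illustration of the save/modify/restore pattern used by
# setup_metrics_for_loader: temporarily override keys in a dict-like cfg,
# then put the original values (or their absence) back.
@contextmanager
def temporary_cfg(cfg, **overrides):
    missing = object()
    saved = {k: cfg.get(k, missing) for k in overrides}
    cfg.update(overrides)
    try:
        yield cfg
    finally:
        for k, old in saved.items():
            if old is missing:
                cfg.pop(k, None)  # key did not exist before: remove it
            else:
                cfg[k] = old      # key existed: restore the old value

cfg = {'output_eval': None}
with temporary_cfg(cfg, save_prediction_only=True, output_eval='output'):
    pass  # metrics would be initialized here against the overridden cfg
print(cfg)  # {'output_eval': None} -- the overrides are gone afterwards
```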
@@ -819,6 +857,9 @@ class Trainer(object):
            merged_bboxs = []
+            data['im_id'] = data['ori_im_id']
+            for _m in metrics:
+                _m.update(data, merged_results)
            for key in ['im_shape', 'scale_factor', 'im_id']:
                if isinstance(data, typing.Sequence):
                    merged_results[key] = data[0][key]
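The `_m.update(data, merged_results)` call added here, together with the `_m.accumulate()` and `_m.reset()` calls in the next hunk, drives each metric through the usual per-batch lifecycle. A minimal mock that only shows the calling order (a hypothetical stand-in, not a real PaddleDetection metric) is:

```python
# Hypothetical stand-in showing the update -> accumulate -> reset order used
# in this diff; real metrics do real work (and write JSON results) in each step.
class DummyMetric:
    def __init__(self):
        self.results = []

    def update(self, data, outputs):
        # record something per merged batch
        self.results.append((data['im_id'], len(outputs.get('bbox', []))))

    def accumulate(self):
        print('would aggregate/save results for', len(self.results), 'batches')

    def reset(self):
        self.results = []

metrics = [DummyMetric()]
for batch in [{'im_id': 0}, {'im_id': 1}]:
    for _m in metrics:
        _m.update(batch, {'bbox': []})
for _m in metrics:
    _m.accumulate()
    _m.reset()
```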
@@ -829,31 +870,36 @@ class Trainer(object):
                    merged_results[key] = value.numpy()
            results.append(merged_results)

+        for _m in metrics:
+            _m.accumulate()
+            _m.reset()
+
        if visualize:
            for outs in results:
                batch_res = get_infer_results(outs, clsid2catid)
                bbox_num = outs['bbox_num']

                start = 0
                for i, im_id in enumerate(outs['im_id']):
                    image_path = imid2path[int(im_id)]
                    image = Image.open(image_path).convert('RGB')
                    image = ImageOps.exif_transpose(image)
                    self.status['original_image'] = np.array(image.copy())

                    end = start + bbox_num[i]
                    bbox_res = batch_res['bbox'][start:end] \
                        if 'bbox' in batch_res else None
                    mask_res = batch_res['mask'][start:end] \
                        if 'mask' in batch_res else None
                    segm_res = batch_res['segm'][start:end] \
                        if 'segm' in batch_res else None
                    keypoint_res = batch_res['keypoint'][start:end] \
                        if 'keypoint' in batch_res else None
                    pose3d_res = batch_res['pose3d'][start:end] \
                        if 'pose3d' in batch_res else None
-                    image = visualize_results(
-                        image, bbox_res, mask_res=None, segm_res=None,
-                        keypoint_res=None, pose3d_res=None, im_id=int(im_id),
-                        catid2name=catid2name, threshold=draw_threshold)
+                    image = visualize_results(
+                        image, bbox_res, mask_res, segm_res, keypoint_res,
+                        pose3d_res, int(im_id), catid2name, draw_threshold)
                    self.status['result_image'] = np.array(image.copy())
                    if self._compose_callback:
                        self._compose_callback.on_step_end(self.status)
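In the visualization loop above, `batch_res['bbox']` holds the detections of all images in one flat list and `outs['bbox_num'][i]` says how many of them belong to image `i`, so `start`/`end` walk through that flat list image by image. A small standalone sketch of the same bookkeeping (illustrative names only, not PaddleDetection code):

```python
# Illustrative only: split a flat per-batch detection list into per-image
# chunks using bbox_num, mirroring the start/end bookkeeping above.
def split_by_bbox_num(flat_dets, bbox_num):
    per_image, start = [], 0
    for n in bbox_num:
        end = start + n
        per_image.append(flat_dets[start:end])
        start = end
    return per_image

# Three images with 2, 0 and 1 detections respectively.
print(split_by_bbox_num(['d0', 'd1', 'd2'], [2, 0, 1]))
# [['d0', 'd1'], [], ['d2']]
```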
@@ -863,6 +909,7 @@ class Trainer(object):
                    logger.info("Detection bbox results save in {}".format(save_name))
                    image.save(save_name, quality=95)

                    start = end

    def predict(self,