PaddlePaddle / PaddleDetection
Commit d6f38b0c (unverified)
Authored by Guanghua Yu on May 21, 2021; committed via GitHub on May 21, 2021.
[cherry-pick]fix image_shape in export_model (#3094)
* fix image_shape in export_model
Parent: 516fab87
Showing 9 changed files with 26 additions and 60 deletions (+26, -60):
configs/slim/README.md                +2   -2
deploy/TENSOR_RT.md                   +3   -2
deploy/cpp/include/config_parser.h    +0   -8
deploy/cpp/include/object_detector.h  +1   -3
deploy/cpp/include/preprocess_op.h    +8   -11
deploy/python/infer.py                +4   -6
deploy/python/keypoint_infer.py       +1   -3
deploy/python/preprocess.py           +2   -15
ppdet/engine/export_utils.py          +5   -10
configs/slim/README.md

@@ -20,7 +20,7 @@
 **Version relationship between PaddleDetection, PaddlePaddle and PaddleSlim:**
 | PaddleDetection version | PaddlePaddle version | PaddleSlim version | Notes |
 | :------------------: | :---------------: | :-------: |:---------------: |
-| release/2.1 | >= 2.1.0 | 2.1 | -- |
+| release/2.1 | >= 2.1.0 | 2.1 | Exporting quantized models depends on the latest Paddle develop branch, which can be installed from the [PaddlePaddle nightly builds](https://www.paddlepaddle.org.cn/documentation/docs/zh/install/Tables.html#whl-dev) |
 | release/2.0 | >= 2.0.1 | 2.0 | Quantization depends on Paddle 2.1 and PaddleSlim 2.1 |

@@ -107,7 +107,7 @@ python tools/export_model.py -c configs/{MODEL.yml} --slim_config configs/slim/{
 #### Benchmark on COCO
 | Model | Compression strategy | GFLOPs | Model size (MB) | Input size | Latency (SD855) | Box AP | Download | Model config | Slim config |
 | :---------: | :-------: | :------------: |:-------------: | :------: | :-------------: | :------: | :-----------------------------------------------------: |:-------------: | :------: |
-| PP-YOLO-MobileNetV3_large | baseline | -- | 18.5 | 608 | 25.1ms | 24.3 | [download](https://paddledet.bj.bcebos.com/models/ppyolo_mbv3_large_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/ppyolo/ppyolo_mbv3_large_coco.yml) | - |
+| PP-YOLO-MobileNetV3_large | baseline | -- | 18.5 | 608 | 25.1ms | 23.2 | [download](https://paddledet.bj.bcebos.com/models/ppyolo_mbv3_large_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/ppyolo/ppyolo_mbv3_large_coco.yml) | - |
 | PP-YOLO-MobileNetV3_large | prune (FPGM) | -37% | 12.6 | 608 | - | 22.3 | [download](https://paddledet.bj.bcebos.com/models/slim/ppyolo_mbv3_large_prune_fpgm.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/ppyolo/ppyolo_mbv3_large_coco.yml) | [slim config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/slim/prune/ppyolo_mbv3_large_prune_fpgm.yml) |
 | YOLOv3-DarkNet53 | baseline | -- | 238.2 | 608 | - | 39.0 | [download](https://paddledet.bj.bcebos.com/models/ppyolo_mbv3_large_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/yolov3/yolov3_darknet53_270e_coco.yml) | - |
 | YOLOv3-DarkNet53 | prune (FPGM) | -24% | - | 608 | - | 37.6 | [download](https://paddledet.bj.bcebos.com/models/slim/yolov3_darknet_prune_fpgm.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/yolov3/yolov3_darknet53_270e_coco.yml) | [slim config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/slim/prune/yolov3_darknet_prune_fpgm.yml) |
deploy/TENSOR_RT.md

@@ -8,7 +8,9 @@ TensorRT is NVIDIA's acceleration library for unified model deployment ...
 - If the official Python/CPP pages do not provide a prebuilt package or inference library, build one yourself following [compile from source](https://www.paddlepaddle.org.cn/documentation/docs/zh/install/compile/linux-compile.html).
-Note that the TensorRT version on your machine must match the TensorRT version used by the inference library.
+**Notes:**
+- The TensorRT version on your machine must match the TensorRT version used by the inference library.
+- Deployment and inference in PaddleDetection require TensorRT version > 6.0.

 ## 2. Export the model
 For details on exporting models, see the [PaddleDetection model export tutorial](../EXPORT_MODEL.md).

@@ -31,7 +33,6 @@ config->EnableTensorRtEngine(1 << 20 /*workspace_size*/,
 ### 3.2 TensorRT fixed-size prediction
 With TensorRT version <= 5, only fixed-size inputs are supported for TensorRT prediction.
 Specify the model input size at export time by setting `TestReader.inputs_def.image_shape=[3,640,640]`; see the [PaddleDetection model export tutorial](../EXPORT_MODEL.md) for details.
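For reference, here is a minimal Python sketch of running a model exported with a fixed shape (for example `TestReader.inputs_def.image_shape=[3,640,640]`) under TensorRT via the Paddle Inference API, in the spirit of `deploy/python/infer.py`. The model directory and the GPU/TensorRT settings below are illustrative assumptions, not values taken from this commit:

```python
# Minimal sketch (assumed paths and values): load a model exported with a
# fixed 3x640x640 input shape and enable the TensorRT engine.
from paddle.inference import Config, create_predictor

model_dir = "output_inference/yolov3_darknet53_270e_coco"  # hypothetical export dir
config = Config(model_dir + "/model.pdmodel", model_dir + "/model.pdiparams")
config.enable_use_gpu(200, 0)  # 200 MB initial GPU memory on device 0
config.enable_tensorrt_engine(
    workspace_size=1 << 30,
    max_batch_size=1,
    min_subgraph_size=3,
    precision_mode=Config.Precision.Float32,
    use_static=False,
    use_calib_mode=False)
predictor = create_predictor(config)  # with TRT <= 5, inputs must stay 3x640x640
```

Because the shape was fixed at export time, TensorRT can build the engine once and reuse it for every image.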
deploy/cpp/include/config_parser.h

@@ -91,13 +91,6 @@ class ConfigPaser {
       return false;
     }
-    if (config["image_shape"].IsDefined()) {
-      image_shape_ = config["image_shape"].as<std::vector<int>>();
-    } else {
-      std::cerr << "Please set image_shape." << std::endl;
-      return false;
-    }
     return true;
   }
   std::string mode_;

@@ -106,7 +99,6 @@ class ConfigPaser {
   int min_subgraph_size_;
   YAML::Node preprocess_info_;
   std::vector<std::string> label_list_;
-  std::vector<int> image_shape_;
 };
 }  // namespace PaddleDetection
deploy/cpp/include/object_detector.h

@@ -82,8 +82,7 @@ class ObjectDetector {
     config_.load_config(model_dir);
     this->min_subgraph_size_ = config_.min_subgraph_size_;
     threshold_ = config_.draw_threshold_;
-    image_shape_ = config_.image_shape_;
-    preprocessor_.Init(config_.preprocess_info_, image_shape_);
+    preprocessor_.Init(config_.preprocess_info_);
     LoadModel(model_dir, batch_size, run_mode);
   }

@@ -134,7 +133,6 @@ class ObjectDetector {
   std::vector<int> out_bbox_num_data_;
   float threshold_;
   ConfigPaser config_;
-  std::vector<int> image_shape_;
 };
 }  // namespace PaddleDetection
deploy/cpp/include/preprocess_op.h

@@ -48,19 +48,19 @@ class ImageBlob {
 // Abstraction of preprocessing opration class
 class PreprocessOp {
  public:
-  virtual void Init(const YAML::Node& item, const std::vector<int> image_shape) = 0;
+  virtual void Init(const YAML::Node& item) = 0;
   virtual void Run(cv::Mat* im, ImageBlob* data) = 0;
 };

 class InitInfo : public PreprocessOp {
  public:
-  virtual void Init(const YAML::Node& item, const std::vector<int> image_shape) {}
+  virtual void Init(const YAML::Node& item) {}
   virtual void Run(cv::Mat* im, ImageBlob* data);
 };

 class NormalizeImage : public PreprocessOp {
  public:
-  virtual void Init(const YAML::Node& item, const std::vector<int> image_shape) {
+  virtual void Init(const YAML::Node& item) {
     mean_ = item["mean"].as<std::vector<float>>();
     scale_ = item["std"].as<std::vector<float>>();
     is_scale_ = item["is_scale"].as<bool>();

@@ -77,21 +77,18 @@ class NormalizeImage : public PreprocessOp {
 class Permute : public PreprocessOp {
  public:
-  virtual void Init(const YAML::Node& item, const std::vector<int> image_shape) {}
+  virtual void Init(const YAML::Node& item) {}
   virtual void Run(cv::Mat* im, ImageBlob* data);
 };

 class Resize : public PreprocessOp {
  public:
-  virtual void Init(const YAML::Node& item, const std::vector<int> image_shape) {
+  virtual void Init(const YAML::Node& item) {
     interp_ = item["interp"].as<int>();
     //max_size_ = item["target_size"].as<int>();
     keep_ratio_ = item["keep_ratio"].as<bool>();
     target_size_ = item["target_size"].as<std::vector<int>>();
-    if (item["keep_ratio"]) {
-      in_net_shape_ = image_shape;
-    }
   }

   // Compute best resize scale for x-dimension, y-dimension

@@ -109,7 +106,7 @@ class Resize : public PreprocessOp {
 // Models with FPN need input shape % stride == 0
 class PadStride : public PreprocessOp {
  public:
-  virtual void Init(const YAML::Node& item, const std::vector<int> image_shape) {
+  virtual void Init(const YAML::Node& item) {
     stride_ = item["stride"].as<int>();
   }

@@ -121,14 +118,14 @@ class PadStride : public PreprocessOp {
 class Preprocessor {
  public:
-  void Init(const YAML::Node& config_node, const std::vector<int> image_shape) {
+  void Init(const YAML::Node& config_node) {
     // initialize image info at first
     ops_["InitInfo"] = std::make_shared<InitInfo>();
     for (const auto& item : config_node) {
       auto op_name = item["type"].as<std::string>();
       ops_[op_name] = CreateOp(op_name);
-      ops_[op_name]->Init(item, image_shape);
+      ops_[op_name]->Init(item);
     }
   }
deploy/python/infer.py

@@ -99,8 +99,7 @@ class Detector(object):
         input_im_lst = []
         input_im_info_lst = []
         for im_path in image_list:
-            im, im_info = preprocess(im_path, preprocess_ops,
-                                     self.pred_config.input_shape)
+            im, im_info = preprocess(im_path, preprocess_ops)
             input_im_lst.append(im)
             input_im_info_lst.append(im_info)
         inputs = create_inputs(input_im_lst, input_im_info_lst)

@@ -141,12 +140,12 @@ class Detector(object):
         '''
         self.det_times.preprocess_time_s.start()
         inputs = self.preprocess(image_list)
+        self.det_times.preprocess_time_s.end()
         np_boxes, np_masks = None, None
         input_names = self.predictor.get_input_names()
         for i in range(len(input_names)):
             input_tensor = self.predictor.get_input_handle(input_names[i])
             input_tensor.copy_from_cpu(inputs[input_names[i]])
-        self.det_times.preprocess_time_s.end()
         for i in range(warmup):
             self.predictor.run()
             output_names = self.predictor.get_output_names()

@@ -236,14 +235,14 @@ class DetectorSOLOv2(Detector):
             'cate_label': label of segm, shape:[N]
             'cate_score': confidence score of segm, shape:[N]
         '''
-        self.det_times.postprocess_time_s.start()
+        self.det_times.preprocess_time_s.start()
         inputs = self.preprocess(image)
+        self.det_times.preprocess_time_s.end()
         np_label, np_score, np_segms = None, None, None
         input_names = self.predictor.get_input_names()
         for i in range(len(input_names)):
             input_tensor = self.predictor.get_input_handle(input_names[i])
             input_tensor.copy_from_cpu(inputs[input_names[i]])
-        self.det_times.postprocess_time_s.end()
         for i in range(warmup):
             self.predictor.run()
             output_names = self.predictor.get_output_names()

@@ -331,7 +330,6 @@ class PredictConfig():
         self.mask = False
         if 'mask' in yml_conf:
             self.mask = yml_conf['mask']
-        self.input_shape = yml_conf['image_shape']
         self.print_config()

     def check_model(self, yml_conf):
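With this change the Python deployment path no longer threads `pred_config.input_shape` into `preprocess`; the operator list is built purely from the exported `infer_cfg.yml`. A sketch of the new call pattern, assuming it is run from `deploy/python/` with a hypothetical export directory and test image:

```python
# Sketch of the new preprocessing call path after this commit.
# Assumes it is run from deploy/python/ next to preprocess.py; the export
# directory and the image path are hypothetical examples.
import yaml

from preprocess import preprocess, Resize, NormalizeImage, Permute, PadStride  # noqa: F401

with open('output_inference/yolov3_darknet53_270e_coco/infer_cfg.yml') as f:
    infer_cfg = yaml.safe_load(f)

# Build the operators exactly as Detector does: pop 'type', instantiate by name.
preprocess_ops = []
for op_info in infer_cfg['Preprocess']:
    new_op_info = op_info.copy()
    op_type = new_op_info.pop('type')
    preprocess_ops.append(eval(op_type)(**new_op_info))

# No input_shape argument any more: each op carries its own target size.
im, im_info = preprocess('demo.jpg', preprocess_ops)
print(im.shape, im_info['im_shape'], im_info['scale_factor'])
```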
deploy/python/keypoint_infer.py

@@ -88,8 +88,7 @@ class KeyPoint_Detector(object):
             new_op_info = op_info.copy()
             op_type = new_op_info.pop('type')
             preprocess_ops.append(eval(op_type)(**new_op_info))
-        im, im_info = preprocess(im, preprocess_ops,
-                                 self.pred_config.input_shape)
+        im, im_info = preprocess(im, preprocess_ops)
        inputs = create_inputs(im, im_info)
        return inputs

@@ -213,7 +212,6 @@ class PredictConfig_KeyPoint():
         self.tagmap = False
         if 'keypoint_bottomup' == self.archcls:
             self.tagmap = True
-        self.input_shape = yml_conf['image_shape']
         self.print_config()

     def check_model(self, yml_conf):
deploy/python/preprocess.py

@@ -47,11 +47,7 @@ class Resize(object):
         interp (int): method of resize
     """

-    def __init__(self, target_size, keep_ratio=True, interp=cv2.INTER_LINEAR, ):
+    def __init__(self, target_size, keep_ratio=True, interp=cv2.INTER_LINEAR):
         if isinstance(target_size, int):
             target_size = [target_size, target_size]
         self.target_size = target_size

@@ -81,14 +77,6 @@ class Resize(object):
         im_info['im_shape'] = np.array(im.shape[:2]).astype('float32')
         im_info['scale_factor'] = np.array([im_scale_y, im_scale_x]).astype('float32')
-        # padding im when image_shape fixed by infer_cfg.yml
-        if self.keep_ratio and im_info['input_shape'][1] != -1:
-            max_size = im_info['input_shape'][1]
-            padding_im = np.zeros((max_size, max_size, im_channel), dtype=np.float32)
-            im_h, im_w = im.shape[:2]
-            padding_im[:im_h, :im_w, :] = im
-            im = padding_im
         return im, im_info

     def generate_scale(self, im):

@@ -205,13 +193,12 @@ class PadStride(object):
         return padding_im, im_info


-def preprocess(im, preprocess_ops, input_shape):
+def preprocess(im, preprocess_ops):
     # process image by preprocess_ops
     im_info = {
         'scale_factor': np.array([1., 1.], dtype=np.float32),
         'im_shape': None,
-        'input_shape': input_shape,
     }
     im, im_info = decode_image(im, im_info)
     for operator in preprocess_ops:
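The net effect in preprocess.py: `im_info` now carries only `scale_factor` and `im_shape` (no `input_shape`), and `Resize` no longer pads the image up to a fixed square. Below is a standalone, simplified sketch of the keep-ratio resize that remains; it is not the project code, and the target size is an illustrative value:

```python
# Simplified stand-in for Resize (keep_ratio) after this commit: it records
# only 'im_shape' and 'scale_factor'; the fixed-shape padding branch is gone.
import cv2
import numpy as np

def resize_keep_ratio(im, target_size=(608, 608), interp=cv2.INTER_LINEAR):
    # target_size here is illustrative; PaddleDetection reads it from infer_cfg.yml.
    origin_h, origin_w = im.shape[:2]
    scale = min(target_size[0] / origin_h, target_size[1] / origin_w)
    im = cv2.resize(im, None, None, fx=scale, fy=scale, interpolation=interp)
    im_info = {
        'im_shape': np.array(im.shape[:2]).astype('float32'),
        'scale_factor': np.array([scale, scale]).astype('float32'),
    }
    return im, im_info

dummy = (np.random.rand(480, 640, 3) * 255).astype('uint8')  # fake 480x640 image
out, info = resize_keep_ratio(dummy)
print(out.shape, info['im_shape'], info['scale_factor'])
```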
ppdet/engine/export_utils.py

@@ -58,9 +58,7 @@ def _parse_reader(reader_cfg, dataset_cfg, metric, arch, image_shape):
     for key, value in st.items():
         p = {'type': key}
         if key == 'Resize':
-            if value.get('keep_ratio', False) and int(image_shape[1]) != -1:
-                max_size = max(image_shape[1:])
-                image_shape = [3, max_size, max_size]
             if int(image_shape[1]) != -1:
                 value['target_size'] = image_shape[1:]
         p.update(value)
         preprocess_list.append(p)

@@ -76,7 +74,7 @@ def _parse_reader(reader_cfg, dataset_cfg, metric, arch, image_shape):
         })
         break

-    return preprocess_list, label_list, image_shape
+    return preprocess_list, label_list


 def _dump_infer_config(config, path, image_shape, model):

@@ -87,7 +85,6 @@ def _dump_infer_config(config, path, image_shape, model):
         'mode': 'fluid',
         'draw_threshold': 0.5,
         'metric': config['metric'],
-        'image_shape': image_shape
     })
     infer_arch = config['architecture']

@@ -107,10 +104,9 @@ def _dump_infer_config(config, path, image_shape, model):
     label_arch = 'detection_arch'
     if infer_arch in KEYPOINT_ARCH:
         label_arch = 'keypoint_arch'
-    infer_cfg['Preprocess'], infer_cfg['label_list'], image_shape = _parse_reader(
-        config['TestReader'], config['TestDataset'], config['metric'], label_arch, image_shape)
+    infer_cfg['Preprocess'], infer_cfg['label_list'] = _parse_reader(
+        config['TestReader'], config['TestDataset'], config['metric'], label_arch, image_shape)

     if infer_arch == 'S2ANet':
         # TODO: move background to num_classes

@@ -119,4 +115,3 @@ def _dump_infer_config(config, path, image_shape, model):
     yaml.dump(infer_cfg, open(path, 'w'))
     logger.info("Export inference config file to {}".format(os.path.join(path)))
-    return image_shape
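After this change, `_parse_reader` no longer rewrites `image_shape` to a square for keep-ratio Resize ops and no longer returns it; it only overrides the reader's `target_size` when a concrete height and width were supplied at export time. A toy mirror of that branch follows (the helper name and sample values are illustrative, not from the repository):

```python
# Toy mirror of the Resize handling in _parse_reader after this change.
# fill_resize_target is a hypothetical helper; the values are illustrative.
def fill_resize_target(resize_cfg, image_shape):
    # image_shape is [C, H, W]; -1 means the dimension stays dynamic.
    if int(image_shape[1]) != -1:
        resize_cfg['target_size'] = image_shape[1:]
    return resize_cfg

print(fill_resize_target({'keep_ratio': True, 'target_size': [800, 1333]}, [3, 640, 640]))
# {'keep_ratio': True, 'target_size': [640, 640]}
print(fill_resize_target({'keep_ratio': True, 'target_size': [800, 1333]}, [3, -1, -1]))
# {'keep_ratio': True, 'target_size': [800, 1333]}
```

In other words, a fixed export shape pins the exported `target_size`, while a dynamic shape leaves the reader's own preprocessing untouched.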