s920243400 / PaddleDetection (forked from PaddlePaddle / PaddleDetection)
Commit 5606d243 (unverified)
Authored on May 18, 2020 by wangguanzhong; committed by GitHub on May 18, 2020.
remove trt_int8 (#709)

Parent commit: 05c71b5f
Showing 4 changed files with 15 additions and 10 deletions (+15 -10).
- deploy/cpp/include/config_parser.h (+2 -2)
- deploy/cpp/src/object_detector.cc (+3 -2)
- deploy/python/README.md (+1 -1)
- deploy/python/infer.py (+9 -5)
deploy/cpp/include/config_parser.h

```diff
@@ -42,12 +42,12 @@ class ConfigPaser {
     YAML::Node config;
     config = YAML::LoadFile(model_dir + OS_PATH_SEP + cfg);
 
-    // Get runtime mode : fluid, trt_int8, trt_fp16, trt_fp32
+    // Get runtime mode : fluid, trt_fp16, trt_fp32
     if (config["mode"].IsDefined()) {
       mode_ = config["mode"].as<std::string>();
     } else {
       std::cerr << "Please set mode, "
-                << "support value : fluid/trt_int8/trt_fp16/trt_fp32."
+                << "support value : fluid/trt_fp16/trt_fp32."
                 << std::endl;
       return false;
     }
```
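For reference, the check above can be sketched in Python. This is an illustrative analogue of the C++ parser, not code from the repository, and the `infer_cfg.yml` file name is an assumption:

```python
# Illustrative Python analogue of the C++ ConfigPaser check above.
# The config file name "infer_cfg.yml" is an assumption for this sketch.
import os
import yaml  # PyYAML


def load_run_mode(model_dir, cfg="infer_cfg.yml"):
    with open(os.path.join(model_dir, cfg)) as f:
        config = yaml.safe_load(f)
    if "mode" not in config:
        # Mirrors the error path in config_parser.h after this commit.
        raise ValueError("Please set mode, "
                         "support value : fluid/trt_fp16/trt_fp32.")
    return str(config["mode"])
```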
deploy/cpp/src/object_detector.cc

```diff
@@ -33,7 +33,8 @@ void ObjectDetector::LoadModel(const std::string& model_dir,
     if (run_mode == "trt_fp16") {
       precision = paddle::AnalysisConfig::Precision::kHalf;
     } else if (run_mode == "trt_int8") {
-      precision = paddle::AnalysisConfig::Precision::kInt8;
+      printf("TensorRT int8 mode is not supported now, "
+             "please use 'trt_fp32' or 'trt_fp16' instead");
     } else {
       if (run_mode != "trt_32") {
         printf("run_mode should be 'fluid', 'trt_fp32' or 'trt_fp16'");
@@ -45,7 +46,7 @@ void ObjectDetector::LoadModel(const std::string& model_dir,
         min_subgraph_size,
         precision,
         false,
-        run_mode == "trt_int8");
+        false);
     }
   } else {
     config.DisableGpu();
```
deploy/python/README.md

```diff
@@ -45,7 +45,7 @@ python deploy/python/infer.py --model_dir=/path/to/models --image_file=/path/to/
 | --image_file | Yes | Image to run prediction on |
 | --video_file | Yes | Video to run prediction on |
 | --use_gpu | No | Whether to use the GPU; defaults to False |
-| --run_mode | No | When using the GPU: defaults to fluid, options are fluid/trt_fp32/trt_fp16/trt_int8 |
+| --run_mode | No | When using the GPU: defaults to fluid, options are fluid/trt_fp32/trt_fp16 |
 | --threshold | No | Score threshold for predictions; defaults to 0.5 |
 | --output_dir | No | Root directory for saved visualizations; defaults to output/ |
```
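With this change an accelerated run is requested as, for example, `python deploy/python/infer.py --model_dir=/path/to/models --image_file=/path/to/image --use_gpu=True --run_mode=trt_fp16` (the flag values here are illustrative); requesting `trt_int8` now raises an error instead of running int8 calibration.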
deploy/python/infer.py

```diff
@@ -318,8 +318,10 @@ def load_predictor(model_dir,
         raise ValueError(
             "Predict by TensorRT mode: {}, expect use_gpu==True, but use_gpu == {}"
             .format(run_mode, use_gpu))
+    if run_mode == 'trt_int8':
+        raise ValueError("TensorRT int8 mode is not supported now, "
+                         "please use trt_fp32 or trt_fp16 instead.")
     precision_map = {
-        'trt_int8': fluid.core.AnalysisConfig.Precision.Int8,
         'trt_fp32': fluid.core.AnalysisConfig.Precision.Float32,
         'trt_fp16': fluid.core.AnalysisConfig.Precision.Half
     }
@@ -341,7 +343,7 @@ def load_predictor(model_dir,
             min_subgraph_size=min_subgraph_size,
             precision_mode=precision_map[run_mode],
             use_static=False,
-            use_calib_mode=run_mode == 'trt_int8')
+            use_calib_mode=False)
     # disable print log when predict
     config.disable_glog_info()
@@ -482,8 +484,6 @@ class Detector():
         t1 = time.time()
         self.predictor.zero_copy_run()
         t2 = time.time()
-        ms = (t2 - t1) * 1000.0
-        print("Inference: {} ms per batch image".format(ms))
         output_names = self.predictor.get_output_names()
         boxes_tensor = self.predictor.get_output_tensor(output_names[0])
@@ -491,6 +491,10 @@ class Detector():
         if self.config.mask_resolution is not None:
             masks_tensor = self.predictor.get_output_tensor(output_names[1])
             np_masks = masks_tensor.copy_to_cpu()
+        ms = (t2 - t1) * 1000.0
+        print("Inference: {} ms per batch image".format(ms))
         results = self.postprocess(
             np_boxes, np_masks, im_info, threshold=threshold)
         return results
@@ -556,7 +560,7 @@ if __name__ == '__main__':
         "--run_mode",
         type=str,
         default='fluid',
-        help="mode of running(fluid/trt_fp32/trt_fp16/trt_int8)")
+        help="mode of running(fluid/trt_fp32/trt_fp16)")
     parser.add_argument(
         "--use_gpu", default=False, help="Whether to predict with GPU.")
     parser.add_argument(
```
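Condensed, the `load_predictor` changes amount to the guard sketched below. `resolve_precision` is a hypothetical helper name used only for illustration; the `fluid.core.AnalysisConfig` attributes are the ones the script already uses:

```python
# Condensed sketch of the run_mode handling in load_predictor after this
# commit; "resolve_precision" is a hypothetical name for illustration.
from paddle import fluid


def resolve_precision(run_mode, use_gpu):
    # TensorRT modes require the GPU path.
    if run_mode != 'fluid' and not use_gpu:
        raise ValueError(
            "Predict by TensorRT mode: {}, expect use_gpu==True, but use_gpu == {}"
            .format(run_mode, use_gpu))
    # int8 would need a calibration pass, which this deployment path no
    # longer wires up (use_calib_mode is now hard-coded to False).
    if run_mode == 'trt_int8':
        raise ValueError("TensorRT int8 mode is not supported now, "
                         "please use trt_fp32 or trt_fp16 instead.")
    precision_map = {
        'trt_fp32': fluid.core.AnalysisConfig.Precision.Float32,
        'trt_fp16': fluid.core.AnalysisConfig.Precision.Half,
    }
    return precision_map.get(run_mode)  # None for plain 'fluid' mode
```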