Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
PaddlePaddle
PaddleClas
提交
a479afa5
P
PaddleClas
项目概览
PaddlePaddle
/
PaddleClas
大约 1 年 前同步成功
通知
115
Star
4999
Fork
1114
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
19
列表
看板
标记
里程碑
合并请求
6
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
PaddleClas
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
19
Issue
19
列表
看板
标记
里程碑
合并请求
6
合并请求
6
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
a479afa5
编写于
4月 13, 2020
作者:
D
dyning
提交者:
GitHub
4月 13, 2020
浏览文件
操作
浏览文件
下载
差异文件
Merge pull request #18 from littletomatodonkey/fix_trt_bench
fix benchmark
上级
7f0edf55
d1aa461b
变更
1
显示空白变更内容
内联
并排
Showing
1 changed file
with
47 additions
and
24 deletions
+47
-24
tools/infer/predict.py
tools/infer/predict.py
+47
-24
未找到文件。
tools/infer/predict.py
浏览文件 @
a479afa5
...
@@ -12,7 +12,6 @@
...
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# See the License for the specific language governing permissions and
# limitations under the License.
# limitations under the License.
# Imports grouped per PEP 8: standard library, then third-party, then local.
import argparse

import numpy as np

import utils
...
@@ -24,6 +23,7 @@ from paddle.fluid.core import create_paddle_predictor
...
@@ -24,6 +23,7 @@ from paddle.fluid.core import create_paddle_predictor
# Configure root logging once at import time (INFO and above) and grab a
# module-scoped logger for this script.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def
parse_args
():
def
parse_args
():
def str2bool(v):
    """Parse the string *v* as a boolean command-line flag.

    The case-insensitive spellings "true", "t" and "1" map to True;
    every other string maps to False.
    """
    lowered = v.lower()
    return lowered in {"true", "t", "1"}
...
@@ -47,8 +47,6 @@ def parse_args():
...
@@ -47,8 +47,6 @@ def parse_args():
def
create_predictor
(
args
):
def
create_predictor
(
args
):
config
=
AnalysisConfig
(
args
.
model_file
,
args
.
params_file
)
config
=
AnalysisConfig
(
args
.
model_file
,
args
.
params_file
)
if
args
.
use_gpu
:
if
args
.
use_gpu
:
config
.
enable_use_gpu
(
args
.
gpu_mem
,
0
)
config
.
enable_use_gpu
(
args
.
gpu_mem
,
0
)
else
:
else
:
...
@@ -58,7 +56,8 @@ def create_predictor(args):
...
@@ -58,7 +56,8 @@ def create_predictor(args):
config
.
switch_ir_optim
(
args
.
ir_optim
)
# default true
config
.
switch_ir_optim
(
args
.
ir_optim
)
# default true
if
args
.
use_tensorrt
:
if
args
.
use_tensorrt
:
config
.
enable_tensorrt_engine
(
config
.
enable_tensorrt_engine
(
precision_mode
=
AnalysisConfig
.
Precision
.
Half
if
args
.
use_fp16
else
AnalysisConfig
.
Precision
.
Float32
,
precision_mode
=
AnalysisConfig
.
Precision
.
Half
if
args
.
use_fp16
else
AnalysisConfig
.
Precision
.
Float32
,
max_batch_size
=
args
.
batch_size
)
max_batch_size
=
args
.
batch_size
)
config
.
enable_memory_optim
()
config
.
enable_memory_optim
()
...
@@ -104,39 +103,63 @@ def main():
...
@@ -104,39 +103,63 @@ def main():
assert
args
.
model_name
is
not
None
assert
args
.
model_name
is
not
None
assert
args
.
use_tensorrt
==
True
assert
args
.
use_tensorrt
==
True
# HALF precission predict only work when using tensorrt
# HALF precission predict only work when using tensorrt
if
args
.
use_fp16
==
True
:
if
args
.
use_fp16
==
True
:
assert
args
.
use_tensorrt
==
True
assert
args
.
use_tensorrt
==
True
operators
=
create_operators
()
operators
=
create_operators
()
predictor
=
create_predictor
(
args
)
predictor
=
create_predictor
(
args
)
inputs
=
preprocess
(
args
.
image_file
,
operators
)
inputs
=
preprocess
(
args
.
image_file
,
operators
)
inputs
=
np
.
expand_dims
(
inputs
,
axis
=
0
).
repeat
(
args
.
batch_size
,
axis
=
0
).
copy
()
inputs
=
np
.
expand_dims
(
inputs
,
axis
=
0
).
repeat
(
args
.
batch_size
,
axis
=
0
).
copy
()
input_names
=
predictor
.
get_input_names
()
input_names
=
predictor
.
get_input_names
()
input_tensor
=
predictor
.
get_input_tensor
(
input_names
[
0
])
input_tensor
=
predictor
.
get_input_tensor
(
input_names
[
0
])
input_tensor
.
copy_from_cpu
(
inputs
)
output_names
=
predictor
.
get_output_names
()
output_tensor
=
predictor
.
get_output_tensor
(
output_names
[
0
])
test_num
=
500
test_time
=
0.0
if
not
args
.
enable_benchmark
:
if
not
args
.
enable_benchmark
:
inputs
=
preprocess
(
args
.
image_file
,
operators
)
inputs
=
np
.
expand_dims
(
inputs
,
axis
=
0
).
repeat
(
args
.
batch_size
,
axis
=
0
).
copy
()
input_tensor
.
copy_from_cpu
(
inputs
)
predictor
.
zero_copy_run
()
predictor
.
zero_copy_run
()
output
=
output_tensor
.
copy_to_cpu
()
output
=
output
.
flatten
()
cls
=
np
.
argmax
(
output
)
score
=
output
[
cls
]
logger
.
info
(
"class: {0}"
.
format
(
cls
))
logger
.
info
(
"score: {0}"
.
format
(
score
))
else
:
else
:
for
i
in
range
(
0
,
1010
):
for
i
in
range
(
0
,
test_num
+
10
):
if
i
==
10
:
inputs
=
np
.
random
.
rand
(
args
.
batch_size
,
3
,
224
,
start
=
time
.
time
()
224
).
astype
(
np
.
float32
)
predictor
.
zero_copy_run
()
start_time
=
time
.
time
()
input_tensor
.
copy_from_cpu
(
inputs
)
end
=
time
.
time
()
predictor
.
zero_copy_run
()
fp_message
=
"FP16"
if
args
.
use_fp16
else
"FP32"
logger
.
info
(
"{0}
\t
{1}
\t
batch size: {2}
\t
time(ms): {3}"
.
format
(
args
.
model_name
,
fp_message
,
args
.
batch_size
,
end
-
start
))
output_names
=
predictor
.
get_output_names
()
output_tensor
=
predictor
.
get_output_tensor
(
output_names
[
0
])
output
=
output_tensor
.
copy_to_cpu
()
output
=
output_tensor
.
copy_to_cpu
()
output
=
output
.
flatten
()
output
=
output
.
flatten
()
if
i
>=
10
:
test_time
+=
time
.
time
()
-
start_time
cls
=
np
.
argmax
(
output
)
cls
=
np
.
argmax
(
output
)
score
=
output
[
cls
]
score
=
output
[
cls
]
logger
.
info
(
"class: {0}"
.
format
(
cls
))
logger
.
info
(
"class: {0}"
.
format
(
cls
))
logger
.
info
(
"score: {0}"
.
format
(
score
))
logger
.
info
(
"score: {0}"
.
format
(
score
))
fp_message
=
"FP16"
if
args
.
use_fp16
else
"FP32"
logger
.
info
(
"{0}
\t
{1}
\t
batch size: {2}
\t
time(ms): {3}"
.
format
(
args
.
model_name
,
fp_message
,
args
.
batch_size
,
1000
*
test_time
/
test_num
))
# Standard script entry point: run main() only when this file is executed
# directly, not when it is imported as a module.
if __name__ == "__main__":
    main()
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录