Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
s920243400
PaddleDetection
提交
2f4aee36
P
PaddleDetection
项目概览
s920243400
/
PaddleDetection
与 Fork 源项目一致
Fork自
PaddlePaddle / PaddleDetection
通知
2
Star
0
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
PaddleDetection
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
2f4aee36
编写于
1月 22, 2019
作者:
N
nhzlx
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
fix comments
test=develop
上级
ec213730
变更
3
隐藏空白更改
内联
并排
Showing
3 changed files
with
51 additions
and
3 deletions
+51
-3
paddle/fluid/inference/tests/api/tester_helper.h
paddle/fluid/inference/tests/api/tester_helper.h
+18
-1
paddle/fluid/inference/tests/api/trt_models_tester.cc
paddle/fluid/inference/tests/api/trt_models_tester.cc
+31
-0
paddle/fluid/operators/tensorrt/tensorrt_engine_op_test.cc
paddle/fluid/operators/tensorrt/tensorrt_engine_op_test.cc
+2
-2
未找到文件。
paddle/fluid/inference/tests/api/tester_helper.h
浏览文件 @
2f4aee36
...
...
@@ -56,6 +56,13 @@ DECLARE_int32(paddle_num_threads);
namespace
paddle
{
namespace
inference
{
// Returns a pseudo-random float uniformly distributed in [low, high).
//
// Marked `inline` because this function is defined in a header
// (tester_helper.h): without `inline`, every translation unit including the
// header emits its own external definition and linking fails with
// multiple-definition errors (ODR violation).
inline float Random(float low, float high) {
  // One engine per process, seeded once from the OS entropy source.
  // NOTE: `static` locals make this function not thread-safe; the test
  // helpers appear to call it single-threaded — confirm before reusing.
  static std::random_device rd;
  static std::mt19937 mt(rd());
  // Distribution objects are cheap to construct, so build one per call to
  // honor per-call bounds. Kept as `double` to preserve the original value
  // sequence; the cast makes the intentional narrowing explicit.
  std::uniform_real_distribution<double> dist(low, high);
  return static_cast<float>(dist(mt));
}
void
PrintConfig
(
const
PaddlePredictor
::
Config
*
config
,
bool
use_analysis
)
{
const
auto
*
analysis_config
=
reinterpret_cast
<
const
contrib
::
AnalysisConfig
*>
(
config
);
...
...
@@ -176,7 +183,7 @@ void SetFakeImageInput(std::vector<std::vector<PaddleTensor>> *inputs,
float
*
input_data
=
static_cast
<
float
*>
(
input
.
data
.
data
());
// fill input data, for profile easily, do not use random data here.
for
(
size_t
j
=
0
;
j
<
len
;
++
j
)
{
*
(
input_data
+
j
)
=
static_cast
<
float
>
(
j
)
/
len
;
*
(
input_data
+
j
)
=
Random
(
0
,
10.
)
;
}
}
(
*
inputs
).
emplace_back
(
input_slots
);
...
...
@@ -344,6 +351,16 @@ void CompareNativeAndAnalysis(
CompareResult
(
analysis_outputs
,
native_outputs
);
}
// Runs one batch through a pre-built native predictor and a pre-built
// analysis predictor, then checks that both produce matching outputs.
// Only the first element of `inputs` is consumed; batch size comes from
// the --batch_size command-line flag.
void CompareNativeAndAnalysis(
    PaddlePredictor *native_pred, PaddlePredictor *analysis_pred,
    const std::vector<std::vector<PaddleTensor>> &inputs) {
  const int batch_size = FLAGS_batch_size;
  std::vector<PaddleTensor> outputs_native;
  std::vector<PaddleTensor> outputs_analysis;
  native_pred->Run(inputs[0], &outputs_native, batch_size);
  analysis_pred->Run(inputs[0], &outputs_analysis, batch_size);
  CompareResult(outputs_analysis, outputs_native);
}
template
<
typename
T
>
std
::
string
LoDTensorSummary
(
const
framework
::
LoDTensor
&
tensor
)
{
std
::
stringstream
ss
;
...
...
paddle/fluid/inference/tests/api/trt_models_tester.cc
浏览文件 @
2f4aee36
...
...
@@ -107,6 +107,27 @@ void compare(std::string model_dir, bool use_tensorrt) {
inputs_all
);
}
// Feeds 100 freshly generated fake-image batches through a native predictor
// and an analysis predictor (optionally TensorRT-backed) built from the same
// config, comparing outputs after every run. Exercises predictor reuse
// across consecutive inferences.
void compare_continuous_input(std::string model_dir, bool use_tensorrt) {
  contrib::AnalysisConfig analysis_config;
  SetConfig<contrib::AnalysisConfig>(&analysis_config, model_dir, true,
                                     use_tensorrt, FLAGS_batch_size);
  auto config =
      reinterpret_cast<const PaddlePredictor::Config *>(&analysis_config);
  auto native_pred = CreateTestPredictor(config, false);
  auto analysis_pred = CreateTestPredictor(config, true);
  // Whether the model uses combined program/param files is fixed by the
  // command-line flags, so decide once outside the loop; the fake inputs
  // themselves are regenerated on every round.
  const bool has_combined_files =
      !FLAGS_prog_filename.empty() && !FLAGS_param_filename.empty();
  for (int round = 0; round < 100; ++round) {
    std::vector<std::vector<PaddleTensor>> inputs_all;
    if (has_combined_files) {
      SetFakeImageInput(&inputs_all, model_dir, true, FLAGS_prog_filename,
                        FLAGS_param_filename);
    } else {
      SetFakeImageInput(&inputs_all, model_dir, false, "__model__", "");
    }
    CompareNativeAndAnalysis(native_pred.get(), analysis_pred.get(),
                             inputs_all);
  }
}
TEST
(
TensorRT_mobilenet
,
compare
)
{
std
::
string
model_dir
=
FLAGS_infer_model
+
"/mobilenet"
;
compare
(
model_dir
,
/* use_tensorrt */
true
);
...
...
@@ -157,5 +178,15 @@ TEST(AnalysisPredictor, use_gpu) {
}
}
// resnet50, TensorRT path: run 100 consecutive inferences and compare the
// analysis predictor's outputs against the native predictor's each time.
TEST(resnet50, compare_continuous_input) {
  compare_continuous_input(FLAGS_infer_model + "/resnet50",
                           /* use_tensorrt */ true);
}
// resnet50, plain GPU path (TensorRT disabled): same 100-round continuous
// comparison as above, isolating the non-TensorRT analysis pipeline.
TEST(resnet50, compare_continuous_input_native) {
  compare_continuous_input(FLAGS_infer_model + "/resnet50",
                           /* use_tensorrt */ false);
}
}
// namespace inference
}
// namespace paddle
paddle/fluid/operators/tensorrt/tensorrt_engine_op_test.cc
浏览文件 @
2f4aee36
...
...
@@ -99,7 +99,7 @@ TEST(TensorRTEngineOp, manual) {
SetAttr
<
std
::
string
>
(
engine_op_desc
.
Proto
(),
"subgraph"
,
block_
->
SerializeAsString
());
SetAttr
<
int
>
(
engine_op_desc
.
Proto
(),
"max_batch_size"
,
2
);
SetAttr
<
int
>
(
engine_op_desc
.
Proto
(),
"workspace_size"
,
2
<<
20
);
SetAttr
<
int
>
(
engine_op_desc
.
Proto
(),
"workspace_size"
,
1
<<
20
);
SetAttr
<
std
::
string
>
(
engine_op_desc
.
Proto
(),
"engine_uniq_key"
,
"a_engine"
);
SetAttr
<
std
::
vector
<
std
::
string
>>
(
engine_op_desc
.
Proto
(),
"parameters"
,
std
::
vector
<
std
::
string
>
({}));
...
...
@@ -193,7 +193,7 @@ void Execute(int batch_size, int input_dim, int output_dim, int nlayers = 1) {
SetAttr
<
std
::
string
>
(
engine_op_desc
.
Proto
(),
"subgraph"
,
block_
->
SerializeAsString
());
SetAttr
<
int
>
(
engine_op_desc
.
Proto
(),
"max_batch_size"
,
batch_size
);
SetAttr
<
int
>
(
engine_op_desc
.
Proto
(),
"workspace_size"
,
2
<<
20
);
SetAttr
<
int
>
(
engine_op_desc
.
Proto
(),
"workspace_size"
,
1
<<
20
);
SetAttr
<
std
::
vector
<
std
::
string
>>
(
engine_op_desc
.
Proto
(),
"parameters"
,
std
::
vector
<
std
::
string
>
({
"y0"
,
"y1"
,
"y2"
,
"y3"
}));
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录