BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 67e4450c (unverified)
Authored Jan 28, 2019 by Tao Luo; committed via GitHub on Jan 28, 2019

Merge pull request #15485 from luotao1/fc500110-bert_test

add bert analyzer test

Parents: 6000a6e7, e31aef9f
Showing 2 changed files with 234 additions and 0 deletions (+234, -0):

- paddle/fluid/inference/tests/api/CMakeLists.txt (+5, -0)
- paddle/fluid/inference/tests/api/analyzer_bert_tester.cc (+229, -0)
paddle/fluid/inference/tests/api/CMakeLists.txt

```diff
@@ -128,6 +128,11 @@ inference_analysis_api_test_with_fake_data(test_analyzer_resnet50
 inference_analysis_api_test_with_fake_data(test_analyzer_mobilenet_depthwise_conv
     "${INFERENCE_DEMO_INSTALL_DIR}/mobilenet_depthwise_conv" analyzer_resnet50_tester.cc "mobilenet_model.tar.gz" SERIAL)
 
+# bert
+set(BERT_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/bert")
+download_model_and_data(${BERT_INSTALL_DIR} "bert_model.tar.gz" "bert_data.txt.tar.gz")
+inference_analysis_api_test(test_analyzer_bert ${BERT_INSTALL_DIR} analyzer_bert_tester.cc)
+
 # anakin
 if (WITH_ANAKIN AND WITH_MKL)  # only needed in CI
     # anakin rnn1
```
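The five added lines mirror the pattern of the surrounding targets: `set` pins the data directory, `download_model_and_data` fetches and unpacks the model and input archives into it, and `inference_analysis_api_test` registers a `test_analyzer_bert` gtest binary built from the new `analyzer_bert_tester.cc`. (Both helpers appear to be project-local CMake functions defined elsewhere in this file; the names and arguments here are all the diff shows.)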
paddle/fluid/inference/tests/api/analyzer_bert_tester.cc
new file (mode 0 → 100644)
```cpp
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/inference/tests/api/tester_helper.h"

namespace paddle {
namespace inference {

using paddle::PaddleTensor;
using paddle::contrib::AnalysisConfig;

template <typename T>
void GetValueFromStream(std::stringstream *ss, T *t) {
  (*ss) >> (*t);
}

template <>
void GetValueFromStream<std::string>(std::stringstream *ss, std::string *t) {
  *t = ss->str();
}
```
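The `std::string` specialization matters here: the generic `operator>>` would read only up to the first whitespace, while `ss->str()` hands back the whole accumulated buffer. String tokens produced by `Split` below therefore keep their internal spaces, which is what lets a field like `"1 3"` survive as a single shape string in `ParseTensor`.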
```cpp
// Split string to vector
template <typename T>
void Split(const std::string &line, char sep, std::vector<T> *v) {
  std::stringstream ss;
  T t;
  for (auto c : line) {
    if (c != sep) {
      ss << c;
    } else {
      GetValueFromStream<T>(&ss, &t);
      v->push_back(std::move(t));
      ss.str({});
      ss.clear();
    }
  }

  if (!ss.str().empty()) {
    GetValueFromStream<T>(&ss, &t);
    v->push_back(std::move(t));
    ss.str({});
    ss.clear();
  }
}
```
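For reference, a minimal sketch (not part of the commit) of what `Split` produces, assuming the `Split` defined above is in scope:

```cpp
// Hypothetical usage: tokenize a space-separated shape string.
std::vector<int> shape;
Split("1 128 1", ' ', &shape);
// shape == {1, 128, 1}; the final "1" has no separator after it and is
// flushed by the !ss.str().empty() branch that follows the loop.
```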
```cpp
template <typename T>
constexpr paddle::PaddleDType GetPaddleDType();

template <>
constexpr paddle::PaddleDType GetPaddleDType<int64_t>() {
  return paddle::PaddleDType::INT64;
}

template <>
constexpr paddle::PaddleDType GetPaddleDType<float>() {
  return paddle::PaddleDType::FLOAT32;
}

// Parse tensor from string
template <typename T>
bool ParseTensor(const std::string &field, paddle::PaddleTensor *tensor) {
  std::vector<std::string> data;
  Split(field, ':', &data);
  if (data.size() < 2) return false;

  std::string shape_str = data[0];

  std::vector<int> shape;
  Split(shape_str, ' ', &shape);

  std::string mat_str = data[1];

  std::vector<T> mat;
  Split(mat_str, ' ', &mat);

  tensor->shape = shape;
  auto size =
      std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<int>()) *
      sizeof(T);
  tensor->data.Resize(size);
  std::copy(mat.begin(), mat.end(), static_cast<T *>(tensor->data.data()));
  tensor->dtype = GetPaddleDType<T>();

  return true;
}
```
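Each field is therefore `"shape:values"`, both parts space-separated. A minimal sketch (not part of the commit; the values are made up):

```cpp
// Hypothetical usage of the ParseTensor above.
paddle::PaddleTensor t;
ParseTensor<float>("1 3:0.1 0.2 0.3", &t);
// t.shape == {1, 3}, t.dtype == FLOAT32, and t.data holds the three floats.
```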
```cpp
// Parse input tensors from string
bool ParseLine(const std::string &line,
               std::vector<paddle::PaddleTensor> *tensors) {
  std::vector<std::string> fields;
  Split(line, ';', &fields);

  if (fields.size() < 5) return false;

  tensors->clear();
  tensors->reserve(5);

  int i = 0;
  // src_id
  paddle::PaddleTensor src_id;
  ParseTensor<int64_t>(fields[i++], &src_id);
  tensors->push_back(src_id);

  // pos_id
  paddle::PaddleTensor pos_id;
  ParseTensor<int64_t>(fields[i++], &pos_id);
  tensors->push_back(pos_id);

  // segment_id
  paddle::PaddleTensor segment_id;
  ParseTensor<int64_t>(fields[i++], &segment_id);
  tensors->push_back(segment_id);

  // self_attention_bias
  paddle::PaddleTensor self_attention_bias;
  ParseTensor<float>(fields[i++], &self_attention_bias);
  tensors->push_back(self_attention_bias);

  // next_segment_index
  paddle::PaddleTensor next_segment_index;
  ParseTensor<int64_t>(fields[i++], &next_segment_index);
  tensors->push_back(next_segment_index);

  return true;
}
```
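So one dataset line carries five `;`-separated fields in a fixed order: src_id, pos_id, segment_id, self_attention_bias (the only float feed), next_segment_index. A made-up line to make the framing concrete (the shapes are illustrative only, not the real BERT feed shapes):

```cpp
// Hypothetical usage of the ParseLine above.
std::vector<paddle::PaddleTensor> feed;
ParseLine("1 2:3 4;1 2:0 1;1 2:0 0;1 2:0.0 0.0;1 1:0", &feed);
// feed.size() == 5; fields 0-2 and 4 parse as INT64, field 3 as FLOAT32.
```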
```cpp
bool LoadInputData(std::vector<std::vector<paddle::PaddleTensor>> *inputs) {
  if (FLAGS_infer_data.empty()) {
    LOG(ERROR) << "please set input data path";
    return false;
  }

  std::ifstream fin(FLAGS_infer_data);
  std::string line;

  int sample = 0;
  // The unit-test dataset only has 10 samples; each sample has 5 feeds.
  while (std::getline(fin, line)) {
    std::vector<paddle::PaddleTensor> feed_data;
    ParseLine(line, &feed_data);
    inputs->push_back(std::move(feed_data));
    sample++;
    if (!FLAGS_test_all_data && sample == FLAGS_batch_size) break;
  }
  LOG(INFO) << "number of samples: " << sample;

  return true;
}
```
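The `FLAGS_*` switches used here (`infer_data`, `test_all_data`, `batch_size`, and later `infer_model` and `num_threads`) are not defined in this file; they presumably come from the shared harness pulled in through `tester_helper.h`, with the CMake target above supplying the downloaded model and data paths. Unless `--test_all_data` is set, the loop stops after `FLAGS_batch_size` lines, so a default run touches only a slice of the 10-sample dataset.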
```cpp
void SetConfig(contrib::AnalysisConfig *config) {
  config->SetModel(FLAGS_infer_model);
}

void profile(bool use_mkldnn = false) {
  contrib::AnalysisConfig config;
  SetConfig(&config);

  if (use_mkldnn) {
    config.EnableMKLDNN();
  }

  std::vector<PaddleTensor> outputs;
  std::vector<std::vector<PaddleTensor>> inputs;
  LoadInputData(&inputs);
  TestPrediction(reinterpret_cast<const PaddlePredictor::Config *>(&config),
                 inputs, &outputs, FLAGS_num_threads);
}

TEST(Analyzer_bert, profile) { profile(); }

#ifdef PADDLE_WITH_MKLDNN
TEST(Analyzer_bert, profile_mkldnn) { profile(true); }
#endif

// Check the fuse status
TEST(Analyzer_bert, fuse_statis) {
  AnalysisConfig cfg;
  SetConfig(&cfg);

  int num_ops;
  auto predictor = CreatePaddlePredictor<AnalysisConfig>(cfg);
  auto fuse_statis = GetFuseStatis(
      static_cast<AnalysisPredictor *>(predictor.get()), &num_ops);
  LOG(INFO) << "num_ops: " << num_ops;
}
```
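As committed, `fuse_statis` retrieves the fuse statistics and logs `num_ops` but asserts nothing; the test passes as long as predictor creation and `GetFuseStatis` succeed.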
```cpp
// Compare result of NativeConfig and AnalysisConfig
void compare(bool use_mkldnn = false) {
  AnalysisConfig cfg;
  SetConfig(&cfg);
  if (use_mkldnn) {
    cfg.EnableMKLDNN();
  }

  std::vector<std::vector<PaddleTensor>> inputs;
  LoadInputData(&inputs);
  CompareNativeAndAnalysis(
      reinterpret_cast<const PaddlePredictor::Config *>(&cfg), inputs);
}

TEST(Analyzer_bert, compare) { compare(); }

#ifdef PADDLE_WITH_MKLDNN
TEST(Analyzer_bert, compare_mkldnn) { compare(true /* use_mkldnn */); }
#endif

// Compare Deterministic result
// TODO(luotao): Since each unit-test on CI only has 10 minutes, this one is
// disabled to decrease the CI time.
// TEST(Analyzer_bert, compare_determine) {
//   AnalysisConfig cfg;
//   SetConfig(&cfg);
//
//   std::vector<std::vector<PaddleTensor>> inputs;
//   LoadInputData(&inputs);
//   CompareDeterministic(
//       reinterpret_cast<const PaddlePredictor::Config *>(&cfg), inputs);
// }

}  // namespace inference
}  // namespace paddle
```