Commit f76f42c2 (unverified)
Authored Sep 07, 2018 by Tao Luo; committed via GitHub on Sep 07, 2018.

Merge pull request #13260 from luotao1/all_data

add test_all_data in test_analyzer_ner

Parents: e2d325ac, 00c72309
Showing 3 changed files with 35 additions and 35 deletions (+35 −35).
- paddle/fluid/inference/analysis/analyzer_lac_tester.cc (+2 −8)
- paddle/fluid/inference/analysis/analyzer_ner_tester.cc (+28 −8)
- paddle/fluid/inference/analysis/analyzer_text_classification_tester.cc (+5 −19)
paddle/fluid/inference/analysis/analyzer_lac_tester.cc

```diff
@@ -114,12 +114,6 @@ void GetOneBatch(std::vector<PaddleTensor> *input_slots, DataRecord *data,
   PADDLE_ENFORCE_EQ(batch_size, static_cast<int>(one_batch.lod.size() - 1));
   input_slots->assign({input_tensor});
 }
-static void PrintTime(const double latency, const int bs, const int repeat) {
-  LOG(INFO) << "===========profile result===========";
-  LOG(INFO) << "batch_size: " << bs << ", repeat: " << repeat
-            << ", avg latency: " << latency / repeat << "ms";
-  LOG(INFO) << "=====================================";
-}
 void BenchAllData(const std::string &model_path, const std::string &data_file,
                   const int batch_size, const int repeat) {
   NativeConfig config;
@@ -145,7 +139,7 @@ void BenchAllData(const std::string &model_path, const std::string &data_file,
       sum += timer.toc();
     }
   }
-  PrintTime(sum, batch_size, repeat);
+  PrintTime(batch_size, repeat, 1, 0, sum / repeat);
 }
 const int64_t lac_ref_data[] = {24, 25, 25, 25, 38, 30, 31, 14, 15, 44, 24, 25,
                                 25, 25, 25, 25, 44, 24, 25, 25, 25, 36, 42, 43,
@@ -176,7 +170,7 @@ void TestLACPrediction(const std::string &model_path,
   for (int i = 0; i < repeat; i++) {
     predictor->Run(input_slots, &outputs_slots);
   }
-  PrintTime(timer.toc(), batch_size, repeat);
+  PrintTime(batch_size, repeat, 1, 0, timer.toc() / repeat);
   EXPECT_EQ(outputs_slots.size(), 1UL);
   auto &out = outputs_slots[0];
   size_t size = std::accumulate(out.shape.begin(), out.shape.end(), 1,
```
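Across all three testers, the local PrintTime helpers are deleted and every call site switches to a single five-argument form, PrintTime(batch_size, repeat, 1, 0, latency). The shared helper itself is not part of this diff; below is a minimal sketch of what the call sites imply, assuming the literal 1 and 0 stand for a thread count and a thread id (hypothetical parameter names).

```cpp
#include <iostream>

// Hypothetical reconstruction of the shared helper implied by the new call
// sites. The names num_threads and tid are assumptions inferred from the
// constant 1 and 0 passed everywhere; the real helper lives outside this
// diff and may differ.
static void PrintTime(int batch_size, int repeat, int num_threads, int tid,
                      double latency) {
  std::cout << "batch_size: " << batch_size << ", repeat: " << repeat
            << ", threads: " << num_threads << ", thread id: " << tid
            << ", latency: " << latency << "ms" << std::endl;
}

int main() {
  // Mirrors the new call in BenchAllData: report the average latency per repeat.
  double sum = 96.0;  // made-up total elapsed milliseconds
  int repeat = 3;
  PrintTime(/*batch_size=*/10, repeat, /*num_threads=*/1, /*tid=*/0,
            sum / repeat);
  return 0;
}
```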
paddle/fluid/inference/analysis/analyzer_ner_tester.cc

```diff
@@ -25,6 +25,7 @@ DEFINE_string(infer_model, "", "model path");
 DEFINE_string(infer_data, "", "data path");
 DEFINE_int32(batch_size, 10, "batch size.");
 DEFINE_int32(repeat, 1, "Running the inference program repeat times.");
+DEFINE_bool(test_all_data, false, "Test the all dataset in data file.");
 namespace paddle {
 namespace inference {
@@ -35,6 +36,7 @@ struct DataRecord {
   std::vector<size_t> lod;  // two inputs have the same lod info.
   size_t batch_iter{0};
   size_t batch_size{1};
+  size_t num_samples;  // total number of samples
   DataRecord() = default;
   explicit DataRecord(const std::string &path, int batch_size = 1)
       : batch_size(batch_size) {
@@ -81,6 +83,7 @@ struct DataRecord {
       word_data_all.push_back(std::move(word_data));
       mention_data_all.push_back(std::move(mention_data));
     }
+    num_samples = num_lines;
   }
 };
@@ -120,21 +123,38 @@ void TestChineseNERPrediction() {
   auto predictor =
       CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
   std::vector<PaddleTensor> input_slots;
-  DataRecord data(FLAGS_infer_data, FLAGS_batch_size);
+  std::vector<PaddleTensor> outputs;
+  Timer timer;
+  if (FLAGS_test_all_data) {
+    LOG(INFO) << "test all data";
+    double sum = 0;
+    size_t num_samples;
+    for (int i = 0; i < FLAGS_repeat; i++) {
+      DataRecord data(FLAGS_infer_data, FLAGS_batch_size);
+      num_samples = data.num_samples;
+      for (size_t bid = 0; bid < num_samples; ++bid) {
+        PrepareInputs(&input_slots, &data, FLAGS_batch_size);
+        timer.tic();
+        predictor->Run(input_slots, &outputs);
+        sum += timer.toc();
+      }
+    }
+    LOG(INFO) << "total number of samples: " << num_samples;
+    PrintTime(FLAGS_batch_size, FLAGS_repeat, 1, 0, sum / FLAGS_repeat);
+    LOG(INFO) << "average latency of each sample: "
+              << sum / FLAGS_repeat / num_samples;
+    return;
+  }
   // Prepare inputs.
+  DataRecord data(FLAGS_infer_data, FLAGS_batch_size);
   PrepareInputs(&input_slots, &data, FLAGS_batch_size);
-  std::vector<PaddleTensor> outputs;
-  Timer timer;
   timer.tic();
   for (int i = 0; i < FLAGS_repeat; i++) {
     predictor->Run(input_slots, &outputs);
   }
-  LOG(INFO) << "===========profile result===========";
-  LOG(INFO) << "batch_size: " << FLAGS_batch_size
-            << ", repeat: " << FLAGS_repeat
-            << ", latency: " << timer.toc() / FLAGS_repeat << "ms";
-  LOG(INFO) << "=====================================";
+  PrintTime(FLAGS_batch_size, FLAGS_repeat, 1, 0, timer.toc() / FLAGS_repeat);
   PADDLE_ENFORCE(outputs.size(), 1UL);
   auto &out = outputs[0];
```
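The new all-data path times each batch individually with timer.tic()/timer.toc() and accumulates the results. The Timer type is defined elsewhere in the inference test utilities, not in this diff; here is a minimal sketch of the tic/toc semantics the loop assumes, namely that toc() returns the milliseconds elapsed since the last tic().

```cpp
#include <chrono>
#include <iostream>

// Minimal sketch, assuming tic() marks a start point and toc() returns the
// milliseconds elapsed since the last tic(). The real Timer used by these
// testers is defined elsewhere in Paddle and may differ.
struct Timer {
  std::chrono::high_resolution_clock::time_point start;
  void tic() { start = std::chrono::high_resolution_clock::now(); }
  double toc() const {
    auto end = std::chrono::high_resolution_clock::now();
    return std::chrono::duration<double, std::milli>(end - start).count();
  }
};

int main() {
  Timer timer;
  double sum = 0;
  for (int i = 0; i < 3; ++i) {
    timer.tic();
    // ... run one inference batch here ...
    sum += timer.toc();  // accumulate per-batch latency, as the tester does
  }
  std::cout << "total latency: " << sum << "ms" << std::endl;
  return 0;
}
```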
paddle/fluid/inference/analysis/analyzer_text_classification_tester.cc

```diff
@@ -31,25 +31,11 @@ DEFINE_int32(repeat, 1, "How many times to repeat run.");
 DEFINE_int32(topn, -1, "Run top n batches of data to save time");
 namespace paddle {
+namespace inference {
-template <typename T>
-std::string to_string(const std::vector<T> &vec) {
-  std::stringstream ss;
-  for (const auto &c : vec) {
-    ss << c << " ";
-  }
-  return ss.str();
-}
-void PrintTime(const double latency, const int bs, const int repeat) {
-  LOG(INFO) << "===========profile result===========";
-  LOG(INFO) << "batch_size: " << bs << ", repeat: " << repeat
-            << ", avg latency: " << latency / repeat << "ms";
-  LOG(INFO) << "=====================================";
-}
 struct DataReader {
-  DataReader(const std::string &path) : file(new std::ifstream(path)) {}
+  explicit DataReader(const std::string &path)
+      : file(new std::ifstream(path)) {}
   bool NextBatch(PaddleTensor *tensor, int batch_size) {
     PADDLE_ENFORCE_EQ(batch_size, 1);
@@ -107,8 +93,7 @@ void Main(int batch_size) {
       ++num_batches;
     }
   }
-  PrintTime(sum, batch_size, num_batches);
+  PrintTime(batch_size, FLAGS_repeat, 1, 0, sum / FLAGS_repeat);
   // Get output
   LOG(INFO) << "get outputs " << output_slots.size();
@@ -129,4 +114,5 @@ void Main(int batch_size) {
 TEST(text_classification, basic) { Main(FLAGS_batch_size); }
+}  // namespace inference
 }  // namespace paddle
```
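An incidental cleanup in this file: DataReader's single-argument constructor gains the explicit keyword, so a std::string can no longer silently convert into a DataReader at a call site. A minimal illustration of the difference follows; the types here are hypothetical stand-ins, not the tester's own.

```cpp
#include <string>

// Hypothetical types illustrating the effect of `explicit` on a
// single-argument constructor; they are not part of the tester code.
struct ImplicitReader {
  ImplicitReader(const std::string &path) {}  // implicit conversion allowed
};

struct ExplicitReader {
  explicit ExplicitReader(const std::string &path) {}  // must be spelled out
};

void UseImplicit(ImplicitReader reader) {}
void UseExplicit(ExplicitReader reader) {}

int main() {
  std::string path = "data.txt";
  UseImplicit(path);  // compiles: the string silently becomes a reader
  // UseExplicit(path);  // would not compile: no implicit conversion
  UseExplicit(ExplicitReader(path));  // fine: the conversion is intentional
  return 0;
}
```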