Commit 1da03005
Authored Nov 05, 2018 by Zhen Wang
Parent: eb7ed1b7

add dam test
Showing 3 changed files with 231 additions and 5 deletions:

- paddle/fluid/inference/tests/api/CMakeLists.txt (+5, -0)
- paddle/fluid/inference/tests/api/analyzer_dam_tester.cc (+224, -0)
- paddle/fluid/inference/tests/api/analyzer_ner_tester.cc (+2, -5)
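In short: the CMake change below downloads the DAM model and test data and registers a test_analyzer_dam gtest target; the new analyzer_dam_tester.cc implements that test (data loading, input preparation, and profile/fuse-statistics/compare checks); and analyzer_ner_tester.cc is simplified to drop two redundant intermediate buffers.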
paddle/fluid/inference/tests/api/CMakeLists.txt

@@ -43,6 +43,11 @@ set(RNN2_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/rnn2")
 download_model_and_data(${RNN2_INSTALL_DIR} "rnn2_model.tar.gz" "rnn2_data.txt.tar.gz")
 inference_analysis_api_test(test_analyzer_rnn2 ${RNN2_INSTALL_DIR} analyzer_rnn2_tester.cc)
 
+# DAM
+set(DAM_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/dam")
+download_model_and_data(${DAM_INSTALL_DIR} "DAM_model.tar.gz" "DAM_data.txt.tar.gz")
+inference_analysis_api_test(test_analyzer_dam ${DAM_INSTALL_DIR} analyzer_dam_tester.cc)
+
 # chinese_ner
 set(CHINESE_NER_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/chinese_ner")
 download_model_and_data(${CHINESE_NER_INSTALL_DIR} "chinese_ner_model.tar.gz" "chinese_ner-data.txt.tar.gz")
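For context: the inference_analysis_api_test helper (defined earlier in this CMakeLists.txt) builds the named gtest binary from the listed source file and, presumably, hands it the install directory on the command line. A minimal sketch of the receiving side, assuming the usual gflags plumbing in tester_helper.h — the flag names match those used by the tester below, but the defaults, help strings, and paths here are illustrative, not copied from the repo:

#include <gflags/gflags.h>

// Illustrative stand-ins: in the real tree these DEFINE_* calls live in
// tester_helper.h and are shared by all analyzer tests.
DEFINE_string(infer_model, "", "Directory holding the inference model.");
DEFINE_string(infer_data, "", "Path of the input data file.");
DEFINE_int32(batch_size, 1, "Batch size per inference run.");

int main(int argc, char **argv) {
  // A typical invocation wired up by CMake might look like (paths assumed):
  //   ./test_analyzer_dam --infer_model=<DAM_INSTALL_DIR>/model \
  //                       --infer_data=<DAM_INSTALL_DIR>/data.txt
  gflags::ParseCommandLineFlags(&argc, &argv, true);
  return 0;
}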
paddle/fluid/inference/tests/api/analyzer_dam_tester.cc (new file, mode 100644)

// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/inference/tests/api/tester_helper.h"

namespace paddle {
namespace inference {
using contrib::AnalysisConfig;

#define MAX_TURN_NUM 9
#define MAX_TURN_LEN 50
static std::vector<float> result_data;

struct DataRecord {
  std::vector<std::vector<int64_t>>
      turns[MAX_TURN_NUM];  // turns data : MAX_TURN_NUM
  std::vector<std::vector<float>>
      turns_mask[MAX_TURN_NUM];  // turns mask data : MAX_TURN_NUM
  std::vector<std::vector<int64_t>> response;     // response data : 1
  std::vector<std::vector<float>> response_mask;  // response mask data : 1
  size_t batch_iter{0};
  size_t batch_size{1};
  size_t num_samples;  // total number of samples
  DataRecord() = default;
  explicit DataRecord(const std::string &path, int batch_size = 1)
      : batch_size(batch_size) {
    Load(path);
  }
  DataRecord NextBatch() {
    DataRecord data;
    size_t batch_end = batch_iter + batch_size;
    // NOTE: skip the final batch if not enough data is provided.
    if (batch_end <= response.size()) {
      for (int i = 0; i < MAX_TURN_NUM; ++i) {
        data.turns[i].assign(turns[i].begin() + batch_iter,
                             turns[i].begin() + batch_end);
      }
      for (int i = 0; i < MAX_TURN_NUM; ++i) {
        data.turns_mask[i].assign(turns_mask[i].begin() + batch_iter,
                                  turns_mask[i].begin() + batch_end);
      }
      data.response.assign(response.begin() + batch_iter,
                           response.begin() + batch_end);
      data.response_mask.assign(response_mask.begin() + batch_iter,
                                response_mask.begin() + batch_end);
      CHECK(!data.response.empty());
      CHECK(!data.response_mask.empty());
      CHECK_EQ(data.response.size(), data.response_mask.size());
    }
    batch_iter += batch_size;
    return data;
  }
  void Load(const std::string &path) {
    std::ifstream file(path);
    std::string line;
    size_t num_lines = 0;
    result_data.clear();
    while (std::getline(file, line)) {
      num_lines++;
      std::vector<std::string> data;
      split(line, ',', &data);
      CHECK_EQ(data.size(), 2 * MAX_TURN_NUM + 3);
      // load turn data
      std::vector<int64_t> turns_tmp[MAX_TURN_NUM];
      for (int i = 0; i < MAX_TURN_NUM; ++i) {
        split_to_int64(data[i], ' ', &turns_tmp[i]);
        turns[i].push_back(std::move(turns_tmp[i]));
      }
      // load turn_mask data
      std::vector<float> turns_mask_tmp[MAX_TURN_NUM];
      for (int i = 0; i < MAX_TURN_NUM; ++i) {
        split_to_float(data[MAX_TURN_NUM + i], ' ', &turns_mask_tmp[i]);
        turns_mask[i].push_back(std::move(turns_mask_tmp[i]));
      }
      // load response data
      std::vector<int64_t> response_tmp;
      split_to_int64(data[2 * MAX_TURN_NUM], ' ', &response_tmp);
      response.push_back(std::move(response_tmp));
      // load response_mask data
      std::vector<float> response_mask_tmp;
      split_to_float(data[2 * MAX_TURN_NUM + 1], ' ', &response_mask_tmp);
      response_mask.push_back(std::move(response_mask_tmp));
      // load result data
      float result_tmp;
      result_tmp = std::stof(data[2 * MAX_TURN_NUM + 2]);
      result_data.push_back(result_tmp);
    }
    num_samples = num_lines;
  }
};

void PrepareInputs(std::vector<PaddleTensor> *input_slots, DataRecord *data,
                   int batch_size) {
  PaddleTensor turns_tensor[MAX_TURN_NUM];
  PaddleTensor turns_mask_tensor[MAX_TURN_NUM];
  PaddleTensor response_tensor;
  PaddleTensor response_mask_tensor;
  std::string turn_pre = "turn_";
  std::string turn_mask_pre = "turn_mask_";

  auto one_batch = data->NextBatch();
  int size = one_batch.response[0].size();
  CHECK_EQ(size, MAX_TURN_LEN);
  // turn tensor assignment
  for (int i = 0; i < MAX_TURN_NUM; ++i) {
    turns_tensor[i].name = turn_pre + std::to_string(i);
    turns_tensor[i].shape.assign({batch_size, size, 1});
    turns_tensor[i].dtype = PaddleDType::INT64;
    TensorAssignData<int64_t>(&turns_tensor[i], one_batch.turns[i]);
  }
  // turn mask tensor assignment
  for (int i = 0; i < MAX_TURN_NUM; ++i) {
    turns_mask_tensor[i].name = turn_mask_pre + std::to_string(i);
    turns_mask_tensor[i].shape.assign({batch_size, size, 1});
    turns_mask_tensor[i].dtype = PaddleDType::FLOAT32;
    TensorAssignData<float>(&turns_mask_tensor[i], one_batch.turns_mask[i]);
  }
  // response tensor assignment
  response_tensor.name = "response";
  response_tensor.shape.assign({batch_size, size, 1});
  response_tensor.dtype = PaddleDType::INT64;
  TensorAssignData<int64_t>(&response_tensor, one_batch.response);
  // response mask tensor assignment
  response_mask_tensor.name = "response_mask";
  response_mask_tensor.shape.assign({batch_size, size, 1});
  response_mask_tensor.dtype = PaddleDType::FLOAT32;
  TensorAssignData<float>(&response_mask_tensor, one_batch.response_mask);

  // Set inputs.
  for (int i = 0; i < MAX_TURN_NUM; ++i) {
    input_slots->push_back(std::move(turns_tensor[i]));
  }
  for (int i = 0; i < MAX_TURN_NUM; ++i) {
    input_slots->push_back(std::move(turns_mask_tensor[i]));
  }
  input_slots->push_back(std::move(response_tensor));
  input_slots->push_back(std::move(response_mask_tensor));
}

void SetConfig(contrib::AnalysisConfig *cfg) {
  cfg->prog_file = FLAGS_infer_model + "/__model__";
  cfg->param_file = FLAGS_infer_model + "/param";
  cfg->use_gpu = false;
  cfg->device = 0;
  cfg->specify_input_name = true;
  cfg->enable_ir_optim = true;
}

void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
  DataRecord data(FLAGS_infer_data, FLAGS_batch_size);
  std::vector<PaddleTensor> input_slots;
  int test_batch_num =
      FLAGS_test_all_data ? data.num_samples / FLAGS_batch_size : 1;
  LOG(INFO) << "The number of samples to be test: "
            << test_batch_num * FLAGS_batch_size;
  for (int bid = 0; bid < test_batch_num; ++bid) {
    input_slots.clear();
    PrepareInputs(&input_slots, &data, FLAGS_batch_size);
    (*inputs).emplace_back(input_slots);
  }
}

// Easy for profiling independently.
TEST(Analyzer_dam, profile) {
  contrib::AnalysisConfig cfg;
  SetConfig(&cfg);
  std::vector<PaddleTensor> outputs;

  std::vector<std::vector<PaddleTensor>> input_slots_all;
  SetInput(&input_slots_all);
  TestPrediction(cfg, input_slots_all, &outputs, FLAGS_num_threads);

  if (FLAGS_num_threads == 1 && !FLAGS_test_all_data) {
    PADDLE_ENFORCE_GT(outputs.size(), 0);
    size_t size = GetSize(outputs[0]);
    PADDLE_ENFORCE_GT(size, 0);
    float *result = static_cast<float *>(outputs[0].data.data());
    for (size_t i = 0; i < size; i++) {
      EXPECT_NEAR(result[i], result_data[i], 1e-3);
    }
  }
}

// Check the fuse status
TEST(Analyzer_dam, fuse_statis) {
  contrib::AnalysisConfig cfg;
  // cfg.enable_ir_optim must be set true
  SetConfig(&cfg);

  int num_ops;
  auto predictor = CreatePaddlePredictor<AnalysisConfig>(cfg);
  auto fuse_statis = GetFuseStatis(
      static_cast<AnalysisPredictor *>(predictor.get()), &num_ops);
  ASSERT_TRUE(fuse_statis.count("fc_fuse"));
  EXPECT_EQ(fuse_statis.at("fc_fuse"), 317);
  EXPECT_EQ(num_ops, 2020);
}

// Compare result of NativeConfig and AnalysisConfig
TEST(Analyzer_dam, compare) {
  contrib::AnalysisConfig cfg;
  SetConfig(&cfg);

  std::vector<std::vector<PaddleTensor>> input_slots_all;
  SetInput(&input_slots_all);
  CompareNativeAndAnalysis(cfg, input_slots_all);
}

}  // namespace inference
}  // namespace paddle
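An aside on the data layout: DataRecord::Load splits each line of the data file on commas into 2 * MAX_TURN_NUM + 3 = 21 fields — nine turn token-id lists, nine turn masks, the response ids, the response mask, and the reference score checked by EXPECT_NEAR in the profile test. A standalone sketch that writes one such line (the output file name and token values are made up, and the per-field lengths are shortened for readability; the real data pads every turn and the response to MAX_TURN_LEN = 50 entries, which the CHECK_EQ(size, MAX_TURN_LEN) in PrepareInputs enforces):

#include <fstream>
#include <sstream>

int main() {
  const int kMaxTurnNum = 9;  // mirrors MAX_TURN_NUM in the tester
  std::ostringstream line;
  for (int i = 0; i < kMaxTurnNum; ++i) line << "11 12 13,";  // fields 0-8: turn_i token ids
  for (int i = 0; i < kMaxTurnNum; ++i) line << "1 1 0,";     // fields 9-17: turn_mask_i
  line << "21 22 23,";  // field 18: response token ids
  line << "1 1 1,";     // field 19: response mask
  line << "0.875";      // field 20: reference score for this sample
  std::ofstream("dam_data_sample.txt") << line.str() << '\n';
  return 0;
}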
paddle/fluid/inference/tests/api/analyzer_ner_tester.cc

@@ -20,7 +20,6 @@ using contrib::AnalysisConfig;
 struct DataRecord {
   std::vector<std::vector<int64_t>> word_data_all, mention_data_all;
-  std::vector<std::vector<int64_t>> rnn_word_datas, rnn_mention_datas;
   std::vector<size_t> lod;  // two inputs have the same lod info.
   size_t batch_iter{0};
   size_t batch_size{1};
@@ -45,8 +44,6 @@ struct DataRecord {
       CHECK(!data.mention_data_all.empty());
       CHECK_EQ(data.word_data_all.size(), data.mention_data_all.size());
       for (size_t j = 0; j < data.word_data_all.size(); j++) {
-        data.rnn_word_datas.push_back(data.word_data_all[j]);
-        data.rnn_mention_datas.push_back(data.mention_data_all[j]);
         // calculate lod
         data.lod.push_back(data.lod.back() + data.word_data_all[j].size());
       }
@@ -87,8 +84,8 @@ void PrepareInputs(std::vector<PaddleTensor> *input_slots, DataRecord *data,
   lod_mention_tensor.shape.assign({size, 1});
   lod_mention_tensor.lod.assign({one_batch.lod});
   // assign data
-  TensorAssignData<int64_t>(&lod_word_tensor, one_batch.rnn_word_datas);
-  TensorAssignData<int64_t>(&lod_mention_tensor, one_batch.rnn_mention_datas);
+  TensorAssignData<int64_t>(&lod_word_tensor, one_batch.word_data_all);
+  TensorAssignData<int64_t>(&lod_mention_tensor, one_batch.mention_data_all);
   // Set inputs.
   input_slots->assign({lod_word_tensor, lod_mention_tensor});
   for (auto &tensor : *input_slots) {
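The net effect of this last change: rnn_word_datas and rnn_mention_datas only ever held per-batch copies of rows already stored in word_data_all and mention_data_all, so the input tensors are now assigned from the originals directly, and the redundant members, together with their per-row push_back copies, are removed.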