BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 33b49635
Authored on Nov 29, 2018 by ZhenWang
Parent: 8f2e556e

    unify the normal and small dam model.
Showing 2 changed files with 52 additions and 33 deletions:

    paddle/fluid/inference/tests/api/CMakeLists.txt            +6   -3
    paddle/fluid/inference/tests/api/analyzer_dam_tester.cc    +46  -30
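The whole change follows one pattern: the compile-time constant MAX_TURN_NUM becomes the runtime flag FLAGS_max_turn_num, so a single test binary can cover both the small DAM model (--max_turn_num=1) and the normal one (--max_turn_num=9). A minimal, self-contained sketch of that pattern with gflags (the flag name and help string match the tester; everything else here is illustrative):

    #include <gflags/gflags.h>

    #include <cstdint>
    #include <vector>

    // Runtime-configurable replacement for a compile-time `#define MAX_TURN_NUM 1`.
    DEFINE_int32(max_turn_num, 1,
                 "The max turn number: 1 for the small and 9 for the normal.");

    int main(int argc, char **argv) {
      google::ParseCommandLineFlags(&argc, &argv, /*remove_flags=*/true);
      // Containers are sized from the flag instead of a macro, so the same
      // binary can feed a 1-turn or a 9-turn model.
      std::vector<std::vector<int64_t>> turns(FLAGS_max_turn_num);
      return turns.empty() ? 1 : 0;
    }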
paddle/fluid/inference/tests/api/CMakeLists.txt

@@ -48,10 +48,13 @@ inference_analysis_api_test(test_analyzer_rnn2 ${RNN2_INSTALL_DIR} analyzer_rnn2
 # DAM
 set(DAM_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/dam")
-# For the normal DAM model
+# For the normal DAM model: download DAM_model.tar.gz and DAM_data.txt.tar.gz.
 # download_model_and_data(${DAM_INSTALL_DIR} "DAM_model.tar.gz" "DAM_data.txt.tar.gz")
-download_model_and_data(${DAM_INSTALL_DIR} "small_dam_model.tar.gz" "small_dam_data.txt.tar.gz")
-inference_analysis_api_test(test_analyzer_dam ${DAM_INSTALL_DIR} analyzer_dam_tester.cc)
+download_model_and_data(${DAM_INSTALL_DIR} "dam_small_model.tar.gz" "dam_small_data.txt.tar.gz")
+# For the normal DAM model: --max_turn_num=9.
+inference_analysis_test(test_analyzer_dam SRCS analyzer_dam_tester.cc
+    EXTRA_DEPS ${INFERENCE_EXTRA_DEPS}
+    ARGS --infer_model=${DAM_INSTALL_DIR}/model --infer_data=${DAM_INSTALL_DIR}/data.txt --max_turn_num=1)
 # chinese_ner
 set(CHINESE_NER_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/chinese_ner")
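The test registration changes accordingly: instead of the inference_analysis_api_test shorthand, the target is declared through inference_analysis_test so that --max_turn_num=1 can be passed explicitly for the small model fetched by download_model_and_data. Per the comments, exercising the normal model means downloading DAM_model.tar.gz / DAM_data.txt.tar.gz instead and running with --max_turn_num=9.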
paddle/fluid/inference/tests/api/analyzer_dam_tester.cc

@@ -14,38 +14,54 @@
 #include "paddle/fluid/inference/tests/api/tester_helper.h"

+DEFINE_int32(max_turn_num, 1,
+             "The max turn number: 1 for the small and 9 for the normal.");
+
 namespace paddle {
 namespace inference {
 using contrib::AnalysisConfig;

-#define MAX_TURN_NUM 1
-#define MAX_TURN_LEN 50
+constexpr int32_t kMaxTurnLen = 50;

 static std::vector<float> result_data;

 struct DataRecord {
-  std::vector<std::vector<int64_t>> turns[MAX_TURN_NUM];     // turns data : MAX_TURN_NUM
-  std::vector<std::vector<float>> turns_mask[MAX_TURN_NUM];  // turns mask data : MAX_TURN_NUM
-  std::vector<std::vector<int64_t>> response;                // response data : 1
+  std::vector<std::vector<int64_t>> *turns;
+  std::vector<std::vector<float>> *turns_mask;
+  std::vector<std::vector<int64_t>> response;     // response data : 1
   std::vector<std::vector<float>> response_mask;  // response mask data : 1
   size_t batch_iter{0};
   size_t batch_size{1};
   size_t num_samples;  // total number of samples
-  DataRecord() = default;
+
+  DataRecord() {
+    turns = new std::vector<std::vector<int64_t>>[FLAGS_max_turn_num];  // turns data : FLAGS_max_turn_num
+    turns_mask = new std::vector<std::vector<float>>[FLAGS_max_turn_num];  // turns mask data : FLAGS_max_turn_num
+  }
+
   explicit DataRecord(const std::string &path, int batch_size = 1)
-      : batch_size(batch_size) {
+      : DataRecord() {
+    this->batch_size = batch_size;
     Load(path);
   }
+
+  ~DataRecord() {
+    delete[] turns;
+    delete[] turns_mask;
+  }
+
   DataRecord NextBatch() {
     DataRecord data;
     size_t batch_end = batch_iter + batch_size;
     // NOTE skip the final batch, if no enough data is provided.
     if (batch_end <= response.size()) {
-      for (int i = 0; i < MAX_TURN_NUM; ++i) {
+      for (int i = 0; i < FLAGS_max_turn_num; ++i) {
         data.turns[i].assign(turns[i].begin() + batch_iter,
                              turns[i].begin() + batch_end);
       }
-      for (int i = 0; i < MAX_TURN_NUM; ++i) {
+      for (int i = 0; i < FLAGS_max_turn_num; ++i) {
         data.turns_mask[i].assign(turns_mask[i].begin() + batch_iter,
                                   turns_mask[i].begin() + batch_end);
       }
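The member arrays formerly sized by the MAX_TURN_NUM macro become heap arrays allocated from FLAGS_max_turn_num in the default constructor and released in the new destructor. One subtlety of this layout: NextBatch() returns a DataRecord by value, and with a user-declared destructor the implicitly generated copy constructor just duplicates the raw turns/turns_mask pointers, so the code leans on copy elision to avoid a double delete[]. A sketch of an equivalent runtime-sized record built on std::vector, which needs no hand-written destructor (DataRecordSketch is a hypothetical name, not part of the commit):

    #include <cstdint>
    #include <vector>

    // Equivalent runtime-sized storage without new[]/delete[]: the outer
    // vector plays the role of the heap array, and copies/moves/destruction
    // are generated correctly by the compiler.
    struct DataRecordSketch {
      std::vector<std::vector<std::vector<int64_t>>> turns;     // [max_turn_num]
      std::vector<std::vector<std::vector<float>>> turns_mask;  // [max_turn_num]

      explicit DataRecordSketch(int max_turn_num)
          : turns(max_turn_num), turns_mask(max_turn_num) {}
    };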
@@ -60,6 +76,7 @@ struct DataRecord {
     batch_iter += batch_size;
     return data;
   }
+
   void Load(const std::string &path) {
     std::ifstream file(path);
     std::string line;
@@ -69,30 +86,30 @@ struct DataRecord {
       num_lines++;
       std::vector<std::string> data;
       split(line, ',', &data);
-      CHECK_EQ(data.size(), (size_t)(2 * MAX_TURN_NUM + 3));
+      CHECK_EQ(data.size(), (size_t)(2 * FLAGS_max_turn_num + 3));
       // load turn data
-      std::vector<int64_t> turns_tmp[MAX_TURN_NUM];
-      for (int i = 0; i < MAX_TURN_NUM; ++i) {
+      std::vector<int64_t> turns_tmp[FLAGS_max_turn_num];
+      for (int i = 0; i < FLAGS_max_turn_num; ++i) {
         split_to_int64(data[i], ' ', &turns_tmp[i]);
         turns[i].push_back(std::move(turns_tmp[i]));
       }
       // load turn_mask data
-      std::vector<float> turns_mask_tmp[MAX_TURN_NUM];
-      for (int i = 0; i < MAX_TURN_NUM; ++i) {
-        split_to_float(data[MAX_TURN_NUM + i], ' ', &turns_mask_tmp[i]);
+      std::vector<float> turns_mask_tmp[FLAGS_max_turn_num];
+      for (int i = 0; i < FLAGS_max_turn_num; ++i) {
+        split_to_float(data[FLAGS_max_turn_num + i], ' ', &turns_mask_tmp[i]);
         turns_mask[i].push_back(std::move(turns_mask_tmp[i]));
       }
       // load response data
       std::vector<int64_t> response_tmp;
-      split_to_int64(data[2 * MAX_TURN_NUM], ' ', &response_tmp);
+      split_to_int64(data[2 * FLAGS_max_turn_num], ' ', &response_tmp);
       response.push_back(std::move(response_tmp));
       // load response_mask data
       std::vector<float> response_mask_tmp;
-      split_to_float(data[2 * MAX_TURN_NUM + 1], ' ', &response_mask_tmp);
+      split_to_float(data[2 * FLAGS_max_turn_num + 1], ' ', &response_mask_tmp);
       response_mask.push_back(std::move(response_mask_tmp));
       // load result data
       float result_tmp;
-      result_tmp = std::stof(data[2 * MAX_TURN_NUM + 2]);
+      result_tmp = std::stof(data[2 * FLAGS_max_turn_num + 2]);
       result_data.push_back(result_tmp);
     }
     num_samples = num_lines;
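The CHECK_EQ above pins down the input format: each line of the data file carries 2 * FLAGS_max_turn_num + 3 comma-separated fields, in order the turn token lists, the turn masks, the response tokens, the response mask, and the scalar result, with space-separated values inside each field (the tester later checks the response length against kMaxTurnLen = 50). For --max_turn_num=1 that is five fields per line, e.g. (all values hypothetical, lists truncated):

    12 7 943 ... ,1.0 1.0 0.0 ... ,4 81 5 ... ,1.0 1.0 1.0 ... ,0.5
    turn_0        turn_mask_0      response     response_mask    result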
@@ -101,8 +118,8 @@ struct DataRecord {
 void PrepareInputs(std::vector<PaddleTensor> *input_slots, DataRecord *data,
                    int batch_size) {
-  PaddleTensor turns_tensor[MAX_TURN_NUM];
-  PaddleTensor turns_mask_tensor[MAX_TURN_NUM];
+  PaddleTensor turns_tensor[FLAGS_max_turn_num];
+  PaddleTensor turns_mask_tensor[FLAGS_max_turn_num];
   PaddleTensor response_tensor;
   PaddleTensor response_mask_tensor;
   std::string turn_pre = "turn_";
@@ -110,16 +127,16 @@ void PrepareInputs(std::vector<PaddleTensor> *input_slots, DataRecord *data,
   auto one_batch = data->NextBatch();
   int size = one_batch.response[0].size();
-  CHECK_EQ(size, MAX_TURN_LEN);
+  CHECK_EQ(size, kMaxTurnLen);
   // turn tensor assignment
-  for (int i = 0; i < MAX_TURN_NUM; ++i) {
+  for (int i = 0; i < FLAGS_max_turn_num; ++i) {
     turns_tensor[i].name = turn_pre + std::to_string(i);
     turns_tensor[i].shape.assign({batch_size, size, 1});
     turns_tensor[i].dtype = PaddleDType::INT64;
     TensorAssignData<int64_t>(&turns_tensor[i], one_batch.turns[i]);
   }
   // turn mask tensor assignment
-  for (int i = 0; i < MAX_TURN_NUM; ++i) {
+  for (int i = 0; i < FLAGS_max_turn_num; ++i) {
     turns_mask_tensor[i].name = turn_mask_pre + std::to_string(i);
     turns_mask_tensor[i].shape.assign({batch_size, size, 1});
     turns_mask_tensor[i].dtype = PaddleDType::FLOAT32;
@@ -137,10 +154,10 @@ void PrepareInputs(std::vector<PaddleTensor> *input_slots, DataRecord *data,
   TensorAssignData<float>(&response_mask_tensor, one_batch.response_mask);

   // Set inputs.
-  for (int i = 0; i < MAX_TURN_NUM; ++i) {
+  for (int i = 0; i < FLAGS_max_turn_num; ++i) {
     input_slots->push_back(std::move(turns_tensor[i]));
   }
-  for (int i = 0; i < MAX_TURN_NUM; ++i) {
+  for (int i = 0; i < FLAGS_max_turn_num; ++i) {
     input_slots->push_back(std::move(turns_mask_tensor[i]));
   }
   input_slots->push_back(std::move(response_tensor));
@@ -148,7 +165,8 @@ void PrepareInputs(std::vector<PaddleTensor> *input_slots, DataRecord *data,
 }

 void SetConfig(contrib::AnalysisConfig *cfg) {
-  cfg->model_dir = FLAGS_infer_model;
+  cfg->prog_file = FLAGS_infer_model + "/__model__";
+  cfg->param_file = FLAGS_infer_model + "/param";
   cfg->use_gpu = false;
   cfg->device = 0;
   cfg->specify_input_name = true;
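Note the config change: instead of pointing model_dir at a directory of per-parameter files, the test now loads the program from ${FLAGS_infer_model}/__model__ and the parameters from the single ${FLAGS_infer_model}/param file, presumably matching how the downloaded dam_small archive packages the model.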
@@ -201,8 +219,6 @@ TEST(Analyzer_dam, fuse_statis) {
   auto fuse_statis = GetFuseStatis(
       static_cast<AnalysisPredictor *>(predictor.get()), &num_ops);
   ASSERT_TRUE(fuse_statis.count("fc_fuse"));
-  EXPECT_EQ(fuse_statis.at("fc_fuse"), 45);
-  EXPECT_EQ(num_ops, 292);
 }

 // Compare result of NativeConfig and AnalysisConfig
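With the test now shared between the small and the normal model, the hard-coded fuse statistics (45 fc_fuse fusions, 292 ops in total) can no longer hold for both graphs, so the test drops the exact counts and only asserts that the fc_fuse pass fired.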