Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
PaddlePaddle
Paddle-Lite
提交
7587d479
P
Paddle-Lite
项目概览
PaddlePaddle
/
Paddle-Lite
通知
331
Star
4
Fork
1
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
271
列表
看板
标记
里程碑
合并请求
78
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle-Lite
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
271
Issue
271
列表
看板
标记
里程碑
合并请求
78
合并请求
78
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
7587d479
编写于
11月 25, 2019
作者:
W
Wilber
提交者:
GitHub
11月 25, 2019
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
update x86 op and kernel to run content-dnn model test=develop (#2481)
* update x86 op and kernel to run content-dnn model test=develop
上级
3c83e6f3
变更
19
隐藏空白更改
内联
并排
Showing
19 changed file
with
98 addition
and
55 deletion
+98
-55
lite/kernels/x86/CMakeLists.txt
lite/kernels/x86/CMakeLists.txt
+1
-1
lite/kernels/x86/attention_padding_mask_compute.cc
lite/kernels/x86/attention_padding_mask_compute.cc
+2
-2
lite/kernels/x86/attention_padding_mask_compute.h
lite/kernels/x86/attention_padding_mask_compute.h
+29
-23
lite/kernels/x86/attention_padding_mask_compute_test.cc
lite/kernels/x86/attention_padding_mask_compute_test.cc
+1
-1
lite/kernels/x86/lookup_table_compute.h
lite/kernels/x86/lookup_table_compute.h
+5
-5
lite/kernels/x86/match_matrix_tensor_compute.cc
lite/kernels/x86/match_matrix_tensor_compute.cc
+23
-0
lite/kernels/x86/search_aligned_mat_mul_compute.cc
lite/kernels/x86/search_aligned_mat_mul_compute.cc
+3
-0
lite/kernels/x86/search_fc_compute.h
lite/kernels/x86/search_fc_compute.h
+1
-0
lite/kernels/x86/sequence_reverse_compute.cc
lite/kernels/x86/sequence_reverse_compute.cc
+13
-6
lite/kernels/x86/sequence_reverse_compute.h
lite/kernels/x86/sequence_reverse_compute.h
+5
-6
lite/kernels/x86/sequence_reverse_compute_test.cc
lite/kernels/x86/sequence_reverse_compute_test.cc
+2
-2
lite/kernels/x86/softmax_compute.cc
lite/kernels/x86/softmax_compute.cc
+1
-0
lite/operators/attention_padding_mask_op.cc
lite/operators/attention_padding_mask_op.cc
+2
-2
lite/operators/match_matrix_tensor_op.cc
lite/operators/match_matrix_tensor_op.cc
+3
-0
lite/operators/search_fc_op.cc
lite/operators/search_fc_op.cc
+1
-1
lite/operators/search_group_padding_op.cc
lite/operators/search_group_padding_op.cc
+3
-3
lite/operators/sequence_arithmetic_op.cc
lite/operators/sequence_arithmetic_op.cc
+1
-1
lite/operators/sequence_concat_op.cc
lite/operators/sequence_concat_op.cc
+1
-1
lite/operators/sequence_topk_avg_pooling_op.cc
lite/operators/sequence_topk_avg_pooling_op.cc
+1
-1
未找到文件。
lite/kernels/x86/CMakeLists.txt
浏览文件 @
7587d479
...
...
@@ -96,5 +96,5 @@ lite_cc_test(test_stack_compute_x86 SRCS stack_compute_test.cc DEPS stack_comput
lite_cc_test
(
test_search_group_padding_compute_x86 SRCS search_group_padding_compute_test.cc DEPS search_group_padding_compute_x86
)
lite_cc_test
(
test_sequence_concat_compute_x86 SRCS sequence_concat_compute_test.cc DEPS sequence_concat_compute_x86
)
lite_cc_test
(
test_var_conv_2d_compute_x86 SRCS var_conv_2d_compute_test.cc DEPS var_conv_2d_compute_x86
)
lite_cc_test
(
test_attention_padding_mask_compute_x86 SRCS attention_padding_mask_compute_test.cc DEPS attention_padding_mask_compute_x86
)
#
lite_cc_test(test_attention_padding_mask_compute_x86 SRCS attention_padding_mask_compute_test.cc DEPS attention_padding_mask_compute_x86)
lite_cc_test
(
test_sequence_arithmetic_compute_x86 SRCS sequence_arithmetic_compute_test.cc DEPS sequence_arithmetic_compute_x86
)
lite/kernels/x86/attention_padding_mask_compute.cc
浏览文件 @
7587d479
...
...
@@ -15,7 +15,7 @@
#include "lite/kernels/x86/attention_padding_mask_compute.h"
REGISTER_LITE_KERNEL
(
attention_padding_mask
,
search_
attention_padding_mask
,
kX86
,
kFloat
,
kNCHW
,
...
...
@@ -23,6 +23,6 @@ REGISTER_LITE_KERNEL(
def
)
.
BindInput
(
"X"
,
{
LiteType
::
GetTensorTy
(
TARGET
(
kX86
))})
.
BindInput
(
"Y"
,
{
LiteType
::
GetTensorTy
(
TARGET
(
kX86
))})
.
BindOutput
(
"
o
ut"
,
{
LiteType
::
GetTensorTy
(
TARGET
(
kX86
))})
.
BindOutput
(
"
O
ut"
,
{
LiteType
::
GetTensorTy
(
TARGET
(
kX86
))})
.
BindOutput
(
"pad_begin"
,
{
LiteType
::
GetTensorTy
(
TARGET
(
kX86
))})
.
Finalize
();
lite/kernels/x86/attention_padding_mask_compute.h
浏览文件 @
7587d479
...
...
@@ -36,30 +36,36 @@ class AttentionPaddingMaskCompute
void
Run
()
override
{
auto
&
param
=
*
param_
.
get_mutable
<
param_t
>
();
auto
src
=
param
.
Y
;
auto
attn
=
param
.
X
;
auto
src_offset
=
src
->
lod
()[
0
];
auto
attn_offset
=
attn
->
lod
()[
0
];
int
attn_seq_num
=
attn_offset
.
size
()
-
1
;
int
src_seq_num
=
src_offset
.
size
()
-
1
;
int
attn_seq_len
=
attn_offset
[
1
];
int
src_seq_len
=
attn
->
numel
()
/
attn
->
dims
()[
0
];
size_t
count
=
attn
->
numel
();
auto
attn_data
=
attn
->
data
<
T
>
();
auto
out
=
param
.
Out
;
out
->
Resize
(
attn
->
dims
());
out
->
set_lod
(
attn
->
lod
());
auto
out_data
=
out
->
mutable_data
<
T
>
();
memcpy
(
out_data
,
attn_data
,
count
*
sizeof
(
T
));
auto
*
bottom0
=
param
.
X
;
auto
*
bottom1
=
param
.
Y
;
auto
*
_pad_begin
=
param
.
pad_begin
;
auto
*
top
=
param
.
Out
;
int
_pad_id
=
param
.
pad_id
;
float
_mask
=
param
.
mask
;
auto
src_len
=
static_cast
<
int64_t
>
(
bottom1
->
lod
()[
0
][
1
]);
const
int
att_batch
=
bottom0
->
lod
()[
0
].
size
()
-
1
;
const
int
src_batch
=
bottom1
->
lod
()[
0
].
size
()
-
1
;
int
*
pad_begin
=
_pad_begin
->
mutable_data
<
int
>
();
for
(
int
i
=
0
;
i
<
src_batch
;
++
i
)
{
const
auto
*
src_data
=
bottom1
->
data
<
T
>
()
+
src_len
*
i
;
int
index
=
src_len
-
1
;
for
(;
index
>=
0
&&
_pad_id
==
static_cast
<
int
>
(
src_data
[
index
]);
--
index
)
{
}
pad_begin
[
i
]
=
index
+
1
;
}
for
(
int
i
=
0
;
i
<
attn_seq_num
;
++
i
)
{
for
(
int
j
=
0
;
j
<
attn_seq_len
;
++
j
)
{
auto
tmp_out_data
=
out_data
+
src_seq_len
*
(
attn_seq_len
*
i
+
j
);
int
src_seq_idx
=
i
%
src_seq_num
;
int
cur_len
=
src_offset
[
src_seq_idx
+
1
]
-
src_offset
[
src_seq_idx
];
for
(
int
k
=
cur_len
;
k
<
src_seq_len
;
k
++
)
{
tmp_out_data
[
k
]
=
param
.
mask
;
const
auto
att_len
=
static_cast
<
int64_t
>
(
bottom0
->
lod
()[
0
][
1
]);
auto
*
top_data
=
top
->
mutable_data
<
T
>
();
memcpy
(
top_data
,
bottom0
->
data
<
T
>
(),
bottom0
->
dims
()[
0
]
*
bottom0
->
dims
()[
1
]
*
sizeof
(
T
));
for
(
int
i
=
0
;
i
<
att_batch
;
++
i
)
{
for
(
int
j
=
0
;
j
<
att_len
;
++
j
)
{
top_data
=
top
->
mutable_data
<
T
>
()
+
src_len
*
(
att_len
*
i
+
j
);
int
src_idx
=
i
%
src_batch
;
for
(
int
k
=
pad_begin
[
src_idx
];
k
<
src_len
;
++
k
)
{
top_data
[
k
]
=
_mask
;
}
}
}
...
...
lite/kernels/x86/attention_padding_mask_compute_test.cc
浏览文件 @
7587d479
...
...
@@ -129,4 +129,4 @@ TEST(attention_padding_mask_x86, run_test) {
}
// namespace lite
}
// namespace paddle
USE_LITE_KERNEL
(
attention_padding_mask
,
kX86
,
kFloat
,
kNCHW
,
def
);
USE_LITE_KERNEL
(
search_
attention_padding_mask
,
kX86
,
kFloat
,
kNCHW
,
def
);
lite/kernels/x86/lookup_table_compute.h
浏览文件 @
7587d479
...
...
@@ -40,18 +40,18 @@ class LookupTableCompute : public KernelLite<TARGET(kX86), PRECISION(kInt64)> {
int64_t
row_number
=
table_t
->
dims
()[
0
];
int64_t
row_width
=
table_t
->
dims
()[
1
];
auto
*
table
=
table_t
->
data
<
float
>
();
auto
*
output
=
output_t
->
mutable_data
<
float
>
();
memset
(
output
,
0
,
output_t
->
dims
().
production
()
*
sizeof
(
float
));
auto
*
table
=
table_t
->
data
<
T
>
();
auto
*
output
=
output_t
->
mutable_data
<
T
>
();
memset
(
output
,
0
,
output_t
->
dims
().
production
()
*
sizeof
(
T
));
for
(
int64_t
i
=
0
;
i
<
ids_numel
;
++
i
)
{
if
(
padding_idx
!=
-
1
&&
ids
[
i
]
==
padding_idx
)
{
memset
(
output
+
i
*
row_width
,
0
,
row_width
*
sizeof
(
float
));
memset
(
output
+
i
*
row_width
,
0
,
row_width
*
sizeof
(
T
));
}
else
{
CHECK_LT
(
ids
[
i
],
row_number
);
CHECK_GE
(
ids
[
i
],
0
);
memcpy
(
output
+
i
*
row_width
,
table
+
ids
[
i
]
*
row_width
,
row_width
*
sizeof
(
float
));
row_width
*
sizeof
(
T
));
}
}
}
...
...
lite/kernels/x86/match_matrix_tensor_compute.cc
浏览文件 @
7587d479
...
...
@@ -94,8 +94,31 @@ void MatchMatrixTensorCompute<T>::Run() {
}
}
int
batch_size
=
x
->
lod
()[
0
].
size
()
-
1
;
int
lod_lv1_size
=
batch_size
*
dim_t
;
int
lod_lv2_size
=
x
->
lod
()[
0
].
back
()
*
dim_t
;
std
::
vector
<
size_t
>
out_lod0
(
batch_size
+
1
,
0
);
std
::
vector
<
size_t
>
out_lod1
(
lod_lv1_size
+
1
,
0
);
std
::
vector
<
size_t
>
out_lod2
(
lod_lv2_size
+
1
,
0
);
for
(
int
i
=
0
;
i
<
batch_size
;
i
++
)
{
out_lod0
[
i
+
1
]
=
out_lod0
[
i
]
+
dim_t
;
int
len_l
=
offset_l
[
i
+
1
]
-
offset_l
[
i
];
for
(
int
j
=
0
;
j
<
dim_t
;
j
++
)
{
out_lod1
[
i
*
dim_t
+
j
+
1
]
=
out_lod1
[
i
*
dim_t
+
j
]
+
len_l
;
int
len_r
=
offset_r
[
i
+
1
]
-
offset_r
[
i
];
for
(
int
k
=
0
;
k
<
len_l
;
k
++
)
{
out_lod2
[
offset_l
[
i
]
*
dim_t
+
j
*
len_l
+
k
+
1
]
=
out_lod2
[
offset_l
[
i
]
*
dim_t
+
j
*
len_l
+
k
]
+
len_r
;
}
}
}
LoD
out_lod
;
out_lod
.
push_back
(
top_offset
);
out_lod
.
push_back
(
offset_l
);
out_lod
.
push_back
(
offset_r
);
out
->
set_lod
(
out_lod
);
}
...
...
lite/kernels/x86/search_aligned_mat_mul_compute.cc
浏览文件 @
7587d479
...
...
@@ -24,4 +24,7 @@ REGISTER_LITE_KERNEL(
.
BindInput
(
"X"
,
{
LiteType
::
GetTensorTy
(
TARGET
(
kX86
))})
.
BindInput
(
"Y"
,
{
LiteType
::
GetTensorTy
(
TARGET
(
kX86
))})
.
BindOutput
(
"Out"
,
{
LiteType
::
GetTensorTy
(
TARGET
(
kX86
))})
.
BindOutput
(
"_a_addr"
,
{
LiteType
::
GetTensorTy
(
TARGET
(
kX86
))})
.
BindOutput
(
"_b_addr"
,
{
LiteType
::
GetTensorTy
(
TARGET
(
kX86
))})
.
BindOutput
(
"_c_addr"
,
{
LiteType
::
GetTensorTy
(
TARGET
(
kX86
))})
.
Finalize
();
lite/kernels/x86/search_fc_compute.h
浏览文件 @
7587d479
...
...
@@ -31,6 +31,7 @@ class SearchFcCompute : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
auto
&
context
=
ctx_
->
As
<
X86Context
>
();
auto
&
param
=
*
param_
.
get_mutable
<
param_t
>
();
param
.
Out
->
Resize
({
param
.
X
->
dims
()[
0
],
param
.
out_size
});
lite
::
x86
::
math
::
SearchFcFunctor
<
lite
::
TargetType
::
kX86
,
T
>
search_fc
;
search_fc
(
context
,
*
param
.
X
,
*
param
.
W
,
*
param
.
b
,
param
.
Out
,
param
.
out_size
);
}
...
...
lite/kernels/x86/sequence_reverse_compute.cc
浏览文件 @
7587d479
...
...
@@ -14,12 +14,19 @@
#include "lite/kernels/x86/sequence_reverse_compute.h"
REGISTER_LITE_KERNEL
(
sequence_reverse
,
kX86
,
kFloat
,
kNCHW
,
paddle
::
lite
::
kernels
::
x86
::
SequenceReverseCompute
<
float
>
,
def
)
typedef
paddle
::
lite
::
kernels
::
x86
::
SequenceReverseCompute
<
float
,
PRECISION
(
kFloat
)
>
ReverseFp32
;
typedef
paddle
::
lite
::
kernels
::
x86
::
SequenceReverseCompute
<
int64_t
,
PRECISION
(
kInt64
)
>
ReverseInt64
;
REGISTER_LITE_KERNEL
(
sequence_reverse
,
kX86
,
kFloat
,
kNCHW
,
ReverseFp32
,
def
)
.
BindInput
(
"X"
,
{
LiteType
::
GetTensorTy
(
TARGET
(
kX86
))})
.
BindOutput
(
"Y"
,
{
LiteType
::
GetTensorTy
(
TARGET
(
kX86
))})
.
Finalize
();
REGISTER_LITE_KERNEL
(
sequence_reverse
,
kX86
,
kInt64
,
kNCHW
,
ReverseInt64
,
def
)
.
BindInput
(
"X"
,
{
LiteType
::
GetTensorTy
(
TARGET
(
kX86
),
PRECISION
(
kInt64
))})
.
BindOutput
(
"Y"
,
{
LiteType
::
GetTensorTy
(
TARGET
(
kX86
),
PRECISION
(
kInt64
))})
.
Finalize
();
lite/kernels/x86/sequence_reverse_compute.h
浏览文件 @
7587d479
...
...
@@ -22,18 +22,17 @@ namespace lite {
namespace
kernels
{
namespace
x86
{
template
<
typename
T
>
class
SequenceReverseCompute
:
public
KernelLite
<
TARGET
(
kX86
),
PRECISION
(
kFloat
)
>
{
template
<
typename
T
,
PrecisionType
Ptype
>
class
SequenceReverseCompute
:
public
KernelLite
<
TARGET
(
kX86
),
Ptype
>
{
public:
using
param_t
=
operators
::
SequenceReverseParam
;
void
Run
()
override
{
auto
&
param
=
*
param_
.
get_mutable
<
operators
::
SequenceReverseParam
>
();
auto
&
param
=
this
->
template
Param
<
param_t
>();
auto
*
output
=
param
.
Out
;
const
auto
*
din
=
param
.
X
->
data
<
T
>
();
const
auto
*
din
=
param
.
X
->
template
data
<
T
>();
T
*
dout
=
output
->
mutable_data
<
T
>
();
T
*
dout
=
output
->
template
mutable_data
<
T
>();
CHECK_NE
(
din
,
dout
)
<<
"SequenceReverse Op does not support in-place operation"
;
const
auto
lod
=
param
.
X
->
lod
()[
param
.
X
->
lod
().
size
()
-
1
];
...
...
lite/kernels/x86/sequence_reverse_compute_test.cc
浏览文件 @
7587d479
...
...
@@ -52,13 +52,13 @@ TEST(sequence_reverse_x86, retrive_op) {
}
TEST
(
sequence_reverse_x86
,
init
)
{
SequenceReverseCompute
<
float
>
sequence_reverse
;
SequenceReverseCompute
<
float
,
PRECISION
(
kFloat
)
>
sequence_reverse
;
ASSERT_EQ
(
sequence_reverse
.
precision
(),
PRECISION
(
kFloat
));
ASSERT_EQ
(
sequence_reverse
.
target
(),
TARGET
(
kX86
));
}
TEST
(
sequence_reverse_x86
,
run_test
)
{
SequenceReverseCompute
<
float
>
seq_kernel
;
SequenceReverseCompute
<
float
,
PRECISION
(
kFloat
)
>
seq_kernel
;
std
::
unique_ptr
<
KernelContext
>
ctx
(
new
KernelContext
);
operators
::
SequenceReverseParam
param
;
...
...
lite/kernels/x86/softmax_compute.cc
浏览文件 @
7587d479
...
...
@@ -31,4 +31,5 @@ REGISTER_LITE_KERNEL(search_seq_softmax,
def
)
.
BindInput
(
"X"
,
{
LiteType
::
GetTensorTy
(
TARGET
(
kX86
))})
.
BindOutput
(
"Out"
,
{
LiteType
::
GetTensorTy
(
TARGET
(
kX86
))})
.
BindOutput
(
"Out_log"
,
{
LiteType
::
GetTensorTy
(
TARGET
(
kX86
))})
.
Finalize
();
lite/operators/attention_padding_mask_op.cc
浏览文件 @
7587d479
...
...
@@ -50,9 +50,9 @@ bool AttentionPaddingMaskOp::AttachImpl(const cpp::OpDesc &op_desc,
lite
::
Scope
*
scope
)
{
param_
.
X
=
scope
->
FindTensor
(
op_desc
.
Input
(
"X"
).
front
());
param_
.
Y
=
scope
->
FindTensor
(
op_desc
.
Input
(
"Y"
).
front
());
param_
.
Out
=
scope
->
FindMutableTensor
(
op_desc
.
In
put
(
"Out"
).
front
());
param_
.
Out
=
scope
->
FindMutableTensor
(
op_desc
.
Out
put
(
"Out"
).
front
());
param_
.
pad_begin
=
scope
->
FindMutableTensor
(
op_desc
.
In
put
(
"pad_begin"
).
front
());
scope
->
FindMutableTensor
(
op_desc
.
Out
put
(
"pad_begin"
).
front
());
param_
.
pad_id
=
op_desc
.
GetAttr
<
int
>
(
"pad_id"
);
param_
.
mask
=
op_desc
.
GetAttr
<
float
>
(
"mask"
);
...
...
lite/operators/match_matrix_tensor_op.cc
浏览文件 @
7587d479
...
...
@@ -35,6 +35,7 @@ bool MatchMatrixTensorOpLite::CheckShape() const {
CHECK_OR_FALSE
(
x_dims
.
size
()
==
2
);
CHECK_OR_FALSE
(
y_dims
.
size
()
==
2
);
CHECK_OR_FALSE
(
w_dims
.
size
()
==
3
);
CHECK_OR_FALSE
(
x_dims
[
1
]
==
w_dims
[
0
]
&&
y_dims
[
1
]
==
w_dims
[
2
]
&&
w_dims
[
1
]
==
dim_t
);
...
...
@@ -91,6 +92,8 @@ bool MatchMatrixTensorOpLite::AttachImpl(const cpp::OpDesc& op_desc,
param_
.
out
=
scope
->
FindVar
(
out
)
->
GetMutable
<
lite
::
Tensor
>
();
param_
.
tmp
=
scope
->
FindVar
(
tmp
)
->
GetMutable
<
lite
::
Tensor
>
();
param_
.
dim_t
=
op_desc
.
GetAttr
<
int32_t
>
(
"dim_t"
);
return
true
;
}
...
...
lite/operators/search_fc_op.cc
浏览文件 @
7587d479
...
...
@@ -77,4 +77,4 @@ bool SearchFcOpLite::AttachImpl(const cpp::OpDesc &op_desc,
}
// namespace lite
}
// namespace paddle
REGISTER_LITE_OP
(
SearchF
c
,
paddle
::
lite
::
operators
::
SearchFcOpLite
);
REGISTER_LITE_OP
(
search_f
c
,
paddle
::
lite
::
operators
::
SearchFcOpLite
);
lite/operators/search_group_padding_op.cc
浏览文件 @
7587d479
...
...
@@ -43,9 +43,9 @@ bool SearchGroupPaddingOp::InferShape() const {
bool
SearchGroupPaddingOp
::
AttachImpl
(
const
cpp
::
OpDesc
&
op_desc
,
lite
::
Scope
*
scope
)
{
auto
x
=
op_desc
.
Input
(
"X"
).
front
();
auto
out_emb_padding
=
op_desc
.
In
put
(
"Out_emb_padding"
).
front
();
auto
out_new
=
op_desc
.
In
put
(
"Out_new"
).
front
();
auto
out_padding
=
op_desc
.
In
put
(
"Out_padding"
).
front
();
auto
out_emb_padding
=
op_desc
.
Out
put
(
"Out_emb_padding"
).
front
();
auto
out_new
=
op_desc
.
Out
put
(
"Out_new"
).
front
();
auto
out_padding
=
op_desc
.
Out
put
(
"Out_padding"
).
front
();
param_
.
x
=
scope
->
FindVar
(
x
)
->
GetMutable
<
lite
::
Tensor
>
();
param_
.
out_emb_padding
=
...
...
lite/operators/sequence_arithmetic_op.cc
浏览文件 @
7587d479
...
...
@@ -38,7 +38,7 @@ bool SequenceArithmeticOp::AttachImpl(const cpp::OpDesc &opdesc,
lite
::
Scope
*
scope
)
{
param_
.
X
=
scope
->
FindTensor
(
opdesc
.
Input
(
"X"
).
front
());
param_
.
Y
=
scope
->
FindTensor
(
opdesc
.
Input
(
"Y"
).
front
());
param_
.
Out
=
scope
->
FindMutableTensor
(
opdesc
.
In
put
(
"Out"
).
front
());
param_
.
Out
=
scope
->
FindMutableTensor
(
opdesc
.
Out
put
(
"Out"
).
front
());
param_
.
op_type
=
opdesc
.
GetAttr
<
int
>
(
"op_type"
);
...
...
lite/operators/sequence_concat_op.cc
浏览文件 @
7587d479
...
...
@@ -27,7 +27,7 @@ bool SequenceConcatOp::CheckShape() const {
for
(
const
auto
&
t
:
param_
.
X
)
{
CHECK_EQ
(
t
->
lod
().
empty
(),
false
)
<<
"Input Tensor of X does not contain LoD information."
;
CHECK_EQ
(
t
->
lod
().
size
(),
1
)
<<
"Only support one level sequence now."
;
//
CHECK_EQ(t->lod().size(), 1) << "Only support one level sequence now.";
if
(
lod_size
==
0
)
{
lod_size
=
t
->
lod
()[
0
].
size
();
}
else
{
...
...
lite/operators/sequence_topk_avg_pooling_op.cc
浏览文件 @
7587d479
...
...
@@ -82,5 +82,5 @@ bool SequenceTopkAvgPoolingOpLite::AttachImpl(const cpp::OpDesc &op_desc,
}
// namespace lite
}
// namespace paddle
REGISTER_LITE_OP
(
SequenceTopkAvgP
ooling
,
REGISTER_LITE_OP
(
sequence_topk_avg_p
ooling
,
paddle
::
lite
::
operators
::
SequenceTopkAvgPoolingOpLite
);
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录