Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
机器未来
Paddle
提交
cd382866
P
Paddle
项目概览
机器未来
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1
Issue
1
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
提交
cd382866
编写于
10月 26, 2017
作者:
D
dangqingqing
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Add gradient check unit testing and fix bug.
上级
d2bd7357
变更
7
显示空白变更内容
内联
并排
Showing
7 changed file
with
163 addition
and
78 deletion
+163
-78
paddle/operators/lstm_op.cc
paddle/operators/lstm_op.cc
+33
-24
paddle/operators/lstm_op.h
paddle/operators/lstm_op.h
+28
-13
paddle/operators/math/math_function.cc
paddle/operators/math/math_function.cc
+20
-0
paddle/operators/math/math_function.cu
paddle/operators/math/math_function.cu
+27
-0
paddle/operators/math/math_function.h
paddle/operators/math/math_function.h
+5
-0
paddle/operators/math/sequence2batch.h
paddle/operators/math/sequence2batch.h
+3
-6
python/paddle/v2/framework/tests/test_lstm_op.py
python/paddle/v2/framework/tests/test_lstm_op.py
+47
-35
未找到文件。
paddle/operators/lstm_op.cc
浏览文件 @
cd382866
...
...
@@ -28,6 +28,10 @@ class LSTMOp : public framework::OperatorWithKernel {
"Output(Hidden) of LSTM should not be null."
);
PADDLE_ENFORCE
(
ctx
->
HasOutput
(
"Cell"
),
"Output(Cell) of LSTM should not be null."
);
PADDLE_ENFORCE
(
ctx
->
HasOutput
(
"BatchGate"
),
"Output(BatchGate) of LSTM should not be null."
);
PADDLE_ENFORCE
(
ctx
->
HasOutput
(
"BatchCellPreAct"
),
"Output(BatchGate) of LSTM should not be null."
);
auto
in_dims
=
ctx
->
GetInputDim
(
"Input"
);
PADDLE_ENFORCE_EQ
(
in_dims
.
size
(),
2
,
"Input(X)'s rank must be 2."
);
...
...
@@ -92,11 +96,13 @@ class LSTMOpMaker : public framework::OpProtoAndCheckerMaker {
AddInput
(
"H0"
,
"(Tensor, optional) the initial hidden state is an optional "
"input. This is a tensor with shape (N x D), where N is the "
"batch size, D is the hidden size."
);
"batch size, D is the hidden size."
)
.
AsDispensable
();
AddInput
(
"C0"
,
"(Tensor, optional) the initial cell state is an optional "
"input. This is a tensor with shape (N x D), where N is the "
"batch size. `H0` and `C0` can be NULL but only at the same time"
);
"batch size. `H0` and `C0` can be NULL but only at the same time"
)
.
AsDispensable
();
AddInput
(
"Weight"
,
"(Tensor) the learnable hidden-hidden weights."
" - The shape is (D x 4D), where D is the hidden size. "
...
...
@@ -110,7 +116,8 @@ class LSTMOpMaker : public framework::OpProtoAndCheckerMaker {
" - Bias = {b_c, b_i, b_f, b_o}."
"2. `usePeepholes = True` "
" - The shape is (1 x 7D). "
" - Bias = {b_c, b_i, b_f, b_o, W_ic, W_fc, W_oc}."
);
" - Bias = {b_c, b_i, b_f, b_o, W_ic, W_fc, W_oc}."
)
.
AsDispensable
();
AddOutput
(
"Hidden"
,
"(LoDTensor) the hidden state lod tensor of LSTM operator. "
"The shape and lod is the same with the `Input`."
);
...
...
@@ -208,27 +215,29 @@ class LSTMGradOp : public framework::OperatorWithKernel {
using
framework
::
OperatorWithKernel
::
OperatorWithKernel
;
void
InferShape
(
framework
::
InferShapeContext
*
ctx
)
const
override
{
PADDLE_ENFORCE
(
ctx
->
HasInput
(
framework
::
GradVarName
(
"Hidden"
)),
"Input(Hidden@GRAD) should not be null"
);
PADDLE_ENFORCE
(
ctx
->
HasInput
(
framework
::
GradVarName
(
"Cell"
)),
"Input(Cell@GRAD) should not be null"
);
ctx
->
SetOutputDim
(
framework
::
GradVarName
(
"Input"
),
ctx
->
GetInputDim
(
"Input"
));
if
(
ctx
->
HasInput
(
"Weight"
))
{
ctx
->
SetOutputDim
(
framework
::
GradVarName
(
"Weight"
),
ctx
->
GetInputDim
(
"Weight"
));
}
if
(
ctx
->
HasInput
(
"Bias"
))
{
ctx
->
SetOutputDim
(
framework
::
GradVarName
(
"Bias"
),
ctx
->
GetInputDim
(
"Bias"
));
}
if
(
ctx
->
HasInput
(
"H0"
))
{
ctx
->
SetOutputDim
(
framework
::
GradVarName
(
"H0"
),
ctx
->
GetInputDim
(
"H0"
));
}
if
(
ctx
->
HasInput
(
"C0"
))
{
ctx
->
SetOutputDim
(
framework
::
GradVarName
(
"C0"
),
ctx
->
GetInputDim
(
"C0"
));
}
PADDLE_ENFORCE
(
ctx
->
HasInput
(
"Input"
),
"Input(Input) of LSTM should not be null."
);
PADDLE_ENFORCE
(
ctx
->
HasInput
(
"Hidden"
),
"Input(Hidden) of LSTM should not be null."
);
PADDLE_ENFORCE
(
ctx
->
HasInput
(
"Cell"
),
"Input(Cell) of LSTM should not be null."
);
PADDLE_ENFORCE
(
ctx
->
HasInput
(
"BatchGate"
),
"Input(BatchGate) of LSTM should not be null."
);
PADDLE_ENFORCE
(
ctx
->
HasInput
(
"BatchCellPreAct"
),
"Input(BatchGate) of LSTM should not be null."
);
auto
in_g_name
=
framework
::
GradVarName
(
"Input"
);
if
(
ctx
->
HasOutput
(
in_g_name
))
ctx
->
SetOutputDim
(
in_g_name
,
ctx
->
GetInputDim
(
"Input"
));
auto
w_g_name
=
framework
::
GradVarName
(
"Weight"
);
if
(
ctx
->
HasOutput
(
w_g_name
))
ctx
->
SetOutputDim
(
w_g_name
,
ctx
->
GetInputDim
(
"Weight"
));
auto
b_g_name
=
framework
::
GradVarName
(
"Bias"
);
if
(
ctx
->
HasOutput
(
b_g_name
))
ctx
->
SetOutputDim
(
b_g_name
,
ctx
->
GetInputDim
(
"Bias"
));
}
};
...
...
paddle/operators/lstm_op.h
浏览文件 @
cd382866
...
...
@@ -74,6 +74,7 @@ class LSTMKernel : public framework::OpKernel<T> {
if
(
bias
)
{
T
*
bias_data
=
const_cast
<
T
*>
(
bias
->
data
<
T
>
());
// the code style in LstmMetaValue will be updated later.
lstm_value
.
checkIg
=
bias_data
+
4
*
frame_size
;
lstm_value
.
checkFg
=
lstm_value
.
checkIg
+
frame_size
;
lstm_value
.
checkOg
=
lstm_value
.
checkFg
+
frame_size
;
...
...
@@ -86,10 +87,10 @@ class LSTMKernel : public framework::OpKernel<T> {
// Use the local variable as here.
LoDTensor
batch_hidden
,
batch_cell
;
auto
batch_cell_pre_act
=
*
(
ctx
.
Output
<
LoDTensor
>
(
"BatchCellPreAct"
)
);
auto
*
batch_cell_pre_act
=
ctx
.
Output
<
LoDTensor
>
(
"BatchCellPreAct"
);
batch_hidden
.
mutable_data
<
T
>
(
dims
,
ctx
.
GetPlace
());
batch_cell
.
mutable_data
<
T
>
(
dims
,
ctx
.
GetPlace
());
batch_cell_pre_act
.
mutable_data
<
T
>
(
dims
,
ctx
.
GetPlace
());
batch_cell_pre_act
->
mutable_data
<
T
>
(
dims
,
ctx
.
GetPlace
());
auto
batch_starts
=
batch_gate
->
lod
()[
0
];
size_t
num_batch
=
batch_starts
.
size
()
-
1
;
...
...
@@ -104,7 +105,7 @@ class LSTMKernel : public framework::OpKernel<T> {
Tensor
gate_t
=
batch_gate
->
Slice
(
bstart
,
bend
);
Tensor
out_t
=
batch_hidden
.
Slice
(
bstart
,
bend
);
Tensor
cell_t
=
batch_cell
.
Slice
(
bstart
,
bend
);
Tensor
cell_pre_act_t
=
batch_cell_pre_act
.
Slice
(
bstart
,
bend
);
Tensor
cell_pre_act_t
=
batch_cell_pre_act
->
Slice
(
bstart
,
bend
);
int
cur_batch_size
=
bend
-
bstart
;
...
...
@@ -162,6 +163,7 @@ class LSTMGradKernel : public framework::OpKernel<T> {
auto
&
device_ctx
=
ctx
.
device_context
();
if
(
weight_g
)
{
weight_g
->
mutable_data
<
T
>
(
ctx
.
GetPlace
());
math
::
SetConstant
<
Place
,
T
>
zero
;
zero
(
device_ctx
,
weight_g
,
static_cast
<
T
>
(
0.0
));
}
...
...
@@ -228,7 +230,7 @@ class LSTMGradKernel : public framework::OpKernel<T> {
auto
batch_starts
=
batch_gate
->
lod
()[
0
];
size_t
num_batch
=
batch_starts
.
size
()
-
1
;
for
(
int
n
=
static_cast
<
int
>
(
num_batch
);
n
>=
0
;
n
--
)
{
for
(
int
n
=
static_cast
<
int
>
(
num_batch
)
-
1
;
n
>=
0
;
n
--
)
{
int
bstart
=
static_cast
<
int
>
(
batch_starts
[
n
]);
int
bend
=
static_cast
<
int
>
(
batch_starts
[
n
+
1
]);
...
...
@@ -282,19 +284,32 @@ class LSTMGradKernel : public framework::OpKernel<T> {
math
::
Batch2LoDTensorFunctor
<
Place
,
T
>
to_seq
;
if
(
in_g
)
{
/* backward data */
in_g
->
mutable_data
<
T
>
(
ctx
.
GetPlace
());
to_seq
(
device_ctx
,
batch_gate_g
,
*
in_g
);
}
if
(
bias
&&
bias_g
)
{
/* backward bias */
bias_g
->
mutable_data
<
T
>
(
ctx
.
GetPlace
());
auto
bias_g_e
=
EigenMatrix
<
T
>::
From
(
*
bias_g
);
auto
gate_g_e
=
EigenMatrix
<
T
>::
From
(
batch_gate_g
);
Eigen
::
array
<
int
,
2
>
extents
({{
1
,
4
*
frame_size
}});
Eigen
::
array
<
int
,
2
>
offsets
({{
0
,
0
}});
auto
bg
=
bias_g_e
.
slice
(
offsets
,
extents
)
.
reshape
(
Eigen
::
array
<
int
,
2
>
({{
1
,
frame_size
*
4
}}));
bg
.
device
(
ctx
.
GetEigenDevice
<
Place
>
())
=
gate_g_e
.
sum
(
Eigen
::
array
<
int
,
1
>
({{
0
}}));
// Following Eigen computation failed for double type on GPU device.
// bias_g->mutable_data<T>(ctx.GetPlace());
// Tensor bias_mat;
// bias_mat.ShareDataWith(*bias_g);
// bias_mat.Resize({1, 4 * frame_size});
// auto bias_g_e = EigenVector<T>::Flatten(bias_mat);
// auto gate_g_e = EigenMatrix<T>::From(batch_gate_g);
// Eigen::array<int, 1> dims{{0}};
// bias_g_e.device(ctx.GetEigenDevice<Place>()) = gate_g_e.sum(dims);
int
m
=
static_cast
<
int
>
(
batch_gate_g
.
dims
()[
0
]);
int
n
=
static_cast
<
int
>
(
batch_gate_g
.
dims
()[
1
]);
Tensor
ones
;
ones
.
mutable_data
<
T
>
({
1
,
m
},
ctx
.
GetPlace
());
math
::
SetConstant
<
Place
,
T
>
set
;
set
(
device_ctx
,
&
ones
,
static_cast
<
T
>
(
1.0
));
math
::
gemv
<
Place
,
T
>
(
device_ctx
,
true
,
m
,
n
,
1.
,
batch_gate_g
.
data
<
T
>
(),
ones
.
data
<
T
>
(),
0.
,
bias_g
->
data
<
T
>
());
}
}
};
...
...
paddle/operators/math/math_function.cc
浏览文件 @
cd382866
...
...
@@ -211,6 +211,26 @@ void batched_gemm<platform::CPUPlace, double>(
}
#endif
template
<
>
void
gemv
<
platform
::
CPUPlace
,
float
>
(
const
platform
::
DeviceContext
&
context
,
const
bool
trans_a
,
const
int
M
,
const
int
N
,
const
float
alpha
,
const
float
*
A
,
const
float
*
B
,
const
float
beta
,
float
*
C
)
{
CBLAS_TRANSPOSE
transA
=
(
trans_a
==
false
)
?
CblasNoTrans
:
CblasTrans
;
cblas_sgemv
(
CblasRowMajor
,
transA
,
M
,
N
,
alpha
,
A
,
N
,
B
,
1
,
beta
,
C
,
1
);
}
template
<
>
void
gemv
<
platform
::
CPUPlace
,
double
>
(
const
platform
::
DeviceContext
&
context
,
const
bool
trans_a
,
const
int
M
,
const
int
N
,
const
double
alpha
,
const
double
*
A
,
const
double
*
B
,
const
double
beta
,
double
*
C
)
{
CBLAS_TRANSPOSE
transA
=
(
trans_a
==
false
)
?
CblasNoTrans
:
CblasTrans
;
cblas_dgemv
(
CblasRowMajor
,
transA
,
M
,
N
,
alpha
,
A
,
N
,
B
,
1
,
beta
,
C
,
1
);
}
template
struct
SetConstant
<
platform
::
CPUPlace
,
float
>;
}
// namespace math
...
...
paddle/operators/math/math_function.cu
浏览文件 @
cd382866
...
...
@@ -203,6 +203,33 @@ void batched_gemm<platform::GPUPlace, double>(
&
beta
,
C
,
ldc
,
strideC
,
batchCount
));
}
template
<
>
void
gemv
<
platform
::
GPUPlace
,
float
>
(
const
platform
::
DeviceContext
&
context
,
const
bool
trans_a
,
const
int
M
,
const
int
N
,
const
float
alpha
,
const
float
*
A
,
const
float
*
B
,
const
float
beta
,
float
*
C
)
{
cublasOperation_t
cuTransA
=
(
trans_a
==
false
)
?
CUBLAS_OP_T
:
CUBLAS_OP_N
;
PADDLE_ENFORCE
(
platform
::
dynload
::
cublasSgemv
(
reinterpret_cast
<
const
platform
::
CUDADeviceContext
&>
(
context
)
.
cublas_handle
(),
cuTransA
,
N
,
M
,
&
alpha
,
A
,
N
,
B
,
1
,
&
beta
,
C
,
1
));
}
template
<
>
void
gemv
<
platform
::
GPUPlace
,
double
>
(
const
platform
::
DeviceContext
&
context
,
const
bool
trans_a
,
const
int
M
,
const
int
N
,
const
double
alpha
,
const
double
*
A
,
const
double
*
B
,
const
double
beta
,
double
*
C
)
{
cublasOperation_t
cuTransA
=
(
trans_a
==
false
)
?
CUBLAS_OP_T
:
CUBLAS_OP_N
;
PADDLE_ENFORCE
(
platform
::
dynload
::
cublasDgemv
(
reinterpret_cast
<
const
platform
::
CUDADeviceContext
&>
(
context
)
.
cublas_handle
(),
cuTransA
,
N
,
M
,
&
alpha
,
A
,
N
,
B
,
1
,
&
beta
,
C
,
1
));
}
template
struct
SetConstant
<
platform
::
GPUPlace
,
float
>;
}
// namespace math
...
...
paddle/operators/math/math_function.h
浏览文件 @
cd382866
...
...
@@ -93,6 +93,11 @@ void batched_gemm(const platform::DeviceContext& context,
const
T
*
A
,
const
T
*
B
,
const
T
beta
,
T
*
C
,
const
int
batchCount
,
const
int
strideA
,
const
int
strideB
);
template
<
typename
Place
,
typename
T
>
void
gemv
(
const
platform
::
DeviceContext
&
context
,
const
bool
trans_a
,
const
int
M
,
const
int
N
,
const
T
alpha
,
const
T
*
A
,
const
T
*
B
,
const
T
beta
,
T
*
C
);
template
<
typename
Place
,
typename
T
>
struct
SetConstant
{
void
operator
()(
const
platform
::
DeviceContext
&
context
,
...
...
paddle/operators/math/sequence2batch.h
浏览文件 @
cd382866
...
...
@@ -58,7 +58,7 @@ class LoDTensor2BatchFunctor {
if
(
!
is_cal_batch_lod
)
{
auto
lods
=
batch
.
lod
();
PADDLE_ENFORCE_EQ
(
lods
.
size
(),
2UL
);
PADDLE_ENFORCE_EQ
(
lods
[
1
].
size
(),
lod_tensor
.
dims
()[
1
]);
PADDLE_ENFORCE_EQ
(
lods
[
1
].
size
(),
lod_tensor
.
dims
()[
0
]);
CopyMatrixRowsFunctor
<
Place
,
T
>
to_batch
;
to_batch
(
context
,
lod_tensor
,
lods
[
1
].
data
(),
batch
,
true
);
return
;
...
...
@@ -142,11 +142,8 @@ class Batch2LoDTensorFunctor {
auto
in_lod
=
batch
.
lod
();
PADDLE_ENFORCE_EQ
(
in_lod
.
size
(),
2UL
,
"The LoD size of input `batch` should be 2."
);
auto
out_lod
=
lod_tensor
.
lod
()[
0
];
auto
num
=
out_lod
[
out_lod
.
size
()
-
1
];
PADDLE_ENFORCE_EQ
(
num
,
lod_tensor
.
dims
()[
0
]);
PADDLE_ENFORCE_EQ
(
num
,
in_lod
[
1
].
size
());
PADDLE_ENFORCE_EQ
(
num
,
batch
.
dims
()[
0
]);
PADDLE_ENFORCE_EQ
(
in_lod
[
1
].
size
(),
static_cast
<
size_t
>
(
lod_tensor
.
dims
()[
0
]));
CopyMatrixRowsFunctor
<
Place
,
T
>
to_seq
;
size_t
*
index
=
in_lod
[
1
].
data
();
to_seq
(
context
,
batch
,
index
,
lod_tensor
,
false
);
...
...
python/paddle/v2/framework/tests/test_lstm_op.py
浏览文件 @
cd382866
...
...
@@ -100,9 +100,9 @@ def lstm(
cell
.
append
(
c_pre
.
flatten
())
gate
.
append
(
g_pre
.
flatten
())
hidden
=
np
.
array
(
hidden
).
astype
(
"float64"
)
cell
=
np
.
array
(
cell
).
astype
(
"float64"
)
gate
=
np
.
array
(
gate
).
astype
(
"float64"
)
hidden
=
np
.
array
(
hidden
).
astype
(
'float64'
)
cell
=
np
.
array
(
cell
).
astype
(
'float64'
)
gate
=
np
.
array
(
gate
).
astype
(
'float64'
)
hidden
=
_reverse
(
hidden
,
offset
)
if
is_reverse
else
hidden
cell
=
_reverse
(
cell
,
offset
)
if
is_reverse
else
cell
...
...
@@ -115,28 +115,35 @@ def lstm(
class
TestLstmOp
(
OpTest
):
def
set_data
(
self
):
self
.
lod
=
[[
0
,
2
,
6
,
9
]]
self
.
D
=
64
self
.
sort_idx
=
[
2
,
6
,
0
,
3
,
7
,
1
,
4
,
8
,
5
]
#
self.lod = [[0, 2, 6, 9]]
#
self.D = 64
#
self.sort_idx = [2, 6, 0, 3, 7, 1, 4, 8, 5]
self
.
act_gate
=
"sigmoid"
self
.
act_cell
=
"tanh"
self
.
act_cand
=
"tanh"
self
.
lod
=
[[
0
,
1
]]
self
.
D
=
4
self
.
sort_idx
=
[
0
]
# self.act_gate = 'identity'
# self.act_cell = 'identity'
# self.act_cand = 'identity'
self
.
act_gate
=
'sigmoid'
self
.
act_cell
=
'tanh'
self
.
act_cand
=
'tanh'
self
.
is_reverse
=
False
def
setUp
(
self
):
self
.
set_data
()
self
.
op_type
=
"lstm"
self
.
op_type
=
'lstm'
T
=
self
.
lod
[
0
][
-
1
]
N
=
len
(
self
.
lod
[
0
])
-
1
x
=
np
.
random
.
normal
(
size
=
(
T
,
4
*
self
.
D
)).
astype
(
"float64"
)
h0
=
np
.
zeros
((
N
,
self
.
D
)).
astype
(
"float64"
)
c0
=
np
.
zeros
((
N
,
self
.
D
)).
astype
(
"float64"
)
w
=
np
.
random
.
normal
(
size
=
(
self
.
D
,
4
*
self
.
D
)).
astype
(
"float64"
)
b
=
np
.
random
.
normal
(
size
=
(
1
,
7
*
self
.
D
)).
astype
(
"float64"
)
x
=
np
.
random
.
normal
(
size
=
(
T
,
4
*
self
.
D
)).
astype
(
'float64'
)
h0
=
np
.
zeros
((
N
,
self
.
D
)).
astype
(
'float64'
)
c0
=
np
.
zeros
((
N
,
self
.
D
)).
astype
(
'float64'
)
w
=
np
.
random
.
normal
(
size
=
(
self
.
D
,
4
*
self
.
D
)).
astype
(
'float64'
)
b
=
np
.
random
.
normal
(
size
=
(
1
,
7
*
self
.
D
)).
astype
(
'float64'
)
w_b
=
b
[:,
0
:
4
*
self
.
D
]
w_c
=
b
[:,
4
*
self
.
D
:]
...
...
@@ -158,32 +165,37 @@ class TestLstmOp(OpTest):
self
.
outputs
=
{
'Hidden'
:
(
h
,
self
.
lod
),
'Cell'
:
(
c
,
self
.
lod
),
'BatchGate'
:
g_sort
#'BatchGate': g_sort,
}
self
.
attrs
=
{
'usePeepholes'
:
True
,
'isReverse'
:
self
.
is_reverse
,
'gateActivation'
:
'sigmoid'
,
'cellActivation'
:
'tanh'
,
'candidateActivation'
:
'tanh'
'gateActivation'
:
self
.
act_gate
,
'cellActivation'
:
self
.
act_cell
,
'candidateActivation'
:
self
.
act_cand
}
def
test_check_output
(
self
):
def
not_
test_check_output
(
self
):
self
.
check_output
()
class
TestLstmOpRerverse
(
TestLstmOp
):
def
set_data
(
self
):
self
.
lod
=
[[
0
,
2
,
6
,
9
]]
self
.
D
=
64
self
.
sort_idx
=
[
2
,
6
,
0
,
3
,
7
,
1
,
4
,
8
,
5
]
self
.
act_gate
=
"sigmoid"
self
.
act_cell
=
"tanh"
self
.
act_cand
=
"tanh"
self
.
is_reverse
=
True
if
__name__
==
"__main__"
:
def
test_check_grad
(
self
):
self
.
outputs
[
'BatchGate'
]
=
None
self
.
outputs
[
'BatchCellPreAct'
]
=
None
self
.
check_grad
([
'Input'
,
'Weight'
],
[
'Hidden'
,
'Cell'
])
#['Input', 'Weight', 'Bias'], ['Hidden', 'Cell'])
#class TestLstmOpRerverse(TestLstmOp):
# def set_data(self):
# self.lod = [[0, 2, 6, 9]]
# self.D = 64
# self.sort_idx = [2, 6, 0, 3, 7, 1, 4, 8, 5]
#
# self.act_gate = 'sigmoid'
# self.act_cell = 'tanh'
# self.act_cand = 'tanh'
#
# self.is_reverse = True
if
__name__
==
'__main__'
:
unittest
.
main
()
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录