Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)
Commit 32f5c9dd, authored Oct 02, 2017 by qiaolongfei
Parent: 7163dd04

recurrent_op pass the unit test
Showing 6 changed files with 66 additions and 136 deletions (+66, -136):
paddle/operators/recurrent_op.cc                        +20  -67
paddle/operators/recurrent_op.h                          +2  -21
paddle/operators/rnn/recurrent_op_utils.cc              +23  -32
paddle/operators/rnn/recurrent_op_utils.h                +3   -3
paddle/operators/sum_op.cc                               +3   -2
python/paddle/v2/framework/tests/test_recurrent_op.py   +15  -11
paddle/operators/recurrent_op.cc

@@ -28,7 +28,8 @@ using Variable = framework::Variable;
 using Tensor = framework::Tensor;
 using LoDTensor = framework::LoDTensor;
 
-void RecurrentAlgorithm::InferShape(const Scope& scope) const {
+void RecurrentAlgorithm::Run(const Scope& scope,
+                             const platform::DeviceContext& dev_ctx) const {
   auto* input0 = scope.FindVar(arg_->inlinks[0]);
   PADDLE_ENFORCE_NOT_NULL(input0);
   seq_len_ = input0->GetMutable<LoDTensor>()->dims()[0];
@@ -36,38 +37,16 @@ void RecurrentAlgorithm::InferShape(const Scope& scope) const {
   CreateScopes(scope);
   auto& step_scopes = GetStepScopes(scope);
-  rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_,
-                     true /*infer_shape_mode*/);
-  InitMemories(step_scopes[0], true /*infer_shape_mode*/);
+  rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_);
+  InitMemories(step_scopes[0]);
 
   for (size_t i = 0; i < seq_len_; i++) {
+    // create output alias variables
     if (i > 0) {
-      rnn::LinkMemories(step_scopes, arg_->memories, i, -1,
-                        true /*infer_shape_mode*/);
-    }
-    (*stepnet_)->InferShape(*step_scopes[i]);
-  }
-  rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_,
-                     true /*infer_shape_mode*/);
-}
-
-void RecurrentAlgorithm::Run(const Scope& scope,
-                             const platform::DeviceContext& dev_ctx) const {
-  auto step_scopes = GetStepScopes(scope);
-  rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_,
-                     false /*infer_shape_mode*/);
-  InitMemories(step_scopes[0], false /*infer_shape_mode*/);
-
-  for (size_t step_id = 0; step_id < seq_len_; step_id++) {
-    // create output alias variables
-    if (step_id > 0) {
-      rnn::LinkMemories(step_scopes, arg_->memories, step_id, -1,
-                        false /*infer_shape_mode*/);
+      rnn::LinkMemories(step_scopes, arg_->memories, i, -1);
     }
-    (*stepnet_)->Run(*step_scopes[step_id], dev_ctx);
+    (*stepnet_)->Run(*step_scopes[i], dev_ctx);
   }
-  rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_,
-                     false /*infer_shape_mode*/);
+  rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_);
 }
 
 void RecurrentAlgorithm::CreateScopes(const Scope& scope) const {
@@ -105,8 +84,7 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope) const {
   }
 }
 
-void RecurrentAlgorithm::InitMemories(Scope* step_scope,
-                                      bool infer_shape_mode) const {
+void RecurrentAlgorithm::InitMemories(Scope* step_scope) const {
   for (auto& attr : arg_->memories) {
     auto* pre_mem = step_scope->NewVar(attr.pre_var)->GetMutable<LoDTensor>();
     PADDLE_ENFORCE(step_scope->FindVar(attr.boot_var) != nullptr,
@@ -114,13 +92,10 @@ void RecurrentAlgorithm::InitMemories(Scope* step_scope,
                    attr.boot_var);
     auto* boot_mem =
         step_scope->FindVar(attr.boot_var)->GetMutable<LoDTensor>();
-    if (infer_shape_mode) {
-      pre_mem->Resize(boot_mem->dims());
-      PADDLE_ENFORCE_EQ(pre_mem->dims().size(), 2);
-    } else {
-      pre_mem->ShareDataWith<float>(*boot_mem);
-    }
+    pre_mem->Resize(boot_mem->dims());
+    PADDLE_ENFORCE_EQ(pre_mem->dims().size(), 2);
+    pre_mem->ShareDataWith<float>(*boot_mem);
   }
 }
 
 const rnn::ArgumentName RecurrentOp::kArgName{
@@ -169,23 +144,22 @@ class RecurrentAlgorithmProtoAndCheckerMaker
 void RecurrentGradientAlgorithm::Run(
     const Scope& scope, const platform::DeviceContext& dev_ctx) const {
+  seq_len_ =
+      scope.FindVar(arg_->inlinks[0])->GetMutable<LoDTensor>()->dims()[0];
   auto step_scopes = GetStepScopes(scope);
-  rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_,
-                     false /*infer_shape_mode*/);
+  rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_);
   for (int step_id = seq_len_ - 1; step_id >= 0; --step_id) {
     if (static_cast<size_t>(step_id) != seq_len_ - 1) {
-      rnn::LinkMemories(step_scopes, arg_->memories, step_id, 1,
-                        false /*infer_shape_mode*/);
+      rnn::LinkMemories(step_scopes, arg_->memories, step_id, 1);
     }
     (*stepnet_)->Run(*step_scopes[step_id], dev_ctx);
   }
-  LinkBootMemoryGradients(step_scopes[0], false);
-  rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_,
-                     false /*infer_shape_mode*/);
+  rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_);
+  LinkBootMemoryGradients(step_scopes[0]);
 }
 
 void RecurrentGradientAlgorithm::LinkBootMemoryGradients(
-    Scope* step_scope, bool infer_shape_mode) const {
+    Scope* step_scope) const {
   for (auto& attr : arg_->memories) {
     PADDLE_ENFORCE(step_scope->FindVar(attr.var) != nullptr,
                    "memory variable [%s] does not exists",
                    attr.var);
@@ -194,30 +168,9 @@ void RecurrentGradientAlgorithm::LinkBootMemoryGradients(
     auto* mem_grad = step_scope->NewVar(attr.var)->GetMutable<LoDTensor>();
     auto* boot_mem_grad =
         step_scope->NewVar(attr.boot_var)->GetMutable<LoDTensor>();
-    if (infer_shape_mode) {
-      boot_mem_grad->Resize(mem_grad->dims());
-    } else {
-      boot_mem_grad->ShareDataWith<float>(*mem_grad);
-    }
+    boot_mem_grad->Resize(mem_grad->dims());
+    boot_mem_grad->ShareDataWith<float>(*mem_grad);
   }
 }
 
-void RecurrentGradientAlgorithm::InferShape(const Scope& scope) const {
-  seq_len_ =
-      scope.FindVar(arg_->inlinks[0])->GetMutable<LoDTensor>()->dims()[0];
-  auto step_scopes = GetStepScopes(scope);
-  rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_,
-                     true /*infer_shape_mode*/);
-  for (int step_id = seq_len_ - 1; step_id >= 0; --step_id) {
-    if (static_cast<size_t>(step_id) != seq_len_ - 1) {
-      rnn::LinkMemories(step_scopes, arg_->memories, step_id, 1,
-                        true /*infer_shape_mode*/);
-    }
-    (*stepnet_)->InferShape(*step_scopes[step_id]);
-  }
-  rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_,
-                     true /*infer_shape_mode*/);
-  LinkBootMemoryGradients(step_scopes[0], true /*infer_shape_mode*/);
-}
-
 RecurrentGradientOp::RecurrentGradientOp(
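The net effect in recurrent_op.cc is that the forward pass no longer makes a separate shape-inference sweep: Run itself sets seq_len_, segments the input, links each step's pre-memory to the previous step, runs the step net, and concatenates the outputs in one loop. A rough numpy sketch of that single-pass control flow (illustrative only; run_rnn and step are stand-ins for the C++ methods, mirroring the PySimpleRNN reference in the test further down):

import numpy as np

def run_rnn(x, h_boot, step):
    # x: (seq_len, batch, dim); the real operator keeps one Scope per step
    seq_len = x.shape[0]                            # seq_len_ = input0->dims()[0]
    mems = []
    for i in range(seq_len):
        pre_mem = mems[i - 1] if i > 0 else h_boot  # InitMemories / LinkMemories(..., i, -1)
        mems.append(step(x[i], pre_mem))            # (*stepnet_)->Run(*step_scopes[i], ...)
    return np.stack(mems)                           # rnn::ConcatOutputs(...)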
paddle/operators/recurrent_op.h

@@ -41,11 +41,6 @@ class RecurrentAlgorithm {
     stepnet_ = stepnet;
   }
 
-  /**
-   * InferShape must be called before Run.
-   */
-  void InferShape(const framework::Scope& scope) const;
-
  protected:
   /*
    * The step scopes will be stored in the father scope as a variable.
@@ -61,7 +56,7 @@ class RecurrentAlgorithm {
         ->GetMutable<std::vector<framework::Scope*>>();
   }
 
-  void InitMemories(framework::Scope* step_scopes,
-                    bool infer_shape_mode) const;
+  void InitMemories(framework::Scope* step_scopes) const;
 
  private:
   std::unique_ptr<framework::OperatorBase>* stepnet_;
@@ -91,13 +86,7 @@ class RecurrentGradientAlgorithm {
   void Run(const framework::Scope& scope,
            const platform::DeviceContext& dev_ctx) const;
 
-  void LinkBootMemoryGradients(framework::Scope* step_scopes,
-                               bool infer_shape_mode) const;
-
-  /**
-   * InferShape must be called before Run.
-   */
-  void InferShape(const framework::Scope& scope) const;
+  void LinkBootMemoryGradients(framework::Scope* step_scopes) const;
 
  protected:
   inline const std::vector<framework::Scope*>& GetStepScopes(
@@ -136,10 +125,6 @@ class RecurrentOp : public framework::OperatorBase {
   const OperatorBase& stepnet() const { return *stepnet_; }
 
-  void InferShape(const framework::Scope& scope) const {
-    alg_.InferShape(scope);
-  }
-
   static const rnn::ArgumentName kArgName;
 
  private:
@@ -162,10 +147,6 @@ class RecurrentGradientOp : public framework::OperatorBase {
     PADDLE_THROW("Not Implemented");
   }
 
-  void InferShape(const framework::Scope& scope) const {
-    alg_.InferShape(scope);
-  }
-
   void Run(const framework::Scope& scope,
            const platform::DeviceContext& dev_ctx) const override {
     alg_.Run(scope, dev_ctx);
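The removed header comments stated that InferShape had to be called before Run; after this commit the public surface of both algorithms (and both op wrappers) is Run alone. A schematic contrast, with plain Python functions standing in for the C++ methods (names here are illustrative, not Paddle API):

def old_protocol(op, scope, ctx):
    op.infer_shape(scope)  # pass 1: resize variables (infer_shape_mode == true)
    op.run(scope, ctx)     # pass 2: compute values   (infer_shape_mode == false)

def new_protocol(op, scope, ctx):
    op.run(scope, ctx)     # shapes are set and values computed in one pass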
paddle/operators/rnn/recurrent_op_utils.cc

@@ -25,7 +25,7 @@ using LoDTensor = framework::LoDTensor;
 
 void SegmentInputs(const std::vector<Scope*>& step_scopes,
                    const std::vector<std::string>& inlinks,
-                   const size_t seq_len, bool infer_shape_mode) {
+                   const size_t seq_len) {
   PADDLE_ENFORCE(!inlinks.empty(), "no in links are provided.");
   for (size_t i = 0; i < inlinks.size(); ++i) {
     // global inputs
@@ -41,11 +41,9 @@ void SegmentInputs(const std::vector<Scope*>& step_scopes,
     for (size_t j = 0; j < seq_len; j++) {
       Tensor* step_input =
           step_scopes[j]->NewVar(inlinks[i])->GetMutable<Tensor>();
-      if (!infer_shape_mode) {
-        // The input of operators of each step is Tensor here.
-        // Maybe need to modify Slice function.
-        *step_input = input->Slice<float>(j, j + 1);
-      }
+      // The input of operators of each step is Tensor here.
+      // Maybe need to modify Slice function.
+      *step_input = input->Slice<float>(j, j + 1);
       step_input->Resize(step_dims);
     }
   }
@@ -53,14 +51,13 @@ void SegmentInputs(const std::vector<Scope*>& step_scopes,
 
 void ConcatOutputs(const std::vector<Scope*>& step_scopes,
                    const std::vector<std::string>& outlinks,
-                   const size_t seq_len, bool infer_shape_mode) {
+                   const size_t seq_len) {
   for (size_t i = 0; i < outlinks.size(); i++) {
     auto output_var = step_scopes[0]->parent().FindVar(outlinks[i]);
     PADDLE_ENFORCE_NOT_NULL(output_var, "output link [%s] is not in scope.",
                             outlinks[i]);
     LoDTensor* output = output_var->GetMutable<LoDTensor>();
-    if (infer_shape_mode) {
-      auto step_scope_var = step_scopes[0]->FindVar(outlinks[i]);
-      PADDLE_ENFORCE_NOT_NULL(step_scope_var, "%s not in scope", outlinks[i]);
-      f::DDim step_dims =
+    auto step_scope_var = step_scopes[0]->FindVar(outlinks[i]);
+    PADDLE_ENFORCE_NOT_NULL(step_scope_var, "%s not in scope", outlinks[i]);
+    f::DDim step_dims =
@@ -68,7 +65,6 @@ void ConcatOutputs(const std::vector<Scope*>& step_scopes,
     std::vector<int64_t> dims_vec = vectorize(step_dims);
     dims_vec.insert(dims_vec.begin(), seq_len);
     output->Resize(f::make_ddim(dims_vec));
-    } else {
     output->mutable_data<float>(platform::CPUPlace());
     for (size_t j = 0; j < seq_len; j++) {
       LoDTensor* step_output =
@@ -79,13 +75,11 @@ void ConcatOutputs(const std::vector<Scope*>& step_scopes,
           .CopyFrom<float>(*step_output, platform::CPUPlace());
     }
-    }
   }
 }
 
 void LinkMemories(const std::vector<Scope*>& scopes,
                   const std::vector<rnn::MemoryAttr>& memories,
-                  const size_t step_id, const int offset,
-                  bool infer_shape_mode) {
+                  const size_t step_id, const int offset) {
   PADDLE_ENFORCE_LT(step_id, scopes.size(),
                     "step [%d] is out of range of step scopes' size [%d]",
                     step_id, scopes.size());
@@ -100,12 +94,9 @@ void LinkMemories(const std::vector<Scope*>& scopes,
   for (auto& attr : memories) {
     auto mem = scope->FindVar(attr.pre_var)->GetMutable<LoDTensor>();
     auto linked_mem = linked_scope->FindVar(attr.var)->GetMutable<LoDTensor>();
-    if (infer_shape_mode) {
-      mem->Resize(linked_mem->dims());
-    } else {
-      mem->ShareDataWith<float>(*linked_mem);
-    }
+    mem->Resize(linked_mem->dims());
+    mem->ShareDataWith<float>(*linked_mem);
   }
 }
 
 void InitArgument(const ArgumentName& name, Argument* arg,
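With the mode flag gone, SegmentInputs always slices the sequence input into per-step tensors and ConcatOutputs always resizes and fills the merged output. A hedged numpy analogue of the two helpers (shapes only; the real code works on per-step Scopes and LoDTensors):

import numpy as np

def segment_inputs(x, seq_len):
    # input->Slice<float>(j, j + 1), then Resize(step_dims)
    return [x[j:j + 1].reshape(x.shape[1:]) for j in range(seq_len)]

def concat_outputs(step_outputs, seq_len):
    # dims_vec.insert(dims_vec.begin(), seq_len); output->Resize(...)
    out = np.empty((seq_len,) + step_outputs[0].shape,
                   dtype=step_outputs[0].dtype)
    for j in range(seq_len):
        out[j] = step_outputs[j]  # Slice(j, j + 1).CopyFrom(step_output)
    return out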
paddle/operators/rnn/recurrent_op_utils.h

@@ -64,18 +64,18 @@ struct ArgumentName {
  */
 void SegmentInputs(const std::vector<Scope*>& step_scopes,
                    const std::vector<std::string>& inlinks,
-                   const size_t seq_len, bool infer_shape_mode);
+                   const size_t seq_len);
 
 /**
  * Process outputs of step nets and merge to variables.
  */
 void ConcatOutputs(const std::vector<Scope*>& step_scopes,
                    const std::vector<std::string>& outlinks,
-                   const size_t seq_len, bool infer_shape_mode);
+                   const size_t seq_len);
 
 void LinkMemories(const std::vector<Scope*>& step_scopes,
                   const std::vector<MemoryAttr>& memories,
-                  const size_t step_id, const int offset,
-                  bool infer_shape_mode);
+                  const size_t step_id, const int offset);
 
 void InitArgument(const ArgumentName& name, Argument* arg,
                   const framework::OperatorBase& op, bool is_grad = false);
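LinkMemories keeps its step_id and offset parameters: in the forward Run the offset is -1, so step i's pre-memory is backed by step i-1's memory, while the gradient Run uses offset +1 to link in the opposite direction. A small sketch with dicts standing in for per-step Scopes (the dict model and the h@mem/h@pre names are illustrative, borrowed from the test below):

def link_memories(step_scopes, step_id, offset, var="h@mem", pre_var="h@pre"):
    assert 0 <= step_id + offset < len(step_scopes)
    # mem->Resize(linked_mem->dims()); mem->ShareDataWith(*linked_mem);
    step_scopes[step_id][pre_var] = step_scopes[step_id + offset][var]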
paddle/operators/sum_op.cc

@@ -22,14 +22,15 @@ class SumOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContextBase* ctx) const override {
+    PADDLE_ENFORCE(ctx->HasInputs("X"), "Inputs(X) should not be null");
     auto x_dims = ctx->GetInputsDim("X");
-    PADDLE_ENFORCE(!x_dims.empty(), "Input(X) of SumOp should not be null.");
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
                    "Output(Out) of SumOp should not be null.");
 
-    auto in_dim = x_dims[0];
     size_t N = x_dims.size();
     PADDLE_ENFORCE_GT(N, 1, "Input tensors count should > 1.");
+    auto in_dim = x_dims[0];
     for (size_t i = 1; i < N; i++) {
       auto dim = x_dims[i];
       PADDLE_ENFORCE(in_dim == dim, "Input tensors must have same shape");
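SumOp::InferShape now verifies the inputs exist before reading their dims, requires more than one input, and requires all inputs to share one shape. That contract, sketched in numpy (sum_op here is a stand-in for the operator, not the Paddle binding):

import numpy as np

def sum_op(xs):
    assert len(xs) > 1, "Input tensors count should > 1."
    assert all(x.shape == xs[0].shape for x in xs), \
        "Input tensors must have same shape"
    return np.add.reduce(xs)  # Out takes the common input shape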
python/paddle/v2/framework/tests/test_recurrent_op.py

@@ -16,14 +16,17 @@ class PySimpleRNN(object):
     '''
 
     def __init__(self, input_dim=30, batch_size=50, weight_dim=15,
                  sent_len=11):
-        self.x = np.random.normal(size=(sent_len, batch_size, input_dim))
-        self.W = np.random.normal(size=(input_dim, input_dim))
-        self.U = np.random.normal(size=(input_dim, input_dim))
-        self.h_boot = np.random.normal(size=(batch_size, input_dim))
+        self.x = np.random.normal(size=(sent_len, batch_size,
+                                        input_dim)).astype("float32")
+        self.W = np.random.normal(size=(input_dim, input_dim)).astype("float32")
+        self.U = np.random.normal(size=(input_dim, input_dim)).astype("float32")
+        self.h_boot = np.random.normal(size=(batch_size,
+                                             input_dim)).astype("float32")
         # memories
         self.mems = [
-            np.zeros(shape=(batch_size, input_dim)) for i in range(sent_len)
+            np.zeros(shape=(batch_size, input_dim)).astype("float32")
+            for i in range(sent_len)
         ]
 
     def forward(self):
@@ -36,7 +39,7 @@ class PySimpleRNN(object):
         return [self.x[i] for i in range(self.x.shape[0])]
 
     def concat_outputs(self):
-        return np.array(self.mems)
+        return np.array(self.mems).astype("float32")
 
     def step(self, step_id, x):
         '''
@@ -47,8 +50,8 @@ class PySimpleRNN(object):
             pre_mem = self.mems[step_id - 1]
         else:
             pre_mem = self.h_boot
-        xW = np.matmul(x, self.W)
-        hU = np.matmul(pre_mem, self.U)
+        xW = np.matmul(x, self.W).astype("float32")
+        hU = np.matmul(pre_mem, self.U).astype("float32")
 
         sum = xW + hU
         self.mems[step_id] = py_sigmoid(sum)
@@ -102,7 +105,8 @@ class RecurrentOpTest(unittest.TestCase):
         self.create_step_net()
         ctx = core.DeviceContext.create(core.CPUPlace())
         self.rnnop.run(self.scope, ctx)
-        return np.array(self.scope.find_var("h@mem").get_tensor())
+        return np.array(self.scope.find_var("h@mem").get_tensor()).astype(
+            "float32")
 
     def create_global_variables(self):
         # create inlink
@@ -142,7 +146,7 @@ class RecurrentOpTest(unittest.TestCase):
         stepnet = core.Net.create()
         x_fc_op = Operator("mul", X="x", Y="W", Out="Wx")
         h_fc_op = Operator("mul", X="h@pre", Y="U", Out="Uh")
-        sum_op = Operator("add", X="Wx", Y="Uh", Out="sum")
+        sum_op = Operator("sum", X=["Wx", "Uh"], Out="sum")
         sig_op = Operator("sigmoid", X="sum", Y="h@mem")
 
         for op in [x_fc_op, h_fc_op, sum_op, sig_op]:
@@ -179,7 +183,7 @@ class RecurrentGradientOpTest(unittest.TestCase):
         stepnet = core.Net.create()
         x_fc_op = Operator("mul", X="x@alias", Y="W", Out="Wx")
         h_fc_op = Operator("mul", X="h@pre", Y="U", Out="Uh")
-        sum_op = Operator("add", X="Wx", Y="Uh", Out="sum")
+        sum_op = Operator("sum", X=["Wx", "Uh"], Out="sum")
         sig_op = Operator("sigmoid", X="sum", Y="h@alias")
 
         for op in [x_fc_op, h_fc_op, sum_op, sig_op]:
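The step net assembled in create_step_net computes h = sigmoid(x.W + h_pre.U) each step, which is exactly what PySimpleRNN.step does in numpy; the only wiring change here is that the two partial products are combined by the multi-input "sum" op (X=["Wx", "Uh"]) instead of the binary "add". For reference, the per-step math as a standalone function (matching the py_sigmoid helper the test uses):

import numpy as np

def py_sigmoid(z):
    return 1. / (1. + np.exp(-z))

def step(x, pre_mem, W, U):
    xW = np.matmul(x, W).astype("float32")        # mul: X="x",     Y="W" -> "Wx"
    hU = np.matmul(pre_mem, U).astype("float32")  # mul: X="h@pre", Y="U" -> "Uh"
    return py_sigmoid(xW + hU)                    # sum -> sigmoid -> "h@mem"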