Commit b5e67fce

Authored Sep 20, 2017 by Yan Chunwei
Committed by GitHub on Sep 20, 2017
RNNOp remove alias (#4274)
* remove alias
Parent: 686f3b88
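In short: this commit deletes the RNN link-alias mechanism. The rnn::Link struct, which paired an external (parent-scope) variable name with an internal per-step alias, is removed; Argument::inlinks and Argument::outlinks become plain std::vector<std::string>; and step scopes now reach global variables through a new Scope::parent() accessor instead of re-registering them under alias names. A minimal sketch of the before/after data layout, in toy code rather than Paddle's actual API:

```cpp
#include <iostream>
#include <string>
#include <vector>

// Before this commit, every RNN link carried two names (mirroring the
// rnn::Link struct deleted in recurrent_op_utils.h below).
struct Link {
  std::string internal;  // name used inside each step scope
  std::string external;  // name of the global, parent-scope variable
};

int main() {
  // Old style: the global input "x" was re-exposed as "x@alias" in steps.
  std::vector<Link> old_inlinks = {{"x@alias", "x"}};
  // New style: one name serves both scopes; collisions are avoided by
  // resolving the global through the parent scope instead of renaming.
  std::vector<std::string> new_inlinks = {"x"};
  std::cout << old_inlinks[0].external << " was aliased to "
            << old_inlinks[0].internal << "; now it is just "
            << new_inlinks[0] << "\n";
  return 0;
}
```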
Showing 5 changed files with 44 additions and 79 deletions (+44 −79):
- paddle/framework/scope.h: +2 −0
- paddle/operators/recurrent_op.cc: +10 −13
- paddle/operators/rnn/recurrent_op_utils.cc: +20 −41
- paddle/operators/rnn/recurrent_op_utils.h: +6 −15
- python/paddle/v2/framework/tests/test_recurrent_op.py: +6 −10
paddle/framework/scope.h

```diff
@@ -58,6 +58,8 @@ class Scope {
   /// nullptr if cannot find.
   Variable* FindVar(const std::string& name) const;
 
+  const Scope& parent() const { return *parent_; }
+
   /// Find the scope or an ancestor scope that contains the given variable.
   const Scope* FindScope(const Variable* var) const;
```
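The parent() accessor added above is what lets the RNN utilities (see recurrent_op_utils.cc below) fetch a global link by its original name from inside a step scope, even when that scope holds a per-step variable of the same name. A self-contained sketch of the idea; this toy Scope only approximates the real class in paddle/framework/scope.h:

```cpp
#include <cassert>
#include <map>
#include <memory>
#include <string>

struct Variable {};

// Toy scope with the same parent()/FindVar()/NewVar() surface as the diff.
class Scope {
 public:
  explicit Scope(const Scope* parent = nullptr) : parent_(parent) {}
  const Scope& parent() const { return *parent_; }
  Variable* NewVar(const std::string& name) {
    auto& slot = vars_[name];
    if (!slot) slot.reset(new Variable);
    return slot.get();
  }
  Variable* FindVar(const std::string& name) const {
    auto it = vars_.find(name);
    if (it != vars_.end()) return it->second.get();
    return parent_ ? parent_->FindVar(name) : nullptr;
  }

 private:
  std::map<std::string, std::unique_ptr<Variable>> vars_;
  const Scope* parent_;
};

int main() {
  Scope global;
  global.NewVar("x");  // the RNN's sequence input
  Scope step(&global);
  step.NewVar("x");    // the per-step slice, under the *same* name
  // The local "x" shadows the global one, so the utilities now ask the
  // parent scope explicitly; this replaces what the internal/external
  // alias pair used to disambiguate.
  assert(step.parent().FindVar("x") != step.FindVar("x"));
  return 0;
}
```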
paddle/operators/recurrent_op.cc

```diff
@@ -29,9 +29,11 @@ using Tensor = framework::Tensor;
 using LoDTensor = framework::LoDTensor;
 
 void RecurrentAlgorithm::InferShape(const Scope& scope) const {
-  seq_len_ = scope.FindVar((arg_->inlinks[0]).external)
-                 ->GetMutable<LoDTensor>()
-                 ->dims()[0];
+  auto* input0 = scope.FindVar(arg_->inlinks[0]);
+  PADDLE_ENFORCE_NOT_NULL(input0);
+  seq_len_ = input0->GetMutable<LoDTensor>()->dims()[0];
+  PADDLE_ENFORCE_GT(seq_len_, 0);
   CreateScopes(scope);
   auto step_scopes = GetStepScopes(scope);
   rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_,
@@ -123,13 +125,11 @@ void RecurrentAlgorithm::InitMemories(Scope* step_scope,
 }
 
 const rnn::ArgumentName RecurrentOp::kArgName{
-    "step_net", "step_scopes",  "inlinks",
-    "outlinks", "inlink_alias", "outlink_alias",
+    "step_net", "step_scopes", "inlinks", "outlinks",
     "memories", "pre_memories", "boot_memories"};
 
 const rnn::ArgumentName RecurrentGradientOp::kArgName{
-    "step_net",    "step_scopes",  "outlink@grad",
-    "inlink@grad", "inlink_alias", "outlink_alias",
+    "step_net", "step_scopes", "outlink@grad", "inlink@grad",
     "memories", "pre_memories", "boot_memories@grad"};
 
 RecurrentOp::RecurrentOp(const std::string& type,
@@ -160,8 +160,6 @@ class RecurrentAlgorithmProtoAndCheckerMaker
   AddOutput(name.step_scopes, "step scopes");
 
   // Attributes stored in AttributeMap
-  AddAttr<std::vector<std::string>>(name.inlink_alias, "alias of inlinks");
-  AddAttr<std::vector<std::string>>(name.outlink_alias, "alias of outlinks");
   AddAttr<std::vector<std::string>>(name.pre_memories,
                                     "names of pre-memories");
   AddAttr<std::vector<std::string>>(name.memories, "names of memories");
@@ -206,9 +204,8 @@ void RecurrentGradientAlgorithm::LinkBootMemoryGradients(
 }
 
 void RecurrentGradientAlgorithm::InferShape(const Scope& scope) const {
-  seq_len_ = scope.FindVar((arg_->inlinks[0]).external)
-                 ->GetMutable<LoDTensor>()
-                 ->dims()[0];
+  seq_len_ =
+      scope.FindVar(arg_->inlinks[0])->GetMutable<LoDTensor>()->dims()[0];
   auto step_scopes = GetStepScopes(scope);
   rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_,
                      true /*infer_shape_mode*/);
```
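Note that besides dropping the alias attributes, RecurrentAlgorithm::InferShape above now validates before dereferencing: the first inlink must exist (PADDLE_ENFORCE_NOT_NULL) and the deduced sequence length must be positive (PADDLE_ENFORCE_GT). The pattern looks roughly like this; the stand-in macros below are simplified assumptions, since the real PADDLE_ENFORCE_* macros report richer diagnostics rather than aborting:

```cpp
#include <cstdio>
#include <cstdlib>

// Simplified stand-ins for Paddle's enforcement macros.
#define ENFORCE_NOT_NULL(p)                              \
  do {                                                   \
    if ((p) == nullptr) {                                \
      std::fprintf(stderr, "%s must not be null\n", #p); \
      std::abort();                                      \
    }                                                    \
  } while (0)

#define ENFORCE_GT(a, b)                                 \
  do {                                                   \
    if (!((a) > (b))) {                                  \
      std::fprintf(stderr, "%s must be > %s\n", #a, #b); \
      std::abort();                                      \
    }                                                    \
  } while (0)

struct LoDTensor {
  long dim0;  // first dimension = sequence length
};

// Mirrors the new InferShape flow: check the variable, then the length.
long InferSeqLen(const LoDTensor* input0) {
  ENFORCE_NOT_NULL(input0);
  long seq_len = input0->dim0;
  ENFORCE_GT(seq_len, 0);
  return seq_len;
}

int main() {
  LoDTensor t{3};
  return InferSeqLen(&t) == 3 ? 0 : 1;
}
```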
paddle/operators/rnn/recurrent_op_utils.cc

```diff
@@ -24,22 +24,23 @@ using Tensor = framework::Tensor;
 using LoDTensor = framework::LoDTensor;
 
 void SegmentInputs(const std::vector<Scope*>& step_scopes,
-                   const std::vector<Link>& inlinks, const size_t seq_len,
-                   bool infer_shape_mode) {
+                   const std::vector<std::string>& inlinks,
+                   const size_t seq_len, bool infer_shape_mode) {
   PADDLE_ENFORCE(!inlinks.empty(), "no in links are provided.");
   for (size_t i = 0; i < inlinks.size(); ++i) {
-    auto input_var = step_scopes[0]->FindVar(inlinks[i].external);
-    PADDLE_ENFORCE(input_var != nullptr, "input link [%s] is not in scope.",
-                   inlinks[i].external);
+    // global inputs
+    auto input_var = step_scopes[0]->parent().FindVar(inlinks[i]);
+    PADDLE_ENFORCE_NOT_NULL(input_var, "input link [%s] is not in scope.",
+                            inlinks[i]);
     LoDTensor* input = input_var->GetMutable<LoDTensor>();
     f::DDim dims = input->dims();
-    PADDLE_ENFORCE(static_cast<size_t>(dims[0]) == seq_len,
-                   "all the inlinks must have same length");
+    PADDLE_ENFORCE_EQ(static_cast<size_t>(dims[0]), seq_len,
+                      "all the inlinks be the same length");
     f::DDim step_dims = slice_ddim(dims, 1, dims.size());
     for (size_t j = 0; j < seq_len; j++) {
       Tensor* step_input =
-          step_scopes[j]->NewVar(inlinks[i].internal)->GetMutable<Tensor>();
+          step_scopes[j]->NewVar(inlinks[i])->GetMutable<Tensor>();
       if (!infer_shape_mode) {
         // The input of operators of each step is Tensor here.
         // Maybe need to modify Slice function.
@@ -51,18 +52,17 @@ void SegmentInputs(const std::vector<Scope*>& step_scopes,
 }
 
 void ConcatOutputs(const std::vector<Scope*>& step_scopes,
-                   const std::vector<Link>& outlinks, const size_t seq_len,
-                   bool infer_shape_mode) {
+                   const std::vector<std::string>& outlinks,
+                   const size_t seq_len, bool infer_shape_mode) {
   for (size_t i = 0; i < outlinks.size(); i++) {
-    auto output_var = step_scopes[0]->FindVar(outlinks[i].external);
-    PADDLE_ENFORCE(output_var != nullptr, "output link [%s] is not in scope.",
-                   outlinks[i].external);
+    auto output_var = step_scopes[0]->parent().FindVar(outlinks[i]);
+    PADDLE_ENFORCE_NOT_NULL(output_var, "output link [%s] is not in scope.",
+                            outlinks[i]);
     LoDTensor* output = output_var->GetMutable<LoDTensor>();
 
     if (infer_shape_mode) {
-      auto step_scope_var = step_scopes[0]->FindVar(outlinks[i].internal);
-      PADDLE_ENFORCE(step_scope_var != nullptr, "%s not in scope",
-                     outlinks[i].internal);
+      auto step_scope_var = step_scopes[0]->FindVar(outlinks[i]);
+      PADDLE_ENFORCE_NOT_NULL(step_scope_var, "%s not in scope", outlinks[i]);
       f::DDim step_dims =
           step_scope_var->template GetMutable<LoDTensor>()->dims();
       std::vector<int64_t> dims_vec = vectorize(step_dims);
@@ -71,9 +71,8 @@ void ConcatOutputs(const std::vector<Scope*>& step_scopes,
     } else {
       output->mutable_data<float>(platform::CPUPlace());
       for (size_t j = 0; j < seq_len; j++) {
-        LoDTensor* step_output = step_scopes[j]
-                                     ->FindVar(outlinks[i].internal)
-                                     ->GetMutable<LoDTensor>();
+        LoDTensor* step_output =
+            step_scopes[j]->FindVar(outlinks[i])->GetMutable<LoDTensor>();
         // TODO(luotao02) data type and platform::DeviceContext() should set
         // correctly
         (output->Slice<float>(j, j + 1))
@@ -113,29 +112,9 @@ void InitArgument(const ArgumentName& name, Argument* arg,
                   const framework::OperatorBase& op) {
   arg->step_scopes = op.Output(name.step_scopes);
-
-  auto inlinks = op.Inputs(name.inlinks);
-  auto inlink_alias = op.Attr<std::vector<std::string>>(name.inlink_alias);
-  PADDLE_ENFORCE(inlinks.size() == inlink_alias.size(),
-                 "the size of inlinks and inlink_alias don't match:%d,%d",
-                 inlinks.size(), inlink_alias.size());
-  for (size_t i = 0; i < inlinks.size(); ++i) {
-    rnn::Link link;
-    link.external = inlinks[i];
-    link.internal = inlink_alias[i];
-    (arg->inlinks).push_back(link);
-  }
-
-  auto outlinks = op.Outputs(name.outlinks);
-  auto outlink_alias = op.Attr<std::vector<std::string>>(name.outlink_alias);
-  PADDLE_ENFORCE(outlinks.size() == outlink_alias.size(),
-                 "the size of outlinks and outlink_alias don't match:%d,%d",
-                 outlinks.size(), outlink_alias.size());
-  for (size_t i = 0; i < outlinks.size(); ++i) {
-    rnn::Link link;
-    link.external = outlinks[i];
-    link.internal = outlink_alias[i];
-    (arg->outlinks).push_back(link);
-  }
-
+  arg->inlinks = op.Inputs(name.inlinks);
+  arg->outlinks = op.Outputs(name.outlinks);
   auto boot_memories = op.Inputs(name.boot_memories);
```
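With the aliases gone, SegmentInputs and ConcatOutputs reduce to a clean split/merge: slice each [seq_len, ...] global input into per-step tensors registered under the same name, then concatenate the per-step outputs back into one tensor in the parent scope. A toy round trip illustrating the intent (plain value types; the real functions operate in place on scope-owned LoDTensors):

```cpp
#include <cassert>
#include <vector>

// Toy [rows, cols] tensor standing in for paddle's LoDTensor.
struct Tensor {
  std::vector<float> data;
  long rows = 0, cols = 0;
};

// SegmentInputs, conceptually: step j receives row j of the global input,
// stored under the same variable name in step scope j.
std::vector<Tensor> SegmentInput(const Tensor& global) {
  std::vector<Tensor> steps;
  for (long j = 0; j < global.rows; ++j) {
    Tensor step;
    step.rows = 1;
    step.cols = global.cols;
    step.data.assign(global.data.begin() + j * global.cols,
                     global.data.begin() + (j + 1) * global.cols);
    steps.push_back(step);
  }
  return steps;
}

// ConcatOutputs, conceptually: merge per-step outputs back into one
// [seq_len, cols] tensor.
Tensor ConcatOutput(const std::vector<Tensor>& steps) {
  Tensor out;
  out.rows = static_cast<long>(steps.size());
  out.cols = steps.empty() ? 0 : steps[0].cols;
  for (const Tensor& s : steps) {
    out.data.insert(out.data.end(), s.data.begin(), s.data.end());
  }
  return out;
}

int main() {
  Tensor x{{1.f, 2.f, 3.f, 4.f, 5.f, 6.f}, 3, 2};  // seq_len = 3, dim = 2
  std::vector<Tensor> steps = SegmentInput(x);
  assert(steps.size() == 3);
  Tensor y = ConcatOutput(steps);
  assert(y.data == x.data && y.rows == x.rows && y.cols == x.cols);
  return 0;
}
```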
paddle/operators/rnn/recurrent_op_utils.h

```diff
@@ -41,18 +41,11 @@ struct MemoryAttr {
   std::string boot_var;
 };
 
-struct Link {
-  // input or output links name.
-  std::string internal;
-  // alias to avoid duplicate keys in scopes.
-  std::string external;
-};
-
 struct Argument {
   std::string step_net;
   std::string step_scopes;
-  std::vector<Link> inlinks;
-  std::vector<Link> outlinks;
+  std::vector<std::string> inlinks;
+  std::vector<std::string> outlinks;
   std::vector<rnn::MemoryAttr> memories;
 };
@@ -61,8 +54,6 @@ struct ArgumentName {
   std::string step_scopes;
   std::string inlinks;
   std::string outlinks;
-  std::string inlink_alias;   // the alias of inlinks in step net.
-  std::string outlink_alias;  // the alias of outlinks in step net.
   std::string memories;       // the memory name
   std::string pre_memories;   // the previous memory name
   std::string boot_memories;  // the boot memory name
@@ -72,15 +63,15 @@ struct ArgumentName {
  * Prepare inputs for each step net.
  */
 void SegmentInputs(const std::vector<Scope*>& step_scopes,
-                   const std::vector<Link>& inlinks, const size_t seq_len,
-                   bool infer_shape_mode);
+                   const std::vector<std::string>& inlinks,
+                   const size_t seq_len, bool infer_shape_mode);
 
 /**
  * Process outputs of step nets and merge to variables.
  */
 void ConcatOutputs(const std::vector<Scope*>& step_scopes,
-                   const std::vector<Link>& outlinks, const size_t seq_len,
-                   bool infer_shape_mode);
+                   const std::vector<std::string>& outlinks,
+                   const size_t seq_len, bool infer_shape_mode);
 
 void LinkMemories(const std::vector<Scope*>& step_scopes,
                   const std::vector<MemoryAttr>& memories, const size_t step_id,
```
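The header change pays off in InitArgument (see the recurrent_op_utils.cc hunk above): instead of zipping the operator's input/output names with the inlink_alias/outlink_alias attributes into Link records, the names now copy straight across. A trimmed sketch, with toy values standing in for framework::OperatorBase:

```cpp
#include <string>
#include <vector>

// The simplified Argument from the diff above (memories omitted).
struct Argument {
  std::string step_net;
  std::string step_scopes;
  std::vector<std::string> inlinks;   // was std::vector<Link>
  std::vector<std::string> outlinks;  // was std::vector<Link>
};

int main() {
  // Names as an operator would report them (cf. the updated Python test).
  std::vector<std::string> op_inputs = {"x"};
  std::vector<std::string> op_outputs = {"h@mem"};

  // New InitArgument body: a straight copy, no alias zipping, no size checks.
  Argument arg;
  arg.inlinks = op_inputs;
  arg.outlinks = op_outputs;
  return (arg.inlinks.size() == 1 && arg.outlinks.size() == 1) ? 0 : 1;
}
```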
python/paddle/v2/framework/tests/test_recurrent_op.py

```diff
@@ -59,7 +59,6 @@ class PySimpleRNNTest(unittest.TestCase):
     def test_forward(self):
         output = self.rnn.forward()
-        print 'output', output
 
 
 def create_tensor(scope, name, shape, np_data):
@@ -103,7 +102,7 @@ class TestRecurrentOp(unittest.TestCase):
         ctx = core.DeviceContext.create(core.CPUPlace())
         self.rnnop.infer_shape(self.scope)
         self.rnnop.run(self.scope, ctx)
-        return np.array(self.scope.find_var("h").get_tensor())
+        return np.array(self.scope.find_var("h@mem").get_tensor())
 
     def create_global_variables(self):
         # create inlink
@@ -123,8 +122,7 @@ class TestRecurrentOp(unittest.TestCase):
         create_tensor(self.scope, "h_boot", [self.batch_size, self.input_dim],
                       h_boot_np_data)
         self.scope.new_var("step_scopes")
-        self.scope.new_var("h@alias")
-        self.scope.new_var("h")
+        self.scope.new_var("h@mem")
 
     def create_rnn_op(self):
         # create RNNOp
@@ -134,20 +132,18 @@ class TestRecurrentOp(unittest.TestCase):
             boot_memories=["h_boot"],
             step_net="stepnet",
             # outputs
-            outlinks=["h"],
+            outlinks=["h@mem"],
             step_scopes="step_scopes",
             # attributes
-            inlink_alias=["x@alias"],
-            outlink_alias=["h@alias"],
             pre_memories=["h@pre"],
-            memories=["h@alias"])
+            memories=["h@mem"])
 
     def create_step_net(self):
         stepnet = core.Net.create()
-        x_fc_op = Operator("mul", X="x@alias", Y="W", Out="Wx")
+        x_fc_op = Operator("mul", X="x", Y="W", Out="Wx")
         h_fc_op = Operator("mul", X="h@pre", Y="U", Out="Uh")
         sum_op = Operator("add", X="Wx", Y="Uh", Out="sum")
-        sig_op = Operator("sigmoid", X="sum", Y="h@alias")
+        sig_op = Operator("sigmoid", X="sum", Y="h@mem")
 
         for op in [x_fc_op, h_fc_op, sum_op, sig_op]:
             stepnet.append_op(op)
```