机器未来 / Paddle (forked from PaddlePaddle / Paddle)
Commit b5e67fce
Authored on Sep 20, 2017 by Yan Chunwei; committed via GitHub on Sep 20, 2017.

RNNOp remove alias (#4274)

* remove alias

Parent: 686f3b88
Showing 5 changed files with 44 additions and 79 deletions (+44 -79):

paddle/framework/scope.h (+2 -0)
paddle/operators/recurrent_op.cc (+10 -13)
paddle/operators/rnn/recurrent_op_utils.cc (+20 -41)
paddle/operators/rnn/recurrent_op_utils.h (+6 -15)
python/paddle/v2/framework/tests/test_recurrent_op.py (+6 -10)
paddle/framework/scope.h

@@ -58,6 +58,8 @@ class Scope {
   /// nullptr if cannot find.
   Variable* FindVar(const std::string& name) const;
 
+  const Scope& parent() const { return *parent_; }
+
   /// Find the scope or an ancestor scope that contains the given variable.
   const Scope* FindScope(const Variable* var) const;
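The new `parent()` accessor is what lets the step scopes drop the `@alias` names: a step scope can now hold a per-step variable under the same name as the whole-sequence variable, and `SegmentInputs`/`ConcatOutputs` (see `recurrent_op_utils.cc` below) reach the global one through the parent scope instead. Here is a toy sketch of that lookup rule; the Python class and names are illustrative only, not Paddle's implementation:

```python
# Toy model of the scope hierarchy; not Paddle's actual Scope class.
class Scope:
    def __init__(self, parent=None):
        self._parent = parent
        self._vars = {}

    def parent(self):                # mirrors the new Scope::parent()
        return self._parent

    def new_var(self, name):         # create a variable in *this* scope
        return self._vars.setdefault(name, object())

    def find_var(self, name):        # search this scope, then ancestors
        if name in self._vars:
            return self._vars[name]
        return self._parent.find_var(name) if self._parent else None

global_scope = Scope()
seq_x = global_scope.new_var("x")    # whole-sequence input
step0 = Scope(parent=global_scope)
step_x = step0.new_var("x")          # per-step slice, same name, no "x@alias"

assert step0.find_var("x") is step_x              # local name shadows
assert step0.parent().find_var("x") is seq_x      # global still reachable
```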
paddle/operators/recurrent_op.cc

@@ -29,9 +29,11 @@ using Tensor = framework::Tensor;
 using LoDTensor = framework::LoDTensor;
 
 void RecurrentAlgorithm::InferShape(const Scope& scope) const {
-  seq_len_ = scope.FindVar((arg_->inlinks[0]).external)
-                 ->GetMutable<LoDTensor>()
-                 ->dims()[0];
+  auto* input0 = scope.FindVar(arg_->inlinks[0]);
+  PADDLE_ENFORCE_NOT_NULL(input0);
+  seq_len_ = input0->GetMutable<LoDTensor>()->dims()[0];
+  PADDLE_ENFORCE_GT(seq_len_, 0);
   CreateScopes(scope);
   auto step_scopes = GetStepScopes(scope);
   rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_,

@@ -123,14 +125,12 @@ void RecurrentAlgorithm::InitMemories(Scope* step_scope,
 }
 
 const rnn::ArgumentName RecurrentOp::kArgName{
-    "step_net", "step_scopes",  "inlinks",
-    "outlinks", "inlink_alias", "outlink_alias",
+    "step_net", "step_scopes", "inlinks", "outlinks",
     "memories", "pre_memories", "boot_memories"};
 
 const rnn::ArgumentName RecurrentGradientOp::kArgName{
-    "step_net",    "step_scopes",  "outlink@grad",
-    "inlink@grad", "inlink_alias", "outlink_alias",
-    "memories",    "pre_memories", "boot_memories@grad"};
+    "step_net", "step_scopes", "outlink@grad", "inlink@grad",
+    "memories", "pre_memories", "boot_memories@grad"};
 
 RecurrentOp::RecurrentOp(const std::string& type,
                          const framework::VariableNameMap& inputs,

@@ -160,8 +160,6 @@ class RecurrentAlgorithmProtoAndCheckerMaker
     AddOutput(name.step_scopes, "step scopes");
 
     // Attributes stored in AttributeMap
-    AddAttr<std::vector<std::string>>(name.inlink_alias, "alias of inlinks");
-    AddAttr<std::vector<std::string>>(name.outlink_alias, "alias of outlinks");
     AddAttr<std::vector<std::string>>(name.pre_memories,
                                       "names of pre-memories");
     AddAttr<std::vector<std::string>>(name.memories, "names of memories");

@@ -206,9 +204,8 @@ void RecurrentGradientAlgorithm::LinkBootMemoryGradients(
 }
 
 void RecurrentGradientAlgorithm::InferShape(const Scope& scope) const {
-  seq_len_ = scope.FindVar((arg_->inlinks[0]).external)
-                 ->GetMutable<LoDTensor>()
-                 ->dims()[0];
+  seq_len_ =
+      scope.FindVar(arg_->inlinks[0])->GetMutable<LoDTensor>()->dims()[0];
   auto step_scopes = GetStepScopes(scope);
   rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_,
                      true /*infer_shape_mode*/);
paddle/operators/rnn/recurrent_op_utils.cc

@@ -24,22 +24,23 @@ using Tensor = framework::Tensor;
 using LoDTensor = framework::LoDTensor;
 
 void SegmentInputs(const std::vector<Scope*>& step_scopes,
-                   const std::vector<Link>& inlinks, const size_t seq_len,
-                   bool infer_shape_mode) {
+                   const std::vector<std::string>& inlinks,
+                   const size_t seq_len, bool infer_shape_mode) {
   PADDLE_ENFORCE(!inlinks.empty(), "no in links are provided.");
   for (size_t i = 0; i < inlinks.size(); ++i) {
-    auto input_var = step_scopes[0]->FindVar(inlinks[i].external);
-    PADDLE_ENFORCE(input_var != nullptr, "input link [%s] is not in scope.",
-                   inlinks[i].external);
+    // global inputs
+    auto input_var = step_scopes[0]->parent().FindVar(inlinks[i]);
+    PADDLE_ENFORCE_NOT_NULL(input_var, "input link [%s] is not in scope.",
+                            inlinks[i]);
     LoDTensor* input = input_var->GetMutable<LoDTensor>();
     f::DDim dims = input->dims();
-    PADDLE_ENFORCE(static_cast<size_t>(dims[0]) == seq_len,
-                   "all the inlinks must have same length");
+    PADDLE_ENFORCE_EQ(static_cast<size_t>(dims[0]), seq_len,
+                      "all the inlinks be the same length");
     f::DDim step_dims = slice_ddim(dims, 1, dims.size());
     for (size_t j = 0; j < seq_len; j++) {
       Tensor* step_input =
-          step_scopes[j]->NewVar(inlinks[i].internal)->GetMutable<Tensor>();
+          step_scopes[j]->NewVar(inlinks[i])->GetMutable<Tensor>();
       if (!infer_shape_mode) {
         // The input of operators of each step is Tensor here.
         // Maybe need to modify Slice function.

@@ -51,18 +52,17 @@ void SegmentInputs(const std::vector<Scope*>& step_scopes,
 }
 
 void ConcatOutputs(const std::vector<Scope*>& step_scopes,
-                   const std::vector<Link>& outlinks, const size_t seq_len,
-                   bool infer_shape_mode) {
+                   const std::vector<std::string>& outlinks,
+                   const size_t seq_len, bool infer_shape_mode) {
   for (size_t i = 0; i < outlinks.size(); i++) {
-    auto output_var = step_scopes[0]->FindVar(outlinks[i].external);
-    PADDLE_ENFORCE(output_var != nullptr, "output link [%s] is not in scope.",
-                   outlinks[i].external);
+    auto output_var = step_scopes[0]->parent().FindVar(outlinks[i]);
+    PADDLE_ENFORCE_NOT_NULL(output_var, "output link [%s] is not in scope.",
+                            outlinks[i]);
     LoDTensor* output = output_var->GetMutable<LoDTensor>();
 
     if (infer_shape_mode) {
-      auto step_scope_var = step_scopes[0]->FindVar(outlinks[i].internal);
-      PADDLE_ENFORCE(step_scope_var != nullptr, "%s not in scope",
-                     outlinks[i].internal);
+      auto step_scope_var = step_scopes[0]->FindVar(outlinks[i]);
+      PADDLE_ENFORCE_NOT_NULL(step_scope_var, "%s not in scope", outlinks[i]);
       f::DDim step_dims =
           step_scope_var->template GetMutable<LoDTensor>()->dims();
       std::vector<int64_t> dims_vec = vectorize(step_dims);

@@ -71,9 +71,8 @@ void ConcatOutputs(const std::vector<Scope*>& step_scopes,
     } else {
       output->mutable_data<float>(platform::CPUPlace());
       for (size_t j = 0; j < seq_len; j++) {
-        LoDTensor* step_output = step_scopes[j]
-                                     ->FindVar(outlinks[i].internal)
-                                     ->GetMutable<LoDTensor>();
+        LoDTensor* step_output =
+            step_scopes[j]->FindVar(outlinks[i])->GetMutable<LoDTensor>();
         // TODO(luotao02) data type and platform::DeviceContext() should set
         // correctly
         (output->Slice<float>(j, j + 1))

@@ -113,29 +112,9 @@ void InitArgument(const ArgumentName& name, Argument* arg,
                   const framework::OperatorBase& op) {
   arg->step_scopes = op.Output(name.step_scopes);
-
-  auto inlinks = op.Inputs(name.inlinks);
-  auto inlink_alias = op.Attr<std::vector<std::string>>(name.inlink_alias);
-  PADDLE_ENFORCE(inlinks.size() == inlink_alias.size(),
-                 "the size of inlinks and inlink_alias don't match:%d,%d",
-                 inlinks.size(), inlink_alias.size());
-  for (size_t i = 0; i < inlinks.size(); ++i) {
-    rnn::Link link;
-    link.external = inlinks[i];
-    link.internal = inlink_alias[i];
-    (arg->inlinks).push_back(link);
-  }
+  arg->inlinks = op.Inputs(name.inlinks);
 
-  auto outlinks = op.Outputs(name.outlinks);
-  auto outlink_alias = op.Attr<std::vector<std::string>>(name.outlink_alias);
-  PADDLE_ENFORCE(outlinks.size() == outlink_alias.size(),
-                 "the size of outlinks and outlink_alias don't match:%d,%d",
-                 outlinks.size(), outlink_alias.size());
-  for (size_t i = 0; i < outlinks.size(); ++i) {
-    rnn::Link link;
-    link.external = outlinks[i];
-    link.internal = outlink_alias[i];
-    (arg->outlinks).push_back(link);
-  }
+  arg->outlinks = op.Outputs(name.outlinks);
 
   auto boot_memories = op.Inputs(name.boot_memories);
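The `InitArgument` hunk above is where most of this commit's deletions come from: with the `rnn::Link {internal, external}` pairs gone, the argument wiring reduces to copying the input/output names straight through. Roughly, in Python-style pseudocode (the `op_inputs`/`op_attrs` dicts below are stand-ins for illustration, not Paddle's API):

```python
# Stand-in op metadata, mirroring the test's names; not Paddle's Python API.
op_inputs = {"inlinks": ["x"]}
op_attrs = {"inlink_alias": ["x@alias"]}

# Before: every link zipped a global name with its step-scope alias,
# after checking that the two lists had matching lengths.
assert len(op_inputs["inlinks"]) == len(op_attrs["inlink_alias"])
arg_inlinks_old = [
    {"external": ext, "internal": alias}
    for ext, alias in zip(op_inputs["inlinks"], op_attrs["inlink_alias"])
]

# After: a link is just the variable name itself.
arg_inlinks_new = list(op_inputs["inlinks"])   # ["x"]
```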
paddle/operators/rnn/recurrent_op_utils.h

@@ -41,18 +41,11 @@ struct MemoryAttr {
   std::string boot_var;
 };
 
-struct Link {
-  // input or output links name.
-  std::string internal;
-  // alias to avoid duplicate keys in scopes.
-  std::string external;
-};
-
 struct Argument {
   std::string step_net;
   std::string step_scopes;
-  std::vector<Link> inlinks;
-  std::vector<Link> outlinks;
+  std::vector<std::string> inlinks;
+  std::vector<std::string> outlinks;
   std::vector<rnn::MemoryAttr> memories;
 };

@@ -61,8 +54,6 @@ struct ArgumentName {
   std::string step_scopes;
   std::string inlinks;
   std::string outlinks;
-  std::string inlink_alias;   // the alias of inlinks in step net.
-  std::string outlink_alias;  // the alias of outlinks in step net.
   std::string memories;       // the memory name
   std::string pre_memories;   // the previous memory name
   std::string boot_memories;  // the boot memory name

@@ -72,15 +63,15 @@ struct ArgumentName {
  * Prepare inputs for each step net.
  */
 void SegmentInputs(const std::vector<Scope*>& step_scopes,
-                   const std::vector<Link>& inlinks, const size_t seq_len,
-                   bool infer_shape_mode);
+                   const std::vector<std::string>& inlinks,
+                   const size_t seq_len, bool infer_shape_mode);
 
 /**
  * Process outputs of step nets and merge to variables.
  */
 void ConcatOutputs(const std::vector<Scope*>& step_scopes,
-                   const std::vector<Link>& outlinks, const size_t seq_len,
-                   bool infer_shape_mode);
+                   const std::vector<std::string>& outlinks,
+                   const size_t seq_len, bool infer_shape_mode);
 
 void LinkMemories(const std::vector<Scope*>& step_scopes,
                   const std::vector<MemoryAttr>& memories,
                   const size_t step_id,
python/paddle/v2/framework/tests/test_recurrent_op.py

@@ -59,7 +59,6 @@ class PySimpleRNNTest(unittest.TestCase):
     def test_forward(self):
         output = self.rnn.forward()
-        print 'output', output
 
 
 def create_tensor(scope, name, shape, np_data):

@@ -103,7 +102,7 @@ class TestRecurrentOp(unittest.TestCase):
         ctx = core.DeviceContext.create(core.CPUPlace())
         self.rnnop.infer_shape(self.scope)
         self.rnnop.run(self.scope, ctx)
-        return np.array(self.scope.find_var("h").get_tensor())
+        return np.array(self.scope.find_var("h@mem").get_tensor())
 
     def create_global_variables(self):
         # create inlink

@@ -123,8 +122,7 @@ class TestRecurrentOp(unittest.TestCase):
         create_tensor(self.scope, "h_boot", [self.batch_size, self.input_dim],
                       h_boot_np_data)
         self.scope.new_var("step_scopes")
-        self.scope.new_var("h@alias")
-        self.scope.new_var("h")
+        self.scope.new_var("h@mem")
 
     def create_rnn_op(self):
         # create RNNOp

@@ -134,20 +132,18 @@ class TestRecurrentOp(unittest.TestCase):
             boot_memories=["h_boot"],
             step_net="stepnet",
             # outputs
-            outlinks=["h"],
+            outlinks=["h@mem"],
             step_scopes="step_scopes",
             # attributes
-            inlink_alias=["x@alias"],
-            outlink_alias=["h@alias"],
             pre_memories=["h@pre"],
-            memories=["h@alias"])
+            memories=["h@mem"])
 
     def create_step_net(self):
         stepnet = core.Net.create()
-        x_fc_op = Operator("mul", X="x@alias", Y="W", Out="Wx")
+        x_fc_op = Operator("mul", X="x", Y="W", Out="Wx")
         h_fc_op = Operator("mul", X="h@pre", Y="U", Out="Uh")
         sum_op = Operator("add", X="Wx", Y="Uh", Out="sum")
-        sig_op = Operator("sigmoid", X="sum", Y="h@alias")
+        sig_op = Operator("sigmoid", X="sum", Y="h@mem")
 
         for op in [x_fc_op, h_fc_op, sum_op, sig_op]:
             stepnet.append_op(op)
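For a user-visible summary, the test's operator attributes show the net effect: the two alias attributes disappear, and the step net now reads the inlink as "x" and writes the memory/output under the single name "h@mem". The condensed before/after below is assembled from the hunks above; keyword arguments the commit leaves untouched (boot_memories, step_net, step_scopes, and so on) are omitted:

```python
# Keyword arguments to the test's recurrent Operator, before #4274:
attrs_before = dict(
    outlinks=["h"],
    inlink_alias=["x@alias"],   # removed by this commit
    outlink_alias=["h@alias"],  # removed by this commit
    pre_memories=["h@pre"],
    memories=["h@alias"])

# And after: no alias attributes, one name per link.
attrs_after = dict(
    outlinks=["h@mem"],
    pre_memories=["h@pre"],
    memories=["h@mem"])
```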