BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)

Commit 57c95c79
Authored Sep 22, 2017 by fengjiayi

Merge branch 'fix_lod_tensor_dim_64' into feature/pybind_for_protobuf_desc

Parents: eeb7c8ad, 8149ba0d

Showing 7 changed files with 58 additions and 20 deletions (+58 -20)

paddle/framework/framework.proto                        +1   -1
paddle/framework/operator.cc                            +2   -2
paddle/operators/recurrent_op.cc                        +6   -7
paddle/operators/recurrent_op.h                         +4   -1
paddle/operators/rnn/recurrent_op_utils.cc              +5   -6
paddle/operators/rnn/recurrent_op_utils.h               +1   -1
python/paddle/v2/framework/tests/test_recurrent_op.py   +39  -2

paddle/framework/framework.proto
@@ -106,7 +106,7 @@ enum DataType {
 
 message LoDTensorDesc {
   required DataType data_type = 1;
-  repeated int32 dims = 2; // [UNK, 640, 480] is saved as [-1, 640, 480]
+  repeated int64 dims = 2; // [UNK, 640, 480] is saved as [-1, 640, 480]
   optional int32 lod_level = 3 [ default = 0 ];
 }
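
The only change here widens dims from 32 to 64 bits, so a shape entry and any product computed over the shape stay in 64-bit range (the branch name, fix_lod_tensor_dim_64, is exactly this). A minimal sketch of the overflow being guarded against; the NumElements helper below is hypothetical, not code from this commit:

    #include <cstdint>
    #include <vector>

    // Element count of a [batch, 640, 480] tensor passes INT32_MAX once
    // batch exceeds ~6990, so dims and their products need 64-bit storage.
    int64_t NumElements(const std::vector<int64_t>& dims) {
      int64_t n = 1;
      for (int64_t d : dims) n *= d;  // would overflow if kept in int32
      return n;
    }
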
paddle/framework/operator.cc
@@ -60,8 +60,8 @@ std::string OperatorBase::Output(const std::string& name) const {
 const std::vector<std::string>& OperatorBase::Outputs(
     const std::string& name) const {
   auto it = outputs_.find(name);
-  PADDLE_ENFORCE(it != outputs_.end(), "Op %s does not have output %s", type_,
-                 name);
+  PADDLE_ENFORCE(it != outputs_.end(), "Op %s does not have output called %s",
+                 type_, name);
   return it->second;
 }

paddle/operators/recurrent_op.cc
@@ -80,7 +80,6 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope) const {
   // Now all variables in scope must be created outside of op.
   PADDLE_ENFORCE_NOT_NULL(stepnet_);
   PADDLE_ENFORCE(!(*stepnet_)->Outputs().empty(), "stepnet_ op has no outputs");
-  PADDLE_ENFORCE(!(*stepnet_)->Outputs().empty(), "net_op has no outputs");
 
   if (seq_len_ > step_scopes->size()) {
     for (size_t i = step_scopes->size(); i < seq_len_; ++i) {
@@ -129,8 +128,8 @@ const rnn::ArgumentName RecurrentOp::kArgName{
     "memories", "pre_memories", "boot_memories"};
 
 const rnn::ArgumentName RecurrentGradientOp::kArgName{
-    "step_net", "step_scopes", "outlink@grad", "inlink@grad",
-    "memories", "pre_memories", "boot_memories@grad"};
+    "step_net", "step_scopes@GRAD", "outlinks@GRAD", "inlinks@GRAD",
+    "memories", "pre_memories", "boot_memories@GRAD"};
 
 RecurrentOp::RecurrentOp(const std::string& type,
                          const framework::VariableNameMap& inputs,
@@ -226,13 +225,13 @@ RecurrentGradientOp::RecurrentGradientOp(
     const framework::VariableNameMap& outputs,
     const framework::AttributeMap& attrs)
     : OperatorBase(type, inputs, outputs, attrs) {
-  rnn::InitArgument(kArgName, &arg_, *this);
+  rnn::InitArgument(kArgName, &arg_, *this, true /*is grad*/);
   alg_.Init(&arg_, &stepnet_);
 }
 
 }  // namespace operators
 }  // namespace paddle
 
-REGISTER_OP_WITHOUT_GRADIENT(
-    recurrent, paddle::operators::RecurrentOp,
-    paddle::operators::RecurrentAlgorithmProtoAndCheckerMaker);
+REGISTER_OP(recurrent, paddle::operators::RecurrentOp,
+            paddle::operators::RecurrentAlgorithmProtoAndCheckerMaker,
+            recurrent_grad, paddle::operators::RecurrentGradientOp);
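
Two things change above: InitArgument is told it is building arguments for a gradient op (true /*is grad*/), and the op is re-registered with REGISTER_OP so that recurrent_grad is paired with recurrent instead of being registered without a gradient. The renamed kArgName strings follow the framework's "@GRAD" suffix convention for gradient variables; roughly (a sketch of the convention only, not the framework's actual helper):

    #include <string>

    // Gradient variables reuse the forward variable's name plus an
    // "@GRAD" suffix, e.g. GradVarName("h_boot") == "h_boot@GRAD",
    // which is why "outlink@grad" above became "outlinks@GRAD".
    std::string GradVarName(const std::string& var_name) {
      return var_name + "@GRAD";
    }
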
paddle/operators/recurrent_op.h
@@ -22,7 +22,7 @@ namespace paddle {
 namespace operators {
 
 // The sequence format in RecurrentOp is Tensor<seq_len, batch_size, dim> now.
-// TODO(Yan Chunwei):
+// TODO(Superjom)
 // 1. No-padding computing for sequences with indifinite length in one batch.
 // 2. Hierarchical RNN for sequence with sub-sequence.
 // 3. Internal Memory.
@@ -177,6 +177,9 @@ class RecurrentGradientOp : public framework::OperatorBase {
   static const rnn::ArgumentName kArgName;
 
+  /*
+   * set a stepnet that is created according to a RecurrentOp's stepnet.
+   */
   void set_stepnet(std::unique_ptr<OperatorBase> net) {
     stepnet_ = std::move(net);
   }

paddle/operators/rnn/recurrent_op_utils.cc
@@ -109,15 +109,14 @@ void LinkMemories(const std::vector<Scope*>& scopes,
 }
 
 void InitArgument(const ArgumentName& name, Argument* arg,
-                  const framework::OperatorBase& op) {
-  arg->step_scopes = op.Output(name.step_scopes);
+                  const framework::OperatorBase& op, bool is_grad) {
+  arg->step_scopes =
+      is_grad ? op.Input(name.step_scopes) : op.Output(name.step_scopes);
   arg->inlinks = op.Inputs(name.inlinks);
   arg->outlinks = op.Outputs(name.outlinks);
 
-  auto boot_memories = op.Inputs(name.boot_memories);
+  auto boot_memories =
+      is_grad ? op.Outputs(name.boot_memories) : op.Inputs(name.boot_memories);
 
   // attributes
   auto memories = op.Attr<std::vector<std::string>>(name.memories);
   auto pre_memories = op.Attr<std::vector<std::string>>(name.pre_memories);
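
The is_grad flag encodes the role swap between an op and its gradient: step_scopes, which the forward op produces as an output, is consumed by the backward op as an input, and the forward op's boot_memories inputs become outputs of the backward op. A self-contained illustration of that dispatch, with a stub standing in for OperatorBase (illustrative assumption, not framework code):

    #include <iostream>
    #include <string>

    // Stub with the same Input/Output lookup shape as OperatorBase.
    struct StubOp {
      std::string Input(const std::string& n) const { return "reads " + n; }
      std::string Output(const std::string& n) const { return "writes " + n; }
    };

    int main() {
      StubOp op;
      const bool roles[] = {false, true};
      for (bool is_grad : roles) {
        // Forward creates step_scopes; the gradient op reads them back.
        std::cout << (is_grad ? op.Input("step_scopes")
                              : op.Output("step_scopes"))
                  << "\n";
      }
      return 0;
    }

The header change in the next file gives is_grad a default of false, so the existing forward-op call site rnn::InitArgument(kArgName, &arg_, *this) keeps compiling unchanged.
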
paddle/operators/rnn/recurrent_op_utils.h
@@ -78,7 +78,7 @@ void LinkMemories(const std::vector<Scope*>& step_scopes,
                   const int offset, bool infer_shape_mode);
 
 void InitArgument(const ArgumentName& name, Argument* arg,
-                  const framework::OperatorBase& op);
+                  const framework::OperatorBase& op, bool is_grad = false);
 
 }  // namespace rnn
 }  // namespace operators

python/paddle/v2/framework/tests/test_recurrent_op.py
@@ -3,6 +3,7 @@ import paddle.v2.framework.core as core
 import unittest
 import numpy as np
 from paddle.v2.framework.op import Operator, RecurrentOp
+from op_test import get_numeric_gradient
 
 
 def py_sigmoid(x):
@@ -47,7 +48,7 @@ class PySimpleRNN(object):
         else:
             pre_mem = self.h_boot
         xW = np.matmul(x, self.W)
-        hU = np.matmul(mem, self.U)
+        hU = np.matmul(pre_mem, self.U)
 
         sum = xW + hU
         self.mems[step_id] = py_sigmoid(sum)
@@ -68,7 +69,7 @@ def create_tensor(scope, name, shape, np_data):
     return tensor
 
 
-class TestRecurrentOp(unittest.TestCase):
+class RecurrentOpTest(unittest.TestCase):
     '''
     Test RNNOp
@@ -158,6 +159,42 @@ class TestRecurrentOp(unittest.TestCase):
         print
         print 'py_output', py_output
         self.assertEqual(pd_output.shape, py_output.shape)
+        self.assertTrue(np.isclose(pd_output, py_output, rtol=0.1).all())
+
+
+class RecurrentGradientOpTest(unittest.TestCase):
+    def create_forward_op(self):
+        self.forward_op = RecurrentOp(
+            # inputs
+            inlinks=["x"],
+            boot_memories=["h_boot"],
+            step_net="stepnet",
+            # outputs
+            outlinks=["h"],
+            step_scopes="step_scopes",
+            # attributes
+            pre_memories=["h@pre"],
+            memories=["h@alias"])
+
+        # create a stepnet for RNN
+        stepnet = core.Net.create()
+        x_fc_op = Operator("mul", X="x@alias", Y="W", Out="Wx")
+        h_fc_op = Operator("mul", X="h@pre", Y="U", Out="Uh")
+        sum_op = Operator("add", X="Wx", Y="Uh", Out="sum")
+        sig_op = Operator("sigmoid", X="sum", Y="h@alias")
+
+        for op in [x_fc_op, h_fc_op, sum_op, sig_op]:
+            stepnet.append_op(op)
+        stepnet.complete_add_op(True)
+        self.forward_op.set_stepnet(stepnet)
+
+    def create_gradient_op(self):
+        a = set()
+        backward_op = core.RecurrentOp.backward(self.forward_op, a)
+
+    def test_grad(self):
+        self.create_forward_op()
+        self.create_gradient_op()
+
+
 if __name__ == '__main__':