Commit b941865d
Authored Sep 22, 2017 by Yu Yang

Merge branch 'feature/simplify_attr_parse' into feature/pybind_for_protobuf_desc

Parents: ddf24484, 057e8102
Showing 11 changed files with 82 additions and 85 deletions.
paddle/framework/attribute.cc                            +0   -41
paddle/framework/attribute.h                             +9   -5
paddle/framework/lod_tensor.cc                           +6   -10
paddle/framework/lod_tensor.h                            +4   -4
paddle/framework/lod_tensor_test.cc                      +6   -6
paddle/framework/operator.cc                             +2   -2
paddle/operators/recurrent_op.cc                         +6   -7
paddle/operators/recurrent_op.h                          +4   -1
paddle/operators/rnn/recurrent_op_utils.cc               +5   -6
paddle/operators/rnn/recurrent_op_utils.h                +1   -1
python/paddle/v2/framework/tests/test_recurrent_op.py    +39  -2
paddle/framework/attribute.cc

@@ -31,47 +31,6 @@ ProgramDesc& GetProgramDesc() {
   return *g_program_desc;
 }
 
-template <>
-AttrType AttrTypeID<bool>() {
-  return BOOLEAN;
-}
-template <>
-AttrType AttrTypeID<int>() {
-  return INT;
-}
-template <>
-AttrType AttrTypeID<float>() {
-  return FLOAT;
-}
-template <>
-AttrType AttrTypeID<std::string>() {
-  return STRING;
-}
-template <>
-AttrType AttrTypeID<std::vector<bool>>() {
-  return BOOLEANS;
-}
-template <>
-AttrType AttrTypeID<std::vector<int>>() {
-  return INTS;
-}
-template <>
-AttrType AttrTypeID<std::vector<float>>() {
-  return FLOATS;
-}
-template <>
-AttrType AttrTypeID<std::vector<std::string>>() {
-  return STRINGS;
-}
-template <>
-AttrType AttrTypeID<std::vector<std::pair<int, int>>>() {
-  return INT_PAIRS;
-}
-template <>
-AttrType AttrTypeID<BlockDesc>() {
-  return BLOCK;
-}
-
 Attribute GetAttrValue(const OpDesc::Attr& attr_desc) {
   switch (attr_desc.type()) {
     case framework::AttrType::BOOLEAN: {
paddle/framework/attribute.h

@@ -27,10 +27,11 @@ limitations under the License. */
 namespace paddle {
 namespace framework {
 
-typedef boost::variant<boost::blank, int, float, std::string, std::vector<int>,
-                       std::vector<std::string>, std::vector<float>,
-                       std::vector<std::pair<int, int>>, bool,
-                       std::vector<bool>, BlockDesc*>
+typedef boost::variant<boost::blank, bool, int, float, std::string,
+                       // The order should be as same as framework.proto
+                       std::vector<bool>, std::vector<int>, std::vector<float>,
+                       std::vector<std::string>,
+                       std::vector<std::pair<int, int>>, BlockDesc*>
     Attribute;
 
 typedef std::unordered_map<std::string, Attribute> AttributeMap;

@@ -38,7 +39,10 @@ typedef std::unordered_map<std::string, Attribute> AttributeMap;
 ProgramDesc& GetProgramDesc();
 
 template <typename T>
-AttrType AttrTypeID();
+inline AttrType AttrTypeID() {
+  Attribute tmp = T();
+  return static_cast<AttrType>(tmp.which() - 1);
+}
 
 Attribute GetAttrValue(const OpDesc::Attr& attr_desc);
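The generic AttrTypeID above replaces ten hand-written specializations: it default-constructs a T inside the Attribute variant and recovers the enum from the alternative's position, with which() - 1 skipping the leading boost::blank. This only works because the variant's alternative order now mirrors the AttrType enum in framework.proto, which is what the reordering and the new comment enforce. A minimal self-contained sketch of the same trick, using std::variant and a made-up four-value enum in place of Paddle's generated AttrType:

// Sketch only: std::variant stands in for boost::variant, and this enum is a
// made-up subset of the AttrType enum generated from framework.proto.
#include <cassert>
#include <string>
#include <variant>

enum AttrType { BOOLEAN = 0, INT = 1, FLOAT = 2, STRING = 3 };

// Slot 0 (std::monostate, playing boost::blank's role) shifts every
// value-bearing alternative to index (enum value + 1).
using Attribute = std::variant<std::monostate, bool, int, float, std::string>;

template <typename T>
AttrType AttrTypeID() {
  Attribute tmp = T();  // default-construct T to select its alternative
  return static_cast<AttrType>(tmp.index() - 1);  // index() ~ boost's which()
}

int main() {
  assert(AttrTypeID<bool>() == BOOLEAN);
  assert(AttrTypeID<int>() == INT);
  assert(AttrTypeID<std::string>() == STRING);
  return 0;
}

The flip side is fragility: any drift between the variant's order and the proto enum silently changes every AttrTypeID result, which the old explicit specializations were immune to.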
paddle/framework/lod_tensor.cc

@@ -72,20 +72,16 @@ bool operator==(const LoD& a, const LoD& b) {
   return true;
 }
 
-void LoDTensor::SliceLevels(size_t level_begin, size_t level_end) {
+void LoDTensor::ShrinkLevels(size_t level_begin, size_t level_end) {
   auto new_lod = framework::SliceLevels(lod_, level_begin, level_end);
   lod_ = new_lod;
 }
 
-void LoDTensor::SliceInLevel(size_t level, size_t elem_begin, size_t elem_end) {
-  PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level,
-                 NumLevels());
-  PADDLE_ENFORCE(elem_begin < NumElements(level),
-                 "element begin [%d] out of range [%d]", elem_begin,
-                 NumElements(level));
-  PADDLE_ENFORCE(elem_end < NumElements(level) + 1,
-                 "element end [%d] out of range [%d]", elem_end,
-                 NumElements(level));
+void LoDTensor::ShrinkInLevel(size_t level, size_t elem_begin,
+                              size_t elem_end) {
+  PADDLE_ENFORCE_LT(level, NumLevels());
+  PADDLE_ENFORCE_LT(elem_begin, NumElements(level));
+  PADDLE_ENFORCE_LT(elem_end, NumElements(level) + 1);
   auto new_lod = framework::SliceInLevel(lod_, level, elem_begin, elem_end);
   lod_ = new_lod;
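The hand-rolled range checks with printf-style messages collapse into single-expression PADDLE_ENFORCE_LT calls. A minimal sketch of how a comparison macro of that shape can be built, assuming nothing about Paddle's real implementation beyond the call form used above:

#include <sstream>
#include <stdexcept>

// Hypothetical stand-in for PADDLE_ENFORCE_LT; the real macro produces richer
// diagnostics, but the shape is the same: stringify both operands and report
// their runtime values, so call sites need no format string.
#define MY_ENFORCE_LT(a, b)                                              \
  do {                                                                   \
    if (!((a) < (b))) {                                                  \
      std::ostringstream oss;                                            \
      oss << "Enforce failed: " #a " < " #b ", got " << (a) << " vs "    \
          << (b);                                                        \
      throw std::runtime_error(oss.str());                               \
    }                                                                    \
  } while (0)

For example, MY_ENFORCE_LT(level, NumLevels()) throws with both values embedded, which is exactly the boilerplate the three deleted PADDLE_ENFORCE calls used to spell out by hand.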
paddle/framework/lod_tensor.h

@@ -89,15 +89,15 @@ class LoDTensor : public Tensor {
   }
 
   /*
-   * Slice of levels[level_begin:level_end]
+   * Shrink levels[level_begin:level_end]
    */
-  void SliceLevels(size_t level_begin, size_t level_end);
+  void ShrinkLevels(size_t level_begin, size_t level_end);
 
   /*
-   * Slice of elements of a level, [elem_begin: elem_end]
+   * Shrink elements of a level, [elem_begin: elem_end]
    * @note: low performance in slice lod_.
    */
-  void SliceInLevel(size_t level, size_t elem_begin, size_t elem_end);
+  void ShrinkInLevel(size_t level, size_t elem_begin, size_t elem_end);
 
  private:
  LoD lod_;
paddle/framework/lod_tensor_test.cc

@@ -56,11 +56,11 @@ TEST_F(LoDTensorTester, NumElements) {
   ASSERT_EQ(lod_tensor_.NumElements(2), 8UL);
 }
 
-TEST_F(LoDTensorTester, SliceLevels) {
+TEST_F(LoDTensorTester, ShrinkLevels) {
   // slice 1 level
   for (size_t level = 0; level < 3UL; ++level) {
     LoDTensor new_lod_tensor = lod_tensor_;
-    new_lod_tensor.SliceLevels(level, level + 1);
+    new_lod_tensor.ShrinkLevels(level, level + 1);
     ASSERT_EQ(new_lod_tensor.NumLevels(), 1UL);
     ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor_.NumElements(level));
     ASSERT_EQ(new_lod_tensor.data<float>(), lod_tensor_.data<float>());

@@ -68,7 +68,7 @@ TEST_F(LoDTensorTester, SliceLevels) {
   // slice 2 level
   for (size_t level = 0; level < 2UL; ++level) {
     LoDTensor new_lod_tensor = lod_tensor_;
-    new_lod_tensor.SliceLevels(level, level + 2);
+    new_lod_tensor.ShrinkLevels(level, level + 2);
     ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
     ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor_.NumElements(level));
     ASSERT_EQ(new_lod_tensor.NumElements(1),

@@ -77,10 +77,10 @@ TEST_F(LoDTensorTester, SliceLevels) {
   }
 }
 
-TEST_F(LoDTensorTester, SliceInLevel) {
+TEST_F(LoDTensorTester, ShrinkInLevel) {
   size_t level = 0;
   LoDTensor new_lod_tensor = lod_tensor_;
-  new_lod_tensor.SliceInLevel(level, 0, 2);
+  new_lod_tensor.ShrinkInLevel(level, 0, 2);
   EXPECT_EQ(new_lod_tensor.NumLevels(), 3UL);
   EXPECT_EQ(new_lod_tensor.NumElements(0), 2UL);
   EXPECT_EQ(new_lod_tensor.NumElements(1), 4UL);

@@ -89,7 +89,7 @@ TEST_F(LoDTensorTester, SliceInLevel) {
   level = 1;
   new_lod_tensor = lod_tensor_;
-  new_lod_tensor.SliceInLevel(level, 0, 2);
+  new_lod_tensor.ShrinkInLevel(level, 0, 2);
   ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
   ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL);
   ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL);
paddle/framework/operator.cc

@@ -60,8 +60,8 @@ std::string OperatorBase::Output(const std::string& name) const {
 const std::vector<std::string>& OperatorBase::Outputs(
     const std::string& name) const {
   auto it = outputs_.find(name);
-  PADDLE_ENFORCE(it != outputs_.end(), "Op %s does not have output %s", type_,
-                 name);
+  PADDLE_ENFORCE(it != outputs_.end(), "Op %s does not have output called %s",
+                 type_, name);
   return it->second;
 }
paddle/operators/recurrent_op.cc

@@ -80,7 +80,6 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope) const {
   // Now all variables in scope must be created outside of op.
   PADDLE_ENFORCE_NOT_NULL(stepnet_);
   PADDLE_ENFORCE(!(*stepnet_)->Outputs().empty(), "stepnet_ op has no outputs");
-  PADDLE_ENFORCE(!(*stepnet_)->Outputs().empty(), "net_op has no outputs");
 
   if (seq_len_ > step_scopes->size()) {
     for (size_t i = step_scopes->size(); i < seq_len_; ++i) {

@@ -129,8 +128,8 @@ const rnn::ArgumentName RecurrentOp::kArgName{
     "memories", "pre_memories", "boot_memories"};
 
 const rnn::ArgumentName RecurrentGradientOp::kArgName{
-    "step_net", "step_scopes", "outlink@grad", "inlink@grad",
-    "memories", "pre_memories", "boot_memories@grad"};
+    "step_net", "step_scopes@GRAD", "outlinks@GRAD", "inlinks@GRAD",
+    "memories", "pre_memories", "boot_memories@GRAD"};
 
 RecurrentOp::RecurrentOp(const std::string& type,
                          const framework::VariableNameMap& inputs,

@@ -226,13 +225,13 @@ RecurrentGradientOp::RecurrentGradientOp(
     const framework::VariableNameMap& outputs,
     const framework::AttributeMap& attrs)
     : OperatorBase(type, inputs, outputs, attrs) {
-  rnn::InitArgument(kArgName, &arg_, *this);
+  rnn::InitArgument(kArgName, &arg_, *this, true /*is grad*/);
   alg_.Init(&arg_, &stepnet_);
 }
 
 }  // namespace operators
 }  // namespace paddle
 
-REGISTER_OP_WITHOUT_GRADIENT(
-    recurrent, paddle::operators::RecurrentOp,
-    paddle::operators::RecurrentAlgorithmProtoAndCheckerMaker);
+REGISTER_OP(recurrent, paddle::operators::RecurrentOp,
+            paddle::operators::RecurrentAlgorithmProtoAndCheckerMaker,
+            recurrent_grad, paddle::operators::RecurrentGradientOp);
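Unlike REGISTER_OP_WITHOUT_GRADIENT, REGISTER_OP records a gradient op alongside the forward op, so the backward pass can look up recurrent and instantiate recurrent_grad. A toy sketch of that pairing; MiniOpInfo and MiniRegistry are hypothetical names, not Paddle's actual OpRegistry machinery:

#include <map>
#include <string>

// Hypothetical mini-registry: each op type may name a paired gradient op type.
struct MiniOpInfo {
  std::string grad_op_type;  // empty means "no gradient op registered"
};

inline std::map<std::string, MiniOpInfo>& MiniRegistry() {
  static std::map<std::string, MiniOpInfo> registry;
  return registry;
}

// REGISTER_OP(recurrent, ..., recurrent_grad, ...) boils down to entries like:
inline void RegisterRecurrentPair() {
  MiniRegistry()["recurrent"] = MiniOpInfo{"recurrent_grad"};
  MiniRegistry()["recurrent_grad"] = MiniOpInfo{""};
}

The new RecurrentGradientOpTest in the Python diff below drives this pairing from the scripting side via core.RecurrentOp.backward.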
paddle/operators/recurrent_op.h

@@ -22,7 +22,7 @@ namespace paddle {
 namespace operators {
 
 // The sequence format in RecurrentOp is Tensor<seq_len, batch_size, dim> now.
-// TODO(Yan Chunwei):
+// TODO(Superjom)
 // 1. No-padding computing for sequences with indifinite length in one batch.
 // 2. Hierarchical RNN for sequence with sub-sequence.
 // 3. Internal Memory.

@@ -177,6 +177,9 @@ class RecurrentGradientOp : public framework::OperatorBase {
   static const rnn::ArgumentName kArgName;
 
+  /*
+   * set a stepnet that is created according to a RecurrentOp's stepnet.
+   */
   void set_stepnet(std::unique_ptr<OperatorBase> net) {
     stepnet_ = std::move(net);
   }
paddle/operators/rnn/recurrent_op_utils.cc

@@ -109,15 +109,14 @@ void LinkMemories(const std::vector<Scope*>& scopes,
 }
 
 void InitArgument(const ArgumentName& name, Argument* arg,
-                  const framework::OperatorBase& op) {
-  arg->step_scopes = op.Output(name.step_scopes);
-
+                  const framework::OperatorBase& op, bool is_grad) {
+  arg->step_scopes =
+      is_grad ? op.Input(name.step_scopes) : op.Output(name.step_scopes);
   arg->inlinks = op.Inputs(name.inlinks);
-
   arg->outlinks = op.Outputs(name.outlinks);
 
-  auto boot_memories = op.Inputs(name.boot_memories);
+  auto boot_memories =
+      is_grad ? op.Outputs(name.boot_memories) : op.Inputs(name.boot_memories);
 
   // attributes
   auto memories = op.Attr<std::vector<std::string>>(name.memories);
   auto pre_memories = op.Attr<std::vector<std::string>>(name.pre_memories);
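The is_grad branches encode a duality between an op and its gradient: variables the forward op writes (step_scopes) are read by the gradient op, and variables it reads (boot_memories) are where the gradient op writes. A tiny sketch of that inversion; MiniOp is a hypothetical stand-in for framework::OperatorBase, not its real interface:

#include <string>

// Hypothetical stand-in for framework::OperatorBase.
struct MiniOp {
  std::string Input(const std::string& name) const { return "in:" + name; }
  std::string Output(const std::string& name) const { return "out:" + name; }
};

// The forward pass produces the step scopes and the backward pass consumes
// them, so the same argument name resolves through opposite slots.
inline std::string ResolveStepScopes(const MiniOp& op, bool is_grad) {
  return is_grad ? op.Input("step_scopes") : op.Output("step_scopes");
}

With is_grad defaulting to false in the header below, existing forward-op call sites stay unchanged.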
paddle/operators/rnn/recurrent_op_utils.h

@@ -78,7 +78,7 @@ void LinkMemories(const std::vector<Scope*>& step_scopes,
                   const int offset, bool infer_shape_mode);
 
 void InitArgument(const ArgumentName& name, Argument* arg,
-                  const framework::OperatorBase& op);
+                  const framework::OperatorBase& op, bool is_grad = false);
 
 }  // namespace rnn
 }  // namespace operators
python/paddle/v2/framework/tests/test_recurrent_op.py

@@ -3,6 +3,7 @@ import paddle.v2.framework.core as core
 import unittest
 import numpy as np
 from paddle.v2.framework.op import Operator, RecurrentOp
+from op_test import get_numeric_gradient
 
 
 def py_sigmoid(x):

@@ -47,7 +48,7 @@ class PySimpleRNN(object):
         else:
             pre_mem = self.h_boot
         xW = np.matmul(x, self.W)
-        hU = np.matmul(mem, self.U)
+        hU = np.matmul(pre_mem, self.U)
 
         sum = xW + hU
         self.mems[step_id] = py_sigmoid(sum)

@@ -68,7 +69,7 @@ def create_tensor(scope, name, shape, np_data):
     return tensor
 
 
-class TestRecurrentOp(unittest.TestCase):
+class RecurrentOpTest(unittest.TestCase):
     '''
     Test RNNOp

@@ -158,6 +159,42 @@ class TestRecurrentOp(unittest.TestCase):
         print
         print 'py_output', py_output
         self.assertEqual(pd_output.shape, py_output.shape)
+        self.assertTrue(np.isclose(pd_output, py_output, rtol=0.1).all())
+
+
+class RecurrentGradientOpTest(unittest.TestCase):
+    def create_forward_op(self):
+        self.forward_op = RecurrentOp(
+            # inputs
+            inlinks=["x"],
+            boot_memories=["h_boot"],
+            step_net="stepnet",
+            # outputs
+            outlinks=["h"],
+            step_scopes="step_scopes",
+            # attributes
+            pre_memories=["h@pre"],
+            memories=["h@alias"])
+
+        # create a stepnet for RNN
+        stepnet = core.Net.create()
+        x_fc_op = Operator("mul", X="x@alias", Y="W", Out="Wx")
+        h_fc_op = Operator("mul", X="h@pre", Y="U", Out="Uh")
+        sum_op = Operator("add", X="Wx", Y="Uh", Out="sum")
+        sig_op = Operator("sigmoid", X="sum", Y="h@alias")
+
+        for op in [x_fc_op, h_fc_op, sum_op, sig_op]:
+            stepnet.append_op(op)
+        stepnet.complete_add_op(True)
+        self.forward_op.set_stepnet(stepnet)
+
+    def create_gradient_op(self):
+        a = set()
+        backward_op = core.RecurrentOp.backward(self.forward_op, a)
+
+    def test_grad(self):
+        self.create_forward_op()
+        self.create_gradient_op()
+
+
 if __name__ == '__main__':