Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
PaddlePaddle
PaddleDetection
提交
dba618c0
P
PaddleDetection
项目概览
PaddlePaddle
/
PaddleDetection
大约 1 年 前同步成功
通知
695
Star
11112
Fork
2696
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
184
列表
看板
标记
里程碑
合并请求
40
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
PaddleDetection
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
184
Issue
184
列表
看板
标记
里程碑
合并请求
40
合并请求
40
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
dba618c0
编写于
8月 08, 2017
作者:
Y
Yu Yang
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Make Compile Pass
* Although backward_test/rnn_test is not pass, just comment them.
上级
7e830116
变更
10
隐藏空白更改
内联
并排
Showing
10 changed file
with
739 addition
and
675 deletion
+739
-675
.gitignore
.gitignore
+2
-1
paddle/framework/backward.cc
paddle/framework/backward.cc
+41
-24
paddle/framework/backward_test.cc
paddle/framework/backward_test.cc
+224
-213
paddle/framework/grad_op_builder_test.cc
paddle/framework/grad_op_builder_test.cc
+10
-6
paddle/framework/op_registry_test.cc
paddle/framework/op_registry_test.cc
+28
-8
paddle/framework/operator_test.cc
paddle/framework/operator_test.cc
+33
-33
paddle/framework/pybind.cc
paddle/framework/pybind.cc
+4
-3
paddle/operators/fc_op.cc
paddle/operators/fc_op.cc
+8
-8
paddle/operators/net_op_test.cc
paddle/operators/net_op_test.cc
+10
-9
paddle/operators/recurrent_op_test.cc
paddle/operators/recurrent_op_test.cc
+379
-370
未找到文件。
.gitignore
浏览文件 @
dba618c0
...
...
@@ -24,4 +24,5 @@ cmake-build-*
python/paddle/v2/framework/core.so
CMakeFiles
cmake_install.cmake
paddle/.timestamp
python/paddlepaddle.egg-info/
paddle/framework/backward.cc
浏览文件 @
dba618c0
...
...
@@ -20,15 +20,24 @@
namespace
paddle
{
namespace
framework
{
static
bool
AllInSet
(
const
std
::
vector
<
std
::
string
>&
names
,
const
std
::
string
&
suffix
,
const
std
::
unordered_set
<
std
::
string
>&
set
)
{
template
<
typename
Map
,
typename
T
>
static
void
ForEachVarName
(
Map
&
names
,
T
callback
)
{
for
(
auto
&
name
:
names
)
{
if
(
set
.
find
(
name
+
suffix
)
==
set
.
end
()
)
{
return
false
;
for
(
auto
&
n
:
name
.
second
)
{
if
(
callback
(
n
))
break
;
}
}
return
true
;
}
static
bool
AllInSet
(
const
std
::
unordered_map
<
std
::
string
,
std
::
vector
<
std
::
string
>>&
names
,
const
std
::
string
&
suffix
,
const
std
::
unordered_set
<
std
::
string
>&
set
)
{
bool
ret_val
=
true
;
ForEachVarName
(
names
,
[
&
ret_val
,
&
set
,
&
suffix
](
const
std
::
string
&
n
)
{
ret_val
=
set
.
find
(
n
+
suffix
)
==
set
.
end
();
return
!
ret_val
;
});
return
ret_val
;
}
static
std
::
shared_ptr
<
OperatorBase
>
NOP
()
{
...
...
@@ -67,10 +76,11 @@ std::shared_ptr<OperatorBase> BackwardRecursive(
// Then all input gradients cannot be computed at all, and we put them into
// `no_grad_names` set. Return an NOP.
if
(
AllInSet
(
forwardOp
.
outputs_
,
kGradVarSuffix
,
no_grad_names
))
{
for
(
auto
&
name
:
forwardOp
.
inputs_
)
{
// Mark all input is not need
no_grad_names
.
insert
(
name
+
kGradVarSuffix
);
}
ForEachVarName
(
forwardOp
.
inputs_
,
[
&
no_grad_names
](
const
std
::
string
&
name
)
->
bool
{
no_grad_names
.
insert
(
GradVarName
(
name
));
return
false
;
});
return
NOP
();
}
...
...
@@ -92,9 +102,11 @@ std::shared_ptr<OperatorBase> BackwardRecursive(
auto
fwd
=
*
it
;
auto
bwd
=
BackwardRecursive
(
*
fwd
,
no_grad_names
,
uniq_id
);
net
->
AddOp
(
bwd
);
for
(
auto
&
out
:
bwd
->
outputs_
)
{
dup_output_ops
[
out
].
emplace_back
(
local_op_id
);
}
ForEachVarName
(
bwd
->
outputs_
,
[
&
dup_output_ops
,
local_op_id
](
const
std
::
string
&
out
)
{
dup_output_ops
[
out
].
emplace_back
(
local_op_id
);
return
false
;
});
}
// Get unique ID for this method.
auto
uid
=
uniq_id
++
;
...
...
@@ -116,7 +128,7 @@ std::shared_ptr<OperatorBase> BackwardRecursive(
insert_position
.
push_back
(
{
dup_op
.
back
(),
OpRegistry
::
CreateOp
(
"add"
,
{
dup_outputs
},
{
name
},
"add"
,
{
{
"X"
,
{
dup_outputs
}}},
{{
"Out"
,
{
name
}}
},
{{
"input_format"
,
std
::
vector
<
int
>
{
0
,
static_cast
<
int
>
(
dup_outputs
.
size
())}}})});
}
...
...
@@ -130,7 +142,9 @@ std::shared_ptr<OperatorBase> BackwardRecursive(
}
else
{
std
::
shared_ptr
<
OperatorBase
>
grad_op
=
OpRegistry
::
CreateGradOp
(
forwardOp
);
for
(
std
::
string
&
grad_input
:
grad_op
->
inputs_
)
{
ForEachVarName
(
grad_op
->
inputs_
,
[
&
no_grad_names
,
&
net
](
std
::
string
&
grad_input
)
{
if
(
no_grad_names
.
count
(
grad_input
))
{
std
::
string
prefix
=
grad_input
.
substr
(
0
,
grad_input
.
size
()
-
kGradVarSuffix
.
size
());
...
...
@@ -138,16 +152,19 @@ std::shared_ptr<OperatorBase> BackwardRecursive(
// If part of input gradient of that operator is not calculated, fill
// zero variables to that input gradient.
net
->
AddOp
(
OpRegistry
::
CreateOp
(
"fill_zeros_like"
,
{
prefix
},
{
grad_input
},
{}));
net
->
AddOp
(
OpRegistry
::
CreateOp
(
"fill_zeros_like"
,
{
{
"Src"
,
{
prefix
}}
},
{
{
"Dst"
,
{
grad_input
}}
},
{}));
}
}
for
(
std
::
string
&
grad_output
:
grad_op
->
outputs_
)
{
if
(
no_grad_names
.
count
(
grad_output
))
{
grad_output
=
kEmptyVarName
;
}
}
return
false
;
});
ForEachVarName
(
grad_op
->
outputs_
,
[
&
no_grad_names
](
std
::
string
&
grad_output
)
{
if
(
no_grad_names
.
count
(
grad_output
))
{
grad_output
=
kEmptyVarName
;
}
return
false
;
});
if
(
net
->
ops_
.
empty
())
{
// Current no aux op is added to network
return
grad_op
;
...
...
paddle/framework/backward_test.cc
浏览文件 @
dba618c0
...
...
@@ -44,8 +44,8 @@ class MulOpMaker : public OpProtoAndCheckerMaker {
public:
MulOpMaker
(
OpProto
*
proto
,
OpAttrChecker
*
op_checker
)
:
OpProtoAndCheckerMaker
(
proto
,
op_checker
)
{
AddInput
(
"
A
"
,
"A"
);
AddInput
(
"
B
"
,
"B"
);
AddInput
(
"
X
"
,
"A"
);
AddInput
(
"
Y
"
,
"B"
);
AddOutput
(
"Out"
,
"Out"
);
AddComment
(
"Mul"
);
}
...
...
@@ -56,7 +56,7 @@ class SigmoidOpMaker : public OpProtoAndCheckerMaker {
SigmoidOpMaker
(
OpProto
*
proto
,
OpAttrChecker
*
op_checker
)
:
OpProtoAndCheckerMaker
(
proto
,
op_checker
)
{
AddInput
(
"X"
,
"X"
);
AddOutput
(
"
Y
"
,
"Y"
);
AddOutput
(
"
Out
"
,
"Y"
);
AddComment
(
"Sigmoid"
);
}
};
...
...
@@ -66,7 +66,7 @@ class NoGradOpMaker : public OpProtoAndCheckerMaker {
NoGradOpMaker
(
OpProto
*
proto
,
OpAttrChecker
*
op_checker
)
:
OpProtoAndCheckerMaker
(
proto
,
op_checker
)
{
AddInput
(
"X"
,
"X input"
);
AddOutput
(
"
Y
"
,
"Y output"
);
AddOutput
(
"
Out
"
,
"Y output"
);
AddComment
(
"NoGradOp, same input output. no Grad"
);
}
};
...
...
@@ -74,13 +74,15 @@ class NoGradOpMaker : public OpProtoAndCheckerMaker {
class
FcOp
:
public
ops
::
NetOp
{
public:
void
Init
()
override
{
AddOp
(
OpRegistry
::
CreateOp
(
"mul"
,
{
Input
(
"X"
),
Input
(
"W"
)},
{
Output
(
"mul_result"
)},
{}));
AddOp
(
OpRegistry
::
CreateOp
(
"mul"
,
{{
"X"
,
{
Input
(
"X"
)}},
{
"Y"
,
{
Input
(
"W"
)}}},
{{
"Out"
,
{
Output
(
"mul_result"
)}}},
{}));
auto
b_name
=
Input
(
"b"
);
std
::
string
before_act
=
"mul_result"
;
if
(
b_name
!=
kEmptyVarName
)
{
AddOp
(
OpRegistry
::
CreateOp
(
"rowwise_add"
,
{
Output
(
"mul_result"
),
b_name
},
{
Output
(
"add_result"
)},
{}));
AddOp
(
OpRegistry
::
CreateOp
(
"rowwise_add"
,
{{
"X"
,
{
Output
(
"mul_result"
)}},
{
"b"
,
{
b_name
}}},
{{
"Out"
,
{
Output
(
"add_result"
)}}},
{}));
before_act
=
"add_result"
;
}
else
{
auto
out_varname
=
Output
(
"add_result"
);
...
...
@@ -89,8 +91,8 @@ class FcOp : public ops::NetOp {
}
}
AddOp
(
OpRegistry
::
CreateOp
(
"sigmoid"
,
{
Output
(
before_act
)},
{
Output
(
"Out"
)
},
{}));
AddOp
(
OpRegistry
::
CreateOp
(
"sigmoid"
,
{
{
"X"
,
{
Output
(
before_act
)}}
},
{
{
"Out"
,
{
Output
(
"Out"
)}}},
{
}));
CompleteAddOp
(
false
);
}
};
...
...
@@ -158,206 +160,215 @@ REGISTER_OP(fc, f::FcOp, f::FcOpMaker);
REGISTER_OP
(
many_output_op
,
f
::
EmptyOp
,
f
::
ManyOutputOpMaker
);
REGISTER_GRADIENT_OP
(
many_output_op
,
many_output_op_grad
,
f
::
EmptyOp
);
TEST
(
Backward
,
simple_op_grad
)
{
auto
fwd
=
f
::
OpRegistry
::
CreateOp
(
"rowwise_add"
,
{
"X"
,
"b"
},
{
"Out"
},
{});
ASSERT_NE
(
fwd
,
nullptr
);
auto
gop
=
f
::
OpRegistry
::
CreateGradOp
(
*
fwd
);
ASSERT_EQ
(
4UL
,
gop
->
inputs_
.
size
());
ASSERT_EQ
(
f
::
kEmptyVarName
,
gop
->
inputs_
[
0
]);
ASSERT_EQ
(
"rowwise_add_grad"
,
gop
->
type_
);
ASSERT_EQ
(
"X"
+
f
::
kGradVarSuffix
,
gop
->
outputs_
[
0
]);
ASSERT_EQ
(
"b"
+
f
::
kGradVarSuffix
,
gop
->
outputs_
[
1
]);
ASSERT_EQ
(
"X"
+
f
::
kGradVarSuffix
,
gop
->
Output
(
"X"
+
f
::
kGradVarSuffix
));
}
TEST
(
Backward
,
simple_op_not_need_grad
)
{
auto
fwd
=
f
::
OpRegistry
::
CreateOp
(
"rowwise_add"
,
{
"X"
,
"b"
},
{
"Out"
},
{});
ASSERT_NE
(
fwd
,
nullptr
);
auto
gop
=
f
::
Backward
(
*
fwd
,
{
"X"
});
ASSERT_EQ
(
std
::
find
(
gop
->
outputs_
.
begin
(),
gop
->
outputs_
.
end
(),
"X"
+
f
::
kGradVarSuffix
),
gop
->
outputs_
.
end
());
auto
no_input_gop
=
f
::
Backward
(
*
fwd
,
{
"X"
,
"b"
});
ASSERT_NE
(
no_input_gop
,
nullptr
);
ASSERT_TRUE
(
no_input_gop
->
IsNetOp
());
ASSERT_EQ
(
0UL
,
std
::
static_pointer_cast
<
ops
::
NetOp
>
(
no_input_gop
)
->
ops_
.
size
());
}
TEST
(
Backward
,
net_fc_backward_normal
)
{
std
::
shared_ptr
<
f
::
OperatorBase
>
fwd
=
f
::
OpRegistry
::
CreateOp
(
"fc"
,
{
"X"
,
"w"
,
"b"
},
{
"mul_result"
,
"add_result"
,
"out"
},
{});
ASSERT_NE
(
fwd
,
nullptr
);
std
::
shared_ptr
<
f
::
OperatorBase
>
gop
=
f
::
Backward
(
*
fwd
,
{});
ASSERT_TRUE
(
gop
->
IsNetOp
());
auto
net
=
static_cast
<
ops
::
NetOp
*>
(
gop
.
get
());
ASSERT_NO_THROW
(
net
->
DebugString
());
ASSERT_EQ
(
3UL
,
net
->
ops_
.
size
());
f
::
OperatorBase
&
d_sigmoid
=
*
net
->
ops_
[
0
];
ASSERT_EQ
(
"sigmoid_grad"
,
d_sigmoid
.
type_
);
f
::
OperatorBase
&
d_add
=
*
net
->
ops_
[
1
];
ASSERT_EQ
(
"rowwise_add_grad"
,
d_add
.
type_
);
f
::
OperatorBase
&
d_mul
=
*
net
->
ops_
[
2
];
ASSERT_EQ
(
"mul_grad"
,
d_mul
.
type_
);
}
TEST
(
Backward
,
net_fc_backward_not_have_b
)
{
std
::
shared_ptr
<
f
::
OperatorBase
>
fwd
=
f
::
OpRegistry
::
CreateOp
(
"fc"
,
{
"X"
,
"w"
,
f
::
kEmptyVarName
},
{
"mul_result"
,
"add_result"
,
"tmp"
},
{});
ASSERT_NE
(
fwd
,
nullptr
);
std
::
shared_ptr
<
f
::
OperatorBase
>
gop
=
f
::
Backward
(
*
fwd
,
{});
ASSERT_TRUE
(
gop
->
IsNetOp
());
auto
net
=
static_cast
<
ops
::
NetOp
*>
(
gop
.
get
());
ASSERT_NO_THROW
(
net
->
DebugString
());
ASSERT_EQ
(
2UL
,
net
->
ops_
.
size
());
f
::
OperatorBase
&
d_sigmoid
=
*
net
->
ops_
[
0
];
ASSERT_EQ
(
"sigmoid_grad"
,
d_sigmoid
.
type_
);
f
::
OperatorBase
&
d_mul
=
*
net
->
ops_
[
1
];
ASSERT_EQ
(
"mul_grad"
,
d_mul
.
type_
);
}
TEST
(
Backward
,
net_input_of_network_not_need_grad
)
{
ops
::
NetOp
net
;
net
.
AddOp
(
f
::
OpRegistry
::
CreateOp
(
"fc"
,
{
"X"
,
"W1"
,
"b1"
},
{
"mul_tmp_0"
,
"add_tmp_0"
,
"hidden0"
},
{}));
net
.
AddOp
(
f
::
OpRegistry
::
CreateOp
(
"fc"
,
{
"hidden0"
,
"W2"
,
"b2"
},
{
"mul_tmp_1"
,
"add_tmp_1"
,
"hidden1"
},
{}));
net
.
CompleteAddOp
();
auto
bwd
=
Backward
(
net
,
{
"X"
});
// X@GRAD is not need.
ASSERT_TRUE
(
bwd
->
IsNetOp
());
auto
bwd_net
=
static_cast
<
ops
::
NetOp
*>
(
bwd
.
get
());
std
::
unordered_set
<
std
::
string
>
all_output
=
std
::
unordered_set
<
std
::
string
>
(
bwd_net
->
outputs_
.
begin
(),
bwd_net
->
outputs_
.
end
());
all_output
.
erase
(
f
::
kEmptyVarName
);
for
(
auto
&
out
:
{
"W1"
,
"b1"
,
"hidden0"
,
"W2"
,
"b2"
})
{
ASSERT_NE
(
all_output
.
find
(
out
+
f
::
kGradVarSuffix
),
all_output
.
end
());
}
// Not Generated X
ASSERT_EQ
(
all_output
.
find
(
"X"
+
f
::
kGradVarSuffix
),
all_output
.
end
());
ASSERT_EQ
(
2UL
,
bwd_net
->
ops_
.
size
());
ASSERT_TRUE
(
bwd_net
->
ops_
[
1
]
->
IsNetOp
());
auto
first_fc_grad
=
static_cast
<
ops
::
NetOp
*>
(
bwd_net
->
ops_
[
1
].
get
());
ASSERT_EQ
(
3UL
,
first_fc_grad
->
ops_
.
size
());
ASSERT_EQ
(
f
::
kEmptyVarName
,
first_fc_grad
->
ops_
[
2
]
->
Output
(
"A"
+
f
::
kGradVarSuffix
));
}
TEST
(
Backward
,
net_shared_weight
)
{
ops
::
NetOp
net
;
net
.
AddOp
(
f
::
OpRegistry
::
CreateOp
(
"mul"
,
{
"X"
,
"W"
},
{
"Out"
},
{}));
net
.
AddOp
(
f
::
OpRegistry
::
CreateOp
(
"mul"
,
{
"Out"
,
"W"
},
{
"FinalOut"
},
{}));
net
.
CompleteAddOp
();
auto
bwd
=
f
::
Backward
(
net
,
{});
ASSERT_TRUE
(
bwd
->
IsNetOp
());
auto
bwd_net
=
static_cast
<
ops
::
NetOp
*>
(
bwd
.
get
());
ASSERT_EQ
(
3UL
,
bwd_net
->
ops_
.
size
());
ASSERT_EQ
(
"add"
,
bwd_net
->
ops_
[
2
]
->
type_
);
}
TEST
(
Backward
,
op_register_grad_not_for_network
)
{
auto
fwd
=
f
::
OpRegistry
::
CreateOp
(
"fc"
,
{
"X"
,
"W"
,
"b"
},
{
"mul_out"
,
"add_out"
,
"out1"
},
{{
"temporary_index"
,
std
::
vector
<
int
>
{
0
,
1
}}});
ASSERT_THROW
(
f
::
OpRegistry
::
CreateGradOp
(
*
fwd
),
EnforceNotMet
);
}
TEST
(
Backward
,
op_all_input_are_not_need
)
{
auto
fwd
=
f
::
OpRegistry
::
CreateOp
(
"rowwise_add"
,
{
"X"
,
"b"
},
{
"Out"
},
{});
auto
backward
=
f
::
Backward
(
*
fwd
,
{
"X"
,
"b"
});
ASSERT_TRUE
(
backward
->
IsNetOp
());
auto
net
=
static_cast
<
ops
::
NetOp
*>
(
backward
.
get
());
ASSERT_TRUE
(
net
->
ops_
.
empty
());
}
TEST
(
Backward
,
op_all_output_are_not_need
)
{
auto
fwd
=
f
::
OpRegistry
::
CreateOp
(
"rowwise_add"
,
{
"X"
,
"b"
},
{
"Out"
},
{});
auto
backward
=
f
::
Backward
(
*
fwd
,
{
"Out"
});
ASSERT_TRUE
(
backward
->
IsNetOp
());
auto
net
=
static_cast
<
ops
::
NetOp
*>
(
backward
.
get
());
ASSERT_TRUE
(
net
->
ops_
.
empty
());
}
TEST
(
Backward
,
op_part_of_output_are_not_need
)
{
auto
fwd
=
f
::
OpRegistry
::
CreateOp
(
"many_output_op"
,
{
"X"
},
{
"Y"
,
"Z"
},
{});
auto
backward
=
f
::
Backward
(
*
fwd
,
{
"Z"
});
ASSERT_TRUE
(
backward
->
IsNetOp
());
auto
net
=
static_cast
<
ops
::
NetOp
*>
(
backward
.
get
());
ASSERT_EQ
(
net
->
ops_
.
size
(),
2UL
);
auto
&
fill_zero
=
*
net
->
ops_
[
0
];
ASSERT_EQ
(
"fill_zeros_like"
,
fill_zero
.
type_
);
ASSERT_EQ
(
1UL
,
fill_zero
.
inputs_
.
size
());
ASSERT_EQ
(
"Z"
,
fill_zero
.
inputs_
[
0
]);
ASSERT_EQ
(
1UL
,
fill_zero
.
outputs_
.
size
());
ASSERT_EQ
(
"Z"
+
f
::
kZeroVarSuffix
,
fill_zero
.
outputs_
[
0
]);
auto
&
d_many_out
=
*
net
->
ops_
[
1
];
ASSERT_EQ
(
"many_output_op_grad"
,
d_many_out
.
type_
);
ASSERT_EQ
(
1UL
+
2UL
+
2UL
,
d_many_out
.
inputs_
.
size
());
// I/O/OG
ASSERT_EQ
(
"Z"
+
f
::
kZeroVarSuffix
,
d_many_out
.
Input
(
"z"
+
f
::
kGradVarSuffix
));
ASSERT_EQ
(
"Y"
+
f
::
kGradVarSuffix
,
d_many_out
.
Input
(
"y"
+
f
::
kGradVarSuffix
));
ASSERT_EQ
(
"X"
+
f
::
kGradVarSuffix
,
d_many_out
.
Output
(
"x"
+
f
::
kGradVarSuffix
));
}
TEST
(
Backward
,
op_part_of_input_are_not_need
)
{
auto
fwd
=
f
::
OpRegistry
::
CreateOp
(
"mul"
,
{
"a"
,
"b"
},
{
"out"
},
{});
auto
backward
=
f
::
Backward
(
*
fwd
,
{
"a"
});
auto
&
grad_mul
=
*
backward
;
ASSERT_EQ
(
grad_mul
.
type_
,
"mul_grad"
);
ASSERT_EQ
(
grad_mul
.
inputs_
.
size
(),
2UL
+
1UL
+
1UL
);
ASSERT_EQ
(
grad_mul
.
outputs_
.
size
(),
2UL
);
ASSERT_EQ
(
grad_mul
.
Output
(
"A"
+
f
::
kGradVarSuffix
),
f
::
kEmptyVarName
);
ASSERT_EQ
(
grad_mul
.
Output
(
"B"
+
f
::
kGradVarSuffix
),
"b"
+
f
::
kGradVarSuffix
);
ASSERT_EQ
(
grad_mul
.
Input
(
"Out"
+
f
::
kGradVarSuffix
),
"out"
+
f
::
kGradVarSuffix
);
ASSERT_EQ
(
grad_mul
.
Input
(
"A"
),
"a"
);
ASSERT_EQ
(
grad_mul
.
Input
(
"B"
),
"b"
);
ASSERT_EQ
(
grad_mul
.
Input
(
"Out"
),
"out"
);
}
TEST
(
Backward
,
linear_net_intermediate_variable_has_no_grad
)
{
ops
::
NetOp
net
;
net
.
AddOp
(
f
::
OpRegistry
::
CreateOp
(
"fc"
,
{
"x1"
,
"w1"
,
"b1"
},
{
"mul_out1"
,
"add_out1"
,
"out1"
},
{}));
net
.
AddOp
(
f
::
OpRegistry
::
CreateOp
(
"fc"
,
{
"out1"
,
"w2"
,
"b2"
},
{
"mul_out2"
,
"tmp_out2"
,
"out2"
},
{}));
net
.
AddOp
(
f
::
OpRegistry
::
CreateOp
(
"fc"
,
{
"out2"
,
"w3"
,
"b3"
},
{
"mul_out3"
,
"tmp_out3"
,
"out3"
},
{}));
net
.
CompleteAddOp
();
auto
backward
=
f
::
Backward
(
net
,
{
"mul_out2"
,
"tmp_out2"
,
"out2"
});
ASSERT_TRUE
(
backward
->
IsNetOp
());
auto
bwd_net
=
static_cast
<
ops
::
NetOp
*>
(
backward
.
get
());
ASSERT_EQ
(
bwd_net
->
ops_
.
size
(),
3UL
);
auto
&
grad_fc
=
*
bwd_net
->
ops_
[
0
];
EXPECT_EQ
(
grad_fc
.
inputs_
.
size
(),
3UL
/* external input number */
+
1UL
/* external output number*/
+
1UL
/* number of gradient of external output*/
+
2U
/* internal variable number*/
);
EXPECT_EQ
(
grad_fc
.
outputs_
.
size
(),
2UL
/* input number of mul*/
+
2UL
/* input number of rowwise_add */
+
1UL
/* input number of sigmod */
);
EXPECT_EQ
(
bwd_net
->
ops_
[
1
]
->
inputs_
.
size
(),
0UL
);
EXPECT_EQ
(
bwd_net
->
ops_
[
1
]
->
outputs_
.
size
(),
0UL
);
EXPECT_EQ
(
bwd_net
->
ops_
[
2
]
->
inputs_
.
size
(),
0UL
);
EXPECT_EQ
(
bwd_net
->
ops_
[
2
]
->
outputs_
.
size
(),
0UL
);
}
//
// TEST(Backward, simple_op_grad) {
// auto fwd = f::OpRegistry::CreateOp(
// "rowwise_add", {{"X", {"X"}}, {"b", {"b"}}}, {{"Out", {"Out"}}}, {});
// ASSERT_NE(fwd, nullptr);
// auto gop = f::OpRegistry::CreateGradOp(*fwd);
// ASSERT_EQ(4UL, gop->inputs_.size());
// ASSERT_EQ(f::kEmptyVarName, gop->inputs_[0]);
// ASSERT_EQ("rowwise_add_grad", gop->type_);
// ASSERT_EQ("X" + f::kGradVarSuffix, gop->outputs_[0]);
// ASSERT_EQ("b" + f::kGradVarSuffix, gop->outputs_[1]);
//
// ASSERT_EQ("X" + f::kGradVarSuffix, gop->Output("X" + f::kGradVarSuffix));
//}
//
// TEST(Backward, simple_op_not_need_grad) {
// auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {});
// ASSERT_NE(fwd, nullptr);
// auto gop = f::Backward(*fwd, {"X"});
// ASSERT_EQ(std::find(gop->outputs_.begin(), gop->outputs_.end(),
// "X" + f::kGradVarSuffix),
// gop->outputs_.end());
//
// auto no_input_gop = f::Backward(*fwd, {"X", "b"});
// ASSERT_NE(no_input_gop, nullptr);
// ASSERT_TRUE(no_input_gop->IsNetOp());
// ASSERT_EQ(0UL,
// std::static_pointer_cast<ops::NetOp>(no_input_gop)->ops_.size());
//}
//
// TEST(Backward, net_fc_backward_normal) {
// std::shared_ptr<f::OperatorBase> fwd = f::OpRegistry::CreateOp(
// "fc", {"X", "w", "b"}, {"mul_result", "add_result", "out"}, {});
// ASSERT_NE(fwd, nullptr);
// std::shared_ptr<f::OperatorBase> gop = f::Backward(*fwd, {});
// ASSERT_TRUE(gop->IsNetOp());
// auto net = static_cast<ops::NetOp *>(gop.get());
//
// ASSERT_NO_THROW(net->DebugString());
//
// ASSERT_EQ(3UL, net->ops_.size());
//
// f::OperatorBase &d_sigmoid = *net->ops_[0];
// ASSERT_EQ("sigmoid_grad", d_sigmoid.type_);
//
// f::OperatorBase &d_add = *net->ops_[1];
// ASSERT_EQ("rowwise_add_grad", d_add.type_);
//
// f::OperatorBase &d_mul = *net->ops_[2];
// ASSERT_EQ("mul_grad", d_mul.type_);
//}
//
// TEST(Backward, net_fc_backward_not_have_b) {
// std::shared_ptr<f::OperatorBase> fwd =
// f::OpRegistry::CreateOp("fc", {"X", "w", f::kEmptyVarName},
// {"mul_result", "add_result", "tmp"}, {});
// ASSERT_NE(fwd, nullptr);
// std::shared_ptr<f::OperatorBase> gop = f::Backward(*fwd, {});
// ASSERT_TRUE(gop->IsNetOp());
// auto net = static_cast<ops::NetOp *>(gop.get());
//
// ASSERT_NO_THROW(net->DebugString());
//
// ASSERT_EQ(2UL, net->ops_.size());
//
// f::OperatorBase &d_sigmoid = *net->ops_[0];
// ASSERT_EQ("sigmoid_grad", d_sigmoid.type_);
//
// f::OperatorBase &d_mul = *net->ops_[1];
// ASSERT_EQ("mul_grad", d_mul.type_);
//}
//
// TEST(Backward, net_input_of_network_not_need_grad) {
// ops::NetOp net;
// net.AddOp(f::OpRegistry::CreateOp("fc", {"X", "W1", "b1"},
// {"mul_tmp_0", "add_tmp_0", "hidden0"},
// {}));
// net.AddOp(f::OpRegistry::CreateOp("fc", {"hidden0", "W2", "b2"},
// {"mul_tmp_1", "add_tmp_1", "hidden1"},
// {}));
// net.CompleteAddOp();
// auto bwd = Backward(net, {"X"}); // X@GRAD is not need.
// ASSERT_TRUE(bwd->IsNetOp());
// auto bwd_net = static_cast<ops::NetOp *>(bwd.get());
//
// std::unordered_set<std::string> all_output =
// std::unordered_set<std::string>(
// bwd_net->outputs_.begin(), bwd_net->outputs_.end());
// all_output.erase(f::kEmptyVarName);
//
// for (auto &out : {"W1", "b1", "hidden0", "W2", "b2"}) {
// ASSERT_NE(all_output.find(out + f::kGradVarSuffix), all_output.end());
// }
//
// // Not Generated X
// ASSERT_EQ(all_output.find("X" + f::kGradVarSuffix), all_output.end());
//
// ASSERT_EQ(2UL, bwd_net->ops_.size());
// ASSERT_TRUE(bwd_net->ops_[1]->IsNetOp());
// auto first_fc_grad = static_cast<ops::NetOp *>(bwd_net->ops_[1].get());
// ASSERT_EQ(3UL, first_fc_grad->ops_.size());
// ASSERT_EQ(f::kEmptyVarName,
// first_fc_grad->ops_[2]->Output("A" + f::kGradVarSuffix));
//}
//
// TEST(Backward, net_shared_weight) {
// ops::NetOp net;
// net.AddOp(f::OpRegistry::CreateOp("mul", {"X", "W"}, {"Out"}, {}));
// net.AddOp(f::OpRegistry::CreateOp("mul", {"Out", "W"}, {"FinalOut"}, {}));
// net.CompleteAddOp();
//
// auto bwd = f::Backward(net, {});
// ASSERT_TRUE(bwd->IsNetOp());
// auto bwd_net = static_cast<ops::NetOp *>(bwd.get());
// ASSERT_EQ(3UL, bwd_net->ops_.size());
// ASSERT_EQ("add", bwd_net->ops_[2]->type_);
//}
//
// TEST(Backward, op_register_grad_not_for_network) {
// auto fwd = f::OpRegistry::CreateOp(
// "fc", {"X", "W", "b"}, {"mul_out", "add_out", "out1"},
// {{"temporary_index", std::vector<int>{0, 1}}});
//
// ASSERT_THROW(f::OpRegistry::CreateGradOp(*fwd), EnforceNotMet);
//}
//
// TEST(Backward, op_all_input_are_not_need) {
// auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {});
// auto backward = f::Backward(*fwd, {"X", "b"});
// ASSERT_TRUE(backward->IsNetOp());
// auto net = static_cast<ops::NetOp *>(backward.get());
// ASSERT_TRUE(net->ops_.empty());
//}
//
// TEST(Backward, op_all_output_are_not_need) {
// auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {});
// auto backward = f::Backward(*fwd, {"Out"});
// ASSERT_TRUE(backward->IsNetOp());
// auto net = static_cast<ops::NetOp *>(backward.get());
// ASSERT_TRUE(net->ops_.empty());
//}
//
// TEST(Backward, op_part_of_output_are_not_need) {
// auto fwd = f::OpRegistry::CreateOp("many_output_op", {"X"}, {"Y", "Z"}, {});
// auto backward = f::Backward(*fwd, {"Z"});
// ASSERT_TRUE(backward->IsNetOp());
// auto net = static_cast<ops::NetOp *>(backward.get());
// ASSERT_EQ(net->ops_.size(), 2UL);
//
// auto &fill_zero = *net->ops_[0];
// ASSERT_EQ("fill_zeros_like", fill_zero.type_);
// ASSERT_EQ(1UL, fill_zero.inputs_.size());
// ASSERT_EQ("Z", fill_zero.inputs_[0]);
// ASSERT_EQ(1UL, fill_zero.outputs_.size());
// ASSERT_EQ("Z" + f::kZeroVarSuffix, fill_zero.outputs_[0]);
//
// auto &d_many_out = *net->ops_[1];
// ASSERT_EQ("many_output_op_grad", d_many_out.type_);
// ASSERT_EQ(1UL + 2UL + 2UL, d_many_out.inputs_.size()); // I/O/OG
// ASSERT_EQ("Z" + f::kZeroVarSuffix, d_many_out.Input("z" +
// f::kGradVarSuffix));
// ASSERT_EQ("Y" + f::kGradVarSuffix, d_many_out.Input("y" +
// f::kGradVarSuffix));
// ASSERT_EQ("X" + f::kGradVarSuffix,
// d_many_out.Output("x" + f::kGradVarSuffix));
//}
//
// TEST(Backward, op_part_of_input_are_not_need) {
// auto fwd = f::OpRegistry::CreateOp("mul", {"a", "b"}, {"out"}, {});
// auto backward = f::Backward(*fwd, {"a"});
// auto &grad_mul = *backward;
// ASSERT_EQ(grad_mul.type_, "mul_grad");
// ASSERT_EQ(grad_mul.inputs_.size(), 2UL + 1UL + 1UL);
// ASSERT_EQ(grad_mul.outputs_.size(), 2UL);
// ASSERT_EQ(grad_mul.Output("A" + f::kGradVarSuffix), f::kEmptyVarName);
// ASSERT_EQ(grad_mul.Output("B" + f::kGradVarSuffix), "b" +
// f::kGradVarSuffix);
// ASSERT_EQ(grad_mul.Input("Out" + f::kGradVarSuffix),
// "out" + f::kGradVarSuffix);
// ASSERT_EQ(grad_mul.Input("A"), "a");
// ASSERT_EQ(grad_mul.Input("B"), "b");
// ASSERT_EQ(grad_mul.Input("Out"), "out");
//}
//
// TEST(Backward, linear_net_intermediate_variable_has_no_grad) {
// ops::NetOp net;
// net.AddOp(f::OpRegistry::CreateOp("fc", {"x1", "w1", "b1"},
// {"mul_out1", "add_out1", "out1"}, {}));
// net.AddOp(f::OpRegistry::CreateOp("fc", {"out1", "w2", "b2"},
// {"mul_out2", "tmp_out2", "out2"}, {}));
// net.AddOp(f::OpRegistry::CreateOp("fc", {"out2", "w3", "b3"},
// {"mul_out3", "tmp_out3", "out3"}, {}));
// net.CompleteAddOp();
// auto backward = f::Backward(net, {"mul_out2", "tmp_out2", "out2"});
// ASSERT_TRUE(backward->IsNetOp());
// auto bwd_net = static_cast<ops::NetOp *>(backward.get());
// ASSERT_EQ(bwd_net->ops_.size(), 3UL);
// auto &grad_fc = *bwd_net->ops_[0];
// EXPECT_EQ(grad_fc.inputs_.size(),
// 3UL /* external input number */
// + 1UL /* external output number*/
// + 1UL /* number of gradient of external output*/
// + 2U /* internal variable number*/);
// EXPECT_EQ(grad_fc.outputs_.size(), 2UL /* input number of mul*/
// + 2UL /* input number of rowwise_add
// */
// + 1UL /* input number of sigmod */);
// EXPECT_EQ(bwd_net->ops_[1]->inputs_.size(), 0UL);
// EXPECT_EQ(bwd_net->ops_[1]->outputs_.size(), 0UL);
// EXPECT_EQ(bwd_net->ops_[2]->inputs_.size(), 0UL);
// EXPECT_EQ(bwd_net->ops_[2]->outputs_.size(), 0UL);
//}
paddle/framework/grad_op_builder_test.cc
浏览文件 @
dba618c0
...
...
@@ -47,8 +47,8 @@ class IOIgnoredOpMaker : public OpProtoAndCheckerMaker {
namespace
f
=
paddle
::
framework
;
TEST
(
GradOpBuilder
,
AddTwo
)
{
std
::
shared_ptr
<
f
::
OperatorBase
>
add_op
(
f
::
OpRegistry
::
CreateOp
(
"add_two"
,
{
"x"
,
"y"
},
{
"out"
},
{}));
std
::
shared_ptr
<
f
::
OperatorBase
>
add_op
(
f
::
OpRegistry
::
CreateOp
(
"add_two"
,
{{
"X"
,
{
"x"
}},
{
"Y"
,
{
"y"
}}},
{{
"Out"
,
{
"out"
}}
},
{}));
std
::
shared_ptr
<
f
::
OperatorBase
>
grad_add_op
=
f
::
OpRegistry
::
CreateGradOp
(
*
add_op
);
EXPECT_EQ
(
static_cast
<
int
>
(
grad_add_op
->
inputs_
.
size
()),
4
);
...
...
@@ -70,8 +70,10 @@ TEST(GradOpBuilder, MutiInOut) {
f
::
AttributeMap
attrs
{{
"input_format"
,
std
::
vector
<
int
>
{
0
,
1
,
4
,
5
}},
{
"output_format"
,
std
::
vector
<
int
>
{
0
,
1
,
3
}}};
std
::
shared_ptr
<
f
::
OperatorBase
>
test_op
(
f
::
OpRegistry
::
CreateOp
(
"mult_io"
,
{
"in1"
,
"in2_1"
,
"in2_2"
,
"in2_3"
,
"in3"
},
{
"out1"
,
"out2_1"
,
"out2_2"
},
attrs
));
"mult_io"
,
{{
"In1"
,
{
"in1"
}},
{
"In2_mult"
,
{
"in2_1"
,
"in2_2"
,
"in2_3"
}},
{
"In3"
,
{
"in3"
}}},
{{
"Out1"
,
{
"Out2_mult"
}},
{
"Out2"
,
{
"out2_1"
,
"out2_2"
}}},
attrs
));
std
::
shared_ptr
<
f
::
OperatorBase
>
grad_test_op
=
f
::
OpRegistry
::
CreateGradOp
(
*
test_op
);
...
...
@@ -104,8 +106,10 @@ TEST(GradOpBuilder, IOIgnoredInGradient) {
f
::
AttributeMap
attrs
{{
"input_format"
,
std
::
vector
<
int
>
{
0
,
1
,
3
,
5
}},
{
"output_format"
,
std
::
vector
<
int
>
{
0
,
2
,
3
}}};
std
::
shared_ptr
<
f
::
OperatorBase
>
test_op
(
f
::
OpRegistry
::
CreateOp
(
"io_ignored"
,
{
"in1"
,
"in2_1"
,
"in2_2"
,
"in3_1"
,
"in3_2"
},
{
"out1_1"
,
"out1_2"
,
"out2"
},
attrs
));
"io_ignored"
,
{{
"In1"
,
{
"in1"
}},
{
"In2_mult"
,
{
"in2_1"
,
"in2_2"
}},
{
"In3_mult"
,
{
"in3_1"
,
"in3_2"
}}},
{{
"Out1_mult"
,
{
"out1_1"
,
"out1_2"
}},
{
"Out2"
,
{
"out2"
}}},
attrs
));
std
::
shared_ptr
<
f
::
OperatorBase
>
grad_test_op
=
f
::
OpRegistry
::
CreateGradOp
(
*
test_op
);
...
...
paddle/framework/op_registry_test.cc
浏览文件 @
dba618c0
...
...
@@ -57,8 +57,13 @@ REGISTER_OP(my_test_op, paddle::framework::MyTestOp,
TEST
(
OpRegistry
,
CreateOp
)
{
paddle
::
framework
::
OpDesc
op_desc
;
op_desc
.
set_type
(
"cos_sim"
);
op_desc
.
add_inputs
(
"aa"
);
op_desc
.
add_outputs
(
"bb"
);
auto
input
=
op_desc
.
add_inputs
();
input
->
set_op_proto_name
(
"input"
);
*
input
->
mutable_var_names
()
->
Add
()
=
"aa"
;
auto
output
=
op_desc
.
add_outputs
();
output
->
set_op_proto_name
(
"output"
);
*
output
->
mutable_var_names
()
->
Add
()
=
"bb"
;
float
scale
=
3.3
;
auto
attr
=
op_desc
.
mutable_attrs
()
->
Add
();
...
...
@@ -78,8 +83,13 @@ TEST(OpRegistry, CreateOp) {
TEST
(
OpRegistry
,
IllegalAttr
)
{
paddle
::
framework
::
OpDesc
op_desc
;
op_desc
.
set_type
(
"cos_sim"
);
op_desc
.
add_inputs
(
"aa"
);
op_desc
.
add_outputs
(
"bb"
);
auto
input
=
op_desc
.
add_inputs
();
input
->
set_op_proto_name
(
"input"
);
*
input
->
mutable_var_names
()
->
Add
()
=
"aa"
;
auto
output
=
op_desc
.
add_outputs
();
output
->
set_op_proto_name
(
"output"
);
*
output
->
mutable_var_names
()
->
Add
()
=
"bb"
;
auto
attr
=
op_desc
.
mutable_attrs
()
->
Add
();
attr
->
set_name
(
"scale"
);
...
...
@@ -103,8 +113,13 @@ TEST(OpRegistry, IllegalAttr) {
TEST
(
OpRegistry
,
DefaultValue
)
{
paddle
::
framework
::
OpDesc
op_desc
;
op_desc
.
set_type
(
"cos_sim"
);
op_desc
.
add_inputs
(
"aa"
);
op_desc
.
add_outputs
(
"bb"
);
auto
input
=
op_desc
.
add_inputs
();
input
->
set_op_proto_name
(
"input"
);
*
input
->
mutable_var_names
()
->
Add
()
=
"aa"
;
auto
output
=
op_desc
.
add_outputs
();
output
->
set_op_proto_name
(
"output"
);
*
output
->
mutable_var_names
()
->
Add
()
=
"bb"
;
ASSERT_TRUE
(
op_desc
.
IsInitialized
());
...
...
@@ -127,8 +142,13 @@ static void SetInputFormat(paddle::framework::OpDesc* desc) {
TEST
(
OpRegistry
,
CustomChecker
)
{
paddle
::
framework
::
OpDesc
op_desc
;
op_desc
.
set_type
(
"my_test_op"
);
op_desc
.
add_inputs
(
"ii"
);
op_desc
.
add_outputs
(
"oo"
);
auto
input
=
op_desc
.
add_inputs
();
input
->
set_op_proto_name
(
"input"
);
*
input
->
mutable_var_names
()
->
Add
()
=
"ii"
;
auto
output
=
op_desc
.
add_outputs
();
output
->
set_op_proto_name
(
"output"
);
*
output
->
mutable_var_names
()
->
Add
()
=
"oo"
;
SetInputFormat
(
&
op_desc
);
// attr 'test_attr' is not set
...
...
paddle/framework/operator_test.cc
浏览文件 @
dba618c0
...
...
@@ -27,12 +27,12 @@ class OpWithoutKernelTest : public OperatorBase {
void
InferShape
(
const
Scope
&
scope
)
const
override
{}
void
Run
(
const
Scope
&
scope
,
const
platform
::
DeviceContext
&
dev_ctx
)
const
override
{
op_run_num
++
;
ASSERT_EQ
(
(
int
)
inputs_
.
size
(
),
1
);
ASSERT_EQ
(
(
int
)
outputs_
.
size
(
),
1
);
ASSERT_EQ
(
scope
.
FindVar
(
inputs_
[
0
]),
nullptr
);
++
op_run_num
;
ASSERT_EQ
(
static_cast
<
int
>
(
inputs_
.
size
()
),
1
);
ASSERT_EQ
(
static_cast
<
int
>
(
outputs_
.
size
()
),
1
);
ASSERT_EQ
(
scope
.
FindVar
(
inputs_
.
at
(
"input"
)
[
0
]),
nullptr
);
ASSERT_EQ
(
x
,
1
);
ASSERT_NE
(
scope
.
FindVar
(
outputs_
[
0
]),
nullptr
);
ASSERT_NE
(
scope
.
FindVar
(
outputs_
.
at
(
"output"
)
[
0
]),
nullptr
);
}
public:
...
...
@@ -60,8 +60,13 @@ REGISTER_OP(test_operator, paddle::framework::OpWithoutKernelTest,
TEST
(
OperatorBase
,
all
)
{
paddle
::
framework
::
OpDesc
op_desc
;
op_desc
.
set_type
(
"test_operator"
);
*
op_desc
.
mutable_inputs
()
->
Add
()
=
"IN1"
;
*
op_desc
.
mutable_outputs
()
->
Add
()
=
"OUT1"
;
auto
*
ipt
=
op_desc
.
mutable_inputs
()
->
Add
();
*
ipt
->
mutable_var_names
()
->
Add
()
=
"IN1"
;
ipt
->
set_op_proto_name
(
"input"
);
auto
*
output
=
op_desc
.
mutable_outputs
()
->
Add
();
*
output
->
mutable_var_names
()
->
Add
()
=
"OUT1"
;
output
->
set_op_proto_name
(
"output"
);
auto
attr
=
op_desc
.
mutable_attrs
()
->
Add
();
attr
->
set_name
(
"scale"
);
attr
->
set_type
(
paddle
::
framework
::
AttrType
::
FLOAT
);
...
...
@@ -113,24 +118,6 @@ class CPUKernelTest : public OpKernel {
}
};
// multiple inputs test
class
OperatorMultiInputsTest
:
public
OperatorBase
{
public:
void
Init
()
override
{
x
=
1
;
}
void
InferShape
(
const
Scope
&
scope
)
const
override
{}
void
Run
(
const
Scope
&
scope
,
const
platform
::
DeviceContext
&
dev_ctx
)
const
override
{
ASSERT_EQ
(
scope
.
FindVar
(
inputs_
[
0
]),
nullptr
);
ASSERT_EQ
(
x
,
1
);
ASSERT_NE
(
scope
.
FindVar
(
outputs_
[
0
]),
nullptr
);
ASSERT_EQ
(
Input
(
"x"
),
"IN1"
);
ASSERT_EQ
(
Input
(
"y"
),
"OUT1"
);
}
public:
float
x
=
0
;
};
class
OpKernelTestMultiInputsProtoAndCheckerMaker
:
public
OpProtoAndCheckerMaker
{
public:
...
...
@@ -196,8 +183,14 @@ REGISTER_OP_CPU_KERNEL(op_with_kernel,
TEST
(
OpKernel
,
all
)
{
paddle
::
framework
::
OpDesc
op_desc
;
op_desc
.
set_type
(
"op_with_kernel"
);
*
op_desc
.
mutable_inputs
()
->
Add
()
=
"IN1"
;
*
op_desc
.
mutable_outputs
()
->
Add
()
=
"OUT1"
;
auto
*
ipt
=
op_desc
.
mutable_inputs
()
->
Add
();
*
ipt
->
mutable_var_names
()
->
Add
()
=
"IN1"
;
ipt
->
set_op_proto_name
(
"input"
);
auto
*
output
=
op_desc
.
mutable_outputs
()
->
Add
();
*
output
->
mutable_var_names
()
->
Add
()
=
"OUT1"
;
output
->
set_op_proto_name
(
"output"
);
auto
attr
=
op_desc
.
mutable_attrs
()
->
Add
();
attr
->
set_name
(
"scale"
);
attr
->
set_type
(
paddle
::
framework
::
AttrType
::
FLOAT
);
...
...
@@ -223,12 +216,19 @@ TEST(OpKernel, multi_inputs) {
OpDesc
op_desc
;
op_desc
.
set_type
(
"op_multi_inputs_with_kernel"
);
*
op_desc
.
mutable_inputs
()
->
Add
()
=
"x0"
;
*
op_desc
.
mutable_inputs
()
->
Add
()
=
"x1"
;
*
op_desc
.
mutable_inputs
()
->
Add
()
=
"x2"
;
*
op_desc
.
mutable_inputs
()
->
Add
()
=
"k0"
;
*
op_desc
.
mutable_outputs
()
->
Add
()
=
"y0"
;
*
op_desc
.
mutable_outputs
()
->
Add
()
=
"y1"
;
auto
x
=
op_desc
.
mutable_inputs
()
->
Add
();
x
->
set_op_proto_name
(
"xs"
);
*
x
->
mutable_var_names
()
->
Add
()
=
"x0"
;
*
x
->
mutable_var_names
()
->
Add
()
=
"x1"
;
*
x
->
mutable_var_names
()
->
Add
()
=
"x2"
;
auto
k
=
op_desc
.
mutable_inputs
()
->
Add
();
k
->
set_op_proto_name
(
"k"
);
*
k
->
mutable_var_names
()
->
Add
()
=
"k0"
;
auto
y
=
op_desc
.
mutable_outputs
()
->
Add
();
y
->
set_op_proto_name
(
"ys"
);
*
y
->
mutable_var_names
()
->
Add
()
=
"y0"
;
*
y
->
mutable_var_names
()
->
Add
()
=
"y1"
;
auto
attr
=
op_desc
.
mutable_attrs
()
->
Add
();
attr
->
set_name
(
"scale"
);
attr
->
set_type
(
paddle
::
framework
::
AttrType
::
FLOAT
);
...
...
paddle/framework/pybind.cc
浏览文件 @
dba618c0
...
...
@@ -53,9 +53,10 @@ void ExposeOperator(ClassType &m) {
return
op
.
type_
;
})
.
def
(
"outputs"
,
[](
const
typename
ClassType
::
type
&
op
)
->
std
::
vector
<
std
::
string
>
{
return
op
.
outputs_
;
})
[](
const
typename
ClassType
::
type
&
op
)
->
std
::
unordered_map
<
std
::
string
,
std
::
vector
<
std
::
string
>>
{
return
op
.
outputs_
;
})
.
def
(
"__str__"
,
&
ClassType
::
type
::
DebugString
);
}
...
...
paddle/operators/fc_op.cc
浏览文件 @
dba618c0
...
...
@@ -22,19 +22,19 @@ class FullyConnectedOp : public NetOp {
void
Init
()
override
{
AddOp
(
OpRegistry
::
CreateOp
(
"mul"
,
{
Input
(
"X"
),
Input
(
"W"
)
,
{
"X"
,
{
Input
(
"X"
)}},
{
"Y"
,
{
Input
(
"W"
)}}
,
},
{
Output
(
"before_act"
)
},
{}));
{
{
"Out"
,
{
Output
(
"before_act"
)}}
},
{}));
auto
b
=
Input
(
"b"
);
if
(
b
!=
framework
::
kEmptyVarName
)
{
AddOp
(
OpRegistry
::
CreateOp
(
"rowwise_add"
,
{
Output
(
"before_act"
),
Input
(
"b"
)
},
{
Output
(
"before_act"
)
},
{}));
AddOp
(
OpRegistry
::
CreateOp
(
"rowwise_add"
,
{{
"X"
,
{
Output
(
"before_act"
)}},
{
"b"
,
{
Input
(
"b"
)}}
},
{{
"Out"
,
{
Output
(
"before_act"
)}}
},
{}));
}
auto
activation
=
GetAttr
<
std
::
string
>
(
"activation"
);
AddOp
(
OpRegistry
::
CreateOp
(
activation
,
{
Output
(
"before_act"
)
},
{
Output
(
"Y"
)
},
{}));
AddOp
(
OpRegistry
::
CreateOp
(
activation
,
{
{
"X"
,
{
Output
(
"before_act"
)}}
},
{
{
"Out"
,
{
Output
(
"Out"
)}}
},
{}));
CompleteAddOp
(
false
);
}
};
...
...
@@ -47,7 +47,7 @@ class FullyConnectedOpMaker : public OpProtoAndCheckerMaker {
AddInput
(
"W"
,
"the weight of fc operator"
);
AddInput
(
"b"
,
"the bias of fc operator"
);
AddOutput
(
"
Y
"
,
"the output of fc operator"
);
AddOutput
(
"
Out
"
,
"the output of fc operator"
);
AddOutput
(
"before_act"
,
"the before activation output of fc operator"
)
.
SetTemporary
();
AddAttr
<
std
::
string
>
(
"activation"
,
"The activation key for fc layer"
)
...
...
paddle/operators/net_op_test.cc
浏览文件 @
dba618c0
...
...
@@ -47,23 +47,24 @@ TEST(OpKernel, all) {
ASSERT_NE
(
net
,
nullptr
);
auto
op1
=
std
::
make_shared
<
TestOp
>
();
op1
->
inputs_
=
{
"x"
,
"w1"
,
"b1"
};
op1
->
outputs_
=
{
"y"
};
op1
->
inputs_
=
{
{
"X"
,
{
"x"
}},
{
"W"
,
{
"w1"
}},
{
"b"
,
{
"b1"
}}
};
op1
->
outputs_
=
{
{
"Out"
,
{
"y"
}}
};
net
->
AddOp
(
op1
);
auto
op2
=
std
::
make_shared
<
TestOp
>
();
op2
->
inputs_
=
{
"y"
,
"w2"
,
"b2"
};
op2
->
outputs_
=
{
"z"
};
op2
->
inputs_
=
{
{
"X"
,
{
"y"
}},
{
"W"
,
{
"w2"
}},
{
"b"
,
{
"b2"
}}
};
op2
->
outputs_
=
{
{
"Out"
,
{
"z"
}}
};
net
->
AddOp
(
op2
);
net
->
CompleteAddOp
();
AssertSameVectorWithoutOrder
({
"x"
,
"w1"
,
"b1"
,
"w2"
,
"b2"
},
net
->
inputs_
);
AssertSameVectorWithoutOrder
({
"y"
,
"z"
},
net
->
outputs_
);
AssertSameVectorWithoutOrder
({
"x"
,
"w1"
,
"b1"
,
"w2"
,
"b2"
},
net
->
inputs_
.
at
(
"__all__"
));
AssertSameVectorWithoutOrder
({
"y"
,
"z"
},
net
->
outputs_
.
at
(
"__all__"
));
auto
tmp_idx_iter
=
net
->
attrs_
.
find
(
"temporary_index"
);
ASSERT_NE
(
net
->
attrs_
.
end
(),
tmp_idx_iter
);
auto
&
tmp_idx
=
boost
::
get
<
std
::
vector
<
int
>>
(
tmp_idx_iter
->
second
);
ASSERT_EQ
(
1UL
,
tmp_idx
.
size
());
ASSERT_EQ
(
"y"
,
net
->
outputs_
[
tmp_idx
[
0
]]);
ASSERT_EQ
(
"y"
,
net
->
outputs_
.
at
(
"__all__"
)
[
tmp_idx
[
0
]]);
Scope
scope
;
platform
::
CPUDeviceContext
dev_ctx
;
...
...
@@ -78,8 +79,8 @@ TEST(OpKernel, all) {
TEST
(
NetOp
,
insert_op
)
{
NetOp
net
;
auto
op1
=
std
::
make_shared
<
EmptyOp
>
();
op1
->
inputs_
=
{
"x"
,
"w1"
,
"b1"
};
op1
->
outputs_
=
{
"y"
};
op1
->
inputs_
=
{
{
"X"
,
{
"x"
}},
{
"W"
,
{
"w1"
}},
{
"b"
,
{
"b1"
}}
};
op1
->
outputs_
=
{
{
"Out"
,
{
"y"
}}
};
net
.
AddOp
(
op1
);
net
.
InsertOp
(
0
,
op1
);
ASSERT_EQ
(
2UL
,
net
.
ops_
.
size
());
...
...
paddle/operators/recurrent_op_test.cc
浏览文件 @
dba618c0
...
...
@@ -22,373 +22,382 @@
#include "paddle/framework/tensor.h"
#include "paddle/operators/net_op.h"
namespace
paddle
{
namespace
operators
{
using
framework
::
make_ddim
;
using
framework
::
DDim
;
class
RecurrentOpTest
:
public
::
testing
::
Test
{
protected:
virtual
void
SetUp
()
override
{
CreateGlobalVariables
();
CreateStepNet
();
CreateRNNOp
();
}
virtual
void
TearDown
()
override
{}
void
CreateGlobalVariables
()
{
// create input, and init content
LOG
(
INFO
)
<<
"create global variable x"
;
for
(
auto
inlink
:
std
::
vector
<
std
::
string
>
{
"x"
,
"x0"
,
"x1"
,
"h"
})
{
Variable
*
x
=
scope_
.
NewVar
(
inlink
);
DDim
dims
=
make_ddim
(
std
::
vector
<
int
>
{
10
/*sent size*/
,
20
/*batch size*/
,
30
/*input dim*/
});
x
->
GetMutable
<
Tensor
>
()
->
mutable_data
<
float
>
(
dims
,
platform
::
CPUPlace
());
}
// create output alias just for test
for
(
auto
inlink
:
std
::
vector
<
std
::
string
>
{
"h@alias"
})
{
Variable
*
x
=
scope_
.
NewVar
(
inlink
);
DDim
dims
=
make_ddim
(
std
::
vector
<
int
>
{
20
/*batch size*/
,
30
/*input dim*/
});
x
->
GetMutable
<
Tensor
>
()
->
mutable_data
<
float
>
(
dims
,
platform
::
CPUPlace
());
}
LOG
(
INFO
)
<<
"create global variable w"
;
Variable
*
w
=
scope_
.
NewVar
(
"rnn/w"
);
w
->
GetMutable
<
Tensor
>
()
->
mutable_data
<
float
>
(
make_ddim
(
std
::
vector
<
int
>
{
30
,
30
}),
platform
::
CPUPlace
());
for
(
auto
boot
:
std
::
vector
<
std
::
string
>
{
"h_boot"
})
{
LOG
(
INFO
)
<<
"create global variable "
<<
boot
;
Variable
*
h_boot
=
scope_
.
NewVar
(
boot
);
h_boot
->
GetMutable
<
Tensor
>
()
->
mutable_data
<
float
>
(
make_ddim
(
std
::
vector
<
int
>
{
20
/*batch size*/
,
30
/*input dim*/
}),
platform
::
CPUPlace
());
}
LOG
(
INFO
)
<<
"create variable step_scopes"
;
scope_
.
NewVar
(
"step_scopes"
);
LOG
(
INFO
)
<<
"create variable h"
;
scope_
.
NewVar
(
"h"
);
}
void
CreateRNNOp
()
{
framework
::
OpDesc
op_desc
;
op_desc
.
set_type
(
"recurrent_op"
);
// inlinks 0
op_desc
.
add_inputs
(
"x"
);
op_desc
.
add_inputs
(
"x0"
);
op_desc
.
add_inputs
(
"x1"
);
// boot_memories 3
op_desc
.
add_inputs
(
"h_boot"
);
// step net 5
op_desc
.
add_inputs
(
"step_net"
);
// outlinks 6
op_desc
.
add_outputs
(
"h"
);
// step scopes 7
op_desc
.
add_outputs
(
"step_scopes"
);
auto
_input_format
=
std
::
vector
<
int
>
{
0
,
// in_link
3
,
// memories
4
// step_net
};
auto
input_format
=
op_desc
.
add_attrs
();
input_format
->
set_name
(
"input_format"
);
input_format
->
set_type
(
paddle
::
framework
::
AttrType
::
INTS
);
for
(
auto
i
:
_input_format
)
{
input_format
->
add_ints
(
i
);
}
auto
output_format
=
op_desc
.
add_attrs
();
output_format
->
set_name
(
"output_format"
);
output_format
->
set_type
(
paddle
::
framework
::
AttrType
::
INTS
);
for
(
auto
i
:
std
::
vector
<
int
>
{
0
,
1
,
2
})
{
output_format
->
add_ints
(
i
);
}
auto
inlink_alias
=
op_desc
.
add_attrs
();
inlink_alias
->
set_name
(
"inlink_alias"
);
inlink_alias
->
set_type
(
paddle
::
framework
::
AttrType
::
STRINGS
);
auto
outlink_alias
=
op_desc
.
add_attrs
();
outlink_alias
->
set_name
(
"outlink_alias"
);
outlink_alias
->
set_type
(
paddle
::
framework
::
AttrType
::
STRINGS
);
auto
pre_memories
=
op_desc
.
add_attrs
();
pre_memories
->
set_name
(
"pre_memories"
);
pre_memories
->
set_type
(
paddle
::
framework
::
AttrType
::
STRINGS
);
auto
memories
=
op_desc
.
add_attrs
();
memories
->
set_name
(
"memories"
);
memories
->
set_type
(
paddle
::
framework
::
AttrType
::
STRINGS
);
// create inlink_alias
for
(
const
auto
&
item
:
std
::
vector
<
std
::
string
>
{
"x@alias"
,
"x0@alias"
,
"x1@alias"
})
{
inlink_alias
->
add_strings
(
item
);
}
// pre memories
for
(
const
auto
&
item
:
std
::
vector
<
std
::
string
>
{
"rnn/h@pre"
})
{
pre_memories
->
add_strings
(
item
);
}
// memories
for
(
const
auto
&
item
:
std
::
vector
<
std
::
string
>
{
"rnn/h"
})
{
memories
->
add_strings
(
item
);
}
// output alias
for
(
const
auto
&
item
:
std
::
vector
<
std
::
string
>
{
"h@alias"
})
{
outlink_alias
->
add_strings
(
item
);
}
rnn_op_
=
OpRegistry
::
CreateOp
(
op_desc
);
LOG
(
INFO
)
<<
"rnn_op finish init"
;
}
void
CreateStepNet
()
{
LOG
(
INFO
)
<<
"create variable step_net"
;
Variable
*
var
=
scope_
.
NewVar
(
"step_net"
);
auto
net
=
var
->
GetMutable
<
NetOp
>
();
net
->
AddOp
(
OpRegistry
::
CreateOp
(
"mul"
,
{
"rnn/h@pre"
,
"rnn/w"
},
{
"rnn/s"
},
{}));
net
->
AddOp
(
OpRegistry
::
CreateOp
(
"add_two"
,
{
"x@alias"
,
"rnn/s"
},
{
"rnn/h"
},
{}));
net
->
CompleteAddOp
();
}
// father scope
Scope
scope_
;
std
::
shared_ptr
<
OperatorBase
>
rnn_op_
;
};
TEST_F
(
RecurrentOpTest
,
Run
)
{
platform
::
CPUDeviceContext
ctx
;
rnn_op_
->
InferShape
(
scope_
);
rnn_op_
->
Run
(
scope_
,
ctx
);
}
class
RecurrentGradientAlgorithmTest
:
public
::
testing
::
Test
{
protected:
virtual
void
SetUp
()
override
{
CreateGlobalVariables
();
CreateStepScopes
();
CreateStepNet
();
CreateRNNGradientAlgorithm
();
// segment inputs
SegmentInputs
();
// link forward memories
LinkeMemories
();
}
virtual
void
TearDown
()
override
{}
void
CreateGlobalVariables
()
{
// inputs: x
LOG
(
INFO
)
<<
"create global variable x"
;
Variable
*
x
=
scope_
.
NewVar
(
"x"
);
DDim
dims
=
make_ddim
({
10
/*sent size*/
,
20
/*batch size*/
,
30
/*input dim*/
});
x
->
GetMutable
<
Tensor
>
()
->
mutable_data
<
float
>
(
dims
,
platform
::
CPUPlace
());
// inputs: h_boot
LOG
(
INFO
)
<<
"create global variable h_boot"
;
Variable
*
h_boot
=
scope_
.
NewVar
(
"h_boot"
);
h_boot
->
GetMutable
<
Tensor
>
()
->
mutable_data
<
float
>
(
make_ddim
({
20
/*batch size*/
,
30
/*input dim*/
}),
platform
::
CPUPlace
());
// inputs: w
LOG
(
INFO
)
<<
"create global variable w"
;
Variable
*
w
=
scope_
.
NewVar
(
"rnn/w"
);
w
->
GetMutable
<
Tensor
>
()
->
mutable_data
<
float
>
(
make_ddim
({
30
,
30
}),
platform
::
CPUPlace
());
// inputs: h_grad
LOG
(
INFO
)
<<
"create variable h_grad"
;
Variable
*
dh
=
scope_
.
NewVar
(
"h_grad"
);
dh
->
GetMutable
<
Tensor
>
()
->
mutable_data
<
float
>
(
make_ddim
({
10
,
20
,
30
}),
platform
::
CPUPlace
());
// inputs: step_scopes
LOG
(
INFO
)
<<
"create variable step_scopes"
;
scope_
.
NewVar
(
"step_scopes"
);
// inputs: step_net
LOG
(
INFO
)
<<
"create variable step_net"
;
scope_
.
NewVar
(
"step_net"
);
// outputs: w_grad
LOG
(
INFO
)
<<
"create global variable w_grad"
;
scope_
.
NewVar
(
"rnn/w_grad"
);
// outputs: x_grad
LOG
(
INFO
)
<<
"create global variable x_grad"
;
scope_
.
NewVar
(
"x_grad"
);
// outputs: h_boot_grad
LOG
(
INFO
)
<<
"create global variable h_boot_grad"
;
scope_
.
NewVar
(
"h_boot_grad"
);
}
void
CreateStepScopes
()
{
auto
step_scopes
=
scope_
.
FindVar
(
"step_scopes"
)
->
GetMutable
<
std
::
vector
<
Scope
*>>
();
for
(
int
i
=
0
;
i
<
10
;
++
i
)
{
auto
&
scope
=
scope_
.
NewScope
();
auto
pre_t
=
scope
.
NewVar
(
"rnn/pre_h"
)
->
GetMutable
<
Tensor
>
();
pre_t
->
mutable_data
<
float
>
({
20
,
30
},
platform
::
CPUPlace
());
auto
tensor
=
scope
.
NewVar
(
"rnn/h"
)
->
GetMutable
<
Tensor
>
();
tensor
->
mutable_data
<
float
>
({
20
,
30
},
platform
::
CPUPlace
());
// for unit test of ConcatOutputs
auto
xg
=
scope
.
NewVar
(
"rnn/x_grad"
)
->
GetMutable
<
Tensor
>
();
xg
->
mutable_data
<
float
>
({
20
,
30
},
platform
::
CPUPlace
());
step_scopes
->
emplace_back
(
&
scope
);
}
// last time step
auto
g
=
(
*
step_scopes
)[
9
]
->
NewVar
(
"rnn/h_pre_grad"
)
->
GetMutable
<
Tensor
>
();
g
->
mutable_data
<
float
>
({
20
,
30
},
platform
::
CPUPlace
());
}
void
CreateRNNGradientAlgorithm
()
{
std
::
unique_ptr
<
rnn
::
Argument
>
arg
(
new
rnn
::
Argument
());
arg
->
step_net
=
"step_net"
;
arg
->
step_scopes
=
"step_scopes"
;
rnn
::
Link
inlink
;
inlink
.
external
=
"h_grad"
;
inlink
.
internal
=
"rnn/h_grad"
;
arg
->
inlinks
=
std
::
vector
<
rnn
::
Link
>
{
inlink
};
rnn
::
Link
outlink
;
outlink
.
external
=
"x_grad"
;
outlink
.
internal
=
"rnn/x_grad"
;
arg
->
outlinks
=
std
::
vector
<
rnn
::
Link
>
{
outlink
};
rnn
::
MemoryAttr
mem_attr
;
mem_attr
.
pre_var
=
"rnn/h_pre_grad"
;
mem_attr
.
var
=
"rnn/h_grad"
;
mem_attr
.
boot_var
=
"h_boot_grad"
;
arg
->
memories
=
std
::
vector
<
rnn
::
MemoryAttr
>
{
mem_attr
};
rnn_grad_algo_
.
Init
(
std
::
move
(
arg
));
}
void
CreateStepNet
()
{
LOG
(
INFO
)
<<
"create variable step_net"
;
Variable
*
var
=
scope_
.
NewVar
(
"step_net"
);
auto
net
=
var
->
GetMutable
<
NetOp
>
();
net
->
AddOp
(
OpRegistry
::
CreateOp
(
"mul"
,
{
"rnn/h_pre"
,
"rnn/w"
,
"rnn/s_grad"
},
{
"rnn/h_pre_grad"
,
"rnn/w_grad"
},
{}));
net
->
AddOp
(
OpRegistry
::
CreateOp
(
"add_two"
,
{
"rnn/h_grad"
},
{
"rnn/x_grad"
,
"rnn/s_grad"
},
{}));
net
->
CompleteAddOp
();
}
void
SegmentInputs
()
{
LOG
(
INFO
)
<<
"segment inputs"
;
std
::
vector
<
std
::
string
>
inlinks
=
{
"x"
};
std
::
vector
<
std
::
string
>
inlinks_alias
=
{
"rnn/x"
};
rnn
::
Link
inlink
;
inlink
.
external
=
"x"
;
inlink
.
internal
=
"rnn/x"
;
auto
step_scopes
=
scope_
.
FindVar
(
"step_scopes"
)
->
GetMutable
<
std
::
vector
<
Scope
*>>
();
rnn
::
SegmentInputs
(
*
step_scopes
,
std
::
vector
<
rnn
::
Link
>
{
inlink
},
10
,
true
/*infer_shape_mode*/
);
}
void
LinkeMemories
()
{
LOG
(
INFO
)
<<
"link memories"
;
rnn
::
MemoryAttr
mem_attr
;
mem_attr
.
pre_var
=
"rnn/h_pre"
;
mem_attr
.
var
=
"rnn/h"
;
mem_attr
.
boot_var
=
"boot_h"
;
std
::
vector
<
rnn
::
MemoryAttr
>
memories
;
memories
.
push_back
(
mem_attr
);
auto
step_scopes
=
scope_
.
FindVar
(
"step_scopes"
)
->
GetMutable
<
std
::
vector
<
Scope
*>>
();
for
(
int
i
=
1
;
i
<
10
;
++
i
)
{
rnn
::
LinkMemories
(
*
step_scopes
,
memories
,
i
,
-
1
,
true
/*infer_shape_mode*/
);
}
}
Scope
scope_
;
RecurrentGradientAlgorithm
rnn_grad_algo_
;
};
// TEST_F(RecurrentGradientAlgorithmTest, Run) {
// platform::CPUDeviceContext ctx;
// rnn_grad_algo_.Run(scope_, ctx);
// }
}
// namespace operators
}
// namespace paddle
TEST
(
RecurrentOp
,
LinkMemories
)
{
using
namespace
paddle
::
framework
;
using
namespace
paddle
::
platform
;
using
namespace
paddle
::
operators
;
// create and init step scopes
size_t
len
=
10
;
std
::
vector
<
Scope
*>
step_scopes
;
for
(
size_t
i
=
0
;
i
<
len
;
++
i
)
{
auto
scope
=
new
Scope
();
scope
->
NewVar
(
"pre_h"
);
auto
tensor
=
scope
->
NewVar
(
"h"
)
->
GetMutable
<
Tensor
>
();
float
*
data
=
tensor
->
mutable_data
<
float
>
({
15
,
20
},
CPUPlace
());
for
(
size_t
j
=
0
;
j
<
15
*
20
;
++
j
)
{
data
[
j
]
=
rand
()
*
(
1.
/
(
double
)
RAND_MAX
);
}
step_scopes
.
push_back
(
scope
);
}
// create MemoryAttr
rnn
::
MemoryAttr
mem_attr
;
mem_attr
.
pre_var
=
"pre_h"
;
mem_attr
.
var
=
"h"
;
mem_attr
.
boot_var
=
"boot_h"
;
std
::
vector
<
rnn
::
MemoryAttr
>
memories
;
memories
.
push_back
(
mem_attr
);
for
(
size_t
i
=
1
;
i
<
len
;
++
i
)
{
rnn
::
LinkMemories
(
step_scopes
,
memories
,
i
,
-
1
,
false
/*infer_shape_mode*/
);
}
// check
for
(
size_t
i
=
0
;
i
<
len
-
1
;
++
i
)
{
const
float
*
a
=
step_scopes
[
i
]
->
FindVar
(
"h"
)
->
GetMutable
<
Tensor
>
()
->
data
<
float
>
();
const
float
*
b
=
step_scopes
[
i
+
1
]
->
FindVar
(
"pre_h"
)
->
GetMutable
<
Tensor
>
()
->
data
<
float
>
();
for
(
size_t
j
=
0
;
j
<
15
*
20
;
++
j
)
{
ASSERT_FLOAT_EQ
(
a
[
j
],
b
[
j
]);
}
}
for
(
int
i
=
len
-
2
;
i
>=
0
;
--
i
)
{
rnn
::
LinkMemories
(
step_scopes
,
memories
,
i
,
1
,
false
/*infer_shape_mode*/
);
}
// check
for
(
int
i
=
len
-
2
;
i
>=
0
;
--
i
)
{
const
float
*
a
=
step_scopes
[
i
]
->
FindVar
(
"pre_h"
)
->
GetMutable
<
Tensor
>
()
->
data
<
float
>
();
const
float
*
b
=
step_scopes
[
i
+
1
]
->
FindVar
(
"h"
)
->
GetMutable
<
Tensor
>
()
->
data
<
float
>
();
for
(
size_t
j
=
0
;
j
<
15
*
20
;
++
j
)
{
ASSERT_FLOAT_EQ
(
a
[
j
],
b
[
j
]);
}
}
for
(
auto
s
:
step_scopes
)
{
delete
s
;
}
}
USE_OP
(
add_two
);
USE_OP
(
mul
);
USE_OP_WITHOUT_KERNEL
(
recurrent_op
);
TEST
(
rnn
,
bad
)
{
ASSERT_TRUE
(
false
);
}
// namespace paddle {
// namespace operators {
//
// using framework::make_ddim;
// using framework::DDim;
//
// class RecurrentOpTest : public ::testing::Test {
// protected:
// virtual void SetUp() override {
// CreateGlobalVariables();
// CreateStepNet();
// CreateRNNOp();
// }
//
// virtual void TearDown() override {}
//
// void CreateGlobalVariables() {
// // create input, and init content
// LOG(INFO) << "create global variable x";
// for (auto inlink : std::vector<std::string>{"x", "x0", "x1", "h"}) {
// Variable* x = scope_.NewVar(inlink);
// DDim dims = make_ddim(std::vector<int>{
// 10 /*sent size*/, 20 /*batch size*/, 30 /*input dim*/});
// x->GetMutable<Tensor>()->mutable_data<float>(dims,
// platform::CPUPlace());
// }
// // create output alias just for test
// for (auto inlink : std::vector<std::string>{"h@alias"}) {
// Variable* x = scope_.NewVar(inlink);
// DDim dims =
// make_ddim(std::vector<int>{20 /*batch size*/, 30 /*input dim*/});
// x->GetMutable<Tensor>()->mutable_data<float>(dims,
// platform::CPUPlace());
// }
//
// LOG(INFO) << "create global variable w";
// Variable* w = scope_.NewVar("rnn/w");
// w->GetMutable<Tensor>()->mutable_data<float>(
// make_ddim(std::vector<int>{30, 30}), platform::CPUPlace());
//
// for (auto boot : std::vector<std::string>{"h_boot"}) {
// LOG(INFO) << "create global variable " << boot;
// Variable* h_boot = scope_.NewVar(boot);
// h_boot->GetMutable<Tensor>()->mutable_data<float>(
// make_ddim(std::vector<int>{20 /*batch size*/, 30 /*input dim*/}),
// platform::CPUPlace());
// }
//
// LOG(INFO) << "create variable step_scopes";
// scope_.NewVar("step_scopes");
//
// LOG(INFO) << "create variable h";
// scope_.NewVar("h");
// }
//
// void CreateRNNOp() {
// framework::OpDesc op_desc;
//
// op_desc.set_type("recurrent_op");
// // inlinks 0
// op_desc.add_inputs("x");
// op_desc.add_inputs("x0");
// op_desc.add_inputs("x1");
// // boot_memories 3
// op_desc.add_inputs("h_boot");
// // step net 5
// op_desc.add_inputs("step_net");
// // outlinks 6
// op_desc.add_outputs("h");
// // step scopes 7
// op_desc.add_outputs("step_scopes");
//
// auto _input_format = std::vector<int>{
// 0, // in_link
// 3, // memories
// 4 // step_net
// };
// auto input_format = op_desc.add_attrs();
// input_format->set_name("input_format");
// input_format->set_type(paddle::framework::AttrType::INTS);
// for (auto i : _input_format) {
// input_format->add_ints(i);
// }
//
// auto output_format = op_desc.add_attrs();
// output_format->set_name("output_format");
// output_format->set_type(paddle::framework::AttrType::INTS);
// for (auto i : std::vector<int>{0, 1, 2}) {
// output_format->add_ints(i);
// }
//
// auto inlink_alias = op_desc.add_attrs();
// inlink_alias->set_name("inlink_alias");
// inlink_alias->set_type(paddle::framework::AttrType::STRINGS);
//
// auto outlink_alias = op_desc.add_attrs();
// outlink_alias->set_name("outlink_alias");
// outlink_alias->set_type(paddle::framework::AttrType::STRINGS);
//
// auto pre_memories = op_desc.add_attrs();
// pre_memories->set_name("pre_memories");
// pre_memories->set_type(paddle::framework::AttrType::STRINGS);
//
// auto memories = op_desc.add_attrs();
// memories->set_name("memories");
// memories->set_type(paddle::framework::AttrType::STRINGS);
//
// // create inlink_alias
// for (const auto& item :
// std::vector<std::string>{"x@alias", "x0@alias", "x1@alias"}) {
// inlink_alias->add_strings(item);
// }
// // pre memories
// for (const auto& item : std::vector<std::string>{"rnn/h@pre"}) {
// pre_memories->add_strings(item);
// }
// // memories
// for (const auto& item : std::vector<std::string>{"rnn/h"}) {
// memories->add_strings(item);
// }
// // output alias
// for (const auto& item : std::vector<std::string>{"h@alias"}) {
// outlink_alias->add_strings(item);
// }
//
// rnn_op_ = OpRegistry::CreateOp(op_desc);
//
// LOG(INFO) << "rnn_op finish init";
// }
//
// void CreateStepNet() {
// LOG(INFO) << "create variable step_net";
// Variable* var = scope_.NewVar("step_net");
// auto net = var->GetMutable<NetOp>();
// net->AddOp(
// OpRegistry::CreateOp("mul", {"rnn/h@pre", "rnn/w"}, {"rnn/s"}, {}));
//
// net->AddOp(
// OpRegistry::CreateOp("add_two", {"x@alias", "rnn/s"}, {"rnn/h"}, {}));
// net->CompleteAddOp();
// }
//
// // father scope
// Scope scope_;
// std::shared_ptr<OperatorBase> rnn_op_;
//};
//
// TEST_F(RecurrentOpTest, Run) {
// platform::CPUDeviceContext ctx;
// rnn_op_->InferShape(scope_);
// rnn_op_->Run(scope_, ctx);
//}
//
// class RecurrentGradientAlgorithmTest : public ::testing::Test {
// protected:
// virtual void SetUp() override {
// CreateGlobalVariables();
// CreateStepScopes();
// CreateStepNet();
// CreateRNNGradientAlgorithm();
//
// // segment inputs
// SegmentInputs();
// // link forward memories
// LinkeMemories();
// }
//
// virtual void TearDown() override {}
//
// void CreateGlobalVariables() {
// // inputs: x
// LOG(INFO) << "create global variable x";
// Variable* x = scope_.NewVar("x");
// DDim dims =
// make_ddim({10 /*sent size*/, 20 /*batch size*/, 30 /*input dim*/});
// x->GetMutable<Tensor>()->mutable_data<float>(dims, platform::CPUPlace());
// // inputs: h_boot
// LOG(INFO) << "create global variable h_boot";
// Variable* h_boot = scope_.NewVar("h_boot");
// h_boot->GetMutable<Tensor>()->mutable_data<float>(
// make_ddim({20 /*batch size*/, 30 /*input dim*/}),
// platform::CPUPlace());
// // inputs: w
// LOG(INFO) << "create global variable w";
// Variable* w = scope_.NewVar("rnn/w");
// w->GetMutable<Tensor>()->mutable_data<float>(make_ddim({30, 30}),
// platform::CPUPlace());
// // inputs: h_grad
// LOG(INFO) << "create variable h_grad";
// Variable* dh = scope_.NewVar("h_grad");
// dh->GetMutable<Tensor>()->mutable_data<float>(make_ddim({10, 20, 30}),
// platform::CPUPlace());
// // inputs: step_scopes
// LOG(INFO) << "create variable step_scopes";
// scope_.NewVar("step_scopes");
// // inputs: step_net
// LOG(INFO) << "create variable step_net";
// scope_.NewVar("step_net");
// // outputs: w_grad
// LOG(INFO) << "create global variable w_grad";
// scope_.NewVar("rnn/w_grad");
// // outputs: x_grad
// LOG(INFO) << "create global variable x_grad";
// scope_.NewVar("x_grad");
// // outputs: h_boot_grad
// LOG(INFO) << "create global variable h_boot_grad";
// scope_.NewVar("h_boot_grad");
// }
//
// void CreateStepScopes() {
// auto step_scopes =
// scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
// for (int i = 0; i < 10; ++i) {
// auto& scope = scope_.NewScope();
// auto pre_t = scope.NewVar("rnn/pre_h")->GetMutable<Tensor>();
// pre_t->mutable_data<float>({20, 30}, platform::CPUPlace());
// auto tensor = scope.NewVar("rnn/h")->GetMutable<Tensor>();
// tensor->mutable_data<float>({20, 30}, platform::CPUPlace());
//
// // for unit test of ConcatOutputs
// auto xg = scope.NewVar("rnn/x_grad")->GetMutable<Tensor>();
// xg->mutable_data<float>({20, 30}, platform::CPUPlace());
//
// step_scopes->emplace_back(&scope);
// }
//
// // last time step
// auto g =
// (*step_scopes)[9]->NewVar("rnn/h_pre_grad")->GetMutable<Tensor>();
// g->mutable_data<float>({20, 30}, platform::CPUPlace());
// }
//
// void CreateRNNGradientAlgorithm() {
// std::unique_ptr<rnn::Argument> arg(new rnn::Argument());
// arg->step_net = "step_net";
// arg->step_scopes = "step_scopes";
// rnn::Link inlink;
// inlink.external = "h_grad";
// inlink.internal = "rnn/h_grad";
// arg->inlinks = std::vector<rnn::Link>{inlink};
//
// rnn::Link outlink;
// outlink.external = "x_grad";
// outlink.internal = "rnn/x_grad";
// arg->outlinks = std::vector<rnn::Link>{outlink};
//
// rnn::MemoryAttr mem_attr;
// mem_attr.pre_var = "rnn/h_pre_grad";
// mem_attr.var = "rnn/h_grad";
// mem_attr.boot_var = "h_boot_grad";
// arg->memories = std::vector<rnn::MemoryAttr>{mem_attr};
//
// rnn_grad_algo_.Init(std::move(arg));
// }
//
// void CreateStepNet() {
// LOG(INFO) << "create variable step_net";
// Variable* var = scope_.NewVar("step_net");
// auto net = var->GetMutable<NetOp>();
// net->AddOp(OpRegistry::CreateOp("mul", {"rnn/h_pre", "rnn/w",
// "rnn/s_grad"},
// {"rnn/h_pre_grad", "rnn/w_grad"}, {}));
//
// net->AddOp(OpRegistry::CreateOp("add_two", {"rnn/h_grad"},
// {"rnn/x_grad", "rnn/s_grad"}, {}));
// net->CompleteAddOp();
// }
//
// void SegmentInputs() {
// LOG(INFO) << "segment inputs";
// std::vector<std::string> inlinks = {"x"};
// std::vector<std::string> inlinks_alias = {"rnn/x"};
//
// rnn::Link inlink;
// inlink.external = "x";
// inlink.internal = "rnn/x";
// auto step_scopes =
// scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
// rnn::SegmentInputs(*step_scopes, std::vector<rnn::Link>{inlink}, 10,
// true /*infer_shape_mode*/);
// }
//
// void LinkeMemories() {
// LOG(INFO) << "link memories";
// rnn::MemoryAttr mem_attr;
// mem_attr.pre_var = "rnn/h_pre";
// mem_attr.var = "rnn/h";
// mem_attr.boot_var = "boot_h";
// std::vector<rnn::MemoryAttr> memories;
// memories.push_back(mem_attr);
// auto step_scopes =
// scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
// for (int i = 1; i < 10; ++i) {
// rnn::LinkMemories(*step_scopes, memories, i, -1,
// true /*infer_shape_mode*/);
// }
// }
//
// Scope scope_;
// RecurrentGradientAlgorithm rnn_grad_algo_;
//};
//
//// TEST_F(RecurrentGradientAlgorithmTest, Run) {
//// platform::CPUDeviceContext ctx;
//// rnn_grad_algo_.Run(scope_, ctx);
//// }
//
//} // namespace operators
//} // namespace paddle
//
// TEST(RecurrentOp, LinkMemories) {
// using namespace paddle::framework;
// using namespace paddle::platform;
// using namespace paddle::operators;
//
// // create and init step scopes
// size_t len = 10;
// std::vector<Scope*> step_scopes;
// for (size_t i = 0; i < len; ++i) {
// auto scope = new Scope();
// scope->NewVar("pre_h");
// auto tensor = scope->NewVar("h")->GetMutable<Tensor>();
// float* data = tensor->mutable_data<float>({15, 20}, CPUPlace());
// for (size_t j = 0; j < 15 * 20; ++j) {
// data[j] = rand() * (1. / (double)RAND_MAX);
// }
// step_scopes.push_back(scope);
// }
//
// // create MemoryAttr
// rnn::MemoryAttr mem_attr;
// mem_attr.pre_var = "pre_h";
// mem_attr.var = "h";
// mem_attr.boot_var = "boot_h";
// std::vector<rnn::MemoryAttr> memories;
// memories.push_back(mem_attr);
//
// for (size_t i = 1; i < len; ++i) {
// rnn::LinkMemories(step_scopes, memories, i, -1, false
// /*infer_shape_mode*/);
// }
// // check
// for (size_t i = 0; i < len - 1; ++i) {
// const float* a =
// step_scopes[i]->FindVar("h")->GetMutable<Tensor>()->data<float>();
// const float* b = step_scopes[i + 1]
// ->FindVar("pre_h")
// ->GetMutable<Tensor>()
// ->data<float>();
// for (size_t j = 0; j < 15 * 20; ++j) {
// ASSERT_FLOAT_EQ(a[j], b[j]);
// }
// }
//
// for (int i = len - 2; i >= 0; --i) {
// rnn::LinkMemories(step_scopes, memories, i, 1, false
// /*infer_shape_mode*/);
// }
// // check
// for (int i = len - 2; i >= 0; --i) {
// const float* a =
// step_scopes[i]->FindVar("pre_h")->GetMutable<Tensor>()->data<float>();
// const float* b =
// step_scopes[i + 1]->FindVar("h")->GetMutable<Tensor>()->data<float>();
// for (size_t j = 0; j < 15 * 20; ++j) {
// ASSERT_FLOAT_EQ(a[j], b[j]);
// }
// }
//
// for (auto s : step_scopes) {
// delete s;
// }
//}
//
// USE_OP(add_two);
// USE_OP(mul);
// USE_OP_WITHOUT_KERNEL(recurrent_op);
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录