Commit 7202f425

Authored on Aug 10, 2017 by qingqing01; committed via GitHub on Aug 10, 2017.

Merge branch 'refactorize_framework_proto' into feature/refactorize_framework_proto

Parents: 030f4302, 36709d05
Showing 6 changed files with 264 additions and 478 deletions (+264, -478).
paddle/framework/grad_op_builder.cc                  +14   -54
paddle/framework/grad_op_builder_test.cc             +19   -21
paddle/framework/op_registry_test.cc                  +0   -10
paddle/framework/operator_test.cc                     +2   -17
paddle/operators/recurrent_op_test.cc               +227  -376
python/paddle/v2/framework/tests/test_operator.py     +2    -0
paddle/framework/grad_op_builder.cc
```diff
@@ -18,59 +18,32 @@ permissions and limitations under the License. */
 namespace paddle {
 namespace framework {
 
-/**
 class OpRegistry;
 
 using VarIndexMap = std::unordered_map<std::string, int>;
 
 enum class OpArgType { IN, OUT };
 
-static std::vector<int>* GetOpFormat(OperatorBase* op, const OpArgType& type) {
-  std::string key = type == OpArgType::IN ? "input_format" : "output_format";
-  return op->attrs_.count(key)
-             ? &boost::get<std::vector<int>>(op->attrs_.at(key))
-             : nullptr;
-}
-
-static const std::vector<int>* GetOpFormat(const OperatorBase* op,
-                                           const OpArgType& type) {
-  std::string key = type == OpArgType::IN ? "input_format" : "output_format";
-  return op->attrs_.count(key)
-             ? &boost::get<std::vector<int>>(op->attrs_.at(key))
-             : nullptr;
-}
-
 static void TransOpArg(const OperatorBase* src_op, OperatorBase* dst_op,
                        const OpArgType& src_type, const OpArgType& dst_type,
-                       int& idx, bool is_grad) {
-  const std::vector<std::string>& src_inout =
+                       bool is_grad) {
+  const auto& src_inout =
       src_type == OpArgType::IN ? src_op->inputs_ : src_op->outputs_;
-  const std::vector<int>* src_format = GetOpFormat(src_op, src_type);
 
-  std::vector<std::string>& dst_inout =
+  auto& dst_inout =
       dst_type == OpArgType::IN ? dst_op->inputs_ : dst_op->outputs_;
-  std::vector<int>* dst_format = GetOpFormat(dst_op, dst_type);
   const OpProto& proto = OpRegistry::protos().at(src_op->type_);
   const auto& src_arg_list =
       src_type == OpArgType::IN ? proto.inputs() : proto.outputs();
 
   for (const auto& arg : src_arg_list) {
     std::string src_name = arg.name();
-    std::string dst_name = is_grad ? src_name + kGradVarSuffix : src_name;
-    (*dst_op->in_out_idxs_)[dst_name] = idx++;
-    int src_arg_idx = src_op->in_out_idxs_->at(src_name);
-    int src_begin =
-        src_format == nullptr ? src_arg_idx : src_format->at(src_arg_idx);
-    int src_end = src_format == nullptr ? src_arg_idx + 1
-                                        : src_format->at(src_arg_idx + 1);
-    for (int i = src_begin; i < src_end; ++i) {
-      std::string s =
-          is_grad ? src_inout[i] + kGradVarSuffix
-                  : (arg.ignore_gradient() ? kEmptyVarName : src_inout[i]);
-      dst_inout.emplace_back(s);
-    }
-    if (dst_format != nullptr) {
-      dst_format->push_back(dst_inout.size());
-    }
+    std::string dst_name = is_grad ? GradVarName(src_name) : src_name;
+    for (auto& var_name : src_inout.at(src_name)) {
+      std::string s = is_grad ? GradVarName(var_name)
+                              : (arg.no_gradient() ? kEmptyVarName : var_name);
+      dst_inout[dst_name].emplace_back(s);
+    }
   }
 }
 
@@ -80,25 +53,12 @@ OperatorBase* BuildGradOp(const OperatorBase* op) {
   OperatorBase* grad_op = OpRegistry::op_creators().at(grad_op_type)();
   grad_op->type_ = grad_op_type;
   grad_op->attrs_ = op->attrs_;
-  grad_op->attrs_.erase("input_format");
-  grad_op->attrs_.erase("output_format");
-  if (GetOpFormat(op, OpArgType::IN) != nullptr) {
-    grad_op->attrs_["output_format"] = std::vector<int>({0});
-  }
-  if (GetOpFormat(op, OpArgType::IN) != nullptr ||
-      GetOpFormat(op, OpArgType::OUT) != nullptr) {
-    grad_op->attrs_["input_format"] = std::vector<int>({0});
-  }
-  grad_op->in_out_idxs_.reset(new VarIndexMap());
-  int in_idx = 0;
-  int out_idx = 0;
-  TransOpArg(op, grad_op, OpArgType::IN, OpArgType::IN, in_idx, false);   // I
-  TransOpArg(op, grad_op, OpArgType::OUT, OpArgType::IN, in_idx, false);  // G
-  TransOpArg(op, grad_op, OpArgType::OUT, OpArgType::IN, in_idx, true);   // OG
-  TransOpArg(op, grad_op, OpArgType::IN, OpArgType::OUT, out_idx, true);  // IG
+  TransOpArg(op, grad_op, OpArgType::IN, OpArgType::IN, false);   // I
+  TransOpArg(op, grad_op, OpArgType::OUT, OpArgType::IN, false);  // O
+  TransOpArg(op, grad_op, OpArgType::OUT, OpArgType::IN, true);   // OG
+  TransOpArg(op, grad_op, OpArgType::IN, OpArgType::OUT, true);   // IG
   return grad_op;
 }
-**/
-
-OperatorBase* BuildGradOp(const OperatorBase* op) { return nullptr; }
 
 }  // namespace framework
 }  // namespace paddle
```
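The four TransOpArg calls assemble the gradient operator's arguments from the forward operator: the forward inputs (I), outputs (O), and output gradients (OG) become the gradient op's inputs, while the input gradients (IG) become its outputs, with gradient names derived by appending a suffix. A rough standalone sketch of that convention follows; a plain std::map and a local "@GRAD" helper stand in for OperatorBase::inputs_ and framework::GradVarName, so it is an illustration, not the framework's own types.

```cpp
// Standalone illustration of the I / O / OG / IG convention used by
// BuildGradOp. Plain std::map stands in for the op's named-argument maps.
#include <iostream>
#include <map>
#include <string>
#include <vector>

using VarMap = std::map<std::string, std::vector<std::string>>;

// Local stand-in for framework::GradVarName().
std::string GradName(const std::string& name) { return name + "@GRAD"; }

int main() {
  // Forward op resembling add_two: named inputs X, Y and output Out.
  VarMap fwd_inputs{{"X", {"x"}}, {"Y", {"y"}}};
  VarMap fwd_outputs{{"Out", {"out"}}};

  VarMap grad_inputs, grad_outputs;
  // I:  forward inputs are copied under their original names.
  for (const auto& kv : fwd_inputs) grad_inputs[kv.first] = kv.second;
  // O:  forward outputs also become inputs of the gradient op.
  for (const auto& kv : fwd_outputs) grad_inputs[kv.first] = kv.second;
  // OG: gradients of the forward outputs are additional inputs.
  for (const auto& kv : fwd_outputs)
    for (const auto& v : kv.second)
      grad_inputs[GradName(kv.first)].push_back(GradName(v));
  // IG: gradients of the forward inputs are the gradient op's outputs.
  for (const auto& kv : fwd_inputs)
    for (const auto& v : kv.second)
      grad_outputs[GradName(kv.first)].push_back(GradName(v));

  // grad_inputs now holds X, Y, Out, Out@GRAD and grad_outputs holds
  // X@GRAD, Y@GRAD -- the 4 inputs / 2 outputs checked in the test below.
  for (const auto& kv : grad_inputs)
    std::cout << "input  " << kv.first << " -> " << kv.second[0] << "\n";
  for (const auto& kv : grad_outputs)
    std::cout << "output " << kv.first << " -> " << kv.second[0] << "\n";
}
```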
paddle/framework/grad_op_builder_test.cc
```diff
@@ -51,14 +51,14 @@ TEST(GradOpBuilder, AddTwo) {
       "add_two", {{"X", {"x"}}, {"Y", {"y"}}}, {{"Out", {"out"}}}, {}));
   std::shared_ptr<f::OperatorBase> grad_add_op =
       f::OpRegistry::CreateGradOp(*add_op);
-  EXPECT_EQ(static_cast<int>(grad_add_op->inputs_.size()), 4);
-  EXPECT_EQ(static_cast<int>(grad_add_op->outputs_.size()), 2);
+  EXPECT_EQ(grad_add_op->inputs_.size(), 4UL);
+  EXPECT_EQ(grad_add_op->outputs_.size(), 2UL);
   EXPECT_EQ(grad_add_op->Input("X"), "x");
   EXPECT_EQ(grad_add_op->Input("Y"), "y");
   EXPECT_EQ(grad_add_op->Input("Out"), "out");
-  EXPECT_EQ(grad_add_op->Input("Out@GRAD"), "out@GRAD");
-  EXPECT_EQ(grad_add_op->Output("X@GRAD"), "x@GRAD");
-  EXPECT_EQ(grad_add_op->Output("Y@GRAD"), "y@GRAD");
+  EXPECT_EQ(grad_add_op->Input(f::GradVarName("Out")), f::GradVarName("out"));
+  EXPECT_EQ(grad_add_op->Output(f::GradVarName("X")), f::GradVarName("x"));
+  EXPECT_EQ(grad_add_op->Output(f::GradVarName("Y")), f::GradVarName("y"));
 }
 
 REGISTER_OP(mult_io, f::NOP, f::MutiInOutOpMaker);
@@ -67,17 +67,16 @@ REGISTER_OP(io_ignored, f::NOP, f::IOIgnoredOpMaker);
 REGISTER_GRADIENT_OP(io_ignored, io_ignored_grad, f::NOP);
 
 TEST(GradOpBuilder, MutiInOut) {
-  f::AttributeMap attrs{{"input_format", std::vector<int>{0, 1, 4, 5}},
-                        {"output_format", std::vector<int>{0, 1, 3}}};
   std::shared_ptr<f::OperatorBase> test_op(f::OpRegistry::CreateOp(
       "mult_io", {{"In1", {"in1"}},
                   {"In2_mult", {"in2_1", "in2_2", "in2_3"}},
                   {"In3", {"in3"}}},
-      {{"Out1", {"out1"}}, {"Out2_mult", {"out2_1", "out2_2"}}}, attrs));
+      {{"Out1", {"out1"}}, {"Out2_mult", {"out2_1", "out2_2"}}}, {}));
   std::shared_ptr<f::OperatorBase> grad_test_op =
       f::OpRegistry::CreateGradOp(*test_op);
 
-  ASSERT_EQ(grad_test_op->inputs_.size(), 5UL + 3UL + 3UL);
+  ASSERT_EQ(grad_test_op->inputs_.size(), 3UL + 2UL + 2UL);
   EXPECT_EQ(grad_test_op->Input("In1"), "in1");
   EXPECT_EQ(grad_test_op->Inputs("In2_mult"),
             std::vector<std::string>({"in2_1", "in2_2", "in2_3"}));
@@ -91,7 +90,7 @@ TEST(GradOpBuilder, MutiInOut) {
             std::vector<std::string>(
                 {f::GradVarName("out2_1"), f::GradVarName("out2_2")}));
-  ASSERT_EQ(grad_test_op->outputs_.size(), 5UL);
+  ASSERT_EQ(grad_test_op->outputs_.size(), 3UL);
   EXPECT_EQ(grad_test_op->Output(f::GradVarName("In1")),
             f::GradVarName("in1"));
   EXPECT_EQ(grad_test_op->Outputs(f::GradVarName("In2_mult")),
             std::vector<std::string>({f::GradVarName("in2_1"),
@@ -101,18 +100,17 @@ TEST(GradOpBuilder, MutiInOut) {
 }
 
 TEST(GradOpBuilder, IOIgnoredInGradient) {
-  f::AttributeMap attrs{{"input_format", std::vector<int>{0, 1, 3, 5}},
-                        {"output_format", std::vector<int>{0, 2, 3}}};
   std::shared_ptr<f::OperatorBase> test_op(f::OpRegistry::CreateOp(
       "io_ignored", {{"In1", {"in1"}},
                      {"In2_mult", {"in2_1", "in2_2"}},
                      {"In3_mult", {"in3_1", "in3_2"}}},
-      {{"Out1_mult", {"out1_1", "out1_2"}}, {"Out2", {"out2"}}}, attrs));
+      {{"Out1_mult", {"out1_1", "out1_2"}}, {"Out2", {"out2"}}}, {}));
   std::shared_ptr<f::OperatorBase> grad_test_op =
       f::OpRegistry::CreateGradOp(*test_op);
 
   // 'In2' and 'Out2' are ignored in gradient calculating
-  ASSERT_EQ(grad_test_op->inputs_.size(), 5UL + 3UL + 3UL);
+  ASSERT_EQ(grad_test_op->inputs_.size(), 3UL + 2UL + 2UL);
   EXPECT_EQ(grad_test_op->Input("In1"), "in1");
   EXPECT_EQ(grad_test_op->Inputs("In2_mult"),
             std::vector<std::string>({f::kEmptyVarName, f::kEmptyVarName}));
@@ -127,7 +125,7 @@ TEST(GradOpBuilder, IOIgnoredInGradient) {
   EXPECT_EQ(grad_test_op->Input(f::GradVarName("Out2")),
             f::GradVarName("out2"));
-  ASSERT_EQ(grad_test_op->outputs_.size(), 5UL);
+  ASSERT_EQ(grad_test_op->outputs_.size(), 3UL);
   EXPECT_EQ(grad_test_op->Output(f::GradVarName("In1")),
             f::GradVarName("in1"));
   EXPECT_EQ(grad_test_op->Outputs(f::GradVarName("In2_mult")),
             std::vector<std::string>(
```
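The revised size expectations reflect the switch from flat argument vectors to name-keyed maps: inputs_.size() now counts named parameters (3 forward inputs + 2 forward outputs + 2 output-gradient slots for mult_io) rather than individual variables (5 + 3 + 3). A minimal standalone check of that counting is sketched below; it uses a plain std::map instead of the framework's types, so the arithmetic, not the API, is the point.

```cpp
// Why the expected sizes change from 5UL + 3UL + 3UL to 3UL + 2UL + 2UL:
// with map-based arguments, size() counts named parameters, not variables.
#include <cassert>
#include <map>
#include <string>
#include <vector>

int main() {
  using VarMap = std::map<std::string, std::vector<std::string>>;
  // The mult_io forward op from the test above.
  VarMap inputs{{"In1", {"in1"}},
                {"In2_mult", {"in2_1", "in2_2", "in2_3"}},
                {"In3", {"in3"}}};
  VarMap outputs{{"Out1", {"out1"}}, {"Out2_mult", {"out2_1", "out2_2"}}};

  // Old expectation counted flat variables: I(5) + O(3) + OG(3) = 11.
  size_t flat_in = 0, flat_out = 0;
  for (const auto& kv : inputs) flat_in += kv.second.size();    // 5
  for (const auto& kv : outputs) flat_out += kv.second.size();  // 3
  assert(flat_in + flat_out + flat_out == 5 + 3 + 3);

  // New expectation counts named parameters: I(3) + O(2) + OG(2) = 7,
  // and the gradient op's outputs_ holds 3 named IG slots.
  assert(inputs.size() + outputs.size() + outputs.size() == 3 + 2 + 2);
  assert(inputs.size() == 3u);  // IG: one gradient slot per forward input
  return 0;
}
```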
paddle/framework/op_registry_test.cc
```diff
@@ -131,14 +131,6 @@ TEST(OpRegistry, DefaultValue) {
   ASSERT_EQ(op->GetAttr<float>("scale"), 1.0);
 }
 
-static void SetInputFormat(paddle::framework::OpDesc* desc) {
-  auto attr = desc->add_attrs();
-  attr->set_name("input_format");
-  attr->set_type(paddle::framework::INTS);
-  attr->mutable_ints()->Add(0);
-  attr->mutable_ints()->Add(1);
-}
-
 TEST(OpRegistry, CustomChecker) {
   paddle::framework::OpDesc op_desc;
   op_desc.set_type("my_test_op");
@@ -149,7 +141,6 @@ TEST(OpRegistry, CustomChecker) {
   auto output = op_desc.add_outputs();
   output->set_parameter("output");
   *output->mutable_arguments()->Add() = "oo";
-  SetInputFormat(&op_desc);
 
   // attr 'test_attr' is not set
   bool caught = false;
@@ -189,7 +180,6 @@ TEST(OpRegistry, CustomChecker) {
   attr->set_name("test_attr");
   attr->set_type(paddle::framework::AttrType::INT);
   attr->set_i(4);
-  SetInputFormat(&op_desc);
   auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
   paddle::platform::CPUDeviceContext dev_ctx;
   paddle::framework::Scope scope;
```
paddle/framework/operator_test.cc
```diff
@@ -185,11 +185,11 @@ TEST(OpKernel, all) {
   op_desc.set_type("op_with_kernel");
   auto* ipt = op_desc.mutable_inputs()->Add();
   *ipt->mutable_arguments()->Add() = "IN1";
-  ipt->set_parameter("input");
+  ipt->set_parameter("x");
 
   auto* output = op_desc.mutable_outputs()->Add();
   *output->mutable_arguments()->Add() = "OUT1";
-  output->set_parameter("output");
+  output->set_parameter("y");
 
   auto attr = op_desc.mutable_attrs()->Add();
   attr->set_name("scale");
@@ -234,21 +234,6 @@ TEST(OpKernel, multi_inputs) {
   attr->set_type(paddle::framework::AttrType::FLOAT);
   attr->set_f(3.14);
 
-  auto attr0 = op_desc.mutable_attrs()->Add();
-  attr0->set_name("input_format");
-  attr0->set_type(paddle::framework::AttrType::INTS);
-  auto input_format = attr0->mutable_ints();
-  input_format->Add(0);  // x0
-  input_format->Add(3);  // k
-  input_format->Add(4);  // end
-
-  auto attr1 = op_desc.mutable_attrs()->Add();
-  attr1->set_name("output_format");
-  attr1->set_type(paddle::framework::AttrType::INTS);
-  auto output_format = attr1->mutable_ints();
-  output_format->Add(0);  // y0
-  output_format->Add(2);  // y1
-
   paddle::platform::CPUDeviceContext cpu_device_context;
   paddle::framework::Scope scope;
   scope.NewVar("x0")->GetMutable<Tensor>();
```
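The deleted input_format and output_format attributes encoded how one flat argument list was split across an operator's named inputs: consecutive entries give the [begin, end) range of each slot, so {0, 3, 4} means the first slot owns arguments 0 through 2 and the second owns argument 3. The standalone sketch below illustrates that convention using the offsets from the multi_inputs test; the slot names are illustrative, not taken from the test.

```cpp
// How the removed input_format offsets partitioned a flat argument list.
#include <iostream>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> args = {"x0", "x1", "x2", "k"};
  std::vector<int> input_format = {0, 3, 4};  // slot 0: args[0..3), slot 1: args[3..4)
  std::vector<std::string> names = {"xs", "k"};  // illustrative slot names

  for (size_t i = 0; i + 1 < input_format.size(); ++i) {
    std::cout << names[i] << ":";
    for (int j = input_format[i]; j < input_format[i + 1]; ++j) {
      std::cout << " " << args[j];
    }
    std::cout << "\n";
  }
  return 0;
}
```

With the map-based OperatorBase each named input carries its own variable list, so these offset attributes, like the SetInputFormat helper dropped from op_registry_test.cc above, are no longer needed.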
paddle/operators/recurrent_op_test.cc
```
@@ -22,382 +22,233 @@
```

Before this merge the body of the file was a failing placeholder, `TEST(rnn, bad) { ASSERT_TRUE(false); }`, followed by the previous recurrent-op tests commented out line by line. The merge drops the placeholder and the commented-out block and reinstates the tests against the refactored framework. The file now reads:

```cpp
#include "paddle/framework/tensor.h"
#include "paddle/operators/net_op.h"

namespace paddle {
namespace operators {

using namespace paddle::framework;

class RecurrentGradientAlgorithmTest : public ::testing::Test {
 protected:
  virtual void SetUp() override {
    CreateGlobalVariables();
    CreateStepScopes();
    CreateStepNet();
    CreateRNNGradientAlgorithm();

    // segment inputs
    SegmentInputs();
    // link forward memories
    LinkeMemories();
  }

  virtual void TearDown() override {}

  void CreateGlobalVariables() {
    // inputs: x
    LOG(INFO) << "create global variable x";
    Variable* x = scope_.NewVar("x");
    DDim dims =
        make_ddim({10 /*sent size*/, 20 /*batch size*/, 30 /*input dim*/});
    x->GetMutable<Tensor>()->mutable_data<float>(dims, platform::CPUPlace());
    // inputs: h_boot
    LOG(INFO) << "create global variable h_boot";
    Variable* h_boot = scope_.NewVar("h_boot");
    h_boot->GetMutable<Tensor>()->mutable_data<float>(
        make_ddim({20 /*batch size*/, 30 /*input dim*/}), platform::CPUPlace());
    // inputs: w
    LOG(INFO) << "create global variable w";
    Variable* w = scope_.NewVar("rnn/w");
    w->GetMutable<Tensor>()->mutable_data<float>(make_ddim({30, 30}),
                                                 platform::CPUPlace());
    // inputs: h_grad
    LOG(INFO) << "create variable h_grad";
    Variable* dh = scope_.NewVar("h_grad");
    dh->GetMutable<Tensor>()->mutable_data<float>(make_ddim({10, 20, 30}),
                                                  platform::CPUPlace());
    // inputs: step_scopes
    LOG(INFO) << "create variable step_scopes";
    scope_.NewVar("step_scopes");
    // inputs: step_net
    LOG(INFO) << "create variable step_net";
    scope_.NewVar("step_net");
    // outputs: w_grad
    LOG(INFO) << "create global variable w_grad";
    scope_.NewVar("rnn/w_grad");
    // outputs: x_grad
    LOG(INFO) << "create global variable x_grad";
    scope_.NewVar("x_grad");
    // outputs: h_boot_grad
    LOG(INFO) << "create global variable h_boot_grad";
    scope_.NewVar("h_boot_grad");
  }

  void CreateStepScopes() {
    auto step_scopes =
        scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
    for (int i = 0; i < 10; ++i) {
      auto& scope = scope_.NewScope();
      auto pre_t = scope.NewVar("rnn/pre_h")->GetMutable<Tensor>();
      pre_t->mutable_data<float>({20, 30}, platform::CPUPlace());
      auto tensor = scope.NewVar("rnn/h")->GetMutable<Tensor>();
      tensor->mutable_data<float>({20, 30}, platform::CPUPlace());

      // for unit test of ConcatOutputs
      auto xg = scope.NewVar("rnn/x_grad")->GetMutable<Tensor>();
      xg->mutable_data<float>({20, 30}, platform::CPUPlace());

      step_scopes->emplace_back(&scope);
    }

    // last time step
    auto g = (*step_scopes)[9]->NewVar("rnn/h_pre_grad")->GetMutable<Tensor>();
    g->mutable_data<float>({20, 30}, platform::CPUPlace());
  }

  void CreateRNNGradientAlgorithm() {
    std::unique_ptr<rnn::Argument> arg(new rnn::Argument());
    arg->step_net = "step_net";
    arg->step_scopes = "step_scopes";
    rnn::Link inlink;
    inlink.external = "h_grad";
    inlink.internal = "rnn/h_grad";
    arg->inlinks = std::vector<rnn::Link>{inlink};

    rnn::Link outlink;
    outlink.external = "x_grad";
    outlink.internal = "rnn/x_grad";
    arg->outlinks = std::vector<rnn::Link>{outlink};

    rnn::MemoryAttr mem_attr;
    mem_attr.pre_var = "rnn/h_pre_grad";
    mem_attr.var = "rnn/h_grad";
    mem_attr.boot_var = "h_boot_grad";
    arg->memories = std::vector<rnn::MemoryAttr>{mem_attr};

    rnn_grad_algo_.Init(std::move(arg));
  }

  void CreateStepNet() {
    LOG(INFO) << "create variable step_net";
    Variable* var = scope_.NewVar("step_net");
    auto net = var->GetMutable<NetOp>();
    // TODO(qingqing) modify backward op create for RNNOp unit test
    // and the unit test will be removed to Python.
    // net->AddOp(OpRegistry::CreateOp("mul", {"X", {"rnn/h_pre", "rnn/w",
    // "rnn/s_grad"}}, {"Y", {"rnn/h_pre_grad", "rnn/w_grad"}}, {}));

    // net->AddOp(OpRegistry::CreateOp("add_two", {"X", {"rnn/h_grad"}},
    // {"Y", {"rnn/x_grad"}}, {"Out", "rnn/s_grad"}}, {}));
    net->CompleteAddOp();
  }

  void SegmentInputs() {
    LOG(INFO) << "segment inputs";
    std::vector<std::string> inlinks = {"x"};
    std::vector<std::string> inlinks_alias = {"rnn/x"};

    rnn::Link inlink;
    inlink.external = "x";
    inlink.internal = "rnn/x";
    auto step_scopes =
        scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
    rnn::SegmentInputs(*step_scopes, std::vector<rnn::Link>{inlink}, 10,
                       true /*infer_shape_mode*/);
  }

  void LinkeMemories() {
    LOG(INFO) << "link memories";
    rnn::MemoryAttr mem_attr;
    mem_attr.pre_var = "rnn/h_pre";
    mem_attr.var = "rnn/h";
    mem_attr.boot_var = "boot_h";
    std::vector<rnn::MemoryAttr> memories;
    memories.push_back(mem_attr);
    auto step_scopes =
        scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
    for (int i = 1; i < 10; ++i) {
      rnn::LinkMemories(*step_scopes, memories, i, -1,
                        true /*infer_shape_mode*/);
    }
  }

  Scope scope_;
  RecurrentGradientAlgorithm rnn_grad_algo_;
};

// TEST_F(RecurrentGradientAlgorithmTest, Run) {
//   platform::CPUDeviceContext ctx;
//   rnn_grad_algo_.Run(scope_, ctx);
// }

}  // namespace operators
}  // namespace paddle

TEST(RecurrentOp, LinkMemories) {
  using namespace paddle::framework;
  using namespace paddle::platform;
  using namespace paddle::operators;

  // create and init step scopes
  size_t len = 10;
  std::vector<Scope*> step_scopes;
  for (size_t i = 0; i < len; ++i) {
    auto scope = new Scope();
    scope->NewVar("pre_h");
    auto tensor = scope->NewVar("h")->GetMutable<Tensor>();
    float* data = tensor->mutable_data<float>({15, 20}, CPUPlace());
    for (size_t j = 0; j < 15 * 20; ++j) {
      data[j] = rand() * (1. / (double)RAND_MAX);
    }
    step_scopes.push_back(scope);
  }

  // create MemoryAttr
  rnn::MemoryAttr mem_attr;
  mem_attr.pre_var = "pre_h";
  mem_attr.var = "h";
  mem_attr.boot_var = "boot_h";
  std::vector<rnn::MemoryAttr> memories;
  memories.push_back(mem_attr);

  for (size_t i = 1; i < len; ++i) {
    rnn::LinkMemories(step_scopes, memories, i, -1, false /*infer_shape_mode*/);
  }
  // check
  for (size_t i = 0; i < len - 1; ++i) {
    const float* a =
        step_scopes[i]->FindVar("h")->GetMutable<Tensor>()->data<float>();
    const float* b = step_scopes[i + 1]
                         ->FindVar("pre_h")
                         ->GetMutable<Tensor>()
                         ->data<float>();
    for (size_t j = 0; j < 15 * 20; ++j) {
      ASSERT_FLOAT_EQ(a[j], b[j]);
    }
  }

  for (int i = len - 2; i >= 0; --i) {
    rnn::LinkMemories(step_scopes, memories, i, 1, false /*infer_shape_mode*/);
  }
  // check
  for (int i = len - 2; i >= 0; --i) {
    const float* a =
        step_scopes[i]->FindVar("pre_h")->GetMutable<Tensor>()->data<float>();
    const float* b =
        step_scopes[i + 1]->FindVar("h")->GetMutable<Tensor>()->data<float>();
    for (size_t j = 0; j < 15 * 20; ++j) {
      ASSERT_FLOAT_EQ(a[j], b[j]);
    }
  }

  for (auto s : step_scopes) {
    delete s;
  }
}

USE_OP(add_two);
USE_OP(mul);
USE_OP_WITHOUT_KERNEL(recurrent_op);
```
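TEST(RecurrentOp, LinkMemories) checks the invariant that after linking with offset -1, each step scope's pre_h holds the same values as the previous step's h (and symmetrically for offset +1). The standalone sketch below reduces that invariant to plain containers; the map-of-shared-buffers types are stand-ins for Scope and Tensor, not the Paddle classes.

```cpp
// Reduced illustration of the memory-linking invariant the test verifies.
#include <cassert>
#include <map>
#include <memory>
#include <string>
#include <vector>

using Tensor = std::vector<float>;
using Scope = std::map<std::string, std::shared_ptr<Tensor>>;

int main() {
  const size_t len = 10, size = 15 * 20;
  std::vector<Scope> step_scopes(len);
  for (size_t i = 0; i < len; ++i) {
    auto h = std::make_shared<Tensor>(size);
    for (size_t j = 0; j < size; ++j) (*h)[j] = float(i) + 0.001f * j;
    step_scopes[i]["h"] = h;
  }

  // Linking with offset -1: step i's "pre_h" carries the contents of
  // step i-1's "h" (here modelled as sharing the same buffer).
  for (size_t i = 1; i < len; ++i) {
    step_scopes[i]["pre_h"] = step_scopes[i - 1]["h"];
  }

  // Mirrors the ASSERT_FLOAT_EQ loops in the test.
  for (size_t i = 0; i + 1 < len; ++i) {
    const Tensor& a = *step_scopes[i]["h"];
    const Tensor& b = *step_scopes[i + 1]["pre_h"];
    for (size_t j = 0; j < size; ++j) assert(a[j] == b[j]);
  }
  return 0;
}
```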
python/paddle/v2/framework/tests/test_operator.py
```diff
@@ -74,6 +74,7 @@ class TestOpDescCreationMethod(unittest.TestCase):
         expected1.inputs.extend(['x', 'w', 'b'])
         expected1.outputs.extend(['y'])
         expected1.type = 'fc'
+        # the input_format can be removed after testing
         attr = expected1.attrs.add()
         attr.name = 'input_format'
         attr.type = attribute_pb2.INTS
@@ -86,6 +87,7 @@ class TestOpDescCreationMethod(unittest.TestCase):
         expected2.inputs.extend(['x1', 'x2', 'x3', 'w1', 'w2', 'w3', 'b'])
         expected2.outputs.extend(['y'])
         expected2.type = 'fc'
+        # the input_format can be removed after testing
         attr = expected2.attrs.add()
         attr.name = 'input_format'
         attr.type = attribute_pb2.INTS
```