PaddlePaddle / PaddleDetection
Commit 7202f425

Authored Aug 10, 2017 by qingqing01; committed by GitHub on Aug 10, 2017

Merge branch 'refactorize_framework_proto' into feature/refactorize_framework_proto

Parents: 030f4302, 36709d05

Showing 6 changed files with 264 additions and 478 deletions (+264, -478)
paddle/framework/grad_op_builder.cc                 +14   -54
paddle/framework/grad_op_builder_test.cc            +19   -21
paddle/framework/op_registry_test.cc                 +0   -10
paddle/framework/operator_test.cc                    +2   -17
paddle/operators/recurrent_op_test.cc              +227  -376
python/paddle/v2/framework/tests/test_operator.py    +2    -0
paddle/framework/grad_op_builder.cc
@@ -18,59 +18,32 @@ permissions and limitations under the License. */
 namespace paddle {
 namespace framework {
+/**
 class OpRegistry;

 using VarIndexMap = std::unordered_map<std::string, int>;

 enum class OpArgType { IN, OUT };

-static std::vector<int>* GetOpFormat(OperatorBase* op, const OpArgType& type) {
-  std::string key = type == OpArgType::IN ? "input_format" : "output_format";
-  return op->attrs_.count(key)
-             ? &boost::get<std::vector<int>>(op->attrs_.at(key))
-             : nullptr;
-}
-
-static const std::vector<int>* GetOpFormat(const OperatorBase* op,
-                                           const OpArgType& type) {
-  std::string key = type == OpArgType::IN ? "input_format" : "output_format";
-  return op->attrs_.count(key)
-             ? &boost::get<std::vector<int>>(op->attrs_.at(key))
-             : nullptr;
-}
-
 static void TransOpArg(const OperatorBase* src_op, OperatorBase* dst_op,
                        const OpArgType& src_type, const OpArgType& dst_type,
-                       int& idx, bool is_grad) {
-  const std::vector<std::string>& src_inout =
+                       bool is_grad) {
+  const auto& src_inout =
       src_type == OpArgType::IN ? src_op->inputs_ : src_op->outputs_;
-  const std::vector<int>* src_format = GetOpFormat(src_op, src_type);
-
-  std::vector<std::string>& dst_inout =
+  auto& dst_inout =
       dst_type == OpArgType::IN ? dst_op->inputs_ : dst_op->outputs_;
-  std::vector<int>* dst_format = GetOpFormat(dst_op, dst_type);
   const OpProto& proto = OpRegistry::protos().at(src_op->type_);
   const auto& src_arg_list =
       src_type == OpArgType::IN ? proto.inputs() : proto.outputs();

   for (const auto& arg : src_arg_list) {
     std::string src_name = arg.name();
-    std::string dst_name = is_grad ? src_name + kGradVarSuffix : src_name;
-    (*dst_op->in_out_idxs_)[dst_name] = idx++;
-    int src_arg_idx = src_op->in_out_idxs_->at(src_name);
-    int src_begin =
-        src_format == nullptr ? src_arg_idx : src_format->at(src_arg_idx);
-    int src_end = src_format == nullptr ? src_arg_idx + 1
-                                        : src_format->at(src_arg_idx + 1);
-    for (int i = src_begin; i < src_end; ++i) {
-      std::string s =
-          is_grad ? src_inout[i] + kGradVarSuffix
-                  : (arg.ignore_gradient() ? kEmptyVarName : src_inout[i]);
-      dst_inout.emplace_back(s);
-    }
-    if (dst_format != nullptr) {
-      dst_format->push_back(dst_inout.size());
+    std::string dst_name = is_grad ? GradVarName(src_name) : src_name;
+    for (auto& var_name : src_inout.at(src_name)) {
+      std::string s = is_grad ? GradVarName(var_name)
+                              : (arg.no_gradient() ? kEmptyVarName : var_name);
+      dst_inout[dst_name].emplace_back(s);
     }
   }
 }
@@ -80,25 +53,12 @@ OperatorBase* BuildGradOp(const OperatorBase* op) {
   OperatorBase* grad_op = OpRegistry::op_creators().at(grad_op_type)();
   grad_op->type_ = grad_op_type;
   grad_op->attrs_ = op->attrs_;
-  grad_op->attrs_.erase("input_format");
-  grad_op->attrs_.erase("output_format");
-  if (GetOpFormat(op, OpArgType::IN) != nullptr) {
-    grad_op->attrs_["output_format"] = std::vector<int>({0});
-  }
-  if (GetOpFormat(op, OpArgType::IN) != nullptr ||
-      GetOpFormat(op, OpArgType::OUT) != nullptr) {
-    grad_op->attrs_["input_format"] = std::vector<int>({0});
-  }
-  grad_op->in_out_idxs_.reset(new VarIndexMap());
-  int in_idx = 0;
-  int out_idx = 0;
-  TransOpArg(op, grad_op, OpArgType::IN, OpArgType::IN, in_idx, false);   // I
-  TransOpArg(op, grad_op, OpArgType::OUT, OpArgType::IN, in_idx, false);  // G
-  TransOpArg(op, grad_op, OpArgType::OUT, OpArgType::IN, in_idx, true);   // OG
-  TransOpArg(op, grad_op, OpArgType::IN, OpArgType::OUT, out_idx, true);  // IG
+  TransOpArg(op, grad_op, OpArgType::IN, OpArgType::IN, false);   // I
+  TransOpArg(op, grad_op, OpArgType::OUT, OpArgType::IN, false);  // O
+  TransOpArg(op, grad_op, OpArgType::OUT, OpArgType::IN, true);   // OG
+  TransOpArg(op, grad_op, OpArgType::IN, OpArgType::OUT, true);   // IG
   return grad_op;
 }
+**/
+OperatorBase* BuildGradOp(const OperatorBase* op) { return nullptr; }

 }  // namespace framework
 }  // namespace paddle
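The rewritten TransOpArg drops the positional input_format bookkeeping in favor of name-keyed argument maps, and replaces the manual `src_name + kGradVarSuffix` concatenation with the GradVarName helper. Below is a minimal standalone sketch of the four I/O/OG/IG transfers over such maps; it assumes, as the tests in this commit suggest, that GradVarName simply appends the "@GRAD" suffix, and the Trans helper is a hypothetical simplification of TransOpArg, not the framework's actual code:

#include <iostream>
#include <map>
#include <string>
#include <vector>

// Hypothetical simplification: GradVarName is assumed to append the
// gradient suffix to an argument or variable name.
const char kGradVarSuffix[] = "@GRAD";
std::string GradVarName(const std::string& name) { return name + kGradVarSuffix; }

using VarMap = std::map<std::string, std::vector<std::string>>;

// Copy every argument slot of `src` into `dst`, optionally renaming the slot
// and its variables to their @GRAD counterparts (mirrors TransOpArg above).
void Trans(const VarMap& src, VarMap* dst, bool is_grad) {
  for (const auto& kv : src) {
    std::string dst_name = is_grad ? GradVarName(kv.first) : kv.first;
    for (const auto& var : kv.second) {
      (*dst)[dst_name].push_back(is_grad ? GradVarName(var) : var);
    }
  }
}

int main() {
  VarMap fwd_in{{"X", {"x"}}, {"Y", {"y"}}};  // forward inputs  (I)
  VarMap fwd_out{{"Out", {"out"}}};           // forward outputs (O)

  VarMap grad_in, grad_out;
  Trans(fwd_in, &grad_in, false);   // I:  X -> x, Y -> y
  Trans(fwd_out, &grad_in, false);  // O:  Out -> out
  Trans(fwd_out, &grad_in, true);   // OG: Out@GRAD -> out@GRAD
  Trans(fwd_in, &grad_out, true);   // IG: X@GRAD -> x@GRAD, Y@GRAD -> y@GRAD

  for (const auto& kv : grad_out)
    for (const auto& v : kv.second)
      std::cout << kv.first << " -> " << v << "\n";
}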
paddle/framework/grad_op_builder_test.cc
@@ -51,14 +51,14 @@ TEST(GradOpBuilder, AddTwo) {
       "add_two", {{"X", {"x"}}, {"Y", {"y"}}}, {{"Out", {"out"}}}, {}));
   std::shared_ptr<f::OperatorBase> grad_add_op =
       f::OpRegistry::CreateGradOp(*add_op);
-  EXPECT_EQ(static_cast<int>(grad_add_op->inputs_.size()), 4);
-  EXPECT_EQ(static_cast<int>(grad_add_op->outputs_.size()), 2);
+  EXPECT_EQ(grad_add_op->inputs_.size(), 4UL);
+  EXPECT_EQ(grad_add_op->outputs_.size(), 2UL);
   EXPECT_EQ(grad_add_op->Input("X"), "x");
   EXPECT_EQ(grad_add_op->Input("Y"), "y");
   EXPECT_EQ(grad_add_op->Input("Out"), "out");
-  EXPECT_EQ(grad_add_op->Input("Out@GRAD"), "out@GRAD");
-  EXPECT_EQ(grad_add_op->Output("X@GRAD"), "x@GRAD");
-  EXPECT_EQ(grad_add_op->Output("Y@GRAD"), "y@GRAD");
+  EXPECT_EQ(grad_add_op->Input(f::GradVarName("Out")), f::GradVarName("out"));
+  EXPECT_EQ(grad_add_op->Output(f::GradVarName("X")), f::GradVarName("x"));
+  EXPECT_EQ(grad_add_op->Output(f::GradVarName("Y")), f::GradVarName("y"));
 }

 REGISTER_OP(mult_io, f::NOP, f::MutiInOutOpMaker);
@@ -67,17 +67,16 @@ REGISTER_OP(io_ignored, f::NOP, f::IOIgnoredOpMaker);
 REGISTER_GRADIENT_OP(io_ignored, io_ignored_grad, f::NOP);

 TEST(GradOpBuilder, MutiInOut) {
-  f::AttributeMap attrs{{"input_format", std::vector<int>{0, 1, 4, 5}},
-                        {"output_format", std::vector<int>{0, 1, 3}}};
   std::shared_ptr<f::OperatorBase> test_op(f::OpRegistry::CreateOp(
-      "mult_io", {{"In1", {"in1"}},
-                  {"In2_mult", {"in2_1", "in2_2", "in2_3"}},
-                  {"In3", {"in3"}}},
-      {{"Out1", {"out1"}}, {"Out2_mult", {"out2_1", "out2_2"}}}, attrs));
+      "mult_io", {{"In1", {"in1"}},
+                  {"In2_mult", {"in2_1", "in2_2", "in2_3"}},
+                  {"In3", {"in3"}}},
+      {{"Out1", {"out1"}}, {"Out2_mult", {"out2_1", "out2_2"}}}, {}));
   std::shared_ptr<f::OperatorBase> grad_test_op =
       f::OpRegistry::CreateGradOp(*test_op);

-  ASSERT_EQ(grad_test_op->inputs_.size(), 5UL + 3UL + 3UL);
+  ASSERT_EQ(grad_test_op->inputs_.size(), 3UL + 2UL + 2UL);
   EXPECT_EQ(grad_test_op->Input("In1"), "in1");
   EXPECT_EQ(grad_test_op->Inputs("In2_mult"),
             std::vector<std::string>({"in2_1", "in2_2", "in2_3"}));
@@ -91,7 +90,7 @@ TEST(GradOpBuilder, MutiInOut) {
             std::vector<std::string>(
                 {f::GradVarName("out2_1"), f::GradVarName("out2_2")}));
-  ASSERT_EQ(grad_test_op->outputs_.size(), 5UL);
+  ASSERT_EQ(grad_test_op->outputs_.size(), 3UL);
   EXPECT_EQ(grad_test_op->Output(f::GradVarName("In1")), f::GradVarName("in1"));
   EXPECT_EQ(grad_test_op->Outputs(f::GradVarName("In2_mult")),
             std::vector<std::string>({f::GradVarName("in2_1"),
@@ -101,18 +100,17 @@ TEST(GradOpBuilder, MutiInOut) {
 }

 TEST(GradOpBuilder, IOIgnoredInGradient) {
-  f::AttributeMap attrs{{"input_format", std::vector<int>{0, 1, 3, 5}},
-                        {"output_format", std::vector<int>{0, 2, 3}}};
   std::shared_ptr<f::OperatorBase> test_op(f::OpRegistry::CreateOp(
-      "io_ignored", {{"In1", {"in1"}},
-                     {"In2_mult", {"in2_1", "in2_2"}},
-                     {"In3_mult", {"in3_1", "in3_2"}}},
-      {{"Out1_mult", {"out1_1", "out1_2"}}, {"Out2", {"out2"}}}, attrs));
+      "io_ignored", {{"In1", {"in1"}},
+                     {"In2_mult", {"in2_1", "in2_2"}},
+                     {"In3_mult", {"in3_1", "in3_2"}}},
+      {{"Out1_mult", {"out1_1", "out1_2"}}, {"Out2", {"out2"}}}, {}));
   std::shared_ptr<f::OperatorBase> grad_test_op =
       f::OpRegistry::CreateGradOp(*test_op);

   // 'In2' and 'Out2' are ignored in gradient calculating
-  ASSERT_EQ(grad_test_op->inputs_.size(), 5UL + 3UL + 3UL);
+  ASSERT_EQ(grad_test_op->inputs_.size(), 3UL + 2UL + 2UL);
   EXPECT_EQ(grad_test_op->Input("In1"), "in1");
   EXPECT_EQ(grad_test_op->Inputs("In2_mult"),
             std::vector<std::string>({f::kEmptyVarName, f::kEmptyVarName}));
@@ -127,7 +125,7 @@ TEST(GradOpBuilder, IOIgnoredInGradient) {
   EXPECT_EQ(grad_test_op->Input(f::GradVarName("Out2")),
             f::GradVarName("out2"));
-  ASSERT_EQ(grad_test_op->outputs_.size(), 5UL);
+  ASSERT_EQ(grad_test_op->outputs_.size(), 3UL);
   EXPECT_EQ(grad_test_op->Output(f::GradVarName("In1")), f::GradVarName("in1"));
   EXPECT_EQ(grad_test_op->Outputs(f::GradVarName("In2_mult")),
             std::vector<std::string>(
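These tests now pass inputs and outputs as name-keyed maps and drop the input_format / output_format attributes entirely. A minimal sketch contrasting the two representations, reusing the offsets {0, 1, 4, 5} and variable names from the deleted attrs; everything else here is illustrative rather than framework code:

#include <cassert>
#include <map>
#include <string>
#include <vector>

int main() {
  // Old style (hypothetical reconstruction): one flat list of variable
  // names plus "input_format" offsets marking where each slot begins.
  std::vector<std::string> flat = {"in1", "in2_1", "in2_2", "in2_3", "in3"};
  std::vector<int> input_format = {0, 1, 4, 5};    // In1=[0,1) In2_mult=[1,4) In3=[4,5)
  assert(input_format[2] - input_format[1] == 3);  // In2_mult holds 3 vars

  // New style: each slot is keyed by its name, so no offset table is needed.
  std::map<std::string, std::vector<std::string>> inputs = {
      {"In1", {"in1"}},
      {"In2_mult", {"in2_1", "in2_2", "in2_3"}},
      {"In3", {"in3"}}};
  assert(inputs.at("In2_mult").size() == 3);
  return 0;
}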
paddle/framework/op_registry_test.cc
@@ -131,14 +131,6 @@ TEST(OpRegistry, DefaultValue) {
   ASSERT_EQ(op->GetAttr<float>("scale"), 1.0);
 }

-static void SetInputFormat(paddle::framework::OpDesc* desc) {
-  auto attr = desc->add_attrs();
-  attr->set_name("input_format");
-  attr->set_type(paddle::framework::INTS);
-  attr->mutable_ints()->Add(0);
-  attr->mutable_ints()->Add(1);
-}
-
 TEST(OpRegistry, CustomChecker) {
   paddle::framework::OpDesc op_desc;
   op_desc.set_type("my_test_op");
@@ -149,7 +141,6 @@ TEST(OpRegistry, CustomChecker) {
   auto output = op_desc.add_outputs();
   output->set_parameter("output");
   *output->mutable_arguments()->Add() = "oo";
-  SetInputFormat(&op_desc);

   // attr 'test_attr' is not set
   bool caught = false;
@@ -189,7 +180,6 @@ TEST(OpRegistry, CustomChecker) {
   attr->set_name("test_attr");
   attr->set_type(paddle::framework::AttrType::INT);
   attr->set_i(4);
-  SetInputFormat(&op_desc);
   auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
   paddle::platform::CPUDeviceContext dev_ctx;
   paddle::framework::Scope scope;
paddle/framework/operator_test.cc
@@ -185,11 +185,11 @@ TEST(OpKernel, all) {
   op_desc.set_type("op_with_kernel");
   auto* ipt = op_desc.mutable_inputs()->Add();
   *ipt->mutable_arguments()->Add() = "IN1";
-  ipt->set_parameter("input");
+  ipt->set_parameter("x");

   auto* output = op_desc.mutable_outputs()->Add();
   *output->mutable_arguments()->Add() = "OUT1";
-  output->set_parameter("output");
+  output->set_parameter("y");

   auto attr = op_desc.mutable_attrs()->Add();
   attr->set_name("scale");
@@ -234,21 +234,6 @@ TEST(OpKernel, multi_inputs) {
   attr->set_type(paddle::framework::AttrType::FLOAT);
   attr->set_f(3.14);

-  auto attr0 = op_desc.mutable_attrs()->Add();
-  attr0->set_name("input_format");
-  attr0->set_type(paddle::framework::AttrType::INTS);
-  auto input_format = attr0->mutable_ints();
-  input_format->Add(0);  // x0
-  input_format->Add(3);  // k
-  input_format->Add(4);  // end
-
-  auto attr1 = op_desc.mutable_attrs()->Add();
-  attr1->set_name("output_format");
-  attr1->set_type(paddle::framework::AttrType::INTS);
-  auto output_format = attr1->mutable_ints();
-  output_format->Add(0);  // y0
-  output_format->Add(2);  // y1
-
   paddle::platform::CPUDeviceContext cpu_device_context;
   paddle::framework::Scope scope;
   scope.NewVar("x0")->GetMutable<Tensor>();
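The deleted input_format attribute encoded slot boundaries as offsets into a flat argument list: {0, 3, 4} means the first slot spans arguments [0, 3) and the second spans [3, 4). A small sketch of that segmentation, assuming the flat list {x0, x1, x2, k} implied by the deleted "// x0", "// k", "// end" comments (the list contents are an assumption for illustration):

#include <iostream>
#include <string>
#include <vector>

int main() {
  // Hypothetical flat input list of the multi_inputs test: x0, x1, x2 then k.
  std::vector<std::string> args = {"x0", "x1", "x2", "k"};
  // input_format = {0, 3, 4}: slot 0 spans [0, 3), slot 1 spans [3, 4).
  std::vector<int> input_format = {0, 3, 4};

  for (size_t slot = 0; slot + 1 < input_format.size(); ++slot) {
    std::cout << "slot " << slot << ":";
    for (int i = input_format[slot]; i < input_format[slot + 1]; ++i) {
      std::cout << " " << args[i];
    }
    std::cout << "\n";  // prints: slot 0: x0 x1 x2 / slot 1: k
  }
  return 0;
}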
paddle/operators/recurrent_op_test.cc
@@ -22,382 +22,233 @@
 #include "paddle/framework/tensor.h"
 #include "paddle/operators/net_op.h"

-namespace paddle {
-namespace operators {
-
-using namespace paddle::framework;
+TEST(rnn, bad) { ASSERT_TRUE(false); }
+
+// namespace paddle {
+// namespace operators {
+//
+// using framework::make_ddim;
+// using framework::DDim;
+//
// class RecurrentOpTest : public ::testing::Test {
// protected:
// virtual void SetUp() override {
// CreateGlobalVariables();
// CreateStepNet();
// CreateRNNOp();
// }
//
// virtual void TearDown() override {}
//
// void CreateGlobalVariables() {
// // create input, and init content
// LOG(INFO) << "create global variable x";
// for (auto inlink : std::vector<std::string>{"x", "x0", "x1", "h"}) {
// Variable* x = scope_.NewVar(inlink);
// DDim dims = make_ddim(std::vector<int>{
// 10 /*sent size*/, 20 /*batch size*/, 30 /*input dim*/});
// x->GetMutable<Tensor>()->mutable_data<float>(dims,
// platform::CPUPlace());
// }
// // create output alias just for test
// for (auto inlink : std::vector<std::string>{"h@alias"}) {
// Variable* x = scope_.NewVar(inlink);
// DDim dims =
// make_ddim(std::vector<int>{20 /*batch size*/, 30 /*input dim*/});
// x->GetMutable<Tensor>()->mutable_data<float>(dims,
// platform::CPUPlace());
// }
//
// LOG(INFO) << "create global variable w";
// Variable* w = scope_.NewVar("rnn/w");
// w->GetMutable<Tensor>()->mutable_data<float>(
// make_ddim(std::vector<int>{30, 30}), platform::CPUPlace());
//
// for (auto boot : std::vector<std::string>{"h_boot"}) {
// LOG(INFO) << "create global variable " << boot;
// Variable* h_boot = scope_.NewVar(boot);
// h_boot->GetMutable<Tensor>()->mutable_data<float>(
// make_ddim(std::vector<int>{20 /*batch size*/, 30 /*input dim*/}),
// platform::CPUPlace());
// }
//
// LOG(INFO) << "create variable step_scopes";
// scope_.NewVar("step_scopes");
//
// LOG(INFO) << "create variable h";
// scope_.NewVar("h");
// }
//
// void CreateRNNOp() {
// framework::OpDesc op_desc;
//
// op_desc.set_type("recurrent_op");
// // inlinks 0
// op_desc.add_inputs("x");
// op_desc.add_inputs("x0");
// op_desc.add_inputs("x1");
// // boot_memories 3
// op_desc.add_inputs("h_boot");
// // step net 5
// op_desc.add_inputs("step_net");
// // outlinks 6
// op_desc.add_outputs("h");
// // step scopes 7
// op_desc.add_outputs("step_scopes");
//
// auto _input_format = std::vector<int>{
// 0, // in_link
// 3, // memories
// 4 // step_net
// };
// auto input_format = op_desc.add_attrs();
// input_format->set_name("input_format");
// input_format->set_type(paddle::framework::AttrType::INTS);
// for (auto i : _input_format) {
// input_format->add_ints(i);
// }
//
// auto output_format = op_desc.add_attrs();
// output_format->set_name("output_format");
// output_format->set_type(paddle::framework::AttrType::INTS);
// for (auto i : std::vector<int>{0, 1, 2}) {
// output_format->add_ints(i);
// }
//
// auto inlink_alias = op_desc.add_attrs();
// inlink_alias->set_name("inlink_alias");
// inlink_alias->set_type(paddle::framework::AttrType::STRINGS);
//
// auto outlink_alias = op_desc.add_attrs();
// outlink_alias->set_name("outlink_alias");
// outlink_alias->set_type(paddle::framework::AttrType::STRINGS);
//
// auto pre_memories = op_desc.add_attrs();
// pre_memories->set_name("pre_memories");
// pre_memories->set_type(paddle::framework::AttrType::STRINGS);
//
// auto memories = op_desc.add_attrs();
// memories->set_name("memories");
// memories->set_type(paddle::framework::AttrType::STRINGS);
//
// // create inlink_alias
// for (const auto& item :
// std::vector<std::string>{"x@alias", "x0@alias", "x1@alias"}) {
// inlink_alias->add_strings(item);
// }
// // pre memories
// for (const auto& item : std::vector<std::string>{"rnn/h@pre"}) {
// pre_memories->add_strings(item);
// }
// // memories
// for (const auto& item : std::vector<std::string>{"rnn/h"}) {
// memories->add_strings(item);
// }
// // output alias
// for (const auto& item : std::vector<std::string>{"h@alias"}) {
// outlink_alias->add_strings(item);
// }
//
// rnn_op_ = OpRegistry::CreateOp(op_desc);
//
// LOG(INFO) << "rnn_op finish init";
// }
//
// void CreateStepNet() {
// LOG(INFO) << "create variable step_net";
// Variable* var = scope_.NewVar("step_net");
// auto net = var->GetMutable<NetOp>();
// net->AddOp(
// OpRegistry::CreateOp("mul", {"rnn/h@pre", "rnn/w"}, {"rnn/s"}, {}));
//
// net->AddOp(
// OpRegistry::CreateOp("add_two", {"x@alias", "rnn/s"}, {"rnn/h"}, {}));
// net->CompleteAddOp();
// }
//
// // father scope
// Scope scope_;
// std::shared_ptr<OperatorBase> rnn_op_;
//};
//
// TEST_F(RecurrentOpTest, Run) {
-class RecurrentGradientAlgorithmTest : public ::testing::Test {
- protected:
-  virtual void SetUp() override {
-    CreateGlobalVariables();
-    CreateStepScopes();
-    CreateStepNet();
-    CreateRNNGradientAlgorithm();
-
-    // segment inputs
-    SegmentInputs();
-    // link forward memories
-    LinkeMemories();
-  }
-
-  virtual void TearDown() override {}
-
-  void CreateGlobalVariables() {
-    // inputs: x
-    LOG(INFO) << "create global variable x";
-    Variable* x = scope_.NewVar("x");
-    DDim dims =
-        make_ddim({10 /*sent size*/, 20 /*batch size*/, 30 /*input dim*/});
-    x->GetMutable<Tensor>()->mutable_data<float>(dims, platform::CPUPlace());
-    // inputs: h_boot
-    LOG(INFO) << "create global variable h_boot";
-    Variable* h_boot = scope_.NewVar("h_boot");
-    h_boot->GetMutable<Tensor>()->mutable_data<float>(
-        make_ddim({20 /*batch size*/, 30 /*input dim*/}), platform::CPUPlace());
-    // inputs: w
-    LOG(INFO) << "create global variable w";
-    Variable* w = scope_.NewVar("rnn/w");
-    w->GetMutable<Tensor>()->mutable_data<float>(make_ddim({30, 30}),
-                                                 platform::CPUPlace());
-    // inputs: h_grad
-    LOG(INFO) << "create variable h_grad";
-    Variable* dh = scope_.NewVar("h_grad");
-    dh->GetMutable<Tensor>()->mutable_data<float>(make_ddim({10, 20, 30}),
-                                                  platform::CPUPlace());
-    // inputs: step_scopes
-    LOG(INFO) << "create variable step_scopes";
-    scope_.NewVar("step_scopes");
-    // inputs: step_net
-    LOG(INFO) << "create variable step_net";
-    scope_.NewVar("step_net");
-    // outputs: w_grad
-    LOG(INFO) << "create global variable w_grad";
-    scope_.NewVar("rnn/w_grad");
-    // outputs: x_grad
-    LOG(INFO) << "create global variable x_grad";
-    scope_.NewVar("x_grad");
-    // outputs: h_boot_grad
-    LOG(INFO) << "create global variable h_boot_grad";
-    scope_.NewVar("h_boot_grad");
-  }
-
-  void CreateStepScopes() {
-    auto step_scopes =
-        scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
-    for (int i = 0; i < 10; ++i) {
-      auto& scope = scope_.NewScope();
-      auto pre_t = scope.NewVar("rnn/pre_h")->GetMutable<Tensor>();
-      pre_t->mutable_data<float>({20, 30}, platform::CPUPlace());
-      auto tensor = scope.NewVar("rnn/h")->GetMutable<Tensor>();
-      tensor->mutable_data<float>({20, 30}, platform::CPUPlace());
-
-      // for unit test of ConcatOutputs
-      auto xg = scope.NewVar("rnn/x_grad")->GetMutable<Tensor>();
-      xg->mutable_data<float>({20, 30}, platform::CPUPlace());
-
-      step_scopes->emplace_back(&scope);
-    }
-
-    // last time step
-    auto g = (*step_scopes)[9]->NewVar("rnn/h_pre_grad")->GetMutable<Tensor>();
-    g->mutable_data<float>({20, 30}, platform::CPUPlace());
-  }
-
-  void CreateRNNGradientAlgorithm() {
-    std::unique_ptr<rnn::Argument> arg(new rnn::Argument());
-    arg->step_net = "step_net";
-    arg->step_scopes = "step_scopes";
-    rnn::Link inlink;
-    inlink.external = "h_grad";
-    inlink.internal = "rnn/h_grad";
-    arg->inlinks = std::vector<rnn::Link>{inlink};
-
-    rnn::Link outlink;
-    outlink.external = "x_grad";
-    outlink.internal = "rnn/x_grad";
-    arg->outlinks = std::vector<rnn::Link>{outlink};
-
-    rnn::MemoryAttr mem_attr;
-    mem_attr.pre_var = "rnn/h_pre_grad";
-    mem_attr.var = "rnn/h_grad";
-    mem_attr.boot_var = "h_boot_grad";
-    arg->memories = std::vector<rnn::MemoryAttr>{mem_attr};
-
-    rnn_grad_algo_.Init(std::move(arg));
-  }
-
-  void CreateStepNet() {
-    LOG(INFO) << "create variable step_net";
-    Variable* var = scope_.NewVar("step_net");
-    auto net = var->GetMutable<NetOp>();
-    // TODO(qingqing) modify backward op create for RNNOp unit test
-    // and the unit test will be removed to Python.
-    // net->AddOp(OpRegistry::CreateOp("mul", {"X", {"rnn/h_pre", "rnn/w",
-    // "rnn/s_grad"}}, {"Y", {"rnn/h_pre_grad", "rnn/w_grad"}}, {}));
-    // net->AddOp(OpRegistry::CreateOp("add_two", {"X", {"rnn/h_grad"}},
-    // {"Y", {"rnn/x_grad"}}, {"Out", "rnn/s_grad"}}, {}));
-    net->CompleteAddOp();
-  }
-
-  void SegmentInputs() {
-    LOG(INFO) << "segment inputs";
-    std::vector<std::string> inlinks = {"x"};
-    std::vector<std::string> inlinks_alias = {"rnn/x"};
-
-    rnn::Link inlink;
-    inlink.external = "x";
-    inlink.internal = "rnn/x";
-    auto step_scopes =
-        scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
-    rnn::SegmentInputs(*step_scopes, std::vector<rnn::Link>{inlink}, 10,
-                       true /*infer_shape_mode*/);
-  }
-
-  void LinkeMemories() {
-    LOG(INFO) << "link memories";
-    rnn::MemoryAttr mem_attr;
-    mem_attr.pre_var = "rnn/h_pre";
-    mem_attr.var = "rnn/h";
-    mem_attr.boot_var = "boot_h";
-    std::vector<rnn::MemoryAttr> memories;
-    memories.push_back(mem_attr);
-    auto step_scopes =
-        scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
-    for (int i = 1; i < 10; ++i) {
-      rnn::LinkMemories(*step_scopes, memories, i, -1,
-                        true /*infer_shape_mode*/);
-    }
-  }
-
-  Scope scope_;
-  RecurrentGradientAlgorithm rnn_grad_algo_;
-};
// TEST_F(RecurrentGradientAlgorithmTest, Run) {
// platform::CPUDeviceContext ctx;
// rnn_op_->InferShape(scope_);
// rnn_op_->Run(scope_, ctx);
//}
//
// class RecurrentGradientAlgorithmTest : public ::testing::Test {
// protected:
// virtual void SetUp() override {
// CreateGlobalVariables();
// CreateStepScopes();
// CreateStepNet();
// CreateRNNGradientAlgorithm();
//
// // segment inputs
// SegmentInputs();
// // link forward memories
// LinkeMemories();
// }
//
// virtual void TearDown() override {}
//
// void CreateGlobalVariables() {
// // inputs: x
// LOG(INFO) << "create global variable x";
// Variable* x = scope_.NewVar("x");
// DDim dims =
// make_ddim({10 /*sent size*/, 20 /*batch size*/, 30 /*input dim*/});
// x->GetMutable<Tensor>()->mutable_data<float>(dims, platform::CPUPlace());
// // inputs: h_boot
// LOG(INFO) << "create global variable h_boot";
// Variable* h_boot = scope_.NewVar("h_boot");
// h_boot->GetMutable<Tensor>()->mutable_data<float>(
// make_ddim({20 /*batch size*/, 30 /*input dim*/}),
// platform::CPUPlace());
// // inputs: w
// LOG(INFO) << "create global variable w";
// Variable* w = scope_.NewVar("rnn/w");
// w->GetMutable<Tensor>()->mutable_data<float>(make_ddim({30, 30}),
// platform::CPUPlace());
// // inputs: h_grad
// LOG(INFO) << "create variable h_grad";
// Variable* dh = scope_.NewVar("h_grad");
// dh->GetMutable<Tensor>()->mutable_data<float>(make_ddim({10, 20, 30}),
// platform::CPUPlace());
// // inputs: step_scopes
// LOG(INFO) << "create variable step_scopes";
// scope_.NewVar("step_scopes");
// // inputs: step_net
// LOG(INFO) << "create variable step_net";
// scope_.NewVar("step_net");
// // outputs: w_grad
// LOG(INFO) << "create global variable w_grad";
// scope_.NewVar("rnn/w_grad");
// // outputs: x_grad
// LOG(INFO) << "create global variable x_grad";
// scope_.NewVar("x_grad");
// // outputs: h_boot_grad
// LOG(INFO) << "create global variable h_boot_grad";
// scope_.NewVar("h_boot_grad");
// }
//
// void CreateStepScopes() {
// auto step_scopes =
// scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
// for (int i = 0; i < 10; ++i) {
// auto& scope = scope_.NewScope();
// auto pre_t = scope.NewVar("rnn/pre_h")->GetMutable<Tensor>();
// pre_t->mutable_data<float>({20, 30}, platform::CPUPlace());
// auto tensor = scope.NewVar("rnn/h")->GetMutable<Tensor>();
// tensor->mutable_data<float>({20, 30}, platform::CPUPlace());
//
// // for unit test of ConcatOutputs
// auto xg = scope.NewVar("rnn/x_grad")->GetMutable<Tensor>();
// xg->mutable_data<float>({20, 30}, platform::CPUPlace());
//
// step_scopes->emplace_back(&scope);
// }
//
// // last time step
// auto g =
// (*step_scopes)[9]->NewVar("rnn/h_pre_grad")->GetMutable<Tensor>();
// g->mutable_data<float>({20, 30}, platform::CPUPlace());
// }
//
// void CreateRNNGradientAlgorithm() {
// std::unique_ptr<rnn::Argument> arg(new rnn::Argument());
// arg->step_net = "step_net";
// arg->step_scopes = "step_scopes";
// rnn::Link inlink;
// inlink.external = "h_grad";
// inlink.internal = "rnn/h_grad";
// arg->inlinks = std::vector<rnn::Link>{inlink};
//
// rnn::Link outlink;
// outlink.external = "x_grad";
// outlink.internal = "rnn/x_grad";
// arg->outlinks = std::vector<rnn::Link>{outlink};
//
// rnn::MemoryAttr mem_attr;
// mem_attr.pre_var = "rnn/h_pre_grad";
// mem_attr.var = "rnn/h_grad";
// mem_attr.boot_var = "h_boot_grad";
// arg->memories = std::vector<rnn::MemoryAttr>{mem_attr};
//
// rnn_grad_algo_.Init(std::move(arg));
// }
//
// void CreateStepNet() {
// LOG(INFO) << "create variable step_net";
// Variable* var = scope_.NewVar("step_net");
// auto net = var->GetMutable<NetOp>();
// net->AddOp(OpRegistry::CreateOp("mul", {"rnn/h_pre", "rnn/w",
// "rnn/s_grad"},
// {"rnn/h_pre_grad", "rnn/w_grad"}, {}));
//
// net->AddOp(OpRegistry::CreateOp("add_two", {"rnn/h_grad"},
// {"rnn/x_grad", "rnn/s_grad"}, {}));
// net->CompleteAddOp();
// }
//
// void SegmentInputs() {
// LOG(INFO) << "segment inputs";
// std::vector<std::string> inlinks = {"x"};
// std::vector<std::string> inlinks_alias = {"rnn/x"};
//
// rnn::Link inlink;
// inlink.external = "x";
// inlink.internal = "rnn/x";
// auto step_scopes =
// scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
// rnn::SegmentInputs(*step_scopes, std::vector<rnn::Link>{inlink}, 10,
// true /*infer_shape_mode*/);
// }
//
// void LinkeMemories() {
// LOG(INFO) << "link memories";
// rnn::MemoryAttr mem_attr;
// mem_attr.pre_var = "rnn/h_pre";
// mem_attr.var = "rnn/h";
// mem_attr.boot_var = "boot_h";
// std::vector<rnn::MemoryAttr> memories;
// memories.push_back(mem_attr);
// auto step_scopes =
// scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
// for (int i = 1; i < 10; ++i) {
// rnn::LinkMemories(*step_scopes, memories, i, -1,
// true /*infer_shape_mode*/);
// }
// }
//
// Scope scope_;
// RecurrentGradientAlgorithm rnn_grad_algo_;
//};
//
//// TEST_F(RecurrentGradientAlgorithmTest, Run) {
//// platform::CPUDeviceContext ctx;
//// rnn_grad_algo_.Run(scope_, ctx);
//// }
//
//} // namespace operators
//} // namespace paddle
//
// TEST(RecurrentOp, LinkMemories) {
// using namespace paddle::framework;
// using namespace paddle::platform;
// using namespace paddle::operators;
//
// // create and init step scopes
// size_t len = 10;
// std::vector<Scope*> step_scopes;
// for (size_t i = 0; i < len; ++i) {
// auto scope = new Scope();
// scope->NewVar("pre_h");
// auto tensor = scope->NewVar("h")->GetMutable<Tensor>();
// float* data = tensor->mutable_data<float>({15, 20}, CPUPlace());
// for (size_t j = 0; j < 15 * 20; ++j) {
// data[j] = rand() * (1. / (double)RAND_MAX);
// }
// step_scopes.push_back(scope);
// }
//
// // create MemoryAttr
// rnn::MemoryAttr mem_attr;
// mem_attr.pre_var = "pre_h";
// mem_attr.var = "h";
// mem_attr.boot_var = "boot_h";
// std::vector<rnn::MemoryAttr> memories;
// memories.push_back(mem_attr);
//
// for (size_t i = 1; i < len; ++i) {
// rnn::LinkMemories(step_scopes, memories, i, -1, false
// /*infer_shape_mode*/);
// rnn_grad_algo_.Run(scope_, ctx);
// }
// // check
// for (size_t i = 0; i < len - 1; ++i) {
// const float* a =
// step_scopes[i]->FindVar("h")->GetMutable<Tensor>()->data<float>();
// const float* b = step_scopes[i + 1]
// ->FindVar("pre_h")
// ->GetMutable<Tensor>()
// ->data<float>();
// for (size_t j = 0; j < 15 * 20; ++j) {
// ASSERT_FLOAT_EQ(a[j], b[j]);
// }
// }
//
// for (int i = len - 2; i >= 0; --i) {
// rnn::LinkMemories(step_scopes, memories, i, 1, false
// /*infer_shape_mode*/);
// }
// // check
// for (int i = len - 2; i >= 0; --i) {
// const float* a =
// step_scopes[i]->FindVar("pre_h")->GetMutable<Tensor>()->data<float>();
// const float* b =
// step_scopes[i + 1]->FindVar("h")->GetMutable<Tensor>()->data<float>();
// for (size_t j = 0; j < 15 * 20; ++j) {
// ASSERT_FLOAT_EQ(a[j], b[j]);
// }
// }
//
// for (auto s : step_scopes) {
// delete s;
// }
//}
//
// USE_OP(add_two);
// USE_OP(mul);
// USE_OP_WITHOUT_KERNEL(recurrent_op);
-}  // namespace operators
-}  // namespace paddle
-TEST(RecurrentOp, LinkMemories) {
-  using namespace paddle::framework;
-  using namespace paddle::platform;
-  using namespace paddle::operators;
-
-  // create and init step scopes
-  size_t len = 10;
-  std::vector<Scope*> step_scopes;
-  for (size_t i = 0; i < len; ++i) {
-    auto scope = new Scope();
-    scope->NewVar("pre_h");
-    auto tensor = scope->NewVar("h")->GetMutable<Tensor>();
-    float* data = tensor->mutable_data<float>({15, 20}, CPUPlace());
-    for (size_t j = 0; j < 15 * 20; ++j) {
-      data[j] = rand() * (1. / (double)RAND_MAX);
-    }
-    step_scopes.push_back(scope);
-  }
-
-  // create MemoryAttr
-  rnn::MemoryAttr mem_attr;
-  mem_attr.pre_var = "pre_h";
-  mem_attr.var = "h";
-  mem_attr.boot_var = "boot_h";
-  std::vector<rnn::MemoryAttr> memories;
-  memories.push_back(mem_attr);
-
-  for (size_t i = 1; i < len; ++i) {
-    rnn::LinkMemories(step_scopes, memories, i, -1, false /*infer_shape_mode*/);
-  }
-  // check
-  for (size_t i = 0; i < len - 1; ++i) {
-    const float* a =
-        step_scopes[i]->FindVar("h")->GetMutable<Tensor>()->data<float>();
-    const float* b = step_scopes[i + 1]
-                         ->FindVar("pre_h")
-                         ->GetMutable<Tensor>()
-                         ->data<float>();
-    for (size_t j = 0; j < 15 * 20; ++j) {
-      ASSERT_FLOAT_EQ(a[j], b[j]);
-    }
-  }
-
-  for (int i = len - 2; i >= 0; --i) {
-    rnn::LinkMemories(step_scopes, memories, i, 1, false /*infer_shape_mode*/);
-  }
-  // check
-  for (int i = len - 2; i >= 0; --i) {
-    const float* a =
-        step_scopes[i]->FindVar("pre_h")->GetMutable<Tensor>()->data<float>();
-    const float* b =
-        step_scopes[i + 1]->FindVar("h")->GetMutable<Tensor>()->data<float>();
-    for (size_t j = 0; j < 15 * 20; ++j) {
-      ASSERT_FLOAT_EQ(a[j], b[j]);
-    }
-  }
-
-  for (auto s : step_scopes) {
-    delete s;
-  }
-}
-USE_OP(add_two);
-USE_OP(mul);
-USE_OP_WITHOUT_KERNEL(recurrent_op);
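The removed (and now commented-out) LinkMemories test checks that linking with offset -1 makes each step's pre_h mirror the previous step's h, and offset +1 the reverse. A minimal sketch of that invariant over plain buffers, assuming the memory link is implemented as aliasing the neighboring step's data (a hypothetical stand-in for the framework's tensor sharing):

#include <cassert>
#include <vector>

int main() {
  const int len = 10;
  // Hypothetical stand-in: each "step scope" holds an h buffer, and pre_h
  // is a pointer that linking would alias to a neighboring step's h.
  std::vector<std::vector<float>> h(len, std::vector<float>(15 * 20, 0.f));
  std::vector<const std::vector<float>*> pre_h(len, nullptr);

  for (int i = 0; i < len; ++i) h[i].assign(15 * 20, float(i));

  // offset -1: step i's pre_h links to step i-1's h.
  for (int i = 1; i < len; ++i) pre_h[i] = &h[i - 1];

  // check: pre_h of step i+1 equals h of step i, element by element.
  for (int i = 0; i < len - 1; ++i) {
    for (int j = 0; j < 15 * 20; ++j) {
      assert((*pre_h[i + 1])[j] == h[i][j]);
    }
  }
  return 0;
}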
python/paddle/v2/framework/tests/test_operator.py
@@ -74,6 +74,7 @@ class TestOpDescCreationMethod(unittest.TestCase):
         expected1.inputs.extend(['x', 'w', 'b'])
         expected1.outputs.extend(['y'])
         expected1.type = 'fc'
+        # the input_format can be removed after testing
         attr = expected1.attrs.add()
         attr.name = 'input_format'
         attr.type = attribute_pb2.INTS
@@ -86,6 +87,7 @@ class TestOpDescCreationMethod(unittest.TestCase):
         expected2.inputs.extend(['x1', 'x2', 'x3', 'w1', 'w2', 'w3', 'b'])
         expected2.outputs.extend(['y'])
         expected2.type = 'fc'
+        # the input_format can be removed after testing
         attr = expected2.attrs.add()
         attr.name = 'input_format'
         attr.type = attribute_pb2.INTS