机器未来 / Paddle (forked from PaddlePaddle / Paddle)

Commit 5a591117
Authored on Aug 09, 2017 by qingqing01
Modify rnn op unit test after refactoring framework proto.
Parent: 4a788854
Showing 1 changed file with 227 additions and 376 deletions (+227 −376)
paddle/operators/recurrent_op_test.cc
(The 376 deleted lines were the previous, fully commented-out version of these tests; the 227 added lines below give the file's new content.)

@@ -22,382 +22,233 @@
#include "paddle/framework/tensor.h"
#include "paddle/framework/tensor.h"
#include "paddle/operators/net_op.h"
#include "paddle/operators/net_op.h"
TEST
(
rnn
,
bad
)
{
ASSERT_TRUE
(
false
);
}
namespace
paddle
{
namespace
operators
{
// namespace paddle {
using
namespace
paddle
::
framework
;
// namespace operators {
//
// using framework::make_ddim;
// using framework::make_ddim;
// using framework::DDim;
// using framework::DDim;
//
// class RecurrentOpTest : public ::testing::Test {
class
RecurrentGradientAlgorithmTest
:
public
::
testing
::
Test
{
// protected:
protected:
// virtual void SetUp() override {
virtual
void
SetUp
()
override
{
// CreateGlobalVariables();
CreateGlobalVariables
();
// CreateStepNet();
CreateStepScopes
();
// CreateRNNOp();
CreateStepNet
();
// }
CreateRNNGradientAlgorithm
();
//
// virtual void TearDown() override {}
// segment inputs
//
SegmentInputs
();
// void CreateGlobalVariables() {
// link forward memories
// // create input, and init content
LinkeMemories
();
// LOG(INFO) << "create global variable x";
}
// for (auto inlink : std::vector<std::string>{"x", "x0", "x1", "h"}) {
// Variable* x = scope_.NewVar(inlink);
virtual
void
TearDown
()
override
{}
// DDim dims = make_ddim(std::vector<int>{
// 10 /*sent size*/, 20 /*batch size*/, 30 /*input dim*/});
void
CreateGlobalVariables
()
{
// x->GetMutable<Tensor>()->mutable_data<float>(dims,
// inputs: x
// platform::CPUPlace());
LOG
(
INFO
)
<<
"create global variable x"
;
// }
Variable
*
x
=
scope_
.
NewVar
(
"x"
);
// // create output alias just for test
DDim
dims
=
// for (auto inlink : std::vector<std::string>{"h@alias"}) {
make_ddim
({
10
/*sent size*/
,
20
/*batch size*/
,
30
/*input dim*/
});
// Variable* x = scope_.NewVar(inlink);
x
->
GetMutable
<
Tensor
>
()
->
mutable_data
<
float
>
(
dims
,
platform
::
CPUPlace
());
// DDim dims =
// inputs: h_boot
// make_ddim(std::vector<int>{20 /*batch size*/, 30 /*input dim*/});
LOG
(
INFO
)
<<
"create global variable h_boot"
;
// x->GetMutable<Tensor>()->mutable_data<float>(dims,
Variable
*
h_boot
=
scope_
.
NewVar
(
"h_boot"
);
// platform::CPUPlace());
h_boot
->
GetMutable
<
Tensor
>
()
->
mutable_data
<
float
>
(
// }
make_ddim
({
20
/*batch size*/
,
30
/*input dim*/
}),
platform
::
CPUPlace
());
//
// inputs: w
// LOG(INFO) << "create global variable w";
LOG
(
INFO
)
<<
"create global variable w"
;
// Variable* w = scope_.NewVar("rnn/w");
Variable
*
w
=
scope_
.
NewVar
(
"rnn/w"
);
// w->GetMutable<Tensor>()->mutable_data<float>(
w
->
GetMutable
<
Tensor
>
()
->
mutable_data
<
float
>
(
make_ddim
({
30
,
30
}),
// make_ddim(std::vector<int>{30, 30}), platform::CPUPlace());
platform
::
CPUPlace
());
//
// inputs: h_grad
// for (auto boot : std::vector<std::string>{"h_boot"}) {
LOG
(
INFO
)
<<
"create variable h_grad"
;
// LOG(INFO) << "create global variable " << boot;
Variable
*
dh
=
scope_
.
NewVar
(
"h_grad"
);
// Variable* h_boot = scope_.NewVar(boot);
dh
->
GetMutable
<
Tensor
>
()
->
mutable_data
<
float
>
(
make_ddim
({
10
,
20
,
30
}),
// h_boot->GetMutable<Tensor>()->mutable_data<float>(
platform
::
CPUPlace
());
// make_ddim(std::vector<int>{20 /*batch size*/, 30 /*input dim*/}),
// inputs: step_scopes
// platform::CPUPlace());
LOG
(
INFO
)
<<
"create variable step_scopes"
;
// }
scope_
.
NewVar
(
"step_scopes"
);
//
// inputs: step_net
// LOG(INFO) << "create variable step_scopes";
LOG
(
INFO
)
<<
"create variable step_net"
;
// scope_.NewVar("step_scopes");
scope_
.
NewVar
(
"step_net"
);
//
// outputs: w_grad
// LOG(INFO) << "create variable h";
LOG
(
INFO
)
<<
"create global variable w_grad"
;
// scope_.NewVar("h");
scope_
.
NewVar
(
"rnn/w_grad"
);
// }
// outputs: x_grad
//
LOG
(
INFO
)
<<
"create global variable x_grad"
;
// void CreateRNNOp() {
scope_
.
NewVar
(
"x_grad"
);
// framework::OpDesc op_desc;
// outputs: h_boot_grad
//
LOG
(
INFO
)
<<
"create global variable h_boot_grad"
;
// op_desc.set_type("recurrent_op");
scope_
.
NewVar
(
"h_boot_grad"
);
// // inlinks 0
}
// op_desc.add_inputs("x");
// op_desc.add_inputs("x0");
void
CreateStepScopes
()
{
// op_desc.add_inputs("x1");
auto
step_scopes
=
// // boot_memories 3
scope_
.
FindVar
(
"step_scopes"
)
->
GetMutable
<
std
::
vector
<
Scope
*>>
();
// op_desc.add_inputs("h_boot");
for
(
int
i
=
0
;
i
<
10
;
++
i
)
{
// // step net 5
auto
&
scope
=
scope_
.
NewScope
();
// op_desc.add_inputs("step_net");
auto
pre_t
=
scope
.
NewVar
(
"rnn/pre_h"
)
->
GetMutable
<
Tensor
>
();
// // outlinks 6
pre_t
->
mutable_data
<
float
>
({
20
,
30
},
platform
::
CPUPlace
());
// op_desc.add_outputs("h");
auto
tensor
=
scope
.
NewVar
(
"rnn/h"
)
->
GetMutable
<
Tensor
>
();
// // step scopes 7
tensor
->
mutable_data
<
float
>
({
20
,
30
},
platform
::
CPUPlace
());
// op_desc.add_outputs("step_scopes");
//
// for unit test of ConcatOutputs
// auto _input_format = std::vector<int>{
auto
xg
=
scope
.
NewVar
(
"rnn/x_grad"
)
->
GetMutable
<
Tensor
>
();
// 0, // in_link
xg
->
mutable_data
<
float
>
({
20
,
30
},
platform
::
CPUPlace
());
// 3, // memories
// 4 // step_net
step_scopes
->
emplace_back
(
&
scope
);
// };
}
// auto input_format = op_desc.add_attrs();
// input_format->set_name("input_format");
// last time step
// input_format->set_type(paddle::framework::AttrType::INTS);
auto
g
=
(
*
step_scopes
)[
9
]
->
NewVar
(
"rnn/h_pre_grad"
)
->
GetMutable
<
Tensor
>
();
// for (auto i : _input_format) {
g
->
mutable_data
<
float
>
({
20
,
30
},
platform
::
CPUPlace
());
// input_format->add_ints(i);
}
// }
//
void
CreateRNNGradientAlgorithm
()
{
// auto output_format = op_desc.add_attrs();
std
::
unique_ptr
<
rnn
::
Argument
>
arg
(
new
rnn
::
Argument
());
// output_format->set_name("output_format");
arg
->
step_net
=
"step_net"
;
// output_format->set_type(paddle::framework::AttrType::INTS);
arg
->
step_scopes
=
"step_scopes"
;
// for (auto i : std::vector<int>{0, 1, 2}) {
rnn
::
Link
inlink
;
// output_format->add_ints(i);
inlink
.
external
=
"h_grad"
;
// }
inlink
.
internal
=
"rnn/h_grad"
;
//
arg
->
inlinks
=
std
::
vector
<
rnn
::
Link
>
{
inlink
};
// auto inlink_alias = op_desc.add_attrs();
// inlink_alias->set_name("inlink_alias");
rnn
::
Link
outlink
;
// inlink_alias->set_type(paddle::framework::AttrType::STRINGS);
outlink
.
external
=
"x_grad"
;
//
outlink
.
internal
=
"rnn/x_grad"
;
// auto outlink_alias = op_desc.add_attrs();
arg
->
outlinks
=
std
::
vector
<
rnn
::
Link
>
{
outlink
};
// outlink_alias->set_name("outlink_alias");
// outlink_alias->set_type(paddle::framework::AttrType::STRINGS);
rnn
::
MemoryAttr
mem_attr
;
//
mem_attr
.
pre_var
=
"rnn/h_pre_grad"
;
// auto pre_memories = op_desc.add_attrs();
mem_attr
.
var
=
"rnn/h_grad"
;
// pre_memories->set_name("pre_memories");
mem_attr
.
boot_var
=
"h_boot_grad"
;
// pre_memories->set_type(paddle::framework::AttrType::STRINGS);
arg
->
memories
=
std
::
vector
<
rnn
::
MemoryAttr
>
{
mem_attr
};
//
// auto memories = op_desc.add_attrs();
rnn_grad_algo_
.
Init
(
std
::
move
(
arg
));
// memories->set_name("memories");
}
// memories->set_type(paddle::framework::AttrType::STRINGS);
//
void
CreateStepNet
()
{
// // create inlink_alias
LOG
(
INFO
)
<<
"create variable step_net"
;
// for (const auto& item :
Variable
*
var
=
scope_
.
NewVar
(
"step_net"
);
// std::vector<std::string>{"x@alias", "x0@alias", "x1@alias"}) {
auto
net
=
var
->
GetMutable
<
NetOp
>
();
// inlink_alias->add_strings(item);
// TODO(qingqing) modify backward op create for RNNOp unit test
// }
// and the unit test will be removed to Python.
// // pre memories
// net->AddOp(OpRegistry::CreateOp("mul", {"X", {"rnn/h_pre", "rnn/w",
// for (const auto& item : std::vector<std::string>{"rnn/h@pre"}) {
// "rnn/s_grad"}}, {"Y", {"rnn/h_pre_grad", "rnn/w_grad"}}, {}));
// pre_memories->add_strings(item);
// }
// net->AddOp(OpRegistry::CreateOp("add_two", {"X", {"rnn/h_grad"}},
// // memories
// {"Y", {"rnn/x_grad"}}, {"Out", "rnn/s_grad"}}, {}));
// for (const auto& item : std::vector<std::string>{"rnn/h"}) {
net
->
CompleteAddOp
();
// memories->add_strings(item);
}
// }
// // output alias
void
SegmentInputs
()
{
// for (const auto& item : std::vector<std::string>{"h@alias"}) {
LOG
(
INFO
)
<<
"segment inputs"
;
// outlink_alias->add_strings(item);
std
::
vector
<
std
::
string
>
inlinks
=
{
"x"
};
// }
std
::
vector
<
std
::
string
>
inlinks_alias
=
{
"rnn/x"
};
//
// rnn_op_ = OpRegistry::CreateOp(op_desc);
rnn
::
Link
inlink
;
//
inlink
.
external
=
"x"
;
// LOG(INFO) << "rnn_op finish init";
inlink
.
internal
=
"rnn/x"
;
// }
auto
step_scopes
=
//
scope_
.
FindVar
(
"step_scopes"
)
->
GetMutable
<
std
::
vector
<
Scope
*>>
();
// void CreateStepNet() {
rnn
::
SegmentInputs
(
*
step_scopes
,
std
::
vector
<
rnn
::
Link
>
{
inlink
},
10
,
// LOG(INFO) << "create variable step_net";
true
/*infer_shape_mode*/
);
// Variable* var = scope_.NewVar("step_net");
}
// auto net = var->GetMutable<NetOp>();
// net->AddOp(
void
LinkeMemories
()
{
// OpRegistry::CreateOp("mul", {"rnn/h@pre", "rnn/w"}, {"rnn/s"}, {}));
LOG
(
INFO
)
<<
"link memories"
;
//
rnn
::
MemoryAttr
mem_attr
;
// net->AddOp(
mem_attr
.
pre_var
=
"rnn/h_pre"
;
// OpRegistry::CreateOp("add_two", {"x@alias", "rnn/s"}, {"rnn/h"}, {}));
mem_attr
.
var
=
"rnn/h"
;
// net->CompleteAddOp();
mem_attr
.
boot_var
=
"boot_h"
;
// }
std
::
vector
<
rnn
::
MemoryAttr
>
memories
;
//
memories
.
push_back
(
mem_attr
);
// // father scope
auto
step_scopes
=
// Scope scope_;
scope_
.
FindVar
(
"step_scopes"
)
->
GetMutable
<
std
::
vector
<
Scope
*>>
();
// std::shared_ptr<OperatorBase> rnn_op_;
for
(
int
i
=
1
;
i
<
10
;
++
i
)
{
//};
rnn
::
LinkMemories
(
*
step_scopes
,
memories
,
i
,
-
1
,
//
true
/*infer_shape_mode*/
);
// TEST_F(RecurrentOpTest, Run) {
}
// platform::CPUDeviceContext ctx;
}
// rnn_op_->InferShape(scope_);
// rnn_op_->Run(scope_, ctx);
Scope
scope_
;
//}
RecurrentGradientAlgorithm
rnn_grad_algo_
;
//
};
// class RecurrentGradientAlgorithmTest : public ::testing::Test {
// protected:
// TEST_F(RecurrentGradientAlgorithmTest, Run) {
// virtual void SetUp() override {
// platform::CPUDeviceContext ctx;
// CreateGlobalVariables();
// rnn_grad_algo_.Run(scope_, ctx);
// CreateStepScopes();
// }
// CreateStepNet();
// CreateRNNGradientAlgorithm();
}
// namespace operators
//
}
// namespace paddle
// // segment inputs
// SegmentInputs();
TEST
(
RecurrentOp
,
LinkMemories
)
{
// // link forward memories
using
namespace
paddle
::
framework
;
// LinkeMemories();
using
namespace
paddle
::
platform
;
// }
using
namespace
paddle
::
operators
;
//
// virtual void TearDown() override {}
// create and init step scopes
//
size_t
len
=
10
;
// void CreateGlobalVariables() {
std
::
vector
<
Scope
*>
step_scopes
;
// // inputs: x
for
(
size_t
i
=
0
;
i
<
len
;
++
i
)
{
// LOG(INFO) << "create global variable x";
auto
scope
=
new
Scope
();
// Variable* x = scope_.NewVar("x");
scope
->
NewVar
(
"pre_h"
);
// DDim dims =
auto
tensor
=
scope
->
NewVar
(
"h"
)
->
GetMutable
<
Tensor
>
();
// make_ddim({10 /*sent size*/, 20 /*batch size*/, 30 /*input dim*/});
float
*
data
=
tensor
->
mutable_data
<
float
>
({
15
,
20
},
CPUPlace
());
// x->GetMutable<Tensor>()->mutable_data<float>(dims, platform::CPUPlace());
for
(
size_t
j
=
0
;
j
<
15
*
20
;
++
j
)
{
// // inputs: h_boot
data
[
j
]
=
rand
()
*
(
1.
/
(
double
)
RAND_MAX
);
// LOG(INFO) << "create global variable h_boot";
}
// Variable* h_boot = scope_.NewVar("h_boot");
step_scopes
.
push_back
(
scope
);
// h_boot->GetMutable<Tensor>()->mutable_data<float>(
}
// make_ddim({20 /*batch size*/, 30 /*input dim*/}),
// platform::CPUPlace());
// create MemoryAttr
// // inputs: w
rnn
::
MemoryAttr
mem_attr
;
// LOG(INFO) << "create global variable w";
mem_attr
.
pre_var
=
"pre_h"
;
// Variable* w = scope_.NewVar("rnn/w");
mem_attr
.
var
=
"h"
;
// w->GetMutable<Tensor>()->mutable_data<float>(make_ddim({30, 30}),
mem_attr
.
boot_var
=
"boot_h"
;
// platform::CPUPlace());
std
::
vector
<
rnn
::
MemoryAttr
>
memories
;
// // inputs: h_grad
memories
.
push_back
(
mem_attr
);
// LOG(INFO) << "create variable h_grad";
// Variable* dh = scope_.NewVar("h_grad");
for
(
size_t
i
=
1
;
i
<
len
;
++
i
)
{
// dh->GetMutable<Tensor>()->mutable_data<float>(make_ddim({10, 20, 30}),
rnn
::
LinkMemories
(
step_scopes
,
memories
,
i
,
-
1
,
false
// platform::CPUPlace());
/*infer_shape_mode*/
);
// // inputs: step_scopes
}
// LOG(INFO) << "create variable step_scopes";
// check
// scope_.NewVar("step_scopes");
for
(
size_t
i
=
0
;
i
<
len
-
1
;
++
i
)
{
// // inputs: step_net
const
float
*
a
=
// LOG(INFO) << "create variable step_net";
step_scopes
[
i
]
->
FindVar
(
"h"
)
->
GetMutable
<
Tensor
>
()
->
data
<
float
>
();
// scope_.NewVar("step_net");
const
float
*
b
=
step_scopes
[
i
+
1
]
// // outputs: w_grad
->
FindVar
(
"pre_h"
)
// LOG(INFO) << "create global variable w_grad";
->
GetMutable
<
Tensor
>
()
// scope_.NewVar("rnn/w_grad");
->
data
<
float
>
();
// // outputs: x_grad
for
(
size_t
j
=
0
;
j
<
15
*
20
;
++
j
)
{
// LOG(INFO) << "create global variable x_grad";
ASSERT_FLOAT_EQ
(
a
[
j
],
b
[
j
]);
// scope_.NewVar("x_grad");
}
// // outputs: h_boot_grad
}
// LOG(INFO) << "create global variable h_boot_grad";
// scope_.NewVar("h_boot_grad");
for
(
int
i
=
len
-
2
;
i
>=
0
;
--
i
)
{
// }
rnn
::
LinkMemories
(
step_scopes
,
memories
,
i
,
1
,
false
//
/*infer_shape_mode*/
);
// void CreateStepScopes() {
}
// auto step_scopes =
// check
// scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
for
(
int
i
=
len
-
2
;
i
>=
0
;
--
i
)
{
// for (int i = 0; i < 10; ++i) {
const
float
*
a
=
// auto& scope = scope_.NewScope();
step_scopes
[
i
]
->
FindVar
(
"pre_h"
)
->
GetMutable
<
Tensor
>
()
->
data
<
float
>
();
// auto pre_t = scope.NewVar("rnn/pre_h")->GetMutable<Tensor>();
const
float
*
b
=
// pre_t->mutable_data<float>({20, 30}, platform::CPUPlace());
step_scopes
[
i
+
1
]
->
FindVar
(
"h"
)
->
GetMutable
<
Tensor
>
()
->
data
<
float
>
();
// auto tensor = scope.NewVar("rnn/h")->GetMutable<Tensor>();
for
(
size_t
j
=
0
;
j
<
15
*
20
;
++
j
)
{
// tensor->mutable_data<float>({20, 30}, platform::CPUPlace());
ASSERT_FLOAT_EQ
(
a
[
j
],
b
[
j
]);
//
}
// // for unit test of ConcatOutputs
}
// auto xg = scope.NewVar("rnn/x_grad")->GetMutable<Tensor>();
// xg->mutable_data<float>({20, 30}, platform::CPUPlace());
for
(
auto
s
:
step_scopes
)
{
//
delete
s
;
// step_scopes->emplace_back(&scope);
}
// }
}
//
// // last time step
USE_OP
(
add_two
);
// auto g =
USE_OP
(
mul
);
// (*step_scopes)[9]->NewVar("rnn/h_pre_grad")->GetMutable<Tensor>();
USE_OP_WITHOUT_KERNEL
(
recurrent_op
);
// g->mutable_data<float>({20, 30}, platform::CPUPlace());
// }
//
// void CreateRNNGradientAlgorithm() {
// std::unique_ptr<rnn::Argument> arg(new rnn::Argument());
// arg->step_net = "step_net";
// arg->step_scopes = "step_scopes";
// rnn::Link inlink;
// inlink.external = "h_grad";
// inlink.internal = "rnn/h_grad";
// arg->inlinks = std::vector<rnn::Link>{inlink};
//
// rnn::Link outlink;
// outlink.external = "x_grad";
// outlink.internal = "rnn/x_grad";
// arg->outlinks = std::vector<rnn::Link>{outlink};
//
// rnn::MemoryAttr mem_attr;
// mem_attr.pre_var = "rnn/h_pre_grad";
// mem_attr.var = "rnn/h_grad";
// mem_attr.boot_var = "h_boot_grad";
// arg->memories = std::vector<rnn::MemoryAttr>{mem_attr};
//
// rnn_grad_algo_.Init(std::move(arg));
// }
//
// void CreateStepNet() {
// LOG(INFO) << "create variable step_net";
// Variable* var = scope_.NewVar("step_net");
// auto net = var->GetMutable<NetOp>();
// net->AddOp(OpRegistry::CreateOp("mul", {"rnn/h_pre", "rnn/w",
// "rnn/s_grad"},
// {"rnn/h_pre_grad", "rnn/w_grad"}, {}));
//
// net->AddOp(OpRegistry::CreateOp("add_two", {"rnn/h_grad"},
// {"rnn/x_grad", "rnn/s_grad"}, {}));
// net->CompleteAddOp();
// }
//
// void SegmentInputs() {
// LOG(INFO) << "segment inputs";
// std::vector<std::string> inlinks = {"x"};
// std::vector<std::string> inlinks_alias = {"rnn/x"};
//
// rnn::Link inlink;
// inlink.external = "x";
// inlink.internal = "rnn/x";
// auto step_scopes =
// scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
// rnn::SegmentInputs(*step_scopes, std::vector<rnn::Link>{inlink}, 10,
// true /*infer_shape_mode*/);
// }
//
// void LinkeMemories() {
// LOG(INFO) << "link memories";
// rnn::MemoryAttr mem_attr;
// mem_attr.pre_var = "rnn/h_pre";
// mem_attr.var = "rnn/h";
// mem_attr.boot_var = "boot_h";
// std::vector<rnn::MemoryAttr> memories;
// memories.push_back(mem_attr);
// auto step_scopes =
// scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
// for (int i = 1; i < 10; ++i) {
// rnn::LinkMemories(*step_scopes, memories, i, -1,
// true /*infer_shape_mode*/);
// }
// }
//
// Scope scope_;
// RecurrentGradientAlgorithm rnn_grad_algo_;
//};
//
//// TEST_F(RecurrentGradientAlgorithmTest, Run) {
//// platform::CPUDeviceContext ctx;
//// rnn_grad_algo_.Run(scope_, ctx);
//// }
//
//} // namespace operators
//} // namespace paddle
//
// TEST(RecurrentOp, LinkMemories) {
// using namespace paddle::framework;
// using namespace paddle::platform;
// using namespace paddle::operators;
//
// // create and init step scopes
// size_t len = 10;
// std::vector<Scope*> step_scopes;
// for (size_t i = 0; i < len; ++i) {
// auto scope = new Scope();
// scope->NewVar("pre_h");
// auto tensor = scope->NewVar("h")->GetMutable<Tensor>();
// float* data = tensor->mutable_data<float>({15, 20}, CPUPlace());
// for (size_t j = 0; j < 15 * 20; ++j) {
// data[j] = rand() * (1. / (double)RAND_MAX);
// }
// step_scopes.push_back(scope);
// }
//
// // create MemoryAttr
// rnn::MemoryAttr mem_attr;
// mem_attr.pre_var = "pre_h";
// mem_attr.var = "h";
// mem_attr.boot_var = "boot_h";
// std::vector<rnn::MemoryAttr> memories;
// memories.push_back(mem_attr);
//
// for (size_t i = 1; i < len; ++i) {
// rnn::LinkMemories(step_scopes, memories, i, -1, false
// /*infer_shape_mode*/);
// }
// // check
// for (size_t i = 0; i < len - 1; ++i) {
// const float* a =
// step_scopes[i]->FindVar("h")->GetMutable<Tensor>()->data<float>();
// const float* b = step_scopes[i + 1]
// ->FindVar("pre_h")
// ->GetMutable<Tensor>()
// ->data<float>();
// for (size_t j = 0; j < 15 * 20; ++j) {
// ASSERT_FLOAT_EQ(a[j], b[j]);
// }
// }
//
// for (int i = len - 2; i >= 0; --i) {
// rnn::LinkMemories(step_scopes, memories, i, 1, false
// /*infer_shape_mode*/);
// }
// // check
// for (int i = len - 2; i >= 0; --i) {
// const float* a =
// step_scopes[i]->FindVar("pre_h")->GetMutable<Tensor>()->data<float>();
// const float* b =
// step_scopes[i + 1]->FindVar("h")->GetMutable<Tensor>()->data<float>();
// for (size_t j = 0; j < 15 * 20; ++j) {
// ASSERT_FLOAT_EQ(a[j], b[j]);
// }
// }
//
// for (auto s : step_scopes) {
// delete s;
// }
//}
//
// USE_OP(add_two);
// USE_OP(mul);
// USE_OP_WITHOUT_KERNEL(recurrent_op);
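For context, below is a minimal sketch of a gtest entry point that would drive the TEST/TEST_F cases in this file. It is an illustrative assumption only, not part of this commit: the actual test target may link against gtest_main or a project-provided runner instead of defining its own main.

// Hypothetical standalone runner for these tests (assumption, not part of
// this commit). Linking against gtest_main would provide an equivalent main().
#include <gtest/gtest.h>

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);  // parses --gtest_filter and other gtest flags
  return RUN_ALL_TESTS();  // runs TEST(rnn, bad), TEST(RecurrentOp, LinkMemories), and fixture tests
}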