Commit 36709d05 authored by Yi Wang, committed by GitHub

Merge pull request #5 from qingqing01/rnn_test_for_refactorize_framework_proto

Modify rnn op unit test after refactoring framework proto.
@@ -22,382 +22,233 @@
#include "paddle/framework/tensor.h" #include "paddle/framework/tensor.h"
#include "paddle/operators/net_op.h" #include "paddle/operators/net_op.h"
TEST(rnn, bad) { ASSERT_TRUE(false); } namespace paddle {
namespace operators {
// namespace paddle { using namespace paddle::framework;
// namespace operators {
//
// using framework::make_ddim; // using framework::make_ddim;
// using framework::DDim; // using framework::DDim;
//
// class RecurrentOpTest : public ::testing::Test { class RecurrentGradientAlgorithmTest : public ::testing::Test {
// protected: protected:
// virtual void SetUp() override { virtual void SetUp() override {
// CreateGlobalVariables(); CreateGlobalVariables();
// CreateStepNet(); CreateStepScopes();
// CreateRNNOp(); CreateStepNet();
// } CreateRNNGradientAlgorithm();
//
// virtual void TearDown() override {} // segment inputs
// SegmentInputs();
// void CreateGlobalVariables() { // link forward memories
// // create input, and init content LinkeMemories();
// LOG(INFO) << "create global variable x"; }
// for (auto inlink : std::vector<std::string>{"x", "x0", "x1", "h"}) {
// Variable* x = scope_.NewVar(inlink); virtual void TearDown() override {}
// DDim dims = make_ddim(std::vector<int>{
// 10 /*sent size*/, 20 /*batch size*/, 30 /*input dim*/}); void CreateGlobalVariables() {
// x->GetMutable<Tensor>()->mutable_data<float>(dims, // inputs: x
// platform::CPUPlace()); LOG(INFO) << "create global variable x";
// } Variable* x = scope_.NewVar("x");
// // create output alias just for test DDim dims =
// for (auto inlink : std::vector<std::string>{"h@alias"}) { make_ddim({10 /*sent size*/, 20 /*batch size*/, 30 /*input dim*/});
// Variable* x = scope_.NewVar(inlink); x->GetMutable<Tensor>()->mutable_data<float>(dims, platform::CPUPlace());
// DDim dims = // inputs: h_boot
// make_ddim(std::vector<int>{20 /*batch size*/, 30 /*input dim*/}); LOG(INFO) << "create global variable h_boot";
// x->GetMutable<Tensor>()->mutable_data<float>(dims, Variable* h_boot = scope_.NewVar("h_boot");
// platform::CPUPlace()); h_boot->GetMutable<Tensor>()->mutable_data<float>(
// } make_ddim({20 /*batch size*/, 30 /*input dim*/}), platform::CPUPlace());
// // inputs: w
// LOG(INFO) << "create global variable w"; LOG(INFO) << "create global variable w";
// Variable* w = scope_.NewVar("rnn/w"); Variable* w = scope_.NewVar("rnn/w");
// w->GetMutable<Tensor>()->mutable_data<float>( w->GetMutable<Tensor>()->mutable_data<float>(make_ddim({30, 30}),
// make_ddim(std::vector<int>{30, 30}), platform::CPUPlace()); platform::CPUPlace());
// // inputs: h_grad
// for (auto boot : std::vector<std::string>{"h_boot"}) { LOG(INFO) << "create variable h_grad";
// LOG(INFO) << "create global variable " << boot; Variable* dh = scope_.NewVar("h_grad");
// Variable* h_boot = scope_.NewVar(boot); dh->GetMutable<Tensor>()->mutable_data<float>(make_ddim({10, 20, 30}),
// h_boot->GetMutable<Tensor>()->mutable_data<float>( platform::CPUPlace());
// make_ddim(std::vector<int>{20 /*batch size*/, 30 /*input dim*/}), // inputs: step_scopes
// platform::CPUPlace()); LOG(INFO) << "create variable step_scopes";
// } scope_.NewVar("step_scopes");
// // inputs: step_net
// LOG(INFO) << "create variable step_scopes"; LOG(INFO) << "create variable step_net";
// scope_.NewVar("step_scopes"); scope_.NewVar("step_net");
// // outputs: w_grad
// LOG(INFO) << "create variable h"; LOG(INFO) << "create global variable w_grad";
// scope_.NewVar("h"); scope_.NewVar("rnn/w_grad");
// } // outputs: x_grad
// LOG(INFO) << "create global variable x_grad";
// void CreateRNNOp() { scope_.NewVar("x_grad");
// framework::OpDesc op_desc; // outputs: h_boot_grad
// LOG(INFO) << "create global variable h_boot_grad";
// op_desc.set_type("recurrent_op"); scope_.NewVar("h_boot_grad");
// // inlinks 0 }
// op_desc.add_inputs("x");
// op_desc.add_inputs("x0"); void CreateStepScopes() {
// op_desc.add_inputs("x1"); auto step_scopes =
// // boot_memories 3 scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
// op_desc.add_inputs("h_boot"); for (int i = 0; i < 10; ++i) {
// // step net 5 auto& scope = scope_.NewScope();
// op_desc.add_inputs("step_net"); auto pre_t = scope.NewVar("rnn/pre_h")->GetMutable<Tensor>();
// // outlinks 6 pre_t->mutable_data<float>({20, 30}, platform::CPUPlace());
// op_desc.add_outputs("h"); auto tensor = scope.NewVar("rnn/h")->GetMutable<Tensor>();
// // step scopes 7 tensor->mutable_data<float>({20, 30}, platform::CPUPlace());
// op_desc.add_outputs("step_scopes");
// // for unit test of ConcatOutputs
// auto _input_format = std::vector<int>{ auto xg = scope.NewVar("rnn/x_grad")->GetMutable<Tensor>();
// 0, // in_link xg->mutable_data<float>({20, 30}, platform::CPUPlace());
// 3, // memories
// 4 // step_net step_scopes->emplace_back(&scope);
// }; }
// auto input_format = op_desc.add_attrs();
// input_format->set_name("input_format"); // last time step
// input_format->set_type(paddle::framework::AttrType::INTS); auto g = (*step_scopes)[9]->NewVar("rnn/h_pre_grad")->GetMutable<Tensor>();
// for (auto i : _input_format) { g->mutable_data<float>({20, 30}, platform::CPUPlace());
// input_format->add_ints(i); }
// }
// void CreateRNNGradientAlgorithm() {
// auto output_format = op_desc.add_attrs(); std::unique_ptr<rnn::Argument> arg(new rnn::Argument());
// output_format->set_name("output_format"); arg->step_net = "step_net";
// output_format->set_type(paddle::framework::AttrType::INTS); arg->step_scopes = "step_scopes";
// for (auto i : std::vector<int>{0, 1, 2}) { rnn::Link inlink;
// output_format->add_ints(i); inlink.external = "h_grad";
// } inlink.internal = "rnn/h_grad";
// arg->inlinks = std::vector<rnn::Link>{inlink};
// auto inlink_alias = op_desc.add_attrs();
// inlink_alias->set_name("inlink_alias"); rnn::Link outlink;
// inlink_alias->set_type(paddle::framework::AttrType::STRINGS); outlink.external = "x_grad";
// outlink.internal = "rnn/x_grad";
// auto outlink_alias = op_desc.add_attrs(); arg->outlinks = std::vector<rnn::Link>{outlink};
// outlink_alias->set_name("outlink_alias");
// outlink_alias->set_type(paddle::framework::AttrType::STRINGS); rnn::MemoryAttr mem_attr;
// mem_attr.pre_var = "rnn/h_pre_grad";
// auto pre_memories = op_desc.add_attrs(); mem_attr.var = "rnn/h_grad";
// pre_memories->set_name("pre_memories"); mem_attr.boot_var = "h_boot_grad";
// pre_memories->set_type(paddle::framework::AttrType::STRINGS); arg->memories = std::vector<rnn::MemoryAttr>{mem_attr};
//
// auto memories = op_desc.add_attrs(); rnn_grad_algo_.Init(std::move(arg));
// memories->set_name("memories"); }
// memories->set_type(paddle::framework::AttrType::STRINGS);
// void CreateStepNet() {
// // create inlink_alias LOG(INFO) << "create variable step_net";
// for (const auto& item : Variable* var = scope_.NewVar("step_net");
// std::vector<std::string>{"x@alias", "x0@alias", "x1@alias"}) { auto net = var->GetMutable<NetOp>();
// inlink_alias->add_strings(item); // TODO(qingqing) modify backward op create for RNNOp unit test
// } // and the unit test will be removed to Python.
// // pre memories // net->AddOp(OpRegistry::CreateOp("mul", {"X", {"rnn/h_pre", "rnn/w",
// for (const auto& item : std::vector<std::string>{"rnn/h@pre"}) { // "rnn/s_grad"}}, {"Y", {"rnn/h_pre_grad", "rnn/w_grad"}}, {}));
// pre_memories->add_strings(item);
// } // net->AddOp(OpRegistry::CreateOp("add_two", {"X", {"rnn/h_grad"}},
// // memories // {"Y", {"rnn/x_grad"}}, {"Out", "rnn/s_grad"}}, {}));
// for (const auto& item : std::vector<std::string>{"rnn/h"}) { net->CompleteAddOp();
// memories->add_strings(item); }
// }
// // output alias void SegmentInputs() {
// for (const auto& item : std::vector<std::string>{"h@alias"}) { LOG(INFO) << "segment inputs";
// outlink_alias->add_strings(item); std::vector<std::string> inlinks = {"x"};
// } std::vector<std::string> inlinks_alias = {"rnn/x"};
//
// rnn_op_ = OpRegistry::CreateOp(op_desc); rnn::Link inlink;
// inlink.external = "x";
// LOG(INFO) << "rnn_op finish init"; inlink.internal = "rnn/x";
// } auto step_scopes =
// scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
// void CreateStepNet() { rnn::SegmentInputs(*step_scopes, std::vector<rnn::Link>{inlink}, 10,
// LOG(INFO) << "create variable step_net"; true /*infer_shape_mode*/);
// Variable* var = scope_.NewVar("step_net"); }
// auto net = var->GetMutable<NetOp>();
// net->AddOp( void LinkeMemories() {
// OpRegistry::CreateOp("mul", {"rnn/h@pre", "rnn/w"}, {"rnn/s"}, {})); LOG(INFO) << "link memories";
// rnn::MemoryAttr mem_attr;
// net->AddOp( mem_attr.pre_var = "rnn/h_pre";
// OpRegistry::CreateOp("add_two", {"x@alias", "rnn/s"}, {"rnn/h"}, {})); mem_attr.var = "rnn/h";
// net->CompleteAddOp(); mem_attr.boot_var = "boot_h";
// } std::vector<rnn::MemoryAttr> memories;
// memories.push_back(mem_attr);
// // father scope auto step_scopes =
// Scope scope_; scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
// std::shared_ptr<OperatorBase> rnn_op_; for (int i = 1; i < 10; ++i) {
//}; rnn::LinkMemories(*step_scopes, memories, i, -1,
// true /*infer_shape_mode*/);
// TEST_F(RecurrentOpTest, Run) { }
}
Scope scope_;
RecurrentGradientAlgorithm rnn_grad_algo_;
};
// TEST_F(RecurrentGradientAlgorithmTest, Run) {
// platform::CPUDeviceContext ctx; // platform::CPUDeviceContext ctx;
// rnn_op_->InferShape(scope_); // rnn_grad_algo_.Run(scope_, ctx);
// rnn_op_->Run(scope_, ctx);
//}
//
// class RecurrentGradientAlgorithmTest : public ::testing::Test {
// protected:
// virtual void SetUp() override {
// CreateGlobalVariables();
// CreateStepScopes();
// CreateStepNet();
// CreateRNNGradientAlgorithm();
//
// // segment inputs
// SegmentInputs();
// // link forward memories
// LinkeMemories();
// }
//
// virtual void TearDown() override {}
//
// void CreateGlobalVariables() {
// // inputs: x
// LOG(INFO) << "create global variable x";
// Variable* x = scope_.NewVar("x");
// DDim dims =
// make_ddim({10 /*sent size*/, 20 /*batch size*/, 30 /*input dim*/});
// x->GetMutable<Tensor>()->mutable_data<float>(dims, platform::CPUPlace());
// // inputs: h_boot
// LOG(INFO) << "create global variable h_boot";
// Variable* h_boot = scope_.NewVar("h_boot");
// h_boot->GetMutable<Tensor>()->mutable_data<float>(
// make_ddim({20 /*batch size*/, 30 /*input dim*/}),
// platform::CPUPlace());
// // inputs: w
// LOG(INFO) << "create global variable w";
// Variable* w = scope_.NewVar("rnn/w");
// w->GetMutable<Tensor>()->mutable_data<float>(make_ddim({30, 30}),
// platform::CPUPlace());
// // inputs: h_grad
// LOG(INFO) << "create variable h_grad";
// Variable* dh = scope_.NewVar("h_grad");
// dh->GetMutable<Tensor>()->mutable_data<float>(make_ddim({10, 20, 30}),
// platform::CPUPlace());
// // inputs: step_scopes
// LOG(INFO) << "create variable step_scopes";
// scope_.NewVar("step_scopes");
// // inputs: step_net
// LOG(INFO) << "create variable step_net";
// scope_.NewVar("step_net");
// // outputs: w_grad
// LOG(INFO) << "create global variable w_grad";
// scope_.NewVar("rnn/w_grad");
// // outputs: x_grad
// LOG(INFO) << "create global variable x_grad";
// scope_.NewVar("x_grad");
// // outputs: h_boot_grad
// LOG(INFO) << "create global variable h_boot_grad";
// scope_.NewVar("h_boot_grad");
// }
//
// void CreateStepScopes() {
// auto step_scopes =
// scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
// for (int i = 0; i < 10; ++i) {
// auto& scope = scope_.NewScope();
// auto pre_t = scope.NewVar("rnn/pre_h")->GetMutable<Tensor>();
// pre_t->mutable_data<float>({20, 30}, platform::CPUPlace());
// auto tensor = scope.NewVar("rnn/h")->GetMutable<Tensor>();
// tensor->mutable_data<float>({20, 30}, platform::CPUPlace());
//
// // for unit test of ConcatOutputs
// auto xg = scope.NewVar("rnn/x_grad")->GetMutable<Tensor>();
// xg->mutable_data<float>({20, 30}, platform::CPUPlace());
//
// step_scopes->emplace_back(&scope);
// }
//
// // last time step
// auto g =
// (*step_scopes)[9]->NewVar("rnn/h_pre_grad")->GetMutable<Tensor>();
// g->mutable_data<float>({20, 30}, platform::CPUPlace());
// }
//
// void CreateRNNGradientAlgorithm() {
// std::unique_ptr<rnn::Argument> arg(new rnn::Argument());
// arg->step_net = "step_net";
// arg->step_scopes = "step_scopes";
// rnn::Link inlink;
// inlink.external = "h_grad";
// inlink.internal = "rnn/h_grad";
// arg->inlinks = std::vector<rnn::Link>{inlink};
//
// rnn::Link outlink;
// outlink.external = "x_grad";
// outlink.internal = "rnn/x_grad";
// arg->outlinks = std::vector<rnn::Link>{outlink};
//
// rnn::MemoryAttr mem_attr;
// mem_attr.pre_var = "rnn/h_pre_grad";
// mem_attr.var = "rnn/h_grad";
// mem_attr.boot_var = "h_boot_grad";
// arg->memories = std::vector<rnn::MemoryAttr>{mem_attr};
//
// rnn_grad_algo_.Init(std::move(arg));
// }
//
// void CreateStepNet() {
// LOG(INFO) << "create variable step_net";
// Variable* var = scope_.NewVar("step_net");
// auto net = var->GetMutable<NetOp>();
// net->AddOp(OpRegistry::CreateOp("mul", {"rnn/h_pre", "rnn/w",
// "rnn/s_grad"},
// {"rnn/h_pre_grad", "rnn/w_grad"}, {}));
//
// net->AddOp(OpRegistry::CreateOp("add_two", {"rnn/h_grad"},
// {"rnn/x_grad", "rnn/s_grad"}, {}));
// net->CompleteAddOp();
// }
//
// void SegmentInputs() {
// LOG(INFO) << "segment inputs";
// std::vector<std::string> inlinks = {"x"};
// std::vector<std::string> inlinks_alias = {"rnn/x"};
//
// rnn::Link inlink;
// inlink.external = "x";
// inlink.internal = "rnn/x";
// auto step_scopes =
// scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
// rnn::SegmentInputs(*step_scopes, std::vector<rnn::Link>{inlink}, 10,
// true /*infer_shape_mode*/);
// }
//
// void LinkeMemories() {
// LOG(INFO) << "link memories";
// rnn::MemoryAttr mem_attr;
// mem_attr.pre_var = "rnn/h_pre";
// mem_attr.var = "rnn/h";
// mem_attr.boot_var = "boot_h";
// std::vector<rnn::MemoryAttr> memories;
// memories.push_back(mem_attr);
// auto step_scopes =
// scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
// for (int i = 1; i < 10; ++i) {
// rnn::LinkMemories(*step_scopes, memories, i, -1,
// true /*infer_shape_mode*/);
// }
// }
//
// Scope scope_;
// RecurrentGradientAlgorithm rnn_grad_algo_;
//};
//
//// TEST_F(RecurrentGradientAlgorithmTest, Run) {
//// platform::CPUDeviceContext ctx;
//// rnn_grad_algo_.Run(scope_, ctx);
//// }
//
//} // namespace operators
//} // namespace paddle
//
// TEST(RecurrentOp, LinkMemories) {
// using namespace paddle::framework;
// using namespace paddle::platform;
// using namespace paddle::operators;
//
// // create and init step scopes
// size_t len = 10;
// std::vector<Scope*> step_scopes;
// for (size_t i = 0; i < len; ++i) {
// auto scope = new Scope();
// scope->NewVar("pre_h");
// auto tensor = scope->NewVar("h")->GetMutable<Tensor>();
// float* data = tensor->mutable_data<float>({15, 20}, CPUPlace());
// for (size_t j = 0; j < 15 * 20; ++j) {
// data[j] = rand() * (1. / (double)RAND_MAX);
// }
// step_scopes.push_back(scope);
// }
//
// // create MemoryAttr
// rnn::MemoryAttr mem_attr;
// mem_attr.pre_var = "pre_h";
// mem_attr.var = "h";
// mem_attr.boot_var = "boot_h";
// std::vector<rnn::MemoryAttr> memories;
// memories.push_back(mem_attr);
//
// for (size_t i = 1; i < len; ++i) {
// rnn::LinkMemories(step_scopes, memories, i, -1, false
// /*infer_shape_mode*/);
// } // }
// // check
// for (size_t i = 0; i < len - 1; ++i) { } // namespace operators
// const float* a = } // namespace paddle
// step_scopes[i]->FindVar("h")->GetMutable<Tensor>()->data<float>();
// const float* b = step_scopes[i + 1] TEST(RecurrentOp, LinkMemories) {
// ->FindVar("pre_h") using namespace paddle::framework;
// ->GetMutable<Tensor>() using namespace paddle::platform;
// ->data<float>(); using namespace paddle::operators;
// for (size_t j = 0; j < 15 * 20; ++j) {
// ASSERT_FLOAT_EQ(a[j], b[j]); // create and init step scopes
// } size_t len = 10;
// } std::vector<Scope*> step_scopes;
// for (size_t i = 0; i < len; ++i) {
// for (int i = len - 2; i >= 0; --i) { auto scope = new Scope();
// rnn::LinkMemories(step_scopes, memories, i, 1, false scope->NewVar("pre_h");
// /*infer_shape_mode*/); auto tensor = scope->NewVar("h")->GetMutable<Tensor>();
// } float* data = tensor->mutable_data<float>({15, 20}, CPUPlace());
// // check for (size_t j = 0; j < 15 * 20; ++j) {
// for (int i = len - 2; i >= 0; --i) { data[j] = rand() * (1. / (double)RAND_MAX);
// const float* a = }
// step_scopes[i]->FindVar("pre_h")->GetMutable<Tensor>()->data<float>(); step_scopes.push_back(scope);
// const float* b = }
// step_scopes[i + 1]->FindVar("h")->GetMutable<Tensor>()->data<float>();
// for (size_t j = 0; j < 15 * 20; ++j) { // create MemoryAttr
// ASSERT_FLOAT_EQ(a[j], b[j]); rnn::MemoryAttr mem_attr;
// } mem_attr.pre_var = "pre_h";
// } mem_attr.var = "h";
// mem_attr.boot_var = "boot_h";
// for (auto s : step_scopes) { std::vector<rnn::MemoryAttr> memories;
// delete s; memories.push_back(mem_attr);
// }
//} for (size_t i = 1; i < len; ++i) {
// rnn::LinkMemories(step_scopes, memories, i, -1, false
// USE_OP(add_two); /*infer_shape_mode*/);
// USE_OP(mul); }
// USE_OP_WITHOUT_KERNEL(recurrent_op); // check
for (size_t i = 0; i < len - 1; ++i) {
const float* a =
step_scopes[i]->FindVar("h")->GetMutable<Tensor>()->data<float>();
const float* b = step_scopes[i + 1]
->FindVar("pre_h")
->GetMutable<Tensor>()
->data<float>();
for (size_t j = 0; j < 15 * 20; ++j) {
ASSERT_FLOAT_EQ(a[j], b[j]);
}
}
for (int i = len - 2; i >= 0; --i) {
rnn::LinkMemories(step_scopes, memories, i, 1, false
/*infer_shape_mode*/);
}
// check
for (int i = len - 2; i >= 0; --i) {
const float* a =
step_scopes[i]->FindVar("pre_h")->GetMutable<Tensor>()->data<float>();
const float* b =
step_scopes[i + 1]->FindVar("h")->GetMutable<Tensor>()->data<float>();
for (size_t j = 0; j < 15 * 20; ++j) {
ASSERT_FLOAT_EQ(a[j], b[j]);
}
}
for (auto s : step_scopes) {
delete s;
}
}
USE_OP(add_two);
USE_OP(mul);
USE_OP_WITHOUT_KERNEL(recurrent_op);
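
For reference, a minimal standalone sketch of the memory-linking behavior that TEST(RecurrentOp, LinkMemories) above verifies. It is not part of this commit: the test name, the fill values, and the recurrent_op.h / gtest includes (which sit above the hunk shown here) are assumptions, and only the Scope/Tensor/rnn:: calls that already appear in the diff are used.

// Hedged sketch, not from the commit: link step 1's "pre_h" back to step 0's
// "h" with offset -1, then confirm both names resolve to the same data.
#include <gtest/gtest.h>
#include <vector>
#include "paddle/framework/tensor.h"
#include "paddle/operators/recurrent_op.h"  // assumed to declare rnn::LinkMemories

TEST(RecurrentOpSketch, LinkOneMemory) {
  using paddle::framework::Scope;
  using paddle::framework::Tensor;
  using paddle::platform::CPUPlace;
  namespace rnn = paddle::operators::rnn;

  // two step scopes, each with a "pre_h" slot and an initialized "h" tensor
  std::vector<Scope*> step_scopes;
  for (int i = 0; i < 2; ++i) {
    auto scope = new Scope();
    scope->NewVar("pre_h");
    float* data = scope->NewVar("h")->GetMutable<Tensor>()->mutable_data<float>(
        {15, 20}, CPUPlace());
    for (size_t j = 0; j < 15 * 20; ++j) data[j] = static_cast<float>(j + i);
    step_scopes.push_back(scope);
  }

  rnn::MemoryAttr mem_attr;
  mem_attr.pre_var = "pre_h";
  mem_attr.var = "h";
  mem_attr.boot_var = "boot_h";
  std::vector<rnn::MemoryAttr> memories;
  memories.push_back(mem_attr);

  // offset -1: step 1's pre_h should now share step 0's h
  rnn::LinkMemories(step_scopes, memories, 1, -1, false /*infer_shape_mode*/);

  const float* h0 =
      step_scopes[0]->FindVar("h")->GetMutable<Tensor>()->data<float>();
  const float* pre_h1 =
      step_scopes[1]->FindVar("pre_h")->GetMutable<Tensor>()->data<float>();
  for (size_t j = 0; j < 15 * 20; ++j) {
    ASSERT_FLOAT_EQ(h0[j], pre_h1[j]);
  }

  for (auto s : step_scopes) delete s;
}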