diff --git a/paddle/operators/recurrent_op_test.cc b/paddle/operators/recurrent_op_test.cc
index 3fc2954ba1de0f76d5955114f131bd76568b321f..d950296c4a6d0c9bd8b0d0f763b093b1c5011f0b 100644
--- a/paddle/operators/recurrent_op_test.cc
+++ b/paddle/operators/recurrent_op_test.cc
@@ -22,382 +22,233 @@
 #include "paddle/framework/tensor.h"
 #include "paddle/operators/net_op.h"
 
-TEST(rnn, bad) { ASSERT_TRUE(false); }
+namespace paddle {
+namespace operators {
 
-// namespace paddle {
-// namespace operators {
-//
+using namespace paddle::framework;
 // using framework::make_ddim;
 // using framework::DDim;
-//
-// class RecurrentOpTest : public ::testing::Test {
-//  protected:
-//   virtual void SetUp() override {
-//     CreateGlobalVariables();
-//     CreateStepNet();
-//     CreateRNNOp();
-//   }
-//
-//   virtual void TearDown() override {}
-//
-//   void CreateGlobalVariables() {
-//     // create input, and init content
-//     LOG(INFO) << "create global variable x";
-//     for (auto inlink : std::vector<std::string>{"x", "x0", "x1", "h"}) {
-//       Variable* x = scope_.NewVar(inlink);
-//       DDim dims = make_ddim(std::vector<int>{
-//           10 /*sent size*/, 20 /*batch size*/, 30 /*input dim*/});
-//       x->GetMutable<Tensor>()->mutable_data<float>(dims,
-//       platform::CPUPlace());
-//     }
-//     // create output alias just for test
-//     for (auto inlink : std::vector<std::string>{"h@alias"}) {
-//       Variable* x = scope_.NewVar(inlink);
-//       DDim dims =
-//           make_ddim(std::vector<int>{20 /*batch size*/, 30 /*input dim*/});
-//       x->GetMutable<Tensor>()->mutable_data<float>(dims,
-//       platform::CPUPlace());
-//     }
-//
-//     LOG(INFO) << "create global variable w";
-//     Variable* w = scope_.NewVar("rnn/w");
-//     w->GetMutable<Tensor>()->mutable_data<float>(
-//         make_ddim(std::vector<int>{30, 30}), platform::CPUPlace());
-//
-//     for (auto boot : std::vector<std::string>{"h_boot"}) {
-//       LOG(INFO) << "create global variable " << boot;
-//       Variable* h_boot = scope_.NewVar(boot);
-//       h_boot->GetMutable<Tensor>()->mutable_data<float>(
-//           make_ddim(std::vector<int>{20 /*batch size*/, 30 /*input dim*/}),
-//           platform::CPUPlace());
-//     }
-//
-//     LOG(INFO) << "create variable step_scopes";
-//     scope_.NewVar("step_scopes");
-//
-//     LOG(INFO) << "create variable h";
-//     scope_.NewVar("h");
-//   }
-//
-//   void CreateRNNOp() {
-//     framework::OpDesc op_desc;
-//
-//     op_desc.set_type("recurrent_op");
-//     // inlinks 0
-//     op_desc.add_inputs("x");
-//     op_desc.add_inputs("x0");
-//     op_desc.add_inputs("x1");
-//     // boot_memories 3
-//     op_desc.add_inputs("h_boot");
-//     // step net 5
-//     op_desc.add_inputs("step_net");
-//     // outlinks 6
-//     op_desc.add_outputs("h");
-//     // step scopes 7
-//     op_desc.add_outputs("step_scopes");
-//
-//     auto _input_format = std::vector<int>{
-//         0,  // in_link
-//         3,  // memories
-//         4   // step_net
-//     };
-//     auto input_format = op_desc.add_attrs();
-//     input_format->set_name("input_format");
-//     input_format->set_type(paddle::framework::AttrType::INTS);
-//     for (auto i : _input_format) {
-//       input_format->add_ints(i);
-//     }
-//
-//     auto output_format = op_desc.add_attrs();
-//     output_format->set_name("output_format");
-//     output_format->set_type(paddle::framework::AttrType::INTS);
-//     for (auto i : std::vector<int>{0, 1, 2}) {
-//       output_format->add_ints(i);
-//     }
-//
-//     auto inlink_alias = op_desc.add_attrs();
-//     inlink_alias->set_name("inlink_alias");
-//     inlink_alias->set_type(paddle::framework::AttrType::STRINGS);
-//
-//     auto outlink_alias = op_desc.add_attrs();
-//     outlink_alias->set_name("outlink_alias");
-//     outlink_alias->set_type(paddle::framework::AttrType::STRINGS);
-//
-//     auto pre_memories = op_desc.add_attrs();
-//     pre_memories->set_name("pre_memories");
-//     pre_memories->set_type(paddle::framework::AttrType::STRINGS);
-//
-//     auto memories = op_desc.add_attrs();
-//     memories->set_name("memories");
-//     memories->set_type(paddle::framework::AttrType::STRINGS);
-//
-//     // create inlink_alias
-//     for (const auto& item :
-//          std::vector<std::string>{"x@alias", "x0@alias", "x1@alias"}) {
-//       inlink_alias->add_strings(item);
-//     }
-//     // pre memories
-//     for (const auto& item : std::vector<std::string>{"rnn/h@pre"}) {
-//       pre_memories->add_strings(item);
-//     }
-//     // memories
-//     for (const auto& item : std::vector<std::string>{"rnn/h"}) {
-//       memories->add_strings(item);
-//     }
-//     // output alias
-//     for (const auto& item : std::vector<std::string>{"h@alias"}) {
-//       outlink_alias->add_strings(item);
-//     }
-//
-//     rnn_op_ = OpRegistry::CreateOp(op_desc);
-//
-//     LOG(INFO) << "rnn_op finish init";
-//   }
-//
-//   void CreateStepNet() {
-//     LOG(INFO) << "create variable step_net";
-//     Variable* var = scope_.NewVar("step_net");
-//     auto net = var->GetMutable<NetOp>();
-//     net->AddOp(
-//         OpRegistry::CreateOp("mul", {"rnn/h@pre", "rnn/w"}, {"rnn/s"}, {}));
-//
-//     net->AddOp(
-//         OpRegistry::CreateOp("add_two", {"x@alias", "rnn/s"}, {"rnn/h"}, {}));
-//     net->CompleteAddOp();
-//   }
-//
-//   // father scope
-//   Scope scope_;
-//   std::shared_ptr<OperatorBase> rnn_op_;
-//};
-//
-// TEST_F(RecurrentOpTest, Run) {
-//   platform::CPUDeviceContext ctx;
-//   rnn_op_->InferShape(scope_);
-//   rnn_op_->Run(scope_, ctx);
-//}
-//
-// class RecurrentGradientAlgorithmTest : public ::testing::Test {
-//  protected:
-//   virtual void SetUp() override {
-//     CreateGlobalVariables();
-//     CreateStepScopes();
-//     CreateStepNet();
-//     CreateRNNGradientAlgorithm();
-//
-//     // segment inputs
-//     SegmentInputs();
-//     // link forward memories
-//     LinkeMemories();
-//   }
-//
-//   virtual void TearDown() override {}
-//
-//   void CreateGlobalVariables() {
-//     // inputs: x
-//     LOG(INFO) << "create global variable x";
-//     Variable* x = scope_.NewVar("x");
-//     DDim dims =
-//         make_ddim({10 /*sent size*/, 20 /*batch size*/, 30 /*input dim*/});
-//     x->GetMutable<Tensor>()->mutable_data<float>(dims, platform::CPUPlace());
-//     // inputs: h_boot
-//     LOG(INFO) << "create global variable h_boot";
-//     Variable* h_boot = scope_.NewVar("h_boot");
-//     h_boot->GetMutable<Tensor>()->mutable_data<float>(
-//         make_ddim({20 /*batch size*/, 30 /*input dim*/}),
-//         platform::CPUPlace());
-//     // inputs: w
-//     LOG(INFO) << "create global variable w";
-//     Variable* w = scope_.NewVar("rnn/w");
-//     w->GetMutable<Tensor>()->mutable_data<float>(make_ddim({30, 30}),
-//     platform::CPUPlace());
-//     // inputs: h_grad
-//     LOG(INFO) << "create variable h_grad";
-//     Variable* dh = scope_.NewVar("h_grad");
-//     dh->GetMutable<Tensor>()->mutable_data<float>(make_ddim({10, 20, 30}),
-//     platform::CPUPlace());
-//     // inputs: step_scopes
-//     LOG(INFO) << "create variable step_scopes";
-//     scope_.NewVar("step_scopes");
-//     // inputs: step_net
-//     LOG(INFO) << "create variable step_net";
-//     scope_.NewVar("step_net");
-//     // outputs: w_grad
-//     LOG(INFO) << "create global variable w_grad";
-//     scope_.NewVar("rnn/w_grad");
-//     // outputs: x_grad
-//     LOG(INFO) << "create global variable x_grad";
-//     scope_.NewVar("x_grad");
-//     // outputs: h_boot_grad
-//     LOG(INFO) << "create global variable h_boot_grad";
-//     scope_.NewVar("h_boot_grad");
-//   }
-//
-//   void CreateStepScopes() {
-//     auto step_scopes =
-//         scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
-//     for (int i = 0; i < 10; ++i) {
-//       auto& scope = scope_.NewScope();
-//       auto pre_t = scope.NewVar("rnn/pre_h")->GetMutable<Tensor>();
-//       pre_t->mutable_data<float>({20, 30}, platform::CPUPlace());
-//       auto tensor = scope.NewVar("rnn/h")->GetMutable<Tensor>();
-//       tensor->mutable_data<float>({20, 30}, platform::CPUPlace());
-//
-//       // for unit test of ConcatOutputs
-//       auto xg = scope.NewVar("rnn/x_grad")->GetMutable<Tensor>();
-//       xg->mutable_data<float>({20, 30}, platform::CPUPlace());
-//
-//       step_scopes->emplace_back(&scope);
-//     }
-//
-//     // last time step
-//     auto g =
-//         (*step_scopes)[9]->NewVar("rnn/h_pre_grad")->GetMutable<Tensor>();
-//     g->mutable_data<float>({20, 30}, platform::CPUPlace());
-//   }
-//
-//   void CreateRNNGradientAlgorithm() {
-//     std::unique_ptr<rnn::Argument> arg(new rnn::Argument());
-//     arg->step_net = "step_net";
-//     arg->step_scopes = "step_scopes";
-//     rnn::Link inlink;
-//     inlink.external = "h_grad";
-//     inlink.internal = "rnn/h_grad";
-//     arg->inlinks = std::vector<rnn::Link>{inlink};
-//
-//     rnn::Link outlink;
-//     outlink.external = "x_grad";
-//     outlink.internal = "rnn/x_grad";
-//     arg->outlinks = std::vector<rnn::Link>{outlink};
-//
-//     rnn::MemoryAttr mem_attr;
-//     mem_attr.pre_var = "rnn/h_pre_grad";
-//     mem_attr.var = "rnn/h_grad";
-//     mem_attr.boot_var = "h_boot_grad";
-//     arg->memories = std::vector<rnn::MemoryAttr>{mem_attr};
-//
-//     rnn_grad_algo_.Init(std::move(arg));
-//   }
-//
-//   void CreateStepNet() {
-//     LOG(INFO) << "create variable step_net";
-//     Variable* var = scope_.NewVar("step_net");
-//     auto net = var->GetMutable<NetOp>();
-//     net->AddOp(OpRegistry::CreateOp("mul", {"rnn/h_pre", "rnn/w",
-//     "rnn/s_grad"},
-//                                     {"rnn/h_pre_grad", "rnn/w_grad"}, {}));
-//
-//     net->AddOp(OpRegistry::CreateOp("add_two", {"rnn/h_grad"},
-//                                     {"rnn/x_grad", "rnn/s_grad"}, {}));
-//     net->CompleteAddOp();
-//   }
-//
-//   void SegmentInputs() {
-//     LOG(INFO) << "segment inputs";
-//     std::vector<std::string> inlinks = {"x"};
-//     std::vector<std::string> inlinks_alias = {"rnn/x"};
-//
-//     rnn::Link inlink;
-//     inlink.external = "x";
-//     inlink.internal = "rnn/x";
-//     auto step_scopes =
-//         scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
-//     rnn::SegmentInputs(*step_scopes, std::vector<rnn::Link>{inlink}, 10,
-//                        true /*infer_shape_mode*/);
-//   }
-//
-//   void LinkeMemories() {
-//     LOG(INFO) << "link memories";
-//     rnn::MemoryAttr mem_attr;
-//     mem_attr.pre_var = "rnn/h_pre";
-//     mem_attr.var = "rnn/h";
-//     mem_attr.boot_var = "boot_h";
-//     std::vector<rnn::MemoryAttr> memories;
-//     memories.push_back(mem_attr);
-//     auto step_scopes =
-//         scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
-//     for (int i = 1; i < 10; ++i) {
-//       rnn::LinkMemories(*step_scopes, memories, i, -1,
-//                         true /*infer_shape_mode*/);
-//     }
-//   }
-//
-//   Scope scope_;
-//   RecurrentGradientAlgorithm rnn_grad_algo_;
-//};
-//
-//// TEST_F(RecurrentGradientAlgorithmTest, Run) {
-////   platform::CPUDeviceContext ctx;
-////   rnn_grad_algo_.Run(scope_, ctx);
-//// }
-//
-//}  // namespace operators
-//}  // namespace paddle
-//
-// TEST(RecurrentOp, LinkMemories) {
-//   using namespace paddle::framework;
-//   using namespace paddle::platform;
-//   using namespace paddle::operators;
-//
-//   // create and init step scopes
-//   size_t len = 10;
-//   std::vector<Scope*> step_scopes;
-//   for (size_t i = 0; i < len; ++i) {
-//     auto scope = new Scope();
-//     scope->NewVar("pre_h");
-//     auto tensor = scope->NewVar("h")->GetMutable<Tensor>();
-//     float* data = tensor->mutable_data<float>({15, 20}, CPUPlace());
-//     for (size_t j = 0; j < 15 * 20; ++j) {
-//       data[j] = rand() * (1. / (double)RAND_MAX);
-//     }
-//     step_scopes.push_back(scope);
-//   }
-//
-//   // create MemoryAttr
-//   rnn::MemoryAttr mem_attr;
-//   mem_attr.pre_var = "pre_h";
-//   mem_attr.var = "h";
-//   mem_attr.boot_var = "boot_h";
-//   std::vector<rnn::MemoryAttr> memories;
-//   memories.push_back(mem_attr);
-//
-//   for (size_t i = 1; i < len; ++i) {
-//     rnn::LinkMemories(step_scopes, memories, i, -1, false
-//     /*infer_shape_mode*/);
-//   }
-//   // check
-//   for (size_t i = 0; i < len - 1; ++i) {
-//     const float* a =
-//         step_scopes[i]->FindVar("h")->GetMutable<Tensor>()->data<float>();
-//     const float* b = step_scopes[i + 1]
-//                          ->FindVar("pre_h")
-//                          ->GetMutable<Tensor>()
-//                          ->data<float>();
-//     for (size_t j = 0; j < 15 * 20; ++j) {
-//       ASSERT_FLOAT_EQ(a[j], b[j]);
-//     }
-//   }
-//
-//   for (int i = len - 2; i >= 0; --i) {
-//     rnn::LinkMemories(step_scopes, memories, i, 1, false
-//     /*infer_shape_mode*/);
-//   }
-//   // check
-//   for (int i = len - 2; i >= 0; --i) {
-//     const float* a =
-//         step_scopes[i]->FindVar("pre_h")->GetMutable<Tensor>()->data<float>();
-//     const float* b =
-//         step_scopes[i + 1]->FindVar("h")->GetMutable<Tensor>()->data<float>();
-//     for (size_t j = 0; j < 15 * 20; ++j) {
-//       ASSERT_FLOAT_EQ(a[j], b[j]);
-//     }
-//   }
-//
-//   for (auto s : step_scopes) {
-//     delete s;
-//   }
-//}
-//
-// USE_OP(add_two);
-// USE_OP(mul);
-// USE_OP_WITHOUT_KERNEL(recurrent_op);
+
+class RecurrentGradientAlgorithmTest : public ::testing::Test {
+ protected:
+  virtual void SetUp() override {
+    CreateGlobalVariables();
+    CreateStepScopes();
+    CreateStepNet();
+    CreateRNNGradientAlgorithm();
+
+    // segment inputs
+    SegmentInputs();
+    // link forward memories
+    LinkeMemories();
+  }
+
+  virtual void TearDown() override {}
+
+  void CreateGlobalVariables() {
+    // inputs: x
+    LOG(INFO) << "create global variable x";
+    Variable* x = scope_.NewVar("x");
+    DDim dims =
+        make_ddim({10 /*sent size*/, 20 /*batch size*/, 30 /*input dim*/});
+    x->GetMutable<Tensor>()->mutable_data<float>(dims, platform::CPUPlace());
+    // inputs: h_boot
+    LOG(INFO) << "create global variable h_boot";
+    Variable* h_boot = scope_.NewVar("h_boot");
+    h_boot->GetMutable<Tensor>()->mutable_data<float>(
+        make_ddim({20 /*batch size*/, 30 /*input dim*/}), platform::CPUPlace());
+    // inputs: w
+    LOG(INFO) << "create global variable w";
+    Variable* w = scope_.NewVar("rnn/w");
+    w->GetMutable<Tensor>()->mutable_data<float>(make_ddim({30, 30}),
+                                                 platform::CPUPlace());
+    // inputs: h_grad
+    LOG(INFO) << "create variable h_grad";
+    Variable* dh = scope_.NewVar("h_grad");
+    dh->GetMutable<Tensor>()->mutable_data<float>(make_ddim({10, 20, 30}),
+                                                  platform::CPUPlace());
+    // inputs: step_scopes
+    LOG(INFO) << "create variable step_scopes";
+    scope_.NewVar("step_scopes");
+    // inputs: step_net
+    LOG(INFO) << "create variable step_net";
+    scope_.NewVar("step_net");
+    // outputs: w_grad
+    LOG(INFO) << "create global variable w_grad";
+    scope_.NewVar("rnn/w_grad");
+    // outputs: x_grad
+    LOG(INFO) << "create global variable x_grad";
+    scope_.NewVar("x_grad");
+    // outputs: h_boot_grad
+    LOG(INFO) << "create global variable h_boot_grad";
+    scope_.NewVar("h_boot_grad");
+  }
+
+  void CreateStepScopes() {
+    auto step_scopes =
+        scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
+    for (int i = 0; i < 10; ++i) {
+      auto& scope = scope_.NewScope();
+      auto pre_t = scope.NewVar("rnn/pre_h")->GetMutable<Tensor>();
+      pre_t->mutable_data<float>({20, 30}, platform::CPUPlace());
+      auto tensor = scope.NewVar("rnn/h")->GetMutable<Tensor>();
+      tensor->mutable_data<float>({20, 30}, platform::CPUPlace());
+
+      // for unit test of ConcatOutputs
+      auto xg = scope.NewVar("rnn/x_grad")->GetMutable<Tensor>();
+      xg->mutable_data<float>({20, 30}, platform::CPUPlace());
+
+      step_scopes->emplace_back(&scope);
+    }
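+
+    // The gradient algorithm walks the step scopes from the last step
+    // backwards, so the final step also needs its incoming memory gradient
+    // (rnn/h_pre_grad) allocated up front.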
scope.NewVar("rnn/h")->GetMutable(); -// tensor->mutable_data({20, 30}, platform::CPUPlace()); -// -// // for unit test of ConcatOutputs -// auto xg = scope.NewVar("rnn/x_grad")->GetMutable(); -// xg->mutable_data({20, 30}, platform::CPUPlace()); -// -// step_scopes->emplace_back(&scope); -// } -// -// // last time step -// auto g = -// (*step_scopes)[9]->NewVar("rnn/h_pre_grad")->GetMutable(); -// g->mutable_data({20, 30}, platform::CPUPlace()); -// } -// -// void CreateRNNGradientAlgorithm() { -// std::unique_ptr arg(new rnn::Argument()); -// arg->step_net = "step_net"; -// arg->step_scopes = "step_scopes"; -// rnn::Link inlink; -// inlink.external = "h_grad"; -// inlink.internal = "rnn/h_grad"; -// arg->inlinks = std::vector{inlink}; -// -// rnn::Link outlink; -// outlink.external = "x_grad"; -// outlink.internal = "rnn/x_grad"; -// arg->outlinks = std::vector{outlink}; -// -// rnn::MemoryAttr mem_attr; -// mem_attr.pre_var = "rnn/h_pre_grad"; -// mem_attr.var = "rnn/h_grad"; -// mem_attr.boot_var = "h_boot_grad"; -// arg->memories = std::vector{mem_attr}; -// -// rnn_grad_algo_.Init(std::move(arg)); -// } -// -// void CreateStepNet() { -// LOG(INFO) << "create variable step_net"; -// Variable* var = scope_.NewVar("step_net"); -// auto net = var->GetMutable(); -// net->AddOp(OpRegistry::CreateOp("mul", {"rnn/h_pre", "rnn/w", -// "rnn/s_grad"}, -// {"rnn/h_pre_grad", "rnn/w_grad"}, {})); -// -// net->AddOp(OpRegistry::CreateOp("add_two", {"rnn/h_grad"}, -// {"rnn/x_grad", "rnn/s_grad"}, {})); -// net->CompleteAddOp(); -// } -// -// void SegmentInputs() { -// LOG(INFO) << "segment inputs"; -// std::vector inlinks = {"x"}; -// std::vector inlinks_alias = {"rnn/x"}; -// -// rnn::Link inlink; -// inlink.external = "x"; -// inlink.internal = "rnn/x"; -// auto step_scopes = -// scope_.FindVar("step_scopes")->GetMutable>(); -// rnn::SegmentInputs(*step_scopes, std::vector{inlink}, 10, -// true /*infer_shape_mode*/); -// } -// -// void LinkeMemories() { -// LOG(INFO) << "link memories"; -// rnn::MemoryAttr mem_attr; -// mem_attr.pre_var = "rnn/h_pre"; -// mem_attr.var = "rnn/h"; -// mem_attr.boot_var = "boot_h"; -// std::vector memories; -// memories.push_back(mem_attr); -// auto step_scopes = -// scope_.FindVar("step_scopes")->GetMutable>(); -// for (int i = 1; i < 10; ++i) { -// rnn::LinkMemories(*step_scopes, memories, i, -1, -// true /*infer_shape_mode*/); -// } -// } -// -// Scope scope_; -// RecurrentGradientAlgorithm rnn_grad_algo_; -//}; -// -//// TEST_F(RecurrentGradientAlgorithmTest, Run) { -//// platform::CPUDeviceContext ctx; -//// rnn_grad_algo_.Run(scope_, ctx); -//// } -// -//} // namespace operators -//} // namespace paddle -// -// TEST(RecurrentOp, LinkMemories) { -// using namespace paddle::framework; -// using namespace paddle::platform; -// using namespace paddle::operators; -// -// // create and init step scopes -// size_t len = 10; -// std::vector step_scopes; -// for (size_t i = 0; i < len; ++i) { -// auto scope = new Scope(); -// scope->NewVar("pre_h"); -// auto tensor = scope->NewVar("h")->GetMutable(); -// float* data = tensor->mutable_data({15, 20}, CPUPlace()); -// for (size_t j = 0; j < 15 * 20; ++j) { -// data[j] = rand() * (1. 
+    rnn::Link inlink;
+    inlink.external = "h_grad";
+    inlink.internal = "rnn/h_grad";
+    arg->inlinks = std::vector<rnn::Link>{inlink};
+
+    rnn::Link outlink;
+    outlink.external = "x_grad";
+    outlink.internal = "rnn/x_grad";
+    arg->outlinks = std::vector<rnn::Link>{outlink};
+
+    rnn::MemoryAttr mem_attr;
+    mem_attr.pre_var = "rnn/h_pre_grad";
+    mem_attr.var = "rnn/h_grad";
+    mem_attr.boot_var = "h_boot_grad";
+    arg->memories = std::vector<rnn::MemoryAttr>{mem_attr};
+
+    rnn_grad_algo_.Init(std::move(arg));
+  }
+
+  void CreateStepNet() {
+    LOG(INFO) << "create variable step_net";
+    Variable* var = scope_.NewVar("step_net");
+    auto net = var->GetMutable<NetOp>();
+    // TODO(qingqing) modify backward op create for RNNOp unit test
+    // and the unit test will be moved to Python.
+    // net->AddOp(OpRegistry::CreateOp("mul", {"X", {"rnn/h_pre", "rnn/w",
+    // "rnn/s_grad"}}, {"Y", {"rnn/h_pre_grad", "rnn/w_grad"}}, {}));
+
+    // net->AddOp(OpRegistry::CreateOp("add_two", {"X", {"rnn/h_grad"}},
+    // {"Y", {"rnn/x_grad"}}, {"Out", "rnn/s_grad"}}, {}));
+    net->CompleteAddOp();
+  }
+
+  void SegmentInputs() {
+    LOG(INFO) << "segment inputs";
+    std::vector<std::string> inlinks = {"x"};
+    std::vector<std::string> inlinks_alias = {"rnn/x"};
+
+    rnn::Link inlink;
+    inlink.external = "x";
+    inlink.internal = "rnn/x";
+    auto step_scopes =
+        scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
+    rnn::SegmentInputs(*step_scopes, std::vector<rnn::Link>{inlink}, 10,
+                       true /*infer_shape_mode*/);
+  }
+
+  void LinkeMemories() {
+    LOG(INFO) << "link memories";
+    rnn::MemoryAttr mem_attr;
+    mem_attr.pre_var = "rnn/h_pre";
+    mem_attr.var = "rnn/h";
+    mem_attr.boot_var = "boot_h";
+    std::vector<rnn::MemoryAttr> memories;
+    memories.push_back(mem_attr);
+    auto step_scopes =
+        scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
+    for (int i = 1; i < 10; ++i) {
+      rnn::LinkMemories(*step_scopes, memories, i, -1,
+                        true /*infer_shape_mode*/);
+    }
+  }
+
+  Scope scope_;
+  RecurrentGradientAlgorithm rnn_grad_algo_;
+};
+
+// TEST_F(RecurrentGradientAlgorithmTest, Run) {
+//   platform::CPUDeviceContext ctx;
+//   rnn_grad_algo_.Run(scope_, ctx);
+// }
+
+}  // namespace operators
+}  // namespace paddle
+
+TEST(RecurrentOp, LinkMemories) {
+  using namespace paddle::framework;
+  using namespace paddle::platform;
+  using namespace paddle::operators;
+
+  // create and init step scopes
+  size_t len = 10;
+  std::vector<Scope*> step_scopes;
+  for (size_t i = 0; i < len; ++i) {
+    auto scope = new Scope();
+    scope->NewVar("pre_h");
+    auto tensor = scope->NewVar("h")->GetMutable<Tensor>();
+    float* data = tensor->mutable_data<float>({15, 20}, CPUPlace());
+    for (size_t j = 0; j < 15 * 20; ++j) {
+      data[j] = rand() * (1. / (double)RAND_MAX);
+    }
+    step_scopes.push_back(scope);
+  }
+
+  // create MemoryAttr
+  rnn::MemoryAttr mem_attr;
+  mem_attr.pre_var = "pre_h";
+  mem_attr.var = "h";
+  mem_attr.boot_var = "boot_h";
+  std::vector<rnn::MemoryAttr> memories;
+  memories.push_back(mem_attr);
+
+  for (size_t i = 1; i < len; ++i) {
+    rnn::LinkMemories(step_scopes, memories, i, -1, false
+                      /*infer_shape_mode*/);
+  }
+  // check
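+  // (after linking with offset -1, pre_h in scope i + 1 should share the
+  // data of h in scope i)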
+  for (size_t i = 0; i < len - 1; ++i) {
+    const float* a =
+        step_scopes[i]->FindVar("h")->GetMutable<Tensor>()->data<float>();
+    const float* b = step_scopes[i + 1]
+                         ->FindVar("pre_h")
+                         ->GetMutable<Tensor>()
+                         ->data<float>();
+    for (size_t j = 0; j < 15 * 20; ++j) {
+      ASSERT_FLOAT_EQ(a[j], b[j]);
+    }
+  }
+
+  for (int i = len - 2; i >= 0; --i) {
+    rnn::LinkMemories(step_scopes, memories, i, 1, false
+                      /*infer_shape_mode*/);
+  }
+  // check
+  for (int i = len - 2; i >= 0; --i) {
+    const float* a =
+        step_scopes[i]->FindVar("pre_h")->GetMutable<Tensor>()->data<float>();
+    const float* b =
+        step_scopes[i + 1]->FindVar("h")->GetMutable<Tensor>()->data<float>();
+    for (size_t j = 0; j < 15 * 20; ++j) {
+      ASSERT_FLOAT_EQ(a[j], b[j]);
+    }
+  }
+
+  for (auto s : step_scopes) {
+    delete s;
+  }
+}
+
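+// Reference the operators used above so their registrations are linked
+// into the test binary.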
+USE_OP(add_two);
+USE_OP(mul);
+USE_OP_WITHOUT_KERNEL(recurrent_op);