diff --git a/paddle/framework/scope.h b/paddle/framework/scope.h
index 2ba3f8ed355b48800cfa4180e4e8a94f2c9958a9..c93b03e48130afe9568089b6a7586c4185d1d5b4 100644
--- a/paddle/framework/scope.h
+++ b/paddle/framework/scope.h
@@ -58,6 +58,8 @@ class Scope {
   /// nullptr if cannot find.
   Variable* FindVar(const std::string& name) const;
 
+  const Scope& parent() const { return *parent_; }
+
   /// Find the scope or an ancestor scope that contains the given variable.
   const Scope* FindScope(const Variable* var) const;
diff --git a/paddle/operators/recurrent_op.cc b/paddle/operators/recurrent_op.cc
index d3413d7cb9305732e9ddf3cb1bc267f7203097f3..ad985839f5908d9235a4dbefc9b841362810114e 100644
--- a/paddle/operators/recurrent_op.cc
+++ b/paddle/operators/recurrent_op.cc
@@ -29,9 +29,11 @@ using Tensor = framework::Tensor;
 using LoDTensor = framework::LoDTensor;
 
 void RecurrentAlgorithm::InferShape(const Scope& scope) const {
-  seq_len_ = scope.FindVar((arg_->inlinks[0]).external)
-                 ->GetMutable<LoDTensor>()
-                 ->dims()[0];
+  auto* input0 = scope.FindVar(arg_->inlinks[0]);
+  PADDLE_ENFORCE_NOT_NULL(input0);
+  seq_len_ = input0->GetMutable<LoDTensor>()->dims()[0];
+  PADDLE_ENFORCE_GT(seq_len_, 0);
+
   CreateScopes(scope);
   auto step_scopes = GetStepScopes(scope);
   rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_,
@@ -123,14 +125,12 @@ void RecurrentAlgorithm::InitMemories(Scope* step_scope,
 }
 
 const rnn::ArgumentName RecurrentOp::kArgName{
-    "step_net", "step_scopes", "inlinks",
-    "outlinks", "inlink_alias", "outlink_alias",
+    "step_net", "step_scopes", "inlinks", "outlinks",
     "memories", "pre_memories", "boot_memories"};
 
 const rnn::ArgumentName RecurrentGradientOp::kArgName{
-    "step_net", "step_scopes", "outlink@grad",
-    "inlink@grad", "inlink_alias", "outlink_alias",
-    "memories", "pre_memories", "boot_memories@grad"};
+    "step_net", "step_scopes", "outlink@grad", "inlink@grad",
+    "memories", "pre_memories", "boot_memories@grad"};
 
 RecurrentOp::RecurrentOp(const std::string& type,
                          const framework::VariableNameMap& inputs,
@@ -160,8 +160,6 @@ class RecurrentAlgorithmProtoAndCheckerMaker
     AddOutput(name.step_scopes, "step scopes");
 
     // Attributes stored in AttributeMap
-    AddAttr<std::vector<std::string>>(name.inlink_alias, "alias of inlinks");
-    AddAttr<std::vector<std::string>>(name.outlink_alias, "alias of outlinks");
     AddAttr<std::vector<std::string>>(name.pre_memories,
                                       "names of pre-memories");
     AddAttr<std::vector<std::string>>(name.memories, "names of memories");
@@ -206,9 +204,8 @@ void RecurrentGradientAlgorithm::LinkBootMemoryGradients(
 }
 
 void RecurrentGradientAlgorithm::InferShape(const Scope& scope) const {
-  seq_len_ = scope.FindVar((arg_->inlinks[0]).external)
-                 ->GetMutable<LoDTensor>()
-                 ->dims()[0];
+  seq_len_ =
+      scope.FindVar(arg_->inlinks[0])->GetMutable<LoDTensor>()->dims()[0];
   auto step_scopes = GetStepScopes(scope);
   rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_,
                      true /*infer_shape_mode*/);
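Why the new `Scope::parent()` accessor matters here: once the alias names are gone, a step scope holds a variable under the same name as the global inlink, and that step-local name would shadow the global one under `FindVar`'s upward search. The utility changes below therefore ask the enclosing scope explicitly. A minimal plain-Python sketch of the shadowing behaviour (a toy scope for illustration, not the Paddle API):

```python
# Toy scope hierarchy: find_var searches upward like Scope::FindVar,
# parent() exposes the enclosing scope like the new accessor.
class Scope(object):
    def __init__(self, parent=None):
        self.vars = {}
        self._parent = parent

    def new_var(self, name):
        return self.vars.setdefault(name, {})

    def find_var(self, name):
        # This scope first, then ancestors.
        if name in self.vars:
            return self.vars[name]
        return self._parent.find_var(name) if self._parent else None

    def parent(self):
        return self._parent


global_scope = Scope()
global_scope.new_var("x")["data"] = "whole sequence"
step_scope = Scope(parent=global_scope)
step_scope.new_var("x")["data"] = "one time step"  # same name shadows

assert step_scope.find_var("x")["data"] == "one time step"
assert step_scope.parent().find_var("x")["data"] == "whole sequence"
```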
diff --git a/paddle/operators/rnn/recurrent_op_utils.cc b/paddle/operators/rnn/recurrent_op_utils.cc
index 6c082cb1825e04accb09019fef28eb2ec6523a5b..ca7219b26d83eb6b8db75a5ed9cd360c5ac1d5df 100644
--- a/paddle/operators/rnn/recurrent_op_utils.cc
+++ b/paddle/operators/rnn/recurrent_op_utils.cc
@@ -24,22 +24,23 @@ using Tensor = framework::Tensor;
 using LoDTensor = framework::LoDTensor;
 
 void SegmentInputs(const std::vector<Scope*>& step_scopes,
-                   const std::vector<Link>& inlinks, const size_t seq_len,
-                   bool infer_shape_mode) {
+                   const std::vector<std::string>& inlinks,
+                   const size_t seq_len, bool infer_shape_mode) {
   PADDLE_ENFORCE(!inlinks.empty(), "no in links are provided.");
   for (size_t i = 0; i < inlinks.size(); ++i) {
-    auto input_var = step_scopes[0]->FindVar(inlinks[i].external);
-    PADDLE_ENFORCE(input_var != nullptr, "input link [%s] is not in scope.",
-                   inlinks[i].external);
+    // global inputs
+    auto input_var = step_scopes[0]->parent().FindVar(inlinks[i]);
+    PADDLE_ENFORCE_NOT_NULL(input_var, "input link [%s] is not in scope.",
+                            inlinks[i]);
     LoDTensor* input = input_var->GetMutable<LoDTensor>();
     f::DDim dims = input->dims();
-    PADDLE_ENFORCE(static_cast<size_t>(dims[0]) == seq_len,
-                   "all the inlinks must have same length");
+    PADDLE_ENFORCE_EQ(static_cast<size_t>(dims[0]), seq_len,
+                      "all the inlinks must be the same length");
     f::DDim step_dims = slice_ddim(dims, 1, dims.size());
     for (size_t j = 0; j < seq_len; j++) {
       Tensor* step_input =
-          step_scopes[j]->NewVar(inlinks[i].internal)->GetMutable<Tensor>();
+          step_scopes[j]->NewVar(inlinks[i])->GetMutable<Tensor>();
       if (!infer_shape_mode) {
         // The input of operators of each step is Tensor here.
         // Maybe need to modify Slice function.
@@ -51,18 +52,17 @@ void SegmentInputs(const std::vector<Scope*>& step_scopes,
 }
 
 void ConcatOutputs(const std::vector<Scope*>& step_scopes,
-                   const std::vector<Link>& outlinks, const size_t seq_len,
-                   bool infer_shape_mode) {
+                   const std::vector<std::string>& outlinks,
+                   const size_t seq_len, bool infer_shape_mode) {
   for (size_t i = 0; i < outlinks.size(); i++) {
-    auto output_var = step_scopes[0]->FindVar(outlinks[i].external);
-    PADDLE_ENFORCE(output_var != nullptr, "output link [%s] is not in scope.",
-                   outlinks[i].external);
+    auto output_var = step_scopes[0]->parent().FindVar(outlinks[i]);
+    PADDLE_ENFORCE_NOT_NULL(output_var, "output link [%s] is not in scope.",
+                            outlinks[i]);
     LoDTensor* output = output_var->GetMutable<LoDTensor>();
     if (infer_shape_mode) {
-      auto step_scope_var = step_scopes[0]->FindVar(outlinks[i].internal);
-      PADDLE_ENFORCE(step_scope_var != nullptr, "%s not in scope",
-                     outlinks[i].internal);
+      auto step_scope_var = step_scopes[0]->FindVar(outlinks[i]);
+      PADDLE_ENFORCE_NOT_NULL(step_scope_var, "%s not in scope", outlinks[i]);
       f::DDim step_dims =
           step_scope_var->template GetMutable<LoDTensor>()->dims();
       std::vector<int64_t> dims_vec = vectorize(step_dims);
@@ -71,9 +71,8 @@ void ConcatOutputs(const std::vector<Scope*>& step_scopes,
     } else {
       output->mutable_data<float>(platform::CPUPlace());
       for (size_t j = 0; j < seq_len; j++) {
-        LoDTensor* step_output = step_scopes[j]
-                                     ->FindVar(outlinks[i].internal)
-                                     ->GetMutable<LoDTensor>();
+        LoDTensor* step_output =
+            step_scopes[j]->FindVar(outlinks[i])->GetMutable<LoDTensor>();
         // TODO(luotao02) data type and platform::DeviceContext() should set
         // correctly
         (output->Slice<float>(j, j + 1))
@@ -113,29 +112,9 @@ void InitArgument(const ArgumentName& name, Argument* arg,
                   const framework::OperatorBase& op) {
   arg->step_scopes = op.Output(name.step_scopes);
 
-  auto inlinks = op.Inputs(name.inlinks);
-  auto inlink_alias = op.Attr<std::vector<std::string>>(name.inlink_alias);
-  PADDLE_ENFORCE(inlinks.size() == inlink_alias.size(),
-                 "the size of inlinks and inlink_alias don't match:%d,%d",
-                 inlinks.size(), inlink_alias.size());
-  for (size_t i = 0; i < inlinks.size(); ++i) {
-    rnn::Link link;
-    link.external = inlinks[i];
-    link.internal = inlink_alias[i];
-    (arg->inlinks).push_back(link);
-  }
+  arg->inlinks = op.Inputs(name.inlinks);
 
-  auto outlinks = op.Outputs(name.outlinks);
-  auto outlink_alias = op.Attr<std::vector<std::string>>(name.outlink_alias);
-  PADDLE_ENFORCE(outlinks.size() == outlink_alias.size(),
-                 "the size of outlinks and outlink_alias don't match:%d,%d",
-                 outlinks.size(), outlink_alias.size());
-  for (size_t i = 0; i < outlinks.size(); ++i) {
-    rnn::Link link;
-    link.external = outlinks[i];
-    link.internal = outlink_alias[i];
-    (arg->outlinks).push_back(link);
-  }
+  arg->outlinks = op.Outputs(name.outlinks);
 
   auto boot_memories = op.Inputs(name.boot_memories);
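For intuition, the data movement that `SegmentInputs` and `ConcatOutputs` perform when `infer_shape_mode` is false can be pictured with a rough numpy analogue; the shapes and the `tanh` stand-in step net below are made up for illustration:

```python
import numpy as np

seq_len, batch_size, input_dim = 4, 3, 2
inlink = np.random.rand(seq_len, batch_size, input_dim)

# SegmentInputs: step j sees inlink[j], shape [batch_size, input_dim].
step_inputs = [inlink[j] for j in range(seq_len)]

# ... each step net runs on its own slice ...
step_outputs = [np.tanh(x) for x in step_inputs]  # stand-in step net

# ConcatOutputs: stitch the per-step results back into [seq_len, ...].
outlink = np.stack(step_outputs, axis=0)
assert outlink.shape == (seq_len, batch_size, input_dim)
```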
diff --git a/paddle/operators/rnn/recurrent_op_utils.h b/paddle/operators/rnn/recurrent_op_utils.h
index 17941c503cfcc83415b8bc635623a2c2ce2981c3..7dafe5d0088c4c8bf2cad163654e7e4f28eebe2e 100644
--- a/paddle/operators/rnn/recurrent_op_utils.h
+++ b/paddle/operators/rnn/recurrent_op_utils.h
@@ -41,18 +41,11 @@ struct MemoryAttr {
   std::string boot_var;
 };
 
-struct Link {
-  // input or output links name.
-  std::string internal;
-  // alias to avoid duplicate keys in scopes.
-  std::string external;
-};
-
 struct Argument {
   std::string step_net;
   std::string step_scopes;
-  std::vector<Link> inlinks;
-  std::vector<Link> outlinks;
+  std::vector<std::string> inlinks;
+  std::vector<std::string> outlinks;
   std::vector<MemoryAttr> memories;
 };
 
@@ -61,8 +54,6 @@ struct ArgumentName {
   std::string step_scopes;
   std::string inlinks;
   std::string outlinks;
-  std::string inlink_alias;   // the alias of inlinks in step net.
-  std::string outlink_alias;  // the alias of outlinks in step net.
   std::string memories;       // the memory name
   std::string pre_memories;   // the previous memory name
   std::string boot_memories;  // the boot memory name
@@ -72,15 +63,15 @@
  * Prepare inputs for each step net.
  */
 void SegmentInputs(const std::vector<Scope*>& step_scopes,
-                   const std::vector<Link>& inlinks, const size_t seq_len,
-                   bool infer_shape_mode);
+                   const std::vector<std::string>& inlinks,
+                   const size_t seq_len, bool infer_shape_mode);
 
 /**
  * Process outputs of step nets and merge to variables.
  */
 void ConcatOutputs(const std::vector<Scope*>& step_scopes,
-                   const std::vector<Link>& outlinks, const size_t seq_len,
-                   bool infer_shape_mode);
+                   const std::vector<std::string>& outlinks,
+                   const size_t seq_len, bool infer_shape_mode);
 
 void LinkMemories(const std::vector<Scope*>& step_scopes,
                   const std::vector<MemoryAttr>& memories, const size_t step_id,
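The net effect of dropping the `Link` struct is that `Argument` carries plain variable names instead of internal/external pairs. Roughly, as a hypothetical before/after picture (dict shapes invented for illustration, names borrowed from the test below):

```python
# Old: every link paired the global name with a step-scope alias.
old_arg = {
    "inlinks":  [{"external": "x", "internal": "x@alias"}],
    "outlinks": [{"external": "h", "internal": "h@alias"}],
}

# New: just the names, reused verbatim inside the step scopes.
new_arg = {
    "inlinks":  ["x"],
    "outlinks": ["h@mem"],
}
```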
Operator("mul", X="h@pre", Y="U", Out="Uh") sum_op = Operator("add", X="Wx", Y="Uh", Out="sum") - sig_op = Operator("sigmoid", X="sum", Y="h@alias") + sig_op = Operator("sigmoid", X="sum", Y="h@mem") for op in [x_fc_op, h_fc_op, sum_op, sig_op]: stepnet.append_op(op)