diff --git a/paddle/framework/scope.h b/paddle/framework/scope.h
index 2ba3f8ed355b48800cfa4180e4e8a94f2c9958a9..c93b03e48130afe9568089b6a7586c4185d1d5b4 100644
--- a/paddle/framework/scope.h
+++ b/paddle/framework/scope.h
@@ -58,6 +58,8 @@ class Scope {
   /// nullptr if cannot find.
   Variable* FindVar(const std::string& name) const;
 
+  const Scope& parent() const { return *parent_; }
+
   /// Find the scope or an ancestor scope that contains the given variable.
   const Scope* FindScope(const Variable* var) const;
 
diff --git a/paddle/operators/prelu_op.h b/paddle/operators/prelu_op.h
index 63031c25cc3570cf40440726ea76976953d5417a..9843c476e4a49aba456219c0e2e992aa826f1685 100644
--- a/paddle/operators/prelu_op.h
+++ b/paddle/operators/prelu_op.h
@@ -94,7 +94,7 @@ class PReluGradKernel : public framework::OpKernel {
     Transform(context.device_context(), out_ptr, out_ptr + numel, dout_ptr,
               dx_ptr, PReluGradFunctor<T>(alpha_ptr));
 
-    // TODO (Zhuoyuan): add dalpha upgrade when GPU kernels ready
+    // TODO(Zhuoyuan): add dalpha upgrade when GPU kernels ready
   }
 };
 
diff --git a/paddle/operators/recurrent_op.cc b/paddle/operators/recurrent_op.cc
index fc57f0dcea9a5f81ada518bd5c945e9f50da5d7f..ad985839f5908d9235a4dbefc9b841362810114e 100644
--- a/paddle/operators/recurrent_op.cc
+++ b/paddle/operators/recurrent_op.cc
@@ -29,8 +29,11 @@ using Tensor = framework::Tensor;
 using LoDTensor = framework::LoDTensor;
 
 void RecurrentAlgorithm::InferShape(const Scope& scope) const {
-  seq_len_ =
-      scope.FindVar(arg_->inlinks[0])->GetMutable<LoDTensor>()->dims()[0];
+  auto* input0 = scope.FindVar(arg_->inlinks[0]);
+  PADDLE_ENFORCE_NOT_NULL(input0);
+  seq_len_ = input0->GetMutable<LoDTensor>()->dims()[0];
+  PADDLE_ENFORCE_GT(seq_len_, 0);
+
   CreateScopes(scope);
   auto step_scopes = GetStepScopes(scope);
   rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_,
diff --git a/paddle/operators/rnn/recurrent_op_utils.cc b/paddle/operators/rnn/recurrent_op_utils.cc
index 3e7e301ae3daa97fe0b802c44bfbd67620f7ddcb..ca7219b26d83eb6b8db75a5ed9cd360c5ac1d5df 100644
--- a/paddle/operators/rnn/recurrent_op_utils.cc
+++ b/paddle/operators/rnn/recurrent_op_utils.cc
@@ -28,14 +28,15 @@ void SegmentInputs(const std::vector<Scope*>& step_scopes,
                    const size_t seq_len, bool infer_shape_mode) {
   PADDLE_ENFORCE(!inlinks.empty(), "no in links are provided.");
   for (size_t i = 0; i < inlinks.size(); ++i) {
-    auto input_var = step_scopes[0]->FindVar(inlinks[i]);
-    PADDLE_ENFORCE(input_var != nullptr, "input link [%s] is not in scope.",
-                   inlinks[i]);
+    // global inputs
+    auto input_var = step_scopes[0]->parent().FindVar(inlinks[i]);
+    PADDLE_ENFORCE_NOT_NULL(input_var, "input link [%s] is not in scope.",
+                            inlinks[i]);
     LoDTensor* input = input_var->GetMutable<LoDTensor>();
     f::DDim dims = input->dims();
-    PADDLE_ENFORCE(static_cast<size_t>(dims[0]) == seq_len,
-                   "all the inlinks must have same length");
+    PADDLE_ENFORCE_EQ(static_cast<size_t>(dims[0]), seq_len,
+                      "all the inlinks must be the same length");
     f::DDim step_dims = slice_ddim(dims, 1, dims.size());
     for (size_t j = 0; j < seq_len; j++) {
       Tensor* step_input =
@@ -54,15 +55,14 @@ void ConcatOutputs(const std::vector<Scope*>& step_scopes,
                    const std::vector<std::string>& outlinks,
                    const size_t seq_len, bool infer_shape_mode) {
   for (size_t i = 0; i < outlinks.size(); i++) {
-    auto output_var = step_scopes[0]->FindVar(outlinks[i]);
-    PADDLE_ENFORCE(output_var != nullptr, "output link [%s] is not in scope.",
-                   outlinks[i]);
+    auto output_var = step_scopes[0]->parent().FindVar(outlinks[i]);
+    PADDLE_ENFORCE_NOT_NULL(output_var, "output link [%s] is not in scope.",
+                            outlinks[i]);
     LoDTensor* output = output_var->GetMutable<LoDTensor>();
     if (infer_shape_mode) {
-      auto step_scope_var = step_scopes[0]->FindVar(outlinks[i].internal);
-      PADDLE_ENFORCE(step_scope_var != nullptr, "%s not in scope",
-                     outlinks[i].internal);
+      auto step_scope_var = step_scopes[0]->FindVar(outlinks[i]);
+      PADDLE_ENFORCE_NOT_NULL(step_scope_var, "%s not in scope", outlinks[i]);
       f::DDim step_dims =
           step_scope_var->template GetMutable<LoDTensor>()->dims();
       std::vector<int> dims_vec = vectorize(step_dims);
diff --git a/python/paddle/v2/framework/tests/test_recurrent_op.py b/python/paddle/v2/framework/tests/test_recurrent_op.py
index 886f941d4dc5fa253e92a4a7ac0c5e5abf7b0797..79eda70021b76cd06e4c40740b1ca49476f4c503 100644
--- a/python/paddle/v2/framework/tests/test_recurrent_op.py
+++ b/python/paddle/v2/framework/tests/test_recurrent_op.py
@@ -59,7 +59,6 @@ class PySimpleRNNTest(unittest.TestCase):
 
     def test_forward(self):
         output = self.rnn.forward()
-        print 'output', output
 
 
 def create_tensor(scope, name, shape, np_data):
@@ -103,7 +102,7 @@ class TestRecurrentOp(unittest.TestCase):
         ctx = core.DeviceContext.create(core.CPUPlace())
         self.rnnop.infer_shape(self.scope)
         self.rnnop.run(self.scope, ctx)
-        return np.array(self.scope.find_var("h").get_tensor())
+        return np.array(self.scope.find_var("h@mem").get_tensor())
 
     def create_global_variables(self):
         # create inlink
@@ -123,7 +122,7 @@ class TestRecurrentOp(unittest.TestCase):
         create_tensor(self.scope, "h_boot",
                       [self.batch_size, self.input_dim], h_boot_np_data)
         self.scope.new_var("step_scopes")
-        self.scope.new_var("h")
+        self.scope.new_var("h@mem")
 
     def create_rnn_op(self):
         # create RNNOp
@@ -133,7 +132,7 @@ class TestRecurrentOp(unittest.TestCase):
         self.rnnop = RecurrentOp(
             # inputs
             inlinks=["x"],
             boot_memories=["h_boot"],
             step_net="stepnet",
             # outputs
-            outlinks=["h"],
+            outlinks=["h@mem"],
             step_scopes="step_scopes",
             # attributes
             pre_memories=["h@pre"],
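
Note on the parent-scope lookup: before this patch, SegmentInputs and ConcatOutputs looked the inlink/outlink variables up in step_scopes[0] itself, which works only because Scope::FindVar falls through to ancestor scopes. The patch routes the lookup through the new Scope::parent() accessor instead, making it explicit that the global input/output variables live in the enclosing scope while each step scope holds only per-step state. Below is a minimal sketch of that idea, assuming a deliberately simplified Scope (plain int payloads instead of Variable/LoDTensor; not Paddle's real class):

    #include <cassert>
    #include <string>
    #include <unordered_map>

    // Simplified stand-in for paddle::framework::Scope: a chain of
    // name -> value maps whose lookup falls back to the parent scope.
    class Scope {
     public:
      explicit Scope(const Scope* parent = nullptr) : parent_(parent) {}

      int* NewVar(const std::string& name) { return &vars_[name]; }

      // Like Scope::FindVar: search this scope, then the ancestors.
      const int* FindVar(const std::string& name) const {
        auto it = vars_.find(name);
        if (it != vars_.end()) return &it->second;
        return parent_ ? parent_->FindVar(name) : nullptr;
      }

      // The accessor added by this patch; note it dereferences parent_
      // unconditionally, so it must not be called on the root scope.
      const Scope& parent() const { return *parent_; }

     private:
      const Scope* parent_;
      std::unordered_map<std::string, int> vars_;
    };

    int main() {
      Scope global;          // owns the RNN's global inlinks/outlinks
      *global.NewVar("x") = 42;
      Scope step(&global);   // a per-step scope created by the RNN op

      // Implicit fallback and explicit parent lookup both find "x".
      assert(step.FindVar("x") != nullptr);
      assert(step.parent().FindVar("x") == step.FindVar("x"));
      return 0;
    }

One caveat visible in the patch itself: parent() returns *parent_ without a null check, so calling it on the root scope (whose parent_ is null) is undefined behavior; SegmentInputs and ConcatOutputs rely on step scopes always having a parent.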
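On the enforce-macro changes: the patch replaces generic PADDLE_ENFORCE(cond, msg) checks with the specialized PADDLE_ENFORCE_NOT_NULL / PADDLE_ENFORCE_EQ / PADDLE_ENFORCE_GT forms, which state the intent of each check directly. A rough sketch of how such specialized macros layer over a generic one (hypothetical MY_ENFORCE* names, not Paddle's actual definitions, which additionally capture file/line, support printf-style messages like the "%s" above, and allow omitting the message):

    #include <sstream>
    #include <stdexcept>

    // Generic check: throw with the stringified condition and a message.
    #define MY_ENFORCE(cond, msg)                              \
      do {                                                     \
        if (!(cond)) {                                         \
          std::ostringstream os;                               \
          os << "enforce " << #cond << " failed: " << (msg);   \
          throw std::runtime_error(os.str());                  \
        }                                                      \
      } while (0)

    // Specialized forms mirroring the ones used in the patch.
    #define MY_ENFORCE_NOT_NULL(ptr, msg) MY_ENFORCE((ptr) != nullptr, msg)
    #define MY_ENFORCE_EQ(a, b, msg) MY_ENFORCE((a) == (b), msg)
    #define MY_ENFORCE_GT(a, b, msg) MY_ENFORCE((a) > (b), msg)

    int main() {
      int seq_len = 3;
      MY_ENFORCE_GT(seq_len, 0, "sequence length must be positive");

      const int* input = &seq_len;
      MY_ENFORCE_NOT_NULL(input, "input link is not in scope");

      MY_ENFORCE_EQ(seq_len, 3, "all the inlinks must be the same length");
      return 0;
    }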