From 7665bdba134a05a957a7ec230429fb8e60180d26 Mon Sep 17 00:00:00 2001
From: Yan Chunwei
Date: Mon, 7 Aug 2017 14:29:50 +0800
Subject: [PATCH] Rnn forward logic test (#3291)

* finish forward debug
---
 paddle/framework/operator.h                 |  6 +-
 paddle/operators/add_op.cc                  |  4 +-
 paddle/operators/mul_op.cc                  | 14 +--
 paddle/operators/recurrent_op.cc            | 26 ++++--
 paddle/operators/rnn/recurrent_op_utils.cc  | 27 +++---
 .../v2/framework/tests/test_recurrent_op.py | 90 ++++++++++---------
 6 files changed, 98 insertions(+), 69 deletions(-)

diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h
index b25362fef3..9672492d1c 100644
--- a/paddle/framework/operator.h
+++ b/paddle/framework/operator.h
@@ -174,7 +174,11 @@ class OperatorContext {
   template <typename T>
   T* Output(const size_t index) const {
     auto var = OutputVar(index);
-    PADDLE_ENFORCE(var != nullptr, "Output(%d) should not be nullptr", index);
+    PADDLE_ENFORCE(
+        var != nullptr,
+        "Output(%d) should not be nullptr, which means variable [%s] does "
+        "not exist in scope",
+        index, op_.outputs_[index]);
     return var->GetMutable<T>();
   }
diff --git a/paddle/operators/add_op.cc b/paddle/operators/add_op.cc
index 7fbdd84a39..d4c05ed483 100644
--- a/paddle/operators/add_op.cc
+++ b/paddle/operators/add_op.cc
@@ -20,8 +20,8 @@ namespace operators {
 class AddOp : public OperatorWithKernel {
  protected:
   void InferShape(const InferShapeContext &ctx) const override {
-    PADDLE_ENFORCE(ctx.InputSize() == 2, "Input size of AddOp must be two");
-    PADDLE_ENFORCE(ctx.OutputSize() == 1, "Output size of AddOp must be one");
+    PADDLE_ENFORCE_EQ(ctx.InputSize(), 2);
+    PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1);
     PADDLE_ENFORCE(ctx.InputVar(0) != nullptr && ctx.InputVar(1) != nullptr,
                    "Inputs of AddOp must all be set");
     PADDLE_ENFORCE(ctx.OutputVar(0) != nullptr,
diff --git a/paddle/operators/mul_op.cc b/paddle/operators/mul_op.cc
index f41e95e9db..ccab9a994c 100644
--- a/paddle/operators/mul_op.cc
+++ b/paddle/operators/mul_op.cc
@@ -23,12 +23,16 @@ class MulOp : public OperatorWithKernel {
     PADDLE_ENFORCE(ctx.InputSize() == 2, "The mul op must take two inputs");
     auto dim0 = ctx.Input<Tensor>(0)->dims();
     auto dim1 = ctx.Input<Tensor>(1)->dims();
-    PADDLE_ENFORCE(dim0.size() == 2 && dim1.size() == 2,
-                   "The input of mul op must be matrix");
-    PADDLE_ENFORCE(
-        dim0[1] == dim1[0],
+    PADDLE_ENFORCE_EQ(dim0.size(), 2,
+                      "input X(%s) should be a 2-D tensor, i.e. a matrix",
+                      ctx.op_.Input("X"));
+    PADDLE_ENFORCE_EQ(dim1.size(), 2,
+                      "input Y(%s) should be a 2-D tensor, i.e. a matrix",
+                      ctx.op_.Input("Y"));
+    PADDLE_ENFORCE_EQ(
+        dim0[1], dim1[0],
         "First matrix's width must be equal with second matrix's height.");
-    PADDLE_ENFORCE(ctx.OutputSize() == 1, "The mul op must take one output");
+    PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1, "The mul op takes only one output");
     ctx.Output<Tensor>(0)->Resize({dim0[0], dim1[1]});
   }
 };
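Note on the MulOp checks above: they encode the standard matrix-multiply shape
contract, i.e. X must be a 2-D tensor [M, K], Y a 2-D tensor [K, N], and the
single output is then resized to [M, N]. A minimal NumPy sketch of the same
validation, for reference only (the helper name is hypothetical, not part of
this patch):

    import numpy as np

    def infer_mul_shape(x, y):
        # mirror MulOp::InferShape: both inputs must be matrices ...
        assert x.ndim == 2, "input X should be a 2-D tensor, i.e. a matrix"
        assert y.ndim == 2, "input Y should be a 2-D tensor, i.e. a matrix"
        # ... and X's width must equal Y's height
        assert x.shape[1] == y.shape[0], \
            "first matrix's width must equal second matrix's height"
        # output dims, as in Resize({dim0[0], dim1[1]})
        return (x.shape[0], y.shape[1])

    assert infer_mul_shape(np.zeros((50, 30)), np.zeros((30, 15))) == (50, 15)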
diff --git a/paddle/operators/recurrent_op.cc b/paddle/operators/recurrent_op.cc
index 389d432395..5e9c15ca0e 100644
--- a/paddle/operators/recurrent_op.cc
+++ b/paddle/operators/recurrent_op.cc
@@ -36,6 +36,7 @@ void RecurrentAlgorithm::InferShape(const Scope& scope) const {
   InitMemories(step_scopes[0], true /*infer_shape_mode*/);
   Variable* net = scope.FindVar(arg_->step_net);
   PADDLE_ENFORCE(net != nullptr, "failed to get step net");
+
   for (size_t i = 0; i < seq_len_; i++) {
     if (i > 0) {
       rnn::LinkMemories(step_scopes, arg_->memories, i, -1,
@@ -56,6 +57,7 @@ void RecurrentAlgorithm::Run(const Scope& scope,
   Variable* net = scope.FindVar(arg_->step_net);
   for (size_t step_id = 0; step_id < seq_len_; step_id++) {
+    // create output alias variables
     if (step_id > 0) {
       rnn::LinkMemories(step_scopes, arg_->memories, step_id, -1,
                         false /*infer_shape_mode*/);
@@ -67,22 +69,31 @@ void RecurrentAlgorithm::Run(const Scope& scope,
 }
 
 void RecurrentAlgorithm::CreateScopes(const Scope& scope) const {
-  // TODO(xxx) Only two scopes are needed for inference, this case will be
+  // TODO(superjom) Only two scopes are needed for inference, this case will be
   // supported later.
-  auto step_scopes =
-      scope.FindVar(arg_->step_scopes)->GetMutable<std::vector<Scope*>>();
+  auto step_scopes_var = scope.FindVar(arg_->step_scopes);
+  PADDLE_ENFORCE(step_scopes_var != nullptr, "step_scopes var not in scope");
+  auto step_scopes = step_scopes_var->GetMutable<std::vector<Scope*>>();
+
+  // Now all variables in scope must be created outside of op.
+  auto net_var = scope.FindVar(arg_->step_net);
+  PADDLE_ENFORCE(net_var != nullptr, "no stepnet called %s in scope",
+                 arg_->step_net);
+  auto net_op = net_var->GetMutable<NetOp>();
+  PADDLE_ENFORCE(!net_op->outputs_.empty(), "net_op has no outputs");
 
   if (seq_len_ > step_scopes->size()) {
     for (size_t i = step_scopes->size(); i < seq_len_; ++i) {
       auto& step_scope = scope.NewScope();
-      // Now all variables in scope must be created outside of op.
-      auto net_op = scope.FindVar(arg_->step_net)->GetMutable<NetOp>();
+      // create step net's temp inputs
       for (auto& input : net_op->inputs_) {
         // the weight are located in parent scope
-        if (!step_scope.FindVar(input)) step_scope.NewVar(input);
+        if (!step_scope.FindVar(input))
+          step_scope.NewVar(input)->GetMutable<Tensor>();
       }
-      for (auto& output : net_op->outputs_) {
+      // create stepnet's outputs
+      for (const auto& output : net_op->outputs_) {
         step_scope.NewVar(output);
       }
       step_scopes->emplace_back(&step_scope);
@@ -100,6 +111,7 @@ void RecurrentAlgorithm::InitMemories(Scope* step_scope,
     Tensor* boot_mem = step_scope->FindVar(attr.boot_var)->GetMutable<Tensor>();
     if (infer_shape_mode) {
       pre_mem->Resize(boot_mem->dims());
+      PADDLE_ENFORCE_EQ(pre_mem->dims().size(), 2);
     } else {
       pre_mem->ShareDataWith<float>(*boot_mem);
     }
diff --git a/paddle/operators/rnn/recurrent_op_utils.cc b/paddle/operators/rnn/recurrent_op_utils.cc
index 43c97ba29f..32c6c2dd4e 100644
--- a/paddle/operators/rnn/recurrent_op_utils.cc
+++ b/paddle/operators/rnn/recurrent_op_utils.cc
@@ -53,11 +53,13 @@ void ConcatOutputs(const std::vector<Scope*>& step_scopes,
     PADDLE_ENFORCE(output_var != nullptr, "output link [%s] is not in scope.",
                    outlinks[i].external);
     Tensor* output = output_var->GetMutable<Tensor>();
+
     if (infer_shape_mode) {
-      fmw::DDim step_dims = step_scopes[0]
-                                ->FindVar(outlinks[i].internal)
-                                ->GetMutable<Tensor>()
-                                ->dims();
+      auto step_scope_var = step_scopes[0]->FindVar(outlinks[i].internal);
+      PADDLE_ENFORCE(step_scope_var != nullptr, "%s not in scope",
+                     outlinks[i].internal);
+      fmw::DDim step_dims =
+          step_scope_var->template GetMutable<Tensor>()->dims();
       std::vector<int> dims_vec = vectorize(step_dims);
       dims_vec.insert(dims_vec.begin(), seq_len);
       output->Resize(fmw::make_ddim(dims_vec));
@@ -79,14 +81,15 @@ void LinkMemories(const std::vector<Scope*>& scopes,
                   const std::vector<rnn::MemoryAttr>& memories,
                   const size_t step_id, const int offset,
                   bool infer_shape_mode) {
-  PADDLE_ENFORCE(step_id < scopes.size(),
-                 "step [%d] is out of range of step scopes' size [%d]", step_id,
-                 scopes.size());
-  PADDLE_ENFORCE(static_cast<int>(step_id) + offset >= 0,
-                 "offset [%d] must be large than -[%d]", offset, step_id);
-  PADDLE_ENFORCE(step_id + offset < scopes.size(),
-                 "offset [%d] is out of range, it must be less than (%d - %d)",
-                 offset, scopes.size(), step_id);
+  PADDLE_ENFORCE_LT(step_id, scopes.size(),
+                    "step [%d] is out of range of step scopes' size [%d]",
+                    step_id, scopes.size());
+  PADDLE_ENFORCE_GE(static_cast<int>(step_id) + offset, 0,
+                    "offset [%d] must be larger than -[%d]", offset, step_id);
+  PADDLE_ENFORCE_LT(
+      step_id + offset, scopes.size(),
+      "offset [%d] is out of range, it must be less than (%d - %d)", offset,
+      scopes.size(), step_id);
   auto scope = scopes[step_id];
   auto linked_scope = scopes[step_id + offset];
   for (auto& attr : memories) {
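Note on LinkMemories above: it wires each step's pre-memory (h@pre here) to
the memory produced `offset` steps away (offset is -1 in the forward pass), so
the three bounds checks together guarantee 0 <= step_id + offset <
scopes.size(). A rough Python sketch of that logic, with plain dicts standing
in for step scopes (names are illustrative, not the C++ API):

    def link_memories(scopes, memories, step_id, offset):
        # the linked step must exist: 0 <= step_id + offset < len(scopes)
        assert step_id < len(scopes)
        assert step_id + offset >= 0
        assert step_id + offset < len(scopes)
        scope, linked = scopes[step_id], scopes[step_id + offset]
        for pre_mem, mem in memories:  # e.g. [("h@pre", "h@alias")]
            scope[pre_mem] = linked[mem]  # ShareDataWith in C++; assignment here

    # forward pass: step t reads the memory written at step t - 1
    scopes = [{"h@alias": t} for t in range(4)]
    for t in range(1, 4):
        link_memories(scopes, [("h@pre", "h@alias")], t, -1)
    assert scopes[3]["h@pre"] == 2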
"step [%d] is out of range of step scopes' size [%d]", + step_id, scopes.size()); + PADDLE_ENFORCE_GE(static_cast(step_id) + offset, 0, + "offset [%d] must be large than -[%d]", offset, step_id); + PADDLE_ENFORCE_LT( + step_id + offset, scopes.size(), + "offset [%d] is out of range, it must be less than (%d - %d)", offset, + scopes.size(), step_id); auto scope = scopes[step_id]; auto linked_scope = scopes[step_id + offset]; for (auto& attr : memories) { diff --git a/python/paddle/v2/framework/tests/test_recurrent_op.py b/python/paddle/v2/framework/tests/test_recurrent_op.py index 0457e3f16a..5c77c477b3 100644 --- a/python/paddle/v2/framework/tests/test_recurrent_op.py +++ b/python/paddle/v2/framework/tests/test_recurrent_op.py @@ -1,3 +1,4 @@ +import logging import paddle.v2.framework.core as core import unittest import numpy as np @@ -7,10 +8,9 @@ ops = creation.op_creations def create_tensor(scope, name, shape): - tensor = scope.create_var(name).get_tensor() + tensor = scope.new_var(name).get_tensor() tensor.set_dims(shape) - tensor.alloc_float() - tensor.set(np.random.random(shape)) + tensor.set(np.random.random(shape), core.CPUPlace()) return tensor @@ -31,40 +31,36 @@ class TestRNN(unittest.TestCase): - h ''' + input_dim = 30 + batch_size = 50 + weight_dim = 15 + sent_len = 11 + def init(self): - input_dim = 30 - batch_size = 50 - weight_dim = 15 - - self.scope = core.Scope(None) - - # create vars - create_tensor(self.scope, "x", [batch_size, input_dim]) - create_tensor(self.scope, "W", [input_dim, weight_dim]) - create_tensor(self.scope, "U", [weight_dim, weight_dim]) - create_tensor(self.scope, "h_boot", [batch_size, weight_dim]) - - x_alias = "x@alias" - y_alias = "y@alias" - memory = "h@alias" - prememory = "h@pre" - output = "rnn_out" - output_alias = "rnn_out@alias" - - # create step net - stepnet_var = self.scope.create_var("stepnet") - stepnet = stepnet_var.get_net() - # stepnet = core.Net.create() - x_fc_op = ops.fc(X=x_alias, W="W", Y="Wx") - h_fc_op = ops.fc(X=prememory, W="U", Y="Uh") - sum_op = ops.add_two(X="Wx", Y="Uh", Out="sum") - sig_op = ops.sigmoid(X="sum", Y=memory) - stepnet.add_op(x_fc_op) - stepnet.add_op(h_fc_op) - stepnet.add_op(sum_op) - stepnet.add_op(sig_op) - stepnet.complete_add_op(True) + self.scope = core.Scope() + + self.create_global_variables() + self.create_step_net() + rnn_op = self.create_rnn_op() + ctx = core.DeviceContext.create(core.CPUPlace()) + print 'infer_shape' + rnn_op.infer_shape(self.scope) + + rnn_op.run(self.scope, ctx) + + def create_global_variables(self): + # create inlink + create_tensor(self.scope, "x", + [self.sent_len, self.batch_size, self.input_dim]) + create_tensor(self.scope, "W", [self.input_dim, self.input_dim]) + create_tensor(self.scope, "U", [self.input_dim, self.input_dim]) + create_tensor(self.scope, "h_boot", [self.batch_size, self.input_dim]) + self.scope.new_var("step_scopes") + self.scope.new_var("h@alias") + self.scope.new_var("h") + + def create_rnn_op(self): # create RNNOp rnnop = ops.recurrent_op( # inputs @@ -72,17 +68,27 @@ class TestRNN(unittest.TestCase): boot_memories=["h_boot"], step_net="stepnet", # outputs - outlinks=[output], + outlinks=["h"], step_scopes="step_scopes", # attributes inlink_alias=["x@alias"], - outlink_alias=[output_alias], - pre_memories=[prememory], - memories=[memory]) + outlink_alias=["h@alias"], + pre_memories=["h@pre"], + memories=["h@alias"]) + return rnnop + + def create_step_net(self): + var = self.scope.new_var("stepnet") + stepnet = var.get_net() - ctx = 
-- 
GitLab
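A closing cross-check of the ConcatOutputs change: in infer-shape mode it
reads the per-step tensor's dims from the first step scope and prepends
seq_len, so the outlink h in the test above ends up with shape
[sent_len, batch_size, input_dim]. A small illustrative sketch of that resize
rule (dicts stand in for scopes; not the C++ API):

    import numpy as np

    def concat_outputs_shape(step_scopes, internal, seq_len):
        # dims of the per-step tensor, taken from step scope 0 ...
        step_dims = list(step_scopes[0][internal].shape)
        # ... with seq_len prepended, as in the infer_shape_mode branch
        return [seq_len] + step_dims

    scopes = [{"h@alias": np.zeros((50, 30))} for _ in range(11)]
    assert concat_outputs_shape(scopes, "h@alias", 11) == [11, 50, 30]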