From 2c5d4c6d200c478f9660593cdff67bad10c56402 Mon Sep 17 00:00:00 2001
From: dangqingqing
Date: Mon, 30 Oct 2017 16:19:58 +0800
Subject: [PATCH] Clean code and update doc.

---
 paddle/operators/lstm_op.cc                      | 10 +++++-----
 paddle/operators/lstm_op.h                       | 14 +-------------
 python/paddle/v2/framework/tests/test_lstm_op.py |  8 +++++---
 3 files changed, 11 insertions(+), 21 deletions(-)

diff --git a/paddle/operators/lstm_op.cc b/paddle/operators/lstm_op.cc
index 10b60e3de62..94342d94070 100644
--- a/paddle/operators/lstm_op.cc
+++ b/paddle/operators/lstm_op.cc
@@ -126,11 +126,11 @@ class LSTMOpMaker : public framework::OpProtoAndCheckerMaker {
              " - Bias = {b_c, b_i, b_f, b_o, W_ic, W_fc, W_oc}.")
         .AsDispensable();
     AddOutput("Hidden",
-              "(LoDTensor) the hidden state lod tensor of LSTM operator. "
-              "The shape and lod is the same with the `Input`.");
+              "(LoDTensor) the hidden state of the LSTM operator. "
+              "The shape is (T x D), and the lod is the same as that of `Input`.");
     AddOutput("Cell",
-              "(LoDTensor) the cell state lod tensor of LSTM operator. "
-              "The shape and lod is the same with the `Input`.");
+              "(LoDTensor) the cell state of the LSTM operator. "
+              "The shape is (T x D), and the lod is the same as that of `Input`.");
     AddOutput("BatchGate",
               "(LoDTensor) This LoDTensor contains input gate, forget gate "
               "and output gate after the nonlinear computation. This "
@@ -141,7 +141,7 @@ class LSTMOpMaker : public framework::OpProtoAndCheckerMaker {
               "in the raw input.")
         .AsIntermediate();
     AddOutput("BatchCellPreAct",
-              "(LoDTensor) This LoDTensor is get in the forward and used "
+              "(LoDTensor) This LoDTensor is obtained in the forward and used "
               "in the backward.")
         .AsIntermediate();
     AddAttr<bool>("usePeepholes",
diff --git a/paddle/operators/lstm_op.h b/paddle/operators/lstm_op.h
index d147b84aefe..af088b80b42 100644
--- a/paddle/operators/lstm_op.h
+++ b/paddle/operators/lstm_op.h
@@ -155,7 +155,6 @@ class LSTMGradKernel : public framework::OpKernel<T> {
     auto* batch_cell_pre_act = ctx.Input<LoDTensor>("BatchCellPreAct");
 
     auto* hidden_g = ctx.Input<LoDTensor>(framework::GradVarName("Hidden"));
-    // auto* cell_g = ctx.Input<LoDTensor>(framework::GradVarName("Cell"));
 
     auto* in_g = ctx.Output<LoDTensor>(framework::GradVarName("Input"));
     auto* weight_g = ctx.Output<Tensor>(framework::GradVarName("Weight"));
@@ -251,7 +250,7 @@ class LSTMGradKernel : public framework::OpKernel<T> {
       lstm_grad.gateGrad = gate_g.data<T>();
       lstm_grad.outputGrad = out_g.data<T>();
 
-      if (n != 0) {
+      if (n) {
         int bstart_pre = static_cast<int>(batch_starts[n - 1]);
         Tensor cell_pre = batch_cell.Slice(bstart_pre, bstart);
         Tensor cell_pre_g = batch_cell_g.Slice(bstart_pre, bstart);
@@ -292,17 +291,6 @@ class LSTMGradKernel : public framework::OpKernel<T> {
     }
 
     if (bias && bias_g) {
       /* backward bias */
-      // Following Eigen computation failed for double type on GPU device.
-      // bias_g->mutable_data<T>(ctx.GetPlace());
-      // Tensor bias_mat;
-      // bias_mat.ShareDataWith(*bias_g);
-      // bias_mat.Resize({1, 4 * frame_size});
-
-      // auto bias_g_e = EigenVector<T>::Flatten(bias_mat);
-      // auto gate_g_e = EigenMatrix<T>::From(batch_gate_g);
-      // Eigen::array<int, 1> dims{{0}};
-      // bias_g_e.device(ctx.GetEigenDevice<Place>()) = gate_g_e.sum(dims);
-
       int m = static_cast<int>(batch_gate_g.dims()[0]);
       int n = static_cast<int>(batch_gate_g.dims()[1]);
 
diff --git a/python/paddle/v2/framework/tests/test_lstm_op.py b/python/paddle/v2/framework/tests/test_lstm_op.py
index f308ba82fa6..fe7f9783e46 100644
--- a/python/paddle/v2/framework/tests/test_lstm_op.py
+++ b/python/paddle/v2/framework/tests/test_lstm_op.py
@@ -161,9 +161,11 @@ class TestLstmOp(OpTest):
 
     #TODO(qingqing) add more unit testing case
     def test_check_grad(self):
-        # TODO(qingqing) remove folowing two lines after the check_grad is refined.
-        self.outputs['BatchGate'] = None
-        self.outputs['BatchCellPreAct'] = None
+        # TODO(qingqing) remove following lines after the check_grad is refined.
+        N = len(self.lod[0]) - 1
+        self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
+        self.outputs['BatchCellPreAct'] = np.zeros(
+            (N, self.D)).astype('float64')
         self.check_grad(
             ['Input', 'Weight', 'Bias'], ['Hidden'], max_relative_error=0.02)
 
--
GitLab
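For reference, a minimal numpy sketch (not part of the patch) of the shapes the updated doc strings describe: T (total time steps across the batch) and D (hidden size) are placeholder values, and the 4 * D gate width simply mirrors the placeholder arrays added in the updated test.

    import numpy as np

    T, D = 7, 16  # placeholder values: total time steps and hidden size
    hidden = np.zeros((T, D), dtype='float64')    # Hidden: (T x D), lod same as Input
    cell = np.zeros((T, D), dtype='float64')      # Cell: (T x D), lod same as Input
    batch_gate = np.zeros((T, 4 * D), dtype='float64')      # gate activations, width 4 * D
    batch_cell_pre_act = np.zeros((T, D), dtype='float64')  # cell pre-activation, reused in backward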