From fe8d006f7e85f21cfc87f4d1cd7db5f5c9e6a9b1 Mon Sep 17 00:00:00 2001
From: Guo Sheng
Date: Mon, 18 May 2020 14:35:47 +0800
Subject: [PATCH] API/OP(sequence_expand_as) error message enhancement
 (#23712)

* API/OP(sequence_expand_as) error message enhancement.
test=develop

Co-authored-by: FrostML <380185688@qq.com>
---
 .../sequence_ops/sequence_expand_as_op.cc  | 32 ++++++++++--------
 .../sequence_ops/sequence_expand_as_op.h   | 22 ++++++++++---
 python/paddle/fluid/layers/sequence_lod.py | 33 ++++++++++---------
 .../sequence/test_sequence_expand_as.py    | 19 +++++++++++
 4 files changed, 73 insertions(+), 33 deletions(-)

diff --git a/paddle/fluid/operators/sequence_ops/sequence_expand_as_op.cc b/paddle/fluid/operators/sequence_ops/sequence_expand_as_op.cc
index 8e2a95a8148..d93185388a6 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_expand_as_op.cc
+++ b/paddle/fluid/operators/sequence_ops/sequence_expand_as_op.cc
@@ -27,18 +27,18 @@ class SequenceExpandAsOp : public framework::OperatorWithKernel {
 
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SequenceExpandAsOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Y"),
-                   "Input(Y) of SequenceExpandAsOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of SequenceExpandAsOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInputs("X"), "Input", "X", "SequenceExpandAs");
+    OP_INOUT_CHECK(ctx->HasInputs("Y"), "Input", "Y", "SequenceExpandAs");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "SequenceExpandAs");
 
     auto x_dims = ctx->GetInputDim("X");
     auto out_dims = x_dims;
 
     PADDLE_ENFORCE_GE(x_dims.size(), 2,
-                      "Dimension number of Input(X) should be at least 2.");
+                      platform::errors::InvalidArgument(
+                          "Dimension number of Input(X) should be at least 2. "
+                          "But received X's dimensions = %d, X's shape = [%s].",
+                          x_dims.size(), x_dims));
 
     if (ctx->IsRuntime()) {
       framework::Variable* x_var =
@@ -50,11 +50,17 @@ class SequenceExpandAsOp : public framework::OperatorWithKernel {
       auto& y_lod = y_var->Get<framework::LoDTensor>().lod();
 
       PADDLE_ENFORCE_EQ(y_lod.size(), 1,
-                        "Level number of Input(Y)'s lod should be 1.");
+                        platform::errors::InvalidArgument(
+                            "Level number of Input(Y)'s lod should be 1. But "
+                            "received Y's lod level = %d.",
+                            y_lod.size()));
 
       PADDLE_ENFORCE_EQ(static_cast<int64_t>(x_dim[0]), y_lod[0].size() - 1,
-                        "The first dimension of Input(X) should be equal "
-                        "to the size of Input(Y)'s 0 level lod.");
+                        platform::errors::InvalidArgument(
+                            "The first dimension of Input(X) should be one "
+                            "less than the size of Input(Y)'s 0 level lod. But "
+                            "received X's shape[0] = %d, Y's lod[0].size = %d.",
+                            x_dim[0], y_lod[0].size()));
 
       int64_t out_first_dim = 0;
       if (y_lod[0].size() <= 1) {
@@ -138,9 +144,9 @@ class SequenceExpandAsOpGrad : public framework::OperatorWithKernel {
 
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) should not be null.");
+    OP_INOUT_CHECK(ctx->HasInputs("X"), "Input", "X", "SequenceExpandAsGrad");
+    OP_INOUT_CHECK(ctx->HasInputs(framework::GradVarName("Out")), "Input",
+                   "Out@GRAD", "SequenceExpandAsGrad");
 
     auto x_dims = ctx->GetInputDim("X");
     auto x_grad_name = framework::GradVarName("X");
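For reference, the invariant behind the reworked messages above: X must have exactly one row per sequence in Y's level-0 LoD (a LoD holding N sequences stores N + 1 offsets), and the op repeats row i of X by the length of the i-th sequence. A minimal numpy sketch of these semantics, not part of the patch:

    import numpy as np

    def sequence_expand_as_ref(x, y_lod0):
        # InferShape's runtime check: x.shape[0] == len(y_lod0) - 1
        assert x.shape[0] == len(y_lod0) - 1
        repeats = np.diff(y_lod0)  # per-sequence lengths in Y
        return np.repeat(x, repeats, axis=0)

    x = np.array([[1.0], [2.0], [3.0]])
    out = sequence_expand_as_ref(x, y_lod0=[0, 2, 5, 6])
    # out is [[1], [1], [2], [2], [2], [3]]; out.shape[0] == y_lod0[-1] == 6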
But " + "received X's shape[0] = %d, Y's lod[0].size = %d.", + x_dim[0], y_lod[0].size())); int64_t out_first_dim = 0; if (y_lod[0].size() <= 1) { @@ -138,9 +144,9 @@ class SequenceExpandAsOpGrad : public framework::OperatorWithKernel { protected: void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null."); - PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), - "Input(Out@GRAD) should not be null."); + OP_INOUT_CHECK(ctx->HasInputs("X"), "Input", "X", "SequenceExpandAsGrad"); + OP_INOUT_CHECK(ctx->HasInputs(framework::GradVarName("Out")), "Input", + "Out@GRAD", "SequenceExpandAsGrad"); auto x_dims = ctx->GetInputDim("X"); auto x_grad_name = framework::GradVarName("X"); diff --git a/paddle/fluid/operators/sequence_ops/sequence_expand_as_op.h b/paddle/fluid/operators/sequence_ops/sequence_expand_as_op.h index b215c894273..6afcc72763d 100644 --- a/paddle/fluid/operators/sequence_ops/sequence_expand_as_op.h +++ b/paddle/fluid/operators/sequence_ops/sequence_expand_as_op.h @@ -74,13 +74,25 @@ class SequenceExpandAsKernel : public framework::OpKernel { auto *y = context.Input("Y"); auto *out = context.Output("Out"); - PADDLE_ENFORCE_EQ(y->lod().empty(), false, - "Input(Y) Tensor of SequenceExpandAsOp does not contain " - "LoD information."); + PADDLE_ENFORCE_EQ( + y->lod().empty(), false, + platform::errors::InvalidArgument( + "Input(Y) of SequenceExpandAsOp has wrong LoD information. " + "Expected Y's lod is not empty, but received empty lod.")); auto &y_lod = y->lod(); - PADDLE_ENFORCE_EQ(y_lod.size(), 1, "LoD of Y should be 1."); - PADDLE_ENFORCE_GT(y_lod[0].size(), 1, "."); + PADDLE_ENFORCE_EQ(y_lod.size(), 1, + platform::errors::InvalidArgument( + "Input(Y) of SequenceExpandAsOp has wrong LoD " + "information. Expected Y's lod level = 1, but " + "received lod level = %d.", + y_lod.size())); + PADDLE_ENFORCE_GT(y_lod[0].size(), 1, + platform::errors::InvalidArgument( + "Input(Y) of SequenceExpandAsOp has wrong LoD " + "information. Expected the size of Y's lod[0] > 1, " + "but received lod[0].size = %d.", + y_lod[0].size())); out->mutable_data(context.GetPlace()); diff --git a/python/paddle/fluid/layers/sequence_lod.py b/python/paddle/fluid/layers/sequence_lod.py index b0fcc92adc3..f33ea099a42 100644 --- a/python/paddle/fluid/layers/sequence_lod.py +++ b/python/paddle/fluid/layers/sequence_lod.py @@ -52,7 +52,7 @@ def sequence_conv(input, act=None, name=None): """ - :api_attr: Static Graph + :api_attr: Static Graph **Notes: The Op only receives LoDTensor as input. If your input is Tensor, please use conv2d Op.(fluid.layers.** :ref:`api_fluid_layers_conv2d` ). @@ -176,7 +176,7 @@ def sequence_conv(input, def sequence_softmax(input, use_cudnn=False, name=None): """ - :api_attr: Static Graph + :api_attr: Static Graph **Note**: @@ -260,7 +260,7 @@ def sequence_softmax(input, use_cudnn=False, name=None): def sequence_pool(input, pool_type, is_test=False, pad_value=0.0): """ - :api_attr: Static Graph + :api_attr: Static Graph **Notes: The Op only receives LoDTensor as input. If your input is Tensor, please use pool2d Op.(fluid.layers.** :ref:`api_fluid_layers_pool2d` ). @@ -374,7 +374,7 @@ def sequence_pool(input, pool_type, is_test=False, pad_value=0.0): @templatedoc() def sequence_concat(input, name=None): """ - :api_attr: Static Graph + :api_attr: Static Graph **Notes: The Op only receives LoDTensor as input. If your input is Tensor, please use concat Op.(fluid.layers.** :ref:`api_fluid_layers_concat` ). 
@@ -885,7 +888,7 @@ def sequence_expand_as(x, y, name=None):
 
 def sequence_pad(x, pad_value, maxlen=None, name=None):
     """
-    :api_attr: Static Graph
+    :api_attr: Static Graph
 
     This layer padding the sequences in a same batch to a common length (according \
     to ``maxlen``). The padding value is defined by ``pad_value``, and will be \
@@ -999,7 +1002,7 @@ def sequence_pad(x, pad_value, maxlen=None, name=None):
 
 def sequence_unpad(x, length, name=None):
     """
-    :api_attr: Static Graph
+    :api_attr: Static Graph
 
     **Note**:
@@ -1074,7 +1077,7 @@ def sequence_unpad(x, length, name=None):
 
 def sequence_reshape(input, new_dim):
     """
-    :api_attr: Static Graph
+    :api_attr: Static Graph
 
     **Notes: The Op only receives LoDTensor as input. If your input is Tensor, please use reshape Op.(fluid.layers.** :ref:`api_fluid_layers_reshape` ).
@@ -1136,7 +1139,7 @@ def sequence_reshape(input, new_dim):
 
 def sequence_scatter(input, index, updates, name=None):
     """
-    :api_attr: Static Graph
+    :api_attr: Static Graph
 
     **Note**:
@@ -1226,7 +1229,7 @@ def sequence_scatter(input, index, updates, name=None):
 
 def sequence_enumerate(input, win_size, pad_value=0, name=None):
     """
-    :api_attr: Static Graph
+    :api_attr: Static Graph
 
     Generate a new sequence for the input index sequence with \
     shape ``[d_1, win_size]``, which enumerates all the \
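Conversely, the checks added to sequence_expand_as above now reject bad inputs in Python with a TypeError before graph execution, rather than failing later inside the C++ kernel. A minimal sketch of the three rejected cases, not part of the patch; the new unit test below exercises the same cases:

    import numpy as np
    import paddle.fluid as fluid

    with fluid.program_guard(fluid.Program(), fluid.Program()):
        y = fluid.data(name='y', shape=[None, 4], dtype='float32', lod_level=1)

        # x must be a Variable, not a raw numpy array
        x_np = np.random.random((2, 4)).astype('float32')
        # fluid.layers.sequence_expand_as(x_np, y)      # raises TypeError

        # x's dtype must be float32, float64, int32 or int64
        x_bool = fluid.data(name='x_bool', shape=[None, 4], dtype='bool')
        # fluid.layers.sequence_expand_as(x_bool, y)    # raises TypeError

        # y must be a Variable as well
        x_ok = fluid.data(name='x_ok', shape=[None, 4], dtype='float32')
        y_np = np.random.random((2, 4)).astype('float32')
        # fluid.layers.sequence_expand_as(x_ok, y_np)   # raises TypeError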
diff --git a/python/paddle/fluid/tests/unittests/sequence/test_sequence_expand_as.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_expand_as.py
index 3b3a1e9d5cd..98996e21e1c 100644
--- a/python/paddle/fluid/tests/unittests/sequence/test_sequence_expand_as.py
+++ b/python/paddle/fluid/tests/unittests/sequence/test_sequence_expand_as.py
@@ -18,7 +18,9 @@ import unittest
 import numpy as np
 import sys
 sys.path.append("../")
+import paddle.fluid as fluid
 from op_test import OpTest
+from paddle.fluid import Program, program_guard
 
 
 class TestSequenceExpandAs(OpTest):
@@ -84,5 +86,22 @@ class TestSequenceExpandAsCase3(TestSequenceExpandAs):
         self.inputs = {'X': (x_data, x_lod), 'Y': (y_data, y_lod)}
 
 
+class TestSequenceExpandAsOpError(unittest.TestCase):
+    def test_errors(self):
+        with program_guard(Program(), Program()):
+            # the input x must be Variable
+            x1 = np.random.random((2, 4)).astype("float32")
+            self.assertRaises(TypeError, fluid.layers.sequence_expand_as, x1)
+
+            # the dtype of input x must be float32, float64, int32 or int64
+            x2 = fluid.data(name='x2', shape=[None, 4], dtype="bool")
+            self.assertRaises(TypeError, fluid.layers.sequence_expand_as, x2)
+
+            # the input y must be Variable
+            x3 = fluid.data(name='x3', shape=[None, 4], dtype="float32")
+            y = np.random.random((2, 4)).astype("float32")
+            self.assertRaises(TypeError, fluid.layers.sequence_expand_as, x3, y)
+
+
 if __name__ == '__main__':
     unittest.main()
-- 
GitLab
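A sketch for running just the new negative-path tests on their own, assuming the working directory is python/paddle/fluid/tests/unittests/sequence/ as laid out in this patch:

    import unittest
    from test_sequence_expand_as import TestSequenceExpandAsOpError

    suite = unittest.defaultTestLoader.loadTestsFromTestCase(
        TestSequenceExpandAsOpError)
    unittest.TextTestRunner(verbosity=2).run(suite)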