From 479c47f31665029148cc2b317c27dad7a5ea47c6 Mon Sep 17 00:00:00 2001 From: Xing Wu Date: Wed, 13 May 2020 16:59:51 +0800 Subject: [PATCH] =?UTF-8?q?fix=20error=20info=20for=20transpose=20sequence?= =?UTF-8?q?=5Fconv=5Fpool=20max=5Fsequence=5Flen=20sequ=E2=80=A6=20(#24437?= =?UTF-8?q?)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix error info for transpose sequence_conv_pool max_sequence_len sequence_erase, test=develop * fix error info for transpose sequence_conv_pool max_sequence_len sequence_erase, test=develop * update modify, test=develop * update modify, test=develop * fixed some modifications, test=develop --- paddle/fluid/operators/max_sequence_len_op.cc | 3 +- .../sequence_ops/sequence_erase_op.cc | 13 ++--- .../sequence_ops/sequence_erase_op.cu | 6 ++- .../sequence_ops/sequence_erase_op.h | 8 ++- paddle/fluid/operators/transpose_op.cc | 50 ++++++++++--------- paddle/fluid/operators/transpose_op.h | 5 +- python/paddle/fluid/nets.py | 2 + 7 files changed, 51 insertions(+), 36 deletions(-) diff --git a/paddle/fluid/operators/max_sequence_len_op.cc b/paddle/fluid/operators/max_sequence_len_op.cc index 3ac9c584727..b47ec8bc70a 100644 --- a/paddle/fluid/operators/max_sequence_len_op.cc +++ b/paddle/fluid/operators/max_sequence_len_op.cc @@ -57,7 +57,8 @@ class MaxSeqenceLenOpProtoMaker : public framework::OpProtoAndCheckerMaker { class MaxSeqenceLenInferShape : public framework::InferShapeBase { public: void operator()(framework::InferShapeContext *context) const override { - PADDLE_ENFORCE(context->HasInput("RankTable")); + OP_INOUT_CHECK(context->HasInput("RankTable"), "Input", "RankTable", + "MaxSeqenceLen"); context->SetOutputDim("Out", {1}); } }; diff --git a/paddle/fluid/operators/sequence_ops/sequence_erase_op.cc b/paddle/fluid/operators/sequence_ops/sequence_erase_op.cc index 4343a76b484..79503d9714f 100644 --- a/paddle/fluid/operators/sequence_ops/sequence_erase_op.cc +++ 
b/paddle/fluid/operators/sequence_ops/sequence_erase_op.cc @@ -23,14 +23,15 @@ class SequenceEraseOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), - "Input(X) of SequenceErase operator should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("Out"), - "Output(Out) of SequenceErase operator should not be null."); + OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequenceErase"); + OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "SequenceErase"); auto x_dims = ctx->GetInputDim("X"); PADDLE_ENFORCE(x_dims.size() == 2 && x_dims[1] == 1, - "Input(X) of SequenceEraseOp should be a 2-D LoDTensor " - "with the 2nd dimension equal to 1."); + platform::errors::InvalidArgument( + "Input(X) of SequenceEraseOp should be a 2-D LoDTensor " + "with the 2nd dimension equal to 1, " + "but received size %d with the 2nd dimension %d.", + x_dims.size(), x_dims[1])); ctx->SetOutputDim("Out", x_dims); // The output LoDTensor's lod_level should be input X's lod_level. // For compile-time, we call SetLoDLevel to set output's lod_level. 
diff --git a/paddle/fluid/operators/sequence_ops/sequence_erase_op.cu b/paddle/fluid/operators/sequence_ops/sequence_erase_op.cu index 0401c22c92e..bacaaeadbf5 100644 --- a/paddle/fluid/operators/sequence_ops/sequence_erase_op.cu +++ b/paddle/fluid/operators/sequence_ops/sequence_erase_op.cu @@ -64,8 +64,10 @@ class SequenceEraseOpCUDAKernel : public framework::OpKernel { auto* out = ctx.Output("Out"); auto lod = in->lod(); - PADDLE_ENFORCE_EQ(lod[lod.size() - 1].back(), (size_t)in->numel(), - "The actual size mismatches with the LoD information."); + PADDLE_ENFORCE_EQ( + lod[lod.size() - 1].back(), (size_t)in->numel(), + platform::errors::InvalidArgument( + "The actual size mismatches with the LoD information.")); auto tokens = ctx.Attr>("tokens"); auto in_len = in->numel(); auto in_dat = in->data(); diff --git a/paddle/fluid/operators/sequence_ops/sequence_erase_op.h b/paddle/fluid/operators/sequence_ops/sequence_erase_op.h index 0c2d2894174..ed98b694b27 100644 --- a/paddle/fluid/operators/sequence_ops/sequence_erase_op.h +++ b/paddle/fluid/operators/sequence_ops/sequence_erase_op.h @@ -30,9 +30,13 @@ class SequenceEraseKernel : public framework::OpKernel { auto lod = in->lod(); PADDLE_ENFORCE_EQ( lod.empty(), false, - "Input(X) Tensor of SequenceEraseOp does not contain LoD information."); + platform::errors::InvalidArgument("Input(X) Tensor of SequenceEraseOp " "does not contain LoD information.")); PADDLE_ENFORCE_EQ(lod[lod.size() - 1].back(), (size_t)in->numel(), - "The actual size mismatches with the LoD information."); + platform::errors::InvalidArgument( + "The LoD information size %d mismatches with the " + "actual input size %d.", + lod[lod.size() - 1].back(), (size_t)in->numel())); auto tokens = ctx.Attr>("tokens"); auto in_len = in->numel(); auto in_dat = in->data(); diff --git a/paddle/fluid/operators/transpose_op.cc b/paddle/fluid/operators/transpose_op.cc index d32ecd37817..6849bd73950 100644 --- a/paddle/fluid/operators/transpose_op.cc +++ 
b/paddle/fluid/operators/transpose_op.cc @@ -31,31 +31,33 @@ class TransposeOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null"); - PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) should not be null"); + OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Transpose"); + OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Transpose"); auto x_dims = ctx->GetInputDim("X"); std::vector axis = ctx->Attrs().Get>("axis"); size_t x_rank = x_dims.size(); size_t axis_size = axis.size(); PADDLE_ENFORCE_EQ(x_rank, axis_size, - "ShapeError: The input tensor's dimension " - "should be equal to the axis's size. " - "But received input tensor's dimension is %d, " - "axis's size is %d", - x_rank, axis_size); + platform::errors::InvalidArgument( + "The input tensor's dimension " + "should be equal to the axis's size. " + "But received input tensor's dimension is %d, " + "axis's size is %d", + x_rank, axis_size)); std::vector count(axis_size, 0); for (size_t i = 0; i < axis_size; i++) { - PADDLE_ENFORCE( - axis[i] < static_cast(axis_size) && ++count[axis[i]] == 1, - "ValueError: Each element of Attribute axis should " - "be a unique value range from 0 to (dims - 1), " - "where the dims is the axis's size, " - "unique value means this axis value can appear only once. " - "But received axis[%d] is %d, axis_size is %d, " - "count[axis[%d]] is %d", - i, axis[i], axis_size, i, count[axis[i]]); + PADDLE_ENFORCE_EQ( + axis[i] < static_cast(axis_size) && ++count[axis[i]] == 1, true, + platform::errors::InvalidArgument( + "Each element of Attribute axis should " + "be a unique value range from 0 to (dims - 1), " + "where the dims is the axis's size, " + "unique value means this axis value can appear only once. 
" + "But received axis[%d] is %d, axis_size is %d, " + "count[axis[%d]] is %d", + i, axis[i], axis_size, i, count[axis[i]])); } framework::DDim out_dims(x_dims); @@ -149,9 +151,9 @@ class TransposeOpGrad : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null"); - PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), - "Input(Out@GRAD) should not be null"); + OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "TransposeOpGrad"); + OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input", + framework::GradVarName("Out"), "TransposeOpGrad"); auto x_dims = ctx->GetInputDim("X"); ctx->SetOutputDim(framework::GradVarName("X"), x_dims); if (ctx->HasOutput(framework::GradVarName("X"))) { @@ -193,8 +195,7 @@ class Transpose2Op : public TransposeOp { void InferShape(framework::InferShapeContext *ctx) const override { TransposeOp::InferShape(ctx); - PADDLE_ENFORCE(ctx->HasOutput("XShape"), - "Output(XShape) should not be null"); + OP_INOUT_CHECK(ctx->HasOutput("XShape"), "Output", "XShape", "Transpose2"); const auto &in_dims = ctx->GetInputDim("X"); std::vector x_shape_dim(in_dims.size() + 1); x_shape_dim[0] = 0; @@ -259,9 +260,10 @@ class Transpose2OpGrad : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("XShape"), "Input(XShape) should not be null"); - PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), - "Input(Out@GRAD) should not be null"); + OP_INOUT_CHECK(ctx->HasInput("XShape"), "Input", "XShape", + "Transpose2OpGrad"); + OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input", + framework::GradVarName("Out"), "Transpose2OpGrad"); if (ctx->HasOutput(framework::GradVarName("X"))) { auto xshape_dim = 
ctx->GetInputDim("XShape"); auto x_shape_dim = diff --git a/paddle/fluid/operators/transpose_op.h b/paddle/fluid/operators/transpose_op.h index 557cb408ee8..f2951e90ebe 100644 --- a/paddle/fluid/operators/transpose_op.h +++ b/paddle/fluid/operators/transpose_op.h @@ -53,7 +53,10 @@ inline void TransCompute(const int dim, const DeviceContext& dev_ctx, trans6(dev_ctx, in, out, axis); break; default: - PADDLE_THROW("Tensors with rank at most 6 are supported"); + PADDLE_THROW(platform::errors::InvalidArgument( + "Tensors with rank at most 6 are supported" + ", but received input tensor's rank is %d.", + dim)); } } diff --git a/python/paddle/fluid/nets.py b/python/paddle/fluid/nets.py index 35ca2972a25..50a954d3e07 100644 --- a/python/paddle/fluid/nets.py +++ b/python/paddle/fluid/nets.py @@ -305,6 +305,8 @@ def sequence_conv_pool(input, act="tanh", pool_type="sqrt") """ + + check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'input') conv_out = layers.sequence_conv( input=input, num_filters=num_filters, -- GitLab