diff --git a/paddle/fluid/operators/max_sequence_len_op.cc b/paddle/fluid/operators/max_sequence_len_op.cc
index 3ac9c584727e9278f07c6f321722c379037b93f5..b47ec8bc70a207c5f662e9a7ee0cdd944936e7a0 100644
--- a/paddle/fluid/operators/max_sequence_len_op.cc
+++ b/paddle/fluid/operators/max_sequence_len_op.cc
@@ -57,7 +57,8 @@ class MaxSeqenceLenOpProtoMaker : public framework::OpProtoAndCheckerMaker {
 class MaxSeqenceLenInferShape : public framework::InferShapeBase {
  public:
   void operator()(framework::InferShapeContext *context) const override {
-    PADDLE_ENFORCE(context->HasInput("RankTable"));
+    OP_INOUT_CHECK(context->HasInput("RankTable"), "Input", "RankTable",
+                   "MaxSeqenceLen");
     context->SetOutputDim("Out", {1});
   }
 };
diff --git a/paddle/fluid/operators/sequence_ops/sequence_erase_op.cc b/paddle/fluid/operators/sequence_ops/sequence_erase_op.cc
index 4343a76b484126ce1a5be911d1a3cf5d270741a1..79503d9714f5b2b7a0ce902316c923134dacd476 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_erase_op.cc
+++ b/paddle/fluid/operators/sequence_ops/sequence_erase_op.cc
@@ -23,14 +23,15 @@ class SequenceEraseOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SequenceErase operator should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of SequenceErase operator should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequenceErase");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "SequenceErase");
     auto x_dims = ctx->GetInputDim("X");
     PADDLE_ENFORCE(x_dims.size() == 2 && x_dims[1] == 1,
-                   "Input(X) of SequenceEraseOp should be a 2-D LoDTensor "
-                   "with the 2nd dimension equal to 1.");
+                   platform::errors::InvalidArgument(
+                       "Input(X) of SequenceEraseOp should be a 2-D LoDTensor "
+                       "with the 2nd dimension equal to 1, "
+                       "but received size %d with the 2nd dimension %d.",
+                       x_dims.size(), x_dims[1]));
     ctx->SetOutputDim("Out", x_dims);
     // The output LoDTensor's lod_level should be input X's lod_level.
     // For compile-time, we call SetLoDLevel to set output's lod_level.
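Both hunks above show the pattern this change applies throughout: presence checks move to OP_INOUT_CHECK, and value checks get a typed platform::errors::InvalidArgument that echoes the values actually received. A minimal standalone C++ sketch of the shape contract the new SequenceErase message describes (hypothetical names, no Paddle dependencies):

    #include <cstdint>
    #include <cstdio>
    #include <stdexcept>
    #include <vector>

    // Mirrors the contract in the new SequenceErase message: a 2-D tensor
    // whose second dimension is 1, with the received values echoed back.
    void CheckEraseInputShape(const std::vector<int64_t>& dims) {
      if (dims.size() != 2 || dims[1] != 1) {
        char msg[256];
        std::snprintf(msg, sizeof(msg),
                      "Input(X) should be a 2-D LoDTensor with the 2nd "
                      "dimension equal to 1, but received size %zu with "
                      "the 2nd dimension %lld.",
                      dims.size(),
                      dims.size() > 1 ? static_cast<long long>(dims[1]) : -1LL);
        throw std::invalid_argument(msg);
      }
    }

    int main() {
      CheckEraseInputShape({8, 1});    // satisfies the contract
      try {
        CheckEraseInputShape({8, 3});  // throws; message carries 2 and 3
      } catch (const std::invalid_argument& e) {
        std::printf("%s\n", e.what());
      }
      return 0;
    }

The point of the pattern is that the error now names both the expectation and the observed values, so a failing shape check is diagnosable without a debugger.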
diff --git a/paddle/fluid/operators/sequence_ops/sequence_erase_op.cu b/paddle/fluid/operators/sequence_ops/sequence_erase_op.cu
index 0401c22c92e1a9be35c2ff6b2c7e95924afe3f1b..bacaaeadbf5765e27cda451837c4b2e004c69af7 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_erase_op.cu
+++ b/paddle/fluid/operators/sequence_ops/sequence_erase_op.cu
@@ -64,8 +64,10 @@ class SequenceEraseOpCUDAKernel : public framework::OpKernel<T> {
     auto* in = ctx.Input<LoDTensor>("X");
     auto* out = ctx.Output<LoDTensor>("Out");
     auto lod = in->lod();
-    PADDLE_ENFORCE_EQ(lod[lod.size() - 1].back(), (size_t)in->numel(),
-                      "The actual size mismatches with the LoD information.");
+    PADDLE_ENFORCE_EQ(
+        lod[lod.size() - 1].back(), (size_t)in->numel(),
+        platform::errors::InvalidArgument(
+            "The actual size mismatches with the LoD information."));
     auto tokens = ctx.Attr<std::vector<int>>("tokens");
     auto in_len = in->numel();
     auto in_dat = in->data<T>();
diff --git a/paddle/fluid/operators/sequence_ops/sequence_erase_op.h b/paddle/fluid/operators/sequence_ops/sequence_erase_op.h
index 0c2d2894174c4036e8443fca9ada5dcb932c3120..ed98b694b27547ec0adaeaee40a91ae0c16e3dfb 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_erase_op.h
+++ b/paddle/fluid/operators/sequence_ops/sequence_erase_op.h
@@ -30,9 +30,13 @@ class SequenceEraseKernel : public framework::OpKernel<T> {
     auto lod = in->lod();
     PADDLE_ENFORCE_EQ(
         lod.empty(), false,
-        "Input(X) Tensor of SequenceEraseOp does not contain LoD information.");
+        platform::errors::InvalidArgument("Input(X) Tensor of SequenceEraseOp "
+                                          "does not contain LoD information."));
     PADDLE_ENFORCE_EQ(lod[lod.size() - 1].back(), (size_t)in->numel(),
-                      "The actual size mismatches with the LoD information.");
+                      platform::errors::InvalidArgument(
+                          "The actual input size %d mismatches with the LoD "
+                          "information size %d.",
+                          lod[lod.size() - 1].back(), (size_t)in->numel()));
     auto tokens = ctx.Attr<std::vector<int>>("tokens");
     auto in_len = in->numel();
     auto in_dat = in->data<T>();
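The CUDA and CPU kernels enforce the same consistency invariant: in a LoD (level-of-detail) description, the levels hold cumulative offsets, and the last offset of the finest level must equal the tensor's total element count. A standalone sketch of just that invariant (the alias and helper below are hypothetical, not Paddle types):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // A LoD is a list of levels; each level stores cumulative offsets,
    // e.g. {0, 3, 8} describes two sequences of lengths 3 and 5.
    using LoD = std::vector<std::vector<std::size_t>>;

    // The invariant enforced by both kernels: the last offset of the
    // finest level must equal the tensor's total element count.
    bool LoDMatchesNumel(const LoD& lod, std::size_t numel) {
      if (lod.empty() || lod.back().empty()) return false;
      return lod.back().back() == numel;
    }

    int main() {
      LoD lod = {{0, 3, 8}};
      assert(LoDMatchesNumel(lod, 8));   // consistent: 3 + 5 elements
      assert(!LoDMatchesNumel(lod, 9));  // mismatch: InvalidArgument in Paddle
      return 0;
    }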
" + "But received input tensor's dimension is %d, " + "axis's size is %d", + x_rank, axis_size)); std::vector count(axis_size, 0); for (size_t i = 0; i < axis_size; i++) { - PADDLE_ENFORCE( - axis[i] < static_cast(axis_size) && ++count[axis[i]] == 1, - "ValueError: Each element of Attribute axis should " - "be a unique value range from 0 to (dims - 1), " - "where the dims is the axis's size, " - "unique value means this axis value can appear only once. " - "But received axis[%d] is %d, axis_size is %d, " - "count[axis[%d]] is %d", - i, axis[i], axis_size, i, count[axis[i]]); + PADDLE_ENFORCE_EQ( + axis[i] < static_cast(axis_size) && ++count[axis[i]] == 1, true, + platform::errors::InvalidArgument( + "Each element of Attribute axis should " + "be a unique value range from 0 to (dims - 1), " + "where the dims is the axis's size, " + "unique value means this axis value can appear only once. " + "But received axis[%d] is %d, axis_size is %d, " + "count[axis[%d]] is %d", + i, axis[i], axis_size, i, count[axis[i]])); } framework::DDim out_dims(x_dims); @@ -149,9 +151,9 @@ class TransposeOpGrad : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null"); - PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), - "Input(Out@GRAD) should not be null"); + OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "TransposeOpGrad"); + OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input", + framework::GradVarName("Out"), "TransposeOpGrad"); auto x_dims = ctx->GetInputDim("X"); ctx->SetOutputDim(framework::GradVarName("X"), x_dims); if (ctx->HasOutput(framework::GradVarName("X"))) { @@ -193,8 +195,7 @@ class Transpose2Op : public TransposeOp { void InferShape(framework::InferShapeContext *ctx) const override { TransposeOp::InferShape(ctx); - PADDLE_ENFORCE(ctx->HasOutput("XShape"), - "Output(XShape) should not be null"); + OP_INOUT_CHECK(ctx->HasOutput("XShape"), "Output", "XShape", "Transpose2"); const auto &in_dims = ctx->GetInputDim("X"); std::vector x_shape_dim(in_dims.size() + 1); x_shape_dim[0] = 0; @@ -259,9 +260,10 @@ class Transpose2OpGrad : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("XShape"), "Input(XShape) should not be null"); - PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), - "Input(Out@GRAD) should not be null"); + OP_INOUT_CHECK(ctx->HasInput("XShape"), "Input", "XShape", + "Transpose2OpGrad"); + OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input", + framework::GradVarName("Out"), "Transpose2OpGrad"); if (ctx->HasOutput(framework::GradVarName("X"))) { auto xshape_dim = ctx->GetInputDim("XShape"); auto x_shape_dim = diff --git a/paddle/fluid/operators/transpose_op.h b/paddle/fluid/operators/transpose_op.h index 557cb408ee8af845eec5de65239998ce02de4725..f2951e90ebe883c5006081ff7e4c8f97742cafff 100644 --- a/paddle/fluid/operators/transpose_op.h +++ b/paddle/fluid/operators/transpose_op.h @@ -53,7 +53,10 @@ inline void TransCompute(const int dim, const DeviceContext& dev_ctx, trans6(dev_ctx, in, out, axis); break; default: - PADDLE_THROW("Tensors with rank at most 6 are supported"); + PADDLE_THROW(platform::errors::InvalidArgument( + "Tensors with rank at most 6 are supported" + ", but received input tensor's rank is %d,", + 
diff --git a/python/paddle/fluid/nets.py b/python/paddle/fluid/nets.py
index 35ca2972a25dd6434d61efc1de8e104527808a59..50a954d3e072fc653bb6cb4c4db6a87ba125dbeb 100644
--- a/python/paddle/fluid/nets.py
+++ b/python/paddle/fluid/nets.py
@@ -305,6 +305,8 @@ def sequence_conv_pool(input,
             act="tanh",
             pool_type="sqrt")
     """
+
+    check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'input')
     conv_out = layers.sequence_conv(
         input=input,
         num_filters=num_filters,
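On the Python side, sequence_conv_pool now rejects unsupported dtypes up front via check_variable_and_dtype, before any layers are built. The pattern is the same early-rejection idea as the C++ checks above; a standalone C++ sketch of it, kept in one language with the other sketches here (the enum and helper are hypothetical, not Paddle API):

    #include <algorithm>
    #include <initializer_list>
    #include <stdexcept>
    #include <string>

    enum class DType { kFloat32, kFloat64, kInt32 };

    // Reject unsupported element types before any real work happens,
    // mirroring the early check added to sequence_conv_pool above.
    void CheckDType(DType actual, std::initializer_list<DType> allowed,
                    const std::string& arg, const std::string& op) {
      if (std::find(allowed.begin(), allowed.end(), actual) == allowed.end()) {
        throw std::invalid_argument("The dtype of '" + arg + "' in " + op +
                                    " is not in the allowed set.");
      }
    }

    int main() {
      CheckDType(DType::kFloat32, {DType::kFloat32, DType::kFloat64},
                 "input", "sequence_conv_pool");  // passes silently
      try {
        CheckDType(DType::kInt32, {DType::kFloat32, DType::kFloat64},
                   "input", "sequence_conv_pool");
      } catch (const std::invalid_argument&) {
        // an int32 input is rejected before any graph is constructed
      }
      return 0;
    }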