diff --git a/paddle/fluid/operators/concat_op.cc b/paddle/fluid/operators/concat_op.cc
index 9b7b3a07549dcde6d316a91ddeb4363e7692a788..e4ef952b7bdf99bdad1d60830f241d1a3b56689d 100644
--- a/paddle/fluid/operators/concat_op.cc
+++ b/paddle/fluid/operators/concat_op.cc
@@ -30,18 +30,17 @@ class ConcatOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE_GE(ctx->Inputs("X").size(), 1UL,
-                      "Inputs(X) of ConcatOp should not be empty.");
-
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output(Out) of ConcatOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInputs("X"), "Input", "X", "Concat");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Concat");
 
     auto inputs_dims = ctx->GetInputsDim("X");
 
     const size_t inputs_num = inputs_dims.size();
-    PADDLE_ENFORCE_GT(inputs_num, 0,
-                      "ShapeError: Input tensors count should > 0. But "
-                      "recevied inputs' length is 0.");
+    PADDLE_ENFORCE_GT(
+        inputs_num, static_cast<size_t>(0),
+        platform::errors::InvalidArgument(
+            "The number of input tensors in concat op should > 0. But "
+            "received inputs' length is 0."));
     if (inputs_num == 1) {
       VLOG(3) << "Warning: concat op have only one input, may waste memory";
     }
diff --git a/paddle/fluid/operators/concat_op.h b/paddle/fluid/operators/concat_op.h
index cf0ae2ec8aa2923c911994207e4135d449fd939d..6f4b9147c66e06d36a7ce7612ffd252ff7f7a21a 100644
--- a/paddle/fluid/operators/concat_op.h
+++ b/paddle/fluid/operators/concat_op.h
@@ -49,10 +49,11 @@ static inline framework::DDim ComputeAndCheckShape(
           // check all shape in run time
           PADDLE_ENFORCE_EQ(
               inputs_dims[0][j], inputs_dims[i][j],
-              "ShapeError: Dimension %d in inputs' shapes must be equal. "
-              "But recevied input[0]'s shape = "
-              "[%s], input[%d]'s shape = [%s].",
-              j, inputs_dims[0], i, inputs_dims[i]);
+              platform::errors::InvalidArgument(
+                  "The shape of input[%d] must be equal to input[0]. "
+                  "But received input[0]'s shape = "
+                  "[%s], input[%d]'s shape = [%s].",
+                  i, inputs_dims[0], i, inputs_dims[i]));
         }
       }
     }
@@ -78,7 +79,9 @@ class ConcatKernel : public framework::OpKernel<T> {
   void Compute(const framework::ExecutionContext& ctx) const override {
     auto ins = ctx.MultiInput<framework::LoDTensor>("X");
     framework::LoDTensor* out = ctx.Output<framework::LoDTensor>("Out");
-    PADDLE_ENFORCE_EQ(ins[0] != nullptr, true, "The input should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(
+        ins[0], platform::errors::NotFound(
+                    "The first input of concat should not be null."));
     auto axis = ctx.Attr<int>("axis");
     bool need_resize_out_dims = false;
     if (ctx.HasInput("AxisTensor")) {
@@ -178,7 +181,9 @@ class ConcatGradKernel : public framework::OpKernel<T> {
         }
       }
     }
-    PADDLE_ENFORCE_EQ(ins[0] != nullptr, true, "The input should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(
+        ins[0], platform::errors::NotFound(
+                    "The first input of concat should not be null."));
     auto axis = ctx.Attr<int>("axis");
 
     if (ctx.HasInput("AxisTensor")) {
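
The pattern throughout this diff is the same in every hunk: boolean-style checks such as PADDLE_ENFORCE_EQ(ins[0] != nullptr, true, "...") are replaced by PADDLE_ENFORCE_NOT_NULL plus a typed error object from platform::errors, so a failed check reports an error class (NotFound, InvalidArgument) along with a formatted message instead of a bare string. Below is a minimal, self-contained C++ sketch of that pattern; ENFORCE_NOT_NULL and the errors namespace here are hypothetical stand-ins for illustration, not Paddle's actual implementation.

// Minimal sketch of the "null check with typed error" pattern used above.
// ENFORCE_NOT_NULL and errors::NotFound are stand-ins, not Paddle's macros.
#include <cstdio>
#include <stdexcept>
#include <string>

namespace errors {
// Stand-in for platform::errors::NotFound; the real class also carries an
// error code and extra context.
inline std::runtime_error NotFound(const std::string& msg) {
  return std::runtime_error("NotFoundError: " + msg);
}
}  // namespace errors

// Stand-in for PADDLE_ENFORCE_NOT_NULL: throw the supplied typed error
// when the pointer is null, instead of comparing a boolean with EQ.
#define ENFORCE_NOT_NULL(ptr, error) \
  do {                               \
    if ((ptr) == nullptr) {          \
      throw (error);                 \
    }                                \
  } while (0)

int main() {
  const int* first_input = nullptr;
  try {
    ENFORCE_NOT_NULL(first_input,
                     errors::NotFound(
                         "The first input of concat should not be null."));
  } catch (const std::exception& e) {
    std::printf("%s\n", e.what());  // NotFoundError: The first input ...
  }
  return 0;
}

Keeping the error class separate from the message is what lets callers and tests match on the category of failure rather than parsing message text, which is the motivation for this style of refactor.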