From 41b59555387616edef6bd5ef1b9093ab92b90db1 Mon Sep 17 00:00:00 2001
From: Chen Weihang
Date: Wed, 23 Sep 2020 10:55:01 +0800
Subject: [PATCH] Polish no owner ops error message (#27448)

* polish no owner op error message

* fix unittest failure

* polish details based on reviewer comments
---
 .../operators/add_position_encoding_op.cc     | 10 +++-
 paddle/fluid/operators/assign_value_op.h      |  5 +-
 paddle/fluid/operators/coalesce_tensor_op.cc  | 60 ++++++++++++-------
 .../fluid/operators/dequantize_abs_max_op.cc  |  6 +-
 paddle/fluid/operators/detection/gpc.cc       | 30 +++++++---
 .../distributed_ops/fetch_barrier_op.cc       |  4 +-
 .../distributed_ops/send_recv_util.h          | 10 ++--
 paddle/fluid/operators/gru_unit_op.h          |  8 ++-
 paddle/fluid/operators/interpolate_op.cc      | 47 ++++++++-------
 paddle/fluid/operators/merge_lod_tensor_op.cc | 46 ++++++++------
 paddle/fluid/operators/strided_memcpy.h       | 32 +++++++---
 paddle/fluid/operators/var_conv_2d_op.cc      | 38 ++++++++----
 12 files changed, 193 insertions(+), 103 deletions(-)

diff --git a/paddle/fluid/operators/add_position_encoding_op.cc b/paddle/fluid/operators/add_position_encoding_op.cc
index 629fedba6e..e5fcd270eb 100644
--- a/paddle/fluid/operators/add_position_encoding_op.cc
+++ b/paddle/fluid/operators/add_position_encoding_op.cc
@@ -69,12 +69,18 @@ class AddPositionEncodingOpMaker : public framework::OpProtoAndCheckerMaker {
     AddAttr<float>("alpha", "The scale of Original Embedding.")
         .SetDefault(1.0f)
         .AddCustomChecker([](const float& alpha) {
-          PADDLE_ENFORCE(alpha >= 0.0f, "'alpha' must be above 0.0.");
+          PADDLE_ENFORCE_GE(
+              alpha, 0.0f,
+              platform::errors::InvalidArgument(
+                  "Attribute 'alpha' must be greater than or equal to 0.0."));
         });
     AddAttr<float>("beta", "The scale of Position Embedding.")
         .SetDefault(1.0f)
         .AddCustomChecker([](const float& beta) {
-          PADDLE_ENFORCE(beta >= 0.0f, "'beta' must be between 0.0.");
+          PADDLE_ENFORCE_GE(
+              beta, 0.0f,
+              platform::errors::InvalidArgument(
+                  "Attribute 'beta' must be greater than or equal to 0.0."));
         });
     AddComment(R"DOC(
     Add Position Encoding Operator.
diff --git a/paddle/fluid/operators/assign_value_op.h b/paddle/fluid/operators/assign_value_op.h
index b462c43d23..1418d96b67 100644
--- a/paddle/fluid/operators/assign_value_op.h
+++ b/paddle/fluid/operators/assign_value_op.h
@@ -76,7 +76,10 @@ class AssignValueKernel : public framework::OpKernel<T> {
         value_name = "int64_values";
         break;
       default:
-        PADDLE_THROW("Unsupported dtype for assign_value_op: %d", dtype);
+        PADDLE_THROW(platform::errors::Unimplemented(
+            "Unsupported data type(code %d) for AssignValue operator, only "
+            "supports bool, int32, float32 and int64.",
+            dtype));
         break;
     }
     CopyVecotorToTensor<T>(value_name, out, ctx);
diff --git a/paddle/fluid/operators/coalesce_tensor_op.cc b/paddle/fluid/operators/coalesce_tensor_op.cc
index 5b7bcde21a..d67d90c348 100644
--- a/paddle/fluid/operators/coalesce_tensor_op.cc
+++ b/paddle/fluid/operators/coalesce_tensor_op.cc
@@ -33,29 +33,37 @@ class CoalesceTensorOpKernel : public framework::OpKernel<T> {
     auto out_vars = context.MultiOutputVar("Output");

     PADDLE_ENFORCE_GT(in_var_names.size(), static_cast<size_t>(0),
-                      "The CoalesceTensorOp has no input.");
-    PADDLE_ENFORCE_EQ(
-        in_var_names.size(), out_var_names.size(),
-        "The number of CoalesceTensorOp's input and output is not match.");
+                      platform::errors::InvalidArgument(
+                          "The CoalesceTensor operator has no input."));
+    PADDLE_ENFORCE_EQ(in_var_names.size(), out_var_names.size(),
+                      platform::errors::InvalidArgument(
+                          "The number of CoalesceTensor operator's input and "
+                          "output does not match, "
+                          "input number is %u, output number is %u.",
+                          in_var_names.size(), out_var_names.size()));

     // Input & Output check: only support LoDTensor
     for (size_t i = 0; i < in_var_names.size(); ++i) {
       PADDLE_ENFORCE_NOT_NULL(
           in_vars[i],
-          "The input variable %s of CoalesceTensorOp does not exist.",
-          in_var_names[i]);
+          platform::errors::NotFound("The input variable %s of CoalesceTensor "
+                                     "operator does not exist.",
+                                     in_var_names[i]));
       PADDLE_ENFORCE_NOT_NULL(
           out_vars[i],
-          "The output variable %s of CoalesceTensorOp does not exist.",
-          out_var_names[i]);
-      PADDLE_ENFORCE_EQ(
-          in_vars[i]->IsType<framework::LoDTensor>(), true,
-          "The input variable %s of CoalesceTensorOp is not LoDTensor.",
-          in_var_names[i]);
-      PADDLE_ENFORCE_EQ(
-          out_vars[i]->IsType<framework::LoDTensor>(), true,
-          "The output variable %s of CoalesceTensorOp is not LoDTensor.",
-          in_var_names[i]);
+          platform::errors::NotFound("The output variable %s of CoalesceTensor "
+                                     "operator does not exist.",
+                                     out_var_names[i]));
+      PADDLE_ENFORCE_EQ(in_vars[i]->IsType<framework::LoDTensor>(), true,
+                        platform::errors::InvalidArgument(
+                            "The input variable %s of CoalesceTensor operator "
+                            "is not LoDTensor.",
+                            in_var_names[i]));
+      PADDLE_ENFORCE_EQ(out_vars[i]->IsType<framework::LoDTensor>(), true,
+                        platform::errors::InvalidArgument(
+                            "The output variable %s of CoalesceTensor operator "
+                            "is not LoDTensor.",
+                            in_var_names[i]));
     }

     auto in_tensors = context.MultiInput<framework::LoDTensor>("Input");
@@ -64,7 +72,10 @@ class CoalesceTensorOpKernel : public framework::OpKernel<T> {
       for (size_t i = 0; i < in_var_names.size(); ++i) {
         PADDLE_ENFORCE_EQ(
             in_var_names[i], out_var_names[i],
-            "The input and output variable of CoalesceTensorOp is different.");
+            platform::errors::InvalidArgument(
+                "The input and output variable of CoalesceTensor operator is "
+                "different, %dth input is %s, %dth output is %s.",
+                i, in_var_names[i], i, out_var_names[i]));
       }
     } else {
       // Init the output as input
@@ -134,16 +145,25 @@ class CoalesceTensorOpKernel : public framework::OpKernel<T> {
       const std::vector<const framework::LoDTensor *> &lod_tensors,
       const std::vector<std::string> var_names, size_t *numel,
       const size_t &size_of_dtype, const platform::Place &place) const {
-    PADDLE_ENFORCE_EQ(lod_tensors.size(), var_names.size());
+    PADDLE_ENFORCE_EQ(
+        lod_tensors.size(), var_names.size(),
+        platform::errors::InvalidArgument(
+            "The number of input tensors and variables does not match, the "
+            "number of input tensors is %u, the number of input variables is "
+            "%u.",
+            lod_tensors.size(), var_names.size()));
     *numel = 0;
     std::stringstream ss;
     ss << "alloc_space_for_vars: ";
     for (size_t i = 0; i < var_names.size(); ++i) {
       PADDLE_ENFORCE_EQ(lod_tensors[i]->IsInitialized(), true,
-                        "%s is not initialized.", var_names[i]);
+                        platform::errors::InvalidArgument(
+                            "Tensor `%s` is not initialized.", var_names[i]));

       auto size = lod_tensors[i]->numel();
-      PADDLE_ENFORCE_GT(size, 0);
+      PADDLE_ENFORCE_GT(
+          size, 0,
+          platform::errors::InvalidArgument(
+              "The number of tensor `%s`'s elements is 0.", var_names[i]));
       ss << "input(" << var_names[i] << ") dim:(" << lod_tensors[i]->dims()
          << ") "
          << " addres:" << lod_tensors[i]->data<void>() << ", ";
diff --git a/paddle/fluid/operators/dequantize_abs_max_op.cc b/paddle/fluid/operators/dequantize_abs_max_op.cc
index 48743f2e48..0d4d68d9f6 100644
--- a/paddle/fluid/operators/dequantize_abs_max_op.cc
+++ b/paddle/fluid/operators/dequantize_abs_max_op.cc
@@ -45,10 +45,8 @@ class DequantizeMaxAbsOp : public framework::OperatorWithKernel {
       : OperatorWithKernel(type, inputs, outputs, attrs) {}

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input(X) of DequantizeMaxAbsOp should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output(Out) of DequantizeMaxAbsOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "DequantizeMaxAbs");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "DequantizeMaxAbs");

     ctx->ShareDim("X", /*->*/ "Out");
     ctx->ShareLoD("X", /*->*/ "Out");
diff --git a/paddle/fluid/operators/detection/gpc.cc b/paddle/fluid/operators/detection/gpc.cc
index b46d231d0f..6b1b0cd8b3 100644
--- a/paddle/fluid/operators/detection/gpc.cc
+++ b/paddle/fluid/operators/detection/gpc.cc
@@ -532,7 +532,8 @@ static int count_contours(polygon_node *polygon) {
 }

 static void add_left(polygon_node *p, double x, double y) {
-  PADDLE_ENFORCE_NOT_NULL(p);
+  PADDLE_ENFORCE_NOT_NULL(p, paddle::platform::errors::InvalidArgument(
+                                 "Input polygon node is nullptr."));
   vertex_node *nv = NULL;

   /* Create a new vertex node and set its fields */
@@ -588,7 +589,8 @@ static void add_right(polygon_node *p, double x, double y) {
 }

 static void merge_right(polygon_node *p, polygon_node *q, polygon_node *list) {
-  PADDLE_ENFORCE_NOT_NULL(p);
+  PADDLE_ENFORCE_NOT_NULL(p, paddle::platform::errors::InvalidArgument(
+                                 "Input polygon node is nullptr."));
   polygon_node *target = NULL;

   /* Label contour as external */
@@ -664,7 +666,8 @@ void add_vertex(vertex_node **t, double x, double y) {
 }

 void gpc_vertex_create(edge_node *e, int p, int s, double x, double y) {
-  PADDLE_ENFORCE_NOT_NULL(e);
+  PADDLE_ENFORCE_NOT_NULL(e, paddle::platform::errors::InvalidArgument(
+                                 "Input edge node is nullptr."));
   add_vertex(&(e->outp[p]->v[s]), x, y);
   e->outp[p]->active++;
 }
@@ -693,7 +696,8 @@ static bbox *create_contour_bboxes(gpc_polygon *p) {
   gpc_malloc<bbox>(box, p->num_contours * sizeof(bbox),
                    const_cast<char *>("Bounding box creation"));
-  PADDLE_ENFORCE_NOT_NULL(box);
+  PADDLE_ENFORCE_NOT_NULL(box, paddle::platform::errors::ResourceExhausted(
+                                   "Failed to malloc box memory."));

   /* Construct contour bounding boxes */
   for (c = 0; c < p->num_contours; c++) {
@@ -857,7 +861,9 @@ void gpc_add_contour(gpc_polygon *p, gpc_vertex_list *new_contour, int hole) {
     /* Create an extended hole array */
     gpc_malloc<int>(extended_hole, (p->num_contours + 1) * sizeof(int),
                     const_cast<char *>("contour hole addition"));
-    PADDLE_ENFORCE_NOT_NULL(extended_hole);
+    PADDLE_ENFORCE_NOT_NULL(extended_hole,
+                            paddle::platform::errors::ResourceExhausted(
+                                "Failed to malloc extended hole memory."));

     /* Create an extended contour array */
     gpc_malloc<gpc_vertex_list>(extended_contour,
@@ -975,7 +981,9 @@ void gpc_polygon_clip(gpc_op op, gpc_polygon *subj, gpc_polygon *clip,
   /* Build scanbeam table from scanbeam tree */
   gpc_malloc<double>(sbt, sbt_entries * sizeof(double),
                      const_cast<char *>("sbt creation"));
-  PADDLE_ENFORCE_NOT_NULL(sbt);
+  PADDLE_ENFORCE_NOT_NULL(sbt, paddle::platform::errors::ResourceExhausted(
+                                   "Failed to malloc scanbeam table memory."));
+
   build_sbt(&scanbeam, sbt, sbtree);
   scanbeam = 0;
   free_sbtree(&sbtree);
@@ -1017,7 +1025,9 @@ void gpc_polygon_clip(gpc_op op, gpc_polygon *subj, gpc_polygon *clip,
       e0 = aet;
       e1 = aet;

       /* Set up bundle fields of first edge */
-      PADDLE_ENFORCE_NOT_NULL(aet);
+      PADDLE_ENFORCE_NOT_NULL(aet, paddle::platform::errors::InvalidArgument(
+                                       "Edge node AET is nullptr."));
+
       aet->bundle[ABOVE][aet->type] = (aet->top.y != yb);
       aet->bundle[ABOVE][!aet->type] = 0;
       aet->bstate[ABOVE] = UNBUNDLED;
@@ -1612,7 +1622,8 @@ void gpc_tristrip_clip(gpc_op op, gpc_polygon *subj, gpc_polygon *clip,
   /* Build scanbeam table from scanbeam tree */
   gpc_malloc<double>(sbt, sbt_entries * sizeof(double),
                      const_cast<char *>("sbt creation"));
-  PADDLE_ENFORCE_NOT_NULL(sbt);
+  PADDLE_ENFORCE_NOT_NULL(sbt, paddle::platform::errors::ResourceExhausted(
+                                   "Failed to malloc scanbeam table memory."));
   build_sbt(&scanbeam, sbt, sbtree);
   scanbeam = 0;
   free_sbtree(&sbtree);
@@ -1650,7 +1661,8 @@ void gpc_tristrip_clip(gpc_op op, gpc_polygon *subj, gpc_polygon *clip,
       e1 = aet;

       /* Set up bundle fields of first edge */
-      PADDLE_ENFORCE_NOT_NULL(aet);
+      PADDLE_ENFORCE_NOT_NULL(aet, paddle::platform::errors::InvalidArgument(
+                                       "Edge node AET is nullptr."));
       aet->bundle[ABOVE][aet->type] = (aet->top.y != yb);
       aet->bundle[ABOVE][!aet->type] = 0;
       aet->bstate[ABOVE] = UNBUNDLED;
diff --git a/paddle/fluid/operators/distributed_ops/fetch_barrier_op.cc b/paddle/fluid/operators/distributed_ops/fetch_barrier_op.cc
index b064265917..c9f9daf3b3 100644
--- a/paddle/fluid/operators/distributed_ops/fetch_barrier_op.cc
+++ b/paddle/fluid/operators/distributed_ops/fetch_barrier_op.cc
@@ -48,7 +48,9 @@ class FetchBarrierOp : public framework::OperatorBase {
     }

     for (size_t i = 0; i < rets.size(); i++) {
-      PADDLE_ENFORCE_NE(rets[i]->Wait(), 0U, "internal error in RPCClient");
+      PADDLE_ENFORCE_NE(rets[i]->Wait(), 0U,
+                        platform::errors::Unavailable(
+                            "Internal error occurred in RPCClient."));
     }
   }
 };
diff --git a/paddle/fluid/operators/distributed_ops/send_recv_util.h b/paddle/fluid/operators/distributed_ops/send_recv_util.h
index c05a1ff1da..7dc0596ac3 100644
--- a/paddle/fluid/operators/distributed_ops/send_recv_util.h
+++ b/paddle/fluid/operators/distributed_ops/send_recv_util.h
@@ -34,16 +34,16 @@ inline bool NeedSend(const framework::Scope& scope,
       std::string::npos)
     return false;
   auto* var = scope.FindVar(varname);
-  PADDLE_ENFORCE_NOT_NULL(var, "Can not find variable '%s' in the send side.",
-                          varname);
+  PADDLE_ENFORCE_NOT_NULL(
+      var, platform::errors::NotFound(
+               "Can not find variable '%s' in the send side.", varname));
   if (var->IsType<framework::LoDTensor>()) {
     return var->Get<framework::LoDTensor>().IsInitialized();
   } else if (var->IsType<framework::SelectedRows>()) {
     return var->Get<framework::SelectedRows>().rows().size() > 0UL;
   } else {
-    PADDLE_THROW(
-        "Variable type in send side should be in "
-        "[LodTensor, SelectedRows]");
+    PADDLE_THROW(platform::errors::Unimplemented(
+        "Variable type in send side should be LodTensor or SelectedRows."));
   }
   return false;
 }
diff --git a/paddle/fluid/operators/gru_unit_op.h b/paddle/fluid/operators/gru_unit_op.h
index 712ef05d86..4865a02c52 100644
--- a/paddle/fluid/operators/gru_unit_op.h
+++ b/paddle/fluid/operators/gru_unit_op.h
@@ -47,7 +47,9 @@ class GRUUnitKernel : public framework::OpKernel<T> {
     else if (act_type == relu)
       ReluFunctor<T>()(d, x, y);
     else
-      PADDLE_THROW("unsupported activation type");
+      PADDLE_THROW(platform::errors::Unimplemented(
+          "Unsupported activation type, only supports identity, sigmoid, tanh "
+          "and relu."));
   }

   void Compute(const framework::ExecutionContext& context) const override {
@@ -137,7 +139,9 @@ class GRUUnitGradKernel : public framework::OpKernel<T> {
     else if (act_type == relu)
       ReluGradFunctor<T>()(d, x, y, dy, dx);
     else
-      PADDLE_THROW("unsupported activation type");
+      PADDLE_THROW(platform::errors::Unimplemented(
+          "Unsupported activation type, only supports identity, sigmoid, tanh "
+          "and relu."));
   }

   void Compute(const framework::ExecutionContext& context) const override {
diff --git a/paddle/fluid/operators/interpolate_op.cc b/paddle/fluid/operators/interpolate_op.cc
index 1e99e22e12..e8a9ed878e 100644
--- a/paddle/fluid/operators/interpolate_op.cc
+++ b/paddle/fluid/operators/interpolate_op.cc
@@ -104,12 +104,13 @@ static void Interpolate2DInferShapeCheck(framework::InferShapeContext* ctx) {
   auto dim_x = ctx->GetInputDim("X");
   auto interp_method = ctx->Attrs().Get<std::string>("interp_method");
-  PADDLE_ENFORCE(
-      "bilinear" == interp_method || "nearest" == interp_method ||
-          "bicubic" == interp_method,
-      "Interpolation method can only be \"bilinear\" or \"nearest\" when "
-      "Input(X) dimension is 4, but got method = %s .",
-      interp_method);
+  PADDLE_ENFORCE_EQ("bilinear" == interp_method || "nearest" == interp_method ||
+                        "bicubic" == interp_method,
+                    true, platform::errors::InvalidArgument(
+                              "Interpolation method can only be \"bilinear\", "
+                              "\"nearest\" or \"bicubic\" when "
+                              "Input(X) dimension is 4, but got method is %s.",
+                              interp_method));

   const DataLayout data_layout = framework::StringToDataLayout(
       ctx->Attrs().Get<std::string>("data_layout"));
@@ -169,13 +170,13 @@ static void Interpolate2DInferShapeCheck(framework::InferShapeContext* ctx) {
     auto out_size_dim = ctx->GetInputDim("OutSize");
     PADDLE_ENFORCE_EQ(
         out_size_dim.size(), 1,
-        platform::errors::InvalidArgument(
-            "OutSize's dimension size must be 1, but got dimension = %d .",
-            out_size_dim.size()));
+        platform::errors::InvalidArgument("OutSize's dimension size must be 1, "
+                                          "but got dimension size is %d.",
+                                          out_size_dim.size()));
     PADDLE_ENFORCE_EQ(
         out_size_dim[0], 2,
         platform::errors::InvalidArgument(
-            "OutSize's dim[0] must be 2, but got dimention = %d .",
+            "OutSize's dimension[0] must be 2, but got dimension[0] is %d.",
             out_size_dim[0]));
     ctx->ShareLoD("X", "Out");
     return;
@@ -264,12 +265,15 @@ static void Interpolate3DInferShapeCheck(framework::InferShapeContext* ctx) {
   if (ctx->HasInput("OutSize") && ctx->IsRuntime()) {
     auto out_size_dim = ctx->GetInputDim("OutSize");
-    PADDLE_ENFORCE_EQ(out_size_dim.size(), 1,
-                      "OutSize's dimension size must be 1, but got size =%d .",
-                      out_size_dim.size());
+    PADDLE_ENFORCE_EQ(
+        out_size_dim.size(), 1,
+        platform::errors::InvalidArgument(
+            "OutSize's dimension size must be 1, but got size is %d.",
+            out_size_dim.size()));
     PADDLE_ENFORCE_EQ(out_size_dim[0], 3,
-                      "OutSize's dim[0] must be 3, but got size = %d .",
-                      out_size_dim[0]);
+                      platform::errors::InvalidArgument(
+                          "OutSize's dim[0] must be 3, but got size is %d.",
+                          out_size_dim[0]));
     ctx->ShareLoD("X", "Out");
     return;
   }
@@ -289,10 +293,8 @@ class InterpolateOp : public framework::OperatorWithKernel {

  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of InterpolateOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of InterpolationOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Interpolate");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Interpolate");

     auto dim_x = ctx->GetInputDim("X");  // NCHW format
     PADDLE_ENFORCE(
@@ -534,9 +536,10 @@ class InterpolateOpGrad : public framework::OperatorWithKernel {

  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) should not be null");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "InterpolateGrad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   "Out@GRAD", "InterpolateGrad");
+
     auto dim_x = ctx->GetInputDim("X");
     if (ctx->HasOutput(framework::GradVarName("X"))) {
       ctx->SetOutputDim(framework::GradVarName("X"), dim_x);
diff --git a/paddle/fluid/operators/merge_lod_tensor_op.cc b/paddle/fluid/operators/merge_lod_tensor_op.cc
index c9b852cfc0..87d914aa79 100644
--- a/paddle/fluid/operators/merge_lod_tensor_op.cc
+++ b/paddle/fluid/operators/merge_lod_tensor_op.cc
@@ -44,8 +44,10 @@ class MergeLoDTensorOp : public framework::OperatorBase {
         scope.FindVar(Output("Out"))->GetMutable<framework::LoDTensor>();
     auto level = static_cast<size_t>(Attr<int>("level"));

-    PADDLE_ENFORCE(in_true.numel() || in_false.numel(),
-                   "Input(InTrue) or Input(InFalse) should be initialized.");
+    PADDLE_ENFORCE_EQ(
+        in_true.numel() || in_false.numel(), true,
+        platform::errors::InvalidArgument(
+            "Input(InTrue) or Input(InFalse) should be initialized."));

     auto &mask_dim = mask.dims();
     std::unique_ptr<framework::LoDTensor> cpu_mask{new framework::LoDTensor()};
@@ -56,7 +58,9 @@ class MergeLoDTensorOp : public framework::OperatorBase {
       framework::TensorCopy(mask, platform::CPUPlace(), dev_ctx,
                             cpu_mask.get());
 #else
-      PADDLE_THROW("Not supported GPU, Please compile WITH_GPU option");
+      PADDLE_THROW(platform::errors::PreconditionNotMet(
+          "GPU is not supported, please recompile or reinstall Paddle with "
+          "CUDA support."));
 #endif
     }
     auto *mask_data = cpu_mask->data<bool>();
@@ -109,7 +113,11 @@ class MergeLoDTensorOp : public framework::OperatorBase {
       size_t start_offset = lod_and_offset.second.first;
       size_t end_offset = lod_and_offset.second.second;

-      PADDLE_ENFORCE_GE(end_offset, start_offset);
+      PADDLE_ENFORCE_GE(end_offset, start_offset,
+                        platform::errors::InvalidArgument(
+                            "The end offset is less than the start offset, "
+                            "end offset is %d, start offset is %d.",
+                            end_offset, start_offset));
       size_t len = end_offset - start_offset;
       if (len == 0) {
         continue;
@@ -189,22 +197,24 @@ class MergeLoDTensorInferShape : public framework::InferShapeBase {
                    "merge_lod_tensor");
     auto mask_dim = context->GetInputDim("Mask");
     PADDLE_ENFORCE_EQ(mask_dim.size(), 2,
-                      "If you are using IfElse OP:"
-                      "\n\nie = fluid.layers.IfElse(cond=cond)\nwith "
-                      "ie.true_block():\n out_1 = ie.input(x)\n\n"
-                      "Please ensure that the cond should be a 2-D tensor and "
-                      "the second dim size of cond should be 1. "
-                      "But now the cond's shape is [",
-                      *mask_dim.Get(), "].\n");
+                      platform::errors::InvalidArgument(
+                          "If you are using IfElse OP:"
+                          "\n\nie = fluid.layers.IfElse(cond=cond)\nwith "
+                          "ie.true_block():\n out_1 = ie.input(x)\n\n"
+                          "Please ensure that the cond is a 2-D tensor and "
+                          "the second dim size of cond is 1. "
+                          "But now the cond's shape is [%s].\n",
+                          mask_dim));
     if (context->IsRuntime() || mask_dim[1] > 0) {
       PADDLE_ENFORCE_EQ(mask_dim[1], 1,
-                        "If you are using IfElse OP:"
-                        "\n\nie = fluid.layers.IfElse(cond=cond)\nwith "
-                        "ie.true_block():\n out_1 = ie.input(x)\n\n"
-                        "Please ensure that the cond should be a 2-D tensor "
-                        "and the second dim size of cond should be 1. "
-                        "But now the cond's shape is [",
-                        *mask_dim.Get(), "].\n");
+                        platform::errors::InvalidArgument(
+                            "If you are using IfElse OP:"
+                            "\n\nie = fluid.layers.IfElse(cond=cond)\nwith "
+                            "ie.true_block():\n out_1 = ie.input(x)\n\n"
+                            "Please ensure that the cond is a 2-D tensor "
+                            "and the second dim size of cond is 1. "
+                            "But now the cond's shape is [%s].\n",
+                            mask_dim));
     }

     context->SetOutputDim("Out", context->GetInputDim("InTrue"));
diff --git a/paddle/fluid/operators/strided_memcpy.h b/paddle/fluid/operators/strided_memcpy.h
index f20bada8ab..142b00b4de 100644
--- a/paddle/fluid/operators/strided_memcpy.h
+++ b/paddle/fluid/operators/strided_memcpy.h
@@ -60,20 +60,33 @@ inline void StridedNumelCopyWithAxis(const platform::DeviceContext& ctx,
   auto place = ctx.GetPlace();

   PADDLE_ENFORCE_EQ(src_stride_numel.size(), dst_stride_numel.size(),
-                    "src and dst tensor should have the same dims size.");
+                    platform::errors::InvalidArgument(
+                        "Source and destination tensor should have the same "
+                        "dimension size, but source tensor dimension size is "
+                        "%u, destination tensor size is %u.",
+                        src_stride_numel.size(), dst_stride_numel.size()));

   for (int64_t i = 0; i < axis; ++i) {
     if (i < axis) {
-      PADDLE_ENFORCE_EQ(src_stride_numel[i] / src_stride_numel[axis],
-                        dst_stride_numel[i] / dst_stride_numel[axis],
-                        "src and dst should have the same elements "
-                        "except the specified axis.");
+      PADDLE_ENFORCE_EQ(
+          src_stride_numel[i] / src_stride_numel[axis],
+          dst_stride_numel[i] / dst_stride_numel[axis],
+          platform::errors::InvalidArgument(
+              "Source and destination tensor should have the same number of "
+              "elements except the specified axis, but the source elements "
+              "number is %d, destination elements number is %d.",
+              src_stride_numel[i] / src_stride_numel[axis],
+              dst_stride_numel[i] / dst_stride_numel[axis]));
     } else if (i == axis) {
       continue;
     } else {
-      PADDLE_ENFORCE_EQ(src_stride_numel[i], dst_stride_numel[i],
-                        "src and dst should have the same elements "
-                        "except the specified axis.");
+      PADDLE_ENFORCE_EQ(
+          src_stride_numel[i], dst_stride_numel[i],
+          platform::errors::InvalidArgument(
+              "Source and destination tensor should have the same number of "
+              "elements except the specified axis, but the source elements "
+              "number is %d, destination elements number is %d.",
+              src_stride_numel[i], dst_stride_numel[i]));
     }
   }
@@ -90,7 +103,8 @@ inline void StridedNumelCopyWithAxis(const platform::DeviceContext& ctx,
       memory::Copy(gpu_place, dst + i * dst_after, gpu_place,
                    src + i * src_after, sizeof(T) * size, cuda_ctx.stream());
 #else
-      PADDLE_THROW("Paddle is not compiled with GPU");
+      PADDLE_THROW(platform::errors::PreconditionNotMet(
+          "Paddle is not compiled with GPU."));
 #endif
     }
   }
diff --git a/paddle/fluid/operators/var_conv_2d_op.cc b/paddle/fluid/operators/var_conv_2d_op.cc
index f8a29a52d7..db8b2c3050 100644
--- a/paddle/fluid/operators/var_conv_2d_op.cc
+++ b/paddle/fluid/operators/var_conv_2d_op.cc
@@ -78,21 +78,35 @@ void VarConv2dOP::InferShape(framework::InferShapeContext* ctx) const {
       platform::errors::NotFound("Col(Output) of VarConv2dOP is not found."));

   auto x_dims = ctx->GetInputDim("X");
-  PADDLE_ENFORCE_EQ(x_dims.size(), 2,
-                    "The rank of X(Input) can't be less than 2.");
+  PADDLE_ENFORCE_EQ(
+      x_dims.size(), 2,
+      platform::errors::InvalidArgument(
+          "The rank of X(Input) can't be less than 2, but received rank is "
+          "%u.",
+          x_dims.size()));
   auto w_dims = ctx->GetInputDim("W");
-  PADDLE_ENFORCE_EQ(w_dims.size(), 2, "W should be 2-D tensor");
+  PADDLE_ENFORCE_EQ(
+      w_dims.size(), 2,
+      platform::errors::InvalidArgument(
+          "Input W should be a 2-D tensor, but its actual dimension is %u.",
+          w_dims.size()));
   int output_channel = ctx->Attrs().Get<int>("OutputChannel");
   int input_channel = ctx->Attrs().Get<int>("InputChannel");
   int kernel_h = ctx->Attrs().Get<int>("KernelH");
   int kernel_w = ctx->Attrs().Get<int>("KernelW");
-  PADDLE_ENFORCE_EQ(w_dims[0], output_channel,
-                    "W dim[0] should be equal to OutputChannel");
+  PADDLE_ENFORCE_EQ(
+      w_dims[0], output_channel,
+      platform::errors::InvalidArgument(
+          "Input W's dimension[0] should be equal to OutputChannel, the "
+          "dimension[0] is %d, OutputChannel is %d.",
+          w_dims[0], output_channel));
   PADDLE_ENFORCE_EQ(
       w_dims[1], input_channel * kernel_h * kernel_w,
-      "W dim[1] should be equal to InputChannel * StrideH * StrideW");
+      platform::errors::InvalidArgument(
+          "Input W's dimension[1] should be equal to InputChannel * StrideH * "
+          "StrideW, the dimension[1] is %d, expected value is %d.",
+          w_dims[1], input_channel * kernel_h * kernel_w));

   if (ctx->IsRuntime()) {
     framework::Variable* x_var =
@@ -103,10 +117,14 @@ void VarConv2dOP::InferShape(framework::InferShapeContext* ctx) const {
         platform::errors::InvalidArgument("The Input(X) Tensor of VarConv2dOP "
                                           "does not contain LoD information."));

-    PADDLE_ENFORCE_GE(x_lod.size(), 1, "The Input(X)'s lod info is corrupted.");
-    PADDLE_ENFORCE_EQ(
-        x_dims[0], static_cast<int64_t>(x_lod[0].back()),
-        "The Input(X)'s lod info mismatches the actual tensor shape.");
+    PADDLE_ENFORCE_GE(x_lod.size(), 1,
+                      platform::errors::InvalidArgument(
+                          "The Input(X)'s lod info is corrupted."));
+    PADDLE_ENFORCE_EQ(x_dims[0], static_cast<int64_t>(x_lod[0].back()),
+                      platform::errors::InvalidArgument(
+                          "The Input(X)'s lod info mismatches the actual "
+                          "tensor shape, input lod is %s, tensor shape is %s.",
+                          x_lod, x_dims));

     framework::Variable* row_var =
         BOOST_GET(framework::Variable*, ctx->GetInputVarPtrs("ROW")[0]);
-- 
GitLab