From df67b317e9fa35efb2d83c258cfc9dbb47a5d366 Mon Sep 17 00:00:00 2001 From: swtkiwi <1208425345@qq.com> Date: Tue, 12 Jan 2021 19:53:51 +0800 Subject: [PATCH] [2.0 Cherry-pick]fix 2.0 error message (#30332) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix datanorm error msg (#30294) * Optimize the error message of framework. (#30134) * modify error message based on comments (#30189) * modify error message based on comments * edit code according to review. * Correct spelling according to review. * fix enforce msg of sum xpu op (#30113) * enhance error info for py_func (#30138) * enhance error info for py_func * update * fix elugradgrad test fail & error message opt (#30171) * fix elugradgrad test fail and error message opt * fix unitest,test=develop * Update prroi_pool_op.h fix error message * opt message,test=develop * fix ci fail,test=develop * Refine PADDLE_ENFORCE Error Messages. test=develop (#30149) Improve some error messages in parallel_executor.cc, conditional_block_op.cc, recurrent_op.cc * enhance error message, test=develop (#30220) * fix error message for distribute_fpn_proposals_op (#30116) * enhance error msgs of fusion_seqpool_cvm_concat_op.cc, test=develop (#30240) * just add the op error message for the matmul xpu (#30246) add the op error message for the matmul xpu * enhance error message of nll_loss op test=develop (#30125) * enhance error message of nll_loss op test=develop Co-authored-by: yaoxuefeng Co-authored-by: xiemoyuan <71377852+xiemoyuan@users.noreply.github.com> Co-authored-by: WeiXin Co-authored-by: Jack Zhou Co-authored-by: Wilber Co-authored-by: Double_V Co-authored-by: Huihuang Zheng Co-authored-by: zhang wenhui Co-authored-by: wangguanzhong Co-authored-by: 石晓伟 <39303645+Shixiaowei02@users.noreply.github.com> Co-authored-by: wawltor Co-authored-by: lijianshe02 <48898730+lijianshe02@users.noreply.github.com> --- paddle/fluid/framework/parallel_executor.cc | 4 ++- 
.../inference/tensorrt/convert/pool2d_op.cc | 6 ++-- paddle/fluid/operators/activation_op.h | 2 +- paddle/fluid/operators/attention_lstm_op.cc | 21 +++++++---- .../operators/bilinear_tensor_product_op.cc | 8 +++-- .../controlflow/conditional_block_op.cc | 20 +++++++---- paddle/fluid/operators/cvm_op.cc | 23 +++++++----- paddle/fluid/operators/data_norm_op.cc | 5 +-- .../detection/distribute_fpn_proposals_op.h | 3 +- .../operators/detection/target_assign_op.cc | 4 ++- .../fused/fusion_seqpool_cvm_concat_op.cc | 16 ++++----- paddle/fluid/operators/matmul_op_xpu.cc | 36 +++++++++++++------ .../operators/mkldnn/batch_norm_mkldnn_op.cc | 7 +++- .../operators/mkldnn/matmul_mkldnn_op.cc | 9 +++-- paddle/fluid/operators/nll_loss_op.cc | 15 +++++--- paddle/fluid/operators/optimizers/ftrl_op.cc | 11 +++--- paddle/fluid/operators/prroi_pool_op.h | 19 ++++++---- paddle/fluid/operators/py_func_op.cc | 4 ++- .../fluid/operators/reader/blocking_queue.h | 8 +++-- paddle/fluid/operators/reader/read_op.cc | 11 +++--- paddle/fluid/operators/recurrent_op.cc | 4 ++- paddle/fluid/operators/sum_op_xpu.cc | 21 +++++++++-- paddle/fluid/operators/trace_op.cc | 3 +- .../unittests/test_activation_nn_grad.py | 4 +-- 24 files changed, 180 insertions(+), 84 deletions(-) diff --git a/paddle/fluid/framework/parallel_executor.cc b/paddle/fluid/framework/parallel_executor.cc index e7a2fadf47..bfc3b7c701 100644 --- a/paddle/fluid/framework/parallel_executor.cc +++ b/paddle/fluid/framework/parallel_executor.cc @@ -167,7 +167,9 @@ class ParallelExecutorPrivate { nccl_id = new ncclUniqueId(); PADDLE_ENFORCE_EQ( platform::dynload::ncclGetUniqueId(nccl_id), ncclSuccess, - platform::errors::PreconditionNotMet("Get NCCL unique ID failed.")); + platform::errors::PreconditionNotMet( + "PaddlePaddle failed to get NCCL unique ID. 
It may due to your " + "system settings or NCCL library error, please debug on NCCL")); VLOG(10) << "can't find nccl_id_var:" << var_name << ", nccl_id:" << nccl_id; } diff --git a/paddle/fluid/inference/tensorrt/convert/pool2d_op.cc b/paddle/fluid/inference/tensorrt/convert/pool2d_op.cc index ca5a1a77bd..2ef8310b09 100644 --- a/paddle/fluid/inference/tensorrt/convert/pool2d_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/pool2d_op.cc @@ -197,9 +197,9 @@ class Pool2dOpConverter : public OpConverter { engine_, Padding, *const_cast(input1), pre_pad, post_pad); PADDLE_ENFORCE_NOT_NULL( - pad_layer, - platform::errors::Fatal( - "pad layer in poolOp converter could not be created.")); + pad_layer, platform::errors::Fatal( + "Pad layer in poolOp converter could not be " + "created. The pointer to pad layer is `NULL`.")); input1 = pad_layer->getOutput(0); } auto *pool_layer = TRT_ENGINE_ADD_LAYER( diff --git a/paddle/fluid/operators/activation_op.h b/paddle/fluid/operators/activation_op.h index 43907744f9..f220fe878b 100755 --- a/paddle/fluid/operators/activation_op.h +++ b/paddle/fluid/operators/activation_op.h @@ -1565,7 +1565,7 @@ struct ELUGradGradFunctor : public BaseActivationFunctor { auto dout = framework::EigenVector::Flatten( GET_DATA_SAFELY(dOut, "Output", "DOut", "ELUGradGrad")); dx.device(*d) = ddx * dout * static_cast(alpha) * x.exp() * - (x < static_cast(0)).template cast(); + (x <= static_cast(0)).template cast(); } if (ddOut) { diff --git a/paddle/fluid/operators/attention_lstm_op.cc b/paddle/fluid/operators/attention_lstm_op.cc index 839b51851d..593a1b861c 100644 --- a/paddle/fluid/operators/attention_lstm_op.cc +++ b/paddle/fluid/operators/attention_lstm_op.cc @@ -44,14 +44,18 @@ void AttentionLSTMOp::InferShape(framework::InferShapeContext* ctx) const { auto x_dims = ctx->GetInputDim("X"); const int M = x_dims[1]; - PADDLE_ENFORCE_EQ(x_dims.size(), 2, platform::errors::InvalidArgument( - "Input(X)'s rank must be 2.")); + 
PADDLE_ENFORCE_EQ(x_dims.size(), 2, + platform::errors::InvalidArgument( + "Expected input(X)'s dimension is 2. But received %d.", + x_dims.size())); auto w_dims = ctx->GetInputDim("LSTMWeight"); const int D = w_dims[1] / 4; PADDLE_ENFORCE_EQ( w_dims.size(), 2, - platform::errors::InvalidArgument("Input(LSTMWeight)'s rank must be 2.")); + platform::errors::InvalidArgument( + "Expected input(LSTMWeight)'s dimension is 2.But received %d.", + w_dims.size())); PADDLE_ENFORCE_EQ( w_dims[0], D + M, platform::errors::InvalidArgument( @@ -77,8 +81,11 @@ void AttentionLSTMOp::InferShape(framework::InferShapeContext* ctx) const { if (ctx->HasInput("H0")) { auto h_dims = ctx->GetInputDim("H0"); - PADDLE_ENFORCE_EQ(h_dims.size(), 2UL, platform::errors::InvalidArgument( - "Input(H0)'s rank must be 2.")); + PADDLE_ENFORCE_EQ( + h_dims.size(), 2UL, + platform::errors::InvalidArgument( + "Expected input(H0)'s dimension is 2. But received %d.", + h_dims.size())); if (ctx->IsRuntime() || (framework::product(c_dims) > 0 && framework::product(h_dims) > 0)) { PADDLE_ENFORCE_EQ(h_dims, c_dims, @@ -94,7 +101,9 @@ void AttentionLSTMOp::InferShape(framework::InferShapeContext* ctx) const { "Input(AttentionWeight)'s rank must be 2.")); PADDLE_ENFORCE_EQ(atten_w_dims[0], M + D, platform::errors::InvalidArgument( - "AttentionWeight shapes must be (%d + %d) * 1.", M, D)); + "Expected `AttentionWeight` shape is [(%d + %d), 1]. 
" + "But received shape = [%d, 1], shape[0] is not %d.", + M, D, atten_w_dims[0], M + D)); PADDLE_ENFORCE_EQ(atten_w_dims[1], 1, platform::errors::InvalidArgument( "AttentionWeight shapes must be (%d + %d) * 1.", M, D)); diff --git a/paddle/fluid/operators/bilinear_tensor_product_op.cc b/paddle/fluid/operators/bilinear_tensor_product_op.cc index a2ba74dd7e..253a96004b 100644 --- a/paddle/fluid/operators/bilinear_tensor_product_op.cc +++ b/paddle/fluid/operators/bilinear_tensor_product_op.cc @@ -50,9 +50,11 @@ class BilinearTensorProductOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ( y_dims.size(), 2UL, platform::errors::InvalidArgument("The input(Y) must be a 2D Tensor.")); - PADDLE_ENFORCE_EQ(weight_dims.size(), 3UL, - platform::errors::InvalidArgument( - "The input(Weight) must be a 3D tensor.")); + PADDLE_ENFORCE_EQ( + weight_dims.size(), 3UL, + platform::errors::InvalidArgument("Expected the input(Weight) is a 3D " + "tensor. But received %dD tensor.", + weight_dims.size())); if (ctx->IsRuntime() || (x_dims[0] > 0 && y_dims[0] > 0)) { PADDLE_ENFORCE_EQ( x_dims[0], y_dims[0], diff --git a/paddle/fluid/operators/controlflow/conditional_block_op.cc b/paddle/fluid/operators/controlflow/conditional_block_op.cc index 2713b7fd59..eeb410eba2 100644 --- a/paddle/fluid/operators/controlflow/conditional_block_op.cc +++ b/paddle/fluid/operators/controlflow/conditional_block_op.cc @@ -57,8 +57,10 @@ class ConditionalBlockOp : public ConditionalOp { if (need_run) { auto *scope_var = scope.FindVar(Output(ConditionalOp::kScope)); PADDLE_ENFORCE_NOT_NULL( - scope_var, platform::errors::PreconditionNotMet( - "Scope must be set in conditional_block_op.")); + scope_var, + platform::errors::PreconditionNotMet( + "Expect Scope variable to be set in conditional_block_op, but " + "got a null Scope variable. 
Please set the Scope variable.")); auto *scopes = scope_var->GetMutable>(); scopes->resize(1); scopes->front() = &scope.NewScope(); @@ -119,12 +121,16 @@ class ConditionalBlockGradOp : public ConditionalOp { auto *scope_var = scope.FindVar(Input(ConditionalOp::kScope)); PADDLE_ENFORCE_NOT_NULL( - scope_var, platform::errors::PreconditionNotMet( - "Scope must be set in conditional block op.")); + scope_var, + platform::errors::PreconditionNotMet( + "Expect Scope variable to be set in conditional_block_op, but " + "got a null Scope variable. Please set the Scope variable.")); auto &scopes = scope_var->Get>(); - PADDLE_ENFORCE_GT(scopes.size(), 0, - platform::errors::InvalidArgument( - "Scope must be set in conditional block op.")); + PADDLE_ENFORCE_GT( + scopes.size(), 0, + platform::errors::InvalidArgument( + "Expect Scope variable contains at least 1 scope, but got: %d", + scopes.size())); framework::Scope &cur_scope = *scopes[0]; framework::Executor exec(dev_place); diff --git a/paddle/fluid/operators/cvm_op.cc b/paddle/fluid/operators/cvm_op.cc index a1a8744c32..be7d4780f8 100644 --- a/paddle/fluid/operators/cvm_op.cc +++ b/paddle/fluid/operators/cvm_op.cc @@ -30,8 +30,10 @@ class CVMOp : public framework::OperatorWithKernel { OP_INOUT_CHECK(ctx->HasOutput("Y"), "Output", "Y", "CVM"); auto x_dims = ctx->GetInputDim("X"); - PADDLE_ENFORCE_EQ(x_dims.size(), 2UL, platform::errors::InvalidArgument( - "Input(X)'s rank should be 2.")); + PADDLE_ENFORCE_EQ( + x_dims.size(), 2UL, + platform::errors::InvalidArgument( + "Input(X)'s rank should be 2, but got %d", x_dims.size())); if (ctx->Attrs().Get("use_cvm")) { ctx->SetOutputDim("Y", {x_dims[0], x_dims[1]}); @@ -68,26 +70,31 @@ class CVMGradientOp : public framework::OperatorWithKernel { auto x_dims = ctx->GetInputDim("X"); auto cvm_dims = ctx->GetInputDim("CVM"); auto dy_dims = ctx->GetInputDim(framework::GradVarName("Y")); - PADDLE_ENFORCE_EQ(x_dims.size(), 2, platform::errors::InvalidArgument( - "Input(X)'s rank 
should be 2.")); + PADDLE_ENFORCE_EQ( + x_dims.size(), 2, + platform::errors::InvalidArgument( + "Expect Input(X)'s rank == 2, but got %d", x_dims.size())); PADDLE_ENFORCE_EQ( dy_dims.size(), 2, - platform::errors::InvalidArgument("Input(Y@Grad)'s rank should be 2.")); + platform::errors::InvalidArgument( + "Expect Input(Y@Grad)'s rank == 2, but got %d", dy_dims.size())); PADDLE_ENFORCE_EQ( cvm_dims.size(), 2, - platform::errors::InvalidArgument("Input(CVM)'s rank should be 2.")); + platform::errors::InvalidArgument( + "Expect Input(CVM)'s rank == 2, but got %d", cvm_dims.size())); PADDLE_ENFORCE_EQ( x_dims[0], dy_dims[0], platform::errors::InvalidArgument( "The 1st dimension of Input(X) and Input(Y@Grad) should " - "be equal.")); + "be equal, X is %d, Y@Grad is %d", + x_dims[0], dy_dims[0])); PADDLE_ENFORCE_EQ( cvm_dims[1], 2, platform::errors::InvalidArgument( "When Attr(soft_label) == false, the 2nd dimension of " - "Input(CVM) should be 2.")); + "Input(CVM) should be 2, but got %d.", cvm_dims[1])); ctx->SetOutputDim(framework::GradVarName("X"), x_dims); ctx->ShareLoD("X", framework::GradVarName("X")); } diff --git a/paddle/fluid/operators/data_norm_op.cc b/paddle/fluid/operators/data_norm_op.cc index 7dc1e23207..698c57482d 100644 --- a/paddle/fluid/operators/data_norm_op.cc +++ b/paddle/fluid/operators/data_norm_op.cc @@ -390,7 +390,7 @@ class DataNormKernel } default: PADDLE_THROW(platform::errors::InvalidArgument( - "Unknown storage order: %d", data_layout)); + "Unknown storage order: %d, please use NCHW or NHWC", data_layout)); } } }; @@ -701,7 +701,8 @@ class DataNormGradKernel } default: PADDLE_THROW(platform::errors::InvalidArgument( - "Unknown storage order: %s", data_layout_str)); + "Unknown storage order: %s, please use NCHW or NHWC", + data_layout_str)); } } }; diff --git a/paddle/fluid/operators/detection/distribute_fpn_proposals_op.h b/paddle/fluid/operators/detection/distribute_fpn_proposals_op.h index 79498f0153..465435637c 100 --- 
a/paddle/fluid/operators/detection/distribute_fpn_proposals_op.h +++ b/paddle/fluid/operators/detection/distribute_fpn_proposals_op.h @@ -84,7 +84,8 @@ class DistributeFpnProposalsOpKernel : public framework::OpKernel { PADDLE_ENFORCE_EQ(fpn_rois->lod().size(), 1UL, platform::errors::InvalidArgument( "DistributeFpnProposalsOp needs LoD " - "with one level.")); + "with one level. But received level is %d", + fpn_rois->lod().size())); } std::vector fpn_rois_lod; diff --git a/paddle/fluid/operators/detection/target_assign_op.cc b/paddle/fluid/operators/detection/target_assign_op.cc index 1fda795d35..afd50e57e7 100644 --- a/paddle/fluid/operators/detection/target_assign_op.cc +++ b/paddle/fluid/operators/detection/target_assign_op.cc @@ -43,7 +43,9 @@ class TargetAssignOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ( in_dims.size(), 3, - platform::errors::InvalidArgument("The rank of Input(X) must be 3.")); + platform::errors::InvalidArgument( + "Expected the rank of Input(X) is 3. 
But received %d.", + in_dims.size())); PADDLE_ENFORCE_EQ(mi_dims.size(), 2, platform::errors::InvalidArgument( "The rank of Input(MatchIndices) must be 2.")); diff --git a/paddle/fluid/operators/fused/fusion_seqpool_cvm_concat_op.cc b/paddle/fluid/operators/fused/fusion_seqpool_cvm_concat_op.cc index ecb7db46a9..123c4c885e 100644 --- a/paddle/fluid/operators/fused/fusion_seqpool_cvm_concat_op.cc +++ b/paddle/fluid/operators/fused/fusion_seqpool_cvm_concat_op.cc @@ -31,15 +31,15 @@ void FusionSeqPoolCVMConcatOp::InferShape( paddle::platform::errors::InvalidArgument( "Output(Out) of FusionSeqPoolCVMConcatOp should not be null.")); int axis = ctx->Attrs().Get("axis"); - PADDLE_ENFORCE_EQ( - axis, 1, - paddle::platform::errors::InvalidArgument( - "FusionSeqPoolCVMConcatOp only supports concat axis=1 yet.")); + PADDLE_ENFORCE_EQ(axis, 1, paddle::platform::errors::InvalidArgument( + "FusionSeqPoolCVMConcatOp only supports " + "concat axis=1 yet, but received %d.", + axis)); bool use_cvm = ctx->Attrs().Get("use_cvm"); - PADDLE_ENFORCE_EQ( - use_cvm, true, - paddle::platform::errors::InvalidArgument( - "FusionSeqPoolCVMConcatOp only supports use_cvm is true yet.")); + PADDLE_ENFORCE_EQ(use_cvm, true, paddle::platform::errors::InvalidArgument( + "FusionSeqPoolCVMConcatOp only supports " + "use_cvm is true yet, but received %d.", + use_cvm)); auto ins_dims = ctx->GetInputsDim("X"); const size_t n = ins_dims.size(); diff --git a/paddle/fluid/operators/matmul_op_xpu.cc b/paddle/fluid/operators/matmul_op_xpu.cc index 103ac9add1..14bef89a71 100644 --- a/paddle/fluid/operators/matmul_op_xpu.cc +++ b/paddle/fluid/operators/matmul_op_xpu.cc @@ -127,10 +127,18 @@ class MatMulXPUKernel : public framework::OpKernel { PADDLE_ENFORCE_EQ( mat_dim_a.width_, mat_dim_b.height_, - platform::errors::InvalidArgument("Shape mistake in matmul_op")); - PADDLE_ENFORCE_EQ( - mat_dim_a.batch_size_, mat_dim_b.batch_size_, - platform::errors::InvalidArgument("Shape mistake in matmul_op")); + 
platform::errors::InvalidArgument("Shape mistake in matmul_op, the " + "first tensor width must be same as " + "second tensor height, but received " + "width:%d, height:%d", + mat_dim_a.width_, mat_dim_b.height_)); + PADDLE_ENFORCE_EQ(mat_dim_a.batch_size_, mat_dim_b.batch_size_, + platform::errors::InvalidArgument( + "Shape mistake in matmul_op, the two input" + "tensor batch_size must be same, but received first " + "tensor batch_size:%d, second " + "tensor batch_size:%d", + mat_dim_a.batch_size_, mat_dim_b.batch_size_)); T alpha = static_cast(context.Attr("alpha")); auto &dev_ctx = context.template device_context(); @@ -251,12 +259,20 @@ class MatMulGradXPUKernel : public framework::OpKernel { } } - PADDLE_ENFORCE_EQ( - mat_dim_a.width_, mat_dim_b.height_, - platform::errors::InvalidArgument("Shape mistake in matmul_grad_op")); - PADDLE_ENFORCE_EQ( - mat_dim_a.batch_size_, mat_dim_b.batch_size_, - platform::errors::InvalidArgument("Shape mistake in matmul_grad_op")); + PADDLE_ENFORCE_EQ(mat_dim_a.width_, mat_dim_b.height_, + platform::errors::InvalidArgument( + "Shape mistake in matmul_grad_op, the " + "first tensor width must be same as second tensor " + "height, but received " + "width:%d, height:%d", + mat_dim_a.width_, mat_dim_b.height_)); + PADDLE_ENFORCE_EQ(mat_dim_a.batch_size_, mat_dim_b.batch_size_, + platform::errors::InvalidArgument( + "Shape mistake in matmul_grad_op, the two input" + "tensor batch_size must be same, but received first " + "tensor batch_size:%d, second " + "tensor batch_size:%d", + mat_dim_a.batch_size_, mat_dim_b.batch_size_)); T alpha = static_cast(context.Attr("alpha")); diff --git a/paddle/fluid/operators/mkldnn/batch_norm_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/batch_norm_mkldnn_op.cc index 622d6685df..e53e052a89 100644 --- a/paddle/fluid/operators/mkldnn/batch_norm_mkldnn_op.cc +++ b/paddle/fluid/operators/mkldnn/batch_norm_mkldnn_op.cc @@ -54,9 +54,14 @@ class BatchNormMKLDNNHandler const float epsilon = 
ctx.Attr("epsilon"); const bool fuse_with_relu = ctx.Attr("fuse_with_relu"); + std::vector DataLayout_error_msg = {"kNHWC", "kNCHW", + "kAnyLayout", "kMKLDNN"}; PADDLE_ENFORCE_EQ( x->layout(), DataLayout::kMKLDNN, - platform::errors::InvalidArgument("Wrong layout set for X tensor")); + platform::errors::InvalidArgument( + "Wrong layout set for X tensor. Expected layout is `kMKLDNN`, " + "But received %s.", + DataLayout_error_msg[static_cast(DataLayout::kMKLDNN)])); PADDLE_ENFORCE_NE( x->format(), MKLDNNMemoryFormat::undef, platform::errors::InvalidArgument("Wrong format set for X tensor")); diff --git a/paddle/fluid/operators/mkldnn/matmul_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/matmul_mkldnn_op.cc index fddc4b4b2e..fb856d9740 100644 --- a/paddle/fluid/operators/mkldnn/matmul_mkldnn_op.cc +++ b/paddle/fluid/operators/mkldnn/matmul_mkldnn_op.cc @@ -374,9 +374,12 @@ class DNNLMatMulKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { if (ctx.HasAttr("head_number")) { - PADDLE_ENFORCE_EQ(ctx.Attr("head_number"), 1, - platform::errors::Unimplemented( - "DNNL matmul doesn't support multiple heads.")); + PADDLE_ENFORCE_EQ( + ctx.Attr("head_number"), 1, + platform::errors::Unimplemented( + "DNNL matmul doesn't support multiple heads. Expected " + "head_number=1. 
But received `head_number` is %d", + ctx.Attr("head_number"))); } platform::MKLDNNDeviceContext::tls().log_lib_version(); ExecuteMatMul(ctx); diff --git a/paddle/fluid/operators/nll_loss_op.cc b/paddle/fluid/operators/nll_loss_op.cc index f0b5f4a466..263a73451c 100644 --- a/paddle/fluid/operators/nll_loss_op.cc +++ b/paddle/fluid/operators/nll_loss_op.cc @@ -53,10 +53,14 @@ class NLLLossOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ(w_dims.size(), 1, platform::errors::InvalidArgument( "Input(Weight) should be a 1D tensor.")); - PADDLE_ENFORCE_EQ(x_dims[1], w_dims[0], - platform::errors::InvalidArgument( - "Input(Weight) Tensor's size should match " - "to the the total number of classes.")); + PADDLE_ENFORCE_EQ( + x_dims[1], w_dims[0], + platform::errors::InvalidArgument( + "Expected input tensor Weight's size should equal " + "to the first dimension of the input tensor X. But received " + "Weight's " + "size is %d, the first dimension of input X is %d", + w_dims[0], x_dims[1])); } } if (x_dims.size() == 2) { @@ -68,7 +72,8 @@ class NLLLossOp : public framework::OperatorWithKernel { } else if (x_dims.size() == 4) { PADDLE_ENFORCE_EQ(label_dims.size(), 3, platform::errors::InvalidArgument( - "The tensor rank of Input(Label) must be 3.")); + "Expected Input(Lable) dimensions=3, received %d.", + label_dims.size())); auto input0 = x_dims[0]; auto input2 = x_dims[2]; auto input3 = x_dims[3]; diff --git a/paddle/fluid/operators/optimizers/ftrl_op.cc b/paddle/fluid/operators/optimizers/ftrl_op.cc index 3bdafbb96d..a75be6e580 100644 --- a/paddle/fluid/operators/optimizers/ftrl_op.cc +++ b/paddle/fluid/operators/optimizers/ftrl_op.cc @@ -42,7 +42,9 @@ class FTRLOp : public framework::OperatorWithKernel { auto param_dim = ctx->GetInputDim("Param"); PADDLE_ENFORCE_EQ(param_dim, ctx->GetInputDim("Grad"), platform::errors::InvalidArgument( - "Two input of FTRL Op's dimension must be same.")); + "Two input of FTRL Op's dimension must be same, but " + "param_dim is 
%d, Grad is %d", + param_dim, ctx->GetInputDim("Grad"))); auto lr_dim = ctx->GetInputDim("LearningRate"); PADDLE_ENFORCE_NE(framework::product(lr_dim), 0, @@ -51,9 +53,10 @@ class FTRLOp : public framework::OperatorWithKernel { "been initialized. You may need to confirm " "if you put exe.run(startup_program) " "after optimizer.minimize function.")); - PADDLE_ENFORCE_EQ( - framework::product(lr_dim), 1, - platform::errors::InvalidArgument("Learning Rate should be a scalar.")); + PADDLE_ENFORCE_EQ(framework::product(lr_dim), 1, + platform::errors::InvalidArgument( + "Learning Rate should be a scalar, but got %d", + framework::product(lr_dim))); ctx->SetOutputDim("ParamOut", param_dim); ctx->SetOutputDim("SquaredAccumOut", param_dim); diff --git a/paddle/fluid/operators/prroi_pool_op.h b/paddle/fluid/operators/prroi_pool_op.h index 5ec846c147..11ecff8845 100644 --- a/paddle/fluid/operators/prroi_pool_op.h +++ b/paddle/fluid/operators/prroi_pool_op.h @@ -293,19 +293,24 @@ class CPUPRROIPoolOpKernel : public framework::OpKernel { } else { PADDLE_ENFORCE_EQ(rois->lod().empty(), false, platform::errors::InvalidArgument( - "the lod of Input ROIs should not be empty when " + "The lod of Input ROIs should not be empty when " "BatchRoINums is None!")); auto rois_lod = rois->lod().back(); int rois_batch_size = rois_lod.size() - 1; - PADDLE_ENFORCE_EQ( - rois_batch_size, batch_size, - platform::errors::InvalidArgument("the rois_batch_size and input(X) " - "batch_size should be the same.")); + PADDLE_ENFORCE_EQ(rois_batch_size, batch_size, + platform::errors::InvalidArgument( + "The rois_batch_size and input(X)'s " + "batch_size should be the same but received" + "rois_batch_size: %d and batch_size: %d", + rois_batch_size, batch_size)); int rois_num_with_lod = rois_lod[rois_batch_size]; PADDLE_ENFORCE_EQ( rois_num_with_lod, rois_num, - platform::errors::InvalidArgument( - "the rois_num from input and lod must be the same")); + platform::errors::InvalidArgument("The rois_num from 
input should be " + "equal to the rois_num from lod, " + "but received rois_num from input: " + "%d and the rois_num from lod: %d.", + rois_num_with_lod, rois_num)); // calculate batch id index for each roi according to LoD for (int n = 0; n < rois_batch_size; ++n) { diff --git a/paddle/fluid/operators/py_func_op.cc b/paddle/fluid/operators/py_func_op.cc index 7749903e5f..b3622870d0 100644 --- a/paddle/fluid/operators/py_func_op.cc +++ b/paddle/fluid/operators/py_func_op.cc @@ -112,7 +112,9 @@ static void CallPythonFunc(py::object *callable, out->ShareDataWith(*py_out_tensor); } catch (py::cast_error &) { PADDLE_THROW(platform::errors::InvalidArgument( - "The %d-th output must be LoDTensor.", i)); + "py::cast to LoDTensor error. The %d-th output expection is " + "LoDTensor", + i)); } } } diff --git a/paddle/fluid/operators/reader/blocking_queue.h b/paddle/fluid/operators/reader/blocking_queue.h index 4add9afdfd..8929da20b5 100644 --- a/paddle/fluid/operators/reader/blocking_queue.h +++ b/paddle/fluid/operators/reader/blocking_queue.h @@ -54,7 +54,9 @@ class BlockingQueue { PADDLE_ENFORCE_LT( queue_.size(), capacity_, platform::errors::PermissionDenied( - "The queue size cannot exceed the set queue capacity.")); + "The queue size cannot exceed the set queue capacity. Expected " + "queue size is less than %d. But received %d", + capacity_, queue_.size())); queue_.push_back(elem); receive_cv_.notify_one(); return true; @@ -73,7 +75,9 @@ class BlockingQueue { PADDLE_ENFORCE_LT( queue_.size(), capacity_, platform::errors::PermissionDenied( - "The queue size cannot exceed the set queue capacity.")); + "The queue size cannot exceed the set queue capacity. Expected " + "queue size is less than %d. 
But received %d", + capacity_, queue_.size())); queue_.emplace_back(std::move(elem)); receive_cv_.notify_one(); return true; diff --git a/paddle/fluid/operators/reader/read_op.cc b/paddle/fluid/operators/reader/read_op.cc index d7f81dc24c..9086291e17 100644 --- a/paddle/fluid/operators/reader/read_op.cc +++ b/paddle/fluid/operators/reader/read_op.cc @@ -122,10 +122,13 @@ class ReadOp : public framework::OperatorBase { const std::vector& var_types = reader->VarTypes(); const std::vector& need_check_feed = reader->NeedCheckFeed(); - PADDLE_ENFORCE_EQ(out_arg_names.size(), need_check_feed.size(), - platform::errors::InvalidArgument( - "output size of read_op and the number of fed " - "variables of reader do not match")); + PADDLE_ENFORCE_EQ( + out_arg_names.size(), need_check_feed.size(), + platform::errors::InvalidArgument( + "Output size of read_op and the number of fed " + "variables of reader do not match. Received size of output is %d, " + "number of fed variables of reader is %d", + out_arg_names.size(), need_check_feed.size())); for (size_t i = 0; i < out_arg_names.size(); ++i) { auto* out = diff --git a/paddle/fluid/operators/recurrent_op.cc b/paddle/fluid/operators/recurrent_op.cc index 35f52ffa52..231fb38da2 100644 --- a/paddle/fluid/operators/recurrent_op.cc +++ b/paddle/fluid/operators/recurrent_op.cc @@ -161,7 +161,9 @@ int64_t RecurrentBase::GetSequenceLength(const framework::Scope &scope) const { } PADDLE_ENFORCE_GE(seq_len, 0, platform::errors::InvalidArgument( - "RecurrentOp gets invalid sequence length.")); + "RecurrentOp gets invalid sequence length. Expected " + "seq_len >= 0. 
Received seq_len = %d", + seq_len)); return seq_len; } diff --git a/paddle/fluid/operators/sum_op_xpu.cc b/paddle/fluid/operators/sum_op_xpu.cc index f15910fd4f..264cc4e2cf 100644 --- a/paddle/fluid/operators/sum_op_xpu.cc +++ b/paddle/fluid/operators/sum_op_xpu.cc @@ -50,8 +50,25 @@ class SumXPUKernel : public framework::OpKernel { } int r = xpu::sum_batch(dev_ctx.x_context(), ptrs.data(), out->data(), valid_count, out->numel()); - PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS, - platform::errors::Fatal("XPU sum kernel error!")); + if (r == xpu::Error_t::INVALID_PARAM) { + PADDLE_ENFORCE_EQ( + r, xpu::Error_t::SUCCESS, + platform::errors::InvalidArgument( + "XPU kernel error of SumOp, error message: INVALID_PARAM, " + "please check your input & output.")); + } else if (r == xpu::Error_t::RUNTIME_ERROR) { + PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS, + platform::errors::Unavailable( + "XPU kernel error of SumOp, error message: " + "RUNTIME_ERROR, please check whether Baidu " + "Kunlun Card is properly installed.")); + } else if (r == xpu::Error_t::NO_ENOUGH_WORKSPACE) { + PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS, + platform::errors::ResourceExhausted( + "XPU kernel error of SumOp, error " + "message: NO_ENOUGH_WORKSPACE, XPU " + "has no enough memory.")); + } } }; diff --git a/paddle/fluid/operators/trace_op.cc b/paddle/fluid/operators/trace_op.cc index e90cf2054f..1b9e7c10eb 100644 --- a/paddle/fluid/operators/trace_op.cc +++ b/paddle/fluid/operators/trace_op.cc @@ -41,7 +41,8 @@ class TraceOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_GE( x_dims.size(), 2, platform::errors::OutOfRange( - "trace requires an tensor of at least two dimensions")); + "Input's dim is out of range (expected at least 2, but got %ld).", + x_dims.size())); PADDLE_ENFORCE_LT( dim1_, x_dims.size(), platform::errors::OutOfRange( diff --git a/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py b/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py index 
6c4834b84f..b663f0ffc2 100644 --- a/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py +++ b/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py @@ -78,9 +78,9 @@ class TestLeakyReluDoubleGradCheck(unittest.TestCase): class TestELUDoubleGradCheck(unittest.TestCase): @prog_scope() def func(self, place): - shape = [2, 3, 6, 6] + shape = [2, 4, 4, 4] eps = 1e-6 - alpha = 1.1 + alpha = 0.2 dtype = np.float64 SEED = 0 -- GitLab