diff --git a/paddle/fluid/framework/parallel_executor.cc b/paddle/fluid/framework/parallel_executor.cc
index e7a2fadf4705ee97938f05e7218f65cf324d46a1..bfc3b7c70177b8b45ec328710fadc41e73d69889 100644
--- a/paddle/fluid/framework/parallel_executor.cc
+++ b/paddle/fluid/framework/parallel_executor.cc
@@ -167,7 +167,9 @@ class ParallelExecutorPrivate {
       nccl_id = new ncclUniqueId();
       PADDLE_ENFORCE_EQ(
           platform::dynload::ncclGetUniqueId(nccl_id), ncclSuccess,
-          platform::errors::PreconditionNotMet("Get NCCL unique ID failed."));
+          platform::errors::PreconditionNotMet(
+              "PaddlePaddle failed to get the NCCL unique ID. It may be due "
+              "to your system settings or an NCCL library error; please debug on NCCL."));
       VLOG(10) << "can't find nccl_id_var:" << var_name
                << ", nccl_id:" << nccl_id;
     }
diff --git a/paddle/fluid/inference/tensorrt/convert/pool2d_op.cc b/paddle/fluid/inference/tensorrt/convert/pool2d_op.cc
index ca5a1a77bd0e8ee8f35fecc838ad303601661d91..2ef8310b092feeb1cfd81fe96b0d86c3137d69b0 100644
--- a/paddle/fluid/inference/tensorrt/convert/pool2d_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/pool2d_op.cc
@@ -197,9 +197,9 @@ class Pool2dOpConverter : public OpConverter {
           engine_, Padding, *const_cast<nvinfer1::ITensor *>(input1), pre_pad,
           post_pad);
       PADDLE_ENFORCE_NOT_NULL(
-          pad_layer,
-          platform::errors::Fatal(
-              "pad layer in poolOp converter could not be created."));
+          pad_layer, platform::errors::Fatal(
+                         "Pad layer in poolOp converter could not be "
+                         "created. The pointer to pad layer is `NULL`."));
       input1 = pad_layer->getOutput(0);
     }
     auto *pool_layer = TRT_ENGINE_ADD_LAYER(
diff --git a/paddle/fluid/operators/activation_op.h b/paddle/fluid/operators/activation_op.h
index 43907744f956a62b1c21a95559104145a59060cd..f220fe878bf6c8e3baa43893df58ee9ee8f14ec8 100755
--- a/paddle/fluid/operators/activation_op.h
+++ b/paddle/fluid/operators/activation_op.h
@@ -1565,7 +1565,7 @@ struct ELUGradGradFunctor : public BaseActivationFunctor<T> {
     auto dout = framework::EigenVector<T>::Flatten(
         GET_DATA_SAFELY(dOut, "Output", "DOut", "ELUGradGrad"));
     dx.device(*d) = ddx * dout * static_cast<T>(alpha) * x.exp() *
-                    (x < static_cast<T>(0)).template cast<T>();
+                    (x <= static_cast<T>(0)).template cast<T>();
   }
 
   if (ddOut) {
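A note on the `activation_op.h` change above: ELU uses the exponential branch for x <= 0, so the second-order gradient term alpha * e^x * [x <= 0] must include the point x == 0; the old predicate `x < 0` dropped it. The following standalone sketch (plain C++, not Paddle's Eigen-based functor) shows the boundary behavior:

```cpp
#include <cmath>
#include <cstdio>

// Standalone sketch of the second-order ELU gradient term
//   d(dx) = ddx * dout * alpha * exp(x) * [x <= 0].
// With the old predicate (x < 0) the point x == 0 contributed nothing;
// with (x <= 0) it contributes alpha * exp(0) = alpha, matching the
// convention that x == 0 belongs to the exponential branch of ELU.
double elu_grad_grad(double x, double ddx, double dout, double alpha) {
  double indicator = (x <= 0.0) ? 1.0 : 0.0;
  return ddx * dout * alpha * std::exp(x) * indicator;
}

int main() {
  const double alpha = 1.0, ddx = 1.0, dout = 1.0;
  for (double x : {-1.0, 0.0, 1.0}) {
    std::printf("x=%+.1f -> %.6f\n", x, elu_grad_grad(x, ddx, dout, alpha));
  }
  return 0;
}
```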
diff --git a/paddle/fluid/operators/attention_lstm_op.cc b/paddle/fluid/operators/attention_lstm_op.cc
index 839b51851d55103a2fcfb74bfdb8b39a425035dd..593a1b861cb0d80cb71a2e5303720406d3d2a3a3 100644
--- a/paddle/fluid/operators/attention_lstm_op.cc
+++ b/paddle/fluid/operators/attention_lstm_op.cc
@@ -44,14 +44,18 @@ void AttentionLSTMOp::InferShape(framework::InferShapeContext* ctx) const {
   auto x_dims = ctx->GetInputDim("X");
   const int M = x_dims[1];
-  PADDLE_ENFORCE_EQ(x_dims.size(), 2, platform::errors::InvalidArgument(
-                                          "Input(X)'s rank must be 2."));
+  PADDLE_ENFORCE_EQ(x_dims.size(), 2,
+                    platform::errors::InvalidArgument(
+                        "Expected input(X)'s dimension is 2. But received %d.",
+                        x_dims.size()));
 
   auto w_dims = ctx->GetInputDim("LSTMWeight");
   const int D = w_dims[1] / 4;
   PADDLE_ENFORCE_EQ(
       w_dims.size(), 2,
-      platform::errors::InvalidArgument("Input(LSTMWeight)'s rank must be 2."));
+      platform::errors::InvalidArgument(
+          "Expected input(LSTMWeight)'s dimension is 2. But received %d.",
+          w_dims.size()));
   PADDLE_ENFORCE_EQ(
       w_dims[0], D + M,
       platform::errors::InvalidArgument(
@@ -77,8 +81,11 @@ void AttentionLSTMOp::InferShape(framework::InferShapeContext* ctx) const {
 
   if (ctx->HasInput("H0")) {
     auto h_dims = ctx->GetInputDim("H0");
-    PADDLE_ENFORCE_EQ(h_dims.size(), 2UL, platform::errors::InvalidArgument(
-                                              "Input(H0)'s rank must be 2."));
+    PADDLE_ENFORCE_EQ(
+        h_dims.size(), 2UL,
+        platform::errors::InvalidArgument(
+            "Expected input(H0)'s dimension is 2. But received %d.",
+            h_dims.size()));
     if (ctx->IsRuntime() ||
         (framework::product(c_dims) > 0 && framework::product(h_dims) > 0)) {
       PADDLE_ENFORCE_EQ(h_dims, c_dims,
@@ -94,7 +101,9 @@ void AttentionLSTMOp::InferShape(framework::InferShapeContext* ctx) const {
                         "Input(AttentionWeight)'s rank must be 2."));
   PADDLE_ENFORCE_EQ(atten_w_dims[0], M + D,
                     platform::errors::InvalidArgument(
-                        "AttentionWeight shapes must be (%d + %d) * 1.", M, D));
+                        "Expected `AttentionWeight` shape is [(%d + %d), 1]. "
+                        "But received shape = [%d, 1], shape[0] is not %d.",
+                        M, D, atten_w_dims[0], M + D));
   PADDLE_ENFORCE_EQ(atten_w_dims[1], 1,
                     platform::errors::InvalidArgument(
                         "AttentionWeight shapes must be (%d + %d) * 1.", M, D));
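For readers tracing the attention_lstm checks above: `D = w_dims[1] / 4` and `w_dims[0] == D + M` say that LSTMWeight packs the four gates along its second axis and consumes the concatenated [x, h] vector, and AttentionWeight maps that same [M + D]-wide vector to a single score. A minimal sketch of the arithmetic (the concrete values are illustrative, not taken from the op):

```cpp
#include <cassert>

// Shape arithmetic behind the attention_lstm InferShape checks:
// X is [N, M]; LSTMWeight is [M + D, 4 * D] (four gates packed on the
// second axis, hence D = w_dims[1] / 4); AttentionWeight maps the
// concatenated [x, h] vector of width M + D to one score, hence [M + D, 1].
int main() {
  const int M = 32;  // input feature width, x_dims[1]
  const int D = 16;  // hidden size
  const int w_dims[2] = {M + D, 4 * D};
  const int atten_w_dims[2] = {M + D, 1};

  assert(w_dims[1] / 4 == D);
  assert(w_dims[0] == D + M);        // enforced in InferShape
  assert(atten_w_dims[0] == M + D);  // enforced in InferShape
  assert(atten_w_dims[1] == 1);      // enforced in InferShape
  return 0;
}
```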
diff --git a/paddle/fluid/operators/bilinear_tensor_product_op.cc b/paddle/fluid/operators/bilinear_tensor_product_op.cc
index a2ba74dd7edc5dd260e269318410a3ad3efaf7ea..253a96004bd30a2d6c0da456c578e8dc4b522cca 100644
--- a/paddle/fluid/operators/bilinear_tensor_product_op.cc
+++ b/paddle/fluid/operators/bilinear_tensor_product_op.cc
@@ -50,9 +50,11 @@ class BilinearTensorProductOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(
         y_dims.size(), 2UL,
         platform::errors::InvalidArgument("The input(Y) must be a 2D Tensor."));
-    PADDLE_ENFORCE_EQ(weight_dims.size(), 3UL,
-                      platform::errors::InvalidArgument(
-                          "The input(Weight) must be a 3D tensor."));
+    PADDLE_ENFORCE_EQ(
+        weight_dims.size(), 3UL,
+        platform::errors::InvalidArgument("Expected the input(Weight) to be a "
+                                          "3D tensor. But received a %dD tensor.",
+                                          weight_dims.size()));
     if (ctx->IsRuntime() || (x_dims[0] > 0 && y_dims[0] > 0)) {
       PADDLE_ENFORCE_EQ(
           x_dims[0], y_dims[0],
diff --git a/paddle/fluid/operators/controlflow/conditional_block_op.cc b/paddle/fluid/operators/controlflow/conditional_block_op.cc
index 2713b7fd59a78b2a91b76b6dddaada769a2dc86c..eeb410eba2b4c21389efbb5196944d40673aa840 100644
--- a/paddle/fluid/operators/controlflow/conditional_block_op.cc
+++ b/paddle/fluid/operators/controlflow/conditional_block_op.cc
@@ -57,8 +57,10 @@ class ConditionalBlockOp : public ConditionalOp {
     if (need_run) {
       auto *scope_var = scope.FindVar(Output(ConditionalOp::kScope));
       PADDLE_ENFORCE_NOT_NULL(
-          scope_var, platform::errors::PreconditionNotMet(
-                         "Scope must be set in conditional_block_op."));
+          scope_var,
+          platform::errors::PreconditionNotMet(
+              "Expect the Scope variable to be set in conditional_block_op, "
+              "but got a null Scope variable. Please set the Scope variable."));
       auto *scopes = scope_var->GetMutable<std::vector<framework::Scope *>>();
       scopes->resize(1);
       scopes->front() = &scope.NewScope();
@@ -119,12 +121,16 @@ class ConditionalBlockGradOp : public ConditionalOp {
 
     auto *scope_var = scope.FindVar(Input(ConditionalOp::kScope));
     PADDLE_ENFORCE_NOT_NULL(
-        scope_var, platform::errors::PreconditionNotMet(
-                       "Scope must be set in conditional block op."));
+        scope_var,
+        platform::errors::PreconditionNotMet(
+            "Expect the Scope variable to be set in conditional_block_op, "
+            "but got a null Scope variable. Please set the Scope variable."));
     auto &scopes = scope_var->Get<std::vector<framework::Scope *>>();
-    PADDLE_ENFORCE_GT(scopes.size(), 0,
-                      platform::errors::InvalidArgument(
-                          "Scope must be set in conditional block op."));
+    PADDLE_ENFORCE_GT(
+        scopes.size(), 0,
+        platform::errors::InvalidArgument(
+            "Expect the Scope variable to contain at least 1 scope, "
+            "but got: %d.", scopes.size()));
     framework::Scope &cur_scope = *scopes[0];
 
     framework::Executor exec(dev_place);
diff --git a/paddle/fluid/operators/cvm_op.cc b/paddle/fluid/operators/cvm_op.cc
index a1a8744c323ca1cd783e0adb83cc260ffe8ce978..be7d4780f83ae5f3dbc1442353e95e85666d77b9 100644
--- a/paddle/fluid/operators/cvm_op.cc
+++ b/paddle/fluid/operators/cvm_op.cc
@@ -30,8 +30,10 @@ class CVMOp : public framework::OperatorWithKernel {
     OP_INOUT_CHECK(ctx->HasOutput("Y"), "Output", "Y", "CVM");
 
     auto x_dims = ctx->GetInputDim("X");
-    PADDLE_ENFORCE_EQ(x_dims.size(), 2UL, platform::errors::InvalidArgument(
-                                              "Input(X)'s rank should be 2."));
+    PADDLE_ENFORCE_EQ(
+        x_dims.size(), 2UL,
+        platform::errors::InvalidArgument(
+            "Input(X)'s rank should be 2, but got %d.", x_dims.size()));
 
     if (ctx->Attrs().Get<bool>("use_cvm")) {
       ctx->SetOutputDim("Y", {x_dims[0], x_dims[1]});
@@ -68,26 +70,31 @@ class CVMGradientOp : public framework::OperatorWithKernel {
     auto x_dims = ctx->GetInputDim("X");
     auto cvm_dims = ctx->GetInputDim("CVM");
     auto dy_dims = ctx->GetInputDim(framework::GradVarName("Y"));
-    PADDLE_ENFORCE_EQ(x_dims.size(), 2, platform::errors::InvalidArgument(
-                                            "Input(X)'s rank should be 2."));
+    PADDLE_ENFORCE_EQ(
+        x_dims.size(), 2,
+        platform::errors::InvalidArgument(
+            "Expect Input(X)'s rank == 2, but got %d.", x_dims.size()));
     PADDLE_ENFORCE_EQ(
         dy_dims.size(), 2,
-        platform::errors::InvalidArgument("Input(Y@Grad)'s rank should be 2."));
+        platform::errors::InvalidArgument(
+            "Expect Input(Y@Grad)'s rank == 2, but got %d.", dy_dims.size()));
     PADDLE_ENFORCE_EQ(
         cvm_dims.size(), 2,
-        platform::errors::InvalidArgument("Input(CVM)'s rank should be 2."));
+        platform::errors::InvalidArgument(
+            "Expect Input(CVM)'s rank == 2, but got %d.", cvm_dims.size()));
 
     PADDLE_ENFORCE_EQ(
         x_dims[0], dy_dims[0],
         platform::errors::InvalidArgument(
             "The 1st dimension of Input(X) and Input(Y@Grad) should "
-            "be equal."));
+            "be equal, but X's is %d and Y@Grad's is %d.",
+            x_dims[0], dy_dims[0]));
 
     PADDLE_ENFORCE_EQ(
         cvm_dims[1], 2,
         platform::errors::InvalidArgument(
             "When Attr(soft_label) == false, the 2nd dimension of "
-            "Input(CVM) should be 2."));
+            "Input(CVM) should be 2, but got %d.", cvm_dims[1]));
     ctx->SetOutputDim(framework::GradVarName("X"), x_dims);
     ctx->ShareLoD("X", framework::GradVarName("X"));
   }
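The CVM gradient hunk originally shipped with `cvm_dims[1]` inside the format string, leaving its `%d` with no bound argument (and two of its messages named Input(X) while checking Input(Y@Grad) and Input(CVM)); the version above pairs each placeholder with a trailing argument. The same rule holds for any printf-style formatter, as this standalone snippet illustrates:

```cpp
#include <cstdio>

// Standalone illustration (plain printf, not Paddle's string formatter):
// every conversion specifier in the format string must be paired with a
// trailing argument. The broken pattern left the value's *name* inside
// the string, so "%d" had nothing to consume.
int main() {
  int second_dim = 3;
  // Broken: "%d" has no argument; the variable name ended up in the text.
  // std::printf("Input(CVM)'s 2nd dim should be 2, but got %d cvm_dims[1]\n");
  // Fixed: the placeholder is bound to the actual value.
  std::printf("Input(CVM)'s 2nd dim should be 2, but got %d\n", second_dim);
  return 0;
}
```

Compilers catch the broken form with `-Wformat`, but only for literal format strings; messages built at runtime need the discipline by hand.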
"Unknown storage order: %d", data_layout)); + "Unknown storage order: %d, please use NCHW or NHWC", data_layout)); } } }; @@ -701,7 +701,8 @@ class DataNormGradKernel } default: PADDLE_THROW(platform::errors::InvalidArgument( - "Unknown storage order: %s", data_layout_str)); + "Unknown storage order: %s, please use NCHW or NHWC", + data_layout_str)); } } }; diff --git a/paddle/fluid/operators/detection/distribute_fpn_proposals_op.h b/paddle/fluid/operators/detection/distribute_fpn_proposals_op.h index 79498f01536d2fb2616921a2ef1ffa04f13fae64..465435637cff659082570f1ef9fcf1cb91983321 100644 --- a/paddle/fluid/operators/detection/distribute_fpn_proposals_op.h +++ b/paddle/fluid/operators/detection/distribute_fpn_proposals_op.h @@ -84,7 +84,8 @@ class DistributeFpnProposalsOpKernel : public framework::OpKernel { PADDLE_ENFORCE_EQ(fpn_rois->lod().size(), 1UL, platform::errors::InvalidArgument( "DistributeFpnProposalsOp needs LoD " - "with one level.")); + "with one level. But received level is %d", + fpn_rois->lod().size())); } std::vector fpn_rois_lod; diff --git a/paddle/fluid/operators/detection/target_assign_op.cc b/paddle/fluid/operators/detection/target_assign_op.cc index 1fda795d357deb7d77ae2cb017be7099ab79a511..afd50e57e76f22a9f26a147520e3e8de93c8755e 100644 --- a/paddle/fluid/operators/detection/target_assign_op.cc +++ b/paddle/fluid/operators/detection/target_assign_op.cc @@ -43,7 +43,9 @@ class TargetAssignOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ( in_dims.size(), 3, - platform::errors::InvalidArgument("The rank of Input(X) must be 3.")); + platform::errors::InvalidArgument( + "Expected the rank of Input(X) is 3. But received %d.", + in_dims.size())); PADDLE_ENFORCE_EQ(mi_dims.size(), 2, platform::errors::InvalidArgument( "The rank of Input(MatchIndices) must be 2.")); diff --git a/paddle/fluid/operators/fused/fusion_seqpool_cvm_concat_op.cc b/paddle/fluid/operators/fused/fusion_seqpool_cvm_concat_op.cc index ecb7db46a9d8159b8da124e941cc69522f64cd57..123c4c885ead815bf7f04b55d5696b875e42d348 100644 --- a/paddle/fluid/operators/fused/fusion_seqpool_cvm_concat_op.cc +++ b/paddle/fluid/operators/fused/fusion_seqpool_cvm_concat_op.cc @@ -31,15 +31,15 @@ void FusionSeqPoolCVMConcatOp::InferShape( paddle::platform::errors::InvalidArgument( "Output(Out) of FusionSeqPoolCVMConcatOp should not be null.")); int axis = ctx->Attrs().Get("axis"); - PADDLE_ENFORCE_EQ( - axis, 1, - paddle::platform::errors::InvalidArgument( - "FusionSeqPoolCVMConcatOp only supports concat axis=1 yet.")); + PADDLE_ENFORCE_EQ(axis, 1, paddle::platform::errors::InvalidArgument( + "FusionSeqPoolCVMConcatOp only supports " + "concat axis=1 yet, but received %d.", + axis)); bool use_cvm = ctx->Attrs().Get("use_cvm"); - PADDLE_ENFORCE_EQ( - use_cvm, true, - paddle::platform::errors::InvalidArgument( - "FusionSeqPoolCVMConcatOp only supports use_cvm is true yet.")); + PADDLE_ENFORCE_EQ(use_cvm, true, paddle::platform::errors::InvalidArgument( + "FusionSeqPoolCVMConcatOp only supports " + "use_cvm is true yet, but received %d.", + use_cvm)); auto ins_dims = ctx->GetInputsDim("X"); const size_t n = ins_dims.size(); diff --git a/paddle/fluid/operators/matmul_op_xpu.cc b/paddle/fluid/operators/matmul_op_xpu.cc index 103ac9add18876ec078d765bfef3b3fbce3a68af..14bef89a71b8b4ea96f15b5bf4664456045ccb90 100644 --- a/paddle/fluid/operators/matmul_op_xpu.cc +++ b/paddle/fluid/operators/matmul_op_xpu.cc @@ -127,10 +127,18 @@ class MatMulXPUKernel : public framework::OpKernel { PADDLE_ENFORCE_EQ( mat_dim_a.width_, 
diff --git a/paddle/fluid/operators/mkldnn/batch_norm_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/batch_norm_mkldnn_op.cc
index 622d6685dfa718b1220ac4afbf67982b5acce188..e53e052a89c6221e21b536fa8567ae013f5007be 100644
--- a/paddle/fluid/operators/mkldnn/batch_norm_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/batch_norm_mkldnn_op.cc
@@ -54,9 +54,14 @@ class BatchNormMKLDNNHandler
     const float epsilon = ctx.Attr<float>("epsilon");
     const bool fuse_with_relu = ctx.Attr<bool>("fuse_with_relu");
 
+    std::vector<std::string> DataLayout_error_msg = {"kNHWC", "kNCHW",
+                                                     "kAnyLayout", "kMKLDNN"};
     PADDLE_ENFORCE_EQ(
         x->layout(), DataLayout::kMKLDNN,
-        platform::errors::InvalidArgument("Wrong layout set for X tensor"));
+        platform::errors::InvalidArgument(
+            "Wrong layout set for X tensor. Expected layout is `kMKLDNN`, "
+            "but received %s.",
+            DataLayout_error_msg[static_cast<int>(x->layout())]));
     PADDLE_ENFORCE_NE(
         x->format(), MKLDNNMemoryFormat::undef,
         platform::errors::InvalidArgument("Wrong format set for X tensor"));
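Two things matter in the batch-norm hunk: the string table must follow the DataLayout enum order, and the lookup must use the *received* layout `x->layout()`; indexing with the expected constant `DataLayout::kMKLDNN`, as the patch first did, always prints "kMKLDNN" no matter what was received. A standalone sketch of the enum-to-name lookup; the enum values here are assumptions mirroring the table above, not Paddle's actual definition:

```cpp
#include <array>
#include <cstdio>

// Enum -> name lookup used to print a readable layout in the error
// message. The table order must mirror the enum's numeric order, and
// the index must come from the *received* layout, not the expected one.
enum class DataLayout : int { kNHWC = 0, kNCHW = 1, kAnyLayout = 2, kMKLDNN = 3 };

const char* LayoutName(DataLayout layout) {
  static const std::array<const char*, 4> names = {"kNHWC", "kNCHW",
                                                   "kAnyLayout", "kMKLDNN"};
  return names[static_cast<int>(layout)];
}

int main() {
  DataLayout received = DataLayout::kNCHW;
  if (received != DataLayout::kMKLDNN) {
    std::printf("Wrong layout set for X tensor. Expected kMKLDNN, received %s\n",
                LayoutName(received));
  }
  return 0;
}
```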
Expected layout is `kMKLDNN`, " + "But received %s.", + DataLayout_error_msg[static_cast(DataLayout::kMKLDNN)])); PADDLE_ENFORCE_NE( x->format(), MKLDNNMemoryFormat::undef, platform::errors::InvalidArgument("Wrong format set for X tensor")); diff --git a/paddle/fluid/operators/mkldnn/matmul_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/matmul_mkldnn_op.cc index fddc4b4b2e5596c2f5fa6167869deb7d7cacf600..fb856d97403a4d2d982c4f37537ef6d28d89f6b2 100644 --- a/paddle/fluid/operators/mkldnn/matmul_mkldnn_op.cc +++ b/paddle/fluid/operators/mkldnn/matmul_mkldnn_op.cc @@ -374,9 +374,12 @@ class DNNLMatMulKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { if (ctx.HasAttr("head_number")) { - PADDLE_ENFORCE_EQ(ctx.Attr("head_number"), 1, - platform::errors::Unimplemented( - "DNNL matmul doesn't support multiple heads.")); + PADDLE_ENFORCE_EQ( + ctx.Attr("head_number"), 1, + platform::errors::Unimplemented( + "DNNL matmul doesn't support multiple heads. Expected " + "head_number=1. But received `head_number` is %d", + ctx.Attr("head_number"))); } platform::MKLDNNDeviceContext::tls().log_lib_version(); ExecuteMatMul(ctx); diff --git a/paddle/fluid/operators/nll_loss_op.cc b/paddle/fluid/operators/nll_loss_op.cc index f0b5f4a466a0049c53d51d8610cf115d8bfe0295..263a73451c909422c7c7c5b57f707374e186c31f 100644 --- a/paddle/fluid/operators/nll_loss_op.cc +++ b/paddle/fluid/operators/nll_loss_op.cc @@ -53,10 +53,14 @@ class NLLLossOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ(w_dims.size(), 1, platform::errors::InvalidArgument( "Input(Weight) should be a 1D tensor.")); - PADDLE_ENFORCE_EQ(x_dims[1], w_dims[0], - platform::errors::InvalidArgument( - "Input(Weight) Tensor's size should match " - "to the the total number of classes.")); + PADDLE_ENFORCE_EQ( + x_dims[1], w_dims[0], + platform::errors::InvalidArgument( + "Expected input tensor Weight's size should equal " + "to the first dimension of the input tensor X. But received " + "Weight's " + "size is %d, the first dimension of input X is %d", + w_dims[0], x_dims[1])); } } if (x_dims.size() == 2) { @@ -68,7 +72,8 @@ class NLLLossOp : public framework::OperatorWithKernel { } else if (x_dims.size() == 4) { PADDLE_ENFORCE_EQ(label_dims.size(), 3, platform::errors::InvalidArgument( - "The tensor rank of Input(Label) must be 3.")); + "Expected Input(Lable) dimensions=3, received %d.", + label_dims.size())); auto input0 = x_dims[0]; auto input2 = x_dims[2]; auto input3 = x_dims[3]; diff --git a/paddle/fluid/operators/optimizers/ftrl_op.cc b/paddle/fluid/operators/optimizers/ftrl_op.cc index 3bdafbb96d5c40be651d6cad68806e14a214a28d..a75be6e580dcd7b1f39c313382a9759986e3a1da 100644 --- a/paddle/fluid/operators/optimizers/ftrl_op.cc +++ b/paddle/fluid/operators/optimizers/ftrl_op.cc @@ -42,7 +42,9 @@ class FTRLOp : public framework::OperatorWithKernel { auto param_dim = ctx->GetInputDim("Param"); PADDLE_ENFORCE_EQ(param_dim, ctx->GetInputDim("Grad"), platform::errors::InvalidArgument( - "Two input of FTRL Op's dimension must be same.")); + "Two input of FTRL Op's dimension must be same, but " + "param_dim is %d, Grad is %d", + param_dim, ctx->GetInputDim("Grad"))); auto lr_dim = ctx->GetInputDim("LearningRate"); PADDLE_ENFORCE_NE(framework::product(lr_dim), 0, @@ -51,9 +53,10 @@ class FTRLOp : public framework::OperatorWithKernel { "been initialized. 
diff --git a/paddle/fluid/operators/prroi_pool_op.h b/paddle/fluid/operators/prroi_pool_op.h
index 5ec846c147373911281b04c79dc32a43171ddca0..11ecff8845216df6c6e07f5a7ccc2212d742167f 100644
--- a/paddle/fluid/operators/prroi_pool_op.h
+++ b/paddle/fluid/operators/prroi_pool_op.h
@@ -293,19 +293,24 @@ class CPUPRROIPoolOpKernel : public framework::OpKernel<T> {
     } else {
       PADDLE_ENFORCE_EQ(rois->lod().empty(), false,
                         platform::errors::InvalidArgument(
-                            "the lod of Input ROIs should not be empty when "
+                            "The lod of Input ROIs should not be empty when "
                             "BatchRoINums is None!"));
       auto rois_lod = rois->lod().back();
       int rois_batch_size = rois_lod.size() - 1;
-      PADDLE_ENFORCE_EQ(
-          rois_batch_size, batch_size,
-          platform::errors::InvalidArgument("the rois_batch_size and input(X) "
-                                            "batch_size should be the same."));
+      PADDLE_ENFORCE_EQ(rois_batch_size, batch_size,
+                        platform::errors::InvalidArgument(
+                            "The rois_batch_size and input(X)'s "
+                            "batch_size should be the same, but received "
+                            "rois_batch_size: %d and batch_size: %d.",
+                            rois_batch_size, batch_size));
       int rois_num_with_lod = rois_lod[rois_batch_size];
       PADDLE_ENFORCE_EQ(
           rois_num_with_lod, rois_num,
-          platform::errors::InvalidArgument(
-              "the rois_num from input and lod must be the same"));
+          platform::errors::InvalidArgument("The rois_num from input should "
+                                            "be equal to the rois_num from "
+                                            "lod, but received rois_num from "
+                                            "input: %d and from lod: %d.",
+                                            rois_num_with_lod, rois_num));
 
       // calculate batch id index for each roi according to LoD
       for (int n = 0; n < rois_batch_size; ++n) {
diff --git a/paddle/fluid/operators/py_func_op.cc b/paddle/fluid/operators/py_func_op.cc
index 7749903e5f36f1d93f7e111da4587d6828d445a4..b3622870d070e635acacf82e1be9798ffd7e38e5 100644
--- a/paddle/fluid/operators/py_func_op.cc
+++ b/paddle/fluid/operators/py_func_op.cc
@@ -112,7 +112,9 @@ static void CallPythonFunc(py::object *callable,
       out->ShareDataWith(*py_out_tensor);
     } catch (py::cast_error &) {
       PADDLE_THROW(platform::errors::InvalidArgument(
-          "The %d-th output must be LoDTensor.", i));
+          "py::cast to LoDTensor failed. The %d-th output is expected to be "
+          "a LoDTensor.",
+          i));
     }
   }
 }
diff --git a/paddle/fluid/operators/reader/blocking_queue.h b/paddle/fluid/operators/reader/blocking_queue.h
index 4add9afdfd45b171edd8280b50e1ec13ed64637b..8929da20b53c281d3c1602f68d88ce45acc07da8 100644
--- a/paddle/fluid/operators/reader/blocking_queue.h
+++ b/paddle/fluid/operators/reader/blocking_queue.h
@@ -54,7 +54,9 @@ class BlockingQueue {
       PADDLE_ENFORCE_LT(
           queue_.size(), capacity_,
           platform::errors::PermissionDenied(
-              "The queue size cannot exceed the set queue capacity."));
+              "The queue size cannot exceed the set queue capacity. Expected "
+              "queue size is less than %d, but received %d.",
+              capacity_, queue_.size()));
       queue_.push_back(elem);
       receive_cv_.notify_one();
       return true;
@@ -73,7 +75,9 @@ class BlockingQueue {
      PADDLE_ENFORCE_LT(
           queue_.size(), capacity_,
           platform::errors::PermissionDenied(
-              "The queue size cannot exceed the set queue capacity."));
+              "The queue size cannot exceed the set queue capacity. Expected "
+              "queue size is less than %d, but received %d.",
+              capacity_, queue_.size()));
       queue_.emplace_back(std::move(elem));
       receive_cv_.notify_one();
       return true;
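The BlockingQueue hunks only touch the message, but the invariant they report is worth making concrete: Send waits until size() < capacity, so the enforce can only fire if the wait logic is broken. A minimal standalone bounded queue (not Paddle's reader queue, which also supports closing) showing the same structure:

```cpp
#include <cassert>
#include <condition_variable>
#include <cstdio>
#include <deque>
#include <mutex>
#include <thread>

// Minimal bounded blocking queue: Send blocks while the queue is full,
// so on the push path the invariant size() < capacity must hold -- the
// PADDLE_ENFORCE_LT in the hunk above asserts exactly this after the
// wait returns.
template <typename T>
class BoundedQueue {
 public:
  explicit BoundedQueue(size_t capacity) : capacity_(capacity) {}

  void Send(T elem) {
    std::unique_lock<std::mutex> lock(mu_);
    send_cv_.wait(lock, [this] { return queue_.size() < capacity_; });
    assert(queue_.size() < capacity_);  // the enforced invariant
    queue_.push_back(std::move(elem));
    receive_cv_.notify_one();
  }

  T Receive() {
    std::unique_lock<std::mutex> lock(mu_);
    receive_cv_.wait(lock, [this] { return !queue_.empty(); });
    T elem = std::move(queue_.front());
    queue_.pop_front();
    send_cv_.notify_one();
    return elem;
  }

 private:
  size_t capacity_;
  std::deque<T> queue_;
  std::mutex mu_;
  std::condition_variable send_cv_, receive_cv_;
};

int main() {
  BoundedQueue<int> q(2);
  std::thread producer([&q] {
    for (int i = 0; i < 5; ++i) q.Send(i);  // blocks when the queue is full
  });
  for (int i = 0; i < 5; ++i) std::printf("got %d\n", q.Receive());
  producer.join();
  return 0;
}
```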
diff --git a/paddle/fluid/operators/reader/read_op.cc b/paddle/fluid/operators/reader/read_op.cc
index d7f81dc24cced8c045223d3f62ea8055d1821aa5..9086291e17db8912b377ba4fac2efe6c099ef705 100644
--- a/paddle/fluid/operators/reader/read_op.cc
+++ b/paddle/fluid/operators/reader/read_op.cc
@@ -122,10 +122,13 @@ class ReadOp : public framework::OperatorBase {
     const std::vector<framework::proto::VarType::Type>& var_types =
         reader->VarTypes();
     const std::vector<bool>& need_check_feed = reader->NeedCheckFeed();
-    PADDLE_ENFORCE_EQ(out_arg_names.size(), need_check_feed.size(),
-                      platform::errors::InvalidArgument(
-                          "output size of read_op and the number of fed "
-                          "variables of reader do not match"));
+    PADDLE_ENFORCE_EQ(
+        out_arg_names.size(), need_check_feed.size(),
+        platform::errors::InvalidArgument(
+            "The output size of read_op and the number of fed "
+            "variables of reader do not match. The output size of read_op "
+            "is %d, the number of fed variables of reader is %d.",
+            out_arg_names.size(), need_check_feed.size()));
 
     for (size_t i = 0; i < out_arg_names.size(); ++i) {
       auto* out =
diff --git a/paddle/fluid/operators/recurrent_op.cc b/paddle/fluid/operators/recurrent_op.cc
index 35f52ffa522f4c497a493b7e93736f9f522beb19..231fb38da272a81ad65efa6d86a51e7182076807 100644
--- a/paddle/fluid/operators/recurrent_op.cc
+++ b/paddle/fluid/operators/recurrent_op.cc
@@ -161,7 +161,9 @@ int64_t RecurrentBase::GetSequenceLength(const framework::Scope &scope) const {
   }
   PADDLE_ENFORCE_GE(seq_len, 0,
                     platform::errors::InvalidArgument(
-                        "RecurrentOp gets invalid sequence length."));
+                        "RecurrentOp gets invalid sequence length. Expected "
+                        "seq_len >= 0, but received seq_len = %d.",
+                        seq_len));
   return seq_len;
 }
 
diff --git a/paddle/fluid/operators/sum_op_xpu.cc b/paddle/fluid/operators/sum_op_xpu.cc
index f15910fd4f65b50f55b884bf90d5a84bfe3bb601..264cc4e2cf794107bdcd717e963ea7d48c740020 100644
--- a/paddle/fluid/operators/sum_op_xpu.cc
+++ b/paddle/fluid/operators/sum_op_xpu.cc
@@ -50,8 +50,30 @@ class SumXPUKernel : public framework::OpKernel<T> {
     }
     int r = xpu::sum_batch(dev_ctx.x_context(), ptrs.data(), out->data<T>(),
                            valid_count, out->numel());
-    PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS,
-                      platform::errors::Fatal("XPU sum kernel error!"));
+    if (r == xpu::Error_t::INVALID_PARAM) {
+      PADDLE_ENFORCE_EQ(
+          r, xpu::Error_t::SUCCESS,
+          platform::errors::InvalidArgument(
+              "XPU kernel error of SumOp, error message: INVALID_PARAM, "
+              "please check your input & output."));
+    } else if (r == xpu::Error_t::RUNTIME_ERROR) {
+      PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS,
+                        platform::errors::Unavailable(
+                            "XPU kernel error of SumOp, error message: "
+                            "RUNTIME_ERROR, please check whether the Baidu "
+                            "Kunlun card is properly installed."));
+    } else if (r == xpu::Error_t::NO_ENOUGH_WORKSPACE) {
+      PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS,
+                        platform::errors::ResourceExhausted(
+                            "XPU kernel error of SumOp, error "
+                            "message: NO_ENOUGH_WORKSPACE, XPU "
+                            "does not have enough memory."));
+    } else {
+      PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS,
+                        platform::errors::Fatal(
+                            "XPU kernel error of SumOp, unknown error code "
+                            "%d!", r));
+    }
   }
 };
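The sum_op_xpu change fans a single device return code out into typed errors (InvalidArgument, Unavailable, ResourceExhausted), which is what makes the message actionable; the final else added above keeps unknown codes from passing silently, which the original if/else chain would have allowed. A standalone sketch of the same pattern with plain exceptions; the enum here is illustrative, not the real XPU API:

```cpp
#include <cstdio>
#include <stdexcept>
#include <string>

// One integer status from a device library is translated into a typed,
// actionable error; a default path guarantees unknown codes still fail.
enum class XpuStatus { SUCCESS, INVALID_PARAM, RUNTIME_ERROR, NO_ENOUGH_WORKSPACE };

void CheckXpuStatus(XpuStatus r, const std::string& op) {
  switch (r) {
    case XpuStatus::SUCCESS:
      return;
    case XpuStatus::INVALID_PARAM:
      throw std::invalid_argument("XPU kernel error of " + op +
                                  ": INVALID_PARAM, please check your input & output.");
    case XpuStatus::RUNTIME_ERROR:
      throw std::runtime_error("XPU kernel error of " + op +
                               ": RUNTIME_ERROR, please check whether the card is properly installed.");
    case XpuStatus::NO_ENOUGH_WORKSPACE:
      throw std::runtime_error("XPU kernel error of " + op +
                               ": NO_ENOUGH_WORKSPACE, not enough device memory.");
  }
  throw std::runtime_error("XPU kernel error of " + op + ": unknown status.");
}

int main() {
  try {
    CheckXpuStatus(XpuStatus::NO_ENOUGH_WORKSPACE, "SumOp");
  } catch (const std::exception& e) {
    std::printf("%s\n", e.what());
  }
  return 0;
}
```

A switch avoids repeating `PADDLE_ENFORCE_EQ(r, SUCCESS, ...)` inside branches where `r` is already known not to be SUCCESS, though the diff keeps the enforce form for consistency with the rest of the codebase.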
diff --git a/paddle/fluid/operators/trace_op.cc b/paddle/fluid/operators/trace_op.cc
index e90cf2054f72d8bb59c8fa13a3c3f6502ae14ba2..1b9e7c10eb27ae647864548baa80314f0ca1d5e4 100644
--- a/paddle/fluid/operators/trace_op.cc
+++ b/paddle/fluid/operators/trace_op.cc
@@ -41,7 +41,8 @@ class TraceOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_GE(
         x_dims.size(), 2,
         platform::errors::OutOfRange(
-            "trace requires an tensor of at least two dimensions"));
+            "Input's dim is out of range (expected at least 2, but got %ld).",
+            x_dims.size()));
     PADDLE_ENFORCE_LT(
         dim1_, x_dims.size(),
         platform::errors::OutOfRange(
diff --git a/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py b/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py
index 6c4834b84f91f68f51b65bfc831775966732b36c..b663f0ffc2d8ad76876ca1edcbc33dcdd79d0ee9 100644
--- a/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py
+++ b/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py
@@ -78,9 +78,9 @@ class TestLeakyReluDoubleGradCheck(unittest.TestCase):
 class TestELUDoubleGradCheck(unittest.TestCase):
     @prog_scope()
     def func(self, place):
-        shape = [2, 3, 6, 6]
+        shape = [2, 4, 4, 4]
         eps = 1e-6
-        alpha = 1.1
+        alpha = 0.2
         dtype = np.float64
         SEED = 0