diff --git a/paddle/fluid/inference/tensorrt/convert/pool2d_op.cc b/paddle/fluid/inference/tensorrt/convert/pool2d_op.cc
index ca5a1a77bd0e8ee8f35fecc838ad303601661d91..2ef8310b092feeb1cfd81fe96b0d86c3137d69b0 100644
--- a/paddle/fluid/inference/tensorrt/convert/pool2d_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/pool2d_op.cc
@@ -197,9 +197,9 @@ class Pool2dOpConverter : public OpConverter {
         engine_, Padding, *const_cast<nvinfer1::ITensor *>(input1), pre_pad,
         post_pad);
     PADDLE_ENFORCE_NOT_NULL(
-        pad_layer,
-        platform::errors::Fatal(
-            "pad layer in poolOp converter could not be created."));
+        pad_layer, platform::errors::Fatal(
+                       "Pad layer in poolOp converter could not be "
+                       "created. The pointer to pad layer is `NULL`."));
     input1 = pad_layer->getOutput(0);
   }
   auto *pool_layer = TRT_ENGINE_ADD_LAYER(
diff --git a/paddle/fluid/operators/attention_lstm_op.cc b/paddle/fluid/operators/attention_lstm_op.cc
index 839b51851d55103a2fcfb74bfdb8b39a425035dd..593a1b861cb0d80cb71a2e5303720406d3d2a3a3 100644
--- a/paddle/fluid/operators/attention_lstm_op.cc
+++ b/paddle/fluid/operators/attention_lstm_op.cc
@@ -44,14 +44,18 @@ void AttentionLSTMOp::InferShape(framework::InferShapeContext* ctx) const {
   auto x_dims = ctx->GetInputDim("X");
   const int M = x_dims[1];
-  PADDLE_ENFORCE_EQ(x_dims.size(), 2, platform::errors::InvalidArgument(
-                                          "Input(X)'s rank must be 2."));
+  PADDLE_ENFORCE_EQ(x_dims.size(), 2,
+                    platform::errors::InvalidArgument(
+                        "Expected input(X)'s dimension is 2. But received %d.",
+                        x_dims.size()));
 
   auto w_dims = ctx->GetInputDim("LSTMWeight");
   const int D = w_dims[1] / 4;
   PADDLE_ENFORCE_EQ(
       w_dims.size(), 2,
-      platform::errors::InvalidArgument("Input(LSTMWeight)'s rank must be 2."));
+      platform::errors::InvalidArgument(
+          "Expected input(LSTMWeight)'s dimension is 2.But received %d.",
+          w_dims.size()));
   PADDLE_ENFORCE_EQ(
       w_dims[0], D + M,
       platform::errors::InvalidArgument(
@@ -77,8 +81,11 @@ void AttentionLSTMOp::InferShape(framework::InferShapeContext* ctx) const {
   if (ctx->HasInput("H0")) {
     auto h_dims = ctx->GetInputDim("H0");
-    PADDLE_ENFORCE_EQ(h_dims.size(), 2UL, platform::errors::InvalidArgument(
-                                              "Input(H0)'s rank must be 2."));
+    PADDLE_ENFORCE_EQ(
+        h_dims.size(), 2UL,
+        platform::errors::InvalidArgument(
+            "Expected input(H0)'s dimension is 2. But received %d.",
+            h_dims.size()));
     if (ctx->IsRuntime() ||
         (framework::product(c_dims) > 0 && framework::product(h_dims) > 0)) {
       PADDLE_ENFORCE_EQ(h_dims, c_dims,
@@ -94,7 +101,9 @@ void AttentionLSTMOp::InferShape(framework::InferShapeContext* ctx) const {
                         "Input(AttentionWeight)'s rank must be 2."));
   PADDLE_ENFORCE_EQ(atten_w_dims[0], M + D,
                     platform::errors::InvalidArgument(
-                        "AttentionWeight shapes must be (%d + %d) * 1.", M, D));
+                        "Expected `AttentionWeight` shape is [(%d + %d), 1]. "
+                        "But received shape = [%d, 1], shape[0] is not %d.",
+                        M, D, atten_w_dims[0], M + D));
   PADDLE_ENFORCE_EQ(atten_w_dims[1], 1,
                     platform::errors::InvalidArgument(
                         "AttentionWeight shapes must be (%d + %d) * 1.", M, D));
diff --git a/paddle/fluid/operators/bilinear_tensor_product_op.cc b/paddle/fluid/operators/bilinear_tensor_product_op.cc
index a2ba74dd7edc5dd260e269318410a3ad3efaf7ea..253a96004bd30a2d6c0da456c578e8dc4b522cca 100644
--- a/paddle/fluid/operators/bilinear_tensor_product_op.cc
+++ b/paddle/fluid/operators/bilinear_tensor_product_op.cc
@@ -50,9 +50,11 @@ class BilinearTensorProductOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(
         y_dims.size(), 2UL,
         platform::errors::InvalidArgument("The input(Y) must be a 2D Tensor."));
-    PADDLE_ENFORCE_EQ(weight_dims.size(), 3UL,
-                      platform::errors::InvalidArgument(
-                          "The input(Weight) must be a 3D tensor."));
+    PADDLE_ENFORCE_EQ(
+        weight_dims.size(), 3UL,
+        platform::errors::InvalidArgument("Expected the input(Weight) is a 3D "
+                                          "tensor. But received %dD tensor.",
+                                          weight_dims.size()));
     if (ctx->IsRuntime() || (x_dims[0] > 0 && y_dims[0] > 0)) {
       PADDLE_ENFORCE_EQ(
           x_dims[0], y_dims[0],
diff --git a/paddle/fluid/operators/detection/target_assign_op.cc b/paddle/fluid/operators/detection/target_assign_op.cc
index 1fda795d357deb7d77ae2cb017be7099ab79a511..afd50e57e76f22a9f26a147520e3e8de93c8755e 100644
--- a/paddle/fluid/operators/detection/target_assign_op.cc
+++ b/paddle/fluid/operators/detection/target_assign_op.cc
@@ -43,7 +43,9 @@ class TargetAssignOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(
         in_dims.size(), 3,
-        platform::errors::InvalidArgument("The rank of Input(X) must be 3."));
+        platform::errors::InvalidArgument(
+            "Expected the rank of Input(X) is 3. But received %d.",
+            in_dims.size()));
     PADDLE_ENFORCE_EQ(mi_dims.size(), 2,
                       platform::errors::InvalidArgument(
                           "The rank of Input(MatchIndices) must be 2."));
diff --git a/paddle/fluid/operators/mkldnn/batch_norm_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/batch_norm_mkldnn_op.cc
index 622d6685dfa718b1220ac4afbf67982b5acce188..e53e052a89c6221e21b536fa8567ae013f5007be 100644
--- a/paddle/fluid/operators/mkldnn/batch_norm_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/batch_norm_mkldnn_op.cc
@@ -54,9 +54,14 @@ class BatchNormMKLDNNHandler
     const float epsilon = ctx.Attr<float>("epsilon");
     const bool fuse_with_relu = ctx.Attr<bool>("fuse_with_relu");
 
+    std::vector<std::string> DataLayout_error_msg = {"kNHWC", "kNCHW",
+                                                     "kAnyLayout", "kMKLDNN"};
     PADDLE_ENFORCE_EQ(
         x->layout(), DataLayout::kMKLDNN,
-        platform::errors::InvalidArgument("Wrong layout set for X tensor"));
+        platform::errors::InvalidArgument(
+            "Wrong layout set for X tensor. Expected layout is `kMKLDNN`, "
+            "But received %s.",
+            DataLayout_error_msg[static_cast<int>(DataLayout::kMKLDNN)]));
     PADDLE_ENFORCE_NE(
         x->format(), MKLDNNMemoryFormat::undef,
         platform::errors::InvalidArgument("Wrong format set for X tensor"));
diff --git a/paddle/fluid/operators/mkldnn/matmul_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/matmul_mkldnn_op.cc
index fddc4b4b2e5596c2f5fa6167869deb7d7cacf600..fb856d97403a4d2d982c4f37537ef6d28d89f6b2 100644
--- a/paddle/fluid/operators/mkldnn/matmul_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/matmul_mkldnn_op.cc
@@ -374,9 +374,12 @@ class DNNLMatMulKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     if (ctx.HasAttr("head_number")) {
-      PADDLE_ENFORCE_EQ(ctx.Attr<int>("head_number"), 1,
-                        platform::errors::Unimplemented(
-                            "DNNL matmul doesn't support multiple heads."));
+      PADDLE_ENFORCE_EQ(
+          ctx.Attr<int>("head_number"), 1,
+          platform::errors::Unimplemented(
+              "DNNL matmul doesn't support multiple heads. Expected "
+              "head_number=1. But received `head_number` is %d",
+              ctx.Attr<int>("head_number")));
     }
     platform::MKLDNNDeviceContext::tls().log_lib_version();
     ExecuteMatMul<T, T>(ctx);
diff --git a/paddle/fluid/operators/reader/blocking_queue.h b/paddle/fluid/operators/reader/blocking_queue.h
index 4add9afdfd45b171edd8280b50e1ec13ed64637b..8929da20b53c281d3c1602f68d88ce45acc07da8 100644
--- a/paddle/fluid/operators/reader/blocking_queue.h
+++ b/paddle/fluid/operators/reader/blocking_queue.h
@@ -54,7 +54,9 @@ class BlockingQueue {
       PADDLE_ENFORCE_LT(
           queue_.size(), capacity_,
           platform::errors::PermissionDenied(
-              "The queue size cannot exceed the set queue capacity."));
+              "The queue size cannot exceed the set queue capacity. Expected "
+              "queue size is less than %d. But received %d",
+              capacity_, queue_.size()));
       queue_.push_back(elem);
       receive_cv_.notify_one();
       return true;
@@ -73,7 +75,9 @@ class BlockingQueue {
       PADDLE_ENFORCE_LT(
           queue_.size(), capacity_,
           platform::errors::PermissionDenied(
-              "The queue size cannot exceed the set queue capacity."));
+              "The queue size cannot exceed the set queue capacity. Expected "
+              "queue size is less than %d. But received %d",
+              capacity_, queue_.size()));
       queue_.emplace_back(std::move(elem));
       receive_cv_.notify_one();
       return true;
diff --git a/paddle/fluid/operators/reader/read_op.cc b/paddle/fluid/operators/reader/read_op.cc
index d7f81dc24cced8c045223d3f62ea8055d1821aa5..9086291e17db8912b377ba4fac2efe6c099ef705 100644
--- a/paddle/fluid/operators/reader/read_op.cc
+++ b/paddle/fluid/operators/reader/read_op.cc
@@ -122,10 +122,13 @@ class ReadOp : public framework::OperatorBase {
     const std::vector<framework::proto::VarType::Type>& var_types =
         reader->VarTypes();
     const std::vector<bool>& need_check_feed = reader->NeedCheckFeed();
-    PADDLE_ENFORCE_EQ(out_arg_names.size(), need_check_feed.size(),
-                      platform::errors::InvalidArgument(
-                          "output size of read_op and the number of fed "
-                          "variables of reader do not match"));
+    PADDLE_ENFORCE_EQ(
+        out_arg_names.size(), need_check_feed.size(),
+        platform::errors::InvalidArgument(
+            "Output size of read_op and the number of fed "
+            "variables of reader do not match. Received size of output is %d, "
+            "number of fed variables of reader is %d",
+            out_arg_names.size(), need_check_feed.size()));
     for (size_t i = 0; i < out_arg_names.size(); ++i) {
       auto* out =