From 6c1acf34ed2d612c98b8a3e1709486cf5ca15789 Mon Sep 17 00:00:00 2001
From: xiemoyuan <71377852+xiemoyuan@users.noreply.github.com>
Date: Fri, 9 Oct 2020 17:15:40 +0800
Subject: [PATCH] Optimize the error message for OP (#27617)

* Optimize the error message for OPs.

* Optimize the error message for OPs in detail.
---
 .../fluid/operators/beam_search_decode_op.cc  |  3 +-
 paddle/fluid/operators/chunk_eval_op.h        |  2 +-
 paddle/fluid/operators/cudnn_lstm_op.cc       |  4 +--
 paddle/fluid/operators/edit_distance_op.cu    |  5 +--
 paddle/fluid/operators/edit_distance_op.h     | 14 ++++----
 paddle/fluid/operators/expand_as_op.cc        |  5 +--
 paddle/fluid/operators/expand_as_op.h         | 30 +++++++++-------
 paddle/fluid/operators/linear_chain_crf_op.h  | 36 ++++++++++++-------
 8 files changed, 61 insertions(+), 38 deletions(-)

diff --git a/paddle/fluid/operators/beam_search_decode_op.cc b/paddle/fluid/operators/beam_search_decode_op.cc
index 3cb3f1d48bf..4bf4ba1120d 100644
--- a/paddle/fluid/operators/beam_search_decode_op.cc
+++ b/paddle/fluid/operators/beam_search_decode_op.cc
@@ -117,7 +117,8 @@ void BeamSearchDecodeFunctor::apply() const {
 
 template <>
 void BeamSearchDecodeFunctor::apply<bool>() const {
-  PADDLE_THROW("beam search decode op does not support bool!");
+  PADDLE_THROW(platform::errors::InvalidArgument(
+      "beam search decode op does not support bool!"));
 }
 
 class BeamSearchDecodeOp : public framework::OperatorBase {
diff --git a/paddle/fluid/operators/chunk_eval_op.h b/paddle/fluid/operators/chunk_eval_op.h
index bee3ab37448..555130fe852 100644
--- a/paddle/fluid/operators/chunk_eval_op.h
+++ b/paddle/fluid/operators/chunk_eval_op.h
@@ -146,7 +146,7 @@ class ChunkEvalKernel : public framework::OpKernel<T> {
       tag_end = -1;
       tag_single = -1;
     } else {
-      PADDLE_THROW("Unknown chunk scheme.");
+      PADDLE_THROW(platform::errors::InvalidArgument("Unknown chunk scheme."));
     }
     other_chunk_type = num_chunk_types = context.Attr<int>("num_chunk_types");
     excluded_chunk_types.insert(
diff --git a/paddle/fluid/operators/cudnn_lstm_op.cc b/paddle/fluid/operators/cudnn_lstm_op.cc
index 82954bc109a..50486ad041a 100644
--- a/paddle/fluid/operators/cudnn_lstm_op.cc
+++ b/paddle/fluid/operators/cudnn_lstm_op.cc
@@ -274,8 +274,8 @@ template <typename T>
 class NotImpleKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    PADDLE_THROW(
-        "CPU is not support for this kernel now. Will be add in the future");
+    PADDLE_THROW(platform::errors::Unimplemented(
+        "CPU is not supported for this kernel now. It will be added in the future."));
   }
 };
 
diff --git a/paddle/fluid/operators/edit_distance_op.cu b/paddle/fluid/operators/edit_distance_op.cu
index 8d79626aa87..80490af33a1 100644
--- a/paddle/fluid/operators/edit_distance_op.cu
+++ b/paddle/fluid/operators/edit_distance_op.cu
@@ -111,8 +111,9 @@ class EditDistanceGPUKernel : public framework::OpKernel<T> {
 
     if (normalized) {
       for (size_t i = 1; i < ref_lod.size(); ++i) {
-        PADDLE_ENFORCE(ref_lod[i] > ref_lod[i - 1],
-                       "Reference string %d is empty.", i);
+        PADDLE_ENFORCE_GT(ref_lod[i], ref_lod[i - 1],
+                          platform::errors::InvalidArgument(
+                              "Reference string %d is empty.", i));
       }
     }
 
diff --git a/paddle/fluid/operators/edit_distance_op.h b/paddle/fluid/operators/edit_distance_op.h
index 3e1aec7ceee..ef290c2eff2 100644
--- a/paddle/fluid/operators/edit_distance_op.h
+++ b/paddle/fluid/operators/edit_distance_op.h
@@ -58,8 +58,9 @@ class EditDistanceKernel : public framework::OpKernel<T> {
 
     if (normalized) {
       for (size_t i = 1; i < ref_lod.size(); ++i) {
-        PADDLE_ENFORCE(ref_lod[i] > ref_lod[i - 1],
-                       "Reference string %d is empty.", i);
+        PADDLE_ENFORCE_GT(ref_lod[i], ref_lod[i - 1],
+                          platform::errors::InvalidArgument(
+                              "Reference string %d is empty.", i));
       }
     }
     auto num_strs = hyp_lod.size() - 1;
@@ -106,10 +107,11 @@ class EditDistanceKernel : public framework::OpKernel<T> {
       }
 
       if (normalized) {
-        PADDLE_ENFORCE(n > 0,
-                       "The reference string (#%d) cannot be empty "
-                       "when Attr(normalized) is enabled.",
-                       n);
+        PADDLE_ENFORCE_GT(n, 0UL,
+                          platform::errors::InvalidArgument(
+                              "The reference string (#%d) cannot be empty "
+                              "when Attr(normalized) is enabled.",
+                              n));
         distance = distance / n;
       }
       out[num] = distance;
diff --git a/paddle/fluid/operators/expand_as_op.cc b/paddle/fluid/operators/expand_as_op.cc
index 870464efed2..25b83ed93f7 100644
--- a/paddle/fluid/operators/expand_as_op.cc
+++ b/paddle/fluid/operators/expand_as_op.cc
@@ -89,8 +89,9 @@ class ExpandAsGradOp : public framework::OperatorWithKernel {
 
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true);
-    PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true);
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "ExpandAs");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "ExpandAs");
 
     auto x_dims = ctx->GetInputDim("X");
     auto x_grad_name = framework::GradVarName("X");
diff --git a/paddle/fluid/operators/expand_as_op.h b/paddle/fluid/operators/expand_as_op.h
index b189aa6f122..cbaeb0c4e42 100644
--- a/paddle/fluid/operators/expand_as_op.h
+++ b/paddle/fluid/operators/expand_as_op.h
@@ -61,7 +61,10 @@ class ExpandAsKernel : public framework::OpKernel<T> {
     switch (rank) {
       REP_EXPAND_AS_TEMPLATE(MAX_RANK_SUPPORTED)
       default:
-        PADDLE_THROW("Only support tensor with rank being between 1 and 6.");
+        PADDLE_THROW(platform::errors::InvalidArgument(
+            "Only support tensor with rank being between 1 and 6. But received "
+            "tensor X's rank = %d.",
+            rank));
     }
   }
 
@@ -77,13 +80,19 @@ class ExpandAsKernel : public framework::OpKernel<T> {
     auto x_dims = in0->dims();
     auto y_dims = target_tensor->dims();
     for (int i = 0; i < y_dims.size(); ++i) {
-      PADDLE_ENFORCE_NE(x_dims[i], 0, "X(input) should not have 0 dim");
+      PADDLE_ENFORCE_NE(
+          x_dims[i], 0UL,
+          platform::errors::InvalidArgument(
+              "X(input) should not have 0 dim. But received x_dims[%d] = 0.",
+              i));
       bcast_dims[i] = y_dims[i] / x_dims[i];
       bcast_dims_remainder += y_dims[i] % x_dims[i];
     }
-    PADDLE_ENFORCE_EQ(bcast_dims_remainder, 0,
-                      "X(input) could not be broadcast together with remapped "
-                      "shape(expand tensor's shape)");
+    PADDLE_ENFORCE_EQ(
+        bcast_dims_remainder, 0UL,
+        platform::errors::InvalidArgument(
+            "X(input) could not be broadcast together with remapped "
+            "shape(expand tensor's shape)"));
     framework::DDim out_dims(in_dims);
     for (size_t i = 0; i < bcast_dims.size(); ++i) {
       out_dims[i] *= bcast_dims[i];
@@ -137,7 +146,10 @@ class ExpandAsGradKernel : public framework::OpKernel<T> {
       switch (dims) {
         REP_EXPAND_AS_GRAD_TEMPLATE(MAX_RANK_SUPPORTED)
         default:
-          PADDLE_THROW("Only support tensor with rank being between 1 and 6.");
+          PADDLE_THROW(platform::errors::InvalidArgument(
+              "Only support tensor with rank being between 1 and 6. But "
+              "received tensor's rank = %d.",
+              dims));
       }
     }
   }
@@ -149,12 +161,6 @@ class ExpandAsGradKernel : public framework::OpKernel<T> {
                      const std::vector<int>& reduce_dims_vec) const {
     size_t reshape_size = reshape_dims_vec.size();
     size_t reduce_size = reduce_dims_vec.size();
-    PADDLE_ENFORCE_EQ(reshape_size, reshape_dims_vec.size(),
-                      "Inconsistent size between template Dims and "
-                      "reshape dimensions.");
-    PADDLE_ENFORCE_EQ(reduce_size, reduce_dims_vec.size(),
-                      "Inconsistent size between template Dims and "
-                      "reduce dimensions.");
     auto* in0 = context.Input<Tensor>(framework::GradVarName("Out"));
     auto* out0 = context.Output<Tensor>(framework::GradVarName("X"));
     out0->mutable_data<T>(context.GetPlace());
diff --git a/paddle/fluid/operators/linear_chain_crf_op.h b/paddle/fluid/operators/linear_chain_crf_op.h
index 488cbc6d517..d4f3fc5d7a6 100644
--- a/paddle/fluid/operators/linear_chain_crf_op.h
+++ b/paddle/fluid/operators/linear_chain_crf_op.h
@@ -27,9 +27,10 @@ static inline T NormalizeL1(T* x, size_t len) {
   // (This comment is from the old LinearChainCRFLayer.)
   // Right now, we just bet that sum won't be zero. If this really happens, we
   // will figure out what should be done then.
-  PADDLE_ENFORCE(sum,
-                 "The unnormalized probabilities of all possible unfinished "
-                 "sequences must be greater than 0.");
+  PADDLE_ENFORCE_GT(
+      sum, 0., platform::errors::InvalidArgument(
+                   "The unnormalized probabilities of all possible unfinished "
+                   "sequences must be greater than 0."));
   T s = 1. / sum;
   for (size_t i = 0; i < len; ++i) x[i] *= s;
   return sum;
@@ -84,13 +85,19 @@ class LinearChainCRFOpKernel : public framework::OpKernel<T> {
       const Tensor* label_length = ctx.Input<Tensor>("Length");
       length_data = label_length->data<int64_t>();
       seq_num = label_length->numel();
-      PADDLE_ENFORCE_EQ(seq_num, emission_dims[0],
-                        "the size of Input(length) must be equal to "
-                        "emission_dims[0].");
+      PADDLE_ENFORCE_EQ(
+          seq_num, emission_dims[0],
+          platform::errors::InvalidArgument(
+              "the size of Input(length) must be equal to "
+              "emission_dims[0]. But input_size = %d, emission_dims[0] = %d.",
+              seq_num, emission_dims[0]));
       auto label_dims = label->dims();
-      PADDLE_ENFORCE_EQ(seq_num, label_dims[0],
-                        "the size of Input(length) must be equal to "
-                        "label_dims[0].");
+      PADDLE_ENFORCE_EQ(
+          seq_num, label_dims[0],
+          platform::errors::InvalidArgument(
+              "the size of Input(length) must be equal to "
+              "label_dims[0]. But input_size = %d, label_dims[0] = %d.",
+              seq_num, label_dims[0]));
 
       batch_size = emission_dims[0] * emission_dims[1];
       tag_num = emission_dims[2];
@@ -102,7 +109,9 @@ class LinearChainCRFOpKernel : public framework::OpKernel<T> {
       math::set_constant(ctx.device_context(), alpha, 0.0);
     } else {
       in_lod = ctx.Input<LoDTensor>("Label")->lod();
-      PADDLE_ENFORCE_NE(in_lod.size(), 0, "Input(Label) must be a sequence.");
+      PADDLE_ENFORCE_NE(in_lod.size(), 0,
+                        platform::errors::InvalidArgument(
+                            "Input(Label) must be a sequence."));
       seq_num = in_lod[0].size() - 1;
       batch_size = emission_dims[0];
       tag_num = emission_dims[1];
@@ -204,7 +213,8 @@ class LinearChainCRFOpKernel : public framework::OpKernel<T> {
     const int64_t* lbl = label.data<int64_t>();
     PADDLE_ENFORCE_LT(
         static_cast<size_t>(*std::max_element(lbl, lbl + seq_length)), tag_num,
-        "An invalid tag label that execesses the largest tag number.");
+        platform::errors::InvalidArgument(
+            "An invalid tag label that exceeds the largest tag number."));
 
     // Calculate the nominator part, which depends on the label sequence.
     ll += w[lbl[0]] /*start transition*/ + x[lbl[0]] +
@@ -254,7 +264,9 @@ class LinearChainCRFGradOpKernel : public framework::OpKernel<T> {
           {emission_dims[0] * emission_dims[1], emission_dims[2]});
     } else {
      in_lod = ctx.Input<LoDTensor>("Label")->lod();
-      PADDLE_ENFORCE_NE(in_lod.size(), 0, "Input(Label) must be a sequence.");
+      PADDLE_ENFORCE_NE(in_lod.size(), 0,
+                        platform::errors::InvalidArgument(
+                            "Input(Label) must be a sequence."));
       seq_num = static_cast<int64_t>(in_lod[0].size() - 1);
     }
 
-- 
GitLab
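
Every hunk in this patch applies the same convention: a bare PADDLE_ENFORCE(cond, msg) or PADDLE_THROW(msg) becomes a typed comparison macro (PADDLE_ENFORCE_GT/_EQ/_NE/_LT) or a PADDLE_THROW carrying a platform::errors category (InvalidArgument, Unimplemented, ...), with the offending runtime values printf-formatted into the message. Below is a minimal sketch of that convention, assuming it is compiled inside the Paddle source tree; CheckRankSupported is a hypothetical helper written for illustration and is not part of this patch.

// Sketch only: illustrates the error-message convention used in this patch.
// Assumes Paddle's enforce utilities; CheckRankSupported is a hypothetical
// helper and does not exist in the tree.
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/errors.h"

namespace paddle {
namespace operators {

inline void CheckRankSupported(int rank) {
  // Comparison macros take both operands plus a categorized error, so the
  // failure report can show the actual value that violated the check.
  PADDLE_ENFORCE_GT(rank, 0,
                    platform::errors::InvalidArgument(
                        "Expected rank > 0, but received rank = %d.", rank));
  // Unconditional failures go through PADDLE_THROW with the same categories.
  if (rank > 6) {
    PADDLE_THROW(platform::errors::Unimplemented(
        "Only tensors with rank between 1 and 6 are supported, but received "
        "rank = %d.",
        rank));
  }
}

}  // namespace operators
}  // namespace paddle

For pure input/output presence checks, expand_as_op.cc instead uses OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "ExpandAs"), which, as used here, replaces hand-written HasInput enforcements with a standardized "not found" message built from the variable and operator names.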