From 4ba977c720fc47452a58ee277c290d2bd84c0c0c Mon Sep 17 00:00:00 2001 From: Chen Weihang Date: Wed, 14 Oct 2020 14:23:34 +0800 Subject: [PATCH] Polish some error message in opeators (#27876) * polish some error message * add white list * revert shell script change --- .../fluid/operators/detail/strided_memcpy.h | 6 ++-- .../operators/distributed/parameter_recv.cc | 6 ++-- .../fused_fc_elementwise_layernorm_op.cc | 12 ++++--- paddle/fluid/operators/interpolate_v2_op.cc | 33 ++++++++++--------- .../operators/metrics/accuracy_op_xpu.cc | 2 +- .../fluid/operators/optimizers/adadelta_op.cc | 3 +- paddle/fluid/operators/optimizers/dpsgd_op.h | 10 ++++-- paddle/fluid/operators/scale_op_xpu.cc | 2 +- paddle/fluid/operators/sign_op_xpu.cc | 2 +- paddle/fluid/operators/sum_op_xpu.cc | 2 +- paddle/fluid/operators/top_k_op.cc | 4 ++- paddle/fluid/operators/top_k_op.cu | 3 +- 12 files changed, 53 insertions(+), 32 deletions(-) diff --git a/paddle/fluid/operators/detail/strided_memcpy.h b/paddle/fluid/operators/detail/strided_memcpy.h index 9ebdb369feb..e29b057ed57 100644 --- a/paddle/fluid/operators/detail/strided_memcpy.h +++ b/paddle/fluid/operators/detail/strided_memcpy.h @@ -41,7 +41,8 @@ struct StridedMemcpyFunctor { memory::Copy(gpu_place, dst, gpu_place, src, sizeof(T), cuda_ctx.stream()); #else - PADDLE_THROW("Paddle is not compiled with GPU"); + PADDLE_THROW( + platform::errors::Unavailable("Paddle is not compiled with GPU.")); #endif } } @@ -64,7 +65,8 @@ struct StridedMemcpyFunctor { memory::Copy(gpu_place, dst, gpu_place, src, sizeof(T) * dst_dim[0], cuda_ctx.stream()); #else - PADDLE_THROW("Paddle is not compiled with GPU"); + PADDLE_THROW( + platform::errors::Unavailable("Paddle is not compiled with GPU.")); #endif } } diff --git a/paddle/fluid/operators/distributed/parameter_recv.cc b/paddle/fluid/operators/distributed/parameter_recv.cc index 3dfa1d4ff93..d5d3c9c3c7c 100644 --- a/paddle/fluid/operators/distributed/parameter_recv.cc +++ b/paddle/fluid/operators/distributed/parameter_recv.cc @@ -86,8 +86,10 @@ void RecvSparseLodTensor(const CommContext &rpc_ctx, height += splited_var->Get().dims()[0]; } - PADDLE_ENFORCE_EQ(merged_var->Get().dims()[0], height, - "recved var must has same dims with local var"); + PADDLE_ENFORCE_EQ( + merged_var->Get().dims()[0], height, + platform::errors::InvalidArgument( + "Received variable must has same dimension with local variable.")); auto *merged_t = merged_var->GetMutable(); auto *merged_d = merged_t->mutable_data(cpu_place); diff --git a/paddle/fluid/operators/fused/fused_fc_elementwise_layernorm_op.cc b/paddle/fluid/operators/fused/fused_fc_elementwise_layernorm_op.cc index 08909bcb6fc..6fbe3b8d3bb 100644 --- a/paddle/fluid/operators/fused/fused_fc_elementwise_layernorm_op.cc +++ b/paddle/fluid/operators/fused/fused_fc_elementwise_layernorm_op.cc @@ -218,9 +218,11 @@ class FusedFCElementwiseLayerNormOpMaker .SetDefault(1e-5) .AddCustomChecker([](const float &epsilon) { PADDLE_ENFORCE_GE(epsilon, 0.0f, - "'epsilon' should be between 0.0 and 0.001."); + platform::errors::InvalidArgument( + "'epsilon' should be between 0.0 and 0.001.")); PADDLE_ENFORCE_LE(epsilon, 0.001f, - "'epsilon' should be between 0.0 and 0.001."); + platform::errors::InvalidArgument( + "'epsilon' should be between 0.0 and 0.001.")); }); AddAttr("begin_norm_axis", "the axis of `begin_norm_axis ... Rank(Y) - 1` will be " @@ -228,8 +230,10 @@ class FusedFCElementwiseLayerNormOpMaker "matrix [N,H]. 
[default 1].") .SetDefault(1) .AddCustomChecker([](const int &begin_norm_axis) { - PADDLE_ENFORCE_GT(begin_norm_axis, 0, - "'begin_norm_axis' should be greater than zero."); + PADDLE_ENFORCE_GT( + begin_norm_axis, 0, + platform::errors::InvalidArgument( + "'begin_norm_axis' should be greater than zero.")); }); AddComment(R"DOC( fc_out <= fc(X, W, Bias0) diff --git a/paddle/fluid/operators/interpolate_v2_op.cc b/paddle/fluid/operators/interpolate_v2_op.cc index 1f7dde9b931..3362f2474fe 100644 --- a/paddle/fluid/operators/interpolate_v2_op.cc +++ b/paddle/fluid/operators/interpolate_v2_op.cc @@ -118,9 +118,10 @@ static void Interpolate2DInferShapeCheck(framework::InferShapeContext* ctx) { PADDLE_ENFORCE( "bilinear" == interp_method || "nearest" == interp_method || "bicubic" == interp_method, - "Interpolation method can only be \"bilinear\" or \"nearest\" when " - "Input(X) dimension is 4, but got method = %s .", - interp_method); + platform::errors::InvalidArgument( + "Interpolation method can only be \"bilinear\" or \"nearest\" when " + "Input(X) dimension is 4, but got method = %s.", + interp_method)); const DataLayout data_layout = framework::StringToDataLayout( ctx->Attrs().Get("data_layout")); @@ -305,12 +306,15 @@ static void Interpolate3DInferShapeCheck(framework::InferShapeContext* ctx) { if (ctx->HasInput("OutSize") && ctx->IsRuntime()) { auto out_size_dim = ctx->GetInputDim("OutSize"); - PADDLE_ENFORCE_EQ(out_size_dim.size(), 1, - "OutSize's dimension size must be 1, but got size =%d .", - out_size_dim.size()); + PADDLE_ENFORCE_EQ( + out_size_dim.size(), 1, + platform::errors::InvalidArgument( + "OutSize's dimension size must be 1, but got size is %d.", + out_size_dim.size())); PADDLE_ENFORCE_EQ(out_size_dim[0], 3, - "OutSize's dim[0] must be 3, but got size = %d .", - out_size_dim[0]); + platform::errors::InvalidArgument( + "OutSize's dim[0] must be 3, but got size is %d.", + out_size_dim[0])); ctx->ShareLoD("X", "Out"); return; } @@ -330,10 +334,8 @@ class InterpolateV2Op : public framework::OperatorWithKernel { protected: void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), - "Input(X) of InterpolateV2Op should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("Out"), - "Output(Out) of InterpolationOp should not be null."); + OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Interpolate"); + OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Interpolate"); auto dim_x = ctx->GetInputDim("X"); // NCHW format PADDLE_ENFORCE( @@ -576,9 +578,10 @@ class InterpolateV2OpGrad : public framework::OperatorWithKernel { protected: void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null"); - PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), - "Input(Out@GRAD) should not be null"); + OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "InterpolateGrad"); + OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input", + "Out@GRAD", "InterpolateGrad"); + auto dim_x = ctx->GetInputDim("X"); if (ctx->HasOutput(framework::GradVarName("X"))) { ctx->SetOutputDim(framework::GradVarName("X"), dim_x); diff --git a/paddle/fluid/operators/metrics/accuracy_op_xpu.cc b/paddle/fluid/operators/metrics/accuracy_op_xpu.cc index c0aa00e7934..294539b696f 100644 --- a/paddle/fluid/operators/metrics/accuracy_op_xpu.cc +++ b/paddle/fluid/operators/metrics/accuracy_op_xpu.cc @@ -98,7 +98,7 @@ class AccuracyXPUKernel : public framework::OpKernel { label_int32_device, 
num_samples, class_dim, correct_data, total_data, accuracy_data); PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS, - platform::errors::Fatal("XPU kernel error!")); + platform::errors::Fatal("XPU accuracy kernel error!")); dev_ctx.Wait(); xpu_free(indices_int32_device); xpu_free(label_int32_device); diff --git a/paddle/fluid/operators/optimizers/adadelta_op.cc b/paddle/fluid/operators/optimizers/adadelta_op.cc index e3da79125be..21249d2375a 100644 --- a/paddle/fluid/operators/optimizers/adadelta_op.cc +++ b/paddle/fluid/operators/optimizers/adadelta_op.cc @@ -71,7 +71,8 @@ class AdadeltaOp : public framework::OperatorWithKernel { auto param_dim = ctx->GetInputDim("Param"); PADDLE_ENFORCE_EQ( param_dim, ctx->GetInputDim("Grad"), - "param and grad input of AdadeltaOp should have same dimension"); + platform::errors::InvalidArgument( + "Param and grad input of AdadeltaOp should have same dimension.")); PADDLE_ENFORCE_NE( framework::product(ctx->GetInputDim("AvgSquaredGrad")), 0, platform::errors::InvalidArgument( diff --git a/paddle/fluid/operators/optimizers/dpsgd_op.h b/paddle/fluid/operators/optimizers/dpsgd_op.h index e52a1dd9db1..688a7f1ad84 100644 --- a/paddle/fluid/operators/optimizers/dpsgd_op.h +++ b/paddle/fluid/operators/optimizers/dpsgd_op.h @@ -50,8 +50,14 @@ class DpsgdOpKernel : public framework::OpKernel { auto *param_out = ctx.Output("ParamOut"); auto sz = param_out->numel(); - PADDLE_ENFORCE_EQ(param->numel(), sz); - PADDLE_ENFORCE_EQ(grad->numel(), sz); + PADDLE_ENFORCE_EQ(param->numel(), sz, + platform::errors::InvalidArgument( + "Input parameter's number of elements is error, " + "expected %zu, but received %zu.")); + PADDLE_ENFORCE_EQ(grad->numel(), sz, + platform::errors::InvalidArgument( + "Input gradient's number of elements is error, " + "expected %zu, but received %zu.")); const T *lr = learning_rate->data(); const T *param_data = param->data(); diff --git a/paddle/fluid/operators/scale_op_xpu.cc b/paddle/fluid/operators/scale_op_xpu.cc index 4002be81001..b778bab8f93 100644 --- a/paddle/fluid/operators/scale_op_xpu.cc +++ b/paddle/fluid/operators/scale_op_xpu.cc @@ -49,7 +49,7 @@ class ScaleXPUKernel : public framework::OpKernel { int r = xpu::scale(dev_ctx.x_context(), in->numel(), scale, bias, bias_after_scale, in->data(), out->data()); PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS, - platform::errors::Fatal("XPU kernel error!")); + platform::errors::Fatal("XPU scale kernel error!")); } }; diff --git a/paddle/fluid/operators/sign_op_xpu.cc b/paddle/fluid/operators/sign_op_xpu.cc index 44fd555544e..86fe826c659 100644 --- a/paddle/fluid/operators/sign_op_xpu.cc +++ b/paddle/fluid/operators/sign_op_xpu.cc @@ -30,7 +30,7 @@ class SignXPUKernel : public framework::OpKernel { int r = xpu::activation_forward(xpu_context, xpu::Activation_t::SIGN, in->numel(), in->data(), out->data()); PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS, - platform::errors::Fatal("XPU kernel error!")); + platform::errors::Fatal("XPU sign kernel error!")); } }; diff --git a/paddle/fluid/operators/sum_op_xpu.cc b/paddle/fluid/operators/sum_op_xpu.cc index 14928061d23..f15910fd4f6 100644 --- a/paddle/fluid/operators/sum_op_xpu.cc +++ b/paddle/fluid/operators/sum_op_xpu.cc @@ -51,7 +51,7 @@ class SumXPUKernel : public framework::OpKernel { int r = xpu::sum_batch(dev_ctx.x_context(), ptrs.data(), out->data(), valid_count, out->numel()); PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS, - platform::errors::Fatal("XPU kernel error!")); + platform::errors::Fatal("XPU sum kernel error!")); } }; diff --git 
a/paddle/fluid/operators/top_k_op.cc b/paddle/fluid/operators/top_k_op.cc index d3f9754d307..cce5ad26317 100644 --- a/paddle/fluid/operators/top_k_op.cc +++ b/paddle/fluid/operators/top_k_op.cc @@ -36,7 +36,9 @@ class TopkOp : public framework::OperatorWithKernel { auto input_dims = ctx->GetInputDim("X"); const int k = static_cast(ctx->Attrs().Get("k")); - PADDLE_ENFORCE_GE(k, 1, "k must >= 1"); + PADDLE_ENFORCE_GE(k, 1, + platform::errors::InvalidArgument( + "Attribute k must be >= 1, but got k is %d.", k)); PADDLE_ENFORCE_GE(input_dims.size(), 1, platform::errors::InvalidArgument( "input must have >= 1d shape")); diff --git a/paddle/fluid/operators/top_k_op.cu b/paddle/fluid/operators/top_k_op.cu index 0a694e1ad5b..39a56f874d9 100644 --- a/paddle/fluid/operators/top_k_op.cu +++ b/paddle/fluid/operators/top_k_op.cu @@ -96,7 +96,8 @@ class TopkOpCUDAKernel : public framework::OpKernel { output_data, k, indices_data, input_data, input_width, input_width, static_cast(k), gridx, input_height)); default: - PADDLE_THROW("Error"); + PADDLE_THROW(platform::errors::Unavailable( + "Calculation error occurred in TopK Operator's CUDA Kernel.")); } } }; -- GitLab
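Illustrative sketch of the convention this commit applies across the touched operators: bare message strings passed to PADDLE_ENFORCE_* / PADDLE_THROW are wrapped in a typed error from platform::errors, and the offending value is reported in the message. The macro and error-class usage mirror the top_k_op.cc hunk above; the CheckTopKAttribute helper and the include path are assumptions for illustration only, not code from this commit.

#include "paddle/fluid/platform/enforce.h"  // assumed include path; provides PADDLE_ENFORCE_* and platform::errors

namespace paddle {
namespace operators {

// Hypothetical helper, shown only to illustrate the error-message convention.
void CheckTopKAttribute(int k) {
  // Old style: PADDLE_ENFORCE_GE(k, 1, "k must >= 1");
  // New style: typed error class plus the received value in the message.
  PADDLE_ENFORCE_GE(k, 1,
                    platform::errors::InvalidArgument(
                        "Attribute k must be >= 1, but got k is %d.", k));
}

}  // namespace operators
}  // namespace paddle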